repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
eoghan2t9/Oppo-Find5-4.2-Kernel | arch/arm/mach-versatile/versatile_ab.c | 5094 | 1471 | /*
* linux/arch/arm/mach-versatile/versatile_ab.c
*
* Copyright (C) 2004 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include "core.h"
/*
 * Machine description for the ARM Versatile AB board.  MACHINE_START/
 * MACHINE_END emit a struct machine_desc into a dedicated section that
 * the ARM boot code scans to match the machine type handed over by the
 * boot loader.
 */
MACHINE_START(VERSATILE_AB, "ARM-Versatile AB")
	/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
	.atag_offset	= 0x100,		/* boot ATAGs at RAM base + 0x100 */
	.map_io		= versatile_map_io,	/* static I/O mappings */
	.init_early	= versatile_init_early,
	.init_irq	= versatile_init_irq,
	.handle_irq	= vic_handle_irq,	/* low-level VIC IRQ entry */
	.timer		= &versatile_timer,
	.init_machine	= versatile_init,
	.restart	= versatile_restart,
MACHINE_END
| gpl-2.0 |
lucatib/a33_linux | drivers/gpu/drm/nouveau/nv50_evo.c | 5350 | 10937 | /*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nv50_display.h"
/*
 * Tear down an EVO display channel and clear the caller's pointer.
 *
 * Teardown order matters: drop the RAMHT reference and per-channel
 * gpuobj state before releasing the push buffer, and unmap the USER
 * control registers last.  Safe to call when *pevo is already NULL.
 */
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
	struct nouveau_channel *evo = *pevo;

	if (!evo)
		return;
	*pevo = NULL;

	nouveau_ramht_ref(NULL, &evo->ramht, evo);
	nouveau_gpuobj_channel_takedown(evo);
	nouveau_bo_unmap(evo->pushbuf_bo);
	nouveau_bo_ref(NULL, &evo->pushbuf_bo);

	/* evo->user is only set once ioremap() succeeded in channel_new() */
	if (evo->user)
		iounmap(evo->user);
	kfree(evo);
}
/*
 * Fill in an EVO DMA object (class 0x3d) describing a VRAM window.
 *
 * @memtype: display memory type.  Bit 31 selects large pages on
 *           chipsets >= 0xc0; bits 8..15 are forwarded to
 *           nv50_gpuobj_dma_init().  Forced to 0 on NV50, where the
 *           format is supplied through a method instead.
 */
void
nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	u32 flags5;

	if (dev_priv->chipset < 0xc0) {
		/* not supported on 0x50, specified in format mthd */
		if (dev_priv->chipset == 0x50)
			memtype = 0;
		flags5 = 0x00010000;
	} else {
		if (memtype & 0x80000000)
			flags5 = 0x00000000; /* large pages */
		else
			flags5 = 0x00020000;
	}

	nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
			     NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
	nv_wo32(obj, 0x14, flags5);
	/* flush instance memory so the hardware sees the updated object */
	dev_priv->engine.instmem.flush(obj->dev);
}
/*
 * Allocate an EVO DMA object, initialise it and insert it into the
 * (shared) RAMHT under @handle.  On success an optional extra
 * reference is returned through @pobj; the local allocation reference
 * is always dropped, leaving the RAMHT entry to keep the object alive.
 */
int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
		    u64 base, u64 size, struct nouveau_gpuobj **pobj)
{
	struct nv50_display *disp = nv50_display(evo->dev);
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	/* 6 dwords, 32-byte aligned, allocated from the master channel */
	ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
	if (ret)
		return ret;
	obj->engine = NVOBJ_ENGINE_DISPLAY;

	nv50_evo_dmaobj_init(obj, memtype, base, size);

	ret = nouveau_ramht_insert(evo, handle, obj);
	if (ret)
		goto out;
	if (pobj)
		nouveau_gpuobj_ref(obj, pobj);
out:
	/* drop the allocation ref; RAMHT (and *pobj, if any) hold their own */
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
/*
 * Allocate and partially construct EVO channel @chid: software state,
 * a pinned and mapped 4 KiB VRAM push buffer, and an ioremap of the
 * channel's USER control registers.  Secondary channels share the
 * master channel's RAMHT.  On failure the partially-built channel is
 * freed and *pevo is reset to NULL by nv50_evo_channel_del().
 */
static int
nv50_evo_channel_new(struct drm_device *dev, int chid,
		     struct nouveau_channel **pevo)
{
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_channel *evo;
	int ret;

	evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
	if (!evo)
		return -ENOMEM;
	*pevo = evo;

	evo->id = chid;
	evo->dev = dev;
	evo->user_get = 4;
	evo->user_put = 0;

	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
			     &evo->pushbuf_bo);
	if (ret == 0)
		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

	ret = nouveau_bo_map(evo->pushbuf_bo);
	if (ret) {
		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

	/* map this channel's USER area from BAR0 */
	evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
			    NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
	if (!evo->user) {
		NV_ERROR(dev, "Error mapping EVO control regs.\n");
		nv50_evo_channel_del(pevo);
		return -ENOMEM;
	}

	/* bind primary evo channel's ramht to the channel */
	if (disp->master && evo != disp->master)
		nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL);

	return 0;
}
/*
 * Bring an EVO channel up: point the hardware at its push buffer,
 * enable the channel's DMA engine, and prime the software ring with
 * NOP slots.  Returns -EBUSY if the channel never reports ready.
 */
static int
nv50_evo_channel_init(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->dev;
	int id = evo->id, ret, i;
	u64 pushbuf = evo->pushbuf_bo->bo.offset;
	u32 tmp;

	/* NOTE(review): meaning of these CTRL state bits is undocumented;
	 * the recovery writes below mirror the required hardware sequence. */
	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x009f0000) == 0x00020000)
		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);

	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x003f0000) == 0x00030000)
		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);

	/* initialise fifo */
	nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
		     NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
		     NV50_PDISPLAY_EVO_DMA_CB_VALID);
	nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
	nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);

	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
		return -EBUSY;
	}

	/* enable error reporting on the channel */
	nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);

	/* software ring state: 4 KiB ring minus 2 reserved slots, rounded
	 * down to a multiple of 8 dwords */
	evo->dma.max = (4096/4) - 2;
	evo->dma.max &= ~7;
	evo->dma.put = 0;
	evo->dma.cur = evo->dma.put;
	evo->dma.free = evo->dma.max - evo->dma.cur;

	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(evo, 0);

	return 0;
}
/*
 * Quiesce an EVO channel: mask its error reporting, acknowledge any
 * pending interrupt, disable DMA and wait for the hardware to go
 * idle.  A timeout here is logged but otherwise non-fatal.
 */
static void
nv50_evo_channel_fini(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->dev;
	int id = evo->id;

	nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
	nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));	/* ack pending intr */
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
		NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
	}
}
void
nv50_evo_destroy(struct drm_device *dev)
{
struct nv50_display *disp = nv50_display(dev);
int i;
for (i = 0; i < 2; i++) {
if (disp->crtc[i].sem.bo) {
nouveau_bo_unmap(disp->crtc[i].sem.bo);
nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
}
nv50_evo_channel_del(&disp->crtc[i].sync);
}
nouveau_gpuobj_ref(NULL, &disp->ntfy);
nv50_evo_channel_del(&disp->master);
}
/*
 * Create the master EVO channel, the per-head "display sync" channels
 * used for page flipping, and the DMA objects they reference.  On any
 * failure everything built so far is torn down via nv50_evo_destroy().
 */
int
nv50_evo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_gpuobj *ramht = NULL;
	struct nouveau_channel *evo;
	int ret, i, j;

	/* create primary evo channel, the one we use for modesetting
	 * purporses
	 */
	ret = nv50_evo_channel_new(dev, 0, &disp->master);
	if (ret)
		return ret;
	evo = disp->master;

	/* setup object management on it, any other evo channel will
	 * use this also as there's no per-channel support on the
	 * hardware
	 */
	ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
				 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
		goto err;
	}

	ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
	if (ret) {
		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
		goto err;
	}

	ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
	if (ret) {
		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
		goto err;
	}

	/* wrap the raw gpuobj in a RAMHT; drop our gpuobj reference
	 * afterwards since evo->ramht holds its own */
	ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
	nouveau_gpuobj_ref(NULL, &ramht);
	if (ret)
		goto err;

	/* not sure exactly what this is..
	 *
	 * the first dword of the structure is used by nvidia to wait on
	 * full completion of an EVO "update" command.
	 *
	 * method 0x8c on the master evo channel will fill a lot more of
	 * this structure with some undefined info
	 */
	ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
				  disp->ntfy->vinst, disp->ntfy->size, NULL);
	if (ret)
		goto err;

	/* create some default objects for the scanout memtypes we support */
	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
				  0, dev_priv->vram_size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
				  0, dev_priv->vram_size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
				  (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00),
				  0, dev_priv->vram_size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
				  (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00),
				  0, dev_priv->vram_size, NULL);
	if (ret)
		goto err;

	/* create "display sync" channels and other structures we need
	 * to implement page flipping
	 */
	for (i = 0; i < 2; i++) {
		struct nv50_display_crtc *dispc = &disp->crtc[i];
		u64 offset;

		ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
		if (ret)
			goto err;

		/* 4 KiB VRAM semaphore buffer used to track flip completion */
		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
				     0, 0x0000, &dispc->sem.bo);
		if (!ret) {
			ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
			if (!ret)
				ret = nouveau_bo_map(dispc->sem.bo);
			if (ret)
				nouveau_bo_ref(NULL, &dispc->sem.bo);
			/* offset only read below when ret == 0, i.e. valid */
			offset = dispc->sem.bo->bo.offset;
		}

		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
					  offset, 4096, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
					  0, dev_priv->vram_size, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
					  (dev_priv->chipset < 0xc0 ?
					  0x7a00 : 0xfe00),
					  0, dev_priv->vram_size, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
					  (dev_priv->chipset < 0xc0 ?
					  0x7000 : 0xfe00),
					  0, dev_priv->vram_size, NULL);
		if (ret)
			goto err;

		/* fill the semaphore buffer with a recognisable pattern */
		for (j = 0; j < 4096; j += 4)
			nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
		dispc->sem.offset = 0;
	}

	return 0;

err:
	nv50_evo_destroy(dev);
	return ret;
}
/*
 * Start all EVO channels: the master modesetting channel first, then
 * the two per-head sync channels.  Stops at (and returns) the first
 * failure.
 */
int
nv50_evo_init(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int head, ret;

	ret = nv50_evo_channel_init(disp->master);
	for (head = 0; ret == 0 && head < 2; head++)
		ret = nv50_evo_channel_init(disp->crtc[head].sync);
	return ret;
}
/*
 * Stop all EVO channels that were created: sync channels first, the
 * master channel last.  Channels that were never created are skipped.
 */
void
nv50_evo_fini(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int head;

	for (head = 0; head < 2; head++) {
		struct nouveau_channel *sync = disp->crtc[head].sync;

		if (sync)
			nv50_evo_channel_fini(sync);
	}

	if (disp->master)
		nv50_evo_channel_fini(disp->master);
}
| gpl-2.0 |
smac0628/caf-LA.BF.1.1.2.1 | sound/synth/util_mem.c | 9958 | 4663 | /*
* Copyright (C) 2000 Takashi Iwai <tiwai@suse.de>
*
* Generic memory management routines for soundcard memory allocation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/util_mem.h>
MODULE_AUTHOR("Takashi Iwai");
MODULE_DESCRIPTION("Generic memory management routines for soundcard memory allocation");
MODULE_LICENSE("GPL");
#define get_memblk(p) list_entry(p, struct snd_util_memblk, list)
/*
* create a new memory manager
*/
/*
 * Allocate and initialise a memory manager covering @memsize bytes.
 * Returns NULL on allocation failure; free with snd_util_memhdr_free().
 */
struct snd_util_memhdr *
snd_util_memhdr_new(int memsize)
{
	struct snd_util_memhdr *hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);

	if (!hdr)
		return NULL;

	hdr->size = memsize;
	INIT_LIST_HEAD(&hdr->block);
	mutex_init(&hdr->block_mutex);
	return hdr;
}
/*
* free a memory manager
*/
void snd_util_memhdr_free(struct snd_util_memhdr *hdr)
{
struct list_head *p;
if (!hdr)
return;
/* release all blocks */
while ((p = hdr->block.next) != &hdr->block) {
list_del(p);
kfree(get_memblk(p));
}
kfree(hdr);
}
/*
 * Allocate a memory block of @size bytes using first-fit, without
 * taking the mutex - the caller must hold hdr->block_mutex.  The block
 * list is kept sorted by offset, so allocation scans the gaps between
 * consecutive blocks.  Returns NULL when no gap is large enough.
 */
struct snd_util_memblk *
__snd_util_mem_alloc(struct snd_util_memhdr *hdr, int size)
{
	struct snd_util_memblk *blk;
	unsigned int units, prev_offset;
	struct list_head *p;

	if (snd_BUG_ON(!hdr || size <= 0))
		return NULL;

	/* word alignment */
	units = size;
	if (units & 1)
		units++;
	if (units > hdr->size)
		return NULL;

	/* look for empty block */
	prev_offset = 0;
	list_for_each(p, &hdr->block) {
		blk = get_memblk(p);
		if (blk->offset - prev_offset >= units)
			goto __found;
		prev_offset = blk->offset + blk->size;
	}
	/* no inner gap found - try the tail space after the last block;
	 * here p == &hdr->block, so p->prev below is the last block */
	if (hdr->size - prev_offset < units)
		return NULL;
__found:
	return __snd_util_memblk_new(hdr, units, p->prev);
}
/*
* create a new memory block with the given size
* the block is linked next to prev
*/
/*
 * Create a block descriptor of @units bytes and link it directly after
 * @prev, which keeps the list sorted by offset.  The new block starts
 * where the predecessor ends (offset 0 when placed at the list head).
 * Caller holds hdr->block_mutex.
 */
struct snd_util_memblk *
__snd_util_memblk_new(struct snd_util_memhdr *hdr, unsigned int units,
		      struct list_head *prev)
{
	struct snd_util_memblk *blk;

	/* descriptor plus client-specific trailing data */
	blk = kmalloc(sizeof(*blk) + hdr->block_extra_size, GFP_KERNEL);
	if (!blk)
		return NULL;

	if (prev != &hdr->block) {
		struct snd_util_memblk *before = get_memblk(prev);
		blk->offset = before->offset + before->size;
	} else {
		blk->offset = 0;
	}

	blk->size = units;
	list_add(&blk->list, prev);
	hdr->nblocks++;
	hdr->used += units;
	return blk;
}
/*
* allocate a memory block (with mutex)
*/
struct snd_util_memblk *
snd_util_mem_alloc(struct snd_util_memhdr *hdr, int size)
{
struct snd_util_memblk *blk;
mutex_lock(&hdr->block_mutex);
blk = __snd_util_mem_alloc(hdr, size);
mutex_unlock(&hdr->block_mutex);
return blk;
}
/*
 * remove the block from linked-list and free resource
 * (without mutex) - the caller must hold hdr->block_mutex
 */
void
__snd_util_mem_free(struct snd_util_memhdr *hdr, struct snd_util_memblk *blk)
{
	list_del(&blk->list);
	hdr->nblocks--;
	hdr->used -= blk->size;
	kfree(blk);
}
/*
 * free a memory block (with mutex)
 * Returns 0 on success, -EINVAL when @hdr or @blk is NULL.
 */
int snd_util_mem_free(struct snd_util_memhdr *hdr, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!hdr || !blk))
		return -EINVAL;

	mutex_lock(&hdr->block_mutex);
	__snd_util_mem_free(hdr, blk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}
/*
 * return available memory size
 *
 * Total managed size minus the sum of all allocated blocks.  Guard
 * against a NULL @hdr before taking the mutex, for consistency with
 * snd_util_mem_free(); report zero available memory in that case.
 */
int snd_util_mem_avail(struct snd_util_memhdr *hdr)
{
	unsigned int size;

	if (snd_BUG_ON(!hdr))
		return 0;
	mutex_lock(&hdr->block_mutex);
	size = hdr->size - hdr->used;
	mutex_unlock(&hdr->block_mutex);
	return size;
}
/* public API for sound drivers that manage on-card sample memory;
 * the __snd_util_* variants are the lock-free versions for callers
 * already holding hdr->block_mutex */
EXPORT_SYMBOL(snd_util_memhdr_new);
EXPORT_SYMBOL(snd_util_memhdr_free);
EXPORT_SYMBOL(snd_util_mem_alloc);
EXPORT_SYMBOL(snd_util_mem_free);
EXPORT_SYMBOL(snd_util_mem_avail);
EXPORT_SYMBOL(__snd_util_mem_alloc);
EXPORT_SYMBOL(__snd_util_mem_free);
EXPORT_SYMBOL(__snd_util_memblk_new);
/*
 * INIT part
 *
 * There is no module-global state to set up; these stubs exist only
 * so the file can be built as a loadable module.
 */
static int __init alsa_util_mem_init(void)
{
	return 0;	/* nothing to initialise */
}

static void __exit alsa_util_mem_exit(void)
{
}

module_init(alsa_util_mem_init)
module_exit(alsa_util_mem_exit)
| gpl-2.0 |
ffolkes/android_kernel_samsung_smdk4412 | arch/ia64/kernel/crash.c | 11238 | 6919 | /*
* arch/ia64/kernel/crash.c
*
* Architecture specific (ia64) functions for kexec based crash dumps.
*
* Created by: Khalid Aziz <khalid.aziz@hp.com>
* Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
* Copyright (C) 2005 Intel Corp Zou Nan hai <nanhai.zou@intel.com>
*
*/
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <linux/kexec.h>
#include <linux/elfcore.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/kdebug.h>
#include <asm/mca.h>
int kdump_status[NR_CPUS];		/* per-cpu flag: set once the cpu is parked in kdump_cpu_freeze() */
static atomic_t kdump_cpu_frozen;	/* count of cpus parked in kdump_cpu_freeze() */
atomic_t kdump_in_progress;
static int kdump_freeze_monarch;	/* also freeze the INIT monarch on leave? */
static int kdump_on_init = 1;		/* sysctl: take a kdump on INIT */
static int kdump_on_fatal_mca = 1;	/* sysctl: take a kdump on unrecoverable MCA */
/*
 * Append one ELF note (header, name, payload) to @buf and return a
 * pointer just past it.  Each part is advanced in 4-byte (Elf64_Word)
 * units, rounding sizes up as the ELF note format requires.
 */
static inline Elf64_Word
*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
		size_t data_len)
{
	struct elf_note *note = (struct elf_note *)buf;

	note->n_namesz = strlen(name) + 1;	/* include the NUL */
	note->n_descsz = data_len;
	note->n_type = type;
	buf += (sizeof(*note) + 3)/4;		/* round up to whole words */
	memcpy(buf, name, note->n_namesz);
	buf += (note->n_namesz + 3)/4;
	memcpy(buf, data, data_len);
	buf += (data_len + 3)/4;
	return buf;
}
/* Terminate the crash-notes buffer with an all-zero elf_note header. */
static void
final_note(void *buf)
{
	memset(buf, 0, sizeof(struct elf_note));
}
extern void ia64_dump_cpu_regs(void *);
static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
/*
 * Save this cpu's register state as an NT_PRSTATUS note in the
 * per-cpu crash_notes buffer, for the dump-capture kernel to read.
 */
void
crash_save_this_cpu(void)
{
	void *buf;
	unsigned long cfm, sof, sol;
	int cpu = smp_processor_id();
	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);

	elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
	memset(prstatus, 0, sizeof(*prstatus));
	prstatus->pr_pid = current->pid;

	ia64_dump_cpu_regs(dst);
	/* dst[43] carries the CFM; use its sof/sol fields to rewind the
	 * RSE pointer stored at dst[46].
	 * NOTE(review): indices 43/46 follow the ia64 elf_gregset
	 * layout - confirm against arch/ia64 register dump code. */
	cfm = dst[43];
	sol = (cfm >> 7) & 0x7f;
	sof = cfm & 0x7f;
	dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
			sof - sol);

	buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, prstatus,
			sizeof(*prstatus));
	final_note(buf);
}
#ifdef CONFIG_SMP
/*
 * Wait for every other online cpu to park itself in
 * kdump_cpu_freeze().  Polls once per millisecond for up to one
 * second; returns 0 when all cpus are frozen, 1 on timeout.
 */
static int
kdump_wait_cpu_freeze(void)
{
	int nr_other_cpus = num_online_cpus() - 1;
	int tries;

	for (tries = 0; tries < 1000; tries++) {
		if (atomic_read(&kdump_cpu_frozen) == nr_other_cpus)
			return 0;
		udelay(1000);
	}
	return 1;
}
#endif
/*
 * Quiesce the machine ahead of the kexec crash jump: mask MCA/INIT on
 * this cpu and shoot down all others, first by IPI and then by INIT
 * for any cpu that did not respond.
 */
void
machine_crash_shutdown(struct pt_regs *pt)
{
	/* This function is only called after the system
	 * has paniced or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	kexec_disable_iosapic();
#ifdef CONFIG_SMP
	/*
	 * If kdump_on_init is set and an INIT is asserted here, kdump will
	 * be started again via INIT monarch.
	 */
	local_irq_disable();
	ia64_set_psr_mc();	/* mask MCA/INIT */
	if (atomic_inc_return(&kdump_in_progress) != 1)
		unw_init_running(kdump_cpu_freeze, NULL);

	/*
	 * Now this cpu is ready for kdump.
	 * Stop all others by IPI or INIT.  They could receive INIT from
	 * outside and might be INIT monarch, but only thing they have to
	 * do is falling into kdump_cpu_freeze().
	 *
	 * If an INIT is asserted here:
	 * - All receivers might be slaves, since some of cpus could already
	 *   be frozen and INIT might be masked on monarch.  In this case,
	 *   all slaves will be frozen soon since kdump_in_progress will let
	 *   them into DIE_INIT_SLAVE_LEAVE.
	 * - One might be a monarch, but INIT rendezvous will fail since
	 *   at least this cpu already have INIT masked so it never join
	 *   to the rendezvous.  In this case, all slaves and monarch will
	 *   be frozen soon with no wait since the INIT rendezvous is skipped
	 *   by kdump_in_progress.
	 */
	kdump_smp_send_stop();
	/* not all cpu response to IPI, send INIT to freeze them */
	if (kdump_wait_cpu_freeze()) {
		kdump_smp_send_init();
		/* wait again, don't go ahead if possible */
		kdump_wait_cpu_freeze();
	}
#endif
}
/*
 * Jump into the preloaded kdump kernel: record vmcoreinfo first, then
 * silence interrupts and the I/O SAPIC before handing over control.
 * machine_kexec() does not return.
 */
static void
machine_kdump_on_init(void)
{
	crash_save_vmcoreinfo();
	local_irq_disable();
	kexec_disable_iosapic();
	machine_kexec(ia64_kimage);
}
/*
 * Park a non-crashing cpu for kdump: save its register state, mask
 * MCA/INIT so it cannot be re-entered, publish its frozen status for
 * kdump_wait_cpu_freeze(), then spin forever.
 */
void
kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
{
	int cpuid;

	local_irq_disable();
	cpuid = smp_processor_id();
	crash_save_this_cpu();
	current->thread.ksp = (__u64)info->sw - 16;

	ia64_set_psr_mc();	/* mask MCA/INIT and stop reentrance */

	atomic_inc(&kdump_cpu_frozen);
	kdump_status[cpuid] = 1;
	mb();	/* make the frozen status visible before spinning */
	for (;;)
		cpu_relax();
}
/*
 * die-notifier callback: on INIT/MCA events, either freeze this cpu
 * for a kdump that is already in progress, or (on the monarch cpu,
 * with a crash kernel loaded) start one.
 */
static int
kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
{
	struct ia64_mca_notify_die *nd;
	struct die_args *args = data;

	/* a kdump is already underway: park this cpu at a safe point */
	if (atomic_read(&kdump_in_progress)) {
		switch (val) {
		case DIE_INIT_MONARCH_LEAVE:
			if (!kdump_freeze_monarch)
				break;
			/* fall through */
		case DIE_INIT_SLAVE_LEAVE:
		case DIE_INIT_MONARCH_ENTER:
		case DIE_MCA_RENDZVOUS_LEAVE:
			unw_init_running(kdump_cpu_freeze, NULL);
			break;
		}
	}

	if (!kdump_on_init && !kdump_on_fatal_mca)
		return NOTIFY_DONE;

	/* no crash kernel loaded - nothing we can do */
	if (!ia64_kimage) {
		if (val == DIE_INIT_MONARCH_LEAVE)
			ia64_mca_printk(KERN_NOTICE
					"%s: kdump not configured\n",
					__func__);
		return NOTIFY_DONE;
	}

	if (val != DIE_INIT_MONARCH_LEAVE &&
	    val != DIE_INIT_MONARCH_PROCESS &&
	    val != DIE_MCA_MONARCH_LEAVE)
		return NOTIFY_DONE;

	nd = (struct ia64_mca_notify_die *)args->err;

	switch (val) {
	case DIE_INIT_MONARCH_PROCESS:
		/* Reason code 1 means machine check rendezvous*/
		if (kdump_on_init && (nd->sos->rv_rc != 1)) {
			if (atomic_inc_return(&kdump_in_progress) != 1)
				kdump_freeze_monarch = 1;
		}
		break;
	case DIE_INIT_MONARCH_LEAVE:
		/* Reason code 1 means machine check rendezvous*/
		if (kdump_on_init && (nd->sos->rv_rc != 1))
			machine_kdump_on_init();
		break;
	case DIE_MCA_MONARCH_LEAVE:
		/* *(nd->data) indicate if MCA is recoverable */
		if (kdump_on_fatal_mca && !(*(nd->data))) {
			if (atomic_inc_return(&kdump_in_progress) == 1)
				machine_kdump_on_init();
			/* We got fatal MCA while kdump!? No way!! */
		}
		break;
	}
	return NOTIFY_DONE;
}
#ifdef CONFIG_SYSCTL
static ctl_table kdump_ctl_table[] = {
{
.procname = "kdump_on_init",
.data = &kdump_on_init,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "kdump_on_fatal_mca",
.data = &kdump_on_fatal_mca,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
static ctl_table sys_table[] = {
{
.procname = "kernel",
.mode = 0555,
.child = kdump_ctl_table,
},
{ }
};
#endif
/*
 * Boot-time setup: hook the kdump die-notifier into the chain and,
 * when sysctl support is built in, register the kdump_on_* knobs.
 */
static int
machine_crash_setup(void)
{
	/* be notified before default_monarch_init_process */
	static struct notifier_block kdump_init_notifier_nb = {
		.notifier_call = kdump_init_notifier,
		.priority = 1,
	};
	int ret = register_die_notifier(&kdump_init_notifier_nb);

	if (ret != 0)
		return ret;
#ifdef CONFIG_SYSCTL
	register_sysctl_table(sys_table);
#endif
	return 0;
}

__initcall(machine_crash_setup);
| gpl-2.0 |
lyapota/s7e_nougat | sound/pci/echoaudio/mona_dsp.c | 12518 | 11013 | /****************************************************************************
Copyright Echo Digital Audio Corporation (c) 1998 - 2004
All rights reserved
www.echoaudio.com
This file is part of Echo Digital Audio's generic driver library.
Echo Digital Audio's generic driver library is free software;
you can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software
Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA.
*************************************************************************
Translation from C++ and adaptation for use in ALSA-Driver
were made by Giuliano Pochini <pochini@shiny.it>
****************************************************************************/
static int write_control_reg(struct echoaudio *chip, u32 value, char force);
static int set_input_clock(struct echoaudio *chip, u16 clock);
static int set_professional_spdif(struct echoaudio *chip, char prof);
static int set_digital_mode(struct echoaudio *chip, u8 mode);
static int load_asic_generic(struct echoaudio *chip, u32 cmd, short asic);
static int check_asic_status(struct echoaudio *chip);
/*
 * One-time hardware init for Mona: validate the subdevice id, set up
 * the DSP communication page, advertise clock and digital-mode
 * capabilities, and load the DSP firmware matching the onboard DSP
 * ('301 vs '361 flavour).
 */
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int err;

	DE_INIT(("init_hw() - Mona\n"));
	if (snd_BUG_ON((subdevice_id & 0xfff0) != MONA))
		return -ENODEV;

	if ((err = init_dsp_comm_page(chip))) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return err;
	}

	chip->device_id = device_id;
	chip->subdevice_id = subdevice_id;
	chip->bad_board = TRUE;		/* cleared only after firmware loads */
	chip->input_clock_types =
		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
		ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_ADAT;
	chip->digital_modes =
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_RCA |
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_OPTICAL |
		ECHOCAPS_HAS_DIGITAL_MODE_ADAT;

	/* Mona comes in both '301 and '361 flavors */
	if (chip->device_id == DEVICE_ID_56361)
		chip->dsp_code_to_load = FW_MONA_361_DSP;
	else
		chip->dsp_code_to_load = FW_MONA_301_DSP;

	if ((err = load_firmware(chip)) < 0)
		return err;
	chip->bad_board = FALSE;

	DE_INIT(("init_hw done\n"));
	return err;
}
/*
 * Restore the mixer to power-on defaults: consumer-grade S/PDIF on
 * the RCA connector with automatic digital-input muting enabled,
 * then reset the line levels.
 */
static int set_mixer_defaults(struct echoaudio *chip)
{
	chip->digital_in_automute = TRUE;
	chip->professional_spdif = FALSE;
	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
	return init_line_levels(chip);
}
/*
 * Report which clock sources the DSP currently detects, translated
 * from the DSP's status bits into the generic ECHO_CLOCK_BIT_* mask.
 * The internal clock is always available.
 */
static u32 detect_input_clocks(const struct echoaudio *chip)
{
	u32 dsp_bits = le32_to_cpu(chip->comm_page->status_clocks);
	u32 detected = ECHO_CLOCK_BIT_INTERNAL;

	if (dsp_bits & GML_CLOCK_DETECT_BIT_WORD)
		detected |= ECHO_CLOCK_BIT_WORD;
	if (dsp_bits & GML_CLOCK_DETECT_BIT_ADAT)
		detected |= ECHO_CLOCK_BIT_ADAT;
	if (dsp_bits & GML_CLOCK_DETECT_BIT_SPDIF)
		detected |= ECHO_CLOCK_BIT_SPDIF;

	return detected;
}
/* Mona has an ASIC on the PCI card and another ASIC in the external box;
   both need to be loaded. */
static int load_asic(struct echoaudio *chip)
{
	u32 control_reg;
	int err;
	short asic;

	if (chip->asic_loaded)
		return 0;

	mdelay(10);	/* settle time before the upload */

	/* pick the 48 kHz PCI-card ASIC matching the DSP flavour */
	if (chip->device_id == DEVICE_ID_56361)
		asic = FW_MONA_361_1_ASIC48;
	else
		asic = FW_MONA_301_1_ASIC48;

	err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC, asic);
	if (err < 0)
		return err;
	chip->asic_code = asic;
	mdelay(10);

	/* Do the external one */
	err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_EXTERNAL_ASIC,
				FW_MONA_2_ASIC);
	if (err < 0)
		return err;

	mdelay(10);
	err = check_asic_status(chip);

	/* Set up the control register if the load succeeded -
	   48 kHz, internal clock, S/PDIF RCA mode */
	if (!err) {
		control_reg = GML_CONVERTER_ENABLE | GML_48KHZ;
		err = write_control_reg(chip, control_reg, TRUE);
	}

	return err;
}
/* Depending on what digital mode you want, Mona needs different ASICs
loaded. This function checks the ASIC needed for the new mode and sees
if it matches the one already loaded. */
static int switch_asic(struct echoaudio *chip, char double_speed)
{
	short wanted;

	/* Select the ASIC image for this chip flavour ('301 vs '361)
	 * and the requested speed range (single vs double). */
	if (chip->device_id == DEVICE_ID_56361)
		wanted = double_speed ? FW_MONA_361_1_ASIC96 :
					FW_MONA_361_1_ASIC48;
	else
		wanted = double_speed ? FW_MONA_301_1_ASIC96 :
					FW_MONA_301_1_ASIC48;

	/* Nothing to do if the right ASIC is already resident */
	if (wanted != chip->asic_code) {
		int err = load_asic_generic(chip,
					    DSP_FNC_LOAD_MONA_PCI_CARD_ASIC,
					    wanted);
		if (err < 0)
			return err;
		chip->asic_code = wanted;
	}

	return 0;
}
/*
 * Program the sample rate.  Only effective with the internal clock;
 * with an external clock the requested rate is merely recorded.
 * Rates >= 88.2 kHz require the 96 kHz ASIC variant (loading it can
 * sleep, so chip->lock is dropped around the load) and are refused in
 * ADAT mode.
 */
static int set_sample_rate(struct echoaudio *chip, u32 rate)
{
	u32 control_reg, clock;
	short asic;
	char force_write;

	/* Only set the clock for internal mode. */
	if (chip->input_clock != ECHO_CLOCK_INTERNAL) {
		DE_ACT(("set_sample_rate: Cannot set sample rate - "
			"clock not set to CLK_CLOCKININTERNAL\n"));
		/* Save the rate anyhow */
		chip->comm_page->sample_rate = cpu_to_le32(rate);
		chip->sample_rate = rate;
		return 0;
	}

	/* Now, check to see if the required ASIC is loaded */
	if (rate >= 88200) {
		if (chip->digital_mode == DIGITAL_MODE_ADAT)
			return -EINVAL;
		if (chip->device_id == DEVICE_ID_56361)
			asic = FW_MONA_361_1_ASIC96;
		else
			asic = FW_MONA_301_1_ASIC96;
	} else {
		if (chip->device_id == DEVICE_ID_56361)
			asic = FW_MONA_361_1_ASIC48;
		else
			asic = FW_MONA_301_1_ASIC48;
	}

	force_write = 0;
	if (asic != chip->asic_code) {
		int err;
		/* Load the desired ASIC (load_asic_generic() can sleep) */
		spin_unlock_irq(&chip->lock);
		err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC,
					asic);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		chip->asic_code = asic;
		force_write = 1;	/* ASIC changed: rewrite the control reg */
	}

	/* Compute the new control register value */
	clock = 0;
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_CLOCK_CLEAR_MASK;
	control_reg &= GML_SPDIF_RATE_CLEAR_MASK;

	switch (rate) {
	case 96000:
		clock = GML_96KHZ;
		break;
	case 88200:
		clock = GML_88KHZ;
		break;
	case 48000:
		clock = GML_48KHZ | GML_SPDIF_SAMPLE_RATE1;
		break;
	case 44100:
		clock = GML_44KHZ;
		/* Professional mode */
		if (control_reg & GML_SPDIF_PRO_MODE)
			clock |= GML_SPDIF_SAMPLE_RATE0;
		break;
	case 32000:
		clock = GML_32KHZ | GML_SPDIF_SAMPLE_RATE0 |
			GML_SPDIF_SAMPLE_RATE1;
		break;
	case 22050:
		clock = GML_22KHZ;
		break;
	case 16000:
		clock = GML_16KHZ;
		break;
	case 11025:
		clock = GML_11KHZ;
		break;
	case 8000:
		clock = GML_8KHZ;
		break;
	default:
		DE_ACT(("set_sample_rate: %d invalid!\n", rate));
		return -EINVAL;
	}

	control_reg |= clock;

	chip->comm_page->sample_rate = cpu_to_le32(rate);	/* ignored by the DSP */
	chip->sample_rate = rate;
	DE_ACT(("set_sample_rate: %d clock %d\n", rate, clock));

	return write_control_reg(chip, control_reg, force_write);
}
/*
 * Select the input clock source.  May have to swap in the 48 or
 * 96 kHz ASIC variant depending on the rate detected on the external
 * source; switch_asic() can sleep, so chip->lock is dropped around it,
 * and the whole operation is refused (-EAGAIN) while the device is
 * open.
 */
static int set_input_clock(struct echoaudio *chip, u16 clock)
{
	u32 control_reg, clocks_from_dsp;
	int err;

	DE_ACT(("set_input_clock:\n"));

	/* Prevent two simultaneous calls to switch_asic() */
	if (atomic_read(&chip->opencount))
		return -EAGAIN;

	/* Mask off the clock select bits */
	control_reg = le32_to_cpu(chip->comm_page->control_register) &
		GML_CLOCK_CLEAR_MASK;
	clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks);

	switch (clock) {
	case ECHO_CLOCK_INTERNAL:
		DE_ACT(("Set Mona clock to INTERNAL\n"));
		chip->input_clock = ECHO_CLOCK_INTERNAL;
		/* re-programming the rate writes the control register */
		return set_sample_rate(chip, chip->sample_rate);
	case ECHO_CLOCK_SPDIF:
		if (chip->digital_mode == DIGITAL_MODE_ADAT)
			return -EAGAIN;
		spin_unlock_irq(&chip->lock);
		err = switch_asic(chip, clocks_from_dsp &
				  GML_CLOCK_DETECT_BIT_SPDIF96);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		DE_ACT(("Set Mona clock to SPDIF\n"));
		control_reg |= GML_SPDIF_CLOCK;
		/* track the detected single/double speed of the source */
		if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_SPDIF96)
			control_reg |= GML_DOUBLE_SPEED_MODE;
		else
			control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	case ECHO_CLOCK_WORD:
		DE_ACT(("Set Mona clock to WORD\n"));
		spin_unlock_irq(&chip->lock);
		err = switch_asic(chip, clocks_from_dsp &
				  GML_CLOCK_DETECT_BIT_WORD96);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		control_reg |= GML_WORD_CLOCK;
		if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_WORD96)
			control_reg |= GML_DOUBLE_SPEED_MODE;
		else
			control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	case ECHO_CLOCK_ADAT:
		DE_ACT(("Set Mona clock to ADAT\n"));
		if (chip->digital_mode != DIGITAL_MODE_ADAT)
			return -EAGAIN;
		control_reg |= GML_ADAT_CLOCK;
		control_reg &= ~GML_DOUBLE_SPEED_MODE;	/* ADAT is single speed */
		break;
	default:
		DE_ACT(("Input clock 0x%x not supported for Mona\n", clock));
		return -EINVAL;
	}

	chip->input_clock = clock;
	return write_control_reg(chip, control_reg, TRUE);
}
/*
 * dsp_set_digital_mode - switch the card between S/PDIF (optical or RCA)
 * and ADAT digital I/O modes.
 *
 * If the currently selected input clock cannot exist in the new mode
 * (ADAT clock with an S/PDIF mode, or S/PDIF clock with ADAT mode) the
 * clock falls back to internal @ 48 kHz first.
 *
 * Returns a negative error code on failure; otherwise returns 1 if the
 * clock had to be changed (so callers can refresh mixer state), 0 if not.
 */
static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
{
	u32 control_reg;
	int err, incompatible_clock;
	/* Set clock to "internal" if it's not compatible with the new mode */
	incompatible_clock = FALSE;
	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
	case DIGITAL_MODE_SPDIF_RCA:
		if (chip->input_clock == ECHO_CLOCK_ADAT)
			incompatible_clock = TRUE;
		break;
	case DIGITAL_MODE_ADAT:
		if (chip->input_clock == ECHO_CLOCK_SPDIF)
			incompatible_clock = TRUE;
		break;
	default:
		DE_ACT(("Digital mode not supported: %d\n", mode));
		return -EINVAL;
	}
	spin_lock_irq(&chip->lock);
	if (incompatible_clock) {	/* Switch to 48KHz, internal */
		chip->sample_rate = 48000;
		set_input_clock(chip, ECHO_CLOCK_INTERNAL);
	}
	/* Clear the current digital mode */
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_DIGITAL_MODE_CLEAR_MASK;
	/* Tweak the control reg */
	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
		control_reg |= GML_SPDIF_OPTICAL_MODE;
		break;
	case DIGITAL_MODE_SPDIF_RCA:
		/* GML_SPDIF_OPTICAL_MODE bit cleared */
		break;
	case DIGITAL_MODE_ADAT:
		/* If the current ASIC is the 96KHz ASIC, switch the ASIC
		   and set to 48 KHz */
		if (chip->asic_code == FW_MONA_361_1_ASIC96 ||
		    chip->asic_code == FW_MONA_301_1_ASIC96) {
			/* NOTE(review): set_sample_rate() may drop and retake
			   chip->lock around a firmware load while we hold it
			   here - verify the calling context tolerates this */
			set_sample_rate(chip, 48000);
		}
		control_reg |= GML_ADAT_MODE;
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	}
	err = write_control_reg(chip, control_reg, FALSE);
	spin_unlock_irq(&chip->lock);
	if (err < 0)
		return err;
	chip->digital_mode = mode;
	DE_ACT(("set_digital_mode to %d\n", mode));
	/* 1 if the clock source was forcibly changed, 0 otherwise */
	return incompatible_clock;
}
| gpl-2.0 |
allwinner/linux-2.6.36 | sound/pci/echoaudio/mona_dsp.c | 12518 | 11013 | /****************************************************************************
Copyright Echo Digital Audio Corporation (c) 1998 - 2004
All rights reserved
www.echoaudio.com
This file is part of Echo Digital Audio's generic driver library.
Echo Digital Audio's generic driver library is free software;
you can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software
Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA.
*************************************************************************
Translation from C++ and adaptation for use in ALSA-Driver
were made by Giuliano Pochini <pochini@shiny.it>
****************************************************************************/
static int write_control_reg(struct echoaudio *chip, u32 value, char force);
static int set_input_clock(struct echoaudio *chip, u16 clock);
static int set_professional_spdif(struct echoaudio *chip, char prof);
static int set_digital_mode(struct echoaudio *chip, u8 mode);
static int load_asic_generic(struct echoaudio *chip, u32 cmd, short asic);
static int check_asic_status(struct echoaudio *chip);
/*
 * init_hw - one-time hardware initialization for the Mona card.
 *
 * Validates the PCI subdevice id, sets up the DSP communication page,
 * records the clock sources and digital modes this board supports, and
 * loads the DSP firmware matching the on-board DSP ('301 or '361).
 *
 * Returns 0 on success or a negative error code.
 */
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int err;
	DE_INIT(("init_hw() - Mona\n"));
	if (snd_BUG_ON((subdevice_id & 0xfff0) != MONA))
		return -ENODEV;
	if ((err = init_dsp_comm_page(chip))) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return err;
	}
	chip->device_id = device_id;
	chip->subdevice_id = subdevice_id;
	/* Pessimistic until the firmware load below succeeds */
	chip->bad_board = TRUE;
	chip->input_clock_types =
		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
		ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_ADAT;
	chip->digital_modes =
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_RCA |
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_OPTICAL |
		ECHOCAPS_HAS_DIGITAL_MODE_ADAT;
	/* Mona comes in both '301 and '361 flavors */
	if (chip->device_id == DEVICE_ID_56361)
		chip->dsp_code_to_load = FW_MONA_361_DSP;
	else
		chip->dsp_code_to_load = FW_MONA_301_DSP;
	if ((err = load_firmware(chip)) < 0)
		return err;
	chip->bad_board = FALSE;
	DE_INIT(("init_hw done\n"));
	return err;
}
/*
 * Program the power-on mixer configuration: consumer-grade S/PDIF on the
 * RCA connectors with automatic digital input muting, then let the
 * generic layer reset every line level to its default.
 */
static int set_mixer_defaults(struct echoaudio *chip)
{
	chip->digital_in_automute = TRUE;
	chip->professional_spdif = FALSE;
	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
	return init_line_levels(chip);
}
/*
 * Report which clock sources are currently present, translating the
 * DSP's status bits into the generic driver's clock bitmask.  The
 * internal clock is always available.
 */
static u32 detect_input_clocks(const struct echoaudio *chip)
{
	u32 dsp_clocks = le32_to_cpu(chip->comm_page->status_clocks);
	u32 bits = ECHO_CLOCK_BIT_INTERNAL;
	bits |= (dsp_clocks & GML_CLOCK_DETECT_BIT_SPDIF) ?
		ECHO_CLOCK_BIT_SPDIF : 0;
	bits |= (dsp_clocks & GML_CLOCK_DETECT_BIT_ADAT) ?
		ECHO_CLOCK_BIT_ADAT : 0;
	bits |= (dsp_clocks & GML_CLOCK_DETECT_BIT_WORD) ?
		ECHO_CLOCK_BIT_WORD : 0;
	return bits;
}
/* Mona has an ASIC on the PCI card and another ASIC in the external box;
both need to be loaded. */
/*
 * load_asic - load both Mona ASICs and program the initial control
 * register.  Mona has one ASIC on the PCI card (48 kHz variant chosen
 * here; the 96 kHz image is swapped in later if needed) and one in the
 * external box.  The mdelay()s give the hardware time to settle between
 * load steps.  Returns 0 on success or a negative error code.
 */
static int load_asic(struct echoaudio *chip)
{
	u32 control_reg;
	int err;
	short asic;
	if (chip->asic_loaded)
		return 0;
	mdelay(10);
	/* Pick the PCI-card ASIC image matching the DSP variant */
	if (chip->device_id == DEVICE_ID_56361)
		asic = FW_MONA_361_1_ASIC48;
	else
		asic = FW_MONA_301_1_ASIC48;
	err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC, asic);
	if (err < 0)
		return err;
	/* Remember which image is resident for later 48/96 kHz swaps */
	chip->asic_code = asic;
	mdelay(10);
	/* Do the external one */
	err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_EXTERNAL_ASIC,
				FW_MONA_2_ASIC);
	if (err < 0)
		return err;
	mdelay(10);
	err = check_asic_status(chip);
	/* Set up the control register if the load succeeded -
	   48 kHz, internal clock, S/PDIF RCA mode */
	if (!err) {
		control_reg = GML_CONVERTER_ENABLE | GML_48KHZ;
		err = write_control_reg(chip, control_reg, TRUE);
	}
	return err;
}
/* Depending on what digital mode you want, Mona needs different ASICs
loaded. This function checks the ASIC needed for the new mode and sees
if it matches the one already loaded. */
/*
 * Make sure the PCI-card ASIC matching the requested speed class is
 * resident.  @double_speed is non-zero for a double-speed (88.2/96 kHz)
 * clock.  Loads a new ASIC image only when the resident one differs.
 * Returns 0 on success or a negative error from the firmware load.
 */
static int switch_asic(struct echoaudio *chip, char double_speed)
{
	short wanted;
	if (chip->device_id == DEVICE_ID_56361)
		wanted = double_speed ? FW_MONA_361_1_ASIC96
				      : FW_MONA_361_1_ASIC48;
	else
		wanted = double_speed ? FW_MONA_301_1_ASIC96
				      : FW_MONA_301_1_ASIC48;
	if (wanted != chip->asic_code) {
		/* Swap in the required ASIC image */
		int err = load_asic_generic(chip,
					    DSP_FNC_LOAD_MONA_PCI_CARD_ASIC,
					    wanted);
		if (err < 0)
			return err;
		chip->asic_code = wanted;
	}
	return 0;
}
/*
 * set_sample_rate - program the sample rate (8 kHz .. 96 kHz).
 *
 * Only takes effect when the internal clock is selected; otherwise the
 * rate is just recorded for later.  Rates >= 88.2 kHz require the 96 kHz
 * ASIC (not available in ADAT mode); the ASIC is swapped if needed,
 * dropping chip->lock around the sleeping firmware load.
 *
 * Returns 0/positive on success, -EINVAL for unsupported rate or
 * rate/mode combinations, or a negative error from the loads/writes.
 */
static int set_sample_rate(struct echoaudio *chip, u32 rate)
{
	u32 control_reg, clock;
	short asic;
	char force_write;
	/* Only set the clock for internal mode. */
	if (chip->input_clock != ECHO_CLOCK_INTERNAL) {
		DE_ACT(("set_sample_rate: Cannot set sample rate - "
			"clock not set to CLK_CLOCKININTERNAL\n"));
		/* Save the rate anyhow */
		chip->comm_page->sample_rate = cpu_to_le32(rate);
		chip->sample_rate = rate;
		return 0;
	}
	/* Now, check to see if the required ASIC is loaded */
	if (rate >= 88200) {
		/* Double-speed rates are impossible in ADAT mode */
		if (chip->digital_mode == DIGITAL_MODE_ADAT)
			return -EINVAL;
		if (chip->device_id == DEVICE_ID_56361)
			asic = FW_MONA_361_1_ASIC96;
		else
			asic = FW_MONA_301_1_ASIC96;
	} else {
		if (chip->device_id == DEVICE_ID_56361)
			asic = FW_MONA_361_1_ASIC48;
		else
			asic = FW_MONA_301_1_ASIC48;
	}
	force_write = 0;
	if (asic != chip->asic_code) {
		int err;
		/* Load the desired ASIC (load_asic_generic() can sleep) */
		spin_unlock_irq(&chip->lock);
		err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC,
					asic);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		chip->asic_code = asic;
		/* New ASIC: the control register must be rewritten even if
		   its value did not change */
		force_write = 1;
	}
	/* Compute the new control register value */
	clock = 0;
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_CLOCK_CLEAR_MASK;
	control_reg &= GML_SPDIF_RATE_CLEAR_MASK;
	/* Map the rate to clock-select plus S/PDIF channel-status rate bits */
	switch (rate) {
	case 96000:
		clock = GML_96KHZ;
		break;
	case 88200:
		clock = GML_88KHZ;
		break;
	case 48000:
		clock = GML_48KHZ | GML_SPDIF_SAMPLE_RATE1;
		break;
	case 44100:
		clock = GML_44KHZ;
		/* Professional mode */
		if (control_reg & GML_SPDIF_PRO_MODE)
			clock |= GML_SPDIF_SAMPLE_RATE0;
		break;
	case 32000:
		clock = GML_32KHZ | GML_SPDIF_SAMPLE_RATE0 |
			GML_SPDIF_SAMPLE_RATE1;
		break;
	case 22050:
		clock = GML_22KHZ;
		break;
	case 16000:
		clock = GML_16KHZ;
		break;
	case 11025:
		clock = GML_11KHZ;
		break;
	case 8000:
		clock = GML_8KHZ;
		break;
	default:
		DE_ACT(("set_sample_rate: %d invalid!\n", rate));
		return -EINVAL;
	}
	control_reg |= clock;
	chip->comm_page->sample_rate = cpu_to_le32(rate);	/* ignored by the DSP */
	chip->sample_rate = rate;
	DE_ACT(("set_sample_rate: %d clock %d\n", rate, clock));
	return write_control_reg(chip, control_reg, force_write);
}
/*
 * set_input_clock - select the clock source (internal, S/PDIF, word clock
 * or ADAT) for the Mona card and reprogram the DSP control register.
 *
 * Called with chip->lock held; the lock is dropped temporarily around
 * switch_asic(), which can sleep while loading firmware.
 *
 * Returns 0 on success, -EAGAIN if the device is open or the source
 * conflicts with the current digital mode, -EINVAL for an unsupported
 * source, or a negative error from the ASIC/control-register writes.
 */
static int set_input_clock(struct echoaudio *chip, u16 clock)
{
	u32 control_reg, clocks_from_dsp;
	int err;
	DE_ACT(("set_input_clock:\n"));
	/* Prevent two simultaneous calls to switch_asic() */
	if (atomic_read(&chip->opencount))
		return -EAGAIN;
	/* Mask off the clock select bits */
	control_reg = le32_to_cpu(chip->comm_page->control_register) &
		GML_CLOCK_CLEAR_MASK;
	/* Snapshot of which external clocks the DSP currently detects */
	clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks);
	switch (clock) {
	case ECHO_CLOCK_INTERNAL:
		DE_ACT(("Set Mona clock to INTERNAL\n"));
		chip->input_clock = ECHO_CLOCK_INTERNAL;
		/* Internal mode: the sample-rate path programs the clock */
		return set_sample_rate(chip, chip->sample_rate);
	case ECHO_CLOCK_SPDIF:
		if (chip->digital_mode == DIGITAL_MODE_ADAT)
			return -EAGAIN;
		/* switch_asic() can sleep - release the spinlock around it */
		spin_unlock_irq(&chip->lock);
		err = switch_asic(chip, clocks_from_dsp &
				  GML_CLOCK_DETECT_BIT_SPDIF96);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		DE_ACT(("Set Mona clock to SPDIF\n"));
		control_reg |= GML_SPDIF_CLOCK;
		/* Track single vs. double speed from the detected S/PDIF rate */
		if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_SPDIF96)
			control_reg |= GML_DOUBLE_SPEED_MODE;
		else
			control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	case ECHO_CLOCK_WORD:
		DE_ACT(("Set Mona clock to WORD\n"));
		/* Same dance as S/PDIF: possibly swap the 48/96 kHz ASIC */
		spin_unlock_irq(&chip->lock);
		err = switch_asic(chip, clocks_from_dsp &
				  GML_CLOCK_DETECT_BIT_WORD96);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		control_reg |= GML_WORD_CLOCK;
		if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_WORD96)
			control_reg |= GML_DOUBLE_SPEED_MODE;
		else
			control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	case ECHO_CLOCK_ADAT:
		DE_ACT(("Set Mona clock to ADAT\n"));
		if (chip->digital_mode != DIGITAL_MODE_ADAT)
			return -EAGAIN;
		/* ADAT is single-speed only */
		control_reg |= GML_ADAT_CLOCK;
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	default:
		DE_ACT(("Input clock 0x%x not supported for Mona\n", clock));
		return -EINVAL;
	}
	chip->input_clock = clock;
	/* Force the write so the DSP re-syncs even if the value is unchanged */
	return write_control_reg(chip, control_reg, TRUE);
}
/*
 * dsp_set_digital_mode - switch the card between S/PDIF (optical or RCA)
 * and ADAT digital I/O modes.
 *
 * If the currently selected input clock cannot exist in the new mode
 * (ADAT clock with an S/PDIF mode, or S/PDIF clock with ADAT mode) the
 * clock falls back to internal @ 48 kHz first.
 *
 * Returns a negative error code on failure; otherwise returns 1 if the
 * clock had to be changed (so callers can refresh mixer state), 0 if not.
 */
static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
{
	u32 control_reg;
	int err, incompatible_clock;
	/* Set clock to "internal" if it's not compatible with the new mode */
	incompatible_clock = FALSE;
	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
	case DIGITAL_MODE_SPDIF_RCA:
		if (chip->input_clock == ECHO_CLOCK_ADAT)
			incompatible_clock = TRUE;
		break;
	case DIGITAL_MODE_ADAT:
		if (chip->input_clock == ECHO_CLOCK_SPDIF)
			incompatible_clock = TRUE;
		break;
	default:
		DE_ACT(("Digital mode not supported: %d\n", mode));
		return -EINVAL;
	}
	spin_lock_irq(&chip->lock);
	if (incompatible_clock) {	/* Switch to 48KHz, internal */
		chip->sample_rate = 48000;
		set_input_clock(chip, ECHO_CLOCK_INTERNAL);
	}
	/* Clear the current digital mode */
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_DIGITAL_MODE_CLEAR_MASK;
	/* Tweak the control reg */
	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
		control_reg |= GML_SPDIF_OPTICAL_MODE;
		break;
	case DIGITAL_MODE_SPDIF_RCA:
		/* GML_SPDIF_OPTICAL_MODE bit cleared */
		break;
	case DIGITAL_MODE_ADAT:
		/* If the current ASIC is the 96KHz ASIC, switch the ASIC
		   and set to 48 KHz */
		if (chip->asic_code == FW_MONA_361_1_ASIC96 ||
		    chip->asic_code == FW_MONA_301_1_ASIC96) {
			/* NOTE(review): set_sample_rate() may drop and retake
			   chip->lock around a firmware load while we hold it
			   here - verify the calling context tolerates this */
			set_sample_rate(chip, 48000);
		}
		control_reg |= GML_ADAT_MODE;
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	}
	err = write_control_reg(chip, control_reg, FALSE);
	spin_unlock_irq(&chip->lock);
	if (err < 0)
		return err;
	chip->digital_mode = mode;
	DE_ACT(("set_digital_mode to %d\n", mode));
	/* 1 if the clock source was forcibly changed, 0 otherwise */
	return incompatible_clock;
}
| gpl-2.0 |
javilonas/Lonas_KL-GT-I9300-Sammy | drivers/misc/modem_if/modem_modemctl_device_sprd8803.c | 231 | 6470 | /* /linux/drivers/misc/modem_if/modem_modemctl_device_sprd8803.c
*
* Copyright (C) 2010 Google, Inc.
* Copyright (C) 2010 Samsung Electronics.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/cma.h>
#include <plat/devs.h>
#include <linux/platform_data/modem.h>
#include "modem_prj.h"
#include <plat/gpio-cfg.h>
/* Serializes the IRQ enable/disable bookkeeping done by the on/off paths */
spinlock_t irq_lock;
/* Non-zero while the phone-active and cp-dump IRQs are enabled (and
 * wake-enabled); toggled under irq_lock */
int irq_lock_flag;
/* NOTE(review): presumably set elsewhere once CP boot completes; it gates
 * the phone-active IRQ handler - confirm where it is written */
int sprd_boot_done;
extern int spi_thread_restart(void);
/*
 * sprd8803_on - power the SPRD8803 CP on.
 *
 * Re-muxes the AP-CP pins to their special function, raises cp_on and
 * pda_active, and (once per on/off cycle, guarded by irq_lock_flag)
 * enables and wake-enables the phone-active and cp-dump interrupts.
 *
 * Returns 0 on success, -ENXIO if mandatory GPIOs are missing.
 */
static int sprd8803_on(struct modem_ctl *mc)
{
	if (!mc->gpio_cp_on || !mc->gpio_pda_active) {
		pr_err("[MODEM_IF] no gpio data\n");
		return -ENXIO;
	}
	/* Board-specific pin mux for the AP<->CP link (SFN 2) */
	s3c_gpio_cfgpin(EXYNOS4_GPA1(4), S3C_GPIO_SFN(2));
	s3c_gpio_cfgpin(EXYNOS4_GPA1(5), S3C_GPIO_SFN(2));
#ifdef CONFIG_SEC_DUAL_MODEM_MODE
	/* Route SIM I/O and control lines to this modem */
	gpio_set_value(mc->gpio_sim_io_sel, 1);
	gpio_set_value(mc->gpio_cp_ctrl1, 0);
	gpio_set_value(mc->gpio_cp_ctrl2, 1);
#endif
	msleep(100);
	pr_info("[MODEM_IF] %s\n", __func__);
	gpio_set_value(mc->gpio_cp_on, 1);
	gpio_set_value(mc->gpio_pda_active, 1);
	spin_lock(&irq_lock);
	if (!irq_lock_flag) {
		/* IRQs were parked by sprd8803_off()/init - re-arm them */
		enable_irq(mc->irq_phone_active);
		enable_irq(gpio_to_irq(mc->gpio_cp_dump_int));
		enable_irq_wake(mc->irq_phone_active);
		enable_irq_wake(gpio_to_irq(mc->gpio_cp_dump_int));
		irq_lock_flag = 1;
	}
	spin_unlock(&irq_lock);
	mc->phone_state = STATE_BOOTING;
	return 0;
}
/*
 * sprd8803_off - power the CP down: drop cp_on/pda_active and park the
 * phone-active and cp-dump interrupts (disable + clear wake source).
 * Returns 0 on success, -ENXIO if the cp_on GPIO is missing.
 */
static int sprd8803_off(struct modem_ctl *mc)
{
	pr_info("[MODEM_IF] %s\n", __func__);
	if (!mc->gpio_cp_on) {
		mif_err("no gpio data\n");
		return -ENXIO;
	}
	gpio_set_value(mc->gpio_cp_on, 0);
	gpio_set_value(mc->gpio_pda_active, 0);
	spin_lock(&irq_lock);
	if (irq_lock_flag) {
		/* Mirror image of the enable sequence in sprd8803_on() */
		disable_irq(mc->irq_phone_active);
		disable_irq(gpio_to_irq(mc->gpio_cp_dump_int));
		disable_irq_wake(mc->irq_phone_active);
		disable_irq_wake(gpio_to_irq(mc->gpio_cp_dump_int));
		irq_lock_flag = 0;
	}
	spin_unlock(&irq_lock);
	mc->phone_state = STATE_OFFLINE;
	return 0;
}
/* Reset the CP link by restarting the SPI worker thread (defined in the
 * SPI transport code); always reports success. */
static int sprd8803_reset(struct modem_ctl *mc)
{
	pr_info("[MODEM_IF] %s\n", __func__);
	spi_thread_restart();
	return 0;
}
/* Boot-on hook: no hardware action needed for this modem; just report
 * the current phone state back to the caller. */
static int sprd8803_boot_on(struct modem_ctl *mc)
{
	pr_info("[MODEM_IF] %s %d\n", __func__, mc->phone_state);
	return mc->phone_state;
}
/* Boot-off hook: re-initialize the SPI semaphore state.
 * NOTE(review): spi_sema_init() has no prototype in this file's view;
 * presumably declared by the SPI transport header - confirm. */
static int sprd8803_boot_off(struct modem_ctl *mc)
{
	pr_info("[MODEM_IF] %s\n", __func__);
	spi_sema_init();
	return 0;
}
/*
 * sprd8803_dump_reset - pulse the ap_cp_int2 line low for 100 ms to force
 * the CP into its dump/crash path; also used as force_crash_exit.
 * Returns 0 on success, -ENXIO if the GPIO is missing.
 */
static int sprd8803_dump_reset(struct modem_ctl *mc)
{
	pr_info("[MODEM_IF] %s\n", __func__);
	if (!mc->gpio_ap_cp_int2)
		return -ENXIO;
	gpio_set_value(mc->gpio_ap_cp_int2, 0);
	mc->phone_state = STATE_OFFLINE;
	msleep(100);
	gpio_set_value(mc->gpio_ap_cp_int2, 1);
	return 0;
}
/*
 * phone_active_irq_handler - shared handler for the phone-active and
 * cp-dump interrupts.
 *
 * Derives the new phone state from the two status GPIOs:
 *   active=1, dump=0 -> ONLINE; active=0 -> OFFLINE;
 *   active=1, dump=1 -> CRASH_EXIT (CP requested a dump).
 * The derived state is pushed to the registered io/boot devices.
 * Events are ignored until sprd_boot_done is set.
 */
static irqreturn_t phone_active_irq_handler(int irq, void *_mc)
{
	int phone_reset = 0;
	int phone_active_value = 0;
	int cp_dump_value = 0;
	int phone_state = 0;
	struct modem_ctl *mc = (struct modem_ctl *)_mc;
	/* Masked while we evaluate the GPIOs; re-enabled on every exit path */
	disable_irq_nosync(mc->irq_phone_active);
	if (!mc->gpio_phone_active ||
			!mc->gpio_cp_dump_int) {
		pr_err("[MODEM_IF] no gpio data\n");
		goto exit;
	}
	if (!sprd_boot_done)
		goto exit;
	phone_active_value = gpio_get_value(mc->gpio_phone_active);
	cp_dump_value = gpio_get_value(mc->gpio_cp_dump_int);
	pr_err("PA EVENT : pa=%d, cp_dump=%d\n",
				phone_active_value, cp_dump_value);
	if (phone_active_value)
		phone_state = STATE_ONLINE;
	else
		phone_state = STATE_OFFLINE;
	/* Dump request overrides the plain online state */
	if (phone_active_value && cp_dump_value)
		phone_state = STATE_CRASH_EXIT;
	if (mc->iod && mc->iod->modem_state_changed)
		mc->iod->modem_state_changed(mc->iod, phone_state);
	if (mc->bootd && mc->bootd->modem_state_changed)
		mc->bootd->modem_state_changed(mc->bootd, phone_state);
exit:
	enable_irq(mc->irq_phone_active);
	return IRQ_HANDLED;
}
/* Populate the generic modem_ctl ops table with the SPRD8803 handlers.
 * dump_reset doubles as force_crash_exit. */
static void sprd8803_get_ops(struct modem_ctl *mc)
{
	mc->ops.modem_on = sprd8803_on;
	mc->ops.modem_off = sprd8803_off;
	mc->ops.modem_reset = sprd8803_reset;
	mc->ops.modem_boot_on = sprd8803_boot_on;
	mc->ops.modem_boot_off = sprd8803_boot_off;
	mc->ops.modem_dump_reset = sprd8803_dump_reset;
	mc->ops.modem_force_crash_exit = sprd8803_dump_reset;
}
/*
 * sprd8803_init_modemctl_device - wire up GPIOs, ops and interrupts for
 * the SPRD8803 modem control device.
 *
 * Requests the phone-active and cp-dump interrupts (both routed to
 * phone_active_irq_handler), marks them wake sources, then parks them
 * disabled until sprd8803_on() re-arms them.
 *
 * Fixes over the previous version:
 *  - a failure of the second request_irq() leaked the first IRQ;
 *  - a failure of either enable_irq_wake() called free_irq() but then
 *    fell through and later ran disable_irq()/disable_irq_wake() on the
 *    freed IRQ.  All error paths now unwind fully and return the error.
 *  - removed the unused local 'pdev'.
 *
 * Returns 0 on success or a negative errno.
 */
int sprd8803_init_modemctl_device(struct modem_ctl *mc,
			struct modem_data *pdata)
{
	int ret;
	int irq_cp_dump_int;
	mc->gpio_cp_on = pdata->gpio_cp_on;
	mc->gpio_pda_active = pdata->gpio_pda_active;
	mc->gpio_phone_active = pdata->gpio_phone_active;
	mc->gpio_cp_dump_int = pdata->gpio_cp_dump_int;
	mc->gpio_ap_cp_int1 = pdata->gpio_ap_cp_int1;
	mc->gpio_ap_cp_int2 = pdata->gpio_ap_cp_int2;
#ifdef CONFIG_SEC_DUAL_MODEM_MODE
	mc->gpio_sim_io_sel = pdata->gpio_sim_io_sel;
	mc->gpio_cp_ctrl1 = pdata->gpio_cp_ctrl1;
	mc->gpio_cp_ctrl2 = pdata->gpio_cp_ctrl2;
#endif
	mc->irq_phone_active = gpio_to_irq(mc->gpio_phone_active);
	irq_cp_dump_int = gpio_to_irq(mc->gpio_cp_dump_int);
	sprd8803_get_ops(mc);
	ret = request_irq(mc->irq_phone_active, phone_active_irq_handler,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			"phone_active", mc);
	if (ret) {
		pr_err("[MODEM_IF] %s: failed to request_irq:%d\n",
			__func__, ret);
		return ret;
	}
	ret = enable_irq_wake(mc->irq_phone_active);
	if (ret) {
		pr_err("[MODEM_IF] %s: failed to enable_irq_wake:%d\n",
			__func__, ret);
		goto err_free_phone_active;
	}
	ret = request_irq(irq_cp_dump_int, phone_active_irq_handler,
			IRQF_TRIGGER_RISING,
			"cp_dump_int", mc);
	if (ret) {
		pr_err("[MODEM_IF] %s: failed to request_irq:%d\n",
			__func__, ret);
		goto err_unwake_phone_active;
	}
	ret = enable_irq_wake(irq_cp_dump_int);
	if (ret) {
		pr_err("[MODEM_IF] %s: failed to enable_irq_wake:%d\n",
			__func__, ret);
		goto err_free_cp_dump;
	}
	/* Both IRQs are live and wake-enabled; park them disabled until
	 * sprd8803_on() brings the modem up. */
	irq_lock_flag = 1;
	spin_lock_init(&irq_lock);
	spin_lock(&irq_lock);
	if (irq_lock_flag) {
		disable_irq(mc->irq_phone_active);
		disable_irq(gpio_to_irq(mc->gpio_cp_dump_int));
		disable_irq_wake(mc->irq_phone_active);
		disable_irq_wake(gpio_to_irq(mc->gpio_cp_dump_int));
		irq_lock_flag = 0;
	}
	spin_unlock(&irq_lock);
	return 0;
err_free_cp_dump:
	free_irq(irq_cp_dump_int, mc);
err_unwake_phone_active:
	disable_irq_wake(mc->irq_phone_active);
err_free_phone_active:
	free_irq(mc->irq_phone_active, mc);
	return ret;
}
| gpl-2.0 |
KingLiuDao/linux | arch/powerpc/crypto/sha1-spe-glue.c | 487 | 5153 | /*
* Glue code for SHA-1 implementation for SPE instructions (PPC)
*
* Based on generic implementation.
*
* Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/switch_to.h>
#include <linux/hardirq.h>
/*
* MAX_BYTES defines the number of bytes that are allowed to be processed
* between preempt_disable() and preempt_enable(). SHA1 takes ~1000
* operations per 64 bytes. e500 cores can issue two arithmetic instructions
* per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2).
* Thus 2KB of input data will need an estimated maximum of 18,000 cycles.
* Headroom for cache misses included. Even with the low end model clocked
* at 667 MHz this equals to a critical time window of less than 27us.
*
*/
#define MAX_BYTES 2048
extern void ppc_spe_sha1_transform(u32 *state, const u8 *src, u32 blocks);
/* Enter an SPE section: disable preemption so the SPE register state
 * cannot be lost to a context switch while the transform runs. */
static void spe_begin(void)
{
	/* We just start SPE operations and will save SPE registers later. */
	preempt_disable();
	enable_kernel_spe();
}
/* Leave an SPE section started by spe_begin(). */
static void spe_end(void)
{
	disable_kernel_spe();
	/* reenable preemption */
	preempt_enable();
}
/* Wipe the whole hash context one 32-bit word at a time so no digest or
 * message state survives after final(). */
static inline void ppc_sha1_clear_context(struct sha1_state *sctx)
{
	u32 *word = (u32 *)sctx;
	unsigned int i;
	/* the context size must be a multiple of 4 for the word loop */
	BUILD_BUG_ON(sizeof(struct sha1_state) % 4);
	for (i = 0; i < sizeof(struct sha1_state) / 4; i++)
		word[i] = 0;
}
/* Reset the digest to the SHA-1 initial vector and zero the byte count.
 * The data buffer needs no initialization while count == 0. */
static int ppc_spe_sha1_init(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	sctx->count = 0;
	sctx->state[4] = SHA1_H4;
	sctx->state[3] = SHA1_H3;
	sctx->state[2] = SHA1_H2;
	sctx->state[1] = SHA1_H1;
	sctx->state[0] = SHA1_H0;
	return 0;
}
/*
 * ppc_spe_sha1_update - absorb @len bytes of @data into the hash state.
 *
 * Completes any partially filled 64-byte block first, then feeds full
 * blocks to the SPE transform in chunks of at most MAX_BYTES (bounding
 * the preempt-disabled window), and finally stashes the remainder in
 * sctx->buffer.  Fix: removed a stray empty statement (";") after the
 * while loop.  Always returns 0.
 */
static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
			unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	const unsigned int offset = sctx->count & 0x3f;
	const unsigned int avail = 64 - offset;
	unsigned int bytes;
	const u8 *src = data;
	/* Not enough for a full block yet: just buffer the bytes */
	if (avail > len) {
		sctx->count += len;
		memcpy((char *)sctx->buffer + offset, src, len);
		return 0;
	}
	sctx->count += len;
	if (offset) {
		/* Top up and hash the previously buffered partial block */
		memcpy((char *)sctx->buffer + offset, src, avail);
		spe_begin();
		ppc_spe_sha1_transform(sctx->state, (const u8 *)sctx->buffer, 1);
		spe_end();
		len -= avail;
		src += avail;
	}
	while (len > 63) {
		/* Limit each SPE burst to MAX_BYTES of whole blocks */
		bytes = (len > MAX_BYTES) ? MAX_BYTES : len;
		bytes = bytes & ~0x3f;
		spe_begin();
		ppc_spe_sha1_transform(sctx->state, src, bytes >> 6);
		spe_end();
		src += bytes;
		len -= bytes;
	}
	/* Keep the tail (< 64 bytes) for the next update/final */
	memcpy((char *)sctx->buffer, src, len);
	return 0;
}
/*
 * ppc_spe_sha1_final - apply SHA-1 padding, emit the big-endian digest
 * into @out, and wipe the context.
 *
 * Padding: a 0x80 byte, zeros up to byte 56 of a block, then the message
 * bit length as a 64-bit big-endian value in the last 8 bytes.  If fewer
 * than 9 bytes remain in the current block (padlen < 0), an extra block
 * is hashed first.  Always returns 0.
 */
static int ppc_spe_sha1_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	const unsigned int offset = sctx->count & 0x3f;
	char *p = (char *)sctx->buffer + offset;
	int padlen;
	/* Bit-length field lives in the last 8 bytes of the final block */
	__be64 *pbits = (__be64 *)(((char *)&sctx->buffer) + 56);
	__be32 *dst = (__be32 *)out;
	padlen = 55 - offset;
	*p++ = 0x80;
	spe_begin();
	if (padlen < 0) {
		/* No room for the length in this block: zero the rest
		   (padlen + 8 >= 0 here), hash it, pad a fresh block */
		memset(p, 0x00, padlen + sizeof (u64));
		ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);
		p = (char *)sctx->buffer;
		padlen = 56;
	}
	memset(p, 0, padlen);
	*pbits = cpu_to_be64(sctx->count << 3);
	ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);
	spe_end();
	/* SHA-1 digest is defined big-endian */
	dst[0] = cpu_to_be32(sctx->state[0]);
	dst[1] = cpu_to_be32(sctx->state[1]);
	dst[2] = cpu_to_be32(sctx->state[2]);
	dst[3] = cpu_to_be32(sctx->state[3]);
	dst[4] = cpu_to_be32(sctx->state[4]);
	/* Do not leave hash state behind in memory */
	ppc_sha1_clear_context(sctx);
	return 0;
}
/* Serialize the partial hash state into @out (statesize bytes). */
static int ppc_spe_sha1_export(struct shash_desc *desc, void *out)
{
	memcpy(out, shash_desc_ctx(desc), sizeof(struct sha1_state));
	return 0;
}
/* Restore a partial hash state previously produced by export(). */
static int ppc_spe_sha1_import(struct shash_desc *desc, const void *in)
{
	memcpy(shash_desc_ctx(desc), in, sizeof(struct sha1_state));
	return 0;
}
/* shash descriptor registering the SPE-optimized SHA-1 with the crypto
 * API; the per-request context is a plain struct sha1_state. */
static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	ppc_spe_sha1_init,
	.update		=	ppc_spe_sha1_update,
	.final		=	ppc_spe_sha1_final,
	.export		=	ppc_spe_sha1_export,
	.import		=	ppc_spe_sha1_import,
	.descsize	=	sizeof(struct sha1_state),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name	=	"sha1",
		.cra_driver_name=	"sha1-ppc-spe",
		/* Higher priority than the generic C sha1 (100) */
		.cra_priority	=	300,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA1_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};
/* Module entry: register the SPE SHA-1 with the crypto subsystem. */
static int __init ppc_spe_sha1_mod_init(void)
{
	return crypto_register_shash(&alg);
}
/* Module exit: unregister the algorithm. */
static void __exit ppc_spe_sha1_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}
module_init(ppc_spe_sha1_mod_init);
module_exit(ppc_spe_sha1_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, SPE optimized");
MODULE_ALIAS_CRYPTO("sha1");
MODULE_ALIAS_CRYPTO("sha1-ppc-spe");
| gpl-2.0 |
rbauduin/mptcp | drivers/media/i2c/adv7180.c | 487 | 18700 | /*
* adv7180.c Analog Devices ADV7180 video decoder driver
* Copyright (c) 2009 Intel Corporation
* Copyright (C) 2013 Cogent Embedded, Inc.
* Copyright (C) 2013 Renesas Solutions Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <linux/mutex.h>
#define ADV7180_INPUT_CONTROL_REG 0x00
#define ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM 0x00
#define ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM_PED 0x10
#define ADV7180_INPUT_CONTROL_AD_PAL_N_NTSC_J_SECAM 0x20
#define ADV7180_INPUT_CONTROL_AD_PAL_N_NTSC_M_SECAM 0x30
#define ADV7180_INPUT_CONTROL_NTSC_J 0x40
#define ADV7180_INPUT_CONTROL_NTSC_M 0x50
#define ADV7180_INPUT_CONTROL_PAL60 0x60
#define ADV7180_INPUT_CONTROL_NTSC_443 0x70
#define ADV7180_INPUT_CONTROL_PAL_BG 0x80
#define ADV7180_INPUT_CONTROL_PAL_N 0x90
#define ADV7180_INPUT_CONTROL_PAL_M 0xa0
#define ADV7180_INPUT_CONTROL_PAL_M_PED 0xb0
#define ADV7180_INPUT_CONTROL_PAL_COMB_N 0xc0
#define ADV7180_INPUT_CONTROL_PAL_COMB_N_PED 0xd0
#define ADV7180_INPUT_CONTROL_PAL_SECAM 0xe0
#define ADV7180_INPUT_CONTROL_PAL_SECAM_PED 0xf0
#define ADV7180_INPUT_CONTROL_INSEL_MASK 0x0f
#define ADV7180_EXTENDED_OUTPUT_CONTROL_REG 0x04
#define ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS 0xC5
#define ADV7180_AUTODETECT_ENABLE_REG 0x07
#define ADV7180_AUTODETECT_DEFAULT 0x7f
/* Contrast */
#define ADV7180_CON_REG 0x08 /*Unsigned */
#define ADV7180_CON_MIN 0
#define ADV7180_CON_DEF 128
#define ADV7180_CON_MAX 255
/* Brightness*/
#define ADV7180_BRI_REG 0x0a /*Signed */
#define ADV7180_BRI_MIN -128
#define ADV7180_BRI_DEF 0
#define ADV7180_BRI_MAX 127
/* Hue */
#define ADV7180_HUE_REG 0x0b /*Signed, inverted */
#define ADV7180_HUE_MIN -127
#define ADV7180_HUE_DEF 0
#define ADV7180_HUE_MAX 128
#define ADV7180_ADI_CTRL_REG 0x0e
#define ADV7180_ADI_CTRL_IRQ_SPACE 0x20
#define ADV7180_PWR_MAN_REG 0x0f
#define ADV7180_PWR_MAN_ON 0x04
#define ADV7180_PWR_MAN_OFF 0x24
#define ADV7180_PWR_MAN_RES 0x80
#define ADV7180_STATUS1_REG 0x10
#define ADV7180_STATUS1_IN_LOCK 0x01
#define ADV7180_STATUS1_AUTOD_MASK 0x70
#define ADV7180_STATUS1_AUTOD_NTSM_M_J 0x00
#define ADV7180_STATUS1_AUTOD_NTSC_4_43 0x10
#define ADV7180_STATUS1_AUTOD_PAL_M 0x20
#define ADV7180_STATUS1_AUTOD_PAL_60 0x30
#define ADV7180_STATUS1_AUTOD_PAL_B_G 0x40
#define ADV7180_STATUS1_AUTOD_SECAM 0x50
#define ADV7180_STATUS1_AUTOD_PAL_COMB 0x60
#define ADV7180_STATUS1_AUTOD_SECAM_525 0x70
#define ADV7180_IDENT_REG 0x11
#define ADV7180_ID_7180 0x18
#define ADV7180_ICONF1_ADI 0x40
#define ADV7180_ICONF1_ACTIVE_LOW 0x01
#define ADV7180_ICONF1_PSYNC_ONLY 0x10
#define ADV7180_ICONF1_ACTIVE_TO_CLR 0xC0
/* Saturation */
#define ADV7180_SD_SAT_CB_REG 0xe3 /*Unsigned */
#define ADV7180_SD_SAT_CR_REG 0xe4 /*Unsigned */
#define ADV7180_SAT_MIN 0
#define ADV7180_SAT_DEF 128
#define ADV7180_SAT_MAX 255
#define ADV7180_IRQ1_LOCK 0x01
#define ADV7180_IRQ1_UNLOCK 0x02
#define ADV7180_ISR1_ADI 0x42
#define ADV7180_ICR1_ADI 0x43
#define ADV7180_IMR1_ADI 0x44
#define ADV7180_IMR2_ADI 0x48
#define ADV7180_IRQ3_AD_CHANGE 0x08
#define ADV7180_ISR3_ADI 0x4A
#define ADV7180_ICR3_ADI 0x4B
#define ADV7180_IMR3_ADI 0x4C
#define ADV7180_IMR4_ADI 0x50
#define ADV7180_NTSC_V_BIT_END_REG 0xE6
#define ADV7180_NTSC_V_BIT_END_MANUAL_NVEND 0x4F
/* Per-device state for the ADV7180 video decoder. */
struct adv7180_state {
	struct v4l2_ctrl_handler ctrl_hdl;	/* V4L2 control handler */
	struct v4l2_subdev	sd;		/* embedded subdev */
	struct work_struct	work;		/* IRQ bottom half */
	struct mutex		mutex; /* mutual excl. when accessing chip */
	int			irq;		/* chip IRQ, <= 0 if unused */
	v4l2_std_id		curr_norm;	/* last known/forced standard */
	bool			autodetect;	/* true = chip autodetects std */
	u8			input;		/* selected INSEL input (0..15) */
};
#define to_adv7180_sd(_ctrl) (&container_of(_ctrl->handler, \
struct adv7180_state, \
ctrl_hdl)->sd)
/*
 * Translate the STATUS1 autodetect field into a V4L2 standard.
 * Without signal lock no standard can be trusted, so report UNKNOWN.
 */
static v4l2_std_id adv7180_std_to_v4l2(u8 status1)
{
	/* One entry per AUTOD code, indexed by STATUS1 bits [6:4] */
	static const v4l2_std_id autod_to_std[8] = {
		V4L2_STD_NTSC,				/* NTSC M/J */
		V4L2_STD_NTSC_443,
		V4L2_STD_PAL_M,
		V4L2_STD_PAL_60,
		V4L2_STD_PAL,				/* PAL B/G */
		V4L2_STD_SECAM,
		V4L2_STD_PAL_Nc | V4L2_STD_PAL_N,	/* PAL comb. N */
		V4L2_STD_SECAM,				/* SECAM 525 */
	};
	/* in case V4L2_IN_ST_NO_SIGNAL */
	if (!(status1 & ADV7180_STATUS1_IN_LOCK))
		return V4L2_STD_UNKNOWN;
	return autod_to_std[(status1 & ADV7180_STATUS1_AUTOD_MASK) >> 4];
}
/*
 * Map a V4L2 standard to the INSEL register value.  Composite standards
 * (PAL-60, NTSC-443, PAL-N/M/Nc) are matched exactly first so they are
 * not swallowed by the broader PAL/NTSC/SECAM bitmask tests below.
 * Returns -EINVAL for a standard the chip cannot be forced to.
 */
static int v4l2_std_to_adv7180(v4l2_std_id std)
{
	static const struct {
		v4l2_std_id std;
		int insel;
	} exact[] = {
		{ V4L2_STD_PAL_60,   ADV7180_INPUT_CONTROL_PAL60 },
		{ V4L2_STD_NTSC_443, ADV7180_INPUT_CONTROL_NTSC_443 },
		{ V4L2_STD_PAL_N,    ADV7180_INPUT_CONTROL_PAL_N },
		{ V4L2_STD_PAL_M,    ADV7180_INPUT_CONTROL_PAL_M },
		{ V4L2_STD_PAL_Nc,   ADV7180_INPUT_CONTROL_PAL_COMB_N },
	};
	unsigned int i;
	for (i = 0; i < sizeof(exact) / sizeof(exact[0]); i++)
		if (std == exact[i].std)
			return exact[i].insel;
	if (std & V4L2_STD_PAL)
		return ADV7180_INPUT_CONTROL_PAL_BG;
	if (std & V4L2_STD_NTSC)
		return ADV7180_INPUT_CONTROL_NTSC_M;
	if (std & V4L2_STD_SECAM)
		return ADV7180_INPUT_CONTROL_PAL_SECAM;
	return -EINVAL;
}
/* STATUS1 lock bit -> V4L2 input status (0 = signal present). */
static u32 adv7180_status_to_v4l2(u8 status1)
{
	return (status1 & ADV7180_STATUS1_IN_LOCK) ? 0 : V4L2_IN_ST_NO_SIGNAL;
}
/*
 * __adv7180_status - read STATUS1 once and optionally decode it into a
 * V4L2 input status (@status) and/or detected standard (@std); either
 * pointer may be NULL.  Caller must hold state->mutex ("__" prefix).
 * Returns 0 or a negative i2c error.
 */
static int __adv7180_status(struct i2c_client *client, u32 *status,
			    v4l2_std_id *std)
{
	int status1 = i2c_smbus_read_byte_data(client, ADV7180_STATUS1_REG);
	if (status1 < 0)
		return status1;
	if (status)
		*status = adv7180_status_to_v4l2(status1);
	if (std)
		*std = adv7180_std_to_v4l2(status1);
	return 0;
}
/* Recover the driver state from the embedded v4l2_subdev. */
static inline struct adv7180_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct adv7180_state, sd);
}
/*
 * adv7180_querystd - report the current video standard.
 * In interrupt-driven or forced-standard operation curr_norm is already
 * authoritative; otherwise poll the chip.  Returns 0 or a negative error
 * (including -ERESTARTSYS from the interruptible lock).
 */
static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
	struct adv7180_state *state = to_state(sd);
	int err = mutex_lock_interruptible(&state->mutex);
	if (err)
		return err;
	/* when we are interrupt driven we know the state */
	if (!state->autodetect || state->irq > 0)
		*std = state->curr_norm;
	else
		err = __adv7180_status(v4l2_get_subdevdata(sd), NULL, std);
	mutex_unlock(&state->mutex);
	return err;
}
/*
 * adv7180_s_routing - select the analog input (INSEL field, 0..15) with
 * a read-modify-write of the input control register.
 *
 * Fixes: an input outside the INSEL mask previously jumped to "out" with
 * ret still 0, silently reporting success without touching the hardware;
 * it now returns -EINVAL.  state->input is also only updated when the
 * register write actually succeeded.
 *
 * Returns 0 on success or a negative error.
 */
static int adv7180_s_routing(struct v4l2_subdev *sd, u32 input,
			     u32 output, u32 config)
{
	struct adv7180_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret = mutex_lock_interruptible(&state->mutex);
	if (ret)
		return ret;
	/* We cannot discriminate between LQFP and 40-pin LFCSP, so accept
	 * all inputs and let the card driver take care of validation
	 */
	if ((input & ADV7180_INPUT_CONTROL_INSEL_MASK) != input) {
		ret = -EINVAL;
		goto out;
	}
	ret = i2c_smbus_read_byte_data(client, ADV7180_INPUT_CONTROL_REG);
	if (ret < 0)
		goto out;
	/* Keep the standard-select bits, replace only INSEL */
	ret &= ~ADV7180_INPUT_CONTROL_INSEL_MASK;
	ret = i2c_smbus_write_byte_data(client,
					ADV7180_INPUT_CONTROL_REG, ret | input);
	if (ret == 0)
		state->input = input;
out:
	mutex_unlock(&state->mutex);
	return ret;
}
/*
 * adv7180_g_input_status - report signal presence (V4L2_IN_ST_* bits)
 * by polling STATUS1 under the state mutex.  Returns 0 or a negative
 * error.
 */
static int adv7180_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
	struct adv7180_state *state = to_state(sd);
	int ret = mutex_lock_interruptible(&state->mutex);
	if (ret)
		return ret;
	ret = __adv7180_status(v4l2_get_subdevdata(sd), status, NULL);
	mutex_unlock(&state->mutex);
	return ret;
}
/*
 * adv7180_s_std - force a video standard or (V4L2_STD_ALL) re-enable
 * chip autodetection.  Programs the input control register, preserving
 * the selected input, and updates curr_norm/autodetect accordingly.
 * Returns 0 or a negative error.
 */
static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct adv7180_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret = mutex_lock_interruptible(&state->mutex);
	if (ret)
		return ret;
	/* all standards -> autodetect */
	if (std == V4L2_STD_ALL) {
		ret =
		    i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
				ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM
					      | state->input);
		if (ret < 0)
			goto out;
		/* Seed curr_norm with whatever the chip sees right now */
		__adv7180_status(client, NULL, &state->curr_norm);
		state->autodetect = true;
	} else {
		/* Translate the standard into an INSEL-compatible value */
		ret = v4l2_std_to_adv7180(std);
		if (ret < 0)
			goto out;
		ret = i2c_smbus_write_byte_data(client,
						ADV7180_INPUT_CONTROL_REG,
						ret | state->input);
		if (ret < 0)
			goto out;
		state->curr_norm = std;
		state->autodetect = false;
	}
	ret = 0;
out:
	mutex_unlock(&state->mutex);
	return ret;
}
/* Apply a user control (brightness/hue/contrast/saturation) to the chip. */
static int adv7180_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_adv7180_sd(ctrl);
	struct adv7180_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int value;
	int ret;

	ret = mutex_lock_interruptible(&state->mutex);
	if (ret)
		return ret;

	value = ctrl->val;
	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		ret = i2c_smbus_write_byte_data(client, ADV7180_BRI_REG,
						value);
		break;
	case V4L2_CID_HUE:
		/* Hue is inverted according to HSL chart */
		ret = i2c_smbus_write_byte_data(client, ADV7180_HUE_REG,
						-value);
		break;
	case V4L2_CID_CONTRAST:
		ret = i2c_smbus_write_byte_data(client, ADV7180_CON_REG,
						value);
		break;
	case V4L2_CID_SATURATION:
		/*
		 * The chip has separate Cb/Cr gain registers; expose them as
		 * one saturation control instead of confusing the user with
		 * V4L2_CID_BLUE_BALANCE/V4L2_CID_RED_BALANCE.
		 */
		ret = i2c_smbus_write_byte_data(client, ADV7180_SD_SAT_CB_REG,
						value);
		if (ret >= 0)
			ret = i2c_smbus_write_byte_data(client,
							ADV7180_SD_SAT_CR_REG,
							value);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&state->mutex);
	return ret;
}
/* Control ops: every V4L2 control funnels through adv7180_s_ctrl(). */
static const struct v4l2_ctrl_ops adv7180_ctrl_ops = {
	.s_ctrl = adv7180_s_ctrl,
};
/*
 * Register the four user controls with the V4L2 control framework and
 * write their default values out to the hardware.  Returns 0 or the
 * handler's first recorded error (the handler is freed on failure).
 */
static int adv7180_init_controls(struct adv7180_state *state)
{
	v4l2_ctrl_handler_init(&state->ctrl_hdl, 4);

	v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
			  V4L2_CID_BRIGHTNESS, ADV7180_BRI_MIN,
			  ADV7180_BRI_MAX, 1, ADV7180_BRI_DEF);
	v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
			  V4L2_CID_CONTRAST, ADV7180_CON_MIN,
			  ADV7180_CON_MAX, 1, ADV7180_CON_DEF);
	v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
			  V4L2_CID_SATURATION, ADV7180_SAT_MIN,
			  ADV7180_SAT_MAX, 1, ADV7180_SAT_DEF);
	v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
			  V4L2_CID_HUE, ADV7180_HUE_MIN,
			  ADV7180_HUE_MAX, 1, ADV7180_HUE_DEF);
	state->sd.ctrl_handler = &state->ctrl_hdl;
	if (state->ctrl_hdl.error) {
		/* any failed v4l2_ctrl_new_std() is latched in ->error */
		int err = state->ctrl_hdl.error;

		v4l2_ctrl_handler_free(&state->ctrl_hdl);
		return err;
	}
	/* push the defaults to the chip via adv7180_s_ctrl() */
	v4l2_ctrl_handler_setup(&state->ctrl_hdl);

	return 0;
}
/* Tear down the control handler created by adv7180_init_controls(). */
static void adv7180_exit_controls(struct adv7180_state *state)
{
	v4l2_ctrl_handler_free(&state->ctrl_hdl);
}
/* Enumerate media-bus formats: the decoder emits exactly one (YUYV 2x8). */
static int adv7180_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
				 enum v4l2_mbus_pixelcode *code)
{
	if (index != 0)
		return -EINVAL;

	*code = V4L2_MBUS_FMT_YUYV8_2X8;

	return 0;
}
static int adv7180_mbus_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *fmt)
{
struct adv7180_state *state = to_state(sd);
fmt->code = V4L2_MBUS_FMT_YUYV8_2X8;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
fmt->field = V4L2_FIELD_INTERLACED;
fmt->width = 720;
fmt->height = state->curr_norm & V4L2_STD_525_60 ? 480 : 576;
return 0;
}
/* Describe the (fixed) media-bus configuration to the bridge driver. */
static int adv7180_g_mbus_config(struct v4l2_subdev *sd,
				 struct v4l2_mbus_config *cfg)
{
	/*
	 * The ADV7180 sensor supports BT.601/656 output modes.
	 * The BT.656 is default and not yet configurable by s/w.
	 */
	cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
		     V4L2_MBUS_DATA_ACTIVE_HIGH;
	cfg->type = V4L2_MBUS_BT656;

	return 0;
}
/* Video ops; note the single fixed format serves try/get/set alike. */
static const struct v4l2_subdev_video_ops adv7180_video_ops = {
	.querystd = adv7180_querystd,
	.g_input_status = adv7180_g_input_status,
	.s_routing = adv7180_s_routing,
	.enum_mbus_fmt = adv7180_enum_mbus_fmt,
	.try_mbus_fmt = adv7180_mbus_fmt,
	.g_mbus_fmt = adv7180_mbus_fmt,
	.s_mbus_fmt = adv7180_mbus_fmt,
	.g_mbus_config = adv7180_g_mbus_config,
};

static const struct v4l2_subdev_core_ops adv7180_core_ops = {
	.s_std = adv7180_s_std,
};

/* Top-level subdev ops table registered in adv7180_probe(). */
static const struct v4l2_subdev_ops adv7180_ops = {
	.core = &adv7180_core_ops,
	.video = &adv7180_video_ops,
};
/*
 * Bottom half of the ADV7180 interrupt, scheduled by adv7180_irq().
 *
 * Runs with the interrupt line disabled: switches the register map to the
 * interrupt space, reads and acknowledges ISR3 and, on an "AD change"
 * event in autodetect mode, refreshes the cached video standard.  The
 * final enable_irq() balances the disable done in the hard IRQ handler.
 */
static void adv7180_work(struct work_struct *work)
{
	struct adv7180_state *state = container_of(work, struct adv7180_state,
						   work);
	struct i2c_client *client = v4l2_get_subdevdata(&state->sd);
	u8 isr3;

	mutex_lock(&state->mutex);
	i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
				  ADV7180_ADI_CTRL_IRQ_SPACE);
	/* NOTE(review): a negative i2c error would be truncated into isr3
	 * here; presumably acceptable since the write below only acks it. */
	isr3 = i2c_smbus_read_byte_data(client, ADV7180_ISR3_ADI);
	/* clear */
	i2c_smbus_write_byte_data(client, ADV7180_ICR3_ADI, isr3);
	/* back to the normal register map */
	i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG, 0);

	if (isr3 & ADV7180_IRQ3_AD_CHANGE && state->autodetect)
		__adv7180_status(client, NULL, &state->curr_norm);
	mutex_unlock(&state->mutex);

	enable_irq(state->irq);
}
/*
 * Hard IRQ handler: mask the line and defer the i2c work to process
 * context (smbus transfers can sleep).
 */
static irqreturn_t adv7180_irq(int irq, void *devid)
{
	struct adv7180_state *state = devid;

	/*
	 * Disable the line *before* queueing the work: if the work item ran
	 * to completion on another CPU first, its enable_irq() would be
	 * unbalanced and the subsequent disable here would leave the
	 * interrupt permanently masked.
	 */
	disable_irq_nosync(state->irq);
	schedule_work(&state->work);

	return IRQ_HANDLED;
}
/*
 * Bring the decoder into its operating state: program input selection and
 * standard (auto)detection, select ITU-R BT.656-4 output, read back the
 * currently detected norm and, when an interrupt line is available, hook
 * it up with only the "AD change" event unmasked.
 *
 * Called from probe and resume.  Returns 0 or a negative errno.
 */
static int init_device(struct i2c_client *client, struct adv7180_state *state)
{
	int ret;

	/* Initialize adv7180 */
	/* Enable autodetection */
	if (state->autodetect) {
		ret =
		    i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
				ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM
					      | state->input);
		if (ret < 0)
			return ret;

		ret =
		    i2c_smbus_write_byte_data(client,
					      ADV7180_AUTODETECT_ENABLE_REG,
					      ADV7180_AUTODETECT_DEFAULT);
		if (ret < 0)
			return ret;
	} else {
		ret = v4l2_std_to_adv7180(state->curr_norm);
		if (ret < 0)
			return ret;

		ret =
		    i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
					      ret | state->input);
		if (ret < 0)
			return ret;
	}
	/* ITU-R BT.656-4 compatible */
	ret = i2c_smbus_write_byte_data(client,
			ADV7180_EXTENDED_OUTPUT_CONTROL_REG,
			ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS);
	if (ret < 0)
		return ret;
	/* Manually set V bit end position in NTSC mode */
	ret = i2c_smbus_write_byte_data(client,
					ADV7180_NTSC_V_BIT_END_REG,
					ADV7180_NTSC_V_BIT_END_MANUAL_NVEND);
	if (ret < 0)
		return ret;

	/* read current norm */
	__adv7180_status(client, NULL, &state->curr_norm);

	/* register for interrupts */
	if (state->irq > 0) {
		ret = request_irq(state->irq, adv7180_irq, 0, KBUILD_MODNAME,
				  state);
		if (ret)
			return ret;

		ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
						ADV7180_ADI_CTRL_IRQ_SPACE);
		if (ret < 0)
			goto err_free_irq;

		/* config the Interrupt pin to be active low */
		ret = i2c_smbus_write_byte_data(client, ADV7180_ICONF1_ADI,
						ADV7180_ICONF1_ACTIVE_LOW |
						ADV7180_ICONF1_PSYNC_ONLY);
		if (ret < 0)
			goto err_free_irq;

		/* mask interrupt groups 1, 2 and 4 completely */
		ret = i2c_smbus_write_byte_data(client, ADV7180_IMR1_ADI, 0);
		if (ret < 0)
			goto err_free_irq;

		ret = i2c_smbus_write_byte_data(client, ADV7180_IMR2_ADI, 0);
		if (ret < 0)
			goto err_free_irq;

		/* enable AD change interrupts interrupts */
		ret = i2c_smbus_write_byte_data(client, ADV7180_IMR3_ADI,
						ADV7180_IRQ3_AD_CHANGE);
		if (ret < 0)
			goto err_free_irq;

		ret = i2c_smbus_write_byte_data(client, ADV7180_IMR4_ADI, 0);
		if (ret < 0)
			goto err_free_irq;

		/* back to the normal register map */
		ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
						0);
		if (ret < 0)
			goto err_free_irq;
	}

	return 0;

err_free_irq:
	/*
	 * Don't leak the requested IRQ (its handler references @state) when
	 * the interrupt register setup fails; the original returned with the
	 * line still armed.
	 */
	free_irq(state->irq, state);
	return ret;
}
/*
 * I2C probe: allocate driver state, register the V4L2 subdevice, create
 * the controls and program the chip.  Starts in autodetect mode on
 * input 0.  State memory is devm-managed, so error paths only unwind the
 * subdev/control registrations.
 */
static int adv7180_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct adv7180_state *state;
	struct v4l2_subdev *sd;
	int ret;

	/* Check if the adapter supports the needed features */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	v4l_info(client, "chip found @ 0x%02x (%s)\n",
		 client->addr, client->adapter->name);

	state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
	if (state == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	state->irq = client->irq;
	INIT_WORK(&state->work, adv7180_work);
	mutex_init(&state->mutex);
	state->autodetect = true;	/* follow the incoming standard */
	state->input = 0;
	sd = &state->sd;
	v4l2_i2c_subdev_init(sd, client, &adv7180_ops);

	ret = adv7180_init_controls(state);
	if (ret)
		goto err_unreg_subdev;
	ret = init_device(client, state);
	if (ret)
		goto err_free_ctrl;
	return 0;

	/* unwind in reverse order of the steps above */
err_free_ctrl:
	adv7180_exit_controls(state);
err_unreg_subdev:
	mutex_destroy(&state->mutex);
	v4l2_device_unregister_subdev(sd);
err:
	printk(KERN_ERR KBUILD_MODNAME ": Failed to probe: %d\n", ret);
	return ret;
}
/*
 * I2C remove: release the interrupt, flush any pending detect work and
 * unregister the subdevice (state memory itself is devm-managed).
 */
static int adv7180_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct adv7180_state *state = to_state(sd);

	if (state->irq > 0) {
		/* no new work can be queued once the IRQ is gone */
		free_irq(client->irq, state);
		if (cancel_work_sync(&state->work)) {
			/*
			 * Work was pending, therefore we need to enable
			 * IRQ here to balance the disable_irq() done in the
			 * interrupt handler.
			 */
			enable_irq(state->irq);
		}
	}

	mutex_destroy(&state->mutex);
	v4l2_device_unregister_subdev(sd);
	return 0;
}
/* I2C match table: a single entry keyed on the module name. */
static const struct i2c_device_id adv7180_id[] = {
	{KBUILD_MODNAME, 0},
	{},
};
#ifdef CONFIG_PM_SLEEP
/* System suspend: put the decoder into its power-down state. */
static int adv7180_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	int ret = i2c_smbus_write_byte_data(client, ADV7180_PWR_MAN_REG,
					    ADV7180_PWR_MAN_OFF);

	return ret < 0 ? ret : 0;
}
/*
 * System resume: power the decoder back up and reprogram it from the
 * cached state via init_device().
 *
 * NOTE(review): init_device() also calls request_irq(), and the IRQ
 * requested at probe time was never freed on suspend - this second
 * request looks like it would fail for state->irq > 0; confirm, and
 * consider splitting register setup from IRQ registration.
 */
static int adv7180_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct adv7180_state *state = to_state(sd);
	int ret;

	ret = i2c_smbus_write_byte_data(client, ADV7180_PWR_MAN_REG,
					ADV7180_PWR_MAN_ON);
	if (ret < 0)
		return ret;
	ret = init_device(client, state);
	if (ret < 0)
		return ret;
	return 0;
}
static SIMPLE_DEV_PM_OPS(adv7180_pm_ops, adv7180_suspend, adv7180_resume);
#define ADV7180_PM_OPS (&adv7180_pm_ops)
#else
#define ADV7180_PM_OPS NULL
#endif
MODULE_DEVICE_TABLE(i2c, adv7180_id);
/* I2C driver glue; PM ops compile away when CONFIG_PM_SLEEP is off. */
static struct i2c_driver adv7180_driver = {
	.driver = {
		   .owner = THIS_MODULE,
		   .name = KBUILD_MODNAME,
		   .pm = ADV7180_PM_OPS,
		   },
	.probe = adv7180_probe,
	.remove = adv7180_remove,
	.id_table = adv7180_id,
};
module_i2c_driver(adv7180_driver);
MODULE_DESCRIPTION("Analog Devices ADV7180 video decoder driver");
MODULE_AUTHOR("Mocean Laboratories");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
teemodk/android_kernel_htc_endeavoru | sound/soc/soc-jack.c | 1767 | 9821 | /*
* soc-jack.c -- ALSA SoC jack handling
*
* Copyright 2008 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <sound/jack.h>
#include <sound/soc.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <trace/events/asoc.h>
/**
 * snd_soc_jack_new - Create a new jack
 * @codec: ASoC codec the jack is attached to
 * @id: an identifying string for this jack
 * @type: a bitmask of enum snd_jack_type values that can be detected by
 *	  this jack
 * @jack: structure to use for the jack
 *
 * Creates a new jack object.
 *
 * Returns zero if successful, or a negative error code on failure.
 * On success jack will be initialised.
 */
int snd_soc_jack_new(struct snd_soc_codec *codec, const char *id, int type,
		     struct snd_soc_jack *jack)
{
	jack->codec = codec;
	INIT_LIST_HEAD(&jack->pins);
	INIT_LIST_HEAD(&jack->jack_zones);
	BLOCKING_INIT_NOTIFIER_HEAD(&jack->notifier);

	return snd_jack_new(codec->card->snd_card, id, type, &jack->jack);
}
EXPORT_SYMBOL_GPL(snd_soc_jack_new);
/**
 * snd_soc_jack_report - Report the current status for a jack
 *
 * @jack: the jack
 * @status: a bitmask of enum snd_jack_type values that are currently detected.
 * @mask: a bitmask of enum snd_jack_type values that being reported.
 *
 * If configured using snd_soc_jack_add_pins() then the associated
 * DAPM pins will be enabled or disabled as appropriate and DAPM
 * synchronised.
 *
 * Note: This function uses mutexes and should be called from a
 * context which can sleep (such as a workqueue).
 */
void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
{
	struct snd_soc_codec *codec;
	struct snd_soc_dapm_context *dapm;
	struct snd_soc_jack_pin *pin;
	int enable;
	int oldstatus;

	trace_snd_soc_jack_report(jack, mask, status);

	if (!jack)
		return;

	codec = jack->codec;
	dapm =  &codec->dapm;

	mutex_lock(&codec->mutex);

	oldstatus = jack->status;

	/* fold the new bits (limited to @mask) into the cached status */
	jack->status &= ~mask;
	jack->status |= status & mask;

	/* The DAPM sync is expensive enough to be worth skipping.
	 * However, empty mask means pin synchronization is desired. */
	if (mask && (jack->status == oldstatus))
		goto out;

	trace_snd_soc_jack_notify(jack, status);

	/* enable/disable each associated DAPM pin per the new status */
	list_for_each_entry(pin, &jack->pins, list) {
		enable = pin->mask & jack->status;

		if (pin->invert)
			enable = !enable;

		if (enable)
			snd_soc_dapm_enable_pin(dapm, pin->pin);
		else
			snd_soc_dapm_disable_pin(dapm, pin->pin);
	}

	/* Report before the DAPM sync to help users updating micbias status */
	blocking_notifier_call_chain(&jack->notifier, status, jack);

	snd_soc_dapm_sync(dapm);

	snd_jack_report(jack->jack, jack->status);

out:
	mutex_unlock(&codec->mutex);
}
EXPORT_SYMBOL_GPL(snd_soc_jack_report);
/**
* snd_soc_jack_add_zones - Associate voltage zones with jack
*
* @jack: ASoC jack
* @count: Number of zones
* @zone: Array of zones
*
* After this function has been called the zones specified in the
* array will be associated with the jack.
*/
int snd_soc_jack_add_zones(struct snd_soc_jack *jack, int count,
struct snd_soc_jack_zone *zones)
{
int i;
for (i = 0; i < count; i++) {
INIT_LIST_HEAD(&zones[i].list);
list_add(&(zones[i].list), &jack->jack_zones);
}
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_jack_add_zones);
/**
 * snd_soc_jack_get_type - map a mic-bias voltage to a jack type
 *
 * @jack: ASoC jack with zones registered via snd_soc_jack_add_zones()
 * @micbias_voltage: mic bias voltage at adc channel when jack is plugged in
 *
 * Returns the jack type of the first registered zone whose half-open
 * range [min_mv, max_mv) contains @micbias_voltage, or 0 when no zone
 * matches.
 */
int snd_soc_jack_get_type(struct snd_soc_jack *jack, int micbias_voltage)
{
	struct snd_soc_jack_zone *zone;

	list_for_each_entry(zone, &jack->jack_zones, list) {
		if (micbias_voltage < zone->min_mv)
			continue;
		if (micbias_voltage < zone->max_mv)
			return zone->jack_type;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_jack_get_type);
/**
 * snd_soc_jack_add_pins - Associate DAPM pins with an ASoC jack
 *
 * @jack: ASoC jack
 * @count: Number of pins
 * @pins: Array of pins
 *
 * After this function has been called the DAPM pins specified in the
 * pins array will have their status updated to reflect the current
 * state of the jack whenever the jack status is updated.
 */
int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
			  struct snd_soc_jack_pin *pins)
{
	int i;

	/* validate and link each pin in turn */
	for (i = 0; i < count; i++) {
		struct snd_soc_jack_pin *pin = &pins[i];

		if (!pin->pin) {
			printk(KERN_ERR "No name for pin %d\n", i);
			return -EINVAL;
		}
		if (!pin->mask) {
			printk(KERN_ERR "No mask for pin %d (%s)\n", i,
			       pin->pin);
			return -EINVAL;
		}

		INIT_LIST_HEAD(&pin->list);
		list_add(&pin->list, &jack->pins);
	}

	/* Update to reflect the last reported status; canned jack
	 * implementations are likely to set their state before the
	 * card has an opportunity to associate pins.
	 */
	snd_soc_jack_report(jack, 0, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_jack_add_pins);
/**
 * snd_soc_jack_notifier_register - Register a notifier for jack status
 *
 * @jack: ASoC jack
 * @nb: Notifier block to register
 *
 * Register for notification of the current status of the jack.  Note
 * that it is not possible to report additional jack events in the
 * callback from the notifier, this is intended to support
 * applications such as enabling electrical detection only when a
 * mechanical detection event has occurred.
 */
void snd_soc_jack_notifier_register(struct snd_soc_jack *jack,
				    struct notifier_block *nb)
{
	/* thin wrapper; callbacks fire from snd_soc_jack_report() */
	blocking_notifier_chain_register(&jack->notifier, nb);
}
EXPORT_SYMBOL_GPL(snd_soc_jack_notifier_register);

/**
 * snd_soc_jack_notifier_unregister - Unregister a notifier for jack status
 *
 * @jack: ASoC jack
 * @nb: Notifier block to unregister
 *
 * Stop notifying for status changes.
 */
void snd_soc_jack_notifier_unregister(struct snd_soc_jack *jack,
				      struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&jack->notifier, nb);
}
EXPORT_SYMBOL_GPL(snd_soc_jack_notifier_unregister);
#ifdef CONFIG_GPIOLIB
/* gpio detect */
/* Sample the detect GPIO (or board callback) and report the jack state. */
static void snd_soc_jack_gpio_detect(struct snd_soc_jack_gpio *gpio)
{
	struct snd_soc_jack *jack = gpio->jack;
	int report = 0;
	int level;

	level = gpio_get_value_cansleep(gpio->gpio);
	if (gpio->invert)
		level = !level;
	if (level)
		report = gpio->report;

	/* a board-supplied callback overrides the raw GPIO reading */
	if (gpio->jack_status_check)
		report = gpio->jack_status_check();

	snd_soc_jack_report(jack, report, gpio->report);
}
/* irq handler for gpio pin: defer the (sleeping) pin read to gpio_work()
 * after the configured debounce interval. */
static irqreturn_t gpio_handler(int irq, void *data)
{
	struct snd_soc_jack_gpio *gpio = data;
	struct device *dev = gpio->jack->codec->card->dev;

	trace_snd_soc_jack_irq(gpio->name);

	/* keep the system awake long enough for the debounced detect work */
	if (device_may_wakeup(dev))
		pm_wakeup_event(dev, gpio->debounce_time + 50);

	schedule_delayed_work(&gpio->work,
			      msecs_to_jiffies(gpio->debounce_time));

	return IRQ_HANDLED;
}
/* Deferred (debounced) half of the GPIO jack interrupt. */
static void gpio_work(struct work_struct *work)
{
	snd_soc_jack_gpio_detect(container_of(work, struct snd_soc_jack_gpio,
					      work.work));
}
/**
 * snd_soc_jack_add_gpios - Associate GPIO pins with an ASoC jack
 *
 * @jack: ASoC jack
 * @count: number of pins
 * @gpios: array of gpio pins
 *
 * This function will request gpio, set data direction and request irq
 * for each gpio in the array.  On failure, everything set up so far
 * (including earlier array entries) is released again.
 */
int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
			struct snd_soc_jack_gpio *gpios)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		if (!gpio_is_valid(gpios[i].gpio)) {
			printk(KERN_ERR "Invalid gpio %d\n",
				gpios[i].gpio);
			ret = -EINVAL;
			goto undo;
		}
		if (!gpios[i].name) {
			printk(KERN_ERR "No name for gpio %d\n",
				gpios[i].gpio);
			ret = -EINVAL;
			goto undo;
		}

		ret = gpio_request(gpios[i].gpio, gpios[i].name);
		if (ret)
			goto undo;

		ret = gpio_direction_input(gpios[i].gpio);
		if (ret)
			goto err;

		INIT_DELAYED_WORK(&gpios[i].work, gpio_work);
		gpios[i].jack = jack;

		ret = request_any_context_irq(gpio_to_irq(gpios[i].gpio),
					      gpio_handler,
					      IRQF_TRIGGER_RISING |
					      IRQF_TRIGGER_FALLING,
					      gpios[i].name,
					      &gpios[i]);
		if (ret < 0)
			goto err;

		if (gpios[i].wake) {
			ret = irq_set_irq_wake(gpio_to_irq(gpios[i].gpio), 1);
			if (ret != 0)
				/* non-fatal: detection still works */
				printk(KERN_ERR
				  "Failed to mark GPIO %d as wake source: %d\n",
					gpios[i].gpio, ret);
		}

#ifdef CONFIG_GPIO_SYSFS
		/* Expose GPIO value over sysfs for diagnostic purposes */
		gpio_export(gpios[i].gpio, false);
#endif

		/* Update initial jack status */
		snd_soc_jack_gpio_detect(&gpios[i]);
	}

	return 0;

	/* "err" frees the GPIO of the entry that failed mid-setup;
	 * "undo" then unwinds all fully-configured earlier entries. */
err:
	gpio_free(gpios[i].gpio);
undo:
	snd_soc_jack_free_gpios(jack, i, gpios);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_jack_add_gpios);
/**
 * snd_soc_jack_free_gpios - Release GPIO pins' resources of an ASoC jack
 *
 * @jack: ASoC jack
 * @count: number of pins
 * @gpios: array of gpio pins
 *
 * Release gpio and irq resources for gpio pins associated with an ASoC jack.
 */
void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
			struct snd_soc_jack_gpio *gpios)
{
	int i;

	for (i = 0; i < count; i++) {
#ifdef CONFIG_GPIO_SYSFS
		gpio_unexport(gpios[i].gpio);
#endif
		/* free the IRQ first so no new detect work can be queued */
		free_irq(gpio_to_irq(gpios[i].gpio), &gpios[i]);
		cancel_delayed_work_sync(&gpios[i].work);
		gpio_free(gpios[i].gpio);
		gpios[i].jack = NULL;
	}
}
EXPORT_SYMBOL_GPL(snd_soc_jack_free_gpios);
#endif /* CONFIG_GPIOLIB */
| gpl-2.0 |
javifo/nameless_kernel_samsung_smdk4412 | arch/arm/mach-pxa/colibri-pxa270.c | 2279 | 7902 | /*
* linux/arch/arm/mach-pxa/colibri-pxa270.c
*
* Support for Toradex PXA270 based Colibri module
* Daniel Mack <daniel@caiaq.de>
* Marek Vasut <marek.vasut@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <linux/ucb1400.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach-types.h>
#include <asm/sizes.h>
#include <mach/audio.h>
#include <mach/colibri.h>
#include <mach/pxa27x.h>
#include "devices.h"
#include "generic.h"
/******************************************************************************
 * Evaluation board MFP
 ******************************************************************************/
#ifdef	CONFIG_MACH_COLIBRI_EVALBOARD
/* Multi-function pin setup used when the module sits on the Toradex
 * evaluation board; applied from colibri_pxa270_init(). */
static mfp_cfg_t colibri_pxa270_evalboard_pin_config[] __initdata = {
	/* MMC */
	GPIO32_MMC_CLK,
	GPIO92_MMC_DAT_0,
	GPIO109_MMC_DAT_1,
	GPIO110_MMC_DAT_2,
	GPIO111_MMC_DAT_3,
	GPIO112_MMC_CMD,
	GPIO0_GPIO,	/* SD detect */

	/* FFUART */
	GPIO39_FFUART_TXD,
	GPIO34_FFUART_RXD,

	/* UHC */
	GPIO88_USBH1_PWR,
	GPIO89_USBH1_PEN,
	GPIO119_USBH2_PWR,
	GPIO120_USBH2_PEN,

	/* PCMCIA */
	GPIO85_nPCE_1,
	GPIO54_nPCE_2,
	GPIO55_nPREG,
	GPIO50_nPIOR,
	GPIO51_nPIOW,
	GPIO49_nPWE,
	GPIO48_nPOE,
	GPIO57_nIOIS16,
	GPIO56_nPWAIT,
	GPIO104_PSKTSEL,
	GPIO53_GPIO,	/* RESET */
	GPIO83_GPIO,	/* BVD1 */
	GPIO82_GPIO,	/* BVD2 */
	GPIO1_GPIO,	/* READY */
	GPIO84_GPIO,	/* DETECT */
	GPIO107_GPIO,	/* PPEN */

	/* I2C */
	GPIO117_I2C_SCL,
	GPIO118_I2C_SDA,
};
#else
/* empty table keeps colibri_pxa270_init() buildable without the board */
static mfp_cfg_t colibri_pxa270_evalboard_pin_config[] __initdata = {};
#endif
#ifdef	CONFIG_MACH_COLIBRI_PXA270_INCOME
/* Pin setup for the Income s.r.o. SH-Dmaster baseboard. */
static mfp_cfg_t income_pin_config[] __initdata = {
	/* MMC */
	GPIO32_MMC_CLK,
	GPIO92_MMC_DAT_0,
	GPIO109_MMC_DAT_1,
	GPIO110_MMC_DAT_2,
	GPIO111_MMC_DAT_3,
	GPIO112_MMC_CMD,
	GPIO0_GPIO,	/* SD detect */
	GPIO1_GPIO,	/* SD read-only */

	/* FFUART */
	GPIO39_FFUART_TXD,
	GPIO34_FFUART_RXD,

	/* BFUART */
	GPIO42_BTUART_RXD,
	GPIO43_BTUART_TXD,
	GPIO45_BTUART_RTS,

	/* STUART */
	GPIO46_STUART_RXD,
	GPIO47_STUART_TXD,

	/* UHC */
	GPIO88_USBH1_PWR,
	GPIO89_USBH1_PEN,

	/* LCD */
	GPIOxx_LCD_TFT_16BPP,

	/* PWM */
	GPIO16_PWM0_OUT,

	/* I2C */
	GPIO117_I2C_SCL,
	GPIO118_I2C_SDA,

	/* LED */
	GPIO54_GPIO,	/* LED A */
	GPIO55_GPIO,	/* LED B */
};
#else
/* empty table keeps colibri_pxa270_init() buildable without the board */
static mfp_cfg_t income_pin_config[] __initdata = {};
#endif
/******************************************************************************
 * Pin configuration
 ******************************************************************************/
/* On-module pins, applied for every baseboard. */
static mfp_cfg_t colibri_pxa270_pin_config[] __initdata = {
	/* Ethernet */
	GPIO78_nCS_2,	/* Ethernet CS */
	GPIO114_GPIO,	/* Ethernet IRQ */

	/* AC97 */
	GPIO28_AC97_BITCLK,
	GPIO29_AC97_SDATA_IN_0,
	GPIO30_AC97_SDATA_OUT,
	GPIO31_AC97_SYNC,
	GPIO95_AC97_nRESET,
	GPIO98_AC97_SYSCLK,
	GPIO113_GPIO,	/* Touchscreen IRQ */
};
/******************************************************************************
 * NOR Flash
 ******************************************************************************/
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
/* Fixed partition layout of the 32 MiB on-module NOR flash at CS0. */
static struct mtd_partition colibri_partitions[] = {
	{
		.name =		"Bootloader",
		.offset =	0x00000000,
		.size =		0x00040000,
		.mask_flags =	MTD_WRITEABLE	/* force read-only */
	}, {
		.name =		"Kernel",
		.offset =	0x00040000,
		.size =		0x00400000,
		.mask_flags =	0
	}, {
		.name =		"Rootfs",
		.offset =	0x00440000,
		.size =		MTDPART_SIZ_FULL,
		.mask_flags =	0
	}
};

static struct physmap_flash_data colibri_flash_data[] = {
	{
		.width		= 4,			/* bankwidth in bytes */
		.parts		= colibri_partitions,
		.nr_parts	= ARRAY_SIZE(colibri_partitions)
	}
};

static struct resource colibri_pxa270_flash_resource = {
	.start	= PXA_CS0_PHYS,
	.end	= PXA_CS0_PHYS + SZ_32M - 1,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device colibri_pxa270_flash_device = {
	.name	= "physmap-flash",
	.id	= 0,
	.dev 	= {
		.platform_data = colibri_flash_data,
	},
	.resource = &colibri_pxa270_flash_resource,
	.num_resources = 1,
};

static void __init colibri_pxa270_nor_init(void)
{
	platform_device_register(&colibri_pxa270_flash_device);
}
#else
/* stub when physmap flash support is not configured */
static inline void colibri_pxa270_nor_init(void) {}
#endif
/******************************************************************************
 * Ethernet
 ******************************************************************************/
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
/* DM9000 on CS2: index register at +0, data register at +4,
 * IRQ on GPIO114 (rising edge). */
static struct resource colibri_pxa270_dm9000_resources[] = {
	{
		.start	= PXA_CS2_PHYS,
		.end	= PXA_CS2_PHYS + 3,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= PXA_CS2_PHYS + 4,
		.end	= PXA_CS2_PHYS + 4 + 500,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= gpio_to_irq(GPIO114_COLIBRI_PXA270_ETH_IRQ),
		.end	= gpio_to_irq(GPIO114_COLIBRI_PXA270_ETH_IRQ),
		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_RISING,
	},
};

static struct platform_device colibri_pxa270_dm9000_device = {
	.name		= "dm9000",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(colibri_pxa270_dm9000_resources),
	.resource	= colibri_pxa270_dm9000_resources,
};

static void __init colibri_pxa270_eth_init(void)
{
	platform_device_register(&colibri_pxa270_dm9000_device);
}
#else
/* stub when DM9000 support is not configured */
static inline void colibri_pxa270_eth_init(void) {}
#endif
/******************************************************************************
 * Audio and Touchscreen
 ******************************************************************************/
#if	defined(CONFIG_TOUCHSCREEN_UCB1400) || \
	defined(CONFIG_TOUCHSCREEN_UCB1400_MODULE)
/* UCB1400 codec hangs off the AC97 link; GPIO95 is its reset line. */
static pxa2xx_audio_ops_t colibri_pxa270_ac97_pdata = {
	.reset_gpio	= 95,
};

static struct ucb1400_pdata colibri_pxa270_ucb1400_pdata = {
	.irq		= gpio_to_irq(GPIO113_COLIBRI_PXA270_TS_IRQ),
};

static struct platform_device colibri_pxa270_ucb1400_device = {
	.name		= "ucb1400_core",
	.id		= -1,
	.dev		= {
		.platform_data = &colibri_pxa270_ucb1400_pdata,
	},
};

static void __init colibri_pxa270_tsc_init(void)
{
	pxa_set_ac97_info(&colibri_pxa270_ac97_pdata);
	platform_device_register(&colibri_pxa270_ucb1400_device);
}
#else
/* stub when UCB1400 touchscreen support is not configured */
static inline void colibri_pxa270_tsc_init(void) {}
#endif
/* Baseboard type, selectable on the kernel command line as the read-only
 * core parameter "colibri_pxa270_baseboard"; values are dispatched in
 * colibri_pxa270_init(). */
static int colibri_pxa270_baseboard;
core_param(colibri_pxa270_baseboard, colibri_pxa270_baseboard, int, 0444);
/* Board init: bring up the module's own peripherals, then the baseboard. */
static void __init colibri_pxa270_init(void)
{
	/* on-module pins and devices first */
	pxa2xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa270_pin_config));
	colibri_pxa270_nor_init();
	colibri_pxa270_eth_init();
	colibri_pxa270_tsc_init();

	/* then whichever carrier board was selected */
	if (colibri_pxa270_baseboard == COLIBRI_EVALBOARD) {
		pxa2xx_mfp_config(ARRAY_AND_SIZE(
			colibri_pxa270_evalboard_pin_config));
		colibri_evalboard_init();
	} else if (colibri_pxa270_baseboard == COLIBRI_PXA270_INCOME) {
		pxa2xx_mfp_config(ARRAY_AND_SIZE(income_pin_config));
		colibri_pxa270_income_boardinit();
	} else {
		printk(KERN_ERR "Illegal colibri_pxa270_baseboard type %d\n",
		       colibri_pxa270_baseboard);
	}
}
/* The "Income s.r.o. SH-Dmaster PXA270 SBC" board can be booted either
 * with the INCOME mach type or with COLIBRI and the kernel parameter
 * "colibri_pxa270_baseboard=1"
 */
static void __init colibri_pxa270_income_init(void)
{
	/* force the baseboard selection, then run the common init */
	colibri_pxa270_baseboard = COLIBRI_PXA270_INCOME;
	colibri_pxa270_init();
}
/* Generic Colibri machine: baseboard chosen via colibri_pxa270_baseboard. */
MACHINE_START(COLIBRI, "Toradex Colibri PXA270")
	.boot_params	= COLIBRI_SDRAM_BASE + 0x100,
	.init_machine	= colibri_pxa270_init,
	.map_io		= pxa27x_map_io,
	.init_irq	= pxa27x_init_irq,
	.timer		= &pxa_timer,
MACHINE_END

/* Dedicated mach type for the Income SBC; forces the INCOME baseboard. */
MACHINE_START(INCOME, "Income s.r.o. SH-Dmaster PXA270 SBC")
	.boot_params	= 0xa0000100,
	.init_machine	= colibri_pxa270_income_init,
	.map_io		= pxa27x_map_io,
	.init_irq	= pxa27x_init_irq,
	.timer		= &pxa_timer,
MACHINE_END
| gpl-2.0 |
bigzz/shamu_franc | arch/arm/mach-s3c24xx/mach-anubis.c | 2279 | 10658 | /* linux/arch/arm/mach-s3c2440/mach-anubis.c
*
* Copyright 2003-2009 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/sm501.h>
#include <linux/sm501-regs.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <mach/regs-gpio.h>
#include <mach/regs-lcd.h>
#include <linux/platform_data/mtd-nand-s3c2410.h>
#include <linux/platform_data/i2c-s3c2410.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <net/ax88796.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <linux/platform_data/asoc-s3c24xx_simtec.h>
#include <plat/samsung-time.h>
#include "anubis.h"
#include "common.h"
#include "simtec.h"
#define COPYRIGHT ", Copyright 2005-2009 Simtec Electronics"
/* Static virtual mappings set up before the MMU-based ioremap is usable. */
static struct map_desc anubis_iodesc[] __initdata = {
  /* ISA IO areas */
  {
	.virtual	= (u32)S3C24XX_VA_ISA_BYTE,
	.pfn		= __phys_to_pfn(0x0),
	.length		= SZ_4M,
	.type		= MT_DEVICE,
  }, {
	.virtual	= (u32)S3C24XX_VA_ISA_WORD,
	.pfn		= __phys_to_pfn(0x0),
	.length 	= SZ_4M,
	.type		= MT_DEVICE,
  },

  /* we could possibly compress the next set down into a set of smaller tables
   * pagetables, but that would mean using an L2 section, and it still means
   * we cannot actually feed the same register to an LDR due to 16K spacing
   */

  /* CPLD control registers */
  {
	.virtual	= (u32)ANUBIS_VA_CTRL1,
	.pfn		= __phys_to_pfn(ANUBIS_PA_CTRL1),
	.length		= SZ_4K,
	.type		= MT_DEVICE,
  }, {
	.virtual	= (u32)ANUBIS_VA_IDREG,
	.pfn		= __phys_to_pfn(ANUBIS_PA_IDREG),
	.length		= SZ_4K,
	.type		= MT_DEVICE,
  },
};
/* common UART register defaults (8N1, UCLK source, 8-byte RX trigger) */
#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE

/* Only hardware ports 0 and 2 are brought out on the Anubis. */
static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
	[0] = {
		.hwport	     = 0,
		.flags	     = 0,
		.ucon	     = UCON,
		.ulcon	     = ULCON,
		.ufcon	     = UFCON,
		.clk_sel	= S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2,
	},
	[1] = {
		.hwport	     = 2,
		.flags	     = 0,
		.ucon	     = UCON,
		.ulcon	     = ULCON,
		.ufcon	     = UFCON,
		.clk_sel	= S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2,
	},
};

/* NAND Flash on Anubis board */

/* logical-chip -> CPLD slot maps consumed by anubis_nand_select() */
static int external_map[]   = { 2 };
static int chip0_map[]      = { 0 };
static int chip1_map[]      = { 1 };
/* Default NAND layout for small-block devices (16 KiB boot agent). */
static struct mtd_partition __initdata anubis_default_nand_part[] = {
	[0] = {
		.name	= "Boot Agent",
		.size	= SZ_16K,
		.offset	= 0,
	},
	[1] = {
		.name	= "/boot",
		.size	= SZ_4M - SZ_16K,
		.offset	= SZ_16K,
	},
	[2] = {
		.name	= "user1",
		.offset	= SZ_4M,
		.size	= SZ_32M - SZ_4M,
	},
	[3] = {
		.name	= "user2",
		.offset	= SZ_32M,
		.size	= MTDPART_SIZ_FULL,
	}
};

/* Same layout for large-block devices, whose erase block is 128 KiB. */
static struct mtd_partition __initdata anubis_default_nand_part_large[] = {
	[0] = {
		.name	= "Boot Agent",
		.size	= SZ_128K,
		.offset	= 0,
	},
	[1] = {
		.name	= "/boot",
		.size	= SZ_4M - SZ_128K,
		.offset	= SZ_128K,
	},
	[2] = {
		.name	= "user1",
		.offset	= SZ_4M,
		.size	= SZ_32M - SZ_4M,
	},
	[3] = {
		.name	= "user2",
		.offset	= SZ_32M,
		.size	= MTDPART_SIZ_FULL,
	}
};
/* the Anubis has 3 selectable slots for nand-flash, the two
 * on-board chip areas, as well as the external slot.
 *
 * Note, there is no current hot-plug support for the External
 * socket.
*/

/* Each set maps one logical chip onto a CPLD slot via its nr_map. */
static struct s3c2410_nand_set __initdata anubis_nand_sets[] = {
	[1] = {
		.name		= "External",
		.nr_chips	= 1,
		.nr_map		= external_map,
		.nr_partitions	= ARRAY_SIZE(anubis_default_nand_part),
		.partitions	= anubis_default_nand_part,
	},
	[0] = {
		.name		= "chip0",
		.nr_chips	= 1,
		.nr_map		= chip0_map,
		.nr_partitions	= ARRAY_SIZE(anubis_default_nand_part),
		.partitions	= anubis_default_nand_part,
	},
	[2] = {
		.name		= "chip1",
		.nr_chips	= 1,
		.nr_map		= chip1_map,
		.nr_partitions	= ARRAY_SIZE(anubis_default_nand_part),
		.partitions	= anubis_default_nand_part,
	},
};
/* Route the NAND chip-select to the requested slot via the CPLD. */
static void anubis_nand_select(struct s3c2410_nand_set *set, int slot)
{
	unsigned int ctrl;
	int chip = set->nr_map[slot] & 3;

	pr_debug("anubis_nand: selecting slot %d (set %p,%p)\n",
		 chip, set, set->nr_map);

	/* read-modify-write the NANDSEL field of CPLD control register 1 */
	ctrl = __raw_readb(ANUBIS_VA_CTRL1);
	ctrl = (ctrl & ~ANUBIS_CTRL1_NANDSEL) | chip;

	pr_debug("anubis_nand: ctrl1 now %02x\n", ctrl);
	__raw_writeb(ctrl, ANUBIS_VA_CTRL1);
}
/* NAND controller platform data: timings in ns plus the slot selector. */
static struct s3c2410_platform_nand __initdata anubis_nand_info = {
	.tacls		= 25,
	.twrph0		= 55,
	.twrph1		= 40,
	.nr_sets	= ARRAY_SIZE(anubis_nand_sets),
	.sets		= anubis_nand_sets,
	.select_chip	= anubis_nand_select,
};
/* IDE channels */

/* shared pata_platform data: registers are spaced 32 bytes apart (1 << 5) */
static struct pata_platform_info anubis_ide_platdata = {
	.ioport_shift	= 5,
};
/*
 * IDE channel 0: command block at CS3, alternate-status/control window in
 * the byte-swapped bank, then the CPLD IDE0 interrupt.  pata_platform
 * expects the dense order [0] cmd mem, [1] ctl mem, [2] irq; the previous
 * [0]/[2]/[3] numbering left a zero-filled hole at index 1 (and differed
 * from the channel-1 table below).
 */
static struct resource anubis_ide0_resource[] = {
	[0] = DEFINE_RES_MEM(S3C2410_CS3, 8 * 32),
	[1] = DEFINE_RES_MEM(S3C2410_CS3 + (1 << 26) + (6 * 32), 32),
	[2] = DEFINE_RES_IRQ(ANUBIS_IRQ_IDE0),
};
/* pata_platform device for IDE channel 0 */
static struct platform_device anubis_device_ide0 = {
	.name		= "pata_platform",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(anubis_ide0_resource),
	.resource	= anubis_ide0_resource,
	.dev	= {
		.platform_data = &anubis_ide_platdata,
		.coherent_dma_mask = ~0,	/* PIO-only device */
	},
};
static struct resource anubis_ide1_resource[] = {
[0] = DEFINE_RES_MEM(S3C2410_CS4, 8 * 32),
[1] = DEFINE_RES_MEM(S3C2410_CS4 + (1 << 26) + (6 * 32), 32),
[2] = DEFINE_RES_IRQ(ANUBIS_IRQ_IDE0),
};
static struct platform_device anubis_device_ide1 = {
.name = "pata_platform",
.id = 1,
.num_resources = ARRAY_SIZE(anubis_ide1_resource),
.resource = anubis_ide1_resource,
.dev = {
.platform_data = &anubis_ide_platdata,
.coherent_dma_mask = ~0,
},
};
/* Asix AX88796 10/100 ethernet controller */

static struct ax_plat_data anubis_asix_platdata = {
	.flags		= AXFLG_MAC_FROMDEV,	/* MAC address read from the device */
	.wordlength	= 2,
	/* NOTE(review): raw AX88796 DCR/RCR register init values --
	 * verify against the AX88796 datasheet. */
	.dcr_val	= 0x48,
	.rcr_val	= 0x40,
};

/* Controller mapped on nCS5, registers spaced 0x20 apart. */
static struct resource anubis_asix_resource[] = {
	[0] = DEFINE_RES_MEM(S3C2410_CS5, 0x20 * 0x20),
	[1] = DEFINE_RES_IRQ(ANUBIS_IRQ_ASIX),
};

static struct platform_device anubis_device_asix = {
	.name		= "ax88796",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(anubis_asix_resource),
	.resource	= anubis_asix_resource,
	.dev		= {
		.platform_data = &anubis_asix_platdata,
	}
};
/* SM501 */

/* Register window, local framebuffer window (top 2M of 64M), and IRQ. */
static struct resource anubis_sm501_resource[] = {
	[0] = DEFINE_RES_MEM(S3C2410_CS2, SZ_8M),
	[1] = DEFINE_RES_MEM(S3C2410_CS2 + SZ_64M - SZ_2M, SZ_2M),
	[2] = DEFINE_RES_IRQ(IRQ_EINT0),
};

static struct sm501_initdata anubis_sm501_initdata = {
	.gpio_high	= {
		.set	= 0x3F000000,		/* 24bit panel */
		.mask	= 0x0,
	},
	.misc_timing	= {
		.set	= 0x010100,		/* SDRAM timing */
		.mask	= 0x1F1F00,
	},
	.misc_control	= {
		.set	= SM501_MISC_PNL_24BIT,
		.mask	= 0,
	},

	.devices	= SM501_USE_GPIO,

	/* set the SDRAM and bus clocks */
	.mclk		= 72 * MHZ,
	.m1xclk		= 144 * MHZ,
};

/* Two bit-banged I2C buses driven from the SM501's GPIO pins. */
static struct sm501_platdata_gpio_i2c anubis_sm501_gpio_i2c[] = {
	[0] = {
		.bus_num	= 1,
		.pin_scl	= 44,
		.pin_sda	= 45,
	},
	[1] = {
		.bus_num	= 2,
		.pin_scl	= 40,
		.pin_sda	= 41,
	},
};

static struct sm501_platdata anubis_sm501_platdata = {
	.init		= &anubis_sm501_initdata,
	.gpio_base	= -1,
	.gpio_i2c	= anubis_sm501_gpio_i2c,
	.gpio_i2c_nr	= ARRAY_SIZE(anubis_sm501_gpio_i2c),
};

static struct platform_device anubis_device_sm501 = {
	.name		= "sm501",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(anubis_sm501_resource),
	.resource	= anubis_sm501_resource,
	.dev		= {
		.platform_data = &anubis_sm501_platdata,
	},
};
/* Standard Anubis devices */

/* Registered in one go by anubis_init(). */
static struct platform_device *anubis_devices[] __initdata = {
	&s3c_device_ohci,
	&s3c_device_wdt,
	&s3c_device_adc,
	&s3c_device_i2c0,
	&s3c_device_rtc,
	&s3c_device_nand,
	&anubis_device_ide0,
	&anubis_device_ide1,
	&anubis_device_asix,
	&anubis_device_sm501,
};

/* Clocks whose parents/rates are configured in anubis_map_io(). */
static struct clk *anubis_clocks[] __initdata = {
	&s3c24xx_dclk0,
	&s3c24xx_dclk1,
	&s3c24xx_clkout0,
	&s3c24xx_clkout1,
	&s3c24xx_uclk,
};

/* I2C devices. */
static struct i2c_board_info anubis_i2c_devs[] __initdata = {
	{
		/* TPS65011 PMIC at address 0x48, interrupt on EINT20. */
		I2C_BOARD_INFO("tps65011", 0x48),
		.irq	= IRQ_EINT20,
	}
};
/* Audio setup */

/* Simtec audio glue: amplifier control/gain GPIOs and clocking flags. */
static struct s3c24xx_audio_simtec_pdata __initdata anubis_audio = {
	.have_mic	= 1,
	.have_lout	= 1,
	.output_cdclk	= 1,
	.use_mpllin	= 1,
	.amp_gpio	= S3C2410_GPB(2),
	.amp_gain[0]	= S3C2410_GPD(10),
	.amp_gain[1]	= S3C2410_GPD(11),
};
/*
 * Early board setup: program the DCLK/CLKOUT clock tree, map the
 * static IO regions, register clocks and UARTs, then probe the board
 * ID register and switch set [0] ("chip0") to the large-page NAND
 * partition layout on ANUBIS-B (revision >= 4) boards.
 */
static void __init anubis_map_io(void)
{
	/* initialise the clocks */

	s3c24xx_dclk0.parent = &clk_upll;
	s3c24xx_dclk0.rate   = 12*1000*1000;

	s3c24xx_dclk1.parent = &clk_upll;
	s3c24xx_dclk1.rate   = 24*1000*1000;

	s3c24xx_clkout0.parent  = &s3c24xx_dclk0;
	s3c24xx_clkout1.parent  = &s3c24xx_dclk1;

	s3c24xx_uclk.parent  = &s3c24xx_clkout1;

	s3c24xx_register_clocks(anubis_clocks, ARRAY_SIZE(anubis_clocks));

	s3c24xx_init_io(anubis_iodesc, ARRAY_SIZE(anubis_iodesc));
	s3c24xx_init_clocks(0);
	s3c24xx_init_uarts(anubis_uartcfgs, ARRAY_SIZE(anubis_uartcfgs));
	samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);

	/* check for the newer revision boards with large page nand */

	if ((__raw_readb(ANUBIS_VA_IDREG) & ANUBIS_IDREG_REVMASK) >= 4) {
		printk(KERN_INFO "ANUBIS-B detected (revision %d)\n",
		       __raw_readb(ANUBIS_VA_IDREG) & ANUBIS_IDREG_REVMASK);
		anubis_nand_sets[0].partitions = anubis_default_nand_part_large;
		anubis_nand_sets[0].nr_partitions = ARRAY_SIZE(anubis_default_nand_part_large);
	} else {
		/* ensure that the GPIO is setup */
		gpio_request_one(S3C2410_GPA(0), GPIOF_OUT_INIT_HIGH, NULL);
		gpio_free(S3C2410_GPA(0));
	}
}
/* Late init: attach platform data and register all board devices. */
static void __init anubis_init(void)
{
	s3c_i2c0_set_platdata(NULL);
	s3c_nand_set_platdata(&anubis_nand_info);
	simtec_audio_add(NULL, false, &anubis_audio);
	platform_add_devices(anubis_devices, ARRAY_SIZE(anubis_devices));

	i2c_register_board_info(0, anubis_i2c_devs,
				ARRAY_SIZE(anubis_i2c_devs));
}
/* Machine descriptor for the Simtec Anubis board. */
MACHINE_START(ANUBIS, "Simtec-Anubis")
	/* Maintainer: Ben Dooks <ben@simtec.co.uk> */
	.atag_offset	= 0x100,
	.map_io		= anubis_map_io,
	.init_machine	= anubis_init,
	.init_irq	= s3c2440_init_irq,
	.init_time	= samsung_timer_init,
	.restart	= s3c244x_restart,
MACHINE_END
| gpl-2.0 |
Dm47021/android_kernel_samsung_centura_sch738c | arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c | 2279 | 7900 | /*
* Copyright (C) 2010 Eric Benard - eric@eukrea.com
*
* Based on pcm970-baseboard.c which is :
* Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <video/platform_lcd.h>
#include <linux/i2c.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-mx35.h>
#include <mach/audmux.h>
#include "devices-imx35.h"
/*
 * Display timings selectable via the mx3fb mode name: the on-board
 * CMO QVGA panel plus two DVI output modes.
 */
static const struct fb_videomode fb_modedb[] = {
	{
		/* 320x240 @ 60 */
		.name		= "CMO-QVGA",
		.refresh	= 60,
		.xres		= 320,
		.yres		= 240,
		.pixclock	= KHZ2PICOS(6500),
		.left_margin	= 68,
		.right_margin	= 20,
		.upper_margin	= 15,
		.lower_margin	= 4,
		.hsync_len	= 30,
		.vsync_len	= 3,
		.sync		= 0,
		.vmode		= FB_VMODE_NONINTERLACED,
		.flag		= 0,
	},
	{
		/* 640x480 @ 60 */
		.name		= "DVI-VGA",
		.refresh	= 60,
		.xres		= 640,
		.yres		= 480,
		.pixclock	= 32000,
		.left_margin	= 100,
		.right_margin	= 100,
		.upper_margin	= 7,
		.lower_margin	= 100,
		.hsync_len	= 7,
		.vsync_len	= 7,
		.sync		= FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT |
				  FB_SYNC_OE_ACT_HIGH | FB_SYNC_CLK_INVERT,
		.vmode		= FB_VMODE_NONINTERLACED,
		.flag		= 0,
	},
	{
		/* 800x600 @ 60 */
		.name		= "DVI-SVGA",
		.refresh	= 60,
		.xres		= 800,
		.yres		= 600,
		.pixclock	= 25000,
		.left_margin	= 75,
		.right_margin	= 75,
		.upper_margin	= 7,
		.lower_margin	= 75,
		.hsync_len	= 7,
		.vsync_len	= 7,
		.sync		= FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT |
				  FB_SYNC_OE_ACT_HIGH | FB_SYNC_CLK_INVERT,
		.vmode		= FB_VMODE_NONINTERLACED,
		.flag		= 0,
	},
};

static const struct ipu_platform_data mx3_ipu_data __initconst = {
	.irq_base = MXC_IPU_IRQ_START,
};

/* Default to the native QVGA panel; other modes selectable by name. */
static struct mx3fb_platform_data mx3fb_pdata __initdata = {
	.name		= "CMO-QVGA",
	.mode		= fb_modedb,
	.num_modes	= ARRAY_SIZE(fb_modedb),
};
/* IOMUX configuration for every pad this baseboard uses. */
static iomux_v3_cfg_t eukrea_mbimxsd_pads[] = {
	/* LCD */
	MX35_PAD_LD0__IPU_DISPB_DAT_0,
	MX35_PAD_LD1__IPU_DISPB_DAT_1,
	MX35_PAD_LD2__IPU_DISPB_DAT_2,
	MX35_PAD_LD3__IPU_DISPB_DAT_3,
	MX35_PAD_LD4__IPU_DISPB_DAT_4,
	MX35_PAD_LD5__IPU_DISPB_DAT_5,
	MX35_PAD_LD6__IPU_DISPB_DAT_6,
	MX35_PAD_LD7__IPU_DISPB_DAT_7,
	MX35_PAD_LD8__IPU_DISPB_DAT_8,
	MX35_PAD_LD9__IPU_DISPB_DAT_9,
	MX35_PAD_LD10__IPU_DISPB_DAT_10,
	MX35_PAD_LD11__IPU_DISPB_DAT_11,
	MX35_PAD_LD12__IPU_DISPB_DAT_12,
	MX35_PAD_LD13__IPU_DISPB_DAT_13,
	MX35_PAD_LD14__IPU_DISPB_DAT_14,
	MX35_PAD_LD15__IPU_DISPB_DAT_15,
	MX35_PAD_LD16__IPU_DISPB_DAT_16,
	MX35_PAD_LD17__IPU_DISPB_DAT_17,
	MX35_PAD_D3_HSYNC__IPU_DISPB_D3_HSYNC,
	MX35_PAD_D3_FPSHIFT__IPU_DISPB_D3_CLK,
	MX35_PAD_D3_DRDY__IPU_DISPB_D3_DRDY,
	MX35_PAD_D3_VSYNC__IPU_DISPB_D3_VSYNC,
	/* Backlight */
	MX35_PAD_CONTRAST__IPU_DISPB_CONTR,
	/* LCD_PWR */
	MX35_PAD_D3_CLS__GPIO1_4,
	/* LED */
	MX35_PAD_LD23__GPIO3_29,
	/* SWITCH */
	MX35_PAD_LD19__GPIO3_25,
	/* UART2 */
	MX35_PAD_CTS2__UART2_CTS,
	MX35_PAD_RTS2__UART2_RTS,
	MX35_PAD_TXD2__UART2_TXD_MUX,
	MX35_PAD_RXD2__UART2_RXD_MUX,
	/* I2S */
	MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS,
	MX35_PAD_STXD4__AUDMUX_AUD4_TXD,
	MX35_PAD_SRXD4__AUDMUX_AUD4_RXD,
	MX35_PAD_SCK4__AUDMUX_AUD4_TXC,
	/* CAN2 */
	MX35_PAD_TX5_RX0__CAN2_TXCAN,
	MX35_PAD_TX4_RX1__CAN2_RXCAN,
	/* SDCARD */
	MX35_PAD_SD1_CMD__ESDHC1_CMD,
	MX35_PAD_SD1_CLK__ESDHC1_CLK,
	MX35_PAD_SD1_DATA0__ESDHC1_DAT0,
	MX35_PAD_SD1_DATA1__ESDHC1_DAT1,
	MX35_PAD_SD1_DATA2__ESDHC1_DAT2,
	MX35_PAD_SD1_DATA3__ESDHC1_DAT3,
	/* SD1 CD */
	MX35_PAD_LD18__GPIO3_24,
};

/* GPIO numbers for the pads muxed as GPIOs above. */
#define GPIO_LED1	IMX_GPIO_NR(3, 29)
#define GPIO_SWITCH1	IMX_GPIO_NR(3, 25)
#define GPIO_LCDPWR	IMX_GPIO_NR(1, 4)
#define GPIO_SD1CD	IMX_GPIO_NR(3, 24)
/* platform-lcd set_power callback: drive the LCD supply GPIO. */
static void eukrea_mbimxsd_lcd_power_set(struct plat_lcd_data *pd,
				unsigned int power)
{
	/* pd is unused -- the board has a single fixed LCD power GPIO. */
	gpio_direction_output(GPIO_LCDPWR, power ? 1 : 0);
}
static struct plat_lcd_data eukrea_mbimxsd_lcd_power_data = {
	.set_power		= eukrea_mbimxsd_lcd_power_set,
};

static struct platform_device eukrea_mbimxsd_lcd_powerdev = {
	.name	= "platform-lcd",
	.dev.platform_data	= &eukrea_mbimxsd_lcd_power_data,
};

/* Single heartbeat LED, active low. */
static struct gpio_led eukrea_mbimxsd_leds[] = {
	{
		.name			= "led1",
		.default_trigger	= "heartbeat",
		.active_low		= 1,
		.gpio			= GPIO_LED1,
	},
};

static struct gpio_led_platform_data eukrea_mbimxsd_led_info = {
	.leds		= eukrea_mbimxsd_leds,
	.num_leds	= ARRAY_SIZE(eukrea_mbimxsd_leds),
};

static struct platform_device eukrea_mbimxsd_leds_gpio = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= {
		.platform_data	= &eukrea_mbimxsd_led_info,
	},
};

/* One push button (BP1), active low, wakeup capable. */
static struct gpio_keys_button eukrea_mbimxsd_gpio_buttons[] = {
	{
		.gpio		= GPIO_SWITCH1,
		.code   	= BTN_0,
		.desc		= "BP1",
		.active_low	= 1,
		.wakeup		= 1,
	},
};

static const struct gpio_keys_platform_data
		eukrea_mbimxsd_button_data __initconst = {
	.buttons	= eukrea_mbimxsd_gpio_buttons,
	.nbuttons	= ARRAY_SIZE(eukrea_mbimxsd_gpio_buttons),
};

static struct platform_device *platform_devices[] __initdata = {
	&eukrea_mbimxsd_leds_gpio,
	&eukrea_mbimxsd_lcd_powerdev,
};

static const struct imxuart_platform_data uart_pdata __initconst = {
	.flags = IMXUART_HAVE_RTSCTS,
};

/* TLV320AIC23 audio codec on I2C bus 0. */
static struct i2c_board_info eukrea_mbimxsd_i2c_devices[] = {
	{
		I2C_BOARD_INFO("tlv320aic23", 0x1a),
	},
};

static const
struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata __initconst = {
	.flags = IMX_SSI_SYN | IMX_SSI_NET | IMX_SSI_USE_I2S_SLAVE,
};

/* SD1: card-detect on a GPIO, no write-protect line wired. */
static struct esdhc_platform_data sd1_pdata = {
	.cd_gpio = GPIO_SD1CD,
	.wp_gpio = -EINVAL,
};
/*
 * system init for baseboard usage. Will be called by cpuimx35 init.
 *
 * Add platform devices present on this baseboard and init
 * them from CPU side as far as required to use them later on
 */
void __init eukrea_mbimxsd35_baseboard_init(void)
{
	if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
			ARRAY_SIZE(eukrea_mbimxsd_pads)))
		printk(KERN_ERR "error setting mbimxsd pads !\n");

#if defined(CONFIG_SND_SOC_EUKREA_TLV320)
	/* SSI unit master I2S codec connected to SSI_AUD4 */
	mxc_audmux_v2_configure_port(0,
			MXC_AUDMUX_V2_PTCR_SYN |
			MXC_AUDMUX_V2_PTCR_TFSDIR |
			MXC_AUDMUX_V2_PTCR_TFSEL(3) |
			MXC_AUDMUX_V2_PTCR_TCLKDIR |
			MXC_AUDMUX_V2_PTCR_TCSEL(3),
			MXC_AUDMUX_V2_PDCR_RXDSEL(3)
	);
	mxc_audmux_v2_configure_port(3,
			MXC_AUDMUX_V2_PTCR_SYN,
			MXC_AUDMUX_V2_PDCR_RXDSEL(0)
	);
#endif

	imx35_add_imx_uart1(&uart_pdata);
	imx35_add_ipu_core(&mx3_ipu_data);
	imx35_add_mx3_sdc_fb(&mx3fb_pdata);

	imx35_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata);

	imx35_add_flexcan1(NULL);
	imx35_add_sdhci_esdhc_imx(0, &sd1_pdata);

	/* Pre-set LED and switch GPIO state, then release them so the
	 * leds-gpio and gpio-keys drivers can claim them later. */
	gpio_request(GPIO_LED1, "LED1");
	gpio_direction_output(GPIO_LED1, 1);
	gpio_free(GPIO_LED1);

	gpio_request(GPIO_SWITCH1, "SWITCH1");
	gpio_direction_input(GPIO_SWITCH1);
	gpio_free(GPIO_SWITCH1);

	/* LCDPWR stays requested: eukrea_mbimxsd_lcd_power_set() drives it. */
	gpio_request(GPIO_LCDPWR, "LCDPWR");
	gpio_direction_output(GPIO_LCDPWR, 1);

	i2c_register_board_info(0, eukrea_mbimxsd_i2c_devices,
				ARRAY_SIZE(eukrea_mbimxsd_i2c_devices));

	platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));

	imx_add_gpio_keys(&eukrea_mbimxsd_button_data);
}
| gpl-2.0 |
geeknik/android_omap_tuna | arch/arm/mach-imx/mach-mx25_3ds.c | 2279 | 6643 | /*
* Copyright 2009 Sascha Hauer, <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
/*
* This machine is known as:
* - i.MX25 3-Stack Development System
* - i.MX25 Platform Development Kit (i.MX25 PDK)
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/usb/otg.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/memory.h>
#include <asm/mach/map.h>
#include <mach/common.h>
#include <mach/mx25.h>
#include <mach/iomux-mx25.h>
#include "devices-imx25.h"
static const struct imxuart_platform_data uart_pdata __initconst = {
	.flags = IMXUART_HAVE_RTSCTS,
};

/* IOMUX configuration for FEC, LCD, keypad, SD1 and I2C1. */
static iomux_v3_cfg_t mx25pdk_pads[] = {
	MX25_PAD_FEC_MDC__FEC_MDC,
	MX25_PAD_FEC_MDIO__FEC_MDIO,
	MX25_PAD_FEC_TDATA0__FEC_TDATA0,
	MX25_PAD_FEC_TDATA1__FEC_TDATA1,
	MX25_PAD_FEC_TX_EN__FEC_TX_EN,
	MX25_PAD_FEC_RDATA0__FEC_RDATA0,
	MX25_PAD_FEC_RDATA1__FEC_RDATA1,
	MX25_PAD_FEC_RX_DV__FEC_RX_DV,
	MX25_PAD_FEC_TX_CLK__FEC_TX_CLK,
	MX25_PAD_A17__GPIO_2_3, /* FEC_EN, GPIO 35 */
	MX25_PAD_D12__GPIO_4_8, /* FEC_RESET_B, GPIO 104 */

	/* LCD */
	MX25_PAD_LD0__LD0,
	MX25_PAD_LD1__LD1,
	MX25_PAD_LD2__LD2,
	MX25_PAD_LD3__LD3,
	MX25_PAD_LD4__LD4,
	MX25_PAD_LD5__LD5,
	MX25_PAD_LD6__LD6,
	MX25_PAD_LD7__LD7,
	MX25_PAD_LD8__LD8,
	MX25_PAD_LD9__LD9,
	MX25_PAD_LD10__LD10,
	MX25_PAD_LD11__LD11,
	MX25_PAD_LD12__LD12,
	MX25_PAD_LD13__LD13,
	MX25_PAD_LD14__LD14,
	MX25_PAD_LD15__LD15,
	MX25_PAD_GPIO_E__LD16,
	MX25_PAD_GPIO_F__LD17,
	MX25_PAD_HSYNC__HSYNC,
	MX25_PAD_VSYNC__VSYNC,
	MX25_PAD_LSCLK__LSCLK,
	MX25_PAD_OE_ACD__OE_ACD,
	MX25_PAD_CONTRAST__CONTRAST,

	/* Keypad */
	MX25_PAD_KPP_ROW0__KPP_ROW0,
	MX25_PAD_KPP_ROW1__KPP_ROW1,
	MX25_PAD_KPP_ROW2__KPP_ROW2,
	MX25_PAD_KPP_ROW3__KPP_ROW3,
	MX25_PAD_KPP_COL0__KPP_COL0,
	MX25_PAD_KPP_COL1__KPP_COL1,
	MX25_PAD_KPP_COL2__KPP_COL2,
	MX25_PAD_KPP_COL3__KPP_COL3,

	/* SD1 */
	MX25_PAD_SD1_CMD__SD1_CMD,
	MX25_PAD_SD1_CLK__SD1_CLK,
	MX25_PAD_SD1_DATA0__SD1_DATA0,
	MX25_PAD_SD1_DATA1__SD1_DATA1,
	MX25_PAD_SD1_DATA2__SD1_DATA2,
	MX25_PAD_SD1_DATA3__SD1_DATA3,
	MX25_PAD_A14__GPIO_2_0, /* WriteProtect */
	MX25_PAD_A15__GPIO_2_1, /* CardDetect */

	/* I2C1 */
	MX25_PAD_I2C1_CLK__I2C1_CLK,
	MX25_PAD_I2C1_DAT__I2C1_DAT,
};
/* FEC connects to its PHY via reduced MII (RMII). */
static const struct fec_platform_data mx25_fec_pdata __initconst = {
	.phy    = PHY_INTERFACE_MODE_RMII,
};

#define FEC_ENABLE_GPIO		IMX_GPIO_NR(2, 3)
#define FEC_RESET_B_GPIO	IMX_GPIO_NR(4, 8)

/*
 * Power-cycle and reset the FEC PHY before the FEC device is added.
 * NOTE(review): gpio_request() return values are ignored here; a
 * failed claim would leave the subsequent calls acting on an
 * unclaimed GPIO.
 */
static void __init mx25pdk_fec_reset(void)
{
	gpio_request(FEC_ENABLE_GPIO, "FEC PHY enable");
	gpio_request(FEC_RESET_B_GPIO, "FEC PHY reset");

	gpio_direction_output(FEC_ENABLE_GPIO, 0);  /* drop PHY power */
	gpio_direction_output(FEC_RESET_B_GPIO, 0); /* assert reset */
	udelay(2);

	/* turn on PHY power and lift reset */
	gpio_set_value(FEC_ENABLE_GPIO, 1);
	gpio_set_value(FEC_RESET_B_GPIO, 1);
}
/* NAND: width 1 (presumably 8-bit bus -- confirm against mxc_nand
 * platform-data docs), hardware ECC, bad-block table kept in flash. */
static const struct mxc_nand_platform_data
mx25pdk_nand_board_info __initconst = {
	.width		= 1,
	.hw_ecc		= 1,
	.flash_bbt	= 1,
};

/* Single VGA CRT timing for the imx-fb controller. */
static struct imx_fb_videomode mx25pdk_modes[] = {
	{
		.mode	= {
			.name		= "CRT-VGA",
			.refresh	= 60,
			.xres		= 640,
			.yres		= 480,
			.pixclock	= 39683,
			.left_margin	= 45,
			.right_margin	= 114,
			.upper_margin	= 33,
			.lower_margin	= 11,
			.hsync_len	= 1,
			.vsync_len	= 1,
		},
		.bpp	= 16,
		.pcr	= 0xFA208B80,
	},
};

static const struct imx_fb_platform_data mx25pdk_fb_pdata __initconst = {
	.mode		= mx25pdk_modes,
	.num_modes	= ARRAY_SIZE(mx25pdk_modes),
	/* NOTE(review): raw PWMR/LSCR1/DMACR register values -- verify
	 * against the i.MX25 LCDC reference manual. */
	.pwmr		= 0x00A903FF,
	.lscr1		= 0x00120300,
	.dmacr		= 0x00020010,
};
/* 4x4 keypad matrix: KEY(row, col, keycode). */
static const uint32_t mx25pdk_keymap[] = {
	KEY(0, 0, KEY_UP),
	KEY(0, 1, KEY_DOWN),
	KEY(0, 2, KEY_VOLUMEDOWN),
	KEY(0, 3, KEY_HOME),
	KEY(1, 0, KEY_RIGHT),
	KEY(1, 1, KEY_LEFT),
	KEY(1, 2, KEY_ENTER),
	KEY(1, 3, KEY_VOLUMEUP),
	KEY(2, 0, KEY_F6),
	KEY(2, 1, KEY_F8),
	KEY(2, 2, KEY_F9),
	KEY(2, 3, KEY_F10),
	KEY(3, 0, KEY_F1),
	KEY(3, 1, KEY_F2),
	KEY(3, 2, KEY_F3),
	KEY(3, 3, KEY_POWER),
};

static const struct matrix_keymap_data mx25pdk_keymap_data __initconst = {
	.keymap		= mx25pdk_keymap,
	.keymap_size	= ARRAY_SIZE(mx25pdk_keymap),
};
/* USBH2 host init hook: select the on-chip (internal) PHY. */
static int mx25pdk_usbh2_init(struct platform_device *pdev)
{
	return mx25_initialize_usb_hw(pdev->id, MXC_EHCI_INTERNAL_PHY);
}

static const struct mxc_usbh_platform_data usbh2_pdata __initconst = {
	.init	= mx25pdk_usbh2_init,
	.portsc	= MXC_EHCI_MODE_SERIAL,
};

/* OTG port operates as a UTMI device (gadget) only. */
static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
	.operating_mode = FSL_USB2_DR_DEVICE,
	.phy_mode       = FSL_USB2_PHY_UTMI,
};

/* I2C1 at standard-mode 100 kHz. */
static const struct imxi2c_platform_data mx25_3ds_i2c0_data __initconst = {
	.bitrate = 100000,
};

#define SD1_GPIO_WP	IMX_GPIO_NR(2, 0)
#define SD1_GPIO_CD	IMX_GPIO_NR(2, 1)

/* SD1 write-protect and card-detect via GPIOs (pads muxed above). */
static const struct esdhc_platform_data mx25pdk_esdhc_pdata __initconst = {
	.wp_gpio = SD1_GPIO_WP,
	.cd_gpio = SD1_GPIO_CD,
};
/* Set up pad muxing, then register all on-SoC devices used by the PDK.
 * The FEC PHY is power-cycled/reset before the FEC device is added. */
static void __init mx25pdk_init(void)
{
	mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads,
			ARRAY_SIZE(mx25pdk_pads));

	imx25_add_imx_uart0(&uart_pdata);
	imx25_add_fsl_usb2_udc(&otg_device_pdata);
	imx25_add_mxc_ehci_hs(&usbh2_pdata);
	imx25_add_mxc_nand(&mx25pdk_nand_board_info);
	imx25_add_imxdi_rtc(NULL);
	imx25_add_imx_fb(&mx25pdk_fb_pdata);
	imx25_add_imx2_wdt(NULL);

	mx25pdk_fec_reset();
	imx25_add_fec(&mx25_fec_pdata);
	imx25_add_imx_keypad(&mx25pdk_keymap_data);

	imx25_add_sdhci_esdhc_imx(0, &mx25pdk_esdhc_pdata);
	imx25_add_imx_i2c0(&mx25_3ds_i2c0_data);
}

/* Clock bring-up hook referenced by the machine descriptor below. */
static void __init mx25pdk_timer_init(void)
{
	mx25_clocks_init();
}

static struct sys_timer mx25pdk_timer = {
	.init   = mx25pdk_timer_init,
};
/* Machine descriptor for the Freescale i.MX25 PDK (3-Stack). */
MACHINE_START(MX25_3DS, "Freescale MX25PDK (3DS)")
	/* Maintainer: Freescale Semiconductor, Inc. */
	.boot_params    = MX25_PHYS_OFFSET + 0x100,
	.map_io         = mx25_map_io,
	.init_early     = imx25_init_early,
	.init_irq       = mx25_init_irq,
	.timer          = &mx25pdk_timer,
	.init_machine   = mx25pdk_init,
MACHINE_END
| gpl-2.0 |
zhaochengw/ef40s_jb_kernel | drivers/staging/et131x/et131x_isr.c | 2535 | 14680 | /*
* Agere Systems Inc.
* 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
*
* Copyright © 2005 Agere Systems Inc.
* All rights reserved.
* http://www.agere.com
*
*------------------------------------------------------------------------------
*
* et131x_isr.c - File which contains the ISR, ISR handler, and related routines
* for processing interrupts from the device.
*
*------------------------------------------------------------------------------
*
* SOFTWARE LICENSE
*
* This software is provided subject to the following terms and conditions,
* which you should read carefully before using the software. Using this
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
* Copyright © 2005 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
* modifications, are permitted provided that the following conditions are met:
*
* . Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following Disclaimer as comments in the code as
* well as in the documentation and/or other materials provided with the
* distribution.
*
* . Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following Disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* . Neither the name of Agere Systems Inc. nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Disclaimer
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
* RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#include "et131x_version.h"
#include "et131x_defs.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <asm/system.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include "et1310_phy.h"
#include "et131x_adapter.h"
#include "et131x.h"
/*
* For interrupts, normal running is:
* rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
* watchdog_interrupt & txdma_xfer_done
*
* In both cases, when flow control is enabled for either Tx or bi-direction,
* we additional enable rx_fbr0_low and rx_fbr1_low, so we know when the
* buffer rings are running low.
*/
/* All-ones mask: every interrupt source masked (disabled). */
#define INT_MASK_DISABLE            0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE             0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW     0xfff6bfd7
 */
/* Active-low enable masks; the NO_FLOW variant additionally masks the
 * free-buffer-ring-low sources used only with Tx flow control. */
#define INT_MASK_ENABLE             0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW     0xfffebfd7
/**
* et131x_enable_interrupts - enable interrupt
* @adapter: et131x device
*
* Enable the appropriate interrupts on the ET131x according to our
* configuration
*/
void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
u32 mask;
/* Enable all global interrupts */
if (adapter->flowcontrol == FLOW_TXONLY || adapter->flowcontrol == FLOW_BOTH)
mask = INT_MASK_ENABLE;
else
mask = INT_MASK_ENABLE_NO_FLOW;
adapter->CachedMaskValue = mask;
writel(mask, &adapter->regs->global.int_mask);
}
/**
 * et131x_disable_interrupts - interrupt disable
 * @adapter: et131x device
 *
 * Block all interrupts from the et131x device at the device itself
 */
void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	/* Disable all global interrupts */
	/* Cache the all-masked value so the software copy tracks hardware. */
	adapter->CachedMaskValue = INT_MASK_DISABLE;
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}
/**
 * et131x_isr - The Interrupt Service Routine for the driver.
 * @irq: the IRQ on which the interrupt was received.
 * @dev_id: device-specific info (here a pointer to a net_device struct)
 *
 * Masks device interrupts, reads and filters the status register, and
 * defers real work to et131x_isr_handler() via schedule_work().
 * Interrupts are re-enabled here only if nothing was scheduled.
 *
 * Returns a value indicating if the interrupt was handled.
 */
irqreturn_t et131x_isr(int irq, void *dev_id)
{
	bool handled = true;
	struct net_device *netdev = (struct net_device *)dev_id;
	struct et131x_adapter *adapter = NULL;
	u32 status;

	if (!netif_device_present(netdev)) {
		handled = false;
		goto out;
	}

	adapter = netdev_priv(netdev);

	/* If the adapter is in low power state, then it should not
	 * recognize any interrupt
	 */

	/* Disable Device Interrupts */
	et131x_disable_interrupts(adapter);

	/* Get a copy of the value in the interrupt status register
	 * so we can process the interrupting section
	 */
	status = readl(&adapter->regs->global.int_status);

	/* Drop the sources that are masked in the current flow-control
	 * configuration (the enable masks are active-low). */
	if (adapter->flowcontrol == FLOW_TXONLY ||
	    adapter->flowcontrol == FLOW_BOTH) {
		status &= ~INT_MASK_ENABLE;
	} else {
		status &= ~INT_MASK_ENABLE_NO_FLOW;
	}

	/* Make sure this is our interrupt */
	if (!status) {
		handled = false;
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* This is our interrupt, so process accordingly */

	if (status & ET_INTR_WATCHDOG) {
		struct tcb *tcb = adapter->tx_ring.send_head;

		/* Promote a stale pending transmit to a Tx-done event. */
		if (tcb)
			if (++tcb->stale > 1)
				status |= ET_INTR_TXDMA_ISR;

		if (adapter->rx_ring.UnfinishedReceives)
			status |= ET_INTR_RXDMA_XFR_DONE;
		else if (tcb == NULL)
			writel(0, &adapter->regs->global.watchdog_timer);

		status &= ~ET_INTR_WATCHDOG;
	}

	if (status == 0) {
		/* This interrupt has in some way been "handled" by
		 * the ISR. Either it was a spurious Rx interrupt, or
		 * it was a Tx interrupt that has been filtered by
		 * the ISR.
		 */
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* We need to save the interrupt status value for use in our
	 * DPC. We will clear the software copy of that in that
	 * routine.
	 */
	adapter->Stats.InterruptStatus = status;

	/* Schedule the ISR handler as a bottom-half task in the
	 * kernel's tq_immediate queue, and mark the queue for
	 * execution
	 */
	schedule_work(&adapter->task);
out:
	return IRQ_RETVAL(handled);
}
/**
 * et131x_isr_handler - The ISR handler
 * @work: work item embedded in the adapter (adapter->task)
 *
 * scheduled to run in a deferred context by the ISR. This is where the ISR's
 * work actually gets done: Tx/Rx completions first, then the rarer error
 * and status sources. Device interrupts are re-enabled on exit.
 */
void et131x_isr_handler(struct work_struct *work)
{
	struct et131x_adapter *etdev =
		container_of(work, struct et131x_adapter, task);
	u32 status = etdev->Stats.InterruptStatus;
	ADDRESS_MAP_t __iomem *iomem = etdev->regs;

	/*
	 * These first two are by far the most common.  Once handled, we clear
	 * their two bits in the status word.  If the word is now zero, we
	 * exit.
	 */
	/* Handle all the completed Transmit interrupts */
	if (status & ET_INTR_TXDMA_ISR)
		et131x_handle_send_interrupt(etdev);

	/* Handle all the completed Receives interrupts */
	if (status & ET_INTR_RXDMA_XFR_DONE)
		et131x_handle_recv_interrupt(etdev);

	/* Clear the two bits handled above (mask = ~0x28).
	 * NOTE(review): verify the constant still matches
	 * ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE if those
	 * definitions ever change. */
	status &= 0xffffffd7;

	if (status) {
		/* Handle the TXDMA Error interrupt */
		if (status & ET_INTR_TXDMA_ERR) {
			u32 txdma_err;

			/* Following read also clears the register (COR) */
			txdma_err = readl(&iomem->txdma.TxDmaError);

			dev_warn(&etdev->pdev->dev,
				    "TXDMA_ERR interrupt, error = %d\n",
				    txdma_err);
		}

		/* Handle Free Buffer Ring 0 and 1 Low interrupt */
		if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
			/*
			 * This indicates the number of unused buffers in
			 * RXDMA free buffer ring 0 is <= the limit you
			 * programmed. Free buffer resources need to be
			 * returned. Free buffers are consumed as packets
			 * are passed from the network to the host. The host
			 * becomes aware of the packets from the contents of
			 * the packet status ring. This ring is queried when
			 * the packet done interrupt occurs. Packets are then
			 * passed to the OS. When the OS is done with the
			 * packets the resources can be returned to the
			 * ET1310 for re-use. This interrupt is one method of
			 * returning resources.
			 */

			/* If the user has flow control on, then we will
			 * send a pause packet, otherwise just exit
			 */
			if (etdev->flowcontrol == FLOW_TXONLY ||
			    etdev->flowcontrol == FLOW_BOTH) {
				u32 pm_csr;

				/* Tell the device to send a pause packet via
				 * the back pressure register (bp req and
				 * bp xon/xoff)
				 */
				pm_csr = readl(&iomem->global.pm_csr);
				if ((pm_csr & ET_PM_PHY_SW_COMA) == 0)
					writel(3, &iomem->txmac.bp_ctrl);
			}
		}

		/* Handle Packet Status Ring Low Interrupt */
		if (status & ET_INTR_RXDMA_STAT_LOW) {
			/*
			 * Same idea as with the two Free Buffer Rings.
			 * Packets going from the network to the host each
			 * consume a free buffer resource and a packet status
			 * resource. These resoures are passed to the OS.
			 * When the OS is done with the resources, they need
			 * to be returned to the ET1310. This is one method
			 * of returning the resources.
			 */
		}

		/* Handle RXDMA Error Interrupt */
		if (status & ET_INTR_RXDMA_ERR) {
			/*
			 * The rxdma_error interrupt is sent when a time-out
			 * on a request issued by the JAGCore has occurred or
			 * a completion is returned with an un-successful
			 * status. In both cases the request is considered
			 * complete. The JAGCore will automatically re-try the
			 * request in question. Normally information on events
			 * like these are sent to the host using the "Advanced
			 * Error Reporting" capability. This interrupt is
			 * another way of getting similar information. The
			 * only thing required is to clear the interrupt by
			 * reading the ISR in the global resources. The
			 * JAGCore will do a re-try on the request. Normally
			 * you should never see this interrupt. If you start
			 * to see this interrupt occurring frequently then
			 * something bad has occurred. A reset might be the
			 * thing to do.
			 */
			/* TRAP();*/

			/* NOTE(review): the warning reads txmac.tx_test --
			 * presumably this register doubles as the RxDMA
			 * error/clear location; confirm against the ET1310
			 * register map. */
			dev_warn(&etdev->pdev->dev,
				    "RxDMA_ERR interrupt, error %x\n",
				    readl(&iomem->txmac.tx_test));
		}

		/* Handle the Wake on LAN Event */
		if (status & ET_INTR_WOL) {
			/*
			 * This is a secondary interrupt for wake on LAN.
			 * The driver should never see this, if it does,
			 * something serious is wrong. We will TRAP the
			 * message when we are in DBG mode, otherwise we
			 * will ignore it.
			 */
			dev_err(&etdev->pdev->dev, "WAKE_ON_LAN interrupt\n");
		}

		/* Handle the PHY interrupt */
		if (status & ET_INTR_PHY) {
			u32 pm_csr;
			MI_BMSR_t BmsrInts, BmsrData;
			u16 myisr;

			/* If we are in coma mode when we get this interrupt,
			 * we need to disable it.
			 */
			pm_csr = readl(&iomem->global.pm_csr);
			if (pm_csr & ET_PM_PHY_SW_COMA) {
				/*
				 * Check to see if we are in coma mode and if
				 * so, disable it because we will not be able
				 * to read PHY values until we are out.
				 */
				DisablePhyComa(etdev);
			}

			/* Read the PHY ISR to clear the reason for the
			 * interrupt.
			 */
			MiRead(etdev, (uint8_t) offsetof(struct mi_regs, isr),
			       &myisr);

			if (!etdev->ReplicaPhyLoopbk) {
				MiRead(etdev,
				       (uint8_t) offsetof(struct mi_regs, bmsr),
				       &BmsrData.value);

				/* XOR old and new BMSR to find changed bits. */
				BmsrInts.value =
				    etdev->Bmsr.value ^ BmsrData.value;
				etdev->Bmsr.value = BmsrData.value;

				/* Do all the cable in / cable out stuff */
				et131x_Mii_check(etdev, BmsrData, BmsrInts);
			}
		}

		/* Let's move on to the TxMac */
		if (status & ET_INTR_TXMAC) {
			u32 err = readl(&iomem->txmac.err);

			/*
			 * When any of the errors occur and TXMAC generates
			 * an interrupt to report these errors, it usually
			 * means that TXMAC has detected an error in the data
			 * stream retrieved from the on-chip Tx Q. All of
			 * these errors are catastrophic and TXMAC won't be
			 * able to recover data when these errors occur. In
			 * a nutshell, the whole Tx path will have to be reset
			 * and re-configured afterwards.
			 */
			dev_warn(&etdev->pdev->dev,
				    "TXMAC interrupt, error 0x%08x\n",
				    err);

			/* If we are debugging, we want to see this error,
			 * otherwise we just want the device to be reset and
			 * continue
			 */
		}

		/* Handle RXMAC Interrupt */
		if (status & ET_INTR_RXMAC) {
			/*
			 * These interrupts are catastrophic to the device,
			 * what we need to do is disable the interrupts and
			 * set the flag to cause us to reset so we can solve
			 * this issue.
			 */
			/* MP_SET_FLAG( etdev,
						fMP_ADAPTER_HARDWARE_ERROR); */

			dev_warn(&etdev->pdev->dev,
			  "RXMAC interrupt, error 0x%08x.  Requesting reset\n",
				    readl(&iomem->rxmac.err_reg));

			dev_warn(&etdev->pdev->dev,
				    "Enable 0x%08x, Diag 0x%08x\n",
				    readl(&iomem->rxmac.ctrl),
				    readl(&iomem->rxmac.rxq_diag));

			/*
			 * If we are debugging, we want to see this error,
			 * otherwise we just want the device to be reset and
			 * continue
			 */
		}

		/* Handle MAC_STAT Interrupt */
		if (status & ET_INTR_MAC_STAT) {
			/*
			 * This means at least one of the un-masked counters
			 * in the MAC_STAT block has rolled over. Use this
			 * to maintain the top, software managed bits of the
			 * counter(s).
			 */
			HandleMacStatInterrupt(etdev);
		}

		/* Handle SLV Timeout Interrupt */
		if (status & ET_INTR_SLV_TIMEOUT) {
			/*
			 * This means a timeout has occurred on a read or
			 * write request to one of the JAGCore registers. The
			 * Global Resources block has terminated the request
			 * and on a read request, returned a "fake" value.
			 * The most likely reasons are: Bad Address or the
			 * addressed module is in a power-down state and
			 * can't respond.
			 */
		}
	}
	et131x_enable_interrupts(etdev);
}
| gpl-2.0 |
omegamoon/Rockchip-GPL-Kernel | drivers/net/ucc_geth.c | 2535 | 122209 | /*
* Copyright (C) 2006-2009 Freescale Semicondutor, Inc. All rights reserved.
*
* Author: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
*
* Description:
* QE UCC Gigabit Ethernet Driver
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>
#include <asm/machdep.h>
#include "ucc_geth.h"
#include "fsl_pq_mdio.h"
#undef DEBUG
/* Logging helpers: each wraps printk() so every driver message carries a
 * consistent log level and a trailing newline. */
#define ugeth_printk(level, format, arg...)  \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...)            \
	ugeth_printk(KERN_DEBUG , format , ## arg)
#define ugeth_err(format, arg...)            \
	ugeth_printk(KERN_ERR , format , ## arg)
#define ugeth_info(format, arg...)           \
	ugeth_printk(KERN_INFO , format , ## arg)
#define ugeth_warn(format, arg...)           \
	ugeth_printk(KERN_WARNING , format , ## arg)

#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif				/* UGETH_VERBOSE_DEBUG */

/* Default netif message-level mask: everything up to (not including)
 * NETIF_MSG_IFUP's next bit.  The whole expansion is parenthesized so the
 * macro binds correctly inside larger expressions (e.g. `x & UGETH_MSG_DEFAULT`
 * would otherwise parse as `(x & (NETIF_MSG_IFUP << 1)) - 1`). */
#define UGETH_MSG_DEFAULT	((NETIF_MSG_IFUP << 1) - 1)
/* Protects the short list-manipulation critical section in dequeue(). */
static DEFINE_SPINLOCK(ugeth_lock);

/* Message-level bitmask, settable at module load time via the "debug="
 * parameter; -1 enables all message classes by default. */
static struct {
	u32 msg_enable;
} debug = { -1 };

module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
/* Template configuration for one UCC Gigabit Ethernet controller; copied
 * into ugeth_info[] below and then adjusted per device-tree node at probe
 * time.  Values are QE/UCC hardware parameters (FIFO thresholds, inter-frame
 * gaps, BD ring lengths, ...). */
static struct ucc_geth_info ugeth_primary_info = {
	.uf_info = {
		    .bd_mem_part = MEM_PART_SYSTEM,
		    .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
		    .max_rx_buf_length = 1536,
		    /* adjusted at startup if max-speed 1000 */
		    .urfs = UCC_GETH_URFS_INIT,
		    .urfet = UCC_GETH_URFET_INIT,
		    .urfset = UCC_GETH_URFSET_INIT,
		    .utfs = UCC_GETH_UTFS_INIT,
		    .utfet = UCC_GETH_UTFET_INIT,
		    .utftt = UCC_GETH_UTFTT_INIT,
		    .ufpt = 256,
		    .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
		    .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		    .tenc = UCC_FAST_TX_ENCODING_NRZ,
		    .renc = UCC_FAST_RX_ENCODING_NRZ,
		    .tcrc = UCC_FAST_16_BIT_CRC,
		    .synl = UCC_FAST_SYNC_LEN_NOT_USED,
		    },
	.numQueuesTx = 1,
	.numQueuesRx = 1,
	.extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /*1536 */ ,
	.nonBackToBackIfgPart1 = 0x40,
	.nonBackToBackIfgPart2 = 0x60,
	.miminumInterFrameGapEnforcement = 0x50,
	.backToBackInterFrameGap = 0x60,
	.mblinterval = 128,
	.nortsrbytetime = 5,
	.fracsiz = 1,
	.strictpriorityq = 0xff,
	.altBebTruncation = 0xa,
	.excessDefer = 1,
	.maxRetransmission = 0xf,
	.collisionWindow = 0x37,
	.receiveFlowControl = 1,
	.transmitFlowControl = 1,
	.maxGroupAddrInHash = 4,
	.maxIndAddrInHash = 4,
	.prel = 7,
	.maxFrameLength = 1518,
	.minFrameLength = 64,
	.maxD1Length = 1520,
	.maxD2Length = 1520,
	.vlantype = 0x8100,
	.ecamptr = ((uint32_t) NULL),
	.eventRegMask = UCCE_OTHER,
	.pausePeriod = 0xf000,
	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
	/* One ring length per possible Tx/Rx queue (8 each) */
	.bdRingLenTx = {
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN},
	.bdRingLenRx = {
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN},
	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
	.largestexternallookupkeysize =
	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};

/* Per-controller copies of the template above (up to 8 UCC instances). */
static struct ucc_geth_info ugeth_info[8];
#ifdef DEBUG
/* Hex-dump @size bytes starting at @addr to the log: full 16-byte rows
 * first, then any remaining 4-byte words, then trailing single bytes.
 * Debug-build helper; read-only. */
static void mem_disp(u8 *addr, int size)
{
	u8 *p = addr;
	u8 *end16 = addr + ((size >> 4) << 4);	/* end of 16-byte rows */
	u8 *end4 = addr + ((size >> 2) << 2);	/* end of 4-byte words */
	int ragged = (size % 16) != 0;		/* partial final row? */

	while (p < end16) {
		printk("0x%08x: %08x %08x %08x %08x\r\n",
		       (u32) p,
		       *((u32 *) (p)),
		       *((u32 *) (p + 4)),
		       *((u32 *) (p + 8)), *((u32 *) (p + 12)));
		p += 16;
	}

	/* Partial row gets its own address prefix */
	if (ragged)
		printk("0x%08x: ", (u32) p);
	while (p < end4) {
		printk("%08x ", *((u32 *) (p)));
		p += 4;
	}
	while (p < addr + size) {
		printk("%02x", *((u8 *) (p)));
		p++;
	}
	if (ragged)
		printk("\r\n");
}
#endif /* DEBUG */
/* Atomically detach and return the first node of list @lh, or NULL if the
 * list is empty.  The global ugeth_lock serializes all callers. */
static struct list_head *dequeue(struct list_head *lh)
{
	struct list_head *node = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		node = lh->next;
		list_del(node);
	}
	spin_unlock_irqrestore(&ugeth_lock, flags);

	return node;
}
/* Obtain an Rx skb (recycled if available, freshly allocated otherwise),
 * align its data area, DMA-map it and attach it to buffer descriptor @bd,
 * then hand the BD back to the hardware (Empty + Interrupt, wrap bit kept).
 *
 * Returns the skb, or NULL if no buffer could be obtained.
 */
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
				   u8 __iomem *bd)
{
	struct sk_buff *skb = NULL;

	/* Prefer a recycled skb over a fresh allocation */
	skb = __skb_dequeue(&ugeth->rx_recycle);
	if (!skb)
		skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
				    UCC_GETH_RX_DATA_BUF_ALIGNMENT);
	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
					      1)));

	skb->dev = ugeth->ndev;

	/* Publish the DMA address of the data buffer in the BD first ... */
	out_be32(&((struct qe_bd __iomem *)bd)->buf,
		 dma_map_single(ugeth->dev,
				skb->data,
				ugeth->ug_info->uf_info.max_rx_buf_length +
				UCC_GETH_RX_DATA_BUF_ALIGNMENT,
				DMA_FROM_DEVICE));

	/* ... then mark the BD empty and interrupt-enabled, preserving
	 * only the ring-wrap bit from the previous status word. */
	out_be32((u32 __iomem *)bd,
		 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));

	return skb;
}
/* Attach a freshly allocated skb to every BD of Rx queue @rxQ, walking the
 * ring until the wrap (R_W) bit marks the last descriptor.
 *
 * Returns 0 on success or -ENOMEM if any allocation fails; partially
 * populated state is left for the caller to tear down.
 */
static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
	u8 __iomem *bd;
	u32 bd_status;
	struct sk_buff *skb;
	int i;

	bd = ugeth->p_rx_bd_ring[rxQ];
	i = 0;

	do {
		/* Read the status word before get_new_skb() rewrites the BD;
		 * the wrap bit from this snapshot terminates the loop. */
		bd_status = in_be32((u32 __iomem *)bd);
		skb = get_new_skb(ugeth, bd);

		if (!skb)	/* If can not allocate data buffer,
				   abort. Cleanup will be elsewhere */
			return -ENOMEM;

		ugeth->rx_skbuff[rxQ][i] = skb;

		/* advance the BD pointer */
		bd += sizeof(struct qe_bd);
		i++;
	} while (!(bd_status & R_W));

	return 0;
}
/* Populate @num_entries "init enet" parameter-RAM entries at @p_start.
 * Each entry is assigned a QE serial number (snum) and, except optionally
 * the first (Rx page-less case), a MURAM region of @thread_size bytes;
 * the entry word encodes snum | MURAM offset | @risc.
 *
 * Returns 0 on success, the negative snum error if no snum is available,
 * or -ENOMEM if MURAM is exhausted (the snum taken for the failing entry
 * is returned to the pool first).  Already-written entries are cleaned up
 * by the caller via return_init_enet_entries().
 */
static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  u32 thread_alignment,
				  unsigned int risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		if ((snum = qe_get_snum()) < 0) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("fill_init_enet_entries: Can not get SNUM.");
			return snum;
		}
		if ((i == 0) && skip_page_for_first_entry)
			/* First entry of Rx does not have page */
			init_enet_offset = 0;
		else {
			init_enet_offset =
			    qe_muram_alloc(thread_size, thread_alignment);
			if (IS_ERR_VALUE(init_enet_offset)) {
				if (netif_msg_ifup(ugeth))
					ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
				/* Give back the snum reserved for this entry */
				qe_put_snum((u8) snum);
				return -ENOMEM;
			}
		}
		*(p_start++) =
		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
		    | risc;
	}

	return 0;
}
/* Undo fill_init_enet_entries(): for every entry tagged with @risc, return
 * its snum to the pool, free its MURAM page (except optionally entry 0),
 * and zero the entry word.  Entries that never got populated (allocation
 * failed part-way) carry a different RISC tag and are left untouched.
 *
 * NOTE(review): as in the original, the entry pointer advances only on a
 * matching entry -- a non-matching entry is re-examined on the next loop
 * iteration.  Preserved verbatim; confirm against call sites before
 * changing.
 *
 * Always returns 0.
 */
static int return_init_enet_entries(struct ucc_geth_private *ugeth,
				    u32 *p_start,
				    u8 num_entries,
				    unsigned int risc,
				    int skip_page_for_first_entry)
{
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = *p_start;

		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) != risc)
			continue;

		snum = (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
		    ENET_INIT_PARAM_SNUM_SHIFT;
		qe_put_snum((u8) snum);

		/* First entry of Rx does not have page */
		if (!((i == 0) && skip_page_for_first_entry))
			qe_muram_free(val & ENET_INIT_PARAM_PTR_MASK);

		*p_start++ = 0;
	}

	return 0;
}
#ifdef DEBUG
/* Debug helper: walk @num_entries init-enet entries and, for every entry
 * tagged with @risc, log its MURAM base address and hex-dump its thread
 * parameter RAM.  Also returns each entry's snum to the pool as a side
 * effect (mirrors return_init_enet_entries).  Always returns 0.
 *
 * NOTE(review): like return_init_enet_entries(), the pointer advances only
 * on matching entries.
 */
static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 __iomem *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  unsigned int risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = in_be32(p_start);

		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (in_be32(p_start) &
				     ENET_INIT_PARAM_PTR_MASK);
				ugeth_info("Init enet entry %d:", i);
				ugeth_info("Base address: 0x%08x",
					   (u32)
					   qe_muram_addr(init_enet_offset));
				mem_disp(qe_muram_addr(init_enet_offset),
					 thread_size);
			}
			p_start++;
		}
	}

	return 0;
}
#endif
/* Free an address container taken off one of the filtering lists.
 * kfree(NULL) is a no-op, so a NULL argument is safe. */
static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
{
	kfree(enet_addr_cont);
}
/* Write a six-byte MAC address into three big-endian 16-bit registers,
 * byte-reversed: reg[0] = mac[5]:mac[4], reg[1] = mac[3]:mac[2],
 * reg[2] = mac[1]:mac[0]. */
static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
{
	int i;

	for (i = 0; i < 3; i++)
		out_be16(&reg[i],
			 ((u16)mac[5 - 2 * i] << 8) | mac[4 - 2 * i]);
}
/* Disable one of the hardware's exact-match station-address (paddr) filter
 * slots by writing the all-ones address into it.
 *
 * @paddr_num: slot index, must be < NUM_OF_PADDRS.
 *
 * Returns 0 on success, -EINVAL for an out-of-range slot index.
 */
static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		/* Fixed misspelled "Illagel" in the original message */
		ugeth_warn("%s: Illegal paddr_num.", __func__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Writing address ff.ff.ff.ff.ff.ff disables address
	   recognition for this register */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);

	return 0;
}
/* Add @p_enet_addr to the hardware group-hash filter: write the address
 * (byte-reversed for the big-endian hash engine) into the temporary address
 * register of the 82xx-style filtering PRAM, then issue the QE
 * SET_GROUP_ADDRESS command for this UCC. */
static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	u32 cecr_subblock;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	/* Ethernet frames are defined in Little Endian mode,
	   therefore to insert */
	/* the address to the hash (Big Endian mode), we reverse the bytes.*/
	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);

	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
}
/* memcmp() two Ethernet addresses; returns 0 when equal.
 *
 * NOTE(review): the parameters are u8 ** but memcmp() is applied to the
 * double pointers themselves, so the bytes compared are those AT the
 * addresses passed in -- this is only correct if callers pass a pointer to
 * the first byte of the MAC address cast to u8 **.  Verify against the call
 * sites before changing the signature or adding a dereference. */
static inline int compare_addr(u8 **addr1, u8 **addr2)
{
	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
}
#ifdef DEBUG
/* Debug helper: copy MAC statistics from up to three sources -- Tx firmware
 * counters, Rx firmware counters and the hardware statistics registers.
 * Each group is copied only when the caller supplied a destination pointer
 * AND the driver/hardware is actually collecting that group. */
static void get_statistics(struct ucc_geth_private *ugeth,
			   struct ucc_geth_tx_firmware_statistics *
			   tx_firmware_statistics,
			   struct ucc_geth_rx_firmware_statistics *
			   rx_firmware_statistics,
			   struct ucc_geth_hardware_statistics *hardware_statistics)
{
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth __iomem *ug_regs;
	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;

	ug_regs = ugeth->ug_regs;
	/* UCC fast registers share the base of the geth register block */
	uf_regs = (struct ucc_fast __iomem *) ug_regs;
	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;

	/* Tx firmware only if user handed pointer and driver actually
	   gathers Tx firmware statistics */
	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
		tx_firmware_statistics->sicoltx =
		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
		tx_firmware_statistics->mulcoltx =
		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
		tx_firmware_statistics->latecoltxfr =
		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
		tx_firmware_statistics->frabortduecol =
		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
		tx_firmware_statistics->frlostinmactxer =
		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
		tx_firmware_statistics->carriersenseertx =
		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
		tx_firmware_statistics->frtxok =
		    in_be32(&p_tx_fw_statistics_pram->frtxok);
		tx_firmware_statistics->txfrexcessivedefer =
		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
		tx_firmware_statistics->txpkts256 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
		tx_firmware_statistics->txpkts512 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
		tx_firmware_statistics->txpkts1024 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
		tx_firmware_statistics->txpktsjumbo =
		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
	}

	/* Rx firmware only if user handed pointer and driver actually
	 * gathers Rx firmware statistics */
	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
		int i;

		rx_firmware_statistics->frrxfcser =
		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
		rx_firmware_statistics->fraligner =
		    in_be32(&p_rx_fw_statistics_pram->fraligner);
		rx_firmware_statistics->inrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
		rx_firmware_statistics->outrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
		rx_firmware_statistics->frtoolong =
		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
		rx_firmware_statistics->runt =
		    in_be32(&p_rx_fw_statistics_pram->runt);
		rx_firmware_statistics->verylongevent =
		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
		rx_firmware_statistics->symbolerror =
		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
		rx_firmware_statistics->dropbsy =
		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
		/* reserved fields copied raw (not byte-swapped) */
		for (i = 0; i < 0x8; i++)
			rx_firmware_statistics->res0[i] =
			    p_rx_fw_statistics_pram->res0[i];
		rx_firmware_statistics->mismatchdrop =
		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
		rx_firmware_statistics->underpkts =
		    in_be32(&p_rx_fw_statistics_pram->underpkts);
		rx_firmware_statistics->pkts256 =
		    in_be32(&p_rx_fw_statistics_pram->pkts256);
		rx_firmware_statistics->pkts512 =
		    in_be32(&p_rx_fw_statistics_pram->pkts512);
		rx_firmware_statistics->pkts1024 =
		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
		rx_firmware_statistics->pktsjumbo =
		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
		rx_firmware_statistics->frlossinmacer =
		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
		rx_firmware_statistics->pausefr =
		    in_be32(&p_rx_fw_statistics_pram->pausefr);
		for (i = 0; i < 0x4; i++)
			rx_firmware_statistics->res1[i] =
			    p_rx_fw_statistics_pram->res1[i];
		rx_firmware_statistics->removevlan =
		    in_be32(&p_rx_fw_statistics_pram->removevlan);
		rx_firmware_statistics->replacevlan =
		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
		rx_firmware_statistics->insertvlan =
		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
	}

	/* Hardware only if user handed pointer and driver actually
	   gathers hardware statistics */
	if (hardware_statistics &&
	    (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
		hardware_statistics->txok = in_be32(&ug_regs->txok);
		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
	}
}
/* Debug helper: hex-dump every allocated Tx BD ring, then every allocated
 * Rx BD ring, via mem_disp().  Rings that were never allocated are
 * skipped. */
static void dump_bds(struct ucc_geth_private *ugeth)
{
	int q;
	int len;

	for (q = 0; q < ugeth->ug_info->numQueuesTx; q++) {
		if (!ugeth->p_tx_bd_ring[q])
			continue;
		len = ugeth->ug_info->bdRingLenTx[q] *
		    sizeof(struct qe_bd);
		ugeth_info("TX BDs[%d]", q);
		mem_disp(ugeth->p_tx_bd_ring[q], len);
	}
	for (q = 0; q < ugeth->ug_info->numQueuesRx; q++) {
		if (!ugeth->p_rx_bd_ring[q])
			continue;
		len = ugeth->ug_info->bdRingLenRx[q] *
		    sizeof(struct qe_bd);
		ugeth_info("RX BDs[%d]", q);
		mem_disp(ugeth->p_rx_bd_ring[q], len);
	}
}
/* Debug helper: dump every visible register and parameter-RAM structure of
 * this UCC Geth instance to the log -- MAC registers, RMON counters, thread
 * data, Tx/Rx global parameter RAM, send-queue/scheduler/statistics/IRQ
 * coalescing/BD queue tables, and the init-enet shadow.  Read-only apart
 * from the snum side effect inside dump_init_enet_entries(). */
static void dump_regs(struct ucc_geth_private *ugeth)
{
	int i;

	/* --- MAC configuration and RMON counter registers --- */
	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1);
	ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);

	ugeth_info("maccfg1    : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->maccfg1,
		   in_be32(&ugeth->ug_regs->maccfg1));
	ugeth_info("maccfg2    : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->maccfg2,
		   in_be32(&ugeth->ug_regs->maccfg2));
	ugeth_info("ipgifg     : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ipgifg,
		   in_be32(&ugeth->ug_regs->ipgifg));
	ugeth_info("hafdup     : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->hafdup,
		   in_be32(&ugeth->ug_regs->hafdup));
	ugeth_info("ifctl      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ifctl,
		   in_be32(&ugeth->ug_regs->ifctl));
	ugeth_info("ifstat     : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ifstat,
		   in_be32(&ugeth->ug_regs->ifstat));
	ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->macstnaddr1,
		   in_be32(&ugeth->ug_regs->macstnaddr1));
	ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->macstnaddr2,
		   in_be32(&ugeth->ug_regs->macstnaddr2));
	ugeth_info("uempr      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->uempr,
		   in_be32(&ugeth->ug_regs->uempr));
	ugeth_info("utbipar    : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->utbipar,
		   in_be32(&ugeth->ug_regs->utbipar));
	ugeth_info("uescr      : addr - 0x%08x, val - 0x%04x",
		   (u32) & ugeth->ug_regs->uescr,
		   in_be16(&ugeth->ug_regs->uescr));
	ugeth_info("tx64       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx64,
		   in_be32(&ugeth->ug_regs->tx64));
	ugeth_info("tx127      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx127,
		   in_be32(&ugeth->ug_regs->tx127));
	ugeth_info("tx255      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx255,
		   in_be32(&ugeth->ug_regs->tx255));
	ugeth_info("rx64       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx64,
		   in_be32(&ugeth->ug_regs->rx64));
	ugeth_info("rx127      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx127,
		   in_be32(&ugeth->ug_regs->rx127));
	ugeth_info("rx255      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx255,
		   in_be32(&ugeth->ug_regs->rx255));
	ugeth_info("txok       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->txok,
		   in_be32(&ugeth->ug_regs->txok));
	ugeth_info("txcf       : addr - 0x%08x, val - 0x%04x",
		   (u32) & ugeth->ug_regs->txcf,
		   in_be16(&ugeth->ug_regs->txcf));
	ugeth_info("tmca       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tmca,
		   in_be32(&ugeth->ug_regs->tmca));
	ugeth_info("tbca       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tbca,
		   in_be32(&ugeth->ug_regs->tbca));
	ugeth_info("rxfok      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rxfok,
		   in_be32(&ugeth->ug_regs->rxfok));
	ugeth_info("rxbok      : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rxbok,
		   in_be32(&ugeth->ug_regs->rxbok));
	ugeth_info("rbyt       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rbyt,
		   in_be32(&ugeth->ug_regs->rbyt));
	ugeth_info("rmca       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rmca,
		   in_be32(&ugeth->ug_regs->rmca));
	ugeth_info("rbca       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rbca,
		   in_be32(&ugeth->ug_regs->rbca));
	ugeth_info("scar       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->scar,
		   in_be32(&ugeth->ug_regs->scar));
	ugeth_info("scam       : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->scam,
		   in_be32(&ugeth->ug_regs->scam));

	/* --- Per-thread Tx data (count decoded from the enum) --- */
	if (ugeth->p_thread_data_tx) {
		int numThreadsTxNumerical;
		switch (ugeth->ug_info->numThreadsTx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsTxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsTxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsTxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsTxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsTxNumerical = 8;
			break;
		default:
			numThreadsTxNumerical = 0;
			break;
		}

		ugeth_info("Thread data TXs:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_tx);
		for (i = 0; i < numThreadsTxNumerical; i++) {
			ugeth_info("Thread data TX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_thread_data_tx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
				 sizeof(struct ucc_geth_thread_data_tx));
		}
	}

	/* --- Per-thread Rx data --- */
	if (ugeth->p_thread_data_rx) {
		int numThreadsRxNumerical;
		switch (ugeth->ug_info->numThreadsRx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsRxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsRxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsRxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsRxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsRxNumerical = 8;
			break;
		default:
			numThreadsRxNumerical = 0;
			break;
		}

		ugeth_info("Thread data RX:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_rx);
		for (i = 0; i < numThreadsRxNumerical; i++) {
			ugeth_info("Thread data RX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_thread_data_rx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
				 sizeof(struct ucc_geth_thread_data_rx));
		}
	}

	/* --- Extended filtering global parameters --- */
	if (ugeth->p_exf_glbl_param) {
		ugeth_info("EXF global param:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_exf_glbl_param);
		mem_disp((u8 *) ugeth->p_exf_glbl_param,
			 sizeof(*ugeth->p_exf_glbl_param));
	}

	/* --- Tx global parameter RAM --- */
	if (ugeth->p_tx_glbl_pram) {
		ugeth_info("TX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
		ugeth_info("temoder      : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_tx_glbl_pram->temoder,
			   in_be16(&ugeth->p_tx_glbl_pram->temoder));
		ugeth_info("sqptr        : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->sqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->sqptr));
		ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
			   in_be32(&ugeth->p_tx_glbl_pram->
				   schedulerbasepointer));
		ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
			   in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
		ugeth_info("tstate       : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->tstate,
			   in_be32(&ugeth->p_tx_glbl_pram->tstate));
		ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
			   ugeth->p_tx_glbl_pram->iphoffset[0]);
		ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
			   ugeth->p_tx_glbl_pram->iphoffset[1]);
		ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
			   ugeth->p_tx_glbl_pram->iphoffset[2]);
		ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
			   ugeth->p_tx_glbl_pram->iphoffset[3]);
		ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
			   ugeth->p_tx_glbl_pram->iphoffset[4]);
		ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
			   ugeth->p_tx_glbl_pram->iphoffset[5]);
		ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
			   ugeth->p_tx_glbl_pram->iphoffset[6]);
		ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
			   ugeth->p_tx_glbl_pram->iphoffset[7]);
		ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
		ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
		ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
		ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
		ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
		ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
		ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
		ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
		ugeth_info("tqptr        : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->tqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->tqptr));
	}

	/* --- Rx global parameter RAM --- */
	if (ugeth->p_rx_glbl_pram) {
		ugeth_info("RX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
		ugeth_info("remoder         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->remoder,
			   in_be32(&ugeth->p_rx_glbl_pram->remoder));
		ugeth_info("rqptr           : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rqptr));
		ugeth_info("typeorlen       : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->typeorlen,
			   in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
		ugeth_info("rxgstpack       : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
			   ugeth->p_rx_glbl_pram->rxgstpack);
		ugeth_info("rxrmonbaseptr   : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
		ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
			   in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
		ugeth_info("rstate          : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_rx_glbl_pram->rstate,
			   ugeth->p_rx_glbl_pram->rstate);
		ugeth_info("mrblr           : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->mrblr,
			   in_be16(&ugeth->p_rx_glbl_pram->mrblr));
		ugeth_info("rbdqptr         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
		ugeth_info("mflr            : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->mflr,
			   in_be16(&ugeth->p_rx_glbl_pram->mflr));
		ugeth_info("minflr          : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->minflr,
			   in_be16(&ugeth->p_rx_glbl_pram->minflr));
		ugeth_info("maxd1           : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->maxd1,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd1));
		ugeth_info("maxd2           : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->maxd2,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd2));
		ugeth_info("ecamptr         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->ecamptr,
			   in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
		ugeth_info("l2qt            : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l2qt,
			   in_be32(&ugeth->p_rx_glbl_pram->l2qt));
		ugeth_info("l3qt[0]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
		ugeth_info("l3qt[1]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
		ugeth_info("l3qt[2]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
		ugeth_info("l3qt[3]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
		ugeth_info("l3qt[4]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
		ugeth_info("l3qt[5]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
		ugeth_info("l3qt[6]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
		ugeth_info("l3qt[7]         : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
		ugeth_info("vlantype        : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->vlantype,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantype));
		ugeth_info("vlantci         : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->vlantci,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantci));
		for (i = 0; i < 64; i++)
			ugeth_info
		    ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
			     i,
			     (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
			     ugeth->p_rx_glbl_pram->addressfiltering[i]);
		ugeth_info("exfGlobalParam  : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
			   in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
	}

	/* --- Send queue memory region --- */
	if (ugeth->p_send_q_mem_reg) {
		ugeth_info("Send Q memory registers:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_send_q_mem_reg);
		for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
			ugeth_info("SQQD[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
			mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
				 sizeof(struct ucc_geth_send_queue_qd));
		}
	}

	/* --- Scheduler and firmware statistics PRAM --- */
	if (ugeth->p_scheduler) {
		ugeth_info("Scheduler:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
		mem_disp((u8 *) ugeth->p_scheduler,
			 sizeof(*ugeth->p_scheduler));
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		ugeth_info("TX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_tx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
			 sizeof(*ugeth->p_tx_fw_statistics_pram));
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		ugeth_info("RX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
			 sizeof(*ugeth->p_rx_fw_statistics_pram));
	}

	/* --- Rx IRQ coalescing table --- */
	if (ugeth->p_rx_irq_coalescing_tbl) {
		ugeth_info("RX IRQ coalescing tables:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_irq_coalescing_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX IRQ coalescing table entry[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_rx_irq_coalescing_tbl->
				   coalescingentry[i]);
			ugeth_info
		    ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingmaxvalue,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingmaxvalue));
			ugeth_info
		    ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingcounter,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingcounter));
		}
	}

	/* --- Rx BD queue-state tables plus ucode-prefetched BDs --- */
	if (ugeth->p_rx_bd_qs_tbl) {
		ugeth_info("RX BD QS tables:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX BD QS table[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_rx_bd_qs_tbl[i]);
			ugeth_info
			    ("bdbaseptr        : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
			ugeth_info
			    ("bdptr            : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
			ugeth_info
			    ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].
				     externalbdbaseptr));
			ugeth_info
			    ("externalbdptr    : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
			ugeth_info("ucode RX Prefetched BDs:");
			ugeth_info("Base address: 0x%08x",
				   (u32)
				   qe_muram_addr(in_be32
						 (&ugeth->p_rx_bd_qs_tbl[i].
						  bdbaseptr)));
			mem_disp((u8 *)
				 qe_muram_addr(in_be32
					       (&ugeth->p_rx_bd_qs_tbl[i].
						bdbaseptr)),
				 sizeof(struct ucc_geth_rx_prefetched_bds));
		}
	}

	/* --- Init enet parameter shadow and its thread PRAM entries --- */
	if (ugeth->p_init_enet_param_shadow) {
		int size;
		ugeth_info("Init enet param shadow:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_init_enet_param_shadow);
		mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
			 sizeof(*ugeth->p_init_enet_param_shadow));

		/* Rx thread PRAM grows with extended-filtering options */
		size = sizeof(struct ucc_geth_thread_rx_pram);
		if (ugeth->ug_info->rxExtendedFiltering) {
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
				size +=
			THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
				size +=
			THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
		}

		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 txthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_TX,
				       sizeof(struct ucc_geth_thread_tx_pram),
				       ugeth->ug_info->riscTx, 0);
		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 rxthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
				       ugeth->ug_info->riscRx, 1);
	}
}
#endif /* DEBUG */
/* Program the UPSMR, MACCFG1 and MACCFG2 registers with the driver's
 * compile-time default values.  Called once during controller bring-up,
 * before feature-specific init_* helpers refine individual fields.
 */
static void init_default_reg_vals(u32 __iomem *upsmr_register,
				  u32 __iomem *maccfg1_register,
				  u32 __iomem *maccfg2_register)
{
	out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
	out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
	out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
}
/* Assemble and program the half-duplex (HAFDUP) register.
 *
 * The three numeric fields are range-checked against the register's
 * field widths; the four boolean flags select the corresponding mode
 * bits.  Returns 0 on success, -EINVAL if any field is out of range.
 */
static int init_half_duplex_params(int alt_beb,
				   int back_pressure_no_backoff,
				   int no_backoff,
				   int excess_defer,
				   u8 alt_beb_truncation,
				   u8 max_retransmissions,
				   u8 collision_window,
				   u32 __iomem *hafdup_register)
{
	u32 hafdup;

	/* Reject values that do not fit their register fields. */
	if (alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX ||
	    max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX ||
	    collision_window > HALFDUP_COLLISION_WINDOW_MAX)
		return -EINVAL;

	/* Numeric fields first... */
	hafdup = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
	hafdup |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
	hafdup |= collision_window;
	/* ...then the mode flags. */
	if (alt_beb)
		hafdup |= HALFDUP_ALT_BEB;
	if (back_pressure_no_backoff)
		hafdup |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
	if (no_backoff)
		hafdup |= HALFDUP_NO_BACKOFF;
	if (excess_defer)
		hafdup |= HALFDUP_EXCESSIVE_DEFER;

	out_be32(hafdup_register, hafdup);
	return 0;
}
/* Assemble and program the inter-frame gap (IPGIFG) register.
 *
 * Per the hardware spec, non-back-to-back IPG part 1 must not exceed
 * part 2, and every field must fit its register width (the minimum-IFG
 * check is intentionally left disabled, as in the original code).
 * Returns 0 on success, -EINVAL on a range violation.
 */
static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
				       u8 non_btb_ipg,
				       u8 min_ifg,
				       u8 btb_ipg,
				       u32 __iomem *ipgifg_register)
{
	u32 ipgifg;

	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
	   IPG part 2 */
	if (non_btb_cs_ipg > non_btb_ipg)
		return -EINVAL;
	if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
	    (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
	    /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
	    (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
		return -EINVAL;

	/* Each field is shifted into place and masked to its width. */
	ipgifg = (non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
		 IPGIFG_NBTB_CS_IPG_MASK;
	ipgifg |= (non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
		  IPGIFG_NBTB_IPG_MASK;
	ipgifg |= (min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
		  IPGIFG_MIN_IFG_MASK;
	ipgifg |= btb_ipg & IPGIFG_BTB_IPG_MASK;

	out_be32(ipgifg_register, ipgifg);
	return 0;
}
/* Configure 802.3x flow control: pause-frame timing (UEMPR), automatic
 * flow-control mode bits (UPSMR) and Rx/Tx flow-control enables
 * (MACCFG1).  Non-static: also used by the ethtool code.  Always
 * returns 0.
 */
int init_flow_control_params(u32 automatic_flow_control_mode,
			     int rx_flow_control_enable,
			     int tx_flow_control_enable,
			     u16 pause_period,
			     u16 extension_field,
			     u32 __iomem *upsmr_register,
			     u32 __iomem *uempr_register,
			     u32 __iomem *maccfg1_register)
{
	u32 value = 0;
	/* Set UEMPR register: pause period and extended pause time */
	value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
	value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
	out_be32(uempr_register, value);
	/* Set UPSMR register: OR in the requested automatic mode bits */
	setbits32(upsmr_register, automatic_flow_control_mode);
	/* Read-modify-write MACCFG1 to enable Rx/Tx flow control */
	value = in_be32(maccfg1_register);
	if (rx_flow_control_enable)
		value |= MACCFG1_FLOW_RX;
	if (tx_flow_control_enable)
		value |= MACCFG1_FLOW_TX;
	out_be32(maccfg1_register, value);
	return 0;
}
/* Configure hardware statistics gathering: optionally enable the HSE
 * bit in UPSMR, always clear the counters via UESCR_CLRCNT, and
 * optionally enable auto-zero-on-read.  Always returns 0.
 */
static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
					     int auto_zero_hardware_statistics,
					     u32 __iomem *upsmr_register,
					     u16 __iomem *uescr_register)
{
	u16 uescr;

	/* Enable hardware statistics gathering if requested */
	if (enable_hardware_statistics)
		setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);

	/* Clear hardware statistics counters, and automatically zero
	   them on read if requested */
	uescr = in_be16(uescr_register) | UESCR_CLRCNT;
	if (auto_zero_hardware_statistics)
		uescr |= UESCR_AUTOZ;
	out_be16(uescr_register, uescr);

	return 0;
}
/* Configure firmware (RMON) statistics gathering.  For each enabled
 * direction, point the hardware at the per-direction statistics
 * structure and set the RMON-enable bit in TEMODER/REMODER.
 * Always returns 0.
 */
static int init_firmware_statistics_gathering_mode(int
		enable_tx_firmware_statistics,
		int enable_rx_firmware_statistics,
		u32 __iomem *tx_rmon_base_ptr,
		u32 tx_firmware_statistics_structure_address,
		u32 __iomem *rx_rmon_base_ptr,
		u32 rx_firmware_statistics_structure_address,
		u16 __iomem *temoder_register,
		u32 __iomem *remoder_register)
{
	/* Note: this function does not check if */
	/* the parameters it receives are NULL */

	if (enable_tx_firmware_statistics) {
		out_be32(tx_rmon_base_ptr,
			 tx_firmware_statistics_structure_address);
		setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
	}

	if (enable_rx_firmware_statistics) {
		out_be32(rx_rmon_base_ptr,
			 rx_firmware_statistics_structure_address);
		setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
	}

	return 0;
}
/* Program the MAC station address into MACSTNADDR1/MACSTNADDR2.
 *
 * For a station address of 0x12345678ABCD, 0x12 is byte 0, 0x34 is
 * byte 1, ..., 0xCD is byte 5.  The hardware stores the address in
 * reverse byte order:
 *   MACSTNADDR1 = byte5 | byte4 | byte3 | byte2  (MSB..LSB)
 *   MACSTNADDR2 = byte1 | byte0 | reserved       (MSB..LSB)
 * Always returns 0.
 */
static int init_mac_station_addr_regs(u8 address_byte_0,
				      u8 address_byte_1,
				      u8 address_byte_2,
				      u8 address_byte_3,
				      u8 address_byte_4,
				      u8 address_byte_5,
				      u32 __iomem *macstnaddr1_register,
				      u32 __iomem *macstnaddr2_register)
{
	u32 value;

	value = (u32) address_byte_2 |
		((u32) address_byte_3 << 8) |
		((u32) address_byte_4 << 16) |
		((u32) address_byte_5 << 24);
	out_be32(macstnaddr1_register, value);

	value = ((u32) address_byte_0 << 16) |
		((u32) address_byte_1 << 24);
	out_be32(macstnaddr2_register, value);

	return 0;
}
/* Enable or disable frame-length checking (MACCFG2_LC).
 * Always returns 0.
 */
static int init_check_frame_length_mode(int length_check,
					u32 __iomem *maccfg2_register)
{
	if (length_check)
		setbits32(maccfg2_register, MACCFG2_LC);
	else
		clrbits32(maccfg2_register, MACCFG2_LC);
	return 0;
}
/* Program the preamble length field of MACCFG2.  The hardware only
 * supports lengths of 3..7 bytes; returns -EINVAL outside that range,
 * 0 on success.
 */
static int init_preamble_length(u8 preamble_length,
				u32 __iomem *maccfg2_register)
{
	if (preamble_length < 3 || preamble_length > 7)
		return -EINVAL;

	clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
			preamble_length << MACCFG2_PREL_SHIFT);
	return 0;
}
/* Configure the Rx policy bits of UPSMR: broadcast rejection (BRO),
 * short-frame reception (RSH) and promiscuous mode (PRO).
 * Always returns 0.
 */
static int init_rx_parameters(int reject_broadcast,
			      int receive_short_frames,
			      int promiscuous, u32 __iomem *upsmr_register)
{
	u32 upsmr;

	/* Clear all three policy bits, then set the requested ones. */
	upsmr = in_be32(upsmr_register);
	upsmr &= ~(UCC_GETH_UPSMR_BRO | UCC_GETH_UPSMR_RSH |
		   UCC_GETH_UPSMR_PRO);
	if (reject_broadcast)
		upsmr |= UCC_GETH_UPSMR_BRO;
	if (receive_short_frames)
		upsmr |= UCC_GETH_UPSMR_RSH;
	if (promiscuous)
		upsmr |= UCC_GETH_UPSMR_PRO;
	out_be32(upsmr_register, upsmr);

	return 0;
}
/* Program the maximum Rx buffer length (MRBLR).  The hardware requires
 * a non-zero multiple of UCC_GETH_MRBLR_ALIGNMENT (128); returns
 * -EINVAL otherwise, 0 on success.
 */
static int init_max_rx_buff_len(u16 max_rx_buf_len,
				u16 __iomem *mrblr_register)
{
	if (max_rx_buf_len == 0 ||
	    (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT) != 0)
		return -EINVAL;

	out_be16(mrblr_register, max_rx_buf_len);
	return 0;
}
/* Program the minimum frame length (MINFLR).  The minimum must be
 * strictly less than the maximum Rx buffer length minus 4 (already
 * programmed in MRBLR); returns -EINVAL otherwise, 0 on success.
 */
static int init_min_frame_len(u16 min_frame_length,
			      u16 __iomem *minflr_register,
			      u16 __iomem *mrblr_register)
{
	u16 mrblr_value;

	mrblr_value = in_be16(mrblr_register);
	if (min_frame_length >= (mrblr_value - 4))
		return -EINVAL;

	out_be16(minflr_register, min_frame_length);
	return 0;
}
static int adjust_enet_interface(struct ucc_geth_private *ugeth)
{
struct ucc_geth_info *ug_info;
struct ucc_geth __iomem *ug_regs;
struct ucc_fast __iomem *uf_regs;
int ret_val;
u32 upsmr, maccfg2;
u16 value;
ugeth_vdbg("%s: IN", __func__);
ug_info = ugeth->ug_info;
ug_regs = ugeth->ug_regs;
uf_regs = ugeth->uccf->uf_regs;
/* Set MACCFG2 */
maccfg2 = in_be32(&ug_regs->maccfg2);
maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
if ((ugeth->max_speed == SPEED_10) ||
(ugeth->max_speed == SPEED_100))
maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
else if (ugeth->max_speed == SPEED_1000)
maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
maccfg2 |= ug_info->padAndCrc;
out_be32(&ug_regs->maccfg2, maccfg2);
/* Set UPSMR */
upsmr = in_be32(&uf_regs->upsmr);
upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
upsmr |= UCC_GETH_UPSMR_RPM;
switch (ugeth->max_speed) {
case SPEED_10:
upsmr |= UCC_GETH_UPSMR_R10M;
/* FALLTHROUGH */
case SPEED_100:
if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
upsmr |= UCC_GETH_UPSMR_RMM;
}
}
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UCC_GETH_UPSMR_TBIM;
}
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII))
upsmr |= UCC_GETH_UPSMR_SGMM;
out_be32(&uf_regs->upsmr, upsmr);
/* Disable autonegotiation in tbi mode, because by default it
comes up in autonegotiation mode. */
/* Note that this depends on proper setting in utbipar register. */
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
struct ucc_geth_info *ug_info = ugeth->ug_info;
struct phy_device *tbiphy;
if (!ug_info->tbi_node)
ugeth_warn("TBI mode requires that the device "
"tree specify a tbi-handle\n");
tbiphy = of_phy_find_device(ug_info->tbi_node);
if (!tbiphy)
ugeth_warn("Could not get TBI device\n");
value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */
phy_write(tbiphy, ENET_TBI_MII_CR, value);
}
init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
if (ret_val != 0) {
if (netif_msg_probe(ugeth))
ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
__func__);
return ret_val;
}
return 0;
}
/* Gracefully stop transmission: issue the QE GRACEFUL_STOP_TX host
 * command and poll UCCE for the GRA event (up to ~100 ms).  The GRA
 * interrupt is masked first so the event is observed only by polling.
 * Marks uccf->stopped_tx even if the event never arrives (the loop
 * simply times out).  Always returns 0.
 */
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;
	u32 temp;
	int i = 10;	/* up to 10 polls of 10 ms each */

	uccf = ugeth->uccf;

	/* Mask GRACEFUL STOP TX interrupt bit and clear it */
	clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
	out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA);	/* clear by writing 1 */

	/* Issue host command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		msleep(10);
		temp = in_be32(uccf->p_ucce);
	} while (!(temp & UCC_GETH_UCCE_GRA) && --i);

	uccf->stopped_tx = 1;

	return 0;
}
/* Gracefully stop reception: repeatedly issue the QE GRACEFUL_STOP_RX
 * host command and poll the rxgstpack acknowledge byte until the
 * hardware asserts GRACEFUL_STOP_ACKNOWLEDGE_RX (up to ~100 ms), as
 * required by the spec.  Marks uccf->stopped_rx even on timeout.
 * Always returns 0.
 */
static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;
	u8 temp;
	int i = 10;	/* up to 10 command/poll cycles of 10 ms each */

	uccf = ugeth->uccf;

	/* Clear acknowledge bit */
	temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
	temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);

	/* Keep issuing command and checking acknowledge bit until
	   it is asserted, according to spec */
	do {
		/* Issue host command */
		cecr_subblock =
		    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
						ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     QE_CR_PROTOCOL_ETHERNET, 0);
		msleep(10);
		temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
	} while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);

	uccf->stopped_rx = 1;

	return 0;
}
/* Restart transmission after a graceful stop by issuing the QE
 * RESTART_TX host command and clearing the stopped_tx flag.
 * Always returns 0.
 */
static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
{
	u32 cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
	ugeth->uccf->stopped_tx = 0;

	return 0;
}
/* Restart reception after a graceful stop by issuing the QE
 * RESTART_RX host command and clearing the stopped_rx flag.
 * Always returns 0.
 */
static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
{
	u32 cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
	ugeth->uccf->stopped_rx = 0;

	return 0;
}
/* Enable the controller in the given direction(s).  If a direction was
 * previously gracefully stopped (and is not currently enabled), issue
 * the corresponding QE restart command before enabling the fast UCC.
 * Returns 0 on success, -EINVAL if the UCC number is out of range.
 */
static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
	struct ucc_fast_private *uccf;
	int enabled_tx, enabled_rx;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: ucc_num out of range.", __func__);
		return -EINVAL;
	}

	enabled_tx = uccf->enabled_tx;
	enabled_rx = uccf->enabled_rx;

	/* Get Tx and Rx going again, in case this channel was actively
	   disabled. */
	if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
		ugeth_restart_tx(ugeth);
	if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
		ugeth_restart_rx(ugeth);

	ucc_fast_enable(uccf, mode);	/* OK to do even if not disabled */

	return 0;
}
/* Disable the controller in the given direction(s).  Each direction
 * that is enabled and not already stopped is first brought down with a
 * graceful-stop command, then the fast UCC is disabled.
 * Returns 0 on success, -EINVAL if the UCC number is out of range.
 */
static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
	struct ucc_fast_private *uccf;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: ucc_num out of range.", __func__);
		return -EINVAL;
	}

	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
		ugeth_graceful_stop_tx(ugeth);

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
		ugeth_graceful_stop_rx(ugeth);

	ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */

	return 0;
}
/* Quiesce all driver activity so the controller can be reconfigured
 * without taking ugeth->lock.  The order matters: detach, drain Tx,
 * mask the interrupt, then stop NAPI.  Reversed by ugeth_activate().
 */
static void ugeth_quiesce(struct ucc_geth_private *ugeth)
{
	/* Prevent any further xmits, plus detach the device. */
	netif_device_detach(ugeth->ndev);

	/* Wait for any current xmits to finish. */
	netif_tx_disable(ugeth->ndev);

	/* Disable the interrupt to avoid NAPI rescheduling. */
	disable_irq(ugeth->ug_info->uf_info.irq);

	/* Stop NAPI, and possibly wait for its completion. */
	napi_disable(&ugeth->napi);
}
/* Undo ugeth_quiesce() in reverse order: restart NAPI, unmask the
 * interrupt, and re-attach the device.
 */
static void ugeth_activate(struct ucc_geth_private *ugeth)
{
	napi_enable(&ugeth->napi);
	enable_irq(ugeth->ug_info->uf_info.irq);
	netif_device_attach(ugeth->ndev);
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the ugeth structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_geth __iomem *ug_regs;
	struct ucc_fast __iomem *uf_regs;
	struct phy_device *phydev = ugeth->phydev;
	int new_state = 0;	/* set when any of duplex/speed/link changed */

	ug_regs = ugeth->ug_regs;
	uf_regs = ugeth->uccf->uf_regs;

	if (phydev->link) {
		u32 tempval = in_be32(&ug_regs->maccfg2);
		u32 upsmr = in_be32(&uf_regs->upsmr);
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != ugeth->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FDX);
			else
				tempval |= MACCFG2_FDX;
			ugeth->oldduplex = phydev->duplex;
		}

		if (phydev->speed != ugeth->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_1000:
				/* gigabit: byte (GMII) interface mode */
				tempval = ((tempval &
					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
					   MACCFG2_INTERFACE_MODE_BYTE);
				break;
			case SPEED_100:
			case SPEED_10:
				/* 10/100: nibble (MII) interface mode */
				tempval = ((tempval &
					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
					   MACCFG2_INTERFACE_MODE_NIBBLE);
				/* if reduced mode, re-set UPSMR.R10M */
				if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
					if (phydev->speed == SPEED_10)
						upsmr |= UCC_GETH_UPSMR_R10M;
					else
						upsmr &= ~UCC_GETH_UPSMR_R10M;
				}
				break;
			default:
				if (netif_msg_link(ugeth))
					ugeth_warn(
						"%s: Ack!  Speed (%d) is not 10/100/1000!",
						dev->name, phydev->speed);
				break;
			}
			ugeth->oldspeed = phydev->speed;
		}

		if (!ugeth->oldlink) {
			new_state = 1;
			ugeth->oldlink = 1;
		}

		if (new_state) {
			/*
			 * To change the MAC configuration we need to disable
			 * the controller. To do so, we have to either grab
			 * ugeth->lock, which is a bad idea since 'graceful
			 * stop' commands might take quite a while, or we can
			 * quiesce driver's activity.
			 */
			ugeth_quiesce(ugeth);
			ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);

			out_be32(&ug_regs->maccfg2, tempval);
			out_be32(&uf_regs->upsmr, upsmr);

			ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
			ugeth_activate(ugeth);
		}
	} else if (ugeth->oldlink) {
		/* Link went down: reset cached state so the next link-up
		 * reprograms everything. */
		new_state = 1;
		ugeth->oldlink = 0;
		ugeth->oldspeed = 0;
		ugeth->oldduplex = -1;
	}

	if (new_state && netif_msg_link(ugeth))
		phy_print_status(phydev);
}
/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the UTBIPA register.  We assume
 * that the UTBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void uec_configure_serdes(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_geth_info *ug_info = ugeth->ug_info;
	struct phy_device *tbiphy;

	if (!ug_info->tbi_node) {
		dev_warn(&dev->dev, "SGMII mode requires that the device "
			"tree specify a tbi-handle\n");
		return;
	}

	/* NOTE(review): of_phy_find_device() takes a device reference
	 * that is never released here — confirm whether a put is needed
	 * on this kernel version. */
	tbiphy = of_phy_find_device(ug_info->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);

	phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
}
/* Configure the PHY for dev.
 * Attaches to the PHY described in the device tree (or a fixed link as
 * a fallback), configures the SERDES for SGMII, and restricts the
 * advertised link modes to what the controller supports.
 * returns 0 if success.  -ENODEV if no PHY could be attached.
 */
static int init_phy(struct net_device *dev)
{
	struct ucc_geth_private *priv = netdev_priv(dev);
	struct ucc_geth_info *ug_info = priv->ug_info;
	struct phy_device *phydev;

	/* Reset cached link state so adjust_link() reprograms on first
	 * link-up. */
	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
				priv->phy_interface);
	if (!phydev)
		phydev = of_phy_connect_fixed_link(dev, &adjust_link,
						   priv->phy_interface);
	if (!phydev) {
		dev_err(&dev->dev, "Could not attach to PHY\n");
		return -ENODEV;
	}

	if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
		uec_configure_serdes(dev);

	/* NOTE(review): phydev->supported is masked with ADVERTISED_*
	 * constants; the matching namespace is SUPPORTED_*.  The bit
	 * values coincide in ethtool.h, so behavior is correct, but
	 * consider switching to SUPPORTED_* for clarity. */
	phydev->supported &= (ADVERTISED_10baseT_Half |
				 ADVERTISED_10baseT_Full |
				 ADVERTISED_100baseT_Half |
				 ADVERTISED_100baseT_Full);

	if (priv->max_speed == SPEED_1000)
		phydev->supported |= ADVERTISED_1000baseT_Full;

	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}
/* Dump the fast-UCC registers, geth registers and BD rings for
 * debugging.  Compiles to a no-op unless DEBUG is defined.
 */
static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
{
#ifdef DEBUG
	ucc_fast_dump_regs(ugeth->uccf);
	dump_regs(ugeth);
	dump_bds(ugeth);
#endif
}
/* Clear the group or individual address hash filter.  The controller
 * is temporarily disabled (in whichever directions were enabled) while
 * the hash registers are zeroed, the queued address containers are
 * freed, and the per-type counter is reset.
 * Returns 0 on success, -EINVAL for an unknown address type.
 */
static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
						       ugeth,
						       enum enet_addr_type
						       enet_addr_type)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	struct ucc_fast_private *uccf;
	enum comm_dir comm_dir;
	struct list_head *p_lh;
	u16 i, num;
	u32 __iomem *addr_h;
	u32 __iomem *addr_l;
	u8 *p_counter;

	uccf = ugeth->uccf;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *)
	    ugeth->p_rx_glbl_pram->addressfiltering;

	/* Pick the register pair, list and counter for the address type. */
	if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
		addr_h = &(p_82xx_addr_filt->gaddr_h);
		addr_l = &(p_82xx_addr_filt->gaddr_l);
		p_lh = &ugeth->group_hash_q;
		p_counter = &(ugeth->numGroupAddrInHash);
	} else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
		addr_h = &(p_82xx_addr_filt->iaddr_h);
		addr_l = &(p_82xx_addr_filt->iaddr_l);
		p_lh = &ugeth->ind_hash_q;
		p_counter = &(ugeth->numIndAddrInHash);
	} else
		return -EINVAL;

	/* Disable whichever directions are currently enabled so the
	 * filter can be changed safely. */
	comm_dir = 0;
	if (uccf->enabled_tx)
		comm_dir |= COMM_DIR_TX;
	if (uccf->enabled_rx)
		comm_dir |= COMM_DIR_RX;
	if (comm_dir)
		ugeth_disable(ugeth, comm_dir);

	/* Clear the hash table. */
	out_be32(addr_h, 0x00000000);
	out_be32(addr_l, 0x00000000);

	if (!p_lh)
		return 0;

	num = *p_counter;

	/* Delete all remaining CQ elements */
	for (i = 0; i < num; i++)
		put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));

	*p_counter = 0;

	if (comm_dir)
		ugeth_enable(ugeth, comm_dir);

	return 0;
}
/* Release one exact-match (paddr) filter slot: mark it free in the
 * driver's bookkeeping and clear it in hardware.  Returns the result
 * of the hardware clear.
 */
static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
						    u8 paddr_num)
{
	ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
	return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
}
/* Free every resource ucc_geth_startup() allocated: the fast-UCC
 * context, all MURAM parameter areas, the init-enet shadow, the Tx/Rx
 * BD rings (unmapping and freeing any skbs still attached), the hash
 * filter queues, and the register mapping.  Each pointer is NULLed
 * after freeing so the function is safe to call repeatedly and after
 * partial initialization.
 */
static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
{
	u16 i, j;
	u8 __iomem *bd;

	if (!ugeth)
		return;

	if (ugeth->uccf) {
		ucc_fast_free(ugeth->uccf);
		ugeth->uccf = NULL;
	}

	/* MURAM parameter areas, freed by their allocation offsets. */
	if (ugeth->p_thread_data_tx) {
		qe_muram_free(ugeth->thread_dat_tx_offset);
		ugeth->p_thread_data_tx = NULL;
	}
	if (ugeth->p_thread_data_rx) {
		qe_muram_free(ugeth->thread_dat_rx_offset);
		ugeth->p_thread_data_rx = NULL;
	}
	if (ugeth->p_exf_glbl_param) {
		qe_muram_free(ugeth->exf_glbl_param_offset);
		ugeth->p_exf_glbl_param = NULL;
	}
	if (ugeth->p_rx_glbl_pram) {
		qe_muram_free(ugeth->rx_glbl_pram_offset);
		ugeth->p_rx_glbl_pram = NULL;
	}
	if (ugeth->p_tx_glbl_pram) {
		qe_muram_free(ugeth->tx_glbl_pram_offset);
		ugeth->p_tx_glbl_pram = NULL;
	}
	if (ugeth->p_send_q_mem_reg) {
		qe_muram_free(ugeth->send_q_mem_reg_offset);
		ugeth->p_send_q_mem_reg = NULL;
	}
	if (ugeth->p_scheduler) {
		qe_muram_free(ugeth->scheduler_offset);
		ugeth->p_scheduler = NULL;
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
		ugeth->p_tx_fw_statistics_pram = NULL;
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
		ugeth->p_rx_fw_statistics_pram = NULL;
	}
	if (ugeth->p_rx_irq_coalescing_tbl) {
		qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
		ugeth->p_rx_irq_coalescing_tbl = NULL;
	}
	if (ugeth->p_rx_bd_qs_tbl) {
		qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
		ugeth->p_rx_bd_qs_tbl = NULL;
	}
	if (ugeth->p_init_enet_param_shadow) {
		return_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->
					   rxthread[0]),
					 ENET_INIT_PARAM_MAX_ENTRIES_RX,
					 ugeth->ug_info->riscRx, 1);
		return_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->
					   txthread[0]),
					 ENET_INIT_PARAM_MAX_ENTRIES_TX,
					 ugeth->ug_info->riscTx, 0);
		kfree(ugeth->p_init_enet_param_shadow);
		ugeth->p_init_enet_param_shadow = NULL;
	}
	/* Tx BD rings: unmap and free any skbs still queued, then free
	 * the ring memory (system memory or MURAM). */
	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		bd = ugeth->p_tx_bd_ring[i];
		if (!bd)
			continue;
		for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
			if (ugeth->tx_skbuff[i][j]) {
				dma_unmap_single(ugeth->dev,
						 in_be32(&((struct qe_bd __iomem *)bd)->buf),
						 (in_be32((u32 __iomem *)bd) &
						  BD_LENGTH_MASK),
						 DMA_TO_DEVICE);
				dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
				ugeth->tx_skbuff[i][j] = NULL;
			}
			/* BUGFIX: advance to the next descriptor.  The
			 * old code never advanced bd here (unlike the Rx
			 * loop below), so every skb was unmapped with the
			 * buffer address and length of BD 0. */
			bd += sizeof(struct qe_bd);
		}
		kfree(ugeth->tx_skbuff[i]);
		if (ugeth->p_tx_bd_ring[i]) {
			if (ugeth->ug_info->uf_info.bd_mem_part ==
			    MEM_PART_SYSTEM)
				kfree((void *)ugeth->tx_bd_ring_offset[i]);
			else if (ugeth->ug_info->uf_info.bd_mem_part ==
				 MEM_PART_MURAM)
				qe_muram_free(ugeth->tx_bd_ring_offset[i]);
			ugeth->p_tx_bd_ring[i] = NULL;
		}
	}
	/* Rx BD rings: same pattern; Rx buffers were mapped with a
	 * fixed size (max_rx_buf_length + alignment). */
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			/* Return existing data buffers in ring */
			bd = ugeth->p_rx_bd_ring[i];
			for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
				if (ugeth->rx_skbuff[i][j]) {
					dma_unmap_single(ugeth->dev,
						in_be32(&((struct qe_bd __iomem *)bd)->buf),
						ugeth->ug_info->
						uf_info.max_rx_buf_length +
						UCC_GETH_RX_DATA_BUF_ALIGNMENT,
						DMA_FROM_DEVICE);
					dev_kfree_skb_any(
						ugeth->rx_skbuff[i][j]);
					ugeth->rx_skbuff[i][j] = NULL;
				}
				bd += sizeof(struct qe_bd);
			}
			kfree(ugeth->rx_skbuff[i]);
			if (ugeth->ug_info->uf_info.bd_mem_part ==
			    MEM_PART_SYSTEM)
				kfree((void *)ugeth->rx_bd_ring_offset[i]);
			else if (ugeth->ug_info->uf_info.bd_mem_part ==
				 MEM_PART_MURAM)
				qe_muram_free(ugeth->rx_bd_ring_offset[i]);
			ugeth->p_rx_bd_ring[i] = NULL;
		}
	}
	/* Drain the hash-filter address queues. */
	while (!list_empty(&ugeth->group_hash_q))
		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
					(dequeue(&ugeth->group_hash_q)));
	while (!list_empty(&ugeth->ind_hash_q))
		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
					(dequeue(&ugeth->ind_hash_q)));
	if (ugeth->ug_regs) {
		iounmap(ugeth->ug_regs);
		ugeth->ug_regs = NULL;
	}
	skb_queue_purge(&ugeth->rx_recycle);
}
/* ndo_set_rx_mode handler: program promiscuous mode, all-multicast
 * mode, or the multicast hash filter from the device's address list.
 */
static void ucc_geth_set_multi(struct net_device *dev)
{
	struct ucc_geth_private *ugeth;
	struct netdev_hw_addr *ha;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;

	ugeth = netdev_priv(dev);

	uf_regs = ugeth->uccf->uf_regs;

	if (dev->flags & IFF_PROMISC) {
		setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
	} else {
		clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);

		p_82xx_addr_filt =
		    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
		    p_rx_glbl_pram->addressfiltering;

		if (dev->flags & IFF_ALLMULTI) {
			/* Catch all multicast addresses, so set the
			 * filter to all 1's.
			 */
			out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
			out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
		} else {
			/* Clear filter and add the addresses in the list.
			 */
			out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
			out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);

			netdev_for_each_mc_addr(ha, dev) {
				/* Only support group multicast for now.
				 */
				if (!is_multicast_ether_addr(ha->addr))
					continue;

				/* Ask CPM to run CRC and set bit in
				 * filter mask.
				 */
				hw_add_addr_in_hash(ugeth, ha->addr);
			}
		}
	}
}
/* Bring the controller fully down: stop the PHY, gracefully disable
 * both directions, mask and clear all interrupts, disable the MAC's
 * Rx/Tx, and free all startup-time resources via ucc_geth_memclean().
 */
static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
	struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
	struct phy_device *phydev = ugeth->phydev;

	ugeth_vdbg("%s: IN", __func__);

	/*
	 * Tell the kernel the link is down.
	 * Must be done before disabling the controller
	 * or deadlock may happen.
	 */
	phy_stop(phydev);

	/* Disable the controller */
	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);

	/* Mask all interrupts */
	out_be32(ugeth->uccf->p_uccm, 0x00000000);

	/* Clear all interrupts */
	out_be32(ugeth->uccf->p_ucce, 0xffffffff);

	/* Disable Rx and Tx */
	clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);

	ucc_geth_memclean(ugeth);
}
/* Validate the configuration in ugeth->ug_info, derive the interrupt
 * mask for the configured queues, initialize the fast-UCC block, and
 * map the controller registers.  Returns 0 on success, -EINVAL for a
 * bad configuration, or -ENOMEM on init/ioremap failure.
 */
static int ucc_struct_init(struct ucc_geth_private *ugeth)
{
	struct ucc_geth_info *ug_info;
	struct ucc_fast_info *uf_info;
	int i;

	ug_info = ugeth->ug_info;
	uf_info = &ug_info->uf_info;

	/* BD rings must live in system memory or MURAM. */
	if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
	      (uf_info->bd_mem_part == MEM_PART_MURAM))) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Bad memory partition value.",
					__func__);
		return -EINVAL;
	}

	/* Rx BD lengths: each ring must be a multiple of the required
	 * alignment and at least the hardware minimum. */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
		    (ug_info->bdRingLenRx[i] %
		     UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
			if (netif_msg_probe(ugeth))
				ugeth_err
				    ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
					__func__);
			return -EINVAL;
		}
	}

	/* Tx BD lengths */
	for (i = 0; i < ug_info->numQueuesTx; i++) {
		if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
			if (netif_msg_probe(ugeth))
				ugeth_err
				    ("%s: Tx BD ring length must be no smaller than 2.",
				     __func__);
			return -EINVAL;
		}
	}

	/* mrblr: maximum Rx buffer length, non-zero multiple of 128 */
	if ((uf_info->max_rx_buf_length == 0) ||
	    (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
		if (netif_msg_probe(ugeth))
			ugeth_err
			    ("%s: max_rx_buf_length must be non-zero multiple of 128.",
			     __func__);
		return -EINVAL;
	}

	/* num Tx queues */
	if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: number of tx queues too large.", __func__);
		return -EINVAL;
	}

	/* num Rx queues */
	if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: number of rx queues too large.", __func__);
		return -EINVAL;
	}

	/* l2qt: every VLAN-priority mapping must target a real Rx queue */
	for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
		if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
			if (netif_msg_probe(ugeth))
				ugeth_err
				    ("%s: VLAN priority table entry must not be"
					" larger than number of Rx queues.",
				     __func__);
			return -EINVAL;
		}
	}

	/* l3qt: every IP-priority mapping must target a real Rx queue */
	for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
		if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
			if (netif_msg_probe(ugeth))
				ugeth_err
				    ("%s: IP priority table entry must not be"
					" larger than number of Rx queues.",
				     __func__);
			return -EINVAL;
		}
	}

	if (ug_info->cam && !ug_info->ecamptr) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
				  __func__);
		return -EINVAL;
	}

	/* Extended parsing cannot be combined with multiple station
	 * addresses. */
	if ((ug_info->numStationAddresses !=
	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1) &&
	    ug_info->rxExtendedFiltering) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Number of station addresses greater than 1 "
				  "not allowed in extended parsing mode.",
				  __func__);
		return -EINVAL;
	}

	/* Generate uccm_mask for receive: error events plus one Rx-frame
	 * and one Tx-buffer event bit per configured queue. */
	uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
	for (i = 0; i < ug_info->numQueuesRx; i++)
		uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);

	for (i = 0; i < ug_info->numQueuesTx; i++)
		uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
	/* Initialize the general fast UCC block. */
	if (ucc_fast_init(uf_info, &ugeth->uccf)) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Failed to init uccf.", __func__);
		return -ENOMEM;
	}

	/* read the number of risc engines, update the riscTx and riscRx
	 * if there are 4 riscs in QE
	 */
	if (qe_get_num_of_risc() == 4) {
		ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS;
		ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS;
	}

	ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
	if (!ugeth->ug_regs) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Failed to ioremap regs.", __func__);
		return -ENOMEM;
	}

	skb_queue_head_init(&ugeth->rx_recycle);

	return 0;
}
/*
 * ucc_geth_startup - allocate and program all per-channel QE/UCC resources
 *
 * Allocates the Tx/Rx buffer-descriptor rings (from system memory or MURAM,
 * depending on uf_info->bd_mem_part), the Tx/Rx global parameter RAM pages,
 * the optional scheduler and firmware-statistics PRAM areas, the interrupt
 * coalescing and Rx BD queue tables, and the extended-filtering / 82xx-style
 * address-filtering structures.  It then fills a shadow InitEnet parameter
 * structure (kept so the resources can be freed later), copies it into a
 * temporary DPRAM block and issues the QE_INIT_TX_RX command to the QE.
 *
 * Returns 0 on success, -EINVAL on bad configuration, -ENOMEM on any
 * allocation failure.  NOTE(review): on failure the function returns
 * without unwinding earlier allocations; presumably the caller releases
 * them via the channel teardown path - confirm against the caller.
 */
static int ucc_geth_startup(struct ucc_geth_private *ugeth)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	struct ucc_geth_init_pram __iomem *p_init_enet_pram;
	struct ucc_fast_private *uccf;
	struct ucc_geth_info *ug_info;
	struct ucc_fast_info *uf_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth __iomem *ug_regs;
	int ret_val = -EINVAL;
	u32 remoder = UCC_GETH_REMODER_INIT;
	u32 init_enet_pram_offset, cecr_subblock, command;
	u32 ifstat, i, j, size, l2qt, l3qt, length;
	u16 temoder = UCC_GETH_TEMODER_INIT;
	u16 test;
	u8 function_code = 0;
	u8 __iomem *bd;
	u8 __iomem *endOfRing;
	u8 numThreadsRxNumerical, numThreadsTxNumerical;
	ugeth_vdbg("%s: IN", __func__);
	uccf = ugeth->uccf;
	ug_info = ugeth->ug_info;
	uf_info = &ug_info->uf_info;
	uf_regs = uccf->uf_regs;
	ug_regs = ugeth->ug_regs;
	/* Translate the numThreadsRx enum into a plain count for sizing the
	   per-thread Rx parameter RAM allocations below. */
	switch (ug_info->numThreadsRx) {
	case UCC_GETH_NUM_OF_THREADS_1:
		numThreadsRxNumerical = 1;
		break;
	case UCC_GETH_NUM_OF_THREADS_2:
		numThreadsRxNumerical = 2;
		break;
	case UCC_GETH_NUM_OF_THREADS_4:
		numThreadsRxNumerical = 4;
		break;
	case UCC_GETH_NUM_OF_THREADS_6:
		numThreadsRxNumerical = 6;
		break;
	case UCC_GETH_NUM_OF_THREADS_8:
		numThreadsRxNumerical = 8;
		break;
	default:
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Bad number of Rx threads value.",
				  __func__);
		return -EINVAL;
		break;
	}
	/* Same translation for the Tx thread count. */
	switch (ug_info->numThreadsTx) {
	case UCC_GETH_NUM_OF_THREADS_1:
		numThreadsTxNumerical = 1;
		break;
	case UCC_GETH_NUM_OF_THREADS_2:
		numThreadsTxNumerical = 2;
		break;
	case UCC_GETH_NUM_OF_THREADS_4:
		numThreadsTxNumerical = 4;
		break;
	case UCC_GETH_NUM_OF_THREADS_6:
		numThreadsTxNumerical = 6;
		break;
	case UCC_GETH_NUM_OF_THREADS_8:
		numThreadsTxNumerical = 8;
		break;
	default:
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Bad number of Tx threads value.",
				  __func__);
		return -EINVAL;
		break;
	}
	/* Calculate rx_extended_features */
	ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
		ug_info->ipAddressAlignment ||
		(ug_info->numStationAddresses !=
		 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
	ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
		(ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) ||
		(ug_info->vlanOperationNonTagged !=
		 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
	init_default_reg_vals(&uf_regs->upsmr,
			      &ug_regs->maccfg1, &ug_regs->maccfg2);
	/* Set UPSMR */
	/* For more details see the hardware spec. */
	init_rx_parameters(ug_info->bro,
			   ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
	/* We're going to ignore other registers for now, */
	/* except as needed to get up and running */
	/* Set MACCFG1 */
	/* For more details see the hardware spec. */
	init_flow_control_params(ug_info->aufc,
				 ug_info->receiveFlowControl,
				 ug_info->transmitFlowControl,
				 ug_info->pausePeriod,
				 ug_info->extensionField,
				 &uf_regs->upsmr,
				 &ug_regs->uempr, &ug_regs->maccfg1);
	setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
	/* Set IPGIFG */
	/* For more details see the hardware spec. */
	ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
					      ug_info->nonBackToBackIfgPart2,
					      ug_info->
					      miminumInterFrameGapEnforcement,
					      ug_info->backToBackInterFrameGap,
					      &ug_regs->ipgifg);
	if (ret_val != 0) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: IPGIFG initialization parameter too large.",
				  __func__);
		return ret_val;
	}
	/* Set HAFDUP */
	/* For more details see the hardware spec. */
	ret_val = init_half_duplex_params(ug_info->altBeb,
					  ug_info->backPressureNoBackoff,
					  ug_info->noBackoff,
					  ug_info->excessDefer,
					  ug_info->altBebTruncation,
					  ug_info->maxRetransmission,
					  ug_info->collisionWindow,
					  &ug_regs->hafdup);
	if (ret_val != 0) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Half Duplex initialization parameter too large.",
				  __func__);
		return ret_val;
	}
	/* Set IFSTAT */
	/* For more details see the hardware spec. */
	/* Read only - resets upon read */
	ifstat = in_be32(&ug_regs->ifstat);
	/* Clear UEMPR */
	/* For more details see the hardware spec. */
	out_be32(&ug_regs->uempr, 0);
	/* Set UESCR */
	/* For more details see the hardware spec. */
	init_hw_statistics_gathering_mode((ug_info->statisticsMode &
		UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
		0, &uf_regs->upsmr, &ug_regs->uescr);
	/* Allocate Tx bds */
	for (j = 0; j < ug_info->numQueuesTx; j++) {
		/* Allocate in multiple of
		   UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
		   according to spec */
		length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
			  / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
		    * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
		if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
		    UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
			length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
			u32 align = 4;
			if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
				align = UCC_GETH_TX_BD_RING_ALIGNMENT;
			/* Over-allocate by 'align' bytes and round the
			   kmalloc'ed address up so the ring itself is
			   suitably aligned; the raw offset is kept so the
			   original pointer can be freed later. */
			ugeth->tx_bd_ring_offset[j] =
				(u32) kmalloc((u32) (length + align), GFP_KERNEL);
			if (ugeth->tx_bd_ring_offset[j] != 0)
				ugeth->p_tx_bd_ring[j] =
					(u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
					align) & ~(align - 1));
		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
			ugeth->tx_bd_ring_offset[j] =
			    qe_muram_alloc(length,
					   UCC_GETH_TX_BD_RING_ALIGNMENT);
			if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
				ugeth->p_tx_bd_ring[j] =
				    (u8 __iomem *) qe_muram_addr(ugeth->
							 tx_bd_ring_offset[j]);
		}
		if (!ugeth->p_tx_bd_ring[j]) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
				    ("%s: Can not allocate memory for Tx bd rings.",
				     __func__);
			return -ENOMEM;
		}
		/* Zero unused end of bd ring, according to spec */
		memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
			  ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
			  length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
	}
	/* Allocate Rx bds */
	for (j = 0; j < ug_info->numQueuesRx; j++) {
		length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
			u32 align = 4;
			if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
				align = UCC_GETH_RX_BD_RING_ALIGNMENT;
			ugeth->rx_bd_ring_offset[j] =
				(u32) kmalloc((u32) (length + align), GFP_KERNEL);
			if (ugeth->rx_bd_ring_offset[j] != 0)
				ugeth->p_rx_bd_ring[j] =
					(u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
					align) & ~(align - 1));
		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
			ugeth->rx_bd_ring_offset[j] =
			    qe_muram_alloc(length,
					   UCC_GETH_RX_BD_RING_ALIGNMENT);
			if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
				ugeth->p_rx_bd_ring[j] =
				    (u8 __iomem *) qe_muram_addr(ugeth->
							 rx_bd_ring_offset[j]);
		}
		if (!ugeth->p_rx_bd_ring[j]) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
				    ("%s: Can not allocate memory for Rx bd rings.",
				     __func__);
			return -ENOMEM;
		}
	}
	/* Init Tx bds */
	for (j = 0; j < ug_info->numQueuesTx; j++) {
		/* Setup the skbuff rings */
		ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
					      ugeth->ug_info->bdRingLenTx[j],
					      GFP_KERNEL);
		if (ugeth->tx_skbuff[j] == NULL) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("%s: Could not allocate tx_skbuff",
					  __func__);
			return -ENOMEM;
		}
		for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
			ugeth->tx_skbuff[j][i] = NULL;
		ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
		bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
		for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
			/* clear bd buffer */
			out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
			/* set bd status and length */
			out_be32((u32 __iomem *)bd, 0);
			bd += sizeof(struct qe_bd);
		}
		bd -= sizeof(struct qe_bd);
		/* set bd status and length */
		out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
	}
	/* Init Rx bds */
	for (j = 0; j < ug_info->numQueuesRx; j++) {
		/* Setup the skbuff rings */
		ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
					      ugeth->ug_info->bdRingLenRx[j],
					      GFP_KERNEL);
		if (ugeth->rx_skbuff[j] == NULL) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("%s: Could not allocate rx_skbuff",
					  __func__);
			return -ENOMEM;
		}
		for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
			ugeth->rx_skbuff[j][i] = NULL;
		ugeth->skb_currx[j] = 0;
		bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
		for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
			/* set bd status and length */
			out_be32((u32 __iomem *)bd, R_I);
			/* clear bd buffer */
			out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
			bd += sizeof(struct qe_bd);
		}
		bd -= sizeof(struct qe_bd);
		/* set bd status and length */
		out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
	}
	/*
	 * Global PRAM
	 */
	/* Tx global PRAM */
	/* Allocate global tx parameter RAM page */
	ugeth->tx_glbl_pram_offset =
	    qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
			   UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
			     __func__);
		return -ENOMEM;
	}
	ugeth->p_tx_glbl_pram =
	    (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
							tx_glbl_pram_offset);
	/* Zero out p_tx_glbl_pram */
	memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
	/* Fill global PRAM */
	/* TQPTR */
	/* Size varies with number of Tx threads */
	ugeth->thread_dat_tx_offset =
	    qe_muram_alloc(numThreadsTxNumerical *
			   sizeof(struct ucc_geth_thread_data_tx) +
			   32 * (numThreadsTxNumerical == 1),
			   UCC_GETH_THREAD_DATA_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
			     __func__);
		return -ENOMEM;
	}
	ugeth->p_thread_data_tx =
	    (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
							thread_dat_tx_offset);
	out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
	/* vtagtable */
	for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
		out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
			 ug_info->vtagtable[i]);
	/* iphoffset */
	for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
		out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
		      ug_info->iphoffset[i]);
	/* SQPTR */
	/* Size varies with number of Tx queues */
	ugeth->send_q_mem_reg_offset =
	    qe_muram_alloc(ug_info->numQueuesTx *
			   sizeof(struct ucc_geth_send_queue_qd),
			   UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
			     __func__);
		return -ENOMEM;
	}
	ugeth->p_send_q_mem_reg =
	    (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
			send_q_mem_reg_offset);
	out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
	/* Setup the table */
	/* Assume BD rings are already established */
	for (i = 0; i < ug_info->numQueuesTx; i++) {
		endOfRing =
		    ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
					      1) * sizeof(struct qe_bd);
		if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
				 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
				 last_bd_completed_address,
				 (u32) virt_to_phys(endOfRing));
		} else if (ugeth->ug_info->uf_info.bd_mem_part ==
			   MEM_PART_MURAM) {
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
				 (u32) immrbar_virt_to_phys(ugeth->
							    p_tx_bd_ring[i]));
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
				 last_bd_completed_address,
				 (u32) immrbar_virt_to_phys(endOfRing));
		}
	}
	/* schedulerbasepointer */
	if (ug_info->numQueuesTx > 1) {
	/* scheduler exists only if more than 1 tx queue */
		ugeth->scheduler_offset =
		    qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
				   UCC_GETH_SCHEDULER_ALIGNMENT);
		if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
				 ("%s: Can not allocate DPRAM memory for p_scheduler.",
				     __func__);
			return -ENOMEM;
		}
		ugeth->p_scheduler =
		    (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
							   scheduler_offset);
		out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
			 ugeth->scheduler_offset);
		/* Zero out p_scheduler */
		memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
		/* Set values in scheduler */
		out_be32(&ugeth->p_scheduler->mblinterval,
			 ug_info->mblinterval);
		out_be16(&ugeth->p_scheduler->nortsrbytetime,
			 ug_info->nortsrbytetime);
		out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
		out_8(&ugeth->p_scheduler->strictpriorityq,
		      ug_info->strictpriorityq);
		out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
		out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
		for (i = 0; i < NUM_TX_QUEUES; i++)
			out_8(&ugeth->p_scheduler->weightfactor[i],
			      ug_info->weightfactor[i]);
		/* Set pointers to cpucount registers in scheduler */
		ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
		ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
		ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
		ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
		ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
		ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
		ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
		ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
	}
	/* schedulerbasepointer */
	/* TxRMON_PTR (statistics) */
	if (ug_info->
	    statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
		ugeth->tx_fw_statistics_pram_offset =
		    qe_muram_alloc(sizeof
				   (struct ucc_geth_tx_firmware_statistics_pram),
				   UCC_GETH_TX_STATISTICS_ALIGNMENT);
		if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
				    ("%s: Can not allocate DPRAM memory for"
				     " p_tx_fw_statistics_pram.",
				     __func__);
			return -ENOMEM;
		}
		ugeth->p_tx_fw_statistics_pram =
		    (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
		    qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
		/* Zero out p_tx_fw_statistics_pram */
		memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
		       0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
	}
	/* temoder */
	/* Already has speed set */
	if (ug_info->numQueuesTx > 1)
		temoder |= TEMODER_SCHEDULER_ENABLE;
	if (ug_info->ipCheckSumGenerate)
		temoder |= TEMODER_IP_CHECKSUM_GENERATE;
	temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
	out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
	/* Read-back; presumably forces the write to post - confirm */
	test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
	/* Function code register value to be used later */
	function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
	/* Required for QE */
	/* function code register */
	out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
	/* Rx global PRAM */
	/* Allocate global rx parameter RAM page */
	ugeth->rx_glbl_pram_offset =
	    qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
			   UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
			     __func__);
		return -ENOMEM;
	}
	ugeth->p_rx_glbl_pram =
	    (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
							rx_glbl_pram_offset);
	/* Zero out p_rx_glbl_pram */
	memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
	/* Fill global PRAM */
	/* RQPTR */
	/* Size varies with number of Rx threads */
	ugeth->thread_dat_rx_offset =
	    qe_muram_alloc(numThreadsRxNumerical *
			   sizeof(struct ucc_geth_thread_data_rx),
			   UCC_GETH_THREAD_DATA_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
			     __func__);
		return -ENOMEM;
	}
	ugeth->p_thread_data_rx =
	    (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
							thread_dat_rx_offset);
	out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
	/* typeorlen */
	out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
	/* rxrmonbaseptr (statistics) */
	if (ug_info->
	    statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
		ugeth->rx_fw_statistics_pram_offset =
		    qe_muram_alloc(sizeof
				   (struct ucc_geth_rx_firmware_statistics_pram),
				   UCC_GETH_RX_STATISTICS_ALIGNMENT);
		if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
					("%s: Can not allocate DPRAM memory for"
					 " p_rx_fw_statistics_pram.", __func__);
			return -ENOMEM;
		}
		ugeth->p_rx_fw_statistics_pram =
		    (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
		    qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
		/* Zero out p_rx_fw_statistics_pram */
		memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
		       sizeof(struct ucc_geth_rx_firmware_statistics_pram));
	}
	/* intCoalescingPtr */
	/* Size varies with number of Rx queues */
	ugeth->rx_irq_coalescing_tbl_offset =
	    qe_muram_alloc(ug_info->numQueuesRx *
			   sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
			   + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for"
			     " p_rx_irq_coalescing_tbl.", __func__);
		return -ENOMEM;
	}
	ugeth->p_rx_irq_coalescing_tbl =
	    (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
	    qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
	out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
		 ugeth->rx_irq_coalescing_tbl_offset);
	/* Fill interrupt coalescing table */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
			 interruptcoalescingmaxvalue,
			 ug_info->interruptcoalescingmaxvalue[i]);
		/* counter starts at its max value so the first frame can
		   raise an interrupt immediately */
		out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
			 interruptcoalescingcounter,
			 ug_info->interruptcoalescingmaxvalue[i]);
	}
	/* MRBLR */
	init_max_rx_buff_len(uf_info->max_rx_buf_length,
			     &ugeth->p_rx_glbl_pram->mrblr);
	/* MFLR */
	out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
	/* MINFLR */
	init_min_frame_len(ug_info->minFrameLength,
			   &ugeth->p_rx_glbl_pram->minflr,
			   &ugeth->p_rx_glbl_pram->mrblr);
	/* MAXD1 */
	out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
	/* MAXD2 */
	out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
	/* l2qt: pack eight 4-bit VLAN-priority-to-queue entries per word */
	l2qt = 0;
	for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
		l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
	out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
	/* l3qt: same 4-bit packing for the IP-priority-to-queue table */
	for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
		l3qt = 0;
		for (i = 0; i < 8; i++)
			l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
		out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
	}
	/* vlantype */
	out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
	/* vlantci */
	out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
	/* ecamptr */
	out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
	/* RBDQPTR */
	/* Size varies with number of Rx queues */
	ugeth->rx_bd_qs_tbl_offset =
	    qe_muram_alloc(ug_info->numQueuesRx *
			   (sizeof(struct ucc_geth_rx_bd_queues_entry) +
			    sizeof(struct ucc_geth_rx_prefetched_bds)),
			   UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
	if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
			     __func__);
		return -ENOMEM;
	}
	ugeth->p_rx_bd_qs_tbl =
	    (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
				    rx_bd_qs_tbl_offset);
	out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
	/* Zero out p_rx_bd_qs_tbl */
	memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
		  0,
		  ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
					  sizeof(struct ucc_geth_rx_prefetched_bds)));
	/* Setup the table */
	/* Assume BD rings are already established */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
			out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
				 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
		} else if (ugeth->ug_info->uf_info.bd_mem_part ==
			   MEM_PART_MURAM) {
			out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
				 (u32) immrbar_virt_to_phys(ugeth->
							    p_rx_bd_ring[i]));
		}
		/* rest of fields handled by QE */
	}
	/* remoder */
	/* Already has speed set */
	if (ugeth->rx_extended_features)
		remoder |= REMODER_RX_EXTENDED_FEATURES;
	if (ug_info->rxExtendedFiltering)
		remoder |= REMODER_RX_EXTENDED_FILTERING;
	if (ug_info->dynamicMaxFrameLength)
		remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
	if (ug_info->dynamicMinFrameLength)
		remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
	remoder |=
	    ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
	remoder |=
	    ug_info->
	    vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
	remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
	remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
	if (ug_info->ipCheckSumCheck)
		remoder |= REMODER_IP_CHECKSUM_CHECK;
	if (ug_info->ipAddressAlignment)
		remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
	out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
	/* Note that this function must be called ONLY AFTER
	   p_tx_fw_statistics_pram and p_rx_fw_statistics_pram
	   are allocated! */
	init_firmware_statistics_gathering_mode((ug_info->
		statisticsMode &
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
		(ug_info->statisticsMode &
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
		&ugeth->p_tx_glbl_pram->txrmonbaseptr,
		ugeth->tx_fw_statistics_pram_offset,
		&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
		ugeth->rx_fw_statistics_pram_offset,
		&ugeth->p_tx_glbl_pram->temoder,
		&ugeth->p_rx_glbl_pram->remoder);
	/* function code register */
	out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
	/* initialize extended filtering */
	if (ug_info->rxExtendedFiltering) {
		if (!ug_info->extendedFilteringChainPointer) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("%s: Null Extended Filtering Chain Pointer.",
					  __func__);
			return -EINVAL;
		}
		/* Allocate memory for extended filtering Mode Global
		   Parameters */
		ugeth->exf_glbl_param_offset =
		    qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
		UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
		if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
			if (netif_msg_ifup(ugeth))
				ugeth_err
					("%s: Can not allocate DPRAM memory for"
					 " p_exf_glbl_param.", __func__);
			return -ENOMEM;
		}
		ugeth->p_exf_glbl_param =
		    (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
				 exf_glbl_param_offset);
		out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
			 ugeth->exf_glbl_param_offset);
		out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
			 (u32) ug_info->extendedFilteringChainPointer);
	} else {		/* initialize 82xx style address filtering */
		/* Init individual address recognition registers to disabled */
		for (j = 0; j < NUM_OF_PADDRS; j++)
			ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
		p_82xx_addr_filt =
		    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
		    p_rx_glbl_pram->addressfiltering;
		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
			ENET_ADDR_TYPE_GROUP);
		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
			ENET_ADDR_TYPE_INDIVIDUAL);
	}
	/*
	 * Initialize UCC at QE level
	 */
	command = QE_INIT_TX_RX;
	/* Allocate shadow InitEnet command parameter structure.
	 * This is needed because after the InitEnet command is executed,
	 * the structure in DPRAM is released, because DPRAM is a premium
	 * resource.
	 * This shadow structure keeps a copy of what was done so that the
	 * allocated resources can be released when the channel is freed.
	 */
	if (!(ugeth->p_init_enet_param_shadow =
	      kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate memory for"
			     " p_UccInitEnetParamShadows.", __func__);
		return -ENOMEM;
	}
	/* Zero out *p_init_enet_param_shadow */
	memset((char *)ugeth->p_init_enet_param_shadow,
	       0, sizeof(struct ucc_geth_init_pram));
	/* Fill shadow InitEnet command parameter structure */
	ugeth->p_init_enet_param_shadow->resinit1 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT1;
	ugeth->p_init_enet_param_shadow->resinit2 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT2;
	ugeth->p_init_enet_param_shadow->resinit3 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT3;
	ugeth->p_init_enet_param_shadow->resinit4 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT4;
	ugeth->p_init_enet_param_shadow->resinit5 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT5;
	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ugeth->rx_glbl_pram_offset | ug_info->riscRx;
	if ((ug_info->largestexternallookupkeysize !=
	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
	    (ug_info->largestexternallookupkeysize !=
	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) &&
	    (ug_info->largestexternallookupkeysize !=
	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Invalid largest External Lookup Key Size.",
				  __func__);
		return -EINVAL;
	}
	ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
	    ug_info->largestexternallookupkeysize;
	size = sizeof(struct ucc_geth_thread_rx_pram);
	if (ug_info->rxExtendedFiltering) {
		size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
		if (ug_info->largestexternallookupkeysize ==
		    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
		if (ug_info->largestexternallookupkeysize ==
		    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
	}
	if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
		p_init_enet_param_shadow->rxthread[0]),
		(u8) (numThreadsRxNumerical + 1)
		/* Rx needs one extra for terminator */
		, size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
		ug_info->riscRx, 1)) != 0) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
				  __func__);
		return ret_val;
	}
	ugeth->p_init_enet_param_shadow->txglobal =
	    ugeth->tx_glbl_pram_offset | ug_info->riscTx;
	if ((ret_val =
	     fill_init_enet_entries(ugeth,
				    &(ugeth->p_init_enet_param_shadow->
				      txthread[0]), numThreadsTxNumerical,
				    sizeof(struct ucc_geth_thread_tx_pram),
				    UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
				    ug_info->riscTx, 0)) != 0) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
				  __func__);
		return ret_val;
	}
	/* Load Rx bds with buffers */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("%s: Can not fill Rx bds with buffers.",
					  __func__);
			return ret_val;
		}
	}
	/* Allocate InitEnet command parameter structure */
	init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
	if (IS_ERR_VALUE(init_enet_pram_offset)) {
		if (netif_msg_ifup(ugeth))
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
			     __func__);
		return -ENOMEM;
	}
	p_init_enet_pram =
	    (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
	/* Copy shadow InitEnet command parameter structure into PRAM */
	out_8(&p_init_enet_pram->resinit1,
			ugeth->p_init_enet_param_shadow->resinit1);
	out_8(&p_init_enet_pram->resinit2,
			ugeth->p_init_enet_param_shadow->resinit2);
	out_8(&p_init_enet_pram->resinit3,
			ugeth->p_init_enet_param_shadow->resinit3);
	out_8(&p_init_enet_pram->resinit4,
			ugeth->p_init_enet_param_shadow->resinit4);
	out_be16(&p_init_enet_pram->resinit5,
		 ugeth->p_init_enet_param_shadow->resinit5);
	out_8(&p_init_enet_pram->largestexternallookupkeysize,
	    ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
	out_be32(&p_init_enet_pram->rgftgfrxglobal,
		 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
		out_be32(&p_init_enet_pram->rxthread[i],
			 ugeth->p_init_enet_param_shadow->rxthread[i]);
	out_be32(&p_init_enet_pram->txglobal,
		 ugeth->p_init_enet_param_shadow->txglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
		out_be32(&p_init_enet_pram->txthread[i],
			 ugeth->p_init_enet_param_shadow->txthread[i]);
	/* Issue QE command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
		     init_enet_pram_offset);
	/* Free InitEnet command parameter */
	qe_muram_free(init_enet_pram_offset);
	return 0;
}
/*
 * ucc_geth_start_xmit - queue one frame for transmission (ndo_start_xmit)
 *
 * Called by the network stack with an skb ready to send.  Under the device
 * spinlock it stores the skb in the per-queue tracking array, DMA-maps the
 * data, hands the buffer to the hardware by writing the Tx BD, advances the
 * ring pointer (honoring the Wrap bit) and, if the ring has filled up to the
 * confirmation pointer, stops the queue.  Always returns NETDEV_TX_OK.
 *
 * NOTE(review): txQ is hard-coded to 0, so only the first Tx queue is ever
 * used by this path even when multiple Tx queues were configured.
 */
static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
#ifdef CONFIG_UGETH_TX_ON_DEMAND
	struct ucc_fast_private *uccf;
#endif
	u8 __iomem *bd;			/* BD pointer */
	u32 bd_status;
	u8 txQ = 0;
	unsigned long flags;
	ugeth_vdbg("%s: IN", __func__);
	spin_lock_irqsave(&ugeth->lock, flags);
	/* Byte count is charged at submit time, not at completion */
	dev->stats.tx_bytes += skb->len;
	/* Start from the next BD that should be filled */
	bd = ugeth->txBd[txQ];
	bd_status = in_be32((u32 __iomem *)bd);
	/* Save the skb pointer so we can free it later */
	ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
	/* Update the current skb pointer (wrapping if this was the last) */
	ugeth->skb_curtx[txQ] =
	    (ugeth->skb_curtx[txQ] +
	     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
	/* set up the buffer descriptor */
	out_be32(&((struct qe_bd __iomem *)bd)->buf,
		      dma_map_single(ugeth->dev, skb->data,
			      skb->len, DMA_TO_DEVICE));
	/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
	/* Keep only the Wrap bit from the old status; set Ready, Interrupt,
	   Last and the frame length */
	bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
	/* set bd status and length */
	out_be32((u32 __iomem *)bd, bd_status);
	/* Move to next BD in the ring */
	if (!(bd_status & T_W))
		bd += sizeof(struct qe_bd);
	else
		bd = ugeth->p_tx_bd_ring[txQ];
	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (bd == ugeth->confBd[txQ]) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}
	ugeth->txBd[txQ] = bd;
	if (ugeth->p_scheduler) {
		ugeth->cpucount[txQ]++;
		/* Indicate to QE that there are more Tx bds ready for
		   transmission */
		/* This is done by writing a running counter of the bd
		   count to the scheduler PRAM. */
		out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
	}
#ifdef CONFIG_UGETH_TX_ON_DEMAND
	/* Kick the transmitter immediately instead of waiting for polling */
	uccf = ugeth->uccf;
	out_be16(uccf->p_utodr, UCC_FAST_TOD);
#endif
	spin_unlock_irqrestore(&ugeth->lock, flags);
	return NETDEV_TX_OK;
}
/*
 * ucc_geth_rx - harvest received frames from one Rx BD ring
 * @ugeth:         device private state
 * @rxQ:           index of the Rx queue to service
 * @rx_work_limit: maximum number of BDs to process (NAPI budget share)
 *
 * Walks the ring from ugeth->rxBd[rxQ] while BDs are owned by software
 * (R_E clear) and the work limit is not exhausted.  Good single-buffer
 * frames (R_F and R_L set, no fatal errors) are passed up via
 * netif_receive_skb(); bad ones are reset and pushed onto the rx_recycle
 * list for reuse.  Each consumed slot is refilled with a fresh skb from
 * get_new_skb().  Returns the number of frames delivered to the stack.
 */
static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
{
	struct sk_buff *skb;
	u8 __iomem *bd;
	u16 length, howmany = 0;
	u32 bd_status;
	u8 *bdBuffer;
	struct net_device *dev;
	ugeth_vdbg("%s: IN", __func__);
	dev = ugeth->ndev;
	/* collect received buffers */
	bd = ugeth->rxBd[rxQ];
	bd_status = in_be32((u32 __iomem *)bd);
	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
		bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
		/* Hardware length includes the 4-byte FCS; strip it */
		length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
		skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
		/* determine whether buffer is first, last, first and last
		   (single buffer frame) or middle (not first and not last) */
		if (!skb ||
		    (!(bd_status & (R_F | R_L))) ||
		    (bd_status & R_ERRORS_FATAL)) {
			if (netif_msg_rx_err(ugeth))
				ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
					  __func__, __LINE__, (u32) skb);
			if (skb) {
				/* Reset the skb to a pristine state and
				   recycle it instead of freeing */
				skb->data = skb->head + NET_SKB_PAD;
				skb->len = 0;
				skb_reset_tail_pointer(skb);
				__skb_queue_head(&ugeth->rx_recycle, skb);
			}
			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
			dev->stats.rx_dropped++;
		} else {
			dev->stats.rx_packets++;
			howmany++;
			/* Prep the skb for the packet */
			skb_put(skb, length);
			/* Tell the skb what kind of packet this is */
			skb->protocol = eth_type_trans(skb, ugeth->ndev);
			dev->stats.rx_bytes += length;
			/* Send the packet up the stack */
			netif_receive_skb(skb);
		}
		/* Refill the ring slot; on failure the frame slot stays
		   empty and processing stops */
		skb = get_new_skb(ugeth, bd);
		if (!skb) {
			if (netif_msg_rx_err(ugeth))
				ugeth_warn("%s: No Rx Data Buffer", __func__);
			dev->stats.rx_dropped++;
			break;
		}
		ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
		/* update to point at the next skb */
		ugeth->skb_currx[rxQ] =
		    (ugeth->skb_currx[rxQ] +
		     1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
		if (bd_status & R_W)
			bd = ugeth->p_rx_bd_ring[rxQ];
		else
			bd += sizeof(struct qe_bd);
		bd_status = in_be32((u32 __iomem *)bd);
	}
	ugeth->rxBd[rxQ] = bd;
	return howmany;
}
/*
 * ucc_geth_tx - reclaim completed Tx buffer descriptors for one queue
 * @dev: network device
 * @txQ: index of the Tx queue to clean
 *
 * Walks the confirmation pointer confBd[txQ] over BDs the hardware has
 * released (T_R clear), recycling or freeing each transmitted skb,
 * advancing skb_dirtytx, and waking the queue if it was stopped for lack
 * of descriptors.  Caller must hold ugeth->lock (see ucc_geth_poll).
 * Always returns 0.
 */
static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
	/* Start from the next BD that should be filled */
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	u8 __iomem *bd;		/* BD pointer */
	u32 bd_status;
	bd = ugeth->confBd[txQ];
	bd_status = in_be32((u32 __iomem *)bd);
	/* Normal processing. */
	while ((bd_status & T_R) == 0) {
		struct sk_buff *skb;
		/* BD contains already transmitted buffer. */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame */
		skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
		if (!skb)
			break;
		dev->stats.tx_packets++;
		/* Recycle the skb for Rx use when it passes the size/state
		   check and the recycle list is not full; otherwise free it */
		if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
			     skb_recycle_check(skb,
				    ugeth->ug_info->uf_info.max_rx_buf_length +
				    UCC_GETH_RX_DATA_BUF_ALIGNMENT))
			__skb_queue_head(&ugeth->rx_recycle, skb);
		else
			dev_kfree_skb(skb);
		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
		ugeth->skb_dirtytx[txQ] =
		    (ugeth->skb_dirtytx[txQ] +
		     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W))
			bd += sizeof(struct qe_bd);
		else
			bd = ugeth->p_tx_bd_ring[txQ];
		bd_status = in_be32((u32 __iomem *)bd);
	}
	ugeth->confBd[txQ] = bd;
	return 0;
}
/*
 * NAPI poll callback: reclaim completed Tx BDs on every Tx queue, then
 * receive up to @budget packets across the Rx queues.  If less than the
 * budget was consumed, polling completes and the Rx/Tx event interrupts
 * (masked in the IRQ handler) are re-enabled.
 */
static int ucc_geth_poll(struct napi_struct *napi, int budget)
{
	struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
	struct ucc_geth_info *ug_info;
	int howmany, i;

	ug_info = ugeth->ug_info;

	/* Tx event processing — serialized against the xmit path. */
	spin_lock(&ugeth->lock);
	for (i = 0; i < ug_info->numQueuesTx; i++)
		ucc_geth_tx(ugeth->ndev, i);
	spin_unlock(&ugeth->lock);

	/* Rx processing shares the budget over all Rx queues. */
	howmany = 0;
	for (i = 0; i < ug_info->numQueuesRx; i++)
		howmany += ucc_geth_rx(ugeth, i, budget - howmany);

	if (howmany < budget) {
		napi_complete(napi);
		/* Re-arm the event interrupts for the next round. */
		setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
	}

	return howmany;
}
/*
 * Interrupt handler: ack pending UCC events, mask Rx/Tx events and hand
 * them to NAPI, and account error events directly.
 */
static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
{
	struct net_device *dev = info;
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_fast_private *uccf;
	struct ucc_geth_info *ug_info;
	register u32 ucce;
	register u32 uccm;

	ugeth_vdbg("%s: IN", __func__);

	uccf = ugeth->uccf;
	ug_info = ugeth->ug_info;

	/* read and clear events (only those currently unmasked) */
	ucce = (u32) in_be32(uccf->p_ucce);
	uccm = (u32) in_be32(uccf->p_uccm);
	ucce &= uccm;
	out_be32(uccf->p_ucce, ucce);

	/* check for receive/transmit events that require NAPI processing;
	 * mask them until ucc_geth_poll() re-enables them */
	if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
		if (napi_schedule_prep(&ugeth->napi)) {
			uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
			out_be32(uccf->p_uccm, uccm);
			__napi_schedule(&ugeth->napi);
		}
	}

	/* Errors and other events: busy (Rx overrun) and Tx error */
	if (ucce & UCCE_OTHER) {
		if (ucce & UCC_GETH_UCCE_BSY)
			dev->stats.rx_errors++;
		if (ucce & UCC_GETH_UCCE_TXE)
			dev->stats.tx_errors++;
	}

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void ucc_netpoll(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	int irq = ugeth->ug_info->uf_info.irq;

	/* Mask the line so the real handler cannot run concurrently,
	 * then invoke it synchronously. */
	disable_irq(irq);
	ucc_geth_irq_handler(irq, dev);
	enable_irq(irq);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/*
 * ndo_set_mac_address hook: validate and store the new MAC address, and
 * program the station-address registers if the interface is running.
 * Returns 0 on success or -EADDRNOTAVAIL for an invalid address.
 */
static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/*
	 * If device is not running, we will set mac addr register
	 * when opening the device.
	 */
	if (!netif_running(dev))
		return 0;

	/* Hold the lock while touching the MAC registers. */
	spin_lock_irq(&ugeth->lock);
	init_mac_station_addr_regs(dev->dev_addr[0],
				   dev->dev_addr[1],
				   dev->dev_addr[2],
				   dev->dev_addr[3],
				   dev->dev_addr[4],
				   dev->dev_addr[5],
				   &ugeth->ug_regs->macstnaddr1,
				   &ugeth->ug_regs->macstnaddr2);
	spin_unlock_irq(&ugeth->lock);

	return 0;
}
/*
 * Bring up the MAC: allocate/configure internal structures, start the
 * controller, program the ENET interface and station address, and
 * enable Rx/Tx.  On any failure the controller is stopped again and the
 * error is returned.
 */
static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
{
	struct net_device *dev = ugeth->ndev;
	int err;

	err = ucc_struct_init(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure internal struct, "
				  "aborting.", dev->name);
		goto err;
	}

	err = ucc_geth_startup(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure net device, aborting.",
				  dev->name);
		goto err;
	}

	/* Match speed/duplex/interface settings to the PHY. */
	err = adjust_enet_interface(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure net device, aborting.",
				  dev->name);
		goto err;
	}

	/*       Set MACSTNADDR1, MACSTNADDR2                */
	/* For more details see the hardware spec.           */
	init_mac_station_addr_regs(dev->dev_addr[0],
				   dev->dev_addr[1],
				   dev->dev_addr[2],
				   dev->dev_addr[3],
				   dev->dev_addr[4],
				   dev->dev_addr[5],
				   &ugeth->ug_regs->macstnaddr1,
				   &ugeth->ug_regs->macstnaddr2);

	err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
		goto err;
	}

	return 0;
err:
	/* Unwind everything acquired above. */
	ucc_geth_stop(ugeth);
	return err;
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	int err;

	ugeth_vdbg("%s: IN", __func__);

	/* Test station address: a multicast (group) bit set in byte 0 is
	 * almost certainly a configuration mistake. */
	if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Multicast address used for station "
				  "address - is this what you wanted?",
				  __func__);
		return -EINVAL;
	}

	err = init_phy(dev);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot initialize PHY, aborting.",
				  dev->name);
		return err;
	}

	err = ucc_geth_init_mac(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot initialize MAC, aborting.",
				  dev->name);
		goto err;
	}

	err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
			  0, "UCC Geth", dev);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot get IRQ for net device, aborting.",
				  dev->name);
		goto err;
	}

	phy_start(ugeth->phydev);
	napi_enable(&ugeth->napi);
	netif_start_queue(dev);

	/* Wakeup is possible if the QE stays alive in sleep or the PHY
	 * has an interrupt line of its own. */
	device_set_wakeup_capable(&dev->dev,
			qe_alive_during_sleep() || ugeth->phydev->irq);
	device_set_wakeup_enable(&dev->dev, ugeth->wol_en);

	return err;

err:
	ucc_geth_stop(ugeth);
	return err;
}
/* Stops the kernel queue, and halts the controller */
/* Stops the kernel queue, and halts the controller */
static int ucc_geth_close(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __func__);

	napi_disable(&ugeth->napi);

	/* Make sure no watchdog-triggered reset is still in flight. */
	cancel_work_sync(&ugeth->timeout_work);

	ucc_geth_stop(ugeth);

	phy_disconnect(ugeth->phydev);
	ugeth->phydev = NULL;

	free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);

	netif_stop_queue(dev);

	return 0;
}
/* Reopen device. This will reset the MAC and PHY. */
/* Reopen device. This will reset the MAC and PHY. */
static void ucc_geth_timeout_work(struct work_struct *work)
{
	struct ucc_geth_private *ugeth;
	struct net_device *dev;

	ugeth = container_of(work, struct ucc_geth_private, timeout_work);
	dev = ugeth->ndev;

	ugeth_vdbg("%s: IN", __func__);

	dev->stats.tx_errors++;

	ugeth_dump_regs(ugeth);

	if (dev->flags & IFF_UP) {
		/*
		 * Must reset MAC *and* PHY. This is done by reopening
		 * the device.
		 */
		netif_tx_stop_all_queues(dev);
		ucc_geth_stop(ugeth);
		ucc_geth_init_mac(ugeth);
		/* Must start PHY here */
		phy_start(ugeth->phydev);
		netif_tx_start_all_queues(dev);
	}

	/* Kick the queues so pending packets get another chance. */
	netif_tx_schedule_all(dev);
}
/*
* ucc_geth_timeout gets called when a packet has not been
* transmitted after a set amount of time.
*/
/*
 * ndo_tx_timeout hook: the reset requires sleeping, so defer the heavy
 * lifting to process context via the driver's timeout work item.
 */
static void ucc_geth_timeout(struct net_device *dev)
{
	struct ucc_geth_private *priv = netdev_priv(dev);

	schedule_work(&priv->timeout_work);
}
#ifdef CONFIG_PM
/*
 * Suspend: detach the netdev and quiesce the controller.  If
 * wake-on-magic-packet is requested, leave the fast controller running
 * with magic-packet detection enabled; otherwise stop the PHY unless it
 * is itself the wakeup source.
 */
static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
{
	struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
	struct ucc_geth_private *ugeth = netdev_priv(ndev);

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);
	napi_disable(&ugeth->napi);

	/*
	 * Disable the controller, otherwise we'll wakeup on any network
	 * activity.
	 */
	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);

	if (ugeth->wol_en & WAKE_MAGIC) {
		/* Arm magic-packet detection and keep the UCC fast
		 * controller alive so it can see the packet. */
		setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
		setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
		ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
	} else if (!(ugeth->wol_en & WAKE_PHY)) {
		phy_stop(ugeth->phydev);
	}

	return 0;
}
/*
 * Resume: undo ucc_geth_suspend().  If the QE kept state across sleep,
 * simply disarm magic-packet mode and re-enable; otherwise rebuild the
 * MAC from scratch.  Link state is reset so the PHY renegotiates.
 */
static int ucc_geth_resume(struct platform_device *ofdev)
{
	struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
	struct ucc_geth_private *ugeth = netdev_priv(ndev);
	int err;

	if (!netif_running(ndev))
		return 0;

	if (qe_alive_during_sleep()) {
		if (ugeth->wol_en & WAKE_MAGIC) {
			/* Disarm magic-packet detection set up at suspend. */
			ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX);
			clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
			clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
		}
		ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
	} else {
		/*
		 * Full reinitialization is required if QE shuts down
		 * during sleep.
		 */
		ucc_geth_memclean(ugeth);

		err = ucc_geth_init_mac(ugeth);
		if (err) {
			ugeth_err("%s: Cannot initialize MAC, aborting.",
				  ndev->name);
			return err;
		}
	}

	/* Force the PHY state machine to re-report the link. */
	ugeth->oldlink = 0;
	ugeth->oldspeed = 0;
	ugeth->oldduplex = -1;

	phy_stop(ugeth->phydev);
	phy_start(ugeth->phydev);

	napi_enable(&ugeth->napi);
	netif_device_attach(ndev);

	return 0;
}
#else
#define ucc_geth_suspend NULL
#define ucc_geth_resume NULL
#endif
/*
 * Map a device-tree "phy-connection-type" string to a phy_interface_t.
 * Comparison is case-insensitive; unrecognized strings fall back to MII.
 */
static phy_interface_t to_phy_interface(const char *phy_connection_type)
{
	static const struct {
		const char *name;
		phy_interface_t mode;
	} modes[] = {
		{ "mii",	PHY_INTERFACE_MODE_MII },
		{ "gmii",	PHY_INTERFACE_MODE_GMII },
		{ "tbi",	PHY_INTERFACE_MODE_TBI },
		{ "rmii",	PHY_INTERFACE_MODE_RMII },
		{ "rgmii",	PHY_INTERFACE_MODE_RGMII },
		{ "rgmii-id",	PHY_INTERFACE_MODE_RGMII_ID },
		{ "rgmii-txid",	PHY_INTERFACE_MODE_RGMII_TXID },
		{ "rgmii-rxid",	PHY_INTERFACE_MODE_RGMII_RXID },
		{ "rtbi",	PHY_INTERFACE_MODE_RTBI },
		{ "sgmii",	PHY_INTERFACE_MODE_SGMII },
	};
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		if (strcasecmp(phy_connection_type, modes[i].name) == 0)
			return modes[i].mode;

	return PHY_INTERFACE_MODE_MII;
}
/*
 * ndo_do_ioctl hook: forward MII ioctls to the PHY layer.  Refused with
 * -EINVAL when the interface is down and -ENODEV when no PHY is bound.
 */
static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ucc_geth_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return priv->phydev ? phy_mii_ioctl(priv->phydev, rq, cmd) : -ENODEV;
}
/* net_device operations published to the networking core. */
static const struct net_device_ops ucc_geth_netdev_ops = {
	.ndo_open		= ucc_geth_open,
	.ndo_stop		= ucc_geth_close,
	.ndo_start_xmit		= ucc_geth_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ucc_geth_set_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_multicast_list	= ucc_geth_set_multi,
	.ndo_tx_timeout		= ucc_geth_timeout,
	.ndo_do_ioctl		= ucc_geth_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ucc_netpoll,
#endif
};
/*
 * Probe one UCC Ethernet instance described by the device-tree node:
 * parse clocks, PHY handles and interface type, size the FIFOs for the
 * maximum link speed, then allocate, initialize and register the
 * net_device.
 *
 * Fix vs. original: register_netdev() was called *before* the private
 * area (ug_info, ndev, node) and the MAC address were filled in.  Once
 * register_netdev() returns the device is live and ndo_open() may run,
 * so all initialization must be complete first.
 */
static int ucc_geth_probe(struct platform_device* ofdev)
{
	struct device *device = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct net_device *dev = NULL;
	struct ucc_geth_private *ugeth = NULL;
	struct ucc_geth_info *ug_info;
	struct resource res;
	int err, ucc_num, max_speed = 0;
	const unsigned int *prop;
	const char *sprop;
	const void *mac_addr;
	phy_interface_t phy_interface;
	/* Index is the legacy "interface" enum from old device trees. */
	static const int enet_to_speed[] = {
		SPEED_10, SPEED_10, SPEED_10,
		SPEED_100, SPEED_100, SPEED_100,
		SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
	};
	static const phy_interface_t enet_to_phy_interface[] = {
		PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
		PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
		PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
		PHY_INTERFACE_MODE_SGMII,
	};

	ugeth_vdbg("%s: IN", __func__);

	/* Which UCC is this?  "cell-index" preferred, "device-id" legacy. */
	prop = of_get_property(np, "cell-index", NULL);
	if (!prop) {
		prop = of_get_property(np, "device-id", NULL);
		if (!prop)
			return -ENODEV;
	}
	ucc_num = *prop - 1;
	if ((ucc_num < 0) || (ucc_num > 7))
		return -ENODEV;

	ug_info = &ugeth_info[ucc_num];
	if (ug_info == NULL) {
		if (netif_msg_probe(&debug))
			ugeth_err("%s: [%d] Missing additional data!",
					__func__, ucc_num);
		return -ENODEV;
	}

	ug_info->uf_info.ucc_num = ucc_num;

	/* Rx clock: prefer the named property, fall back to the legacy
	 * numeric "rx-clock". */
	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ug_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ug_info->uf_info.rx_clock > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		prop = of_get_property(np, "rx-clock", NULL);
		if (!prop) {
			/* If both rx-clock-name and rx-clock are missing,
			   we want to tell people to use rx-clock-name. */
			printk(KERN_ERR
				"ucc_geth: missing rx-clock-name property\n");
			return -EINVAL;
		}
		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid rx-clock propperty\n");
			return -EINVAL;
		}
		ug_info->uf_info.rx_clock = *prop;
	}

	/* Tx clock: same two-property scheme as the Rx clock. */
	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ug_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ug_info->uf_info.tx_clock > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		prop = of_get_property(np, "tx-clock", NULL);
		if (!prop) {
			printk(KERN_ERR
				"ucc_geth: missing tx-clock-name property\n");
			return -EINVAL;
		}
		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid tx-clock property\n");
			return -EINVAL;
		}
		ug_info->uf_info.tx_clock = *prop;
	}

	err = of_address_to_resource(np, 0, &res);
	if (err)
		return -EINVAL;

	ug_info->uf_info.regs = res.start;
	ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY node.  If it's not there, we don't support SGMII */
	ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	/* get the phy interface type, or default to MII */
	prop = of_get_property(np, "phy-connection-type", NULL);
	if (!prop) {
		/* handle interface property present in old trees */
		prop = of_get_property(ug_info->phy_node, "interface", NULL);
		if (prop != NULL) {
			phy_interface = enet_to_phy_interface[*prop];
			max_speed = enet_to_speed[*prop];
		} else
			phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		phy_interface = to_phy_interface((const char *)prop);
	}

	/* get speed, or derive from PHY interface */
	if (max_speed == 0)
		switch (phy_interface) {
		case PHY_INTERFACE_MODE_GMII:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_TBI:
		case PHY_INTERFACE_MODE_RTBI:
		case PHY_INTERFACE_MODE_SGMII:
			max_speed = SPEED_1000;
			break;
		default:
			max_speed = SPEED_100;
			break;
		}

	if (max_speed == SPEED_1000) {
		/* configure muram FIFOs for gigabit operation */
		ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
		ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
		ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
		ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
		ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
		ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
		ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;

		/* If QE's snum number is 46 which means we need to support
		 * 4 UECs at 1000Base-T simultaneously, we need to allocate
		 * more Threads to Rx.
		 */
		if (qe_get_num_of_snums() == 46)
			ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
		else
			ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
	}

	if (netif_msg_probe(&debug))
		printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
			ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
			ug_info->uf_info.irq);

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof(*ugeth));
	if (dev == NULL)
		return -ENOMEM;

	ugeth = netdev_priv(dev);
	spin_lock_init(&ugeth->lock);

	/* Create CQs for hash tables */
	INIT_LIST_HEAD(&ugeth->group_hash_q);
	INIT_LIST_HEAD(&ugeth->ind_hash_q);

	dev_set_drvdata(device, dev);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long)(ug_info->uf_info.regs);

	SET_NETDEV_DEV(dev, device);

	/* Fill in the dev structure */
	uec_set_ethtool_ops(dev);
	dev->netdev_ops = &ucc_geth_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
	netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
	dev->mtu = 1500;

	ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
	ugeth->phy_interface = phy_interface;
	ugeth->max_speed = max_speed;

	/* Finish the private area and the MAC address *before* the device
	 * becomes visible via register_netdev(). */
	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, 6);

	ugeth->ug_info = ug_info;
	ugeth->dev = device;
	ugeth->ndev = dev;
	ugeth->node = np;

	err = register_netdev(dev);
	if (err) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Cannot register net device, aborting.",
				  dev->name);
		free_netdev(dev);
		return err;
	}

	return 0;
}
/*
 * Unbind: unregister the netdev, release the QE/MURAM resources, then
 * free the netdev itself.
 *
 * Fix vs. original: ucc_geth_memclean(ugeth) was called *after*
 * free_netdev(dev), but @ugeth is the netdev's private area and is
 * freed along with it — a use-after-free.  Clean up while the private
 * data is still valid.
 */
static int ucc_geth_remove(struct platform_device* ofdev)
{
	struct device *device = &ofdev->dev;
	struct net_device *dev = dev_get_drvdata(device);
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	unregister_netdev(dev);
	ucc_geth_memclean(ugeth);
	free_netdev(dev);
	dev_set_drvdata(device, NULL);

	return 0;
}
/* Device-tree match table; terminated by an empty entry. */
static struct of_device_id ucc_geth_match[] = {
	{
		.type = "network",
		.compatible = "ucc_geth",
	},
	{},
};

MODULE_DEVICE_TABLE(of, ucc_geth_match);
/* Platform driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct platform_driver ucc_geth_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = ucc_geth_match,
	},
	.probe		= ucc_geth_probe,
	.remove		= ucc_geth_remove,
	.suspend	= ucc_geth_suspend,
	.resume		= ucc_geth_resume,
};
/*
 * Module init: seed the per-UCC info table from the primary template
 * and register the platform driver.
 */
static int __init ucc_geth_init(void)
{
	int i;

	if (netif_msg_drv(&debug))
		printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");

	for (i = 0; i < 8; i++)
		ugeth_info[i] = ugeth_primary_info;

	return platform_driver_register(&ucc_geth_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit ucc_geth_exit(void)
{
	platform_driver_unregister(&ucc_geth_driver);
}
module_init(ucc_geth_init);
module_exit(ucc_geth_exit);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
| gpl-2.0 |
MojieBuddhist/linux-1 | arch/hexagon/kernel/dma.c | 3559 | 5767 | /*
* DMA implementation for Hexagon
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <asm/dma-mapping.h>
#include <linux/module.h>
#include <asm/page.h>
/* Active DMA ops for the platform; installed by hexagon_dma_init(). */
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

/* Error sentinel returned by map_page(); globals are automatically
 * initialized to zero */
int bad_dma_address;

/* DMA addresses are plain physical addresses on this platform, so a
 * bus address converts straight back to a kernel virtual address. */
static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
{
	return phys_to_virt((unsigned long) dma_addr);
}
/* Only a full 32-bit DMA mask is supported on this platform. */
int dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(32) ? 1 : 0;
}
EXPORT_SYMBOL(dma_supported);
/* Install @mask as the device's DMA mask; -EIO if absent/unsupported. */
int dma_set_mask(struct device *dev, u64 mask)
{
	if (dev->dma_mask && dma_supported(dev, mask)) {
		*dev->dma_mask = mask;
		return 0;
	}

	return -EIO;
}
EXPORT_SYMBOL(dma_set_mask);
/* Lazily-created pool backing all coherent allocations. */
static struct gen_pool *coherent_pool;

/* Allocates from a pool of uncached memory that was reserved at boot time */
static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	void *ret;

	/*
	 * Our max_low_pfn should have been backed off by 16MB in
	 * mm/init.c to create DMA coherent space.  Use that as the VA
	 * for the pool.
	 */
	if (coherent_pool == NULL) {
		/* First allocation creates the pool; a failure here is
		 * unrecoverable, hence the panic. */
		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

		if (coherent_pool == NULL)
			panic("Can't create %s() memory pool!", __func__);
		else
			gen_pool_add(coherent_pool,
				pfn_to_virt(max_low_pfn),
				hexagon_coherent_pool_size, -1);
	}

	ret = (void *) gen_pool_alloc(coherent_pool, size);

	if (ret) {
		/* Zero the buffer and report its bus (physical) address. */
		memset(ret, 0, size);
		*dma_addr = (dma_addr_t) virt_to_phys(ret);
	} else
		*dma_addr = ~0;

	return ret;
}
/* Return a coherent buffer to the boot-time pool. */
static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
/*
 * Verify that bus address @bus..@bus+@size is reachable by @hwdev.
 * Returns 1 when the device can address the range (or has no mask to
 * check against), 0 otherwise; logs the overflow for 32-bit-capable
 * devices.  @name identifies the caller in the log message.
 */
static int check_addr(const char *name, struct device *hwdev,
		      dma_addr_t bus, size_t size)
{
	if (!hwdev || !hwdev->dma_mask || dma_capable(hwdev, bus, size))
		return 1;

	if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
		printk(KERN_ERR
			"%s: overflow %Lx+%zu of device mask %Lx\n",
			name, (long long)bus, size,
			(long long)*hwdev->dma_mask);

	return 0;
}
/*
 * Map a scatterlist for DMA: set each entry's bus address to its
 * physical address, validate it against the device mask, and flush the
 * data cache over the range (DMA is not cache coherent here).
 * Returns the number of entries mapped, or 0 on address-check failure.
 */
static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
			  int nents, enum dma_data_direction dir,
			  struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;

		s->dma_length = s->length;

		/* Write back dirty lines so the device sees current data. */
		flush_dcache_range(dma_addr_to_virt(s->dma_address),
				   dma_addr_to_virt(s->dma_address + s->length));
	}

	return nents;
}
/*
 * Synchronize the data cache over [addr, addr+size) for the given DMA
 * direction: clean (write back) toward the device, invalidate from the
 * device, flush (both) for bidirectional.  @addr is a kernel virtual
 * address.  An unknown direction is a bug.
 */
static inline void dma_sync(void *addr, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long start = (unsigned long) addr;
	unsigned long end = start + size;

	if (dir == DMA_TO_DEVICE)
		hexagon_clean_dcache_range(start, end);
	else if (dir == DMA_FROM_DEVICE)
		hexagon_inv_dcache_range(start, end);
	else if (dir == DMA_BIDIRECTIONAL)
		flush_dcache_range(start, end);
	else
		BUG();
}
/**
 * hexagon_map_page() - maps an address for device DMA
 * @dev:	pointer to DMA device
 * @page:	pointer to page struct of DMA memory
 * @offset:	offset within page
 * @size:	size of memory to map
 * @dir:	transfer direction
 * @attrs:	pointer to DMA attrs (not used)
 *
 * Called to map a memory address to a DMA address prior
 * to accesses to/from device.
 *
 * We don't particularly have many hoops to jump through
 * so far.  Straight translation between phys and virtual.
 *
 * DMA is not cache coherent so sync is necessary; this
 * seems to be a convenient place to do it.
 *
 * Return: the bus address, or bad_dma_address if the range is not
 * reachable by the device's DMA mask.
 */
static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;
	WARN_ON(size == 0);

	if (!check_addr("map_single", dev, bus, size))
		return bad_dma_address;

	dma_sync(dma_addr_to_virt(bus), size, dir);

	return bus;
}
/* Make device-written data visible to the CPU (cache maintenance only;
 * both directions use the same dma_sync helper on this platform). */
static void hexagon_sync_single_for_cpu(struct device *dev,
					dma_addr_t dma_handle, size_t size,
					enum dma_data_direction dir)
{
	dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}

/* Make CPU-written data visible to the device. */
static void hexagon_sync_single_for_device(struct device *dev,
					dma_addr_t dma_handle, size_t size,
					enum dma_data_direction dir)
{
	dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}
/* dma_map_ops vector for Hexagon; .is_phys: DMA addresses are plain
 * physical addresses. */
struct dma_map_ops hexagon_dma_ops = {
	.alloc		= hexagon_dma_alloc_coherent,
	.free		= hexagon_free_coherent,
	.map_sg		= hexagon_map_sg,
	.map_page	= hexagon_map_page,
	.sync_single_for_cpu = hexagon_sync_single_for_cpu,
	.sync_single_for_device = hexagon_sync_single_for_device,
	.is_phys	= 1,
};
/* Install the Hexagon dma_map_ops unless something already did. */
void __init hexagon_dma_init(void)
{
	if (!dma_ops)
		dma_ops = &hexagon_dma_ops;
}
| gpl-2.0 |
myjang0507/Polaris-a8elte | drivers/video/omap/lcd_mipid.c | 4071 | 13932 | /*
* LCD driver for MIPI DBI-C / DCS compatible LCDs
*
* Copyright (C) 2006 Nokia Corporation
* Author: Imre Deak <imre.deak@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/platform_data/lcd-mipid.h>
#include "omapfb.h"
#define MIPID_MODULE_NAME "lcd_mipid"
#define MIPID_CMD_READ_DISP_ID 0x04
#define MIPID_CMD_READ_RED 0x06
#define MIPID_CMD_READ_GREEN 0x07
#define MIPID_CMD_READ_BLUE 0x08
#define MIPID_CMD_READ_DISP_STATUS 0x09
#define MIPID_CMD_RDDSDR 0x0F
#define MIPID_CMD_SLEEP_IN 0x10
#define MIPID_CMD_SLEEP_OUT 0x11
#define MIPID_CMD_DISP_OFF 0x28
#define MIPID_CMD_DISP_ON 0x29
#define MIPID_ESD_CHECK_PERIOD msecs_to_jiffies(5000)
#define to_mipid_device(p) container_of(p, struct mipid_device, \
panel)
/* Per-panel state for a MIPI DBI-C / DCS LCD. */
struct mipid_device {
	int		enabled;	/* panel currently on */
	int		revision;	/* panel HW revision byte */
	unsigned int	saved_bklight_level;	/* restored on enable */
	unsigned long	hw_guard_end;		/* next value of jiffies
						   when we can issue the
						   next sleep in/out command */
	unsigned long	hw_guard_wait;		/* max guard time in jiffies */

	struct omapfb_device	*fbdev;
	struct spi_device	*spi;
	struct mutex		mutex;		/* serializes panel access */
	struct lcd_panel	panel;

	struct workqueue_struct	*esd_wq;	/* periodic ESD checker */
	struct delayed_work	esd_work;
	void			(*esd_check)(struct mipid_device *m);
};
/*
 * Perform one DBI-C transaction over SPI: send the 8-bit @cmd in a
 * 9-bit word, optionally write @wlen parameter bytes, then optionally
 * read @rlen bytes into @rbuf.  The 9-bit framing carries the D/C bit;
 * multi-byte reads insert an extra dummy clock before the first data
 * bit, per the panel protocol.
 */
static void mipid_transfer(struct mipid_device *md, int cmd, const u8 *wbuf,
			   int wlen, u8 *rbuf, int rlen)
{
	struct spi_message	m;
	struct spi_transfer	*x, xfer[4];
	u16			w;
	int			r;

	BUG_ON(md->spi == NULL);

	spi_message_init(&m);

	memset(xfer, 0, sizeof(xfer));
	x = &xfer[0];

	/* Command phase: one 9-bit word. */
	cmd &= 0xff;
	x->tx_buf		= &cmd;
	x->bits_per_word	= 9;
	x->len			= 2;
	spi_message_add_tail(x, &m);

	if (wlen) {
		x++;
		x->tx_buf		= wbuf;
		x->len			= wlen;
		x->bits_per_word	= 9;
		spi_message_add_tail(x, &m);
	}

	if (rlen) {
		x++;
		x->rx_buf	= &w;
		x->len		= 1;
		spi_message_add_tail(x, &m);

		if (rlen > 1) {
			/* Arrange for the extra clock before the first
			 * data bit.
			 */
			x->bits_per_word = 9;
			x->len		 = 2;

			x++;
			x->rx_buf	 = &rbuf[1];
			x->len		 = rlen - 1;
			spi_message_add_tail(x, &m);
		}
	}

	r = spi_sync(md->spi, &m);
	if (r < 0)
		dev_dbg(&md->spi->dev, "spi_sync %d\n", r);

	if (rlen)
		rbuf[0] = w & 0xff;	/* first byte arrived in the u16 */
}
/* Send a parameterless DCS command. */
static inline void mipid_cmd(struct mipid_device *md, int cmd)
{
	mipid_transfer(md, cmd, NULL, 0, NULL, 0);
}

/* Write @len parameter bytes to register @reg. */
static inline void mipid_write(struct mipid_device *md,
			       int reg, const u8 *buf, int len)
{
	mipid_transfer(md, reg, buf, len, NULL, 0);
}

/* Read @len bytes from register @reg into @buf. */
static inline void mipid_read(struct mipid_device *md,
			      int reg, u8 *buf, int len)
{
	mipid_transfer(md, reg, NULL, 0, buf, len);
}
static void set_data_lines(struct mipid_device *md, int data_lines)
{
u16 par;
switch (data_lines) {
case 16:
par = 0x150;
break;
case 18:
par = 0x160;
break;
case 24:
par = 0x170;
break;
}
mipid_write(md, 0x3a, (u8 *)&par, 2);
}
/* Send the panel's vendor init sequence (register 0xc2) and set the
 * RGB interface width to match the configured data lines.
 * NOTE(review): the 0xc2 parameter values come from the panel vendor;
 * their meaning is not documented here. */
static void send_init_string(struct mipid_device *md)
{
	u16 initpar[] = { 0x0102, 0x0100, 0x0100 };

	mipid_write(md, 0xc2, (u8 *)initpar, sizeof(initpar));
	set_data_lines(md, md->panel.data_lines);
}
/* Start a hardware guard interval: no sleep-in/out command may be sent
 * until @guard_msec have elapsed. */
static void hw_guard_start(struct mipid_device *md, int guard_msec)
{
	md->hw_guard_wait = msecs_to_jiffies(guard_msec);
	md->hw_guard_end = jiffies + md->hw_guard_wait;
}

/* Sleep out any remainder of the current guard interval.  The double
 * check on @wait guards against jiffies wraparound. */
static void hw_guard_wait(struct mipid_device *md)
{
	unsigned long wait = md->hw_guard_end - jiffies;

	if ((long)wait > 0 && wait <= md->hw_guard_wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(wait);
	}
}
/*
 * Enter (@on != 0) or leave sleep mode.  Waits out any pending hardware
 * guard interval first, then starts a new 120 ms guard — the panel
 * cannot accept another sleep-in/out command sooner than that.
 */
static void set_sleep_mode(struct mipid_device *md, int on)
{
	int cmd, sleep_time = 50;

	if (on)
		cmd = MIPID_CMD_SLEEP_IN;
	else
		cmd = MIPID_CMD_SLEEP_OUT;
	hw_guard_wait(md);
	mipid_cmd(md, cmd);
	hw_guard_start(md, 120);
	/*
	 * When we enable the panel, it seems we _have_ to sleep
	 * 120 ms before sending the init string.  When disabling the
	 * panel we'll sleep for the duration of 2 frames, so that the
	 * controller can still provide the PCLK,HS,VS signals.
	 */
	if (!on)
		sleep_time = 120;
	msleep(sleep_time);
}
/* Issue the DCS display-on or display-off command. */
static void set_display_state(struct mipid_device *md, int enabled)
{
	if (enabled)
		mipid_cmd(md, MIPID_CMD_DISP_ON);
	else
		mipid_cmd(md, MIPID_CMD_DISP_OFF);
}
/*
 * Set the backlight via the board-supplied callbacks.  While the panel
 * is disabled the level is only cached and applied on the next enable.
 * Returns -ENODEV without callbacks, -EINVAL for an out-of-range level.
 */
static int mipid_set_bklight_level(struct lcd_panel *panel, unsigned int level)
{
	struct mipid_device *md = to_mipid_device(panel);
	struct mipid_platform_data *pd = md->spi->dev.platform_data;

	if (pd->get_bklight_max == NULL || pd->set_bklight_level == NULL)
		return -ENODEV;
	if (level > pd->get_bklight_max(pd))
		return -EINVAL;
	if (!md->enabled) {
		md->saved_bklight_level = level;
		return 0;
	}
	pd->set_bklight_level(pd, level);

	return 0;
}
/* Query the current backlight level from the board callback.
 * NOTE(review): returns -ENODEV through an unsigned return type when
 * the callback is absent — matches the omapfb panel interface, but
 * callers must treat large values accordingly. */
static unsigned int mipid_get_bklight_level(struct lcd_panel *panel)
{
	struct mipid_device *md = to_mipid_device(panel);
	struct mipid_platform_data *pd = md->spi->dev.platform_data;

	if (pd->get_bklight_level == NULL)
		return -ENODEV;
	return pd->get_bklight_level(pd);
}

/* Query the maximum backlight level from the board callback. */
static unsigned int mipid_get_bklight_max(struct lcd_panel *panel)
{
	struct mipid_device *md = to_mipid_device(panel);
	struct mipid_platform_data *pd = md->spi->dev.platform_data;

	if (pd->get_bklight_max == NULL)
		return -ENODEV;

	return pd->get_bklight_max(pd);
}
/* Panel capabilities: backlight control only. */
static unsigned long mipid_get_caps(struct lcd_panel *panel)
{
	return OMAPFB_CAPS_SET_BACKLIGHT;
}
/*
 * Read back the top-left pixel through the panel's R/G/B read commands
 * and pack it as RGB565, for use by the RGB-interface self test.
 */
static u16 read_first_pixel(struct mipid_device *md)
{
	u16 pixel;
	u8 red, green, blue;

	mutex_lock(&md->mutex);
	mipid_read(md, MIPID_CMD_READ_RED, &red, 1);
	mipid_read(md, MIPID_CMD_READ_GREEN, &green, 1);
	mipid_read(md, MIPID_CMD_READ_BLUE, &blue, 1);
	mutex_unlock(&md->mutex);

	switch (md->panel.data_lines) {
	case 16:
		/* Panel reports 6-bit components; drop the extra R/B bit. */
		pixel = ((red >> 1) << 11) | (green << 5) | (blue >> 1);
		break;
	case 24:
		/* 24 bit -> 16 bit */
		pixel = ((red >> 3) << 11) | ((green >> 2) << 5) |
			(blue >> 3);
		break;
	default:
		pixel = 0;
		BUG();	/* only 16/24-line panels are testable */
	}

	return pixel;
}
/*
 * RGB-interface loopback test: write known patterns to the first pixel
 * and read them back through the panel, retrying for up to 100 ms per
 * pattern.  Only MIPID_TEST_RGB_LINES is implemented.  Returns 0 on
 * success, MIPID_TEST_INVALID or MIPID_TEST_FAILED otherwise.
 */
static int mipid_run_test(struct lcd_panel *panel, int test_num)
{
	struct mipid_device *md = to_mipid_device(panel);
	static const u16 test_values[4] = {
		0x0000, 0xffff, 0xaaaa, 0x5555,
	};
	int i;

	if (test_num != MIPID_TEST_RGB_LINES)
		return MIPID_TEST_INVALID;

	for (i = 0; i < ARRAY_SIZE(test_values); i++) {
		int delay;
		unsigned long tmo;

		omapfb_write_first_pixel(md->fbdev, test_values[i]);
		tmo = jiffies + msecs_to_jiffies(100);

		/* Poll until the pattern propagates or the timeout hits;
		 * first wait is longer to let the frame scan out. */
		delay = 25;
		while (1) {
			u16 pixel;

			msleep(delay);
			pixel = read_first_pixel(md);
			if (pixel == test_values[i])
				break;
			if (time_after(jiffies, tmo)) {
				dev_err(&md->spi->dev,
					"MIPI LCD RGB I/F test failed: "
					"expecting %04x, got %04x\n",
					test_values[i], pixel);
				return MIPID_TEST_FAILED;
			}
			delay = 10;
		}
	}

	return 0;
}
/* Recover the panel after an ESD event by cycling sleep mode. */
static void ls041y3_esd_recover(struct mipid_device *md)
{
	dev_err(&md->spi->dev, "performing LCD ESD recovery\n");
	set_sleep_mode(md, 1);
	set_sleep_mode(md, 0);
}

/* ESD check mode 1: compare the self-diagnostic register before and
 * after a sleep-out command; recover if the diagnostic bit did not
 * toggle. */
static void ls041y3_esd_check_mode1(struct mipid_device *md)
{
	u8 state1, state2;

	mipid_read(md, MIPID_CMD_RDDSDR, &state1, 1);
	set_sleep_mode(md, 0);
	mipid_read(md, MIPID_CMD_RDDSDR, &state2, 1);
	dev_dbg(&md->spi->dev, "ESD mode 1 state1 %02x state2 %02x\n",
		state1, state2);
	/* Each sleep out command will trigger a self diagnostic and flip
	* Bit6 if the test passes.
	*/
	if (!((state1 ^ state2) & (1 << 6)))
		ls041y3_esd_recover(md);
}
/*
 * ESD check mode 2 (panel revision >= 0x88): run a vendor-defined
 * register sequence — three setup writes, a status read (0xb4), then
 * three restore writes — and recover if the status byte reads zero.
 * NOTE(review): the register values are opaque vendor magic; their
 * meaning is not documented in this driver.
 */
static void ls041y3_esd_check_mode2(struct mipid_device *md)
{
	int i;
	u8 rbuf[2];
	static const struct {
		int	cmd;
		int	wlen;
		u16	wbuf[3];
	} *rd, rd_ctrl[7] = {
		{ 0xb0, 4, { 0x0101, 0x01fe, } },
		{ 0xb1, 4, { 0x01de, 0x0121, } },
		{ 0xc2, 4, { 0x0100, 0x0100, } },
		{ 0xbd, 2, { 0x0100, } },
		{ 0xc2, 4, { 0x01fc, 0x0103, } },
		{ 0xb4, 0, },
		{ 0x00, 0, },
	};

	rd = rd_ctrl;
	/* Setup: first three writes of the table. */
	for (i = 0; i < 3; i++, rd++)
		mipid_write(md, rd->cmd, (u8 *)rd->wbuf, rd->wlen);

	udelay(10);
	/* Status read (entry 3, cmd 0xb4 after the skip below). */
	mipid_read(md, rd->cmd, rbuf, 2);
	rd++;

	/* Restore: the remaining three writes. */
	for (i = 0; i < 3; i++, rd++) {
		udelay(10);
		mipid_write(md, rd->cmd, (u8 *)rd->wbuf, rd->wlen);
	}

	dev_dbg(&md->spi->dev, "ESD mode 2 state %02x\n", rbuf[1]);
	if (rbuf[1] == 0x00)
		ls041y3_esd_recover(md);
}
/* Full ESD check: mode 1 always, mode 2 only on revisions >= 0x88. */
static void ls041y3_esd_check(struct mipid_device *md)
{
	ls041y3_esd_check_mode1(md);
	if (md->revision >= 0x88)
		ls041y3_esd_check_mode2(md);
}
/* (Re)arm the periodic ESD check, if this panel has one. */
static void mipid_esd_start_check(struct mipid_device *md)
{
	if (md->esd_check != NULL)
		queue_delayed_work(md->esd_wq, &md->esd_work,
				   MIPID_ESD_CHECK_PERIOD);
}

/* Cancel the pending ESD check and wait for a running one to finish. */
static void mipid_esd_stop_check(struct mipid_device *md)
{
	if (md->esd_check != NULL)
		cancel_delayed_work_sync(&md->esd_work);
}

/* Workqueue body: run the panel's ESD check under the device mutex,
 * then re-queue itself for the next period. */
static void mipid_esd_work(struct work_struct *work)
{
	struct mipid_device *md = container_of(work, struct mipid_device,
					       esd_work.work);

	mutex_lock(&md->mutex);
	md->esd_check(md);
	mutex_unlock(&md->mutex);
	mipid_esd_start_check(md);
}
/*
 * Power the panel up: leave sleep mode, send the init sequence, turn
 * the display on, restore the saved backlight level and arm the ESD
 * checker.  Idempotent — a second call while enabled is a no-op.
 */
static int mipid_enable(struct lcd_panel *panel)
{
	struct mipid_device *md = to_mipid_device(panel);

	mutex_lock(&md->mutex);

	if (md->enabled) {
		mutex_unlock(&md->mutex);
		return 0;
	}
	set_sleep_mode(md, 0);
	md->enabled = 1;
	send_init_string(md);
	set_display_state(md, 1);
	mipid_set_bklight_level(panel, md->saved_bklight_level);
	mipid_esd_start_check(md);

	mutex_unlock(&md->mutex);
	return 0;
}
/*
 * Power the panel down: save the backlight level, blank the backlight,
 * turn the display off and enter sleep mode.  Idempotent when already
 * disabled.
 */
static void mipid_disable(struct lcd_panel *panel)
{
	struct mipid_device *md = to_mipid_device(panel);

	/*
	 * A final ESD work might be called before returning,
	 * so do this without holding the lock.
	 */
	mipid_esd_stop_check(md);
	mutex_lock(&md->mutex);

	if (!md->enabled) {
		mutex_unlock(&md->mutex);
		return;
	}
	md->saved_bklight_level = mipid_get_bklight_level(panel);
	mipid_set_bklight_level(panel, 0);
	set_display_state(md, 0);
	set_sleep_mode(md, 1);
	md->enabled = 0;

	mutex_unlock(&md->mutex);
}
/*
 * Determine whether the bootloader left the panel running: read the
 * 32-bit display status word (big-endian on the wire) and require both
 * bit 17 and bit 10 to be set.  Returns non-zero if enabled.
 */
static int panel_enabled(struct mipid_device *md)
{
	u32 disp_status;
	int enabled;

	mipid_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4);
	disp_status = __be32_to_cpu(disp_status);
	enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
	dev_dbg(&md->spi->dev,
		"LCD panel %senabled by bootloader (status 0x%04x)\n",
		enabled ? "" : "not ", disp_status);
	return enabled;
}
/*
 * Panel init hook: set up the ESD workqueue/state and detect whether
 * the bootloader already enabled the panel.
 */
static int mipid_init(struct lcd_panel *panel,
		      struct omapfb_device *fbdev)
{
	struct mipid_device *md = to_mipid_device(panel);

	md->fbdev = fbdev;
	md->esd_wq = create_singlethread_workqueue("mipid_esd");
	if (!md->esd_wq) {
		dev_err(&md->spi->dev, "can't create ESD workqueue\n");
		return -ENOMEM;
	}
	INIT_DELAYED_WORK(&md->esd_work, mipid_esd_work);
	mutex_init(&md->mutex);

	md->enabled = panel_enabled(md);

	if (!md->enabled)
		md->saved_bklight_level = mipid_get_bklight_level(panel);
	else
		mipid_esd_start_check(md);

	return 0;
}

/* Panel teardown hook: stop the ESD checker and free its workqueue. */
static void mipid_cleanup(struct lcd_panel *panel)
{
	struct mipid_device *md = to_mipid_device(panel);

	if (md->enabled)
		mipid_esd_stop_check(md);
	destroy_workqueue(md->esd_wq);
}
/*
 * Panel descriptor registered with omapfb.  .name and .data_lines are
 * filled in by mipid_detect() once the panel type is known.
 */
static struct lcd_panel mipid_panel = {
	.config		= OMAP_LCDC_PANEL_TFT,
	.bpp		= 16,
	.x_res		= 800,
	.y_res		= 480,
	.pixel_clock	= 21940,	/* kHz (per omapfb convention -- verify) */
	.hsw		= 50,		/* horizontal sync pulse width */
	.hfp		= 20,		/* horizontal front porch */
	.hbp		= 15,		/* horizontal back porch */
	.vsw		= 2,		/* vertical sync pulse width */
	.vfp		= 1,		/* vertical front porch */
	.vbp		= 3,		/* vertical back porch */

	.init		= mipid_init,
	.cleanup	= mipid_cleanup,
	.enable		= mipid_enable,
	.disable	= mipid_disable,
	.get_caps	= mipid_get_caps,
	.set_bklight_level	= mipid_set_bklight_level,
	.get_bklight_level	= mipid_get_bklight_level,
	.get_bklight_max	= mipid_get_bklight_max,
	.run_test	= mipid_run_test,
};
/*
 * Identify the attached panel from its 3-byte display ID and fill in
 * the panel name, revision and data-line count.
 */
static int mipid_detect(struct mipid_device *md)
{
	struct mipid_platform_data *pdata = md->spi->dev.platform_data;
	u8 display_id[3];

	if (!pdata) {
		dev_err(&md->spi->dev, "missing platform data\n");
		return -ENOENT;
	}

	mipid_read(md, MIPID_CMD_READ_DISP_ID, display_id, 3);
	dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n",
		display_id[0], display_id[1], display_id[2]);

	switch (display_id[0]) {
	case 0x45:
		md->panel.name = "lph8923";
		break;
	case 0x83:
		md->panel.name = "ls041y3";
		md->esd_check = ls041y3_esd_check;
		break;
	default:
		md->panel.name = "unknown";
		dev_err(&md->spi->dev, "invalid display ID\n");
		return -ENODEV;
	}

	md->revision = display_id[1];
	md->panel.data_lines = pdata->data_lines;
	pr_info("omapfb: %s rev %02x LCD detected, %d data lines\n",
		md->panel.name, md->revision, md->panel.data_lines);

	return 0;
}
/*
 * SPI probe: allocate per-device state, detect the panel and register
 * it with omapfb.
 *
 * Fix: the original leaked @md when mipid_detect() failed -- the early
 * return skipped kfree().
 */
static int mipid_spi_probe(struct spi_device *spi)
{
	struct mipid_device *md;
	int r;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (md == NULL) {
		dev_err(&spi->dev, "out of memory\n");
		return -ENOMEM;
	}

	spi->mode = SPI_MODE_0;
	md->spi = spi;
	dev_set_drvdata(&spi->dev, md);
	md->panel = mipid_panel;

	r = mipid_detect(md);
	if (r < 0) {
		/* No usable panel: release the state we allocated. */
		kfree(md);
		return r;
	}

	omapfb_register_panel(&md->panel);
	return 0;
}
/* SPI remove: power the panel down and free the per-device state. */
static int mipid_spi_remove(struct spi_device *spi)
{
	struct mipid_device *md = dev_get_drvdata(&spi->dev);

	mipid_disable(&md->panel);
	kfree(md);

	return 0;
}
/* SPI driver glue; probe/remove handle panel detect and teardown. */
static struct spi_driver mipid_spi_driver = {
	.driver = {
		.name	= MIPID_MODULE_NAME,
		.owner	= THIS_MODULE,
	},
	.probe	= mipid_spi_probe,
	.remove	= mipid_spi_remove,
};

module_spi_driver(mipid_spi_driver);

MODULE_DESCRIPTION("MIPI display driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
allspark2020/hawx | drivers/acpi/acpi_pad.c | 4839 | 13736 | /*
* acpi_pad.c ACPI Processor Aggregator Driver
*
* Copyright (c) 2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <asm/mwait.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS	"acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
/* Notify/_OST event code sent by firmware to request CPU idling. */
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80

/* Serializes CPU-isolation state (pad_busy_cpus_bits, tsk_in_cpu, ...). */
static DEFINE_MUTEX(isolated_cpus_lock);

/* MWAIT EAX hint chosen at init; 0 means MWAIT is unusable here. */
static unsigned long power_saving_mwait_eax;

/* "detected" = hardware may stop the clock in idle;
 * "marked"   = we already told the relevant subsystem about it. */
static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
static unsigned char lapic_detected_unstable;
static unsigned char lapic_marked_unstable;
/*
 * Probe MWAIT support via CPUID and record the deepest supported
 * C-state hint in power_saving_mwait_eax.  Also note whether the TSC
 * and local APIC timer may stop in deep idle on this CPU.
 */
static void power_saving_mwait_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	if (!boot_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	/* Need the interrupt-break extension so MWAIT wakes on an IRQ
	 * even with interrupts disabled. */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return;

	/* EDX holds per-C-state sub-state counts; find the deepest
	 * C-state with at least one sub-state. */
	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		(highest_subcstate - 1);

#if defined(CONFIG_X86)
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			tsc_detected_unstable = 1;
		if (!boot_cpu_has(X86_FEATURE_ARAT))
			lapic_detected_unstable = 1;
		break;
	default:
		/* TSC & LAPIC could halt in idle */
		tsc_detected_unstable = 1;
		lapic_detected_unstable = 1;
	}
#endif
}
/* How many times each CPU has hosted an idle-injection task. */
static unsigned long cpu_weight[NR_CPUS];
/* CPU each task is currently bound to, or -1. */
static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
/* CPUs currently occupied by an idle-injection task. */
static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);

/*
 * Rebind idle-injection task @tsk_index to the least-used eligible CPU,
 * preferring CPUs whose HT siblings are not already busy.
 *
 * Fix: the original never called free_cpumask_var(), leaking the
 * temporary cpumask on every invocation when CONFIG_CPUMASK_OFFSTACK
 * is set.
 */
static void round_robin_cpu(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_var_t tmp;
	int cpu;
	unsigned long min_weight = -1;
	unsigned long uninitialized_var(preferred_cpu);

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	mutex_lock(&isolated_cpus_lock);
	cpumask_clear(tmp);
	for_each_cpu(cpu, pad_busy_cpus)
		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
	cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&isolated_cpus_lock);
		free_cpumask_var(tmp);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&isolated_cpus_lock);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));

	free_cpumask_var(tmp);
}
static void exit_round_robin(unsigned int tsk_index)
{
struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
tsk_in_cpu[tsk_index] = -1;
}
/* Fraction of time to keep the CPU idle, and how often a thread
 * migrates to a different CPU. */
static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 10; /* second */

/*
 * Body of each idle-injection kthread: periodically rebind to the
 * least-loaded CPU, then MWAIT in the deepest C-state for
 * (100 - idle_pct)% of the time and sleep for the remainder.
 *
 * Fix: the source text contained mojibake ("&param" had been mangled
 * into a paragraph sign) in the sched_setscheduler() call; restored.
 */
static int power_saving_thread(void *data)
{
	struct sched_param param = {.sched_priority = 1};
	int do_sleep;
	unsigned int tsk_index = (unsigned long)data;
	u64 last_jiffies = 0;

	sched_setscheduler(current, SCHED_RR, &param);

	while (!kthread_should_stop()) {
		int cpu;
		u64 expire_time;

		try_to_freeze();

		/* round robin to cpus */
		if (last_jiffies + round_robin_time * HZ < jiffies) {
			last_jiffies = jiffies;
			round_robin_cpu(tsk_index);
		}

		do_sleep = 0;

		expire_time = jiffies + HZ * (100 - idle_pct) / 100;

		while (!need_resched()) {
			if (tsc_detected_unstable && !tsc_marked_unstable) {
				/* TSC could halt in idle, so notify users */
				mark_tsc_unstable("TSC halts in idle");
				tsc_marked_unstable = 1;
			}
			if (lapic_detected_unstable && !lapic_marked_unstable) {
				int i;
				/* LAPIC could halt in idle, so notify users */
				for_each_online_cpu(i)
					clockevents_notify(
						CLOCK_EVT_NOTIFY_BROADCAST_ON,
						&i);
				lapic_marked_unstable = 1;
			}
			local_irq_disable();
			cpu = smp_processor_id();
			if (lapic_marked_unstable)
				clockevents_notify(
					CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
			stop_critical_timings();

			/* Arm the monitor and re-check before MWAITing
			 * to close the wakeup race. */
			__monitor((void *)&current_thread_info()->flags, 0, 0);
			smp_mb();
			if (!need_resched())
				__mwait(power_saving_mwait_eax, 1);

			start_critical_timings();
			if (lapic_marked_unstable)
				clockevents_notify(
					CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
			local_irq_enable();

			if (jiffies > expire_time) {
				do_sleep = 1;
				break;
			}
		}

		/*
		 * current sched_rt has threshold for rt task running time.
		 * When a rt task uses 95% CPU time, the rt thread will be
		 * scheduled out for 5% CPU time to not starve other tasks. But
		 * the mechanism only works when all CPUs have RT task running,
		 * as if one CPU hasn't RT task, RT task from other CPUs will
		 * borrow CPU time from this CPU and cause RT task use > 95%
		 * CPU time. To make 'avoid starvation' work, takes a nap here.
		 */
		if (do_sleep)
			schedule_timeout_killable(HZ * idle_pct / 100);
	}

	exit_round_robin(tsk_index);
	return 0;
}
/* Running idle-injection kthreads; ps_tsk_num counts the live ones. */
static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;

/*
 * Spawn one more idle-injection kthread.  Returns 0 on success or the
 * kthread_run() error code.
 *
 * Cleanup: dropped the dead "rc = -ENOMEM" initializer -- rc was
 * unconditionally overwritten before first use.
 */
static int create_power_saving_task(void)
{
	int rc;

	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
		(void *)(unsigned long)ps_tsk_num,
		"power_saving/%d", ps_tsk_num);
	rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
	if (!rc)
		ps_tsk_num++;
	else
		ps_tsks[ps_tsk_num] = NULL;

	return rc;
}
/* Stop the most recently created idle-injection kthread, if any. */
static void destroy_power_saving_task(void)
{
	if (!ps_tsk_num)
		return;

	ps_tsk_num--;
	kthread_stop(ps_tsks[ps_tsk_num]);
	ps_tsks[ps_tsk_num] = NULL;
}

/* Grow or shrink the pool of idle-injection kthreads to @num. */
static void set_power_saving_task_num(unsigned int num)
{
	while (ps_tsk_num < num) {
		if (create_power_saving_task())
			return;
	}
	while (ps_tsk_num > num)
		destroy_power_saving_task();
}
/* Force up to @num_cpus online CPUs to run an idle-injection task. */
static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
	get_online_cpus();

	/* Never ask for more tasks than there are online CPUs. */
	if (num_cpus > num_online_cpus())
		num_cpus = num_online_cpus();
	set_power_saving_task_num(num_cpus);

	put_online_cpus();
}

/* Number of CPUs currently being idled. */
static uint32_t acpi_pad_idle_cpus_num(void)
{
	return ps_tsk_num;
}
/* sysfs "rrtime": seconds between CPU migrations (valid: 1..99). */
static ssize_t acpi_pad_rrtime_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (strict_strtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	round_robin_time = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t acpi_pad_rrtime_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	/* Fix: round_robin_time is unsigned int, so use %u, not %d. */
	return scnprintf(buf, PAGE_SIZE, "%u\n", round_robin_time);
}
static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
	acpi_pad_rrtime_show,
	acpi_pad_rrtime_store);
/* sysfs "idlepct": target idle percentage (valid: 1..99). */
static ssize_t acpi_pad_idlepct_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (strict_strtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	idle_pct = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t acpi_pad_idlepct_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	/* Fix: idle_pct is unsigned int, so use %u, not %d. */
	return scnprintf(buf, PAGE_SIZE, "%u\n", idle_pct);
}
static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
	acpi_pad_idlepct_show,
	acpi_pad_idlepct_store);
/* sysfs "idlecpus": write = number of CPUs to idle; read = busy mask. */
static ssize_t acpi_pad_idlecpus_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (strict_strtoul(buf, 0, &num))
		return -EINVAL;

	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(num);
	mutex_unlock(&isolated_cpus_lock);

	return count;
}

static ssize_t acpi_pad_idlecpus_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	int n;

	/* Leave room for the trailing newline and NUL. */
	n = cpumask_scnprintf(buf, PAGE_SIZE-2, to_cpumask(pad_busy_cpus_bits));
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}
static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
	acpi_pad_idlecpus_show,
	acpi_pad_idlecpus_store);
/*
 * Create the three sysfs attributes, tearing down the ones already
 * created if a later one fails.
 */
static int acpi_pad_add_sysfs(struct acpi_device *device)
{
	if (device_create_file(&device->dev, &dev_attr_idlecpus))
		return -ENODEV;
	if (device_create_file(&device->dev, &dev_attr_idlepct))
		goto err_idlecpus;
	if (device_create_file(&device->dev, &dev_attr_rrtime))
		goto err_idlepct;
	return 0;

err_idlepct:
	device_remove_file(&device->dev, &dev_attr_idlepct);
err_idlecpus:
	device_remove_file(&device->dev, &dev_attr_idlecpus);
	return -ENODEV;
}

/* Remove all three sysfs attributes. */
static void acpi_pad_remove_sysfs(struct acpi_device *device)
{
	device_remove_file(&device->dev, &dev_attr_idlecpus);
	device_remove_file(&device->dev, &dev_attr_idlepct);
	device_remove_file(&device->dev, &dev_attr_rrtime);
}
/*
 * Query firmware (the _PUR object) for the number of CPUs that should
 * be idled.  Returns that number, or -1 on failure.
 *
 * Fix: route every post-evaluation exit through a single kfree() so
 * the ACPI-allocated buffer cannot be leaked on the zero-length early
 * return (kfree(NULL) is a no-op).
 */
static int acpi_pad_pur(acpi_handle handle)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package;
	int num = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
		return num;

	if (!buffer.length || !buffer.pointer)
		goto out;

	package = buffer.pointer;
	/* Expect a two-element package: { revision == 1, cpu count }. */
	if (package->type == ACPI_TYPE_PACKAGE &&
		package->package.count == 2 &&
		package->package.elements[0].integer.value == 1) /* rev 1 */
		num = package->package.elements[1].integer.value;

out:
	kfree(buffer.pointer);
	return num;
}
/* Notify firmware how many CPUs are idle */
static void acpi_pad_ost(acpi_handle handle, int stat,
	uint32_t idle_cpus)
{
	/* _OST arguments: source event, status, and a 4-byte buffer
	 * carrying the current idle-CPU count. */
	union acpi_object params[3] = {
		{.type = ACPI_TYPE_INTEGER,},
		{.type = ACPI_TYPE_INTEGER,},
		{.type = ACPI_TYPE_BUFFER,},
	};
	struct acpi_object_list arg_list = {3, params};

	params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
	params[1].integer.value = stat;
	params[2].buffer.length = 4;
	params[2].buffer.pointer = (void *)&idle_cpus;
	/* Best-effort: result of _OST is deliberately ignored. */
	acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
}
/* Ask firmware how many CPUs to idle, apply it, and report back. */
static void acpi_pad_handle_notify(acpi_handle handle)
{
	int num_cpus;
	uint32_t idle_cpus;

	mutex_lock(&isolated_cpus_lock);
	num_cpus = acpi_pad_pur(handle);
	if (num_cpus >= 0) {
		acpi_pad_idle_cpus(num_cpus);
		idle_cpus = acpi_pad_idle_cpus_num();
		acpi_pad_ost(handle, 0, idle_cpus);
	}
	mutex_unlock(&isolated_cpus_lock);
}

/* ACPI notify callback; only the aggregator event is supported. */
static void acpi_pad_notify(acpi_handle handle, u32 event,
	void *data)
{
	struct acpi_device *device = data;

	if (event != ACPI_PROCESSOR_AGGREGATOR_NOTIFY) {
		printk(KERN_WARNING "Unsupported event [0x%x]\n", event);
		return;
	}

	acpi_pad_handle_notify(handle);
	acpi_bus_generate_proc_event(device, event, 0);
	acpi_bus_generate_netlink_event(device->pnp.device_class,
		dev_name(&device->dev), event, 0);
}
/* Device add: name the device, create sysfs, hook the notify handler. */
static int acpi_pad_add(struct acpi_device *device)
{
	acpi_status status;

	strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);

	if (acpi_pad_add_sysfs(device))
		return -ENODEV;

	status = acpi_install_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
	if (ACPI_FAILURE(status)) {
		/* Undo sysfs creation on failure. */
		acpi_pad_remove_sysfs(device);
		return -ENODEV;
	}

	return 0;
}

/* Device remove: release all idled CPUs, then detach handler/sysfs. */
static int acpi_pad_remove(struct acpi_device *device,
	int type)
{
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(0);
	mutex_unlock(&isolated_cpus_lock);

	acpi_remove_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify);
	acpi_pad_remove_sysfs(device);
	return 0;
}
/* ACPI HID for the Processor Aggregator device. */
static const struct acpi_device_id pad_device_ids[] = {
	{"ACPI000C", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pad_device_ids);

static struct acpi_driver acpi_pad_driver = {
	.name = "processor_aggregator",
	.class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
	.ids = pad_device_ids,
	.ops = {
		.add = acpi_pad_add,
		.remove = acpi_pad_remove,
	},
};
/* Module init: usable MWAIT hint is a hard requirement. */
static int __init acpi_pad_init(void)
{
	power_saving_mwait_init();
	if (!power_saving_mwait_eax)
		return -EINVAL;

	return acpi_bus_register_driver(&acpi_pad_driver);
}

static void __exit acpi_pad_exit(void)
{
	acpi_bus_unregister_driver(&acpi_pad_driver);
}
module_init(acpi_pad_init);
module_exit(acpi_pad_exit);
MODULE_AUTHOR("Shaohua Li<shaohua.li@intel.com>");
MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mmontuori/tegra-olympus | drivers/isdn/hisax/teles3.c | 4839 | 13614 | /* $Id: teles3.c,v 2.19.2.4 2004/01/13 23:48:39 keil Exp $
*
* low level stuff for Teles 16.3 & PNP isdn cards
*
* Author Karsten Keil
* Copyright by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Thanks to Jan den Ouden
* Fritz Elfert
* Beat Doebeli
*
*/
#include <linux/init.h>
#include <linux/isapnp.h>
#include "hisax.h"
#include "isac.h"
#include "hscx.h"
#include "isdnl1.h"
/* Revision string reported in the setup banner. */
static const char *teles3_revision = "$Revision: 2.19.2.4 $";

/* Thin port-I/O aliases matching the HiSax naming convention. */
#define byteout(addr,val) outb(val,addr)
#define bytein(addr) inb(addr)
/*
 * Low-level register/FIFO accessors.  Registers live at (base + off)
 * in I/O space; FIFOs are transferred with string I/O.
 *
 * Consistency: write_fifo was plain 'static' while its three siblings
 * were 'static inline'; made uniform (behavior unchanged).
 */
static inline u_char
readreg(unsigned int adr, u_char off)
{
	return bytein(adr + off);
}

static inline void
writereg(unsigned int adr, u_char off, u_char data)
{
	byteout(adr + off, data);
}

static inline void
read_fifo(unsigned int adr, u_char *data, int size)
{
	insb(adr, data, size);
}

static inline void
write_fifo(unsigned int adr, u_char *data, int size)
{
	outsb(adr, data, size);
}
/* Interface functions */

/* ISAC register/FIFO accessors plugged into the IsdnCardState ops. */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
	return readreg(cs->hw.teles3.isac, offset);
}

static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writereg(cs->hw.teles3.isac, offset, value);
}

static void
ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
	read_fifo(cs->hw.teles3.isacfifo, data, size);
}

static void
WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
	write_fifo(cs->hw.teles3.isacfifo, data, size);
}

/* HSCX accessors; @hscx selects channel A (0) or B (1). */
static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
	return readreg(cs->hw.teles3.hscx[hscx], offset);
}

static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
	writereg(cs->hw.teles3.hscx[hscx], offset, value);
}
/*
 * fast interrupt HSCX stuff goes here
 */
#define READHSCX(cs, nr, reg) readreg(cs->hw.teles3.hscx[nr], reg)
#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.teles3.hscx[nr], reg, data)
#define READHSCXFIFO(cs, nr, ptr, cnt) read_fifo(cs->hw.teles3.hscxfifo[nr], ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) write_fifo(cs->hw.teles3.hscxfifo[nr], ptr, cnt)

/* hscx_irq.c is shared code that relies on the four macros above. */
#include "hscx_irq.c"
/*
 * Card interrupt handler.  Services HSCX then ISAC interrupt status,
 * re-polling up to MAXCOUNT times because either chip may raise a new
 * interrupt while the previous one is being handled, then masks and
 * unmasks all interrupt sources to re-arm the card.
 */
static irqreturn_t
teles3_interrupt(int intno, void *dev_id)
{
#define MAXCOUNT 5
	struct IsdnCardState *cs = dev_id;
	u_char val;
	u_long flags;
	int count = 0;

	spin_lock_irqsave(&cs->lock, flags);
	val = readreg(cs->hw.teles3.hscx[1], HSCX_ISTA);
      Start_HSCX:
	if (val)
		hscx_int_main(cs, val);
	val = readreg(cs->hw.teles3.isac, ISAC_ISTA);
      Start_ISAC:
	if (val)
		isac_interrupt(cs, val);
	count++;
	/* Re-check both chips in case new interrupts arrived. */
	val = readreg(cs->hw.teles3.hscx[1], HSCX_ISTA);
	if (val && count < MAXCOUNT) {
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "HSCX IntStat after IntRoutine");
		goto Start_HSCX;
	}
	val = readreg(cs->hw.teles3.isac, ISAC_ISTA);
	if (val && count < MAXCOUNT) {
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ISAC IntStat after IntRoutine");
		goto Start_ISAC;
	}
	if (count >= MAXCOUNT)
		printk(KERN_WARNING "Teles3: more than %d loops in teles3_interrupt\n", count);
	/* Mask then unmask all sources to re-trigger pending IRQs. */
	writereg(cs->hw.teles3.hscx[0], HSCX_MASK, 0xFF);
	writereg(cs->hw.teles3.hscx[1], HSCX_MASK, 0xFF);
	writereg(cs->hw.teles3.isac, ISAC_MASK, 0xFF);
	writereg(cs->hw.teles3.isac, ISAC_MASK, 0x0);
	writereg(cs->hw.teles3.hscx[0], HSCX_MASK, 0x0);
	writereg(cs->hw.teles3.hscx[1], HSCX_MASK, 0x0);
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
/*
 * Release the 32-byte ISAC/HSCX register windows selected by @mask
 * (bit 0 = ISAC, bit 1 = HSCX A, bit 2 = HSCX B).
 */
static inline void
release_ioregs(struct IsdnCardState *cs, int mask)
{
	unsigned int base[3];
	int i;

	base[0] = cs->hw.teles3.isac;
	base[1] = cs->hw.teles3.hscx[0];
	base[2] = cs->hw.teles3.hscx[1];

	for (i = 0; i < 3; i++)
		if (mask & (1 << i))
			release_region(base[i] + 32, 32);
}

/* Release every I/O region claimed by setup_teles3() for this card. */
static void
release_io_teles3(struct IsdnCardState *cs)
{
	if (cs->typ == ISDN_CTYPE_TELESPCMCIA) {
		release_region(cs->hw.teles3.hscx[1], 96);
		return;
	}
	if (cs->hw.teles3.cfg_reg) {
		/* Compaq cards claim one byte, the rest claim eight. */
		if (cs->typ == ISDN_CTYPE_COMPAQ_ISA)
			release_region(cs->hw.teles3.cfg_reg, 1);
		else
			release_region(cs->hw.teles3.cfg_reg, 8);
	}
	release_ioregs(cs, 0x7);
}
/*
 * Reset the card and program the IRQ routing (card-type specific).
 * Returns 0 on success, 1 when the configured IRQ is not supported by
 * the hardware.  PCMCIA variants need no reset at all.
 */
static int
reset_teles3(struct IsdnCardState *cs)
{
	u_char irqcfg;

	if (cs->typ != ISDN_CTYPE_TELESPCMCIA) {
		if ((cs->hw.teles3.cfg_reg) && (cs->typ != ISDN_CTYPE_COMPAQ_ISA)) {
			/* Map the Linux IRQ number onto the card's
			 * IRQ-select bits. */
			switch (cs->irq) {
				case 2:
				case 9:
					irqcfg = 0x00;
					break;
				case 3:
					irqcfg = 0x02;
					break;
				case 4:
					irqcfg = 0x04;
					break;
				case 5:
					irqcfg = 0x06;
					break;
				case 10:
					irqcfg = 0x08;
					break;
				case 11:
					irqcfg = 0x0A;
					break;
				case 12:
					irqcfg = 0x0C;
					break;
				case 15:
					irqcfg = 0x0E;
					break;
				default:
					return(1);
			}
			/* Pulse the reset bit with the IRQ config held. */
			byteout(cs->hw.teles3.cfg_reg + 4, irqcfg);
			HZDELAY(HZ / 10 + 1);
			byteout(cs->hw.teles3.cfg_reg + 4, irqcfg | 1);
			HZDELAY(HZ / 10 + 1);
		} else if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
			byteout(cs->hw.teles3.cfg_reg, 0xff);
			HZDELAY(2);
			byteout(cs->hw.teles3.cfg_reg, 0x00);
			HZDELAY(2);
		} else {
			/* Reset off for 16.3 PnP , thanks to Georg Acher */
			byteout(cs->hw.teles3.isac + 0x3c, 0);
			HZDELAY(2);
			byteout(cs->hw.teles3.isac + 0x3c, 1);
			HZDELAY(2);
		}
	}
	return(0);
}
/* Card-control dispatcher invoked by the HiSax core.  Always 0. */
static int
Teles_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	switch (mt) {
	case CARD_RESET:
		spin_lock_irqsave(&cs->lock, flags);
		reset_teles3(cs);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case CARD_RELEASE:
		release_io_teles3(cs);
		break;
	case CARD_INIT:
		spin_lock_irqsave(&cs->lock, flags);
		inithscxisac(cs, 3);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case CARD_TEST:
	default:
		break;
	}
	return (0);
}
#ifdef __ISAPNP__
/* ISAPnP IDs of the supported PnP variants; driver_data carries the
 * human-readable card name used in probe messages. */
static struct isapnp_device_id teles_ids[] __devinitdata = {
	{ ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2110),
	  ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2110),
	  (unsigned long) "Teles 16.3 PnP" },
	{ ISAPNP_VENDOR('C', 'T', 'X'), ISAPNP_FUNCTION(0x0),
	  ISAPNP_VENDOR('C', 'T', 'X'), ISAPNP_FUNCTION(0x0),
	  (unsigned long) "Creatix 16.3 PnP" },
	{ ISAPNP_VENDOR('C', 'P', 'Q'), ISAPNP_FUNCTION(0x1002),
	  ISAPNP_VENDOR('C', 'P', 'Q'), ISAPNP_FUNCTION(0x1002),
	  (unsigned long) "Compaq ISDN S0" },
	{ 0, }
};

/* Probe cursors: advance through teles_ids / found PnP cards. */
static struct isapnp_device_id *ipid __devinitdata = &teles_ids[0];
static struct pnp_card *pnp_c __devinitdata = NULL;
#endif
/*
 * Probe and initialize a Teles 16.3 / PnP / PCMCIA / Compaq card.
 * Resolves I/O addresses (via ISAPnP when no address was given),
 * claims the I/O regions, verifies the card's ID bytes, resets it and
 * wires up the IsdnCardState callbacks.  Returns 1 on success, 0 on
 * any failure (all claimed regions are released on the error paths).
 */
int __devinit
setup_teles3(struct IsdnCard *card)
{
	u_char val;
	struct IsdnCardState *cs = card->cs;
	char tmp[64];

	strcpy(tmp, teles3_revision);
	printk(KERN_INFO "HiSax: Teles IO driver Rev. %s\n", HiSax_getrev(tmp));
	if ((cs->typ != ISDN_CTYPE_16_3) && (cs->typ != ISDN_CTYPE_PNP)
	    && (cs->typ != ISDN_CTYPE_TELESPCMCIA) && (cs->typ != ISDN_CTYPE_COMPAQ_ISA))
		return (0);

#ifdef __ISAPNP__
	/* No base address configured: scan ISAPnP for a known card and
	 * take the I/O ports and IRQ from its resources. */
	if (!card->para[1] && isapnp_present()) {
		struct pnp_dev *pnp_d;
		while(ipid->card_vendor) {
			if ((pnp_c = pnp_find_card(ipid->card_vendor,
				ipid->card_device, pnp_c))) {
				pnp_d = NULL;
				if ((pnp_d = pnp_find_dev(pnp_c,
					ipid->vendor, ipid->function, pnp_d))) {
					int err;

					printk(KERN_INFO "HiSax: %s detected\n",
						(char *)ipid->driver_data);
					pnp_disable_dev(pnp_d);
					err = pnp_activate_dev(pnp_d);
					if (err<0) {
						printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
							__func__, err);
						return(0);
					}
					card->para[3] = pnp_port_start(pnp_d, 2);
					card->para[2] = pnp_port_start(pnp_d, 1);
					card->para[1] = pnp_port_start(pnp_d, 0);
					card->para[0] = pnp_irq(pnp_d, 0);
					if (!card->para[0] || !card->para[1] || !card->para[2]) {
						printk(KERN_ERR "Teles PnP:some resources are missing %ld/%lx/%lx\n",
							card->para[0], card->para[1], card->para[2]);
						pnp_disable_dev(pnp_d);
						return(0);
					}
					break;
				} else {
					printk(KERN_ERR "Teles PnP: PnP error card found, no device\n");
				}
			}
			ipid++;
			pnp_c = NULL;
		}
		if (!ipid->card_vendor) {
			printk(KERN_INFO "Teles PnP: no ISAPnP card found\n");
			return(0);
		}
	}
#endif
	/* Derive the ISAC/HSCX base addresses from the configured base;
	 * the offsets differ per card family. */
	if (cs->typ == ISDN_CTYPE_16_3) {
		cs->hw.teles3.cfg_reg = card->para[1];
		switch (cs->hw.teles3.cfg_reg) {
			case 0x180:
			case 0x280:
			case 0x380:
				cs->hw.teles3.cfg_reg |= 0xc00;
				break;
		}
		cs->hw.teles3.isac = cs->hw.teles3.cfg_reg - 0x420;
		cs->hw.teles3.hscx[0] = cs->hw.teles3.cfg_reg - 0xc20;
		cs->hw.teles3.hscx[1] = cs->hw.teles3.cfg_reg - 0x820;
	} else if (cs->typ == ISDN_CTYPE_TELESPCMCIA) {
		cs->hw.teles3.cfg_reg = 0;
		cs->hw.teles3.hscx[0] = card->para[1] - 0x20;
		cs->hw.teles3.hscx[1] = card->para[1];
		cs->hw.teles3.isac = card->para[1] + 0x20;
	} else if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
		cs->hw.teles3.cfg_reg = card->para[3];
		cs->hw.teles3.isac = card->para[2] - 32;
		cs->hw.teles3.hscx[0] = card->para[1] - 32;
		cs->hw.teles3.hscx[1] = card->para[1];
	} else {	/* PNP */
		cs->hw.teles3.cfg_reg = 0;
		cs->hw.teles3.isac = card->para[1] - 32;
		cs->hw.teles3.hscx[0] = card->para[2] - 32;
		cs->hw.teles3.hscx[1] = card->para[2];
	}
	cs->irq = card->para[0];
	/* FIFO registers sit at a fixed 0x3e offset from each chip. */
	cs->hw.teles3.isacfifo = cs->hw.teles3.isac + 0x3e;
	cs->hw.teles3.hscxfifo[0] = cs->hw.teles3.hscx[0] + 0x3e;
	cs->hw.teles3.hscxfifo[1] = cs->hw.teles3.hscx[1] + 0x3e;
	/* Claim the I/O regions; undo previous claims on failure. */
	if (cs->typ == ISDN_CTYPE_TELESPCMCIA) {
		if (!request_region(cs->hw.teles3.hscx[1], 96, "HiSax Teles PCMCIA")) {
			printk(KERN_WARNING
			       "HiSax: %s ports %x-%x already in use\n",
			       CardType[cs->typ],
			       cs->hw.teles3.hscx[1],
			       cs->hw.teles3.hscx[1] + 96);
			return (0);
		}
		cs->irq_flags |= IRQF_SHARED; /* cardbus can share */
	} else {
		if (cs->hw.teles3.cfg_reg) {
			if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
				if (!request_region(cs->hw.teles3.cfg_reg, 1, "teles3 cfg")) {
					printk(KERN_WARNING
						"HiSax: %s config port %x already in use\n",
						CardType[card->typ],
						cs->hw.teles3.cfg_reg);
					return (0);
				}
			} else {
				if (!request_region(cs->hw.teles3.cfg_reg, 8, "teles3 cfg")) {
					printk(KERN_WARNING
					       "HiSax: %s config port %x-%x already in use\n",
					       CardType[card->typ],
					       cs->hw.teles3.cfg_reg,
						cs->hw.teles3.cfg_reg + 8);
					return (0);
				}
			}
		}
		if (!request_region(cs->hw.teles3.isac + 32, 32, "HiSax isac")) {
			printk(KERN_WARNING
			       "HiSax: %s isac ports %x-%x already in use\n",
			       CardType[cs->typ],
			       cs->hw.teles3.isac + 32,
			       cs->hw.teles3.isac + 64);
			if (cs->hw.teles3.cfg_reg) {
				if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
					release_region(cs->hw.teles3.cfg_reg, 1);
				} else {
					release_region(cs->hw.teles3.cfg_reg, 8);
				}
			}
			return (0);
		}
		if (!request_region(cs->hw.teles3.hscx[0] + 32, 32, "HiSax hscx A")) {
			printk(KERN_WARNING
			       "HiSax: %s hscx A ports %x-%x already in use\n",
			       CardType[cs->typ],
			       cs->hw.teles3.hscx[0] + 32,
			       cs->hw.teles3.hscx[0] + 64);
			if (cs->hw.teles3.cfg_reg) {
				if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
					release_region(cs->hw.teles3.cfg_reg, 1);
				} else {
					release_region(cs->hw.teles3.cfg_reg, 8);
				}
			}
			release_ioregs(cs, 1);
			return (0);
		}
		if (!request_region(cs->hw.teles3.hscx[1] + 32, 32, "HiSax hscx B")) {
			printk(KERN_WARNING
			       "HiSax: %s hscx B ports %x-%x already in use\n",
			       CardType[cs->typ],
			       cs->hw.teles3.hscx[1] + 32,
			       cs->hw.teles3.hscx[1] + 64);
			if (cs->hw.teles3.cfg_reg) {
				if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
					release_region(cs->hw.teles3.cfg_reg, 1);
				} else {
					release_region(cs->hw.teles3.cfg_reg, 8);
				}
			}
			release_ioregs(cs, 3);
			return (0);
		}
	}
	/* Sanity-check the 16.3 ID bytes before touching the chips. */
	if ((cs->hw.teles3.cfg_reg) && (cs->typ != ISDN_CTYPE_COMPAQ_ISA)) {
		if ((val = bytein(cs->hw.teles3.cfg_reg + 0)) != 0x51) {
			printk(KERN_WARNING "Teles: 16.3 Byte at %x is %x\n",
			       cs->hw.teles3.cfg_reg + 0, val);
			release_io_teles3(cs);
			return (0);
		}
		if ((val = bytein(cs->hw.teles3.cfg_reg + 1)) != 0x93) {
			printk(KERN_WARNING "Teles: 16.3 Byte at %x is %x\n",
			       cs->hw.teles3.cfg_reg + 1, val);
			release_io_teles3(cs);
			return (0);
		}
		val = bytein(cs->hw.teles3.cfg_reg + 2);/* 0x1e=without AB
							 * 0x1f=with AB
							 * 0x1c 16.3 ???
							 * 0x39 16.3 1.1
							 * 0x38 16.3 1.3
							 * 0x46 16.3 with AB + Video (Teles-Vision)
							 */
		if (val != 0x46 && val != 0x39 && val != 0x38 && val != 0x1c && val != 0x1e && val != 0x1f) {
			printk(KERN_WARNING "Teles: 16.3 Byte at %x is %x\n",
			       cs->hw.teles3.cfg_reg + 2, val);
			release_io_teles3(cs);
			return (0);
		}
	}
	printk(KERN_INFO
	       "HiSax: %s config irq:%d isac:0x%X  cfg:0x%X\n",
	       CardType[cs->typ], cs->irq,
	       cs->hw.teles3.isac + 32, cs->hw.teles3.cfg_reg);
	printk(KERN_INFO
	       "HiSax: hscx A:0x%X  hscx B:0x%X\n",
	       cs->hw.teles3.hscx[0] + 32, cs->hw.teles3.hscx[1] + 32);

	setup_isac(cs);
	if (reset_teles3(cs)) {
		printk(KERN_WARNING "Teles3: wrong IRQ\n");
		release_io_teles3(cs);
		return (0);
	}
	/* Hook the generic HiSax callbacks to this card's accessors. */
	cs->readisac = &ReadISAC;
	cs->writeisac = &WriteISAC;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;
	cs->BC_Read_Reg = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &Teles_card_msg;
	cs->irq_func = &teles3_interrupt;
	ISACVersion(cs, "Teles3:");
	if (HscxVersion(cs, "Teles3:")) {
		printk(KERN_WARNING
		       "Teles3: wrong HSCX versions check IO address\n");
		release_io_teles3(cs);
		return (0);
	}
	return (1);
}
| gpl-2.0 |
hakcenter/android_kernel_samsung_hlte | drivers/media/video/ivtv/ivtv-gpio.c | 8679 | 10704 | /*
gpio functions.
Merging GPIO support into driver:
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "ivtv-driver.h"
#include "ivtv-cards.h"
#include "ivtv-gpio.h"
#include "tuner-xc2028.h"
#include <media/tuner.h>
#include <media/v4l2-ctrls.h>
/*
* GPIO assignment of Yuan MPG600/MPG160
*
* bit 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0
* OUTPUT IN1 IN0 AM3 AM2 AM1 AM0
* INPUT DM1 DM0
*
* IN* : Input selection
* IN1 IN0
* 1 1 N/A
* 1 0 Line
* 0 1 N/A
* 0 0 Tuner
*
* AM* : Audio Mode
* AM3 0: Normal 1: Mixed(Sub+Main channel)
* AM2 0: Subchannel 1: Main channel
* AM1 0: Stereo 1: Mono
* AM0 0: Normal 1: Mute
*
* DM* : Detected tuner audio Mode
* DM1 0: Stereo 1: Mono
* DM0 0: Multiplex 1: Normal
*
* GPIO Initial Settings
* MPG600 MPG160
* DIR 0x3080 0x7080
* OUTPUT 0x000C 0x400C
*
* Special thanks to Makoto Iguchi <iguchi@tahoo.org> and Mr. Anonymous
* for analyzing GPIO of MPG160.
*
*****************************************************************************
*
* GPIO assignment of Avermedia M179 (per information direct from AVerMedia)
*
* bit 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0
* OUTPUT IN0 AM0 IN1 AM1 AM2 IN2 BR0 BR1
* INPUT
*
* IN* : Input selection
* IN0 IN1 IN2
* * 1 * Mute
* 0 0 0 Line-In
* 1 0 0 TV Tuner Audio
* 0 0 1 FM Audio
* 1 0 1 Mute
*
* AM* : Audio Mode
* AM0 AM1 AM2
* 0 0 0 TV Tuner Audio: L_OUT=(L+R)/2, R_OUT=SAP
* 0 0 1 TV Tuner Audio: L_OUT=R_OUT=SAP (SAP)
* 0 1 0 TV Tuner Audio: L_OUT=L, R_OUT=R (stereo)
* 0 1 1 TV Tuner Audio: mute
* 1 * * TV Tuner Audio: L_OUT=R_OUT=(L+R)/2 (mono)
*
* BR* : Audio Sample Rate (BR stands for bitrate for some reason)
* BR0 BR1
* 0 0 32 kHz
* 0 1 44.1 kHz
* 1 0 48 kHz
*
* DM* : Detected tuner audio Mode
* Unknown currently
*
* Special thanks to AVerMedia Technologies, Inc. and Jiun-Kuei Jung at
* AVerMedia for providing the GPIO information used to add support
* for the M179 cards.
*/
/********************* GPIO stuffs *********************/

/* GPIO registers (offsets into the card's register window). */
#define IVTV_REG_GPIO_IN    0x9008
#define IVTV_REG_GPIO_OUT   0x900c
#define IVTV_REG_GPIO_DIR   0x9020
/*
 * Pulse the GPIO lines that reset the IR chip on the PVR-150.
 * No-op on every other card type.
 */
void ivtv_reset_ir_gpio(struct ivtv *itv)
{
	int dir, out;

	/* Only the PVR-150 wires its IR chip to these GPIO pins. */
	if (itv->card->type != IVTV_CARD_PVR_150)
		return;
	IVTV_DEBUG_INFO("Resetting PVR150 IR\n");
	out = read_reg(IVTV_REG_GPIO_OUT);
	dir = read_reg(IVTV_REG_GPIO_DIR);
	/* Drive pin 7 as an output while toggling the reset sequence. */
	write_reg(dir | 0x80, IVTV_REG_GPIO_DIR);
	out = (out & ~0xF) | 1;
	write_reg(out, IVTV_REG_GPIO_OUT);
	/* We could use something else for smaller time */
	schedule_timeout_interruptible(msecs_to_jiffies(1));
	out |= 2;
	write_reg(out, IVTV_REG_GPIO_OUT);
	/* Switch pin 7 back to input (direction bit cleared). */
	write_reg(dir & ~0x80, IVTV_REG_GPIO_DIR);
}
/* Xceive tuner reset function */
int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
{
	struct i2c_algo_bit_data *algo = dev;
	struct ivtv *itv = algo->data;
	u32 mask, out;

	if (cmd != XC2028_TUNER_RESET)
		return 0;
	IVTV_DEBUG_INFO("Resetting tuner\n");
	mask = 1 << itv->card->xceive_pin;
	out = read_reg(IVTV_REG_GPIO_OUT);
	/* Pull the reset line low, hold for ~1 ms, then raise it again. */
	write_reg(out & ~mask, IVTV_REG_GPIO_OUT);
	schedule_timeout_interruptible(msecs_to_jiffies(1));
	write_reg(out | mask, IVTV_REG_GPIO_OUT);
	schedule_timeout_interruptible(msecs_to_jiffies(1));
	return 0;
}
/* Map a v4l2_subdev embedded in struct ivtv back to its ivtv container. */
static inline struct ivtv *sd_to_ivtv(struct v4l2_subdev *sd)
{
	return container_of(sd, struct ivtv, sd_gpio);
}
/* Map a control's handler (embedded in struct ivtv) to the GPIO subdev. */
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
	return &container_of(ctrl->handler, struct ivtv, hdl_gpio)->sd_gpio;
}
/*
 * Program the GPIO pin pattern for the requested audio sample rate.
 * Unlisted rates fall back to the 48 kHz setting.
 */
static int subdev_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
{
	struct ivtv *itv = sd_to_ivtv(sd);
	u16 mask = itv->card->gpio_audio_freq.mask;
	u16 data;

	if (freq == 32000)
		data = itv->card->gpio_audio_freq.f32000;
	else if (freq == 44100)
		data = itv->card->gpio_audio_freq.f44100;
	else
		data = itv->card->gpio_audio_freq.f48000;
	/* A zero mask means this card has no rate-select GPIO pins. */
	if (mask)
		write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
	return 0;
}
/*
 * Report the receivable audio subchannels based on the tuner-detect
 * GPIO input pins.  A card without detect pins (mask == 0) always
 * claims the full stereo/bilingual set.
 */
static int subdev_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
	struct ivtv *itv = sd_to_ivtv(sd);
	u16 mask = itv->card->gpio_audio_detect.mask;

	vt->rxsubchans = (mask == 0 || (read_reg(IVTV_REG_GPIO_IN) & mask))
		? V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_LANG1 |
		  V4L2_TUNER_SUB_LANG2
		: V4L2_TUNER_SUB_MONO;
	return 0;
}
/* Program the GPIO audio-mode pins from the requested tuner audmode. */
static int subdev_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
	struct ivtv *itv = sd_to_ivtv(sd);
	u16 mask = itv->card->gpio_audio_mode.mask;
	u16 data;

	if (vt->audmode == V4L2_TUNER_MODE_LANG1)
		data = itv->card->gpio_audio_mode.lang1;
	else if (vt->audmode == V4L2_TUNER_MODE_LANG2)
		data = itv->card->gpio_audio_mode.lang2;
	else if (vt->audmode == V4L2_TUNER_MODE_MONO)
		data = itv->card->gpio_audio_mode.mono;
	else
		/* STEREO, LANG1_LANG2 and unknown modes all select stereo. */
		data = itv->card->gpio_audio_mode.stereo;
	if (mask)
		write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
	return 0;
}
/* Switch the GPIO audio-input pins to the radio (FM) source. */
static int subdev_s_radio(struct v4l2_subdev *sd)
{
	struct ivtv *itv = sd_to_ivtv(sd);
	u16 mask = itv->card->gpio_audio_input.mask;
	u16 data = itv->card->gpio_audio_input.radio;

	/* Nothing to do when the card has no input-select GPIO pins. */
	if (mask)
		write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
	return 0;
}
/*
 * Route the audio input via GPIO.
 * input 0: tuner, 1: line-in, 2: radio; anything else is rejected.
 */
static int subdev_s_audio_routing(struct v4l2_subdev *sd,
				  u32 input, u32 output, u32 config)
{
	struct ivtv *itv = sd_to_ivtv(sd);
	u16 mask, data;

	if (input > 2)
		return -EINVAL;
	mask = itv->card->gpio_audio_input.mask;
	if (input == 0)
		data = itv->card->gpio_audio_input.tuner;
	else if (input == 1)
		data = itv->card->gpio_audio_input.linein;
	else
		data = itv->card->gpio_audio_input.radio;
	if (mask)
		write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
	return 0;
}
/*
 * Apply a V4L2 control change.  Only V4L2_CID_AUDIO_MUTE is backed by
 * GPIO here; every other control id is rejected with -EINVAL.
 */
static int subdev_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_sd(ctrl);
	struct ivtv *itv = sd_to_ivtv(sd);
	u16 mask, data;

	if (ctrl->id != V4L2_CID_AUDIO_MUTE)
		return -EINVAL;
	mask = itv->card->gpio_audio_mute.mask;
	data = ctrl->val ? itv->card->gpio_audio_mute.mute : 0;
	if (mask)
		write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) |
			  (data & mask), IVTV_REG_GPIO_OUT);
	return 0;
}
/* Dump current GPIO register state and control values to the kernel log. */
static int subdev_log_status(struct v4l2_subdev *sd)
{
	struct ivtv *itv = sd_to_ivtv(sd);

	IVTV_INFO("GPIO status: DIR=0x%04x OUT=0x%04x IN=0x%04x\n",
		  read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT),
		  read_reg(IVTV_REG_GPIO_IN));
	v4l2_ctrl_handler_log_status(&itv->hdl_gpio, sd->name);
	return 0;
}
/* Route the video input (0:Tuner 1:Composite 2:S-Video) via GPIO. */
static int subdev_s_video_routing(struct v4l2_subdev *sd,
				  u32 input, u32 output, u32 config)
{
	struct ivtv *itv = sd_to_ivtv(sd);
	u16 mask = itv->card->gpio_video_input.mask;
	u16 data;

	switch (input) {
	case 0:
		data = itv->card->gpio_video_input.tuner;
		break;
	case 1:
		data = itv->card->gpio_video_input.composite;
		break;
	case 2:
		data = itv->card->gpio_video_input.svideo;
		break;
	default:
		return -EINVAL;
	}
	if (mask)
		write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
	return 0;
}
/* Control ops: route v4l2 control changes to the GPIO mute handler. */
static const struct v4l2_ctrl_ops gpio_ctrl_ops = {
	.s_ctrl = subdev_s_ctrl,
};

/* Core ops: status logging plus standard control-handler plumbing. */
static const struct v4l2_subdev_core_ops subdev_core_ops = {
	.log_status = subdev_log_status,
	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
	.g_ctrl = v4l2_subdev_g_ctrl,
	.s_ctrl = v4l2_subdev_s_ctrl,
	.queryctrl = v4l2_subdev_queryctrl,
	.querymenu = v4l2_subdev_querymenu,
};

static const struct v4l2_subdev_tuner_ops subdev_tuner_ops = {
	.s_radio = subdev_s_radio,
	.g_tuner = subdev_g_tuner,
	.s_tuner = subdev_s_tuner,
};

static const struct v4l2_subdev_audio_ops subdev_audio_ops = {
	.s_clock_freq = subdev_s_clock_freq,
	.s_routing = subdev_s_audio_routing,
};

static const struct v4l2_subdev_video_ops subdev_video_ops = {
	.s_routing = subdev_s_video_routing,
};

/* Aggregate ops table registered for the GPIO subdevice. */
static const struct v4l2_subdev_ops subdev_ops = {
	.core = &subdev_core_ops,
	.tuner = &subdev_tuner_ops,
	.audio = &subdev_audio_ops,
	.video = &subdev_video_ops,
};
/*
 * Set up the card's GPIO pins and register the GPIO v4l2 subdevice.
 *
 * Programs the card-specific initial output values and pin directions,
 * then registers a subdevice exposing the GPIO-backed controls.
 * Returns 0 on success or a negative error code.
 */
int ivtv_gpio_init(struct ivtv *itv)
{
	u16 pin = 0;

	/* The Xceive tuner reset line, if present, is driven as an output. */
	if (itv->card->xceive_pin)
		pin = 1 << itv->card->xceive_pin;

	/* No GPIO outputs configured for this card: nothing to set up. */
	if ((itv->card->gpio_init.direction | pin) == 0)
		return 0;

	IVTV_DEBUG_INFO("GPIO initial dir: %08x out: %08x\n",
			read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT));

	/* init output data then direction */
	write_reg(itv->card->gpio_init.initial_value | pin, IVTV_REG_GPIO_OUT);
	write_reg(itv->card->gpio_init.direction | pin, IVTV_REG_GPIO_DIR);

	v4l2_subdev_init(&itv->sd_gpio, &subdev_ops);
	snprintf(itv->sd_gpio.name, sizeof(itv->sd_gpio.name), "%s-gpio", itv->v4l2_dev.name);
	itv->sd_gpio.grp_id = IVTV_HW_GPIO;
	v4l2_ctrl_handler_init(&itv->hdl_gpio, 1);
	v4l2_ctrl_new_std(&itv->hdl_gpio, &gpio_ctrl_ops,
			V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
	/* Control creation failures are latched in the handler's error field. */
	if (itv->hdl_gpio.error)
		return itv->hdl_gpio.error;
	itv->sd_gpio.ctrl_handler = &itv->hdl_gpio;
	v4l2_ctrl_handler_setup(&itv->hdl_gpio);
	return v4l2_device_register_subdev(&itv->v4l2_dev, &itv->sd_gpio);
}
| gpl-2.0 |
synel/synergy2416-linux-kernel | net/netfilter/xt_owner.c | 9191 | 2272 | /*
* Kernel module to match various things tied to sockets associated with
* locally generated outgoing packets.
*
* (C) 2000 Marc Boucher <marc@mbsi.ca>
*
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <net/sock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_owner.h>
/*
 * Match a packet against the owner (uid/gid/socket-existence) of the
 * originating local socket.  Bits in info->match select which checks
 * run; bits also set in info->invert negate the corresponding check.
 */
static bool
owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_owner_match_info *info = par->matchinfo;
	const struct file *filp;

	/*
	 * No socket attached: every requested check "fails", so the
	 * packet matches only if all requested checks were inverted.
	 */
	if (skb->sk == NULL || skb->sk->sk_socket == NULL)
		return (info->match ^ info->invert) == 0;
	else if (info->match & info->invert & XT_OWNER_SOCKET)
		/*
		 * Socket exists but user wanted ! --socket-exists.
		 * (Single ampersands intended.)
		 */
		return false;

	filp = skb->sk->sk_socket->file;
	if (filp == NULL)
		/* Socket without a file: UID/GID checks can only pass inverted. */
		return ((info->match ^ info->invert) &
		       (XT_OWNER_UID | XT_OWNER_GID)) == 0;

	if (info->match & XT_OWNER_UID)
		/* "fsuid within [uid_min, uid_max]" XORed with the invert flag. */
		if ((filp->f_cred->fsuid >= info->uid_min &&
		    filp->f_cred->fsuid <= info->uid_max) ^
		    !(info->invert & XT_OWNER_UID))
			return false;

	if (info->match & XT_OWNER_GID)
		if ((filp->f_cred->fsgid >= info->gid_min &&
		    filp->f_cred->fsgid <= info->gid_max) ^
		    !(info->invert & XT_OWNER_GID))
			return false;

	return true;
}
/*
 * Match registration: revision 1, any address family, restricted to the
 * output-path hooks where a locally owned socket can exist.
 */
static struct xt_match owner_mt_reg __read_mostly = {
	.name       = "owner",
	.revision   = 1,
	.family     = NFPROTO_UNSPEC,
	.match      = owner_mt,
	.matchsize  = sizeof(struct xt_owner_match_info),
	.hooks      = (1 << NF_INET_LOCAL_OUT) |
	              (1 << NF_INET_POST_ROUTING),
	.me         = THIS_MODULE,
};
/* Register the "owner" match with the xtables core on module load. */
static int __init owner_mt_init(void)
{
	return xt_register_match(&owner_mt_reg);
}

/* Unregister the match on module unload. */
static void __exit owner_mt_exit(void)
{
	xt_unregister_match(&owner_mt_reg);
}

module_init(owner_mt_init);
module_exit(owner_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: socket owner matching");
MODULE_LICENSE("GPL");
/* Keep loading under the legacy ipt_/ip6t_ module names working. */
MODULE_ALIAS("ipt_owner");
MODULE_ALIAS("ip6t_owner");
dwindsor/linux-stable | drivers/firmware/efi/fake_mem.c | 232 | 6120 | /*
* fake_mem.c
*
* Copyright (C) 2015 FUJITSU LIMITED
* Author: Taku Izumi <izumi.taku@jp.fujitsu.com>
*
* This code introduces new boot option named "efi_fake_mem"
* By specifying this parameter, you can add arbitrary attribute to
* specific memory range by updating original (firmware provided) EFI
* memmap.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/types.h>
#include <linux/sort.h>
#include <asm/efi.h>
/* Upper bound on "efi_fake_mem=" entries, set via Kconfig. */
#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM

/* One parsed override: a physical range plus the attribute bits to OR in. */
struct fake_mem {
	struct range range;
	u64 attribute;
};

/* Entries parsed from the command line, sorted by ascending range start. */
static struct fake_mem fake_mems[EFI_MAX_FAKEMEM];
static int nr_fake_mem;
/* sort() comparator: order fake_mem entries by ascending range start. */
static int __init cmp_fake_mem(const void *x1, const void *x2)
{
	const struct fake_mem *a = x1;
	const struct fake_mem *b = x2;

	if (a->range.start < b->range.start)
		return -1;
	return a->range.start > b->range.start ? 1 : 0;
}
/*
 * Rebuild the EFI memory map, applying the attribute overrides parsed
 * from the "efi_fake_mem=" boot parameter (see setup_fake_mem()).
 *
 * First pass counts how many extra descriptors are needed: a fake range
 * that only partially overlaps a descriptor splits it into 2 or 3
 * pieces.  The second pass copies/splits descriptors into a freshly
 * memblock-allocated map, ORing the override attribute into exactly the
 * overlapped pieces, then swaps the new map in for the firmware one.
 */
void __init efi_fake_memmap(void)
{
	u64 start, end, m_start, m_end, m_attr;
	int new_nr_map = memmap.nr_map;
	efi_memory_desc_t *md;
	phys_addr_t new_memmap_phy;
	void *new_memmap;
	void *old, *new;
	int i;

	if (!nr_fake_mem || !efi_enabled(EFI_MEMMAP))
		return;

	/* count up the number of EFI memory descriptor */
	for (old = memmap.map; old < memmap.map_end; old += memmap.desc_size) {
		md = old;
		start = md->phys_addr;
		end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;

		for (i = 0; i < nr_fake_mem; i++) {
			/* modifying range */
			m_start = fake_mems[i].range.start;
			m_end = fake_mems[i].range.end;

			if (m_start <= start) {
				/* split into 2 parts */
				if (start < m_end && m_end < end)
					new_nr_map++;
			}
			if (start < m_start && m_start < end) {
				/* split into 3 parts */
				if (m_end < end)
					new_nr_map += 2;
				/* split into 2 parts */
				if (end <= m_end)
					new_nr_map++;
			}
		}
	}

	/* allocate memory for new EFI memmap */
	new_memmap_phy = memblock_alloc(memmap.desc_size * new_nr_map,
					PAGE_SIZE);
	if (!new_memmap_phy)
		return;

	/* create new EFI memmap */
	new_memmap = early_memremap(new_memmap_phy,
				    memmap.desc_size * new_nr_map);
	if (!new_memmap) {
		memblock_free(new_memmap_phy, memmap.desc_size * new_nr_map);
		return;
	}

	for (old = memmap.map, new = new_memmap;
	     old < memmap.map_end;
	     old += memmap.desc_size, new += memmap.desc_size) {

		/* copy original EFI memory descriptor */
		memcpy(new, old, memmap.desc_size);
		md = new;
		start = md->phys_addr;
		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;

		for (i = 0; i < nr_fake_mem; i++) {
			/* modifying range */
			m_start = fake_mems[i].range.start;
			m_end = fake_mems[i].range.end;
			m_attr = fake_mems[i].attribute;

			/* fake range fully covers this descriptor: just OR in. */
			if (m_start <= start && end <= m_end)
				md->attribute |= m_attr;

			/* fake range covers the head of the descriptor only */
			if (m_start <= start &&
			    (start < m_end && m_end < end)) {
				/* first part */
				md->attribute |= m_attr;
				md->num_pages = (m_end - md->phys_addr + 1) >>
					EFI_PAGE_SHIFT;
				/* latter part */
				new += memmap.desc_size;
				memcpy(new, old, memmap.desc_size);
				md = new;
				md->phys_addr = m_end + 1;
				md->num_pages = (end - md->phys_addr + 1) >>
					EFI_PAGE_SHIFT;
			}

			/* fake range sits strictly inside the descriptor */
			if ((start < m_start && m_start < end) && m_end < end) {
				/* first part */
				md->num_pages = (m_start - md->phys_addr) >>
					EFI_PAGE_SHIFT;
				/* middle part */
				new += memmap.desc_size;
				memcpy(new, old, memmap.desc_size);
				md = new;
				md->attribute |= m_attr;
				md->phys_addr = m_start;
				md->num_pages = (m_end - m_start + 1) >>
					EFI_PAGE_SHIFT;
				/* last part */
				new += memmap.desc_size;
				memcpy(new, old, memmap.desc_size);
				md = new;
				md->phys_addr = m_end + 1;
				md->num_pages = (end - m_end) >>
					EFI_PAGE_SHIFT;
			}

			/* fake range covers the tail of the descriptor only */
			if ((start < m_start && m_start < end) &&
			    (end <= m_end)) {
				/* first part */
				md->num_pages = (m_start - md->phys_addr) >>
					EFI_PAGE_SHIFT;
				/* latter part */
				new += memmap.desc_size;
				memcpy(new, old, memmap.desc_size);
				md = new;
				md->phys_addr = m_start;
				md->num_pages = (end - md->phys_addr + 1) >>
					EFI_PAGE_SHIFT;
				md->attribute |= m_attr;
			}
		}
	}

	/* swap into new EFI memmap */
	efi_unmap_memmap();
	memmap.map = new_memmap;
	memmap.phys_map = new_memmap_phy;
	memmap.nr_map = new_nr_map;
	memmap.map_end = memmap.map + memmap.nr_map * memmap.desc_size;
	set_bit(EFI_MEMMAP, &efi.flags);

	/* print new EFI memmap */
	efi_print_memmap();
}
/*
 * Parse the "efi_fake_mem=<size>@<base>:<attr>[,...]" boot parameter.
 *
 * Each entry records a physical range and the EFI attribute bits to OR
 * into descriptors covering it (applied later by efi_fake_memmap()).
 * At most EFI_MAX_FAKEMEM entries are accepted; the list is sorted by
 * range start so efi_fake_memmap() can walk it in address order.
 *
 * Returns 0 when the whole string was consumed, -EINVAL on malformed
 * input or too many entries.
 *
 * Fix: the pr_info() format string was missing its trailing '\n', so
 * the next printk line was appended to this message on the console.
 */
static int __init setup_fake_mem(char *p)
{
	u64 start = 0, mem_size = 0, attribute = 0;
	int i;

	if (!p)
		return -EINVAL;

	while (*p != '\0') {
		mem_size = memparse(p, &p);
		if (*p == '@')
			start = memparse(p+1, &p);
		else
			break;

		if (*p == ':')
			attribute = simple_strtoull(p+1, &p, 0);
		else
			break;

		/* Table full: stop; leftover text makes us return -EINVAL. */
		if (nr_fake_mem >= EFI_MAX_FAKEMEM)
			break;

		fake_mems[nr_fake_mem].range.start = start;
		fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
		fake_mems[nr_fake_mem].attribute = attribute;
		nr_fake_mem++;

		if (*p == ',')
			p++;
	}

	sort(fake_mems, nr_fake_mem, sizeof(struct fake_mem),
	     cmp_fake_mem, NULL);

	for (i = 0; i < nr_fake_mem; i++)
		pr_info("efi_fake_mem: add attr=0x%016llx to [mem 0x%016llx-0x%016llx]\n",
			fake_mems[i].attribute, fake_mems[i].range.start,
			fake_mems[i].range.end);

	return *p == '\0' ? 0 : -EINVAL;
}
early_param("efi_fake_mem", setup_fake_mem);
| gpl-2.0 |
exynos4-sdk/uboot | board/esd/pci405/fpgadata.c | 232 | 61898 | 0x1f,0x8b,0x08,0x08,0xcd,0x78,0x61,0x3f,0x00,0x03,0x70,0x63,0x69,0x34,0x30,0x35,
0x5f,0x31,0x5f,0x30,0x34,0x2e,0x62,0x69,0x74,0x00,0xed,0x9c,0x7f,0x78,0x14,0xc7,
0x99,0xe7,0xdf,0xae,0xee,0x11,0x2d,0xcd,0x88,0x69,0x89,0x11,0x1e,0x3b,0x32,0xdb,
0x1a,0x09,0x3c,0xc1,0x23,0x69,0x18,0x61,0x85,0x60,0x79,0xd4,0x8c,0x04,0x19,0x1b,
0xd9,0x4c,0x6c,0x27,0x61,0xf3,0xf8,0xc9,0x8e,0x1d,0x92,0x23,0x59,0x92,0x47,0x90,
0xdc,0x2e,0x49,0x9c,0x6c,0x8d,0x24,0xd0,0x00,0xb2,0x19,0x30,0x97,0xb0,0x59,0xce,
0x3b,0x60,0x12,0x0b,0x9b,0x64,0x07,0x61,0x1b,0x61,0x88,0xdd,0x92,0x85,0x23,0x40,
0x06,0x85,0xf5,0x79,0xb1,0xcd,0xe2,0xc1,0x91,0xbd,0xb2,0x23,0x63,0x19,0xb3,0x8e,
0x00,0x19,0xdd,0x5b,0xdd,0x3d,0xdd,0x2d,0xe1,0xfd,0x71,0xb7,0xf7,0xdc,0xde,0xf3,
0x9c,0xdb,0x7f,0xb8,0x50,0xa9,0xba,0xab,0xab,0xde,0xfa,0xd4,0xf7,0x7d,0xdf,0x6a,
0x41,0xbe,0x7b,0x54,0xff,0x0f,0x80,0xbb,0x1f,0xa4,0xe6,0xaf,0x7f,0x6b,0x7e,0xf0,
0x96,0x3f,0x9b,0xf7,0x67,0xc1,0xf9,0x55,0xdf,0xfd,0xfa,0x0a,0x78,0x00,0x0a,0x42,
0xdf,0x9b,0x77,0xcb,0xf7,0x57,0xcf,0x9b,0x3f,0x1f,0xbe,0x0e,0xce,0x50,0x30,0x58,
0x53,0x1d,0x5c,0x50,0x1d,0xba,0x05,0x56,0x40,0xfe,0xbc,0x05,0x0b,0x83,0xb7,0x2c,
0x9c,0x57,0x03,0xdf,0x00,0x88,0x27,0x27,0xf0,0x7a,0xe2,0xaf,0xbf,0xf4,0xcd,0x20,
0x50,0x0e,0x00,0xa6,0x05,0xb9,0x38,0xfb,0x7f,0x5e,0x90,0x93,0x39,0xa0,0xf5,0x95,
0x41,0x50,0xd9,0xbf,0xc1,0xa8,0xcf,0x0f,0x82,0x6c,0xff,0x37,0x17,0x04,0x05,0x62,
0xe0,0xf9,0x0e,0x14,0x4b,0x30,0xe5,0xe2,0x14,0x81,0xea,0xa5,0x7f,0xad,0x8e,0x4c,
0xad,0x62,0xd7,0xbf,0xa3,0xae,0xf8,0x13,0xaa,0x84,0xff,0x58,0xdd,0x84,0x04,0x71,
0xf0,0x42,0x1e,0x70,0xd4,0x2c,0xd4,0xf7,0xe9,0x75,0xaa,0x63,0x5c,0x7e,0xbd,0x68,
0x46,0x9f,0xbb,0x9f,0x8c,0xd3,0x93,0x05,0x3e,0xa5,0x70,0x8c,0x0f,0x42,0xb3,0xde,
0x23,0xe9,0x3d,0xba,0x59,0x29,0xff,0x47,0x67,0x92,0x5c,0x8d,0x6c,0x96,0x7d,0x59,
0x31,0x58,0x20,0x0b,0x41,0xad,0x2e,0xe9,0x18,0x96,0x1f,0x89,0x77,0xf6,0x3b,0x81,
0x74,0xc0,0x63,0xa2,0x4f,0xc9,0x0f,0x92,0x5d,0x9c,0xde,0x4e,0x2d,0xe9,0x74,0x3c,
0x29,0xcc,0x55,0x9d,0x69,0x18,0x11,0x9e,0x04,0xd6,0x8e,0x64,0x84,0xb8,0xde,0xa7,
0x92,0x63,0xf0,0xba,0xb0,0x50,0x75,0xb7,0xf1,0x7b,0xe1,0x0f,0x10,0x6a,0x2e,0xcc,
0xf0,0x54,0xd0,0xdb,0x65,0xb9,0x27,0x94,0xd7,0xe1,0x73,0x3d,0xee,0x97,0x1b,0x46,
0xe0,0x1f,0xe0,0x79,0xd5,0x3d,0xca,0xaf,0x05,0xbd,0x9d,0x22,0xed,0x26,0xc7,0x21,
0xa4,0xba,0x93,0xfc,0x00,0x1c,0x87,0xae,0xd8,0xf4,0x0c,0x2f,0x09,0xb2,0x56,0xd7,
0xef,0xa8,0xfb,0xb6,0x9b,0x94,0xab,0xce,0xa7,0xc8,0x55,0x79,0x33,0xc8,0xaa,0x98,
0x21,0x21,0x9c,0x3f,0xed,0x9e,0x8e,0x3b,0xc9,0x26,0xfc,0x91,0x33,0xd8,0x32,0x00,
0x8f,0x80,0x4c,0x0b,0x82,0x64,0x01,0xce,0x2c,0xbb,0xe2,0xdc,0xac,0xb6,0x76,0x5a,
0x96,0x75,0xfe,0x29,0x39,0x0b,0x3f,0xa3,0xe5,0x9d,0xe2,0x5a,0xe2,0x35,0xda,0x45,
0x45,0x11,0x5e,0x68,0x09,0x1e,0x75,0xcf,0xe7,0x07,0x92,0xaf,0xd3,0xe0,0xf0,0x5d,
0x41,0x67,0xb1,0x31,0x6f,0xca,0x8d,0xdd,0xf4,0x7d,0x8e,0x75,0xaf,0x61,0x5c,0x7e,
0x1f,0x6e,0xa3,0xee,0x0f,0xf8,0xc1,0x3c,0xbd,0x9f,0x9d,0xdc,0xb0,0xe0,0xfc,0x76,
0x78,0x60,0x73,0x95,0x33,0x90,0xbe,0x22,0x97,0x52,0x57,0x86,0x7f,0xd7,0x18,0xcf,
0x68,0xd1,0xc6,0x64,0xbb,0x5c,0x39,0xe0,0x8c,0xb5,0x94,0xc2,0x81,0x95,0x95,0x54,
0x6c,0x26,0xa7,0x8c,0x71,0xa1,0xf0,0xa8,0xf2,0xf8,0xb6,0xca,0xec,0xa6,0xe1,0xbc,
0x59,0x70,0x80,0x56,0xa7,0xb1,0x2f,0x59,0x30,0xde,0x4f,0x3a,0x91,0x78,0x0c,0x56,
0xa9,0xce,0xd0,0xcc,0x26,0xd8,0x0f,0x81,0x14,0x8e,0xe7,0x05,0x63,0x1e,0x86,0x4b,
0x0e,0xb5,0xbc,0x0b,0x61,0xea,0x4e,0xf3,0x7e,0xb8,0x0c,0xf5,0x89,0x1d,0x69,0xfe,
0xb2,0x71,0xcf,0x28,0x8e,0xe8,0x69,0x78,0x10,0x70,0xac,0xd3,0x30,0x06,0x61,0x52,
0xdf,0xc6,0x9f,0x02,0xbd,0x9d,0x28,0x1d,0x81,0xa3,0x10,0x52,0x2a,0xbb,0xf8,0x21,
0xe1,0x28,0xd4,0xad,0x3c,0x91,0x2a,0xef,0x35,0xee,0x49,0x85,0x6f,0x8a,0xac,0x4e,
0xdc,0xe6,0xeb,0x53,0x36,0x40,0x55,0x5c,0xfc,0x29,0xf6,0x45,0xb7,0x75,0xaf,0xa3,
0x02,0xb6,0x80,0x6f,0x91,0xb8,0x2d,0x6f,0x08,0x36,0xc0,0x77,0x55,0x48,0x91,0x17,
0x8d,0x31,0x13,0x85,0x42,0x92,0x50,0xe4,0x2f,0x8a,0x52,0xde,0x8b,0x90,0x50,0x56,
0x2d,0x12,0x3d,0xa4,0x97,0xd3,0xc7,0xac,0xbb,0x30,0x4e,0xfa,0xb9,0x10,0xb8,0x28,
0x9f,0x25,0xfd,0x10,0x86,0x2a,0xca,0x9f,0xe7,0xf5,0x79,0xd8,0x7e,0xe3,0x3f,0xc3,
0x3b,0xca,0xb3,0x31,0x77,0x6a,0xc6,0x5b,0x70,0x49,0xa9,0xc7,0x02,0xff,0x76,0x9e,
0x3e,0x66,0xdb,0x1d,0xaf,0xc0,0xab,0x10,0x56,0x02,0x29,0x7e,0x08,0x2e,0x49,0xf5,
0x77,0xb8,0x07,0xf9,0x39,0xc6,0x78,0x2e,0x90,0x7e,0xff,0x66,0x52,0xae,0x12,0x45,
0x8a,0xdd,0xeb,0xf6,0x54,0x49,0xce,0x16,0x32,0xdd,0x78,0x07,0xd1,0xd1,0xe7,0xdc,
0x19,0x0d,0xdc,0x1d,0x95,0x48,0x9f,0xb2,0x8f,0x0b,0x64,0x9d,0x52,0x4b,0x8b,0x61,
0x9f,0xde,0x92,0x5f,0x90,0x3d,0x10,0x50,0xc4,0x54,0xde,0x08,0x77,0x10,0xaa,0x14,
0x67,0x8a,0xac,0x37,0xc6,0x2c,0x5d,0xf4,0xb6,0xe7,0x92,0x10,0x56,0x5c,0xf8,0x3c,
0xe9,0x12,0x84,0xb1,0x2f,0xce,0x4e,0xc3,0xce,0xfc,0xdc,0xe3,0x70,0x3d,0x84,0x17,
0xb9,0xbb,0xf8,0x11,0xc0,0x3a,0x65,0x5a,0x8a,0xbf,0xc3,0x98,0x23,0xbf,0x44,0xd9,
0x10,0x83,0xbb,0x85,0x97,0xe5,0x31,0xe8,0x02,0x57,0x92,0x3f,0x96,0x6b,0xe7,0x58,
0xa8,0x6c,0xe4,0xaa,0xa8,0x73,0x37,0xff,0x2d,0xcf,0xb3,0xe0,0xa3,0xe2,0x30,0x39,
0x66,0x2c,0x51,0xbf,0x63,0x19,0xd7,0x0a,0x01,0xb4,0x4f,0x52,0x88,0x73,0x2b,0x2b,
0x62,0x37,0xd6,0xe9,0xe3,0x92,0xe6,0x4a,0xe5,0xf6,0xda,0xca,0x7e,0xe7,0x18,0xb9,
0x0f,0x6d,0xa2,0x3c,0xee,0x6f,0x6e,0xe9,0x37,0xc6,0xba,0x53,0x9c,0x05,0xff,0x44,
0x7f,0x7c,0xd9,0x3d,0x7c,0xdd,0x2c,0xf8,0x50,0x9d,0x17,0x77,0xad,0xe5,0x8f,0x19,
0xf6,0x29,0x16,0xee,0x26,0x7f,0xa8,0x08,0x9f,0x76,0xaf,0xff,0xc2,0x5b,0xe9,0x2b,
0x2b,0xeb,0x14,0x77,0x86,0xef,0x35,0xec,0x53,0x71,0x5c,0x90,0x4e,0xaa,0x37,0x0f,
0x09,0xe3,0x0d,0x0b,0x61,0x82,0xd6,0xc7,0x5d,0x63,0xbc,0x64,0x8c,0x67,0x67,0x51,
0x86,0x6c,0xce,0x94,0xab,0xe2,0xb7,0xf8,0xe5,0xd8,0x97,0xca,0x95,0xe2,0xa8,0x6f,
0xae,0x31,0x2e,0xc5,0x90,0x8a,0xff,0x5d,0xf1,0xfd,0x6a,0xb4,0xb9,0xc5,0x0b,0x07,
0xa0,0xb2,0x49,0x8c,0x13,0x81,0xea,0xef,0x17,0x97,0xfa,0x0b,0x1e,0x83,0xb4,0xda,
0x29,0x10,0x45,0x44,0xfb,0x8c,0x8b,0x69,0xe2,0x6d,0xd3,0xe7,0xa1,0xb3,0xe4,0xd7,
0xe4,0x7d,0xa8,0x51,0x5d,0xdb,0xf8,0x0a,0x98,0xe0,0xea,0xd4,0x8e,0x94,0xb3,0xaa,
0x4d,0xbf,0x67,0x0a,0xde,0x83,0xbf,0x87,0x9f,0xa8,0x38,0xdd,0x23,0x70,0x05,0x66,
0x0d,0x60,0x21,0x6b,0xd8,0x67,0x42,0x1a,0x87,0x93,0x30,0x4f,0xed,0x18,0x69,0xbd,
0x40,0xeb,0xd1,0x18,0x5d,0x83,0xbc,0xdc,0xa8,0xd7,0xad,0x13,0x16,0x93,0x4d,0x42,
0x48,0xf5,0x27,0x23,0xbf,0x87,0x43,0xe0,0x8b,0x8b,0x29,0x52,0xa1,0xe8,0xf6,0x49,
0x1d,0x15,0xf2,0x26,0xf0,0xa9,0xf8,0xa3,0x21,0x7c,0x07,0x5f,0xa3,0xd8,0x45,0x64,
0x63,0x3c,0x45,0x01,0xa0,0x35,0x88,0x78,0x20,0x64,0x00,0xeb,0x1e,0x68,0xc2,0x75,
0x44,0x0c,0xfb,0xa4,0x85,0x71,0xf1,0xa4,0xbf,0x46,0x15,0xbb,0x5a,0xb3,0x30,0xa1,
0x74,0x29,0xae,0x34,0x2f,0x13,0x63,0x1e,0x6e,0x1c,0x13,0xae,0xc8,0xb7,0xaa,0x6e,
0xe0,0x47,0x61,0xc2,0x1f,0x5e,0xee,0xce,0xf2,0xc3,0xd3,0x0c,0xd6,0x39,0xc6,0xc9,
0x49,0x39,0x2c,0xbb,0x2f,0xf0,0x21,0xe5,0x64,0x4b,0x38,0x1b,0x18,0xc3,0x5f,0xca,
0xf1,0x73,0x42,0xd8,0x1c,0xac,0x2a,0x70,0x7e,0x44,0xfc,0x74,0x13,0xad,0xca,0xe2,
0xdc,0x66,0x4d,0x7e,0xbe,0x84,0xd8,0x5c,0x45,0x9d,0xf3,0x5a,0x28,0x7d,0x0c,0x0d,
0x00,0xfb,0xf2,0xa6,0xc9,0xcf,0xbd,0xf0,0x24,0x54,0x25,0x9d,0x19,0x5f,0x27,0x2b,
0x30,0x9e,0x8d,0x9a,0xfc,0x1c,0x41,0x6c,0xe2,0xf3,0x32,0xce,0x5f,0xf8,0xb0,0xa0,
0x22,0x5f,0x54,0x93,0x9f,0x87,0xe1,0x0d,0xa8,0x97,0x91,0x4b,0x3f,0x66,0x05,0xc6,
0xcf,0x31,0x93,0x9f,0x7b,0x71,0x88,0x71,0x18,0x33,0xb8,0x8e,0xae,0x40,0xb8,0x67,
0x73,0x86,0x1f,0x34,0xf9,0x59,0x2a,0xd4,0xc3,0x2e,0x05,0xa7,0x7b,0x04,0xc7,0x13,
0x8d,0x31,0x43,0x2e,0x98,0xfc,0x6c,0x12,0xf7,0x0b,0x15,0x8a,0x98,0x51,0xb2,0x8c,
0x3d,0xcc,0x50,0xc7,0x8c,0xf1,0x94,0x39,0x44,0xd5,0xb6,0x9b,0x56,0x46,0x7f,0x40,
0x5e,0x81,0x03,0xe9,0xca,0xac,0x73,0x2d,0xe9,0x37,0xf9,0x39,0x1d,0x1f,0x33,0x7f,
0xcc,0x15,0xe4,0x55,0xf8,0x30,0x5d,0x97,0x75,0x07,0xf9,0x53,0x26,0x3f,0x0f,0xc3,
0x44,0xf4,0xf3,0x7d,0xd5,0xa3,0x5a,0x5f,0xb4,0x7e,0x9a,0xfc,0x74,0x0c,0xc8,0xc7,
0x69,0xed,0x76,0xf7,0xf3,0x4b,0xfd,0xf4,0x0a,0xd4,0x65,0x27,0xf1,0x73,0x07,0xb4,
0x42,0x59,0xeb,0xd2,0xd5,0xa4,0x54,0xf9,0x19,0x54,0xa2,0x31,0xda,0xf9,0xa9,0x3e,
0x4e,0x59,0x17,0x8a,0xbf,0x5a,0x76,0x00,0x0b,0x76,0x7e,0x7a,0x06,0xb0,0x9d,0xff,
0x02,0x76,0xfd,0x2e,0xdc,0x03,0x02,0xaa,0xd7,0xce,0xcf,0x4e,0x62,0xf0,0xf3,0xcf,
0x91,0x9f,0x61,0x1a,0x98,0xc4,0x4f,0x61,0x0c,0x6a,0x05,0x37,0x9d,0x91,0x96,0xb5,
0xc5,0x4d,0x4d,0x7e,0x26,0xa5,0x3e,0xc6,0x25,0xb4,0x5d,0xa2,0xb3,0x00,0x6d,0xf7,
0x4d,0x93,0x9f,0x2b,0xc4,0x0d,0xa4,0x6a,0x91,0x33,0x55,0x3c,0x04,0x06,0x5f,0xec,
0xfc,0xfc,0x6b,0xfd,0x47,0x66,0x9d,0xc5,0x4f,0x01,0xb1,0x19,0x88,0x2d,0xf5,0x2c,
0x39,0x02,0xfb,0x10,0x50,0x4e,0xc9,0xce,0x4f,0x38,0xcd,0xba,0x00,0xb8,0x44,0x8c,
0xbe,0x58,0xfc,0x64,0xd8,0xac,0x52,0xdc,0xdb,0x18,0x23,0xf5,0xbe,0xe4,0xf8,0xe9,
0x75,0x5c,0x4c,0x5f,0x82,0xda,0x98,0x6b,0x8b,0x56,0x57,0xcf,0xea,0x5e,0x34,0xf9,
0x79,0xce,0x95,0x91,0x11,0x47,0xeb,0x8a,0xb3,0xb4,0x1b,0xef,0xb9,0x94,0x92,0x13,
0x16,0x3f,0x61,0x5f,0x14,0x6d,0xa2,0x98,0xf4,0x95,0x99,0x7d,0x31,0xee,0x59,0xb4,
0x5b,0x3a,0x88,0x7b,0x40,0x32,0x15,0x62,0xef,0x30,0x57,0x7b,0x87,0x1c,0x3f,0x4b,
0xf0,0x31,0x42,0x28,0xe6,0xd2,0x78,0x8d,0xef,0x87,0xcf,0x1b,0x36,0xec,0xd3,0xcf,
0xed,0x86,0x0f,0x20,0xbc,0xa2,0x3a,0x65,0xf5,0xf3,0x98,0x61,0x9f,0x7e,0x29,0x0d,
0xfd,0x10,0x92,0x02,0x74,0x46,0x16,0xfa,0xf5,0xf7,0xb3,0xf8,0xe9,0x27,0x4f,0xc0,
0xae,0x84,0x98,0x2e,0x1e,0x86,0xd7,0x70,0xe7,0x70,0xa6,0x91,0x91,0x8a,0x51,0xd7,
0x84,0x73,0xeb,0x63,0xcb,0x67,0x00,0x66,0xe8,0xf6,0x69,0xf1,0x33,0x0c,0xed,0xc1,
0xb2,0x01,0x71,0x0d,0xb9,0x4a,0x99,0xbd,0x38,0x9b,0xc9,0xb0,0xc9,0xcf,0x30,0xfc,
0xb6,0xf3,0x7b,0xef,0xba,0x7e,0xc0,0x5f,0x95,0xff,0x81,0xde,0x96,0x75,0xaf,0xe5,
0xdf,0xcd,0xf1,0xf3,0x46,0x5c,0x2b,0xf1,0x70,0xbf,0x7b,0x1f,0xff,0x9e,0x7a,0x85,
0x4d,0xb2,0xc5,0x4f,0x70,0x5c,0xa5,0x88,0xcd,0x21,0xd7,0x25,0x7e,0x16,0x9c,0xa4,
0xf5,0x59,0xf7,0x18,0x1a,0x78,0x8e,0x9f,0x0f,0x2b,0xed,0x50,0x39,0x2c,0xfe,0x30,
0x52,0x8a,0x9d,0xaa,0xec,0xc3,0xe7,0xbd,0x9b,0x5b,0xb7,0xd0,0xb1,0x08,0xbb,0xd0,
0x27,0xd6,0x90,0x3a,0x78,0x5c,0xef,0x8b,0x6a,0xd8,0xa7,0xec,0x19,0x10,0xd9,0x1e,
0xe0,0xc7,0x3d,0x80,0x1a,0x9b,0x41,0xbf,0x90,0xe3,0xe7,0x5e,0x6d,0x29,0xbb,0x46,
0xf9,0x3a,0xf2,0x07,0xbd,0x2f,0xef,0x9a,0x36,0x3f,0x02,0x7f,0xa3,0xff,0xe8,0x3c,
0xae,0x5b,0xad,0x90,0xe3,0x27,0x87,0xfc,0x9c,0xe0,0xea,0x7b,0xdc,0xcd,0x33,0xea,
0xe0,0x0d,0x4e,0x5b,0x63,0xaa,0xc1,0xcf,0x84,0xb0,0x4a,0x3c,0x04,0x55,0x3d,0xb8,
0xcc,0x03,0xa8,0xa5,0xaa,0xd8,0x7a,0xcf,0xe6,0xf8,0xc9,0x05,0xf0,0x56,0x73,0xd8,
0x8f,0x5c,0xb0,0xc9,0xa8,0xb3,0xf6,0x77,0xd8,0xcf,0xad,0xea,0xc7,0xee,0xb9,0xa0,
0x55,0x0a,0xf4,0x61,0x21,0x67,0x9f,0x50,0xb8,0x0a,0x26,0x84,0x5b,0x06,0xdc,0xa3,
0xce,0x3a,0xbc,0x67,0x78,0x10,0xfb,0xa2,0xf2,0x39,0x7e,0xbe,0x07,0x13,0xec,0x47,
0xfb,0xf8,0x71,0x78,0x5f,0xef,0xcb,0x70,0x5e,0x8e,0x9f,0x57,0x61,0x9c,0xd6,0xa8,
0x9b,0xc6,0xf8,0xab,0xf0,0x47,0x24,0xbd,0x6b,0xd8,0xd4,0x9f,0xaa,0x74,0x1e,0xba,
0xc5,0xf2,0x51,0x67,0x26,0x82,0xef,0x97,0xa8,0x92,0xc5,0x4e,0x12,0x34,0xde,0xaf,
0xdf,0x31,0x00,0x87,0x7c,0xfe,0x41,0x67,0x30,0xaf,0x83,0x71,0x89,0x8a,0x32,0x09,
0xd9,0xf8,0x89,0xdb,0x02,0x76,0x3d,0x8f,0xf1,0xac,0x2a,0x85,0xfb,0x51,0xd0,0xe2,
0xa7,0x30,0x46,0xbb,0x7a,0xb0,0x7b,0x7b,0xa5,0x1f,0x31,0x7e,0xa6,0x79,0x00,0x93,
0x9f,0xc2,0x78,0x8a,0x75,0xaf,0x15,0x07,0x0f,0x6e,0x53,0x91,0xf3,0x63,0x46,0xbb,
0x18,0xf2,0xf3,0x55,0x65,0x21,0x1b,0x62,0xb6,0x57,0x85,0x06,0x5d,0xbb,0xf8,0x12,
0x93,0x9f,0x01,0x61,0x83,0x50,0x3d,0x88,0xfa,0x93,0x3d,0xcf,0xd7,0x23,0xfa,0x5b,
0x72,0xfa,0x73,0x18,0xed,0x73,0x3b,0x17,0xe8,0x75,0xde,0x42,0x06,0x38,0xb6,0xbf,
0xe7,0xcb,0xc5,0x41,0x63,0xc8,0x46,0x91,0x9f,0x6d,0x88,0xcd,0x4d,0x6b,0x51,0x7f,
0x1e,0xa0,0x8f,0xad,0xfd,0xec,0x30,0x1a,0x87,0x7e,0x45,0x45,0x17,0x9c,0xca,0x2e,
0x50,0xdd,0x0b,0x50,0xef,0x7e,0x08,0xa1,0x6c,0xa1,0xcc,0x17,0xe7,0xe9,0x75,0xca,
0x8d,0x3b,0xe0,0x6a,0x10,0xa7,0xfb,0x83,0x69,0xe3,0x1a,0x3f,0xa7,0xcb,0xce,0xc1,
0x3c,0xbd,0x2f,0xc8,0x4f,0x38,0x4e,0xd7,0x5c,0x78,0x30,0xd3,0x10,0x60,0x9c,0xcf,
0xe2,0xfb,0xf5,0x1a,0xef,0xd7,0x5f,0xb4,0xc3,0xd3,0x2e,0x96,0xfd,0x4e,0x6c,0x2e,
0x46,0xfd,0xc9,0xf6,0x9c,0x38,0x72,0x3e,0x67,0x4b,0x8f,0x92,0xf6,0x64,0xd9,0x3b,
0xc9,0xb5,0xa1,0x59,0x22,0xe3,0xa7,0x7f,0x25,0xb2,0x5c,0xbf,0x67,0x94,0xf1,0x53,
0xd0,0x64,0xb5,0x4b,0xdb,0x03,0x70,0xac,0x5f,0x33,0xf9,0x99,0x84,0x63,0xd0,0x45,
0x5d,0x69,0xa7,0xa6,0x3f,0xa9,0x2b,0xcb,0x5f,0xce,0xb5,0x03,0x44,0x15,0x0d,0x4b,
0xb8,0x94,0x35,0x66,0xc9,0x8c,0x9f,0x39,0xbe,0x48,0x43,0xa8,0xe7,0xc3,0xcd,0xee,
0x9f,0xa3,0x2e,0x30,0x58,0x40,0x8c,0xf1,0x44,0x7e,0x02,0xf2,0x53,0xd9,0x84,0xdb,
0xba,0xce,0x48,0xa9,0x40,0xe6,0x2c,0x7e,0x6e,0xd0,0xb1,0x89,0x75,0x0a,0x2b,0xf8,
0xf2,0x72,0xf6,0xd9,0x88,0xfc,0x6c,0x44,0x54,0x79,0x88,0xa0,0xf3,0x73,0x7b,0x1e,
0xb1,0xf1,0x53,0x65,0x9c,0xa7,0x4e,0x19,0xba,0x75,0xbe,0x54,0x4d,0xcb,0xf1,0x93,
0xe1,0xa8,0x2e,0xe6,0x96,0x74,0x46,0xc6,0x50,0x63,0x9e,0x37,0xf5,0xe7,0x45,0xbd,
0x7b,0x83,0x3a,0xb3,0x98,0x58,0x39,0x6f,0xf2,0x13,0x65,0xa7,0x5c,0x05,0x4e,0x4d,
0x7f,0x72,0x55,0x9e,0xa5,0x2a,0xb2,0x5c,0x7f,0xbf,0x28,0xe3,0xa7,0x18,0x88,0x39,
0x8b,0xd3,0x7d,0xae,0x7d,0xf0,0x1d,0xc5,0x19,0x23,0xa7,0x8c,0x71,0xf1,0x96,0xec,
0xce,0xa1,0x9f,0xf1,0xb3,0x3a,0xee,0x1c,0x44,0x7e,0xea,0x75,0x69,0xcf,0xee,0xd2,
0x4b,0x85,0xa8,0x31,0x91,0x91,0x22,0x3e,0x2f,0xce,0xf8,0x69,0xb4,0x5b,0x09,0xd8,
0x2e,0xa5,0x75,0x81,0xb5,0x9b,0xc5,0xc6,0xec,0xb5,0x1c,0x23,0xa5,0xb4,0x38,0x06,
0x21,0x7c,0xad,0x06,0x36,0xd6,0xb5,0x38,0x4b,0xc8,0x4f,0xfd,0xdd,0xfd,0x82,0x1f,
0x9e,0x15,0x7c,0x4f,0x39,0xb3,0x91,0x61,0x09,0xf5,0x67,0xba,0x3f,0x4d,0x7a,0x0d,
0x3b,0xf3,0x3b,0xee,0x62,0xb2,0x73,0xd4,0x39,0x0f,0x98,0x26,0xaa,0x60,0x73,0xdb,
0x6b,0xee,0xef,0xa5,0x15,0xcf,0xa7,0xcb,0x5a,0x9c,0x71,0x72,0x06,0x9e,0xa7,0x15,
0xbb,0xc4,0x15,0xe6,0xfe,0xee,0x47,0xfd,0xf9,0xc7,0xd4,0x3c,0x86,0xcd,0xb3,0x70,
0x95,0xde,0x92,0x65,0xfa,0xd3,0xb0,0x4f,0xe4,0xa7,0xf4,0x61,0x71,0x78,0x2b,0xee,
0xa9,0x23,0x68,0x13,0x0b,0xe9,0x24,0xfd,0xf9,0x0a,0x4c,0xa4,0x34,0x7e,0x86,0xe1,
0x4a,0x1a,0xf5,0x67,0x3f,0xef,0x35,0xde,0xaf,0xb3,0x68,0x1b,0xb4,0xd2,0x4a,0x15,
0x97,0x2b,0xda,0x67,0x22,0x10,0x10,0xd5,0x82,0xb9,0x46,0x9d,0x04,0xdb,0x14,0x94,
0x9d,0x70,0x7b,0x73,0xde,0x0e,0x78,0x06,0xc7,0xd3,0x7f,0x1a,0x27,0x59,0xaf,0x8b,
0x7b,0xfa,0x60,0x93,0x10,0x80,0x7c,0x19,0x7d,0xd1,0x2e,0x08,0xa0,0x71,0x91,0xe2,
0x36,0xbd,0xae,0xbb,0xa4,0x43,0x98,0x40,0x3d,0x5f,0x89,0xeb,0x9d,0xad,0x07,0xb6,
0xde,0xfd,0x34,0x67,0xf3,0xb8,0x94,0xb9,0x3a,0xb8,0x2b,0x73,0xf3,0x2a,0xac,0x7b,
0x90,0x89,0x80,0x61,0xc3,0x3e,0x13,0xa9,0xf1,0xc6,0x93,0x10,0x1a,0x2e,0x1d,0xc5,
0x5f,0xb8,0x8a,0x6b,0xac,0x63,0x94,0xcf,0x9a,0xfa,0x73,0x15,0xa2,0xd1,0x97,0x10,
0x67,0x22,0x3f,0x9f,0xc5,0xfd,0x08,0x17,0x46,0x36,0x9e,0xd3,0x9f,0x58,0x27,0xe0,
0xbe,0xd9,0x45,0xf6,0x0a,0x87,0x84,0x2a,0x45,0x73,0x4e,0x0c,0xfd,0xd9,0x88,0xd8,
0x94,0x65,0xc5,0x2b,0x61,0x3f,0xf7,0xcf,0x0e,0x80,0x1f,0x48,0x6f,0x42,0x1f,0x33,
0x9a,0xbf,0x0a,0x4e,0x06,0x43,0xe0,0xdf,0xc7,0xef,0x15,0x2e,0x97,0x85,0xff,0x14,
0x37,0xd0,0xec,0x7a,0xbd,0xdd,0xce,0x1b,0xc7,0xf3,0xae,0x04,0xeb,0xc1,0x7d,0x8a,
0x7f,0x10,0xae,0x4a,0x75,0x37,0xa0,0x31,0x0e,0xdf,0x94,0xe3,0xe7,0x08,0x9c,0x9c,
0x8f,0x36,0x71,0x10,0xf9,0x39,0xae,0xd6,0xf7,0xe3,0x7e,0x14,0x14,0x72,0xfc,0x9c,
0x88,0x6c,0x2e,0xaa,0xbe,0xe8,0xec,0x22,0x08,0x52,0xa8,0xce,0x3a,0xbb,0x49,0x90,
0x98,0xfc,0x94,0x5a,0x67,0x6b,0x5b,0xcd,0x00,0xec,0x93,0x91,0xe5,0x19,0x3b,0x3f,
0x05,0x86,0xfe,0x4d,0x19,0xc6,0x33,0x89,0xed,0x01,0x05,0x41,0x83,0x13,0x12,0xea,
0xcf,0xe3,0xda,0x56,0x83,0x75,0x63,0x69,0x6d,0xcf,0x01,0x53,0x7f,0xea,0x43,0xbc,
0x6c,0x14,0x39,0x3f,0x4e,0x27,0xe9,0x4f,0xc6,0xcf,0xe3,0x24,0x84,0x72,0xd5,0x19,
0x40,0xb1,0xd2,0xc5,0xda,0xd9,0xf4,0x27,0x6c,0x16,0xca,0x5f,0x2e,0xc8,0x44,0xd8,
0xe6,0x53,0xce,0xf6,0x9c,0x8f,0x2c,0xfd,0x79,0x43,0x2b,0xc8,0x7d,0xb8,0xbf,0xbb,
0x84,0x2e,0xe6,0xc8,0x67,0x22,0x17,0x0c,0xdb,0x65,0xfc,0x6c,0xdf,0x57,0x36,0x52,
0xb0,0xd6,0x37,0x0b,0x9e,0x62,0x8e,0xfc,0x5a,0x32,0x6e,0xd4,0x31,0xfd,0xf9,0x82,
0x1c,0x1c,0x69,0x3a,0x34,0xcd,0x85,0x36,0x18,0x54,0xff,0x0c,0xf5,0xa7,0xc9,0xcf,
0xc3,0x70,0x65,0x85,0x16,0x5e,0x38,0x8c,0x73,0xab,0xbd,0x83,0xc5,0xcf,0x97,0x94,
0xe3,0x50,0x3b,0xd2,0x91,0x99,0x36,0x22,0x7d,0x48,0xbb,0x8e,0xa1,0x4e,0xb6,0xf1,
0x33,0xdd,0x0e,0xf2,0x31,0xb1,0xf9,0xcb,0x67,0xe0,0x69,0xb1,0x8c,0xed,0xe1,0x36,
0x7e,0x42,0xfb,0xd6,0xd9,0x4c,0x76,0x9e,0x15,0x9e,0x6f,0x29,0x1b,0xd1,0xb4,0xb0,
0x7e,0xcf,0xa8,0x67,0x04,0xfd,0x15,0x59,0x4d,0x06,0x35,0xbf,0x83,0x05,0x23,0x6c,
0xfc,0xec,0x44,0x7e,0xd6,0xa2,0xec,0x44,0x69,0x72,0x19,0x42,0xcc,0x06,0x27,0xf1,
0x53,0x47,0x95,0x06,0x52,0x4d,0x7f,0x1a,0x63,0x86,0xfc,0x14,0x98,0x8f,0xee,0x4e,
0x95,0x6b,0x7a,0x9e,0xb1,0xa0,0xd7,0xe4,0xe7,0x37,0x85,0x0d,0xc4,0x87,0x7c,0x89,
0xd8,0xf4,0xa7,0x7e,0x21,0x3f,0xe9,0x06,0xf0,0x45,0x36,0x7d,0xa2,0xfe,0xac,0x40,
0xfd,0x29,0x33,0x7e,0x22,0xbc,0x94,0xa9,0xfa,0x93,0xa8,0x24,0x04,0x39,0x96,0x6b,
0xfa,0xd3,0xe4,0xe7,0x5b,0xc2,0x25,0xe5,0x39,0x74,0x95,0x1b,0x34,0x46,0x32,0xff,
0xdd,0xc6,0xcf,0xdd,0x97,0x3e,0x3b,0x5f,0xd9,0x9c,0x9a,0x81,0x9c,0x57,0xc2,0x91,
0xbd,0x58,0xc7,0x99,0xfa,0x13,0xba,0x57,0xf8,0x90,0x9f,0x05,0x32,0x64,0xe4,0x85,
0x52,0x94,0x4e,0xe2,0xe7,0x41,0xb1,0x22,0xe6,0x94,0xb6,0x0a,0x12,0x63,0x39,0xae,
0xe9,0x53,0x9c,0x3e,0xd6,0x1a,0x3f,0x05,0x1f,0xeb,0xde,0x0a,0xfc,0xa5,0xb9,0xe8,
0xc8,0x5b,0xfc,0x64,0xfa,0x13,0x6a,0xd9,0x70,0xe8,0xfb,0x8a,0xcb,0xc6,0x4f,0xd4,
0x9f,0x3a,0xc2,0xcb,0xb1,0xa0,0x2e,0x9c,0xc2,0x4f,0x18,0x86,0x85,0x5a,0x7c,0x02,
0xc6,0x94,0x20,0x37,0x49,0x7f,0xce,0x95,0x37,0x82,0x6f,0x4f,0x12,0xe7,0x88,0xf1,
0x93,0xe9,0x4f,0x8b,0x9f,0x8d,0xe8,0xa2,0xf9,0x75,0xfd,0xb9,0x1f,0x9d,0x60,0xa6,
0x89,0x4c,0xfd,0x39,0x13,0xda,0xe5,0xb2,0x38,0xee,0xb7,0x67,0xd0,0x3f,0xd2,0xec,
0xc5,0xf2,0xdf,0x67,0xc2,0xef,0x33,0x9f,0xcf,0x8a,0x3f,0xe2,0xcf,0x4a,0x7f,0x4c,
0x6a,0x20,0xb5,0xf8,0xd9,0x06,0x57,0xe4,0xb0,0xb8,0x6c,0x5f,0x03,0x02,0xaa,0x79,
0x92,0xfe,0x54,0x1c,0x1f,0x29,0x27,0x93,0xe1,0x8b,0xee,0x4b,0xfc,0x55,0x65,0x22,
0x15,0x1e,0x42,0xff,0xfd,0xb4,0xc9,0xcf,0x9f,0x42,0x7b,0x51,0x65,0xdc,0xd9,0xdc,
0xb0,0x43,0x39,0x90,0xae,0x1e,0x12,0x51,0x7f,0x1a,0x75,0xc5,0xd0,0xd1,0xd2,0xae,
0xc9,0xce,0xe2,0x33,0xe2,0x01,0xb9,0xba,0x1f,0xeb,0x5e,0x34,0xf9,0xc9,0xf6,0xf7,
0x00,0x38,0x83,0xbe,0x0e,0x7d,0x7f,0x0f,0x92,0xdc,0xda,0xec,0xc4,0xb1,0x3e,0xc9,
0xf6,0xcd,0x51,0x64,0xf9,0x84,0xac,0xf9,0x9b,0xe7,0x0d,0x9b,0x4f,0x81,0x66,0x7a,
0x0c,0xef,0x9a,0xd6,0x50,0xab,0x33,0x16,0x3f,0xa5,0x8b,0x28,0xa1,0xea,0x47,0xb1,
0xdd,0xb8,0xc4,0xb4,0x14,0x0a,0xd8,0xec,0x0a,0xbd,0x6e,0x0b,0xee,0xef,0x28,0xcb,
0xb6,0x38,0x35,0xbe,0xe8,0xfe,0x6d,0x36,0x66,0xfa,0xef,0xcc,0xe5,0x25,0xa8,0xdd,
0x56,0x69,0x75,0xc9,0x8c,0xc5,0x4f,0xf4,0x8f,0xf6,0x93,0x00,0x71,0xce,0xcb,0xc3,
0xb1,0x96,0x03,0xfd,0x62,0xd0,0x9c,0x07,0x5a,0xb8,0x2a,0xfa,0x3e,0x84,0xa3,0xac,
0x9f,0x12,0xf6,0xb3,0x37,0x90,0x99,0x91,0x9d,0xa3,0xdf,0x73,0xd7,0x8d,0x23,0xca,
0xf3,0x50,0x4f,0xdc,0x17,0xca,0x51,0x2f,0x49,0x53,0xf4,0xe7,0xbb,0xae,0xe3,0xf7,
0x3f,0x5d,0xe6,0x4a,0xf2,0x3e,0xd4,0xf3,0xe8,0xa7,0x2e,0x37,0xfd,0x77,0x55,0x7a,
0x1b,0x1e,0x6a,0x08,0xa5,0x45,0x28,0xf0,0xc1,0x93,0x89,0xaa,0xac,0x73,0x3b,0x19,
0xcd,0xcb,0xf1,0x73,0x48,0x68,0x9d,0xef,0x03,0xc4,0x3b,0x00,0xd3,0xec,0x05,0x12,
0xb9,0x90,0xe3,0x67,0xd1,0x2f,0x51,0xa3,0xa0,0x5f,0xd5,0xd2,0x52,0x2e,0x3e,0xc9,
0x84,0xf6,0x56,0x32,0x6a,0x8c,0x19,0x94,0xf4,0xb2,0xf0,0x65,0x28,0xd0,0xc6,0x13,
0x0d,0xa4,0x0f,0xa7,0x6c,0xfe,0xfb,0x53,0x79,0x1f,0x7b,0x6f,0x3d,0xea,0x56,0xf9,
0x51,0x32,0xc1,0xfc,0xe2,0xd3,0x36,0xff,0x7d,0x37,0xfc,0x1e,0x9e,0x15,0xa7,0xb7,
0xf1,0xe7,0xe4,0x3f,0xcc,0xd7,0xe6,0x21,0x65,0xcc,0xd1,0x69,0xa6,0x89,0x16,0xc9,
0x59,0x51,0x24,0xb2,0x7c,0x85,0x6a,0xe3,0x19,0x34,0xf5,0x67,0x14,0x76,0x12,0xb9,
0xb9,0xc0,0x89,0x6b,0xfa,0x31,0x35,0x70,0x2a,0x2a,0x91,0x2e,0xc3,0x17,0x45,0x7e,
0x0e,0xb6,0x67,0xca,0xe4,0xe4,0x1d,0x64,0x26,0xda,0x4b,0xe5,0x88,0xf7,0x34,0xa9,
0x13,0xf4,0x3a,0xe4,0x27,0x7d,0x87,0x06,0xcf,0x4f,0x2f,0xe0,0x7b,0x07,0x7f,0x9f,
0xae,0x1b,0x09,0x78,0xf9,0xad,0x44,0xaf,0x53,0x6e,0xec,0x50,0xae,0xa4,0xff,0x8a,
0xba,0x53,0xce,0x41,0x98,0x90,0xea,0xfb,0x70,0x1d,0x0d,0x1a,0x3a,0xb9,0xd3,0xd1,
0x0f,0x47,0xe5,0x50,0xc4,0x4d,0xcb,0x2b,0xf2,0x5f,0x67,0xfa,0x33,0x65,0xe7,0x27,
0xd9,0x2c,0x96,0x29,0x4b,0xb3,0x3e,0x0f,0xd7,0x4e,0x70,0xa3,0x8f,0x91,0x0b,0x86,
0x5d,0x53,0xf8,0x5b,0xf8,0x6f,0x0c,0xe1,0x51,0x12,0x92,0x7e,0xc6,0xfc,0xf7,0xe5,
0x36,0x7e,0x9e,0x60,0xa1,0x5b,0xd5,0xe9,0x9d,0x5b,0x40,0x36,0xe1,0x58,0xef,0xb9,
0x9e,0xbc,0x06,0x39,0x7e,0xfe,0x1c,0x5e,0x13,0xba,0xda,0xdc,0x2b,0xf9,0x05,0xe8,
0x1b,0x86,0x51,0x88,0xa2,0xff,0x6e,0xc4,0x0b,0x90,0x9f,0xc3,0x80,0xfa,0x33,0xc1,
0x9f,0x53,0xc6,0xe0,0x27,0x30,0x9d,0xf9,0xef,0xfa,0xf3,0x90,0x9f,0x70,0x94,0x2e,
0x64,0x5a,0x6a,0x05,0xbc,0x23,0xd7,0x2a,0xcb,0x6c,0xfc,0x84,0x15,0x90,0x50,0x7d,
0xb7,0x3b,0x53,0xfc,0x62,0xd8,0x90,0xf5,0x4d,0xf1,0xdf,0xc9,0x86,0xf8,0xdc,0x45,
0x9f,0xd9,0x8e,0xec,0xd9,0x03,0x73,0x58,0xdd,0x09,0xcb,0x7f,0xe7,0x12,0x11,0xe4,
0xe7,0x0e,0xb2,0x4a,0xda,0xc0,0x3a,0x2c,0x91,0x63,0x0e,0x7d,0xda,0xbb,0x5d,0x32,
0xba,0xb5,0x0b,0x19,0x36,0xe3,0xd2,0x30,0xea,0x33,0xbb,0xff,0x5e,0x38,0x0e,0x1f,
0x2b,0x61,0x65,0xd9,0xe0,0x8c,0x97,0x99,0x10,0x65,0x62,0xf3,0x6d,0x21,0xc7,0xcf,
0x97,0xe1,0xe3,0x2c,0x72,0xfe,0x5e,0xde,0x23,0xfe,0x23,0xfa,0xef,0x1d,0xe8,0xdb,
0x9b,0xfc,0xc4,0xf7,0x4b,0xef,0xe2,0x9c,0x2c,0xa6,0xda,0x49,0xaa,0x98,0x31,0x9e,
0x30,0xec,0x33,0xba,0xb8,0x0f,0xb6,0xc4,0xe3,0x0a,0x9a,0x9e,0x20,0x3e,0x82,0xfc,
0xf4,0xdb,0xf8,0x59,0xb4,0x9b,0xee,0x11,0x7c,0x8b,0xb0,0xee,0x3d,0x7c,0x87,0xef,
0x2a,0xe2,0x76,0x93,0x05,0xe9,0xa2,0x7f,0x82,0x57,0xa1,0x2b,0xee,0x7e,0x94,0x73,
0x60,0xa1,0x4e,0xe3,0xa7,0xd1,0x97,0x95,0xdc,0x2f,0xe1,0x1d,0x40,0xbf,0x38,0xc5,
0xff,0x42,0x34,0x34,0xfb,0x29,0x9b,0xff,0x9e,0x95,0xbb,0xc0,0xdd,0x76,0xb3,0x0c,
0xc3,0x72,0x2d,0xd3,0xfa,0xbd,0x96,0xff,0x0e,0x0f,0x53,0x1f,0xfd,0xcc,0xf9,0x19,
0xb8,0x3b,0x2e,0x2e,0xef,0xdc,0x34,0xc9,0x7f,0x57,0x1e,0xeb,0xa9,0x50,0x9d,0xcf,
0x90,0x24,0xde,0xa2,0x82,0x6e,0x9a,0xec,0xbf,0xb7,0xe6,0x7f,0x43,0x75,0x8e,0x16,
0x5c,0x84,0x9f,0xd2,0xd9,0x09,0xe4,0x67,0x56,0xd0,0xdb,0xe9,0xf1,0xcf,0xa7,0x87,
0x96,0xae,0x75,0xfe,0x1c,0xde,0x46,0xfd,0x39,0x6d,0x2d,0xff,0xa6,0xe5,0xbf,0x0b,
0x87,0x60,0x2e,0xe3,0x52,0x1c,0x46,0x0d,0xfd,0x69,0xd8,0xa7,0x82,0xfe,0xe6,0xc7,
0xb4,0x26,0xeb,0xfe,0x01,0x3f,0x57,0x39,0x43,0xeb,0x57,0x04,0xc6,0xf8,0x41,0x93,
0x9f,0x8f,0x76,0xfe,0x86,0xca,0xc3,0xce,0x1f,0x92,0x99,0x74,0xd7,0xce,0xca,0x0a,
0xe4,0xcb,0xbb,0x86,0x2e,0x90,0x94,0x1d,0xf4,0x69,0x98,0xdd,0xef,0xbc,0x40,0x96,
0xd3,0x9f,0x49,0x95,0xa2,0xbf,0x19,0x17,0x94,0x5e,0xb7,0x52,0x3a,0x99,0xda,0xd7,
0x32,0x67,0x97,0x13,0x0a,0x14,0x48,0xa3,0xfe,0x44,0x47,0xfe,0x35,0xa3,0xee,0x89,
0x92,0x76,0x38,0x94,0xfc,0x28,0xe3,0x0e,0xb6,0xc6,0x0b,0x5e,0x83,0x3a,0x08,0x30,
0x46,0xea,0x7d,0x61,0xfc,0xbc,0x1c,0x59,0xe8,0x71,0x67,0x5a,0xd3,0xca,0x65,0xb6,
0xa7,0x5a,0xfe,0x7b,0x22,0xf5,0x91,0xf4,0xcf,0x52,0x0d,0xc3,0x66,0x08,0xde,0x97,
0x6b,0x04,0x2c,0x98,0x6c,0x45,0x7e,0x26,0x49,0x6d,0x45,0xc1,0xe1,0x02,0x50,0x06,
0x1d,0xbe,0x20,0xae,0xf7,0x61,0xcb,0x7f,0xa7,0x07,0x15,0x5f,0x4b,0xf4,0x99,0xe9,
0x32,0x74,0x82,0xaf,0x25,0x69,0xf9,0xef,0x05,0x82,0x8b,0x52,0xf9,0x97,0xde,0x82,
0xae,0x02,0x59,0x4e,0x65,0xe4,0x94,0x38,0x9f,0xf4,0x1a,0xf6,0x49,0x1f,0x5a,0x95,
0xbd,0x1c,0xec,0xea,0x77,0x75,0xf1,0x38,0x7f,0x62,0x68,0xbb,0xeb,0x29,0x3e,0xdb,
0x9a,0xf3,0xdf,0x47,0xe8,0xe5,0xe0,0x5c,0xaf,0x7b,0xdf,0x23,0x83,0xf2,0x58,0xd9,
0xe1,0x36,0xc6,0xcf,0x0a,0xdd,0x26,0x7a,0x8b,0x26,0xa4,0x37,0x3a,0x6b,0xc6,0x02,
0x97,0x51,0xcf,0x9f,0x84,0x9a,0x81,0x55,0xef,0x58,0xfe,0x3b,0xbc,0x4f,0x5a,0xd3,
0xe9,0x61,0x71,0x61,0x6f,0x55,0xf0,0xef,0xfa,0xca,0x87,0xc5,0x27,0xd0,0x0f,0x37,
0xf8,0x09,0x0f,0xb5,0x3c,0x36,0x37,0xbd,0x5c,0xac,0x61,0xb1,0x84,0xa8,0xfc,0x52,
0x81,0xaf,0xc0,0xd4,0x9f,0x9e,0xbd,0x79,0x9b,0x38,0x59,0x11,0x33,0x92,0x0b,0x1e,
0x83,0x5d,0x6a,0x81,0xe5,0xbf,0x73,0xc2,0x5e,0xe1,0x34,0x84,0xd2,0xae,0xc3,0x5c,
0x00,0x1d,0xc8,0x2e,0xb5,0x2a,0xdd,0x90,0xd3,0x9f,0x3d,0xd2,0xe1,0xbc,0x8b,0x72,
0xf5,0xee,0xea,0x71,0xe7,0x38,0x79,0x03,0x9e,0x57,0x0b,0xb3,0x24,0xc7,0xcf,0x66,
0xa9,0x03,0x5e,0x25,0x0b,0x23,0x81,0x67,0xf8,0x40,0x23,0xb6,0xeb,0xb5,0xf9,0xef,
0xc3,0x42,0x80,0xdb,0xa0,0xa0,0x8c,0x38,0x5c,0xf0,0xab,0xc8,0xe6,0x58,0xfd,0xef,
0x84,0x27,0xcc,0xfc,0x51,0xbf,0x20,0x42,0x6b,0x81,0x3c,0x88,0x4b,0x6b,0xba,0xc0,
0x02,0x4d,0xb7,0xa7,0x8b,0x73,0xf9,0xa3,0x53,0x52,0x01,0x69,0x4f,0x7d,0x36,0xeb,
0xfd,0x16,0x39,0xab,0xb4,0xd3,0x00,0xae,0x46,0x74,0x08,0xf4,0x76,0x51,0xc0,0x76,
0x5c,0xb0,0xdf,0x25,0xb5,0xb2,0x18,0x4b,0xb0,0x9f,0xf9,0xef,0xb9,0xf8,0xa7,0xa7,
0x5b,0x78,0x1f,0xaa,0x99,0x4e,0x3e,0x8c,0x9b,0xe4,0xe7,0xd4,0x65,0xe8,0x01,0x1a,
0x1c,0x14,0x3d,0x7b,0x51,0x0f,0x86,0x12,0x55,0x2d,0xac,0x9d,0x1c,0xea,0x0f,0xa4,
0xcd,0xf8,0x67,0x3f,0xec,0x80,0x57,0xe0,0x69,0x5a,0x28,0xdf,0x71,0x18,0x36,0x07,
0x76,0x1e,0xf3,0xa3,0xff,0x6e,0xc6,0x97,0x1e,0x85,0xc7,0x13,0x72,0x76,0xe3,0xf2,
0x99,0xb3,0xa4,0xf6,0xcc,0xec,0x11,0xe7,0x4a,0x33,0xfe,0x19,0xf5,0x74,0x08,0x8f,
0x0c,0xee,0x1e,0xcc,0x0f,0x92,0x0e,0xd2,0x8a,0x3e,0x1e,0xea,0x97,0x9c,0x1f,0xde,
0xef,0xd9,0x13,0xc3,0xe7,0x51,0x1c,0x46,0x3f,0x1c,0x73,0xd4,0x26,0xaa,0xb3,0xb6,
0xf8,0x67,0x1a,0x46,0xc5,0xc3,0xbc,0xae,0xf9,0x22,0x0b,0x39,0x81,0xda,0xf2,0x47,
0xbb,0xe1,0xa8,0x14,0x52,0x6e,0x4c,0x91,0x15,0xe4,0x28,0xcd,0x30,0x4e,0xf8,0xcc,
0xf8,0xe7,0x4d,0x64,0x0f,0xf8,0x50,0x96,0x15,0x20,0x2c,0x59,0x60,0x79,0x2b,0x91,
0x73,0xfc,0x14,0x5c,0xca,0x43,0x62,0xb9,0x72,0x73,0x73,0x71,0x1b,0x8a,0x84,0xb9,
0x8a,0x37,0x05,0x39,0xff,0x1d,0x04,0x07,0xfc,0x42,0xd9,0x19,0x9b,0xfb,0x7d,0xb2,
0x8e,0x25,0x92,0x62,0x0b,0x3c,0x91,0x9c,0xff,0x7e,0x1a,0x92,0x68,0x4f,0x21,0xd8,
0xac,0xf2,0x69,0x1c,0xa0,0x05,0xd0,0x48,0x9d,0x55,0x56,0xfc,0x53,0x3a,0xa8,0x84,
0x63,0x85,0xdb,0x1a,0x56,0xc0,0xa5,0x48,0x98,0xf9,0xef,0x66,0xfe,0x08,0xf5,0xe0,
0x51,0x12,0x8a,0xba,0x52,0x9b,0x56,0xc0,0x51,0x31,0xa4,0xb8,0xb6,0x98,0xf1,0x4f,
0xaf,0xb8,0x53,0x3e,0xbd,0xa6,0x16,0xc4,0x36,0x92,0xc6,0xcd,0xc6,0xe7,0x89,0xaa,
0x66,0xfc,0x53,0x69,0xdc,0x76,0x6c,0x9d,0x6b,0xe7,0x57,0xf3,0xef,0x6e,0xb9,0x57,
0x5e,0x27,0x96,0x45,0xc4,0xbb,0x23,0xa7,0x0c,0xfb,0xdc,0x2e,0x6c,0x83,0x5f,0x35,
0xce,0xbe,0x57,0x1c,0x94,0x9f,0x82,0x87,0x48,0xd9,0x22,0x71,0xd0,0x8a,0x7f,0xa2,
0x8f,0xbe,0x01,0xba,0x16,0x15,0x6e,0xe5,0xf0,0x79,0x5c,0x48,0x09,0xa4,0x1a,0x72,
0xfc,0xfc,0xb6,0xb4,0xbb,0x68,0x0b,0x62,0xb3,0x3a,0xe5,0x1c,0x92,0xdf,0xe1,0x9e,
0x43,0x7e,0x22,0x27,0xf4,0x76,0xc8,0x4f,0xbe,0x1f,0x6a,0x85,0xa6,0xb6,0x06,0x99,
0xa8,0xb8,0x51,0x34,0xa9,0xce,0x82,0x9c,0xd6,0x17,0xe6,0xc2,0x1d,0xd4,0x97,0xf1,
0x77,0xf2,0x7f,0x2e,0x6d,0xa4,0xbe,0x74,0xb4,0x93,0x14,0xe4,0xf6,0x9c,0x36,0x17,
0x3c,0x42,0x2b,0x2e,0x88,0xdd,0xd3,0x7f,0xa5,0xb4,0x26,0xfc,0xe7,0x56,0x2e,0xc0,
0x3a,0x7d,0x5c,0x38,0x28,0x2d,0x5b,0x2f,0xca,0x54,0x6c,0x6e,0x19,0xe4,0xd6,0x8b,
0x65,0xbd,0xb0,0xb6,0xc5,0x9f,0x6b,0x87,0xee,0xc4,0x0b,0xcc,0x6d,0xf7,0x3b,0x9f,
0x43,0xdd,0x73,0x6b,0xb6,0x69,0xd8,0x39,0xd7,0xb0,0x4f,0xbf,0x77,0x2f,0xf9,0x91,
0xa6,0xeb,0xa6,0xc5,0x05,0x2d,0xfe,0x99,0x36,0xf5,0x67,0xc4,0xf5,0x0c,0xf7,0x5e,
0xe2,0xc0,0x39,0xd7,0x69,0x3e,0x4c,0x5e,0xca,0xd4,0xac,0xc2,0x42,0x2e,0x7e,0xd6,
0x09,0xdb,0xa0,0x43,0xbe,0x45,0xf5,0x8f,0x92,0x3a,0xe8,0x90,0xca,0x1b,0x71,0x5c,
0x72,0xf9,0xa3,0x0c,0x3d,0x0c,0x8f,0x93,0x9d,0x7d,0xfe,0x45,0xe8,0xdb,0xb7,0x97,
0x95,0xdd,0x2e,0x46,0xf1,0x69,0xfa,0xfb,0x29,0xc2,0x5e,0xa9,0x55,0x98,0xa3,0x8a,
0x9f,0xc7,0xe5,0xd3,0x4a,0x70,0xe1,0x03,0x11,0x8d,0x79,0xd8,0x2e,0x74,0xba,0xfe,
0x40,0xba,0x14,0xcd,0x6f,0xc4,0x75,0x4b,0x03,0x19,0x67,0xee,0x9e,0x6d,0xb8,0x79,
0x5d,0x80,0x30,0xe7,0xce,0x94,0x67,0x59,0xec,0xde,0x1e,0xff,0x4c,0xd0,0xc3,0x05,
0x88,0x23,0xc5,0x15,0xcf,0x3f,0x0c,0xbf,0x85,0x9a,0xb4,0x6b,0xd8,0x09,0x86,0xff,
0xde,0xe3,0xa9,0x44,0x19,0x31,0xb7,0x62,0x6e,0x86,0xc8,0xe4,0x75,0xce,0x77,0xd1,
0xff,0x59,0xd3,0x3e,0x29,0xc8,0x72,0xbe,0xa0,0xe5,0x8f,0xf6,0xb2,0x3d,0x3c,0x85,
0x6c,0x95,0x73,0x63,0xa6,0xd5,0xfb,0xd5,0x82,0x60,0x41,0x07,0xc2,0x5b,0x0b,0x88,
0xe5,0xec,0xb3,0x05,0x9a,0xd8,0x32,0x57,0x03,0x94,0x47,0x47,0x17,0x17,0x54,0x47,
0x9a,0x07,0xc3,0x3e,0x13,0x25,0xe3,0xc5,0x9a,0x9c,0x1b,0x2d,0x7f,0x10,0xde,0x67,
0x85,0xac,0x4d,0x7f,0x9e,0x85,0x37,0xd4,0x7d,0x2b,0x5c,0x1f,0xa2,0xfe,0x7c,0x83,
0xab,0xc9,0xba,0xa2,0xe5,0xf6,0xf8,0xe7,0x26,0xa6,0x23,0xf7,0x11,0x99,0x6e,0x6a,
0xf3,0xb1,0x7d,0xda,0x1e,0xff,0xdc,0x2a,0xfb,0x92,0x2b,0xbb,0x50,0xf3,0xa1,0xdf,
0x81,0x9d,0x9b,0x14,0xff,0xdc,0x84,0xfb,0x6d,0x32,0x53,0x20,0xd3,0x3d,0xc5,0x3e,
0x19,0xe1,0x65,0xc6,0x3f,0x3d,0xbb,0x71,0x2d,0x85,0x4e,0xa1,0x7c,0x94,0x59,0x1c,
0x33,0x1b,0x00,0x5e,0x30,0x9e,0x77,0x0e,0x9e,0x62,0x9a,0x3d,0xb9,0x79,0x94,0xbf,
0x00,0x6f,0xd0,0xfa,0x9e,0x65,0xbd,0x66,0xfc,0x93,0xe5,0x8f,0xd0,0x3f,0xa2,0xae,
0x0c,0x37,0x0c,0xbf,0x17,0xbb,0x7a,0xf1,0x45,0xb7,0x19,0xf6,0xa9,0x0a,0x2e,0xd8,
0x44,0xe4,0x6d,0xe2,0x1a,0x72,0x3c,0x9e,0xaf,0xf8,0x62,0xe2,0x3a,0x5b,0xfe,0xbd,
0x29,0xb2,0x87,0xf8,0x15,0x71,0x7e,0x4b,0x2f,0x94,0xc7,0x7c,0x83,0x49,0x20,0x19,
0xbd,0x27,0xd0,0x8d,0xfb,0xfb,0xe6,0x54,0xf9,0x88,0xf8,0x43,0xf2,0x11,0x6d,0x4d,
0x97,0x0d,0x47,0x5f,0xb3,0xc5,0x3f,0xa7,0xa3,0xb6,0x59,0x30,0xec,0x5a,0xd0,0x70,
0x82,0x9e,0xcb,0x86,0xf6,0xb8,0x44,0x7e,0x6b,0xce,0x7f,0x2f,0xdc,0x3b,0x78,0xc5,
0x51,0xdf,0x53,0x3d,0xca,0x0f,0x28,0x57,0x83,0xcf,0x53,0x1c,0x4f,0xcb,0x7f,0x1f,
0xc2,0xa5,0xac,0xe5,0xb9,0x1a,0x91,0x9f,0x17,0x7a,0x06,0xa8,0x4d,0x7f,0x6e,0x83,
0x76,0x28,0x53,0xc5,0xe6,0x3c,0x8f,0x70,0x4a,0xfa,0xba,0x2a,0x2a,0x93,0xfc,0xf7,
0x6d,0xb4,0x3c,0xcb,0x2d,0x58,0x12,0x96,0x7f,0x91,0x2c,0x1f,0xbd,0x23,0x3a,0x29,
0xfe,0xf9,0x08,0xa4,0x87,0xf6,0x64,0x48,0x25,0xec,0x4b,0xec,0x1a,0x2b,0xf0,0xd9,
0xfc,0xf7,0x8d,0xf0,0x68,0xac,0x3c,0x9d,0x9f,0x75,0xfe,0x40,0x7a,0x97,0x7e,0x2f,
0x35,0x3d,0x6e,0xf7,0xdf,0x15,0x26,0xf5,0xa6,0xa3,0xde,0x10,0xbc,0xe2,0x73,0x50,
0x49,0x39,0xd3,0x7f,0x8f,0xa1,0xec,0x84,0x2e,0x25,0x2f,0xc5,0xcf,0x49,0x1f,0x57,
0xbb,0x94,0x8e,0xad,0x7c,0x8b,0x5d,0x7f,0x2a,0x3e,0xc5,0x9f,0xe2,0xff,0x0b,0x1a,
0xb2,0x96,0xdf,0x94,0x8d,0x71,0xf1,0xa2,0x8f,0xbe,0x47,0x94,0x99,0xcb,0x5b,0x09,
0x7b,0x14,0xad,0x6e,0xbd,0xa5,0x3f,0x85,0x0d,0x88,0x4d,0x71,0x1b,0x59,0x0c,0x3b,
0x41,0x5e,0x8d,0x3e,0x33,0xe1,0xf4,0x96,0xdd,0xa2,0x8c,0x6a,0x58,0xcb,0xbf,0xcb,
0x64,0x18,0x0b,0x1d,0xe9,0xa2,0x5f,0xe7,0xfc,0xf7,0x42,0xd4,0x98,0x70,0x9b,0x72,
0xe7,0x36,0xfe,0x22,0xbc,0xde,0x58,0xaf,0x54,0x5b,0xf1,0x4f,0xaf,0xe3,0x22,0x8f,
0x6e,0x4a,0xa3,0xfb,0xe7,0x0d,0x8d,0xf0,0x92,0x7c,0xeb,0xa2,0xa6,0x41,0x9b,0xff,
0x9e,0x85,0xa4,0xb8,0x90,0xc5,0x3f,0x83,0xa8,0xde,0x7c,0x2d,0xa2,0xe5,0xbf,0x2b,
0x8e,0x3e,0x71,0x1d,0x87,0xbe,0xe8,0x97,0x5b,0x3c,0x3b,0x13,0x5c,0xfa,0xf6,0x02,
0x9b,0xfe,0xfc,0xe2,0x6e,0x6e,0x03,0xdc,0x1c,0x75,0x0e,0x2e,0xf1,0xc8,0x1b,0xa6,
0x3d,0x70,0x7f,0x81,0xcd,0x7f,0xdf,0xb6,0x1e,0x36,0x7c,0xa3,0x2a,0xee,0x4e,0xe5,
0xdf,0x24,0x6d,0x50,0x42,0xb1,0xaf,0x6d,0xb5,0xc5,0x3f,0x1f,0x97,0x5e,0x85,0x85,
0x71,0xfd,0x9c,0x40,0xaa,0x56,0xa9,0xdc,0xda,0x3a,0xc9,0x7f,0x5f,0x1c,0x42,0xb7,
0xfd,0x9e,0x0f,0xe0,0xb4,0x58,0xeb,0x45,0xfb,0xdc,0x98,0xab,0x13,0x16,0x0a,0x1b,
0x13,0xa8,0x3f,0xd3,0xc5,0x63,0xf0,0x4a,0x23,0x4f,0xa3,0x6f,0x92,0xdc,0x11,0x1c,
0x16,0xff,0x2c,0xa7,0x15,0x2c,0xe4,0xd4,0x5f,0x4c,0xa5,0x39,0x83,0x20,0x90,0x16,
0x83,0x9f,0x65,0xdc,0xfc,0x8a,0xcd,0x1b,0xef,0xa7,0x4e,0x85,0x8c,0x71,0x8f,0xc2,
0x6c,0x35,0x7a,0x9a,0x88,0x66,0xfc,0x33,0x2c,0xbd,0x4d,0xff,0x6b,0xd6,0xb9,0x96,
0x7f,0x05,0x5e,0xa2,0x9f,0x8b,0xbb,0x96,0xf3,0x1b,0x73,0xfe,0xfb,0x9d,0xbb,0xe1,
0x4d,0x78,0x96,0xe2,0xbe,0x39,0xc4,0xe2,0x9f,0x8a,0x5b,0xb2,0xf9,0xef,0x57,0xe1,
0x1f,0xe9,0xfc,0x2c,0xba,0xed,0x0b,0x95,0xfe,0x74,0xcd,0x90,0xeb,0xa0,0xcd,0x7f,
0x7f,0x98,0x6e,0xd8,0x25,0xa7,0xc5,0x02,0xdf,0x0d,0x0a,0xed,0xf2,0x65,0xa3,0x0b,
0x4c,0xff,0x9d,0xc2,0x61,0x65,0x5b,0xb0,0x5c,0x8d,0xae,0xc9,0xab,0x6d,0xa3,0xc1,
0x77,0x9a,0xc4,0x25,0xa6,0xfe,0x44,0xff,0x5d,0xa4,0x51,0x1c,0x7e,0x20,0x0e,0xa0,
0x89,0xb9,0x8c,0x9f,0xa7,0x4c,0xff,0xbd,0x03,0x5e,0x5f,0xb7,0x6b,0x6d,0x20,0xb3,
0xf4,0xb3,0xa4,0x27,0xbe,0x70,0x91,0x6b,0x07,0x8e,0x67,0xce,0xe6,0xd1,0x7f,0x2f,
0x62,0x3e,0xc9,0x8c,0x61,0x72,0x59,0x9e,0x13,0xdf,0x9c,0x34,0x35,0x26,0x17,0x1b,
0x87,0xf3,0xb0,0xbf,0xb1,0x63,0x94,0x9f,0x2f,0x0f,0xa4,0xea,0xa1,0x63,0x70,0x46,
0x2e,0xfe,0xd9,0x83,0xfa,0xb3,0x05,0x98,0xef,0x7b,0x83,0x00,0xaf,0x29,0xbe,0xfb,
0xc5,0x0e,0x2b,0xfe,0x89,0xf2,0x57,0xdb,0xfb,0x83,0xc4,0x0f,0xc9,0xc6,0x5f,0x3f,
0x20,0x76,0x14,0x9b,0xfe,0x3b,0x5a,0xdf,0x06,0xbf,0x4c,0xfc,0x04,0xeb,0xb6,0x06,
0xd3,0x31,0x7f,0x9e,0xe9,0xbf,0x2b,0xe2,0xaa,0xce,0xe1,0xb2,0x35,0x5f,0x74,0xd5,
0xb6,0xce,0x56,0xfb,0x85,0xf0,0x72,0x9c,0x3f,0xcb,0x7f,0x1f,0x08,0x8e,0xf9,0xeb,
0x0b,0x96,0xb5,0x3c,0x72,0x84,0x8e,0xc7,0xc3,0xcb,0x37,0xf7,0xd9,0xf8,0x79,0x01,
0x50,0xc6,0xf7,0x3f,0x74,0x61,0x46,0x80,0x0e,0xd0,0x9a,0xa0,0x6b,0xcc,0xe6,0xbf,
0x67,0xdb,0x0e,0xfa,0xab,0x47,0x9d,0x1f,0x9e,0x3a,0xac,0x26,0x65,0xec,0x70,0x37,
0x19,0xcd,0xe9,0x1e,0x47,0x3f,0x7a,0x2b,0x81,0x41,0x96,0xcb,0xc2,0x1d,0x52,0x66,
0xb1,0x84,0x0b,0xd6,0xf9,0x25,0x38,0x28,0x68,0xa9,0xaf,0x0e,0x42,0x25,0xd9,0x9e,
0x7f,0x97,0x4a,0xb2,0xf4,0x34,0x6e,0x35,0x85,0x99,0xb2,0x00,0x1c,0x8b,0x6e,0x15,
0x03,0x99,0x06,0x5b,0xfe,0x5d,0x1c,0xdf,0xf6,0x27,0xaa,0xbb,0x19,0x39,0x7f,0x1b,
0xd4,0x53,0xf7,0x28,0xb1,0xe2,0x9f,0x69,0x0d,0xb7,0xae,0xcc,0xd7,0x03,0x70,0x19,
0xf1,0xd8,0x91,0xe1,0xff,0x87,0x95,0x3f,0x82,0x83,0x50,0xde,0x23,0xb2,0x7d,0xf3,
0x20,0x02,0xbf,0xd0,0x9e,0x7f,0x6f,0x84,0x0c,0xdb,0x6a,0x50,0xbb,0x89,0x5b,0xd8,
0x98,0x5a,0xf9,0xf7,0x51,0x6e,0x26,0x3c,0x4f,0xe5,0x6c,0x74,0x2d,0x79,0xb4,0xed,
0x99,0xf4,0xcd,0x7e,0x8e,0xc5,0x3f,0xf5,0x76,0xc8,0x4f,0xb5,0x8b,0x0b,0xf6,0xa1,
0xbb,0xf1,0x30,0xbd,0x94,0x5e,0x38,0x72,0x26,0x58,0x6e,0xcb,0xbf,0x0b,0x07,0xe0,
0x6a,0xaf,0x7b,0xb4,0xf5,0xc1,0xf8,0x55,0xa9,0xb6,0xe7,0x4f,0x6c,0xf9,0xf7,0x92,
0x73,0x28,0xff,0xc3,0x3d,0xee,0xa7,0xca,0xdf,0x73,0x1c,0xf7,0x85,0xa3,0x6e,0x2b,
0xff,0xde,0x7f,0xfd,0x0e,0xf9,0x19,0xa8,0x3c,0xe6,0x5c,0x4d,0x06,0x7a,0xda,0xa5,
0xca,0xbe,0xa5,0xcd,0xbe,0x0b,0x66,0x2e,0xf2,0x11,0x38,0x40,0xbf,0x33,0xec,0x5c,
0x5b,0x7c,0xb6,0xad,0x3d,0x51,0x19,0x5f,0x6a,0xe5,0xdf,0xa3,0xc2,0x00,0x1c,0x12,
0x8d,0x58,0x73,0xeb,0x9a,0x00,0x4b,0x20,0x9e,0x32,0xf9,0xf9,0xa4,0x96,0x76,0x67,
0xa1,0x77,0x38,0xa6,0x86,0xbb,0xdd,0x93,0xf2,0xef,0x34,0x17,0xff,0x24,0x53,0xf2,
0xef,0xa2,0xa4,0xa5,0xe1,0x98,0x0b,0xda,0xa7,0x07,0xa4,0x99,0xff,0x6e,0xe6,0xdf,
0x05,0x1c,0xc6,0x98,0x96,0x5f,0x61,0x89,0xa4,0x29,0xf9,0xf7,0x83,0x40,0xe2,0x7a,
0x9d,0x30,0x25,0xfe,0xe9,0x10,0xf6,0x35,0xa6,0x59,0xfc,0xf3,0x88,0x96,0x88,0x9f,
0x1c,0xff,0x94,0xc6,0x84,0x10,0xd5,0xb4,0x30,0xcb,0x4f,0x6f,0xb6,0xe7,0xdf,0x8d,
0xb0,0xe7,0xb6,0x86,0xb7,0xc8,0x94,0xfc,0xfb,0x76,0x07,0xea,0x4f,0x21,0x7c,0xc7,
0xe6,0xd4,0x63,0x43,0xe4,0xa8,0x1e,0x1b,0x7d,0xdb,0x96,0x3f,0x4a,0xca,0x55,0x92,
0x13,0xc8,0x39,0x48,0xfa,0xaa,0xec,0xf9,0xf7,0xa8,0xa3,0x6f,0x77,0x42,0x5c,0xa5,
0x38,0xa5,0xad,0x47,0xb0,0xa0,0xc5,0x62,0x4f,0x99,0xf9,0xf7,0x36,0xd8,0x10,0x60,
0x5d,0x7f,0x80,0x9d,0xc1,0xaa,0xc2,0x17,0x8d,0x98,0xfa,0xd3,0xb3,0x9b,0x3d,0x46,
0x59,0xc6,0xe2,0xc2,0xc6,0xf3,0x4c,0xff,0x1d,0x7e,0x19,0xfd,0x81,0x80,0x73,0x9b,
0xe2,0x87,0xb8,0x4b,0x04,0xe7,0x36,0xc5,0xdb,0xf4,0xa7,0xd4,0xaf,0x84,0x89,0x96,
0x1f,0x1b,0x9b,0x92,0x7f,0x17,0xfc,0xd2,0x46,0xa8,0x4a,0x2d,0x4d,0x13,0xbf,0x70,
0xf9,0x93,0xf2,0xef,0x6c,0x1d,0x3d,0x10,0xc8,0x9d,0x0f,0x39,0x66,0xe5,0x8f,0x8e,
0xb5,0x57,0x7c,0x47,0x59,0xfa,0x43,0x96,0xcf,0x81,0xca,0x7e,0x5b,0xfe,0x1d,0xf9,
0x29,0xff,0x36,0x7b,0xdb,0x39,0xf7,0x83,0xfc,0xac,0xc4,0x1f,0x53,0xb7,0x8d,0x4e,
0xca,0xbf,0xff,0x2a,0x75,0x85,0x84,0xbf,0x8a,0x7e,0xf1,0x48,0xd9,0x1f,0x82,0xe1,
0xde,0x49,0xf1,0xcf,0xb3,0xf4,0xa4,0x1a,0x4e,0x77,0x1c,0xe4,0xaf,0xca,0x2c,0x01,
0xe5,0x62,0xfe,0x7b,0x2e,0xff,0xde,0x41,0xff,0x01,0x2a,0x2b,0xfc,0x6b,0x23,0x67,
0x8a,0xf6,0x77,0xa6,0xfb,0xfc,0x56,0xfe,0x5d,0x82,0xc3,0xbf,0x63,0x26,0x8b,0x1a,
0xe5,0x0c,0x3c,0x0f,0xe5,0xaa,0x3d,0xfe,0x29,0x9d,0x80,0x47,0xd4,0xc0,0x22,0x31,
0xe8,0x1b,0x80,0xfd,0xaa,0x76,0x98,0xe0,0xb4,0x99,0x7f,0x3f,0x2c,0x9c,0x14,0xc3,
0xcc,0x0f,0x1f,0x87,0x89,0x96,0x1a,0x2d,0xfe,0x69,0xfa,0xef,0x23,0xca,0x15,0x61,
0xfa,0xa0,0x3b,0xc8,0x72,0xc2,0x42,0x38,0x6b,0xd3,0x9f,0x5b,0x52,0xe3,0xfe,0x89,
0xfc,0x79,0x32,0xcb,0xdb,0xc3,0x84,0xa8,0xe7,0x8f,0x4c,0xff,0x7d,0x95,0x74,0x2b,
0xd4,0xd6,0x89,0x2c,0xff,0x7e,0xab,0x16,0xff,0xf4,0xe5,0xec,0x93,0xe5,0x8f,0xf6,
0x83,0x2f,0x11,0xcd,0xf8,0x02,0x66,0x6c,0x34,0xe7,0xbf,0x03,0x4b,0xbf,0x56,0x50,
0x96,0x0b,0x61,0xe7,0xac,0xd8,0x81,0x81,0x9c,0x7d,0xd2,0xe4,0x8f,0x51,0xbb,0x21,
0xd1,0x46,0x9d,0x75,0x0a,0x8b,0xc9,0x61,0x3f,0xb3,0x66,0xfe,0x7d,0x7c,0xf7,0x04,
0xf7,0x79,0x61,0xf3,0x07,0x0d,0xd7,0xc6,0x3f,0x0b,0x47,0xe1,0xb7,0xf2,0x3c,0xb5,
0x29,0x8a,0x7a,0xbe,0x8f,0xd6,0xa8,0x85,0xb6,0xf3,0x4b,0xb8,0x8b,0x6d,0x5e,0x59,
0x7e,0x4a,0x4c,0x3a,0xbf,0x0b,0x49,0x8a,0xe3,0x62,0xf1,0x33,0x09,0xb4,0x38,0x51,
0xec,0x5f,0x2e,0xd2,0x3c,0x17,0x6c,0x04,0xb9,0xc7,0xce,0xcf,0xc6,0xb4,0xb8,0x07,
0x70,0x1b,0x4a,0xe6,0xad,0x82,0x64,0x4f,0x3a,0x2e,0x66,0x22,0xa3,0x96,0xff,0x4e,
0x8e,0xd3,0x85,0x4a,0x40,0xe4,0x03,0xa0,0xd2,0x90,0xfd,0xfc,0x52,0x0f,0xec,0x10,
0xde,0x48,0x3d,0xad,0xb8,0x55,0x7e,0x1c,0x41,0xa6,0xf5,0x73,0xdc,0xb0,0xcf,0x38,
0xec,0x45,0x7f,0xb3,0xab,0x0f,0xf5,0xcb,0x2a,0xb8,0x9c,0x0c,0xb5,0x05,0xac,0xfc,
0xd1,0xb0,0x27,0x50,0xb1,0x61,0x90,0x64,0xc5,0x87,0x88,0x0b,0xba,0x15,0x5f,0x5a,
0xb4,0xf2,0x47,0xaa,0xc0,0x53,0x94,0xf1,0x47,0xfd,0x37,0x22,0x7b,0x10,0xa4,0x3d,
0x7e,0x8b,0x9f,0x00,0xc5,0xea,0xba,0x44,0x59,0x56,0x3c,0x4d,0x1e,0xa5,0x87,0xdb,
0xd2,0xa3,0xe2,0xda,0x88,0xc9,0x4f,0x20,0xf4,0x05,0x1a,0x54,0x5c,0x25,0xfc,0x09,
0xfa,0x21,0x3a,0xf2,0x8d,0xb6,0xf3,0x4b,0x9e,0x14,0x99,0xc8,0xd4,0xc7,0xd1,0xcd,
0xac,0x43,0xe1,0xc4,0x38,0xcf,0x0f,0x12,0xbd,0x9f,0xa2,0xf0,0x50,0xe6,0x68,0x3c,
0xb4,0x1c,0xfb,0x79,0x9e,0x71,0xa9,0x7f,0x95,0xed,0xfc,0x92,0xb2,0x03,0x5d,0xde,
0x9d,0x11,0xbf,0x82,0x36,0xd8,0x0e,0x81,0x3e,0xb4,0x41,0xbb,0xff,0xde,0x8e,0x2c,
0x17,0x9b,0xc8,0xa3,0xb0,0x9e,0x56,0xda,0xcf,0x7f,0x46,0x3d,0x0f,0xa9,0xad,0xc2,
0xae,0x3e,0x7f,0x05,0x19,0xe0,0x5a,0x06,0x03,0xcc,0x00,0x4c,0xff,0x5d,0x78,0x12,
0x4e,0x40,0x57,0xda,0x35,0xc4,0x77,0xc2,0x09,0x29,0xdc,0x39,0xe9,0xfc,0x12,0xc5,
0x77,0x7d,0x8e,0x9d,0x49,0xd4,0xf8,0x29,0x57,0xdb,0xf8,0x89,0x7e,0x6a,0x0f,0xf2,
0x33,0xb0,0x95,0xe5,0xb5,0x53,0xb5,0x6a,0xc0,0xe2,0xa7,0x2a,0x54,0x44,0x36,0x28,
0xbb,0x62,0xe8,0x6e,0x0c,0x09,0xfb,0x34,0x47,0xde,0xe2,0xa7,0x30,0x1b,0x36,0x88,
0xbe,0x88,0x76,0xbe,0xee,0xa0,0x52,0xf1,0xa6,0x88,0xec,0xc9,0xf9,0xef,0xe0,0x41,
0x6c,0xa6,0x63,0x28,0x3b,0xfb,0x70,0x03,0x95,0x63,0x51,0x8b,0x9f,0xa7,0xbd,0x2c,
0xf7,0x11,0x32,0xf2,0x2b,0x2c,0x3f,0x4d,0x1b,0xec,0xe7,0x97,0x3e,0x56,0xb4,0xb4,
0x7b,0x1f,0x7c,0xcc,0x85,0x63,0xd5,0xb6,0xf3,0x4b,0x85,0x17,0xe1,0x63,0x75,0xff,
0x37,0x5c,0x31,0xe7,0x7c,0x38,0x02,0xe1,0x48,0xc0,0xe2,0xa7,0x17,0x3d,0x96,0xee,
0xf4,0xae,0xf9,0x28,0x3b,0x65,0x92,0x64,0x79,0x5f,0x8b,0x9f,0x71,0x68,0x8b,0x1d,
0x8c,0xcb,0xd1,0xe5,0x12,0x11,0xa4,0x9d,0x10,0x88,0x88,0x16,0x3f,0xbb,0x95,0xdd,
0x70,0x50,0xf6,0x35,0xb2,0x63,0x83,0xb0,0x47,0xae,0x6a,0x14,0x6d,0xe7,0x97,0xda,
0x58,0x8e,0x28,0x14,0x0f,0x78,0xf9,0x0a,0x78,0x27,0x59,0x15,0x0d,0x58,0xfc,0x8c,
0xc2,0x6e,0xe9,0x12,0x3c,0x17,0x77,0xff,0x35,0x8e,0xd9,0xab,0x22,0xae,0x77,0x8b,
0x9f,0x22,0xba,0x60,0x63,0x34,0x48,0x02,0x2d,0x3c,0x3b,0xc8,0x34,0x5f,0x0a,0x58,
0xfc,0x14,0x3d,0x0b,0x83,0x1f,0x41,0x79,0xda,0xff,0x6e,0xb1,0x57,0x3c,0x2b,0xa2,
0x7d,0x5a,0xfc,0x14,0xd1,0x77,0xaa,0xa1,0xd9,0xd1,0xfc,0x8a,0x82,0x24,0xfe,0xb3,
0xa2,0x4b,0xb4,0xf8,0x59,0x06,0xa5,0xb0,0x5f,0x29,0xeb,0xcd,0x6f,0x26,0x0b,0xe0,
0xa7,0xb4,0xac,0x45,0xb4,0xf1,0x93,0xce,0x82,0x3f,0xd2,0x4c,0xb6,0x70,0x25,0xff,
0x2c,0xbc,0x48,0x6f,0x91,0x03,0x36,0x7e,0x4a,0x9d,0xec,0x8c,0xa0,0x3a,0x3d,0x83,
0x7b,0x2a,0xce,0xfb,0x6b,0x76,0x7e,0x0a,0x0f,0x47,0x5e,0xa0,0xdf,0xcb,0xba,0x0e,
0x3a,0x6f,0x20,0x27,0xe8,0xad,0x69,0x57,0xb7,0xc9,0xcf,0x3d,0xd2,0xc3,0x89,0x44,
0xba,0x2c,0x8a,0xfe,0x58,0x81,0xba,0x1e,0x2a,0x7b,0xb0,0xf0,0xae,0xa9,0x15,0x77,
0xc0,0xdf,0xa2,0x14,0xba,0x7d,0xb4,0xc5,0x4b,0xdb,0x44,0x3f,0x45,0x90,0xe6,0xf4,
0xa7,0xe2,0x49,0xa2,0x9f,0xee,0x83,0x0d,0x0b,0x0a,0x44,0xa0,0x34,0x30,0x24,0x5a,
0xf9,0xcd,0xa4,0xb0,0x9b,0xc5,0x93,0x95,0x4a,0x3f,0xca,0xbe,0x5e,0xba,0x50,0xb5,
0xc5,0x3f,0x29,0xda,0xe0,0x3b,0xb8,0x57,0x4d,0x4f,0x39,0x87,0xe1,0xb2,0xa0,0xe5,
0x8f,0xcc,0xf3,0x4b,0xca,0x53,0xd2,0xc7,0x52,0x4d,0x63,0x65,0x96,0xef,0x86,0xf1,
0x74,0xcd,0x9b,0x01,0x8b,0x9f,0x3d,0xa8,0x3d,0x0f,0x46,0x7d,0x77,0x27,0xbb,0xf9,
0x95,0x52,0xf7,0xb9,0xda,0xc1,0x68,0xc6,0xb4,0x4f,0x2a,0xcc,0x81,0x6e,0xd0,0xa4,
0x1e,0x8a,0x38,0x90,0x5f,0xb6,0xf1,0x53,0x04,0x61,0x67,0x57,0x34,0x4d,0x17,0x04,
0x49,0x54,0xc9,0x40,0x05,0xb2,0xa0,0x25,0x17,0xff,0x54,0xc4,0x15,0x74,0x4c,0x41,
0x7e,0x76,0x39,0x3b,0x51,0x74,0x84,0xd4,0x40,0x17,0x9f,0xe5,0xf4,0x79,0xa0,0xd7,
0x8f,0xc9,0x57,0x65,0x5c,0xe6,0xdf,0xe7,0x3b,0xd5,0x71,0xe3,0xfc,0x52,0x2e,0xc7,
0xee,0xb8,0xea,0x99,0x98,0x59,0xa3,0x36,0x1d,0x2d,0x0f,0x7b,0xaf,0xe8,0xe7,0xc1,
0x46,0x6d,0xfe,0xfb,0xf3,0xf1,0xf2,0xd3,0x62,0x31,0xa9,0xf2,0x1e,0xa2,0x55,0x2c,
0xff,0x6e,0xe9,0x4f,0xdc,0x6a,0x24,0xf9,0x54,0x94,0xf1,0xd3,0xd8,0x37,0x2d,0xfd,
0xb9,0x57,0xda,0x4f,0xf4,0x18,0x84,0xb6,0x07,0x38,0x2d,0xfd,0x49,0x8b,0x98,0xee,
0x09,0xaa,0xae,0xb6,0x56,0x3d,0xe7,0x86,0x73,0xdb,0x67,0xe9,0x4f,0x89,0xc5,0x19,
0xaa,0x2f,0x2e,0x7d,0x10,0x26,0x3e,0xe1,0xfc,0xa7,0x23,0xd4,0x83,0xdb,0xba,0xde,
0x0e,0xb9,0xfb,0xb2,0x4d,0x7f,0x3e,0xcf,0x97,0xab,0xc7,0x59,0x1e,0xef,0x50,0xac,
0x4a,0x9d,0xa4,0x3f,0x9b,0x38,0x2d,0x65,0x9d,0x2e,0x6e,0x12,0x8d,0xdc,0x60,0x8e,
0x9f,0x59,0x6e,0x96,0xba,0x7f,0x27,0xcb,0xd9,0xe8,0x67,0xd6,0xb3,0xf9,0x93,0xf4,
0x67,0x72,0x7f,0x2a,0x38,0xec,0x5e,0xc8,0xdf,0xe5,0xf9,0x30,0x1d,0xee,0x73,0xd9,
0xf8,0xf9,0xdf,0x0f,0x2b,0xcf,0x37,0xd4,0xf7,0x6f,0xde,0x32,0x6d,0x9c,0xe5,0x8f,
0x26,0x9f,0xff,0x3c,0x5f,0x7c,0x5c,0x0e,0x0f,0xba,0xe6,0xb2,0x7e,0xe2,0xfe,0x60,
0xd7,0x9f,0x5b,0x0e,0x43,0xfb,0x0a,0xdc,0xc3,0xe3,0x2d,0xa5,0x28,0x60,0xf5,0xfc,
0xbb,0x31,0x2e,0x09,0xe4,0x5c,0x7b,0x7a,0xd5,0x50,0x74,0x6c,0x09,0xeb,0x8b,0x76,
0x16,0xc0,0xe4,0xe7,0xb6,0x01,0xa9,0x95,0x6d,0xa5,0x0b,0xb4,0xb1,0xd6,0xf2,0xef,
0x96,0xfe,0x64,0xf9,0xf7,0x30,0x75,0x3d,0xa1,0x9d,0x9f,0xd7,0xf3,0xef,0x96,0xfe,
0xd4,0xa4,0x50,0xb5,0x9e,0x13,0x9e,0xa2,0x3f,0x87,0x28,0x93,0x7a,0x4d,0x99,0x72,
0x76,0x2e,0xdd,0xc8,0xbf,0xe7,0xf4,0xe7,0x37,0xb5,0xd4,0x3a,0x63,0xa4,0x78,0x30,
0xae,0x9d,0x51,0x9a,0xac,0x3f,0xab,0x94,0xce,0x4f,0xc8,0xbf,0x03,0xb2,0x60,0x1f,
0xca,0x4e,0x51,0xcb,0xbf,0x5f,0x7b,0xfe,0x93,0xf5,0xc5,0x65,0xcf,0xbf,0x1b,0xfc,
0x3c,0x7d,0xe3,0x90,0x74,0x49,0x61,0x52,0x0f,0xfb,0xf2,0xc1,0x35,0xfa,0x53,0xba,
0x34,0x37,0x74,0x87,0x71,0x1e,0xb3,0x4b,0xd9,0x6c,0xf1,0x73,0xad,0x94,0xf5,0x74,
0xc7,0x7d,0xde,0xa4,0x76,0x7e,0x09,0x58,0x22,0xde,0xe4,0x27,0x68,0xe7,0x97,0xfc,
0x11,0xd1,0xfb,0x62,0x9f,0x0b,0xd7,0xd1,0x24,0xfd,0xc9,0xf2,0xef,0xdc,0xb7,0x17,
0x89,0xa9,0x3c,0xf6,0x0e,0xbe,0x49,0xe7,0x3f,0xb7,0xe0,0x63,0xb8,0x05,0x8a,0x2b,
0xd5,0x30,0x54,0x7a,0x89,0x84,0xec,0xe7,0x3f,0x57,0x72,0x7b,0xa5,0x0f,0xe0,0x19,
0xa5,0x3a,0x35,0xc3,0x3c,0xb3,0x60,0xd3,0x9f,0x2c,0x57,0xc7,0x64,0x75,0xf6,0x1a,
0xfd,0xe9,0xb8,0x19,0x9e,0x75,0x54,0xd3,0xa5,0x69,0xbe,0x53,0x7a,0x36,0x31,0x45,
0x7f,0xde,0xd9,0x53,0x03,0x2e,0x9c,0xd2,0x25,0xe8,0x64,0x78,0xa6,0x9c,0xff,0xd4,
0x64,0xe7,0xc0,0xa6,0x35,0x84,0x9d,0x19,0x9a,0x72,0xfe,0xf3,0x86,0xe0,0x3f,0x0d,
0xdf,0x36,0xe2,0xfe,0x49,0xeb,0x59,0x40,0xfd,0x39,0xf9,0xfc,0x67,0xa7,0xf7,0x8a,
0x1c,0xee,0xd7,0xce,0xf9,0x5f,0x8e,0x4f,0xca,0xbf,0xe3,0x7a,0x5f,0x74,0x32,0x15,
0x66,0xb2,0x93,0x25,0x92,0xb4,0xf5,0x6e,0xe9,0xcf,0x87,0x23,0xad,0xa9,0xc0,0x48,
0x32,0x18,0x19,0x70,0x3d,0x95,0xae,0x1c,0xb6,0x9f,0xff,0xa4,0x29,0xda,0x2e,0xe3,
0xf2,0xf9,0x7e,0xcb,0x19,0x38,0x20,0x6b,0x5a,0x38,0xa7,0x3f,0x65,0x8f,0x0a,0x2c,
0xad,0x89,0xcc,0x1a,0x60,0x12,0x8e,0xbd,0xc3,0x69,0xd3,0x3e,0x59,0xfe,0xbd,0x5e,
0xc5,0xe7,0x8d,0x88,0xc6,0x7a,0x3f,0x6f,0xf2,0x73,0x48,0x0f,0xcf,0x66,0x66,0x8c,
0xe4,0x58,0x90,0xe3,0xa7,0x2a,0x7d,0x84,0x08,0xd8,0x8f,0xba,0xd5,0x59,0x07,0x86,
0x96,0xca,0x9a,0xf6,0xf9,0x8d,0x18,0x3b,0x1f,0xa9,0xe9,0xcf,0xee,0x32,0xc6,0x17,
0x9b,0xfe,0xf4,0xcb,0x87,0xf4,0xd0,0x26,0x3a,0xa5,0x74,0xca,0xf9,0x4f,0x91,0xee,
0x97,0xe5,0x01,0xa6,0x3f,0xa5,0x7d,0xb2,0xe6,0xc7,0x99,0xfa,0xb3,0x30,0xae,0x4c,
0x04,0x51,0x1e,0x7f,0xc0,0x57,0xb2,0xb5,0xa2,0x6e,0xb6,0xeb,0x4f,0x55,0xbd,0x12,
0xac,0x3f,0xed,0xbe,0xc0,0x8f,0xa7,0xc7,0x56,0x86,0x07,0x27,0xe5,0xdf,0xdf,0xc6,
0xe9,0xae,0x89,0xbb,0xbb,0xf9,0x5a,0x54,0x52,0x28,0xaa,0xec,0xe7,0x3f,0x07,0x44,
0xc6,0x41,0xed,0x9c,0xea,0xdf,0x30,0x0e,0xa6,0x89,0x6c,0xe7,0xa7,0xbe,0x94,0x0d,
0x7e,0xca,0xc4,0x67,0x3f,0xff,0xd9,0xe2,0xeb,0x33,0xdb,0x7d,0x46,0xb6,0xc5,0x3f,
0x7f,0x0d,0x57,0x8e,0x22,0x07,0x33,0xce,0x2a,0x7d,0xcc,0xac,0xfc,0xd1,0x39,0xf8,
0x0d,0xfc,0x51,0x1b,0xaa,0x86,0xab,0x54,0x3b,0x3f,0x9f,0x75,0xda,0xf9,0x79,0x1c,
0xba,0x7a,0x10,0x9b,0x23,0x1e,0xe6,0x6f,0xba,0x76,0x59,0xdf,0x1f,0x09,0x81,0xb6,
0x4d,0x12,0x1b,0x4f,0xdf,0x08,0x3b,0x33,0xdb,0x2f,0x3e,0x61,0x8f,0x7f,0x92,0x56,
0xd8,0xcd,0xc6,0x93,0x1d,0x42,0x0f,0x28,0xf9,0xb2,0xf9,0xfd,0x51,0x86,0x0b,0xab,
0xed,0x99,0x9b,0xb2,0xe2,0x0f,0x7d,0x67,0x51,0xfc,0x55,0x66,0xf3,0x87,0xad,0xef,
0x8f,0xa2,0xd3,0x53,0x2f,0xd0,0xda,0xfe,0xa6,0xcf,0x37,0x9c,0x27,0x2f,0xc8,0x61,
0xb5,0xd2,0x96,0x3f,0xba,0x71,0xaf,0x74,0x05,0x6e,0x50,0xab,0xbb,0x1a,0xf4,0x79,
0xd7,0xce,0x7f,0xea,0xfd,0x44,0x7e,0xba,0xd8,0x77,0x52,0xae,0xee,0x72,0x16,0x2f,
0x0f,0xb1,0xd8,0xbd,0xc9,0xcf,0xa2,0xc3,0x9c,0x1e,0xff,0x44,0x7f,0xec,0xef,0x18,
0x3f,0xe3,0xf6,0xf3,0xf3,0xa4,0x95,0x3e,0xc6,0xce,0x2f,0x85,0xe1,0x67,0x8c,0x9f,
0xf6,0xfc,0x11,0x76,0x3d,0x2b,0xb3,0x6d,0x7d,0xba,0x82,0x83,0x3e,0x84,0x63,0x6d,
0x9d,0x9f,0xef,0x86,0x13,0xca,0xbc,0x74,0x20,0xed,0xf4,0xc2,0x6b,0xd0,0xc5,0x62,
0xb1,0x36,0x7e,0x2a,0x63,0x92,0x71,0x7e,0x69,0x78,0xaa,0xff,0xce,0x50,0xd5,0xa5,
0x7d,0x67,0xa3,0x7d,0x6c,0xe4,0x62,0xe7,0x3f,0xed,0xfe,0xfb,0x03,0x1a,0x3f,0xd9,
0x41,0x0a,0x2c,0xf0,0x71,0x53,0x7f,0x56,0x68,0xc8,0xd1,0xea,0x12,0xec,0x7b,0x19,
0x89,0x98,0xe7,0x3f,0xc1,0x81,0xfc,0xf4,0xc5,0xfc,0x1e,0xf2,0x16,0x61,0x42,0x14,
0x0b,0x91,0x1c,0x3f,0x45,0xc6,0x4f,0x1f,0x7a,0x50,0xcc,0x7f,0xe7,0x42,0x30,0x40,
0x79,0x2b,0x7f,0x84,0x1a,0x53,0xa9,0x64,0x38,0x7a,0x59,0x3f,0x23,0x3f,0x68,0x3b,
0x3f,0xff,0x16,0x1c,0x95,0x6b,0x95,0xc0,0x16,0x9e,0x1d,0xb2,0x7f,0x30,0xe6,0xda,
0x66,0xf9,0xef,0x90,0xa5,0x49,0xa6,0x35,0x5a,0x88,0x4a,0xbb,0xa1,0x0a,0x44,0x5b,
0xfe,0xc8,0xf1,0xf2,0xba,0x75,0x2b,0x67,0x47,0xc4,0xd8,0xcc,0x97,0xdb,0x9e,0x86,
0x4a,0x45,0x8c,0x59,0xe7,0xe7,0x4b,0x9e,0x12,0x1f,0x52,0xca,0x14,0x71,0x30,0x72,
0x11,0x7e,0x23,0x54,0x47,0xed,0xf9,0x23,0xcf,0xee,0xc8,0x51,0x5a,0x1b,0x77,0x31,
0x4c,0xb3,0xf3,0xf3,0x36,0xfd,0xf9,0x6d,0xf8,0x25,0x7c,0xa0,0x84,0xef,0x76,0xa7,
0x5a,0xdf,0x92,0x2f,0x89,0xe1,0xc8,0xb5,0xe7,0xe7,0xb5,0xf8,0x2e,0x2b,0x70,0xae,
0x5e,0x33,0x7f,0x84,0xfe,0xbb,0xe3,0x35,0xf0,0xa5,0xc4,0xdd,0xe4,0xb3,0xc2,0x46,
0xc5,0x97,0x2a,0x1c,0x36,0xf3,0x47,0xfe,0x75,0xab,0x84,0x47,0x40,0x1e,0xf4,0xd7,
0xf9,0xee,0x24,0xad,0xaa,0x6f,0x50,0xf4,0x9b,0xf9,0xa3,0xb2,0xa2,0xf9,0xf0,0x53,
0x0e,0xed,0xe5,0x41,0xf2,0x35,0x68,0x57,0xcb,0x17,0xe5,0xaf,0x34,0xf3,0x47,0x7e,
0xef,0x2c,0xf8,0x5b,0xba,0x1f,0x39,0xe8,0xfc,0x1a,0x9c,0xa4,0xfb,0xb2,0xae,0xe1,
0xf2,0x5c,0xfe,0x48,0x2c,0x6a,0x83,0x77,0x75,0x1c,0xed,0xd5,0x39,0x9f,0xb6,0xf9,
0xef,0xe7,0x95,0xe3,0x28,0x3b,0xc5,0x7e,0x7e,0x2e,0x8b,0xd7,0xa5,0x5d,0xa7,0xcb,
0xad,0xef,0x8f,0x7e,0x2a,0xac,0x4f,0xa3,0xcc,0x8d,0x90,0x82,0x6c,0x1b,0xca,0x5f,
0x71,0x30,0x6f,0xae,0x69,0x9f,0xb8,0xf7,0x4b,0x7b,0xa8,0xa8,0xa0,0x86,0xc6,0xba,
0x36,0x71,0xb9,0x99,0x3f,0x8a,0x7b,0x5e,0x54,0x36,0xaa,0x28,0xab,0x81,0xcc,0x95,
0x58,0x02,0x07,0xfd,0x0e,0xaf,0xe9,0xbf,0x77,0xc2,0xd1,0xe4,0xc2,0x78,0x20,0xe3,
0x9c,0xc3,0xf2,0xc5,0xc3,0x68,0xa8,0xd6,0x3d,0x99,0x5b,0xf4,0x1c,0xfb,0x3c,0x6a,
0x28,0xd7,0x4f,0xeb,0xfc,0xfc,0x18,0x1c,0xf5,0xd6,0x34,0xa2,0xff,0x7e,0x11,0x96,
0xe9,0x2c,0x90,0x4d,0xff,0xe8,0x7e,0x39,0xc9,0x6c,0x30,0x43,0xde,0x56,0x36,0xb1,
0x3d,0x5c,0x26,0x72,0x2e,0xff,0x2e,0x88,0xcc,0x64,0x41,0x3b,0xf6,0xd9,0xc9,0x62,
0x25,0x29,0x2b,0x7f,0x24,0x71,0x2d,0x34,0x98,0xc6,0xf9,0xb8,0xbb,0xa7,0x82,0x9d,
0x13,0xc0,0xba,0x5c,0xfe,0x48,0x11,0xbf,0x45,0x8e,0x46,0x83,0x9c,0x4b,0xe0,0x7b,
0x5a,0xb2,0x50,0x47,0x71,0x6e,0xe5,0x1c,0x3f,0x4b,0x06,0x15,0x44,0x38,0xe7,0xee,
0x45,0x69,0x79,0x06,0xea,0x81,0x9d,0xff,0x34,0xf9,0x39,0x9e,0x1d,0x4b,0xe0,0x36,
0x74,0x94,0xaf,0xa3,0x7f,0x35,0x59,0x7f,0x52,0xe9,0x63,0x86,0xf0,0xac,0x93,0xfa,
0xc2,0xca,0x21,0x59,0xd3,0x9f,0xd6,0xf7,0x47,0x03,0x29,0x6d,0xab,0x21,0xec,0xcc,
0x97,0x1e,0x9c,0xb3,0x7d,0x7f,0xc4,0x3d,0xcb,0xe2,0x9f,0xc2,0xb5,0xfa,0x13,0x50,
0x7f,0x8e,0xe5,0xe1,0xdc,0x6e,0x77,0x32,0x08,0x69,0x93,0xac,0x4e,0xd1,0x9f,0xee,
0xcb,0xfc,0x2a,0x61,0x82,0xbb,0x46,0x7f,0x8e,0x21,0xc6,0x98,0xfe,0x24,0xc7,0x85,
0xf0,0xe4,0xf3,0x9f,0x75,0xae,0x67,0x95,0xe9,0x88,0xf7,0x45,0x3f,0xc9,0x7d,0x73,
0x60,0xd7,0x9f,0x99,0x56,0x0d,0xef,0x2e,0xa1,0x55,0x98,0xfa,0xfd,0xd1,0xf2,0xa7,
0x77,0xe0,0x36,0x3b,0x7c,0xc7,0xd7,0xe4,0x76,0x3a,0xe5,0xfb,0x23,0x17,0x7c,0x98,
0xa9,0x1b,0x76,0xef,0x6b,0x98,0x0e,0x2f,0x40,0x9d,0x3a,0xe9,0xfb,0xa3,0xdf,0xcc,
0x9b,0x90,0xc2,0x7d,0xd8,0x85,0xc3,0xb9,0x78,0x88,0xc5,0xcf,0x91,0x36,0x55,0xc5,
0xfd,0x61,0xdb,0xb4,0x91,0xa4,0xf1,0x7e,0xd6,0xf7,0x47,0x87,0x93,0xd3,0x24,0x26,
0x3b,0xd7,0x9f,0x91,0x0f,0xe8,0x7a,0xc2,0xe2,0xe7,0x73,0xc9,0x42,0x15,0xb1,0xb9,
0x9c,0x9c,0xbd,0xf7,0x40,0x5b,0xa5,0x5d,0x7f,0x6a,0xdf,0x1f,0x51,0x59,0x8f,0x7f,
0xe6,0xb4,0xbe,0x5d,0x7f,0x52,0x94,0x9d,0x7b,0xf9,0xe1,0x5c,0x20,0xd4,0xc6,0xcf,
0x3c,0x4d,0x0a,0x25,0x5a,0x55,0x4d,0x13,0x15,0x4e,0xe6,0xe7,0x71,0x12,0xd6,0x6d,
0xf7,0xa8,0xae,0xb3,0x6c,0xdf,0x1f,0xc1,0x06,0xaa,0x9d,0x9f,0x1f,0x92,0x19,0x3f,
0x79,0x7b,0xfc,0x53,0x20,0x9b,0x64,0xe3,0xec,0x79,0xc2,0xd0,0x6e,0x56,0xfc,0x13,
0x7f,0xa4,0x9d,0x9f,0xef,0x23,0xda,0x41,0xd0,0xc9,0xfa,0xb3,0x5f,0x60,0xfa,0xac,
0x21,0x2b,0xf4,0xa3,0x4a,0x99,0xfa,0xfd,0x91,0x26,0xe7,0xe4,0xb7,0x84,0xa9,0xdf,
0x1f,0x95,0xbc,0x45,0x7b,0xd8,0x8f,0xb6,0xf1,0x43,0xc2,0xa5,0xc6,0xf0,0xa4,0xef,
0x8f,0xbc,0xe7,0x90,0x9f,0x55,0x80,0xdb,0x3a,0x3a,0xf2,0x30,0x87,0x73,0x4e,0xfa,
0xfe,0x48,0xdd,0xa9,0x4b,0xe0,0x23,0xec,0x7c,0xf2,0x22,0xfb,0xf7,0x47,0xdc,0x6e,
0xb2,0x61,0x63,0x95,0xf2,0x19,0x76,0x80,0xe6,0xa0,0x7e,0x06,0xcb,0xe4,0x27,0x3b,
0x97,0xa0,0x75,0x61,0x06,0x13,0xa2,0x2c,0x10,0x6a,0x7d,0x7f,0x04,0x6d,0xf0,0x23,
0x91,0xd5,0x35,0x8c,0x7c,0xd2,0xf7,0x47,0x63,0x4a,0x88,0x9b,0x8e,0x7b,0x95,0x3c,
0xf5,0xfb,0x23,0x76,0x7e,0x5e,0x65,0xc7,0x3e,0xf9,0x27,0xe9,0x6b,0x64,0x6a,0xfc,
0x93,0xdb,0x1f,0x93,0xd5,0x82,0x20,0x79,0x09,0xf7,0xf7,0x55,0x53,0xe2,0x9f,0x83,
0x07,0xe8,0xfd,0xaa,0x73,0x8d,0xef,0x39,0xf8,0x99,0x3c,0x49,0x7f,0xb2,0xf3,0xf3,
0xff,0x9c,0x9a,0x97,0x9d,0xbe,0x16,0x75,0xe4,0x6f,0x07,0xa7,0x7e,0x7f,0x44,0xaf,
0x54,0x84,0x55,0xe7,0x3e,0x16,0x57,0x54,0xa6,0x9c,0xff,0x9c,0xf0,0x9c,0x48,0xe1,
0x32,0x7f,0x90,0xbf,0xf5,0xda,0xef,0x8f,0x9e,0xa3,0xeb,0x68,0x80,0x3d,0xa6,0x45,
0xd0,0x0f,0x7b,0xda,0xbe,0x3f,0xda,0xa1,0xb6,0xb1,0x1f,0x8d,0x92,0x1d,0xb4,0x7d,
0xf2,0xf7,0x47,0xec,0xfc,0x67,0x42,0xd1,0x3e,0xfb,0x6b,0xd3,0x84,0xe8,0xa4,0xef,
0x8f,0x0e,0x13,0x2d,0xbc,0x3e,0xca,0x6b,0x07,0x41,0x27,0x7f,0x7f,0x34,0x20,0x5c,
0xee,0xc7,0x1f,0x75,0x17,0xbf,0xe5,0x9a,0xa2,0x3f,0x39,0x69,0x5c,0x78,0x09,0x59,
0xb0,0x6c,0xb4,0x21,0x24,0x4c,0x40,0xcd,0x94,0xef,0x8f,0x60,0xa3,0xbe,0xcc,0x7d,
0x54,0xff,0x50,0xc7,0xfa,0xfe,0xc8,0x81,0x75,0x8a,0x76,0xe4,0xdc,0x4f,0x0f,0x09,
0xbe,0x49,0xfa,0x13,0x77,0x37,0x5a,0x16,0xe8,0x5b,0x1a,0x2c,0x06,0xc5,0x10,0x7f,
0xe6,0xf7,0x47,0xe2,0x83,0xb4,0x3f,0x18,0x1e,0xdc,0x3c,0xca,0xfb,0x53,0x13,0xa0,
0x9d,0x2f,0xb7,0xbe,0x3f,0x1a,0x89,0xac,0x9d,0xc7,0x96,0xf9,0x0c,0xf5,0xb4,0xe1,
0x17,0xe7,0xf8,0x09,0x47,0x16,0x73,0xcd,0xca,0x7d,0xd2,0xe7,0x3c,0x8e,0x1e,0x2c,
0x9c,0x91,0xae,0x13,0x0a,0xd5,0x79,0xc6,0x3d,0x8f,0x28,0x79,0xf3,0xb8,0xfb,0x94,
0xaf,0x48,0x45,0x7d,0xd3,0x56,0x03,0x2b,0x94,0xa8,0xf3,0x8c,0xd8,0x21,0x25,0x5c,
0x33,0x8d,0xca,0xc5,0xa2,0xd0,0xc2,0xc9,0x7a,0xc1,0xf8,0x1c,0x96,0x0d,0xcc,0xce,
0x38,0xb7,0x1c,0x66,0x4a,0x0e,0x95,0x8b,0xc3,0x72,0xf8,0x32,0x38,0xcc,0xba,0x23,
0x51,0xc7,0x5f,0x2c,0x12,0xe9,0x75,0x72,0x49,0xbf,0x63,0x0d,0xdc,0x47,0xef,0x91,
0xad,0x7b,0x1e,0xb9,0x1b,0xe2,0x92,0x08,0x4b,0x44,0xf6,0xa1,0x3e,0xa7,0x7f,0xa8,
0x6f,0xf5,0x45,0x28,0xe3,0x44,0x88,0xc8,0x12,0x15,0x64,0xa3,0x60,0xd6,0xdd,0xc3,
0x29,0x54,0x91,0xef,0xf5,0x97,0xf0,0x9c,0x92,0x88,0xc9,0x25,0xfe,0x12,0x30,0xfb,
0xe9,0xe2,0xae,0x83,0x2f,0xa9,0xd2,0xf7,0xa1,0x14,0x0b,0x8a,0x5a,0xd2,0x2c,0xe0,
0xce,0x9a,0xeb,0x27,0x7c,0x46,0x66,0xe3,0x1e,0x03,0x36,0xdd,0x54,0xc0,0x79,0x33,
0xfb,0x19,0x47,0x9d,0xba,0x11,0xe4,0xb6,0xa8,0x4c,0xbc,0x5c,0x0b,0xc8,0x54,0xe4,
0xcc,0x7e,0xc6,0xa6,0xad,0xe0,0x5e,0x85,0xbb,0x15,0x4f,0xca,0xb1,0x82,0x8b,0xc2,
0x5f,0x28,0x1e,0x5b,0x3f,0xf3,0xd7,0xe0,0x2b,0xd7,0x22,0xac,0xf0,0xdd,0x4f,0xd3,
0x99,0x58,0x30,0xeb,0x22,0x79,0x72,0xc2,0x25,0xf3,0x62,0xac,0x37,0x4f,0xa2,0x2e,
0x79,0x91,0x28,0x59,0xe3,0x29,0x2a,0x41,0xa6,0x5e,0x40,0x97,0x31,0x80,0x05,0xd5,
0xec,0xa7,0x93,0x0b,0x26,0x9a,0x32,0x33,0x44,0xe1,0x38,0x17,0x52,0x9b,0x82,0xf7,
0x88,0x82,0x6a,0xf6,0xb3,0x61,0x76,0x73,0x62,0x79,0xb0,0x58,0x2c,0x69,0x15,0xe2,
0xea,0xf2,0xe0,0x97,0xc5,0x12,0x6a,0xf6,0x33,0x02,0x71,0x7c,0x4c,0x44,0xc4,0xfe,
0x37,0x27,0x44,0x7c,0x30,0x58,0x7d,0xb9,0x5b,0xa8,0x48,0x14,0xca,0x91,0xa8,0xb4,
0x15,0x0b,0x5e,0xb9,0x21,0x5a,0x64,0xd5,0xb1,0xc1,0x5f,0x02,0x31,0x5a,0x82,0x05,
0x2e,0x02,0x45,0xb4,0x44,0x31,0xfb,0x99,0xc7,0x2d,0xa2,0xf7,0xc6,0x4b,0xbc,0x8e,
0x3c,0xee,0x8b,0x34,0x16,0xf7,0x78,0x1d,0x60,0xce,0x7b,0x31,0x27,0xd0,0x7c,0x59,
0x8a,0x0a,0xc5,0x0e,0x01,0x27,0x04,0x0b,0xd6,0xbc,0x37,0xf2,0x5f,0xa4,0x6d,0xf1,
0x6f,0x78,0x5d,0x4b,0xc8,0x17,0x13,0x2f,0xc7,0x6f,0xf1,0x16,0x9a,0xf7,0x3c,0xb2,
0x64,0xda,0xea,0xc4,0x99,0xf8,0x57,0xbc,0x25,0xeb,0x1d,0xab,0x13,0xf7,0xc5,0xff,
0xd2,0x5b,0x62,0xda,0xe0,0xe2,0x25,0xdc,0x6a,0xba,0x3c,0x7e,0x9d,0xb7,0xf0,0x45,
0x2c,0x9c,0x61,0x05,0xb5,0xc8,0xac,0xcb,0x9b,0x47,0x5d,0xf1,0x7b,0xbc,0x9e,0x17,
0x67,0xce,0xa3,0xa5,0xf1,0x06,0x6c,0x57,0x94,0x8b,0x6d,0x03,0x0e,0x7f,0x93,0x4c,
0xc4,0xc5,0x2d,0x5c,0x1c,0x27,0xf1,0x3a,0xb4,0xcf,0x5c,0x8d,0x90,0xc7,0x95,0x25,
0x96,0xc7,0x67,0x7a,0x1d,0xeb,0xb9,0x32,0x1a,0x8d,0x7f,0xd9,0xeb,0xa0,0xb9,0x75,
0xb4,0x58,0x11,0x9a,0xb9,0x26,0x98,0x01,0x92,0xea,0x68,0x86,0x26,0xf8,0x0a,0x78,
0xcc,0x7b,0x2e,0x26,0x10,0x4f,0x8b,0x38,0x9e,0xd0,0x02,0x71,0x1c,0x4f,0xd4,0x3a,
0xd4,0xec,0x4b,0x44,0x90,0x77,0x7a,0xe5,0x25,0xfe,0xa2,0x16,0x41,0x4e,0x48,0x72,
0xc4,0x5f,0x64,0xab,0x2b,0x53,0x54,0x4f,0xf0,0x4b,0xae,0x92,0x08,0xa7,0xf4,0xdc,
0x5b,0xe6,0x71,0x95,0x28,0x66,0x3f,0x09,0x17,0xeb,0xbf,0x17,0x4d,0xd6,0x11,0xe1,
0x62,0xf4,0xde,0x20,0x16,0x94,0x5c,0x5f,0x70,0xc5,0xc7,0x8b,0x24,0x10,0x60,0x91,
0x28,0x28,0xe0,0x45,0xfb,0x34,0x6b,0x60,0xf1,0xdd,0x11,0x5f,0x82,0xca,0xb2,0x28,
0x12,0x22,0xeb,0x05,0x30,0xfb,0xa9,0x2c,0x63,0xeb,0xef,0x2f,0xa0,0x84,0x3a,0x72,
0x85,0x5c,0x5f,0x8e,0x28,0x8b,0x9a,0xd9,0x09,0x18,0x5c,0x92,0x0e,0xa6,0x94,0xaf,
0x83,0x3b,0xcd,0x71,0x01,0x98,0x13,0xe7,0x4a,0x81,0xe0,0xbb,0xcf,0x94,0xf1,0xe1,
0x4b,0xa0,0x48,0x35,0xab,0x24,0x98,0x0d,0xf9,0x40,0x14,0x78,0x05,0x2a,0x58,0xc1,
0xfc,0x8b,0x1d,0x78,0x55,0x70,0xf3,0xee,0x6f,0x52,0x8a,0x25,0xa1,0x8f,0x2b,0x83,
0x68,0xec,0x1e,0x49,0xb0,0xda,0xf9,0x1c,0xdf,0x3a,0xf7,0xd5,0xe0,0xcc,0x8e,0x92,
0xc7,0xb9,0x95,0xea,0xdd,0xc1,0x2f,0x77,0x94,0x50,0xdb,0x3d,0x6b,0xf1,0x56,0x11,
0x05,0x52,0x78,0x8b,0x29,0xf7,0x14,0x85,0x60,0x91,0x57,0x58,0xc2,0x15,0x25,0x95,
0x78,0x91,0xb7,0x0d,0x0b,0xb6,0x7e,0xf6,0x14,0x89,0xf7,0x0a,0x5f,0xe2,0x4b,0x94,
0xfb,0x63,0x45,0xf7,0x0a,0x25,0xdc,0x97,0x14,0xab,0x5d,0x59,0x44,0x54,0x5a,0x3c,
0xf2,0xba,0x28,0x16,0xee,0x65,0x05,0xab,0xae,0x60,0x31,0xa1,0xc0,0x04,0x68,0x81,
0x83,0x50,0x25,0x2d,0xf9,0xad,0x3b,0x82,0x8b,0xbf,0x07,0xda,0x7a,0x56,0x07,0x45,
0x2c,0x70,0xdb,0xd4,0x5b,0x82,0xa2,0xed,0x9e,0x8e,0x35,0xdc,0x7d,0xf4,0x2b,0xf2,
0xf5,0xfd,0x5a,0xe1,0x2f,0x91,0x59,0x66,0x1d,0x8f,0x50,0x91,0x82,0x82,0x8b,0x63,
0x85,0xa2,0x90,0xe0,0x2a,0xb2,0xee,0x59,0x3f,0xb1,0x6c,0xe2,0xef,0x27,0x2e,0x4e,
0x7c,0x6c,0x16,0xe0,0xdf,0x75,0x85,0xdf,0x58,0x76,0xb5,0xf7,0xa5,0xcc,0xfe,0x1f,
0x87,0x5f,0x77,0xd6,0xb2,0xc2,0x2a,0xdb,0x3d,0xeb,0x97,0x4d,0xcc,0xa8,0xe9,0x1b,
0xf8,0xb8,0xbe,0xae,0xe9,0x24,0x2b,0x7c,0x60,0xab,0xf3,0x46,0xcf,0x16,0xcf,0x6b,
0xeb,0xf8,0xc1,0xad,0xde,0xe8,0x8b,0x33,0x0e,0x60,0xc1,0xaa,0x9b,0x06,0xab,0x95,
0x1d,0xb1,0x9b,0x3c,0xf9,0x47,0x48,0xb3,0x52,0x2a,0xdd,0x24,0xe4,0x5b,0xef,0x20,
0x98,0x9c,0x77,0xac,0xe6,0x74,0xce,0x9b,0x75,0x1a,0xe7,0xe3,0xc5,0x5e,0x61,0xbd,
0xc9,0x79,0xab,0x5d,0xfc,0x7e,0xed,0xef,0xa9,0x08,0x16,0xaf,0xad,0x31,0x13,0xd6,
0x24,0x4a,0xd5,0x2f,0xc8,0x45,0xfd,0xc2,0x1a,0xae,0x94,0x62,0xc1,0xba,0x27,0x00,
0xb6,0x93,0xf3,0x44,0xdb,0x1f,0x64,0x31,0x6b,0x24,0x61,0x0e,0x97,0x4f,0x23,0xcd,
0xd2,0xcf,0x85,0xd9,0x9c,0x48,0x19,0xe7,0xcd,0xba,0x32,0x58,0x9a,0x28,0x4e,0x37,
0x76,0x16,0x55,0x09,0x51,0xaa,0x15,0xcc,0xa7,0xa9,0x55,0xb8,0x66,0x3d,0xd4,0x11,
0xe4,0xee,0x82,0x7b,0x70,0xcd,0x62,0xc1,0xd8,0x52,0xd9,0x81,0x5a,0xe1,0x16,0x28,
0x52,0x84,0xe5,0x9c,0x07,0xbe,0x40,0xd9,0x82,0xe2,0x8c,0x6d,0x1a,0xdb,0x29,0x44,
0x62,0xa7,0xde,0xd8,0x01,0x32,0x30,0x0a,0x56,0x9d,0xc0,0x00,0x88,0xcb,0x80,0x62,
0x61,0x3b,0x5b,0x0f,0xd4,0xa8,0x93,0x38,0x05,0x55,0xc7,0x76,0x98,0x03,0xf9,0x94,
0x3c,0x80,0xef,0x80,0x85,0x16,0xe3,0x71,0xca,0xe2,0xbb,0xf3,0xe2,0xc8,0xf9,0x06,
0x51,0xea,0x15,0x82,0x7a,0x21,0xc7,0x10,0x05,0x6f,0x2f,0x1b,0x78,0xc7,0xe7,0x18,
0x05,0xa3,0x1d,0x38,0x21,0x48,0xd9,0xfe,0x00,0xbd,0x10,0xec,0xd1,0x0a,0x66,0xdd,
0xe2,0x06,0xa1,0x19,0x31,0xbd,0x44,0x44,0xbe,0xc4,0x7b,0xb4,0x02,0xcd,0xdd,0x93,
0x63,0x9c,0x17,0x91,0x75,0xc0,0xf6,0x29,0x17,0x2b,0xe4,0xc6,0x2c,0xc6,0x21,0x97,
0xcc,0x76,0xd8,0x17,0xd6,0xce,0xbc,0xa7,0x82,0xef,0xe1,0x81,0xc5,0x6c,0x6c,0x70,
0x0b,0x64,0x85,0x88,0x71,0xcf,0x28,0x4e,0xca,0x22,0x2a,0xc5,0x05,0xaf,0x90,0x2b,
0x98,0xec,0x89,0x6a,0xe7,0x6c,0x10,0xef,0xa2,0x80,0xee,0x9b,0x5e,0x00,0xf3,0x9e,
0x8c,0xf3,0x0c,0xef,0xf9,0x4b,0x8c,0x42,0xa1,0xc9,0x3a,0x6e,0x89,0xb0,0x3a,0x51,
0x1a,0xff,0x82,0xb7,0xe8,0x45,0x61,0xf5,0xba,0x1d,0xac,0x60,0xbe,0x1f,0x8e,0xf2,
0x6a,0xba,0x23,0x7e,0x93,0x37,0xff,0x45,0xb2,0x9a,0xde,0xc7,0x0a,0xb9,0xba,0x18,
0xe3,0x3c,0x6e,0x0b,0xb8,0x3f,0xbc,0xe8,0x60,0x37,0xc0,0x42,0xaf,0x39,0x9e,0xc5,
0x50,0x41,0x6f,0x97,0x8b,0xa3,0xc2,0x56,0xae,0x82,0x36,0xd9,0xed,0x53,0xc1,0x77,
0x28,0xa3,0xde,0x78,0x9e,0x97,0x5b,0x0f,0xf7,0x27,0xb4,0x82,0x69,0x4b,0x8b,0xe3,
0x8e,0x05,0xdc,0x2c,0x34,0x87,0xa2,0xac,0xb0,0x56,0x2f,0x98,0x73,0xc4,0xdd,0x0d,
0x2b,0xe8,0xf5,0x72,0x5e,0x94,0xdb,0x8a,0x05,0xc3,0x50,0x73,0xfd,0x64,0xe3,0xc9,
0xf6,0x00,0xa9,0x25,0x57,0xb0,0xc6,0x33,0x02,0x31,0x44,0x7f,0xa3,0x58,0x44,0x04,
0x85,0x6a,0x05,0x30,0xfb,0x49,0x20,0x86,0x7b,0x80,0xc3,0xc5,0x35,0xe4,0x0a,0x8a,
0xd9,0x4f,0x51,0x78,0x00,0x72,0xba,0x46,0x2f,0x58,0xe3,0x19,0x21,0x40,0x0d,0xce,
0x1b,0x05,0xf3,0x9e,0x9c,0x52,0x11,0x77,0x78,0x61,0x92,0x7d,0x9a,0xfd,0x54,0x80,
0xfd,0xa5,0x85,0x39,0x4a,0x7e,0x8a,0xac,0x80,0x52,0xcd,0x50,0xcd,0x7b,0xc6,0xe7,
0xf8,0xb9,0xe9,0xd0,0x40,0xa5,0xac,0xb0,0x80,0xbb,0x81,0x15,0x72,0xef,0x1e,0xc3,
0xa5,0x51,0xa6,0xb8,0x62,0xbc,0x07,0x8e,0xd0,0x79,0x8a,0x28,0xf1,0x82,0xcd,0x3e,
0x05,0x98,0xad,0x14,0x4a,0xfc,0xbd,0xf0,0x3b,0x98,0xbf,0x48,0xfb,0xa5,0x5e,0xab,
0x9f,0xc2,0xfd,0x3d,0xde,0xe0,0x92,0xd2,0xa2,0x56,0x66,0x9f,0xc1,0x25,0x2e,0x9b,
0x7d,0x2a,0x68,0xea,0xb6,0xf5,0x65,0xe3,0xbc,0xc2,0x45,0x1b,0x43,0x0c,0xef,0x65,
0x45,0x1d,0x8d,0x0f,0x5c,0xaf,0x15,0x6c,0xf6,0xa9,0x4a,0x3f,0xf0,0xcc,0x5a,0x5c,
0xb4,0x45,0x69,0x8c,0xc5,0x3c,0xc2,0x62,0xae,0x48,0xb1,0xd6,0x11,0x1b,0x7a,0x22,
0x94,0x25,0xc4,0xb8,0xa2,0x4f,0x06,0x98,0x57,0x0e,0xef,0x42,0x01,0x67,0x14,0xac,
0xf1,0xbc,0x93,0xbf,0x47,0x79,0x79,0xf0,0x96,0xf9,0x85,0x66,0xc1,0xbc,0x27,0x17,
0xad,0x58,0x53,0x58,0x4a,0xbe,0x50,0x56,0xa4,0x36,0xae,0xc9,0xdf,0x31,0x99,0x59,
0x33,0x6c,0x9c,0xef,0x29,0x0a,0x35,0x36,0x6d,0xb1,0x9e,0xf7,0xbf,0xcb,0xf9,0x4a,
0x5b,0xbb,0xab,0x53,0x38,0x5f,0x63,0xbf,0xe7,0x14,0xce,0xdf,0xea,0xbd,0xeb,0x8d,
0x47,0x0e,0x0c,0xbd,0x77,0xe9,0xd6,0x9f,0xdc,0x75,0x76,0x06,0x2b,0x58,0x75,0xe8,
0xc3,0xec,0x04,0xbf,0x52,0x50,0xda,0x6b,0x14,0x24,0xab,0x4e,0x23,0x1f,0x8e,0x9d,
0x64,0x15,0xac,0x4b,0x8b,0x38,0x31,0x1a,0x9a,0x85,0x6b,0xda,0x95,0xaa,0xd7,0xb6,
0x63,0xd9,0x67,0x48,0xea,0x21,0x1f,0xbd,0x60,0xd6,0x4c,0x99,0x71,0xa3,0x60,0xd4,
0x5d,0xdb,0xce,0x56,0x77,0x4d,0x4b,0xeb,0x4f,0x86,0x15,0xb4,0x87,0x76,0x42,0xa7,
0x32,0x57,0x2a,0x68,0x23,0x5a,0xa1,0xb4,0xd7,0x6c,0xa7,0xf5,0x73,0x12,0x99,0xac,
0x7b,0x5e,0xfb,0x7e,0x66,0x9d,0xc4,0xfe,0x3e,0xda,0x40,0xee,0xfd,0x06,0xe0,0x7b,
0x50,0xda,0x6b,0x5a,0x8c,0xfe,0x27,0x34,0x14,0x52,0xaa,0x1a,0x05,0xc9,0xea,0x0b,
0x11,0x58,0x0c,0x96,0x7d,0x6f,0x6d,0x14,0xec,0x93,0xab,0x3d,0x06,0x72,0xcf,0xb3,
0xff,0xd1,0x33,0x89,0xf2,0x41,0x18,0xc8,0xfd,0x4b,0x2f,0x18,0xbe,0xf6,0xbf,0x76,
0xd5,0xd5,0x5d,0xfb,0x33,0xb7,0xfb,0xdf,0x6c,0xf6,0x1f,0xb8,0xea,0x27,0x26,0x26,
0xd4,0x49,0x85,0xff,0x37,0xeb,0x72,0x36,0x2f,0x91,0xff,0xf3,0x36,0xff,0x49,0xed,
0xa6,0xda,0xae,0x35,0xb7,0xff,0x97,0x6c,0xde,0x6e,0xd7,0xd6,0x26,0xfc,0xbf,0x6c,
0xf3,0x80,0xa6,0xae,0xbd,0xdf,0x54,0x9b,0xff,0x17,0xdb,0xc1,0x27,0xbc,0x9f,0xed,
0xfa,0x17,0xde,0x1c,0x98,0xcd,0xc3,0x35,0xd7,0xa7,0x36,0xff,0x1f,0xa8,0xfb,0x97,
0x2f,0xf6,0xb7,0x1c,0x89,0xf6,0xff,0x45,0xbf,0xfd,0xf7,0xb6,0xf9,0xf4,0xfa,0xf4,
0xfa,0xf4,0xfa,0xf4,0xfa,0xf4,0xfa,0xf4,0xfa,0xff,0xe5,0xd2,0xf6,0x49,0xa2,0xed,
0x93,0xea,0x7f,0x76,0x5f,0x3e,0xbd,0x3e,0xbd,0x3e,0xbd,0x3e,0xbd,0x3e,0xbd,0x3e,
0xbd,0xfe,0x33,0xae,0x20,0xfb,0x73,0x09,0x50,0xf9,0x6b,0xe3,0xef,0xff,0xf3,0x41,
0x50,0xa6,0xfd,0xeb,0xbf,0xaf,0xfd,0x9e,0x23,0x08,0x69,0xce,0x6a,0xbf,0x3b,0x35,
0xb5,0xaf,0xff,0x13,0x87,0x7b,0xf6,0xdd,0xd4,0x60,0x00,0x00,
| gpl-2.0 |
tarunkapadia93/gk_a6k | drivers/net/wireless/ath/ath6kl/cfg80211.c | 1000 | 98705 | /*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/inetdevice.h>
#include <linux/export.h>
#include "core.h"
#include "cfg80211.h"
#include "debug.h"
#include "hif-ops.h"
#include "testmode.h"
/* Build one entry of an ieee80211_rate table. */
#define RATETAB_ENT(_rate, _rateid, _flags) {	\
	.bitrate	= (_rate),		\
	.flags		= (_flags),		\
	.hw_value	= (_rateid),		\
}

/* Describe a 2.4 GHz channel; the center frequency is given explicitly. */
#define CHAN2G(_channel, _freq, _flags) {	\
	.band		= IEEE80211_BAND_2GHZ,	\
	.hw_value	= (_channel),		\
	.center_freq	= (_freq),		\
	.flags		= (_flags),		\
	.max_antenna_gain = 0,			\
	.max_power	= 30,			\
}

/* Describe a 5 GHz channel; frequency is derived from the channel number. */
#define CHAN5G(_channel, _flags) {		\
	.band		= IEEE80211_BAND_5GHZ,	\
	.hw_value	= (_channel),		\
	.center_freq	= 5000 + (5 * (_channel)),	\
	.flags		= (_flags),		\
	.max_antenna_gain = 0,			\
	.max_power	= 30,			\
}

/* Background scan period (seconds) used when userspace passes -1. */
#define DEFAULT_BG_SCAN_PERIOD 60

/* An SSID to be probed for during scans, plus its firmware probe flag. */
struct ath6kl_cfg80211_match_probe_ssid {
	struct cfg80211_ssid ssid;
	u8 flag;
};
/*
 * Legacy rate table: 4 CCK rates followed by 8 OFDM rates.
 * .bitrate is in units of 100 kbps; .hw_value is the firmware's
 * per-rate bitmask bit.
 */
static struct ieee80211_rate ath6kl_rates[] = {
	RATETAB_ENT(10, 0x1, 0),
	RATETAB_ENT(20, 0x2, 0),
	RATETAB_ENT(55, 0x4, 0),
	RATETAB_ENT(110, 0x8, 0),
	RATETAB_ENT(60, 0x10, 0),
	RATETAB_ENT(90, 0x20, 0),
	RATETAB_ENT(120, 0x40, 0),
	RATETAB_ENT(180, 0x80, 0),
	RATETAB_ENT(240, 0x100, 0),
	RATETAB_ENT(360, 0x200, 0),
	RATETAB_ENT(480, 0x400, 0),
	RATETAB_ENT(540, 0x800, 0),
};

/* 5 GHz ("a") uses only the 8 OFDM rates; 2.4 GHz ("g") uses all 12. */
#define ath6kl_a_rates		(ath6kl_rates + 4)
#define ath6kl_a_rates_size	8
#define ath6kl_g_rates		(ath6kl_rates + 0)
#define ath6kl_g_rates_size	12

/* HT capabilities advertised per band (40 MHz only offered on 5 GHz). */
#define ath6kl_g_htcap IEEE80211_HT_CAP_SGI_20
#define ath6kl_a_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
			IEEE80211_HT_CAP_SGI_20 | \
			IEEE80211_HT_CAP_SGI_40)
/* All 2.4 GHz channels (1-14); regulatory restrictions applied elsewhere. */
static struct ieee80211_channel ath6kl_2ghz_channels[] = {
	CHAN2G(1, 2412, 0),
	CHAN2G(2, 2417, 0),
	CHAN2G(3, 2422, 0),
	CHAN2G(4, 2427, 0),
	CHAN2G(5, 2432, 0),
	CHAN2G(6, 2437, 0),
	CHAN2G(7, 2442, 0),
	CHAN2G(8, 2447, 0),
	CHAN2G(9, 2452, 0),
	CHAN2G(10, 2457, 0),
	CHAN2G(11, 2462, 0),
	CHAN2G(12, 2467, 0),
	CHAN2G(13, 2472, 0),
	CHAN2G(14, 2484, 0),
};

/* 5 GHz channel list supported by the hardware. */
static struct ieee80211_channel ath6kl_5ghz_a_channels[] = {
	CHAN5G(34, 0), CHAN5G(36, 0),
	CHAN5G(38, 0), CHAN5G(40, 0),
	CHAN5G(42, 0), CHAN5G(44, 0),
	CHAN5G(46, 0), CHAN5G(48, 0),
	CHAN5G(52, 0), CHAN5G(56, 0),
	CHAN5G(60, 0), CHAN5G(64, 0),
	CHAN5G(100, 0), CHAN5G(104, 0),
	CHAN5G(108, 0), CHAN5G(112, 0),
	CHAN5G(116, 0), CHAN5G(120, 0),
	CHAN5G(124, 0), CHAN5G(128, 0),
	CHAN5G(132, 0), CHAN5G(136, 0),
	CHAN5G(140, 0), CHAN5G(149, 0),
	CHAN5G(153, 0), CHAN5G(157, 0),
	CHAN5G(161, 0), CHAN5G(165, 0),
	CHAN5G(184, 0), CHAN5G(188, 0),
	CHAN5G(192, 0), CHAN5G(196, 0),
	CHAN5G(200, 0), CHAN5G(204, 0),
	CHAN5G(208, 0), CHAN5G(212, 0),
	CHAN5G(216, 0),
};
/* Band descriptor registered with cfg80211 for 2.4 GHz. */
static struct ieee80211_supported_band ath6kl_band_2ghz = {
	.n_channels = ARRAY_SIZE(ath6kl_2ghz_channels),
	.channels = ath6kl_2ghz_channels,
	.n_bitrates = ath6kl_g_rates_size,
	.bitrates = ath6kl_g_rates,
	.ht_cap.cap = ath6kl_g_htcap,
	.ht_cap.ht_supported = true,
};

/* Band descriptor registered with cfg80211 for 5 GHz. */
static struct ieee80211_supported_band ath6kl_band_5ghz = {
	.n_channels = ARRAY_SIZE(ath6kl_5ghz_a_channels),
	.channels = ath6kl_5ghz_a_channels,
	.n_bitrates = ath6kl_a_rates_size,
	.bitrates = ath6kl_a_rates,
	.ht_cap.cap = ath6kl_a_htcap,
	.ht_cap.ht_supported = true,
};
#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */

/*
 * Stop any scheduled scan that is currently running.
 *
 * Returns true when a scheduled scan was active and has been stopped,
 * false when none was running.  The firmware is not contacted while the
 * device is in recovery.
 */
static bool __ath6kl_cfg80211_sscan_stop(struct ath6kl_vif *vif)
{
	struct ath6kl *ar = vif->ar;

	if (!test_and_clear_bit(SCHED_SCANNING, &vif->flags))
		return false;

	del_timer_sync(&vif->sched_scan_timer);

	if (ar->state != ATH6KL_STATE_RECOVERY)
		ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx,
						 false);

	return true;
}
/*
 * Stop a running scheduled scan and, if one was actually active,
 * tell cfg80211 that it has been stopped.
 */
static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif)
{
	if (__ath6kl_cfg80211_sscan_stop(vif))
		cfg80211_sched_scan_stopped(vif->ar->wiphy);
}
/*
 * Map the nl80211 WPA version bitmask onto the driver's auth mode.
 * WPA2 takes precedence when both version bits are set.
 *
 * Returns 0 on success, -ENOTSUPP for an unknown version bit.
 */
static int ath6kl_set_wpa_version(struct ath6kl_vif *vif,
				  enum nl80211_wpa_versions wpa_version)
{
	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);

	if (!wpa_version) {
		vif->auth_mode = NONE_AUTH;
		return 0;
	}

	if (wpa_version & NL80211_WPA_VERSION_2) {
		vif->auth_mode = WPA2_AUTH;
		return 0;
	}

	if (wpa_version & NL80211_WPA_VERSION_1) {
		vif->auth_mode = WPA_AUTH;
		return 0;
	}

	ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
	return -ENOTSUPP;
}
static int ath6kl_set_auth_type(struct ath6kl_vif *vif,
enum nl80211_auth_type auth_type)
{
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
switch (auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
vif->dot11_auth_mode = OPEN_AUTH;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
vif->dot11_auth_mode = SHARED_AUTH;
break;
case NL80211_AUTHTYPE_NETWORK_EAP:
vif->dot11_auth_mode = LEAP_AUTH;
break;
case NL80211_AUTHTYPE_AUTOMATIC:
vif->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
break;
default:
ath6kl_err("%s: 0x%x not supported\n", __func__, auth_type);
return -ENOTSUPP;
}
return 0;
}
/*
 * Translate an nl80211 cipher suite selector into the driver's crypto
 * type and key length, storing either the pairwise (@ucast true) or
 * group (@ucast false) settings on the vif.
 *
 * Returns 0 on success, -ENOTSUPP for an unknown cipher.
 */
static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast)
{
	u8 crypt_type, crypt_len;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
		   __func__, cipher, ucast);

	switch (cipher) {
	case 0:
		/* our own hack to use value 0 as no crypto used */
		crypt_type = NONE_CRYPT;
		crypt_len = 0;
		break;
	case WLAN_CIPHER_SUITE_WEP40:
		crypt_type = WEP_CRYPT;
		crypt_len = 5;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		crypt_type = WEP_CRYPT;
		crypt_len = 13;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		crypt_type = TKIP_CRYPT;
		crypt_len = 0;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		crypt_type = AES_CRYPT;
		crypt_len = 0;
		break;
	case WLAN_CIPHER_SUITE_SMS4:
		crypt_type = WAPI_CRYPT;
		crypt_len = 0;
		break;
	default:
		ath6kl_err("cipher 0x%x not supported\n", cipher);
		return -ENOTSUPP;
	}

	if (ucast) {
		vif->prwise_crypto = crypt_type;
		vif->prwise_crypto_len = crypt_len;
	} else {
		vif->grp_crypto = crypt_type;
		vif->grp_crypto_len = crypt_len;
	}

	return 0;
}
/*
 * Refine vif->auth_mode according to the AKM suite chosen by userspace.
 * PSK and CCKM suites upgrade the previously selected WPA/WPA2 mode;
 * any suite other than 802.1X resets the auth mode to none.
 */
static void ath6kl_set_key_mgmt(struct ath6kl_vif *vif, u32 key_mgmt)
{
	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);

	switch (key_mgmt) {
	case WLAN_AKM_SUITE_PSK:
		if (vif->auth_mode == WPA_AUTH)
			vif->auth_mode = WPA_PSK_AUTH;
		else if (vif->auth_mode == WPA2_AUTH)
			vif->auth_mode = WPA2_PSK_AUTH;
		break;
	case 0x00409600:
		/* Cisco CCKM AKM suite selector (OUI 00:40:96) */
		if (vif->auth_mode == WPA_AUTH)
			vif->auth_mode = WPA_AUTH_CCKM;
		else if (vif->auth_mode == WPA2_AUTH)
			vif->auth_mode = WPA2_AUTH_CCKM;
		break;
	case WLAN_AKM_SUITE_8021X:
		/* keep the WPA/WPA2 mode selected earlier */
		break;
	default:
		vif->auth_mode = NONE_AUTH;
		break;
	}
}
/*
 * Check that the firmware WMI layer is up and the interface is enabled
 * before servicing a cfg80211 request.  Returns true when ready.
 */
static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
{
	if (!test_bit(WMI_READY, &vif->ar->flag)) {
		ath6kl_err("wmi is not ready\n");
		return false;
	}

	if (!test_bit(WLAN_ENABLED, &vif->flags)) {
		ath6kl_err("wlan disabled\n");
		return false;
	}

	return true;
}
static bool ath6kl_is_wpa_ie(const u8 *pos)
{
return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
pos[2] == 0x00 && pos[3] == 0x50 &&
pos[4] == 0xf2 && pos[5] == 0x01;
}
/* Test whether @pos points at an RSN IE (identified by element ID alone). */
static bool ath6kl_is_rsn_ie(const u8 *pos)
{
	return *pos == WLAN_EID_RSN;
}
static bool ath6kl_is_wps_ie(const u8 *pos)
{
return (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
pos[1] >= 4 &&
pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 &&
pos[5] == 0x04);
}
/*
 * Forward the (Re)Association Request IEs supplied by userspace to the
 * firmware, with WPA/RSN IEs filtered out (the firmware constructs those
 * from the configured crypto parameters).  Also latches CONNECT_WPS_FLAG
 * in ar->connect_ctrl_flags when a WPS IE is present.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath6kl_set_assoc_req_ies(struct ath6kl_vif *vif, const u8 *ies,
				    size_t ies_len)
{
	struct ath6kl *ar = vif->ar;
	const u8 *pos;
	u8 *buf = NULL;
	size_t len = 0;
	int ret;

	/*
	 * Clear previously set flag
	 */
	ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;

	/*
	 * Filter out RSN/WPA IE(s)
	 */
	if (ies && ies_len) {
		/* filtered copy can only shrink, so ies_len is enough */
		buf = kmalloc(ies_len, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
		pos = ies;

		/* walk the TLV-encoded IEs; stop on a truncated element */
		while (pos + 1 < ies + ies_len) {
			if (pos + 2 + pos[1] > ies + ies_len)
				break;
			if (!(ath6kl_is_wpa_ie(pos) || ath6kl_is_rsn_ie(pos))) {
				memcpy(buf + len, pos, 2 + pos[1]);
				len += 2 + pos[1];
			}

			if (ath6kl_is_wps_ie(pos))
				ar->connect_ctrl_flags |= CONNECT_WPS_FLAG;

			pos += 2 + pos[1];
		}
	}

	/* buf == NULL / len == 0 clears the previously installed IEs */
	ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
				       WMI_FRAME_ASSOC_REQ, buf, len);
	kfree(buf);
	return ret;
}
/*
 * Map an nl80211 interface type onto the driver's network type,
 * written through @nw_type.
 *
 * Returns 0 on success, -ENOTSUPP for interface types the driver
 * cannot operate.
 */
static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type)
{
	switch (type) {
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_CLIENT:
		*nw_type = INFRA_NETWORK;
		return 0;
	case NL80211_IFTYPE_ADHOC:
		*nw_type = ADHOC_NETWORK;
		return 0;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_P2P_GO:
		*nw_type = AP_NETWORK;
		return 0;
	default:
		ath6kl_err("invalid interface type %u\n", type);
		return -ENOTSUPP;
	}
}
/*
 * Check whether a new interface of @type may be created and, if so,
 * pick a free firmware vif index (*if_idx) and the matching driver
 * network type (*nw_type).  Normal interfaces may use any slot; P2P
 * interfaces are restricted to slots at or above ar->max_norm_iface.
 *
 * Returns true when a slot was found.
 */
static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
				   u8 *if_idx, u8 *nw_type)
{
	int i, first;

	if (ath6kl_nliftype_to_drv_iftype(type, nw_type))
		return false;

	/* only a single interface is allowed alongside an active IBSS */
	if (ar->ibss_if_active ||
	    ((type == NL80211_IFTYPE_ADHOC) && ar->num_vif))
		return false;

	if (type == NL80211_IFTYPE_STATION ||
	    type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC)
		first = 0;
	else if (type == NL80211_IFTYPE_P2P_CLIENT ||
		 type == NL80211_IFTYPE_P2P_GO)
		first = ar->max_norm_iface;
	else
		return false;

	for (i = first; i < ar->vif_max; i++) {
		if (ar->avail_idx_map & BIT(i)) {
			*if_idx = i;
			return true;
		}
	}

	return false;
}
/*
 * Wait predicate: true once the WMI control endpoint's tx queue has
 * drained completely.
 */
static bool ath6kl_is_tx_pending(struct ath6kl *ar)
{
	int ctrl_ep = ath6kl_wmi_get_control_ep(ar->wmi);

	return ar->tx_pending[ctrl_ep] == 0;
}
/*
 * Enable or disable the firmware's enhanced beacon-miss detection.
 * Only applies to station (infrastructure) interfaces and only when the
 * firmware advertises ATH6KL_FW_CAPABILITY_BMISS_ENHANCE; otherwise the
 * call is a no-op.
 */
static void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif,
					      bool enable)
{
	struct ath6kl *ar = vif->ar;
	int ret;

	if (WARN_ON(!test_bit(WMI_READY, &ar->flag)))
		return;

	if (vif->nw_type != INFRA_NETWORK ||
	    !test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
		      ar->fw_capabilities))
		return;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
		   enable ? "enable" : "disable");

	ret = ath6kl_wmi_sta_bmiss_enhance_cmd(ar->wmi, vif->fw_vif_idx,
					       enable);
	if (ret)
		ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
			   enable ? "enable" : "disable", ret);
}
/*
 * cfg80211 .connect handler: translate the connect parameters (SSID,
 * BSSID, channel, crypto suites, WEP key) into vif state and WMI
 * commands, then ask the firmware to connect.  The actual connect
 * result is delivered asynchronously via
 * ath6kl_cfg80211_connect_event().
 *
 * Returns 0 on success or a negative errno.  ar->sem serializes this
 * against other firmware commands and teardown; every exit path after
 * down_interruptible() must release it.
 */
static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
				   struct cfg80211_connect_params *sme)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_vif *vif = netdev_priv(dev);
	int status;
	u8 nw_subtype = (ar->p2p) ? SUBTYPE_P2PDEV : SUBTYPE_NONE;
	u16 interval;

	/* a scheduled scan cannot run concurrently with a connect */
	ath6kl_cfg80211_sscan_disable(vif);

	vif->sme_state = SME_CONNECTING;

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
		ath6kl_err("destroy in progress\n");
		return -EBUSY;
	}

	/*
	 * With SKIP_SCAN the firmware connects without scanning first,
	 * so a usable channel and BSSID must have been supplied.
	 */
	if (test_bit(SKIP_SCAN, &ar->flag) &&
	    ((sme->channel && sme->channel->center_freq == 0) ||
	     (sme->bssid && is_zero_ether_addr(sme->bssid)))) {
		ath6kl_err("SkipScan: channel or bssid invalid\n");
		return -EINVAL;
	}

	if (down_interruptible(&ar->sem)) {
		ath6kl_err("busy, couldn't get access\n");
		return -ERESTARTSYS;
	}

	/* re-check under the semaphore: teardown may have started meanwhile */
	if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
		ath6kl_err("busy, destroy in progress\n");
		up(&ar->sem);
		return -EBUSY;
	}

	if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) {
		/*
		 * sleep until the command queue drains
		 */
		wait_event_interruptible_timeout(ar->event_wq,
						 ath6kl_is_tx_pending(ar),
						 WMI_TIMEOUT);
		if (signal_pending(current)) {
			ath6kl_err("cmd queue drain timeout\n");
			up(&ar->sem);
			return -EINTR;
		}
	}

	status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
	if (status) {
		up(&ar->sem);
		return status;
	}

	if (sme->ie == NULL || sme->ie_len == 0)
		ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;

	/*
	 * Re-connect to the same SSID while already connected is handled
	 * with a lightweight WMI reconnect command instead of a fresh
	 * connect.
	 */
	if (test_bit(CONNECTED, &vif->flags) &&
	    vif->ssid_len == sme->ssid_len &&
	    !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
		vif->reconnect_flag = true;
		status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx,
						  vif->req_bssid,
						  vif->ch_hint);

		up(&ar->sem);
		if (status) {
			ath6kl_err("wmi_reconnect_cmd failed\n");
			return -EIO;
		}
		return 0;
	} else if (vif->ssid_len == sme->ssid_len &&
		   !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
		/* same SSID but not connected: drop any pending attempt */
		ath6kl_disconnect(vif);
	}

	memset(vif->ssid, 0, sizeof(vif->ssid));
	vif->ssid_len = sme->ssid_len;
	memcpy(vif->ssid, sme->ssid, sme->ssid_len);

	if (sme->channel)
		vif->ch_hint = sme->channel->center_freq;

	memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
	if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
		memcpy(vif->req_bssid, sme->bssid, sizeof(vif->req_bssid));

	ath6kl_set_wpa_version(vif, sme->crypto.wpa_versions);

	status = ath6kl_set_auth_type(vif, sme->auth_type);
	if (status) {
		up(&ar->sem);
		return status;
	}

	if (sme->crypto.n_ciphers_pairwise)
		ath6kl_set_cipher(vif, sme->crypto.ciphers_pairwise[0], true);
	else
		ath6kl_set_cipher(vif, 0, true);

	ath6kl_set_cipher(vif, sme->crypto.cipher_group, false);

	if (sme->crypto.n_akm_suites)
		ath6kl_set_key_mgmt(vif, sme->crypto.akm_suites[0]);

	/* install a static WEP key supplied inline with the connect request */
	if ((sme->key_len) &&
	    (vif->auth_mode == NONE_AUTH) &&
	    (vif->prwise_crypto == WEP_CRYPT)) {
		struct ath6kl_key *key = NULL;

		if (sme->key_idx > WMI_MAX_KEY_INDEX) {
			ath6kl_err("key index %d out of bounds\n",
				   sme->key_idx);
			up(&ar->sem);
			return -ENOENT;
		}

		key = &vif->keys[sme->key_idx];
		key->key_len = sme->key_len;
		memcpy(key->key, sme->key, key->key_len);
		key->cipher = vif->prwise_crypto;
		vif->def_txkey_index = sme->key_idx;

		ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx,
				      vif->prwise_crypto,
				      GROUP_USAGE | TX_USAGE,
				      key->key_len,
				      NULL, 0,
				      key->key, KEY_OP_INIT_VAL, NULL,
				      NO_SYNC_WMIFLAG);
	}

	if (!ar->usr_bss_filter) {
		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
		if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
					     ALL_BSS_FILTER, 0) != 0) {
			ath6kl_err("couldn't set bss filtering\n");
			up(&ar->sem);
			return -EIO;
		}
	}

	vif->nw_type = vif->next_mode;

	/* enable enhanced bmiss detection if applicable */
	ath6kl_cfg80211_sta_bmiss_enhance(vif, true);

	if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
		nw_subtype = SUBTYPE_P2PCLIENT;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
		   "%s: connect called with authmode %d dot11 auth %d"
		   " PW crypto %d PW crypto len %d GRP crypto %d"
		   " GRP crypto len %d channel hint %u\n",
		   __func__,
		   vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
		   vif->prwise_crypto_len, vif->grp_crypto,
		   vif->grp_crypto_len, vif->ch_hint);

	vif->reconnect_flag = 0;

	if (vif->nw_type == INFRA_NETWORK) {
		interval = max_t(u16, vif->listen_intvl_t,
				 ATH6KL_MAX_WOW_LISTEN_INTL);
		status = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
						       interval,
						       0);
		if (status) {
			ath6kl_err("couldn't set listen intervel\n");
			up(&ar->sem);
			return status;
		}
	}

	status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
					vif->dot11_auth_mode, vif->auth_mode,
					vif->prwise_crypto,
					vif->prwise_crypto_len,
					vif->grp_crypto, vif->grp_crypto_len,
					vif->ssid_len, vif->ssid,
					vif->req_bssid, vif->ch_hint,
					ar->connect_ctrl_flags, nw_subtype);

	if (sme->bg_scan_period == 0) {
		/* disable background scan if period is 0 */
		sme->bg_scan_period = 0xffff;
	} else if (sme->bg_scan_period == -1) {
		/* configure default value if not specified */
		sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD;
	}

	ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0,
				  sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);

	up(&ar->sem);

	if (status == -EINVAL) {
		memset(vif->ssid, 0, sizeof(vif->ssid));
		vif->ssid_len = 0;
		ath6kl_err("invalid request\n");
		return -ENOENT;
	} else if (status) {
		ath6kl_err("ath6kl_wmi_connect_cmd failed\n");
		return -EIO;
	}

	/*
	 * Without WPA offload the 4-way handshake must complete before the
	 * disconnect timer fires; the timer is cancelled on success
	 * elsewhere (presumably on the connect event) — see disconnect_timer
	 * handling.
	 */
	if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
	    ((vif->auth_mode == WPA_PSK_AUTH) ||
	     (vif->auth_mode == WPA2_PSK_AUTH))) {
		mod_timer(&vif->disconnect_timer,
			  jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
	}

	ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
	set_bit(CONNECT_PEND, &vif->flags);

	return 0;
}
/*
 * Make sure cfg80211 has a BSS entry for the network we just joined.
 * If cfg80211 does not already know the BSS, synthesize a partial entry
 * from the SSID and the Beacon IEs reported by the firmware.
 *
 * Returns the (referenced) BSS entry, or NULL on allocation failure;
 * the caller is responsible for dropping the reference
 * (cfg80211_put_bss).
 */
static struct cfg80211_bss *
ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
			 enum network_type nw_type,
			 const u8 *bssid,
			 struct ieee80211_channel *chan,
			 const u8 *beacon_ie,
			 size_t beacon_ie_len)
{
	struct ath6kl *ar = vif->ar;
	struct cfg80211_bss *bss;
	u16 cap_mask, cap_val;
	u8 *ie;

	/* match on the IBSS or ESS capability bit as appropriate */
	if (nw_type & ADHOC_NETWORK) {
		cap_mask = WLAN_CAPABILITY_IBSS;
		cap_val = WLAN_CAPABILITY_IBSS;
	} else {
		cap_mask = WLAN_CAPABILITY_ESS;
		cap_val = WLAN_CAPABILITY_ESS;
	}

	bss = cfg80211_get_bss(ar->wiphy, chan, bssid,
			       vif->ssid, vif->ssid_len,
			       cap_mask, cap_val);
	if (bss == NULL) {
		/*
		 * Since cfg80211 may not yet know about the BSS,
		 * generate a partial entry until the first BSS info
		 * event becomes available.
		 *
		 * Prepend SSID element since it is not included in the Beacon
		 * IEs from the target.
		 */
		ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL);
		if (ie == NULL)
			return NULL;
		ie[0] = WLAN_EID_SSID;
		ie[1] = vif->ssid_len;
		memcpy(ie + 2, vif->ssid, vif->ssid_len);
		memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len);
		/* tsf=0, signal=100: placeholders for the synthesized entry */
		bss = cfg80211_inform_bss(ar->wiphy, chan,
					  bssid, 0, cap_val, 100,
					  ie, 2 + vif->ssid_len + beacon_ie_len,
					  0, GFP_KERNEL);
		if (bss)
			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
				   "added bss %pM to cfg80211\n", bssid);
		kfree(ie);
	} else
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");

	return bss;
}
/*
 * Handle a firmware connect event: parse the packed assoc_info blob
 * (Beacon IEs, (Re)Assoc Request, (Re)Assoc Response back-to-back) and
 * report the connect / roam / IBSS-join result to cfg80211.
 */
void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
				   u8 *bssid, u16 listen_intvl,
				   u16 beacon_intvl,
				   enum network_type nw_type,
				   u8 beacon_ie_len, u8 assoc_req_len,
				   u8 assoc_resp_len, u8 *assoc_info)
{
	struct ieee80211_channel *chan;
	struct ath6kl *ar = vif->ar;
	struct cfg80211_bss *bss;

	/* capinfo + listen interval */
	u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);

	/* capinfo + status code + associd */
	u8 assoc_resp_ie_offset = sizeof(u16) + sizeof(u16) + sizeof(u16);

	/* skip the fixed frame fields to reach the IEs of each frame */
	u8 *assoc_req_ie = assoc_info + beacon_ie_len + assoc_req_ie_offset;
	u8 *assoc_resp_ie = assoc_info + beacon_ie_len + assoc_req_len +
		assoc_resp_ie_offset;

	assoc_req_len -= assoc_req_ie_offset;
	assoc_resp_len -= assoc_resp_ie_offset;

	/*
	 * Store Beacon interval here; DTIM period will be available only once
	 * a Beacon frame from the AP is seen.
	 */
	vif->assoc_bss_beacon_int = beacon_intvl;
	clear_bit(DTIM_PERIOD_AVAIL, &vif->flags);

	if (nw_type & ADHOC_NETWORK) {
		if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
				   "%s: ath6k not in ibss mode\n", __func__);
			return;
		}
	}

	if (nw_type & INFRA_NETWORK) {
		if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
		    vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
				   "%s: ath6k not in station mode\n", __func__);
			return;
		}
	}

	chan = ieee80211_get_channel(ar->wiphy, (int) channel);

	/* cfg80211 APIs below require a BSS entry to exist */
	bss = ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan,
				       assoc_info, beacon_ie_len);
	if (!bss) {
		ath6kl_err("could not add cfg80211 bss entry\n");
		return;
	}

	if (nw_type & ADHOC_NETWORK) {
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
			   nw_type & ADHOC_CREATOR ? "creator" : "joiner");
		cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
		cfg80211_put_bss(ar->wiphy, bss);
		return;
	}

	if (vif->sme_state == SME_CONNECTING) {
		/* inform connect result to cfg80211 */
		vif->sme_state = SME_CONNECTED;
		cfg80211_connect_result(vif->ndev, bssid,
					assoc_req_ie, assoc_req_len,
					assoc_resp_ie, assoc_resp_len,
					WLAN_STATUS_SUCCESS, GFP_KERNEL);
		cfg80211_put_bss(ar->wiphy, bss);
	} else if (vif->sme_state == SME_CONNECTED) {
		/* inform roam event to cfg80211 */
		cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len,
				    assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
	}
}
/*
 * cfg80211 .disconnect handler: tear down the current connection and
 * clear the cached SSID/BSSID.  Serialized against other firmware
 * commands with ar->sem.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
				      struct net_device *dev, u16 reason_code)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_vif *vif = netdev_priv(dev);

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
		   reason_code);

	ath6kl_cfg80211_sscan_disable(vif);

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
		ath6kl_err("busy, destroy in progress\n");
		return -EBUSY;
	}

	if (down_interruptible(&ar->sem)) {
		ath6kl_err("busy, couldn't get access\n");
		return -ERESTARTSYS;
	}

	vif->reconnect_flag = 0;
	ath6kl_disconnect(vif);
	memset(vif->ssid, 0, sizeof(vif->ssid));
	vif->ssid_len = 0;

	/* with SKIP_SCAN the requested BSSID is still needed for reconnect */
	if (!test_bit(SKIP_SCAN, &ar->flag))
		memset(vif->req_bssid, 0, sizeof(vif->req_bssid));

	up(&ar->sem);

	vif->sme_state = SME_DISCONNECTED;

	return 0;
}
/*
 * Handle a firmware disconnect event: abort any in-flight scan, report
 * the disconnect (or failed connect attempt) to cfg80211, and force the
 * firmware to stop retrying when the disconnect was not host-initiated.
 */
void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
				      u8 *bssid, u8 assoc_resp_len,
				      u8 *assoc_info, u16 proto_reason)
{
	struct ath6kl *ar = vif->ar;

	/* a disconnect aborts any scan still in progress */
	if (vif->scan_req) {
		cfg80211_scan_done(vif->scan_req, true);
		vif->scan_req = NULL;
	}

	if (vif->nw_type & ADHOC_NETWORK) {
		if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
				   "%s: ath6k not in ibss mode\n", __func__);
			return;
		}
		/* all-zero BSSID signals "left the IBSS" to cfg80211 */
		memset(bssid, 0, ETH_ALEN);
		cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
		return;
	}

	if (vif->nw_type & INFRA_NETWORK) {
		if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
		    vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
				   "%s: ath6k not in station mode\n", __func__);
			return;
		}
	}

	clear_bit(CONNECT_PEND, &vif->flags);

	if (vif->sme_state == SME_CONNECTING) {
		/* the connect attempt itself failed */
		cfg80211_connect_result(vif->ndev,
					bssid, NULL, 0,
					NULL, 0,
					WLAN_STATUS_UNSPECIFIED_FAILURE,
					GFP_KERNEL);
	} else if (vif->sme_state == SME_CONNECTED) {
		cfg80211_disconnected(vif->ndev, proto_reason,
				      NULL, 0, GFP_KERNEL);
	}

	vif->sme_state = SME_DISCONNECTED;

	/*
	 * Send a disconnect command to target when a disconnect event is
	 * received with reason code other than 3 (DISCONNECT_CMD - disconnect
	 * request from host) to make the firmware stop trying to connect even
	 * after giving disconnect event. There will be one more disconnect
	 * event for this disconnect command with reason code DISCONNECT_CMD
	 * which won't be notified to cfg80211.
	 */
	if (reason != DISCONNECT_CMD)
		ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
}
static int ath6kl_set_probed_ssids(struct ath6kl *ar,
struct ath6kl_vif *vif,
struct cfg80211_ssid *ssids, int n_ssids,
struct cfg80211_match_set *match_set,
int n_match_ssid)
{
u8 i, j, index_to_add, ssid_found = false;
struct ath6kl_cfg80211_match_probe_ssid ssid_list[MAX_PROBED_SSIDS];
memset(ssid_list, 0, sizeof(ssid_list));
if (n_ssids > MAX_PROBED_SSIDS ||
n_match_ssid > MAX_PROBED_SSIDS)
return -EINVAL;
for (i = 0; i < n_ssids; i++) {
memcpy(ssid_list[i].ssid.ssid,
ssids[i].ssid,
ssids[i].ssid_len);
ssid_list[i].ssid.ssid_len = ssids[i].ssid_len;
if (ssids[i].ssid_len)
ssid_list[i].flag = SPECIFIC_SSID_FLAG;
else
ssid_list[i].flag = ANY_SSID_FLAG;
if (n_match_ssid == 0)
ssid_list[i].flag |= MATCH_SSID_FLAG;
}
index_to_add = i;
for (i = 0; i < n_match_ssid; i++) {
ssid_found = false;
for (j = 0; j < n_ssids; j++) {
if ((match_set[i].ssid.ssid_len ==
ssid_list[j].ssid.ssid_len) &&
(!memcmp(ssid_list[j].ssid.ssid,
match_set[i].ssid.ssid,
match_set[i].ssid.ssid_len))) {
ssid_list[j].flag |= MATCH_SSID_FLAG;
ssid_found = true;
break;
}
}
if (ssid_found)
continue;
if (index_to_add >= MAX_PROBED_SSIDS)
continue;
ssid_list[index_to_add].ssid.ssid_len =
match_set[i].ssid.ssid_len;
memcpy(ssid_list[index_to_add].ssid.ssid,
match_set[i].ssid.ssid,
match_set[i].ssid.ssid_len);
ssid_list[index_to_add].flag |= MATCH_SSID_FLAG;
index_to_add++;
}
for (i = 0; i < index_to_add; i++) {
ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
ssid_list[i].flag,
ssid_list[i].ssid.ssid_len,
ssid_list[i].ssid.ssid);
}
/* Make sure no old entries are left behind */
for (i = index_to_add; i < MAX_PROBED_SSIDS; i++) {
ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
DISABLE_SSID_FLAG, 0, NULL);
}
return 0;
}
/*
 * cfg80211 .scan handler: configure probed SSIDs, probe-request IEs and
 * the channel list, then kick off a firmware scan.  The scan_req is
 * kept until ath6kl_cfg80211_scan_complete_event() reports completion.
 */
static int ath6kl_cfg80211_scan(struct wiphy *wiphy,
				struct cfg80211_scan_request *request)
{
	struct ath6kl_vif *vif = ath6kl_vif_from_wdev(request->wdev);
	struct ath6kl *ar = ath6kl_priv(vif->ndev);
	s8 n_channels = 0;
	u16 *channels = NULL;
	int ret = 0;
	u32 force_fg_scan = 0;

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	/* a normal scan supersedes any scheduled scan */
	ath6kl_cfg80211_sscan_disable(vif);

	if (!ar->usr_bss_filter) {
		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
		ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
					       ALL_BSS_FILTER, 0);
		if (ret) {
			ath6kl_err("couldn't set bss filtering\n");
			return ret;
		}
	}

	ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
				      request->n_ssids, NULL, 0);
	if (ret < 0)
		return ret;

	/* this also clears IE in fw if it's not set */
	ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
				       WMI_FRAME_PROBE_REQ,
				       request->ie, request->ie_len);
	if (ret) {
		ath6kl_err("failed to set Probe Request appie for scan\n");
		return ret;
	}

	/*
	 * Scan only the requested channels if the request specifies a set of
	 * channels. If the list is longer than the target supports, do not
	 * configure the list and instead, scan all available channels.
	 */
	if (request->n_channels > 0 &&
	    request->n_channels <= WMI_MAX_CHANNELS) {
		u8 i;

		n_channels = request->n_channels;

		channels = kzalloc(n_channels * sizeof(u16), GFP_KERNEL);
		if (channels == NULL) {
			/* fall back to a full scan on allocation failure */
			ath6kl_warn("failed to set scan channels, scan all channels");
			n_channels = 0;
		}

		for (i = 0; i < n_channels; i++)
			channels[i] = request->channels[i]->center_freq;
	}

	/* when associated, a foreground scan would drop the connection */
	if (test_bit(CONNECTED, &vif->flags))
		force_fg_scan = 1;

	vif->scan_req = request;

	ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
				       WMI_LONG_SCAN, force_fg_scan,
				       false, 0,
				       ATH6KL_FG_SCAN_INTERVAL,
				       n_channels, channels,
				       request->no_cck,
				       request->rates);
	if (ret) {
		ath6kl_err("failed to start scan: %d\n", ret);
		vif->scan_req = NULL;
	}

	kfree(channels);

	return ret;
}
/*
 * Notify cfg80211 that the firmware scan finished (or was aborted) and
 * clear the probed-SSID entries configured for it.
 */
void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
{
	struct ath6kl *ar = vif->ar;
	int i;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__,
		   aborted ? " aborted" : "");

	if (!vif->scan_req)
		return;

	if (aborted)
		goto out;

	if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) {
		/* slot 0 is the broadcast probe; specific SSIDs start at 1 */
		for (i = 0; i < vif->scan_req->n_ssids; i++) {
			ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
						  i + 1, DISABLE_SSID_FLAG,
						  0, NULL);
		}
	}

out:
	cfg80211_scan_done(vif->scan_req, aborted);
	vif->scan_req = NULL;
}
/*
 * Forward a firmware channel-switch event to cfg80211, mapping the WMI
 * phy mode to an nl80211 channel type (HT20 only; everything else is
 * reported as no-HT).
 */
void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
				      enum wmi_phy_mode mode)
{
	struct cfg80211_chan_def chandef;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
		   "channel switch notify nw_type %d freq %d mode %d\n",
		   vif->nw_type, freq, mode);

	cfg80211_chandef_create(&chandef,
				ieee80211_get_channel(vif->ar->wiphy, freq),
				(mode == WMI_11G_HT20) ?
					NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);

	cfg80211_ch_switch_notify(vif->ndev, &chandef);
}
/*
 * cfg80211 .add_key handler: validate and cache the key locally, then
 * push it to the firmware.  In AP mode, group/WEP keys installed before
 * the AP is up are only cached here and are programmed later when the
 * connected event arrives (see ath6kl_connect_ap_mode()).
 */
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
				   u8 key_index, bool pairwise,
				   const u8 *mac_addr,
				   struct key_params *params)
{
	struct ath6kl *ar = ath6kl_priv(ndev);
	struct ath6kl_vif *vif = netdev_priv(ndev);
	struct ath6kl_key *key = NULL;
	int seq_len;
	u8 key_usage;
	u8 key_type;

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	if (params->cipher == CCKM_KRK_CIPHER_SUITE) {
		/* CCKM key-request keys bypass the normal key table */
		if (params->key_len != WMI_KRK_LEN)
			return -EINVAL;

		return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx,
					      params->key);
	}

	if (key_index > WMI_MAX_KEY_INDEX) {
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
			   "%s: key index %d out of bounds\n", __func__,
			   key_index);
		return -ENOENT;
	}

	key = &vif->keys[key_index];
	memset(key, 0, sizeof(struct ath6kl_key));

	if (pairwise)
		key_usage = PAIRWISE_USAGE;
	else
		key_usage = GROUP_USAGE;

	seq_len = params->seq_len;
	if (params->cipher == WLAN_CIPHER_SUITE_SMS4 &&
	    seq_len > ATH6KL_KEY_SEQ_LEN) {
		/* Only first half of the WPI PN is configured */
		seq_len = ATH6KL_KEY_SEQ_LEN;
	}
	if (params->key_len > WLAN_MAX_KEY_LEN ||
	    seq_len > sizeof(key->seq))
		return -EINVAL;

	key->key_len = params->key_len;
	memcpy(key->key, params->key, key->key_len);
	key->seq_len = seq_len;
	memcpy(key->seq, params->seq, key->seq_len);
	key->cipher = params->cipher;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		key_type = WEP_CRYPT;
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		key_type = TKIP_CRYPT;
		break;

	case WLAN_CIPHER_SUITE_CCMP:
		key_type = AES_CRYPT;
		break;

	case WLAN_CIPHER_SUITE_SMS4:
		key_type = WAPI_CRYPT;
		break;

	default:
		return -ENOTSUPP;
	}

	/*
	 * Arrival of the group key completes the 4-way/group handshake,
	 * so the handshake-timeout disconnect timer can be stopped.
	 */
	if (((vif->auth_mode == WPA_PSK_AUTH) ||
	     (vif->auth_mode == WPA2_PSK_AUTH)) &&
	    (key_usage & GROUP_USAGE))
		del_timer(&vif->disconnect_timer);

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
		   "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
		   __func__, key_index, key->key_len, key_type,
		   key_usage, key->seq_len);

	if (vif->nw_type == AP_NETWORK && !pairwise &&
	    (key_type == TKIP_CRYPT || key_type == AES_CRYPT ||
	     key_type == WAPI_CRYPT)) {
		/* remember the AP-mode broadcast key for later (re)install */
		ar->ap_mode_bkey.valid = true;
		ar->ap_mode_bkey.key_index = key_index;
		ar->ap_mode_bkey.key_type = key_type;
		ar->ap_mode_bkey.key_len = key->key_len;
		memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);

		if (!test_bit(CONNECTED, &vif->flags)) {
			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
				   "Delay initial group key configuration until AP mode has been started\n");
			/*
			 * The key will be set in ath6kl_connect_ap_mode() once
			 * the connected event is received from the target.
			 */
			return 0;
		}
	}

	if (vif->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
	    !test_bit(CONNECTED, &vif->flags)) {
		/*
		 * Store the key locally so that it can be re-configured after
		 * the AP mode has properly started
		 * (ath6kl_install_statioc_wep_keys).
		 */
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
			   "Delay WEP key configuration until AP mode has been started\n");
		vif->wep_key_list[key_index].key_len = key->key_len;
		memcpy(vif->wep_key_list[key_index].key, key->key,
		       key->key_len);
		return 0;
	}

	return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, key_index,
				     key_type, key_usage, key->key_len,
				     key->seq, key->seq_len, key->key,
				     KEY_OP_INIT_VAL,
				     (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
}
/*
 * cfg80211 .del_key handler: invalidate the cached key and ask the
 * firmware to delete it.  Deleting an already-empty slot succeeds
 * without touching the firmware.
 */
static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
				   u8 key_index, bool pairwise,
				   const u8 *mac_addr)
{
	struct ath6kl *ar = ath6kl_priv(ndev);
	struct ath6kl_vif *vif = netdev_priv(ndev);

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	if (key_index > WMI_MAX_KEY_INDEX) {
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
			   "%s: key index %d out of bounds\n", __func__,
			   key_index);
		return -ENOENT;
	}

	if (!vif->keys[key_index].key_len) {
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
			   "%s: index %d is empty\n", __func__, key_index);
		return 0;
	}

	vif->keys[key_index].key_len = 0;

	return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index);
}
/*
 * cfg80211 .get_key handler: report the locally cached key material via
 * the supplied callback.  Returns -ENOENT when the slot is empty (the
 * callback is still invoked with zeroed params in that case).
 */
static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
				   u8 key_index, bool pairwise,
				   const u8 *mac_addr, void *cookie,
				   void (*callback) (void *cookie,
						     struct key_params *))
{
	struct ath6kl_vif *vif = netdev_priv(ndev);
	struct ath6kl_key *key = NULL;
	struct key_params params;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	if (key_index > WMI_MAX_KEY_INDEX) {
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
			   "%s: key index %d out of bounds\n", __func__,
			   key_index);
		return -ENOENT;
	}

	key = &vif->keys[key_index];
	memset(&params, 0, sizeof(params));
	params.cipher = key->cipher;
	params.key_len = key->key_len;
	params.seq_len = key->seq_len;
	params.seq = key->seq;
	params.key = key->key;

	callback(cookie, &params);

	return key->key_len ? 0 : -ENOENT;
}
/*
 * cfg80211 .set_default_key handler: select the default TX key index
 * and re-program that key in the firmware.  In AP mode before start-up
 * the firmware programming is deferred (key is applied when AP starts).
 */
static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
					   struct net_device *ndev,
					   u8 key_index, bool unicast,
					   bool multicast)
{
	struct ath6kl *ar = ath6kl_priv(ndev);
	struct ath6kl_vif *vif = netdev_priv(ndev);
	struct ath6kl_key *key = NULL;
	u8 key_usage;
	enum crypto_type key_type = NONE_CRYPT;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	if (key_index > WMI_MAX_KEY_INDEX) {
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
			   "%s: key index %d out of bounds\n",
			   __func__, key_index);
		return -ENOENT;
	}

	if (!vif->keys[key_index].key_len) {
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
			   __func__, key_index);
		return -EINVAL;
	}

	vif->def_txkey_index = key_index;
	key = &vif->keys[vif->def_txkey_index];
	key_usage = GROUP_USAGE;
	/* WEP uses the default key for transmit as well */
	if (vif->prwise_crypto == WEP_CRYPT)
		key_usage |= TX_USAGE;
	if (unicast)
		key_type = vif->prwise_crypto;
	if (multicast)
		key_type = vif->grp_crypto;

	if (vif->next_mode == AP_NETWORK && !test_bit(CONNECTED, &vif->flags))
		return 0; /* Delay until AP mode has been started */

	return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx,
				     vif->def_txkey_index,
				     key_type, key_usage,
				     key->key_len, key->seq, key->seq_len,
				     key->key,
				     KEY_OP_INIT_VAL, NULL,
				     SYNC_BOTH_WMIFLAG);
}
/*
 * Report a TKIP Michael MIC failure from the target to cfg80211 so the
 * supplicant can run its countermeasures.
 */
void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
				       bool ismcast)
{
	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
		   "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);

	cfg80211_michael_mic_failure(vif->ndev, vif->bssid,
				     (ismcast ? NL80211_KEYTYPE_GROUP :
				      NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
				     GFP_KERNEL);
}
/*
 * cfg80211 .set_wiphy_params handler.  Only the RTS threshold is
 * supported; the command is issued through the first vif.
 */
static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
	struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
	struct ath6kl_vif *vif;
	int ret;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
		   changed);

	vif = ath6kl_vif_first(ar);
	if (!vif)
		return -EIO;

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
		ret = ath6kl_wmi_set_rts_cmd(ar->wmi, wiphy->rts_threshold);
		if (ret != 0) {
			ath6kl_err("ath6kl_wmi_set_rts_cmd failed\n");
			return -EIO;
		}
	}

	return 0;
}
/*
 * cfg80211 .set_tx_power handler.  "automatic" is a no-op (firmware
 * default); "limited" caches the requested level and programs it via
 * WMI; "fixed" is not supported.
 */
static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
				       struct wireless_dev *wdev,
				       enum nl80211_tx_power_setting type,
				       int mbm)
{
	struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
	struct ath6kl_vif *vif;
	int dbm = MBM_TO_DBM(mbm);

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
		   type, dbm);

	vif = ath6kl_vif_first(ar);
	if (!vif)
		return -EIO;

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	switch (type) {
	case NL80211_TX_POWER_AUTOMATIC:
		return 0;
	case NL80211_TX_POWER_LIMITED:
		ar->tx_pwr = dbm;
		break;
	default:
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n",
			   __func__, type);
		return -EOPNOTSUPP;
	}

	ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, dbm);

	return 0;
}
/*
 * cfg80211 .get_tx_power handler.  When connected, query the target and
 * wait (up to 5 s) for the event handler to fill ar->tx_pwr; otherwise
 * report the cached value.
 */
static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
				       struct wireless_dev *wdev,
				       int *dbm)
{
	struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
	struct ath6kl_vif *vif;

	vif = ath6kl_vif_first(ar);
	if (!vif)
		return -EIO;

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	if (test_bit(CONNECTED, &vif->flags)) {
		/* 0 marks "no reply yet"; the event handler overwrites it */
		ar->tx_pwr = 0;

		if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
			ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
			return -EIO;
		}

		wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
						 5 * HZ);

		if (signal_pending(current)) {
			ath6kl_err("target did not respond\n");
			return -EINTR;
		}
	}

	*dbm = ar->tx_pwr;
	return 0;
}
/*
 * cfg80211 .set_power_mgmt handler: map the boolean power-save request
 * to the firmware's REC_POWER/MAX_PERF_POWER modes.  The timeout
 * parameter is not supported by the firmware and is ignored.
 */
static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
					  struct net_device *dev,
					  bool pmgmt, int timeout)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct wmi_power_mode_cmd mode;
	struct ath6kl_vif *vif = netdev_priv(dev);

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
		   __func__, pmgmt, timeout);

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	if (pmgmt) {
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
		mode.pwr_mode = REC_POWER;
	} else {
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
		mode.pwr_mode = MAX_PERF_POWER;
	}

	if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx,
				     mode.pwr_mode) != 0) {
		ath6kl_err("wmi_powermode_cmd failed\n");
		return -EIO;
	}

	return 0;
}
/*
 * cfg80211 .add_virtual_intf handler: create a new vif if the firmware
 * supports another one of the requested type.  Returns the new wireless
 * device or an ERR_PTR.
 */
static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
						      const char *name,
						      enum nl80211_iftype type,
						      u32 *flags,
						      struct vif_params *params)
{
	struct ath6kl *ar = wiphy_priv(wiphy);
	struct wireless_dev *wdev;
	u8 if_idx, nw_type;

	if (ar->num_vif == ar->vif_max) {
		ath6kl_err("Reached maximum number of supported vif\n");
		return ERR_PTR(-EINVAL);
	}

	if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) {
		ath6kl_err("Not a supported interface type\n");
		return ERR_PTR(-EINVAL);
	}

	wdev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
	if (!wdev)
		return ERR_PTR(-ENOMEM);

	ar->num_vif++;

	return wdev;
}
/*
 * cfg80211 .del_virtual_intf handler: unlink the vif, stop it, and
 * free its resources (cleanup requires the RTNL).
 */
static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
				     struct wireless_dev *wdev)
{
	struct ath6kl *ar = wiphy_priv(wiphy);
	struct ath6kl_vif *vif = netdev_priv(wdev->netdev);

	spin_lock_bh(&ar->list_lock);
	list_del(&vif->list);
	spin_unlock_bh(&ar->list_lock);

	/* only talk to the firmware if WMI is still usable */
	ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));

	rtnl_lock();
	ath6kl_cfg80211_vif_cleanup(vif);
	rtnl_unlock();

	return 0;
}
/*
 * cfg80211 .change_virtual_intf handler: switch the vif to a new
 * nl80211 interface type, enforcing the firmware's restrictions on
 * which vif indices may carry P2P roles.
 */
static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
					struct net_device *ndev,
					enum nl80211_iftype type, u32 *flags,
					struct vif_params *params)
{
	struct ath6kl_vif *vif = netdev_priv(ndev);
	int i;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);

	/*
	 * Don't bring up p2p on an interface which is not initialized
	 * for p2p operation where fw does not have capability to switch
	 * dynamically between non-p2p and p2p type interface.
	 */
	if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
		      vif->ar->fw_capabilities) &&
	    (type == NL80211_IFTYPE_P2P_CLIENT ||
	     type == NL80211_IFTYPE_P2P_GO)) {
		if (vif->ar->vif_max == 1) {
			/* single-vif fw: only index 0 may become P2P */
			if (vif->fw_vif_idx != 0)
				return -EINVAL;
			else
				goto set_iface_type;
		}

		/* multi-vif fw: P2P only on the dedicated vif range */
		for (i = vif->ar->max_norm_iface; i < vif->ar->vif_max; i++) {
			if (i == vif->fw_vif_idx)
				break;
		}

		if (i == vif->ar->vif_max) {
			ath6kl_err("Invalid interface to bring up P2P\n");
			return -EINVAL;
		}
	}

	/* need to clean up enhanced bmiss detection fw state */
	ath6kl_cfg80211_sta_bmiss_enhance(vif, false);

set_iface_type:
	switch (type) {
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_CLIENT:
		vif->next_mode = INFRA_NETWORK;
		break;
	case NL80211_IFTYPE_ADHOC:
		vif->next_mode = ADHOC_NETWORK;
		break;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_P2P_GO:
		vif->next_mode = AP_NETWORK;
		break;
	default:
		ath6kl_err("invalid interface type %u\n", type);
		return -EOPNOTSUPP;
	}

	vif->wdev.iftype = type;

	return 0;
}
/*
 * cfg80211 .join_ibss handler: record SSID/BSSID/channel hints,
 * configure open-system auth and WEP/no-crypto ciphers, then issue the
 * WMI connect command for ad-hoc mode.
 *
 * Fix: the result of ath6kl_wmi_connect_cmd() was assigned to @status
 * but never examined, so a failed connect command still set
 * CONNECT_PEND and reported success to cfg80211.  The error is now
 * propagated and CONNECT_PEND is only set after the command succeeds.
 */
static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
				     struct net_device *dev,
				     struct cfg80211_ibss_params *ibss_param)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_vif *vif = netdev_priv(dev);
	int status;

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	vif->ssid_len = ibss_param->ssid_len;
	memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len);

	if (ibss_param->chandef.chan)
		vif->ch_hint = ibss_param->chandef.chan->center_freq;

	if (ibss_param->channel_fixed) {
		/*
		 * TODO: channel_fixed: The channel should be fixed, do not
		 * search for IBSSs to join on other channels. Target
		 * firmware does not support this feature, needs to be
		 * updated.
		 */
		return -EOPNOTSUPP;
	}

	memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
	if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
		memcpy(vif->req_bssid, ibss_param->bssid,
		       sizeof(vif->req_bssid));

	ath6kl_set_wpa_version(vif, 0);

	status = ath6kl_set_auth_type(vif, NL80211_AUTHTYPE_OPEN_SYSTEM);
	if (status)
		return status;

	if (ibss_param->privacy) {
		ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, true);
		ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, false);
	} else {
		ath6kl_set_cipher(vif, 0, true);
		ath6kl_set_cipher(vif, 0, false);
	}

	vif->nw_type = vif->next_mode;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
		   "%s: connect called with authmode %d dot11 auth %d"
		   " PW crypto %d PW crypto len %d GRP crypto %d"
		   " GRP crypto len %d channel hint %u\n",
		   __func__,
		   vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
		   vif->prwise_crypto_len, vif->grp_crypto,
		   vif->grp_crypto_len, vif->ch_hint);

	status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
					vif->dot11_auth_mode, vif->auth_mode,
					vif->prwise_crypto,
					vif->prwise_crypto_len,
					vif->grp_crypto, vif->grp_crypto_len,
					vif->ssid_len, vif->ssid,
					vif->req_bssid, vif->ch_hint,
					ar->connect_ctrl_flags, SUBTYPE_NONE);
	if (status)
		return status;

	set_bit(CONNECT_PEND, &vif->flags);

	return 0;
}
/*
 * cfg80211 .leave_ibss handler: disconnect from the IBSS and forget
 * the configured SSID.
 */
static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
				      struct net_device *dev)
{
	struct ath6kl_vif *vif = netdev_priv(dev);

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	ath6kl_disconnect(vif);
	memset(vif->ssid, 0, sizeof(vif->ssid));
	vif->ssid_len = 0;

	return 0;
}
/* cipher suites advertised to cfg80211, incl. vendor CCKM KRK and SMS4 */
static const u32 cipher_suites[] = {
	WLAN_CIPHER_SUITE_WEP40,
	WLAN_CIPHER_SUITE_WEP104,
	WLAN_CIPHER_SUITE_TKIP,
	WLAN_CIPHER_SUITE_CCMP,
	CCKM_KRK_CIPHER_SUITE,
	WLAN_CIPHER_SUITE_SMS4,
};
/*
 * Check whether @rate matches one of the target's legacy (non-HT)
 * rate values.
 */
static bool is_rate_legacy(s32 rate)
{
	static const s32 legacy_rates[] = {
		1000, 2000, 5500, 11000,
		6000, 9000, 12000, 18000, 24000,
		36000, 48000, 54000
	};
	u8 idx = ARRAY_SIZE(legacy_rates);

	while (idx--) {
		if (legacy_rates[idx] == rate)
			return true;
	}

	return false;
}
/*
 * Match @rate against the HT20 rate table.  On success *mcs receives
 * the table index and *sgi is set for the short-GI entry (table tail).
 */
static bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi)
{
	static const s32 ht20_rates[] = {
		6500, 13000, 19500, 26000, 39000,
		52000, 58500, 65000, 72200
	};
	u8 idx;

	for (idx = 0; idx < ARRAY_SIZE(ht20_rates); idx++) {
		if (rate != ht20_rates[idx])
			continue;
		/* only the last table entry is a short-GI rate */
		*sgi = (idx == ARRAY_SIZE(ht20_rates) - 1);
		*mcs = idx;
		return true;
	}

	return false;
}
/*
 * Match @rate against the HT40 rate table.  On success *mcs receives
 * the table index and *sgi is set for the short-GI entry (table tail).
 */
static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
{
	static const s32 ht40_rates[] = {
		13500, 27000, 40500, 54000,
		81000, 108000, 121500, 135000,
		150000
	};
	u8 idx;

	for (idx = 0; idx < ARRAY_SIZE(ht40_rates); idx++) {
		if (rate != ht40_rates[idx])
			continue;
		/* only the last table entry is a short-GI rate */
		*sgi = (idx == ARRAY_SIZE(ht40_rates) - 1);
		*mcs = idx;
		return true;
	}

	return false;
}
/*
 * cfg80211 .get_station handler: request fresh statistics from the
 * firmware (waiting up to WMI_TIMEOUT for the reply) and translate them
 * into a struct station_info.  Only the current BSS peer is supported.
 */
static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
			      u8 *mac, struct station_info *sinfo)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_vif *vif = netdev_priv(dev);
	long left;
	bool sgi;
	s32 rate;
	int ret;
	u8 mcs;

	if (memcmp(mac, vif->bssid, ETH_ALEN) != 0)
		return -ENOENT;

	if (down_interruptible(&ar->sem))
		return -EBUSY;

	/* event handler clears this bit once the stats reply arrives */
	set_bit(STATS_UPDATE_PEND, &vif->flags);

	ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx);

	if (ret != 0) {
		up(&ar->sem);
		return -EIO;
	}

	left = wait_event_interruptible_timeout(ar->event_wq,
						!test_bit(STATS_UPDATE_PEND,
							  &vif->flags),
						WMI_TIMEOUT);

	up(&ar->sem);

	if (left == 0)
		return -ETIMEDOUT;
	else if (left < 0)
		return left;

	if (vif->target_stats.rx_byte) {
		sinfo->rx_bytes = vif->target_stats.rx_byte;
		sinfo->filled |= STATION_INFO_RX_BYTES64;
		sinfo->rx_packets = vif->target_stats.rx_pkt;
		sinfo->filled |= STATION_INFO_RX_PACKETS;
	}

	if (vif->target_stats.tx_byte) {
		sinfo->tx_bytes = vif->target_stats.tx_byte;
		sinfo->filled |= STATION_INFO_TX_BYTES64;
		sinfo->tx_packets = vif->target_stats.tx_pkt;
		sinfo->filled |= STATION_INFO_TX_PACKETS;
	}

	sinfo->signal = vif->target_stats.cs_rssi;
	sinfo->filled |= STATION_INFO_SIGNAL;

	rate = vif->target_stats.tx_ucast_rate;

	if (is_rate_legacy(rate)) {
		sinfo->txrate.legacy = rate / 100;
	} else if (is_rate_ht20(rate, &mcs, &sgi)) {
		if (sgi) {
			/* SGI entry sits after its MCS in the rate table */
			sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			sinfo->txrate.mcs = mcs - 1;
		} else {
			sinfo->txrate.mcs = mcs;
		}
		sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
	} else if (is_rate_ht40(rate, &mcs, &sgi)) {
		if (sgi) {
			sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			sinfo->txrate.mcs = mcs - 1;
		} else {
			sinfo->txrate.mcs = mcs;
		}
		sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
		sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
	} else {
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
			   "invalid rate from stats: %d\n", rate);
		ath6kl_debug_war(ar, ATH6KL_WAR_INVALID_RATE);
		return 0;
	}

	sinfo->filled |= STATION_INFO_TX_BITRATE;

	if (test_bit(CONNECTED, &vif->flags) &&
	    test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
	    vif->nw_type == INFRA_NETWORK) {
		sinfo->filled |= STATION_INFO_BSS_PARAM;
		sinfo->bss_param.flags = 0;
		sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
		sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
	}

	return 0;
}
/* cfg80211 .set_pmksa handler: add a PMKSA cache entry in firmware. */
static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
			    struct cfg80211_pmksa *pmksa)
{
	struct ath6kl *ar = ath6kl_priv(netdev);
	struct ath6kl_vif *vif = netdev_priv(netdev);

	return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
				       pmksa->pmkid, true);
}
/* cfg80211 .del_pmksa handler: remove a PMKSA cache entry in firmware. */
static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
			    struct cfg80211_pmksa *pmksa)
{
	struct ath6kl *ar = ath6kl_priv(netdev);
	struct ath6kl_vif *vif = netdev_priv(netdev);

	return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
				       pmksa->pmkid, false);
}
/*
 * cfg80211 .flush_pmksa handler.  The firmware interface only removes
 * the entry for the current BSS, so this is a no-op when disconnected.
 */
static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
{
	struct ath6kl *ar = ath6kl_priv(netdev);
	struct ath6kl_vif *vif = netdev_priv(netdev);

	if (test_bit(CONNECTED, &vif->flags))
		return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx,
					       vif->bssid, NULL, false);

	return 0;
}
/*
 * Program user-supplied WoWLAN patterns and trigger options into the
 * firmware.  Fills *filter with WOW_FILTER_OPTION_* bits matching the
 * requested wakeup triggers.  Returns 0 or a negative errno.
 */
static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
			  struct cfg80211_wowlan *wow, u32 *filter)
{
	int ret, pos;
	u8 mask[WOW_PATTERN_SIZE];
	u16 i;

	/* Configure the patterns that we received from the user. */
	for (i = 0; i < wow->n_patterns; i++) {
		/*
		 * Convert given nl80211 specific mask value to equivalent
		 * driver specific mask value and send it to the chip along
		 * with patterns. For example, If the mask value defined in
		 * struct cfg80211_wowlan is 0xA (equivalent binary is 1010),
		 * then equivalent driver specific mask value is
		 * "0xFF 0x00 0xFF 0x00".
		 */
		memset(&mask, 0, sizeof(mask));
		for (pos = 0; pos < wow->patterns[i].pattern_len; pos++) {
			if (wow->patterns[i].mask[pos / 8] & (0x1 << (pos % 8)))
				mask[pos] = 0xFF;
		}
		/*
		 * Note: Pattern's offset is not passed as part of wowlan
		 * parameter from CFG layer. So it's always passed as ZERO
		 * to the firmware. It means, given WOW patterns are always
		 * matched from the first byte of received pkt in the firmware.
		 */
		ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
				vif->fw_vif_idx, WOW_LIST_ID,
				wow->patterns[i].pattern_len,
				0 /* pattern offset */,
				wow->patterns[i].pattern, mask);
		if (ret)
			return ret;
	}

	/* translate nl80211 wakeup triggers into firmware filter bits */
	if (wow->disconnect)
		*filter |= WOW_FILTER_OPTION_NWK_DISASSOC;

	if (wow->magic_pkt)
		*filter |= WOW_FILTER_OPTION_MAGIC_PACKET;

	if (wow->gtk_rekey_failure)
		*filter |= WOW_FILTER_OPTION_GTK_ERROR;

	if (wow->eap_identity_req)
		*filter |= WOW_FILTER_OPTION_EAP_REQ;

	if (wow->four_way_handshake)
		*filter |= WOW_FILTER_OPTION_8021X_4WAYHS;

	return 0;
}
/*
 * Install the default AP-mode WoWLAN wake patterns: unicast IP/EAPOL,
 * ARP, multicast discovery (mDNS/SSDP/LLMNR) and DHCP broadcast.
 * Returns 0 or the first failing WMI command's error.
 */
static int ath6kl_wow_ap(struct ath6kl *ar, struct ath6kl_vif *vif)
{
	static const u8 unicst_pattern[] = { 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x08 };
	static const u8 unicst_mask[] = { 0x01, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x7f };
	u8 unicst_offset = 0;
	static const u8 arp_pattern[] = { 0x08, 0x06 };
	static const u8 arp_mask[] = { 0xff, 0xff };
	u8 arp_offset = 20;
	static const u8 discvr_pattern[] = { 0xe0, 0x00, 0x00, 0xf8 };
	static const u8 discvr_mask[] = { 0xf0, 0x00, 0x00, 0xf8 };
	u8 discvr_offset = 38;
	static const u8 dhcp_pattern[] = { 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x43 /* port 67 */ };
	static const u8 dhcp_mask[] = { 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0xff, 0xff /* port 67 */ };
	u8 dhcp_offset = 0;
	int ret;

	/* Setup unicast IP, EAPOL-like and ARP pkt pattern */
	ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
			vif->fw_vif_idx, WOW_LIST_ID,
			sizeof(unicst_pattern), unicst_offset,
			unicst_pattern, unicst_mask);
	if (ret) {
		ath6kl_err("failed to add WOW unicast IP pattern\n");
		return ret;
	}

	/* Setup all ARP pkt pattern */
	ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
			vif->fw_vif_idx, WOW_LIST_ID,
			sizeof(arp_pattern), arp_offset,
			arp_pattern, arp_mask);
	if (ret) {
		ath6kl_err("failed to add WOW ARP pattern\n");
		return ret;
	}

	/*
	 * Setup multicast pattern for mDNS 224.0.0.251,
	 * SSDP 239.255.255.250 and LLMNR 224.0.0.252
	 */
	ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
			vif->fw_vif_idx, WOW_LIST_ID,
			sizeof(discvr_pattern), discvr_offset,
			discvr_pattern, discvr_mask);
	if (ret) {
		ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR pattern\n");
		return ret;
	}

	/* Setup all DHCP broadcast pkt pattern */
	ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
			vif->fw_vif_idx, WOW_LIST_ID,
			sizeof(dhcp_pattern), dhcp_offset,
			dhcp_pattern, dhcp_mask);
	if (ret) {
		ath6kl_err("failed to add WOW DHCP broadcast pattern\n");
		return ret;
	}

	return 0;
}
/*
 * Install the default station-mode WoWLAN wake patterns: frames to our
 * own MAC address, plus multicast discovery (mDNS/SSDP/LLMNR) when
 * multicast reception is active.  Returns 0 or a negative errno.
 */
static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
{
	struct net_device *ndev = vif->ndev;
	static const u8 discvr_pattern[] = { 0xe0, 0x00, 0x00, 0xf8 };
	static const u8 discvr_mask[] = { 0xf0, 0x00, 0x00, 0xf8 };
	u8 discvr_offset = 38;
	u8 mac_mask[ETH_ALEN];
	int ret;

	/* Setup unicast pkt pattern */
	memset(mac_mask, 0xff, ETH_ALEN);
	ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
				vif->fw_vif_idx, WOW_LIST_ID,
				ETH_ALEN, 0, ndev->dev_addr,
				mac_mask);
	if (ret) {
		ath6kl_err("failed to add WOW unicast pattern\n");
		return ret;
	}

	/*
	 * Setup multicast pattern for mDNS 224.0.0.251,
	 * SSDP 239.255.255.250 and LLMNR 224.0.0.252
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (ndev->flags & IFF_MULTICAST && netdev_mc_count(ndev) > 0)) {
		ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
				vif->fw_vif_idx, WOW_LIST_ID,
				sizeof(discvr_pattern), discvr_offset,
				discvr_pattern, discvr_mask);
		if (ret) {
			ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR pattern\n");
			return ret;
		}
	}

	return 0;
}
/* Has the firmware acknowledged the host-sleep mode command? */
static int is_hsleep_mode_procsed(struct ath6kl_vif *vif)
{
	return test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
}
/* True when no TX frames remain queued on the WMI control endpoint. */
static bool is_ctrl_ep_empty(struct ath6kl *ar)
{
	return !ar->tx_pending[ar->ctrl_ep];
}
/*
 * Put the target into host-sleep mode: issue the WMI command, wait for
 * the firmware to acknowledge it, then wait for the control endpoint to
 * drain.  Returns 0, -ETIMEDOUT, or a negative errno from the waits.
 */
static int ath6kl_cfg80211_host_sleep(struct ath6kl *ar, struct ath6kl_vif *vif)
{
	int ret, left;

	clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);

	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
						 ATH6KL_HOST_MODE_ASLEEP);
	if (ret)
		return ret;

	left = wait_event_interruptible_timeout(ar->event_wq,
						is_hsleep_mode_procsed(vif),
						WMI_TIMEOUT);
	if (left == 0) {
		ath6kl_warn("timeout, didn't get host sleep cmd processed event\n");
		ret = -ETIMEDOUT;
	} else if (left < 0) {
		ath6kl_warn("error while waiting for host sleep cmd processed event %d\n",
			    left);
		ret = left;
	}

	if (ar->tx_pending[ar->ctrl_ep]) {
		/* let queued WMI control traffic flush before sleeping */
		left = wait_event_interruptible_timeout(ar->event_wq,
							is_ctrl_ep_empty(ar),
							WMI_TIMEOUT);
		if (left == 0) {
			ath6kl_warn("clear wmi ctrl data timeout\n");
			ret = -ETIMEDOUT;
		} else if (left < 0) {
			ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
			ret = left;
		}
	}

	return ret;
}
/*
 * Prepare one connected vif for WoW suspend: reset old patterns,
 * install user/AP/STA default patterns, stretch listen/bmiss intervals,
 * disable scanning, and hand the local IPv4 addresses to the firmware
 * ARP agent.  Returns 0 or a negative errno.
 */
static int ath6kl_wow_suspend_vif(struct ath6kl_vif *vif,
				  struct cfg80211_wowlan *wow, u32 *filter)
{
	struct ath6kl *ar = vif->ar;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	int ret;
	u16 i, bmiss_time;
	__be32 ips[MAX_IP_ADDRS];
	u8 index = 0;

	if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags) &&
	    test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
		     ar->fw_capabilities)) {
		/* drop multicast while asleep; restored on resume */
		ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
						  vif->fw_vif_idx, false);
		if (ret)
			return ret;
	}

	/* Clear existing WOW patterns */
	for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
		ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
					       WOW_LIST_ID, i);

	/*
	 * Skip the default WOW pattern configuration
	 * if the driver receives any WOW patterns from
	 * the user.
	 */
	if (wow)
		ret = ath6kl_wow_usr(ar, vif, wow, filter);
	else if (vif->nw_type == AP_NETWORK)
		ret = ath6kl_wow_ap(ar, vif);
	else
		ret = ath6kl_wow_sta(ar, vif);

	if (ret)
		return ret;

	netif_stop_queue(vif->ndev);

	if (vif->nw_type != AP_NETWORK) {
		ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
						    ATH6KL_MAX_WOW_LISTEN_INTL,
						    0);
		if (ret)
			return ret;

		/* Set listen interval x 15 times as bmiss time */
		bmiss_time = ATH6KL_MAX_WOW_LISTEN_INTL * 15;
		if (bmiss_time > ATH6KL_MAX_BMISS_TIME)
			bmiss_time = ATH6KL_MAX_BMISS_TIME;

		ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx,
					       bmiss_time, 0);
		if (ret)
			return ret;

		/* effectively disable background scanning while asleep */
		ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
						0xFFFF, 0, 0xFFFF, 0, 0, 0,
						0, 0, 0, 0);
		if (ret)
			return ret;
	}

	/* Setup own IP addr for ARP agent. */
	in_dev = __in_dev_get_rtnl(vif->ndev);
	if (!in_dev)
		return 0;

	ifa = in_dev->ifa_list;
	memset(&ips, 0, sizeof(ips));

	/* Configure IP addr only if IP address count < MAX_IP_ADDRS */
	while (index < MAX_IP_ADDRS && ifa) {
		ips[index] = ifa->ifa_local;
		ifa = ifa->ifa_next;
		index++;
	}

	if (ifa) {
		ath6kl_err("total IP addr count is exceeding fw limit\n");
		return -EINVAL;
	}

	ret = ath6kl_wmi_set_ip_cmd(ar->wmi, vif->fw_vif_idx, ips[0], ips[1]);
	if (ret) {
		ath6kl_err("fail to setup ip for arp agent\n");
		return ret;
	}

	return ret;
}
/*
 * Enter WoW suspend: configure wake filters on every connected vif,
 * enable WoW mode through the first vif, and put the target to sleep.
 * Fails with -ENOTCONN when no vif is connected.
 */
static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
	struct ath6kl_vif *first_vif, *vif;
	int ret = 0;
	u32 filter = 0;
	bool connected = false;

	/* enter / leave wow suspend on first vif always */
	first_vif = ath6kl_vif_first(ar);
	if (WARN_ON(unlikely(!first_vif)) ||
	    !ath6kl_cfg80211_ready(first_vif))
		return -EIO;

	if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
		return -EINVAL;

	/* install filters for each connected vif */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (!test_bit(CONNECTED, &vif->flags) ||
		    !ath6kl_cfg80211_ready(vif))
			continue;
		connected = true;

		ret = ath6kl_wow_suspend_vif(vif, wow, &filter);
		if (ret)
			break;
	}
	spin_unlock_bh(&ar->list_lock);

	if (!connected)
		return -ENOTCONN;
	else if (ret)
		return ret;

	ar->state = ATH6KL_STATE_SUSPENDING;

	ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, first_vif->fw_vif_idx,
					  ATH6KL_WOW_MODE_ENABLE,
					  filter,
					  WOW_HOST_REQ_DELAY);
	if (ret)
		return ret;

	return ath6kl_cfg80211_host_sleep(ar, first_vif);
}
/*
 * Undo the per-vif WOW suspend settings after wakeup: reset scan
 * parameters, restore the vif's configured listen interval and bmiss
 * time, re-enable multicast reception if it was filtered only for WOW,
 * and restart the netdev TX queue.
 */
static int ath6kl_wow_resume_vif(struct ath6kl_vif *vif)
{
	struct ath6kl *ar = vif->ar;
	int ret;

	if (vif->nw_type != AP_NETWORK) {
		ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
						0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
		if (ret)
			return ret;

		/* Restore the values saved in the vif at interface-add time */
		ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
						    vif->listen_intvl_t, 0);
		if (ret)
			return ret;

		ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx,
					       vif->bmiss_time_t, 0);
		if (ret)
			return ret;
	}

	/* Turn multicast filtering back off unless the netdev wants it off */
	if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags) &&
	    test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
		     ar->fw_capabilities)) {
		ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
						  vif->fw_vif_idx, true);
		if (ret)
			return ret;
	}

	netif_wake_queue(vif->ndev);

	return 0;
}
/*
 * Leave WOW mode: wake the target, then restore per-vif settings for
 * every connected vif.  On any failure the device state reverts to
 * ATH6KL_STATE_WOW so a later resume attempt can retry.
 */
static int ath6kl_wow_resume(struct ath6kl *ar)
{
	struct ath6kl_vif *vif;
	int ret;

	vif = ath6kl_vif_first(ar);
	if (WARN_ON(unlikely(!vif)) ||
	    !ath6kl_cfg80211_ready(vif))
		return -EIO;

	ar->state = ATH6KL_STATE_RESUMING;

	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
						 ATH6KL_HOST_MODE_AWAKE);
	if (ret) {
		ath6kl_warn("Failed to configure host sleep mode for wow resume: %d\n",
			    ret);
		goto cleanup;
	}

	/* NOTE(review): ath6kl_wow_resume_vif() issues WMI commands while
	 * this BH spinlock is held — confirm none of them can sleep. */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (!test_bit(CONNECTED, &vif->flags) ||
		    !ath6kl_cfg80211_ready(vif))
			continue;
		ret = ath6kl_wow_resume_vif(vif);
		if (ret)
			break;
	}
	spin_unlock_bh(&ar->list_lock);

	if (ret)
		goto cleanup;

	ar->state = ATH6KL_STATE_ON;
	return 0;

cleanup:
	ar->state = ATH6KL_STATE_WOW;
	return ret;
}
/*
 * Prepare the target for deep sleep: stop all vif activity, drop to the
 * REC_POWER power mode, disable WOW wakeups, flush pending data frames
 * and finally tell firmware the host is asleep.
 */
static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar)
{
	struct ath6kl_vif *vif = ath6kl_vif_first(ar);
	int err;

	if (!vif)
		return -EIO;

	if (!test_bit(WMI_READY, &ar->flag)) {
		ath6kl_err("deepsleep failed as wmi is not ready\n");
		return -EIO;
	}

	ath6kl_cfg80211_stop_all(ar);

	/* Remember the active power mode so resume can restore it */
	ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
	err = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
	if (err)
		return err;

	/* WOW wakeups are not wanted while in deep sleep */
	err = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
					  ATH6KL_WOW_MODE_DISABLE, 0, 0);
	if (err)
		return err;

	/* Drop any queued non-control frames before sleeping */
	ath6kl_tx_data_cleanup(ar);

	return ath6kl_cfg80211_host_sleep(ar, vif);
}
/*
 * Wake the target from deep sleep: restore the power mode saved at
 * suspend time, tell firmware the host is awake, then reset the scan
 * parameters to their defaults.
 */
static int ath6kl_cfg80211_deepsleep_resume(struct ath6kl *ar)
{
	struct ath6kl_vif *vif = ath6kl_vif_first(ar);
	int err;

	if (!vif)
		return -EIO;

	/* Restore the power mode saved by deepsleep_suspend, if changed */
	if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
		err = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
					       ar->wmi->saved_pwr_mode);
		if (err)
			return err;
	}

	err = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
						 ATH6KL_HOST_MODE_AWAKE);
	if (err)
		return err;

	ar->state = ATH6KL_STATE_ON;

	/* Reset scan parameters to their default values */
	return ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
					 0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
}
/*
 * Common suspend entry point, called from the HIF layer with the chosen
 * suspend flavour (WOW / deep sleep / cut power).  Unknown modes are a
 * no-op.  Any in-progress scan is completed (aborted) at the end
 * regardless of the mode taken.
 */
int ath6kl_cfg80211_suspend(struct ath6kl *ar,
			    enum ath6kl_cfg_suspend_mode mode,
			    struct cfg80211_wowlan *wow)
{
	struct ath6kl_vif *vif;
	enum ath6kl_state prev_state;
	int ret;

	switch (mode) {
	case ATH6KL_CFG_SUSPEND_WOW:
		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode suspend\n");

		/* Flush all non control pkts in TX path */
		ath6kl_tx_data_cleanup(ar);

		prev_state = ar->state;

		ret = ath6kl_wow_suspend(ar, wow);
		if (ret) {
			/* Roll the state back so later paths still work */
			ar->state = prev_state;
			return ret;
		}
		ar->state = ATH6KL_STATE_WOW;
		break;

	case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep suspend\n");

		ret = ath6kl_cfg80211_deepsleep_suspend(ar);
		if (ret) {
			ath6kl_err("deepsleep suspend failed: %d\n", ret);
			return ret;
		}
		ar->state = ATH6KL_STATE_DEEPSLEEP;
		break;

	case ATH6KL_CFG_SUSPEND_CUTPOWER:
		ath6kl_cfg80211_stop_all(ar);

		if (ar->state == ATH6KL_STATE_OFF) {
			ath6kl_dbg(ATH6KL_DBG_SUSPEND,
				   "suspend hw off, no action for cutpower\n");
			break;
		}

		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "suspend cutting power\n");

		/* hw stop failure is only warned about; state advances */
		ret = ath6kl_init_hw_stop(ar);
		if (ret) {
			ath6kl_warn("failed to stop hw during suspend: %d\n",
				    ret);
		}

		ar->state = ATH6KL_STATE_CUTPOWER;
		break;

	default:
		break;
	}

	/* NOTE(review): vif_list is walked here without ar->list_lock —
	 * confirm no vif can be removed concurrently at suspend time. */
	list_for_each_entry(vif, &ar->vif_list, list)
		ath6kl_cfg80211_scan_complete_event(vif, true);

	return 0;
}
EXPORT_SYMBOL(ath6kl_cfg80211_suspend);
/*
 * Resume counterpart of ath6kl_cfg80211_suspend(); dispatches on the
 * state the suspend path left the device in.  Also invoked early from
 * the RX path via ath6kl_check_wow_status().
 */
int ath6kl_cfg80211_resume(struct ath6kl *ar)
{
	int ret;

	switch (ar->state) {
	case ATH6KL_STATE_WOW:
		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode resume\n");

		ret = ath6kl_wow_resume(ar);
		if (ret) {
			ath6kl_warn("wow mode resume failed: %d\n", ret);
			return ret;
		}
		break;

	case ATH6KL_STATE_DEEPSLEEP:
		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep resume\n");

		ret = ath6kl_cfg80211_deepsleep_resume(ar);
		if (ret) {
			ath6kl_warn("deep sleep resume failed: %d\n", ret);
			return ret;
		}
		break;

	case ATH6KL_STATE_CUTPOWER:
		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "resume restoring power\n");

		ret = ath6kl_init_hw_start(ar);
		if (ret) {
			ath6kl_warn("Failed to boot hw in resume: %d\n", ret);
			return ret;
		}
		break;

	default:
		break;
	}

	return 0;
}
EXPORT_SYMBOL(ath6kl_cfg80211_resume);
#ifdef CONFIG_PM
/* hif layer decides what suspend mode to use */
/* cfg80211 suspend hook; the HIF layer picks the actual suspend mode. */
static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy,
				     struct cfg80211_wowlan *wow)
{
	struct ath6kl *ath6kl = wiphy_priv(wiphy);

	/* Quiesce firmware-recovery handling before the device goes down */
	ath6kl_recovery_suspend(ath6kl);

	return ath6kl_hif_suspend(ath6kl, wow);
}
/* cfg80211 resume hook: bring the HIF back first, then re-arm recovery. */
static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
{
	struct ath6kl *ath6kl = wiphy_priv(wiphy);
	int ret = ath6kl_hif_resume(ath6kl);

	if (ret)
		return ret;

	ath6kl_recovery_resume(ath6kl);

	return 0;
}
/*
* FIXME: WOW suspend mode is selected if the host sdio controller supports
* both sdio irq wake up and keep power. The target pulls sdio data line to
* wake up the host when WOW pattern matches. This causes sdio irq handler
* is being called in the host side which internally hits ath6kl's RX path.
*
* Since sdio interrupt is not disabled, RX path executes even before
* the host executes the actual resume operation from PM module.
*
* In the current scenario, WOW resume should happen before start processing
* any data from the target. So It's required to perform WOW resume in RX path.
* Ideally we should perform WOW resume only in the actual platform
* resume path. This area needs bit rework to avoid WOW resume in RX path.
*
* ath6kl_check_wow_status() is called from ath6kl_rx().
*/
/*
 * Called from the RX path (ath6kl_rx()) to trigger an early WOW resume;
 * see the FIXME above for why resume must happen before RX processing.
 */
void ath6kl_check_wow_status(struct ath6kl *ar)
{
	/* Still on the way down; nothing to resume yet */
	if (ar->state == ATH6KL_STATE_SUSPENDING)
		return;

	if (ar->state == ATH6KL_STATE_WOW)
		ath6kl_cfg80211_resume(ar);
}

#else

/* Without CONFIG_PM there is no WOW state to check */
void ath6kl_check_wow_status(struct ath6kl *ar)
{
}
#endif
/*
 * Enable or disable HT for one band and push the resulting capability
 * block to firmware.  No-op (returns 0) if the state already matches.
 */
static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
			    bool ht_enable)
{
	struct ath6kl_htcap *htcap = &vif->htcap[band];

	/* Nothing to push to firmware if the state is already right */
	if (htcap->ht_enable == ht_enable)
		return 0;

	if (!ht_enable) {
		/* Disable HT: clear the whole capability block */
		memset(htcap, 0, sizeof(*htcap));
	} else {
		/* Enable HT with the per-band default capabilities */
		htcap->ht_enable = true;
		htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ?
			ath6kl_g_htcap : ath6kl_a_htcap;
		htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
	}

	return ath6kl_wmi_set_htcap_cmd(vif->ar->wmi, vif->fw_vif_idx,
					band, htcap);
}
/*
 * Re-apply the wiphy's HT support flags to firmware for every band the
 * wiphy registered.  Returns the first WMI error, or 0.
 */
static int ath6kl_restore_htcap(struct ath6kl_vif *vif)
{
	struct wiphy *wiphy = vif->ar->wiphy;
	int i, err = 0;

	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
		if (!wiphy->bands[i])
			continue;

		err = ath6kl_set_htcap(vif, i,
				       wiphy->bands[i]->ht_cap.ht_supported);
		if (err)
			break;
	}

	return err;
}
static bool ath6kl_is_p2p_ie(const u8 *pos)
{
return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
pos[2] == 0x50 && pos[3] == 0x6f &&
pos[4] == 0x9a && pos[5] == 0x09;
}
/*
 * Push the AP's Probe Response IEs to firmware, with every P2P vendor IE
 * stripped: those are added per-request by ath6kl_send_go_probe_resp().
 * A NULL/empty buffer clears the IEs in firmware.
 */
static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif,
					const u8 *ies, size_t ies_len)
{
	struct ath6kl *ar = vif->ar;
	u8 *filtered = NULL;
	size_t filtered_len = 0;
	int ret;

	if (ies && ies_len) {
		const u8 *ie = ies;
		const u8 *end = ies + ies_len;

		filtered = kmalloc(ies_len, GFP_KERNEL);
		if (filtered == NULL)
			return -ENOMEM;

		/* Walk the TLVs, copying everything that is not a P2P IE */
		while (ie + 1 < end) {
			if (ie + 2 + ie[1] > end)
				break;
			if (!ath6kl_is_p2p_ie(ie)) {
				memcpy(filtered + filtered_len, ie,
				       2 + ie[1]);
				filtered_len += 2 + ie[1];
			}
			ie += 2 + ie[1];
		}
	}

	ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
				       WMI_FRAME_PROBE_RESP, filtered,
				       filtered_len);
	kfree(filtered);
	return ret;
}
/*
 * Program beacon, probe-response and assoc-response IEs into firmware.
 * An unset buffer clears the corresponding IE set in firmware.
 */
static int ath6kl_set_ies(struct ath6kl_vif *vif,
			  struct cfg80211_beacon_data *info)
{
	struct ath6kl *ar = vif->ar;
	int res;

	res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
				       WMI_FRAME_BEACON,
				       info->beacon_ies,
				       info->beacon_ies_len);
	if (res)
		return res;

	/* Probe-response IEs have the P2P vendor IEs stripped first */
	res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
					   info->proberesp_ies_len);
	if (res)
		return res;

	res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
				       WMI_FRAME_ASSOC_RESP,
				       info->assocresp_ies,
				       info->assocresp_ies_len);

	return res;
}
/*
 * Extract the 2-byte RSN Capabilities field from the beacon tail's RSN IE.
 *
 * Returns -EINVAL when no RSN IE (or no version field) is present, 0
 * otherwise; *rsn_capab is written only when the IE actually carries a
 * capabilities field.
 *
 * Fix: validate the pairwise-cipher and AKM suite counts against the
 * remaining IE length before skipping over them.  Previously a malformed
 * IE with an oversized count made "rsn_ie_len -= (2 + cnt * 4)" underflow
 * (rsn_ie_len is size_t), defeating the following "< 2" check and letting
 * the memcpy() read out of bounds past the IE.
 */
static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
				u8 *rsn_capab)
{
	const u8 *rsn_ie;
	size_t rsn_ie_len;
	u16 cnt;

	if (!beacon->tail)
		return -EINVAL;

	rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, beacon->tail, beacon->tail_len);
	if (!rsn_ie)
		return -EINVAL;

	rsn_ie_len = *(rsn_ie + 1);
	/* skip element id and length */
	rsn_ie += 2;

	/* skip version */
	if (rsn_ie_len < 2)
		return -EINVAL;
	rsn_ie += 2;
	rsn_ie_len -= 2;

	/* skip group cipher suite */
	if (rsn_ie_len < 4)
		return 0;
	rsn_ie += 4;
	rsn_ie_len -= 4;

	/* skip pairwise cipher suite count + list, bounds-checked */
	if (rsn_ie_len < 2)
		return 0;
	cnt = get_unaligned_le16(rsn_ie);
	if (rsn_ie_len < 2 + cnt * 4)
		return 0;	/* malformed: suite list exceeds IE */
	rsn_ie += (2 + cnt * 4);
	rsn_ie_len -= (2 + cnt * 4);

	/* skip akm suite count + list, bounds-checked */
	if (rsn_ie_len < 2)
		return 0;
	cnt = get_unaligned_le16(rsn_ie);
	if (rsn_ie_len < 2 + cnt * 4)
		return 0;	/* malformed: suite list exceeds IE */
	rsn_ie += (2 + cnt * 4);
	rsn_ie_len -= (2 + cnt * 4);

	if (rsn_ie_len < 2)
		return 0;

	memcpy(rsn_capab, rsn_ie, 2);

	return 0;
}
/*
 * Start AP (or P2P GO) operation: program beacon/probe/assoc IEs, SSID,
 * security configuration, channel and inactivity settings into firmware,
 * then commit the AP connect profile.
 */
static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
			   struct cfg80211_ap_settings *info)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_vif *vif = netdev_priv(dev);
	struct ieee80211_mgmt *mgmt;
	bool hidden = false;
	u8 *ies;
	int ies_len;
	struct wmi_connect_cmd p;	/* AP profile sent to firmware */
	int res;
	int i, ret;
	u16 rsn_capab = 0;
	int inactivity_timeout = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__);

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	if (vif->next_mode != AP_NETWORK)
		return -EOPNOTSUPP;

	/* NOTE(review): 'res' is not checked here and is overwritten below —
	 * confirm whether an ath6kl_set_ies() failure should abort start. */
	res = ath6kl_set_ies(vif, &info->beacon);

	ar->ap_mode_bkey.valid = false;

	ret = ath6kl_wmi_ap_set_beacon_intvl_cmd(ar->wmi, vif->fw_vif_idx,
						 info->beacon_interval);

	if (ret)
		ath6kl_warn("Failed to set beacon interval: %d\n", ret);

	ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx,
					 info->dtim_period);

	/* ignore error, just print a warning and continue normally */
	if (ret)
		ath6kl_warn("Failed to set dtim_period in beacon: %d\n", ret);

	/* ies/ies_len only validate that the beacon head is well-formed */
	if (info->beacon.head == NULL)
		return -EINVAL;
	mgmt = (struct ieee80211_mgmt *) info->beacon.head;
	ies = mgmt->u.beacon.variable;
	if (ies > info->beacon.head + info->beacon.head_len)
		return -EINVAL;
	ies_len = info->beacon.head + info->beacon.head_len - ies;

	if (info->ssid == NULL)
		return -EINVAL;
	memcpy(vif->ssid, info->ssid, info->ssid_len);
	vif->ssid_len = info->ssid_len;
	if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
		hidden = true;

	res = ath6kl_wmi_ap_hidden_ssid(ar->wmi, vif->fw_vif_idx, hidden);
	if (res)
		return res;

	ret = ath6kl_set_auth_type(vif, info->auth_type);
	if (ret)
		return ret;

	memset(&p, 0, sizeof(p));

	/* Translate cfg80211 AKM suites into firmware auth-mode bits */
	for (i = 0; i < info->crypto.n_akm_suites; i++) {
		switch (info->crypto.akm_suites[i]) {
		case WLAN_AKM_SUITE_8021X:
			if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1)
				p.auth_mode |= WPA_AUTH;
			if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2)
				p.auth_mode |= WPA2_AUTH;
			break;
		case WLAN_AKM_SUITE_PSK:
			if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1)
				p.auth_mode |= WPA_PSK_AUTH;
			if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2)
				p.auth_mode |= WPA2_PSK_AUTH;
			break;
		}
	}
	if (p.auth_mode == 0)
		p.auth_mode = NONE_AUTH;
	vif->auth_mode = p.auth_mode;

	/* Translate pairwise cipher suites into firmware crypto bits */
	for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) {
		switch (info->crypto.ciphers_pairwise[i]) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			p.prwise_crypto_type |= WEP_CRYPT;
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			p.prwise_crypto_type |= TKIP_CRYPT;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			p.prwise_crypto_type |= AES_CRYPT;
			break;
		case WLAN_CIPHER_SUITE_SMS4:
			p.prwise_crypto_type |= WAPI_CRYPT;
			break;
		}
	}
	if (p.prwise_crypto_type == 0) {
		p.prwise_crypto_type = NONE_CRYPT;
		ath6kl_set_cipher(vif, 0, true);
	} else if (info->crypto.n_ciphers_pairwise == 1)
		ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);

	/* Translate the group cipher suite */
	switch (info->crypto.cipher_group) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		p.grp_crypto_type = WEP_CRYPT;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		p.grp_crypto_type = TKIP_CRYPT;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		p.grp_crypto_type = AES_CRYPT;
		break;
	case WLAN_CIPHER_SUITE_SMS4:
		p.grp_crypto_type = WAPI_CRYPT;
		break;
	default:
		p.grp_crypto_type = NONE_CRYPT;
		break;
	}
	ath6kl_set_cipher(vif, info->crypto.cipher_group, false);

	p.nw_type = AP_NETWORK;
	vif->nw_type = vif->next_mode;

	p.ssid_len = vif->ssid_len;
	memcpy(p.ssid, vif->ssid, vif->ssid_len);
	p.dot11_auth_mode = vif->dot11_auth_mode;
	p.ch = cpu_to_le16(info->chandef.chan->center_freq);

	/* Enable uAPSD support by default */
	res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true);
	if (res < 0)
		return res;

	if (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
		p.nw_subtype = SUBTYPE_P2PGO;
	} else {
		/*
		 * Due to firmware limitation, it is not possible to
		 * do P2P mgmt operations in AP mode
		 */
		p.nw_subtype = SUBTYPE_NONE;
	}

	if (info->inactivity_timeout) {
		inactivity_timeout = info->inactivity_timeout;

		/* Flagged hardware takes the inactivity period in minutes */
		if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
			inactivity_timeout = DIV_ROUND_UP(inactivity_timeout,
							  60);
		res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx,
						  inactivity_timeout);
		if (res < 0)
			return res;
	}

	if (ath6kl_set_htcap(vif, info->chandef.chan->band,
			     cfg80211_get_chandef_type(&info->chandef)
			     != NL80211_CHAN_NO_HT))
		return -EIO;

	/*
	 * Get the PTKSA replay counter in the RSN IE. Supplicant
	 * will use the RSN IE in M3 message and firmware has to
	 * advertise the same in beacon/probe response. Send
	 * the complete RSN IE capability field to firmware
	 */
	if (!ath6kl_get_rsn_capab(&info->beacon, (u8 *) &rsn_capab) &&
	    test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
		     ar->fw_capabilities)) {
		res = ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx,
					    WLAN_EID_RSN, WMI_RSN_IE_CAPB,
					    (const u8 *) &rsn_capab,
					    sizeof(rsn_capab));
		vif->rsn_capab = rsn_capab;
		if (res < 0)
			return res;
	}

	memcpy(&vif->profile, &p, sizeof(p));
	res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
	if (res < 0)
		return res;

	return 0;
}
/* Update the running AP's beacon/probe/assoc IEs in firmware. */
static int ath6kl_change_beacon(struct wiphy *wiphy, struct net_device *dev,
				struct cfg80211_beacon_data *beacon)
{
	struct ath6kl_vif *vif = netdev_priv(dev);

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	/* Beacon updates only make sense when operating as an AP */
	if (vif->next_mode != AP_NETWORK)
		return -EOPNOTSUPP;

	return ath6kl_set_ies(vif, beacon);
}
/* Stop AP operation: disconnect the BSS in firmware and restore HT. */
static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_vif *vif = netdev_priv(dev);

	if (vif->nw_type != AP_NETWORK)
		return -EOPNOTSUPP;

	if (!test_bit(CONNECTED, &vif->flags))
		return -ENOTCONN;

	/* Tear the BSS down in firmware and mark the vif disconnected */
	ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
	clear_bit(CONNECTED, &vif->flags);

	/* Restore ht setting in firmware */
	return ath6kl_restore_htcap(vif);
}
static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* Deauthenticate one STA; a NULL MAC deauthenticates every STA. */
static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
			      u8 *mac)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_vif *vif = netdev_priv(dev);

	return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, WMI_AP_DEAUTH,
				      mac ? mac : bcast_addr,
				      WLAN_REASON_PREV_AUTH_NOT_VALID);
}
/*
 * Change a station's flags in AP mode.  Only the AUTHORIZED flag is
 * acted upon: it maps to the firmware's authorize/unauthorize MLME op.
 */
static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
				 u8 *mac, struct station_parameters *params)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_vif *vif = netdev_priv(dev);
	bool authorize;
	int err;

	if (vif->nw_type != AP_NETWORK)
		return -EOPNOTSUPP;

	err = cfg80211_check_station_change(wiphy, params,
					    CFG80211_STA_AP_MLME_CLIENT);
	if (err)
		return err;

	authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
	return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
				      authorize ? WMI_AP_MLME_AUTHORIZE :
						  WMI_AP_MLME_UNAUTHORIZE,
				      mac, 0);
}
/*
 * Ask firmware to remain on @chan for @duration ms; hands a non-zero
 * cookie back to cfg80211 for later cancellation.
 */
static int ath6kl_remain_on_channel(struct wiphy *wiphy,
				    struct wireless_dev *wdev,
				    struct ieee80211_channel *chan,
				    unsigned int duration,
				    u64 *cookie)
{
	struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
	struct ath6kl *ar = ath6kl_priv(vif->ndev);
	u32 roc_id;

	/* TODO: if already pending or ongoing remain-on-channel,
	 * return -EBUSY */

	/* Cookie 0 is reserved, so skip it when the counter wraps */
	roc_id = ++vif->last_roc_id;
	if (!roc_id)
		roc_id = ++vif->last_roc_id;
	*cookie = roc_id;

	return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx,
					     chan->center_freq, duration);
}
/* Cancel the most recent remain-on-channel identified by @cookie. */
static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
					   struct wireless_dev *wdev,
					   u64 cookie)
{
	struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
	struct ath6kl *ar = ath6kl_priv(vif->ndev);

	/* Only the latest remain-on-channel can be cancelled */
	if (cookie != vif->last_roc_id)
		return -ENOENT;

	vif->last_cancel_roc_id = cookie;
	return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx);
}
/*
 * Send a Probe Response in P2P GO mode.  The target builds most of the
 * frame itself, so only the P2P vendor IEs from the user-space frame are
 * extracted and forwarded.
 */
static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif,
				     const u8 *buf, size_t len,
				     unsigned int freq)
{
	struct ath6kl *ar = vif->ar;
	const struct ieee80211_mgmt *mgmt =
		(const struct ieee80211_mgmt *) buf;
	const u8 *ie, *frame_end;
	u8 *p2p_ies;
	int p2p_ies_len = 0;
	int ret;

	p2p_ies = kmalloc(len, GFP_KERNEL);
	if (p2p_ies == NULL)
		return -ENOMEM;

	/* Collect every P2P IE found in the frame's IE area */
	frame_end = buf + len;
	ie = mgmt->u.probe_resp.variable;
	while (ie + 1 < frame_end) {
		if (ie + 2 + ie[1] > frame_end)
			break;
		if (ath6kl_is_p2p_ie(ie)) {
			memcpy(p2p_ies + p2p_ies_len, ie, 2 + ie[1]);
			p2p_ies_len += 2 + ie[1];
		}
		ie += 2 + ie[1];
	}

	ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx,
						 freq, mgmt->da, p2p_ies,
						 p2p_ies_len);
	kfree(p2p_ies);
	return ret;
}
/*
 * AP-mode power-save handling for an outgoing unicast mgmt frame.
 *
 * Returns true when the frame was queued for a sleeping STA (the caller
 * must NOT transmit it now); false when the frame should be sent
 * immediately.  When the TX answers a PS-Poll, *more_data is set if
 * further frames remain queued for that STA.
 */
static bool ath6kl_mgmt_powersave_ap(struct ath6kl_vif *vif,
				     u32 id,
				     u32 freq,
				     u32 wait,
				     const u8 *buf,
				     size_t len,
				     bool *more_data,
				     bool no_cck)
{
	struct ieee80211_mgmt *mgmt;
	struct ath6kl_sta *conn;
	bool is_psq_empty = false;
	struct ath6kl_mgmt_buff *mgmt_buf;
	size_t mgmt_buf_size;
	struct ath6kl *ar = vif->ar;

	/* Multicast frames are never queued for power save */
	mgmt = (struct ieee80211_mgmt *) buf;
	if (is_multicast_ether_addr(mgmt->da))
		return false;

	conn = ath6kl_find_sta(vif, mgmt->da);
	if (!conn)
		return false;

	if (conn->sta_flags & STA_PS_SLEEP) {
		if (!(conn->sta_flags & STA_PS_POLLED)) {
			/* Queue the frames if the STA is sleeping */
			mgmt_buf_size = len + sizeof(struct ath6kl_mgmt_buff);
			mgmt_buf = kmalloc(mgmt_buf_size, GFP_KERNEL);
			if (!mgmt_buf)
				return false;

			INIT_LIST_HEAD(&mgmt_buf->list);
			mgmt_buf->id = id;
			mgmt_buf->freq = freq;
			mgmt_buf->wait = wait;
			mgmt_buf->len = len;
			mgmt_buf->no_cck = no_cck;
			memcpy(mgmt_buf->buf, buf, len);
			spin_lock_bh(&conn->psq_lock);
			is_psq_empty = skb_queue_empty(&conn->psq) &&
				(conn->mgmt_psq_len == 0);
			list_add_tail(&mgmt_buf->list, &conn->mgmt_psq);
			conn->mgmt_psq_len++;
			spin_unlock_bh(&conn->psq_lock);

			/*
			 * If this is the first pkt getting queued
			 * for this STA, update the PVB for this
			 * STA.
			 */
			if (is_psq_empty)
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 1);
			return true;
		}

		/*
		 * This tx is because of a PsPoll.
		 * Determine if MoreData bit has to be set.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq) || (conn->mgmt_psq_len != 0))
			*more_data = true;
		spin_unlock_bh(&conn->psq_lock);
	}

	return false;
}
/* Check if SSID length is greater than DIRECT- */
static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
{
const struct ieee80211_mgmt *mgmt;
mgmt = (const struct ieee80211_mgmt *) buf;
/* variable[1] contains the SSID tag length */
if (buf + len >= &mgmt->u.probe_resp.variable[1] &&
(mgmt->u.probe_resp.variable[1] > P2P_WILDCARD_SSID_LEN)) {
return true;
}
return false;
}
/*
 * cfg80211 mgmt_tx handler.  GO Probe Responses go through a dedicated
 * WMI command (cookie 0, no TX status); every other frame gets a non-zero
 * cookie and, in AP mode, may be queued for a sleeping STA instead of
 * being transmitted immediately.
 */
static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
			  struct ieee80211_channel *chan, bool offchan,
			  unsigned int wait, const u8 *buf, size_t len,
			  bool no_cck, bool dont_wait_for_ack, u64 *cookie)
{
	struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
	struct ath6kl *ar = ath6kl_priv(vif->ndev);
	u32 id;
	const struct ieee80211_mgmt *mgmt;
	bool more_data, queued;

	mgmt = (const struct ieee80211_mgmt *) buf;
	if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
	    ieee80211_is_probe_resp(mgmt->frame_control) &&
	    ath6kl_is_p2p_go_ssid(buf, len)) {
		/*
		 * Send Probe Response frame in GO mode using a separate WMI
		 * command to allow the target to fill in the generic IEs.
		 */
		*cookie = 0; /* TX status not supported */
		return ath6kl_send_go_probe_resp(vif, buf, len,
						 chan->center_freq);
	}

	id = vif->send_action_id++;
	if (id == 0) {
		/*
		 * 0 is a reserved value in the WMI command and shall not be
		 * used for the command.
		 */
		id = vif->send_action_id++;
	}

	*cookie = id;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		queued = ath6kl_mgmt_powersave_ap(vif,
						  id, chan->center_freq,
						  wait, buf,
						  len, &more_data, no_cck);
		if (queued)
			return 0;
	}

	return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
					chan->center_freq, wait,
					buf, len, no_cck);
}
/* Record mgmt-frame registrations; only Probe Request matters here. */
static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
				       struct wireless_dev *wdev,
				       u16 frame_type, bool reg)
{
	struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
		   __func__, frame_type, reg);

	if (frame_type != IEEE80211_STYPE_PROBE_REQ)
		return;

	/*
	 * This notification callback is not allowed to sleep, so
	 * WMI_PROBE_REQ_REPORT_CMD cannot be sent from here.  Instead the
	 * target is hardcoded to always report Probe Request frames and
	 * we only track the registration state.
	 */
	vif->probe_req_report = reg;
}
/*
 * Start a firmware-offloaded scheduled scan: program probed SSIDs, the
 * BSS filter, an optional RSSI threshold and the scan interval, then
 * enable the scheduled scan in firmware.  Refused while connecting or
 * connected (-EBUSY) or when the device is not up (-EIO).
 */
static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
				       struct net_device *dev,
				       struct cfg80211_sched_scan_request *request)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_vif *vif = netdev_priv(dev);
	u16 interval;
	int ret, rssi_thold;
	int n_match_sets = request->n_match_sets;

	/* If there's a matchset w/o an SSID, then assume it's just for
	 * the RSSI (nothing else is currently supported) and ignore it.
	 * The device only supports a global RSSI filter that we set below.
	 */
	if (n_match_sets == 1 && !request->match_sets[0].ssid.ssid_len)
		n_match_sets = 0;

	if (ar->state != ATH6KL_STATE_ON)
		return -EIO;

	if (vif->sme_state != SME_DISCONNECTED)
		return -EBUSY;

	/* Abort any ordinary scan currently in flight */
	ath6kl_cfg80211_scan_complete_event(vif, true);

	ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
				      request->n_ssids,
				      request->match_sets,
				      n_match_sets);
	if (ret < 0)
		return ret;

	/* With no matchsets report all BSSes, otherwise only matches */
	if (!n_match_sets) {
		ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
					       ALL_BSS_FILTER, 0);
		if (ret < 0)
			return ret;
	} else {
		ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
					       MATCHED_SSID_FILTER, 0);
		if (ret < 0)
			return ret;
	}

	/* Clamp the requested RSSI threshold into the firmware's range */
	if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
		     ar->fw_capabilities)) {
		if (request->min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
			rssi_thold = 0;
		else if (request->min_rssi_thold < -127)
			rssi_thold = -127;
		else
			rssi_thold = request->min_rssi_thold;

		ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
						     rssi_thold);
		if (ret) {
			ath6kl_err("failed to set RSSI threshold for scan\n");
			return ret;
		}
	}

	/* fw uses seconds, also make sure that it's >0 */
	interval = max_t(u16, 1, request->interval / 1000);

	/* NOTE(review): this scanparams result is ignored — confirm whether
	 * a failure here should abort the scheduled scan setup. */
	ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
				  interval, interval,
				  vif->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);

	/* this also clears IE in fw if it's not set */
	ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
				       WMI_FRAME_PROBE_REQ,
				       request->ie, request->ie_len);
	if (ret) {
		ath6kl_warn("Failed to set probe request IE for scheduled scan: %d\n",
			    ret);
		return ret;
	}

	ret = ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, true);
	if (ret)
		return ret;

	set_bit(SCHED_SCANNING, &vif->flags);

	return 0;
}
/* Stop a scheduled scan; -EIO when none was actually running. */
static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
				      struct net_device *dev)
{
	struct ath6kl_vif *vif = netdev_priv(dev);

	return __ath6kl_cfg80211_sscan_stop(vif) ? 0 : -EIO;
}
/* Forward the bitrate mask to firmware; the per-peer addr is unused. */
static int ath6kl_cfg80211_set_bitrate(struct wiphy *wiphy,
				       struct net_device *dev,
				       const u8 *addr,
				       const struct cfg80211_bitrate_mask *mask)
{
	struct ath6kl_vif *vif = netdev_priv(dev);
	struct ath6kl *ar = ath6kl_priv(dev);

	return ath6kl_wmi_set_bitrate_mask(ar->wmi, vif->fw_vif_idx, mask);
}
/*
 * Configure the TX-error CQM trigger: notify after @pkts failed frames at
 * rate @rate within @intvl.  Needs a connected STA interface and firmware
 * with TX-error-notify support.
 */
static int ath6kl_cfg80211_set_txe_config(struct wiphy *wiphy,
					  struct net_device *dev,
					  u32 rate, u32 pkts, u32 intvl)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_vif *vif = netdev_priv(dev);

	if (vif->nw_type != INFRA_NETWORK ||
	    !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, ar->fw_capabilities))
		return -EOPNOTSUPP;

	if (vif->sme_state != SME_CONNECTED)
		return -ENOTCONN;

	/* Firmware does not echo the interval back, so remember it here */
	vif->txe_intvl = intvl;

	return ath6kl_wmi_set_txe_notify(ar->wmi, vif->fw_vif_idx,
					 rate, pkts, intvl);
}
/*
 * Mgmt frame subtypes supported per interface type: every supported
 * iftype can transmit Action and Probe Response frames and has Action
 * and Probe Request frames reported to user space.
 */
static const struct ieee80211_txrx_stypes
ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
	[NL80211_IFTYPE_STATION] = {
		.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
	},
	[NL80211_IFTYPE_AP] = {
		.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
	},
	[NL80211_IFTYPE_P2P_CLIENT] = {
		.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
	},
	[NL80211_IFTYPE_P2P_GO] = {
		.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
	},
};
/* cfg80211 entry points implemented by this driver */
static struct cfg80211_ops ath6kl_cfg80211_ops = {
	.add_virtual_intf = ath6kl_cfg80211_add_iface,
	.del_virtual_intf = ath6kl_cfg80211_del_iface,
	.change_virtual_intf = ath6kl_cfg80211_change_iface,
	.scan = ath6kl_cfg80211_scan,
	.connect = ath6kl_cfg80211_connect,
	.disconnect = ath6kl_cfg80211_disconnect,
	.add_key = ath6kl_cfg80211_add_key,
	.get_key = ath6kl_cfg80211_get_key,
	.del_key = ath6kl_cfg80211_del_key,
	.set_default_key = ath6kl_cfg80211_set_default_key,
	.set_wiphy_params = ath6kl_cfg80211_set_wiphy_params,
	.set_tx_power = ath6kl_cfg80211_set_txpower,
	.get_tx_power = ath6kl_cfg80211_get_txpower,
	.set_power_mgmt = ath6kl_cfg80211_set_power_mgmt,
	.join_ibss = ath6kl_cfg80211_join_ibss,
	.leave_ibss = ath6kl_cfg80211_leave_ibss,
	.get_station = ath6kl_get_station,
	.set_pmksa = ath6kl_set_pmksa,
	.del_pmksa = ath6kl_del_pmksa,
	.flush_pmksa = ath6kl_flush_pmksa,
	CFG80211_TESTMODE_CMD(ath6kl_tm_cmd)
#ifdef CONFIG_PM
	.suspend = __ath6kl_cfg80211_suspend,
	.resume = __ath6kl_cfg80211_resume,
#endif
	.start_ap = ath6kl_start_ap,
	.change_beacon = ath6kl_change_beacon,
	.stop_ap = ath6kl_stop_ap,
	.del_station = ath6kl_del_station,
	.change_station = ath6kl_change_station,
	.remain_on_channel = ath6kl_remain_on_channel,
	.cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
	.mgmt_tx = ath6kl_mgmt_tx,
	.mgmt_frame_register = ath6kl_mgmt_frame_register,
	.sched_scan_start = ath6kl_cfg80211_sscan_start,
	.sched_scan_stop = ath6kl_cfg80211_sscan_stop,
	.set_bitrate_mask = ath6kl_cfg80211_set_bitrate,
	.set_cqm_txe_config = ath6kl_cfg80211_set_txe_config,
};
/*
 * Bring a vif's connection state down cleanly: finish any pending SME
 * transaction towards cfg80211, issue a firmware disconnect (skipped
 * during firmware recovery, when the target is unresponsive), stop the
 * queues and disable scanning.
 */
void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
{
	/* Make sure a pending scheduled scan is terminated first */
	ath6kl_cfg80211_sscan_disable(vif);

	switch (vif->sme_state) {
	case SME_DISCONNECTED:
		break;
	case SME_CONNECTING:
		/* Report the pending connect attempt as failed */
		cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0,
					NULL, 0,
					WLAN_STATUS_UNSPECIFIED_FAILURE,
					GFP_KERNEL);
		break;
	case SME_CONNECTED:
		cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
		break;
	}

	/* Skip WMI commands while recovering from dead firmware */
	if (vif->ar->state != ATH6KL_STATE_RECOVERY &&
	    (test_bit(CONNECTED, &vif->flags) ||
	     test_bit(CONNECT_PEND, &vif->flags)))
		ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);

	vif->sme_state = SME_DISCONNECTED;
	clear_bit(CONNECTED, &vif->flags);
	clear_bit(CONNECT_PEND, &vif->flags);

	/* Stop netdev queues, needed during recovery */
	netif_stop_queue(vif->ndev);
	netif_carrier_off(vif->ndev);

	/* disable scanning */
	if (vif->ar->state != ATH6KL_STATE_RECOVERY &&
	    ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF,
				      0, 0, 0, 0, 0, 0, 0, 0, 0) != 0)
		ath6kl_warn("failed to disable scan during stop\n");

	ath6kl_cfg80211_scan_complete_event(vif, true);
}
/*
 * Stop every vif on the device.  When no vif exists (and we are not in
 * recovery) the target is dropped into the REC_POWER power-save mode
 * instead, after saving the current mode for later restore.
 */
void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
{
	struct ath6kl_vif *vif;

	vif = ath6kl_vif_first(ar);
	if (!vif && ar->state != ATH6KL_STATE_RECOVERY) {
		/* save the current power mode before enabling power save */
		ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;

		if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
			ath6kl_warn("ath6kl_deep_sleep_enable: wmi_powermode_cmd failed\n");
		return;
	}

	/*
	 * FIXME: we should take ar->list_lock to protect changes in the
	 * vif_list, but that's not trivial to do as ath6kl_cfg80211_stop()
	 * sleeps.
	 */
	list_for_each_entry(vif, &ar->vif_list, list)
		ath6kl_cfg80211_stop(vif);
}
/*
 * Regulatory notifier: forward cellular-base-station regdomain hints to
 * firmware and kick off a scan, which is what makes firmware actually
 * apply the new regdomain.
 */
static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
				       struct regulatory_request *request)
{
	struct ath6kl *ar = wiphy_priv(wiphy);
	u32 rates[IEEE80211_NUM_BANDS];
	int ret, i;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
		   "cfg reg_notify %c%c%s%s initiator %d hint_type %d\n",
		   request->alpha2[0], request->alpha2[1],
		   request->intersect ? " intersect" : "",
		   request->processed ? " processed" : "",
		   request->initiator, request->user_reg_hint_type);

	/* Only cellular-base-station initiated hints are handled */
	if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE)
		return;

	ret = ath6kl_wmi_set_regdomain_cmd(ar->wmi, request->alpha2);
	if (ret) {
		ath6kl_err("failed to set regdomain: %d\n", ret);
		return;
	}

	/*
	 * Firmware will apply the regdomain change only after a scan is
	 * issued and it will send a WMI_REGDOMAIN_EVENTID when it has been
	 * changed.
	 */

	/* Build an all-rates mask per band.
	 * NOTE(review): assumes n_bitrates < 32 so the shift is defined —
	 * true for 802.11 legacy rate sets, but worth confirming. */
	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
		if (wiphy->bands[i])
			rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;

	ret = ath6kl_wmi_beginscan_cmd(ar->wmi, 0, WMI_LONG_SCAN, false,
				       false, 0, ATH6KL_FG_SCAN_INTERVAL,
				       0, NULL, false, rates);
	if (ret) {
		ath6kl_err("failed to start scan for a regdomain change: %d\n",
			   ret);
		return;
	}
}
/*
 * Per-vif software init: aggregation context, timers, lock, multicast
 * filter list and default flags.  Returns -ENOMEM if aggr setup fails.
 */
static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
{
	vif->aggr_cntxt = aggr_init(vif);
	if (!vif->aggr_cntxt) {
		ath6kl_err("failed to initialize aggr\n");
		return -ENOMEM;
	}

	/* Timers for disconnect timeout and scheduled-scan expiry */
	setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
		    (unsigned long) vif->ndev);
	setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer,
		    (unsigned long) vif);

	spin_lock_init(&vif->if_lock);
	INIT_LIST_HEAD(&vif->mc_filter);
	set_bit(WMM_ENABLED, &vif->flags);

	return 0;
}
/*
 * Quiesce a vif when its interface goes down: stop the queue, disconnect
 * (when WMI is still usable), cancel the disconnect timer, finish any
 * pending scan, and clear the enhanced-bmiss firmware state.
 */
void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready)
{
	static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	bool discon_issued;

	netif_stop_queue(vif->ndev);

	clear_bit(WLAN_ENABLED, &vif->flags);

	if (wmi_ready) {
		discon_issued = test_bit(CONNECTED, &vif->flags) ||
				test_bit(CONNECT_PEND, &vif->flags);
		ath6kl_disconnect(vif);
		del_timer(&vif->disconnect_timer);

		if (discon_issued)
			/* In AP mode report the broadcast address, i.e.
			 * all stations, as disconnected */
			ath6kl_disconnect_event(vif, DISCONNECT_CMD,
						(vif->nw_type & AP_NETWORK) ?
						bcast_mac : vif->bssid,
						0, NULL, 0);
	}

	if (vif->scan_req) {
		/* Report the interrupted scan as aborted */
		cfg80211_scan_done(vif->scan_req, true);
		vif->scan_req = NULL;
	}

	/* need to clean up enhanced bmiss detection fw state */
	ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
}
/* Tear down a vif created by ath6kl_interface_add(): destroy aggregation
 * state, return the firmware vif index to the free map, drop queued
 * multicast filter entries, and unregister the net device.
 * NOTE(review): unregister_netdevice() requires the caller to hold rtnl.
 */
void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
{
	struct ath6kl *ar = vif->ar;
	struct ath6kl_mc_filter *mc_filter, *tmp;

	aggr_module_destroy(vif->aggr_cntxt);

	/* make the fw vif index available for future interface_add calls */
	ar->avail_idx_map |= BIT(vif->fw_vif_idx);

	if (vif->nw_type == ADHOC_NETWORK)
		ar->ibss_if_active = false;

	list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) {
		list_del(&mc_filter->list);
		kfree(mc_filter);
	}

	unregister_netdevice(vif->ndev);

	ar->num_vif--;
}
/* Allocate and register a net device + wireless_dev for a new virtual
 * interface and initialize its per-vif state.  On success the new vif is
 * appended to ar->vif_list and its wireless_dev is returned; on failure
 * NULL is returned and everything allocated here is released again.
 */
struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
					  enum nl80211_iftype type,
					  u8 fw_vif_idx, u8 nw_type)
{
	struct net_device *ndev;
	struct ath6kl_vif *vif;

	/* vif lives in the netdev private area and is freed with it */
	ndev = alloc_netdev(sizeof(*vif), name, ether_setup);
	if (!ndev)
		return NULL;

	vif = netdev_priv(ndev);
	ndev->ieee80211_ptr = &vif->wdev;
	vif->wdev.wiphy = ar->wiphy;
	vif->ar = ar;
	vif->ndev = ndev;
	SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy));
	vif->wdev.netdev = ndev;
	vif->wdev.iftype = type;
	vif->fw_vif_idx = fw_vif_idx;
	vif->nw_type = nw_type;
	vif->next_mode = nw_type;
	vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
	vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
	vif->bg_scan_period = 0;
	vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true;
	vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;

	memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
	if (fw_vif_idx != 0) {
		/* derive a distinct, locally-administered MAC for secondary
		 * vifs; presumably matches the firmware's own per-vif MAC
		 * derivation -- TODO confirm against firmware docs
		 */
		ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
				     0x2;
		if (test_bit(ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR,
			     ar->fw_capabilities))
			ndev->dev_addr[4] ^= 0x80;
	}

	init_netdev(ndev);

	ath6kl_init_control_info(vif);

	if (ath6kl_cfg80211_vif_init(vif))
		goto err;

	if (register_netdevice(ndev))
		goto err;

	/* mark the fw vif index as in use only after registration succeeds */
	ar->avail_idx_map &= ~BIT(fw_vif_idx);
	vif->sme_state = SME_DISCONNECTED;
	set_bit(WLAN_ENABLED, &vif->flags);
	ar->wlan_pwr_state = WLAN_POWER_STATE_ON;

	if (type == NL80211_IFTYPE_ADHOC)
		ar->ibss_if_active = true;

	spin_lock_bh(&ar->list_lock);
	list_add_tail(&vif->list, &ar->vif_list);
	spin_unlock_bh(&ar->list_lock);

	return &vif->wdev;

err:
	/* NOTE(review): if vif_init itself failed, aggr_cntxt is NULL here;
	 * assumes aggr_module_destroy() tolerates NULL -- TODO confirm
	 */
	aggr_module_destroy(vif->aggr_cntxt);
	free_netdev(ndev);
	return NULL;
}
/* Populate the wiphy from the hardware/firmware capabilities and register
 * it with cfg80211.  Returns 0 on success, -EINVAL for an unknown phy
 * capability, or the wiphy_register() error.
 */
int ath6kl_cfg80211_init(struct ath6kl *ar)
{
	struct wiphy *wiphy = ar->wiphy;
	bool band_2gig = false, band_5gig = false, ht = false;
	int ret;

	wiphy->mgmt_stypes = ath6kl_mgmt_stypes;

	/* remain-on-channel limit in ms */
	wiphy->max_remain_on_channel_duration = 5000;

	/* set device pointer for wiphy */
	set_wiphy_dev(wiphy, ar->dev);

	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				 BIT(NL80211_IFTYPE_ADHOC) |
				 BIT(NL80211_IFTYPE_AP);
	if (ar->p2p) {
		wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
					  BIT(NL80211_IFTYPE_P2P_CLIENT);
	}

	if (config_enabled(CONFIG_ATH6KL_REGDOMAIN) &&
	    test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) {
		wiphy->reg_notifier = ath6kl_cfg80211_reg_notify;
		ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS;
	}

	/* max num of ssids that can be probed during scanning */
	wiphy->max_scan_ssids = MAX_PROBED_SSIDS;

	/* max num of ssids that can be matched after scan */
	if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
		     ar->fw_capabilities))
		wiphy->max_match_sets = MAX_PROBED_SSIDS;

	wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */

	/* the "N" capabilities imply the corresponding legacy band too */
	switch (ar->hw.cap) {
	case WMI_11AN_CAP:
		ht = true;
		/* fall through */
	case WMI_11A_CAP:
		band_5gig = true;
		break;
	case WMI_11GN_CAP:
		ht = true;
		/* fall through */
	case WMI_11G_CAP:
		band_2gig = true;
		break;
	case WMI_11AGN_CAP:
		ht = true;
		/* fall through */
	case WMI_11AG_CAP:
		band_2gig = true;
		band_5gig = true;
		break;
	default:
		ath6kl_err("invalid phy capability!\n");
		return -EINVAL;
	}

	/*
	 * Even if the fw has HT support, advertise HT cap only when
	 * the firmware has support to override RSN capability, otherwise
	 * 4-way handshake would fail.
	 */
	if (!(ht &&
	      test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
		       ar->fw_capabilities))) {
		ath6kl_band_2ghz.ht_cap.cap = 0;
		ath6kl_band_2ghz.ht_cap.ht_supported = false;
		ath6kl_band_5ghz.ht_cap.cap = 0;
		ath6kl_band_5ghz.ht_cap.ht_supported = false;
	}

	/* 64-bit rate support means two MCS rx_mask bytes (2 streams) */
	if (ar->hw.flags & ATH6KL_HW_64BIT_RATES) {
		ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
		ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
		ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
		ath6kl_band_5ghz.ht_cap.mcs.rx_mask[1] = 0xff;
	} else {
		ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
		ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
	}

	if (band_2gig)
		wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
	if (band_5gig)
		wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;

	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;

	wiphy->cipher_suites = cipher_suites;
	wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

#ifdef CONFIG_PM
	wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
			      WIPHY_WOWLAN_DISCONNECT |
			      WIPHY_WOWLAN_GTK_REKEY_FAILURE |
			      WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
			      WIPHY_WOWLAN_EAP_IDENTITY_REQ |
			      WIPHY_WOWLAN_4WAY_HANDSHAKE;
	wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
	wiphy->wowlan.pattern_min_len = 1;
	wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
#endif

	wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS;

	ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
			    WIPHY_FLAG_HAVE_AP_SME |
			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
			    WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;

	if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, ar->fw_capabilities))
		ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;

	if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
		     ar->fw_capabilities))
		ar->wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;

	ar->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	ret = wiphy_register(wiphy);
	if (ret < 0) {
		ath6kl_err("couldn't register wiphy device\n");
		return ret;
	}

	ar->wiphy_registered = true;

	return 0;
}
/* Undo ath6kl_cfg80211_init(): remove the wiphy from cfg80211 and clear
 * the registration bookkeeping flag.  The wiphy itself is not freed here
 * (see ath6kl_cfg80211_destroy()).
 */
void ath6kl_cfg80211_cleanup(struct ath6kl *ar)
{
	struct wiphy *wiphy = ar->wiphy;

	wiphy_unregister(wiphy);

	ar->wiphy_registered = false;
}
/* Allocate a wiphy with the ath6kl private area embedded, and link the
 * private structure back to its wiphy.  Returns the private ath6kl
 * instance, or NULL when the allocation fails.
 */
struct ath6kl *ath6kl_cfg80211_create(void)
{
	struct wiphy *new_wiphy;
	struct ath6kl *priv;

	/* create a new wiphy for use with cfg80211 */
	new_wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
	if (new_wiphy == NULL) {
		ath6kl_err("couldn't allocate wiphy device\n");
		return NULL;
	}

	priv = wiphy_priv(new_wiphy);
	priv->wiphy = new_wiphy;

	return priv;
}
/* Note: ar variable must not be accessed after calling this! */
void ath6kl_cfg80211_destroy(struct ath6kl *ar)
{
	int idx;

	/* release per-station aggregation state before freeing the wiphy,
	 * whose private area contains *ar itself
	 */
	for (idx = 0; idx < AP_MAX_NUM_STA; idx++)
		kfree(ar->sta_list[idx].aggr_conn);

	wiphy_free(ar->wiphy);
}
| gpl-2.0 |
metacloud/linux | drivers/s390/scsi/zfcp_erp.c | 1768 | 44522 | /*
* zfcp device driver
*
* Error Recovery Procedures (ERP).
*
* Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kthread.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
/* retry limit: after this many failed attempts the target object is
 * marked ZFCP_STATUS_COMMON_ERP_FAILED (see the erp_counter checks)
 */
#define ZFCP_MAX_ERPS 3
/* flags carried in zfcp_erp_action.status; kept in the high bits so they
 * do not collide with the common ZFCP_STATUS_* object status flags
 */
enum zfcp_erp_act_flags {
	ZFCP_STATUS_ERP_TIMEDOUT	= 0x10000000, /* FSF request timed out */
	ZFCP_STATUS_ERP_CLOSE_ONLY	= 0x01000000, /* close, do not reopen */
	ZFCP_STATUS_ERP_DISMISSING	= 0x00100000,
	ZFCP_STATUS_ERP_DISMISSED	= 0x00200000, /* superseded by newer action */
	ZFCP_STATUS_ERP_LOWMEM		= 0x00400000,
	ZFCP_STATUS_ERP_NO_REF		= 0x00800000, /* no scsi_device reference held */
};
/* progress marker of an ERP action, recorded in erp_action->step and
 * consulted by the per-object strategy state machines below
 */
enum zfcp_erp_steps {
	ZFCP_ERP_STEP_UNINITIALIZED	= 0x0000,
	ZFCP_ERP_STEP_FSF_XCONFIG	= 0x0001,
	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
	ZFCP_ERP_STEP_LUN_CLOSING	= 0x1000,
	ZFCP_ERP_STEP_LUN_OPENING	= 0x2000,
};
/* kind of recovery; ordered from most specific (LUN) to most general
 * (adapter) -- zfcp_erp_required_act() escalates along this order
 */
enum zfcp_erp_act_type {
	ZFCP_ERP_ACTION_REOPEN_LUN         = 1,
	ZFCP_ERP_ACTION_REOPEN_PORT        = 2,
	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
	ZFCP_ERP_ACTION_REOPEN_ADAPTER     = 4,
};
/* which ERP list an action currently sits on (running vs. ready) */
enum zfcp_erp_act_state {
	ZFCP_ERP_ACTION_RUNNING = 1,
	ZFCP_ERP_ACTION_READY   = 2,
};
/* return values of the strategy functions driving an ERP action */
enum zfcp_erp_act_result {
	ZFCP_ERP_SUCCEEDED = 0,
	ZFCP_ERP_FAILED    = 1,
	ZFCP_ERP_CONTINUES = 2, /* waiting for an FSF request to complete */
	ZFCP_ERP_EXIT      = 3,
	ZFCP_ERP_DISMISSED = 4,
	ZFCP_ERP_NOMEM     = 5, /* retry later after memory wait */
};
/* block the adapter for normal traffic by clearing UNBLOCKED (plus any
 * extra bits in @mask) from its status
 */
static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
{
	zfcp_erp_clear_adapter_status(adapter,
				       ZFCP_STATUS_COMMON_UNBLOCKED | mask);
}
/* return ZFCP_ERP_ACTION_RUNNING if @act is currently on the adapter's
 * running list, 0 otherwise; caller must hold the erp_lock
 */
static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
{
	struct zfcp_erp_action *curr_act;

	list_for_each_entry(curr_act, &act->adapter->erp_running_head, list)
		if (act == curr_act)
			return ZFCP_ERP_ACTION_RUNNING;
	return 0;
}
/* move @act from the running to the ready list and wake the ERP thread;
 * the two trace records bracket the wake-up for debugging
 */
static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
{
	struct zfcp_adapter *adapter = act->adapter;

	list_move(&act->list, &act->adapter->erp_ready_head);
	zfcp_dbf_rec_run("erardy1", act);
	wake_up(&adapter->erp_ready_wq);
	zfcp_dbf_rec_run("erardy2", act);
}
/* mark @act dismissed (superseded by a newer action); if it is currently
 * running, hand it back to the ERP thread so it can be finished off
 */
static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
{
	act->status |= ZFCP_STATUS_ERP_DISMISSED;
	if (zfcp_erp_action_exists(act) == ZFCP_ERP_ACTION_RUNNING)
		zfcp_erp_action_ready(act);
}
/* dismiss the LUN's ERP action, but only if one is actually in use */
static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
		zfcp_erp_action_dismiss(&zfcp_sdev->erp_action);
}
/* dismiss the port's own ERP action if one is in use; otherwise dismiss
 * the ERP actions of all LUNs behind this port (a port-level action
 * supersedes the per-LUN ones)
 */
static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
{
	struct scsi_device *sdev;

	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
		zfcp_erp_action_dismiss(&port->erp_action);
	else {
		/* host_lock serializes against device list changes */
		spin_lock(port->adapter->scsi_host->host_lock);
		__shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				zfcp_erp_action_dismiss_lun(sdev);
		spin_unlock(port->adapter->scsi_host->host_lock);
	}
}
/* dismiss the adapter's own ERP action if one is in use; otherwise walk
 * all ports and dismiss their (and, transitively, their LUNs') actions
 */
static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
{
	struct zfcp_port *port;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
		zfcp_erp_action_dismiss(&adapter->erp_action);
	else {
		read_lock(&adapter->port_list_lock);
		list_for_each_entry(port, &adapter->port_list, list)
			zfcp_erp_action_dismiss_port(port);
		read_unlock(&adapter->port_list_lock);
	}
}
/* Decide which recovery action is actually required for a requested one.
 * The switch deliberately falls through from the most specific level
 * (LUN) up to the adapter: each level may escalate @need to the next
 * broader action when its parent object is blocked, or return 0 when
 * recovery is already in progress or cannot succeed (ERP_FAILED parent).
 */
static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
				 struct zfcp_port *port,
				 struct scsi_device *sdev)
{
	int need = want;
	int l_status, p_status, a_status;
	struct zfcp_scsi_dev *zfcp_sdev;

	switch (want) {
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		zfcp_sdev = sdev_to_zfcp(sdev);
		l_status = atomic_read(&zfcp_sdev->status);
		if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE)
			return 0; /* LUN recovery already running */
		p_status = atomic_read(&port->status);
		if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
		      p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
			return 0; /* port unusable, LUN reopen pointless */
		if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
			need = ZFCP_ERP_ACTION_REOPEN_PORT;
		/* fall through */
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
		p_status = atomic_read(&port->status);
		if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
			need = ZFCP_ERP_ACTION_REOPEN_PORT;
		/* fall through */
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		p_status = atomic_read(&port->status);
		if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
			return 0; /* port recovery already running */
		a_status = atomic_read(&adapter->status);
		if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
		      a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
			return 0; /* adapter unusable */
		if (p_status & ZFCP_STATUS_COMMON_NOESC)
			return need; /* no-escalation port: stop here */
		if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
			need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
		/* fall through */
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		a_status = atomic_read(&adapter->status);
		if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
			return 0; /* adapter recovery already running */
		if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) &&
		    !(a_status & ZFCP_STATUS_COMMON_OPEN))
			return 0; /* shutdown requested for closed adapter */
	}

	return need;
}
/* Prepare the per-object embedded erp_action for the decided action type:
 * take a reference on the target object (scsi_device / port / adapter),
 * mark it ERP_INUSE, and reset the action structure.  When the object is
 * not RUNNING, the action is degraded to close-only.  Returns NULL if the
 * needed reference cannot be obtained.
 */
static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
						  struct zfcp_adapter *adapter,
						  struct zfcp_port *port,
						  struct scsi_device *sdev)
{
	struct zfcp_erp_action *erp_action;
	struct zfcp_scsi_dev *zfcp_sdev;

	switch (need) {
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		zfcp_sdev = sdev_to_zfcp(sdev);
		/* NO_REF callers (shutdown_wait) must not take a sdev ref */
		if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
			if (scsi_device_get(sdev))
				return NULL;
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
				&zfcp_sdev->status);
		erp_action = &zfcp_sdev->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
		erp_action->port = port;
		erp_action->sdev = sdev;
		if (!(atomic_read(&zfcp_sdev->status) &
		      ZFCP_STATUS_COMMON_RUNNING))
			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	case ZFCP_ERP_ACTION_REOPEN_PORT:
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
		if (!get_device(&port->dev))
			return NULL;
		/* a port action supersedes any LUN actions below it */
		zfcp_erp_action_dismiss_port(port);
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
		erp_action = &port->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
		erp_action->port = port;
		if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		kref_get(&adapter->ref);
		/* an adapter action supersedes all port/LUN actions */
		zfcp_erp_action_dismiss_adapter(adapter);
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
		erp_action = &adapter->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING))
			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	default:
		return NULL;
	}

	erp_action->adapter = adapter;
	erp_action->action = need;
	erp_action->status = act_status;

	return erp_action;
}
/* Queue a new ERP action on the adapter's ready list and wake the ERP
 * thread.  Caller must hold adapter->erp_lock (all callers in this file
 * take it as a write lock).  Returns 0 when an action was queued, 1 when
 * none was needed or setup failed, -EIO when no ERP thread exists.  The
 * trigger is traced in every case.
 */
static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
				   struct zfcp_port *port,
				   struct scsi_device *sdev,
				   char *id, u32 act_status)
{
	int retval = 1, need;
	struct zfcp_erp_action *act;

	if (!adapter->erp_thread)
		return -EIO;

	need = zfcp_erp_required_act(want, adapter, port, sdev);
	if (!need)
		goto out;

	act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
	if (!act)
		goto out;
	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
	++adapter->erp_total_count;
	list_add_tail(&act->list, &adapter->erp_ready_head);
	wake_up(&adapter->erp_ready_wq);
	retval = 0;
 out:
	zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
	return retval;
}
/* lock-free core of adapter reopen: block the adapter, block the rports,
 * then either re-assert ERP_FAILED (so newly attached devices inherit it)
 * or enqueue the reopen action; caller must hold the erp_lock
 */
static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
				    int clear_mask, char *id)
{
	zfcp_erp_adapter_block(adapter, clear_mask);
	zfcp_scsi_schedule_rports_block(adapter);

	/* ensure propagation of failed status to new devices */
	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
		zfcp_erp_set_adapter_status(adapter,
					    ZFCP_STATUS_COMMON_ERP_FAILED);
		return -EIO;
	}
	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
				       adapter, NULL, NULL, id, 0);
}
/**
 * zfcp_erp_adapter_reopen - Reopen adapter.
 * @adapter: Adapter to reopen.
 * @clear: Status flags to clear.
 * @id: Id for debug trace event.
 *
 * Blocks the adapter and its remote ports, then queues an adapter-reopen
 * ERP action under the erp_lock.  If the adapter is already marked
 * ERP_FAILED, the failed status is re-asserted instead so that newly
 * attached devices inherit it.
 */
void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
{
	unsigned long flags;

	zfcp_erp_adapter_block(adapter, clear);
	zfcp_scsi_schedule_rports_block(adapter);

	write_lock_irqsave(&adapter->erp_lock, flags);
	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
		zfcp_erp_set_adapter_status(adapter,
					    ZFCP_STATUS_COMMON_ERP_FAILED);
	else
		zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
					NULL, NULL, id, 0);
	write_unlock_irqrestore(&adapter->erp_lock, flags);
}
/**
 * zfcp_erp_adapter_shutdown - Shutdown adapter.
 * @adapter: Adapter to shut down.
 * @clear: Status flags to clear.
 * @id: Id for debug trace event.
 *
 * A shutdown is a reopen with RUNNING and ERP_FAILED additionally
 * cleared, which makes the ERP close the adapter without reopening it.
 */
void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
			       char *id)
{
	zfcp_erp_adapter_reopen(adapter,
				clear | ZFCP_STATUS_COMMON_RUNNING |
				ZFCP_STATUS_COMMON_ERP_FAILED, id);
}
/**
 * zfcp_erp_port_shutdown - Shutdown port
 * @port: Port to shut down.
 * @clear: Status flags to clear.
 * @id: Id for debug trace event.
 *
 * Clearing RUNNING and ERP_FAILED in addition to @clear turns the reopen
 * into a close-only operation.
 */
void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
{
	zfcp_erp_port_reopen(port,
			     clear | ZFCP_STATUS_COMMON_RUNNING |
			     ZFCP_STATUS_COMMON_ERP_FAILED, id);
}
/* block the port for normal traffic by clearing UNBLOCKED (plus @clear) */
static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
{
	zfcp_erp_clear_port_status(port,
				   ZFCP_STATUS_COMMON_UNBLOCKED | clear);
}
/* lock-free core of forced port reopen; does nothing when the port is
 * already marked ERP_FAILED; caller must hold the erp_lock
 */
static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
					 char *id)
{
	zfcp_erp_port_block(port, clear);
	zfcp_scsi_schedule_rport_block(port);

	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
		return;

	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
				port->adapter, port, NULL, id, 0);
}
/**
 * zfcp_erp_port_forced_reopen - Forced close of port and open again
 * @port: Port to force close and to reopen.
 * @clear: Status flags to clear.
 * @id: Id for debug trace event.
 *
 * Locked wrapper around _zfcp_erp_port_forced_reopen().
 */
void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = port->adapter;

	write_lock_irqsave(&adapter->erp_lock, flags);
	_zfcp_erp_port_forced_reopen(port, clear, id);
	write_unlock_irqrestore(&adapter->erp_lock, flags);
}
/* lock-free core of port reopen; re-asserts ERP_FAILED (so new devices
 * inherit it) and returns -EIO when the port already failed, otherwise
 * enqueues the reopen action; caller must hold the erp_lock
 */
static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
{
	zfcp_erp_port_block(port, clear);
	zfcp_scsi_schedule_rport_block(port);

	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
		/* ensure propagation of failed status to new devices */
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
		return -EIO;
	}

	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
				       port->adapter, port, NULL, id, 0);
}
/**
 * zfcp_erp_port_reopen - trigger remote port recovery
 * @port: port to recover
 * @clear: flags in port status to be cleared
 * @id: Id for debug trace event.
 *
 * Returns 0 if recovery has been triggered, < 0 if not.
 */
int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
{
	int retval;
	unsigned long flags;
	struct zfcp_adapter *adapter = port->adapter;

	write_lock_irqsave(&adapter->erp_lock, flags);
	retval = _zfcp_erp_port_reopen(port, clear, id);
	write_unlock_irqrestore(&adapter->erp_lock, flags);

	return retval;
}
/* block the LUN for normal traffic by clearing UNBLOCKED (plus @clear_mask) */
static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
{
	zfcp_erp_clear_lun_status(sdev,
				  ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
}
/* lock-free core of LUN reopen; does nothing when the LUN is already
 * marked ERP_FAILED; caller must hold the erp_lock
 */
static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
				 u32 act_status)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;

	zfcp_erp_lun_block(sdev, clear);

	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
		return;

	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
				zfcp_sdev->port, sdev, id, act_status);
}
/**
 * zfcp_erp_lun_reopen - initiate reopen of a LUN
 * @sdev: SCSI device / LUN to be reopened
 * @clear: specifies flags in LUN status to be cleared
 * @id: Id for debug trace event.
 *
 * Locked wrapper around _zfcp_erp_lun_reopen().  Returns nothing; when
 * the LUN is already marked ERP_FAILED no action is queued.
 */
void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
{
	unsigned long flags;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_port *port = zfcp_sdev->port;
	struct zfcp_adapter *adapter = port->adapter;

	write_lock_irqsave(&adapter->erp_lock, flags);
	_zfcp_erp_lun_reopen(sdev, clear, id, 0);
	write_unlock_irqrestore(&adapter->erp_lock, flags);
}
/**
 * zfcp_erp_lun_shutdown - Shutdown LUN
 * @sdev: SCSI device / LUN to shut down.
 * @clear: Status flags to clear.
 * @id: Id for debug trace event.
 *
 * Clearing RUNNING and ERP_FAILED in addition to @clear turns the reopen
 * into a close-only operation.
 */
void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
{
	zfcp_erp_lun_reopen(sdev,
			    clear | ZFCP_STATUS_COMMON_RUNNING |
			    ZFCP_STATUS_COMMON_ERP_FAILED, id);
}
/**
 * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
 * @sdev: SCSI device / LUN to shut down.
 * @id: Id for debug trace event.
 *
 * Do not acquire a reference for the LUN when creating the ERP
 * action. It is safe, because this function waits for the ERP to
 * complete first. This allows to shutdown the LUN, even when the SCSI
 * device is in the state SDEV_DEL when scsi_device_get will fail.
 */
void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
{
	unsigned long flags;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_port *port = zfcp_sdev->port;
	struct zfcp_adapter *adapter = port->adapter;
	int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;

	write_lock_irqsave(&adapter->erp_lock, flags);
	/* NO_REF: see function comment above for why no sdev ref is taken */
	_zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
	write_unlock_irqrestore(&adapter->erp_lock, flags);

	zfcp_erp_wait(adapter);
}
/* Return the bits of @mask that are currently clear in @status, i.e.
 * non-zero iff setting @mask would change at least one status bit.
 */
static int status_change_set(unsigned long mask, atomic_t *status)
{
	unsigned long current_bits = atomic_read(status);

	return (current_bits ^ mask) & mask;
}
/* set UNBLOCKED on the adapter, tracing only an actual 0->1 transition */
static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{
	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
}
/* set UNBLOCKED on the port, tracing only an actual 0->1 transition */
static void zfcp_erp_port_unblock(struct zfcp_port *port)
{
	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
}
/* set UNBLOCKED on the LUN, tracing only an actual 0->1 transition */
static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
}
/* move an action from the ready to the running list; caller must hold
 * the erp_lock
 */
static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
{
	list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
	zfcp_dbf_rec_run("erator1", erp_action);
}
/* Detach a dismissed or timed-out ERP action from its outstanding FSF
 * request: the request is marked DISMISSED and unlinked from the action
 * so a late completion cannot touch the (reused) action structure.  The
 * reqlist lock serializes against the request's completion path.
 */
static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
{
	struct zfcp_adapter *adapter = act->adapter;
	struct zfcp_fsf_req *req;

	if (!act->fsf_req_id)
		return; /* no FSF request outstanding for this action */

	spin_lock(&adapter->req_list->lock);
	req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
	if (req && req->erp_action == act) {
		if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
				   ZFCP_STATUS_ERP_TIMEDOUT)) {
			req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
			zfcp_dbf_rec_run("erscf_1", act);
			req->erp_action = NULL;
		}
		if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
			zfcp_dbf_rec_run("erscf_2", act);
		if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
			act->fsf_req_id = 0;
	} else
		/* request already completed (or never linked to us) */
		act->fsf_req_id = 0;
	spin_unlock(&adapter->req_list->lock);
}
/**
 * zfcp_erp_notify - Trigger ERP action.
 * @erp_action: ERP action to continue.
 * @set_mask: ERP action status flags to set.
 *
 * Only acts when the action is still on the running list; a completed or
 * requeued action is left alone.
 */
void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
{
	struct zfcp_adapter *adapter = erp_action->adapter;
	unsigned long flags;

	write_lock_irqsave(&adapter->erp_lock, flags);
	if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
		erp_action->status |= set_mask;
		zfcp_erp_action_ready(erp_action);
	}
	write_unlock_irqrestore(&adapter->erp_lock, flags);
}
/**
 * zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
 * @data: ERP action (from timer data)
 *
 * Timer callback: marks the action TIMEDOUT and hands it back to the ERP
 * thread via zfcp_erp_notify().
 */
void zfcp_erp_timeout_handler(unsigned long data)
{
	struct zfcp_erp_action *act = (struct zfcp_erp_action *) data;
	zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
}
/* timer callback after a low-memory wait: retry the action unchanged */
static void zfcp_erp_memwait_handler(unsigned long data)
{
	zfcp_erp_notify((struct zfcp_erp_action *)data, 0);
}
/* arm a one-second timer to retry an action that failed with NOMEM */
static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
{
	init_timer(&erp_action->timer);
	erp_action->timer.function = zfcp_erp_memwait_handler;
	erp_action->timer.data = (unsigned long) erp_action;
	erp_action->timer.expires = jiffies + HZ;
	add_timer(&erp_action->timer);
}
/* queue a reopen for every port of the adapter; caller must hold the
 * erp_lock (the port list is protected separately by port_list_lock)
 */
static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
				      int clear, char *id)
{
	struct zfcp_port *port;

	read_lock(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		_zfcp_erp_port_reopen(port, clear, id);
	read_unlock(&adapter->port_list_lock);
}
/* queue a reopen for every LUN behind @port; caller must hold the
 * erp_lock, the host_lock serializes against device list changes
 */
static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
				     char *id)
{
	struct scsi_device *sdev;

	spin_lock(port->adapter->scsi_host->host_lock);
	__shost_for_each_device(sdev, port->adapter->scsi_host)
		if (sdev_to_zfcp(sdev)->port == port)
			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
	spin_unlock(port->adapter->scsi_host->host_lock);
}
/* after a failed action, queue a retry of the same action at the same
 * level; caller must hold the erp_lock
 */
static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
{
	switch (act->action) {
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
		break;
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
		_zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2");
		break;
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		_zfcp_erp_port_reopen(act->port, 0, "ersff_3");
		break;
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		_zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0);
		break;
	}
}
/* after a successful action, queue follow-up recovery one level below:
 * adapter -> all ports, forced port -> port, port -> all its LUNs;
 * a successful LUN action needs no follow-up.  Caller holds the erp_lock.
 */
static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
{
	switch (act->action) {
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
		break;
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
		_zfcp_erp_port_reopen(act->port, 0, "ersfs_2");
		break;
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
		break;
	}
}
/* when both ERP lists are empty, clear the pending flag and wake anyone
 * sleeping in zfcp_erp_wait()
 */
static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
{
	unsigned long flags;

	read_lock_irqsave(&adapter->erp_lock, flags);
	if (list_empty(&adapter->erp_ready_head) &&
	    list_empty(&adapter->erp_running_head)) {
			atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
					  &adapter->status);
			wake_up(&adapter->erp_done_wqh);
	}
	read_unlock_irqrestore(&adapter->erp_lock, flags);
}
/* in point-to-point topology there is exactly one peer: create its port
 * object (if not yet present) and trigger its recovery
 */
static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
{
	struct zfcp_port *port;
	port = zfcp_port_enqueue(adapter, adapter->peer_wwpn, 0,
				 adapter->peer_d_id);
	if (IS_ERR(port)) /* error or port already attached */
		return;
	_zfcp_erp_port_reopen(port, 0, "ereptp1");
}
/* Exchange-config part of adapter recovery: issue the exchange-config
 * FSF request and wait for completion, retrying with exponential backoff
 * (1s, 2s, 4s, ...) for up to 7 attempts while the channel still reports
 * host-connection initialization.  In point-to-point topology the peer
 * port is enqueued afterwards.  Returns ZFCP_ERP_SUCCEEDED or
 * ZFCP_ERP_FAILED.
 */
static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
{
	int retries;
	int sleep = 1;
	struct zfcp_adapter *adapter = erp_action->adapter;

	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);

	for (retries = 7; retries; retries--) {
		atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
				  &adapter->status);
		write_lock_irq(&adapter->erp_lock);
		zfcp_erp_action_to_running(erp_action);
		write_unlock_irq(&adapter->erp_lock);
		if (zfcp_fsf_exchange_config_data(erp_action)) {
			atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
					  &adapter->status);
			return ZFCP_ERP_FAILED;
		}

		/* completion (or timeout) moves us back to the ready list */
		wait_event(adapter->erp_ready_wq,
			   !list_empty(&adapter->erp_ready_head));
		if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
			break;

		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_ADAPTER_HOST_CON_INIT))
			break;

		ssleep(sleep);
		sleep *= 2;
	}

	atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
			  &adapter->status);

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
		return ZFCP_ERP_FAILED;

	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
		zfcp_erp_enqueue_ptp_port(adapter);

	return ZFCP_ERP_SUCCEEDED;
}
/* Exchange-port-data part of adapter recovery.  -EOPNOTSUPP from the
 * channel is treated as success (older hardware).  Returns
 * ZFCP_ERP_SUCCEEDED or ZFCP_ERP_FAILED.
 */
static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
{
	int ret;
	struct zfcp_adapter *adapter = act->adapter;

	write_lock_irq(&adapter->erp_lock);
	zfcp_erp_action_to_running(act);
	write_unlock_irq(&adapter->erp_lock);

	ret = zfcp_fsf_exchange_port_data(act);
	if (ret == -EOPNOTSUPP)
		return ZFCP_ERP_SUCCEEDED;
	if (ret)
		return ZFCP_ERP_FAILED;

	zfcp_dbf_rec_run("erasox1", act);
	wait_event(adapter->erp_ready_wq,
		   !list_empty(&adapter->erp_ready_head));
	zfcp_dbf_rec_run("erasox2", act);
	if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
		return ZFCP_ERP_FAILED;

	return ZFCP_ERP_SUCCEEDED;
}
/* FSF part of adapter open: exchange config and port data, resize the
 * status-read mempools to the (possibly new) buffer count, and refill
 * the unsolicited status read queue.  Any failure aborts the open.
 */
static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
{
	if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
		return ZFCP_ERP_FAILED;

	if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
		return ZFCP_ERP_FAILED;

	if (mempool_resize(act->adapter->pool.sr_data,
			   act->adapter->stat_read_buf_num, GFP_KERNEL))
		return ZFCP_ERP_FAILED;

	if (mempool_resize(act->adapter->pool.status_read_req,
			   act->adapter->stat_read_buf_num, GFP_KERNEL))
		return ZFCP_ERP_FAILED;

	atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
	if (zfcp_status_read_refill(act->adapter))
		return ZFCP_ERP_FAILED;

	return ZFCP_ERP_SUCCEEDED;
}
/* close the adapter: shut down QDIO queues first so the hardware cannot
 * touch buffers anymore, then dismiss outstanding FSF requests and reset
 * adapter-level state
 */
static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
{
	struct zfcp_adapter *adapter = act->adapter;

	/* close queues to ensure that buffers are not accessed by adapter */
	zfcp_qdio_close(adapter->qdio);
	zfcp_fsf_req_dismiss_all(adapter);
	adapter->fsf_req_seq_no = 0;
	zfcp_fc_wka_ports_force_offline(adapter->gs);
	/* all ports and LUNs are closed */
	zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);

	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
}
/* open the adapter: bring up QDIO, then the FSF layer; on FSF failure
 * the already-opened QDIO path is closed again before reporting failure
 */
static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
{
	struct zfcp_adapter *adapter = act->adapter;

	if (zfcp_qdio_open(adapter->qdio)) {
		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
				  &adapter->status);
		return ZFCP_ERP_FAILED;
	}

	if (zfcp_erp_adapter_strategy_open_fsf(act)) {
		zfcp_erp_adapter_strategy_close(act);
		return ZFCP_ERP_FAILED;
	}

	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);

	return ZFCP_ERP_SUCCEEDED;
}
/* top-level adapter recovery: close first if currently open (and stop
 * there for close-only actions), then reopen; an open failure is padded
 * with an 8s sleep before the retry machinery kicks in
 */
static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
{
	struct zfcp_adapter *adapter = act->adapter;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN) {
		zfcp_erp_adapter_strategy_close(act);
		if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
			return ZFCP_ERP_EXIT;
	}

	if (zfcp_erp_adapter_strategy_open(act)) {
		ssleep(8);
		return ZFCP_ERP_FAILED;
	}

	return ZFCP_ERP_SUCCEEDED;
}
/* issue a close-physical-port FSF request; the step is advanced between
 * the NOMEM and generic-failure checks so a later retry resumes correctly
 */
static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
{
	int retval;

	retval = zfcp_fsf_close_physical_port(act);
	if (retval == -ENOMEM)
		return ZFCP_ERP_NOMEM;
	act->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
	if (retval)
		return ZFCP_ERP_FAILED;

	return ZFCP_ERP_CONTINUES;
}
/* reset stale access-denied state before starting port recovery */
static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
{
	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status);
}
/* state machine for forced port reopen: a physical close is only issued
 * when the port is both physically and logically open; success is the
 * physical-open bit having been cleared by the close
 */
static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
{
	struct zfcp_port *port = erp_action->port;
	int status = atomic_read(&port->status);

	switch (erp_action->step) {
	case ZFCP_ERP_STEP_UNINITIALIZED:
		zfcp_erp_port_strategy_clearstati(port);
		if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) &&
		    (status & ZFCP_STATUS_COMMON_OPEN))
			return zfcp_erp_port_forced_strategy_close(erp_action);
		else
			return ZFCP_ERP_FAILED;

	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
		if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
			return ZFCP_ERP_SUCCEEDED;
	}
	return ZFCP_ERP_FAILED;
}
/* issue a close-port FSF request; step is advanced between the NOMEM and
 * generic-failure checks so a later retry resumes correctly
 */
static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
{
	int retval;

	retval = zfcp_fsf_close_port(erp_action);
	if (retval == -ENOMEM)
		return ZFCP_ERP_NOMEM;
	erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
	if (retval)
		return ZFCP_ERP_FAILED;
	return ZFCP_ERP_CONTINUES;
}
/* issue an open-port FSF request; step is advanced between the NOMEM and
 * generic-failure checks so a later retry resumes correctly
 */
static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
{
	int retval;

	retval = zfcp_fsf_open_port(erp_action);
	if (retval == -ENOMEM)
		return ZFCP_ERP_NOMEM;
	erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
	if (retval)
		return ZFCP_ERP_FAILED;
	return ZFCP_ERP_CONTINUES;
}
/* In point-to-point topology only the known peer may be opened: any port
 * whose WWPN differs from the peer's is marked ERP_FAILED.  For the peer
 * itself the destination id is taken from the adapter and the regular
 * open-port request is issued.
 */
static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
{
	struct zfcp_adapter *adapter = act->adapter;
	struct zfcp_port *port = act->port;

	if (port->wwpn == adapter->peer_wwpn) {
		port->d_id = adapter->peer_d_id;
		return zfcp_erp_port_strategy_open_port(act);
	}

	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
	return ZFCP_ERP_FAILED;
}
/* Open phase of port recovery.  Before opening, a missing destination id
 * triggers an asynchronous nameserver lookup (the action exits and is
 * re-triggered when the lookup completes).  After an open attempt, an
 * open port may still need a d_id re-lookup; a failed open on a port
 * with a d_id invalidates that d_id unless the port is no-escalation.
 */
static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
{
	struct zfcp_adapter *adapter = act->adapter;
	struct zfcp_port *port = act->port;
	int p_status = atomic_read(&port->status);

	switch (act->step) {
	case ZFCP_ERP_STEP_UNINITIALIZED:
	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
	case ZFCP_ERP_STEP_PORT_CLOSING:
		if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
			return zfcp_erp_open_ptp_port(act);
		if (!port->d_id) {
			/* async lookup; action resumes on completion */
			zfcp_fc_trigger_did_lookup(port);
			return ZFCP_ERP_EXIT;
		}
		return zfcp_erp_port_strategy_open_port(act);

	case ZFCP_ERP_STEP_PORT_OPENING:
		/* D_ID might have changed during open */
		if (p_status & ZFCP_STATUS_COMMON_OPEN) {
			if (!port->d_id) {
				zfcp_fc_trigger_did_lookup(port);
				return ZFCP_ERP_EXIT;
			}
			return ZFCP_ERP_SUCCEEDED;
		}
		if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
			port->d_id = 0;
			return ZFCP_ERP_FAILED;
		}
		/* fall through otherwise */
	}
	return ZFCP_ERP_FAILED;
}
/* top-level port recovery: close the port if needed (skipped entirely
 * for a closed no-escalation port), stop after the close for close-only
 * actions, otherwise continue with the open phase
 */
static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
{
	struct zfcp_port *port = erp_action->port;
	int p_status = atomic_read(&port->status);

	if ((p_status & ZFCP_STATUS_COMMON_NOESC) &&
	    !(p_status & ZFCP_STATUS_COMMON_OPEN))
		goto close_init_done;

	switch (erp_action->step) {
	case ZFCP_ERP_STEP_UNINITIALIZED:
		zfcp_erp_port_strategy_clearstati(port);
		if (p_status & ZFCP_STATUS_COMMON_OPEN)
			return zfcp_erp_port_strategy_close(erp_action);
		break;

	case ZFCP_ERP_STEP_PORT_CLOSING:
		/* still open after the close attempt -> give up */
		if (p_status & ZFCP_STATUS_COMMON_OPEN)
			return ZFCP_ERP_FAILED;
		break;
	}

close_init_done:
	if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
		return ZFCP_ERP_EXIT;

	return zfcp_erp_port_strategy_open_common(erp_action);
}
static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_LUN_SHARED | ZFCP_STATUS_LUN_READONLY,
&zfcp_sdev->status);
}
/*
 * Issue a close-LUN request.  On success the ERP continues when the
 * answer arrives; -ENOMEM triggers the low-memory retry path.
 */
static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
{
	int ret = zfcp_fsf_close_lun(erp_action);

	if (ret == -ENOMEM)
		return ZFCP_ERP_NOMEM;
	erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING;
	return ret ? ZFCP_ERP_FAILED : ZFCP_ERP_CONTINUES;
}
/*
 * Issue an open-LUN request.  On success the ERP continues when the
 * answer arrives; -ENOMEM triggers the low-memory retry path.
 */
static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
{
	int ret = zfcp_fsf_open_lun(erp_action);

	if (ret == -ENOMEM)
		return ZFCP_ERP_NOMEM;
	erp_action->step = ZFCP_ERP_STEP_LUN_OPENING;
	return ret ? ZFCP_ERP_FAILED : ZFCP_ERP_CONTINUES;
}
/*
 * LUN recovery state machine: close the LUN if it is open, then reopen
 * it.  Each step is re-entered when the matching FSF request completes;
 * the case fallthroughs are intentional.
 */
static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
{
	struct scsi_device *sdev = erp_action->sdev;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	switch (erp_action->step) {
	case ZFCP_ERP_STEP_UNINITIALIZED:
		zfcp_erp_lun_strategy_clearstati(sdev);
		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
			return zfcp_erp_lun_strategy_close(erp_action);
		/* already closed, fall through */
	case ZFCP_ERP_STEP_LUN_CLOSING:
		/* still open after the close step means the close failed */
		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
			return ZFCP_ERP_FAILED;
		if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
			return ZFCP_ERP_EXIT;
		return zfcp_erp_lun_strategy_open(erp_action);
	case ZFCP_ERP_STEP_LUN_OPENING:
		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
			return ZFCP_ERP_SUCCEEDED;
	}
	return ZFCP_ERP_FAILED;
}
/*
 * Evaluate the result of a LUN recovery: reset the retry counter on
 * success, otherwise bump it and mark the LUN failed once ZFCP_MAX_ERPS
 * consecutive failures have accumulated.
 */
static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	switch (result) {
	case ZFCP_ERP_SUCCEEDED :
		atomic_set(&zfcp_sdev->erp_counter, 0);
		zfcp_erp_lun_unblock(sdev);
		break;
	case ZFCP_ERP_FAILED :
		atomic_inc(&zfcp_sdev->erp_counter);
		if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) {
			dev_err(&zfcp_sdev->port->adapter->ccw_device->dev,
				"ERP failed for LUN 0x%016Lx on "
				"port 0x%016Lx\n",
				(unsigned long long)zfcp_scsi_dev_lun(sdev),
				(unsigned long long)zfcp_sdev->port->wwpn);
			zfcp_erp_set_lun_status(sdev,
						ZFCP_STATUS_COMMON_ERP_FAILED);
		}
		break;
	}
	/* a LUN marked ERP_FAILED stays blocked; leave the ERP */
	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
		zfcp_erp_lun_block(sdev, 0);
		result = ZFCP_ERP_EXIT;
	}
	return result;
}
/*
 * Evaluate the result of a port recovery: reset the retry counter on
 * success, otherwise bump it and mark the port failed once ZFCP_MAX_ERPS
 * consecutive failures have accumulated.  NOESC ports are blocked
 * immediately on failure.
 */
static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
{
	switch (result) {
	case ZFCP_ERP_SUCCEEDED :
		atomic_set(&port->erp_counter, 0);
		zfcp_erp_port_unblock(port);
		break;
	case ZFCP_ERP_FAILED :
		if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC) {
			zfcp_erp_port_block(port, 0);
			result = ZFCP_ERP_EXIT;
		}
		atomic_inc(&port->erp_counter);
		if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) {
			dev_err(&port->adapter->ccw_device->dev,
				"ERP failed for remote port 0x%016Lx\n",
				(unsigned long long)port->wwpn);
			zfcp_erp_set_port_status(port,
					 ZFCP_STATUS_COMMON_ERP_FAILED);
		}
		break;
	}
	/* a port marked ERP_FAILED stays blocked; leave the ERP */
	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
		zfcp_erp_port_block(port, 0);
		result = ZFCP_ERP_EXIT;
	}
	return result;
}
/*
 * Evaluate the result of an adapter recovery: reset the retry counter on
 * success, otherwise bump it and mark the adapter failed once
 * ZFCP_MAX_ERPS consecutive failures have accumulated.
 */
static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
					   int result)
{
	switch (result) {
	case ZFCP_ERP_SUCCEEDED :
		atomic_set(&adapter->erp_counter, 0);
		zfcp_erp_adapter_unblock(adapter);
		break;
	case ZFCP_ERP_FAILED :
		atomic_inc(&adapter->erp_counter);
		if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) {
			dev_err(&adapter->ccw_device->dev,
				"ERP cannot recover an error "
				"on the FCP device\n");
			zfcp_erp_set_adapter_status(adapter,
					    ZFCP_STATUS_COMMON_ERP_FAILED);
		}
		break;
	}
	/* an adapter marked ERP_FAILED stays blocked; leave the ERP */
	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
		zfcp_erp_adapter_block(adapter, 0);
		result = ZFCP_ERP_EXIT;
	}
	return result;
}
/*
 * Dispatch result evaluation to the per-object checker matching the
 * action type; unknown action types leave the result untouched.
 */
static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
					  int result)
{
	switch (erp_action->action) {
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		return zfcp_erp_strategy_check_adapter(erp_action->adapter,
						       result);
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		return zfcp_erp_strategy_check_port(erp_action->port, result);
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		return zfcp_erp_strategy_check_lun(erp_action->sdev, result);
	}
	return result;
}
/*
 * Detect a state change request that arrived while recovery was running:
 * returns 1 when the desired on/offline state of the target no longer
 * matches what this (close-only or full-reopen) action will produce.
 */
static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status)
{
	int running = atomic_read(target_status) & ZFCP_STATUS_COMMON_RUNNING;
	int close_only = erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY;

	if (running && close_only)
		return 1;	/* take it online */
	if (!running && !close_only)
		return 1;	/* take it offline */
	return 0;
}
/*
 * If the desired on/offline state of the recovered object changed while
 * this action was running, queue a fresh reopen for that object and
 * leave the current action with ZFCP_ERP_EXIT.
 */
static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
{
	int action = act->action;
	struct zfcp_adapter *adapter = act->adapter;
	struct zfcp_port *port = act->port;
	struct scsi_device *sdev = act->sdev;
	struct zfcp_scsi_dev *zfcp_sdev;
	u32 erp_status = act->status;
	switch (action) {
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
			_zfcp_erp_adapter_reopen(adapter,
						 ZFCP_STATUS_COMMON_ERP_FAILED,
						 "ersscg1");
			return ZFCP_ERP_EXIT;
		}
		break;
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
			_zfcp_erp_port_reopen(port,
					      ZFCP_STATUS_COMMON_ERP_FAILED,
					      "ersscg2");
			return ZFCP_ERP_EXIT;
		}
		break;
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		zfcp_sdev = sdev_to_zfcp(sdev);
		if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
			_zfcp_erp_lun_reopen(sdev,
					     ZFCP_STATUS_COMMON_ERP_FAILED,
					     "ersscg3", 0);
			return ZFCP_ERP_EXIT;
		}
		break;
	}
	return ret;
}
/*
 * Remove a finished action from the ERP list, maintain the total and
 * low-memory counters, and clear the ERP_INUSE marker on the recovered
 * object.  Caller holds the adapter's erp_lock (list manipulation).
 */
static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
{
	struct zfcp_adapter *adapter = erp_action->adapter;
	struct zfcp_scsi_dev *zfcp_sdev;
	adapter->erp_total_count--;
	if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
		adapter->erp_low_mem_count--;
		erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
	}
	list_del(&erp_action->list);
	zfcp_dbf_rec_run("eractd1", erp_action);
	switch (erp_action->action) {
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
				  &zfcp_sdev->status);
		break;
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
				  &erp_action->port->status);
		break;
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
				  &erp_action->adapter->status);
		break;
	}
}
/*
 * Per-action follow-up after an action left the ERP: drop the object
 * references held by the action (presumably taken at enqueue time —
 * verify against the setup path, which is outside this file chunk) and
 * trigger rport registration / port scan on success.
 */
static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
{
	struct zfcp_adapter *adapter = act->adapter;
	struct zfcp_port *port = act->port;
	struct scsi_device *sdev = act->sdev;
	switch (act->action) {
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		/* NO_REF actions were queued without taking an sdev ref */
		if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
			scsi_device_put(sdev);
		break;
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		if (result == ZFCP_ERP_SUCCEEDED)
			zfcp_scsi_schedule_rport_register(port);
		/* fall through */
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
		put_device(&port->dev);
		break;
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		if (result == ZFCP_ERP_SUCCEEDED) {
			register_service_level(&adapter->service_level);
			zfcp_fc_conditional_port_scan(adapter);
			queue_work(adapter->work_queue, &adapter->ns_up_work);
		} else
			unregister_service_level(&adapter->service_level);
		kref_put(&adapter->ref, zfcp_adapter_release);
		break;
	}
}
/* Dispatch an action to the strategy routine matching its type. */
static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
{
	switch (erp_action->action) {
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		return zfcp_erp_lun_strategy(erp_action);
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		return zfcp_erp_port_strategy(erp_action);
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
		return zfcp_erp_port_forced_strategy(erp_action);
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		return zfcp_erp_adapter_strategy(erp_action);
	default:
		return ZFCP_ERP_FAILED;
	}
}
/*
 * Run one ERP action to completion or to its next asynchronous step.
 * Called from the ERP thread; all queue and status manipulation happens
 * under the adapter's erp_lock, which is dropped only around the
 * (potentially blocking) strategy call itself.
 */
static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
{
	int retval;
	unsigned long flags;
	struct zfcp_adapter *adapter = erp_action->adapter;
	/* keep the adapter alive across the unlocked strategy call */
	kref_get(&adapter->ref);
	write_lock_irqsave(&adapter->erp_lock, flags);
	zfcp_erp_strategy_check_fsfreq(erp_action);
	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
		zfcp_erp_action_dequeue(erp_action);
		retval = ZFCP_ERP_DISMISSED;
		goto unlock;
	}
	if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
		retval = ZFCP_ERP_FAILED;
		goto check_target;
	}
	zfcp_erp_action_to_running(erp_action);
	/* no lock to allow for blocking operations */
	write_unlock_irqrestore(&adapter->erp_lock, flags);
	retval = zfcp_erp_strategy_do_action(erp_action);
	write_lock_irqsave(&adapter->erp_lock, flags);
	/* a dismissal that raced with the strategy call wins */
	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
		retval = ZFCP_ERP_CONTINUES;
	switch (retval) {
	case ZFCP_ERP_NOMEM:
		if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
			++adapter->erp_low_mem_count;
			erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
		}
		/* if every pending action is out of memory, start over */
		if (adapter->erp_total_count == adapter->erp_low_mem_count)
			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
		else {
			zfcp_erp_strategy_memwait(erp_action);
			retval = ZFCP_ERP_CONTINUES;
		}
		goto unlock;
	case ZFCP_ERP_CONTINUES:
		if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
			--adapter->erp_low_mem_count;
			erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
		}
		goto unlock;
	}
check_target:
	retval = zfcp_erp_strategy_check_target(erp_action, retval);
	zfcp_erp_action_dequeue(erp_action);
	retval = zfcp_erp_strategy_statechange(erp_action, retval);
	if (retval == ZFCP_ERP_EXIT)
		goto unlock;
	if (retval == ZFCP_ERP_SUCCEEDED)
		zfcp_erp_strategy_followup_success(erp_action);
	if (retval == ZFCP_ERP_FAILED)
		zfcp_erp_strategy_followup_failed(erp_action);
unlock:
	write_unlock_irqrestore(&adapter->erp_lock, flags);
	if (retval != ZFCP_ERP_CONTINUES)
		zfcp_erp_action_cleanup(erp_action, retval);
	kref_put(&adapter->ref, zfcp_adapter_release);
	return retval;
}
/*
 * Per-adapter ERP thread: run queued actions from erp_ready_head until
 * kthread_stop() is called.  The ready list head is sampled under the
 * erp_lock; zfcp_erp_strategy() re-takes the lock itself.
 */
static int zfcp_erp_thread(void *data)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
	struct list_head *next;
	struct zfcp_erp_action *act;
	unsigned long flags;
	for (;;) {
		wait_event_interruptible(adapter->erp_ready_wq,
			   !list_empty(&adapter->erp_ready_head) ||
			   kthread_should_stop());
		if (kthread_should_stop())
			break;
		write_lock_irqsave(&adapter->erp_lock, flags);
		next = adapter->erp_ready_head.next;
		write_unlock_irqrestore(&adapter->erp_lock, flags);
		if (next != &adapter->erp_ready_head) {
			act = list_entry(next, struct zfcp_erp_action, list);
			/* there is more to come after dismission, no notify */
			if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
				zfcp_erp_wakeup(adapter);
		}
	}
	return 0;
}
/**
 * zfcp_erp_thread_setup - Start ERP thread for adapter
 * @adapter: Adapter to start the ERP thread for
 *
 * Returns 0 on success or the error code from kthread_run()
 */
int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
{
	struct task_struct *thread;
	/* thread name is "zfcperp" + bus id of the FCP device */
	thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s",
			     dev_name(&adapter->ccw_device->dev));
	if (IS_ERR(thread)) {
		dev_err(&adapter->ccw_device->dev,
			"Creating an ERP thread for the FCP device failed.\n");
		return PTR_ERR(thread);
	}
	adapter->erp_thread = thread;
	return 0;
}
/**
 * zfcp_erp_thread_kill - Stop ERP thread.
 * @adapter: Adapter where the ERP thread should be stopped.
 *
 * The caller of this routine ensures that the specified adapter has
 * been shut down and that this operation has been completed. Thus,
 * there are no pending erp_actions which would need to be handled
 * here.
 */
void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
{
	kthread_stop(adapter->erp_thread);
	adapter->erp_thread = NULL;
	/* per the contract above, both action lists must be drained */
	WARN_ON(!list_empty(&adapter->erp_ready_head));
	WARN_ON(!list_empty(&adapter->erp_running_head));
}
/**
 * zfcp_erp_wait - wait for completion of error recovery on an adapter
 * @adapter: adapter for which to wait for completion of its error recovery
 */
void zfcp_erp_wait(struct zfcp_adapter *adapter)
{
	/* sleeps until the adapter's ERP_PENDING status bit is cleared */
	wait_event(adapter->erp_done_wqh,
		   !(atomic_read(&adapter->status) &
			ZFCP_STATUS_ADAPTER_ERP_PENDING));
}
/**
 * zfcp_erp_set_adapter_status - set adapter status bits
 * @adapter: adapter to change the status
 * @mask: status bits to change
 *
 * Changes in common status bits are propagated to attached ports and LUNs.
 */
void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
{
	struct zfcp_port *port;
	struct scsi_device *sdev;
	unsigned long flags;
	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
	atomic_set_mask(mask, &adapter->status);
	/* only ZFCP_COMMON_FLAGS bits are pushed down the hierarchy */
	if (!common_mask)
		return;
	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		atomic_set_mask(common_mask, &port->status);
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
	/* host_lock stabilizes the SCSI device list during the walk */
	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, adapter->scsi_host)
		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
 * zfcp_erp_clear_adapter_status - clear adapter status bits
 * @adapter: adapter to change the status
 * @mask: status bits to change
 *
 * Changes in common status bits are propagated to attached ports and LUNs.
 */
void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
{
	struct zfcp_port *port;
	struct scsi_device *sdev;
	unsigned long flags;
	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
	atomic_clear_mask(mask, &adapter->status);
	/* only ZFCP_COMMON_FLAGS bits are pushed down the hierarchy */
	if (!common_mask)
		return;
	/* clearing ERP_FAILED also restarts the retry budget */
	if (clear_counter)
		atomic_set(&adapter->erp_counter, 0);
	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list) {
		atomic_clear_mask(common_mask, &port->status);
		if (clear_counter)
			atomic_set(&port->erp_counter, 0);
	}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, adapter->scsi_host) {
		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
		if (clear_counter)
			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
	}
	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
 * zfcp_erp_set_port_status - set port status bits
 * @port: port to change the status
 * @mask: status bits to change
 *
 * Changes in common status bits are propagated to attached LUNs.
 */
void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
{
	struct scsi_device *sdev;
	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
	unsigned long flags;
	atomic_set_mask(mask, &port->status);
	/* only ZFCP_COMMON_FLAGS bits are pushed down to the LUNs */
	if (!common_mask)
		return;
	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, port->adapter->scsi_host)
		if (sdev_to_zfcp(sdev)->port == port)
			atomic_set_mask(common_mask,
					&sdev_to_zfcp(sdev)->status);
	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
 * zfcp_erp_clear_port_status - clear port status bits
 * @port: adapter to change the status
 * @mask: status bits to change
 *
 * Changes in common status bits are propagated to attached LUNs.
 */
void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
{
	struct scsi_device *sdev;
	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
	unsigned long flags;
	atomic_clear_mask(mask, &port->status);
	/* only ZFCP_COMMON_FLAGS bits are pushed down to the LUNs */
	if (!common_mask)
		return;
	/* clearing ERP_FAILED also restarts the retry budget */
	if (clear_counter)
		atomic_set(&port->erp_counter, 0);
	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, port->adapter->scsi_host)
		if (sdev_to_zfcp(sdev)->port == port) {
			atomic_clear_mask(common_mask,
					  &sdev_to_zfcp(sdev)->status);
			if (clear_counter)
				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
		}
	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
 * zfcp_erp_set_lun_status - set lun status bits
 * @sdev: SCSI device / lun to set the status bits
 * @mask: status bits to change
 */
void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
{
	atomic_set_mask(mask, &sdev_to_zfcp(sdev)->status);
}
/**
 * zfcp_erp_clear_lun_status - clear lun status bits
 * @sdev: SCSi device / lun to clear the status bits
 * @mask: status bits to change
 *
 * Clearing ZFCP_STATUS_COMMON_ERP_FAILED also resets the retry counter.
 */
void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
{
	struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);

	atomic_clear_mask(mask, &zsdev->status);
	if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
		atomic_set(&zsdev->erp_counter, 0);
}
/*
* linux/arch/arm/mach-pxa/lubbock.c
*
* Support for the Intel DBPXA250 Development Platform.
*
* Author: Nicolas Pitre
* Created: Jun 15, 2001
* Copyright: MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/major.h>
#include <linux/fb.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/smc91x.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/pxa2xx_spi.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/flash.h>
#include <asm/hardware/sa1111.h>
#include <mach/pxa25x.h>
#include <mach/gpio.h>
#include <mach/audio.h>
#include <mach/lubbock.h>
#include <mach/udc.h>
#include <mach/irda.h>
#include <mach/pxafb.h>
#include <mach/mmc.h>
#include <mach/pm.h>
#include <mach/smemc.h>
#include "generic.h"
#include "clock.h"
#include "devices.h"
/* Pin multiplexing setup, applied via pxa2xx_mfp_config() in lubbock_init(). */
static unsigned long lubbock_pin_config[] __initdata = {
	GPIO15_nCS_1,	/* CS1 - Flash */
	GPIO78_nCS_2,	/* CS2 - Baseboard FGPA */
	GPIO79_nCS_3,	/* CS3 - SMC ethernet */
	GPIO80_nCS_4,	/* CS4 - SA1111 */
	/* SSP data pins */
	GPIO23_SSP1_SCLK,
	GPIO25_SSP1_TXD,
	GPIO26_SSP1_RXD,
	/* AC97 */
	GPIO28_AC97_BITCLK,
	GPIO29_AC97_SDATA_IN_0,
	GPIO30_AC97_SDATA_OUT,
	GPIO31_AC97_SYNC,
	/* LCD - 16bpp DSTN */
	GPIOxx_LCD_DSTN_16BPP,
	/* BTUART */
	GPIO42_BTUART_RXD,
	GPIO43_BTUART_TXD,
	GPIO44_BTUART_CTS,
	GPIO45_BTUART_RTS,
	/* PC Card */
	GPIO48_nPOE,
	GPIO49_nPWE,
	GPIO50_nPIOR,
	GPIO51_nPIOW,
	GPIO52_nPCE_1,
	GPIO53_nPCE_2,
	GPIO54_nPSKTSEL,
	GPIO55_nPREG,
	GPIO56_nPWAIT,
	GPIO57_nIOIS16,
	/* MMC */
	GPIO6_MMC_CLK,
	GPIO8_MMC_CS0,
	/* wakeup */
	GPIO1_GPIO | WAKEUP_ON_EDGE_RISE,
};
/* Board FPGA registers: hex LED display and miscellaneous write register. */
#define LUB_HEXLED		__LUB_REG(LUBBOCK_FPGA_PHYS + 0x010)
#define LUB_MISC_WR		__LUB_REG(LUBBOCK_FPGA_PHYS + 0x080)
/* Show @value on the FPGA HEXLED register. */
void lubbock_set_hexled(uint32_t value)
{
	LUB_HEXLED = value;
}
/*
 * Update the bits selected by @mask in the FPGA MISC_WR register to the
 * corresponding bits of @set.  Interrupts are disabled to keep the
 * read-modify-write atomic.
 */
void lubbock_set_misc_wr(unsigned int mask, unsigned int set)
{
	unsigned long flags;
	local_irq_save(flags);
	LUB_MISC_WR = (LUB_MISC_WR & ~mask) | (set & mask);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(lubbock_set_misc_wr);
/* Software shadow of the FPGA interrupt enable mask (LUB_IRQ_MASK_EN). */
static unsigned long lubbock_irq_enabled;
/* Disable one FPGA interrupt source; also used as the ack callback. */
static void lubbock_mask_irq(struct irq_data *d)
{
	int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
	LUB_IRQ_MASK_EN = (lubbock_irq_enabled &= ~(1 << lubbock_irq));
}
/* Clear any latched state and re-enable one FPGA interrupt source. */
static void lubbock_unmask_irq(struct irq_data *d)
{
	int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
	/* the irq can be acknowledged only if deasserted, so it's done here */
	LUB_IRQ_SET_CLR &= ~(1 << lubbock_irq);
	LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq));
}
/* irq_chip for the cascaded FPGA interrupt controller; mask doubles as ack. */
static struct irq_chip lubbock_irq_chip = {
	.name		= "FPGA",
	.irq_ack	= lubbock_mask_irq,
	.irq_mask	= lubbock_mask_irq,
	.irq_unmask	= lubbock_unmask_irq,
};
/*
 * Chained handler for the FPGA cascade: loop while any enabled FPGA
 * source is pending, dispatching the lowest pending one each pass.
 */
static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
	do {
		/* clear our parent irq */
		desc->irq_data.chip->irq_ack(&desc->irq_data);
		if (likely(pending)) {
			irq = LUBBOCK_IRQ(0) + __ffs(pending);
			generic_handle_irq(irq);
		}
		pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
	} while (pending);
}
/*
 * Register the SoC interrupts, then the extra FPGA interrupts cascaded
 * off GPIO0 (falling-edge triggered).
 */
static void __init lubbock_init_irq(void)
{
	int irq;
	pxa25x_init_irq();
	/* setup extra lubbock irqs */
	for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) {
		irq_set_chip_and_handler(irq, &lubbock_irq_chip,
					 handle_level_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	irq_set_chained_handler(IRQ_GPIO(0), lubbock_irq_handler);
	irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
}
#ifdef CONFIG_PM
/* Restore the FPGA interrupt enable mask after resume from suspend. */
static void lubbock_irq_resume(void)
{
	LUB_IRQ_MASK_EN = lubbock_irq_enabled;
}
static struct syscore_ops lubbock_irq_syscore_ops = {
	.resume = lubbock_irq_resume,
};
/* Register the resume hook, but only when actually running on Lubbock. */
static int __init lubbock_irq_device_init(void)
{
	if (machine_is_lubbock()) {
		register_syscore_ops(&lubbock_irq_syscore_ops);
		return 0;
	}
	return -ENODEV;
}
device_initcall(lubbock_irq_device_init);
#endif
/* Bit 9 of the FPGA MISC_RD register reads 0 while the UDC is connected. */
static int lubbock_udc_is_connected(void)
{
	return !(LUB_MISC_RD & (1 << 9));
}
static struct pxa2xx_udc_mach_info udc_info __initdata = {
	.udc_is_connected	= lubbock_udc_is_connected,
	// no D+ pullup; lubbock can't connect/disconnect in software
};
/* SA1111 companion chip behind chip select 4 (see lubbock_pin_config). */
static struct resource sa1111_resources[] = {
	[0] = {
		.start	= 0x10000000,
		.end	= 0x10001fff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= LUBBOCK_SA1111_IRQ,
		.end	= LUBBOCK_SA1111_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};
static struct sa1111_platform_data sa1111_info = {
	.irq_base	= LUBBOCK_SA1111_IRQ_BASE,
};
static struct platform_device sa1111_device = {
	.name		= "sa1111",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(sa1111_resources),
	.resource	= sa1111_resources,
	.dev		= {
		.platform_data = &sa1111_info,
	},
};
/* ADS7846 is connected through SSP ... and if your board has J5 populated,
 * you can select it to replace the ucb1400 by switching the touchscreen cable
 * (to J5) and poking board registers (as done below). Else it's only useful
 * for the temperature sensors.
 */
static struct pxa2xx_spi_master pxa_ssp_master_info = {
	.num_chipselect	= 1,
};
/* Pen-down state is never polled on this board (irq-only, see below). */
static int lubbock_ads7846_pendown_state(void)
{
	/* TS_BUSY is bit 8 in LUB_MISC_RD, but pendown is irq-only */
	return 0;
}
/* ADS7846 touchscreen controller configuration. */
static struct ads7846_platform_data ads_info = {
	.model			= 7846,
	.vref_delay_usecs	= 100,		/* internal, no cap */
	.get_pendown_state	= lubbock_ads7846_pendown_state,
	// .x_plate_ohms		= 500,	/* GUESS! */
	// .y_plate_ohms		= 500,	/* GUESS! */
};
/* Drive the ADS7846 chip select (TS_nCS, active low) via the FPGA MISC_WR. */
static void ads7846_cs(u32 command)
{
	static const unsigned TS_nCS = 1 << 11;
	unsigned value;

	if (command == PXA2XX_CS_ASSERT)
		value = 0;
	else
		value = TS_nCS;
	lubbock_set_misc_wr(TS_nCS, value);
}
static struct pxa2xx_spi_chip ads_hw = {
	.tx_threshold		= 1,
	.rx_threshold		= 2,
	.cs_control		= ads7846_cs,
};
/* SPI devices on SSP1: only the ADS7846 touchscreen/temperature sensor. */
static struct spi_board_info spi_board_info[] __initdata = { {
	.modalias	= "ads7846",
	.platform_data	= &ads_info,
	.controller_data = &ads_hw,
	.irq		= LUBBOCK_BB_IRQ,
	.max_speed_hz	= 120000 /* max sample rate at 3V */
				* 26 /* command + data + overhead */,
	.bus_num	= 1,
	.chip_select	= 0,
},
};
/* SMC91x ethernet behind chip select 3 (see lubbock_pin_config). */
static struct resource smc91x_resources[] = {
	[0] = {
		.name	= "smc91x-regs",
		.start	= 0x0c000c00,
		.end	= 0x0c0fffff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= LUBBOCK_ETH_IRQ,
		.end	= LUBBOCK_ETH_IRQ,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	},
	[2] = {
		.name	= "smc91x-attrib",
		.start	= 0x0e000000,
		.end	= 0x0e0fffff,
		.flags	= IORESOURCE_MEM,
	},
};
static struct smc91x_platdata lubbock_smc91x_info = {
	.flags	= SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_IO_SHIFT_2,
};
static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(smc91x_resources),
	.resource	= smc91x_resources,
	.dev		= {
		.platform_data = &lubbock_smc91x_info,
	},
};
/* Two 64 MiB flash banks; which one is "boot" depends on the nROMBT switch. */
static struct resource flash_resources[] = {
	[0] = {
		.start	= 0x00000000,
		.end	= SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 0x04000000,
		.end	= 0x04000000 + SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
	},
};
static struct mtd_partition lubbock_partitions[] = {
	{
		.name =		"Bootloader",
		.size =		0x00040000,
		.offset =	0,
		.mask_flags =	MTD_WRITEABLE  /* force read-only */
	},{
		.name =		"Kernel",
		.size =		0x00100000,
		.offset =	0x00040000,
	},{
		.name =		"Filesystem",
		.size =		MTDPART_SIZ_FULL,
		.offset =	0x00140000
	}
};
/* Partition table applies to bank 0 only; bank 1 is left unpartitioned. */
static struct flash_platform_data lubbock_flash_data[2] = {
	{
		.map_name	= "cfi_probe",
		.parts		= lubbock_partitions,
		.nr_parts	= ARRAY_SIZE(lubbock_partitions),
	}, {
		.map_name	= "cfi_probe",
		.parts		= NULL,
		.nr_parts	= 0,
	}
};
static struct platform_device lubbock_flash_device[2] = {
	{
		.name		= "pxa2xx-flash",
		.id		= 0,
		.dev = {
			.platform_data = &lubbock_flash_data[0],
		},
		.resource = &flash_resources[0],
		.num_resources = 1,
	},
	{
		.name		= "pxa2xx-flash",
		.id		= 1,
		.dev = {
			.platform_data = &lubbock_flash_data[1],
		},
		.resource = &flash_resources[1],
		.num_resources = 1,
	},
};
/* Platform devices registered in lubbock_init(). */
static struct platform_device *devices[] __initdata = {
	&sa1111_device,
	&smc91x_device,
	&lubbock_flash_device[0],
	&lubbock_flash_device[1],
};
/* Sharp LM8V31 640x480 16bpp DSTN panel timings. */
static struct pxafb_mode_info sharp_lm8v31_mode = {
	.pixclock	= 270000,
	.xres		= 640,
	.yres		= 480,
	.bpp		= 16,
	.hsync_len	= 1,
	.left_margin	= 3,
	.right_margin	= 3,
	.vsync_len	= 1,
	.upper_margin	= 0,
	.lower_margin	= 0,
	.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	.cmap_greyscale	= 0,
};
static struct pxafb_mach_info sharp_lm8v31 = {
	.modes		= &sharp_lm8v31_mode,
	.num_modes	= 1,
	.cmap_inverse	= 0,
	.cmap_static	= 0,
	.lcd_conn	= LCD_COLOR_DSTN_16BPP | LCD_PCLK_EDGE_FALL |
			  LCD_AC_BIAS_FREQ(255),
};
/* SD card removal is detected by polling, since the irq is level triggered. */
#define	MMC_POLL_RATE		msecs_to_jiffies(1000)
static void lubbock_mmc_poll(unsigned long);
static irq_handler_t mmc_detect_int;
static struct timer_list mmc_timer = {
	.function	= lubbock_mmc_poll,
};
/*
 * Timer callback: keep polling while the card is present; once the card
 * is gone, report the change and re-enable the level-triggered irq.
 */
static void lubbock_mmc_poll(unsigned long data)
{
	unsigned long flags;
	/* clear any previous irq state, then ... */
	local_irq_save(flags);
	LUB_IRQ_SET_CLR &= ~(1 << 0);
	local_irq_restore(flags);
	/* poll until mmc/sd card is removed */
	if (LUB_IRQ_SET_CLR & (1 << 0))
		mod_timer(&mmc_timer, jiffies + MMC_POLL_RATE);
	else {
		(void) mmc_detect_int(LUBBOCK_SD_IRQ, (void *)data);
		enable_irq(LUBBOCK_SD_IRQ);
	}
}
/* Card-insert interrupt: hand off to polling until the card is removed. */
static irqreturn_t lubbock_detect_int(int irq, void *data)
{
	/* IRQ is level triggered; disable, and poll for removal */
	disable_irq(irq);
	mod_timer(&mmc_timer, jiffies + MMC_POLL_RATE);
	return mmc_detect_int(irq, data);
}
/* Hook up card-detect: store the MMC core's handler and claim the irq. */
static int lubbock_mci_init(struct device *dev,
			    irq_handler_t detect_int,
			    void *data)
{
	/* detect card insert/eject */
	mmc_detect_int = detect_int;
	init_timer(&mmc_timer);
	mmc_timer.data = (unsigned long) data;
	return request_irq(LUBBOCK_SD_IRQ, lubbock_detect_int,
			   IRQF_SAMPLE_RANDOM, "lubbock-sd-detect", data);
}
/* Bit 2 of the FPGA MISC_RD register is set when the card is write-protected. */
static int lubbock_mci_get_ro(struct device *dev)
{
	return !!(LUB_MISC_RD & (1 << 2));
}
/* Tear down card-detect: release the irq, then stop the polling timer. */
static void lubbock_mci_exit(struct device *dev, void *data)
{
	free_irq(LUBBOCK_SD_IRQ, data);
	del_timer_sync(&mmc_timer);
}
/* MMC controller setup; card detect/RO/power are handled by FPGA, not GPIOs. */
static struct pxamci_platform_data lubbock_mci_platform_data = {
	.ocr_mask		= MMC_VDD_32_33|MMC_VDD_33_34,
	.detect_delay_ms	= 10,
	.init 			= lubbock_mci_init,
	.get_ro			= lubbock_mci_get_ro,
	.exit 			= lubbock_mci_exit,
	.gpio_card_detect	= -1,
	.gpio_card_ro		= -1,
	.gpio_power		= -1,
};
/*
 * Switch the IrDA transceiver between SIR and FIR via bit 4 of the FPGA
 * MISC_WR register, then let the SoC-level helper do the rest.
 */
static void lubbock_irda_transceiver_mode(struct device *dev, int mode)
{
	unsigned long flags;
	local_irq_save(flags);
	if (mode & IR_SIRMODE) {
		LUB_MISC_WR &= ~(1 << 4);
	} else if (mode & IR_FIRMODE) {
		LUB_MISC_WR |= 1 << 4;
	}
	pxa2xx_transceiver_mode(dev, mode);
	local_irq_restore(flags);
}
/* IrDA setup: both SIR and FIR supported, no power-down GPIO. */
static struct pxaficp_platform_data lubbock_ficp_platform_data = {
	.gpio_pwdown		= -1,
	.transceiver_cap	= IR_SIRMODE | IR_FIRMODE,
	.transceiver_mode	= lubbock_irda_transceiver_mode,
};
/* Board init: configure pins, register on-SoC peripherals and devices. */
static void __init lubbock_init(void)
{
	int flashboot = (LUB_CONF_SWITCHES & 1);
	pxa2xx_mfp_config(ARRAY_AND_SIZE(lubbock_pin_config));
	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);
	clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
	pxa_set_udc_info(&udc_info);
	pxa_set_fb_info(NULL, &sharp_lm8v31);
	pxa_set_mci_info(&lubbock_mci_platform_data);
	pxa_set_ficp_info(&lubbock_ficp_platform_data);
	pxa_set_ac97_info(NULL);
	/* bus width of the boot flash is sampled from BOOT_DEF at reset */
	lubbock_flash_data[0].width = lubbock_flash_data[1].width =
		(__raw_readl(BOOT_DEF) & 1) ? 2 : 4;
	/* Compensate for the nROMBT switch which swaps the flash banks */
	printk(KERN_NOTICE "Lubbock configured to boot from %s (bank %d)\n",
	       flashboot?"Flash":"ROM", flashboot);
	lubbock_flash_data[flashboot^1].name = "application-flash";
	lubbock_flash_data[flashboot].name = "boot-rom";
	(void) platform_add_devices(devices, ARRAY_SIZE(devices));
	pxa2xx_set_spi_info(1, &pxa_ssp_master_info);
	spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
}
/* Static mapping for the board CPLD/FPGA register window. */
static struct map_desc lubbock_io_desc[] __initdata = {
  	{	/* CPLD */
		.virtual	=  LUBBOCK_FPGA_VIRT,
		.pfn		= __phys_to_pfn(LUBBOCK_FPGA_PHYS),
		.length		= 0x00100000,
		.type		= MT_DEVICE
	}
};
/* Map SoC and board register windows; runs before lubbock_init(). */
static void __init lubbock_map_io(void)
{
	pxa25x_map_io();
	iotable_init(lubbock_io_desc, ARRAY_SIZE(lubbock_io_desc));
	/* set OPDE in the power manager config register —
	 * NOTE(review): confirm intent against the PXA25x developer's manual */
	PCFR |= PCFR_OPDE;
}
/* Machine descriptor tying the hooks above to machine id LUBBOCK. */
MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform (aka Lubbock)")
	/* Maintainer: MontaVista Software Inc. */
	.map_io		= lubbock_map_io,
	.nr_irqs	= LUBBOCK_NR_IRQS,
	.init_irq	= lubbock_init_irq,
	.timer		= &pxa_timer,
	.init_machine	= lubbock_init,
MACHINE_END
/*
* Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright 2008 Juergen Beisert, kernel@pengutronix.de
* Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <asm/div64.h>
/* Map a CCM register offset to its virtual address. */
#define IO_ADDR_CCM(off)	(MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
/* Register offsets */
#define CCM_CSCR		IO_ADDR_CCM(0x0)
#define CCM_MPCTL0		IO_ADDR_CCM(0x4)
#define CCM_MPCTL1		IO_ADDR_CCM(0x8)
#define CCM_SPCTL0		IO_ADDR_CCM(0xc)
#define CCM_SPCTL1		IO_ADDR_CCM(0x10)
#define CCM_OSC26MCTL		IO_ADDR_CCM(0x14)
#define CCM_PCDR0		IO_ADDR_CCM(0x18)
#define CCM_PCDR1		IO_ADDR_CCM(0x1c)
#define CCM_PCCR0		IO_ADDR_CCM(0x20)
#define CCM_PCCR1		IO_ADDR_CCM(0x24)
#define CCM_CCSR		IO_ADDR_CCM(0x28)
#define CCM_PMCTL		IO_ADDR_CCM(0x2c)
#define CCM_PMCOUNT		IO_ADDR_CCM(0x30)
#define CCM_WKGDCTL		IO_ADDR_CCM(0x34)
/* Clock Source Control Register (CSCR) bit fields */
#define CCM_CSCR_PRESC_OFFSET	29
#define CCM_CSCR_PRESC_MASK	(0x7 << CCM_CSCR_PRESC_OFFSET)
#define CCM_CSCR_USB_OFFSET	26
#define CCM_CSCR_USB_MASK	(0x7 << CCM_CSCR_USB_OFFSET)
#define CCM_CSCR_SD_OFFSET	24
#define CCM_CSCR_SD_MASK	(0x3 << CCM_CSCR_SD_OFFSET)
#define CCM_CSCR_SPLLRES	(1 << 22)
#define CCM_CSCR_MPLLRES	(1 << 21)
#define CCM_CSCR_SSI2_OFFSET	20
#define CCM_CSCR_SSI2		(1 << CCM_CSCR_SSI2_OFFSET)
#define CCM_CSCR_SSI1_OFFSET	19
#define CCM_CSCR_SSI1		(1 << CCM_CSCR_SSI1_OFFSET)
#define CCM_CSCR_FIR_OFFSET	18
#define CCM_CSCR_FIR		(1 << CCM_CSCR_FIR_OFFSET)
#define CCM_CSCR_SP		(1 << 17)
#define CCM_CSCR_MCU		(1 << 16)
#define CCM_CSCR_BCLK_OFFSET	10
#define CCM_CSCR_BCLK_MASK	(0xf << CCM_CSCR_BCLK_OFFSET)
#define CCM_CSCR_IPDIV_OFFSET	9
#define CCM_CSCR_IPDIV		(1 << CCM_CSCR_IPDIV_OFFSET)
#define CCM_CSCR_OSC26MDIV	(1 << 4)
#define CCM_CSCR_OSC26M		(1 << 3)
#define CCM_CSCR_FPM		(1 << 2)
#define CCM_CSCR_SPEN		(1 << 1)
#define CCM_CSCR_MPEN		1
/* MCU PLL Control Register (MPCTL0/1) bit fields */
#define CCM_MPCTL0_CPLM		(1 << 31)
#define CCM_MPCTL0_PD_OFFSET	26
#define CCM_MPCTL0_PD_MASK	(0xf << 26)
#define CCM_MPCTL0_MFD_OFFSET	16
#define CCM_MPCTL0_MFD_MASK	(0x3ff << 16)
#define CCM_MPCTL0_MFI_OFFSET	10
#define CCM_MPCTL0_MFI_MASK	(0xf << 10)
#define CCM_MPCTL0_MFN_OFFSET	0
#define CCM_MPCTL0_MFN_MASK	0x3ff
#define CCM_MPCTL1_LF		(1 << 15)
#define CCM_MPCTL1_BRMO		(1 << 6)
/* Serial Peripheral PLL Control Register (SPCTL0/1) bit fields */
#define CCM_SPCTL0_CPLM		(1 << 31)
#define CCM_SPCTL0_PD_OFFSET	26
#define CCM_SPCTL0_PD_MASK	(0xf << 26)
#define CCM_SPCTL0_MFD_OFFSET	16
#define CCM_SPCTL0_MFD_MASK	(0x3ff << 16)
#define CCM_SPCTL0_MFI_OFFSET	10
#define CCM_SPCTL0_MFI_MASK	(0xf << 10)
#define CCM_SPCTL0_MFN_OFFSET	0
#define CCM_SPCTL0_MFN_MASK	0x3ff
#define CCM_SPCTL1_LF		(1 << 15)
#define CCM_SPCTL1_BRMO		(1 << 6)
/* 26 MHz Oscillator Control Register bit fields */
#define CCM_OSC26MCTL_PEAK_OFFSET	16
#define CCM_OSC26MCTL_PEAK_MASK		(0x3 << 16)
#define CCM_OSC26MCTL_AGC_OFFSET	8
#define CCM_OSC26MCTL_AGC_MASK		(0x3f << 8)
#define CCM_OSC26MCTL_ANATEST_OFFSET	0
#define CCM_OSC26MCTL_ANATEST_MASK	0x3f
/* Peripheral Clock Divider Register 0 (PCDR0) bit fields */
#define CCM_PCDR0_SSI2BAUDDIV_OFFSET	26
#define CCM_PCDR0_SSI2BAUDDIV_MASK	(0x3f << 26)
#define CCM_PCDR0_SSI1BAUDDIV_OFFSET	16
#define CCM_PCDR0_SSI1BAUDDIV_MASK	(0x3f << 16)
#define CCM_PCDR0_NFCDIV_OFFSET		12
#define CCM_PCDR0_NFCDIV_MASK		(0xf << 12)
#define CCM_PCDR0_48MDIV_OFFSET		5
#define CCM_PCDR0_48MDIV_MASK		(0x7 << CCM_PCDR0_48MDIV_OFFSET)
#define CCM_PCDR0_FIRIDIV_OFFSET	0
#define CCM_PCDR0_FIRIDIV_MASK		0x1f
/* Peripheral Clock Divider Register 1 (PCDR1) bit fields */
#define CCM_PCDR1_PERDIV4_OFFSET	24
#define CCM_PCDR1_PERDIV4_MASK		(0x3f << 24)
#define CCM_PCDR1_PERDIV3_OFFSET	16
#define CCM_PCDR1_PERDIV3_MASK		(0x3f << 16)
#define CCM_PCDR1_PERDIV2_OFFSET	8
#define CCM_PCDR1_PERDIV2_MASK		(0x3f << 8)
#define CCM_PCDR1_PERDIV1_OFFSET	0
#define CCM_PCDR1_PERDIV1_MASK		0x3f
/* Peripheral Clock Control Register (PCCR0/1) gate bit positions */
#define CCM_PCCR_HCLK_CSI_OFFSET	31
#define CCM_PCCR_HCLK_CSI_REG		CCM_PCCR0
#define CCM_PCCR_HCLK_DMA_OFFSET	30
#define CCM_PCCR_HCLK_DMA_REG		CCM_PCCR0
#define CCM_PCCR_HCLK_BROM_OFFSET	28
#define CCM_PCCR_HCLK_BROM_REG		CCM_PCCR0
#define CCM_PCCR_HCLK_EMMA_OFFSET	27
#define CCM_PCCR_HCLK_EMMA_REG		CCM_PCCR0
#define CCM_PCCR_HCLK_LCDC_OFFSET	26
#define CCM_PCCR_HCLK_LCDC_REG		CCM_PCCR0
#define CCM_PCCR_HCLK_SLCDC_OFFSET	25
#define CCM_PCCR_HCLK_SLCDC_REG		CCM_PCCR0
#define CCM_PCCR_HCLK_USBOTG_OFFSET	24
#define CCM_PCCR_HCLK_USBOTG_REG	CCM_PCCR0
#define CCM_PCCR_HCLK_BMI_OFFSET 23
/* was "(1 << CCM_PCCR_BMI_MASK)": a self-reference that cannot expand */
#define CCM_PCCR_BMI_MASK (1 << CCM_PCCR_HCLK_BMI_OFFSET)
#define CCM_PCCR_HCLK_BMI_REG CCM_PCCR0
#define CCM_PCCR_PERCLK4_OFFSET 22
#define CCM_PCCR_PERCLK4_REG CCM_PCCR0
#define CCM_PCCR_SLCDC_OFFSET 21
#define CCM_PCCR_SLCDC_REG CCM_PCCR0
#define CCM_PCCR_FIRI_BAUD_OFFSET 20
/* was "(1 << CCM_PCCR_FIRI_BAUD_MASK)": self-reference, now built from the offset */
#define CCM_PCCR_FIRI_BAUD_MASK (1 << CCM_PCCR_FIRI_BAUD_OFFSET)
#define CCM_PCCR_FIRI_BAUD_REG CCM_PCCR0
#define CCM_PCCR_NFC_OFFSET 19
#define CCM_PCCR_NFC_REG CCM_PCCR0
#define CCM_PCCR_LCDC_OFFSET 18
#define CCM_PCCR_LCDC_REG CCM_PCCR0
#define CCM_PCCR_SSI1_BAUD_OFFSET 17
#define CCM_PCCR_SSI1_BAUD_REG CCM_PCCR0
#define CCM_PCCR_SSI2_BAUD_OFFSET 16
#define CCM_PCCR_SSI2_BAUD_REG CCM_PCCR0
#define CCM_PCCR_EMMA_OFFSET 15
#define CCM_PCCR_EMMA_REG CCM_PCCR0
#define CCM_PCCR_USBOTG_OFFSET 14
#define CCM_PCCR_USBOTG_REG CCM_PCCR0
#define CCM_PCCR_DMA_OFFSET 13
#define CCM_PCCR_DMA_REG CCM_PCCR0
#define CCM_PCCR_I2C1_OFFSET 12
#define CCM_PCCR_I2C1_REG CCM_PCCR0
#define CCM_PCCR_GPIO_OFFSET 11
#define CCM_PCCR_GPIO_REG CCM_PCCR0
#define CCM_PCCR_SDHC2_OFFSET 10
#define CCM_PCCR_SDHC2_REG CCM_PCCR0
#define CCM_PCCR_SDHC1_OFFSET 9
#define CCM_PCCR_SDHC1_REG CCM_PCCR0
#define CCM_PCCR_FIRI_OFFSET 8
/* was "(1 << CCM_PCCR_BAUD_MASK)": CCM_PCCR_BAUD_MASK is not defined anywhere */
#define CCM_PCCR_FIRI_MASK (1 << CCM_PCCR_FIRI_OFFSET)
#define CCM_PCCR_FIRI_REG CCM_PCCR0
#define CCM_PCCR_SSI2_IPG_OFFSET 7
#define CCM_PCCR_SSI2_REG CCM_PCCR0
#define CCM_PCCR_SSI1_IPG_OFFSET 6
#define CCM_PCCR_SSI1_REG CCM_PCCR0
#define CCM_PCCR_CSPI2_OFFSET 5
#define CCM_PCCR_CSPI2_REG CCM_PCCR0
#define CCM_PCCR_CSPI1_OFFSET 4
#define CCM_PCCR_CSPI1_REG CCM_PCCR0
#define CCM_PCCR_UART4_OFFSET 3
#define CCM_PCCR_UART4_REG CCM_PCCR0
#define CCM_PCCR_UART3_OFFSET 2
#define CCM_PCCR_UART3_REG CCM_PCCR0
#define CCM_PCCR_UART2_OFFSET 1
#define CCM_PCCR_UART2_REG CCM_PCCR0
#define CCM_PCCR_UART1_OFFSET 0
#define CCM_PCCR_UART1_REG CCM_PCCR0
#define CCM_PCCR_OWIRE_OFFSET 31
#define CCM_PCCR_OWIRE_REG CCM_PCCR1
#define CCM_PCCR_KPP_OFFSET 30
#define CCM_PCCR_KPP_REG CCM_PCCR1
#define CCM_PCCR_RTC_OFFSET 29
#define CCM_PCCR_RTC_REG CCM_PCCR1
#define CCM_PCCR_PWM_OFFSET 28
#define CCM_PCCR_PWM_REG CCM_PCCR1
#define CCM_PCCR_GPT3_OFFSET 27
#define CCM_PCCR_GPT3_REG CCM_PCCR1
#define CCM_PCCR_GPT2_OFFSET 26
#define CCM_PCCR_GPT2_REG CCM_PCCR1
#define CCM_PCCR_GPT1_OFFSET 25
#define CCM_PCCR_GPT1_REG CCM_PCCR1
#define CCM_PCCR_WDT_OFFSET 24
#define CCM_PCCR_WDT_REG CCM_PCCR1
#define CCM_PCCR_CSPI3_OFFSET 23
#define CCM_PCCR_CSPI3_REG CCM_PCCR1
#define CCM_PCCR_CSPI1_MASK (1 << CCM_PCCR_CSPI1_OFFSET)
#define CCM_PCCR_CSPI2_MASK (1 << CCM_PCCR_CSPI2_OFFSET)
#define CCM_PCCR_CSPI3_MASK (1 << CCM_PCCR_CSPI3_OFFSET)
#define CCM_PCCR_DMA_MASK (1 << CCM_PCCR_DMA_OFFSET)
#define CCM_PCCR_EMMA_MASK (1 << CCM_PCCR_EMMA_OFFSET)
#define CCM_PCCR_GPIO_MASK (1 << CCM_PCCR_GPIO_OFFSET)
#define CCM_PCCR_GPT1_MASK (1 << CCM_PCCR_GPT1_OFFSET)
#define CCM_PCCR_GPT2_MASK (1 << CCM_PCCR_GPT2_OFFSET)
#define CCM_PCCR_GPT3_MASK (1 << CCM_PCCR_GPT3_OFFSET)
#define CCM_PCCR_HCLK_BROM_MASK (1 << CCM_PCCR_HCLK_BROM_OFFSET)
#define CCM_PCCR_HCLK_CSI_MASK (1 << CCM_PCCR_HCLK_CSI_OFFSET)
#define CCM_PCCR_HCLK_DMA_MASK (1 << CCM_PCCR_HCLK_DMA_OFFSET)
#define CCM_PCCR_HCLK_EMMA_MASK (1 << CCM_PCCR_HCLK_EMMA_OFFSET)
#define CCM_PCCR_HCLK_LCDC_MASK (1 << CCM_PCCR_HCLK_LCDC_OFFSET)
#define CCM_PCCR_HCLK_SLCDC_MASK (1 << CCM_PCCR_HCLK_SLCDC_OFFSET)
#define CCM_PCCR_HCLK_USBOTG_MASK (1 << CCM_PCCR_HCLK_USBOTG_OFFSET)
#define CCM_PCCR_I2C1_MASK (1 << CCM_PCCR_I2C1_OFFSET)
#define CCM_PCCR_KPP_MASK (1 << CCM_PCCR_KPP_OFFSET)
#define CCM_PCCR_LCDC_MASK (1 << CCM_PCCR_LCDC_OFFSET)
#define CCM_PCCR_NFC_MASK (1 << CCM_PCCR_NFC_OFFSET)
#define CCM_PCCR_OWIRE_MASK (1 << CCM_PCCR_OWIRE_OFFSET)
#define CCM_PCCR_PERCLK4_MASK (1 << CCM_PCCR_PERCLK4_OFFSET)
#define CCM_PCCR_PWM_MASK (1 << CCM_PCCR_PWM_OFFSET)
#define CCM_PCCR_RTC_MASK (1 << CCM_PCCR_RTC_OFFSET)
#define CCM_PCCR_SDHC1_MASK (1 << CCM_PCCR_SDHC1_OFFSET)
#define CCM_PCCR_SDHC2_MASK (1 << CCM_PCCR_SDHC2_OFFSET)
#define CCM_PCCR_SLCDC_MASK (1 << CCM_PCCR_SLCDC_OFFSET)
#define CCM_PCCR_SSI1_BAUD_MASK (1 << CCM_PCCR_SSI1_BAUD_OFFSET)
#define CCM_PCCR_SSI1_IPG_MASK (1 << CCM_PCCR_SSI1_IPG_OFFSET)
#define CCM_PCCR_SSI2_BAUD_MASK (1 << CCM_PCCR_SSI2_BAUD_OFFSET)
#define CCM_PCCR_SSI2_IPG_MASK (1 << CCM_PCCR_SSI2_IPG_OFFSET)
#define CCM_PCCR_UART1_MASK (1 << CCM_PCCR_UART1_OFFSET)
#define CCM_PCCR_UART2_MASK (1 << CCM_PCCR_UART2_OFFSET)
#define CCM_PCCR_UART3_MASK (1 << CCM_PCCR_UART3_OFFSET)
#define CCM_PCCR_UART4_MASK (1 << CCM_PCCR_UART4_OFFSET)
#define CCM_PCCR_USBOTG_MASK (1 << CCM_PCCR_USBOTG_OFFSET)
#define CCM_PCCR_WDT_MASK (1 << CCM_PCCR_WDT_OFFSET)
#define CCM_CCSR_32KSR (1 << 15)
#define CCM_CCSR_CLKMODE1 (1 << 9)
#define CCM_CCSR_CLKMODE0 (1 << 8)
#define CCM_CCSR_CLKOSEL_OFFSET 0
#define CCM_CCSR_CLKOSEL_MASK 0x1f
#define SYS_FMCR 0x14 /* Functional Muxing Control Reg */
#define SYS_CHIP_ID 0x00 /* The offset of CHIP ID register */
/*
 * Generic clock-gate enable: set this clock's gate bit in the PCCR
 * register recorded in clk->enable_reg / clk->enable_shift.
 */
static int _clk_enable(struct clk *clk)
{
	u32 val = __raw_readl(clk->enable_reg);

	val |= 1 << clk->enable_shift;
	__raw_writel(val, clk->enable_reg);

	return 0;
}
/*
 * Generic clock-gate disable: clear this clock's gate bit in its PCCR
 * register.  Mirror image of _clk_enable().
 */
static void _clk_disable(struct clk *clk)
{
	u32 val = __raw_readl(clk->enable_reg);

	val &= ~(1 << clk->enable_shift);
	__raw_writel(val, clk->enable_reg);
}
/*
 * Round @rate to the nearest rate achievable with an integer divider
 * in the range 1..@max_divisor, never returning a rate above the
 * request (the divisor is rounded up, then clamped).
 *
 * NOTE(review): @rate == 0 would divide by zero here — callers are
 * presumed never to pass 0; confirm.
 */
static unsigned long _clk_generic_round_rate(struct clk *clk,
			unsigned long rate,
			u32 max_divisor)
{
	unsigned long parent_rate = clk_get_rate(clk->parent);
	u32 divisor = parent_rate / rate;

	/* Round up so the produced rate does not exceed the request. */
	if (parent_rate % rate != 0)
		divisor++;

	if (divisor > max_divisor)
		divisor = max_divisor;

	return parent_rate / divisor;
}
/*
 * Enable the serial PLL (SPEN bit in CSCR) and busy-wait until the
 * lock flag (LF in SPCTL1) is set.
 *
 * NOTE(review): the lock poll has no timeout — a PLL that never locks
 * would hang the CPU here.  Presumably acceptable at early init;
 * confirm.
 */
static int _clk_spll_enable(struct clk *clk)
{
	u32 reg;

	reg = __raw_readl(CCM_CSCR);
	reg |= CCM_CSCR_SPEN;
	__raw_writel(reg, CCM_CSCR);

	/* Spin until the hardware reports PLL lock. */
	while ((__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF) == 0)
		;
	return 0;
}
/* Disable the serial PLL by clearing the SPEN bit in CSCR. */
static void _clk_spll_disable(struct clk *clk)
{
	u32 cscr = __raw_readl(CCM_CSCR);

	cscr &= ~CCM_CSCR_SPEN;
	__raw_writel(cscr, CCM_CSCR);
}
#define CSCR() (__raw_readl(CCM_CSCR))
#define PCDR0() (__raw_readl(CCM_PCDR0))
#define PCDR1() (__raw_readl(CCM_PCDR1))
/* PERDIVx fields are 6 bits wide, so the divider range is 1..64. */
static unsigned long _clk_perclkx_round_rate(struct clk *clk,
					     unsigned long rate)
{
	return _clk_generic_round_rate(clk, rate, 64);
}
/*
 * Program the PERDIVx divider (PCDR1) for peripheral clock clk->id
 * (0..3).  Each 6-bit field sits 8 bits apart in PCDR1.
 *
 * Returns -EINVAL for an out-of-range id, a zero rate, or a rate that
 * is not an exact integer division of the parent by a divider 1..64.
 */
static int _clk_perclkx_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg;
	u32 div;
	unsigned long parent_rate;

	if (clk->id < 0 || clk->id > 3)
		return -EINVAL;
	if (rate == 0)	/* guard the divisions below */
		return -EINVAL;

	parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	if (div > 64 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;
	div--;		/* hardware field encodes divider - 1 */

	reg =
	    __raw_readl(CCM_PCDR1) & ~(CCM_PCDR1_PERDIV1_MASK <<
				       (clk->id << 3));
	reg |= div << (clk->id << 3);
	__raw_writel(reg, CCM_PCDR1);

	return 0;
}
/* Recompute the USB clock rate from the 3-bit USB divider in CSCR. */
static unsigned long _clk_usb_recalc(struct clk *clk)
{
	unsigned long div = (CSCR() & CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET;

	return clk_get_rate(clk->parent) / (div + 1U);
}
/* The USB divider field is 3 bits wide: dividers 1..8. */
static unsigned long _clk_usb_round_rate(struct clk *clk,
					 unsigned long rate)
{
	return _clk_generic_round_rate(clk, rate, 8);
}
/*
 * Program the USB clock divider (3-bit USB field in CSCR).
 *
 * Returns -EINVAL unless @rate is an exact integer division of the
 * parent rate by a divider in the range 1..8.
 */
static int _clk_usb_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg;
	u32 div;
	unsigned long parent_rate;

	if (rate == 0)	/* guard the divisions below */
		return -EINVAL;

	parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;
	div--;		/* hardware field encodes divider - 1 */

	reg = CSCR() & ~CCM_CSCR_USB_MASK;
	reg |= div << CCM_CSCR_USB_OFFSET;
	__raw_writel(reg, CCM_CSCR);

	return 0;
}
/*
 * Common SSI rate recalculation: rate = 2 * parent / pdf.
 * Divider field values below 2 are treated by the hardware as 124
 * (MX21 and MX27 TO1 behavior, per the original comment).
 */
static unsigned long _clk_ssix_recalc(struct clk *clk, unsigned long pdf)
{
	unsigned long parent_rate = clk_get_rate(clk->parent);

	if (pdf < 2)
		pdf = 124UL;

	return 2UL * parent_rate / pdf;
}
/* SSI1 rate: feed the SSI1BAUDDIV field of PCDR0 to the common helper. */
static unsigned long _clk_ssi1_recalc(struct clk *clk)
{
	unsigned long pdf = (PCDR0() & CCM_PCDR0_SSI1BAUDDIV_MASK)
				>> CCM_PCDR0_SSI1BAUDDIV_OFFSET;

	return _clk_ssix_recalc(clk, pdf);
}
/* SSI2 rate: feed the SSI2BAUDDIV field of PCDR0 to the common helper. */
static unsigned long _clk_ssi2_recalc(struct clk *clk)
{
	unsigned long pdf = (PCDR0() & CCM_PCDR0_SSI2BAUDDIV_MASK)
				>> CCM_PCDR0_SSI2BAUDDIV_OFFSET;

	return _clk_ssix_recalc(clk, pdf);
}
/* NAND flash controller rate: parent divided by (NFCDIV + 1). */
static unsigned long _clk_nfc_recalc(struct clk *clk)
{
	unsigned long div = ((PCDR0() & CCM_PCDR0_NFCDIV_MASK)
				>> CCM_PCDR0_NFCDIV_OFFSET) + 1;

	return clk_get_rate(clk->parent) / div;
}
/* Delegate rate rounding to the parent clock. */
static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk *parent = clk->parent;

	return parent->round_rate(parent, rate);
}
/* Delegate rate setting to the parent clock. */
static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *parent = clk->parent;

	return parent->set_rate(parent, rate);
}
static unsigned long external_high_reference; /* in Hz */
/*
 * Rate of the external high-frequency reference (CKIH); the value is
 * supplied by the board via mx21_clocks_init().
 */
static unsigned long get_high_reference_clock_rate(struct clk *clk)
{
	return external_high_reference;
}
/*
* the high frequency external clock reference
* Default case is 26MHz.
*/
static struct clk ckih_clk = {
.get_rate = get_high_reference_clock_rate,
};
static unsigned long external_low_reference; /* in Hz */
/*
 * Rate of the external low-frequency reference (CKIL); the value is
 * supplied by the board via mx21_clocks_init().
 */
static unsigned long get_low_reference_clock_rate(struct clk *clk)
{
	return external_low_reference;
}
/*
* the low frequency external clock reference
* Default case is 32.768kHz.
*/
static struct clk ckil_clk = {
.get_rate = get_low_reference_clock_rate,
};
/* Frequency pre-multiplier output: 512 times the 32.768 kHz reference. */
static unsigned long _clk_fpm_recalc(struct clk *clk)
{
	return clk_get_rate(clk->parent) * 512;
}
/* Output of frequency pre multiplier */
static struct clk fpm_clk = {
.parent = &ckil_clk,
.get_rate = _clk_fpm_recalc,
};
/*
 * Compute the MPLL output rate from CCM_MPCTL0:
 *
 *   f = 2 * fref * (MFI + MFN / (MFD + 1)) / (PD + 1)
 *
 * evaluated in 64-bit via do_div() to avoid intermediate overflow.
 */
static unsigned long get_mpll_clk(struct clk *clk)
{
	uint32_t reg;
	unsigned long ref_clk;
	unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
	unsigned long long temp;

	ref_clk = clk_get_rate(clk->parent);
	reg = __raw_readl(CCM_MPCTL0);
	pdf = (reg & CCM_MPCTL0_PD_MASK) >> CCM_MPCTL0_PD_OFFSET;
	mfd = (reg & CCM_MPCTL0_MFD_MASK) >> CCM_MPCTL0_MFD_OFFSET;
	mfi = (reg & CCM_MPCTL0_MFI_MASK) >> CCM_MPCTL0_MFI_OFFSET;
	mfn = (reg & CCM_MPCTL0_MFN_MASK) >> CCM_MPCTL0_MFN_OFFSET;
	/* MFI values below 5 are clamped to 5 (matches get_spll_clk). */
	mfi = (mfi <= 5) ? 5 : mfi;
	temp = 2LL * ref_clk * mfn;
	do_div(temp, mfd + 1);
	temp = 2LL * ref_clk * mfi + temp;
	do_div(temp, pdf + 1);
	return (unsigned long)temp;
}
static struct clk mpll_clk = {
.parent = &ckih_clk,
.get_rate = get_mpll_clk,
};
/* FCLK is the MPLL output divided by (PRESC + 1). */
static unsigned long _clk_fclk_get_rate(struct clk *clk)
{
	u32 presc = (CSCR() & CCM_CSCR_PRESC_MASK) >> CCM_CSCR_PRESC_OFFSET;

	return clk_get_rate(clk->parent) / (presc + 1);
}
static struct clk fclk_clk = {
.parent = &mpll_clk,
.get_rate = _clk_fclk_get_rate
};
/*
 * Compute the SPLL output rate from CCM_SPCTL0 — same formula as
 * get_mpll_clk():
 *
 *   f = 2 * fref * (MFI + MFN / (MFD + 1)) / (PD + 1)
 */
static unsigned long get_spll_clk(struct clk *clk)
{
	uint32_t reg;
	unsigned long ref_clk;
	unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
	unsigned long long temp;

	ref_clk = clk_get_rate(clk->parent);
	reg = __raw_readl(CCM_SPCTL0);
	pdf = (reg & CCM_SPCTL0_PD_MASK) >> CCM_SPCTL0_PD_OFFSET;
	mfd = (reg & CCM_SPCTL0_MFD_MASK) >> CCM_SPCTL0_MFD_OFFSET;
	mfi = (reg & CCM_SPCTL0_MFI_MASK) >> CCM_SPCTL0_MFI_OFFSET;
	mfn = (reg & CCM_SPCTL0_MFN_MASK) >> CCM_SPCTL0_MFN_OFFSET;
	/* MFI values below 5 are clamped to 5 (matches get_mpll_clk). */
	mfi = (mfi <= 5) ? 5 : mfi;
	temp = 2LL * ref_clk * mfn;
	do_div(temp, mfd + 1);
	temp = 2LL * ref_clk * mfi + temp;
	do_div(temp, pdf + 1);
	return (unsigned long)temp;
}
static struct clk spll_clk = {
.parent = &ckih_clk,
.get_rate = get_spll_clk,
.enable = _clk_spll_enable,
.disable = _clk_spll_disable,
};
/* HCLK (system bus clock) is FCLK divided by (BCLKDIV + 1). */
static unsigned long get_hclk_clk(struct clk *clk)
{
	unsigned long bclk_div = (CSCR() & CCM_CSCR_BCLK_MASK)
					>> CCM_CSCR_BCLK_OFFSET;

	return clk_get_rate(clk->parent) / (bclk_div + 1);
}
static struct clk hclk_clk = {
.parent = &fclk_clk,
.get_rate = get_hclk_clk,
};
/* IPG clock: HCLK divided by 1 or 2 (IPDIV is a single bit). */
static unsigned long get_ipg_clk(struct clk *clk)
{
	unsigned long ipg_div = (CSCR() & CCM_CSCR_IPDIV)
					>> CCM_CSCR_IPDIV_OFFSET;

	return clk_get_rate(clk->parent) / (ipg_div + 1);
}
static struct clk ipg_clk = {
.parent = &hclk_clk,
.get_rate = get_ipg_clk,
};
/*
 * Recompute peripheral clock clk->id (0..3) from its PERDIVx field in
 * PCDR1 (fields are 8 bits apart, 6 bits wide).  Returns 0 for an
 * out-of-range id.
 */
static unsigned long _clk_perclkx_recalc(struct clk *clk)
{
	unsigned long div;

	if (clk->id < 0 || clk->id > 3)
		return 0;

	div = (PCDR1() >> (clk->id << 3)) & CCM_PCDR1_PERDIV1_MASK;

	return clk_get_rate(clk->parent) / (div + 1);
}
static struct clk per_clk[] = {
{
.id = 0,
.parent = &mpll_clk,
.get_rate = _clk_perclkx_recalc,
}, {
.id = 1,
.parent = &mpll_clk,
.get_rate = _clk_perclkx_recalc,
}, {
.id = 2,
.parent = &mpll_clk,
.round_rate = _clk_perclkx_round_rate,
.set_rate = _clk_perclkx_set_rate,
.get_rate = _clk_perclkx_recalc,
/* Enable/Disable done via lcd_clkc[1] */
}, {
.id = 3,
.parent = &mpll_clk,
.round_rate = _clk_perclkx_round_rate,
.set_rate = _clk_perclkx_set_rate,
.get_rate = _clk_perclkx_recalc,
/* Enable/Disable done via csi_clk[1] */
},
};
static struct clk uart_ipg_clk[];
static struct clk uart_clk[] = {
{
.id = 0,
.parent = &per_clk[0],
.secondary = &uart_ipg_clk[0],
}, {
.id = 1,
.parent = &per_clk[0],
.secondary = &uart_ipg_clk[1],
}, {
.id = 2,
.parent = &per_clk[0],
.secondary = &uart_ipg_clk[2],
}, {
.id = 3,
.parent = &per_clk[0],
.secondary = &uart_ipg_clk[3],
},
};
static struct clk uart_ipg_clk[] = {
{
.id = 0,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_UART1_REG,
.enable_shift = CCM_PCCR_UART1_OFFSET,
.disable = _clk_disable,
}, {
.id = 1,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_UART2_REG,
.enable_shift = CCM_PCCR_UART2_OFFSET,
.disable = _clk_disable,
}, {
.id = 2,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_UART3_REG,
.enable_shift = CCM_PCCR_UART3_OFFSET,
.disable = _clk_disable,
}, {
.id = 3,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_UART4_REG,
.enable_shift = CCM_PCCR_UART4_OFFSET,
.disable = _clk_disable,
},
};
static struct clk gpt_ipg_clk[];
static struct clk gpt_clk[] = {
{
.id = 0,
.parent = &per_clk[0],
.secondary = &gpt_ipg_clk[0],
}, {
.id = 1,
.parent = &per_clk[0],
.secondary = &gpt_ipg_clk[1],
}, {
.id = 2,
.parent = &per_clk[0],
.secondary = &gpt_ipg_clk[2],
},
};
static struct clk gpt_ipg_clk[] = {
{
.id = 0,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_GPT1_REG,
.enable_shift = CCM_PCCR_GPT1_OFFSET,
.disable = _clk_disable,
}, {
.id = 1,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_GPT2_REG,
.enable_shift = CCM_PCCR_GPT2_OFFSET,
.disable = _clk_disable,
}, {
.id = 2,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_GPT3_REG,
.enable_shift = CCM_PCCR_GPT3_OFFSET,
.disable = _clk_disable,
},
};
static struct clk pwm_clk[] = {
{
.parent = &per_clk[0],
.secondary = &pwm_clk[1],
}, {
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_PWM_REG,
.enable_shift = CCM_PCCR_PWM_OFFSET,
.disable = _clk_disable,
},
};
static struct clk sdhc_ipg_clk[];
static struct clk sdhc_clk[] = {
{
.id = 0,
.parent = &per_clk[1],
.secondary = &sdhc_ipg_clk[0],
}, {
.id = 1,
.parent = &per_clk[1],
.secondary = &sdhc_ipg_clk[1],
},
};
static struct clk sdhc_ipg_clk[] = {
{
.id = 0,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_SDHC1_REG,
.enable_shift = CCM_PCCR_SDHC1_OFFSET,
.disable = _clk_disable,
}, {
.id = 1,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_SDHC2_REG,
.enable_shift = CCM_PCCR_SDHC2_OFFSET,
.disable = _clk_disable,
},
};
static struct clk cspi_ipg_clk[];
static struct clk cspi_clk[] = {
{
.id = 0,
.parent = &per_clk[1],
.secondary = &cspi_ipg_clk[0],
}, {
.id = 1,
.parent = &per_clk[1],
.secondary = &cspi_ipg_clk[1],
}, {
.id = 2,
.parent = &per_clk[1],
.secondary = &cspi_ipg_clk[2],
},
};
/* IPG-side gate clocks for the three CSPI controllers (secondaries). */
static struct clk cspi_ipg_clk[] = {
	{
		.id = 0,
		.parent = &ipg_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_CSPI1_REG,
		.enable_shift = CCM_PCCR_CSPI1_OFFSET,
		.disable = _clk_disable,
	}, {
		.id = 1,
		.parent = &ipg_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_CSPI2_REG,
		.enable_shift = CCM_PCCR_CSPI2_OFFSET,
		.disable = _clk_disable,
	}, {
		/*
		 * NOTE(review): .id is 3 here while sibling tables use
		 * 0..2 — looks like it should be 2; confirm nothing keys
		 * off this id before changing it.
		 */
		.id = 3,
		.parent = &ipg_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_CSPI3_REG,
		.enable_shift = CCM_PCCR_CSPI3_OFFSET,
		.disable = _clk_disable,
	},
};
static struct clk lcdc_clk[] = {
{
.parent = &per_clk[2],
.secondary = &lcdc_clk[1],
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
}, {
.parent = &ipg_clk,
.secondary = &lcdc_clk[2],
.enable = _clk_enable,
.enable_reg = CCM_PCCR_LCDC_REG,
.enable_shift = CCM_PCCR_LCDC_OFFSET,
.disable = _clk_disable,
}, {
.parent = &hclk_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_HCLK_LCDC_REG,
.enable_shift = CCM_PCCR_HCLK_LCDC_OFFSET,
.disable = _clk_disable,
},
};
static struct clk csi_clk[] = {
{
.parent = &per_clk[3],
.secondary = &csi_clk[1],
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
}, {
.parent = &hclk_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_HCLK_CSI_REG,
.enable_shift = CCM_PCCR_HCLK_CSI_OFFSET,
.disable = _clk_disable,
},
};
static struct clk usb_clk[] = {
{
.parent = &spll_clk,
.secondary = &usb_clk[1],
.get_rate = _clk_usb_recalc,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_USBOTG_REG,
.enable_shift = CCM_PCCR_USBOTG_OFFSET,
.disable = _clk_disable,
.round_rate = _clk_usb_round_rate,
.set_rate = _clk_usb_set_rate,
}, {
.parent = &hclk_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_HCLK_USBOTG_REG,
.enable_shift = CCM_PCCR_HCLK_USBOTG_OFFSET,
.disable = _clk_disable,
}
};
static struct clk ssi_ipg_clk[];
static struct clk ssi_clk[] = {
{
.id = 0,
.parent = &mpll_clk,
.secondary = &ssi_ipg_clk[0],
.get_rate = _clk_ssi1_recalc,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_SSI1_BAUD_REG,
.enable_shift = CCM_PCCR_SSI1_BAUD_OFFSET,
.disable = _clk_disable,
}, {
.id = 1,
.parent = &mpll_clk,
.secondary = &ssi_ipg_clk[1],
.get_rate = _clk_ssi2_recalc,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_SSI2_BAUD_REG,
.enable_shift = CCM_PCCR_SSI2_BAUD_OFFSET,
.disable = _clk_disable,
},
};
static struct clk ssi_ipg_clk[] = {
{
.id = 0,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_SSI1_REG,
.enable_shift = CCM_PCCR_SSI1_IPG_OFFSET,
.disable = _clk_disable,
}, {
.id = 1,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_SSI2_REG,
.enable_shift = CCM_PCCR_SSI2_IPG_OFFSET,
.disable = _clk_disable,
},
};
static struct clk nfc_clk = {
.parent = &fclk_clk,
.get_rate = _clk_nfc_recalc,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_NFC_REG,
.enable_shift = CCM_PCCR_NFC_OFFSET,
.disable = _clk_disable,
};
static struct clk dma_clk[] = {
{
.parent = &hclk_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_DMA_REG,
.enable_shift = CCM_PCCR_DMA_OFFSET,
.disable = _clk_disable,
.secondary = &dma_clk[1],
}, {
.enable = _clk_enable,
.enable_reg = CCM_PCCR_HCLK_DMA_REG,
.enable_shift = CCM_PCCR_HCLK_DMA_OFFSET,
.disable = _clk_disable,
},
};
static struct clk brom_clk = {
.parent = &hclk_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_HCLK_BROM_REG,
.enable_shift = CCM_PCCR_HCLK_BROM_OFFSET,
.disable = _clk_disable,
};
static struct clk emma_clk[] = {
{
.parent = &hclk_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_EMMA_REG,
.enable_shift = CCM_PCCR_EMMA_OFFSET,
.disable = _clk_disable,
.secondary = &emma_clk[1],
}, {
.enable = _clk_enable,
.enable_reg = CCM_PCCR_HCLK_EMMA_REG,
.enable_shift = CCM_PCCR_HCLK_EMMA_OFFSET,
.disable = _clk_disable,
}
};
static struct clk slcdc_clk[] = {
{
.parent = &hclk_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_SLCDC_REG,
.enable_shift = CCM_PCCR_SLCDC_OFFSET,
.disable = _clk_disable,
.secondary = &slcdc_clk[1],
}, {
.enable = _clk_enable,
.enable_reg = CCM_PCCR_HCLK_SLCDC_REG,
.enable_shift = CCM_PCCR_HCLK_SLCDC_OFFSET,
.disable = _clk_disable,
}
};
static struct clk wdog_clk = {
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_WDT_REG,
.enable_shift = CCM_PCCR_WDT_OFFSET,
.disable = _clk_disable,
};
static struct clk gpio_clk = {
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_GPIO_REG,
.enable_shift = CCM_PCCR_GPIO_OFFSET,
.disable = _clk_disable,
};
static struct clk i2c_clk = {
.id = 0,
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_I2C1_REG,
.enable_shift = CCM_PCCR_I2C1_OFFSET,
.disable = _clk_disable,
};
static struct clk kpp_clk = {
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_KPP_REG,
.enable_shift = CCM_PCCR_KPP_OFFSET,
.disable = _clk_disable,
};
static struct clk owire_clk = {
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_OWIRE_REG,
.enable_shift = CCM_PCCR_OWIRE_OFFSET,
.disable = _clk_disable,
};
static struct clk rtc_clk = {
.parent = &ipg_clk,
.enable = _clk_enable,
.enable_reg = CCM_PCCR_RTC_REG,
.enable_shift = CCM_PCCR_RTC_OFFSET,
.disable = _clk_disable,
};
/* The 48MDIV field driving CLKO is 3 bits wide: dividers 1..8. */
static unsigned long _clk_clko_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_generic_round_rate(clk, rate, 8);
}
/*
 * Program the CLKO divider.  Only the 48M (USB) parent has a divider
 * field (48MDIV in PCDR0); for any other parent the register is
 * written back unchanged, preserving the original behavior.
 *
 * Returns -EINVAL unless @rate is an exact integer division of the
 * parent rate by a divider in the range 1..8.
 */
static int _clk_clko_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg;
	u32 div;
	unsigned long parent_rate;

	if (rate == 0)	/* guard the divisions below */
		return -EINVAL;

	parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;
	div--;		/* hardware field encodes divider - 1 */

	reg = __raw_readl(CCM_PCDR0);
	if (clk->parent == &usb_clk[0]) {
		reg &= ~CCM_PCDR0_48MDIV_MASK;
		reg |= div << CCM_PCDR0_48MDIV_OFFSET;
	}
	__raw_writel(reg, CCM_PCDR0);

	return 0;
}
/*
 * Recompute the CLKO rate from the 48MDIV field in PCDR0 when the
 * parent is the 48M USB clock; otherwise divider 1 is assumed.
 */
static unsigned long _clk_clko_recalc(struct clk *clk)
{
	u32 div = 0;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	if (clk->parent == &usb_clk[0]) /* 48M */
		/*
		 * Parentheses matter: ">>" binds tighter than "&", so the
		 * previous unparenthesized "reg & MASK >> OFFSET" masked
		 * with (MASK >> OFFSET) and read the low bits (FIRIDIV)
		 * instead of the 48MDIV field.
		 */
		div = (__raw_readl(CCM_PCDR0) & CCM_PCDR0_48MDIV_MASK)
			>> CCM_PCDR0_48MDIV_OFFSET;
	div++;		/* field encodes divider - 1 */

	return parent_rate / div;
}
static struct clk clko_clk;
/*
 * Select which internal clock is routed to the CLKO pin by programming
 * the 5-bit CLKOSEL field of CCM_CCSR.  The selector values below are
 * hardware-specific — presumably per the i.MX21 reference manual CCSR
 * description; verify there.  Unknown parents return -EINVAL.
 */
static int _clk_clko_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(CCM_CCSR) & ~CCM_CCSR_CLKOSEL_MASK;
	if (parent == &ckil_clk)
		reg |= 0 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &fpm_clk)
		reg |= 1 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &ckih_clk)
		reg |= 2 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == mpll_clk.parent)	/* MPLL reference input */
		reg |= 3 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == spll_clk.parent)	/* SPLL reference input */
		reg |= 4 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &mpll_clk)
		reg |= 5 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &spll_clk)
		reg |= 6 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &fclk_clk)
		reg |= 7 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &hclk_clk)
		reg |= 8 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &ipg_clk)
		reg |= 9 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &per_clk[0])
		reg |= 0xA << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &per_clk[1])
		reg |= 0xB << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &per_clk[2])
		reg |= 0xC << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &per_clk[3])
		reg |= 0xD << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &ssi_clk[0])
		reg |= 0xE << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &ssi_clk[1])
		reg |= 0xF << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &nfc_clk)
		reg |= 0x10 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &usb_clk[0])
		reg |= 0x14 << CCM_CCSR_CLKOSEL_OFFSET;
	else if (parent == &clko_clk)
		reg |= 0x15 << CCM_CCSR_CLKOSEL_OFFSET;
	else
		return -EINVAL;
	__raw_writel(reg, CCM_CCSR);
	return 0;
}
/* CLKO pin output clock: rate and parent are runtime-programmable. */
static struct clk clko_clk = {
	.get_rate = _clk_clko_recalc,
	.set_rate = _clk_clko_set_rate,
	.round_rate = _clk_clko_round_rate,
	.set_parent = _clk_clko_set_parent,
};
#define _REGISTER_CLOCK(d, n, c) \
{ \
.dev_id = d, \
.con_id = n, \
.clk = &c, \
},
static struct clk_lookup lookups[] = {
	/* It's unlikely that any driver wants one of them directly:
	_REGISTER_CLOCK(NULL, "ckih", ckih_clk)
	_REGISTER_CLOCK(NULL, "ckil", ckil_clk)
	_REGISTER_CLOCK(NULL, "fpm", fpm_clk)
	_REGISTER_CLOCK(NULL, "mpll", mpll_clk)
	_REGISTER_CLOCK(NULL, "spll", spll_clk)
	_REGISTER_CLOCK(NULL, "fclk", fclk_clk)
	_REGISTER_CLOCK(NULL, "hclk", hclk_clk)
	_REGISTER_CLOCK(NULL, "ipg", ipg_clk)
	*/
	_REGISTER_CLOCK(NULL, "perclk1", per_clk[0])
	_REGISTER_CLOCK(NULL, "perclk2", per_clk[1])
	_REGISTER_CLOCK(NULL, "perclk3", per_clk[2])
	_REGISTER_CLOCK(NULL, "perclk4", per_clk[3])
	_REGISTER_CLOCK(NULL, "clko", clko_clk)
	_REGISTER_CLOCK("imx-uart.0", NULL, uart_clk[0])
	_REGISTER_CLOCK("imx-uart.1", NULL, uart_clk[1])
	_REGISTER_CLOCK("imx-uart.2", NULL, uart_clk[2])
	_REGISTER_CLOCK("imx-uart.3", NULL, uart_clk[3])
	/*
	 * Fixed: all three GPT clocks were registered under the con_id
	 * "gpt1"; the second and third entries were unreachable
	 * duplicates (clk_get(NULL, "gpt1") returns the first match).
	 */
	_REGISTER_CLOCK(NULL, "gpt1", gpt_clk[0])
	_REGISTER_CLOCK(NULL, "gpt2", gpt_clk[1])
	_REGISTER_CLOCK(NULL, "gpt3", gpt_clk[2])
	_REGISTER_CLOCK(NULL, "pwm", pwm_clk[0])
	_REGISTER_CLOCK(NULL, "sdhc1", sdhc_clk[0])
	_REGISTER_CLOCK(NULL, "sdhc2", sdhc_clk[1])
	_REGISTER_CLOCK("imx21-cspi.0", NULL, cspi_clk[0])
	_REGISTER_CLOCK("imx21-cspi.1", NULL, cspi_clk[1])
	_REGISTER_CLOCK("imx21-cspi.2", NULL, cspi_clk[2])
	_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk[0])
	_REGISTER_CLOCK(NULL, "csi", csi_clk[0])
	_REGISTER_CLOCK("imx21-hcd.0", NULL, usb_clk[0])
	_REGISTER_CLOCK(NULL, "ssi1", ssi_clk[0])
	_REGISTER_CLOCK(NULL, "ssi2", ssi_clk[1])
	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
	_REGISTER_CLOCK(NULL, "dma", dma_clk[0])
	_REGISTER_CLOCK(NULL, "brom", brom_clk)
	_REGISTER_CLOCK(NULL, "emma", emma_clk[0])
	_REGISTER_CLOCK(NULL, "slcdc", slcdc_clk[0])
	_REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
	_REGISTER_CLOCK(NULL, "gpio", gpio_clk)
	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
	_REGISTER_CLOCK("mxc-keypad", NULL, kpp_clk)
	_REGISTER_CLOCK(NULL, "owire", owire_clk)
	_REGISTER_CLOCK(NULL, "rtc", rtc_clk)
};
/*
 * Early clock-tree initialization.  Must be called very early, before
 * the timer framework starts, so the timer can query clock rates.
 *
 * @lref: rate of the external low reference (CKIL, nominally 32.768 kHz)
 * @href: rate of the external high reference (CKIH, nominally 26 MHz)
 */
int __init mx21_clocks_init(unsigned long lref, unsigned long href)
{
	u32 cscr;
	external_low_reference = lref;
	external_high_reference = href;
	/* detect clock reference for both system PLLs */
	cscr = CSCR();
	if (cscr & CCM_CSCR_MCU)
		mpll_clk.parent = &ckih_clk;
	else
		mpll_clk.parent = &fpm_clk;
	if (cscr & CCM_CSCR_SP)
		spll_clk.parent = &ckih_clk;
	else
		spll_clk.parent = &fpm_clk;
	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
	/* Turn off all clock gates (only GPT1 stays on, in PCCR1) */
	__raw_writel(0, CCM_PCCR0);
	__raw_writel(CCM_PCCR_GPT1_MASK, CCM_PCCR1);
	/* This turns off the serial PLL as well */
	spll_clk.disable(&spll_clk);
	/* This will propagate to all children and init all the clock rates. */
	clk_enable(&per_clk[0]);
	clk_enable(&gpio_clk);
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
	/* Keep the low-level debug UART clocked */
	clk_enable(&uart_clk[0]);
#endif
	mxc_timer_init(&gpt_clk[0], MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
			MX21_INT_GPT1);
	return 0;
}
| gpl-2.0 |
drod2169/KernelSanders-OMAP | drivers/media/rc/keymaps/rc-tivo.c | 2792 | 2873 | /* rc-tivo.c - Keytable for TiVo remotes
*
* Copyright (c) 2011 by Jarod Wilson <jarod@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
/*
* Initial mapping is for the TiVo remote included in the Nero LiquidTV bundle,
* which also ships with a TiVo-branded IR transceiver, supported by the mceusb
* driver. Note that the remote uses an NEC-ish protocol, but instead of having
* a command/not_command pair, it has a vendor ID of 0xa10c, but some keys, the
* NEC extended checksums do pass, so the table presently has the intended
* values and the checksum-passed versions for those keys.
*/
/*
 * Scancode -> keycode table.  Entries with vendor prefix 0xa10c are the
 * intended TiVo codes; the 0x0085xxxx entries are the NEC-extended
 * checksum variants mentioned in the comment above.
 */
static struct rc_map_table tivo[] = {
	{ 0xa10c900f, KEY_MEDIA },	/* TiVo Button */
	{ 0xa10c0807, KEY_POWER2 },	/* TV Power */
	{ 0xa10c8807, KEY_TV },		/* Live TV/Swap */
	{ 0xa10c2c03, KEY_VIDEO_NEXT },	/* TV Input */
	{ 0xa10cc807, KEY_INFO },
	{ 0xa10cfa05, KEY_CYCLEWINDOWS }, /* Window */
	{ 0x0085305f, KEY_CYCLEWINDOWS }, /* NEC-extended checksum variant */
	{ 0xa10c6c03, KEY_EPG },	/* Guide */
	{ 0xa10c2807, KEY_UP },
	{ 0xa10c6807, KEY_DOWN },
	{ 0xa10ce807, KEY_LEFT },
	{ 0xa10ca807, KEY_RIGHT },
	{ 0xa10c1807, KEY_SCROLLDOWN },	/* Red Thumbs Down */
	{ 0xa10c9807, KEY_SELECT },
	{ 0xa10c5807, KEY_SCROLLUP },	/* Green Thumbs Up */
	{ 0xa10c3807, KEY_VOLUMEUP },
	{ 0xa10cb807, KEY_VOLUMEDOWN },
	{ 0xa10cd807, KEY_MUTE },
	{ 0xa10c040b, KEY_RECORD },
	{ 0xa10c7807, KEY_CHANNELUP },
	{ 0xa10cf807, KEY_CHANNELDOWN },
	{ 0x0085301f, KEY_CHANNELDOWN }, /* NEC-extended checksum variant */
	{ 0xa10c840b, KEY_PLAY },
	{ 0xa10cc40b, KEY_PAUSE },
	{ 0xa10ca40b, KEY_SLOW },
	{ 0xa10c440b, KEY_REWIND },
	{ 0xa10c240b, KEY_FASTFORWARD },
	{ 0xa10c640b, KEY_PREVIOUS },
	{ 0xa10ce40b, KEY_NEXT },	/* ->| */
	{ 0xa10c220d, KEY_ZOOM },	/* Aspect */
	{ 0xa10c120d, KEY_STOP },
	{ 0xa10c520d, KEY_DVD },	/* DVD Menu */
	{ 0xa10c140b, KEY_NUMERIC_1 },
	{ 0xa10c940b, KEY_NUMERIC_2 },
	{ 0xa10c540b, KEY_NUMERIC_3 },
	{ 0xa10cd40b, KEY_NUMERIC_4 },
	{ 0xa10c340b, KEY_NUMERIC_5 },
	{ 0xa10cb40b, KEY_NUMERIC_6 },
	{ 0xa10c740b, KEY_NUMERIC_7 },
	{ 0xa10cf40b, KEY_NUMERIC_8 },
	{ 0x0085302f, KEY_NUMERIC_8 },	/* NEC-extended checksum variant */
	{ 0xa10c0c03, KEY_NUMERIC_9 },
	{ 0xa10c8c03, KEY_NUMERIC_0 },
	{ 0xa10ccc03, KEY_ENTER },
	{ 0xa10c4c03, KEY_CLEAR },
};
/* rc-core registration descriptor wrapping the table above. */
static struct rc_map_list tivo_map = {
	.map = {
		.scan    = tivo,
		.size    = ARRAY_SIZE(tivo),
		.rc_type = RC_TYPE_NEC,	/* remote speaks an NEC-ish protocol */
		.name    = RC_MAP_TIVO,
	}
};
/* Register the TiVo keymap with rc-core at module load. */
static int __init init_rc_map_tivo(void)
{
	return rc_map_register(&tivo_map);
}

/* Unregister the keymap at module unload. */
static void __exit exit_rc_map_tivo(void)
{
	rc_map_unregister(&tivo_map);
}

module_init(init_rc_map_tivo)
module_exit(exit_rc_map_tivo)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
impl/rbppc-linux | arch/arm/kernel/atags_compat.c | 3304 | 6654 | /*
* linux/arch/arm/kernel/atags_compat.c
*
* Copyright (C) 2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* We keep the old params compatibility cruft in one place (here)
* so we don't end up with lots of mess around other places.
*
* NOTE:
* The old struct param_struct is deprecated, but it will be kept in
* the kernel for 5 years from now (2001). This will allow boot loaders
* to convert to the new struct tag way.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/page.h>
#include <asm/mach/arch.h>
#include "atags.h"
/*
* Usage:
* - do not go blindly adding fields, add them at the end
* - when adding fields, don't rely on the address until
* a patch from me has been released
* - unused fields should be zero (for future expansion)
* - this structure is relatively short-lived - only
* guaranteed to contain useful data in setup_arch()
*
* This is the old deprecated way to pass parameters to the kernel
*/
/*
 * Legacy boot parameter block.  The field offsets (noted in the trailing
 * comments) form a fixed ABI shared with old boot loaders — never reorder
 * or resize anything in here.
 */
struct param_struct {
	union {
		struct {
			unsigned long page_size;	/* 0 */
			unsigned long nr_pages;	/* 4 */
			unsigned long ramdisk_size;	/* 8 */
			unsigned long flags;	/* 12 */
#define FLAG_READONLY	1
#define FLAG_RDLOAD	4
#define FLAG_RDPROMPT	8
			unsigned long rootdev;	/* 16 */
			unsigned long video_num_cols;	/* 20 */
			unsigned long video_num_rows;	/* 24 */
			unsigned long video_x;	/* 28 */
			unsigned long video_y;	/* 32 */
			unsigned long memc_control_reg;	/* 36 */
			unsigned char sounddefault;	/* 40 */
			unsigned char adfsdrives;	/* 41 */
			unsigned char bytes_per_char_h;	/* 42 */
			unsigned char bytes_per_char_v;	/* 43 */
			unsigned long pages_in_bank[4];	/* 44 */
			unsigned long pages_in_vram;	/* 60 */
			unsigned long initrd_start;	/* 64 */
			unsigned long initrd_size;	/* 68 */
			unsigned long rd_start;	/* 72 */
			unsigned long system_rev;	/* 76 */
			unsigned long system_serial_low;	/* 80 */
			unsigned long system_serial_high;	/* 84 */
			unsigned long mem_fclk_21285;	/* 88 */
		} s;
		char unused[256];
	} u1;
	union {
		char paths[8][128];
		struct {
			unsigned long magic;
			char n[1024 - sizeof(unsigned long)];
		} s;
	} u2;
	/* kernel command line, copied verbatim into ATAG_CMDLINE */
	char commandline[COMMAND_LINE_SIZE];
};
/*
 * Append an ATAG_MEM entry describing one memory bank (@start, @size)
 * after @tag and return a pointer to the newly written tag.
 */
static struct tag * __init memtag(struct tag *tag, unsigned long start, unsigned long size)
{
	struct tag *t = tag_next(tag);

	t->hdr.tag = ATAG_MEM;
	t->hdr.size = tag_size(tag_mem32);
	t->u.mem.start = start;
	t->u.mem.size = size;
	return t;
}
/*
 * Translate the legacy param_struct into an ATAG list.  The list is
 * assembled in @taglist (which points into params->u2, i.e. the same
 * underlying memory page) and finally memmove()d down over the start of
 * the page so the generic ATAG parser finds it in the usual place.
 */
static void __init build_tag_list(struct param_struct *params, void *taglist)
{
	struct tag *tag = taglist;
	/* sanity check: a mismatched page size means the block is garbage */
	if (params->u1.s.page_size != PAGE_SIZE) {
		printk(KERN_WARNING "Warning: bad configuration page, "
		       "trying to continue\n");
		return;
	}
	printk(KERN_DEBUG "Converting old-style param struct to taglist\n");
#ifdef CONFIG_ARCH_NETWINDER
	/* NeTTrom only ever reports one of these page counts; anything else
	 * means its parameter block is corrupt, so substitute safe defaults */
	if (params->u1.s.nr_pages != 0x02000 &&
	    params->u1.s.nr_pages != 0x04000 &&
	    params->u1.s.nr_pages != 0x08000 &&
	    params->u1.s.nr_pages != 0x10000) {
		printk(KERN_WARNING "Warning: bad NeTTrom parameters "
		       "detected, using defaults\n");
		params->u1.s.nr_pages = 0x1000;	/* 16MB */
		params->u1.s.ramdisk_size = 0;
		params->u1.s.flags = FLAG_READONLY;
		params->u1.s.initrd_start = 0;
		params->u1.s.initrd_size = 0;
		params->u1.s.rd_start = 0;
	}
#endif
	/* ATAG_CORE must be the first tag in the list */
	tag->hdr.tag = ATAG_CORE;
	tag->hdr.size = tag_size(tag_core);
	tag->u.core.flags = params->u1.s.flags & FLAG_READONLY;
	tag->u.core.pagesize = params->u1.s.page_size;
	tag->u.core.rootdev = params->u1.s.rootdev;
	tag = tag_next(tag);
	tag->hdr.tag = ATAG_RAMDISK;
	tag->hdr.size = tag_size(tag_ramdisk);
	/* map the old load/prompt flag bits onto the ATAG flag encoding */
	tag->u.ramdisk.flags = (params->u1.s.flags & FLAG_RDLOAD ? 1 : 0) |
	    (params->u1.s.flags & FLAG_RDPROMPT ? 2 : 0);
	tag->u.ramdisk.size = params->u1.s.ramdisk_size;
	tag->u.ramdisk.start = params->u1.s.rd_start;
	tag = tag_next(tag);
	tag->hdr.tag = ATAG_INITRD;
	tag->hdr.size = tag_size(tag_initrd);
	tag->u.initrd.start = params->u1.s.initrd_start;
	tag->u.initrd.size = params->u1.s.initrd_size;
	tag = tag_next(tag);
	tag->hdr.tag = ATAG_SERIAL;
	tag->hdr.size = tag_size(tag_serialnr);
	tag->u.serialnr.low = params->u1.s.system_serial_low;
	tag->u.serialnr.high = params->u1.s.system_serial_high;
	tag = tag_next(tag);
	tag->hdr.tag = ATAG_REVISION;
	tag->hdr.size = tag_size(tag_revision);
	tag->u.revision.rev = params->u1.s.system_rev;
#ifdef CONFIG_ARCH_ACORN
	if (machine_is_riscpc()) {
		int i;
		/* RiscPC: one ATAG_MEM per 64MB (1 << 26) memory bank */
		for (i = 0; i < 4; i++)
			tag = memtag(tag, PHYS_OFFSET + (i << 26),
				     params->u1.s.pages_in_bank[i] * PAGE_SIZE);
	} else
#endif
		tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE);
#ifdef CONFIG_FOOTBRIDGE
	/* only emit the memory-clock tag if the loader supplied a value */
	if (params->u1.s.mem_fclk_21285) {
		tag = tag_next(tag);
		tag->hdr.tag = ATAG_MEMCLK;
		tag->hdr.size = tag_size(tag_memclk);
		tag->u.memclk.fmemclk = params->u1.s.mem_fclk_21285;
	}
#endif
#ifdef CONFIG_ARCH_EBSA285
	if (machine_is_ebsa285()) {
		tag = tag_next(tag);
		tag->hdr.tag = ATAG_VIDEOTEXT;
		tag->hdr.size = tag_size(tag_videotext);
		tag->u.videotext.x = params->u1.s.video_x;
		tag->u.videotext.y = params->u1.s.video_y;
		tag->u.videotext.video_page = 0;
		tag->u.videotext.video_mode = 0;
		tag->u.videotext.video_cols = params->u1.s.video_num_cols;
		tag->u.videotext.video_ega_bx = 0;
		tag->u.videotext.video_lines = params->u1.s.video_num_rows;
		tag->u.videotext.video_isvga = 1;
		tag->u.videotext.video_points = 8;
	}
#endif
#ifdef CONFIG_ARCH_ACORN
	tag = tag_next(tag);
	tag->hdr.tag = ATAG_ACORN;
	tag->hdr.size = tag_size(tag_acorn);
	tag->u.acorn.memc_control_reg = params->u1.s.memc_control_reg;
	tag->u.acorn.vram_pages = params->u1.s.pages_in_vram;
	tag->u.acorn.sounddefault = params->u1.s.sounddefault;
	tag->u.acorn.adfsdrives = params->u1.s.adfsdrives;
#endif
	tag = tag_next(tag);
	tag->hdr.tag = ATAG_CMDLINE;
	/* tag size is in 32-bit words: string + NUL + header, rounded up */
	tag->hdr.size = (strlen(params->commandline) + 3 +
			 sizeof(struct tag_header)) >> 2;
	strcpy(tag->u.cmdline.cmdline, params->commandline);
	tag = tag_next(tag);
	/* ATAG_NONE terminates the list */
	tag->hdr.tag = ATAG_NONE;
	tag->hdr.size = 0;
	/* slide the finished list down over the old param_struct */
	memmove(params, taglist, ((int)tag) - ((int)taglist) +
	 sizeof(struct tag_header));
}
/*
 * Entry point: the old param_struct and the new tag list occupy the same
 * memory.  Build the list in the otherwise-unused u2 union so that
 * build_tag_list() can finally memmove() it over the start of @tags.
 *
 * Fix: the second argument had been corrupted by an HTML-entity mangling
 * ("&para;ms" rendered as "¶ms"); restore the intended &params->u2.
 */
void __init convert_to_tag_list(struct tag *tags)
{
	struct param_struct *params = (struct param_struct *)tags;

	build_tag_list(params, &params->u2);
}
| gpl-2.0 |
wrxtasy/linux | arch/arm/mach-omap2/clock34xx.c | 3816 | 4808 | /*
* OMAP3-specific clock framework functions
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
* Copyright (C) 2007-2011 Nokia Corporation
*
* Paul Walmsley
* Jouni Högander
*
* Parts of this code are based on code written by
* Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu,
* Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include "clock.h"
#include "clock34xx.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
/**
* omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
* from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk,
					    void __iomem **idlest_reg,
					    u8 *idlest_bit,
					    u8 *idlest_val)
{
	u32 base = (__force u32)clk->enable_reg;

	/* CM_IDLEST sits at offset 0x20 of the CLKEN register's CM module */
	*idlest_reg = (__force void __iomem *)((base & ~0xf0) | 0x20);
	*idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* SSI functional clock: default companion lookup, ES2-specific IDLEST bit */
const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait = {
	.find_idlest	= omap3430es2_clk_ssi_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
/* SSI interface clock: same IDLEST handling plus iclk autoidle control */
const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = {
	.allow_idle	= omap2_clkt_iclk_allow_idle,
	.deny_idle	= omap2_clkt_iclk_deny_idle,
	.find_idlest	= omap3430es2_clk_ssi_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
/**
* omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* Some OMAP modules on OMAP3 ES2+ chips have both initiator and
* target IDLEST bits. For our purposes, we are concerned with the
* target IDLEST bits, which exist at a different bit position than
* the *CLKEN bit position for these modules (DSS and USBHOST) (The
* default find_idlest code assumes that they are at the same
* position.) No return value.
*/
static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk,
						    void __iomem **idlest_reg,
						    u8 *idlest_bit,
						    u8 *idlest_val)
{
	u32 base = (__force u32)clk->enable_reg;

	/* CM_IDLEST sits at offset 0x20 of the CLKEN register's CM module */
	*idlest_reg = (__force void __iomem *)((base & ~0xf0) | 0x20);
	/* USBHOST_IDLE shares this shift with DSS_IDLE */
	*idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* DSS/USBHOST functional clocks: target-IDLEST-aware wait */
const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait = {
	.find_idlest	= omap3430es2_clk_dss_usbhost_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
/* DSS/USBHOST interface clocks: add iclk autoidle control */
const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = {
	.allow_idle	= omap2_clkt_iclk_allow_idle,
	.deny_idle	= omap2_clkt_iclk_deny_idle,
	.find_idlest	= omap3430es2_clk_dss_usbhost_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
/**
* omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
* shift from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk,
						 void __iomem **idlest_reg,
						 u8 *idlest_bit,
						 u8 *idlest_val)
{
	u32 base = (__force u32)clk->enable_reg;

	/* CM_IDLEST sits at offset 0x20 of the CLKEN register's CM module */
	*idlest_reg = (__force void __iomem *)((base & ~0xf0) | 0x20);
	*idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* HSOTGUSB interface clock: iclk autoidle plus ES2-specific IDLEST bit */
const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = {
	.allow_idle	= omap2_clkt_iclk_allow_idle,
	.deny_idle	= omap2_clkt_iclk_deny_idle,
	.find_idlest	= omap3430es2_clk_hsotgusb_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
/* HSOTGUSB functional clock: same IDLEST lookup, no autoidle hooks */
const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait = {
	.find_idlest	= omap3430es2_clk_hsotgusb_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
| gpl-2.0 |
thomhastings/linux-3.17.1-parrot | arch/arm/mach-omap2/clock3517.c | 3816 | 4170 | /*
* OMAP3517/3505-specific clock framework functions
*
* Copyright (C) 2010 Texas Instruments, Inc.
* Copyright (C) 2011 Nokia Corporation
*
* Ranjith Lohithakshan
* Paul Walmsley
*
* Parts of this code are based on code written by
* Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu,
* Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include "clock.h"
#include "clock3517.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
/*
* In AM35xx IPSS, the {ICK,FCK} enable bits for modules are exported
* in the same register at a bit offset of 0x8. The EN_ACK for ICK is
* at an offset of 4 from ICK enable bit.
*/
#define AM35XX_IPSS_ICK_MASK 0xF
#define AM35XX_IPSS_ICK_EN_ACK_OFFSET 0x4
#define AM35XX_IPSS_ICK_FCK_OFFSET 0x8
#define AM35XX_IPSS_CLK_IDLEST_VAL 0
/**
* am35xx_clk_find_idlest - return clock ACK info for AM35XX IPSS
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* The interface clocks on AM35xx IPSS reflects the clock idle status
* in the enable register itsel at a bit offset of 4 from the enable
* bit. A value of 1 indicates that clock is enabled.
*/
static void am35xx_clk_find_idlest(struct clk_hw_omap *clk,
				   void __iomem **idlest_reg,
				   u8 *idlest_bit,
				   u8 *idlest_val)
{
	/* the ACK bit lives in the enable register itself, 4 bits up */
	*idlest_val = AM35XX_IPSS_CLK_IDLEST_VAL;
	*idlest_bit = clk->enable_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET;
	*idlest_reg = (__force void __iomem *)(clk->enable_reg);
}
/**
* am35xx_clk_find_companion - find companion clock to @clk
* @clk: struct clk * to find the companion clock of
* @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
* @other_bit: u8 ** to return the companion clock bit shift in
*
* Some clocks don't have companion clocks. For example, modules with
* only an interface clock (such as HECC) don't have a companion
* clock. Right now, this code relies on the hardware exporting a bit
* in the correct companion register that indicates that the
* nonexistent 'companion clock' is active. Future patches will
* associate this type of code with per-module data structures to
* avoid this issue, and remove the casts. No return value.
*/
static void am35xx_clk_find_companion(struct clk_hw_omap *clk,
				      void __iomem **other_reg,
				      u8 *other_bit)
{
	u32 is_ick = clk->enable_bit & AM35XX_IPSS_ICK_MASK;

	/* ICK and FCK enable bits share one register, 0x8 bits apart */
	*other_reg = (__force void __iomem *)(clk->enable_reg);
	if (is_ick)
		*other_bit = clk->enable_bit + AM35XX_IPSS_ICK_FCK_OFFSET;
	else
		*other_bit = clk->enable_bit - AM35XX_IPSS_ICK_FCK_OFFSET;
}
/* AM35xx IPSS module clocks: in-register ACK bit, ICK<->FCK companion */
const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait = {
	.find_idlest	= am35xx_clk_find_idlest,
	.find_companion	= am35xx_clk_find_companion,
};
/**
* am35xx_clk_ipss_find_idlest - return CM_IDLEST info for IPSS
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* The IPSS target CM_IDLEST bit is at a different shift from the
* CM_{I,F}CLKEN bit. Pass back the correct info via @idlest_reg
* and @idlest_bit. No return value.
*/
static void am35xx_clk_ipss_find_idlest(struct clk_hw_omap *clk,
					void __iomem **idlest_reg,
					u8 *idlest_bit,
					u8 *idlest_val)
{
	u32 base = (__force u32)clk->enable_reg;

	/* CM_IDLEST sits at offset 0x20 of the CLKEN register's CM module */
	*idlest_reg = (__force void __iomem *)((base & ~0xf0) | 0x20);
	*idlest_bit = AM35XX_ST_IPSS_SHIFT;
	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* AM35xx IPSS top-level clock: iclk autoidle plus IPSS IDLEST lookup */
const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait = {
	.allow_idle	= omap2_clkt_iclk_allow_idle,
	.deny_idle	= omap2_clkt_iclk_deny_idle,
	.find_idlest	= am35xx_clk_ipss_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
| gpl-2.0 |
di11igaf/flounder-kernel | arch/arm/mach-imx/3ds_debugboard.c | 3816 | 5755 | /*
* Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2010 Jason Wang <jason77.wang@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/smsc911x.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
#include "hardware.h"
/* LAN9217 ethernet base address */
#define LAN9217_BASE_ADDR(n) (n + 0x0)
/* External UART */
#define UARTA_BASE_ADDR(n) (n + 0x8000)
#define UARTB_BASE_ADDR(n) (n + 0x10000)
#define BOARD_IO_ADDR(n) (n + 0x20000)
/* LED switchs */
#define LED_SWITCH_REG 0x00
/* buttons */
#define SWITCH_BUTTONS_REG 0x08
/* status, interrupt */
#define INTR_STATUS_REG 0x10
#define INTR_MASK_REG 0x38
#define INTR_RESET_REG 0x20
/* magic word for debug CPLD */
#define MAGIC_NUMBER1_REG 0x40
#define MAGIC_NUMBER2_REG 0x48
/* CPLD code version */
#define CPLD_CODE_VER_REG 0x50
/* magic word for debug CPLD */
#define MAGIC_NUMBER3_REG 0x58
/* module reset register*/
#define MODULE_RESET_REG 0x60
/* CPU ID and Personality ID */
#define MCU_BOARD_ID_REG 0x68
#define MXC_MAX_EXP_IO_LINES 16
/* interrupts like external uart , external ethernet etc*/
#define EXPIO_INT_ENET 0
#define EXPIO_INT_XUART_A 1
#define EXPIO_INT_XUART_B 2
#define EXPIO_INT_BUTTON_A 3
#define EXPIO_INT_BUTTON_B 4
static void __iomem *brd_io;	/* mapped CPLD register window */
static struct irq_domain *domain;	/* legacy domain for the 16 expio lines */
/* MEM/IRQ resources for the LAN9217; start/end are filled in at probe */
static struct resource smsc911x_resources[] = {
	{
		.flags = IORESOURCE_MEM,
	} , {
		.flags = IORESOURCE_IRQ,
	},
};
static struct smsc911x_platform_config smsc911x_config = {
	.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.flags = SMSC911X_USE_32BIT | SMSC911X_FORCE_INTERNAL_PHY,
};
/* platform device for the on-board SMSC LAN9217 ethernet controller */
static struct platform_device smsc_lan9217_device = {
	.name = "smsc911x",
	.id = -1,
	.dev = {
		.platform_data = &smsc911x_config,
	},
	.num_resources = ARRAY_SIZE(smsc911x_resources),
	.resource = smsc911x_resources,
};
/*
 * Chained handler for the debug-board CPLD interrupt line: mask the
 * parent GPIO irq, demultiplex every pending unmasked CPLD source to its
 * mapped linux irq, then ack and unmask the parent.
 */
static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc)
{
	u32 imr_val;
	u32 int_valid;
	u32 expio_irq;
	/* irq = gpio irq number */
	desc->irq_data.chip->irq_mask(&desc->irq_data);
	/* pending = status bits whose mask bit is clear in the IMR */
	imr_val = __raw_readw(brd_io + INTR_MASK_REG);
	int_valid = __raw_readw(brd_io + INTR_STATUS_REG) & ~imr_val;
	expio_irq = 0;
	for (; int_valid != 0; int_valid >>= 1, expio_irq++) {
		if ((int_valid & 1) == 0)
			continue;
		generic_handle_irq(irq_find_mapping(domain, expio_irq));
	}
	desc->irq_data.chip->irq_ack(&desc->irq_data);
	desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
/*
* Disable an expio pin's interrupt by setting the bit in the imr.
* Irq is an expio virtual irq number
*/
/*
 * Disable an expio line: setting its bit in the CPLD interrupt mask
 * register (IMR) suppresses the interrupt.
 */
static void expio_mask_irq(struct irq_data *d)
{
	u16 mask = __raw_readw(brd_io + INTR_MASK_REG);

	mask |= 1 << d->hwirq;
	__raw_writew(mask, brd_io + INTR_MASK_REG);
}
/* Acknowledge an expio line: pulse its bit in the reset register (write
 * 1 then 0) to clear the latched status, then leave the line masked. */
static void expio_ack_irq(struct irq_data *d)
{
	u32 expio = d->hwirq;
	__raw_writew(1 << expio, brd_io + INTR_RESET_REG);
	__raw_writew(0, brd_io + INTR_RESET_REG);
	expio_mask_irq(d);
}
/*
 * Re-enable an expio line: clearing its bit in the CPLD interrupt mask
 * register (IMR) lets the interrupt through again.
 */
static void expio_unmask_irq(struct irq_data *d)
{
	u16 mask = __raw_readw(brd_io + INTR_MASK_REG);

	mask &= ~(1 << d->hwirq);
	__raw_writew(mask, brd_io + INTR_MASK_REG);
}
/* irq_chip driving the CPLD mask/reset registers for the expio lines */
static struct irq_chip expio_irq_chip = {
	.irq_ack = expio_ack_irq,
	.irq_mask = expio_mask_irq,
	.irq_unmask = expio_unmask_irq,
};
/* fixed dummy regulators so the smsc911x driver's supply lookups resolve */
static struct regulator_consumer_supply dummy_supplies[] = {
	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
	REGULATOR_SUPPLY("vddvario", "smsc911x"),
};
/*
 * Probe and initialise the 3-Stack debug board at physical address @base.
 * Verifies the CPLD magic-number registers, sets up a legacy irq domain
 * demultiplexed off the GPIO line @intr_gpio, and registers the on-board
 * SMSC LAN9217.  Returns 0 on success or a negative errno.
 */
int __init mxc_expio_init(u32 base, u32 intr_gpio)
{
	u32 p_irq = gpio_to_irq(intr_gpio);
	int irq_base;
	int i;
	brd_io = ioremap(BOARD_IO_ADDR(base), SZ_4K);
	if (brd_io == NULL)
		return -ENOMEM;
	/* the CPLD identifies itself via three fixed magic words */
	if ((__raw_readw(brd_io + MAGIC_NUMBER1_REG) != 0xAAAA) ||
	    (__raw_readw(brd_io + MAGIC_NUMBER2_REG) != 0x5555) ||
	    (__raw_readw(brd_io + MAGIC_NUMBER3_REG) != 0xCAFE)) {
		pr_info("3-Stack Debug board not detected\n");
		iounmap(brd_io);
		brd_io = NULL;
		return -ENODEV;
	}
	pr_info("3-Stack Debug board detected, rev = 0x%04X\n",
			readw(brd_io + CPLD_CODE_VER_REG));
	/*
	 * Configure INT line as GPIO input
	 */
	/* NOTE(review): gpio_request() result is ignored — confirm the line
	 * cannot already be claimed on these boards */
	gpio_request(intr_gpio, "expio_pirq");
	gpio_direction_input(intr_gpio);
	/* disable the interrupt and clear the status */
	__raw_writew(0, brd_io + INTR_MASK_REG);
	__raw_writew(0xFFFF, brd_io + INTR_RESET_REG);
	__raw_writew(0, brd_io + INTR_RESET_REG);
	__raw_writew(0x1F, brd_io + INTR_MASK_REG);
	irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id());
	WARN_ON(irq_base < 0);
	/* 1:1 legacy mapping: hwirq n -> linux irq irq_base + n */
	domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0,
				       &irq_domain_simple_ops, NULL);
	WARN_ON(!domain);
	for (i = irq_base; i < irq_base + MXC_MAX_EXP_IO_LINES; i++) {
		irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq);
		set_irq_flags(i, IRQF_VALID);
	}
	irq_set_irq_type(p_irq, IRQF_TRIGGER_LOW);
	irq_set_chained_handler(p_irq, mxc_expio_irq_handler);
	/* Register Lan device on the debugboard */
	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
	smsc911x_resources[0].start = LAN9217_BASE_ADDR(base);
	smsc911x_resources[0].end = LAN9217_BASE_ADDR(base) + 0x100 - 1;
	smsc911x_resources[1].start = irq_find_mapping(domain, EXPIO_INT_ENET);
	smsc911x_resources[1].end = irq_find_mapping(domain, EXPIO_INT_ENET);
	platform_device_register(&smsc_lan9217_device);
	return 0;
}
| gpl-2.0 |
LeonNardella/philips-pta-01 | arch/arm/mach-omap2/clock34xx.c | 3816 | 4808 | /*
* OMAP3-specific clock framework functions
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
* Copyright (C) 2007-2011 Nokia Corporation
*
* Paul Walmsley
* Jouni Högander
*
* Parts of this code are based on code written by
* Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu,
* Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include "clock.h"
#include "clock34xx.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
/**
* omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
* from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk,
					    void __iomem **idlest_reg,
					    u8 *idlest_bit,
					    u8 *idlest_val)
{
	u32 base = (__force u32)clk->enable_reg;

	/* CM_IDLEST sits at offset 0x20 of the CLKEN register's CM module */
	*idlest_reg = (__force void __iomem *)((base & ~0xf0) | 0x20);
	*idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* SSI functional clock: default companion lookup, ES2-specific IDLEST bit */
const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait = {
	.find_idlest	= omap3430es2_clk_ssi_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
/* SSI interface clock: same IDLEST handling plus iclk autoidle control */
const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = {
	.allow_idle	= omap2_clkt_iclk_allow_idle,
	.deny_idle	= omap2_clkt_iclk_deny_idle,
	.find_idlest	= omap3430es2_clk_ssi_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
/**
* omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* Some OMAP modules on OMAP3 ES2+ chips have both initiator and
* target IDLEST bits. For our purposes, we are concerned with the
* target IDLEST bits, which exist at a different bit position than
* the *CLKEN bit position for these modules (DSS and USBHOST) (The
* default find_idlest code assumes that they are at the same
* position.) No return value.
*/
static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk,
						    void __iomem **idlest_reg,
						    u8 *idlest_bit,
						    u8 *idlest_val)
{
	u32 base = (__force u32)clk->enable_reg;

	/* CM_IDLEST sits at offset 0x20 of the CLKEN register's CM module */
	*idlest_reg = (__force void __iomem *)((base & ~0xf0) | 0x20);
	/* USBHOST_IDLE shares this shift with DSS_IDLE */
	*idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* DSS/USBHOST functional clocks: target-IDLEST-aware wait */
const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait = {
	.find_idlest	= omap3430es2_clk_dss_usbhost_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
/* DSS/USBHOST interface clocks: add iclk autoidle control */
const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = {
	.allow_idle	= omap2_clkt_iclk_allow_idle,
	.deny_idle	= omap2_clkt_iclk_deny_idle,
	.find_idlest	= omap3430es2_clk_dss_usbhost_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
/**
* omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
* shift from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk,
						 void __iomem **idlest_reg,
						 u8 *idlest_bit,
						 u8 *idlest_val)
{
	u32 base = (__force u32)clk->enable_reg;

	/* CM_IDLEST sits at offset 0x20 of the CLKEN register's CM module */
	*idlest_reg = (__force void __iomem *)((base & ~0xf0) | 0x20);
	*idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* HSOTGUSB interface clock: iclk autoidle plus ES2-specific IDLEST bit */
const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = {
	.allow_idle	= omap2_clkt_iclk_allow_idle,
	.deny_idle	= omap2_clkt_iclk_deny_idle,
	.find_idlest	= omap3430es2_clk_hsotgusb_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
/* HSOTGUSB functional clock: same IDLEST lookup, no autoidle hooks */
const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait = {
	.find_idlest	= omap3430es2_clk_hsotgusb_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
| gpl-2.0 |
zzicewind/linux | arch/arm/mach-imx/3ds_debugboard.c | 3816 | 5755 | /*
* Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2010 Jason Wang <jason77.wang@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/smsc911x.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
#include "hardware.h"
/* LAN9217 ethernet base address */
#define LAN9217_BASE_ADDR(n) (n + 0x0)
/* External UART */
#define UARTA_BASE_ADDR(n) (n + 0x8000)
#define UARTB_BASE_ADDR(n) (n + 0x10000)
#define BOARD_IO_ADDR(n) (n + 0x20000)
/* LED switchs */
#define LED_SWITCH_REG 0x00
/* buttons */
#define SWITCH_BUTTONS_REG 0x08
/* status, interrupt */
#define INTR_STATUS_REG 0x10
#define INTR_MASK_REG 0x38
#define INTR_RESET_REG 0x20
/* magic word for debug CPLD */
#define MAGIC_NUMBER1_REG 0x40
#define MAGIC_NUMBER2_REG 0x48
/* CPLD code version */
#define CPLD_CODE_VER_REG 0x50
/* magic word for debug CPLD */
#define MAGIC_NUMBER3_REG 0x58
/* module reset register*/
#define MODULE_RESET_REG 0x60
/* CPU ID and Personality ID */
#define MCU_BOARD_ID_REG 0x68
#define MXC_MAX_EXP_IO_LINES 16
/* interrupts like external uart , external ethernet etc*/
#define EXPIO_INT_ENET 0
#define EXPIO_INT_XUART_A 1
#define EXPIO_INT_XUART_B 2
#define EXPIO_INT_BUTTON_A 3
#define EXPIO_INT_BUTTON_B 4
static void __iomem *brd_io;	/* mapped CPLD register window */
static struct irq_domain *domain;	/* legacy domain for the 16 expio lines */
/* MEM/IRQ resources for the LAN9217; start/end are filled in at probe */
static struct resource smsc911x_resources[] = {
	{
		.flags = IORESOURCE_MEM,
	} , {
		.flags = IORESOURCE_IRQ,
	},
};
static struct smsc911x_platform_config smsc911x_config = {
	.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.flags = SMSC911X_USE_32BIT | SMSC911X_FORCE_INTERNAL_PHY,
};
/* platform device for the on-board SMSC LAN9217 ethernet controller */
static struct platform_device smsc_lan9217_device = {
	.name = "smsc911x",
	.id = -1,
	.dev = {
		.platform_data = &smsc911x_config,
	},
	.num_resources = ARRAY_SIZE(smsc911x_resources),
	.resource = smsc911x_resources,
};
/*
 * Chained handler for the debug-board CPLD interrupt line: mask the
 * parent GPIO irq, demultiplex every pending unmasked CPLD source to its
 * mapped linux irq, then ack and unmask the parent.
 */
static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc)
{
	u32 imr_val;
	u32 int_valid;
	u32 expio_irq;
	/* irq = gpio irq number */
	desc->irq_data.chip->irq_mask(&desc->irq_data);
	/* pending = status bits whose mask bit is clear in the IMR */
	imr_val = __raw_readw(brd_io + INTR_MASK_REG);
	int_valid = __raw_readw(brd_io + INTR_STATUS_REG) & ~imr_val;
	expio_irq = 0;
	for (; int_valid != 0; int_valid >>= 1, expio_irq++) {
		if ((int_valid & 1) == 0)
			continue;
		generic_handle_irq(irq_find_mapping(domain, expio_irq));
	}
	desc->irq_data.chip->irq_ack(&desc->irq_data);
	desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
/*
* Disable an expio pin's interrupt by setting the bit in the imr.
* Irq is an expio virtual irq number
*/
/*
 * Disable an expio line: setting its bit in the CPLD interrupt mask
 * register (IMR) suppresses the interrupt.
 */
static void expio_mask_irq(struct irq_data *d)
{
	u16 mask = __raw_readw(brd_io + INTR_MASK_REG);

	mask |= 1 << d->hwirq;
	__raw_writew(mask, brd_io + INTR_MASK_REG);
}
/* Acknowledge an expio line: pulse its bit in the reset register (write
 * 1 then 0) to clear the latched status, then leave the line masked. */
static void expio_ack_irq(struct irq_data *d)
{
	u32 expio = d->hwirq;
	__raw_writew(1 << expio, brd_io + INTR_RESET_REG);
	__raw_writew(0, brd_io + INTR_RESET_REG);
	expio_mask_irq(d);
}
/*
 * Re-enable an expio line: clearing its bit in the CPLD interrupt mask
 * register (IMR) lets the interrupt through again.
 */
static void expio_unmask_irq(struct irq_data *d)
{
	u16 mask = __raw_readw(brd_io + INTR_MASK_REG);

	mask &= ~(1 << d->hwirq);
	__raw_writew(mask, brd_io + INTR_MASK_REG);
}
/* irq_chip driving the CPLD mask/reset registers for the expio lines */
static struct irq_chip expio_irq_chip = {
	.irq_ack = expio_ack_irq,
	.irq_mask = expio_mask_irq,
	.irq_unmask = expio_unmask_irq,
};
/* fixed dummy regulators so the smsc911x driver's supply lookups resolve */
static struct regulator_consumer_supply dummy_supplies[] = {
	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
	REGULATOR_SUPPLY("vddvario", "smsc911x"),
};
/*
 * Probe and initialise the 3-Stack debug board at physical address @base.
 * Verifies the CPLD magic-number registers, sets up a legacy irq domain
 * demultiplexed off the GPIO line @intr_gpio, and registers the on-board
 * SMSC LAN9217.  Returns 0 on success or a negative errno.
 */
int __init mxc_expio_init(u32 base, u32 intr_gpio)
{
	u32 p_irq = gpio_to_irq(intr_gpio);
	int irq_base;
	int i;
	brd_io = ioremap(BOARD_IO_ADDR(base), SZ_4K);
	if (brd_io == NULL)
		return -ENOMEM;
	/* the CPLD identifies itself via three fixed magic words */
	if ((__raw_readw(brd_io + MAGIC_NUMBER1_REG) != 0xAAAA) ||
	    (__raw_readw(brd_io + MAGIC_NUMBER2_REG) != 0x5555) ||
	    (__raw_readw(brd_io + MAGIC_NUMBER3_REG) != 0xCAFE)) {
		pr_info("3-Stack Debug board not detected\n");
		iounmap(brd_io);
		brd_io = NULL;
		return -ENODEV;
	}
	pr_info("3-Stack Debug board detected, rev = 0x%04X\n",
			readw(brd_io + CPLD_CODE_VER_REG));
	/*
	 * Configure INT line as GPIO input
	 */
	/* NOTE(review): gpio_request() result is ignored — confirm the line
	 * cannot already be claimed on these boards */
	gpio_request(intr_gpio, "expio_pirq");
	gpio_direction_input(intr_gpio);
	/* disable the interrupt and clear the status */
	__raw_writew(0, brd_io + INTR_MASK_REG);
	__raw_writew(0xFFFF, brd_io + INTR_RESET_REG);
	__raw_writew(0, brd_io + INTR_RESET_REG);
	__raw_writew(0x1F, brd_io + INTR_MASK_REG);
	irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id());
	WARN_ON(irq_base < 0);
	/* 1:1 legacy mapping: hwirq n -> linux irq irq_base + n */
	domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0,
				       &irq_domain_simple_ops, NULL);
	WARN_ON(!domain);
	for (i = irq_base; i < irq_base + MXC_MAX_EXP_IO_LINES; i++) {
		irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq);
		set_irq_flags(i, IRQF_VALID);
	}
	irq_set_irq_type(p_irq, IRQF_TRIGGER_LOW);
	irq_set_chained_handler(p_irq, mxc_expio_irq_handler);
	/* Register Lan device on the debugboard */
	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
	smsc911x_resources[0].start = LAN9217_BASE_ADDR(base);
	smsc911x_resources[0].end = LAN9217_BASE_ADDR(base) + 0x100 - 1;
	smsc911x_resources[1].start = irq_find_mapping(domain, EXPIO_INT_ENET);
	smsc911x_resources[1].end = irq_find_mapping(domain, EXPIO_INT_ENET);
	platform_device_register(&smsc_lan9217_device);
	return 0;
}
| gpl-2.0 |
Alonso1398/android_kernel_samsung_coriplus | drivers/net/wireless/prism54/islpci_mgt.c | 4072 | 14683 | /*
* Copyright (C) 2002 Intersil Americas Inc.
* Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/system.h>
#include <linux/if_arp.h>
#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_mgt.h"
#include "isl_oid.h" /* additional types and defs for isl38xx fw */
#include "isl_ioctl.h"
#include <net/iw_handler.h>
/******************************************************************************
Global variable definition section
******************************************************************************/
/* Debug verbosity bitmask; overridable at module load via pc_debug= */
int pc_debug = VERBOSE;
module_param(pc_debug, int, 0);
/******************************************************************************
Driver general functions
******************************************************************************/
#if VERBOSE > SHOW_ERROR_MESSAGES
/* Hex-dump @length bytes of @buffer when buffer tracing is enabled. */
void
display_buffer(char *buffer, int length)
{
	int i;

	if (!(pc_debug & SHOW_BUFFER_CONTENTS))
		return;
	for (i = 0; i < length; i++)
		printk("[%02x]", buffer[i] & 255);
	printk("\n");
}
#endif
/*****************************************************************************
Queue handling for management frames
******************************************************************************/
/*
 * Fill in a PIMFOR management frame header in place.  The multi-byte
 * fields (oid, length) are stored big-endian on the wire.
 */
static void
pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *hdr)
{
	hdr->version = PIMFOR_VERSION;
	hdr->flags = 0;
	hdr->operation = operation;
	hdr->device_id = PIMFOR_DEV_ID_MHLI_MIB;
	hdr->oid = cpu_to_be32(oid);
	hdr->length = cpu_to_be32(length);
}
/*
 * Helper function to analyze a PIMFOR management frame header.
 *
 * Walks the headers at the start of @data (at most @len bytes),
 * byte-swapping each header's oid/length fields to host order in
 * place, and returns a pointer to the first header that is not an
 * OID_INL_TUNNEL encapsulation header, or NULL if none is found.
 */
static pimfor_header_t *
pimfor_decode_header(void *data, int len)
{
	pimfor_header_t *h = data;

	while ((void *) h < data + len) {
		/* the sender flags which byte order it used */
		if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) {
			le32_to_cpus(&h->oid);
			le32_to_cpus(&h->length);
		} else {
			be32_to_cpus(&h->oid);
			be32_to_cpus(&h->length);
		}
		/* skip tunnel headers; anything else is the real frame */
		if (h->oid != OID_INL_TUNNEL)
			return h;
		h++;
	}
	return NULL;
}
/*
 * Fill the receive queue for management frames with fresh buffers.
 *
 * Tops the queue up to ISL38XX_CB_MGMT_QSIZE entries, allocating and
 * DMA-mapping backing memory only for slots that do not have any yet
 * (buffers are recycled across fills).  Returns 0 on success or
 * -ENOMEM on allocation/mapping failure.
 */
int
islpci_mgmt_rx_fill(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	/* driver_curr_frag is a free-running counter; u32 subtraction
	 * against index_mgmt_rx gives the queue occupancy even across
	 * wraparound */
	u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif
	while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
		u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		isl38xx_fragment *frag = &cb->rx_data_mgmt[index];

		/* allocate backing memory only if the slot has none yet */
		if (buf->mem == NULL) {
			buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
			if (!buf->mem) {
				printk(KERN_WARNING
				       "Error allocating management frame.\n");
				return -ENOMEM;
			}
			buf->size = MGMT_FRAME_SIZE;
		}
		/* map for device-to-host DMA only if not already mapped */
		if (buf->pci_addr == 0) {
			buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
						       MGMT_FRAME_SIZE,
						       PCI_DMA_FROMDEVICE);
			if (!buf->pci_addr) {
				printk(KERN_WARNING
				       "Failed to make memory DMA'able.\n");
				return -ENOMEM;
			}
		}
		/* be safe: always reset control block information */
		frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
		frag->flags = 0;
		frag->address = cpu_to_le32(buf->pci_addr);
		curr++;
		/* The fragment address in the control block must have
		 * been written before announcing the frame buffer to
		 * device */
		wmb();
		cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
	}
	return 0;
}
/*
* Create and transmit a management frame using "operation" and "oid",
* with arguments data/length.
* We either return an error and free the frame, or we return 0 and
* islpci_mgt_cleanup_transmit() frees the frame in the tx-done
* interrupt.
*/
static int
islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
void *data, int length)
{
islpci_private *priv = netdev_priv(ndev);
isl38xx_control_block *cb =
(isl38xx_control_block *) priv->control_block;
void *p;
int err = -EINVAL;
unsigned long flags;
isl38xx_fragment *frag;
struct islpci_membuf buf;
u32 curr_frag;
int index;
int frag_len = length + PIMFOR_HEADER_SIZE;
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_transmit\n");
#endif
if (frag_len > MGMT_FRAME_SIZE) {
printk(KERN_DEBUG "%s: mgmt frame too large %d\n",
ndev->name, frag_len);
goto error;
}
err = -ENOMEM;
p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
if (!buf.mem) {
printk(KERN_DEBUG "%s: cannot allocate mgmt frame\n",
ndev->name);
goto error;
}
buf.size = frag_len;
/* create the header directly in the fragment data area */
pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);
p += PIMFOR_HEADER_SIZE;
if (data)
memcpy(p, data, length);
else
memset(p, 0, length);
#if VERBOSE > SHOW_ERROR_MESSAGES
{
pimfor_header_t *h = buf.mem;
DEBUG(SHOW_PIMFOR_FRAMES,
"PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
h->operation, oid, h->device_id, h->flags, length);
/* display the buffer contents for debugging */
display_buffer((char *) h, sizeof (pimfor_header_t));
display_buffer(p, length);
}
#endif
err = -ENOMEM;
buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len,
PCI_DMA_TODEVICE);
if (!buf.pci_addr) {
printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
ndev->name);
goto error_free;
}
/* Protect the control block modifications against interrupts. */
spin_lock_irqsave(&priv->slock, flags);
curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ]);
if (curr_frag - priv->index_mgmt_tx >= ISL38XX_CB_MGMT_QSIZE) {
printk(KERN_WARNING "%s: mgmt tx queue is still full\n",
ndev->name);
goto error_unlock;
}
/* commit the frame to the tx device queue */
index = curr_frag % ISL38XX_CB_MGMT_QSIZE;
priv->mgmt_tx[index] = buf;
frag = &cb->tx_data_mgmt[index];
frag->size = cpu_to_le16(frag_len);
frag->flags = 0; /* for any other than the last fragment, set to 1 */
frag->address = cpu_to_le32(buf.pci_addr);
/* The fragment address in the control block must have
* been written before announcing the frame buffer to
* device */
wmb();
cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ] = cpu_to_le32(curr_frag + 1);
spin_unlock_irqrestore(&priv->slock, flags);
/* trigger the device */
islpci_trigger(priv);
return 0;
error_unlock:
spin_unlock_irqrestore(&priv->slock, flags);
error_free:
kfree(buf.mem);
error:
return err;
}
/*
* Receive a management frame from the device.
* This can be an arbitrary number of traps, and at most one response
* frame for a previous request sent via islpci_mgt_transmit().
*/
int
islpci_mgt_receive(struct net_device *ndev)
{
islpci_private *priv = netdev_priv(ndev);
isl38xx_control_block *cb =
(isl38xx_control_block *) priv->control_block;
u32 curr_frag;
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
#endif
/* Only once per interrupt, determine fragment range to
* process. This avoids an endless loop (i.e. lockup) if
* frames come in faster than we can process them. */
curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
barrier();
for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
pimfor_header_t *header;
u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
struct islpci_membuf *buf = &priv->mgmt_rx[index];
u16 frag_len;
int size;
struct islpci_mgmtframe *frame;
/* I have no idea (and no documentation) if flags != 0
* is possible. Drop the frame, reuse the buffer. */
if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
ndev->name,
le16_to_cpu(cb->rx_data_mgmt[index].flags));
continue;
}
/* The device only returns the size of the header(s) here. */
frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);
/*
* We appear to have no way to tell the device the
* size of a receive buffer. Thus, if this check
* triggers, we likely have kernel heap corruption. */
if (frag_len > MGMT_FRAME_SIZE) {
printk(KERN_WARNING
"%s: Bogus packet size of %d (%#x).\n",
ndev->name, frag_len, frag_len);
frag_len = MGMT_FRAME_SIZE;
}
/* Ensure the results of device DMA are visible to the CPU. */
pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
buf->size, PCI_DMA_FROMDEVICE);
/* Perform endianess conversion for PIMFOR header in-place. */
header = pimfor_decode_header(buf->mem, frag_len);
if (!header) {
printk(KERN_WARNING "%s: no PIMFOR header found\n",
ndev->name);
continue;
}
/* The device ID from the PIMFOR packet received from
* the MVC is always 0. We forward a sensible device_id.
* Not that anyone upstream would care... */
header->device_id = priv->ndev->ifindex;
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_PIMFOR_FRAMES,
"PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
header->operation, header->oid, header->device_id,
header->flags, header->length);
/* display the buffer contents for debugging */
display_buffer((char *) header, PIMFOR_HEADER_SIZE);
display_buffer((char *) header + PIMFOR_HEADER_SIZE,
header->length);
#endif
/* nobody sends these */
if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
printk(KERN_DEBUG
"%s: errant PIMFOR application frame\n",
ndev->name);
continue;
}
/* Determine frame size, skipping OID_INL_TUNNEL headers. */
size = PIMFOR_HEADER_SIZE + header->length;
frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
GFP_ATOMIC);
if (!frame) {
printk(KERN_WARNING
"%s: Out of memory, cannot handle oid 0x%08x\n",
ndev->name, header->oid);
continue;
}
frame->ndev = ndev;
memcpy(&frame->buf, header, size);
frame->header = (pimfor_header_t *) frame->buf;
frame->data = frame->buf + PIMFOR_HEADER_SIZE;
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_PIMFOR_FRAMES,
"frame: header: %p, data: %p, size: %d\n",
frame->header, frame->data, size);
#endif
if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
printk(KERN_DEBUG
"TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
header->oid, header->device_id, header->flags,
header->length);
#endif
/* Create work to handle trap out of interrupt
* context. */
INIT_WORK(&frame->ws, prism54_process_trap);
schedule_work(&frame->ws);
} else {
/* Signal the one waiting process that a response
* has been received. */
if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
printk(KERN_WARNING
"%s: mgmt response not collected\n",
ndev->name);
kfree(frame);
}
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
wake_up(&priv->mgmt_wqueue);
}
}
return 0;
}
/*
 * Cleanup the transmit queue by freeing all frames handled by the device.
 *
 * Walks the slots between our cleanup index and the device's current
 * fragment counter, unmapping and freeing each buffer.  The device
 * counter is sampled exactly once, so a confused device incrementing
 * it rapidly cannot keep us looping forever.
 */
void
islpci_mgt_cleanup_transmit(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	u32 device_frag;
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_cleanup_transmit\n");
#endif
	device_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_TX_MGMTQ]);
	barrier();

	while (priv->index_mgmt_tx < device_frag) {
		int slot = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *entry = &priv->mgmt_tx[slot];

		pci_unmap_single(priv->pdev, entry->pci_addr, entry->size,
				 PCI_DMA_TODEVICE);
		entry->pci_addr = 0;
		kfree(entry->mem);
		entry->mem = NULL;
		entry->size = 0;

		priv->index_mgmt_tx++;
	}
}
/*
 * Perform one request-response transaction to the device.
 *
 * Serialized by priv->mgmt_lock, so at most one transaction is in
 * flight.  On success the response frame is handed back through
 * *recvframe; the caller owns it and must kfree() it.  Returns 0 on
 * success, -ERESTARTSYS if interrupted while acquiring the lock,
 * -ETIMEDOUT if no matching response arrives, or the error from
 * islpci_mgt_transmit().
 */
int
islpci_mgt_transaction(struct net_device *ndev,
		       int operation, unsigned long oid,
		       void *senddata, int sendlen,
		       struct islpci_mgmtframe **recvframe)
{
	islpci_private *priv = netdev_priv(ndev);
	const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
	long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
	int err;
	DEFINE_WAIT(wait);

	*recvframe = NULL;

	if (mutex_lock_interruptible(&priv->mgmt_lock))
		return -ERESTARTSYS;

	/* register on the wait queue before transmitting so that a very
	 * fast response cannot be missed */
	prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
	err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
	if (err)
		goto out;

	err = -ETIMEDOUT;
	while (timeout_left > 0) {
		int timeleft;
		struct islpci_mgmtframe *frame;

		timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
		/* atomically claim whatever response the rx path stored */
		frame = xchg(&priv->mgmt_received, NULL);
		if (frame) {
			if (frame->header->oid == oid) {
				*recvframe = frame;
				err = 0;
				goto out;
			} else {
				/* stale response for some earlier request:
				 * drop it and keep waiting for ours */
				printk(KERN_DEBUG
				       "%s: expecting oid 0x%x, received 0x%x.\n",
				       ndev->name, (unsigned int) oid,
				       frame->header->oid);
				kfree(frame);
				frame = NULL;
			}
		}
		if (timeleft == 0) {
			/* NOTE(review): timeout_left is a signed long but is
			 * printed with %lu - harmless while it is positive */
			printk(KERN_DEBUG
				"%s: timeout waiting for mgmt response %lu, "
				"triggering device\n",
				ndev->name, timeout_left);
			islpci_trigger(priv);
		}
		/* account only for the time actually slept this cycle */
		timeout_left += timeleft - wait_cycle_jiffies;
	}
	printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
	       ndev->name);

	/* TODO: we should reset the device here */
 out:
	finish_wait(&priv->mgmt_wqueue, &wait);
	mutex_unlock(&priv->mgmt_lock);
	return err;
}
| gpl-2.0 |
Pafcholini/emotion_tw_511_COI3 | drivers/mfd/si476x-prop.c | 4584 | 5911 | /*
* drivers/mfd/si476x-prop.c -- Subroutines to access
* properties of si476x chips
*
* Copyright (C) 2012 Innovative Converged Devices(ICD)
* Copyright (C) 2013 Andrey Smirnov
*
* Author: Andrey Smirnov <andrew.smirnov@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/module.h>
#include <linux/mfd/si476x-core.h>
/* Inclusive [low, high] span of valid 16-bit property ids. */
struct si476x_property_range {
	u16 low, high;
};
/*
 * Return true if @element appears among the @size entries of @array.
 */
static bool si476x_core_element_is_in_array(u16 element,
					    const u16 array[],
					    size_t size)
{
	size_t idx;

	for (idx = 0; idx < size; idx++) {
		if (array[idx] == element)
			return true;
	}

	return false;
}
/*
 * Return true if @element lies inside any of the @size inclusive
 * [low, high] spans of @range.
 */
static bool si476x_core_element_is_in_range(u16 element,
					    const struct si476x_property_range range[],
					    size_t size)
{
	size_t idx;

	for (idx = 0; idx < size; idx++) {
		if (range[idx].low <= element && element <= range[idx].high)
			return true;
	}

	return false;
}
/*
 * Property-id validity check for revision A10 chips: individually
 * valid ids in valid_properties[], contiguous spans (inclusive) in
 * valid_ranges[].
 */
static bool si476x_core_is_valid_property_a10(struct si476x_core *core,
					      u16 property)
{
	static const u16 valid_properties[] = {
		0x0000,
		0x0500, 0x0501,
		0x0600,
		/* 0x70E/0x710 are 0x070E/0x0710 written without the
		 * leading zero - same values as the neighbours */
		0x0709, 0x070C, 0x070D, 0x70E, 0x710,
		0x0718,
		0x1207, 0x1208,
		0x2007,
		0x2300,
	};
	static const struct si476x_property_range valid_ranges[] = {
		{ 0x0200, 0x0203 },
		{ 0x0300, 0x0303 },
		{ 0x0400, 0x0404 },
		{ 0x0700, 0x0707 },
		{ 0x1100, 0x1102 },
		{ 0x1200, 0x1204 },
		{ 0x1300, 0x1306 },
		{ 0x2000, 0x2005 },
		{ 0x2100, 0x2104 },
		{ 0x2106, 0x2106 },
		{ 0x2200, 0x220E },
		{ 0x3100, 0x3104 },
		{ 0x3207, 0x320F },
		{ 0x3300, 0x3304 },
		{ 0x3500, 0x3517 },
		{ 0x3600, 0x3617 },
		{ 0x3700, 0x3717 },
		{ 0x4000, 0x4003 },
	};

	return si476x_core_element_is_in_range(property, valid_ranges,
					       ARRAY_SIZE(valid_ranges)) ||
		si476x_core_element_is_in_array(property, valid_properties,
						ARRAY_SIZE(valid_properties));
}
/*
 * Property-id validity check for revision A20 chips: everything valid
 * on A10 plus the A20-only ids and ranges below.
 */
static bool si476x_core_is_valid_property_a20(struct si476x_core *core,
					      u16 property)
{
	static const u16 valid_properties[] = {
		0x071B,
		0x1006,
		0x2210,
		0x3401,
	};
	static const struct si476x_property_range valid_ranges[] = {
		{ 0x2215, 0x2219 },
	};

	return si476x_core_is_valid_property_a10(core, property) ||
		si476x_core_element_is_in_range(property, valid_ranges,
						ARRAY_SIZE(valid_ranges)) ||
		si476x_core_element_is_in_array(property, valid_properties,
						ARRAY_SIZE(valid_properties));
}
/*
 * Property-id validity check for revision A30 chips: everything valid
 * on A20 (and thus A10) plus the A30-only ids and ranges below.
 */
static bool si476x_core_is_valid_property_a30(struct si476x_core *core,
					      u16 property)
{
	static const u16 valid_properties[] = {
		0x071C, 0x071D,
		0x1007, 0x1008,
		0x220F, 0x2214,
		0x2301,
		0x3105, 0x3106,
		0x3402,
	};
	static const struct si476x_property_range valid_ranges[] = {
		{ 0x0405, 0x0411 },
		{ 0x2008, 0x200B },
		{ 0x2220, 0x2223 },
		{ 0x3100, 0x3106 },
	};

	return si476x_core_is_valid_property_a20(core, property) ||
		si476x_core_element_is_in_range(property, valid_ranges,
						ARRAY_SIZE(valid_ranges)) ||
		si476x_core_element_is_in_array(property, valid_properties,
						ARRAY_SIZE(valid_properties));
}
/* Predicate type: is @property valid on this chip revision? */
typedef bool (*valid_property_pred_t) (struct si476x_core *, u16);

/*
 * Dispatch the validity check to the handler for the chip revision.
 * BUG()s on an out-of-range or unset (-1) revision, since the table
 * below would otherwise be indexed out of bounds.
 */
static bool si476x_core_is_valid_property(struct si476x_core *core,
					  u16 property)
{
	static const valid_property_pred_t is_valid_property[] = {
		[SI476X_REVISION_A10] = si476x_core_is_valid_property_a10,
		[SI476X_REVISION_A20] = si476x_core_is_valid_property_a20,
		[SI476X_REVISION_A30] = si476x_core_is_valid_property_a30,
	};

	BUG_ON(core->revision > SI476X_REVISION_A30 ||
	       core->revision == -1);
	return is_valid_property[core->revision](core, property);
}
/*
 * Return true if @property is read-only on this chip revision.
 * A30 chips have no read-only properties.
 */
static bool si476x_core_is_readonly_property(struct si476x_core *core,
					     u16 property)
{
	BUG_ON(core->revision > SI476X_REVISION_A30 ||
	       core->revision == -1);

	if (core->revision == SI476X_REVISION_A10)
		return property == 0x3200;

	if (core->revision == SI476X_REVISION_A20)
		return property == 0x1006 ||
		       property == 0x2210 ||
		       property == 0x3200;

	return false;
}
/* regmap callback: a register is readable iff it is a valid property
 * id for the attached chip's revision. */
static bool si476x_core_regmap_readable_register(struct device *dev,
						 unsigned int reg)
{
	struct si476x_core *core =
		i2c_get_clientdata(to_i2c_client(dev));

	return si476x_core_is_valid_property(core, (u16) reg);
}
static bool si476x_core_regmap_writable_register(struct device *dev,
unsigned int reg)
{
struct i2c_client *client = to_i2c_client(dev);
struct si476x_core *core = i2c_get_clientdata(client);
return si476x_core_is_valid_property(core, (u16) reg) &&
!si476x_core_is_readonly_property(core, (u16) reg);
}
/* regmap reg_write callback: forward to the SET_PROPERTY command. */
static int si476x_core_regmap_write(void *context, unsigned int reg,
				    unsigned int val)
{
	struct si476x_core *core = context;

	return si476x_core_cmd_set_property(core, reg, val);
}
/* regmap reg_read callback: the GET_PROPERTY command returns either a
 * negative error or the (non-negative) property value. */
static int si476x_core_regmap_read(void *context, unsigned int reg,
				   unsigned *val)
{
	int ret = si476x_core_cmd_get_property(context, reg);

	if (ret < 0)
		return ret;

	*val = ret;
	return 0;
}
/*
 * regmap configuration for the si476x property space: 16-bit ids
 * mapped to 16-bit values.  There is no memory-mapped bus, so all
 * access goes through the reg_read/reg_write command callbacks above,
 * with an rbtree cache in front.
 */
static const struct regmap_config si476x_regmap_config = {
	.reg_bits = 16,
	.val_bits = 16,
	.max_register = 0x4003,
	.writeable_reg = si476x_core_regmap_writable_register,
	.readable_reg = si476x_core_regmap_readable_register,
	.reg_read = si476x_core_regmap_read,
	.reg_write = si476x_core_regmap_write,
	.cache_type = REGCACHE_RBTREE,
};
/*
 * Create a device-managed regmap for @core using the configuration
 * above; @core itself is the callback context.
 */
struct regmap *devm_regmap_init_si476x(struct si476x_core *core)
{
	struct device *dev = &core->client->dev;

	return devm_regmap_init(dev, NULL, core, &si476x_regmap_config);
}
EXPORT_SYMBOL_GPL(devm_regmap_init_si476x);
danonbrown/trltetmo-kernel | drivers/input/mouse/rpcmouse.c | 4840 | 2961 | /*
* Acorn RiscPC mouse driver for Linux/ARM
*
* Copyright (c) 2000-2002 Vojtech Pavlik
* Copyright (C) 1996-2002 Russell King
*
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This handles the Acorn RiscPCs mouse. We basically have a couple of
* hardware registers that track the sensor count for the X-Y movement and
* another register holding the button state. On every VSYNC interrupt we read
* the complete state and then work out if something has changed.
*/
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/hardware/iomd.h>
MODULE_AUTHOR("Vojtech Pavlik, Russell King");
MODULE_DESCRIPTION("Acorn RiscPC mouse driver");
MODULE_LICENSE("GPL");

/* Previous raw X/Y hardware counter readings; used to derive relative
 * motion in the VSYNC interrupt handler. */
static short rpcmouse_lastx, rpcmouse_lasty;

/* The registered input device; also used as the shared-irq cookie. */
static struct input_dev *rpcmouse_dev;
/*
 * VSYNC interrupt handler: sample the X/Y movement counters and the
 * button register, then report the deltas and button states to the
 * input core.
 */
static irqreturn_t rpcmouse_irq(int irq, void *dev_id)
{
	struct input_dev *input = dev_id;
	short curx, cury, buttons;

	curx = (short) iomd_readl(IOMD_MOUSEX);
	cury = (short) iomd_readl(IOMD_MOUSEY);
	buttons = (short) (__raw_readl(IOMEM(0xe0310000)) ^ 0x70);

	/* hardware Y grows upward, input expects downward-positive */
	input_report_rel(input, REL_X, curx - rpcmouse_lastx);
	input_report_rel(input, REL_Y, -(cury - rpcmouse_lasty));
	rpcmouse_lastx = curx;
	rpcmouse_lasty = cury;

	input_report_key(input, BTN_LEFT, buttons & 0x40);
	input_report_key(input, BTN_MIDDLE, buttons & 0x20);
	input_report_key(input, BTN_RIGHT, buttons & 0x10);

	input_sync(input);

	return IRQ_HANDLED;
}
/*
 * Allocate and register the input device, snapshot the initial X/Y
 * hardware counters, and hook the VSYNC interrupt that samples the
 * mouse state.  Returns 0 on success or a negative errno.
 */
static int __init rpcmouse_init(void)
{
	int err;

	rpcmouse_dev = input_allocate_device();
	if (!rpcmouse_dev)
		return -ENOMEM;

	rpcmouse_dev->name = "Acorn RiscPC Mouse";
	rpcmouse_dev->phys = "rpcmouse/input0";
	rpcmouse_dev->id.bustype = BUS_HOST;
	rpcmouse_dev->id.vendor = 0x0005;
	rpcmouse_dev->id.product = 0x0001;
	rpcmouse_dev->id.version = 0x0100;

	/* a relative pointing device with three buttons */
	rpcmouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
	rpcmouse_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
		BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
	rpcmouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);

	/* prime the delta computation with the current counter values */
	rpcmouse_lastx = (short) iomd_readl(IOMD_MOUSEX);
	rpcmouse_lasty = (short) iomd_readl(IOMD_MOUSEY);

	/* registered as a shared interrupt; rpcmouse_dev doubles as
	 * the per-handler cookie */
	if (request_irq(IRQ_VSYNCPULSE, rpcmouse_irq, IRQF_SHARED, "rpcmouse", rpcmouse_dev)) {
		printk(KERN_ERR "rpcmouse: unable to allocate VSYNC interrupt\n");
		err = -EBUSY;
		goto err_free_dev;
	}

	err = input_register_device(rpcmouse_dev);
	if (err)
		goto err_free_irq;

	return 0;

 err_free_irq:
	free_irq(IRQ_VSYNCPULSE, rpcmouse_dev);
 err_free_dev:
	input_free_device(rpcmouse_dev);
	return err;
}
/* Module teardown: stop sampling before unregistering the device. */
static void __exit rpcmouse_exit(void)
{
	free_irq(IRQ_VSYNCPULSE, rpcmouse_dev);
	input_unregister_device(rpcmouse_dev);
}

module_init(rpcmouse_init);
module_exit(rpcmouse_exit);
| gpl-2.0 |
tudorsirb/lge_kernel_p700 | net/netrom/nr_in.c | 7912 | 7118 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/netrom.h>
/*
 * Queue a received NET/ROM I-frame for delivery to the socket,
 * reassembling fragmented messages first.
 *
 * @more is the more-follows indication from the transport header:
 * while set, fragments accumulate on nr->frag_queue; the final
 * fragment triggers reassembly into one freshly allocated skb.
 * Returns 0 on success or a non-zero value on failure.
 */
static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
{
	struct sk_buff *skbo, *skbn = skb;
	struct nr_sock *nr = nr_sk(sk);

	/* strip the NET/ROM network + transport headers */
	skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);

	nr_start_idletimer(sk);

	if (more) {
		/* intermediate fragment: stash it and wait for the rest */
		nr->fraglen += skb->len;
		skb_queue_tail(&nr->frag_queue, skb);
		return 0;
	}

	if (!more && nr->fraglen > 0) {	/* End of fragment */
		nr->fraglen += skb->len;
		skb_queue_tail(&nr->frag_queue, skb);

		/* NOTE(review): on allocation failure the partial fragments
		 * remain on frag_queue and 1 is returned to the caller */
		if ((skbn = alloc_skb(nr->fraglen, GFP_ATOMIC)) == NULL)
			return 1;

		skb_reset_transport_header(skbn);

		/* copy every queued fragment into the new skb, in order */
		while ((skbo = skb_dequeue(&nr->frag_queue)) != NULL) {
			skb_copy_from_linear_data(skbo,
						  skb_put(skbn, skbo->len),
						  skbo->len);
			kfree_skb(skbo);
		}

		nr->fraglen = 0;
	}

	return sock_queue_rcv_skb(sk, skbn);
}
/*
 * State machine for state 1, Awaiting Connection State.
 * The handling of the timer(s) is in file nr_timer.c.
 * Handling of state 0 and connection release is in netrom.c.
 */
static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
	int frametype)
{
	switch (frametype) {
	case NR_CONNACK: {
		struct nr_sock *nr = nr_sk(sk);

		/* connection accepted: stop the connect-retry timer and
		 * start idle accounting */
		nr_stop_t1timer(sk);
		nr_start_idletimer(sk);
		/* remote circuit index/id from the CONNACK payload */
		nr->your_index = skb->data[17];
		nr->your_id = skb->data[18];
		/* reset all send/receive sequence variables */
		nr->vs = 0;
		nr->va = 0;
		nr->vr = 0;
		nr->vl = 0;
		nr->state = NR_STATE_3;
		nr->n2count = 0;
		/* window size granted by the peer */
		nr->window = skb->data[20];
		sk->sk_state = TCP_ESTABLISHED;
		/* wake up anyone blocked in connect() */
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;
	}

	case NR_CONNACK | NR_CHOKE_FLAG:
		/* connection refused by the peer */
		nr_disconnect(sk, ECONNREFUSED);
		break;

	case NR_RESET:
		if (sysctl_netrom_reset_circuit)
			nr_disconnect(sk, ECONNRESET);
		break;

	default:
		break;
	}

	return 0;
}
/*
 * State machine for state 2, Awaiting Release State.
 * The handling of the timer(s) is in file nr_timer.c
 * Handling of state 0 and connection release is in netrom.c.
 */
static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
	int frametype)
{
	switch (frametype) {
	case NR_CONNACK | NR_CHOKE_FLAG:
		nr_disconnect(sk, ECONNRESET);
		break;

	case NR_DISCREQ:
		/* acknowledge the peer's release request, then fall
		 * through to tear the connection down */
		nr_write_internal(sk, NR_DISCACK);
		/* fall through */

	case NR_DISCACK:
		nr_disconnect(sk, 0);
		break;

	case NR_RESET:
		if (sysctl_netrom_reset_circuit)
			nr_disconnect(sk, ECONNRESET);
		break;

	default:
		break;
	}

	return 0;
}
/*
 * State machine for state 3, Connected State.
 * The handling of the timer(s) is in file nr_timer.c
 * Handling of state 0 and connection release is in netrom.c.
 *
 * Returns 1 if the skb was consumed (queued for resequencing),
 * 0 if the caller may free it.
 */
static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct nr_sock *nrom = nr_sk(sk);
	struct sk_buff_head temp_queue;
	struct sk_buff *skbn;
	unsigned short save_vr;
	unsigned short nr, ns;	/* peer's N(R) and N(S) from the header */
	int queued = 0;

	nr = skb->data[18];
	ns = skb->data[17];

	switch (frametype) {
	case NR_CONNREQ:
		/* duplicate connection request: just re-acknowledge it */
		nr_write_internal(sk, NR_CONNACK);
		break;

	case NR_DISCREQ:
		nr_write_internal(sk, NR_DISCACK);
		nr_disconnect(sk, 0);
		break;

	case NR_CONNACK | NR_CHOKE_FLAG:
	case NR_DISCACK:
		nr_disconnect(sk, ECONNRESET);
		break;

	case NR_INFOACK:
	case NR_INFOACK | NR_CHOKE_FLAG:
	case NR_INFOACK | NR_NAK_FLAG:
	case NR_INFOACK | NR_NAK_FLAG | NR_CHOKE_FLAG:
		/* choke flag = peer receive-busy: hold off and poll via
		 * the T4 timer until the condition clears */
		if (frametype & NR_CHOKE_FLAG) {
			nrom->condition |= NR_COND_PEER_RX_BUSY;
			nr_start_t4timer(sk);
		} else {
			nrom->condition &= ~NR_COND_PEER_RX_BUSY;
			nr_stop_t4timer(sk);
		}
		if (!nr_validate_nr(sk, nr)) {
			break;
		}
		if (frametype & NR_NAK_FLAG) {
			/* NAK: ack frames up to nr, then retransmit */
			nr_frames_acked(sk, nr);
			nr_send_nak_frame(sk);
		} else {
			if (nrom->condition & NR_COND_PEER_RX_BUSY) {
				nr_frames_acked(sk, nr);
			} else {
				nr_check_iframes_acked(sk, nr);
			}
		}
		break;

	case NR_INFO:
	case NR_INFO | NR_NAK_FLAG:
	case NR_INFO | NR_CHOKE_FLAG:
	case NR_INFO | NR_MORE_FLAG:
	case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG:
	case NR_INFO | NR_CHOKE_FLAG | NR_MORE_FLAG:
	case NR_INFO | NR_NAK_FLAG | NR_MORE_FLAG:
	case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG | NR_MORE_FLAG:
		/* first process the piggy-backed N(R), exactly as for an
		 * NR_INFOACK above */
		if (frametype & NR_CHOKE_FLAG) {
			nrom->condition |= NR_COND_PEER_RX_BUSY;
			nr_start_t4timer(sk);
		} else {
			nrom->condition &= ~NR_COND_PEER_RX_BUSY;
			nr_stop_t4timer(sk);
		}
		if (nr_validate_nr(sk, nr)) {
			if (frametype & NR_NAK_FLAG) {
				nr_frames_acked(sk, nr);
				nr_send_nak_frame(sk);
			} else {
				if (nrom->condition & NR_COND_PEER_RX_BUSY) {
					nr_frames_acked(sk, nr);
				} else {
					nr_check_iframes_acked(sk, nr);
				}
			}
		}
		/* park the new frame on the resequencing queue; the loop
		 * below delivers everything that is now in order */
		queued = 1;
		skb_queue_head(&nrom->reseq_queue, skb);
		if (nrom->condition & NR_COND_OWN_RX_BUSY)
			break;
		skb_queue_head_init(&temp_queue);
		/* repeatedly sweep reseq_queue: deliver frames whose
		 * N(S) == V(R), keep in-window frames for another pass,
		 * drop everything else; stop when a full sweep makes
		 * no progress */
		do {
			save_vr = nrom->vr;
			while ((skbn = skb_dequeue(&nrom->reseq_queue)) != NULL) {
				ns = skbn->data[17];
				if (ns == nrom->vr) {
					if (nr_queue_rx_frame(sk, skbn, frametype & NR_MORE_FLAG) == 0) {
						nrom->vr = (nrom->vr + 1) % NR_MODULUS;
					} else {
						/* couldn't deliver: mark own
						 * receive-busy, retry later */
						nrom->condition |= NR_COND_OWN_RX_BUSY;
						skb_queue_tail(&temp_queue, skbn);
					}
				} else if (nr_in_rx_window(sk, ns)) {
					skb_queue_tail(&temp_queue, skbn);
				} else {
					kfree_skb(skbn);
				}
			}
			while ((skbn = skb_dequeue(&temp_queue)) != NULL) {
				skb_queue_tail(&nrom->reseq_queue, skbn);
			}
		} while (save_vr != nrom->vr);
		/*
		 * Window is full, ack it immediately.
		 */
		if (((nrom->vl + nrom->window) % NR_MODULUS) == nrom->vr) {
			nr_enquiry_response(sk);
		} else {
			/* otherwise delay the ack via the T2 timer */
			if (!(nrom->condition & NR_COND_ACK_PENDING)) {
				nrom->condition |= NR_COND_ACK_PENDING;
				nr_start_t2timer(sk);
			}
		}
		break;

	case NR_RESET:
		if (sysctl_netrom_reset_circuit)
			nr_disconnect(sk, ECONNRESET);
		break;

	default:
		break;
	}

	return queued;
}
/*
 * Higher level upcall for a LAPB frame - called with sk locked.
 * Dispatches the frame to the state machine matching the socket's
 * current state; returns 1 if the skb was consumed, 0 otherwise.
 */
int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb)
{
	struct nr_sock *nrsk = nr_sk(sk);
	int frametype;
	int queued = 0;

	if (nrsk->state == NR_STATE_0)
		return 0;

	frametype = skb->data[19];

	if (nrsk->state == NR_STATE_1)
		queued = nr_state1_machine(sk, skb, frametype);
	else if (nrsk->state == NR_STATE_2)
		queued = nr_state2_machine(sk, skb, frametype);
	else if (nrsk->state == NR_STATE_3)
		queued = nr_state3_machine(sk, skb, frametype);

	nr_kick(sk);

	return queued;
}
| gpl-2.0 |
TheTypoMaster/ghost | drivers/ps3/ps3-lpm.c | 8168 | 31964 | /*
* PS3 Logical Performance Monitor.
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/ps3.h>
#include <asm/lv1call.h>
#include <asm/cell-pmu.h>
/* BOOKMARK tag macros */
#define PS3_PM_BOOKMARK_START 0x8000000000000000ULL
#define PS3_PM_BOOKMARK_STOP 0x4000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_KERNEL 0x1000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_USER 0x3000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_MASK_HI 0xF000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_MASK_LO 0x0F00000000000000ULL
/* CBE PM CONTROL register macros */
#define PS3_PM_CONTROL_PPU_TH0_BOOKMARK 0x00001000
#define PS3_PM_CONTROL_PPU_TH1_BOOKMARK 0x00000800
#define PS3_PM_CONTROL_PPU_COUNT_MODE_MASK 0x000C0000
#define PS3_PM_CONTROL_PPU_COUNT_MODE_PROBLEM 0x00080000
#define PS3_WRITE_PM_MASK 0xFFFFFFFFFFFFFFFFULL
/* CBE PM START STOP register macros */
#define PS3_PM_START_STOP_PPU_TH0_BOOKMARK_START 0x02000000
#define PS3_PM_START_STOP_PPU_TH1_BOOKMARK_START 0x01000000
#define PS3_PM_START_STOP_PPU_TH0_BOOKMARK_STOP 0x00020000
#define PS3_PM_START_STOP_PPU_TH1_BOOKMARK_STOP 0x00010000
#define PS3_PM_START_STOP_START_MASK 0xFF000000
#define PS3_PM_START_STOP_STOP_MASK 0x00FF0000
/* CBE PM COUNTER register macres */
#define PS3_PM_COUNTER_MASK_HI 0xFFFFFFFF00000000ULL
#define PS3_PM_COUNTER_MASK_LO 0x00000000FFFFFFFFULL
/* BASE SIGNAL GROUP NUMBER macros */
#define PM_ISLAND2_BASE_SIGNAL_GROUP_NUMBER 0
#define PM_ISLAND2_SIGNAL_GROUP_NUMBER1 6
#define PM_ISLAND2_SIGNAL_GROUP_NUMBER2 7
#define PM_ISLAND3_BASE_SIGNAL_GROUP_NUMBER 7
#define PM_ISLAND4_BASE_SIGNAL_GROUP_NUMBER 15
#define PM_SPU_TRIGGER_SIGNAL_GROUP_NUMBER 17
#define PM_SPU_EVENT_SIGNAL_GROUP_NUMBER 18
#define PM_ISLAND5_BASE_SIGNAL_GROUP_NUMBER 18
#define PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER 24
#define PM_ISLAND7_BASE_SIGNAL_GROUP_NUMBER 49
#define PM_ISLAND8_BASE_SIGNAL_GROUP_NUMBER 52
#define PM_SIG_GROUP_SPU 41
#define PM_SIG_GROUP_SPU_TRIGGER 42
#define PM_SIG_GROUP_SPU_EVENT 43
#define PM_SIG_GROUP_MFC_MAX 60
/**
* struct ps3_lpm_shadow_regs - Performance monitor shadow registers.
*
* @pm_control: Shadow of the processor's pm_control register.
* @pm_start_stop: Shadow of the processor's pm_start_stop register.
* @group_control: Shadow of the processor's group_control register.
* @debug_bus_control: Shadow of the processor's debug_bus_control register.
*
* The logical performance monitor provides a write-only interface to
* these processor registers. These shadow variables cache the processor
* register values for reading.
*
* The initial value of the shadow registers at lpm creation is
* PS3_LPM_SHADOW_REG_INIT.
*/
struct ps3_lpm_shadow_regs {
	u64 pm_control;		/* cached copy of pm_control */
	u64 pm_start_stop;	/* cached copy of pm_start_stop */
	u64 group_control;	/* cached copy of group_control */
	u64 debug_bus_control;	/* cached copy of debug_bus_control */
};

/* Initial "not yet written" value for all shadow registers. */
#define PS3_LPM_SHADOW_REG_INIT 0xFFFFFFFF00000000ULL
/**
 * struct ps3_lpm_priv - Private lpm device data.
 *
 * @open: An atomic variable indicating the lpm driver has been opened.
 * @rights: The lpm rights granted by the system policy module. A logical
 *  OR of enum ps3_lpm_rights.
 * @node_id: The node id of a BE processor whose performance monitor this
 *  lpar has the right to use.
 * @pu_id: The lv1 id of the logical PU.
 * @lpm_id: The lv1 id of this lpm instance.
 * @outlet_id: The outlet created by lv1 for this lpm instance.
 * @tb_count: The number of bytes of data held in the lv1 trace buffer.
 * @tb_cache: Kernel buffer to receive the data from the lv1 trace buffer.
 *  Must be 128 byte aligned.
 * @tb_cache_size: Size of the kernel @tb_cache buffer. Must be 128 byte
 *  aligned.
 * @tb_cache_internal: An unaligned buffer allocated by this driver to be
 *  used for the trace buffer cache when ps3_lpm_open() is called with a
 *  NULL tb_cache argument. Otherwise unused.
 * @shadow: Processor register shadow of type struct ps3_lpm_shadow_regs.
 * @sbd: The struct ps3_system_bus_device attached to this driver.
 *
 * The trace buffer is a buffer allocated and used internally to the lv1
 * hypervisor to collect trace data. The trace buffer cache is a guest
 * buffer that accepts the trace data from the trace buffer.
 */
struct ps3_lpm_priv {
	atomic_t open;
	u64 rights;
	u64 node_id;
	u64 pu_id;
	u64 lpm_id;
	u64 outlet_id;
	u64 tb_count;
	void *tb_cache;
	u64 tb_cache_size;
	void *tb_cache_internal;
	struct ps3_lpm_shadow_regs shadow;
	struct ps3_system_bus_device *sbd;
};
/* Size in bytes of the trace buffer cache allocated internally when the
 * caller of ps3_lpm_open() does not supply its own buffer. */
enum {
	PS3_LPM_DEFAULT_TB_CACHE_SIZE = 0x4000,
};
/**
 * lpm_priv - Static instance of the lpm data.
 *
 * Since the exported routines don't support the notion of a device
 * instance we need to hold the instance in this static variable
 * and then only allow at most one instance at a time to be created.
 *
 * Allocated in ps3_lpm_probe(), freed and reset in ps3_lpm_remove().
 */
static struct ps3_lpm_priv *lpm_priv;
/* Return the core struct device of the attached system bus device.
 * Only valid after a successful ps3_lpm_probe(). */
static struct device *sbd_core(void)
{
	BUG_ON(!lpm_priv);
	BUG_ON(!lpm_priv->sbd);

	return &lpm_priv->sbd->core;
}
/**
 * use_start_stop_bookmark - Enable the PPU bookmark trace.
 *
 * Enables the PPU bookmark triggers, but ONLY if no other triggers are
 * already set (see ps3_enable_pm()).  The start/stop bookmarks are
 * inserted in ps3_enable_pm() and ps3_disable_pm() to bracket the
 * measurement interval.
 *
 * Used to get good quality of the performance counter.
 */
enum {use_start_stop_bookmark = 1,};
/**
 * ps3_set_bookmark - Write a raw 64 bit bookmark value to the bookmark SPR.
 * @bookmark: Value written to SPRN_BKMK.
 *
 * The nop padding on both sides of the mtspr keeps traced branches away
 * from the register write; do not reorder or remove it.
 */
void ps3_set_bookmark(u64 bookmark)
{
	/*
	 * As per the PPE book IV, to avoid bookmark loss there must
	 * not be a traced branch within 10 cycles of setting the
	 * SPRN_BKMK register. The actual text is unclear if 'within'
	 * includes cycles before the call.
	 */
	asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
	mtspr(SPRN_BKMK, bookmark);
	asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
}
EXPORT_SYMBOL_GPL(ps3_set_bookmark);
/**
 * ps3_set_pm_bookmark - Compose and set a kernel-tagged bookmark.
 * @tag: Tag bits, shifted to bit 56 and masked with
 *  PS3_PM_BOOKMARK_TAG_MASK_LO.
 * @incident: Incident code, placed at bit 48.
 * @th_id: Thread id, placed at bit 32.
 *
 * The low 32 bits of the bookmark carry the current timebase value.
 */
void ps3_set_pm_bookmark(u64 tag, u64 incident, u64 th_id)
{
	u64 bookmark = PS3_PM_BOOKMARK_TAG_KERNEL
		| (get_tb() & 0x00000000FFFFFFFFULL);

	bookmark |= ((tag << 56) & PS3_PM_BOOKMARK_TAG_MASK_LO)
		| (incident << 48) | (th_id << 32);

	ps3_set_bookmark(bookmark);
}
EXPORT_SYMBOL_GPL(ps3_set_pm_bookmark);
/**
 * ps3_read_phys_ctr - Read physical counter registers.
 *
 * Each physical counter can act as one 32 bit counter or as two 16 bit
 * counters.
 *
 * Returns 0 for an out-of-range counter number or on a hypervisor error.
 */
u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr)
{
	int result;
	u64 counter0415;	/* packed word holding phys counters 0 and 1 */
	u64 counter2637;	/* packed word holding phys counters 2 and 3 */

	if (phys_ctr >= NR_PHYS_CTRS) {
		dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
			__LINE__, phys_ctr);
		return 0;
	}

	/* A "set" with zero values and zero write masks leaves the
	 * counters untouched and returns their current contents —
	 * effectively a read (lv1 semantics as used here). */
	result = lv1_set_lpm_counter(lpm_priv->lpm_id, 0, 0, 0, 0, &counter0415,
				     &counter2637);
	if (result) {
		dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter failed: "
			"phys_ctr %u, %s\n", __func__, __LINE__, phys_ctr,
			ps3_result(result));
		return 0;
	}

	switch (phys_ctr) {
	case 0:
		return counter0415 >> 32;
	case 1:
		return counter0415 & PS3_PM_COUNTER_MASK_LO;
	case 2:
		return counter2637 >> 32;
	case 3:
		return counter2637 & PS3_PM_COUNTER_MASK_LO;
	default:
		BUG();	/* unreachable: range checked above */
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ps3_read_phys_ctr);
/**
 * ps3_write_phys_ctr - Write physical counter registers.
 *
 * Each physical counter can act as one 32 bit counter or as two 16 bit
 * counters.
 *
 * The lv1 call takes both packed counter words together with per-word
 * write masks, so only the half selected by @phys_ctr is modified.
 */
void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
{
	u64 counter0415;	/* new value for the counters 0/1 word */
	u64 counter0415_mask;
	u64 counter2637;	/* new value for the counters 2/3 word */
	u64 counter2637_mask;
	int result;

	if (phys_ctr >= NR_PHYS_CTRS) {
		dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
			__LINE__, phys_ctr);
		return;
	}

	/* Select which 32 bit half of which packed word to update. */
	switch (phys_ctr) {
	case 0:
		counter0415 = (u64)val << 32;
		counter0415_mask = PS3_PM_COUNTER_MASK_HI;
		counter2637 = 0x0;
		counter2637_mask = 0x0;
		break;
	case 1:
		counter0415 = (u64)val;
		counter0415_mask = PS3_PM_COUNTER_MASK_LO;
		counter2637 = 0x0;
		counter2637_mask = 0x0;
		break;
	case 2:
		counter0415 = 0x0;
		counter0415_mask = 0x0;
		counter2637 = (u64)val << 32;
		counter2637_mask = PS3_PM_COUNTER_MASK_HI;
		break;
	case 3:
		counter0415 = 0x0;
		counter0415_mask = 0x0;
		counter2637 = (u64)val;
		counter2637_mask = PS3_PM_COUNTER_MASK_LO;
		break;
	default:
		BUG();	/* unreachable: range checked above */
	}

	result = lv1_set_lpm_counter(lpm_priv->lpm_id,
				     counter0415, counter0415_mask,
				     counter2637, counter2637_mask,
				     &counter0415, &counter2637);
	if (result)
		dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter failed: "
			"phys_ctr %u, val %u, %s\n", __func__, __LINE__,
			phys_ctr, val, ps3_result(result));
}
EXPORT_SYMBOL_GPL(ps3_write_phys_ctr);
/**
 * ps3_read_ctr - Read a logical counter.
 *
 * Reads 16 or 32 bits depending on the current size of the underlying
 * physical counter.  In 16 bit mode, logical counters below
 * NR_PHYS_CTRS occupy the upper half of the physical value and the
 * rest (counters 4, 5, 6 & 7) occupy the lower half.
 */
u32 ps3_read_ctr(u32 cpu, u32 ctr)
{
	u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
	u32 val = ps3_read_phys_ctr(cpu, phys_ctr);

	if (ps3_get_ctr_size(cpu, phys_ctr) != 16)
		return val;

	return (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);
}
EXPORT_SYMBOL_GPL(ps3_read_ctr);
/**
 * ps3_write_ctr - Write counter.
 *
 * Write 16 or 32 bits depending on the current size of the counter.
 * Counters 4, 5, 6 & 7 are always 16 bit.
 *
 * In 16 bit mode the other half of the physical counter is read back
 * first so it can be preserved in the merged 32 bit value written out.
 */
void ps3_write_ctr(u32 cpu, u32 ctr, u32 val)
{
	u32 phys_ctr;
	u32 phys_val;

	phys_ctr = ctr & (NR_PHYS_CTRS - 1);

	if (ps3_get_ctr_size(cpu, phys_ctr) == 16) {
		phys_val = ps3_read_phys_ctr(cpu, phys_ctr);

		if (ctr < NR_PHYS_CTRS)
			/* upper half: keep the existing lower half */
			val = (val << 16) | (phys_val & 0xffff);
		else
			/* lower half: keep the existing upper half */
			val = (val & 0xffff) | (phys_val & 0xffff0000);
	}

	ps3_write_phys_ctr(cpu, phys_ctr, val);
}
EXPORT_SYMBOL_GPL(ps3_write_ctr);
/**
 * ps3_read_pm07_control - Read counter control registers.
 *
 * Each logical counter has a corresponding control register.
 *
 * Stub on PS3: always returns 0.  (Presumably the lv1 counter control
 * interface offers no separate read path — confirm against the lv1
 * documentation.)
 */
u32 ps3_read_pm07_control(u32 cpu, u32 ctr)
{
	return 0;
}
EXPORT_SYMBOL_GPL(ps3_read_pm07_control);
/**
 * ps3_write_pm07_control - Write counter control registers.
 *
 * Each logical counter has a corresponding control register.
 *
 * The full register is written (all-ones write mask); the previous
 * value returned by lv1 is discarded.
 */
void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val)
{
	int result;
	static const u64 mask = 0xFFFFFFFFFFFFFFFFULL;	/* write every bit */
	u64 old_value;

	if (ctr >= NR_CTRS) {
		dev_dbg(sbd_core(), "%s:%u: ctr too big: %u\n", __func__,
			__LINE__, ctr);
		return;
	}

	result = lv1_set_lpm_counter_control(lpm_priv->lpm_id, ctr, val, mask,
					     &old_value);
	if (result)
		dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter_control "
			"failed: ctr %u, %s\n", __func__, __LINE__, ctr,
			ps3_result(result));
}
EXPORT_SYMBOL_GPL(ps3_write_pm07_control);
/**
 * ps3_read_pm - Read Other LPM control registers.
 *
 * pm_control, pm_start_stop, group_control and debug_bus_control are
 * returned from the driver's shadow copies (see struct
 * ps3_lpm_shadow_regs).  pm_interval and pm_status are fetched through
 * lv1 calls; for pm_interval a "set" with a zero write mask acts as a
 * read.  trace_address always reports an empty trace buffer and
 * ext_tr_timer is not implemented (returns 0).
 *
 * Fix: the pm_interval failure message read "set_inteval"; corrected
 * to "set_interval".
 */
u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg)
{
	int result = 0;
	u64 val = 0;

	switch (reg) {
	case pm_control:
		return lpm_priv->shadow.pm_control;
	case trace_address:
		return CBE_PM_TRACE_BUF_EMPTY;
	case pm_start_stop:
		return lpm_priv->shadow.pm_start_stop;
	case pm_interval:
		/* Zero value and zero mask: query without modifying. */
		result = lv1_set_lpm_interval(lpm_priv->lpm_id, 0, 0, &val);
		if (result) {
			val = 0;
			dev_dbg(sbd_core(), "%s:%u: lv1 set_interval failed: "
				"reg %u, %s\n", __func__, __LINE__, reg,
				ps3_result(result));
		}
		return (u32)val;
	case group_control:
		return lpm_priv->shadow.group_control;
	case debug_bus_control:
		return lpm_priv->shadow.debug_bus_control;
	case pm_status:
		/* Reading the interrupt status also clears it (see
		 * ps3_get_and_clear_pm_interrupts()). */
		result = lv1_get_lpm_interrupt_status(lpm_priv->lpm_id,
						      &val);
		if (result) {
			val = 0;
			dev_dbg(sbd_core(), "%s:%u: lv1 get_lpm_status failed: "
				"reg %u, %s\n", __func__, __LINE__, reg,
				ps3_result(result));
		}
		return (u32)val;
	case ext_tr_timer:
		return 0;
	default:
		dev_dbg(sbd_core(), "%s:%u: unknown reg: %d\n", __func__,
			__LINE__, reg);
		BUG();
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ps3_read_pm);
/**
 * ps3_write_pm - Write Other LPM control registers.
 *
 * For the shadowed registers (group_control, debug_bus_control,
 * pm_control, pm_start_stop) the lv1 call is skipped when the new
 * value matches the shadow; the shadow is then updated
 * unconditionally.  pm_interval is always written through.  Writes to
 * trace_address, ext_tr_timer and pm_status are silently ignored.
 */
void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
{
	int result = 0;
	u64 dummy;

	switch (reg) {
	case group_control:
		if (val != lpm_priv->shadow.group_control)
			result = lv1_set_lpm_group_control(lpm_priv->lpm_id,
							   val,
							   PS3_WRITE_PM_MASK,
							   &dummy);
		lpm_priv->shadow.group_control = val;
		break;
	case debug_bus_control:
		if (val != lpm_priv->shadow.debug_bus_control)
			result = lv1_set_lpm_debug_bus_control(lpm_priv->lpm_id,
							       val,
							       PS3_WRITE_PM_MASK,
							       &dummy);
		lpm_priv->shadow.debug_bus_control = val;
		break;
	case pm_control:
		/* Force the PPU thread bookmark bits on when start/stop
		 * bookmarks are in use (see use_start_stop_bookmark). */
		if (use_start_stop_bookmark)
			val |= (PS3_PM_CONTROL_PPU_TH0_BOOKMARK |
				PS3_PM_CONTROL_PPU_TH1_BOOKMARK);
		if (val != lpm_priv->shadow.pm_control)
			result = lv1_set_lpm_general_control(lpm_priv->lpm_id,
							     val,
							     PS3_WRITE_PM_MASK,
							     0, 0, &dummy,
							     &dummy);
		lpm_priv->shadow.pm_control = val;
		break;
	case pm_interval:
		result = lv1_set_lpm_interval(lpm_priv->lpm_id, val,
					      PS3_WRITE_PM_MASK, &dummy);
		break;
	case pm_start_stop:
		if (val != lpm_priv->shadow.pm_start_stop)
			result = lv1_set_lpm_trigger_control(lpm_priv->lpm_id,
							     val,
							     PS3_WRITE_PM_MASK,
							     &dummy);
		lpm_priv->shadow.pm_start_stop = val;
		break;
	case trace_address:
	case ext_tr_timer:
	case pm_status:
		/* Not writable on PS3. */
		break;
	default:
		dev_dbg(sbd_core(), "%s:%u: unknown reg: %d\n", __func__,
			__LINE__, reg);
		BUG();
		break;
	}

	if (result)
		dev_err(sbd_core(), "%s:%u: lv1 set_control failed: "
			"reg %u, %s\n", __func__, __LINE__, reg,
			ps3_result(result));
}
EXPORT_SYMBOL_GPL(ps3_write_pm);
/**
 * ps3_get_ctr_size - Get the size of a physical counter.
 *
 * Returns either 16 or 32; returns 0 for an invalid counter number.
 */
u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr)
{
	u32 pm_ctrl;

	if (phys_ctr >= NR_PHYS_CTRS) {
		dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
			__LINE__, phys_ctr);
		return 0;
	}

	/* The per-counter 16 bit mode flag lives in pm_control, which
	 * ps3_read_pm() serves from the shadow copy. */
	pm_ctrl = ps3_read_pm(cpu, pm_control);
	return (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
}
EXPORT_SYMBOL_GPL(ps3_get_ctr_size);
/**
 * ps3_set_ctr_size - Set the size of a physical counter to 16 or 32 bits.
 *
 * Read-modify-write of the relevant mode bit in pm_control.  Any other
 * @ctr_size value is a bug.
 */
void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
{
	u32 pm_ctrl;

	if (phys_ctr >= NR_PHYS_CTRS) {
		dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
			__LINE__, phys_ctr);
		return;
	}

	pm_ctrl = ps3_read_pm(cpu, pm_control);
	switch (ctr_size) {
	case 16:
		pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
		ps3_write_pm(cpu, pm_control, pm_ctrl);
		break;

	case 32:
		pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
		ps3_write_pm(cpu, pm_control, pm_ctrl);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL_GPL(ps3_set_ctr_size);
/* Map an island 2 subgroup onto its lv1 signal group number.
 * Subgroup 2 is renumbered to 3; 0..6 map linearly from the island
 * base; 7 and everything above use the two dedicated group numbers. */
static u64 pm_translate_signal_group_number_on_island2(u64 subgroup)
{
	if (subgroup == 2)
		subgroup = 3;

	if (subgroup <= 6)
		return PM_ISLAND2_BASE_SIGNAL_GROUP_NUMBER + subgroup;

	return (subgroup == 7) ? PM_ISLAND2_SIGNAL_GROUP_NUMBER1
			       : PM_ISLAND2_SIGNAL_GROUP_NUMBER2;
}
/* Map an island 3 subgroup onto its lv1 signal group number.
 * Subgroups 2..4 shift up by two and subgroup 5 becomes 8 before the
 * island base offset is applied. */
static u64 pm_translate_signal_group_number_on_island3(u64 subgroup)
{
	if (subgroup >= 2 && subgroup <= 4)
		subgroup += 2;
	else if (subgroup == 5)
		subgroup = 8;

	return PM_ISLAND3_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
/* Island 4 subgroups map linearly onto the lv1 signal group range. */
static u64 pm_translate_signal_group_number_on_island4(u64 subgroup)
{
	return PM_ISLAND4_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
/* Map an island 5 subgroup onto its lv1 signal group number.
 * Subgroup 3 is renumbered to 4 and subgroup 4 to 6 before the island
 * base offset is applied. */
static u64 pm_translate_signal_group_number_on_island5(u64 subgroup)
{
	if (subgroup == 3)
		subgroup = 4;
	else if (subgroup == 4)
		subgroup = 6;

	return PM_ISLAND5_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
/* Map an island 6 subgroup/subsubgroup onto its lv1 signal group
 * number.  Subgroups 3..5 shift up by one; the subsubgroup (only
 * meaningful once the adjusted subgroup exceeds 5) is renumbered in
 * three ranges to match the lv1 numbering. */
static u64 pm_translate_signal_group_number_on_island6(u64 subgroup,
						       u64 subsubgroup)
{
	switch (subgroup) {
	case 3:
	case 4:
	case 5:
		subgroup += 1;
		break;
	default:
		break;
	}

	switch (subsubgroup) {
	case 4:
	case 5:
	case 6:
		subsubgroup += 2;
		break;
	case 7:
	case 8:
	case 9:
	case 10:
		subsubgroup += 4;
		break;
	case 11:
	case 12:
	case 13:
		subsubgroup += 5;
		break;
	default:
		break;
	}

	if (subgroup <= 5)
		return (PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER + subgroup);
	else
		return (PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER + subgroup
			+ subsubgroup - 1);
}
/* Island 7 subgroups map linearly onto the lv1 signal group range. */
static u64 pm_translate_signal_group_number_on_island7(u64 subgroup)
{
	return PM_ISLAND7_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
/* Island 8 subgroups map linearly onto the lv1 signal group range. */
static u64 pm_translate_signal_group_number_on_island8(u64 subgroup)
{
	return PM_ISLAND8_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
/* Translate a cell pm signal group number into the lv1 signal group
 * number.
 *
 * The pm group number encodes island and subgroup in decimal:
 *   2x .. 8x        -> island 2..8, subgroup x
 *   2xx             -> island 2, subgroup xx
 *   6xx (600..699)  -> island 6, subgroup 5, subsubgroup (group - 650)
 *   6xxx (6000..6999) -> island 6, subgroup 5, subsubgroup (group - 6500)
 * A group number outside these ranges leaves island == 0 and hits the
 * BUG() in the dispatch below.
 */
static u64 pm_signal_group_to_ps3_lv1_signal_group(u64 group)
{
	u64 island;
	u64 subgroup;
	u64 subsubgroup;

	subgroup = 0;
	subsubgroup = 0;
	island = 0;
	if (group < 1000) {
		if (group < 100) {
			if (20 <= group && group < 30) {
				island = 2;
				subgroup = group - 20;
			} else if (30 <= group && group < 40) {
				island = 3;
				subgroup = group - 30;
			} else if (40 <= group && group < 50) {
				island = 4;
				subgroup = group - 40;
			} else if (50 <= group && group < 60) {
				island = 5;
				subgroup = group - 50;
			} else if (60 <= group && group < 70) {
				island = 6;
				subgroup = group - 60;
			} else if (70 <= group && group < 80) {
				island = 7;
				subgroup = group - 70;
			} else if (80 <= group && group < 90) {
				island = 8;
				subgroup = group - 80;
			}
		} else if (200 <= group && group < 300) {
			island = 2;
			subgroup = group - 200;
		} else if (600 <= group && group < 700) {
			island = 6;
			subgroup = 5;
			subsubgroup = group - 650;
		}
	} else if (6000 <= group && group < 7000) {
		island = 6;
		subgroup = 5;
		subsubgroup = group - 6500;
	}

	/* Dispatch to the per-island renumbering helper. */
	switch (island) {
	case 2:
		return pm_translate_signal_group_number_on_island2(subgroup);
	case 3:
		return pm_translate_signal_group_number_on_island3(subgroup);
	case 4:
		return pm_translate_signal_group_number_on_island4(subgroup);
	case 5:
		return pm_translate_signal_group_number_on_island5(subgroup);
	case 6:
		return pm_translate_signal_group_number_on_island6(subgroup,
								   subsubgroup);
	case 7:
		return pm_translate_signal_group_number_on_island7(subgroup);
	case 8:
		return pm_translate_signal_group_number_on_island8(subgroup);
	default:
		dev_dbg(sbd_core(), "%s:%u: island not found: %llu\n", __func__,
			__LINE__, group);
		BUG();
		break;
	}
	return 0;
}
/* Translate a pm debug bus word select (1, 2, 4 or 8) into the lv1
 * nibble mask for that word.  Any unrecognized value is treated the
 * same as 8 (lowest nibble). */
static u64 pm_bus_word_to_ps3_lv1_bus_word(u8 word)
{
	if (word == 1)
		return 0xF000;
	if (word == 2)
		return 0x0F00;
	if (word == 4)
		return 0x00F0;

	return 0x000F;
}
/* Thin wrapper around lv1_set_lpm_signal() that logs all arguments on
 * failure and passes the lv1 result through to the caller. */
static int __ps3_set_signal(u64 lv1_signal_group, u64 bus_select,
			    u64 signal_select, u64 attr1, u64 attr2, u64 attr3)
{
	int ret;

	ret = lv1_set_lpm_signal(lpm_priv->lpm_id, lv1_signal_group, bus_select,
				 signal_select, attr1, attr2, attr3);
	if (ret)
		dev_err(sbd_core(),
			"%s:%u: error:%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
			__func__, __LINE__, ret, lv1_signal_group, bus_select,
			signal_select, attr1, attr2, attr3);

	return ret;
}
/**
 * ps3_set_signal - Route a pm signal onto the debug bus via lv1.
 * @signal_group: cell pm signal group number; 0 clears the routing.
 * @signal_bit: Bit within the group; only used for the SPU trigger and
 *  event groups.
 * @sub_unit: Target SPU/MFC sub unit, used for SPU signal groups.
 * @bus_word: Debug bus word select (1, 2, 4 or 8).
 */
int ps3_set_signal(u64 signal_group, u8 signal_bit, u16 sub_unit,
		   u8 bus_word)
{
	int ret;
	u64 lv1_signal_group;
	u64 bus_select;
	u64 signal_select;
	u64 attr1, attr2, attr3;

	if (signal_group == 0)
		return __ps3_set_signal(0, 0, 0, 0, 0, 0);

	lv1_signal_group =
		pm_signal_group_to_ps3_lv1_signal_group(signal_group);
	bus_select = pm_bus_word_to_ps3_lv1_bus_word(bus_word);

	switch (signal_group) {
	case PM_SIG_GROUP_SPU_TRIGGER:
		signal_select = 1;
		signal_select = signal_select << (63 - signal_bit);
		break;
	case PM_SIG_GROUP_SPU_EVENT:
		signal_select = 1;
		signal_select = (signal_select << (63 - signal_bit)) | 0x3;
		break;
	default:
		signal_select = 0;
		break;
	}

	/*
	 * 0: physical object.
	 * 1: logical object.
	 * This parameter is only used for the PPE and SPE signals.
	 */
	attr1 = 1;

	/*
	 * This parameter is used to specify the target physical/logical
	 * PPE/SPE object.
	 */
	if (PM_SIG_GROUP_SPU <= signal_group &&
	    signal_group < PM_SIG_GROUP_MFC_MAX)
		attr2 = sub_unit;
	else
		attr2 = lpm_priv->pu_id;

	/*
	 * This parameter is only used for setting the SPE signal.
	 */
	attr3 = 0;

	ret = __ps3_set_signal(lv1_signal_group, bus_select, signal_select,
			       attr1, attr2, attr3);
	if (ret)
		dev_err(sbd_core(), "%s:%u: __ps3_set_signal failed: %d\n",
			__func__, __LINE__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ps3_set_signal);
/* Return the hardware thread id of the given cpu. */
u32 ps3_get_hw_thread_id(int cpu)
{
	return get_hard_smp_processor_id(cpu);
}
EXPORT_SYMBOL_GPL(ps3_get_hw_thread_id);
/**
 * ps3_enable_pm - Enable the entire performance monitoring unit.
 *
 * When we enable the LPM, all pending writes to counters get committed.
 *
 * When start/stop bookmarks are in use and the caller has not set any
 * triggers of its own, the PPU bookmark start/stop triggers are
 * installed first, and a start bookmark is inserted once the lpm is
 * running.
 */
void ps3_enable_pm(u32 cpu)
{
	int result;
	u64 tmp;
	int insert_bookmark = 0;

	lpm_priv->tb_count = 0;

	if (use_start_stop_bookmark) {
		/* Only install bookmark triggers when no user-set
		 * start/stop triggers are present in the shadow. */
		if (!(lpm_priv->shadow.pm_start_stop &
			(PS3_PM_START_STOP_START_MASK
			| PS3_PM_START_STOP_STOP_MASK))) {
			result = lv1_set_lpm_trigger_control(lpm_priv->lpm_id,
				(PS3_PM_START_STOP_PPU_TH0_BOOKMARK_START |
				PS3_PM_START_STOP_PPU_TH1_BOOKMARK_START |
				PS3_PM_START_STOP_PPU_TH0_BOOKMARK_STOP |
				PS3_PM_START_STOP_PPU_TH1_BOOKMARK_STOP),
				0xFFFFFFFFFFFFFFFFULL, &tmp);

			if (result)
				dev_err(sbd_core(), "%s:%u: "
					"lv1_set_lpm_trigger_control failed: "
					"%s\n", __func__, __LINE__,
					ps3_result(result));

			insert_bookmark = !result;
		}
	}

	result = lv1_start_lpm(lpm_priv->lpm_id);

	if (result)
		dev_err(sbd_core(), "%s:%u: lv1_start_lpm failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	if (use_start_stop_bookmark && !result && insert_bookmark)
		ps3_set_bookmark(get_tb() | PS3_PM_BOOKMARK_START);
}
EXPORT_SYMBOL_GPL(ps3_enable_pm);
/**
 * ps3_disable_pm - Disable the entire performance monitoring unit.
 *
 * Inserts a stop bookmark, stops the lpm through lv1 and latches the
 * trace byte count reported by lv1 into lpm_priv->tb_count.  An
 * LV1_WRONG_STATE result (lpm was not running) is silently ignored.
 *
 * Fix: added the missing space in "if(result" per kernel coding style.
 */
void ps3_disable_pm(u32 cpu)
{
	int result;
	u64 tmp;

	ps3_set_bookmark(get_tb() | PS3_PM_BOOKMARK_STOP);

	result = lv1_stop_lpm(lpm_priv->lpm_id, &tmp);

	if (result) {
		if (result != LV1_WRONG_STATE)
			dev_err(sbd_core(), "%s:%u: lv1_stop_lpm failed: %s\n",
				__func__, __LINE__, ps3_result(result));
		return;
	}

	lpm_priv->tb_count = tmp;

	dev_dbg(sbd_core(), "%s:%u: tb_count %llu (%llxh)\n", __func__, __LINE__,
		lpm_priv->tb_count, lpm_priv->tb_count);
}
EXPORT_SYMBOL_GPL(ps3_disable_pm);
/**
 * ps3_lpm_copy_tb - Copy data from the trace buffer to a kernel buffer.
 * @offset: Offset in bytes from the start of the trace buffer.
 * @buf: Copy destination.
 * @count: Maximum count of bytes to copy.
 * @bytes_copied: Pointer to a variable that will receive the number of
 *  bytes copied to @buf.
 *
 * On error @buf will contain any successfully copied trace buffer data
 * and bytes_copied will be set to the number of bytes successfully copied.
 *
 * Each loop iteration asks lv1 to copy from @offset into the trace
 * buffer cache; lv1 returns in 'tmp' the byte count actually copied
 * (presumably bounded by tb_cache_size — confirm against the lv1
 * documentation), which is then forwarded to @buf.
 */
int ps3_lpm_copy_tb(unsigned long offset, void *buf, unsigned long count,
		    unsigned long *bytes_copied)
{
	int result;

	*bytes_copied = 0;

	/* No cache means the lpm was opened with PS3_LPM_TB_TYPE_NONE. */
	if (!lpm_priv->tb_cache)
		return -EPERM;

	if (offset >= lpm_priv->tb_count)
		return 0;

	count = min_t(u64, count, lpm_priv->tb_count - offset);

	while (*bytes_copied < count) {
		const unsigned long request = count - *bytes_copied;
		u64 tmp;

		result = lv1_copy_lpm_trace_buffer(lpm_priv->lpm_id, offset,
						   request, &tmp);
		if (result) {
			dev_dbg(sbd_core(), "%s:%u: 0x%lx bytes at 0x%lx\n",
				__func__, __LINE__, request, offset);

			dev_err(sbd_core(), "%s:%u: lv1_copy_lpm_trace_buffer "
				"failed: %s\n", __func__, __LINE__,
				ps3_result(result));

			return result == LV1_WRONG_STATE ? -EBUSY : -EINVAL;
		}

		memcpy(buf, lpm_priv->tb_cache, tmp);
		buf += tmp;
		*bytes_copied += tmp;
		offset += tmp;
	}
	dev_dbg(sbd_core(), "%s:%u: copied %lxh bytes\n", __func__, __LINE__,
		*bytes_copied);

	return 0;
}
EXPORT_SYMBOL_GPL(ps3_lpm_copy_tb);
/**
 * ps3_lpm_copy_tb_to_user - Copy data from the trace buffer to a user buffer.
 * @offset: Offset in bytes from the start of the trace buffer.
 * @buf: A __user copy destination.
 * @count: Maximum count of bytes to copy.
 * @bytes_copied: Pointer to a variable that will receive the number of
 *  bytes copied to @buf.
 *
 * On error @buf will contain any successfully copied trace buffer data
 * and bytes_copied will be set to the number of bytes successfully copied.
 *
 * Same staging scheme as ps3_lpm_copy_tb(), but the cache contents are
 * handed to userspace with copy_to_user().
 */
int ps3_lpm_copy_tb_to_user(unsigned long offset, void __user *buf,
			    unsigned long count, unsigned long *bytes_copied)
{
	int result;

	*bytes_copied = 0;

	/* No cache means the lpm was opened with PS3_LPM_TB_TYPE_NONE. */
	if (!lpm_priv->tb_cache)
		return -EPERM;

	if (offset >= lpm_priv->tb_count)
		return 0;

	count = min_t(u64, count, lpm_priv->tb_count - offset);

	while (*bytes_copied < count) {
		const unsigned long request = count - *bytes_copied;
		u64 tmp;

		result = lv1_copy_lpm_trace_buffer(lpm_priv->lpm_id, offset,
						   request, &tmp);
		if (result) {
			dev_dbg(sbd_core(), "%s:%u: 0x%lx bytes at 0x%lx\n",
				__func__, __LINE__, request, offset);
			dev_err(sbd_core(), "%s:%u: lv1_copy_lpm_trace_buffer "
				"failed: %s\n", __func__, __LINE__,
				ps3_result(result));

			return result == LV1_WRONG_STATE ? -EBUSY : -EINVAL;
		}

		result = copy_to_user(buf, lpm_priv->tb_cache, tmp);

		if (result) {
			dev_dbg(sbd_core(), "%s:%u: 0x%llx bytes at 0x%p\n",
				__func__, __LINE__, tmp, buf);
			dev_err(sbd_core(), "%s:%u: copy_to_user failed: %d\n",
				__func__, __LINE__, result);

			return -EFAULT;
		}

		buf += tmp;
		*bytes_copied += tmp;
		offset += tmp;
	}
	dev_dbg(sbd_core(), "%s:%u: copied %lxh bytes\n", __func__, __LINE__,
		*bytes_copied);

	return 0;
}
EXPORT_SYMBOL_GPL(ps3_lpm_copy_tb_to_user);
/**
 * ps3_get_and_clear_pm_interrupts - Read and clear the pm interrupt status.
 *
 * Clears interrupts for the entire performance monitoring unit.
 * Reading pm_status clears the interrupt bits.
 */
u32 ps3_get_and_clear_pm_interrupts(u32 cpu)
{
	return ps3_read_pm(cpu, pm_status);
}
EXPORT_SYMBOL_GPL(ps3_get_and_clear_pm_interrupts);
/**
 * ps3_enable_pm_interrupts - Enable pm interrupts.
 *
 * Enabling interrupts for the entire performance monitoring unit.
 * Enables the interrupt bits in the pm_status register.
 *
 * A zero @mask is a no-op: the write is skipped entirely.
 */
void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
{
	if (mask)
		ps3_write_pm(cpu, pm_status, mask);
}
EXPORT_SYMBOL_GPL(ps3_enable_pm_interrupts);
/**
 * ps3_disable_pm_interrupts - Disable pm interrupts.
 *
 * Disabling interrupts for the entire performance monitoring unit:
 * first drain any pending interrupt status, then write a zero mask.
 * (The original kernel-doc named ps3_enable_pm_interrupts here;
 * corrected.)
 */
void ps3_disable_pm_interrupts(u32 cpu)
{
	ps3_get_and_clear_pm_interrupts(cpu);
	ps3_write_pm(cpu, pm_status, 0);
}
EXPORT_SYMBOL_GPL(ps3_disable_pm_interrupts);
/**
 * ps3_lpm_open - Open the logical performance monitor device.
 * @tb_type: Specifies the type of trace buffer lv1 should use for this lpm
 *  instance, specified by one of enum ps3_lpm_tb_type.
 * @tb_cache: Optional user supplied buffer to use as the trace buffer cache.
 *  If NULL, the driver will allocate and manage an internal buffer.
 *  Unused when @tb_type is PS3_LPM_TB_TYPE_NONE.
 * @tb_cache_size: The size in bytes of the user supplied @tb_cache buffer.
 *  Unused when @tb_cache is NULL or @tb_type is PS3_LPM_TB_TYPE_NONE.
 *
 * Returns -EBUSY if the device is already open, -EINVAL for a
 * misaligned caller buffer or a failed lv1 construct, -ENOMEM if the
 * internal cache allocation fails.
 */
int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
		 u64 tb_cache_size)
{
	int result;
	u64 tb_size;

	BUG_ON(!lpm_priv);
	BUG_ON(tb_type != PS3_LPM_TB_TYPE_NONE
	       && tb_type != PS3_LPM_TB_TYPE_INTERNAL);

	if (tb_type == PS3_LPM_TB_TYPE_NONE && tb_cache)
		dev_dbg(sbd_core(), "%s:%u: bad in vals\n", __func__, __LINE__);

	/* Single-open device: atomically claim the open slot. */
	if (!atomic_add_unless(&lpm_priv->open, 1, 1)) {
		dev_dbg(sbd_core(), "%s:%u: busy\n", __func__, __LINE__);
		return -EBUSY;
	}

	/* Note tb_cache needs 128 byte alignment. */

	if (tb_type == PS3_LPM_TB_TYPE_NONE) {
		lpm_priv->tb_cache_size = 0;
		lpm_priv->tb_cache_internal = NULL;
		lpm_priv->tb_cache = NULL;
	} else if (tb_cache) {
		/* Caller-supplied cache: address and size must both be
		 * 128 byte aligned. */
		if (tb_cache != (void *)_ALIGN_UP((unsigned long)tb_cache, 128)
		    || tb_cache_size != _ALIGN_UP(tb_cache_size, 128)) {
			dev_err(sbd_core(), "%s:%u: unaligned tb_cache\n",
				__func__, __LINE__);
			result = -EINVAL;
			goto fail_align;
		}
		lpm_priv->tb_cache_size = tb_cache_size;
		lpm_priv->tb_cache_internal = NULL;
		lpm_priv->tb_cache = tb_cache;
	} else {
		/* Internal cache: over-allocate by 127 bytes so an
		 * aligned pointer can be carved out of it. */
		lpm_priv->tb_cache_size = PS3_LPM_DEFAULT_TB_CACHE_SIZE;
		lpm_priv->tb_cache_internal = kzalloc(
			lpm_priv->tb_cache_size + 127, GFP_KERNEL);
		if (!lpm_priv->tb_cache_internal) {
			dev_err(sbd_core(), "%s:%u: alloc internal tb_cache "
				"failed\n", __func__, __LINE__);
			result = -ENOMEM;
			goto fail_malloc;
		}
		lpm_priv->tb_cache = (void *)_ALIGN_UP(
			(unsigned long)lpm_priv->tb_cache_internal, 128);
	}

	result = lv1_construct_lpm(lpm_priv->node_id, tb_type, 0, 0,
				   ps3_mm_phys_to_lpar(__pa(lpm_priv->tb_cache)),
				   lpm_priv->tb_cache_size, &lpm_priv->lpm_id,
				   &lpm_priv->outlet_id, &tb_size);

	if (result) {
		dev_err(sbd_core(), "%s:%u: lv1_construct_lpm failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		result = -EINVAL;
		goto fail_construct;
	}

	/* Reset the register shadows to their unwritten sentinel. */
	lpm_priv->shadow.pm_control = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.pm_start_stop = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;

	dev_dbg(sbd_core(), "%s:%u: lpm_id 0x%llx, outlet_id 0x%llx, "
		"tb_size 0x%llx\n", __func__, __LINE__, lpm_priv->lpm_id,
		lpm_priv->outlet_id, tb_size);

	return 0;

fail_construct:
	kfree(lpm_priv->tb_cache_internal);
	lpm_priv->tb_cache_internal = NULL;
fail_malloc:
fail_align:
	atomic_dec(&lpm_priv->open);
	return result;
}
EXPORT_SYMBOL_GPL(ps3_lpm_open);
/**
 * ps3_lpm_close - Close the lpm device.
 *
 * Destroys the lv1 lpm instance, frees any internally allocated trace
 * buffer cache and releases the single-open slot.  Always returns 0.
 */
int ps3_lpm_close(void)
{
	dev_dbg(sbd_core(), "%s:%u\n", __func__, __LINE__);

	lv1_destruct_lpm(lpm_priv->lpm_id);	/* result intentionally ignored */
	lpm_priv->lpm_id = 0;

	kfree(lpm_priv->tb_cache_internal);
	lpm_priv->tb_cache_internal = NULL;

	atomic_dec(&lpm_priv->open);
	return 0;
}
EXPORT_SYMBOL_GPL(ps3_lpm_close);
/* Bind to the lpm system bus device: allocate and populate the single
 * static lpm_priv instance.  Refuses a second device with -EBUSY. */
static int __devinit ps3_lpm_probe(struct ps3_system_bus_device *dev)
{
	dev_dbg(&dev->core, " -> %s:%u\n", __func__, __LINE__);

	if (lpm_priv) {
		dev_info(&dev->core, "%s:%u: called twice\n",
			 __func__, __LINE__);
		return -EBUSY;
	}

	lpm_priv = kzalloc(sizeof(*lpm_priv), GFP_KERNEL);

	if (!lpm_priv)
		return -ENOMEM;

	lpm_priv->sbd = dev;
	lpm_priv->node_id = dev->lpm.node_id;
	lpm_priv->pu_id = dev->lpm.pu_id;
	lpm_priv->rights = dev->lpm.rights;

	dev_info(&dev->core, " <- %s:%u:\n", __func__, __LINE__);

	return 0;
}
/* Unbind: close the lpm and release the static instance.
 *
 * NOTE(review): ps3_lpm_close() runs even if the device was never
 * opened, so lv1_destruct_lpm() may be called with lpm_id 0 —
 * presumably rejected harmlessly by lv1; verify against the lv1 docs.
 */
static int ps3_lpm_remove(struct ps3_system_bus_device *dev)
{
	dev_dbg(&dev->core, " -> %s:%u:\n", __func__, __LINE__);

	ps3_lpm_close();

	kfree(lpm_priv);
	lpm_priv = NULL;

	dev_info(&dev->core, " <- %s:%u:\n", __func__, __LINE__);
	return 0;
}
/* System bus glue; shutdown uses the same teardown path as remove. */
static struct ps3_system_bus_driver ps3_lpm_driver = {
	.match_id = PS3_MATCH_ID_LPM,
	.core.name = "ps3-lpm",
	.core.owner = THIS_MODULE,
	.probe = ps3_lpm_probe,
	.remove = ps3_lpm_remove,
	.shutdown = ps3_lpm_remove,
};
/* Module entry point: register the system bus driver. */
static int __init ps3_lpm_init(void)
{
	pr_debug("%s:%d:\n", __func__, __LINE__);
	return ps3_system_bus_driver_register(&ps3_lpm_driver);
}
/* Module exit point: unregister the system bus driver. */
static void __exit ps3_lpm_exit(void)
{
	pr_debug("%s:%d:\n", __func__, __LINE__);
	ps3_system_bus_driver_unregister(&ps3_lpm_driver);
}
/* Module boilerplate. */
module_init(ps3_lpm_init);
module_exit(ps3_lpm_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PS3 Logical Performance Monitor Driver");
MODULE_AUTHOR("Sony Corporation");
MODULE_ALIAS(PS3_MODULE_ALIAS_LPM);
/* s3d.c: Sun 3DLABS XVR-2500 et al. driver for sparc64 systems
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <asm/io.h>
/* Per-device state for one XVR-2500 framebuffer. */
struct s3d_info {
	struct fb_info *info;		/* registered framebuffer */
	struct pci_dev *pdev;

	char __iomem *fb_base;		/* mapped BAR 1 aperture */
	unsigned long fb_base_phys;	/* physical base of BAR 1 */

	struct device_node *of_node;	/* OF node of the PCI device */

	unsigned int width;		/* from OF "width" property */
	unsigned int height;		/* from OF "height" property */
	unsigned int depth;		/* bits per pixel, OF "depth" */
	unsigned int fb_size;		/* line_length * height, bytes */

	u32 pseudo_palette[16];		/* truecolor console palette */
};
/* Read the width/height/depth OF properties into the device state.
 * Width and height are mandatory; depth falls back to 8. */
static int __devinit s3d_get_props(struct s3d_info *sp)
{
	struct device_node *dp = sp->of_node;

	sp->width = of_getintprop_default(dp, "width", 0);
	sp->height = of_getintprop_default(dp, "height", 0);
	sp->depth = of_getintprop_default(dp, "depth", 8);

	if (sp->width && sp->height)
		return 0;

	printk(KERN_ERR "s3d: Critical properties missing for %s\n",
	       pci_name(sp->pdev));
	return -EINVAL;
}
static int s3d_setcolreg(unsigned regno,
unsigned red, unsigned green, unsigned blue,
unsigned transp, struct fb_info *info)
{
u32 value;
if (regno < 16) {
red >>= 8;
green >>= 8;
blue >>= 8;
value = (blue << 24) | (green << 16) | (red << 8);
((u32 *)info->pseudo_palette)[regno] = value;
}
return 0;
}
/* Only palette setup is device specific; drawing uses the generic
 * cfb_* software helpers. */
static struct fb_ops s3d_ops = {
	.owner			= THIS_MODULE,
	.fb_setcolreg		= s3d_setcolreg,
	.fb_fillrect		= cfb_fillrect,
	.fb_copyarea		= cfb_copyarea,
	.fb_imageblit		= cfb_imageblit,
};
/* Populate the fb_info var/fix data from the probed device state and
 * allocate the color map.  Color channel offsets are fixed BGR
 * (blue at bit 24, green at 16, red at 8), matching s3d_setcolreg(). */
static int __devinit s3d_set_fbinfo(struct s3d_info *sp)
{
	struct fb_info *info = sp->info;
	struct fb_var_screeninfo *var = &info->var;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &s3d_ops;
	info->screen_base = sp->fb_base;
	info->screen_size = sp->fb_size;

	info->pseudo_palette = sp->pseudo_palette;

	/* Fill fix common fields */
	strlcpy(info->fix.id, "s3d", sizeof(info->fix.id));
	info->fix.smem_start = sp->fb_base_phys;
	info->fix.smem_len = sp->fb_size;
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	if (sp->depth == 32 || sp->depth == 24)
		info->fix.visual = FB_VISUAL_TRUECOLOR;
	else
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;

	var->xres = sp->width;
	var->yres = sp->height;
	var->xres_virtual = var->xres;
	var->yres_virtual = var->yres;
	var->bits_per_pixel = sp->depth;

	var->red.offset = 8;
	var->red.length = 8;
	var->green.offset = 16;
	var->green.length = 8;
	var->blue.offset = 24;
	var->blue.length = 8;
	var->transp.offset = 0;
	var->transp.length = 0;

	if (fb_alloc_cmap(&info->cmap, 256, 0)) {
		printk(KERN_ERR "s3d: Cannot allocate color map.\n");
		return -ENOMEM;
	}

	return 0;
}
/* Probe one XVR-2500: enable the PCI device, map BAR 1 (the
 * framebuffer aperture), fill in the fb_info from OF properties and
 * register the frame buffer.  All failure paths unwind via gotos in
 * reverse order of acquisition.
 *
 * Bug fix: on ioremap() failure 'err' still held 0 from the successful
 * s3d_get_props() call, so the probe used to report success while
 * tearing everything down; it now returns -ENOMEM.
 */
static int __devinit s3d_pci_register(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct fb_info *info;
	struct s3d_info *sp;
	int err;

	err = pci_enable_device(pdev);
	if (err < 0) {
		printk(KERN_ERR "s3d: Cannot enable PCI device %s\n",
		       pci_name(pdev));
		goto err_out;
	}

	info = framebuffer_alloc(sizeof(struct s3d_info), &pdev->dev);
	if (!info) {
		printk(KERN_ERR "s3d: Cannot allocate fb_info\n");
		err = -ENOMEM;
		goto err_disable;
	}

	sp = info->par;
	sp->info = info;
	sp->pdev = pdev;
	sp->of_node = pci_device_to_OF_node(pdev);
	if (!sp->of_node) {
		printk(KERN_ERR "s3d: Cannot find OF node of %s\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto err_release_fb;
	}

	sp->fb_base_phys = pci_resource_start (pdev, 1);

	err = pci_request_region(pdev, 1, "s3d framebuffer");
	if (err < 0) {
		printk("s3d: Cannot request region 1 for %s\n",
		       pci_name(pdev));
		goto err_release_fb;
	}

	err = s3d_get_props(sp);
	if (err)
		goto err_release_pci;

	/* XXX 'linebytes' is often wrong, it is equal to the width
	 * XXX with depth of 32 on my XVR-2500 which is clearly not
	 * XXX right. So we don't try to use it.
	 */
	switch (sp->depth) {
	case 8:
		info->fix.line_length = sp->width;
		break;
	case 16:
		info->fix.line_length = sp->width * 2;
		break;
	case 24:
		info->fix.line_length = sp->width * 3;
		break;
	case 32:
		info->fix.line_length = sp->width * 4;
		break;
	}
	sp->fb_size = info->fix.line_length * sp->height;

	sp->fb_base = ioremap(sp->fb_base_phys, sp->fb_size);
	if (!sp->fb_base) {
		err = -ENOMEM;	/* was 0 here: probe falsely succeeded */
		goto err_release_pci;
	}

	err = s3d_set_fbinfo(sp);
	if (err)
		goto err_unmap_fb;

	pci_set_drvdata(pdev, info);

	printk("s3d: Found device at %s\n", pci_name(pdev));

	err = register_framebuffer(info);
	if (err < 0) {
		printk(KERN_ERR "s3d: Could not register framebuffer %s\n",
		       pci_name(pdev));
		goto err_unmap_fb;
	}

	return 0;

err_unmap_fb:
	iounmap(sp->fb_base);

err_release_pci:
	pci_release_region(pdev, 1);

err_release_fb:
	framebuffer_release(info);

err_disable:
	pci_disable_device(pdev);

err_out:
	return err;
}
/* Tear down one device in reverse order of s3d_pci_register(). */
static void __devexit s3d_pci_unregister(struct pci_dev *pdev)
{
	struct fb_info *info = pci_get_drvdata(pdev);
	struct s3d_info *sp = info->par;

	unregister_framebuffer(info);

	iounmap(sp->fb_base);

	pci_release_region(pdev, 1);

	framebuffer_release(info);

	pci_disable_device(pdev);
}
/* 3DLABS device ids 0x002c..0x0033 handled by this driver; the
 * zeroed entry terminates the table. */
static struct pci_device_id s3d_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x002c), },
	{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x002d), },
	{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x002e), },
	{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x002f), },
	{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x0030), },
	{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x0031), },
	{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x0032), },
	{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x0033), },
	{ 0, }
};
/* PCI driver glue. */
static struct pci_driver s3d_driver = {
	.name		= "s3d",
	.id_table	= s3d_pci_table,
	.probe		= s3d_pci_register,
	.remove		= __devexit_p(s3d_pci_unregister),
};
/* Module entry point.  A non-zero fb_get_options() result means the
 * driver was disabled on the video= command line. */
static int __init s3d_init(void)
{
	if (fb_get_options("s3d", NULL))
		return -ENODEV;

	return pci_register_driver(&s3d_driver);
}
/* Module exit point: unregister the PCI driver. */
static void __exit s3d_exit(void)
{
	pci_unregister_driver(&s3d_driver);
}
/* Module boilerplate. */
module_init(s3d_init);
module_exit(s3d_exit);

MODULE_DESCRIPTION("framebuffer driver for Sun XVR-2500 graphics");
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
/*
* Linux driver attachment glue for PCI based controllers.
*
* Copyright (c) 2000-2001 Adaptec Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c#47 $
*/
#include "aic7xxx_osm.h"
#include "aic7xxx_pci.h"
/* Define the macro locally since it's different for different class of chips.
*/
#define ID(x) ID_C(x, PCI_CLASS_STORAGE_SCSI)
static const struct pci_device_id ahc_linux_pci_id_table[] = {
/* aic7850 based controllers */
ID(ID_AHA_2902_04_10_15_20C_30C),
/* aic7860 based controllers */
ID(ID_AHA_2930CU),
ID(ID_AHA_1480A & ID_DEV_VENDOR_MASK),
ID(ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK),
ID(ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK),
ID(ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK),
/* aic7870 based controllers */
ID(ID_AHA_2940),
ID(ID_AHA_3940),
ID(ID_AHA_398X),
ID(ID_AHA_2944),
ID(ID_AHA_3944),
ID(ID_AHA_4944),
/* aic7880 based controllers */
ID(ID_AHA_2940U & ID_DEV_VENDOR_MASK),
ID(ID_AHA_3940U & ID_DEV_VENDOR_MASK),
ID(ID_AHA_2944U & ID_DEV_VENDOR_MASK),
ID(ID_AHA_3944U & ID_DEV_VENDOR_MASK),
ID(ID_AHA_398XU & ID_DEV_VENDOR_MASK),
ID(ID_AHA_4944U & ID_DEV_VENDOR_MASK),
ID(ID_AHA_2930U & ID_DEV_VENDOR_MASK),
ID(ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK),
ID(ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK),
/* aic7890 based controllers */
ID(ID_AHA_2930U2),
ID(ID_AHA_2940U2B),
ID(ID_AHA_2940U2_OEM),
ID(ID_AHA_2940U2),
ID(ID_AHA_2950U2B),
ID16(ID_AIC7890_ARO & ID_AIC7895_ARO_MASK),
ID(ID_AAA_131U2),
/* aic7890 based controllers */
ID(ID_AHA_29160),
ID(ID_AHA_29160_CPQ),
ID(ID_AHA_29160N),
ID(ID_AHA_29160C),
ID(ID_AHA_29160B),
ID(ID_AHA_19160B),
ID(ID_AIC7892_ARO),
/* aic7892 based controllers */
ID(ID_AHA_2940U_DUAL),
ID(ID_AHA_3940AU),
ID(ID_AHA_3944AU),
ID(ID_AIC7895_ARO),
ID(ID_AHA_3950U2B_0),
ID(ID_AHA_3950U2B_1),
ID(ID_AHA_3950U2D_0),
ID(ID_AHA_3950U2D_1),
ID(ID_AIC7896_ARO),
/* aic7899 based controllers */
ID(ID_AHA_3960D),
ID(ID_AHA_3960D_CPQ),
ID(ID_AIC7899_ARO),
/* Generic chip probes for devices we don't know exactly. */
ID(ID_AIC7850 & ID_DEV_VENDOR_MASK),
ID(ID_AIC7855 & ID_DEV_VENDOR_MASK),
ID(ID_AIC7859 & ID_DEV_VENDOR_MASK),
ID(ID_AIC7860 & ID_DEV_VENDOR_MASK),
ID(ID_AIC7870 & ID_DEV_VENDOR_MASK),
ID(ID_AIC7880 & ID_DEV_VENDOR_MASK),
ID16(ID_AIC7890 & ID_9005_GENERIC_MASK),
ID16(ID_AIC7892 & ID_9005_GENERIC_MASK),
ID(ID_AIC7895 & ID_DEV_VENDOR_MASK),
ID16(ID_AIC7896 & ID_9005_GENERIC_MASK),
ID16(ID_AIC7899 & ID_9005_GENERIC_MASK),
ID(ID_AIC7810 & ID_DEV_VENDOR_MASK),
ID(ID_AIC7815 & ID_DEV_VENDOR_MASK),
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ahc_linux_pci_id_table);
#ifdef CONFIG_PM
/*
 * PM suspend callback: quiesce the controller, then save config space,
 * disable the device, and power it down for real sleep transitions.
 * Returns 0 on success or the error from ahc_suspend().
 */
static int
ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ahc_softc *ahc = pci_get_drvdata(pdev);
	int rc;

	/* Quiesce the controller first; abort the suspend on failure. */
	if ((rc = ahc_suspend(ahc)))
		return rc;

	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* Only drop to D3hot for sleep-type events (not e.g. freeze). */
	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);

	return rc;
}
/*
 * PM resume callback: restore power state and config space, re-enable the
 * device, then reinitialize the controller hardware/software state.
 * Returns 0 on success or a negative errno.
 */
static int
ahc_linux_pci_dev_resume(struct pci_dev *pdev)
{
	struct ahc_softc *ahc = pci_get_drvdata(pdev);
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if ((rc = pci_enable_device(pdev))) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);

	/* Re-program PCI-specific registers before general resume. */
	ahc_pci_resume(ahc);

	return (ahc_resume(ahc));
}
#endif
/*
 * Device removal: unregister the SCSI host (if one was attached), disable
 * interrupts under the softc lock, then free all controller resources.
 */
static void
ahc_linux_pci_dev_remove(struct pci_dev *pdev)
{
	struct ahc_softc *ahc = pci_get_drvdata(pdev);
	u_long s;

	if (ahc->platform_data && ahc->platform_data->host)
		scsi_remove_host(ahc->platform_data->host);

	/* Quiet the hardware before tearing the softc down. */
	ahc_lock(ahc, &s);
	ahc_intr_enable(ahc, FALSE);
	ahc_unlock(ahc, &s);

	ahc_free(ahc);
}
/*
 * On multi-function boards, function != 0 inherits the BIOS-enabled and
 * primary-channel flags from the softc of function 0 on the same slot.
 */
static void
ahc_linux_pci_inherit_flags(struct ahc_softc *ahc)
{
	struct pci_dev *pdev = ahc->dev_softc, *master_pdev;
	/* Function 0 on the same bus/slot is the "master" device. */
	unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);

	master_pdev = pci_get_slot(pdev->bus, master_devfn);
	if (master_pdev) {
		struct ahc_softc *master = pci_get_drvdata(master_pdev);
		if (master) {
			/* Copy only the two inherited flag bits. */
			ahc->flags &= ~AHC_BIOS_ENABLED;
			ahc->flags |= master->flags & AHC_BIOS_ENABLED;

			ahc->flags &= ~AHC_PRIMARY_CHANNEL;
			ahc->flags |= master->flags & AHC_PRIMARY_CHANNEL;
		} else
			printk(KERN_ERR "aic7xxx: no multichannel peer found!\n");
		pci_dev_put(master_pdev);
	}
}
/*
 * PCI probe: match the device against the aic7xxx identity table, allocate
 * and configure a softc, set up DMA masks, and register the SCSI host.
 * Returns 0 on success or a negative errno.
 */
static int
ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	char		 buf[80];
	const uint64_t	 mask_39bit = 0x7FFFFFFFFFULL;
	struct		 ahc_softc *ahc;
	ahc_dev_softc_t	 pci;
	const struct ahc_pci_identity *entry;
	char		*name;
	int		 error;
	struct device	*dev = &pdev->dev;

	pci = pdev;
	entry = ahc_find_pci_device(pci);
	if (entry == NULL)
		return (-ENODEV);

	/*
	 * Allocate a softc for this card and
	 * set it up for attachment by our
	 * common detect routine.
	 */
	sprintf(buf, "ahc_pci:%d:%d:%d",
		ahc_get_pci_bus(pci),
		ahc_get_pci_slot(pci),
		ahc_get_pci_function(pci));
	name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
	if (name == NULL)
		return (-ENOMEM);
	strcpy(name, buf);
	/*
	 * NOTE(review): if ahc_alloc() fails, "name" is not freed here —
	 * presumably ahc_alloc() takes ownership only on success; verify
	 * the failure path against ahc_alloc()'s contract.
	 */
	ahc = ahc_alloc(NULL, name);
	if (ahc == NULL)
		return (-ENOMEM);

	if (pci_enable_device(pdev)) {
		ahc_free(ahc);
		return (-ENODEV);
	}
	pci_set_master(pdev);

	/*
	 * Prefer 39-bit DMA addressing when the chip supports large SCBs
	 * and the platform actually needs addresses above 4GB; otherwise
	 * fall back to a 32-bit mask.
	 */
	if (sizeof(dma_addr_t) > 4
	    && ahc->features & AHC_LARGE_SCBS
	    && dma_set_mask(dev, mask_39bit) == 0
	    && dma_get_required_mask(dev) > DMA_BIT_MASK(32)) {
		ahc->flags |= AHC_39BIT_ADDRESSING;
	} else {
		if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
			ahc_free(ahc);
			printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
			return (-ENODEV);
		}
	}
	ahc->dev_softc = pci;
	error = ahc_pci_config(ahc, entry);
	if (error != 0) {
		ahc_free(ahc);
		return (-error);
	}

	/*
	 * Second Function PCI devices need to inherit some
	 * settings from function 0.
	 */
	if ((ahc->features & AHC_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
		ahc_linux_pci_inherit_flags(ahc);

	pci_set_drvdata(pdev, ahc);
	/*
	 * NOTE(review): the return value of ahc_linux_register_host() is
	 * ignored here — confirm registration failures are handled
	 * elsewhere or cannot occur at this point.
	 */
	ahc_linux_register_host(ahc, &aic7xxx_driver_template);
	return (0);
}
/******************************* PCI Routines *********************************/
/*
 * Read a 1-, 2- or 4-byte value from the device's PCI configuration
 * space at offset @reg.  Any other width is a programming error and
 * panics the kernel.
 */
uint32_t
ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
{
	uint8_t  val8;
	uint16_t val16;
	uint32_t val32;

	switch (width) {
	case 1:
		pci_read_config_byte(pci, reg, &val8);
		return (val8);
	case 2:
		pci_read_config_word(pci, reg, &val16);
		return (val16);
	case 4:
		pci_read_config_dword(pci, reg, &val32);
		return (val32);
	default:
		panic("ahc_pci_read_config: Read size too big");
		/* NOTREACHED */
		return (0);
	}
}
/*
 * Write a 1-, 2- or 4-byte value into the device's PCI configuration
 * space at offset @reg.  Any other width is a programming error and
 * panics the kernel.
 */
void
ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
{
	if (width == 1)
		pci_write_config_byte(pci, reg, value);
	else if (width == 2)
		pci_write_config_word(pci, reg, value);
	else if (width == 4)
		pci_write_config_dword(pci, reg, value);
	else
		panic("ahc_pci_write_config: Write size too big");
		/* NOTREACHED */
}
/*
 * PCI driver glue: binds the ID table above to the probe/remove and
 * (when CONFIG_PM is set) power-management entry points.
 */
static struct pci_driver aic7xxx_pci_driver = {
	.name		= "aic7xxx",
	.probe		= ahc_linux_pci_dev_probe,
#ifdef CONFIG_PM
	.suspend	= ahc_linux_pci_dev_suspend,
	.resume		= ahc_linux_pci_dev_resume,
#endif
	.remove		= ahc_linux_pci_dev_remove,
	.id_table	= ahc_linux_pci_id_table
};
/* Register the aic7xxx PCI driver with the PCI core. */
int
ahc_linux_pci_init(void)
{
	return pci_register_driver(&aic7xxx_pci_driver);
}
/* Unregister the aic7xxx PCI driver from the PCI core. */
void
ahc_linux_pci_exit(void)
{
	pci_unregister_driver(&aic7xxx_pci_driver);
}
/*
 * Reserve the 256-byte PIO region (BAR 0) for this controller.
 * Returns 0 on success or a *positive* ENOMEM on failure (this file's
 * internal resource routines use positive error codes).
 */
static int
ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, resource_size_t *base)
{
	/* aic7xxx_allow_memio == 0 means "I/O space only is disallowed". */
	if (aic7xxx_allow_memio == 0)
		return (ENOMEM);

	*base = pci_resource_start(ahc->dev_softc, 0);
	if (*base == 0)
		return (ENOMEM);
	if (!request_region(*base, 256, "aic7xxx"))
		return (ENOMEM);
	return (0);
}
/*
 * Reserve and map the controller's memory-mapped register window
 * (BAR 1): claim 0x1000 bytes of bus space and ioremap the first 256.
 * On success *bus_addr and *maddr are filled in and 0 is returned;
 * otherwise a positive ENOMEM is returned.
 */
static int
ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
				 resource_size_t *bus_addr,
				 uint8_t __iomem **maddr)
{
	resource_size_t start;

	start = pci_resource_start(ahc->dev_softc, 1);
	if (start == 0)
		return (ENOMEM);

	*bus_addr = start;
	if (!request_mem_region(start, 0x1000, "aic7xxx"))
		return (ENOMEM);

	*maddr = ioremap_nocache(start, 256);
	if (*maddr == NULL) {
		release_mem_region(start, 0x1000);
		return (ENOMEM);
	}

	return (0);
}
/*
 * Map the controller's register space, preferring memory-mapped I/O and
 * falling back to port I/O if MMIO cannot be reserved or fails a
 * read-back sanity test.  Enables the matching decode bit(s) in the PCI
 * command register.  Returns 0 on success or a positive error code from
 * the reserve helpers.
 */
int
ahc_pci_map_registers(struct ahc_softc *ahc)
{
	uint32_t command;
	resource_size_t base;
	uint8_t	__iomem *maddr;
	int error;

	/*
	 * If its allowed, we prefer memory mapped access.
	 */
	command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, 4);
	/* Start with both decode bits off; re-enable what actually works. */
	command &= ~(PCIM_CMD_PORTEN|PCIM_CMD_MEMEN);
	base = 0;
	maddr = NULL;
	error = ahc_linux_pci_reserve_mem_region(ahc, &base, &maddr);
	if (error == 0) {
		ahc->platform_data->mem_busaddr = base;
		ahc->tag = BUS_SPACE_MEMIO;
		ahc->bsh.maddr = maddr;
		/* Temporarily enable MEM decode so the test can run. */
		ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
				     command | PCIM_CMD_MEMEN, 4);

		/*
		 * Do a quick test to see if memory mapped
		 * I/O is functioning correctly.
		 */
		if (ahc_pci_test_register_access(ahc) != 0) {
			/* MMIO broken: undo the mapping and fall back to PIO. */
			printk("aic7xxx: PCI Device %d:%d:%d "
			       "failed memory mapped test.  Using PIO.\n",
			       ahc_get_pci_bus(ahc->dev_softc),
			       ahc_get_pci_slot(ahc->dev_softc),
			       ahc_get_pci_function(ahc->dev_softc));
			iounmap(maddr);
			release_mem_region(ahc->platform_data->mem_busaddr,
					   0x1000);
			ahc->bsh.maddr = NULL;
			maddr = NULL;
		} else
			command |= PCIM_CMD_MEMEN;
	} else {
		printk("aic7xxx: PCI%d:%d:%d MEM region 0x%llx "
		       "unavailable. Cannot memory map device.\n",
		       ahc_get_pci_bus(ahc->dev_softc),
		       ahc_get_pci_slot(ahc->dev_softc),
		       ahc_get_pci_function(ahc->dev_softc),
		       (unsigned long long)base);
	}

	/*
	 * We always prefer memory mapped access.
	 */
	if (maddr == NULL) {
		/* MMIO unavailable or failed its test: try port I/O. */
		error = ahc_linux_pci_reserve_io_region(ahc, &base);
		if (error == 0) {
			ahc->tag = BUS_SPACE_PIO;
			ahc->bsh.ioport = (u_long)base;
			command |= PCIM_CMD_PORTEN;
		} else {
			printk("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] "
			       "unavailable. Cannot map device.\n",
			       ahc_get_pci_bus(ahc->dev_softc),
			       ahc_get_pci_slot(ahc->dev_softc),
			       ahc_get_pci_function(ahc->dev_softc),
			       (unsigned long long)base);
		}
	}
	/* Commit whichever decode bit(s) ended up usable. */
	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4);
	return (error);
}
/*
 * Hook up the controller's shared PCI interrupt line to ahc_linux_isr.
 * Returns 0 on success or a negative errno (request_irq's error negated).
 */
int
ahc_pci_map_int(struct ahc_softc *ahc)
{
	int error;

	error = request_irq(ahc->dev_softc->irq, ahc_linux_isr,
			    IRQF_SHARED, "aic7xxx", ahc);
	if (error == 0)
		ahc->platform_data->irq = ahc->dev_softc->irq;

	return (-error);
}
| gpl-2.0 |
kgp700/Neok-GNexroid-JB | kernel/trace/trace_nop.c | 12008 | 2239 | /*
* nop tracer
*
* Copyright (C) 2008 Steven Noonan <steven@uplinklabs.net>
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include "trace.h"
/* Our two options */
/* Our two options */
enum {
	TRACE_NOP_OPT_ACCEPT = 0x1,	/* set_flag callback will accept this bit */
	TRACE_NOP_OPT_REFUSE = 0x2	/* set_flag callback will refuse this bit */
};

/* Options for the tracer (see trace_options file) */
static struct tracer_opt nop_opts[] = {
	/* Option that will be accepted by set_flag callback */
	{ TRACER_OPT(test_nop_accept, TRACE_NOP_OPT_ACCEPT) },
	/* Option that will be refused by set_flag callback */
	{ TRACER_OPT(test_nop_refuse, TRACE_NOP_OPT_REFUSE) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags nop_flags = {
	/* You can check your flags value here when you want. */
	.val = 0, /* By default: all flags disabled */
	.opts = nop_opts
};

/* trace_array this tracer was last attached to (set in nop_trace_init). */
static struct trace_array	*ctx_trace;
/* Start hook for the nop tracer: intentionally does nothing. */
static void start_nop_trace(struct trace_array *tr)
{
	/* Nothing to do! */
}
/* Stop hook for the nop tracer: intentionally does nothing. */
static void stop_nop_trace(struct trace_array *tr)
{
	/* Nothing to do! */
}
/* Tracer init callback: remember the trace_array and "start" tracing. */
static int nop_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	start_nop_trace(tr);
	return 0;
}
static void nop_trace_reset(struct trace_array *tr)
{
stop_nop_trace(tr);
}
/* It only serves as a signal handler and a callback to
 * accept or refuse the setting of a flag.
 * If you don't implement it, then the flag setting will be
 * automatically accepted.
 *
 * Returns 0 to accept the new flag value, -EINVAL to refuse it.
 */
static int nop_set_flag(u32 old_flags, u32 bit, int set)
{
	/*
	 * Note that you don't need to update nop_flags.val yourself.
	 * The tracing API will do it automatically if you return 0.
	 */
	switch (bit) {
	case TRACE_NOP_OPT_ACCEPT:
		printk(KERN_DEBUG "nop_test_accept flag set to %d: we accept."
			" Now cat trace_options to see the result\n",
			set);
		return 0;

	case TRACE_NOP_OPT_REFUSE:
		/* Fixed: a space was missing between the two sentences. */
		printk(KERN_DEBUG "nop_test_refuse flag set to %d: we refuse."
			" Now cat trace_options to see the result\n",
			set);
		return -EINVAL;

	default:
		/* Unknown bits are accepted by default. */
		return 0;
	}
}
/*
 * The nop tracer descriptor registered with the tracing core: init/reset
 * attach it to a trace_array, and set_flag demonstrates option handling.
 */
struct tracer nop_trace __read_mostly =
{
	.name		= "nop",
	.init		= nop_trace_init,
	.reset		= nop_trace_reset,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_nop,
#endif
	.flags		= &nop_flags,
	.set_flag	= nop_set_flag
};
| gpl-2.0 |
Mr-AW/Kernel_TeLo_LP_LenovoA6000 | fs/ext4/ioctl.c | 233 | 17296 | /*
* linux/fs/ext4/ioctl.c
*
* Copyright (C) 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*/
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/capability.h>
#include <linux/time.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "ext4_extents.h"
#define MAX_32_NUM ((((unsigned long long) 1) << 32) - 1)
/**
 * Swap memory between @a and @b for @len bytes.
 *
 * @a: pointer to first memory area
 * @b: pointer to second memory area
 * @len: number of bytes to swap
 *
 */
static void memswap(void *a, void *b, size_t len)
{
	unsigned char *x = a;
	unsigned char *y = b;
	size_t i;

	/* Byte-by-byte three-way exchange; areas must not overlap. */
	for (i = 0; i < len; i++) {
		unsigned char t = x[i];

		x[i] = y[i];
		y[i] = t;
	}
}
/**
 * Swap i_data and associated attributes between @inode1 and @inode2.
 * This function is used for the primary swap between inode1 and inode2
 * and also to revert this primary swap in case of errors.
 *
 * Therefore you have to make sure, that calling this method twice
 * will revert all changes.
 *
 * @inode1: pointer to first inode
 * @inode2: pointer to second inode
 */
static void swap_inode_data(struct inode *inode1, struct inode *inode2)
{
	loff_t isize;
	struct ext4_inode_info *ei1;
	struct ext4_inode_info *ei2;

	ei1 = EXT4_I(inode1);
	ei2 = EXT4_I(inode2);

	/* VFS-level fields. */
	memswap(&inode1->i_flags, &inode2->i_flags, sizeof(inode1->i_flags));
	memswap(&inode1->i_version, &inode2->i_version,
		  sizeof(inode1->i_version));
	memswap(&inode1->i_blocks, &inode2->i_blocks,
		  sizeof(inode1->i_blocks));
	memswap(&inode1->i_bytes, &inode2->i_bytes, sizeof(inode1->i_bytes));
	memswap(&inode1->i_atime, &inode2->i_atime, sizeof(inode1->i_atime));
	memswap(&inode1->i_mtime, &inode2->i_mtime, sizeof(inode1->i_mtime));

	/* ext4-private fields, including the block/extent map itself. */
	memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
	memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags));
	memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));

	/* Drop cached extent-status info, which no longer matches i_data. */
	ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
	ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
	ext4_es_lru_del(inode1);
	ext4_es_lru_del(inode2);

	isize = i_size_read(inode1);
	i_size_write(inode1, i_size_read(inode2));
	i_size_write(inode2, isize);
}
/**
 * Swap the information from the given @inode and the inode
 * EXT4_BOOT_LOADER_INO. It will basically swap i_data and all other
 * important fields of the inodes.
 *
 * @sb: the super block of the filesystem
 * @inode: the inode to swap with EXT4_BOOT_LOADER_INO
 *
 * Returns 0 on success or a negative errno.  Only a regular file with a
 * single link may be swapped, and only by a sufficiently privileged owner.
 */
static long swap_inode_boot_loader(struct super_block *sb,
				struct inode *inode)
{
	handle_t *handle;
	int err;
	struct inode *inode_bl;
	struct ext4_inode_info *ei;
	struct ext4_inode_info *ei_bl;
	struct ext4_sb_info *sbi;

	if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode)) {
		err = -EINVAL;
		goto swap_boot_out;
	}

	if (!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto swap_boot_out;
	}

	sbi = EXT4_SB(sb);
	ei = EXT4_I(inode);

	inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
	if (IS_ERR(inode_bl)) {
		err = PTR_ERR(inode_bl);
		goto swap_boot_out;
	}
	ei_bl = EXT4_I(inode_bl);

	/* Push dirty pages out before dropping the page caches below. */
	filemap_flush(inode->i_mapping);
	filemap_flush(inode_bl->i_mapping);

	/* Protect orig inodes against a truncate and make sure,
	 * that only 1 swap_inode_boot_loader is running. */
	ext4_inode_double_lock(inode, inode_bl);

	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(&inode_bl->i_data, 0);

	/* Wait for all existing dio workers */
	ext4_inode_block_unlocked_dio(inode);
	ext4_inode_block_unlocked_dio(inode_bl);
	inode_dio_wait(inode);
	inode_dio_wait(inode_bl);

	handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
	if (IS_ERR(handle)) {
		err = -EINVAL;
		goto journal_err_out;
	}

	/* Protect extent tree against block allocations via delalloc */
	ext4_double_down_write_data_sem(inode, inode_bl);

	if (inode_bl->i_nlink == 0) {
		/* this inode has never been used as a BOOT_LOADER */
		set_nlink(inode_bl, 1);
		i_uid_write(inode_bl, 0);
		i_gid_write(inode_bl, 0);
		inode_bl->i_flags = 0;
		ei_bl->i_flags = 0;
		inode_bl->i_version = 1;
		i_size_write(inode_bl, 0);
		inode_bl->i_mode = S_IFREG;
		if (EXT4_HAS_INCOMPAT_FEATURE(sb,
					      EXT4_FEATURE_INCOMPAT_EXTENTS)) {
			ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode_bl);
		} else
			memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
	}

	swap_inode_data(inode, inode_bl);

	inode->i_ctime = inode_bl->i_ctime = ext4_current_time(inode);

	/* Both inodes get a fresh generation (NFS file handles change). */
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	inode_bl->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ext4_discard_preallocations(inode);

	err = ext4_mark_inode_dirty(handle, inode);
	if (err < 0) {
		ext4_warning(inode->i_sb,
			"couldn't mark inode #%lu dirty (err %d)",
			inode->i_ino, err);
		/* Revert all changes: */
		swap_inode_data(inode, inode_bl);
	} else {
		err = ext4_mark_inode_dirty(handle, inode_bl);
		if (err < 0) {
			ext4_warning(inode_bl->i_sb,
				"couldn't mark inode #%lu dirty (err %d)",
				inode_bl->i_ino, err);
			/* Revert all changes: */
			swap_inode_data(inode, inode_bl);
			ext4_mark_inode_dirty(handle, inode);
		}
	}

	ext4_journal_stop(handle);

	ext4_double_up_write_data_sem(inode, inode_bl);

journal_err_out:
	ext4_inode_resume_unlocked_dio(inode);
	ext4_inode_resume_unlocked_dio(inode_bl);

	ext4_inode_double_unlock(inode, inode_bl);

	iput(inode_bl);

swap_boot_out:
	return err;
}
/*
 * ext4_ioctl - main ioctl multiplexer for ext4 files
 * @filp: file the ioctl was invoked on
 * @cmd:  ioctl command number
 * @arg:  user-space argument whose meaning depends on @cmd
 *
 * Returns 0 on success, a negative errno on failure, or -ENOTTY for
 * commands this filesystem does not implement.
 */
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);

	switch (cmd) {
	case EXT4_IOC_GETFLAGS:
		/* Report only the user-visible subset of inode flags. */
		ext4_get_inode_flags(ei);
		flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case EXT4_IOC_SETFLAGS: {
		handle_t *handle = NULL;
		int err, migrate = 0;
		struct ext4_iloc iloc;
		unsigned int oldflags, mask, i;
		unsigned int jflag;

		if (!inode_owner_or_capable(inode))
			return -EACCES;

		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		err = mnt_want_write_file(filp);
		if (err)
			return err;

		flags = ext4_mask_flags(inode->i_mode, flags);

		err = -EPERM;
		mutex_lock(&inode->i_mutex);
		/* Is it quota file? Do not allow user to mess with it */
		if (IS_NOQUOTA(inode))
			goto flags_out;

		oldflags = ei->i_flags;

		/* The JOURNAL_DATA flag is modifiable only by root */
		jflag = flags & EXT4_JOURNAL_DATA_FL;

		/*
		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
		 * the relevant capability.
		 *
		 * This test looks nicer. Thanks to Pauline Middelink
		 */
		if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE))
				goto flags_out;
		}

		/*
		 * The JOURNAL_DATA flag can only be changed by
		 * the relevant capability.
		 */
		if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
			if (!capable(CAP_SYS_RESOURCE))
				goto flags_out;
		}
		/* Toggling EXTENTS requires converting the block map. */
		if ((flags ^ oldflags) & EXT4_EXTENTS_FL)
			migrate = 1;

		if (flags & EXT4_EOFBLOCKS_FL) {
			/* we don't support adding EOFBLOCKS flag */
			if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
				err = -EOPNOTSUPP;
				goto flags_out;
			}
		} else if (oldflags & EXT4_EOFBLOCKS_FL)
			ext4_truncate(inode);

		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto flags_out;
		}
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err)
			goto flags_err;

		/* Apply each user-modifiable flag bit individually. */
		for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
			if (!(mask & EXT4_FL_USER_MODIFIABLE))
				continue;
			if (mask & flags)
				ext4_set_inode_flag(inode, i);
			else
				ext4_clear_inode_flag(inode, i);
		}

		ext4_set_inode_flags(inode);
		inode->i_ctime = ext4_current_time(inode);

		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
		ext4_journal_stop(handle);
		if (err)
			goto flags_out;

		if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL))
			err = ext4_change_inode_journal_flag(inode, jflag);
		if (err)
			goto flags_out;
		if (migrate) {
			/* Convert between extent and indirect block maps. */
			if (flags & EXT4_EXTENTS_FL)
				err = ext4_ext_migrate(inode);
			else
				err = ext4_ind_migrate(inode);
		}

flags_out:
		mutex_unlock(&inode->i_mutex);
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_GETVERSION:
	case EXT4_IOC_GETVERSION_OLD:
		return put_user(inode->i_generation, (int __user *) arg);
	case EXT4_IOC_SETVERSION:
	case EXT4_IOC_SETVERSION_OLD: {
		handle_t *handle;
		struct ext4_iloc iloc;
		__u32 generation;
		int err;

		if (!inode_owner_or_capable(inode))
			return -EPERM;

		/* i_generation feeds the metadata checksum; refuse to break it. */
		if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
			ext4_warning(sb, "Setting inode version is not "
				     "supported with metadata_csum enabled.");
			return -ENOTTY;
		}

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		if (get_user(generation, (int __user *) arg)) {
			err = -EFAULT;
			goto setversion_out;
		}

		mutex_lock(&inode->i_mutex);
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto unlock_out;
		}
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err == 0) {
			inode->i_ctime = ext4_current_time(inode);
			inode->i_generation = generation;
			err = ext4_mark_iloc_dirty(handle, inode, &iloc);
		}
		ext4_journal_stop(handle);

unlock_out:
		mutex_unlock(&inode->i_mutex);
setversion_out:
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_GROUP_EXTEND: {
		ext4_fsblk_t n_blocks_count;
		int err, err2=0;

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		if (get_user(n_blocks_count, (__u32 __user *)arg)) {
			err = -EFAULT;
			goto group_extend_out;
		}

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online resizing not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto group_extend_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto group_extend_out;

		err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
		/* Flush the journal so the resize is durable on disk. */
		if (EXT4_SB(sb)->s_journal) {
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
group_extend_out:
		ext4_resize_end(sb);
		return err;
	}
	case EXT4_IOC_MOVE_EXT: {
		struct move_extent me;
		struct fd donor;
		int err;

		if (!(filp->f_mode & FMODE_READ) ||
		    !(filp->f_mode & FMODE_WRITE))
			return -EBADF;

		if (copy_from_user(&me,
			(struct move_extent __user *)arg, sizeof(me)))
			return -EFAULT;
		me.moved_len = 0;

		donor = fdget(me.donor_fd);
		if (!donor.file)
			return -EBADF;

		if (!(donor.file->f_mode & FMODE_WRITE)) {
			err = -EBADF;
			goto mext_out;
		}

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online defrag not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto mext_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto mext_out;

		err = ext4_move_extents(filp, donor.file, me.orig_start,
					me.donor_start, me.len, &me.moved_len);
		mnt_drop_write_file(filp);

		/* Report back how much actually moved, even on error. */
		if (copy_to_user((struct move_extent __user *)arg,
				 &me, sizeof(me)))
			err = -EFAULT;
mext_out:
		fdput(donor);
		return err;
	}
	case EXT4_IOC_GROUP_ADD: {
		struct ext4_new_group_data input;
		int err, err2=0;

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
				sizeof(input))) {
			err = -EFAULT;
			goto group_add_out;
		}

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online resizing not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto group_add_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto group_add_out;

		err = ext4_group_add(sb, &input);
		/* Flush the journal so the resize is durable on disk. */
		if (EXT4_SB(sb)->s_journal) {
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
		/* Let lazyinit zero the new group's inode table in background. */
		if (!err && ext4_has_group_desc_csum(sb) &&
		    test_opt(sb, INIT_INODE_TABLE))
			err = ext4_register_li_request(sb, input.group);
group_add_out:
		ext4_resize_end(sb);
		return err;
	}
	case EXT4_IOC_MIGRATE:
	{
		int err;
		if (!inode_owner_or_capable(inode))
			return -EACCES;

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		/*
		 * inode_mutex prevent write and truncate on the file.
		 * Read still goes through. We take i_data_sem in
		 * ext4_ext_swap_inode_data before we switch the
		 * inode format to prevent read.
		 */
		mutex_lock(&(inode->i_mutex));
		err = ext4_ext_migrate(inode);
		mutex_unlock(&(inode->i_mutex));
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_ALLOC_DA_BLKS:
	{
		int err;
		if (!inode_owner_or_capable(inode))
			return -EACCES;

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		/* Force allocation of all delayed-allocation blocks. */
		err = ext4_alloc_da_blocks(inode);
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_SWAP_BOOT:
	{
		int err;
		if (!(filp->f_mode & FMODE_WRITE))
			return -EBADF;
		err = mnt_want_write_file(filp);
		if (err)
			return err;
		err = swap_inode_boot_loader(sb, inode);
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_RESIZE_FS: {
		ext4_fsblk_t n_blocks_count;
		int err = 0, err2 = 0;
		ext4_group_t o_group = EXT4_SB(sb)->s_groups_count;

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online resizing not (yet) supported with bigalloc");
			return -EOPNOTSUPP;
		}

		if (copy_from_user(&n_blocks_count, (__u64 __user *)arg,
				   sizeof(__u64))) {
			return -EFAULT;
		}

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		err = mnt_want_write_file(filp);
		if (err)
			goto resizefs_out;

		err = ext4_resize_fs(sb, n_blocks_count);
		/* Flush the journal so the resize is durable on disk. */
		if (EXT4_SB(sb)->s_journal) {
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
		/* Register lazyinit only if new groups were actually added. */
		if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
		    ext4_has_group_desc_csum(sb) &&
		    test_opt(sb, INIT_INODE_TABLE))
			err = ext4_register_li_request(sb, o_group);

resizefs_out:
		ext4_resize_end(sb);
		return err;
	}
	case FITRIM:
	{
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		struct fstrim_range range;
		int ret = 0;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;

		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
		    sizeof(range)))
			return -EFAULT;

		/* Never trim chunks smaller than the device granularity. */
		range.minlen = max((unsigned int)range.minlen,
				   q->limits.discard_granularity);
		ret = ext4_trim_fs(sb, &range);
		if (ret < 0)
			return ret;

		if (copy_to_user((struct fstrim_range __user *)arg, &range,
		    sizeof(range)))
			return -EFAULT;

		return 0;
	}
	default:
		return -ENOTTY;
	}
}
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point: translate 32-bit command numbers (and,
 * for GROUP_ADD, the 32-bit argument layout) and forward to ext4_ioctl().
 */
long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* These are just misnamed, they actually get/put from/to user an int */
	switch (cmd) {
	case EXT4_IOC32_GETFLAGS:
		cmd = EXT4_IOC_GETFLAGS;
		break;
	case EXT4_IOC32_SETFLAGS:
		cmd = EXT4_IOC_SETFLAGS;
		break;
	case EXT4_IOC32_GETVERSION:
		cmd = EXT4_IOC_GETVERSION;
		break;
	case EXT4_IOC32_SETVERSION:
		cmd = EXT4_IOC_SETVERSION;
		break;
	case EXT4_IOC32_GROUP_EXTEND:
		cmd = EXT4_IOC_GROUP_EXTEND;
		break;
	case EXT4_IOC32_GETVERSION_OLD:
		cmd = EXT4_IOC_GETVERSION_OLD;
		break;
	case EXT4_IOC32_SETVERSION_OLD:
		cmd = EXT4_IOC_SETVERSION_OLD;
		break;
	case EXT4_IOC32_GETRSVSZ:
		cmd = EXT4_IOC_GETRSVSZ;
		break;
	case EXT4_IOC32_SETRSVSZ:
		cmd = EXT4_IOC_SETRSVSZ;
		break;
	case EXT4_IOC32_GROUP_ADD: {
		/* Repack the 32-bit struct field-by-field into the native one. */
		struct compat_ext4_new_group_input __user *uinput;
		struct ext4_new_group_input input;
		mm_segment_t old_fs;
		int err;

		uinput = compat_ptr(arg);
		err = get_user(input.group, &uinput->group);
		err |= get_user(input.block_bitmap, &uinput->block_bitmap);
		err |= get_user(input.inode_bitmap, &uinput->inode_bitmap);
		err |= get_user(input.inode_table, &uinput->inode_table);
		err |= get_user(input.blocks_count, &uinput->blocks_count);
		err |= get_user(input.reserved_blocks,
				&uinput->reserved_blocks);
		if (err)
			return -EFAULT;
		/* Pass the kernel-space struct through the user-copy path. */
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		err = ext4_ioctl(file, EXT4_IOC_GROUP_ADD,
				 (unsigned long) &input);
		set_fs(old_fs);
		return err;
	}
	case EXT4_IOC_MOVE_EXT:
	case FITRIM:
	case EXT4_IOC_RESIZE_FS:
		/* These commands are layout-compatible; no translation needed. */
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
| gpl-2.0 |
DarkRainX/ufoai | src/libs/intl/src/hash-string.c | 233 | 1641 | /* Implements a string hashing function.
Copyright (C) 1995, 1997, 1998, 2000, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
/* Specification. */
#include "hash-string.h"
/* Defines the so called `hashpjw' function by P.J. Weinberger
   [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools,
   1986, 1987 Bell Telephone Laboratories, Inc.]

   Computes a hash value for the NUL-terminated string STR_PARAM.  */
unsigned long int
__hash_string (const char *str_param)
{
  const unsigned char *s = (const unsigned char *) str_param;
  unsigned long int h = 0;

  /* Fold each byte in, mixing the top nibble back down when it fills.  */
  while (*s != '\0')
    {
      unsigned long int top;

      h = (h << 4) + *s++;
      top = h & ((unsigned long int) 0xf << (HASHWORDBITS - 4));
      if (top != 0)
	h ^= (top >> (HASHWORDBITS - 8)) ^ top;
    }
  return h;
}
| gpl-2.0 |
regiesoriano/rs_kernel_msm | drivers/gpu/drm/nouveau/nouveau_gem.c | 745 | 21771 | /*
* Copyright (C) 2008 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#define nouveau_gem_pushbuf_sync(chan) 0
/* GEM object-creation hook: no driver-private setup is needed here. */
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}
/*
 * GEM object destruction: drop the TTM buffer reference and release the
 * GEM object.  A lingering pin refcount is forced to one and unpinned so
 * the buffer can actually be freed.
 */
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	/*
	 * Check for a NULL private pointer BEFORE computing &nvbo->bo:
	 * taking a member address of a null pointer is undefined behavior.
	 */
	if (!nvbo)
		return;
	bo = &nvbo->bo;

	nvbo->gem = NULL;

	/* Lockless moves don't release the pin ref; force it and unpin. */
	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}
/*
 * Per-file GEM open hook: map the buffer into the opening client's VM,
 * creating a new VMA on first open and bumping its refcount thereafter.
 * Returns 0 on success or a negative errno.
 */
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	/* Nothing to map if this client has no per-file VM. */
	if (!fpriv->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		/* Already mapped in this VM; just take another reference. */
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
/*
 * Per-file GEM close hook: drop the client's VMA reference and tear the
 * mapping down when the last reference goes away.
 */
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return;

	/* Failure to reserve leaves the VMA in place; nothing else to do. */
	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
/*
 * Allocate a new buffer object and wrap it in a GEM object.
 *
 * @domain selects the memory types (VRAM/GART/CPU) the buffer may be
 * placed in; on NV50+ this also permanently restricts the domains the
 * object may migrate to later.  On success the new BO is stored in
 * *@pnvbo; returns 0 or a negative errno.
 */
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	/* Fall back to system memory if nothing (or CPU) was requested. */
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (dev_priv->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}
/*
 * Fill a drm_nouveau_gem_info struct describing @gem for userspace:
 * current domain, GPU offset (the per-client virtual address when the
 * client has its own VM), size, mmap handle and tiling state.
 */
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (fpriv->vm) {
		/* Clients with a private VM see the virtual address. */
		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}
/*
 * GEM_NEW ioctl: create a buffer object per the request, create a
 * userspace handle for it, and return the handle plus object info.
 */
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	/* Lazily hook the TTM device up to the DRM address space. */
	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}
/*
 * Compute a TTM placement for @gem that satisfies the caller's requested
 * read/write domains, preferring to leave the buffer where it already is.
 * Write domains take priority over read domains when both are given.
 * Returns -EINVAL if no usable domain remains after masking.
 */
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t wanted = write_domains ? write_domains : read_domains;
	uint32_t domains = valid_domains & nvbo->valid_domains & wanted;
	uint32_t pref = 0;
	uint32_t valid = 0;

	if (domains == 0)
		return -EINVAL;

	/* Every domain the caller declared valid is an acceptable fallback. */
	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid |= TTM_PL_FLAG_VRAM;
	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid |= TTM_PL_FLAG_TT;

	/* Prefer the buffer's current location when it's permitted;
	 * otherwise favour VRAM over GART.
	 */
	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref = TTM_PL_FLAG_VRAM;
	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref = TTM_PL_FLAG_TT;
	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref = TTM_PL_FLAG_VRAM;
	else
		pref = TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref, valid);
	return 0;
}
/*
 * Per-submission bookkeeping: buffers reserved for validation, bucketed
 * by which memory domains they are allowed to live in.
 */
struct validate_op {
	struct list_head vram_list;	/* VRAM-only buffers */
	struct list_head gart_list;	/* GART-only buffers */
	struct list_head both_list;	/* buffers valid in either domain */
};
/*
 * Release one bucket of validated buffers: attach @fence (if any) so the
 * buffers remain busy until the GPU is done with them, drop any CPU
 * mapping created for relocations, unreserve, and drop the submission's
 * GEM reference.
 */
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}
/*
 * Tear down all three validation buckets, fencing (or just unreserving,
 * when @fence is NULL) every buffer that was reserved for the submission.
 */
static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	struct list_head *buckets[] = {
		&op->vram_list,
		&op->gart_list,
		&op->both_list,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(buckets); i++)
		validate_fini_list(buckets[i], fence);
}
/*
 * Look up and reserve every buffer on the submission's list, sorting each
 * onto the vram/gart/both bucket of @op.  On reservation contention the
 * whole set is backed off and retried; the global @sequence ticket orders
 * competing submissions so they cannot livelock each other.
 */
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	/* Give up after an absurd number of back-offs rather than spin. */
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		/* Each buffer may appear only once per submission. */
		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			/* Contention: release everything we hold, wait for
			 * the current holder if asked to, and start over.
			 */
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN))
				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
			drm_gem_object_unreference_unlocked(gem);
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			/* Park the buffer on a bucket so validate_fini()
			 * unreserves it along with the rest.
			 */
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * Make @chan wait on any fence currently attached to @nvbo, ordering this
 * channel's commands after the buffer's previous user.  The fence pointer
 * is snapshotted under fence_lock and referenced so it cannot be freed
 * while we sync against it.
 */
static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}
/*
 * Validate (place) every buffer on @list, synchronising the channel with
 * each buffer's previous user both before and after any move.  On
 * pre-NV50 chips, write back any presumed offsets that turned out wrong
 * so userspace relocations get re-applied; returns the number of such
 * fixups, or a negative errno.
 */
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		/* Validation may have moved the buffer; sync again. */
		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail post-validate sync\n");
			return ret;
		}

		if (dev_priv->card_type < NV_50) {
			/* Nothing to fix up if the buffer is where
			 * userspace presumed it to be.
			 */
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}
/*
 * Reserve and validate the full buffer list for a pushbuf submission.
 * On success *@apply_relocs holds the total number of presumed-offset
 * fixups across all three buckets (non-zero means relocations must be
 * applied kernel-side before submission).
 */
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}
/*
 * Copy an array of @nmemb elements of @size bytes from the userspace
 * address @user into a fresh kernel allocation.  Returns the buffer
 * (caller frees with kfree()) or an ERR_PTR on failure.
 */
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	/* Reject nmemb * size overflow before allocating; an overflowed
	 * product would under-allocate and the copy below would then
	 * overrun the buffer.
	 */
	if (size && nmemb > ULONG_MAX / size)
		return ERR_PTR(-EINVAL);

	mem = kmalloc((unsigned long)nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, (unsigned long)nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}
/*
 * Apply userspace-requested relocations.  Each reloc patches one 32-bit
 * word inside a listed buffer with the (possibly updated) presumed GPU
 * offset of another listed buffer.  Relocs whose target's presumed state
 * is still valid are skipped.  Returns 0 or a negative errno; on error
 * some relocs may already have been written.
 */
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		/* An index equal to nr_buffers is already out of bounds,
		 * so these checks must be ">=", not ">" — the previous
		 * off-by-one allowed a one-element overrun of bo[].
		 */
		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		/* Map the container buffer so we can patch it via the CPU;
		 * validate_fini_list() tears the mapping down again.
		 */
		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		/* Wait for the buffer to go idle before poking it. */
		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}
/*
 * PUSHBUF ioctl: validate the caller's buffer list, apply relocations if
 * needed, submit the push buffers to the channel, and fence the result.
 * The suffix0/suffix1 words returned tell userspace what trailer to
 * append to future pushbufs for this channel.
 */
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	/* Sanity-check the user-supplied counts against fixed limits. */
	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push)) {
		nouveau_channel_put(&chan);
		return PTR_ERR(push);
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		nouveau_channel_put(&chan);
		return PTR_ERR(bo);
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(dev, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		/* NV50+: submit via the indirect buffer ring. */
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (dev_priv->chipset >= 0x25) {
		/* NV25..NV4x: emit CALL commands into the ring. */
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		/* Pre-NV25: no CALL; patch a JMP-back into each pushbuf. */
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			/* Rewrite the trailing jump only if userspace's
			 * suffix no longer points at the right place.
			 */
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	/* Fence (or just release, on error) everything we reserved. */
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (dev_priv->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	nouveau_channel_put(&chan);
	return ret;
}
/*
 * Translate a NOUVEAU_GEM_DOMAIN_* mask into the equivalent set of TTM
 * placement flags.  Unknown/CPU domain bits are simply ignored.
 */
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t ttm_flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		ttm_flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		ttm_flags |= TTM_PL_FLAG_TT;

	return ttm_flags;
}
/*
 * CPU_PREP ioctl: wait until the buffer is idle (or fail immediately with
 * the NOWAIT flag) so the CPU can safely access its contents.
 */
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
/*
 * CPU_FINI ioctl: nothing to do here — CPU access needs no explicit
 * teardown in this driver; the ioctl exists for API symmetry with
 * CPU_PREP.
 */
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}
/*
 * GEM_INFO ioctl: look up a handle and report the buffer's current
 * placement, GPU offset, size, mmap handle and tiling state.
 */
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
| gpl-2.0 |
AayushRd7/Xeski | lib/zlib_deflate/deftree.c | 1001 | 40495 | /* +++ trees.c */
/* trees.c -- output deflated data using Huffman coding
* Copyright (C) 1995-1996 Jean-loup Gailly
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/*
* ALGORITHM
*
* The "deflation" process uses several Huffman trees. The more
* common source values are represented by shorter bit sequences.
*
* Each code tree is stored in a compressed form which is itself
* a Huffman encoding of the lengths of all the code strings (in
* ascending order by source values). The actual code strings are
* reconstructed from the lengths in the inflate process, as described
* in the deflate specification.
*
* REFERENCES
*
* Deutsch, L.P.,"'Deflate' Compressed Data Format Specification".
* Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
*
* Storer, James A.
* Data Compression: Methods and Theory, pp. 49-50.
* Computer Science Press, 1988. ISBN 0-7167-8156-5.
*
* Sedgewick, R.
* Algorithms, p290.
* Addison-Wesley, 1983. ISBN 0-201-06672-6.
*/
/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
/* #include "deflate.h" */
#include <linux/zutil.h>
#include <linux/bitrev.h>
#include "defutil.h"
#ifdef DEBUG_ZLIB
# include <ctype.h>
#endif
/* ===========================================================================
* Constants
*/
#define MAX_BL_BITS 7
/* Bit length codes must not exceed MAX_BL_BITS bits */

#define END_BLOCK 256
/* end of block literal code */

#define REP_3_6 16
/* repeat previous bit length 3-6 times (2 bits of repeat count) */

#define REPZ_3_10 17
/* repeat a zero length 3-10 times (3 bits of repeat count) */

#define REPZ_11_138 18
/* repeat a zero length 11-138 times (7 bits of repeat count) */

static const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
   = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};

static const int extra_dbits[D_CODES] /* extra bits for each distance code */
   = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};

static const int extra_blbits[BL_CODES]/* extra bits for each bit length code */
   = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};

static const uch bl_order[BL_CODES]
   = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
/* The lengths of the bit length codes are sent in order of decreasing
 * probability, to avoid transmitting the lengths for unused bit length codes.
 */

#define Buf_size (8 * 2*sizeof(char))
/* Number of bits used within bi_buf. (bi_buf might be implemented on
 * more than 16 bits on some systems.)
 */

/* ===========================================================================
 * Local data. These are initialized only once.
 */

static ct_data static_ltree[L_CODES+2];
/* The static literal tree. Since the bit lengths are imposed, there is no
 * need for the L_CODES extra codes used during heap construction. However
 * The codes 286 and 287 are needed to build a canonical tree (see zlib_tr_init
 * below).
 */

static ct_data static_dtree[D_CODES];
/* The static distance tree. (Actually a trivial tree since all codes use
 * 5 bits.)
 */

static uch dist_code[512];
/* distance codes. The first 256 values correspond to the distances
 * 3 .. 258, the last 256 values correspond to the top 8 bits of
 * the 15 bit distances.
 */

static uch length_code[MAX_MATCH-MIN_MATCH+1];
/* length code for each normalized match length (0 == MIN_MATCH) */

static int base_length[LENGTH_CODES];
/* First normalized length for each code (0 = MIN_MATCH) */

static int base_dist[D_CODES];
/* First normalized distance for each code (0 = distance of 1) */

/* Ties a dynamic tree to the static metadata (shape constraints) that
 * govern how its codes may be built.
 */
struct static_tree_desc_s {
    const ct_data *static_tree;  /* static tree or NULL */
    const int *extra_bits;       /* extra bits for each code or NULL */
    int     extra_base;          /* base index for extra_bits */
    int     elems;               /* max number of elements in the tree */
    int     max_length;          /* max bit length for the codes */
};

/* Descriptors for the literal/length, distance and bit-length trees. */
static static_tree_desc static_l_desc =
{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};

static static_tree_desc static_d_desc =
{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};

static static_tree_desc static_bl_desc =
{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
/* ===========================================================================
 * Local (static) routines in this file.
 */

static void tr_static_init (void);
static void init_block     (deflate_state *s);
static void pqdownheap     (deflate_state *s, ct_data *tree, int k);
static void gen_bitlen     (deflate_state *s, tree_desc *desc);
static void gen_codes      (ct_data *tree, int max_code, ush *bl_count);
static void build_tree     (deflate_state *s, tree_desc *desc);
static void scan_tree      (deflate_state *s, ct_data *tree, int max_code);
static void send_tree      (deflate_state *s, ct_data *tree, int max_code);
static int  build_bl_tree  (deflate_state *s);
static void send_all_trees (deflate_state *s, int lcodes, int dcodes,
                           int blcodes);
static void compress_block (deflate_state *s, ct_data *ltree,
                           ct_data *dtree);
static void set_data_type  (deflate_state *s);
static void bi_windup      (deflate_state *s);
static void bi_flush       (deflate_state *s);
static void copy_block     (deflate_state *s, char *buf, unsigned len,
                           int header);

#ifndef DEBUG_ZLIB
/* Emit the code for symbol c from the given tree into the bit buffer. */
#  define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
   /* Send a code of the given tree. c and tree must not have side effects */

#else /* DEBUG_ZLIB */
#  define send_code(s, c, tree) \
     { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
       send_bits(s, tree[c].Code, tree[c].Len); }
#endif

#define d_code(dist) \
   ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
/* Mapping from a distance to a distance code. dist is the distance - 1 and
 * must not have side effects. dist_code[256] and dist_code[257] are never
 * used.
 */
/* ===========================================================================
 * Send a value on a given number of bits.
 * IN assertion: length <= 16 and value fits in length bits.
 *
 * Bits are accumulated in s->bi_buf (s->bi_valid bits currently held);
 * whenever the buffer would overflow, a 16-bit word is flushed to the
 * output and the leftover high bits of the value are carried over.
 */
#ifdef DEBUG_ZLIB
static void send_bits      (deflate_state *s, int value, int length);

static void send_bits(
	deflate_state *s,
	int value,  /* value to send */
	int length  /* number of bits */
)
{
    Tracevv((stderr," l %2d v %4x ", length, value));
    Assert(length > 0 && length <= 15, "invalid length");
    s->bits_sent += (ulg)length;

    /* If not enough room in bi_buf, use (valid) bits from bi_buf and
     * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
     * unused bits in value.
     */
    if (s->bi_valid > (int)Buf_size - length) {
        s->bi_buf |= (value << s->bi_valid);
        put_short(s, s->bi_buf);
        s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
        s->bi_valid += length - Buf_size;
    } else {
        s->bi_buf |= value << s->bi_valid;
        s->bi_valid += length;
    }
}
#else /* !DEBUG_ZLIB */

/* Release build: same logic as the debug function, as a macro. */
#define send_bits(s, value, length) \
{ int len = length;\
  if (s->bi_valid > (int)Buf_size - len) {\
    int val = value;\
    s->bi_buf |= (val << s->bi_valid);\
    put_short(s, s->bi_buf);\
    s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
    s->bi_valid += len - Buf_size;\
  } else {\
    s->bi_buf |= (value) << s->bi_valid;\
    s->bi_valid += len;\
  }\
}
#endif /* DEBUG_ZLIB */
/* ===========================================================================
 * Initialize the various 'constant' tables. In a multi-threaded environment,
 * this function may be called by two threads concurrently, but this is
 * harmless since both invocations do exactly the same thing.
 */
static void tr_static_init(void)
{
    static int static_init_done;
    int n;        /* iterates over tree elements */
    int bits;     /* bit counter */
    int length;   /* length value */
    int code;     /* code value */
    int dist;     /* distance index */
    ush bl_count[MAX_BITS+1];
    /* number of codes at each bit length for an optimal tree */

    if (static_init_done) return;

    /* Initialize the mapping length (0..255) -> length code (0..28) */
    length = 0;
    for (code = 0; code < LENGTH_CODES-1; code++) {
        base_length[code] = length;
        for (n = 0; n < (1<<extra_lbits[code]); n++) {
            length_code[length++] = (uch)code;
        }
    }
    Assert (length == 256, "tr_static_init: length != 256");
    /* Note that the length 255 (match length 258) can be represented
     * in two different ways: code 284 + 5 bits or code 285, so we
     * overwrite length_code[255] to use the best encoding:
     */
    length_code[length-1] = (uch)code;

    /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
    dist = 0;
    /* Codes 0..15 cover distances 1..256 directly. */
    for (code = 0 ; code < 16; code++) {
        base_dist[code] = dist;
        for (n = 0; n < (1<<extra_dbits[code]); n++) {
            dist_code[dist++] = (uch)code;
        }
    }
    Assert (dist == 256, "tr_static_init: dist != 256");
    /* Codes 16..29 are indexed by the top 8 bits of the 15-bit distance. */
    dist >>= 7; /* from now on, all distances are divided by 128 */
    for ( ; code < D_CODES; code++) {
        base_dist[code] = dist << 7;
        for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
            dist_code[256 + dist++] = (uch)code;
        }
    }
    Assert (dist == 256, "tr_static_init: 256+dist != 512");

    /* Construct the codes of the static literal tree */
    for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
    n = 0;
    while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
    while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
    while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
    while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
    /* Codes 286 and 287 do not exist, but we must include them in the
     * tree construction to get a canonical Huffman tree (longest code
     * all ones)
     */
    gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);

    /* The static distance tree is trivial: */
    for (n = 0; n < D_CODES; n++) {
        static_dtree[n].Len = 5;
        static_dtree[n].Code = bitrev32((u32)n) >> (32 - 5);
    }
    static_init_done = 1;
}
/* ===========================================================================
 * Initialize the tree data structures for a new zlib stream: build the
 * shared static tables (once), wire the per-stream dynamic trees to their
 * static descriptors, reset the bit buffer and start the first block.
 */
void zlib_tr_init(
	deflate_state *s
)
{
    tr_static_init();

    s->compressed_len = 0L;

    s->l_desc.dyn_tree = s->dyn_ltree;
    s->l_desc.stat_desc = &static_l_desc;

    s->d_desc.dyn_tree = s->dyn_dtree;
    s->d_desc.stat_desc = &static_d_desc;

    s->bl_desc.dyn_tree = s->bl_tree;
    s->bl_desc.stat_desc = &static_bl_desc;

    s->bi_buf = 0;
    s->bi_valid = 0;
    s->last_eob_len = 8; /* enough lookahead for inflate */
#ifdef DEBUG_ZLIB
    s->bits_sent = 0L;
#endif

    /* Initialize the first block of the first file: */
    init_block(s);
}
/* ===========================================================================
 * Reset all per-block state: zero every symbol frequency in the three
 * dynamic trees, seed the mandatory end-of-block symbol, and clear the
 * block's length accounting.
 */
static void init_block(
	deflate_state *s
)
{
    int i;  /* iterates over tree elements */

    /* Clear the frequency counts of all three dynamic trees. */
    for (i = 0; i < L_CODES; i++)
        s->dyn_ltree[i].Freq = 0;
    for (i = 0; i < D_CODES; i++)
        s->dyn_dtree[i].Freq = 0;
    for (i = 0; i < BL_CODES; i++)
        s->bl_tree[i].Freq = 0;

    /* Every block ends with END_BLOCK, so it always occurs once. */
    s->dyn_ltree[END_BLOCK].Freq = 1;
    s->opt_len = s->static_len = 0L;
    s->last_lit = s->matches = 0;
}
#define SMALLEST 1
/* Index within the heap array of least frequent node in the Huffman tree */

/* ===========================================================================
 * Remove the smallest element from the heap and recreate the heap with
 * one less element. Updates heap and heap_len.
 */
#define pqremove(s, tree, top) \
{\
    top = s->heap[SMALLEST]; \
    s->heap[SMALLEST] = s->heap[s->heap_len--]; \
    pqdownheap(s, tree, SMALLEST); \
}

/* ===========================================================================
 * Compares to subtrees, using the tree depth as tie breaker when
 * the subtrees have equal frequency. This minimizes the worst case length.
 */
#define smaller(tree, n, m, depth) \
   (tree[n].Freq < tree[m].Freq || \
   (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
/* ===========================================================================
 * Restore the heap property by moving down the tree starting at node k,
 * exchanging a node with the smallest of its two sons if necessary, stopping
 * when the heap property is re-established (each father smaller than its
 * two sons).
 *
 * The heap is a classic 1-based binary min-heap over tree node indices,
 * ordered by the smaller() comparison (frequency, depth as tie breaker).
 */
static void pqdownheap(
	deflate_state *s,
	ct_data *tree,  /* the tree to restore */
	int k		/* node to move down */
)
{
    int v = s->heap[k];
    int j = k << 1;  /* left son of k */
    while (j <= s->heap_len) {
        /* Set j to the smallest of the two sons: */
        if (j < s->heap_len &&
            smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
            j++;
        }
        /* Exit if v is smaller than both sons */
        if (smaller(tree, v, s->heap[j], s->depth)) break;

        /* Exchange v with the smallest son */
        s->heap[k] = s->heap[j];  k = j;

        /* And continue down the tree, setting j to the left son of k */
        j <<= 1;
    }
    s->heap[k] = v;
}
/* ===========================================================================
 * Compute the optimal bit lengths for a tree and update the total bit length
 * for the current block.
 * IN assertion: the fields freq and dad are set, heap[heap_max] and
 *    above are the tree nodes sorted by increasing frequency.
 * OUT assertions: the field len is set to the optimal bit length, the
 *     array bl_count contains the frequencies for each bit length.
 *     The length opt_len is updated; static_len is also updated if stree is
 *     not null.
 */
static void gen_bitlen(
	deflate_state *s,
	tree_desc *desc    /* the tree descriptor */
)
{
    ct_data *tree        = desc->dyn_tree;
    int max_code         = desc->max_code;
    const ct_data *stree = desc->stat_desc->static_tree;
    const int *extra     = desc->stat_desc->extra_bits;
    int base             = desc->stat_desc->extra_base;
    int max_length       = desc->stat_desc->max_length;
    int h;              /* heap index */
    int n, m;           /* iterate over the tree elements */
    int bits;           /* bit length */
    int xbits;          /* extra bits */
    ush f;              /* frequency */
    int overflow = 0;   /* number of elements with bit length too large */

    for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;

    /* In a first pass, compute the optimal bit lengths (which may
     * overflow in the case of the bit length tree).
     */
    tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */

    for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
        n = s->heap[h];
        bits = tree[tree[n].Dad].Len + 1;
        /* Clamp over-long codes; fixed up in the overflow pass below. */
        if (bits > max_length) bits = max_length, overflow++;
        tree[n].Len = (ush)bits;
        /* We overwrite tree[n].Dad which is no longer needed */

        if (n > max_code) continue; /* not a leaf node */

        s->bl_count[bits]++;
        xbits = 0;
        if (n >= base) xbits = extra[n-base];
        f = tree[n].Freq;
        s->opt_len += (ulg)f * (bits + xbits);
        if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
    }
    if (overflow == 0) return;

    Trace((stderr,"\nbit length overflow\n"));
    /* This happens for example on obj2 and pic of the Calgary corpus */

    /* Find the first bit length which could increase: */
    do {
        bits = max_length-1;
        while (s->bl_count[bits] == 0) bits--;
        s->bl_count[bits]--;      /* move one leaf down the tree */
        s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
        s->bl_count[max_length]--;
        /* The brother of the overflow item also moves one step up,
         * but this does not affect bl_count[max_length]
         */
        overflow -= 2;
    } while (overflow > 0);

    /* Now recompute all bit lengths, scanning in increasing frequency.
     * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
     * lengths instead of fixing only the wrong ones. This idea is taken
     * from 'ar' written by Haruhiko Okumura.)
     */
    for (bits = max_length; bits != 0; bits--) {
        n = s->bl_count[bits];
        while (n != 0) {
            m = s->heap[--h];
            if (m > max_code) continue; /* internal node: no code length */
            if (tree[m].Len != (unsigned) bits) {
                Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
                s->opt_len += ((long)bits - (long)tree[m].Len)
                              *(long)tree[m].Freq;
                tree[m].Len = (ush)bits;
            }
            n--;
        }
    }
}
/* ===========================================================================
* Generate the codes for a given tree and bit counts (which need not be
* optimal).
* IN assertion: the array bl_count contains the bit length statistics for
* the given tree and the field len is set for all tree elements.
* OUT assertion: the field code is set for all tree elements of non
* zero code length.
*/
/*
 * Assign the actual Huffman codes from the per-length counts in
 * bl_count; codes are emitted LSB first, hence the bit reversal.
 * IN: Len is set for all elements; OUT: Code is set for all elements
 * of non-zero length.
 */
static void gen_codes(
    ct_data *tree,  /* the tree to decorate */
    int max_code,   /* largest code with non zero frequency */
    ush *bl_count   /* number of codes at each bit length */
)
{
    ush first_code[MAX_BITS+1]; /* next (non-reversed) code per length */
    ush next = 0;               /* running code value */
    int len;                    /* bit length index */
    int idx;                    /* code index */

    /* Derive the starting code value of each bit length from the
     * counts, without bit reversal.
     */
    for (len = 1; len <= MAX_BITS; len++) {
        next = (next + bl_count[len - 1]) << 1;
        first_code[len] = next;
    }
    /* With consistent counts the last code must be all ones. */
    Assert(next + bl_count[MAX_BITS] - 1 == (1 << MAX_BITS) - 1,
           "inconsistent bit counts");
    Tracev((stderr, "\ngen_codes: max_code %d ", max_code));

    for (idx = 0; idx <= max_code; idx++) {
        int nbits = tree[idx].Len;

        if (nbits == 0)
            continue;
        /* Reverse the bits so the code is transmitted LSB first. */
        tree[idx].Code = bitrev32((u32)(first_code[nbits]++)) >> (32 - nbits);
        Tracecv(tree != static_ltree, (stderr, "\nn %3d %c l %2d c %4x (%x) ",
                idx, (isgraph(idx) ? idx : ' '), nbits, tree[idx].Code,
                first_code[nbits] - 1));
    }
}
/* ===========================================================================
* Construct one Huffman tree and assigns the code bit strings and lengths.
* Update the total bit length for the current block.
* IN assertion: the field freq is set for all tree elements.
* OUT assertions: the fields len and code are set to the optimal bit length
* and corresponding code. The length opt_len is updated; static_len is
* also updated if stree is not null. The field max_code is set.
*/
/*
 * Build the Huffman tree for desc->dyn_tree from its Freq fields:
 * heapify the leaves, repeatedly merge the two least-frequent nodes,
 * then derive code lengths (gen_bitlen) and codes (gen_codes).
 * Sets desc->max_code and updates s->opt_len / s->static_len.
 */
static void build_tree(
deflate_state *s,
tree_desc *desc /* the tree descriptor */
)
{
ct_data *tree = desc->dyn_tree;
const ct_data *stree = desc->stat_desc->static_tree;
int elems = desc->stat_desc->elems;
int n, m; /* iterate over heap elements */
int max_code = -1; /* largest code with non zero frequency */
int node; /* new node being created */
/* Construct the initial heap, with least frequent element in
 * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
 * heap[0] is not used.
 */
s->heap_len = 0, s->heap_max = HEAP_SIZE;
for (n = 0; n < elems; n++) {
if (tree[n].Freq != 0) {
s->heap[++(s->heap_len)] = max_code = n;
s->depth[n] = 0;
} else {
tree[n].Len = 0;
}
}
/* The pkzip format requires that at least one distance code exists,
 * and that at least one bit should be sent even if there is only one
 * possible code. So to avoid special checks later on we force at least
 * two codes of non zero frequency.
 */
while (s->heap_len < 2) {
node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
tree[node].Freq = 1;
s->depth[node] = 0;
s->opt_len--; if (stree) s->static_len -= stree[node].Len;
/* node is 0 or 1 so it does not have extra bits */
}
desc->max_code = max_code;
/* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
 * establish sub-heaps of increasing lengths:
 */
for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
/* Construct the Huffman tree by repeatedly combining the least two
 * frequent nodes.
 */
node = elems; /* next internal node of the tree */
do {
pqremove(s, tree, n); /* n = node of least frequency */
m = s->heap[SMALLEST]; /* m = node of next least frequency */
s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
s->heap[--(s->heap_max)] = m;
/* Create a new node father of n and m */
tree[node].Freq = tree[n].Freq + tree[m].Freq;
/* depth[] is used by smaller() to break frequency ties, making the
 * tree construction deterministic.
 */
s->depth[node] = (uch) (max(s->depth[n], s->depth[m]) + 1);
tree[n].Dad = tree[m].Dad = (ush)node;
#ifdef DUMP_BL_TREE
if (tree == s->bl_tree) {
fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
}
#endif
/* and insert the new node in the heap */
s->heap[SMALLEST] = node++;
pqdownheap(s, tree, SMALLEST);
} while (s->heap_len >= 2);
s->heap[--(s->heap_max)] = s->heap[SMALLEST];
/* At this point, the fields freq and dad are set. We can now
 * generate the bit lengths.
 */
gen_bitlen(s, (tree_desc *)desc);
/* The field len is now set, we can generate the bit codes */
gen_codes ((ct_data *)tree, max_code, s->bl_count);
}
/* ===========================================================================
* Scan a literal or distance tree to determine the frequencies of the codes
* in the bit length tree.
*/
/*
 * Walk the code lengths of `tree` and tally, into s->bl_tree, the
 * frequencies of the bit-length alphabet symbols that send_tree()
 * will later emit: literal lengths, REP_3_6 (repeat previous length),
 * and REPZ_3_10 / REPZ_11_138 (runs of zero lengths).
 */
static void scan_tree(
    deflate_state *s,
    ct_data *tree,  /* the tree to be scanned */
    int max_code    /* and its largest code of non zero frequency */
)
{
    int n;                      /* index of the code being examined */
    int prevlen = -1;           /* previously recorded length */
    int curlen;                 /* length of the current code */
    int nextlen = tree[0].Len;  /* length of the following code */
    int count = 0;              /* run length of the current code */
    int max_count = 7;          /* longest run we may merge */
    int min_count = 4;          /* shortest run worth a repeat symbol */

    if (nextlen == 0) {
        max_count = 138;
        min_count = 3;
    }
    tree[max_code + 1].Len = (ush)0xffff; /* guard */

    for (n = 0; n <= max_code; n++) {
        curlen = nextlen;
        nextlen = tree[n + 1].Len;

        /* Extend the current run while lengths match and room remains. */
        if (++count < max_count && curlen == nextlen)
            continue;

        if (count < min_count) {
            /* Run too short: count each length individually. */
            s->bl_tree[curlen].Freq += count;
        } else if (curlen != 0) {
            if (curlen != prevlen)
                s->bl_tree[curlen].Freq++;
            s->bl_tree[REP_3_6].Freq++;
        } else if (count <= 10) {
            s->bl_tree[REPZ_3_10].Freq++;
        } else {
            s->bl_tree[REPZ_11_138].Freq++;
        }

        count = 0;
        prevlen = curlen;
        /* Pick the run limits for the next group. */
        if (nextlen == 0) {
            max_count = 138;
            min_count = 3;
        } else if (curlen == nextlen) {
            max_count = 6;
            min_count = 3;
        } else {
            max_count = 7;
            min_count = 4;
        }
    }
}
/* ===========================================================================
* Send a literal or distance tree in compressed form, using the codes in
* bl_tree.
*/
/*
 * Emit the code lengths of `tree` in compressed form using the codes
 * in s->bl_tree; mirrors the run grouping done by scan_tree(), so the
 * two functions must stay in lockstep: REP_3_6 repeats the previous
 * length 3-6 times, REPZ_3_10 / REPZ_11_138 encode runs of zeros.
 */
static void send_tree(
deflate_state *s,
ct_data *tree, /* the tree to be scanned */
int max_code /* and its largest code of non zero frequency */
)
{
int n; /* iterates over all tree elements */
int prevlen = -1; /* last emitted length */
int curlen; /* length of current code */
int nextlen = tree[0].Len; /* length of next code */
int count = 0; /* repeat count of the current code */
int max_count = 7; /* max repeat count */
int min_count = 4; /* min repeat count */
/* tree[max_code+1].Len = -1; */ /* guard already set */
if (nextlen == 0) max_count = 138, min_count = 3;
for (n = 0; n <= max_code; n++) {
curlen = nextlen; nextlen = tree[n+1].Len;
/* Keep extending the run while lengths match and room remains. */
if (++count < max_count && curlen == nextlen) {
continue;
} else if (count < min_count) {
/* Run too short for a repeat symbol: emit each length. */
do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
} else if (curlen != 0) {
/* Emit the length once (if it changed), then a 2-bit repeat. */
if (curlen != prevlen) {
send_code(s, curlen, s->bl_tree); count--;
}
Assert(count >= 3 && count <= 6, " 3_6?");
send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
} else if (count <= 10) {
send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
} else {
send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
}
count = 0; prevlen = curlen;
/* Pick the run limits for the next group. */
if (nextlen == 0) {
max_count = 138, min_count = 3;
} else if (curlen == nextlen) {
max_count = 6, min_count = 3;
} else {
max_count = 7, min_count = 4;
}
}
}
/* ===========================================================================
* Construct the Huffman tree for the bit lengths and return the index in
* bl_order of the last bit length code to send.
*/
/*
 * Build the Huffman tree over the bit-length alphabet and account for
 * its transmission cost in s->opt_len. Returns the index in bl_order
 * of the last bit length code that must be sent (at least 3, since
 * the pkzip format requires at least 4 codes).
 */
static int build_bl_tree(
    deflate_state *s
)
{
    int last; /* index in bl_order of last non-zero-length code */

    /* Tally bit-length symbol frequencies from both main trees. */
    scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
    scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);

    /* Build the bit length tree from those frequencies. */
    build_tree(s, (tree_desc *)(&(s->bl_desc)));
    /* opt_len now includes the length of the tree representations,
     * except the lengths of the bit lengths codes and the 5+5+4 bits
     * for the counts.
     */

    /* Search backwards for the last code actually used, never going
     * below index 3 (appnote.txt says 3, but 4 is the value in use).
     */
    for (last = BL_CODES - 1; last >= 3; last--) {
        if (s->bl_tree[bl_order[last]].Len != 0)
            break;
    }

    /* Add the bit length code lengths (3 bits each) and the three
     * count fields (5+5+4 bits) to the dynamic-block cost.
     */
    s->opt_len += 3 * (last + 1) + 5 + 5 + 4;
    Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
            s->opt_len, s->static_len));

    return last;
}
/* ===========================================================================
* Send the header for a block using dynamic Huffman trees: the counts, the
* lengths of the bit length codes, the literal tree and the distance tree.
* IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
*/
/*
 * Emit the dynamic-block header: the three code counts, the bit length
 * code lengths, then the literal and distance trees in compressed form.
 * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
 */
static void send_all_trees(
    deflate_state *s,
    int lcodes,  /* number of literal/length codes */
    int dcodes,  /* number of distance codes */
    int blcodes  /* number of bit length codes */
)
{
    int rank; /* index in bl_order */

    Assert(lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
    Assert(lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
           "too many codes");
    Tracev((stderr, "\nbl counts: "));

    /* Counts are transmitted biased by their minimum legal values. */
    send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */
    send_bits(s, dcodes - 1, 5);
    send_bits(s, blcodes - 4, 4);  /* not -3 as stated in appnote.txt */

    /* Bit length code lengths, 3 bits each, in bl_order permutation. */
    for (rank = 0; rank < blcodes; rank++) {
        Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
        send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
    }
    Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));

    send_tree(s, (ct_data *)s->dyn_ltree, lcodes - 1); /* literal tree */
    Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));

    send_tree(s, (ct_data *)s->dyn_dtree, dcodes - 1); /* distance tree */
    Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
}
/* ===========================================================================
* Send a stored block
*/
/*
 * Emit one stored (uncompressed) block: 3-bit header, byte alignment,
 * LEN/NLEN words, then the raw data. Updates s->compressed_len.
 */
void zlib_tr_stored_block(
    deflate_state *s,
    char *buf,       /* input block */
    ulg stored_len,  /* length of input block */
    int eof          /* true if this is the last block for a file */
)
{
    /* 3-bit block header: STORED type plus the final-block flag. */
    send_bits(s, (STORED_BLOCK << 1) + eof, 3);

    /* Round the running bit count up to a byte boundary past the
     * header, then add the payload plus the 4 length bytes, in bits.
     */
    s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
    s->compressed_len += (stored_len + 4) << 3;

    /* Copy the data itself, preceded by its length header. */
    copy_block(s, buf, (unsigned)stored_len, 1);
}
/* Send just the `stored block' type code without any length bytes or data.
*/
/*
 * Emit just the `stored block' type code — no length bytes, no data —
 * and flush the bit buffer to a byte boundary.
 */
void zlib_tr_stored_type_only(
    deflate_state *s
)
{
    send_bits(s, (STORED_BLOCK << 1), 3); /* type code only, not final */
    bi_windup(s);                         /* align on byte boundary */
    s->compressed_len = (s->compressed_len + 3) & ~7L;
}
/* ===========================================================================
* Send one empty static block to give enough lookahead for inflate.
* This takes 10 bits, of which 7 may remain in the bit buffer.
* The current inflate code requires 9 bits of lookahead. If the
* last two codes for the previous block (real code plus EOB) were coded
* on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
* the last real code. In this case we send two empty static blocks instead
* of one. (There are no problems if the previous block is stored or fixed.)
* To simplify the code, we assume the worst case of last real code encoded
* on one bit only.
*/
/*
 * Emit one empty static block (10 bits) to give inflate enough
 * lookahead, and a second one when the bit-buffer accounting shows
 * fewer than 9 bits of lookahead would otherwise remain.
 */
void zlib_tr_align(
    deflate_state *s
)
{
    /* First empty static block: type code plus end-of-block symbol. */
    send_bits(s, STATIC_TREES << 1, 3);
    send_code(s, END_BLOCK, static_ltree);
    s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
    bi_flush(s);

    /* Of the 10 bits for the empty block, we have already sent
     * (10 - bi_valid) bits. The lookahead for the last real code
     * (before the EOB of the previous block) was thus at least one
     * plus the length of the EOB plus what we have just sent of the
     * empty static block.
     */
    if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
        /* Not enough lookahead: send a second empty static block. */
        send_bits(s, STATIC_TREES << 1, 3);
        send_code(s, END_BLOCK, static_ltree);
        s->compressed_len += 10L;
        bi_flush(s);
    }
    s->last_eob_len = 7;
}
/* ===========================================================================
* Determine the best encoding for the current block: dynamic trees, static
* trees or store, and output the encoded block to the zip file. This function
* returns the total compressed length for the file so far.
*/
/*
 * Finish the current block: build the dynamic trees (unless level 0
 * forces a stored block), then choose the cheapest of stored, static,
 * or dynamic encoding and emit it. Returns the total compressed
 * length of the stream so far, in bytes.
 */
ulg zlib_tr_flush_block(
deflate_state *s,
char *buf, /* input block, or NULL if too old */
ulg stored_len, /* length of input block */
int eof /* true if this is the last block for a file */
)
{
ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
int max_blindex = 0; /* index of last bit length code of non zero freq */
/* Build the Huffman trees unless a stored block is forced */
if (s->level > 0) {
/* Check if the file is ascii or binary */
if (s->data_type == Z_UNKNOWN) set_data_type(s);
/* Construct the literal and distance trees */
build_tree(s, (tree_desc *)(&(s->l_desc)));
Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
s->static_len));
build_tree(s, (tree_desc *)(&(s->d_desc)));
Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
s->static_len));
/* At this point, opt_len and static_len are the total bit lengths of
 * the compressed block data, excluding the tree representations.
 */
/* Build the bit length tree for the above two trees, and get the index
 * in bl_order of the last bit length code to send.
 */
max_blindex = build_bl_tree(s);
/* Determine the best encoding. Compute first the block length in bytes*/
opt_lenb = (s->opt_len+3+7)>>3;
static_lenb = (s->static_len+3+7)>>3;
Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
s->last_lit));
/* Ties favour the static encoding (simpler, no tree to send). */
if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
} else {
Assert(buf != (char*)0, "lost buf");
opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
}
/* If compression failed and this is the first and last block,
 * and if the .zip file can be seeked (to rewrite the local header),
 * the whole file is transformed into a stored file:
 */
#ifdef STORED_FILE_OK
# ifdef FORCE_STORED_FILE
if (eof && s->compressed_len == 0L) { /* force stored file */
# else
if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
# endif
/* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
if (buf == (char*)0) error ("block vanished");
copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
s->compressed_len = stored_len << 3;
s->method = STORED;
} else
#endif /* STORED_FILE_OK */
#ifdef FORCE_STORED
if (buf != (char*)0) { /* force stored block */
#else
if (stored_len+4 <= opt_lenb && buf != (char*)0) {
/* 4: two words for the lengths */
#endif
/* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
 * Otherwise we can't have processed more than WSIZE input bytes since
 * the last block flush, because compression would have been
 * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
 * transform a block into a stored block.
 */
zlib_tr_stored_block(s, buf, stored_len, eof);
#ifdef FORCE_STATIC
} else if (static_lenb >= 0) { /* force static trees */
#else
} else if (static_lenb == opt_lenb) {
#endif
/* Static trees: header plus data coded with the fixed tables. */
send_bits(s, (STATIC_TREES<<1)+eof, 3);
compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
s->compressed_len += 3 + s->static_len;
} else {
/* Dynamic trees: header, tree descriptions, then the data. */
send_bits(s, (DYN_TREES<<1)+eof, 3);
send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
max_blindex+1);
compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
s->compressed_len += 3 + s->opt_len;
}
Assert (s->compressed_len == s->bits_sent, "bad compressed size");
/* Reset symbol buffers and frequencies for the next block. */
init_block(s);
if (eof) {
bi_windup(s);
s->compressed_len += 7; /* align on byte boundary */
}
Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
s->compressed_len-7*eof));
return s->compressed_len >> 3;
}
/* ===========================================================================
* Save the match info and tally the frequency counts. Return true if
* the current block must be flushed.
*/
/*
 * Record one literal byte or one (distance, length) match pair and
 * update the frequency counts. Returns non-zero when the current
 * block should be flushed.
 */
int zlib_tr_tally(
    deflate_state *s,
    unsigned dist, /* distance of matched string */
    unsigned lc    /* match length-MIN_MATCH or unmatched char (if dist==0) */
)
{
    /* Append the pair to the distance/literal buffers. */
    s->d_buf[s->last_lit] = (ush)dist;
    s->l_buf[s->last_lit++] = (uch)lc;

    if (dist == 0) {
        /* lc is a plain literal byte. */
        s->dyn_ltree[lc].Freq++;
    } else {
        s->matches++;
        /* Here lc is the match length - MIN_MATCH; store distance - 1. */
        dist--;
        Assert((ush)dist < (ush)MAX_DIST(s) &&
               (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
               (ush)d_code(dist) < (ush)D_CODES, "zlib_tr_tally: bad match");
        s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
        s->dyn_dtree[d_code(dist)].Freq++;
    }

    /* Every 4096 symbols at level > 2, estimate whether flushing the
     * block early would be profitable.
     */
    if ((s->last_lit & 0xfff) == 0 && s->level > 2) {
        ulg out_length = (ulg)s->last_lit*8L; /* upper bound, in bits */
        ulg in_length = (ulg)((long)s->strstart - s->block_start);
        int dcode;

        for (dcode = 0; dcode < D_CODES; dcode++) {
            out_length += (ulg)s->dyn_dtree[dcode].Freq *
                          (5L+extra_dbits[dcode]);
        }
        out_length >>= 3;
        Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
                s->last_lit, in_length, out_length,
                100L - out_length*100L/in_length));
        if (s->matches < s->last_lit/2 && out_length < in_length/2)
            return 1;
    }

    /* We avoid equality with lit_bufsize because of wraparound at 64K
     * on 16 bit machines and because stored blocks are restricted to
     * 64K-1 bytes.
     */
    return s->last_lit == s->lit_bufsize-1;
}
/* ===========================================================================
* Send the block data compressed using the given Huffman trees
*/
/*
 * Emit the buffered literal/match symbols using the supplied literal
 * and distance Huffman trees, followed by the end-of-block code.
 */
static void compress_block(
deflate_state *s,
ct_data *ltree, /* literal tree */
ct_data *dtree /* distance tree */
)
{
unsigned dist; /* distance of matched string */
int lc; /* match length or unmatched char (if dist == 0) */
unsigned lx = 0; /* running index in l_buf */
unsigned code; /* the code to send */
int extra; /* number of extra bits to send */
if (s->last_lit != 0) do {
dist = s->d_buf[lx];
lc = s->l_buf[lx++];
if (dist == 0) {
send_code(s, lc, ltree); /* send a literal byte */
Tracecv(isgraph(lc), (stderr," '%c' ", lc));
} else {
/* Here, lc is the match length - MIN_MATCH */
code = length_code[lc];
send_code(s, code+LITERALS+1, ltree); /* send the length code */
extra = extra_lbits[code];
if (extra != 0) {
/* Send the length remainder above the code's base value. */
lc -= base_length[code];
send_bits(s, lc, extra); /* send the extra length bits */
}
dist--; /* dist is now the match distance - 1 */
code = d_code(dist);
Assert (code < D_CODES, "bad d_code");
send_code(s, code, dtree); /* send the distance code */
extra = extra_dbits[code];
if (extra != 0) {
/* Send the distance remainder above the code's base value. */
dist -= base_dist[code];
send_bits(s, dist, extra); /* send the extra distance bits */
}
} /* literal or match pair ? */
/* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
} while (lx < s->last_lit);
send_code(s, END_BLOCK, ltree);
/* Remember the EOB length for zlib_tr_align()'s lookahead check. */
s->last_eob_len = ltree[END_BLOCK].Len;
}
/* ===========================================================================
* Set the data type to ASCII or BINARY, using a crude approximation:
* binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
* IN assertion: the fields freq of dyn_ltree are set and the total of all
* frequencies does not exceed 64K (to fit in an int on 16 bit machines).
*/
/*
 * Classify the block as binary or ASCII from the literal frequencies:
 * binary if more than ~20% of the bytes fall outside [7, 127].
 * IN assertion: the Freq fields of dyn_ltree are set and their total
 * fits in an int on 16 bit machines.
 */
static void set_data_type(
    deflate_state *s
)
{
    unsigned bin_freq = 0;   /* bytes <= 6 or in [128, LITERALS) */
    unsigned ascii_freq = 0; /* bytes in [7, 127] */
    int n = 0;

    /* Low control characters count as binary... */
    for (; n < 7; n++)
        bin_freq += s->dyn_ltree[n].Freq;
    /* ...the printable/ASCII range counts as text... */
    for (; n < 128; n++)
        ascii_freq += s->dyn_ltree[n].Freq;
    /* ...and the high half counts as binary again. */
    for (; n < LITERALS; n++)
        bin_freq += s->dyn_ltree[n].Freq;

    s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
}
/* ===========================================================================
* Copy a stored block, storing first the length and its
* one's complement if requested.
*/
/*
 * Copy a stored block into the pending output buffer, optionally
 * preceded by its length and the length's one's complement.
 */
static void copy_block(
    deflate_state *s,
    char *buf,     /* the input data */
    unsigned len,  /* its length */
    int header     /* true if block header must be written */
)
{
    bi_windup(s);        /* align on byte boundary */
    s->last_eob_len = 8; /* enough lookahead for inflate */

    if (header) {
        /* Stored-block header: LEN then its one's complement NLEN. */
        put_short(s, (ush)len);
        put_short(s, (ush)~len);
#ifdef DEBUG_ZLIB
        s->bits_sent += 2*16;
#endif
    }
#ifdef DEBUG_ZLIB
    s->bits_sent += (ulg)len<<3;
#endif
    /* Bulk copy instead of per-byte put_byte(s, *buf++) calls. */
    memcpy(&s->pending_buf[s->pending], buf, len);
    s->pending += len;
}
| gpl-2.0 |
zhaiyu/linux-2.6 | drivers/iio/amplifiers/ad8366.c | 1001 | 4429 | /*
* AD8366 SPI Dual-Digital Variable Gain Amplifier (VGA)
*
* Copyright 2012 Analog Devices Inc.
*
* Licensed under the GPL-2.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/bitrev.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
/* Per-device driver state, stored in the iio_dev private area. */
struct ad8366_state {
struct spi_device *spi; /* SPI link to the amplifier */
struct regulator *reg; /* "vcc" supply; may hold an ERR_PTR */
unsigned char ch[2]; /* cached 6-bit gain code per channel */
/*
 * DMA (thus cache coherency maintenance) requires the
 * transfer buffers to live in their own cache lines.
 */
unsigned char data[2] ____cacheline_aligned;
};
/*
 * Push both channel gain codes to the AD8366 over SPI.
 *
 * Each code is masked to 6 bits and bit-reversed (bitrev8), then the
 * two codes are packed into a 2-byte transfer with channel B first.
 * Returns 0 on success or a negative error code from spi_write().
 *
 * Fix: the second parameter was spelled `char unsigned ch_b` — legal
 * but nonstandard specifier order, inconsistent with `unsigned char
 * ch_a`; normalized with no change in type or behavior.
 */
static int ad8366_write(struct iio_dev *indio_dev,
			unsigned char ch_a, unsigned char ch_b)
{
	struct ad8366_state *st = iio_priv(indio_dev);
	int ret;

	/* Keep only the low 6 bits of each code and reverse the bits. */
	ch_a = bitrev8(ch_a & 0x3F);
	ch_b = bitrev8(ch_b & 0x3F);

	/* Pack: B's high nibble, then B's low nibble with A's top bits. */
	st->data[0] = ch_b >> 4;
	st->data[1] = (ch_b << 4) | (ch_a >> 2);

	ret = spi_write(st->spi, st->data, ARRAY_SIZE(st->data));
	if (ret < 0)
		dev_err(&indio_dev->dev, "write failed (%d)", ret);

	return ret;
}
/*
 * IIO read_raw callback: report the cached hardware gain of a channel.
 *
 * The 6-bit code is converted to milli-dB as code * 253 + 4500 (i.e.
 * 4.5 dB base, 0.253 dB per step) and returned as integer dB in *val
 * plus micro-dB in *val2 (IIO_VAL_INT_PLUS_MICRO_DB).
 *
 * Fix: removed the stray `;` after the function's closing brace —
 * invalid in strict C90 and flagged by -Wpedantic.
 */
static int ad8366_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val,
			   int *val2,
			   long m)
{
	struct ad8366_state *st = iio_priv(indio_dev);
	int ret;
	unsigned code;

	mutex_lock(&indio_dev->mlock);
	switch (m) {
	case IIO_CHAN_INFO_HARDWAREGAIN:
		code = st->ch[chan->channel];

		/* Values in dB: split milli-dB into dB and micro-dB parts */
		code = code * 253 + 4500;
		*val = code / 1000;
		*val2 = (code % 1000) * 1000;

		ret = IIO_VAL_INT_PLUS_MICRO_DB;
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret;
}
/*
 * IIO write_raw callback: set the hardware gain of a channel.
 *
 * val/val2 carry integer dB and micro-dB; they are combined into
 * milli-dB, range-checked against the 4.5 .. 20.5 dB span, converted
 * to a 6-bit code (0.253 dB steps), cached, and written out together
 * with the other channel's cached code.
 */
static int ad8366_write_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int val,
			    int val2,
			    long mask)
{
	struct ad8366_state *st = iio_priv(indio_dev);
	unsigned gain_mdb;
	int ret;

	if (val < 0 || val2 < 0)
		return -EINVAL;

	/* Combine the integer and micro parts into milli-dB. */
	gain_mdb = (((u8)val * 1000) + ((u32)val2 / 1000));
	if (gain_mdb < 4500 || gain_mdb > 20500)
		return -EINVAL;

	/* Convert milli-dB above the 4.5 dB base into the device code. */
	gain_mdb = (gain_mdb - 4500) / 253;

	mutex_lock(&indio_dev->mlock);
	switch (mask) {
	case IIO_CHAN_INFO_HARDWAREGAIN:
		st->ch[chan->channel] = gain_mdb;
		ret = ad8366_write(indio_dev, st->ch[0], st->ch[1]);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret;
}
/* IIO hooks: gain readback/update via read_raw and write_raw. */
static const struct iio_info ad8366_info = {
.read_raw = &ad8366_read_raw,
.write_raw = &ad8366_write_raw,
.driver_module = THIS_MODULE,
};
/*
 * One indexed output-voltage channel per amplifier stage, exposing
 * only the per-channel hardware-gain attribute.
 */
#define AD8366_CHAN(_channel) { \
.type = IIO_VOLTAGE, \
.output = 1, \
.indexed = 1, \
.channel = _channel, \
.info_mask_separate = BIT(IIO_CHAN_INFO_HARDWAREGAIN),\
}
/* The AD8366 is a dual VGA: two channels, indices 0 and 1. */
static const struct iio_chan_spec ad8366_channels[] = {
AD8366_CHAN(0),
AD8366_CHAN(1),
};
/*
 * Bind the driver to an AD8366 SPI device: allocate the IIO device,
 * enable the (optional) "vcc" regulator, register with IIO, and
 * program both channels to gain code 0.
 */
static int ad8366_probe(struct spi_device *spi)
{
	struct ad8366_state *st;
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);

	/* The supply is optional: an ERR_PTR here is simply ignored. */
	st->reg = devm_regulator_get(&spi->dev, "vcc");
	if (!IS_ERR(st->reg)) {
		ret = regulator_enable(st->reg);
		if (ret)
			return ret;
	}

	spi_set_drvdata(spi, indio_dev);
	st->spi = spi;

	indio_dev->dev.parent = &spi->dev;
	indio_dev->name = spi_get_device_id(spi)->name;
	indio_dev->info = &ad8366_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = ad8366_channels;
	indio_dev->num_channels = ARRAY_SIZE(ad8366_channels);

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_disable_reg;

	/* Start both channels at the lowest gain code. */
	ad8366_write(indio_dev, 0, 0);

	return 0;

error_disable_reg:
	if (!IS_ERR(st->reg))
		regulator_disable(st->reg);

	return ret;
}
/*
 * Unbind the driver: unregister from IIO and undo the regulator
 * enable (allocation and the regulator handle are devm-managed).
 */
static int ad8366_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct ad8366_state *st = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);

	if (!IS_ERR(st->reg))
		regulator_disable(st->reg);

	return 0;
}
/* SPI device-id table: a single supported part, no driver data. */
static const struct spi_device_id ad8366_id[] = {
{"ad8366", 0},
{}
};
/* Driver registration glue; module_spi_driver() handles init/exit. */
static struct spi_driver ad8366_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
.probe = ad8366_probe,
.remove = ad8366_remove,
.id_table = ad8366_id,
};
module_spi_driver(ad8366_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD8366 VGA");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
keks2293/kernel_zte | drivers/acpi/osl.c | 1513 | 42881 | /*
* acpi_osl.c - OS-dependent functions ($Revision: 83 $)
*
* Copyright (C) 2000 Andrew Henroid
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (c) 2008 Intel Corporation
* Author: Matthew Wilcox <willy@linux.intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/acpi_io.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
#define PREFIX "ACPI: "
/* Deferred procedure call: an ACPICA callback queued on a workqueue. */
struct acpi_os_dpc {
acpi_osd_exec_callback function; /* callback supplied by ACPICA */
void *context; /* opaque argument for the callback */
struct work_struct work; /* workqueue item carrying this DPC */
int wait; /* NOTE(review): appears to select a flushable/synchronous
           * execution mode -- confirm at the enqueue site */
};
#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif
#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>
/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
extern char line_buf[80];
#endif /*ENABLE_DEBUGGER */
static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
u32 pm1b_ctrl);
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
/*
* This list of permanent mappings is for memory that may be accessed from
* interrupt context, where we can't do the ioremap().
*/
/* One entry in the permanent-mapping list declared above. */
struct acpi_ioremap {
struct list_head list; /* link in the global acpi_ioremaps list */
void __iomem *virt; /* kernel virtual base of the mapping */
acpi_physical_address phys; /* physical base address */
acpi_size size; /* length of the mapping, in bytes */
unsigned long refcount; /* number of active users of this mapping */
};
static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
static void __init acpi_osi_setup_late(void);
/*
* The story of _OSI(Linux)
*
* From pre-history through Linux-2.6.22,
* Linux responded TRUE upon a BIOS OSI(Linux) query.
*
* Unfortunately, reference BIOS writers got wind of this
* and put OSI(Linux) in their example code, quickly exposing
* this string as ill-conceived and opening the door to
* an un-bounded number of BIOS incompatibilities.
*
* For example, OSI(Linux) was used on resume to re-POST a
* video card on one system, because Linux at that time
* could not do a speedy restore in its native driver.
* But then upon gaining quick native restore capability,
* Linux has no way to tell the BIOS to skip the time-consuming
* POST -- putting Linux at a permanent performance disadvantage.
* On another system, the BIOS writer used OSI(Linux)
* to infer native OS support for IPMI! On other systems,
* OSI(Linux) simply got in the way of Linux claiming to
* be compatible with other operating systems, exposing
* BIOS issues such as skipped device initialization.
*
* So "Linux" turned out to be a really poor chose of
* OSI string, and from Linux-2.6.23 onward we respond FALSE.
*
* BIOS writers should NOT query _OSI(Linux) on future systems.
* Linux will complain on the console when it sees it, and return FALSE.
* To get Linux to return TRUE for your system will require
* a kernel source update to add a DMI entry,
* or boot with "acpi_osi=Linux"
*/
static struct osi_linux {
unsigned int enable:1;
unsigned int dmi:1;
unsigned int cmdline:1;
} osi_linux = {0, 0, 0};
/*
 * ACPICA _OSI interface hook: warn once when firmware probes for
 * _OSI("Linux"), noting whether and why the query is being honored,
 * then return ACPICA's own verdict unchanged.
 */
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
	if (strcmp("Linux", interface) == 0) {
		const char *source = osi_linux.cmdline ? " via cmdline" :
				     osi_linux.dmi ? " via DMI" : "";

		printk_once(KERN_NOTICE FW_BUG PREFIX
			    "BIOS _OSI(Linux) query %s%s\n",
			    osi_linux.enable ? "honored" : "ignored",
			    source);
	}

	return supported;
}
/*
 * Reserve the I/O-port or memory region described by a Generic
 * Address Structure so other drivers cannot claim it. Entries with a
 * zero address or length, or an unsupported space id, are ignored.
 */
static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* The GAS address field may be misaligned; copy it out safely. */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (addr == 0 || length == 0)
		return;

	/* Resources are never freed */
	switch (gas->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		request_region(addr, length, desc);
		break;
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		request_mem_region(addr, length, desc);
		break;
	}
}
/*
 * Reserve the fixed-feature register blocks declared in the FADT so
 * native drivers cannot claim them.  Runs once at boot via
 * device_initcall(); the regions are never released.
 */
static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");
	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");
	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");
	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");
	/* The PM timer block is only valid when its length is exactly 4. */
	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");
	/* Length of GPE blocks must be a non-negative multiple of 2 */
	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
	return 0;
}
device_initcall(acpi_reserve_resources);
/*
 * printf-style output entry point for ACPICA; all formatting is
 * delegated to acpi_os_vprintf().
 */
void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
/*
 * vprintf-style output for ACPICA: format into a static buffer and
 * hand it to the kernel debugger (when active) or printk.
 *
 * Fix: the previous vsprintf() call had no bound and could overrun the
 * 512-byte static buffer on a long ACPICA message; vsnprintf() bounds
 * the write and guarantees NUL termination.
 *
 * NOTE(review): the static buffer is shared by all callers with no
 * locking -- presumably callers serialize via ACPICA; verify.
 */
void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsnprintf(buffer, sizeof(buffer), fmt, args);
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		printk(KERN_CONT "%s", buffer);
	}
#else
	printk(KERN_CONT "%s", buffer);
#endif
}
#ifdef CONFIG_KEXEC
/* Physical address of the RSDP passed in by a kexec'ed kernel. */
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	/*
	 * kstrtoul() replaces the deprecated simple_strtoul(): it rejects
	 * trailing garbage (and reports it via early_param) instead of
	 * silently ignoring it.  acpi_rsdp stays 0 on parse failure, the
	 * same effective result as before for malformed input.
	 */
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
/*
 * Locate the ACPI Root System Description Pointer.  Preference order:
 * a kexec-provided address, the EFI configuration tables (ACPI 2.0
 * entry before the 1.0 entry), and finally the legacy memory scan.
 */
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa = 0;

#ifdef CONFIG_KEXEC
	/* A kexec'ed kernel may have been told where the RSDP lives. */
	if (acpi_rsdp)
		return acpi_rsdp;
#endif

	if (!efi_enabled(EFI_CONFIG_TABLES)) {
		/* Legacy BIOS: let ACPICA scan low memory for the RSDP. */
		acpi_find_root_pointer(&pa);
		return pa;
	}

	/* EFI firmware hands us the table address directly. */
	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		return efi.acpi20;
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		return efi.acpi;

	printk(KERN_ERR PREFIX "System description tables not found\n");
	return 0;
}
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
/*
 * Find a tracked mapping that wholly contains [phys, phys + size).
 * Returns the acpi_ioremap descriptor or NULL when the range is not
 * (fully) covered by any existing mapping.
 */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;
	return NULL;
}
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
/*
 * Translate a physical address to its already-mapped virtual address,
 * or NULL when no existing mapping covers the range.
 */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);
	return NULL;
}
/*
 * Look up an existing ACPI mapping for [phys, phys + size) and take a
 * reference on it.  Returns the virtual address, or NULL when the
 * range is not already mapped (this never creates a new mapping).
 * The reference must be dropped with acpi_os_unmap_memory().
 */
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;
	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
/*
 * Reverse lookup: find the tracked mapping whose virtual range wholly
 * contains [virt, virt + size), or NULL.
 */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;
	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;
	return NULL;
}
#ifndef CONFIG_IA64
/* RAM pages are already in the direct map: kmap() them instead of
 * creating a second (possibly conflicting) ioremap mapping. */
#define should_use_kmap(pfn)   page_is_ram(pfn)
#else
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#endif
/*
 * Map one physical range for ACPI use: kmap() for RAM pages (single
 * page only), acpi_os_ioremap() for everything else.
 */
static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn = pg_off >> PAGE_SHIFT;

	if (!should_use_kmap(pfn))
		return acpi_os_ioremap(pg_off, pg_sz);

	/* kmap() can only hand back a single page. */
	if (pg_sz > PAGE_SIZE)
		return NULL;
	return (void __iomem __force *)kmap(pfn_to_page(pfn));
}
/* Undo acpi_map(): kunmap() RAM pages, iounmap() everything else. */
static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn = pg_off >> PAGE_SHIFT;

	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}
/*
 * Map a physical range for ACPICA, page-aligned and refcounted.
 *
 * Before acpi_gbl_permanent_mmap is set (early boot) this falls back
 * to the fixmap-based __acpi_map_table().  Afterwards mappings are
 * tracked on the RCU-protected 'acpi_ioremaps' list so repeated
 * requests for the same range share one mapping.  Each successful
 * call must be balanced by acpi_os_unmap_memory().
 *
 * Returns the virtual address corresponding to 'phys', or NULL on
 * failure.
 */
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;
	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}
	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);
	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}
	/* Round to whole pages; the intra-page offset is re-added below. */
	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}
	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;
	list_add_tail_rcu(&map->list, &acpi_ioremaps);
out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
/*
 * Drop one reference; unlink the mapping from the list when it hits
 * zero.  Caller must hold 'acpi_ioremap_lock'.  The actual unmap is
 * deferred to acpi_os_map_cleanup() so it can happen after an RCU
 * grace period, outside the lock.
 */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}
/*
 * Free an unreferenced mapping.  Must be called WITHOUT
 * 'acpi_ioremap_lock' held: synchronize_rcu() waits for lockless
 * readers (the RCU list walkers above) to finish before unmapping.
 */
static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}
/*
 * Release a mapping obtained from acpi_os_map_memory(): drop one
 * reference and destroy the mapping when the last reference goes.
 * During early boot (no permanent mmap yet) this undoes the fixmap
 * mapping instead.
 */
void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;
	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}
	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	/* Cleanup (with its synchronize_rcu()) runs outside the lock. */
	mutex_unlock(&acpi_ioremap_lock);
	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
/* Early-boot counterpart: only fixmap-based mappings can exist yet. */
void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (!acpi_gbl_permanent_mmap)
		__acpi_unmap_table(virt, size);
}
/*
 * Map the register described by a generic address structure when it
 * lives in system memory space; no-op (success) for other spaces.
 * Returns 0 on success, -EINVAL for an empty descriptor, -EIO when
 * the mapping fails.
 */
int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	void __iomem *virt;
	u64 addr;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* The GAS address field may be unaligned; fetch it bytewise. */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_memory(addr, gas->bit_width / 8);
	return virt ? 0 : -EIO;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);
/*
 * Undo acpi_os_map_generic_address(): drop the reference taken on the
 * system-memory mapping described by 'gas'.  Silently returns when
 * the descriptor is not system memory, is empty, or was never mapped.
 */
void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;
	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;
	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;
	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	/* Cleanup (with its synchronize_rcu()) runs outside the lock. */
	mutex_unlock(&acpi_ioremap_lock);
	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
#ifdef ACPI_FUTURE_USAGE
/*
 * Translate a direct-mapped kernel virtual address to its physical
 * address for ACPICA.
 */
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!virt || !phys)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);
	return AE_OK;
}
#endif
#define ACPI_MAX_OVERRIDE_LEN 100

/* Replacement _OS_ string set via the acpi_os_name= boot parameter. */
static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

/*
 * ACPICA callback letting the OS override predefined namespace
 * objects.  We only override _OS_, and only when the user requested
 * it via acpi_os_name=.
 */
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string * new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;

	if (memcmp(init_val->name, "_OS_", 4) != 0 || !strlen(acpi_os_name))
		return AE_OK;

	printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
	       acpi_os_name);
	*new_val = acpi_os_name;
	return AE_OK;
}
#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
#include <linux/earlycpio.h>
#include <linux/memblock.h>

/* Base of the memblock region holding tables staged from the initrd. */
static u64 acpi_tables_addr;
/* Total bytes of staged override tables at acpi_tables_addr. */
static int all_tables_size;
/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
/*
 * 8-bit additive checksum over 'length' bytes; a valid ACPI table
 * checksums to zero.
 */
u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u32 i;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return sum;
}
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
/* NULL-terminated list of table signatures eligible for initrd override. */
static const char * const table_sigs[] = {
	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };

/* Non-fatal errors: Affected tables/files are ignored */
/* NOTE: expands to 'continue' -- only usable directly inside the scan loop. */
#define INVALID_TABLE(x, path, name)					\
	{ pr_err("ACPI OVERRIDE: " x " [%s%s]\n", path, name); continue; }

#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)

/* Must not increase 10 or needs code modification below */
#define ACPI_OVERRIDE_TABLES 10
/*
 * Scan an uncompressed initrd for files under kernel/firmware/acpi/
 * and stage every valid ACPI table found there in a reserved memblock
 * region, from which acpi_os_physical_table_override() later serves
 * replacements for the firmware tables.
 */
void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;
	struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
	char *p;
	if (data == NULL || size == 0)
		return;
	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;
		data += offset;
		size -= offset;
		/* Reject candidates that cannot be valid tables; each
		 * INVALID_TABLE() logs and continues with the next file. */
		if (file.size < sizeof(struct acpi_table_header))
			INVALID_TABLE("Table smaller than ACPI header",
				      cpio_path, file.name);
		table = file.data;
		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;
		if (!table_sigs[sig])
			INVALID_TABLE("Unknown signature",
				      cpio_path, file.name);
		if (file.size != table->length)
			INVALID_TABLE("File length does not match table length",
				      cpio_path, file.name);
		if (acpi_table_checksum(file.data, table->length))
			INVALID_TABLE("Bad table checksum",
				      cpio_path, file.name);
		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);
		all_tables_size += table->length;
		early_initrd_files[table_nr].data = file.data;
		early_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;
	/* Carve out a permanent home below the mapped-lowmem limit. */
	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Only calling e820_add_reserve does not work and the
	 * tables are invalid (memory got used) later.
	 * memblock_reserve works as expected and the tables won't get modified.
	 * But it's not enough on X86 because ioremap will
	 * complain later (used by acpi_os_map_memory) that the pages
	 * that should get mapped are not marked "reserved".
	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
	 * works fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
	/* Copy the staged tables, back to back, into the reserved area. */
	p = early_ioremap(acpi_tables_addr, all_tables_size);
	for (no = 0; no < table_nr; no++) {
		memcpy(p + total_offset, early_initrd_files[no].data,
		       early_initrd_files[no].size);
		total_offset += early_initrd_files[no].size;
	}
	early_iounmap(p, all_tables_size);
}
#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
/*
 * Warn and taint the kernel whenever a firmware table is replaced,
 * so bug reports from overridden systems are recognizable.
 */
static void acpi_table_taint(struct acpi_table_header *table)
{
	pr_warn(PREFIX
		"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
		table->signature, table->oem_table_id);
	add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
/*
 * ACPICA logical-table-override hook: substitute the DSDT compiled
 * into the kernel (CONFIG_ACPI_CUSTOM_DSDT) for the firmware copy.
 * '*new_table' stays NULL when no override applies.
 */
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (!strncmp(existing_table->signature, "DSDT", 4))
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table)
		acpi_table_taint(existing_table);

	return AE_OK;
}
/*
 * ACPICA physical-table-override hook: serve a replacement for
 * 'existing_table' from the initrd-staged area (see
 * acpi_initrd_override()).  A staged table matches when both its
 * signature and OEM table id equal the firmware table's.  On a match,
 * '*address'/'*table_length' point into the reserved region; both are
 * zero when no override applies.  Always returns AE_OK.
 */
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
				acpi_physical_address *address,
				u32 *table_length)
{
#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
	*table_length = 0;
	*address = 0;
	return AE_OK;
#else
	int table_offset = 0;
	struct acpi_table_header *table;
	*table_length = 0;
	*address = 0;
	if (!acpi_tables_addr)
		return AE_OK;
	/* Walk the back-to-back staged tables. */
	do {
		if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
			WARN_ON(1);
			return AE_OK;
		}
		/* NOTE(review): the map result is dereferenced without a
		 * NULL check -- looks unsafe if the mapping fails; verify. */
		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
					   ACPI_HEADER_SIZE);
		if (table_offset + table->length > all_tables_size) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			WARN_ON(1);
			return AE_OK;
		}
		table_offset += table->length;
		if (memcmp(existing_table->signature, table->signature, 4)) {
			acpi_os_unmap_memory(table,
				     ACPI_HEADER_SIZE);
			continue;
		}
		/* Only override tables with matching oem id */
		if (memcmp(table->oem_table_id, existing_table->oem_table_id,
			   ACPI_OEM_TABLE_ID_SIZE)) {
			acpi_os_unmap_memory(table,
				     ACPI_HEADER_SIZE);
			continue;
		}
		/* Match: rewind to the start of this table and report it. */
		table_offset -= table->length;
		*table_length = table->length;
		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
		*address = acpi_tables_addr + table_offset;
		break;
	} while (table_offset + ACPI_HEADER_SIZE < all_tables_size);
	if (*address != 0)
		acpi_table_taint(existing_table);
	return AE_OK;
#endif
}
/*
 * SCI interrupt entry point: forward to the ACPICA handler installed
 * via acpi_os_install_interrupt_handler() and keep the
 * handled/spurious counters up to date.
 */
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled = (*acpi_irq_handler) (acpi_irq_context);

	if (!handled) {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}

	acpi_irq_handled++;
	return IRQ_HANDLED;
}
/*
 * Install the ACPICA SCI handler.  Only the FADT's SCI interrupt is
 * supported and only one handler may be installed at a time.  Note
 * the quirk: failure to translate the GSI returns AE_OK (boot
 * proceeds without an SCI), while failure to request the IRQ returns
 * AE_NOT_ACQUIRED.
 */
acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;
	acpi_irq_stats_init();
	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;
	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;
	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}
	acpi_irq_handler = handler;
	acpi_irq_context = context;
	/* IRQF_NO_SUSPEND: the SCI must stay armed for wakeup events. */
	if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	return AE_OK;
}
/*
 * Remove the SCI handler installed above.  'irq' must be the FADT SCI
 * interrupt; the 'handler' argument is unused.
 */
acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
	if (irq != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;
	free_irq(irq, acpi_irq);
	acpi_irq_handler = NULL;
	return AE_OK;
}
/*
 * Running in interpreter thread context, safe to sleep
 */
/*
 * Sleep for 'ms' milliseconds on behalf of AML.
 * NOTE(review): schedule_timeout_interruptible() can return early if a
 * signal is pending, shortening the AML-requested delay -- presumably
 * acceptable here; verify against callers.
 */
void acpi_os_sleep(u64 ms)
{
	schedule_timeout_interruptible(msecs_to_jiffies(ms));
}
/*
 * Busy-wait for 'us' microseconds on behalf of AML (Stall opcode).
 * The wait is chopped into chunks of at most 1 ms so the NMI watchdog
 * can be touched between chunks.
 */
void acpi_os_stall(u32 us)
{
	while (us) {
		u32 chunk = (us > 1000) ? 1000 : us;

		udelay(chunk);
		touch_nmi_watchdog();
		us -= chunk;
	}
}
/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
/*
 * Placeholder implementation: returns a simple monotonically
 * increasing counter, NOT a real 100ns clock (see the TBDs below).
 * Logs an error on first use so the gap is visible.
 */
u64 acpi_os_get_timer(void)
{
	static u64 t;
#ifdef	CONFIG_HPET
	/* TBD: use HPET if available */
#endif
#ifdef	CONFIG_X86_PM_TIMER
	/* TBD: default to PM timer if HPET was not available */
#endif
	if (!t)
		printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
	return ++t;
}
/*
 * Read an I/O port for ACPICA.  'width' is in bits and is rounded up
 * to the next supported access size (8/16/32); anything wider is a
 * bug.  A NULL 'value' still performs the read cycle.
 */
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8)
		*(u8 *) value = inb(port);
	else if (width <= 16)
		*(u16 *) value = inw(port);
	else if (width <= 32)
		*(u32 *) value = inl(port);
	else
		BUG();

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_read_port);
/*
 * Write an I/O port for ACPICA.  'width' is in bits and is rounded up
 * to the next supported access size (8/16/32); anything wider is a bug.
 */
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8)
		outb(value, port);
	else if (width <= 16)
		outw(value, port);
	else if (width <= 32)
		outl(value, port);
	else
		BUG();

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_write_port);
#ifdef readq
/* 64-bit MMIO read; single atomic access where readq exists. */
static inline u64 read64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#else
/*
 * Fallback for platforms without readq: two 32-bit reads, low word
 * first.  NOTE(review): this is not atomic -- the device may observe
 * two accesses and the value can tear if the register changes between
 * reads; presumably acceptable for the registers ACPI touches.
 */
static inline u64 read64(const volatile void __iomem *addr)
{
	u64 l, h;
	l = readl(addr);
	h = readl(addr+4);
	return l | (h << 32);
}
#endif
/*
 * Read 'width' bits (8/16/32/64 only, otherwise BUG) from physical
 * memory for ACPICA.  An existing tracked mapping is used under the
 * RCU read lock when available; otherwise a transient ioremap is
 * created and torn down around the access.  A NULL 'value' still
 * performs the read.
 */
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		/* No tracked mapping: drop RCU before sleeping in ioremap. */
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}
	if (!value)
		value = &dummy;
	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = read64(virt_addr);
		break;
	default:
		BUG();
	}
	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();
	return AE_OK;
}
#ifdef writeq
/* 64-bit MMIO write; single atomic access where writeq exists. */
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writeq(val, addr);
}
#else
/*
 * Fallback for platforms without writeq: two 32-bit writes, low word
 * first.  NOTE(review): not atomic -- the device briefly sees a
 * half-updated value; presumably acceptable for ACPI registers.
 */
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val>>32, addr+4);
}
#endif
/*
 * Write 'width' bits (8/16/32/64 only, otherwise BUG) to physical
 * memory for ACPICA.  Mirrors acpi_os_read_memory(): reuses a tracked
 * mapping under the RCU read lock when possible, else creates a
 * transient ioremap around the access.
 */
acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		/* No tracked mapping: drop RCU before sleeping in ioremap. */
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}
	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		write64(value, virt_addr);
		break;
	default:
		BUG();
	}
	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();
	return AE_OK;
}
/*
 * Read PCI configuration space for ACPICA.  Only 1-, 2- and 4-byte
 * accesses are supported; the result is zero-extended into '*value'.
 */
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	u32 value32;
	int size, result;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return result ? AE_ERROR : AE_OK;
}
/*
 * Write PCI configuration space for ACPICA.  Only 1-, 2- and 4-byte
 * accesses are supported.
 */
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int size, result;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return result ? AE_ERROR : AE_OK;
}
/*
 * Workqueue trampoline: optionally wait for pending ACPI events (set
 * for hotplug work), run the deferred callback, then free the DPC
 * allocated by __acpi_os_execute().
 */
static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
	if (dpc->wait)
		acpi_os_wait_events_complete();
	dpc->function(dpc->context);
	kfree(dpc);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/
/*
 * Queue 'function(context)' on one of the ACPI workqueues:
 * kacpi_hotplug_wq when 'hp' is set, kacpi_notify_wq for notify
 * handlers, kacpid_wq otherwise.  GFP_ATOMIC because this can be
 * called from interrupt context.
 */
static acpi_status __acpi_os_execute(acpi_execute_type type,
	acpi_osd_exec_callback function, void *context, int hp)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));
	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list  in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */
	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;
	dpc->function = function;
	dpc->context = context;
	/*
	 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
	 * because the hotplug code may call driver .remove() functions,
	 * which invoke flush_scheduled_work/acpi_os_wait_events_complete
	 * to flush these workqueues.
	 *
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (hp) {
		queue = kacpi_hotplug_wq;
		dpc->wait = 1;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	}
	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
		kfree(dpc);
	}
	return status;
}
/* Public non-hotplug entry point; see __acpi_os_execute(). */
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	return __acpi_os_execute(type, function, context, 0);
}
EXPORT_SYMBOL(acpi_os_execute);
/* Hotplug entry point: runs on kacpi_hotplug_wq with event draining. */
acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
	void *context)
{
	return __acpi_os_execute(0, function, context, 1);
}
EXPORT_SYMBOL(acpi_os_hotplug_execute);
/*
 * Drain the main and notify ACPI workqueues.  Deliberately does NOT
 * flush kacpi_hotplug_wq (hotplug work itself calls this; see the
 * deadlock comment in __acpi_os_execute()).
 */
void acpi_os_wait_events_complete(void)
{
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);
/*
 * Create a counting semaphore for ACPICA.
 * @max_units:     maximum count (unused; Linux semaphores are unbounded)
 * @initial_units: initial count
 * @handle:        out: opaque handle (a struct semaphore *)
 *
 * Returns AE_OK, or AE_NO_MEMORY when allocation fails.  The handle
 * must be released with acpi_os_delete_semaphore().
 */
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem;

	sem = acpi_os_allocate(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	/*
	 * sema_init() assigns every field of the semaphore, so the
	 * memset() the old code did first was redundant.
	 */
	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));
	return AE_OK;
}
/*
 * TODO: A better way to delete semaphores? Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.	Should
 * we at least check for blocked threads and signal/cancel them?
 */
/*
 * Destroy a semaphore created by acpi_os_create_semaphore().
 * BUG()s if any task is still blocked on it (non-empty wait list).
 */
acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;
	if (!sem)
		return AE_BAD_PARAMETER;
	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;	/* local only; documents the handle is now invalid */
	return AE_OK;
}
/*
 * TODO: Support for units > 1?
 */
/*
 * Acquire one unit of an ACPICA semaphore, waiting up to 'timeout'
 * milliseconds (ACPI_WAIT_FOREVER blocks indefinitely).  Returns
 * AE_TIME when the wait expires.
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	struct semaphore *sem = (struct semaphore *)handle;
	acpi_status status = AE_OK;
	long jiffies;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;
	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	jiffies = (timeout == ACPI_WAIT_FOREVER) ?
		MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(timeout);

	if (down_timeout(sem, jiffies))
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}
/*
 * TODO: Support for units > 1?
 */
/* Release one unit back to an ACPICA semaphore. */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;
	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;
	if (units > 1)
		return AE_SUPPORT;
	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));
	up(sem);
	return AE_OK;
}
#ifdef ACPI_FUTURE_USAGE
/*
 * Read a line of input for the ACPICA debugger from kdb; outside the
 * debugger the buffer is left untouched.  Always returns 0.
 * NOTE(review): kdb_read() is bounded by sizeof(line_buf), a kdb
 * buffer, not the caller's buffer size -- verify callers pass a
 * buffer at least that large.  Also, if the buffer comes back empty,
 * 'strlen(buffer) - 1' underflows -- presumably kdb always includes
 * the CR; verify.
 */
u32 acpi_os_get_line(char *buffer)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;
		kdb_read(buffer, sizeof(line_buf));
		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#endif
	return 0;
}
#endif				/*  ACPI_FUTURE_USAGE  */
/*
 * Handle AML Fatal and Breakpoint signals from ACPICA.  Fatal opcodes
 * are logged; breakpoints are a deliberate NOP (see below).  Unknown
 * signals are ignored.
 */
acpi_status acpi_os_signal(u32 function, void *info)
{
	if (function == ACPI_SIGNAL_FATAL) {
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
	} else if (function == ACPI_SIGNAL_BREAKPOINT) {
		/*
		 * AML breakpoint: the ACPI spec says to treat it as a NOP
		 * unless you are debugging.  If/when an AML debugger is
		 * integrated into the kernel debugger its hook goes here;
		 * until then printing anything is just noise.
		 */
	}

	return AE_OK;
}
/*
 * acpi_os_name= boot parameter: sanitize the user string into
 * acpi_os_name[].  Alphanumerics, spaces and colons are copied,
 * quotes are skipped, and any other character ends the copy.
 */
static int __init acpi_os_name_setup(char *str)
{
	char *dst = acpi_os_name;
	int room = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	while (room-- && *str) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*dst++ = *str;
		else if (*str != '\'' && *str != '"')
			break;
		str++;
	}
	*dst = 0;

	return 1;
}
__setup("acpi_os_name=", acpi_os_name_setup);
#define OSI_STRING_LENGTH_MAX 64	/* arbitrary */
#define OSI_STRING_ENTRIES_MAX 16	/* arbitrary */

/* One requested _OSI string and whether to install or remove it. */
struct osi_setup_entry {
	char string[OSI_STRING_LENGTH_MAX];
	bool enable;
};

/*
 * _OSI strings queued for acpi_osi_setup_late(); pre-seeded with the
 * interfaces Linux always advertises.  Filled front to back, so the
 * first empty slot terminates the list.
 */
static struct osi_setup_entry __initdata
		osi_setup_entries[OSI_STRING_ENTRIES_MAX] = {
	{"Module Device", true},
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
};
/*
 * Queue an _OSI string change for acpi_osi_setup_late().
 *
 * NULL/empty string disables the _OSI method entirely; a leading '!'
 * marks the string for removal instead of installation.  An existing
 * entry for the same string is updated in place; otherwise the first
 * free slot is used (silently dropped when the table is full).
 */
void __init acpi_osi_setup(char *str)
{
	struct osi_setup_entry *osi;
	bool enable = true;
	int i;

	if (!acpi_gbl_create_osi_method)
		return;

	if (str == NULL || *str == '\0') {
		printk(KERN_INFO PREFIX "_OSI method disabled\n");
		acpi_gbl_create_osi_method = FALSE;
		return;
	}

	if (*str == '!') {
		str++;
		enable = false;
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		if (!strcmp(osi->string, str)) {
			osi->enable = enable;
			break;
		} else if (osi->string[0] == '\0') {
			osi->enable = enable;
			/*
			 * strlcpy() guarantees NUL termination; the old
			 * strncpy() could leave the entry unterminated when
			 * str was OSI_STRING_LENGTH_MAX chars or longer.
			 */
			strlcpy(osi->string, str, OSI_STRING_LENGTH_MAX);
			break;
		}
	}
}
/*
 * Record the _OSI(Linux) decision and queue the matching "Linux" /
 * "!Linux" string change.
 */
static void __init set_osi_linux(unsigned int enable)
{
	if (osi_linux.enable != enable)
		osi_linux.enable = enable;

	acpi_osi_setup(osi_linux.enable ? "Linux" : "!Linux");
}
/* Command-line acpi_osi=[!]Linux: takes precedence over DMI quirks. */
static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
	osi_linux.cmdline = 1;	/* cmdline set default and override DMI */
	osi_linux.dmi = 0;
	set_osi_linux(enable);

	return;
}

/*
 * DMI quirk callback: enable/disable _OSI(Linux) for a known machine.
 * 'enable == -1' only logs the DMI match without changing anything.
 */
void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
	if (enable == -1)
		return;
	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */
	set_osi_linux(enable);

	return;
}
/*
 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
 *
 * empty string disables _OSI
 * string starting with '!' disables that string
 * otherwise string is added to list, augmenting built-in strings
 */
static void __init acpi_osi_setup_late(void)
{
	acpi_status status;
	int i;

	/* Entries are packed from the front; the first hole ends the list. */
	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		struct osi_setup_entry *entry = &osi_setup_entries[i];
		char *str = entry->string;

		if (*str == '\0')
			break;

		if (entry->enable) {
			status = acpi_install_interface(str);
			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
		} else {
			status = acpi_remove_interface(str);
			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
		}
	}
}
/*
 * acpi_osi= boot parameter: "Linux"/"!Linux" toggle the special
 * OSI(Linux) handling; anything else is queued verbatim via
 * acpi_osi_setup().
 */
static int __init osi_setup(char *str)
{
	if (str && !strcmp("Linux", str))
		acpi_cmdline_osi_linux(1);
	else if (str && !strcmp("!Linux", str))
		acpi_cmdline_osi_linux(0);
	else
		acpi_osi_setup(str);

	return 1;
}

__setup("acpi_osi=", osi_setup);

/* enable serialization to combat AE_ALREADY_EXISTS errors */
/* acpi_serialize boot parameter: run all AML methods serialized. */
static int __init acpi_serialize_setup(char *str)
{
	printk(KERN_INFO PREFIX "serialize enabled\n");

	acpi_gbl_all_methods_serialized = TRUE;

	return 1;
}

__setup("acpi_serialize", acpi_serialize_setup);
/* Check of resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

/* acpi_enforce_resources= boot parameter; unknown values are ignored. */
static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
/*
 * Returns -EBUSY when 'res' overlaps an ACPI operation region and
 * enforcement is strict; 0 otherwise (possibly after logging warnings
 * in lax mode).  Only I/O and memory resources are checked.
 */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;
	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;
	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);
	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);
/*
 * Convenience wrapper: check an I/O port range [start, start + n)
 * against ACPI operation regions.  See
 * acpi_check_resource_conflict() for the return semantics.
 */
int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};
	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
/*
 * Let drivers know whether the resource checks are effective
 */
/* Returns nonzero iff strict enforcement is active. */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */
/* Returns the saved IRQ flags; pass them back to acpi_os_release_lock(). */
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */
void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}
#ifndef ACPI_USE_LOCAL_CACHE
/*******************************************************************************
*
* FUNCTION: acpi_os_create_cache
*
* PARAMETERS: name - Ascii name for the cache
* size - Size of each cached object
* depth - Maximum depth of the cache (in objects) <ignored>
* cache - Where the new cache object is returned
*
* RETURN: status
*
* DESCRIPTION: Create a cache object
*
******************************************************************************/
/*
 * Create a slab cache for fixed-size ACPICA objects.  The depth
 * parameter is ignored; the kernel slab allocator manages its own
 * depth.  Returns AE_OK on success, AE_ERROR when the cache could not
 * be created.
 */
acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	return *cache ? AE_OK : AE_ERROR;
}
/*******************************************************************************
*
* FUNCTION: acpi_os_purge_cache
*
* PARAMETERS: Cache - Handle to cache object
*
* RETURN: Status
*
* DESCRIPTION: Free all objects within the requested cache.
*
******************************************************************************/
acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	/* release unused slab pages back to the page allocator */
	kmem_cache_shrink(cache);
	return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_delete_cache
*
* PARAMETERS: Cache - Handle to cache object
*
* RETURN: Status
*
* DESCRIPTION: Free all objects within the requested cache and delete the
* cache object.
*
******************************************************************************/
acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	/* all objects must already be released before destroying the cache */
	kmem_cache_destroy(cache);
	return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_release_object
*
* PARAMETERS: Cache - Handle to cache object
* Object - The object to be released
*
* RETURN: None
*
* DESCRIPTION: Release an object to the specified cache. If cache is full,
* the object is deleted.
*
******************************************************************************/
acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	/* return the object to the slab cache; slab handles pooling */
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif
/*
 * First-stage OSL init: map the FADT-described PM1 event and GPE
 * register blocks so the ACPICA core can access them.  Unmapped in
 * reverse order by acpi_os_terminate().
 */
acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);

	return AE_OK;
}
/*
 * Second-stage OSL init: create the ACPI work queues and register the
 * _OSI interface handler.  Failure to allocate any workqueue is fatal
 * (BUG_ON), since the ACPI core cannot operate without them.
 */
acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}
/*
 * Tear down OSL state: remove the SCI interrupt handler if one was
 * installed, unmap the FADT register blocks (reverse order of
 * acpi_os_initialize()) and destroy the ACPI work queues.
 */
acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}
/*
 * Give the registered platform hook a chance to intercept sleep-state
 * entry.  A negative hook return maps to AE_ERROR, a positive return
 * to AE_CTRL_SKIP (skip the default transition), zero to AE_OK.
 */
acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc;

	if (!__acpi_os_prepare_sleep)
		return AE_OK;

	rc = __acpi_os_prepare_sleep(sleep_state, pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}
/*
 * Install the hook that acpi_os_prepare_sleep() invokes before a
 * sleep-state transition; pass NULL to remove it.
 */
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
/*
 * Allocate and queue an ACPI hotplug work item on the dedicated
 * hotplug workqueue.  On allocation failure the request is silently
 * dropped.  NOTE(review): the allocation is presumably freed by the
 * work function after it runs -- confirm against the handlers.
 */
void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
			void (*func)(struct work_struct *work))
{
	struct acpi_hp_work *hpw;

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return;

	hpw->handle = handle;
	hpw->type = type;
	hpw->context = context;

	INIT_WORK(&hpw->work, func);
	if (!queue_work(kacpi_hotplug_wq, &hpw->work))
		kfree(hpw);	/* work was already queued; drop our copy */
}
EXPORT_SYMBOL_GPL(alloc_acpi_hp_work);
| gpl-2.0 |
gchild320/shamu | drivers/scsi/bfa/bfa_svc.c | 2281 | 156007 | /*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_plog.h"
#include "bfa_cs.h"
#include "bfa_modules.h"
BFA_TRC_FILE(HAL, FCXP);
BFA_MODULE(fcdiag);
BFA_MODULE(fcxp);
BFA_MODULE(sgpg);
BFA_MODULE(lps);
BFA_MODULE(fcport);
BFA_MODULE(rport);
BFA_MODULE(uf);
/*
 * LPS related definitions
 */
#define BFA_LPS_MIN_LPORTS	(1)
#define BFA_LPS_MAX_LPORTS	(256)

/*
 * Maximum Vports supported per physical port or vf.
 */
#define BFA_LPS_MAX_VPORTS_SUPP_CB	255
#define BFA_LPS_MAX_VPORTS_SUPP_CT	190

/*
 * FC PORT related definitions
 */
/*
 * The port is considered disabled if the corresponding physical port or
 * IOC has been disabled explicitly.
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))

/*
 * BFA port state machine events (fed to the bfa_fcport_sm_* handlers)
 */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START = 1,	/* start port state machine */
	BFA_FCPORT_SM_STOP = 2,		/* stop port state machine */
	BFA_FCPORT_SM_ENABLE = 3,	/* enable port */
	BFA_FCPORT_SM_DISABLE = 4,	/* disable port state machine */
	BFA_FCPORT_SM_FWRSP = 5,	/* firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP = 6,	/* firmware linkup event */
	BFA_FCPORT_SM_LINKDOWN = 7,	/* firmware link down event */
	BFA_FCPORT_SM_QRESUME = 8,	/* CQ space available */
	BFA_FCPORT_SM_HWFAIL = 9,	/* IOC h/w failure */
	BFA_FCPORT_SM_DPORTENABLE = 10,	/* enable dport */
	BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */
	BFA_FCPORT_SM_FAA_MISCONFIG = 12,	/* FAA misconfiguration */
};

/*
 * BFA port link notification state machine events
 */
enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP = 1,	/* linkup event */
	BFA_FCPORT_LN_SM_LINKDOWN = 2,	/* linkdown event */
	BFA_FCPORT_LN_SM_NOTIFICATION = 3	/* done notification */
};

/*
 * RPORT related definitions
 *
 * Deliver rport offline/online callbacks: directly when bfa->fcs is
 * set, otherwise deferred through the completion-callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)

#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));		\
	}								\
} while (0)
/*
* forward declarations FCXP related functions
*/
static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
struct bfi_fcxp_send_rsp_s *fcxp_rsp);
static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
static void bfa_fcxp_qresume(void *cbarg);
static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
struct bfi_fcxp_send_req_s *send_req);
/*
* forward declarations for LPS functions
*/
static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev);
static void bfa_lps_detach(struct bfa_s *bfa);
static void bfa_lps_start(struct bfa_s *bfa);
static void bfa_lps_stop(struct bfa_s *bfa);
static void bfa_lps_iocdisable(struct bfa_s *bfa);
static void bfa_lps_login_rsp(struct bfa_s *bfa,
struct bfi_lps_login_rsp_s *rsp);
static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
static void bfa_lps_logout_rsp(struct bfa_s *bfa,
struct bfi_lps_logout_rsp_s *rsp);
static void bfa_lps_reqq_resume(void *lps_arg);
static void bfa_lps_free(struct bfa_lps_s *lps);
static void bfa_lps_send_login(struct bfa_lps_s *lps);
static void bfa_lps_send_logout(struct bfa_lps_s *lps);
static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
static void bfa_lps_login_comp(struct bfa_lps_s *lps);
static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
/*
* forward declaration for LPS state machine
*/
static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
event);
static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
enum bfa_lps_event event);
static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
event);
/*
* forward declaration for FC Port functions
*/
static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
enum bfa_port_linkstate event, bfa_boolean_t trunk);
static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
enum bfa_port_linkstate event);
static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_stats_get_timeout(void *cbarg);
static void bfa_fcport_stats_clr_timeout(void *cbarg);
static void bfa_trunk_iocdisable(struct bfa_s *bfa);
/*
* forward declaration for FC PORT state machine
*/
static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
enum bfa_fcport_ln_sm_event event);
static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
enum bfa_fcport_ln_sm_event event);
static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
enum bfa_fcport_ln_sm_event event);
static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
enum bfa_fcport_ln_sm_event event);
static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
enum bfa_fcport_ln_sm_event event);
static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
enum bfa_fcport_ln_sm_event event);
static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
enum bfa_fcport_ln_sm_event event);
/*
 * Lookup table mapping each fcport state-machine handler to the
 * externally visible bfa_port_* state it represents.
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
	{BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
};
/*
* forward declaration for RPORT related functions
*/
static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
static void bfa_rport_free(struct bfa_rport_s *rport);
static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
static void __bfa_cb_rport_online(void *cbarg,
bfa_boolean_t complete);
static void __bfa_cb_rport_offline(void *cbarg,
bfa_boolean_t complete);
/*
* forward declaration for RPORT state machine
*/
static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_created(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_online(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
enum bfa_rport_event event);
/*
* PLOG related definitions
*/
/*
 * Validate a port-log record before it is copied into the ring buffer.
 * Returns 1 if the record is malformed, 0 if it is acceptable.
 */
static int
plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
{
	/* only INT and STRING record types are accepted */
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
		return 1;

	/*
	 * NOTE(review): this guard bounds log_num_ints only for non-INT
	 * (i.e. STRING) records, yet INT records are the ones carrying an
	 * int array.  The first "!=" looks like it was meant to be "==" --
	 * confirm intent before changing, as callers may rely on the
	 * current behavior.
	 */
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
		return 1;

	return 0;
}
static u64
bfa_get_log_time(void)
{
u64 system_time = 0;
struct timeval tv;
do_gettimeofday(&tv);
/* We are interested in seconds only. */
system_time = tv.tv_sec;
return system_time;
}
/*
 * Append one validated record to the circular port log.  When the ring
 * wraps (head catches tail) the oldest record is dropped by advancing
 * head, so the buffer always holds the most recent entries.
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		WARN_ON(1);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	/* stamp the stored copy with a seconds-resolution time */
	pl_recp->tv = bfa_get_log_time();
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* ring full: discard the oldest record */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
/* Reset a port log to empty and mark it enabled. */
void
bfa_plog_init(struct bfa_plog_s *plog)
{
	memset(plog, 0, sizeof(*plog));

	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
	plog->head = 0;
	plog->tail = 0;
	plog->plog_enabled = 1;
}
/* Add a string-type record to the port log (truncated and always
 * NUL-terminated at BFA_PL_STRING_LOG_SZ). */
void
bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, char *log_str)
{
	struct bfa_plog_rec_s rec;

	if (!plog->plog_enabled)
		return;

	memset(&rec, 0, sizeof(rec));
	rec.mid = mid;
	rec.eid = event;
	rec.log_type = BFA_PL_LOG_TYPE_STRING;
	rec.misc = misc;
	strncpy(rec.log_entry.string_log, log_str,
		BFA_PL_STRING_LOG_SZ - 1);
	rec.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';

	bfa_plog_add(plog, &rec);
}
/* Add an integer-array record to the port log; the array is clamped to
 * BFA_PL_INT_LOG_SZ entries. */
void
bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, u32 *intarr, u32 num_ints)
{
	struct bfa_plog_rec_s rec;
	u32 i;

	if (!plog->plog_enabled)
		return;

	if (num_ints > BFA_PL_INT_LOG_SZ)
		num_ints = BFA_PL_INT_LOG_SZ;

	memset(&rec, 0, sizeof(rec));
	rec.mid = mid;
	rec.eid = event;
	rec.log_type = BFA_PL_LOG_TYPE_INT;
	rec.misc = misc;

	for (i = 0; i < num_ints; i++)
		rec.log_entry.int_log[i] = intarr[i];

	rec.log_num_ints = (u8) num_ints;

	bfa_plog_add(plog, &rec);
}
/*
 * Log three words of a raw FC header (words 0, 1 and 4) as an
 * integer-array port-log record.
 *
 * Fix: removed the unused local 'lp' and its dead memset -- the record
 * is built entirely inside bfa_plog_intarr().
 */
void
bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
			enum bfa_plog_eid event,
			u16 misc, struct fchs_s *fchdr)
{
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];

		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
	}
}
/*
 * Log three words of a raw FC header (words 0, 1 and 4) plus the first
 * payload word as an integer-array port-log record.
 *
 * Fix: removed the unused local 'lp' and its dead memset -- the record
 * is built entirely inside bfa_plog_intarr().
 */
void
bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
			enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
			u32 pld_w0)
{
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];
		ints[3] = pld_w0;

		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
	}
}
/*
* fcxp_pvt BFA FCXP private functions
*/
static void
claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
{
u16 i;
struct bfa_fcxp_s *fcxp;
fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
INIT_LIST_HEAD(&mod->fcxp_req_free_q);
INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
INIT_LIST_HEAD(&mod->fcxp_active_q);
INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
mod->fcxp_list = fcxp;
for (i = 0; i < mod->num_fcxps; i++) {
fcxp->fcxp_mod = mod;
fcxp->fcxp_tag = i;
if (i < (mod->num_fcxps / 2)) {
list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
fcxp->req_rsp = BFA_TRUE;
} else {
list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
fcxp->req_rsp = BFA_FALSE;
}
bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
fcxp->reqq_waiting = BFA_FALSE;
fcxp = fcxp + 1;
}
bfa_mem_kva_curp(mod) = (void *)fcxp;
}
/*
 * Report the DMA and KVA memory the FCXP module needs for the given
 * configuration.  DMA payload buffers are sized per-fcxp (small
 * request + small/large response depending on min_cfg) and spread
 * across segments; KVA holds the bfa_fcxp_s array itself.
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
/*
 * Attach the FCXP module: record configuration, set payload buffer
 * sizes and claim the pre-allocated fcxp memory.
 */
static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	mod->bfa = bfa;
	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

	/*
	 * Request payloads always use the small inline buffer; a full
	 * (non-minimal) configuration gets the large response buffer.
	 */
	mod->req_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
	mod->rsp_pld_sz = cfg->drvcfg.min_cfg ?
			BFA_FCXP_MAX_IBUF_SZ : BFA_FCXP_MAX_LBUF_SZ;

	INIT_LIST_HEAD(&mod->req_wait_q);
	INIT_LIST_HEAD(&mod->rsp_wait_q);

	claim_fcxps_mem(mod);
}
/* No-op: the FCXP module has nothing to release on detach. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
/* No-op: the FCXP module requires no work at start. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
/* No-op: the FCXP module requires no work at stop. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
/*
 * IOC failure handling: return unused fcxps to the free pools and
 * complete every active fcxp with BFA_STATUS_IOC_FAILURE -- directly
 * when there is no caller context, otherwise via the callback queue.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head *qe, *qen;

	/* Enqueue unused fcxp resources to free_q */
	list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
	list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
/*
 * Pop an fcxp from the request or response free pool and move it to
 * the active queue.  Returns NULL when the pool is empty.
 */
static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
{
	struct bfa_fcxp_s *fcxp;
	struct list_head *free_q;

	free_q = req ? &fm->fcxp_req_free_q : &fm->fcxp_rsp_free_q;
	bfa_q_deq(free_q, &fcxp);

	if (fcxp)
		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);

	return fcxp;
}
/*
 * Initialize one direction (request or response) of an fcxp's SG
 * bookkeeping.  A zero SG count selects the internal buffer
 * (*use_ibuf = 1); otherwise the caller's address/length callbacks are
 * recorded.  Only inline SG lists (<= BFI_SGE_INLINE) are supported.
 *
 * Fix: compare the callback function pointers themselves; the original
 * dereferenced them (*sga_cbfn == NULL), which for function pointers is
 * equivalent but misleading.
 */
static void
bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
	       struct bfa_s *bfa,
	       u8 *use_ibuf,
	       u32 *nr_sgles,
	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
	       struct list_head *r_sgpg_q,
	       int n_sgles,
	       bfa_fcxp_get_sgaddr_t sga_cbfn,
	       bfa_fcxp_get_sglen_t sglen_cbfn)
{
	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	if (n_sgles == 0) {
		*use_ibuf = 1;
	} else {
		WARN_ON(sga_cbfn == NULL);
		WARN_ON(sglen_cbfn == NULL);

		*use_ibuf = 0;
		*r_sga_cbfn = sga_cbfn;
		*r_sglen_cbfn = sglen_cbfn;

		*nr_sgles = n_sgles;

		/*
		 * alloc required sgpgs
		 */
		if (n_sgles > BFI_SGE_INLINE)
			WARN_ON(1);
	}
}
/*
 * Initialize an fcxp for a new exchange: record the caller context and
 * set up SG bookkeeping for both the request and response directions
 * (zero SG counts select the internal buffers).
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	      void *caller, struct bfa_s *bfa, int nreq_sgles,
	      int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	      bfa_fcxp_get_sglen_t req_sglen_cbfn,
	      bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	      bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
}
/*
 * Return an fcxp to its free pool.  If an allocation request is
 * waiting on the matching wait queue, the fcxp is re-initialized and
 * handed to that waiter instead of being freed.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	if (fcxp->req_rsp)
		bfa_q_deq(&mod->req_wait_q, &wqe);
	else
		bfa_q_deq(&mod->rsp_wait_q, &wqe);

	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);

		/* hand the fcxp straight to the waiter */
		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);

	if (fcxp->req_rsp)
		list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
	else
		list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
}
/*
 * Stand-in completion used when the sender supplied no callback; the
 * response is simply discarded.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
/*
 * Deferred completion for an fcxp: invoke the sender's callback with
 * the stored response, or just free the fcxp when the callback is
 * being canceled (complete == FALSE).
 */
static void
__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcxp_s *fcxp = cbarg;

	if (!complete) {
		bfa_fcxp_free(fcxp);
		return;
	}

	fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
			fcxp->rsp_status, fcxp->rsp_len,
			fcxp->residue_len, &fcxp->rsp_fchs);
}
/*
 * Firmware completion of an FCXP send: locate the fcxp by tag, fix up
 * byte order on the response fields, then either invoke the completion
 * callback directly (no caller context) or stash the response in the
 * fcxp and defer the callback through the completion queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16	fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 * is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
/*
 * Trace a transmitted FCXP frame in the port log; include the first
 * payload word when the internal request buffer is in use.
 */
static void
hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
		 struct fchs_s *fchs)
{
	/*
	 * TODO: TX ox_id
	 */
	u16 len = reqlen + sizeof(struct fchs_s);

	/* the two no-payload cases logged identical records; merged */
	if (reqlen > 0 && fcxp->use_ireqbuf) {
		u32 pld_w0 = *((u32 *) BFA_FCXP_REQ_PLD(fcxp));

		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
				      BFA_PL_EID_TX, len, fchs, pld_w0);
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
			       BFA_PL_EID_TX, len, fchs);
	}
}
/*
 * Trace a received FCXP response in the port log; include the first
 * payload word when the internal response buffer is in use.
 */
static void
hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	/* the two no-payload cases logged identical records; merged */
	if (fcxp_rsp->rsp_len > 0 && fcxp->use_irspbuf) {
		u32 pld_w0 = *((u32 *) BFA_FCXP_RSP_PLD(fcxp));

		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
				      BFA_PL_EID_RX,
				      (u16) fcxp_rsp->rsp_len,
				      &fcxp_rsp->fchs, pld_w0);
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
			       BFA_PL_EID_RX,
			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
	}
}
/*
 * Handler to resume sending an fcxp when space becomes available in
 * the CPE queue.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s	*fcxp = cbarg;
	struct bfa_s		*bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s *send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	/* queue space was just signalled, so a request entry is available */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}
/*
* Queue fcxp send request to foimrware.
*/
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
struct bfa_rport_s *rport = reqi->bfa_rport;
bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
bfa_fn_lpu(bfa));
send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
if (rport) {
send_req->rport_fw_hndl = rport->fw_handle;
send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
if (send_req->max_frmsz == 0)
send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
} else {
send_req->rport_fw_hndl = 0;
send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
}
send_req->vf_id = cpu_to_be16(reqi->vf_id);
send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
send_req->class = reqi->class;
send_req->rsp_timeout = rspi->rsp_timeout;
send_req->cts = reqi->cts;
send_req->fchs = reqi->fchs;
send_req->req_len = cpu_to_be32(reqi->req_tot_len);
send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
/*
* setup req sgles
*/
if (fcxp->use_ireqbuf == 1) {
bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
BFA_FCXP_REQ_PLD_PA(fcxp));
} else {
if (fcxp->nreq_sgles > 0) {
WARN_ON(fcxp->nreq_sgles != 1);
bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
fcxp->req_sga_cbfn(fcxp->caller, 0));
} else {
WARN_ON(reqi->req_tot_len != 0);
bfa_alen_set(&send_req->rsp_alen, 0, 0);
}
}
/*
* setup rsp sgles
*/
if (fcxp->use_irspbuf == 1) {
WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
BFA_FCXP_RSP_PLD_PA(fcxp));
} else {
if (fcxp->nrsp_sgles > 0) {
WARN_ON(fcxp->nrsp_sgles != 1);
bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
fcxp->rsp_sga_cbfn(fcxp->caller, 0));
} else {
WARN_ON(rspi->rsp_maxlen != 0);
bfa_alen_set(&send_req->rsp_alen, 0, 0);
}
}
hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}
/*
* Allocate an FCXP instance to send a response or to send a request
* that has a response. Request/response buffers are allocated by caller.
*
* @param[in] bfa BFA bfa instance
* @param[in] nreq_sgles Number of SG elements required for request
* buffer. 0, if fcxp internal buffers are used.
* Use bfa_fcxp_get_reqbuf() to get the
* internal req buffer.
* @param[in] req_sgles SG elements describing request buffer. Will be
* copied in by BFA and hence can be freed on
* return from this function.
* @param[in] get_req_sga function ptr to be called to get a request SG
* Address (given the sge index).
* @param[in] get_req_sglen function ptr to be called to get a request SG
* len (given the sge index).
* @param[in] get_rsp_sga function ptr to be called to get a response SG
* Address (given the sge index).
* @param[in] get_rsp_sglen function ptr to be called to get a response SG
* len (given the sge index).
* @param[in] req Allocated FCXP is used to send req or rsp?
* request - BFA_TRUE, response - BFA_FALSE
*
* @return FCXP instance. NULL on failure.
*/
struct bfa_fcxp_s *
bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
		int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
		bfa_fcxp_get_sglen_t req_sglen_cbfn,
		bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
		bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
	struct bfa_fcxp_s *fcxp;

	WARN_ON(bfa == NULL);

	/* pull from the request or response pool as requested */
	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
	if (!fcxp)
		return NULL;

	bfa_trc(bfa, fcxp->fcxp_tag);

	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);

	return fcxp;
}
/*
* Get the internal request buffer pointer
*
* @param[in] fcxp BFA fcxp pointer
*
* @return pointer to the internal request buffer
*/
void *
bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;

	/* only valid when the internal request buffer was selected */
	WARN_ON(fcxp->use_ireqbuf != 1);
	return bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
				      mod->req_pld_sz + mod->rsp_pld_sz);
}
/* Size of the internal request payload buffer. */
u32
bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
{
	return fcxp->fcxp_mod->req_pld_sz;
}
/*
* Get the internal response buffer pointer
*
* @param[in] fcxp BFA fcxp pointer
*
* @return pointer to the internal request buffer
*/
void *
bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
{
struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
void *fcxp_buf;
WARN_ON(fcxp->use_irspbuf != 1);
fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
mod->req_pld_sz + mod->rsp_pld_sz);
/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
return ((u8 *) fcxp_buf) + mod->req_pld_sz;
}
/*
* Free the BFA FCXP
*
* @param[in] fcxp BFA fcxp pointer
*
* @return void
*/
/*
 * Release an fcxp back to the FCXP module.
 *
 * Fix: the NULL sanity check previously ran *after* fcxp had already
 * been dereferenced; warn first so the diagnostic fires before the
 * dereference.
 */
void
bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod;

	WARN_ON(fcxp == NULL);
	mod = fcxp->fcxp_mod;
	bfa_trc(mod->bfa, fcxp->fcxp_tag);
	bfa_fcxp_put(fcxp);
}
/*
* Send a FCXP request
*
* @param[in] fcxp BFA fcxp pointer
* @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
* @param[in] vf_id virtual Fabric ID
* @param[in] lp_tag lport tag
* @param[in] cts use Continuous sequence
* @param[in] cos fc Class of Service
* @param[in] reqlen request length, does not include FCHS length
* @param[in] fchs fc Header Pointer. The header content will be copied
* in by BFA.
*
* @param[in] cbfn call back function to be called on receiving
* the response
* @param[in] cbarg arg for cbfn
* @param[in] rsp_timeout
* response timeout
*
* @return bfa_status_t
*/
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* a NULL cbfn is replaced with a discard stub */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		/* bfa_fcxp_qresume() will post the request later */
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}
/*
* Abort a BFA FCXP
*
* @param[in] fcxp BFA fcxp pointer
*
* @return void
*/
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* Abort is not implemented; warn loudly if anything calls it. */
	WARN_ON(1);
	return BFA_STATUS_OK;
}
/*
 * Queue a wait entry for an FCXP allocation; the caller is notified via
 * alloc_cbfn once an fcxp of the requested direction (req/rsp) is freed.
 * Must only be called when the corresponding free list is empty.
 */
void
bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
			   bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
			   void *caller, int nreq_sgles,
			   int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
			   bfa_fcxp_get_sglen_t req_sglen_cbfn,
			   bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
			   bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* Waiting only makes sense when the chosen free list is exhausted. */
	if (req)
		WARN_ON(!list_empty(&mod->fcxp_req_free_q));
	else
		WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));

	/* Stash the allocation parameters for replay when an fcxp frees up. */
	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	if (req)
		list_add_tail(&wqe->qe, &mod->req_wait_q);
	else
		list_add_tail(&wqe->qe, &mod->rsp_wait_q);
}
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/*
	 * Fix: a queued wqe sits on exactly one of the two wait queues,
	 * never both.  The original WARN_ON used '||' and therefore fired
	 * whenever the wqe was absent from *either* queue -- i.e. always.
	 * Warn only when it is on neither queue (an invalid cancel).
	 */
	WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) &&
		!bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
	list_del(&wqe->qe);
}
/*
 * Discard an in-flight fcxp: either cancel a pending reqq wait and free
 * it, or neuter its completion callback if already handed to firmware.
 */
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/*
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	/* Already sent to firmware: let the response arrive but drop it. */
	fcxp->send_cbfn = bfa_fcxp_null_comp;
}
/*
 * FCXP firmware message class handler: dispatch i2h messages by id.
 */
void
bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	switch (msg->mhdr.msg_id) {
	case BFI_FCXP_I2H_SEND_RSP:
		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
		break;

	default:
		/* Unknown message id from firmware -- trace and warn. */
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
u32
bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
return mod->rsp_pld_sz;
}
/*
 * Reconfigure fcxp resources to match the firmware-supported count
 * num_fcxp_fw: park the surplus fcxps on the unused queues, taking half
 * from the request free list and half from the response free list.
 */
void
bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
		if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
			bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
			list_add_tail(qe, &mod->fcxp_req_unused_q);
		} else {
			bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
			list_add_tail(qe, &mod->fcxp_rsp_unused_q);
		}
	}
}
/*
* BFA LPS state machine functions
*/
/*
* Init state -- no login
*/
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* Send login now if the request queue has room, otherwise
		 * park in loginwait until space frees up. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Logout with no login in progress: complete immediately. */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually sent out the timeout
		 * Just ignore
		 */
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/*
		 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
		 * this event. Ignore this event.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
/*
* login is in progress -- awaiting response from firmware
*/
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
	case BFA_LPS_SM_OFFLINE:
		/* lps->status was set by bfa_lps_login_rsp() before this
		 * event was delivered. */
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);
			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify the upper layer of login completion (any status). */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* N2N PID arrives while login still pending -- just trace. */
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
/*
* login pending - awaiting space in request queue
*/
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Request-queue space became available; send the login. */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		bfa_lps_send_login(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Abandon the pending login; cancel the reqq wait entry. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
/*
* login complete
*/
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* Send logout now, or wait for request-queue space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		/* FCF cleared the virtual link; drop back to init. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* Push the N2N-assigned PID to firmware (may have to wait
		 * for request-queue space). */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
/*
* login complete
*/
/*
 * Online, but waiting for request-queue space to send the N2N PID.
 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		/* The pending wqe is reused: logowait's RESUME handler will
		 * send the logout when queue space frees up. */
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
/*
* logout in progress - awaiting firmware response
*/
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
	case BFA_LPS_SM_OFFLINE:
		/* Firmware responded (or IOC went down): logout done. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
/*
* logout pending -- awaiting space in request queue
*/
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Request-queue space available; send the logout. */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Drop the pending logout and cancel the reqq wait. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
/*
* lps_pvt BFA LPS private functions
*/
/*
* return memory requirement
*/
static void
bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);

	/* Minimum configuration gets only the minimal lport count. */
	if (cfg->drvcfg.min_cfg)
		bfa_mem_kva_setup(minfo, lps_kva,
			sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
	else
		bfa_mem_kva_setup(minfo, lps_kva,
			sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
}
/*
* bfa module attach at initialization time
*/
/*
 * Module attach: carve the lps array out of KVA memory, initialize each
 * lps entry, and place all entries on the free list.
 */
static void
bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	struct bfa_pcidev_s *pcidev)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	int			i;

	/*
	 * Fix: removed the redundant initial assignment of mod->num_lps
	 * that was unconditionally overwritten by the if/else below.
	 */
	if (cfg->drvcfg.min_cfg)
		mod->num_lps = BFA_LPS_MIN_LPORTS;
	else
		mod->num_lps = BFA_LPS_MAX_LPORTS;
	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);

	/* Advance the KVA cursor past the lps array we just claimed. */
	bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);

	INIT_LIST_HEAD(&mod->lps_free_q);
	INIT_LIST_HEAD(&mod->lps_active_q);
	INIT_LIST_HEAD(&mod->lps_login_q);

	for (i = 0; i < mod->num_lps; i++, lps++) {
		lps->bfa	= bfa;
		lps->bfa_tag	= (u8) i;
		lps->reqq	= BFA_REQQ_LPS;
		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
		list_add_tail(&lps->qe, &mod->lps_free_q);
	}
}
/* Module detach hook -- nothing to tear down for LPS. */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
/* Module start hook -- no start-time work for LPS. */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
/* Module stop hook -- no stop-time work for LPS. */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
/*
* IOC in disabled state -- consider all lps offline
*/
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head		*qe, *qen;

	/* Deliver OFFLINE to every active and login-pending lps.  The
	 * _safe iterators are required because the event handler may move
	 * entries off these lists (e.g. to the free list on DELETE). */
	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	list_for_each_safe(qe, qen, &mod->lps_login_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	/* Whatever remains on the login queue is folded into active. */
	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}
/*
* Firmware login response
*/
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* Cache the fabric-assigned login attributes. */
		lps->fw_tag	= rsp->fw_tag;
		lps->fport	= rsp->f_port;
		if (lps->fport)
			lps->lp_pid = rsp->lp_pid;
		lps->npiv_en	= rsp->npiv_en;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;
		lps->pr_bbscn	= rsp->bb_scn;
		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Record the LS_RJT reason/explanation for the caller. */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;
		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;
		break;

	case BFA_STATUS_VPORT_MAX:
		/* ext_status carries the count of follow-on logins that
		 * must also be failed with this status. */
		if (rsp->ext_status)
			bfa_lps_no_res(lps, rsp->ext_status);
		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	/* Login attempt finished: move from login queue to active queue. */
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_active_q);
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
/*
 * Fail up to 'count' lps entries queued after first_lps with the same
 * status (used when firmware reports the vport limit was exceeded).
 */
static void
bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
{
	struct bfa_s		*bfa = first_lps->bfa;
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct list_head	*qe, *qe_next;
	struct bfa_lps_s	*lps;

	bfa_trc(bfa, count);

	qe = bfa_q_next(first_lps);

	while (count && qe) {
		/* Capture the next link first: the FWRSP handler moves
		 * this entry off the login queue. */
		qe_next = bfa_q_next(qe);

		lps = (struct bfa_lps_s *)qe;
		bfa_trc(bfa, lps->bfa_tag);
		lps->status = first_lps->status;
		list_del(&lps->qe);
		list_add_tail(&lps->qe, &mod->lps_active_q);
		bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
		qe = qe_next;
		count--;
	}
}
/*
* Firmware logout response
*/
static void
bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	/* Tag comes from firmware; guard against a corrupt value. */
	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
/*
* Firmware received a Clear virtual link request (for FCoE)
*/
static void
bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	/* Route the clear-virtual-link event into the state machine. */
	lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}
/*
* Space is available in request queue, resume queueing request to firmware.
*/
static void
bfa_lps_reqq_resume(void *lps_arg)
{
	/* Request-queue space freed up: resume the waiting lps. */
	bfa_sm_send_event((struct bfa_lps_s *)lps_arg, BFA_LPS_SM_RESUME);
}
/*
* lps is freed -- triggered by vport delete
*/
static void
bfa_lps_free(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(lps->bfa);

	/* Clear the cached PID and return the entry to the free pool. */
	lps->lp_pid = 0;
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &lps_mod->lps_free_q);
}
/*
* send login request to firmware
*/
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
	struct bfi_lps_login_req_s	*m;

	/* Caller (state machine) has already verified the queue has room. */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_fn_lpu(lps->bfa));

	m->bfa_tag	= lps->bfa_tag;
	m->alpa		= lps->alpa;
	m->pdu_size	= cpu_to_be16(lps->pdusz);
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;
	m->bb_scn	= lps->bb_scn;

	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
	/* Track the in-flight login on the login queue until FWRSP. */
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_login_q);
}
/*
* send logout request to firmware
*/
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	/* Caller (state machine) has already verified the queue has room. */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;	/* firmware identifies lps by fw_tag */
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
/*
* send n2n pid set request to firmware
*/
static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
{
	struct bfi_lps_n2n_pid_req_s *m;

	/* Caller (state machine) has already verified the queue has room. */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->lp_pid = lps->lp_pid;	/* PID assigned during N2N PLOGI */
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
/*
* Indirect login completion handler for non-fcs
*/
static void
bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps = arg;

	/* Deferred callback: only act on the completion leg. */
	if (!complete)
		return;

	/* Route to the FDISC or FLOGI completion depending on login type. */
	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}
/*
* Login completion handler -- direct call for fcs, queue for others
*/
static void
bfa_lps_login_comp(struct bfa_lps_s *lps)
{
	/* Non-FCS callers get the completion deferred via the cb queue. */
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
			lps);
		return;
	}

	/* FCS path: invoke the completion callback synchronously. */
	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}
/*
* Indirect logout completion handler for non-fcs
*/
static void
bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps = arg;

	/* Deferred callback: only act on the completion leg. */
	if (!complete)
		return;

	/* Dispatch to the matching logout-complete notification. */
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
	else
		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
}
/*
* Logout completion handler -- direct call for fcs, queue for others
*/
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
	/* Non-FCS callers get the completion deferred via the cb queue. */
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
			lps);
		return;
	}
	/* NOTE(review): the synchronous FCS path only notifies FDISC
	 * logouts; the deferred path above also handles FLOGI logout via
	 * bfa_cb_lps_flogo_comp().  Presumably FCS never logs out the base
	 * port this way -- confirm against callers. */
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}
/*
* Clear virtual link completion handler for non-fcs
*/
static void
bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps = arg;

	/* Deferred callback: only act on the completion leg. */
	if (!complete)
		return;

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}
/*
* Received Clear virtual link event --direct call for fcs,
* queue for others
*/
static void
bfa_lps_cvl_event(struct bfa_lps_s *lps)
{
	/* Non-FCS callers get the notification deferred via the cb queue. */
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
			lps);
		return;
	}

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}
/*
* lps_public BFA LPS public functions
*/
u32
bfa_lps_get_max_vport(struct bfa_s *bfa)
{
	/* CT ASICs support more vports than CB ASICs. */
	return (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) ?
		BFA_LPS_MAX_VPORTS_SUPP_CT : BFA_LPS_MAX_VPORTS_SUPP_CB;
}
/*
 * Allocate a lport service tag.
 */
struct bfa_lps_s  *
bfa_lps_alloc(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps = NULL;

	/* Pull an entry off the free list; NULL if the pool is exhausted. */
	bfa_q_deq(&mod->lps_free_q, &lps);

	if (lps == NULL)
		return NULL;

	list_add_tail(&lps->qe, &mod->lps_active_q);

	bfa_sm_set_state(lps, bfa_lps_sm_init);
	return lps;
}
/*
* Free lport service tag. This can be called anytime after an alloc.
* No need to wait for any pending login/logout completions.
*/
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	/* The state machine frees the lps (returns it to the free list). */
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
/*
* Initiate a lport login.
*/
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
{
	/* Record the login parameters; the state machine sends the FLOGI. */
	lps->uarg	= uarg;
	lps->alpa	= alpa;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_FALSE;	/* base-port login, not FDISC */
	lps->auth_en	= auth_en;
	lps->bb_scn	= bb_scn;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
/*
* Initiate a lport fdisc login.
*/
void
bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
	wwn_t nwwn)
{
	/* Record the login parameters; the state machine sends the FDISC. */
	lps->uarg	= uarg;
	lps->alpa	= 0;		/* ALPA not used for FDISC */
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_TRUE;
	lps->auth_en	= BFA_FALSE;	/* no authentication on FDISC */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
/*
 * Initiate a lport FDISC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	/* The state machine issues the logout request to firmware. */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
u8
bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
{
	/* Translate a driver-side lp_tag into the firmware-side tag. */
	return BFA_LPS_FROM_TAG(BFA_LPS_MOD(bfa), lp_tag)->fw_tag;
}
/*
* Return lport services tag given the pid
*/
u8
bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
{
	struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
	int idx;

	/* Linear scan of the lps array for a matching port id. */
	for (idx = 0; idx < lps_mod->num_lps; idx++) {
		if (lps_mod->lps_arr[idx].lp_pid == pid)
			return lps_mod->lps_arr[idx].bfa_tag;
	}

	/* Return base port tag anyway */
	return 0;
}
/*
* return port id assigned to the base lport
*/
u32
bfa_lps_get_base_pid(struct bfa_s *bfa)
{
	/* Tag 0 is the base lport. */
	return BFA_LPS_FROM_TAG(BFA_LPS_MOD(bfa), 0)->lp_pid;
}
/*
* Set PID in case of n2n (which is assigned during PLOGI)
*/
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, n2n_pid);

	/* Remember the PID and let the state machine push it to firmware
	 * (only acted on in the online state). */
	lps->lp_pid = n2n_pid;
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}
/*
* LPS firmware message class handler.
*/
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u	msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	/* Dispatch the firmware message to the matching handler. */
	switch (m->mhdr.msg_id) {
	case BFI_LPS_I2H_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_I2H_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_I2H_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* Unknown message id from firmware -- trace and warn. */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
/*
 * Post an asynchronous event notification (AEN) for an FC-port event.
 * Silently drops the event if no AEN entry is available.
 */
static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	struct bfa_aen_entry_s	*aen_entry;

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
	aen_entry->aen_data.port.pwwn = fcport->pwwn;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
				  BFA_AEN_CAT_PORT, event);
}
/*
* FC PORT state machine functions
*/
/*
 * Uninitialized state: waiting for the START event after IOC bring-up.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* No reqq space: the enable was queued as a wait. */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Enable requested but waiting for request-queue space.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: actually send the enable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* FAA misconfiguration: tear down link info and park. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Enable sent to firmware -- awaiting response or link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enable acknowledged; link not up yet. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* FAA misconfiguration: tear down link info and park. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port enabled, link down -- waiting for link-up from firmware.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* FCoE mode: log FIP FCF discovery outcome. */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

		/* If QoS is enabled and it is not online, send AEN */
		if (fcport->cfg.qos_enabled &&
		    fcport->qos_attr.state != BFA_QOS_ONLINE)
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* FAA misconfiguration: tear down link info and park. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port enabled and link up -- the normal operational state.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		/* Disabling takes the link down first, then disables. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Admin-disabled ports log offline; otherwise it is an
		 * unexpected loss of fabric connectivity. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* FAA misconfiguration: tear down link info and park. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Disable requested but waiting for request-queue space.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: actually send the disable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable while a disable is still queued: toggle both. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* FAA misconfiguration: tear down link info and park. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Disable followed by enable, both pending on request-queue space.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the queued disable, then immediately try the
		 * enable; if the enable cannot be queued, wait again. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Cancel the pending re-enable: back to plain disable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * A disable request has been sent to firmware; waiting for the response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		/* Firmware acknowledged the disable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;
	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;
	case BFA_FCPORT_SM_ENABLE:
		/* Re-enable requested mid-disable: queue (or wait to queue)
		 * the enable and log the transition for the user. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;
	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;
	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;
	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;
	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port is administratively disabled; firmware confirmed the disable.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;
	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;
	case BFA_FCPORT_SM_ENABLE:
		/* Send (or wait to send) the enable and log it. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;
	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;
	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;
	case BFA_FCPORT_SM_DPORTENABLE:
		/* Diagnostic (D-port) mode entered from disabled state. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
		break;
	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port is stopped (IOC shutdown path); only a fresh START restarts it.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_SM_START:
		/* Restart: queue an enable, or wait for queue space. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		break;
	default:
		/*
		 * Ignore all other events.
		 */
		;
	}
}
/*
 * Port is enabled. IOC is down/failed.  A START after IOC recovery
 * re-enables the port.
 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		break;
	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
/*
 * Port is disabled. IOC is down/failed.  The port stays disabled after
 * IOC recovery unless an explicit ENABLE arrives first.
 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_SM_START:
		/* IOC back up: remain administratively disabled. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;
	case BFA_FCPORT_SM_ENABLE:
		/* Remember the enable intent; acted on when IOC restarts. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;
	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
/*
 * Port is in diagnostic (D-port) mode; normal enable/disable is ignored.
 */
static void
bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is dport
		 */
		break;
	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;
	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;
	case BFA_FCPORT_SM_DPORTDISABLE:
		/* Leaving D-port mode returns the port to disabled. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;
	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Fabric-assigned-address (FAA) misconfiguration detected; port is held
 * down until it is disabled or the hardware fails.
 */
static void
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port as there is FAA misconfig
		 */
		break;
	case BFA_FCPORT_SM_DISABLE:
		/* Disable is the way out: queue it (or wait for queue
		 * space), drop link state, and notify upper layers. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;
	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;
	case BFA_FCPORT_SM_HWFAIL:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;
	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Link state is down.  A link-up queues the LINKUP callback to the
 * upper layers and waits for its notification acknowledgement.
 */
static void
bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;
	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for down notification (the LINKDOWN callback is
 * in flight to the upper layers and has not been acknowledged yet).
 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Remember the pending up; deliver it after the ack. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;
	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;
	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for down notification and there is a pending up.
 * On acknowledgement the deferred LINKUP is delivered.
 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* The pending up was cancelled by another down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;
	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;
	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is up.  A link-down queues the LINKDOWN callback and waits
 * for its notification acknowledgement.
 */
static void
bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;
	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for up notification (the LINKUP callback is in
 * flight and not yet acknowledged).
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Remember the pending down; deliver it after the ack. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;
	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;
	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for up notification and there is a pending down.
 * On acknowledgement the deferred LINKDOWN is delivered.
 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* A further up arrived: track down-then-up pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;
	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;
	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for up notification and there are pending down
 * and up events; on acknowledgement the LINKDOWN is delivered first.
 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);
	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* The pending up was cancelled by another down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;
	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;
	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Deferred-callback trampoline: deliver the stored link event to the
 * registered handler, or acknowledge the notification on cancellation.
 */
static void
__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_ln_s *ln = cbarg;

	if (!complete) {
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
		return;
	}

	ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
}
/*
 * Send SCN notification to upper layers.
 * trunk - false if caller is fcport, so that fcport-originated events are
 * ignored while the port is in trunked mode.
 */
static void
bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
	bfa_boolean_t trunk)
{
	enum bfa_fcport_ln_sm_event ln_event;

	if (fcport->cfg.trunked && !trunk)
		return;

	switch (event) {
	case BFA_PORT_LINKUP:
		ln_event = BFA_FCPORT_LN_SM_LINKUP;
		break;
	case BFA_PORT_LINKDOWN:
		ln_event = BFA_FCPORT_LN_SM_LINKDOWN;
		break;
	default:
		WARN_ON(1);
		return;
	}

	bfa_sm_send_event(&fcport->ln, ln_event);
}
/*
 * Deliver a link event to upper layers: synchronously when FCS is
 * attached, otherwise via the deferred callback queue.
 */
static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
{
	struct bfa_fcport_s *port = ln->fcport;

	if (!port->bfa->fcs) {
		/* No FCS: stash the event and schedule the callback. */
		ln->ln_event = event;
		bfa_cb_queue(port->bfa, &ln->ln_qe,
				__bfa_cb_fcport_event, ln);
		return;
	}

	port->event_cbfn(port->event_cbarg, event);
	bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
}
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
BFA_CACHELINE_SZ))
/*
 * Report the DMA memory required for the port statistics block.
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	bfa_mem_dma_setup(minfo, BFA_MEM_FCPORT_DMA(bfa),
			FCPORT_STATS_DMA_SZ);
}
/*
 * Request-queue-space-available callback: resume the state machine.
 */
static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *port = cbarg;

	bfa_sm_send_event(port, BFA_FCPORT_SM_QRESUME);
}
/*
 * Claim the DMA region allocated for firmware port statistics and record
 * its kernel-virtual and physical addresses.
 */
static void
bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
{
	struct bfa_mem_dma_s *dma = &fcport->fcport_dma;

	fcport->stats_kva = bfa_mem_dma_virt(dma);
	fcport->stats_pa = bfa_mem_dma_phys(dma);
	fcport->stats = (union bfa_fcport_stats_u *) bfa_mem_dma_virt(dma);
}
/*
 * Memory initialization.  Attach-time setup of the fcport module: claim
 * DMA memory, set initial state-machine states, and install the default
 * port configuration.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct timeval tv;
	fcport->bfa = bfa;
	ln->fcport = fcport;
	bfa_fcport_mem_claim(fcport);
	/* Port starts uninitialized with the link considered down. */
	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
	/*
	 * initialize time stamp for stats reset
	 */
	do_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;
	fcport->stats_dma_ready = BFA_FALSE;
	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;
	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
	port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
	/* Queues of callers waiting on stats fetch/clear completions. */
	INIT_LIST_HEAD(&fcport->stats_pending_q);
	INIT_LIST_HEAD(&fcport->statsclr_pending_q);
	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
/*
 * Detach hook for the fcport module; nothing to release here.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
	/* intentionally empty */
}
/*
 * Called when IOC is ready: kick the port state machine.
 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	bfa_sm_send_event(port, BFA_FCPORT_SM_START);
}
/*
 * Called before IOC is stopped: stop the port state machine and tear
 * down trunk state.
 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	bfa_sm_send_event(port, BFA_FCPORT_SM_STOP);
	bfa_trunk_iocdisable(bfa);
}
/*
 * Called when IOC failure is detected: fail the port state machine and
 * tear down trunk state.
 */
static void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}
/*
 * Update loop info in fcport for SCN online: copy our ALPA and the loop
 * ALPA bitmap out of the firmware event.
 */
static void
bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
		struct bfa_fcport_loop_info_s *loop_info)
{
	fcport->myalpa = loop_info->myalpa;
	fcport->alpabm_valid = loop_info->alpabm_val;
	memcpy(fcport->alpabm.alpa_bm, loop_info->alpabm.alpa_bm,
		sizeof(struct fc_alpabm_s));
}
/*
 * Cache link attributes (speed, topology, QoS, trunk, FCoE VLAN) from the
 * firmware link-state event.  Loop topology uses a different attribute
 * layout and returns early after copying loop info.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
		bfa_fcport_update_loop_info(fcport,
			&pevent->link_state.attr.loop_info);
		return;
	}
	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;
	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;
	/* update FCoE specific */
	fcport->fcoe_vlan =
		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);
	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
/*
 * Forget the attributes learned from the last link-up.
 */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->bbsc_op_state = BFA_FALSE;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
}
/*
 * Send port enable message to firmware.
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (a queue-resume wait is armed in that case).
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;
	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;
	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}
	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
		bfa_fn_lpu(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* maxfrsize goes to firmware in big-endian byte order */
	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
	m->use_flash_cfg = fcport->use_flash_cfg;
	/* hand firmware the DMA address for the statistics block */
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
	return BFA_TRUE;
}
/*
 * Send port disable message to firmware.
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (a queue-resume wait is armed in that case).
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;
	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;
	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}
	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
		bfa_fn_lpu(fcport->bfa));
	m->msgtag = fcport->msgtag;
	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
	return BFA_TRUE;
}
/*
 * Copy the port and node WWNs from the IOC attributes into the fcport.
 */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
/*
 * Convert a big-endian firmware QoS statistics block into host byte
 * order, one 32-bit word at a time.
 */
static void
bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
			struct bfa_qos_stats_s *s)
{
	u32 *dst = (u32 *) d;
	__be32 *src = (__be32 *) s;
	int w;
	int nwords = sizeof(struct bfa_qos_stats_s) / sizeof(u32);

	for (w = 0; w < nwords; w++)
		dst[w] = be32_to_cpu(src[w]);
}
/*
 * Convert the firmware FCoE statistics block into host byte order.
 * Words are processed in pairs: on little-endian hosts the two 32-bit
 * halves of each 64-bit counter are exchanged as well as byte-swapped.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
			struct bfa_fcoe_stats_s *s)
{
	u32 *dst = (u32 *) d;
	__be32 *src = (__be32 *) s;
	int w;
	int nwords = sizeof(struct bfa_fcoe_stats_s) / sizeof(u32);

	for (w = 0; w < nwords; w += 2) {
#ifdef __BIG_ENDIAN
		dst[w] = be32_to_cpu(src[w]);
		dst[w + 1] = be32_to_cpu(src[w + 1]);
#else
		dst[w] = be32_to_cpu(src[w + 1]);
		dst[w + 1] = be32_to_cpu(src[w]);
#endif
	}
}
/*
 * Completion handler for a statistics fetch.  On success the raw
 * firmware statistics are byte-swapped into each waiter's buffer; each
 * queued waiter is then completed with the fetch status.  On cancel
 * (complete == BFA_FALSE) the pending queue is simply reset.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;
	union bfa_fcport_stats_u *ret;
	if (complete) {
		struct timeval tv;
		if (fcport->stats_status == BFA_STATUS_OK)
			do_gettimeofday(&tv);
		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
			bfa_q_deq(&fcport->stats_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			if (fcport->stats_status == BFA_STATUS_OK) {
				ret = (union bfa_fcport_stats_u *)cb->data;
				/* Swap FC QoS or FCoE stats */
				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
					bfa_fcport_qos_stats_swap(&ret->fcqos,
						&fcport->stats->fcqos);
				else {
					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
						&fcport->stats->fcoe);
					/* seconds since the last stats reset */
					ret->fcoe.secs_reset =
						tv.tv_sec - fcport->stats_reset_time;
				}
			}
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
				fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		INIT_LIST_HEAD(&fcport->stats_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
/*
 * Statistics-fetch timer expired: cancel any queue wait and complete
 * the waiters with a timeout status.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *port = (struct bfa_fcport_s *) cbarg;

	bfa_trc(port->bfa, port->stats_qfull);

	if (port->stats_qfull) {
		bfa_reqq_wcancel(&port->stats_reqq_wait);
		port->stats_qfull = BFA_FALSE;
	}

	port->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_get(port, BFA_TRUE);
}
/*
 * Queue a statistics-fetch request to firmware.  If the request queue is
 * full, arm a wait that re-invokes this function when space frees up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;
	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!msg) {
		/* queue full: retry via qresume with this same function */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
			bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;
	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
		bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
/*
 * Completion handler for a statistics clear: restart the stats-reset
 * timestamp and complete every queued waiter with the clear status.  On
 * cancel (complete == BFA_FALSE) the pending queue is simply reset.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;
	if (complete) {
		struct timeval tv;
		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;
		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
				fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
/*
 * Statistics-clear timer expired: cancel any queue wait and complete
 * the waiters with a timeout status.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *port = (struct bfa_fcport_s *) cbarg;

	bfa_trc(port->bfa, port->stats_qfull);

	if (port->stats_qfull) {
		bfa_reqq_wcancel(&port->stats_reqq_wait);
		port->stats_qfull = BFA_FALSE;
	}

	port->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_clr(port, BFA_TRUE);
}
/*
 * Queue a statistics-clear request to firmware.  If the request queue is
 * full, arm a wait that re-invokes this function when space frees up.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;
	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!msg) {
		/* queue full: retry via qresume with this same function */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
			bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;
	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
		bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
/*
 * Handle trunk SCN event from firmware: update cached trunk/link
 * attributes, log which trunk links are up, and notify upper layers if
 * the trunk state changed.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;
	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
		scn->trunk_state != BFA_TRUNK_OFFLINE);
	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);
	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	/* copy per-link attributes and build a bitmap of links that are up */
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];
		lattr->link_state = tlink->state;
		lattr->trunk_wwn = tlink->trunk_wwn;
		lattr->fctl = tlink->fctl;
		lattr->speed = tlink->speed;
		lattr->deskew = be32_to_cpu(tlink->deskew);
		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			fcport->speed = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}
		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}
	/* log which of the two trunk links are up */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}
	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
/*
 * IOC is going down: take the trunk offline and reset the cached
 * per-link attributes to their unknown/down values.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;
	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
				BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
				BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
				BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
/*
 * Called to initialize port attributes from IOC hardware data (WWNs,
 * max frame size, RX BB credits, supported speeds) once the IOC is up.
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
	/* record pre-boot (PBC) disablement of the port, if any */
	if (bfa_fcport_is_pbcdisabled(bfa))
		bfa->modules.port.pbc_disabled = BFA_TRUE;
	WARN_ON(!fcport->cfg.maxfrsize);
	WARN_ON(!fcport->cfg.rx_bbcredit);
	WARN_ON(!fcport->speed_sup);
}
/*
 * Firmware message handler.  Dispatches fcport I2H messages: enable and
 * disable responses, link-state events, trunk SCNs, statistics get/clear
 * responses, and enable/disable AENs.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;
	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;
	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* stale responses (msgtag mismatch) are dropped */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
			fcport->stats_dma_ready = BFA_TRUE;
			if (fcport->use_flash_cfg) {
				/* first enable: adopt the flash-stored
				 * config; multi-byte fields arrive
				 * byte-swapped and are fixed up here */
				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
				fcport->cfg.maxfrsize =
					cpu_to_be16(fcport->cfg.maxfrsize);
				fcport->cfg.path_tov =
					cpu_to_be16(fcport->cfg.path_tov);
				fcport->cfg.q_depth =
					cpu_to_be16(fcport->cfg.q_depth);
				if (fcport->cfg.trunked)
					fcport->trunk.attr.state =
						BFA_TRUNK_OFFLINE;
				else
					fcport->trunk.attr.state =
						BFA_TRUNK_DISABLED;
				fcport->qos_attr.qos_bw =
					i2hmsg.penable_rsp->port_cfg.qos_bw;
				fcport->use_flash_cfg = BFA_FALSE;
			}
			if (fcport->cfg.qos_enabled)
				fcport->qos_attr.state = BFA_QOS_OFFLINE;
			else
				fcport->qos_attr.state = BFA_QOS_DISABLED;
			fcport->qos_attr.qos_bw_op =
				i2hmsg.penable_rsp->port_cfg.qos_bw;
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		}
		break;
	case BFI_FCPORT_I2H_DISABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;
	case BFI_FCPORT_I2H_EVENT:
		/* link-state change; FAA misconfig is reported as a
		 * special link-down reason */
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else {
			if (i2hmsg.event->link_state.linkstate_rsn ==
				BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
				bfa_sm_send_event(fcport,
					BFA_FCPORT_SM_FAA_MISCONFIG);
			else
				bfa_sm_send_event(fcport,
					BFA_FCPORT_SM_LINKDOWN);
		}
		fcport->qos_attr.qos_bw_op =
			i2hmsg.event->link_state.qos_attr.qos_bw_op;
		break;
	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;
	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (list_empty(&fcport->stats_pending_q) ||
			(fcport->stats_status == BFA_STATUS_ETIMER))
			break;
		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
		break;
	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (list_empty(&fcport->statsclr_pending_q) ||
			(fcport->stats_status == BFA_STATUS_ETIMER))
			break;
		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
		break;
	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;
	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;
	default:
		WARN_ON(1);
		break;
	}
}
/*
 * Registered callback for port events: install the handler and its
 * opaque argument on the fcport.
 */
void
bfa_fcport_event_register(struct bfa_s *bfa,
		void (*cbfn) (void *cbarg,
			enum bfa_port_linkstate event),
		void *cbarg)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	port->event_cbarg = cbarg;
	port->event_cbfn = cbfn;
}
/*
 * Administratively enable the port.  Refused (in this precedence order)
 * when the port is pre-boot disabled, the IOC is disabled, or a
 * diagnostic is in progress.
 */
bfa_status_t
bfa_fcport_enable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;
	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;
	if (fcport->diag_busy)
		return BFA_STATUS_DIAG_BUSY;
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}
/*
 * Administratively disable the port.  Refused when the port is pre-boot
 * disabled or the IOC is disabled.
 */
bfa_status_t
bfa_fcport_disable(struct bfa_s *bfa)
{
	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;
	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}
/*
 * If PBC (pre-boot configuration) has disabled the port, return
 * BFA_STATUS_PBC; otherwise BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfa->iocfc.cfgrsp;

	if (cfgrsp->pbc_cfg.port_enabled != BFI_PBC_PORT_DISABLED)
		return BFA_STATUS_OK;

	bfa_trc(bfa, fcport->pwwn);
	return BFA_STATUS_PBC;
}
/*
 * Configure port speed.  Validates the requested speed against trunking,
 * topology, ASIC generation, and the hardware-supported maximum before
 * storing it in the port configuration.
 */
bfa_status_t
bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	bfa_trc(bfa, speed);
	/* speed is fixed by the trunk while trunking is enabled */
	if (fcport->cfg.trunked == BFA_TRUE)
		return BFA_STATUS_TRUNK_ENABLED;
	/* 16G is not supported in loop topology */
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(speed == BFA_PORT_SPEED_16GBPS))
		return BFA_STATUS_UNSUPP_SPEED;
	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
		bfa_trc(bfa, fcport->speed_sup);
		return BFA_STATUS_UNSUPP_SPEED;
	}
	/* Port speed entered needs to be checked */
	if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
		/* For CT2, 1G is not supported */
		if ((speed == BFA_PORT_SPEED_1GBPS) &&
			(bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
			return BFA_STATUS_UNSUPP_SPEED;
		/* Already checked for Auto Speed and Max Speed supp */
		if (!(speed == BFA_PORT_SPEED_1GBPS ||
			speed == BFA_PORT_SPEED_2GBPS ||
			speed == BFA_PORT_SPEED_4GBPS ||
			speed == BFA_PORT_SPEED_8GBPS ||
			speed == BFA_PORT_SPEED_16GBPS ||
			speed == BFA_PORT_SPEED_AUTO))
			return BFA_STATUS_UNSUPP_SPEED;
	} else {
		/* FCoE (CNA) ports run at 10G only */
		if (speed != BFA_PORT_SPEED_10GBPS)
			return BFA_STATUS_UNSUPP_SPEED;
	}
	fcport->cfg.speed = speed;
	return BFA_STATUS_OK;
}
/*
 * Get current (negotiated) port speed.
 */
enum bfa_port_speed
bfa_fcport_get_speed(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	return port->speed;
}
/*
 * Configure port topology.  Loop topology is only accepted when QoS,
 * rate limiting, trunking, 16G speed, mezzanine cards, and D-port mode
 * are all out of the picture.
 */
bfa_status_t
bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	bfa_trc(bfa, topology);
	bfa_trc(bfa, fcport->cfg.topology);
	switch (topology) {
	case BFA_PORT_TOPOLOGY_P2P:
		break;
	case BFA_PORT_TOPOLOGY_LOOP:
		/* loop conflicts with QoS, TRL, trunking, 16G, mezz, dport */
		if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
			(fcport->qos_attr.state != BFA_QOS_DISABLED))
			return BFA_STATUS_ERROR_QOS_ENABLED;
		if (fcport->cfg.ratelimit != BFA_FALSE)
			return BFA_STATUS_ERROR_TRL_ENABLED;
		if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
			(fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
			return BFA_STATUS_ERROR_TRUNK_ENABLED;
		if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
			(fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
			return BFA_STATUS_UNSUPP_SPEED;
		if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
			return BFA_STATUS_LOOP_UNSUPP_MEZZ;
		if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
			return BFA_STATUS_DPORT_ERR;
		break;
	case BFA_PORT_TOPOLOGY_AUTO:
		break;
	default:
		return BFA_STATUS_EINVAL;
	}
	fcport->cfg.topology = topology;
	return BFA_STATUS_OK;
}
/*
 * Get current (operational) topology.
 */
enum bfa_port_topology
bfa_fcport_get_topology(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	return port->topology;
}
/*
 * Get configured (administrative) topology.
 */
enum bfa_port_topology
bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	return port->cfg.topology;
}
/*
 * Configure a hard (fixed) ALPA for loop topology.
 */
bfa_status_t
bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, alpa);
	bfa_trc(bfa, port->cfg.cfg_hardalpa);
	bfa_trc(bfa, port->cfg.hardalpa);

	port->cfg.hardalpa = alpa;
	port->cfg.cfg_hardalpa = BFA_TRUE;
	return BFA_STATUS_OK;
}
/*
 * Clear the hard-ALPA setting (revert to soft ALPA assignment).
 */
bfa_status_t
bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, port->cfg.cfg_hardalpa);
	bfa_trc(bfa, port->cfg.hardalpa);

	port->cfg.cfg_hardalpa = BFA_FALSE;
	return BFA_STATUS_OK;
}
/*
 * Report the configured hard ALPA via *alpa; the return value says
 * whether hard ALPA is enabled at all.
 */
bfa_boolean_t
bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	*alpa = port->cfg.hardalpa;
	return port->cfg.cfg_hardalpa;
}
/*
 * Return the ALPA acquired by this port on the loop.
 */
u8
bfa_fcport_get_myalpa(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	return port->myalpa;
}
/*
 * Configure the maximum FC frame (PDU) size.  The value must lie within
 * [FC_MIN_PDUSZ, FC_MAX_PDUSZ] and be a power of two unless it is the
 * maximum size itself.
 */
bfa_status_t
bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, maxfrsize);
	bfa_trc(bfa, port->cfg.maxfrsize);

	if (maxfrsize > FC_MAX_PDUSZ || maxfrsize < FC_MIN_PDUSZ)
		return BFA_STATUS_INVLD_DFSZ;

	if (maxfrsize != FC_MAX_PDUSZ && (maxfrsize & (maxfrsize - 1)) != 0)
		return BFA_STATUS_INVLD_DFSZ;

	port->cfg.maxfrsize = maxfrsize;
	return BFA_STATUS_OK;
}
/*
 * Return the configured maximum frame size.
 */
u16
bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	return port->cfg.maxfrsize;
}
/*
 * Return the configured receive BB credits; loop topology reports zero.
 */
u8
bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
{
	if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
		return 0;

	return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
}
/*
 * Set the transmit BB credits and BB_SCN value; a non-zero BB_SCN marks
 * BB credit recovery as operational.
 */
void
bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	port->cfg.tx_bbcredit = (u8)tx_bbcredit;
	port->cfg.bb_scn = bb_scn;
	if (bb_scn != 0)
		port->bbsc_op_state = BFA_TRUE;
}
/*
 * Get port attributes: return the node WWN when @node is true, the port
 * WWN otherwise.
 */
wwn_t
bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	return node ? port->nwwn : port->pwwn;
}
/*
 * Fill *attr with a snapshot of the port's configuration and operational
 * attributes (WWNs, speed, topology, beacon, and derived port state).
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	memset(attr, 0, sizeof(struct bfa_port_attr_s));
	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;
	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;
	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;
	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;
	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	attr->bbsc_op_status =  fcport->bbsc_op_state;
	/* PBC Disabled State overrides; otherwise report IOC trouble */
	if (bfa_fcport_is_pbcdisabled(bfa))
		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
	else {
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_FWMISMATCH;
	}
	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
/* timeout value handed to bfa_timer_start() for stats requests */
#define BFA_FCPORT_STATS_TOV			1000

/*
 * Fetch port statistics (FCQoS or FCoE).
 *
 * The request is asynchronous: @cb is queued and completed when the
 * firmware response (or the timer) fires.  Only the first queued
 * request sends the firmware message and arms the timer; later callers
 * simply join the pending queue.  Fails with DEVBUSY while a stats
 * clear is outstanding.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa) ||
	    !fcport->stats_dma_ready)
		return BFA_STATUS_IOC_NON_OP;

	if (!list_empty(&fcport->statsclr_pending_q))
		return BFA_STATUS_DEVBUSY;

	if (list_empty(&fcport->stats_pending_q)) {
		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
		bfa_fcport_send_stats_get(fcport);
		bfa_timer_start(bfa, &fcport->timer,
				bfa_fcport_stats_get_timeout,
				fcport, BFA_FCPORT_STATS_TOV);
	} else
		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);

	return BFA_STATUS_OK;
}
/*
 * Reset port statistics (FCQoS or FCoE).
 *
 * Mirror image of bfa_fcport_get_stats(): fails with DEVBUSY while a
 * stats fetch is outstanding; the first queued clear request sends the
 * firmware message and arms the timer, later callers just queue.
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa) ||
	    !fcport->stats_dma_ready)
		return BFA_STATUS_IOC_NON_OP;

	if (!list_empty(&fcport->stats_pending_q))
		return BFA_STATUS_DEVBUSY;

	if (list_empty(&fcport->statsclr_pending_q)) {
		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
		bfa_fcport_send_stats_clear(fcport);
		bfa_timer_start(bfa, &fcport->timer,
				bfa_fcport_stats_clr_timeout,
				fcport, BFA_FCPORT_STATS_TOV);
	} else
		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);

	return BFA_STATUS_OK;
}
/*
 * Return true when the port state machine is in the disabled state.
 */
bfa_boolean_t
bfa_fcport_is_disabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	return bfa_sm_to_state(hal_port_sm_table, port->sm) ==
		BFA_PORT_ST_DISABLED;
}
/*
 * Return true when the port state machine is in diagnostic-port state.
 */
bfa_boolean_t
bfa_fcport_is_dport(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	return (bfa_sm_to_state(hal_port_sm_table, port->sm) ==
		BFA_PORT_ST_DPORT);
}
/*
 * Validate and store a QoS bandwidth split across the high/med/low
 * priorities.  Each share must be non-zero, the three must sum to 100,
 * and they must be ordered high >= med >= low.  The split is only
 * recorded for FC IOCs on non-loop topologies.
 */
bfa_status_t
bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);
	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);

	bfa_trc(bfa, ioc_type);

	if (qos_bw->high == 0 || qos_bw->med == 0 || qos_bw->low == 0)
		return BFA_STATUS_QOS_BW_INVALID;

	if (qos_bw->high + qos_bw->med + qos_bw->low != 100)
		return BFA_STATUS_QOS_BW_INVALID;

	if (qos_bw->med > qos_bw->high || qos_bw->low > qos_bw->med ||
	    qos_bw->low > qos_bw->high)
		return BFA_STATUS_QOS_BW_INVALID;

	if (ioc_type == BFA_IOC_TYPE_FC &&
	    port->cfg.topology != BFA_PORT_TOPOLOGY_LOOP)
		port->cfg.qos_bw = *qos_bw;

	return BFA_STATUS_OK;
}
/*
 * Return whether rate limiting is enabled in the port configuration.
 */
bfa_boolean_t
bfa_fcport_is_ratelim(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	if (port->cfg.ratelimit)
		return BFA_TRUE;
	return BFA_FALSE;
}
/*
 * Enable/Disable FAA feature in port config
 */
void
bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, state);

	/* record the requested FAA state in the port configuration */
	port->cfg.faa_state = state;
}
/*
 * Get default minimum ratelim speed
 */
enum bfa_port_speed
bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, port->cfg.trl_def_speed);
	return port->cfg.trl_def_speed;
}
/*
 * Record the port beacon and end-to-end link beacon states.
 * @dev is the bfa instance (void * to match the callback signature).
 */
void
bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
		bfa_boolean_t link_e2e_beacon)
{
	struct bfa_s *bfa = dev;
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, beacon);
	bfa_trc(bfa, link_e2e_beacon);
	bfa_trc(bfa, port->beacon);
	bfa_trc(bfa, port->link_e2e_beacon);

	port->beacon = beacon;
	port->link_e2e_beacon = link_e2e_beacon;
}
/*
 * Link-up check: a trunked port is up when the trunk reports online,
 * a non-trunked port when its state machine is in the linkup state.
 */
bfa_boolean_t
bfa_fcport_is_linkup(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	if (port->cfg.trunked)
		return port->trunk.attr.state == BFA_TRUNK_ONLINE;

	return bfa_sm_cmp_state(port, bfa_fcport_sm_linkup);
}
/*
 * Return whether QoS is enabled in the port configuration.
 */
bfa_boolean_t
bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	return port->cfg.qos_enabled;
}
/*
 * Return whether trunking is enabled in the port configuration.
 */
bfa_boolean_t
bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *port = BFA_FCPORT_MOD(bfa);

	return port->cfg.trunked;
}
/*
 * Move the port state machine into diagnostic-port mode and mark the
 * port module dport-enabled.
 */
void
bfa_fcport_dportenable(struct bfa_s *bfa)
{
	/*
	 * Assume caller check for port is in disable state
	 */
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
	bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
}
/*
 * Take the port state machine out of diagnostic-port mode and clear
 * the port module dport-enabled flag.
 */
void
bfa_fcport_dportdisable(struct bfa_s *bfa)
{
	/*
	 * Assume caller check for port is in disable state
	 */
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
	bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
}
/*
 * Rport State machine functions
 */

/*
 * Beginning state, only online event expected.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		/* any other event in this state is a driver bug */
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Rport is created; waiting for an online event to push the rport to
 * firmware (or a delete/hardware-failure event).
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* qfull variant is used when the request queue had no room */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* delete while create is in flight: defer until FWRSP */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* offline while create is in flight: defer until FWRSP */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available again -- retry the create */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		/* nothing was sent to firmware; just cancel the wait */
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* firmware QoS state-change notification; the raw message
		 * was stashed in event_arg by the ISR */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive big-endian; convert before comparing */
		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);

		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline into a full delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Request queue was full when the firmware delete was attempted;
 * waiting for queue resume to (re)send it.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* already offline; just acknowledge the caller */
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* no response will come; free the rport immediately */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Delete requested while the request queue is full; waiting for queue
 * resume to send the firmware delete.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; now issue the deferred delete */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
			     enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; now issue the deferred offline delete */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* escalate pending offline to a pending delete */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * IOC h/w failed.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC recovered; re-create the rport in firmware */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* already in failed state; nothing more to do */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * bfa_rport_private BFA rport private functions
 */

/*
 * Deferred-callback trampoline: notify the driver that the rport is
 * online (no-op on the cancellation pass, when complete is false).
 */
static void
__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_rport_s *rport = cbarg;

	if (!complete)
		return;

	bfa_cb_rport_online(rport->rport_drv);
}
/*
 * Deferred-callback trampoline: notify the driver that the rport is
 * offline (no-op on the cancellation pass, when complete is false).
 */
static void
__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_rport_s *rport = cbarg;

	if (!complete)
		return;

	bfa_cb_rport_offline(rport->rport_drv);
}
/*
 * Request-queue resume callback: kick the rport state machine so a
 * previously queue-full firmware request can be retried.
 */
static void
bfa_rport_qresume(void *cbarg)
{
	struct bfa_rport_s *rport = cbarg;

	bfa_sm_send_event(rport, BFA_RPORT_SM_QRESUME);
}
/*
 * Compute the KVA memory the rport module needs: one bfa_rport_s per
 * configured rport, with the count clamped up to BFA_RPORT_MIN.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	/* kva memory */
	bfa_mem_kva_setup(minfo, rport_kva,
		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
/*
 * Attach-time initialization: carve the rport array out of the module
 * KVA region, put every rport (except tag 0) on the free queue in the
 * uninit state, and wire up the queue-resume wait element.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* the count must be a non-zero power of two */
	WARN_ON(!mod->num_rports ||
		(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 *  - is unused
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}
/*
 * Module-interface hooks: the rport module has no work to do at
 * detach, start or stop time; these stubs satisfy the interface.
 */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}

static void
bfa_rport_start(struct bfa_s *bfa)
{
}

static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
/*
 * IOC-disable hook: return unused rports to the free queue and deliver
 * a hardware-failure event to every active rport's state machine.
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	/* Enqueue unused rport resources to free_q */
	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);

	/* safe iteration: HWFAIL handling may unlink the rport */
	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
/*
 * Take an rport off the free queue and move it to the active queue.
 * Returns NULL when the free queue is exhausted.
 */
static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s *mod)
{
	struct bfa_rport_s *rport;

	bfa_q_deq(&mod->rp_free_q, &rport);
	if (rport)
		list_add_tail(&rport->qe, &mod->rp_active_q);

	return rport;
}
/*
 * Return an rport from the active queue to the free queue.
 */
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	/* must currently be on the active queue */
	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}
/*
 * Post an rport CREATE request to firmware.  If the request queue is
 * full, queue a wait element and return BFA_FALSE; the state machine
 * retries on BFA_RPORT_SM_QRESUME.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* frame size goes on the wire in big-endian */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
/*
 * Post an rport DELETE request to firmware.  If the request queue is
 * full, queue a wait element and return BFA_FALSE; the state machine
 * retries on BFA_RPORT_SM_QRESUME.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
/*
 * Post a SET_SPEED request to firmware.  Unlike create/delete, a full
 * request queue is not retried -- the request is simply dropped (and
 * traced) and BFA_FALSE returned.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
/*
 * bfa_rport_public
 */

/*
 * Rport interrupt processing.
 *
 * Demultiplex firmware-to-host rport messages: look up the rport by
 * its handle/tag and forward the event to its state machine, or invoke
 * the loop/SCN driver callbacks for port-wide notifications.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_rport_set_lunmask(bfa, rp);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_rport_unset_lunmask(bfa, rp);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* stash the raw message; the SM online handler parses it */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	case BFI_RPORT_I2H_LIP_SCN_ONLINE:
		bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
				&msg.lip_scn->loop_info);
		bfa_cb_rport_scn_online(bfa);
		break;

	case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
		bfa_cb_rport_scn_offline(bfa);
		break;

	case BFI_RPORT_I2H_NO_DEV:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
		bfa_cb_rport_scn_no_dev(rp->rport_drv);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
/*
 * Firmware reconfiguration shrank the rport count to @num_rport_fw:
 * park the surplus rports on the unused queue so they are not handed
 * out by bfa_rport_alloc().
 */
void
bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
{
	struct bfa_rport_mod_s	*mod = BFA_RPORT_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
		bfa_q_deq_tail(&mod->rp_free_q, &qe);
		list_add_tail(qe, &mod->rp_unused_q);
	}
}
/*
 * bfa_rport_api
 */

/*
 * Allocate an rport, bind it to the driver context @rport_drv and kick
 * its state machine with CREATE.  Returns NULL when no rport is free.
 */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	memset(&rp->stats, 0, sizeof(rp->stats));

	/* freshly allocated rports must be in the uninit state */
	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}
/*
 * Record the login parameters for @rport and drive it online.  A zero
 * max_frmsz is warned about but tolerated (see comment below).
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
/*
 * Set the operating speed for a remote port and notify its state
 * machine so the new speed can be pushed to firmware.
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	WARN_ON(speed == 0);
	WARN_ON(speed == BFA_PORT_SPEED_AUTO);

	if (!rport)
		return;

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
/* Set Rport LUN Mask */
/*
 * Mark LUN masking active on both the rport and its logical port, and
 * propagate the (lp_wwn, rp_wwn, tags) tuple to the FCP IM LUN-mask
 * table.
 */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;
	u8 lp_tag = (u8)rp->rport_info.lp_tag;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* one assignment flags both the lps and the rport */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
					rp->lun_mask = BFA_TRUE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}
/* Unset Rport LUN mask */
/*
 * Clear LUN masking on the rport and its logical port and invalidate
 * the corresponding FCP IM LUN-mask entry.
 */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* one assignment clears both the lps and the rport flag */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
				rp->lun_mask = BFA_FALSE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}
/*
 * SGPG related functions
 */

/*
 * Compute the DMA and KVA memory needed by the SGPG module: one DMA
 * bfi_sgpg_s and one KVA bfa_sgpg_s per scatter-gather page, with the
 * configured count clamped to [BFA_SGPG_MIN, BFA_SGPG_MAX] and the DMA
 * space spread across segments.
 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_sgpg_mod_s	*sgpg_mod = BFA_SGPG_MOD(bfa);
	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);

	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

	num_sgpg = cfg->drvcfg.num_sgpgs;

	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

	/* full segments first, remainder in the last segment */
	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
		if (num_sgpg >= per_seg_sgpg) {
			num_sgpg -= per_seg_sgpg;
			bfa_mem_dma_setup(minfo, seg_ptr,
					per_seg_sgpg * sgpg_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
					num_sgpg * sgpg_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, sgpg_kva,
		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
/*
 * Attach-time initialization: walk the DMA segments, align each
 * segment's SGPG area, pair every DMA bfi_sgpg_s with a KVA
 * bfa_sgpg_s handle (storing the little-endian DMA address in the
 * handle), and queue all handles on the free list.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;
	struct bfa_mem_dma_s *seg_ptr;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;

	/* union lets the same 64-bit PA be stored as a bfi address */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

	/* dma/kva mem claim */
	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		/* round the segment base up to SGPG alignment */
		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
					     bfa_mem_dma_phys(seg_ptr);
		sgpg = (struct bfi_sgpg_s *)
			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
			memset(hsgpg, 0, sizeof(*hsgpg));
			memset(sgpg, 0, sizeof(*sgpg));

			hsgpg->sgpg = sgpg;
			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
			list_add_tail(&hsgpg->qe, &mod->sgpg_q);

			sgpg++;
			hsgpg++;
			sgpg_pa.pa += sgpg_sz;
		}
	}

	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
/*
 * Module-interface hooks: the SGPG module has no work to do at detach,
 * start, stop or IOC-disable time; these stubs satisfy the interface.
 */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}

static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}

static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}

static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
/*
 * Move @nsgpgs scatter-gather pages from the module free list onto the
 * caller's @sgpg_q.  All-or-nothing: fails with ENOMEM if fewer than
 * @nsgpgs pages are free.
 */
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	int i;

	if (mod->free_sgpgs < nsgpgs)
		return BFA_STATUS_ENOMEM;

	for (i = 0; i < nsgpgs; i++) {
		bfa_q_deq(&mod->sgpg_q, &hsgpg);
		WARN_ON(!hsgpg);
		list_add_tail(&hsgpg->qe, sgpg_q);
	}

	mod->free_sgpgs -= nsgpgs;
	return BFA_STATUS_OK;
}
/*
 * Return @nsgpg pages from @sgpg_q to the module free list, then hand
 * out as many pages as possible to waiters in FIFO order; a waiter's
 * callback fires once its full request has been satisfied.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* give the waiter everything free, capped at its need */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
/*
 * Queue a wait element for @nsgpg pages.  Callers use this only after
 * bfa_sgpg_malloc() failed, so the free count is strictly less than
 * @nsgpg; whatever is free is granted immediately and the remainder is
 * delivered via the wait queue as pages are freed.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
/*
 * Cancel a pending SGPG wait and return any pages that were already
 * granted to the waiter back to the free list.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
	list_del(&wqe->qe);

	/* total != remaining means some pages were already granted */
	if (wqe->nsgpg_total != wqe->nsgpg)
		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
				   wqe->nsgpg_total - wqe->nsgpg);
}
/*
 * Initialize an SGPG wait element with its completion callback and
 * argument, and an empty page queue.
 */
void
bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
		   void *cbarg)
{
	wqe->cbfn = cbfn;
	wqe->cbarg = cbarg;
	INIT_LIST_HEAD(&wqe->sgpg_q);
}
/*
* UF related functions
*/
/*
*****************************************************************************
* Internal functions
*****************************************************************************
*/
/*
 * Deferred-callback trampoline: deliver a received unsolicited frame to
 * the registered receive handler (no-op on the cancellation pass).
 */
static void
__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_uf_s *uf = cbarg;
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);

	if (!complete)
		return;

	ufm->ufrecv(ufm->cbarg, uf);
}
/*
 * Carve the array of UF buffer-post firmware messages out of the module
 * KVA region and pre-build one BUF_POST message per unsolicited-frame
 * buffer, so posting a buffer later is a plain memcpy.
 *
 * Fix: the original had a standalone `uf_bp_msg = ufm->uf_buf_posts;`
 * assignment immediately overwritten by the identical for-loop
 * initializer; the dead statement is removed.
 */
static void
claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
{
	struct bfi_uf_buf_post_s *uf_bp_msg;
	u16 i;
	u16 buf_len;

	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);

	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
	     i++, uf_bp_msg++) {
		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));

		uf_bp_msg->buf_tag = i;
		buf_len = sizeof(struct bfa_uf_buf_s);
		/* length travels to firmware in big-endian */
		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
			    bfa_fn_lpu(ufm->bfa));
		bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
	}

	/*
	 * advance pointer beyond consumed memory
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
}
/*
 * Carve the bfa_uf_s array out of the module KVA region, bind each UF
 * to its per-UF DMA buffer, and queue all UFs on the free list.
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm)
{
	u16 i;
	struct bfa_uf_s   *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = BFA_PER_UF_DMA_SZ;
		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
/*
 * Claim all UF-module memory: the UF descriptors first, then the
 * pre-built buffer-post messages (order fixes the KVA layout).
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
/*
 * Compute the DMA and KVA memory needed by the UF module: one
 * BFA_PER_UF_DMA_SZ DMA buffer per unsolicited frame (spread across
 * segments) plus a KVA descriptor and pre-built post message per UF.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_uf = 0;

	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

	/* full segments first, remainder in the last segment */
	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
		if (num_ufs >= per_seg_uf) {
			num_ufs -= per_seg_uf;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_uf * BFA_PER_UF_DMA_SZ);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_ufs * BFA_PER_UF_DMA_SZ);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
/*
 * Attach-time initialization: set up the module queues and claim the
 * UF descriptors and pre-built post messages from the KVA region.
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);
	INIT_LIST_HEAD(&ufm->uf_unused_q);

	uf_mem_claim(ufm);
}
/*
 * Module-interface hook: nothing to do at detach time.
 */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
struct bfa_uf_s *uf;
bfa_q_deq(&uf_mod->uf_free_q, &uf);
return uf;
}
/*
 * Return a UF to the free list.
 */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
/*
 * Hand one UF buffer to firmware by copying its pre-built BUF_POST
 * message onto the request queue; on success the UF moves to the
 * posted queue.  Fails without retry if the request queue is full.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
/*
 * Post all free unsolicited-frame buffers to the firmware.
 *
 * Stops on the first failure (no FCXP request-queue space).  The
 * buffer whose post failed is returned to the free list so it is not
 * leaked; it will be re-posted on the next bfa_uf_free() cycle.
 */
static void
bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s *uf;

	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK) {
			/*
			 * Fix: bfa_uf_post() does not queue the uf anywhere
			 * on failure, so dropping it here would lose the
			 * buffer permanently.  Put it back on the free list.
			 */
			bfa_uf_put(uf_mod, uf);
			break;
		}
	}
}
/*
 * Handle a BFI_UF_I2H_FRM_RCVD event: locate the posted buffer by
 * tag, fix up byte order, log the frame, and deliver it to the upper
 * layer (synchronously when an FCS instance is attached, otherwise
 * via the completion-callback queue).
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	struct bfa_uf_buf_s *uf_buf;
	uint8_t *buf;
	struct fchs_s *fchs;

	uf_buf = (struct bfa_uf_buf_s *)
			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
	buf = &uf_buf->d[0];

	/* firmware fields arrive big-endian */
	m->frm_len = be16_to_cpu(m->frm_len);
	m->xfr_len = be16_to_cpu(m->xfr_len);

	fchs = (struct fchs_s *)uf_buf;

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	/* a frame must at least contain a complete FC header */
	WARN_ON(uf->data_len < sizeof(struct fchs_s));

	if (uf->data_len == sizeof(struct fchs_s)) {
		/* header-only frame: log just the FC header */
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
			uf->data_len, (struct fchs_s *)buf);
	} else {
		/* log the FC header plus the first payload word */
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
				BFA_PL_EID_RX, uf->data_len,
				(struct fchs_s *)buf, pld_w0);
	}

	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
/* Module stop hook -- nothing to do for the UF module. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}

/*
 * IOC failure handling: reclaim all outstanding buffers.  Unused ufs
 * (trimmed earlier by bfa_uf_res_recfg) rejoin the free list, and
 * every firmware-posted uf is pulled back since the IOC is gone.
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	/* Enqueue unused uf resources to free_q */
	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);

	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}

/* Module start hook: hand every free buffer to the firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
/*
 * Register handler for all unsolicited receive frames.
 *
 * @param[in]	bfa		BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}

/*
 * Free an unsolicited frame back to BFA.
 *
 * @param[in]	uf		unsolicited frame to be freed
 *
 * @return None
 */
void
bfa_uf_free(struct bfa_uf_s *uf)
{
	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
	/* a buffer is free again -- try to replenish the firmware's pool */
	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}
/*
 * uf_pub BFA uf module public functions
 */

/* Dispatch UF-class firmware messages to their handlers. */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		/* unexpected message id from firmware */
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
/*
 * Trim the driver's UF pool down to the number of buffers the
 * firmware actually supports: move the excess from the free list to
 * the unused list (returned to free on IOC disable).
 */
void
bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
{
	struct bfa_uf_mod_s *mod = BFA_UF_MOD(bfa);
	struct list_head *qe;
	int i;

	/* assumes num_uf_fw <= mod->num_ufs and that the free list holds
	 * at least the excess -- TODO confirm callers guarantee this */
	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
		bfa_q_deq_tail(&mod->uf_free_q, &qe);
		list_add_tail(qe, &mod->uf_unused_q);
	}
}
/*
 * Dport forward declarations
 */

/*
 * BFA DPORT state machine events
 */
enum bfa_dport_sm_event {
	BFA_DPORT_SM_ENABLE	= 1,	/* dport enable event */
	BFA_DPORT_SM_DISABLE	= 2,	/* dport disable event */
	BFA_DPORT_SM_FWRSP	= 3,	/* fw enable/disable rsp */
	BFA_DPORT_SM_QRESUME	= 4,	/* CQ space available */
	BFA_DPORT_SM_HWFAIL	= 5,	/* IOC h/w failure */
};

/* One handler per SM state; transitions are in the handlers below. */
static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
				  enum bfa_dport_sm_event event);
static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
					enum bfa_dport_sm_event event);
static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
				  enum bfa_dport_sm_event event);
static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
				 enum bfa_dport_sm_event event);
static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
					 enum bfa_dport_sm_event event);
static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
				   enum bfa_dport_sm_event event);
static void bfa_dport_qresume(void *cbarg);
static void bfa_dport_req_comp(struct bfa_dport_s *dport,
			       bfi_diag_dport_rsp_t *msg);

/*
 * BFA fcdiag module
 */
#define BFA_DIAG_QTEST_TOV	1000    /* msec */
/*
 * Reflect the loopback lock into the fcport diag-busy flag: the port
 * is busy for diagnostics exactly while a loopback test holds the
 * lock.
 */
static void
bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);

	fcport->diag_busy = fcdiag->lb.lock ? BFA_TRUE : BFA_FALSE;
}
/* fcdiag claims no memory of its own (common DIAG module owns it). */
static void
bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
}

/*
 * Attach the fcdiag module and initialise the embedded d-port state
 * machine: start disabled, with a request-queue wait element ready to
 * resume a pending enable/disable when CQ space frees up.
 */
static void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	fcdiag->bfa = bfa;
	fcdiag->trcmod = bfa->trcmod;
	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
	dport->bfa = bfa;
	bfa_sm_set_state(dport, bfa_dport_sm_disabled);
	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
	dport->cbfn = NULL;
	dport->cbarg = NULL;
}
/*
 * IOC failure: fail any loopback test in flight and forward the
 * hardware-failure event to the d-port state machine.
 */
static void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	bfa_trc(fcdiag, fcdiag->lb.lock);
	if (fcdiag->lb.lock) {
		/* complete the user's loopback request with IOC_FAILURE */
		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
		fcdiag->lb.lock = 0;
		bfa_fcdiag_set_busy_status(fcdiag);
	}

	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
}

/* Detach hook -- nothing to release. */
static void
bfa_fcdiag_detach(struct bfa_s *bfa)
{
}

/* Module start hook -- nothing to do. */
static void
bfa_fcdiag_start(struct bfa_s *bfa)
{
}

/* Module stop hook -- nothing to do. */
static void
bfa_fcdiag_stop(struct bfa_s *bfa)
{
}
/*
 * Queue-test timer expiry: no firmware response arrived within
 * BFA_DIAG_QTEST_TOV, so fail the test with BFA_STATUS_ETIMER,
 * complete the user callback, and release the qtest lock.
 */
static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = cbarg;
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

	bfa_trc(fcdiag, fcdiag->qtest.all);
	bfa_trc(fcdiag, fcdiag->qtest.count);

	fcdiag->qtest.timer_active = 0;

	res->status = BFA_STATUS_ETIMER;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	if (fcdiag->qtest.all)
		/* NOTE(review): reports the 'all' flag in the queue field
		 * for all-queue mode -- confirm this is intentional */
		res->queue = fcdiag->qtest.all;

	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
	fcdiag->qtest.status = BFA_STATUS_ETIMER;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
/*
 * Build and post one queue-test message, filled with
 * QTEST_PAT_DEFAULT, on the queue currently under test.  Returns
 * BFA_STATUS_DEVBUSY when that request queue has no free entry.
 */
static bfa_status_t
bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
{
	u32 i;
	struct bfi_diag_qtest_req_s *req;

	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
	if (!req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
			bfa_fn_lpu(fcdiag->bfa));

	/* fill the payload with the test pattern */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
		req->data[i] = QTEST_PAT_DEFAULT;

	bfa_trc(fcdiag, fcdiag->qtest.queue);
	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
	return BFA_STATUS_OK;
}
/*
 * Queue-test response handler.  Verifies the echoed payload is the
 * bitwise complement of the pattern sent, then either iterates
 * (remaining count on the same queue, or next queue in "all" mode)
 * or completes the test and releases the qtest lock.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active */
	if (!fcdiag->qtest.timer_active) {
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/* Check result: firmware echoes the complemented pattern back */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			/* more iterations on the same queue */
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			/* "all" mode: advance to the next CQ */
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop timer when we comp all queue */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}

	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
/*
 * Loopback response handler: copy the big-endian firmware frame
 * counters into the user's result structure, complete the request,
 * and clear the diag-busy status.
 */
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
			struct bfi_diag_lb_rsp_s *rsp)
{
	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

	res->numtxmfrm = be32_to_cpu(rsp->res.numtxmfrm);
	res->numosffrm = be32_to_cpu(rsp->res.numosffrm);
	res->numrcvfrm = be32_to_cpu(rsp->res.numrcvfrm);
	res->badfrminf = be32_to_cpu(rsp->res.badfrminf);
	res->badfrmnum = be32_to_cpu(rsp->res.badfrmnum);
	res->status = rsp->res.status;
	fcdiag->lb.status = rsp->res.status;
	bfa_trc(fcdiag, fcdiag->lb.status);
	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
	fcdiag->lb.lock = 0;
	bfa_fcdiag_set_busy_status(fcdiag);
}
/*
 * Build and post the loopback request on the DIAG queue.  Returns
 * BFA_STATUS_DEVBUSY when no request-queue entry is free.
 */
static bfa_status_t
bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
			struct bfa_diag_loopback_s *loopback)
{
	struct bfi_diag_lb_req_s *lb_req;

	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
	if (!lb_req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
		bfa_fn_lpu(fcdiag->bfa));

	lb_req->lb_mode = loopback->lb_mode;
	lb_req->speed = loopback->speed;
	lb_req->loopcnt = loopback->loopcnt;
	lb_req->pattern = loopback->pattern;

	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);

	bfa_trc(fcdiag, loopback->lb_mode);
	bfa_trc(fcdiag, loopback->speed);
	bfa_trc(fcdiag, loopback->loopcnt);
	bfa_trc(fcdiag, loopback->pattern);
	return BFA_STATUS_OK;
}
/*
 * cpe/rme intr handler
 */

/* Dispatch DIAG-class firmware responses to their handlers. */
void
bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	switch (msg->mhdr.msg_id) {
	case BFI_DIAG_I2H_LOOPBACK:
		bfa_fcdiag_loopback_comp(fcdiag,
				(struct bfi_diag_lb_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_QTEST:
		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
		break;
	case BFI_DIAG_I2H_DPORT:
		bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg);
		break;
	default:
		/* unexpected DIAG message id */
		bfa_trc(fcdiag, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
/*
* Loopback test
*
* @param[in] *bfa - bfa data struct
* @param[in] opmode - port operation mode
* @param[in] speed - port speed
* @param[in] lpcnt - loop count
* @param[in] pat - pattern to build packet
* @param[in] *result - pt to bfa_diag_loopback_result_t data struct
* @param[in] cbfn - callback function
* @param[in] cbarg - callback functioin arg
*
* @param[out]
*/
bfa_status_t
bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
enum bfa_port_speed speed, u32 lpcnt, u32 pat,
struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
void *cbarg)
{
struct bfa_diag_loopback_s loopback;
struct bfa_port_attr_s attr;
bfa_status_t status;
struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
if (!bfa_iocfc_is_operational(bfa))
return BFA_STATUS_IOC_NON_OP;
/* if port is PBC disabled, return error */
if (bfa_fcport_is_pbcdisabled(bfa)) {
bfa_trc(fcdiag, BFA_STATUS_PBC);
return BFA_STATUS_PBC;
}
if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
bfa_trc(fcdiag, opmode);
return BFA_STATUS_PORT_NOT_DISABLED;
}
/*
* Check if input speed is supported by the port mode
*/
if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
if (!(speed == BFA_PORT_SPEED_1GBPS ||
speed == BFA_PORT_SPEED_2GBPS ||
speed == BFA_PORT_SPEED_4GBPS ||
speed == BFA_PORT_SPEED_8GBPS ||
speed == BFA_PORT_SPEED_16GBPS ||
speed == BFA_PORT_SPEED_AUTO)) {
bfa_trc(fcdiag, speed);
return BFA_STATUS_UNSUPP_SPEED;
}
bfa_fcport_get_attr(bfa, &attr);
bfa_trc(fcdiag, attr.speed_supported);
if (speed > attr.speed_supported)
return BFA_STATUS_UNSUPP_SPEED;
} else {
if (speed != BFA_PORT_SPEED_10GBPS) {
bfa_trc(fcdiag, speed);
return BFA_STATUS_UNSUPP_SPEED;
}
}
/*
* For CT2, 1G is not supported
*/
if ((speed == BFA_PORT_SPEED_1GBPS) &&
(bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
bfa_trc(fcdiag, speed);
return BFA_STATUS_UNSUPP_SPEED;
}
/* For Mezz card, port speed entered needs to be checked */
if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
if (!(speed == BFA_PORT_SPEED_1GBPS ||
speed == BFA_PORT_SPEED_2GBPS ||
speed == BFA_PORT_SPEED_4GBPS ||
speed == BFA_PORT_SPEED_8GBPS ||
speed == BFA_PORT_SPEED_16GBPS ||
speed == BFA_PORT_SPEED_AUTO))
return BFA_STATUS_UNSUPP_SPEED;
} else {
if (speed != BFA_PORT_SPEED_10GBPS)
return BFA_STATUS_UNSUPP_SPEED;
}
}
/* check to see if there is another destructive diag cmd running */
if (fcdiag->lb.lock) {
bfa_trc(fcdiag, fcdiag->lb.lock);
return BFA_STATUS_DEVBUSY;
}
fcdiag->lb.lock = 1;
loopback.lb_mode = opmode;
loopback.speed = speed;
loopback.loopcnt = lpcnt;
loopback.pattern = pat;
fcdiag->lb.result = result;
fcdiag->lb.cbfn = cbfn;
fcdiag->lb.cbarg = cbarg;
memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
bfa_fcdiag_set_busy_status(fcdiag);
/* Send msg to fw */
status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
return status;
}
/*
 * DIAG queue test command
 *
 * @param[in] *bfa            - bfa data struct
 * @param[in] force           - 1: don't do ioc op checking
 * @param[in] queue           - queue no. to test
 * @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
 * @param[in] cbfn            - callback function
 * @param[in] *cbarg          - callback function arg
 *
 * @param[out]
 */
bfa_status_t
bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_status_t status;

	bfa_trc(fcdiag, force);
	bfa_trc(fcdiag, queue);

	if (!force && !bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->qtest.lock) {
		bfa_trc(fcdiag, fcdiag->qtest.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	fcdiag->qtest.lock = 1;
	fcdiag->qtest.cbfn = cbfn;
	fcdiag->qtest.cbarg = cbarg;
	fcdiag->qtest.result = result;
	fcdiag->qtest.count = QTEST_CNT_DEFAULT;

	/* Init test results */
	fcdiag->qtest.result->status = BFA_STATUS_OK;
	fcdiag->qtest.result->count = 0;

	/* send */
	if (queue < BFI_IOC_MAX_CQS) {
		/* test a single, caller-specified queue */
		fcdiag->qtest.result->queue = (u8)queue;
		fcdiag->qtest.queue = (u8)queue;
		fcdiag->qtest.all = 0;
	} else {
		/* out-of-range queue number means "test all queues" */
		fcdiag->qtest.result->queue = 0;
		fcdiag->qtest.queue = 0;
		fcdiag->qtest.all = 1;
	}
	status = bfa_fcdiag_queuetest_send(fcdiag);

	/* Start a timer */
	if (status == BFA_STATUS_OK) {
		bfa_timer_start(bfa, &fcdiag->qtest.timer,
				bfa_fcdiag_queuetest_timeout, fcdiag,
				BFA_DIAG_QTEST_TOV);
		fcdiag->qtest.timer_active = 1;
	}
	return status;
}
/*
 * DIAG PLB is running
 *
 * @param[in] *bfa            - bfa data struct
 *
 * @param[out]
 */
bfa_status_t
bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	/* the lb lock is held for the whole duration of a loopback test */
	return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
}
/*
 * D-port
 */

static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
					enum bfi_dport_req req);

/*
 * Complete the pending user enable/disable request, if any, with
 * @bfa_status, then forget the callback (one-shot semantics).
 */
static void
bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
{
	if (dport->cbfn != NULL) {
		dport->cbfn(dport->cbarg, bfa_status);
		dport->cbfn = NULL;
		dport->cbarg = NULL;
	}
}
/* Disabled state: an ENABLE request starts the enable sequence. */
static void
bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_ENABLE:
		bfa_fcport_dportenable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		else
			/* no request-queue space yet -- wait for QRESUME */
			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		/* Already disabled */
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* ignore */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/* Waiting for request-queue space to send the enable request. */
static void
bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
			enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* cancel the queued wait element before bailing out */
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/* Enable request sent; waiting for the firmware response. */
static void
bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/* D-port enabled: a DISABLE request starts the disable sequence. */
static void
bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_ENABLE:
		/* Already enabled */
		break;

	case BFA_DPORT_SM_DISABLE:
		bfa_fcport_dportdisable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		else
			/* no request-queue space yet -- wait for QRESUME */
			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/* Waiting for request-queue space to send the disable request. */
static void
bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
			enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_reqq_wcancel(&dport->reqq_wait);
		/* disable is effectively achieved by the IOC failure */
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/* Disable request sent; waiting for the firmware response. */
static void
bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/*
 * Post a d-port enable/disable request to the firmware on the DIAG
 * request queue.  Returns BFA_FALSE (and arms the reqq wait element,
 * which fires BFA_DPORT_SM_QRESUME) when the queue is full.
 */
static bfa_boolean_t
bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
{
	struct bfi_diag_dport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	dport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
	if (!m) {
		/*
		 * Fix: wait on the DIAG request queue -- the queue that had
		 * no free entry -- not BFA_REQQ_PORT.  Waiting on the wrong
		 * queue let QRESUME fire while the DIAG queue was still
		 * full, or never fire at all.
		 */
		bfa_reqq_wait(dport->bfa, BFA_REQQ_DIAG, &dport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
		bfa_fn_lpu(dport->bfa));
	m->req = req;
	m->msgtag = dport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
	return BFA_TRUE;
}
/* Request-queue resume callback: retry the pending enable/disable. */
static void
bfa_dport_qresume(void *cbarg)
{
	struct bfa_dport_s *dport = cbarg;

	bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
}

/* Firmware response: advance the SM, then run the user callback. */
static void
bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg)
{
	bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
	bfa_cb_fcdiag_dport(dport, msg->status);
}
/*
 * Dport enable
 *
 * @param[in] *bfa            - bfa data struct
 */
bfa_status_t
bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	/*
	 * Dport is not support in MEZZ card
	 */
	if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
		bfa_trc(dport->bfa, BFA_STATUS_PBC);
		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
	}

	/*
	 * Check to see if IOC is down
	 */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(dport->bfa, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	/*
	 * Check if port mode is FC port
	 */
	if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
		bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
		return BFA_STATUS_CMD_NOTSUPP_CNA;
	}

	/*
	 * Check if port is in LOOP mode
	 */
	if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
	    (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_TOPOLOGY_LOOP;
	}

	/*
	 * Check if port is TRUNK mode
	 */
	if (bfa_fcport_is_trunk_enabled(bfa)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_ERROR_TRUNK_ENABLED;
	}

	/*
	 * Check to see if port is disable or in dport state
	 */
	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if dport is busy
	 */
	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) {
		return BFA_STATUS_DEVBUSY;
	}

	/*
	 * Check if dport is already enabled
	 */
	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DPORT_ENABLED;
	}

	/* all preconditions met -- kick the state machine */
	dport->cbfn = cbfn;
	dport->cbarg = cbarg;

	bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}
/*
 * Dport disable
 *
 * @param[in] *bfa            - bfa data struct
 */
bfa_status_t
bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(dport->bfa, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	/*
	 * Check to see if port is disable or in dport state
	 */
	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if dport is busy
	 */
	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
		return BFA_STATUS_DEVBUSY;

	/*
	 * Check if dport is already disabled
	 */
	if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DPORT_DISABLED;
	}

	/* all preconditions met -- kick the state machine */
	dport->cbfn = cbfn;
	dport->cbarg = cbarg;

	bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}
/*
 * Get D-port state
 *
 * @param[in] *bfa            - bfa data struct
 */
bfa_status_t
bfa_dport_get_state(struct bfa_s *bfa, enum bfa_dport_state *state)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	/* map SM states (including transitional qwait states) onto the
	 * externally visible bfa_dport_state enum */
	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled))
		*state = BFA_DPORT_ST_ENABLED;
	else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
		 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait))
		*state = BFA_DPORT_ST_ENABLING;
	else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled))
		*state = BFA_DPORT_ST_DISABLED;
	else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
		 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
		*state = BFA_DPORT_ST_DISABLING;
	else {
		bfa_trc(dport->bfa, BFA_STATUS_EINVAL);
		return BFA_STATUS_EINVAL;
	}
	return BFA_STATUS_OK;
}
| gpl-2.0 |
TeamBliss-Devices/android_kernel_nvidia_shieldtablet | arch/x86/kernel/apic/x2apic_cluster.c | 2281 | 7701 | #include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>
#include <linux/cpu.h>
#include <asm/smp.h>
#include <asm/x2apic.h>
/* Per-cpu logical APIC id (the LDR value), cached at LDR setup time. */
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
/* For each cpu: mask of online cpus sharing its x2apic cluster. */
static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
/* Per-cpu scratch mask, used while sending IPIs with irqs disabled. */
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

/* Select this driver whenever x2apic mode is enabled. */
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

/* Cluster id is the upper 16 bits of the logical APIC id. */
static inline u32 x2apic_cluster(int cpu)
{
	return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
}
/*
 * Send @vector to the cpus in @mask using one IPI per x2apic cluster.
 * @apic_dest selects whether the current cpu is included
 * (APIC_DEST_ALLINC) or excluded (APIC_DEST_ALLBUT).
 */
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	struct cpumask *cpus_in_cluster_ptr;
	struct cpumask *ipi_mask_ptr;
	unsigned int cpu, this_cpu;
	unsigned long flags;
	u32 dest;

	x2apic_wrmsr_fence();

	local_irq_save(flags);

	this_cpu = smp_processor_id();

	/*
	 * We are to modify mask, so we need an own copy
	 * and be sure it's manipulated with irq off.
	 */
	ipi_mask_ptr = __raw_get_cpu_var(ipi_mask);
	cpumask_copy(ipi_mask_ptr, mask);

	/*
	 * The idea is to send one IPI per cluster.
	 */
	for_each_cpu(cpu, ipi_mask_ptr) {
		unsigned long i;

		cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
		dest = 0;

		/* Collect cpus in cluster. */
		for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
			if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
				dest |= per_cpu(x86_cpu_to_logical_apicid, i);
		}

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
		/*
		 * Cluster sibling cpus should be discarded now so
		 * we would not send IPI them second time.
		 */
		cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
	}

	local_irq_restore(flags);
}
/* IPI to every cpu in @mask, including the sender when present. */
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

/* IPI to every cpu in @mask except the sending cpu. */
static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

/* IPI to every online cpu except the sender. */
static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

/* IPI to every online cpu, including the sender. */
static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}
/*
 * Compute the logical-mode destination id for an interrupt aimed at
 * cpumask & andmask: take the cluster of the first online matching
 * cpu and OR together the logical ids of all matching online cpus of
 * that same cluster.  Returns -EINVAL when nothing matches.
 */
static int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask,
			      unsigned int *apicid)
{
	u32 dest = 0;
	u16 cluster;
	int i;

	for_each_cpu_and(i, cpumask, andmask) {
		if (!cpumask_test_cpu(i, cpu_online_mask))
			continue;
		dest = per_cpu(x86_cpu_to_logical_apicid, i);
		cluster = x2apic_cluster(i);
		break;
	}

	/* NOTE(review): relies on a logical apicid never being 0 for an
	 * online cpu; otherwise a valid first match would be rejected */
	if (!dest)
		return -EINVAL;

	for_each_cpu_and(i, cpumask, andmask) {
		if (!cpumask_test_cpu(i, cpu_online_mask))
			continue;
		if (cluster != x2apic_cluster(i))
			continue;
		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
	}

	*apicid = dest;

	return 0;
}
/*
 * Called on each cpu as its LDR is set up: cache the logical apic id
 * and cross-link this cpu with every online cpu sharing its cluster.
 */
static void init_x2apic_ldr(void)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

	__cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
	for_each_online_cpu(cpu) {
		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
			continue;
		/* cluster membership is symmetric: record both directions */
		__cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu));
		__cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu));
	}
}
/*
 * At CPU state changes, update the x2apic cluster sibling info.
 */
static int __cpuinit
update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int this_cpu = (unsigned long)hcpu;
	unsigned int cpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
		/* allocate both per-cpu masks before the cpu comes online;
		 * roll back the first on failure of the second */
		if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
					GFP_KERNEL)) {
			err = -ENOMEM;
		} else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
					       GFP_KERNEL)) {
			free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
			err = -ENOMEM;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
		/* unlink this cpu from its cluster siblings, then free */
		for_each_online_cpu(cpu) {
			if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
				continue;
			__cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu));
			__cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu));
		}
		free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
		free_cpumask_var(per_cpu(ipi_mask, this_cpu));
		break;
	}

	return notifier_from_errno(err);
}

static struct notifier_block __refdata x2apic_cpu_notifier = {
	.notifier_call = update_clusterinfo,
};
/*
 * Boot-cpu setup: allocate this cpu's masks (boot cannot proceed if
 * this fails, hence BUG_ON) and register the hotplug notifier that
 * manages the masks for secondary cpus.
 */
static int x2apic_init_cpu_notifier(void)
{
	int cpu = smp_processor_id();

	zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
	zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);

	BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));

	__cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
	register_hotcpu_notifier(&x2apic_cpu_notifier);
	return 1;
}
/*
 * Probe hook: claim this driver only when x2apic mode is active, in
 * which case the per-cpu bookkeeping is initialised as well.
 */
static int x2apic_cluster_probe(void)
{
	if (!x2apic_mode)
		return 0;

	return x2apic_init_cpu_notifier();
}
/* Any cpu may be targeted by interrupts under this driver. */
static const struct cpumask *x2apic_cluster_target_cpus(void)
{
	return cpu_all_mask;
}

/*
 * Each x2apic cluster is an allocation domain.
 */
static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
					     const struct cpumask *mask)
{
	/*
	 * To minimize vector pressure, default case of boot, device bringup
	 * etc will use a single cpu for the interrupt destination.
	 *
	 * On explicit migration requests coming from irqbalance etc,
	 * interrupts will be routed to the x2apic cluster (cluster-id
	 * derived from the first cpu in the mask) members specified
	 * in the mask.
	 */
	if (mask == x2apic_cluster_target_cpus())
		cpumask_copy(retmask, cpumask_of(cpu));
	else
		cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
}
/* APIC driver operations for x2apic logical (cluster) mode. */
static struct apic apic_x2apic_cluster = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= x2apic_cluster_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= cluster_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	/* hooks below are unused in x2apic mode */
	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	/* APIC register access goes through MSRs in x2apic mode */
	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};
apic_driver(apic_x2apic_cluster);
| gpl-2.0 |
AndroidPrimou/android_kernel_htc_msm7x30 | drivers/net/ppp_synctty.c | 2537 | 17953 | /*
* PPP synchronous tty channel driver for Linux.
*
* This is a ppp channel driver that can be used with tty device drivers
* that are frame oriented, such as synchronous HDLC devices.
*
* Complete PPP frames without encoding/decoding are exchanged between
* the channel driver and the device driver.
*
* The async map IOCTL codes are implemented to keep the user mode
* applications happy if they call them. Synchronous PPP does not use
* the async maps.
*
* Copyright 1999 Paul Mackerras.
*
* Also touched by the grubby hands of Paul Fulghum paulkf@microgate.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* This driver provides the encapsulation and framing for sending
* and receiving PPP frames over sync serial lines. It relies on
* the generic PPP layer to give it frames to send and to process
* received frames. It implements the PPP line discipline.
*
* Part of the code in this driver was inspired by the old async-only
* PPP driver, written by Michael Callahan and Al Longyear, and
* subsequently hacked by Paul Mackerras.
*
* ==FILEVERSION 20040616==
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>
#define PPP_VERSION "2.4.2"
/* Structure for storing local state. */
struct syncppp {
	struct tty_struct *tty;		/* underlying tty device */
	unsigned int	flags;		/* channel option flags (SC_*) */
	unsigned int	rbits;		/* receive status bits (SC_RCV_*) */
	int		mru;		/* max receive unit */
	spinlock_t	xmit_lock;	/* protects transmit-side state */
	spinlock_t	recv_lock;	/* protects receive-side state */
	unsigned long	xmit_flags;	/* XMIT_WAKEUP / XMIT_FULL bits */
	u32		xaccm[8];	/* async map -- unused by sync PPP,
					   kept for ioctl compatibility */
	u32		raccm;		/* receive async map -- ditto */
	unsigned int	bytes_sent;	/* stats: bytes transmitted */
	unsigned int	bytes_rcvd;	/* stats: bytes received */
	struct sk_buff	*tpkt;		/* transmit packet in progress */
	unsigned long	last_xmit;	/* NOTE(review): presumably jiffies
					   of last transmit -- confirm */
	struct sk_buff_head rqueue;	/* queue of received frames */
	struct tasklet_struct tsk;	/* runs ppp_sync_process() */
	atomic_t	refcnt;		/* reference count */
	struct completion dead_cmp;	/* NOTE(review): presumably completed
					   when the last ref drops -- confirm */
	struct ppp_channel chan;	/* interface to generic ppp layer */
};

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP	0
#define XMIT_FULL	1

/* Bits in rbits */
#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

#define PPPSYNC_MAX_RQLEN	32	/* arbitrary */

/*
 * Prototypes.
 */
static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
			  unsigned long arg);
static void ppp_sync_process(unsigned long arg);
static int ppp_sync_push(struct syncppp *ap);
static void ppp_sync_flush_output(struct syncppp *ap);
static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
			   char *flags, int count);

/* Channel operations handed to the generic PPP layer. */
static const struct ppp_channel_ops sync_ops = {
	.start_xmit = ppp_sync_send,
	.ioctl      = ppp_sync_ioctl,
};
/*
* Utility procedures to print a buffer in hex/ascii
*/
/*
 * Render @count input bytes as uppercase hex into @out.  Each byte
 * occupies three output cells ("XX " layout): two hex digits plus one
 * cell that is deliberately left untouched - the caller pre-fills the
 * line with spaces, so the untouched cell becomes the separator.
 */
static void
ppp_print_hex (register __u8 * out, const __u8 * in, int count)
{
	static const char digits[] = "0123456789ABCDEF";
	int i;

	for (i = 0; i < count; i++) {
		__u8 byte = in[i];

		out[3 * i]     = digits[(byte >> 4) & 0x0F];
		out[3 * i + 1] = digits[byte & 0x0F];
	}
}
/*
 * Render @count input bytes as printable ASCII into @out and
 * NUL-terminate the result.  Non-printable bytes become '.'; a literal
 * '%' is doubled so the string is safe to hand to printk.
 */
static void
ppp_print_char (register __u8 * out, const __u8 * in, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		__u8 c = in[i];

		if (c < 0x20 || c > 0x7e) {
			*out++ = '.';
		} else {
			*out++ = c;
			/* printk/syslogd has a bug !! */
			if (c == '%')
				*out++ = '%';
		}
	}
	*out = '\0';
}
/*
 * Hex/ASCII dump of @buf to the kernel log, 8 bytes per line, with an
 * optional @name header.  The ASCII column starts at offset 24 (8 * 3)
 * within the 44-byte line buffer.
 */
static void
ppp_print_buffer (const char *name, const __u8 *buf, int count)
{
	__u8 line[44];

	if (name != NULL)
		printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);

	while (count > 0) {
		int chunk = (count > 8) ? 8 : count;

		memset (line, 32, 44);
		ppp_print_hex (line, buf, chunk);
		ppp_print_char (&line[8 * 3], buf, chunk);
		printk(KERN_DEBUG "%s\n", line);
		buf += chunk;
		count -= chunk;
	}
}
/*
* Routines implementing the synchronous PPP line discipline.
*/
/*
* We have a potential race on dereferencing tty->disc_data,
* because the tty layer provides no locking at all - thus one
* cpu could be running ppp_synctty_receive while another
* calls ppp_synctty_close, which zeroes tty->disc_data and
* frees the memory that ppp_synctty_receive is using. The best
* way to fix this is to use a rwlock in the tty struct, but for now
* we use a single global rwlock for all ttys in ppp line discipline.
*
* FIXME: Fixed in tty_io nowadays.
*/
static DEFINE_RWLOCK(disc_data_lock);
/*
 * Look up the syncppp state attached to @tty and take a reference on
 * it.  Returns NULL if the tty is no longer in this line discipline.
 * The caller must drop the reference with sp_put().
 */
static struct syncppp *sp_get(struct tty_struct *tty)
{
	struct syncppp *ap;

	read_lock(&disc_data_lock);
	ap = tty->disc_data;
	if (ap != NULL)
		atomic_inc(&ap->refcnt);	/* keeps ppp_sync_close() waiting */
	read_unlock(&disc_data_lock);
	return ap;
}
/*
 * Drop a reference taken by sp_get().  The final put wakes up
 * ppp_sync_close(), which waits on dead_cmp before freeing @ap.
 */
static void sp_put(struct syncppp *ap)
{
	if (atomic_dec_and_test(&ap->refcnt))
		complete(&ap->dead_cmp);
}
/*
* Called when a tty is put into sync-PPP line discipline.
*/
static int
ppp_sync_open(struct tty_struct *tty)
{
	struct syncppp *ap;
	int err;
	int speed;

	/* We need a tty write method to push frames to the device. */
	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	err = -ENOMEM;
	if (!ap)
		goto out;

	/* initialize the syncppp structure */
	ap->tty = tty;
	ap->mru = PPP_MRU;
	spin_lock_init(&ap->xmit_lock);
	spin_lock_init(&ap->recv_lock);
	ap->xaccm[0] = ~0U;
	ap->xaccm[3] = 0x60000000U;	/* default: escape 0x7d, 0x7e */
	ap->raccm = ~0U;

	skb_queue_head_init(&ap->rqueue);
	tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);

	atomic_set(&ap->refcnt, 1);	/* the tty attachment holds one ref */
	init_completion(&ap->dead_cmp);

	ap->chan.private = ap;
	ap->chan.ops = &sync_ops;
	ap->chan.mtu = PPP_MRU;
	ap->chan.hdrlen = 2;	/* for A/C bytes */
	speed = tty_get_baud_rate(tty);
	ap->chan.speed = speed;
	err = ppp_register_channel(&ap->chan);
	if (err)
		goto out_free;

	/* Publish only after the channel is registered successfully. */
	tty->disc_data = ap;
	tty->receive_room = 65536;
	return 0;

 out_free:
	kfree(ap);
 out:
	return err;
}
/*
* Called when the tty is put into another line discipline
* or it hangs up. We have to wait for any cpu currently
* executing in any of the other ppp_synctty_* routines to
* finish before we can call ppp_unregister_channel and free
* the syncppp struct. This routine must be called from
* process context, not interrupt or softirq context.
*/
static void
ppp_sync_close(struct tty_struct *tty)
{
	struct syncppp *ap;

	/* Detach from the tty so sp_get() can find no new users. */
	write_lock_irq(&disc_data_lock);
	ap = tty->disc_data;
	tty->disc_data = NULL;
	write_unlock_irq(&disc_data_lock);
	if (!ap)
		return;

	/*
	 * We have now ensured that nobody can start using ap from now
	 * on, but we have to wait for all existing users to finish.
	 * Note that ppp_unregister_channel ensures that no calls to
	 * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
	 * by the time it returns.
	 */
	if (!atomic_dec_and_test(&ap->refcnt))	/* drop the tty's own ref */
		wait_for_completion(&ap->dead_cmp);
	tasklet_kill(&ap->tsk);

	ppp_unregister_channel(&ap->chan);
	skb_queue_purge(&ap->rqueue);
	kfree_skb(ap->tpkt);	/* kfree_skb(NULL) is a no-op */
	kfree(ap);
}
/*
* Called on tty hangup in process context.
*
* Wait for I/O to driver to complete and unregister PPP channel.
* This is already done by the close routine, so just call that.
*/
static int ppp_sync_hangup(struct tty_struct *tty)
{
	/* Teardown is identical to a close; reuse it. */
	ppp_sync_close(tty);
	return 0;
}
/*
* Read does nothing - no data is ever available this way.
* Pppd reads and writes packets via /dev/ppp instead.
*/
static ssize_t
ppp_sync_read(struct tty_struct *tty, struct file *file,
	       unsigned char __user *buf, size_t count)
{
	/* Never any data here; pppd uses /dev/ppp instead. */
	return -EAGAIN;
}
/*
* Write on the tty does nothing, the packets all come in
* from the ppp generic stuff.
*/
static ssize_t
ppp_sync_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t count)
{
	/* Writes go through the generic ppp layer, not the tty. */
	return -EAGAIN;
}
/*
 * Ioctls on the tty itself (channel-level ioctls are handled by
 * ppp_sync_ioctl below).  Unrecognised commands fall through to
 * tty_mode_ioctl.
 */
static int
ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
		  unsigned int cmd, unsigned long arg)
{
	struct syncppp *ap = sp_get(tty);
	int __user *p = (int __user *)arg;
	int err, val;

	if (!ap)
		return -ENXIO;
	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGCHAN:
		/* return the ppp channel index */
		err = -EFAULT;
		if (put_user(ppp_channel_index(&ap->chan), p))
			break;
		err = 0;
		break;

	case PPPIOCGUNIT:
		/* return the attached ppp unit number */
		err = -EFAULT;
		if (put_user(ppp_unit_number(&ap->chan), p))
			break;
		err = 0;
		break;

	case TCFLSH:
		/* flush our buffers and the serial port's buffer */
		if (arg == TCIOFLUSH || arg == TCOFLUSH)
			ppp_sync_flush_output(ap);
		err = tty_perform_flush(tty, arg);
		break;

	case FIONREAD:
		/* nothing readable from the tty side (see ppp_sync_read) */
		val = 0;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	default:
		err = tty_mode_ioctl(tty, file, cmd, arg);
		break;
	}

	sp_put(ap);
	return err;
}
/* No kernel lock - fine */
static unsigned int
ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
	/* Nothing ever becomes readable/writable on the tty itself. */
	return 0;
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
		  char *cflags, int count)
{
	struct syncppp *ap = sp_get(tty);
	unsigned long flags;

	if (!ap)
		return;
	/* Frame the data under the receive lock ... */
	spin_lock_irqsave(&ap->recv_lock, flags);
	ppp_sync_input(ap, buf, cflags, count);
	spin_unlock_irqrestore(&ap->recv_lock, flags);
	/* ... and let the tasklet hand queued frames to ppp_generic. */
	if (!skb_queue_empty(&ap->rqueue))
		tasklet_schedule(&ap->tsk);
	sp_put(ap);
	tty_unthrottle(tty);
}
static void
ppp_sync_wakeup(struct tty_struct *tty)
{
	struct syncppp *ap = sp_get(tty);

	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	if (!ap)
		return;
	/* The tty can take more data; have the tasklet retry the
	 * pending transmit via ppp_sync_push().
	 */
	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
	tasklet_schedule(&ap->tsk);
	sp_put(ap);
}
/* Line-discipline entry points for N_SYNC_PPP. */
static struct tty_ldisc_ops ppp_sync_ldisc = {
	.owner	= THIS_MODULE,
	.magic	= TTY_LDISC_MAGIC,
	.name	= "pppsync",
	.open	= ppp_sync_open,
	.close	= ppp_sync_close,
	.hangup	= ppp_sync_hangup,
	.read	= ppp_sync_read,
	.write	= ppp_sync_write,
	.ioctl	= ppp_synctty_ioctl,
	.poll	= ppp_sync_poll,
	.receive_buf = ppp_sync_receive,
	.write_wakeup = ppp_sync_wakeup,
};
/*
 * Module init: register the sync-PPP line discipline with the tty
 * layer.  Returns 0 on success or the tty layer's error code.
 */
static int __init
ppp_sync_init(void)
{
	int rc = tty_register_ldisc(N_SYNC_PPP, &ppp_sync_ldisc);

	if (rc != 0)
		printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
		       rc);
	return rc;
}
/*
* The following routines provide the PPP channel interface.
*/
/*
 * Channel-level ioctls called via the generic ppp layer.  The async
 * map commands are accepted purely so user-space stays happy (see the
 * file header) - synchronous PPP never consults the maps.
 */
static int
ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
	struct syncppp *ap = chan->private;
	int err, val;
	u32 accm[8];
	void __user *argp = (void __user *)arg;
	u32 __user *p = argp;

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = ap->flags | ap->rbits;
		if (put_user(val, (int __user *) argp))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, (int __user *) argp))
			break;
		ap->flags = val & ~SC_RCV_BITS;
		/* rbits is read by the receive path; update under lock */
		spin_lock_irq(&ap->recv_lock);
		ap->rbits = val & SC_RCV_BITS;
		spin_unlock_irq(&ap->recv_lock);
		err = 0;
		break;

	case PPPIOCGASYNCMAP:
		if (put_user(ap->xaccm[0], p))
			break;
		err = 0;
		break;
	case PPPIOCSASYNCMAP:
		if (get_user(ap->xaccm[0], p))
			break;
		err = 0;
		break;

	case PPPIOCGRASYNCMAP:
		if (put_user(ap->raccm, p))
			break;
		err = 0;
		break;
	case PPPIOCSRASYNCMAP:
		if (get_user(ap->raccm, p))
			break;
		err = 0;
		break;

	case PPPIOCGXASYNCMAP:
		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
			break;
		err = 0;
		break;
	case PPPIOCSXASYNCMAP:
		if (copy_from_user(accm, argp, sizeof(accm)))
			break;
		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
		err = 0;
		break;

	case PPPIOCGMRU:
		if (put_user(ap->mru, (int __user *) argp))
			break;
		err = 0;
		break;
	case PPPIOCSMRU:
		if (get_user(val, (int __user *) argp))
			break;
		/* never allow an MRU below the PPP minimum */
		if (val < PPP_MRU)
			val = PPP_MRU;
		ap->mru = val;
		err = 0;
		break;

	default:
		err = -ENOTTY;
	}
	return err;
}
/*
* This is called at softirq level to deliver received packets
* to the ppp_generic code, and to tell the ppp_generic code
* if we can accept more output now.
*/
/* Tasklet body: deliver queued receive frames and retry transmit. */
static void ppp_sync_process(unsigned long arg)
{
	struct syncppp *ap = (struct syncppp *) arg;
	struct sk_buff *skb;

	/* process received packets */
	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
		if (skb->len == 0) {
			/* zero length buffers indicate error */
			ppp_input_error(&ap->chan, 0);
			kfree_skb(skb);
		}
		else
			ppp_input(&ap->chan, skb);
	}

	/* try to push more stuff out; tell ppp_generic if we freed
	 * the transmit slot so it may hand us the next packet */
	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap))
		ppp_output_wakeup(&ap->chan);
}
/*
* Procedures for encapsulation and framing.
*/
/*
 * Adjust an outgoing frame's header per the negotiated options:
 * optionally strip the leading zero of a compressible protocol field
 * and prepend the address/control bytes.  Consumes @skb; returns the
 * (possibly reallocated) skb, or NULL on allocation failure.
 */
static struct sk_buff*
ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
{
	int proto;
	unsigned char *data;
	int islcp;

	data  = skb->data;
	proto = get_unaligned_be16(data);

	/* LCP packets with codes between 1 (configure-request)
	 * and 7 (code-reject) must be sent as though no options
	 * have been negotiated.
	 * NOTE(review): data[2] is read unconditionally - presumably
	 * ppp_generic never hands us a frame shorter than 3 bytes;
	 * worth confirming against the caller.
	 */
	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	/* compress protocol field if option enabled */
	if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp)
		skb_pull(skb,1);

	/* prepend address/control fields if necessary */
	if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
		if (skb_headroom(skb) < 2) {
			/* no headroom: copy into a fresh skb with room */
			struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
			if (npkt == NULL) {
				kfree_skb(skb);
				return NULL;
			}
			skb_reserve(npkt,2);
			skb_copy_from_linear_data(skb,
				      skb_put(npkt, skb->len), skb->len);
			kfree_skb(skb);
			skb = npkt;
		}
		skb_push(skb,2);
		skb->data[0] = PPP_ALLSTATIONS;
		skb->data[1] = PPP_UI;
	}

	ap->last_xmit = jiffies;

	if (skb && ap->flags & SC_LOG_OUTPKT)
		ppp_print_buffer ("send buffer", skb->data, skb->len);

	return skb;
}
/*
* Transmit-side routines.
*/
/*
* Send a packet to the peer over an sync tty line.
* Returns 1 iff the packet was accepted.
* If the packet was not accepted, we will call ppp_output_wakeup
* at some later time.
*/
static int
ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct syncppp *ap = chan->private;

	ppp_sync_push(ap);	/* drain whatever is already pending */

	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
		return 0;	/* already full */
	skb = ppp_sync_txmunge(ap, skb);
	if (skb != NULL)
		ap->tpkt = skb;
	else
		/* munge failed and freed the skb; the slot is free again.
		 * We still return 1: the packet has been consumed. */
		clear_bit(XMIT_FULL, &ap->xmit_flags);

	ppp_sync_push(ap);
	return 1;
}
/*
* Push as much data as possible out to the tty.
*/
static int
ppp_sync_push(struct syncppp *ap)
{
	int sent, done = 0;
	struct tty_struct *tty = ap->tty;
	int tty_stuffed = 0;

	/* Someone else holds the lock; they will see XMIT_WAKEUP. */
	if (!spin_trylock_bh(&ap->xmit_lock))
		return 0;
	for (;;) {
		/* a wakeup means the tty may accept data again */
		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
			tty_stuffed = 0;
		if (!tty_stuffed && ap->tpkt) {
			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
			sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len);
			if (sent < 0)
				goto flush;	/* error, e.g. loss of CD */
			if (sent < ap->tpkt->len) {
				/* partial write: wait for write_wakeup */
				tty_stuffed = 1;
			} else {
				kfree_skb(ap->tpkt);
				ap->tpkt = NULL;
				clear_bit(XMIT_FULL, &ap->xmit_flags);
				done = 1;
			}
			continue;
		}
		/* haven't made any progress */
		spin_unlock_bh(&ap->xmit_lock);
		/* re-check after dropping the lock: a wakeup or a new
		 * packet may have raced in while we still held it */
		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
		      (!tty_stuffed && ap->tpkt)))
			break;
		if (!spin_trylock_bh(&ap->xmit_lock))
			break;
	}
	return done;

flush:
	/* write error: drop the pending packet and free the slot */
	if (ap->tpkt) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	return done;
}
/*
* Flush output from our internal buffers.
* Called for the TCFLSH ioctl.
*/
static void
ppp_sync_flush_output(struct syncppp *ap)
{
	int done = 0;

	spin_lock_bh(&ap->xmit_lock);
	if (ap->tpkt != NULL) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	/* tell ppp_generic the transmit slot is free again */
	if (done)
		ppp_output_wakeup(&ap->chan);
}
/*
* Receive-side routines.
*/
/* called when the tty driver has data for us.
*
* Data is frame oriented: each call to ppp_sync_input is considered
* a whole frame. If the 1st flag byte is non-zero then the whole
* frame is considered to be in error and is tossed.
*/
/*
 * Turn one raw frame from the tty into an skb on ap->rqueue.  Runs
 * under ap->recv_lock (see ppp_sync_receive).  On any error a
 * zero-length skb is queued as an error marker for ppp_sync_process.
 */
static void
ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
		char *flags, int count)
{
	struct sk_buff *skb;
	unsigned char *p;

	if (count == 0)
		return;

	if (ap->flags & SC_LOG_INPKT)
		ppp_print_buffer ("receive buffer", buf, count);

	/* stuff the chars in the skb */
	skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
	if (!skb) {
		printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
		goto err;
	}
	/* Try to get the payload 4-byte aligned */
	if (buf[0] != PPP_ALLSTATIONS)
		skb_reserve(skb, 2 + (buf[0] & 1));

	if (flags && *flags) {
		/* error flag set, ignore frame */
		goto err;
	} else if (count > skb_tailroom(skb)) {
		/* packet overflowed MRU */
		goto err;
	}

	p = skb_put(skb, count);
	memcpy(p, buf, count);

	/* strip address/control field if present */
	p = skb->data;
	if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
		/* chop off address/control */
		if (skb->len < 3)
			goto err;
		p = skb_pull(skb, 2);
	}

	/* decompress protocol field if compressed */
	if (p[0] & 1) {
		/* protocol is compressed: re-insert the high zero byte */
		skb_push(skb, 1)[0] = 0;
	} else if (skb->len < 2)
		goto err;

	/* queue the frame to be processed */
	skb_queue_tail(&ap->rqueue, skb);
	return;

err:
	/* queue zero length packet as error indication */
	if (skb || (skb = dev_alloc_skb(0))) {
		skb_trim(skb, 0);
		skb_queue_tail(&ap->rqueue, skb);
	}
}
/*
 * Module exit: detach the line discipline from the tty layer.
 * Failure can only be logged at this point.
 */
static void __exit
ppp_sync_cleanup(void)
{
	int rc = tty_unregister_ldisc(N_SYNC_PPP);

	if (rc != 0)
		printk(KERN_ERR "failed to unregister Sync PPP line discipline\n");
}
module_init(ppp_sync_init);
module_exit(ppp_sync_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SYNC_PPP);
| gpl-2.0 |
scue/lenovo_k860i_kernel | drivers/staging/generic_serial/vme_scc.c | 2537 | 30956 | /*
* drivers/char/vme_scc.c: MVME147, MVME162, BVME6000 SCC serial ports
* implementation.
* Copyright 1999 Richard Hirst <richard@sleepie.demon.co.uk>
*
* Based on atari_SCC.c which was
* Copyright 1994-95 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
* Partially based on PC-Linux serial.c by Linus Torvalds and Theodore Ts'o
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
*/
#include <linux/module.h>
#include <linux/kdev_t.h>
#include <asm/io.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/mm.h>
#include <linux/serial.h>
#include <linux/fcntl.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/miscdevice.h>
#include <linux/console.h>
#include <linux/init.h>
#include <asm/setup.h>
#include <asm/bootinfo.h>
#ifdef CONFIG_MVME147_SCC
#include <asm/mvme147hw.h>
#endif
#ifdef CONFIG_MVME162_SCC
#include <asm/mvme16xhw.h>
#endif
#ifdef CONFIG_BVME6000_SCC
#include <asm/bvme6000hw.h>
#endif
#include <linux/generic_serial.h>
#include "scc.h"
#define CHANNEL_A 0
#define CHANNEL_B 1
#define SCC_MINOR_BASE 64
/* Shadows for all SCC write registers */
static unsigned char scc_shadow[2][16];
/* Location to access for SCC register access delay */
static volatile unsigned char *scc_del = NULL;
/* To keep track of STATUS_REG state for detection of Ext/Status int source */
static unsigned char scc_last_status_reg[2];
/***************************** Prototypes *****************************/
/* Function prototypes */
static void scc_disable_tx_interrupts(void * ptr);
static void scc_enable_tx_interrupts(void * ptr);
static void scc_disable_rx_interrupts(void * ptr);
static void scc_enable_rx_interrupts(void * ptr);
static int scc_carrier_raised(struct tty_port *port);
static void scc_shutdown_port(void * ptr);
static int scc_set_real_termios(void *ptr);
static void scc_hungup(void *ptr);
static void scc_close(void *ptr);
static int scc_chars_in_buffer(void * ptr);
static int scc_open(struct tty_struct * tty, struct file * filp);
static int scc_ioctl(struct tty_struct * tty,
unsigned int cmd, unsigned long arg);
static void scc_throttle(struct tty_struct *tty);
static void scc_unthrottle(struct tty_struct *tty);
static irqreturn_t scc_tx_int(int irq, void *data);
static irqreturn_t scc_rx_int(int irq, void *data);
static irqreturn_t scc_stat_int(int irq, void *data);
static irqreturn_t scc_spcond_int(int irq, void *data);
static void scc_setsignals(struct scc_port *port, int dtr, int rts);
static int scc_break_ctl(struct tty_struct *tty, int break_state);
static struct tty_driver *scc_driver;
static struct scc_port scc_ports[2];
/*---------------------------------------------------------------------------
* Interface from generic_serial.c back here
*--------------------------------------------------------------------------*/
/* Positional callbacks consumed by generic_serial.c. */
static struct real_driver scc_real_driver = {
	scc_disable_tx_interrupts,
	scc_enable_tx_interrupts,
	scc_disable_rx_interrupts,
	scc_enable_rx_interrupts,
	scc_shutdown_port,
	scc_set_real_termios,
	scc_hungup,	/* NOTE(review): field order per struct real_driver */
	NULL
};
/* tty operations: most entries fall through to generic_serial (gs_*)
 * helpers; only the SCC-specific ones are implemented locally. */
static const struct tty_operations scc_ops = {
	.open	= scc_open,
	.close = gs_close,
	.write = gs_write,
	.put_char = gs_put_char,
	.flush_chars = gs_flush_chars,
	.write_room = gs_write_room,
	.chars_in_buffer = gs_chars_in_buffer,
	.flush_buffer = gs_flush_buffer,
	.ioctl = scc_ioctl,
	.throttle = scc_throttle,
	.unthrottle = scc_unthrottle,
	.set_termios = gs_set_termios,
	.stop = gs_stop,
	.start = gs_start,
	.hangup = gs_hangup,
	.break_ctl = scc_break_ctl,
};
/* tty_port operations: only carrier detection is SCC-specific. */
static const struct tty_port_operations scc_port_ops = {
	.carrier_raised = scc_carrier_raised,
};
/*----------------------------------------------------------------------------
* vme_scc_init() and support functions
*---------------------------------------------------------------------------*/
/*
 * Allocate and register the two-port tty driver.  Returns 0 on
 * success, -ENOMEM if allocation fails, or 1 if registration with the
 * tty layer fails (the driver is released in that case).
 */
static int __init scc_init_drivers(void)
{
	int error;

	scc_driver = alloc_tty_driver(2);
	if (!scc_driver)
		return -ENOMEM;
	scc_driver->owner = THIS_MODULE;
	scc_driver->driver_name = "scc";
	scc_driver->name = "ttyS";
	scc_driver->major = TTY_MAJOR;
	scc_driver->minor_start = SCC_MINOR_BASE;
	scc_driver->type = TTY_DRIVER_TYPE_SERIAL;
	scc_driver->subtype = SERIAL_TYPE_NORMAL;
	scc_driver->init_termios = tty_std_termios;
	/* default line settings: 9600 baud, 8 data bits, local line */
	scc_driver->init_termios.c_cflag =
	  B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	scc_driver->init_termios.c_ispeed = 9600;
	scc_driver->init_termios.c_ospeed = 9600;
	scc_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(scc_driver, &scc_ops);

	if ((error = tty_register_driver(scc_driver))) {
		printk(KERN_ERR "scc: Couldn't register scc driver, error = %d\n",
		       error);
		put_tty_driver(scc_driver);
		return 1;
	}
	return 0;
}
/* ports[] array is indexed by line no (i.e. [0] for ttyS0, [1] for ttyS1).
*/
/* ports[] array is indexed by line no (i.e. [0] for ttyS0, [1] for ttyS1).
 */
static void __init scc_init_portstructs(void)
{
	struct scc_port *port;
	int i;

	/* Initialise the generic_serial state for both channels. */
	for (i = 0; i < 2; i++) {
		port = scc_ports + i;
		tty_port_init(&port->gs.port);
		port->gs.port.ops = &scc_port_ops;
		port->gs.magic = SCC_MAGIC;
		port->gs.close_delay = HZ/2;		/* 0.5 s */
		port->gs.closing_wait = 30 * HZ;	/* 30 s drain timeout */
		port->gs.rd = &scc_real_driver;
#ifdef NEW_WRITE_LOCKING
		port->gs.port_write_mutex = MUTEX;
#endif
		init_waitqueue_head(&port->gs.port.open_wait);
		init_waitqueue_head(&port->gs.port.close_wait);
	}
}
#ifdef CONFIG_MVME147_SCC
/*
 * Board bring-up for the MVME147: wire up both SCC channels, claim the
 * eight per-event IRQs, program the interrupt vector/master enable on
 * channel A, then register the tty driver.  On any request_irq failure
 * the IRQs already claimed are released in reverse order.
 */
static int __init mvme147_scc_init(void)
{
	struct scc_port *port;
	int error;

	printk(KERN_INFO "SCC: MVME147 Serial Driver\n");
	/* Init channel A */
	port = &scc_ports[0];
	port->channel = CHANNEL_A;
	port->ctrlp = (volatile unsigned char *)M147_SCC_A_ADDR;
	port->datap = port->ctrlp + 1;	/* data register follows control */
	port->port_a = &scc_ports[0];
	port->port_b = &scc_ports[1];
	error = request_irq(MVME147_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
		            "SCC-A TX", port);
	if (error)
		goto fail;
	error = request_irq(MVME147_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
		            "SCC-A status", port);
	if (error)
		goto fail_free_a_tx;
	error = request_irq(MVME147_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
		            "SCC-A RX", port);
	if (error)
		goto fail_free_a_stat;
	error = request_irq(MVME147_IRQ_SCCA_SPCOND, scc_spcond_int,
			    IRQF_DISABLED, "SCC-A special cond", port);
	if (error)
		goto fail_free_a_rx;
	{
		SCC_ACCESS_INIT(port);

		/* disable interrupts for this channel */
		SCCwrite(INT_AND_DMA_REG, 0);
		/* Set the interrupt vector */
		SCCwrite(INT_VECTOR_REG, MVME147_IRQ_SCC_BASE);
		/* Interrupt parameters: vector includes status, status low */
		SCCwrite(MASTER_INT_CTRL, MIC_VEC_INCL_STAT);
		SCCmod(MASTER_INT_CTRL, 0xff, MIC_MASTER_INT_ENAB);
	}

	/* Init channel B */
	port = &scc_ports[1];
	port->channel = CHANNEL_B;
	port->ctrlp = (volatile unsigned char *)M147_SCC_B_ADDR;
	port->datap = port->ctrlp + 1;
	port->port_a = &scc_ports[0];
	port->port_b = &scc_ports[1];
	error = request_irq(MVME147_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
		            "SCC-B TX", port);
	if (error)
		goto fail_free_a_spcond;
	error = request_irq(MVME147_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
		            "SCC-B status", port);
	if (error)
		goto fail_free_b_tx;
	error = request_irq(MVME147_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
		            "SCC-B RX", port);
	if (error)
		goto fail_free_b_stat;
	error = request_irq(MVME147_IRQ_SCCB_SPCOND, scc_spcond_int,
			    IRQF_DISABLED, "SCC-B special cond", port);
	if (error)
		goto fail_free_b_rx;
	{
		SCC_ACCESS_INIT(port);

		/* disable interrupts for this channel */
		SCCwrite(INT_AND_DMA_REG, 0);
	}

	/* Ensure interrupts are enabled in the PCC chip */
	m147_pcc->serial_cntrl=PCC_LEVEL_SERIAL|PCC_INT_ENAB;

	/* Initialise the tty driver structures and register */
	scc_init_portstructs();
	scc_init_drivers();

	return 0;

/* Unwind in strict reverse order of the request_irq calls above. */
fail_free_b_rx:
	free_irq(MVME147_IRQ_SCCB_RX, port);
fail_free_b_stat:
	free_irq(MVME147_IRQ_SCCB_STAT, port);
fail_free_b_tx:
	free_irq(MVME147_IRQ_SCCB_TX, port);
fail_free_a_spcond:
	free_irq(MVME147_IRQ_SCCA_SPCOND, port);
fail_free_a_rx:
	free_irq(MVME147_IRQ_SCCA_RX, port);
fail_free_a_stat:
	free_irq(MVME147_IRQ_SCCA_STAT, port);
fail_free_a_tx:
	free_irq(MVME147_IRQ_SCCA_TX, port);
fail:
	return error;
}
#endif
#ifdef CONFIG_MVME162_SCC
/*
 * Board bring-up for the MVME162.  Same structure as the MVME147
 * version, but register spacing is 2 bytes and interrupt enabling is
 * done in the MC2 chip.  Bails out early if the board has no SCC-A.
 */
static int __init mvme162_scc_init(void)
{
	struct scc_port *port;
	int error;

	if (!(mvme16x_config & MVME16x_CONFIG_GOT_SCCA))
		return (-ENODEV);

	printk(KERN_INFO "SCC: MVME162 Serial Driver\n");
	/* Init channel A */
	port = &scc_ports[0];
	port->channel = CHANNEL_A;
	port->ctrlp = (volatile unsigned char *)MVME_SCC_A_ADDR;
	port->datap = port->ctrlp + 2;	/* registers are 2 bytes apart */
	port->port_a = &scc_ports[0];
	port->port_b = &scc_ports[1];
	error = request_irq(MVME162_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
		            "SCC-A TX", port);
	if (error)
		goto fail;
	error = request_irq(MVME162_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
		            "SCC-A status", port);
	if (error)
		goto fail_free_a_tx;
	error = request_irq(MVME162_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
		            "SCC-A RX", port);
	if (error)
		goto fail_free_a_stat;
	error = request_irq(MVME162_IRQ_SCCA_SPCOND, scc_spcond_int,
			    IRQF_DISABLED, "SCC-A special cond", port);
	if (error)
		goto fail_free_a_rx;
	{
		SCC_ACCESS_INIT(port);

		/* disable interrupts for this channel */
		SCCwrite(INT_AND_DMA_REG, 0);
		/* Set the interrupt vector */
		SCCwrite(INT_VECTOR_REG, MVME162_IRQ_SCC_BASE);
		/* Interrupt parameters: vector includes status, status low */
		SCCwrite(MASTER_INT_CTRL, MIC_VEC_INCL_STAT);
		SCCmod(MASTER_INT_CTRL, 0xff, MIC_MASTER_INT_ENAB);
	}

	/* Init channel B */
	port = &scc_ports[1];
	port->channel = CHANNEL_B;
	port->ctrlp = (volatile unsigned char *)MVME_SCC_B_ADDR;
	port->datap = port->ctrlp + 2;
	port->port_a = &scc_ports[0];
	port->port_b = &scc_ports[1];
	error = request_irq(MVME162_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
		            "SCC-B TX", port);
	if (error)
		goto fail_free_a_spcond;
	error = request_irq(MVME162_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
		            "SCC-B status", port);
	if (error)
		goto fail_free_b_tx;
	error = request_irq(MVME162_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
		            "SCC-B RX", port);
	if (error)
		goto fail_free_b_stat;
	error = request_irq(MVME162_IRQ_SCCB_SPCOND, scc_spcond_int,
			    IRQF_DISABLED, "SCC-B special cond", port);
	if (error)
		goto fail_free_b_rx;
	{
		SCC_ACCESS_INIT(port);	/* Either channel will do */

		/* disable interrupts for this channel */
		SCCwrite(INT_AND_DMA_REG, 0);
	}

	/* Ensure interrupts are enabled in the MC2 chip */
	*(volatile char *)0xfff4201d = 0x14;

	/* Initialise the tty driver structures and register */
	scc_init_portstructs();
	scc_init_drivers();

	return 0;

/* Unwind in strict reverse order of the request_irq calls above. */
fail_free_b_rx:
	free_irq(MVME162_IRQ_SCCB_RX, port);
fail_free_b_stat:
	free_irq(MVME162_IRQ_SCCB_STAT, port);
fail_free_b_tx:
	free_irq(MVME162_IRQ_SCCB_TX, port);
fail_free_a_spcond:
	free_irq(MVME162_IRQ_SCCA_SPCOND, port);
fail_free_a_rx:
	free_irq(MVME162_IRQ_SCCA_RX, port);
fail_free_a_stat:
	free_irq(MVME162_IRQ_SCCA_STAT, port);
fail_free_a_tx:
	free_irq(MVME162_IRQ_SCCA_TX, port);
fail:
	return error;
}
#endif
#ifdef CONFIG_BVME6000_SCC
/*
 * Board bring-up for the BVME6000.  Same structure as the MVME147/162
 * versions; register spacing is 4 bytes and no board-level interrupt
 * enable is required.
 *
 * BUG FIX: the error-unwind labels were inverted — e.g. `fail:` freed
 * BVME_IRQ_SCCA_STAT (never requested at that point) while
 * `fail_free_b_rx:` freed nothing, so every failure path freed the
 * wrong IRQs and leaked the ones actually held.  The labels now free
 * in strict reverse order of the request_irq calls, matching the
 * mvme147/mvme162 variants in this file.
 */
static int __init bvme6000_scc_init(void)
{
	struct scc_port *port;
	int error;

	printk(KERN_INFO "SCC: BVME6000 Serial Driver\n");
	/* Init channel A */
	port = &scc_ports[0];
	port->channel = CHANNEL_A;
	port->ctrlp = (volatile unsigned char *)BVME_SCC_A_ADDR;
	port->datap = port->ctrlp + 4;	/* registers are 4 bytes apart */
	port->port_a = &scc_ports[0];
	port->port_b = &scc_ports[1];
	error = request_irq(BVME_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
		            "SCC-A TX", port);
	if (error)
		goto fail;
	error = request_irq(BVME_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
		            "SCC-A status", port);
	if (error)
		goto fail_free_a_tx;
	error = request_irq(BVME_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
		            "SCC-A RX", port);
	if (error)
		goto fail_free_a_stat;
	error = request_irq(BVME_IRQ_SCCA_SPCOND, scc_spcond_int,
			    IRQF_DISABLED, "SCC-A special cond", port);
	if (error)
		goto fail_free_a_rx;
	{
		SCC_ACCESS_INIT(port);

		/* disable interrupts for this channel */
		SCCwrite(INT_AND_DMA_REG, 0);
		/* Set the interrupt vector */
		SCCwrite(INT_VECTOR_REG, BVME_IRQ_SCC_BASE);
		/* Interrupt parameters: vector includes status, status low */
		SCCwrite(MASTER_INT_CTRL, MIC_VEC_INCL_STAT);
		SCCmod(MASTER_INT_CTRL, 0xff, MIC_MASTER_INT_ENAB);
	}

	/* Init channel B */
	port = &scc_ports[1];
	port->channel = CHANNEL_B;
	port->ctrlp = (volatile unsigned char *)BVME_SCC_B_ADDR;
	port->datap = port->ctrlp + 4;
	port->port_a = &scc_ports[0];
	port->port_b = &scc_ports[1];
	error = request_irq(BVME_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
		            "SCC-B TX", port);
	if (error)
		goto fail_free_a_spcond;
	error = request_irq(BVME_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
		            "SCC-B status", port);
	if (error)
		goto fail_free_b_tx;
	error = request_irq(BVME_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
		            "SCC-B RX", port);
	if (error)
		goto fail_free_b_stat;
	error = request_irq(BVME_IRQ_SCCB_SPCOND, scc_spcond_int,
			    IRQF_DISABLED, "SCC-B special cond", port);
	if (error)
		goto fail_free_b_rx;
	{
		SCC_ACCESS_INIT(port);	/* Either channel will do */

		/* disable interrupts for this channel */
		SCCwrite(INT_AND_DMA_REG, 0);
	}

	/* Initialise the tty driver structures and register */
	scc_init_portstructs();
	scc_init_drivers();

	return 0;

/* Unwind in strict reverse order of the request_irq calls above. */
fail_free_b_rx:
	free_irq(BVME_IRQ_SCCB_RX, port);
fail_free_b_stat:
	free_irq(BVME_IRQ_SCCB_STAT, port);
fail_free_b_tx:
	free_irq(BVME_IRQ_SCCB_TX, port);
fail_free_a_spcond:
	free_irq(BVME_IRQ_SCCA_SPCOND, port);
fail_free_a_rx:
	free_irq(BVME_IRQ_SCCA_RX, port);
fail_free_a_stat:
	free_irq(BVME_IRQ_SCCA_STAT, port);
fail_free_a_tx:
	free_irq(BVME_IRQ_SCCA_TX, port);
fail:
	return error;
}
#endif
/*
 * Dispatch to the board-specific initialiser for whichever VME board
 * we are running on; -ENODEV if none matches.  The MACH_IS_* tests are
 * mutually exclusive at runtime (one machine type per boot).
 */
static int __init vme_scc_init(void)
{
#ifdef CONFIG_MVME147_SCC
	if (MACH_IS_MVME147)
		return mvme147_scc_init();
#endif
#ifdef CONFIG_MVME162_SCC
	if (MACH_IS_MVME16x)
		return mvme162_scc_init();
#endif
#ifdef CONFIG_BVME6000_SCC
	if (MACH_IS_BVME6000)
		return bvme6000_scc_init();
#endif
	return -ENODEV;
}
module_init(vme_scc_init);
/*---------------------------------------------------------------------------
* Interrupt handlers
*--------------------------------------------------------------------------*/
/* Receive interrupt: pull one character and push it to the tty. */
static irqreturn_t scc_rx_int(int irq, void *data)
{
	unsigned char	ch;
	struct scc_port *port = data;
	struct tty_struct *tty = port->gs.port.tty;
	SCC_ACCESS_INIT(port);

	ch = SCCread_NB(RX_DATA_REG);
	if (!tty) {
		printk(KERN_WARNING "scc_rx_int with NULL tty!\n");
		SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
		return IRQ_HANDLED;
	}
	tty_insert_flip_char(tty, ch, 0);

	/* Check if another character is already ready; in that case, the
	 * spcond_int() function must be used, because this character may have an
	 * error condition that isn't signalled by the interrupt vector used!
	 */
	if (SCCread(INT_PENDING_REG) &
	    (port->channel == CHANNEL_A ? IPR_A_RX : IPR_B_RX)) {
		scc_spcond_int (irq, data);
		return IRQ_HANDLED;
	}

	SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);

	tty_flip_buffer_push(tty);
	return IRQ_HANDLED;
}
/* Special-condition interrupt: drain characters together with their
 * error status (overrun/parity/framing) until the RX queue is empty.
 */
static irqreturn_t scc_spcond_int(int irq, void *data)
{
	struct scc_port *port = data;
	struct tty_struct *tty = port->gs.port.tty;
	unsigned char	stat, ch, err;
	int		int_pending_mask = port->channel == CHANNEL_A ?
			                   IPR_A_RX : IPR_B_RX;
	SCC_ACCESS_INIT(port);

	if (!tty) {
		printk(KERN_WARNING "scc_spcond_int with NULL tty!\n");
		SCCwrite(COMMAND_REG, CR_ERROR_RESET);
		SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
		return IRQ_HANDLED;
	}
	do {
		stat = SCCread(SPCOND_STATUS_REG);
		ch = SCCread_NB(RX_DATA_REG);

		/* map SCC status bits to the tty layer's error codes */
		if (stat & SCSR_RX_OVERRUN)
			err = TTY_OVERRUN;
		else if (stat & SCSR_PARITY_ERR)
			err = TTY_PARITY;
		else if (stat & SCSR_CRC_FRAME_ERR)
			err = TTY_FRAME;
		else
			err = 0;

		tty_insert_flip_char(tty, ch, err);

		/* ++TeSche: *All* errors have to be cleared manually,
		 * else the condition persists for the next chars
		 */
		if (err)
			SCCwrite(COMMAND_REG, CR_ERROR_RESET);

	} while(SCCread(INT_PENDING_REG) & int_pending_mask);

	SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);

	tty_flip_buffer_push(tty);
	return IRQ_HANDLED;
}
/* Transmit interrupt: refill the SCC transmit buffer from the
 * generic_serial xmit ring (x_char has priority), disabling tx
 * interrupts once there is nothing more to send.
 */
static irqreturn_t scc_tx_int(int irq, void *data)
{
	struct scc_port *port = data;
	SCC_ACCESS_INIT(port);

	if (!port->gs.port.tty) {
		printk(KERN_WARNING "scc_tx_int with NULL tty!\n");
		SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
		SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET);
		SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
		return IRQ_HANDLED;
	}
	while ((SCCread_NB(STATUS_REG) & SR_TX_BUF_EMPTY)) {
		if (port->x_char) {
			/* high-priority character (e.g. flow control) */
			SCCwrite(TX_DATA_REG, port->x_char);
			port->x_char = 0;
		}
		else if ((port->gs.xmit_cnt <= 0) ||
			 port->gs.port.tty->stopped ||
			 port->gs.port.tty->hw_stopped)
			break;
		else {
			SCCwrite(TX_DATA_REG, port->gs.xmit_buf[port->gs.xmit_tail++]);
			/* ring buffer wrap-around */
			port->gs.xmit_tail = port->gs.xmit_tail & (SERIAL_XMIT_SIZE-1);
			if (--port->gs.xmit_cnt <= 0)
				break;
		}
	}
	if ((port->gs.xmit_cnt <= 0) || port->gs.port.tty->stopped ||
	    port->gs.port.tty->hw_stopped) {
		/* disable tx interrupts */
		SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
		SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET);   /* disable tx_int on next tx underrun? */
		port->gs.port.flags &= ~GS_TX_INTEN;
	}
	/* wake up writers once the ring has drained far enough */
	if (port->gs.port.tty && port->gs.xmit_cnt <= port->gs.wakeup_chars)
		tty_wakeup(port->gs.port.tty);

	SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
	return IRQ_HANDLED;
}
/*
 * External/status interrupt handler: detect DCD transitions by
 * comparing the current status register against the value cached in
 * scc_last_status_reg[], and wake up or hang up the port accordingly.
 */
static irqreturn_t scc_stat_int(int irq, void *data)
{
    struct scc_port *port = data;
    unsigned channel = port->channel;
    unsigned char last_sr, sr, changed;
    SCC_ACCESS_INIT(port);

    last_sr = scc_last_status_reg[channel];
    sr = scc_last_status_reg[channel] = SCCread_NB(STATUS_REG);
    changed = last_sr ^ sr;		/* bits that toggled since last time */

    if (changed & SR_DCD) {
        port->c_dcd = !!(sr & SR_DCD);
        if (!(port->gs.port.flags & ASYNC_CHECK_CD))
            ;	/* Don't report DCD changes */
        else if (port->c_dcd) {
            /* carrier came up: let blocked opens proceed */
            wake_up_interruptible(&port->gs.port.open_wait);
        }
        else {
            /* carrier dropped: hang up the line */
            if (port->gs.port.tty)
                tty_hangup (port->gs.port.tty);
        }
    }
    SCCwrite(COMMAND_REG, CR_EXTSTAT_RESET);
    SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
    return IRQ_HANDLED;
}
/*---------------------------------------------------------------------------
* generic_serial.c callback funtions
*--------------------------------------------------------------------------*/
/* Mask TX interrupts and clear the generic_serial TX-interrupt flag. */
static void scc_disable_tx_interrupts(void *ptr)
{
    struct scc_port *port = ptr;
    unsigned long flags;
    SCC_ACCESS_INIT(port);

    local_irq_save(flags);
    SCCmod(INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
    port->gs.port.flags &= ~GS_TX_INTEN;
    local_irq_restore(flags);
}


/* Unmask TX interrupts and kick the transmitter once by hand, since
 * the chip only interrupts on buffer-empty transitions. */
static void scc_enable_tx_interrupts(void *ptr)
{
    struct scc_port *port = ptr;
    unsigned long flags;
    SCC_ACCESS_INIT(port);

    local_irq_save(flags);
    SCCmod(INT_AND_DMA_REG, 0xff, IDR_TX_INT_ENAB);
    /* restart the transmitter */
    scc_tx_int (0, port);
    local_irq_restore(flags);
}


/* Mask RX, special-condition and external/status interrupts. */
static void scc_disable_rx_interrupts(void *ptr)
{
    struct scc_port *port = ptr;
    unsigned long flags;
    SCC_ACCESS_INIT(port);

    local_irq_save(flags);
    SCCmod(INT_AND_DMA_REG,
        ~(IDR_RX_INT_MASK|IDR_PARERR_AS_SPCOND|IDR_EXTSTAT_INT_ENAB), 0);
    local_irq_restore(flags);
}


/* Unmask RX interrupts (all chars), parity-as-special-condition and
 * external/status interrupts. */
static void scc_enable_rx_interrupts(void *ptr)
{
    struct scc_port *port = ptr;
    unsigned long flags;
    SCC_ACCESS_INIT(port);

    local_irq_save(flags);
    SCCmod(INT_AND_DMA_REG, 0xff,
        IDR_EXTSTAT_INT_ENAB|IDR_PARERR_AS_SPCOND|IDR_RX_INT_ALL);
    local_irq_restore(flags);
}
/*
 * tty_port callback: report whether DCD is currently asserted on this
 * channel, based on the status value cached by scc_stat_int().
 */
static int scc_carrier_raised(struct tty_port *port)
{
    struct scc_port *sc = container_of(port, struct scc_port, gs.port);

    return (scc_last_status_reg[sc->channel] & SR_DCD) ? 1 : 0;
}
/*
 * generic_serial shutdown callback: mark the port inactive and, if
 * HUPCL is set in the termios, drop DTR and RTS.
 */
static void scc_shutdown_port(void *ptr)
{
    struct scc_port *port = ptr;

    port->gs.port.flags &= ~ GS_ACTIVE;
    if (port->gs.port.tty && (port->gs.port.tty->termios->c_cflag & HUPCL)) {
        scc_setsignals (port, 0, 0);
    }
}
/*
 * Program the SCC channel from the current termios settings: character
 * size, parity, stop bits, auto flow control (CRTSCTS) and the baud
 * rate generator. Channel A is owned by the boot PROM and left alone.
 * Returns 0 in all cases.
 */
static int scc_set_real_termios (void *ptr)
{
    /* the SCC has char sizes 5,7,6,8 in that order! */
    static int chsize_map[4] = { 0, 2, 1, 3 };
    unsigned cflag, baud, chsize, channel, brgval = 0;
    unsigned long flags;
    struct scc_port *port = ptr;
    SCC_ACCESS_INIT(port);

    if (!port->gs.port.tty || !port->gs.port.tty->termios) return 0;

    channel = port->channel;

    if (channel == CHANNEL_A)
        return 0;		/* Settings controlled by boot PROM */

    cflag = port->gs.port.tty->termios->c_cflag;
    baud = port->gs.baud;
    chsize = (cflag & CSIZE) >> 4;	/* CSIZE field -> chsize_map index */

    if (baud == 0) {
        /* speed == 0 -> drop DTR */
        local_irq_save(flags);
        SCCmod(TX_CTRL_REG, ~TCR_DTR, 0);
        local_irq_restore(flags);
        return 0;
    }
    /* reject rates outside what each board's clocking can do */
    else if ((MACH_IS_MVME16x && (baud < 50 || baud > 38400)) ||
            (MACH_IS_MVME147 && (baud < 50 || baud > 19200)) ||
            (MACH_IS_BVME6000 &&(baud < 50 || baud > 76800))) {
        printk(KERN_NOTICE "SCC: Bad speed requested, %d\n", baud);
        return 0;
    }

    if (cflag & CLOCAL)
        port->gs.port.flags &= ~ASYNC_CHECK_CD;
    else
        port->gs.port.flags |= ASYNC_CHECK_CD;

    /* BRG divisor: clock / (16 * 2 * baud) - 2, rounded to nearest */
#ifdef CONFIG_MVME147_SCC
    if (MACH_IS_MVME147)
        brgval = (M147_SCC_PCLK + baud/2) / (16 * 2 * baud) - 2;
#endif
#ifdef CONFIG_MVME162_SCC
    if (MACH_IS_MVME16x)
        brgval = (MVME_SCC_PCLK + baud/2) / (16 * 2 * baud) - 2;
#endif
#ifdef CONFIG_BVME6000_SCC
    if (MACH_IS_BVME6000)
        brgval = (BVME_SCC_RTxC + baud/2) / (16 * 2 * baud) - 2;
#endif
    /* Now we have all parameters and can go to set them: */
    local_irq_save(flags);

    /* receiver's character size and auto-enables */
    SCCmod(RX_CTRL_REG, ~(RCR_CHSIZE_MASK|RCR_AUTO_ENAB_MODE),
        (chsize_map[chsize] << 6) |
        ((cflag & CRTSCTS) ? RCR_AUTO_ENAB_MODE : 0));

    /* parity and stop bits (both, Tx and Rx), clock mode never changes */
    SCCmod (AUX1_CTRL_REG,
        ~(A1CR_PARITY_MASK | A1CR_MODE_MASK),
        ((cflag & PARENB
        ? (cflag & PARODD ? A1CR_PARITY_ODD : A1CR_PARITY_EVEN)
        : A1CR_PARITY_NONE)
        | (cflag & CSTOPB ? A1CR_MODE_ASYNC_2 : A1CR_MODE_ASYNC_1)));

    /* sender's character size, set DTR for valid baud rate */
    SCCmod(TX_CTRL_REG, ~TCR_CHSIZE_MASK, chsize_map[chsize] << 5 | TCR_DTR);

    /* clock sources never change */

    /* disable BRG before changing the value */
    SCCmod(DPLL_CTRL_REG, ~DCR_BRG_ENAB, 0);

    /* BRG value */
    SCCwrite(TIMER_LOW_REG, brgval & 0xff);
    SCCwrite(TIMER_HIGH_REG, (brgval >> 8) & 0xff);

    /* BRG enable, and clock source never changes */
    SCCmod(DPLL_CTRL_REG, 0xff, DCR_BRG_ENAB);

    local_irq_restore(flags);

    return 0;
}
/*
 * generic_serial callback: crude "chars in hardware buffer" report.
 * The SCC only tells us whether everything has been shifted out
 * (SCSR_ALL_SENT), so answer 0 or 1.
 */
static int scc_chars_in_buffer (void *ptr)
{
    struct scc_port *port = ptr;
    SCC_ACCESS_INIT(port);

    if (SCCread (SPCOND_STATUS_REG) & SCSR_ALL_SENT)
        return 0;
    return 1;
}
/* Comment taken from sx.c (2.4.0):
I haven't the foggiest why the decrement use count has to happen
here. The whole linux serial drivers stuff needs to be redesigned.
My guess is that this is a hack to minimize the impact of a bug
elsewhere. Thinking about it some more. (try it sometime) Try
running minicom on a serial port that is driven by a modularized
driver. Have the modem hangup. Then remove the driver module. Then
exit minicom. I expect an "oops". -- REW */
/* Hangup callback: silence both directions of the channel. */
static void scc_hungup(void *ptr)
{
    scc_disable_tx_interrupts(ptr);
    scc_disable_rx_interrupts(ptr);
}


/* Close callback: same teardown as hangup. */
static void scc_close(void *ptr)
{
    scc_disable_tx_interrupts(ptr);
    scc_disable_rx_interrupts(ptr);
}
/*---------------------------------------------------------------------------
* Internal support functions
*--------------------------------------------------------------------------*/
/*
 * Set or clear DTR and RTS on a channel. A negative dtr/rts argument
 * leaves that signal unchanged; 0 clears it, positive sets it.
 */
static void scc_setsignals(struct scc_port *port, int dtr, int rts)
{
    unsigned long flags;
    unsigned char t;
    SCC_ACCESS_INIT(port);

    local_irq_save(flags);
    t = SCCread(TX_CTRL_REG);	/* read-modify-write of the TX control reg */
    if (dtr >= 0) t = dtr? (t | TCR_DTR): (t & ~TCR_DTR);
    if (rts >= 0) t = rts? (t | TCR_RTS): (t & ~TCR_RTS);
    SCCwrite(TX_CTRL_REG, t);
    local_irq_restore(flags);
}
/*
 * Queue a high-priority flow-control character (XON/XOFF); the TX
 * interrupt handler sends x_char before any ring-buffer data. A zero
 * ch cancels a previously queued character without kicking TX.
 */
static void scc_send_xchar(struct tty_struct *tty, char ch)
{
    struct scc_port *port = tty->driver_data;

    port->x_char = ch;
    if (ch)
        scc_enable_tx_interrupts(port);
}
/*---------------------------------------------------------------------------
* Driver entrypoints referenced from above
*--------------------------------------------------------------------------*/
/*
 * tty open entry point: on first open, run the board-specific SCC
 * register initialisation table (order matters: mode registers before
 * enabling RX/TX, Ext/Stat reset twice per chip errata), then hook up
 * the tty, initialise the generic_serial layer and enable receive
 * interrupts. Returns 0 on success or a negative errno.
 */
static int scc_open (struct tty_struct * tty, struct file * filp)
{
    int line = tty->index;
    int retval;
    struct scc_port *port = &scc_ports[line];
    int i, channel = port->channel;
    unsigned long flags;
    SCC_ACCESS_INIT(port);
#if defined(CONFIG_MVME162_SCC) || defined(CONFIG_MVME147_SCC)
    static const struct {
        unsigned reg, val;
    } mvme_init_tab[] = {
        /* Values for MVME162 and MVME147 */
        /* no parity, 1 stop bit, async, 1:16 */
        { AUX1_CTRL_REG, A1CR_PARITY_NONE|A1CR_MODE_ASYNC_1|A1CR_CLKMODE_x16 },
        /* parity error is special cond, ints disabled, no DMA */
        { INT_AND_DMA_REG, IDR_PARERR_AS_SPCOND | IDR_RX_INT_DISAB },
        /* Rx 8 bits/char, no auto enable, Rx off */
        { RX_CTRL_REG, RCR_CHSIZE_8 },
        /* DTR off, Tx 8 bits/char, RTS off, Tx off */
        { TX_CTRL_REG, TCR_CHSIZE_8 },
        /* special features off */
        { AUX2_CTRL_REG, 0 },
        { CLK_CTRL_REG, CCR_RXCLK_BRG | CCR_TXCLK_BRG },
        { DPLL_CTRL_REG, DCR_BRG_ENAB | DCR_BRG_USE_PCLK },
        /* Start Rx */
        { RX_CTRL_REG, RCR_RX_ENAB | RCR_CHSIZE_8 },
        /* Start Tx */
        { TX_CTRL_REG, TCR_TX_ENAB | TCR_RTS | TCR_DTR | TCR_CHSIZE_8 },
        /* Ext/Stat ints: DCD only */
        { INT_CTRL_REG, ICR_ENAB_DCD_INT },
        /* Reset Ext/Stat ints */
        { COMMAND_REG, CR_EXTSTAT_RESET },
        /* ...again */
        { COMMAND_REG, CR_EXTSTAT_RESET },
    };
#endif
#if defined(CONFIG_BVME6000_SCC)
    static const struct {
        unsigned reg, val;
    } bvme_init_tab[] = {
        /* Values for BVME6000 */
        /* no parity, 1 stop bit, async, 1:16 */
        { AUX1_CTRL_REG, A1CR_PARITY_NONE|A1CR_MODE_ASYNC_1|A1CR_CLKMODE_x16 },
        /* parity error is special cond, ints disabled, no DMA */
        { INT_AND_DMA_REG, IDR_PARERR_AS_SPCOND | IDR_RX_INT_DISAB },
        /* Rx 8 bits/char, no auto enable, Rx off */
        { RX_CTRL_REG, RCR_CHSIZE_8 },
        /* DTR off, Tx 8 bits/char, RTS off, Tx off */
        { TX_CTRL_REG, TCR_CHSIZE_8 },
        /* special features off */
        { AUX2_CTRL_REG, 0 },
        { CLK_CTRL_REG, CCR_RTxC_XTAL | CCR_RXCLK_BRG | CCR_TXCLK_BRG },
        { DPLL_CTRL_REG, DCR_BRG_ENAB },
        /* Start Rx */
        { RX_CTRL_REG, RCR_RX_ENAB | RCR_CHSIZE_8 },
        /* Start Tx */
        { TX_CTRL_REG, TCR_TX_ENAB | TCR_RTS | TCR_DTR | TCR_CHSIZE_8 },
        /* Ext/Stat ints: DCD only */
        { INT_CTRL_REG, ICR_ENAB_DCD_INT },
        /* Reset Ext/Stat ints */
        { COMMAND_REG, CR_EXTSTAT_RESET },
        /* ...again */
        { COMMAND_REG, CR_EXTSTAT_RESET },
    };
#endif
    /* one-time hardware initialisation on the first open of the port */
    if (!(port->gs.port.flags & ASYNC_INITIALIZED)) {
        local_irq_save(flags);
#if defined(CONFIG_MVME147_SCC) || defined(CONFIG_MVME162_SCC)
        if (MACH_IS_MVME147 || MACH_IS_MVME16x) {
            for (i = 0; i < ARRAY_SIZE(mvme_init_tab); ++i)
                SCCwrite(mvme_init_tab[i].reg, mvme_init_tab[i].val);
        }
#endif
#if defined(CONFIG_BVME6000_SCC)
        if (MACH_IS_BVME6000) {
            for (i = 0; i < ARRAY_SIZE(bvme_init_tab); ++i)
                SCCwrite(bvme_init_tab[i].reg, bvme_init_tab[i].val);
        }
#endif

        /* remember status register for detection of DCD and CTS changes */
        scc_last_status_reg[channel] = SCCread(STATUS_REG);

        port->c_dcd = 0;	/* Prevent initial 1->0 interrupt */
        scc_setsignals (port, 1,1);
        local_irq_restore(flags);
    }

    tty->driver_data = port;
    port->gs.port.tty = tty;
    port->gs.port.count++;
    retval = gs_init_port(&port->gs);
    if (retval) {
        port->gs.port.count--;
        return retval;
    }
    port->gs.port.flags |= GS_ACTIVE;
    retval = gs_block_til_ready(port, filp);

    if (retval) {
        port->gs.port.count--;
        return retval;
    }

    port->c_dcd = tty_port_carrier_raised(&port->gs.port);

    scc_enable_rx_interrupts(port);

    return 0;
}
/*
 * Throttle the remote sender: drop RTS for hardware flow control and/or
 * send XOFF for software flow control, depending on termios settings.
 */
static void scc_throttle (struct tty_struct * tty)
{
    struct scc_port *port = tty->driver_data;
    unsigned long flags;
    SCC_ACCESS_INIT(port);

    if (tty->termios->c_cflag & CRTSCTS) {
        local_irq_save(flags);
        SCCmod(TX_CTRL_REG, ~TCR_RTS, 0);	/* deassert RTS */
        local_irq_restore(flags);
    }
    if (I_IXOFF(tty))
        scc_send_xchar(tty, STOP_CHAR(tty));
}


/*
 * Unthrottle: reassert RTS and/or send XON, the inverse of
 * scc_throttle().
 */
static void scc_unthrottle (struct tty_struct * tty)
{
    struct scc_port *port = tty->driver_data;
    unsigned long flags;
    SCC_ACCESS_INIT(port);

    if (tty->termios->c_cflag & CRTSCTS) {
        local_irq_save(flags);
        SCCmod(TX_CTRL_REG, 0xff, TCR_RTS);	/* assert RTS */
        local_irq_restore(flags);
    }
    if (I_IXOFF(tty))
        scc_send_xchar(tty, START_CHAR(tty));
}
/* No driver-private ioctls; defer everything to the tty core. */
static int scc_ioctl(struct tty_struct *tty,
                     unsigned int cmd, unsigned long arg)
{
    return -ENOIOCTLCMD;
}
/*
 * Assert (break_state != 0) or clear a break condition on the TX line
 * via the SEND_BREAK bit of the TX control register.
 */
static int scc_break_ctl(struct tty_struct *tty, int break_state)
{
    struct scc_port *port = tty->driver_data;
    unsigned long flags;
    SCC_ACCESS_INIT(port);

    local_irq_save(flags);
    SCCmod(TX_CTRL_REG, ~TCR_SEND_BREAK,
            break_state ? TCR_SEND_BREAK : 0);
    local_irq_restore(flags);
    return 0;
}
/*---------------------------------------------------------------------------
* Serial console stuff...
*--------------------------------------------------------------------------*/
/* Short settle delay between consecutive SCC register accesses. */
#define scc_delay() do { __asm__ __volatile__ (" nop; nop"); } while (0)
/*
 * Polled console output of one character on SCC channel A.
 * Busy-waits (with a settle delay) until the TX buffer empty flag
 * (bit 2 of the status byte) is set, then issues a "reset highest IUS"
 * style pointer write (8) and the data byte.
 */
static void scc_ch_write (char ch)
{
    volatile char *p = NULL;

#ifdef CONFIG_MVME147_SCC
    if (MACH_IS_MVME147)
        p = (volatile char *)M147_SCC_A_ADDR;
#endif
#ifdef CONFIG_MVME162_SCC
    if (MACH_IS_MVME16x)
        p = (volatile char *)MVME_SCC_A_ADDR;
#endif
#ifdef CONFIG_BVME6000_SCC
    if (MACH_IS_BVME6000)
        p = (volatile char *)BVME_SCC_A_ADDR;
#endif

    /* No SCC console on this board/configuration: drop the character
     * rather than dereferencing a NULL device pointer. */
    if (!p)
        return;

    do {
        scc_delay();
    }
    while (!(*p & 4));
    scc_delay();
    *p = 8;
    scc_delay();
    *p = ch;
}
/* The console must be locked when we get here. */
/*
 * Console write callback: emit the buffer with interrupts off,
 * expanding '\n' to "\r\n" for the terminal.
 */
static void scc_console_write (struct console *co, const char *str, unsigned count)
{
    unsigned long flags;

    local_irq_save(flags);

    while (count--)
    {
        if (*str == '\n')
            scc_ch_write ('\r');
        scc_ch_write (*str++);
    }
    local_irq_restore(flags);
}
/* Console device callback: hand back the tty driver and line index. */
static struct tty_driver *scc_console_device(struct console *c, int *index)
{
    *index = c->index;
    return scc_driver;
}

/* Boot console bound to the SCC; CON_PRINTBUFFER replays earlier
 * printk output once it registers, index -1 = "use any line". */
static struct console sercons = {
    .name		= "ttyS",
    .write		= scc_console_write,
    .device		= scc_console_device,
    .flags		= CON_PRINTBUFFER,
    .index		= -1,
};
/*
 * Register the SCC console, but only on VME boards that actually carry
 * an SCC chip.
 */
static int __init vme_scc_console_init(void)
{
    switch (vme_brdtype) {
    case VME_TYPE_MVME147:
    case VME_TYPE_MVME162:
    case VME_TYPE_MVME172:
    case VME_TYPE_BVME4000:
    case VME_TYPE_BVME6000:
        register_console(&sercons);
        break;
    default:
        break;
    }
    return 0;
}
console_initcall(vme_scc_console_init);
| gpl-2.0 |
kerberizer/android_kernel_sony_msm8x60 | drivers/video/omap2/dss/dispc.c | 2793 | 81696 | /*
* linux/drivers/video/omap2/dss/dispc.c
*
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define DSS_SUBSYS_NAME "DISPC"
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <plat/clock.h>
#include <video/omapdss.h>
#include "dss.h"
#include "dss_features.h"
#include "dispc.h"
/* DISPC */
#define DISPC_SZ_REGS			SZ_4K

/* Error conditions that, by default, get logged and counted. */
#define DISPC_IRQ_MASK_ERROR            (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
					 DISPC_IRQ_OCP_ERR | \
					 DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
					 DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
					 DISPC_IRQ_SYNC_LOST | \
					 DISPC_IRQ_SYNC_LOST_DIGIT)

/* Maximum number of concurrently registered ISR callbacks. */
#define DISPC_MAX_NR_ISRS		8

/* One registered interrupt callback: handler, its cookie and the IRQ
 * bits it wants to be called for. */
struct omap_dispc_isr_data {
	omap_dispc_isr_t	isr;
	void			*arg;
	u32			mask;
};

/* DMA burst size selector values for OVL_ATTRIBUTES. */
enum omap_burst_size {
	BURST_SIZE_X2 = 0,
	BURST_SIZE_X4 = 1,
	BURST_SIZE_X8 = 2,
};

#define REG_GET(idx, start, end) \
	FLD_GET(dispc_read_reg(idx), start, end)

#define REG_FLD_MOD(idx, val, start, end) \
	dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))

/* Per-IRQ-bit hit counters, resettable for debugfs statistics. */
struct dispc_irq_stats {
	unsigned long last_reset;
	unsigned irq_count;
	unsigned irqs[32];
};

/* Driver-global state: device handle, register base, context-loss
 * tracking, IRQ bookkeeping and the register shadow for suspend. */
static struct {
	struct platform_device *pdev;
	void __iomem    *base;

	int		ctx_loss_cnt;

	int irq;
	struct clk *dss_clk;

	u32	fifo_size[MAX_DSS_OVERLAYS];

	spinlock_t irq_lock;
	u32 irq_error_mask;
	struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
	u32 error_irqs;
	struct work_struct error_work;

	bool		ctx_valid;
	u32		ctx[DISPC_SZ_REGS / sizeof(u32)];

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spinlock_t irq_stats_lock;
	struct dispc_irq_stats irq_stats;
#endif
} dispc;

enum omap_color_component {
	/* used for all color formats for OMAP3 and earlier
	 * and for RGB and Y color component on OMAP4
	 */
	DISPC_COLOR_COMPONENT_RGB_Y		= 1 << 0,
	/* used for UV component for
	 * OMAP_DSS_COLOR_YUV2, OMAP_DSS_COLOR_UYVY, OMAP_DSS_COLOR_NV12
	 * color formats on OMAP4
	 */
	DISPC_COLOR_COMPONENT_UV		= 1 << 1,
};
static void _omap_dispc_set_irqs(void);

/* MMIO write of a DISPC register at byte offset idx. */
static inline void dispc_write_reg(const u16 idx, u32 val)
{
	__raw_writel(val, dispc.base + idx);
}

/* MMIO read of a DISPC register at byte offset idx. */
static inline u32 dispc_read_reg(const u16 idx)
{
	return __raw_readl(dispc.base + idx);
}
/*
 * Ask the board code how many times the DISPC power domain has lost
 * context. Returns the count, or -ENOENT if the platform provides no
 * hook (context is then assumed lost on every restore).
 */
static int dispc_get_ctx_loss_count(void)
{
	struct device *dev = &dispc.pdev->dev;
	struct omap_display_platform_data *pdata = dev->platform_data;
	struct omap_dss_board_info *board_data = pdata->board_data;
	int cnt;

	if (!board_data->get_context_loss_count)
		return -ENOENT;

	cnt = board_data->get_context_loss_count(dev);

	WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);

	return cnt;
}
/* Shadow a register into / out of the dispc.ctx[] save area. */
#define SR(reg) \
	dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
#define RR(reg) \
	dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)])

/*
 * Snapshot every DISPC register that must survive a power-domain
 * context loss into dispc.ctx[], and record the current context-loss
 * count so dispc_restore_context() can skip a redundant restore.
 */
static void dispc_save_context(void)
{
	int i, j;

	DSSDBG("dispc_save_context\n");

	SR(IRQENABLE);
	SR(CONTROL);
	SR(CONFIG);
	SR(LINE_NUMBER);
	if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
			dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
		SR(GLOBAL_ALPHA);
	if (dss_has_feature(FEAT_MGR_LCD2)) {
		SR(CONTROL2);
		SR(CONFIG2);
	}

	/* per-manager registers; timing regs don't exist for DIGIT */
	for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
		SR(DEFAULT_COLOR(i));
		SR(TRANS_COLOR(i));
		SR(SIZE_MGR(i));
		if (i == OMAP_DSS_CHANNEL_DIGIT)
			continue;
		SR(TIMING_H(i));
		SR(TIMING_V(i));
		SR(POL_FREQ(i));
		SR(DIVISORo(i));

		SR(DATA_CYCLE1(i));
		SR(DATA_CYCLE2(i));
		SR(DATA_CYCLE3(i));

		if (dss_has_feature(FEAT_CPR)) {
			SR(CPR_COEF_R(i));
			SR(CPR_COEF_G(i));
			SR(CPR_COEF_B(i));
		}
	}

	/* per-overlay registers; the GFX pipe has no scaler/FIR regs */
	for (i = 0; i < dss_feat_get_num_ovls(); i++) {
		SR(OVL_BA0(i));
		SR(OVL_BA1(i));
		SR(OVL_POSITION(i));
		SR(OVL_SIZE(i));
		SR(OVL_ATTRIBUTES(i));
		SR(OVL_FIFO_THRESHOLD(i));
		SR(OVL_ROW_INC(i));
		SR(OVL_PIXEL_INC(i));
		if (dss_has_feature(FEAT_PRELOAD))
			SR(OVL_PRELOAD(i));
		if (i == OMAP_DSS_GFX) {
			SR(OVL_WINDOW_SKIP(i));
			SR(OVL_TABLE_BA(i));
			continue;
		}
		SR(OVL_FIR(i));
		SR(OVL_PICTURE_SIZE(i));
		SR(OVL_ACCU0(i));
		SR(OVL_ACCU1(i));

		for (j = 0; j < 8; j++)
			SR(OVL_FIR_COEF_H(i, j));

		for (j = 0; j < 8; j++)
			SR(OVL_FIR_COEF_HV(i, j));

		for (j = 0; j < 5; j++)
			SR(OVL_CONV_COEF(i, j));

		if (dss_has_feature(FEAT_FIR_COEF_V)) {
			for (j = 0; j < 8; j++)
				SR(OVL_FIR_COEF_V(i, j));
		}

		if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
			SR(OVL_BA0_UV(i));
			SR(OVL_BA1_UV(i));
			SR(OVL_FIR2(i));
			SR(OVL_ACCU2_0(i));
			SR(OVL_ACCU2_1(i));

			for (j = 0; j < 8; j++)
				SR(OVL_FIR_COEF_H2(i, j));

			for (j = 0; j < 8; j++)
				SR(OVL_FIR_COEF_HV2(i, j));

			for (j = 0; j < 8; j++)
				SR(OVL_FIR_COEF_V2(i, j));
		}
		if (dss_has_feature(FEAT_ATTR2))
			SR(OVL_ATTRIBUTES2(i));
	}

	if (dss_has_feature(FEAT_CORE_CLK_DIV))
		SR(DIVISOR);

	dispc.ctx_loss_cnt = dispc_get_ctx_loss_count();
	dispc.ctx_valid = true;

	DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
}
/*
 * Restore the register snapshot taken by dispc_save_context(). Skips
 * the restore when no context loss occurred since the save. CONTROL/
 * CONTROL2 are written second-to-last (they carry the output enables)
 * and IRQENABLE strictly last, so no interrupt fires on a half-
 * restored configuration.
 */
static void dispc_restore_context(void)
{
	int i, j, ctx;

	DSSDBG("dispc_restore_context\n");

	if (!dispc.ctx_valid)
		return;

	ctx = dispc_get_ctx_loss_count();

	if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
		return;

	DSSDBG("ctx_loss_count: saved %d, current %d\n",
			dispc.ctx_loss_cnt, ctx);

	/*RR(IRQENABLE);*/
	/*RR(CONTROL);*/
	RR(CONFIG);
	RR(LINE_NUMBER);
	if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
			dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
		RR(GLOBAL_ALPHA);
	if (dss_has_feature(FEAT_MGR_LCD2))
		RR(CONFIG2);

	/* per-manager registers, mirroring dispc_save_context() */
	for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
		RR(DEFAULT_COLOR(i));
		RR(TRANS_COLOR(i));
		RR(SIZE_MGR(i));
		if (i == OMAP_DSS_CHANNEL_DIGIT)
			continue;
		RR(TIMING_H(i));
		RR(TIMING_V(i));
		RR(POL_FREQ(i));
		RR(DIVISORo(i));

		RR(DATA_CYCLE1(i));
		RR(DATA_CYCLE2(i));
		RR(DATA_CYCLE3(i));

		if (dss_has_feature(FEAT_CPR)) {
			RR(CPR_COEF_R(i));
			RR(CPR_COEF_G(i));
			RR(CPR_COEF_B(i));
		}
	}

	/* per-overlay registers, mirroring dispc_save_context() */
	for (i = 0; i < dss_feat_get_num_ovls(); i++) {
		RR(OVL_BA0(i));
		RR(OVL_BA1(i));
		RR(OVL_POSITION(i));
		RR(OVL_SIZE(i));
		RR(OVL_ATTRIBUTES(i));
		RR(OVL_FIFO_THRESHOLD(i));
		RR(OVL_ROW_INC(i));
		RR(OVL_PIXEL_INC(i));
		if (dss_has_feature(FEAT_PRELOAD))
			RR(OVL_PRELOAD(i));
		if (i == OMAP_DSS_GFX) {
			RR(OVL_WINDOW_SKIP(i));
			RR(OVL_TABLE_BA(i));
			continue;
		}
		RR(OVL_FIR(i));
		RR(OVL_PICTURE_SIZE(i));
		RR(OVL_ACCU0(i));
		RR(OVL_ACCU1(i));

		for (j = 0; j < 8; j++)
			RR(OVL_FIR_COEF_H(i, j));

		for (j = 0; j < 8; j++)
			RR(OVL_FIR_COEF_HV(i, j));

		for (j = 0; j < 5; j++)
			RR(OVL_CONV_COEF(i, j));

		if (dss_has_feature(FEAT_FIR_COEF_V)) {
			for (j = 0; j < 8; j++)
				RR(OVL_FIR_COEF_V(i, j));
		}

		if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
			RR(OVL_BA0_UV(i));
			RR(OVL_BA1_UV(i));
			RR(OVL_FIR2(i));
			RR(OVL_ACCU2_0(i));
			RR(OVL_ACCU2_1(i));

			for (j = 0; j < 8; j++)
				RR(OVL_FIR_COEF_H2(i, j));

			for (j = 0; j < 8; j++)
				RR(OVL_FIR_COEF_HV2(i, j));

			for (j = 0; j < 8; j++)
				RR(OVL_FIR_COEF_V2(i, j));
		}
		if (dss_has_feature(FEAT_ATTR2))
			RR(OVL_ATTRIBUTES2(i));
	}

	if (dss_has_feature(FEAT_CORE_CLK_DIV))
		RR(DIVISOR);

	/* enable last, because LCD & DIGIT enable are here */
	RR(CONTROL);
	if (dss_has_feature(FEAT_MGR_LCD2))
		RR(CONTROL2);

	/* clear spurious SYNC_LOST_DIGIT interrupts */
	dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);

	/*
	 * enable last so IRQs won't trigger before
	 * the context is fully restored
	 */
	RR(IRQENABLE);

	DSSDBG("context restored\n");
}

#undef SR
#undef RR
/*
 * Take a runtime-PM reference on the DISPC device (powers it up and
 * restores context via the PM callbacks). Returns 0 or a negative
 * errno from pm_runtime_get_sync().
 */
int dispc_runtime_get(void)
{
	int r;

	DSSDBG("dispc_runtime_get\n");

	r = pm_runtime_get_sync(&dispc.pdev->dev);
	WARN_ON(r < 0);
	return r < 0 ? r : 0;
}

/* Drop the runtime-PM reference taken by dispc_runtime_get(). */
void dispc_runtime_put(void)
{
	int r;

	DSSDBG("dispc_runtime_put\n");

	r = pm_runtime_put_sync(&dispc.pdev->dev);
	WARN_ON(r < 0);
}
/* True for either LCD manager; false only for the DIGIT/TV path. */
static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
{
	return channel == OMAP_DSS_CHANNEL_LCD ||
	       channel == OMAP_DSS_CHANNEL_LCD2;
}
/* Return the display device attached to a channel's overlay manager,
 * or NULL if there is no manager or no device. */
static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel)
{
	struct omap_overlay_manager *mgr =
		omap_dss_get_overlay_manager(channel);
	return mgr ? mgr->device : NULL;
}
/*
 * Return the IRQ bit(s) signalling vertical sync for a channel. The
 * DIGIT (TV) output raises separate even/odd-field IRQs, so both are
 * returned for it.
 */
u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
{
	switch (channel) {
	case OMAP_DSS_CHANNEL_LCD:
		return DISPC_IRQ_VSYNC;
	case OMAP_DSS_CHANNEL_LCD2:
		return DISPC_IRQ_VSYNC2;
	case OMAP_DSS_CHANNEL_DIGIT:
		return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
	default:
		BUG();
		/* BUG() may be compiled out (CONFIG_BUG=n); don't fall
		 * off the end of a non-void function. */
		return 0;
	}
}
/*
 * Return the FRAMEDONE IRQ bit for a channel. The DIGIT output has no
 * framedone interrupt, so 0 is returned for it.
 */
u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
{
	switch (channel) {
	case OMAP_DSS_CHANNEL_LCD:
		return DISPC_IRQ_FRAMEDONE;
	case OMAP_DSS_CHANNEL_LCD2:
		return DISPC_IRQ_FRAMEDONE2;
	case OMAP_DSS_CHANNEL_DIGIT:
		return 0;
	default:
		BUG();
		/* BUG() may be compiled out (CONFIG_BUG=n); don't fall
		 * off the end of a non-void function. */
		return 0;
	}
}
/*
 * Report whether the channel's GO bit is still pending, i.e. shadow
 * registers have not yet been taken into use by the hardware.
 */
bool dispc_mgr_go_busy(enum omap_channel channel)
{
	/* bit 5 = GOLCD in CONTROL/CONTROL2, bit 6 = GODIGIT */
	int bit = dispc_mgr_is_lcd(channel) ? 5 : 6;

	if (channel == OMAP_DSS_CHANNEL_LCD2)
		return REG_GET(DISPC_CONTROL2, bit, bit) == 1;

	return REG_GET(DISPC_CONTROL, bit, bit) == 1;
}
/*
 * Set the channel's GO bit so the hardware latches the shadow
 * registers at the next vsync. Does nothing if the output is disabled,
 * and refuses (with an error) if the previous GO is still pending.
 */
void dispc_mgr_go(enum omap_channel channel)
{
	int bit;
	bool enable_bit, go_bit;

	if (dispc_mgr_is_lcd(channel))
		bit = 0; /* LCDENABLE */
	else
		bit = 1; /* DIGITALENABLE */

	/* if the channel is not enabled, we don't need GO */
	if (channel == OMAP_DSS_CHANNEL_LCD2)
		enable_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1;
	else
		enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;

	if (!enable_bit)
		return;

	if (dispc_mgr_is_lcd(channel))
		bit = 5; /* GOLCD */
	else
		bit = 6; /* GODIGIT */

	if (channel == OMAP_DSS_CHANNEL_LCD2)
		go_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1;
	else
		go_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;

	/* a still-set GO bit means the previous update hasn't landed yet */
	if (go_bit) {
		DSSERR("GO bit not down for channel %d\n", channel);
		return;
	}

	DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" :
		(channel == OMAP_DSS_CHANNEL_LCD2 ? "LCD2" : "DIGIT"));

	if (channel == OMAP_DSS_CHANNEL_LCD2)
		REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit);
	else
		REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
}
/* Thin helpers writing one scaler FIR coefficient register each; the
 * *2 variants address the second (UV) scaler unit, which the GFX pipe
 * does not have. */

static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value)
{
	dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value);
}

static void dispc_ovl_write_firhv_reg(enum omap_plane plane, int reg, u32 value)
{
	dispc_write_reg(DISPC_OVL_FIR_COEF_HV(plane, reg), value);
}

static void dispc_ovl_write_firv_reg(enum omap_plane plane, int reg, u32 value)
{
	dispc_write_reg(DISPC_OVL_FIR_COEF_V(plane, reg), value);
}

static void dispc_ovl_write_firh2_reg(enum omap_plane plane, int reg, u32 value)
{
	BUG_ON(plane == OMAP_DSS_GFX);

	dispc_write_reg(DISPC_OVL_FIR_COEF_H2(plane, reg), value);
}

static void dispc_ovl_write_firhv2_reg(enum omap_plane plane, int reg,
		u32 value)
{
	BUG_ON(plane == OMAP_DSS_GFX);

	dispc_write_reg(DISPC_OVL_FIR_COEF_HV2(plane, reg), value);
}

static void dispc_ovl_write_firv2_reg(enum omap_plane plane, int reg, u32 value)
{
	BUG_ON(plane == OMAP_DSS_GFX);

	dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value);
}
/*
 * Load the 8-phase horizontal and vertical FIR coefficient tables for
 * the plane's scaler, selected by the h/v increments. The RGB_Y and UV
 * color components use separate register banks; the extra V registers
 * are only needed in 5-tap vertical mode.
 */
static void dispc_ovl_set_scale_coef(enum omap_plane plane, int fir_hinc,
				int fir_vinc, int five_taps,
				enum omap_color_component color_comp)
{
	const struct dispc_coef *h_coef, *v_coef;
	int i;

	h_coef = dispc_ovl_get_scale_coef(fir_hinc, true);
	v_coef = dispc_ovl_get_scale_coef(fir_vinc, five_taps);

	for (i = 0; i < 8; i++) {
		u32 h, hv;

		/* pack 4 horizontal taps, then the 5th h tap with 3 v taps */
		h = FLD_VAL(h_coef[i].hc0_vc00, 7, 0)
			| FLD_VAL(h_coef[i].hc1_vc0, 15, 8)
			| FLD_VAL(h_coef[i].hc2_vc1, 23, 16)
			| FLD_VAL(h_coef[i].hc3_vc2, 31, 24);
		hv = FLD_VAL(h_coef[i].hc4_vc22, 7, 0)
			| FLD_VAL(v_coef[i].hc1_vc0, 15, 8)
			| FLD_VAL(v_coef[i].hc2_vc1, 23, 16)
			| FLD_VAL(v_coef[i].hc3_vc2, 31, 24);

		if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
			dispc_ovl_write_firh_reg(plane, i, h);
			dispc_ovl_write_firhv_reg(plane, i, hv);
		} else {
			dispc_ovl_write_firh2_reg(plane, i, h);
			dispc_ovl_write_firhv2_reg(plane, i, hv);
		}

	}

	if (five_taps) {
		for (i = 0; i < 8; i++) {
			u32 v;
			/* remaining two vertical taps */
			v = FLD_VAL(v_coef[i].hc0_vc00, 7, 0)
				| FLD_VAL(v_coef[i].hc4_vc22, 15, 8);
			if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y)
				dispc_ovl_write_firv_reg(plane, i, v);
			else
				dispc_ovl_write_firv2_reg(plane, i, v);
		}
	}
}
/*
 * Program the BT.601-5 (limited range) YUV->RGB color-conversion
 * matrix into every video pipe (all overlays except GFX, which cannot
 * take YUV input).
 */
static void _dispc_setup_color_conv_coef(void)
{
	int i;
	const struct color_conv_coef {
		int  ry,  rcr,  rcb,   gy,  gcr,  gcb,   by,  bcr,  bcb;
		int  full_range;
	}  ctbl_bt601_5 = {
		298,  409,    0,  298, -208, -100,  298,    0,  517, 0,
	};

	const struct color_conv_coef *ct;

/* pack two signed 11-bit coefficients per register */
#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))

	ct = &ctbl_bt601_5;

	for (i = 1; i < dss_feat_get_num_ovls(); i++) {
		dispc_write_reg(DISPC_OVL_CONV_COEF(i, 0),
			CVAL(ct->rcr, ct->ry));
		dispc_write_reg(DISPC_OVL_CONV_COEF(i, 1),
			CVAL(ct->gy, ct->rcb));
		dispc_write_reg(DISPC_OVL_CONV_COEF(i, 2),
			CVAL(ct->gcb, ct->gcr));
		dispc_write_reg(DISPC_OVL_CONV_COEF(i, 3),
			CVAL(ct->bcr, ct->by));
		dispc_write_reg(DISPC_OVL_CONV_COEF(i, 4),
			CVAL(0, ct->bcb));

		REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), ct->full_range,
			11, 11);
	}

#undef CVAL
}
/* Frame buffer base addresses: BA0/BA1 are the two field buffers
 * (BA1 used for interlaced output), the _UV variants address the
 * chroma plane of NV12-style formats. */

static void dispc_ovl_set_ba0(enum omap_plane plane, u32 paddr)
{
	dispc_write_reg(DISPC_OVL_BA0(plane), paddr);
}

static void dispc_ovl_set_ba1(enum omap_plane plane, u32 paddr)
{
	dispc_write_reg(DISPC_OVL_BA1(plane), paddr);
}

static void dispc_ovl_set_ba0_uv(enum omap_plane plane, u32 paddr)
{
	dispc_write_reg(DISPC_OVL_BA0_UV(plane), paddr);
}

static void dispc_ovl_set_ba1_uv(enum omap_plane plane, u32 paddr)
{
	dispc_write_reg(DISPC_OVL_BA1_UV(plane), paddr);
}

/* Overlay position within the manager, packed y:26..16, x:10..0. */
static void dispc_ovl_set_pos(enum omap_plane plane, int x, int y)
{
	u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);

	dispc_write_reg(DISPC_OVL_POSITION(plane), val);
}

/* Input picture size; the register stores size-1. GFX has no scaler,
 * so its input size lives in the SIZE register instead of
 * PICTURE_SIZE. */
static void dispc_ovl_set_pic_size(enum omap_plane plane, int width, int height)
{
	u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);

	if (plane == OMAP_DSS_GFX)
		dispc_write_reg(DISPC_OVL_SIZE(plane), val);
	else
		dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val);
}

/* Output (post-scaler) size for video pipes; invalid for GFX. */
static void dispc_ovl_set_vid_size(enum omap_plane plane, int width, int height)
{
	u32 val;

	BUG_ON(plane == OMAP_DSS_GFX);

	val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);

	dispc_write_reg(DISPC_OVL_SIZE(plane), val);
}
/* Set the overlay's z-order field (bits 27:26); no-op if the overlay
 * lacks the ZORDER capability. */
static void dispc_ovl_set_zorder(enum omap_plane plane, u8 zorder)
{
	struct omap_overlay *ovl = omap_dss_get_overlay(plane);

	if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
		return;

	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), zorder, 27, 26);
}

/* On SoCs with free z-order, turn on the ZORDERENABLE bit for every
 * overlay. */
static void dispc_ovl_enable_zorder_planes(void)
{
	int i;

	if (!dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
		return;

	for (i = 0; i < dss_feat_get_num_ovls(); i++)
		REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), 1, 25, 25);
}

/* Toggle pre-multiplied-alpha handling (bit 28) when supported. */
static void dispc_ovl_set_pre_mult_alpha(enum omap_plane plane, bool enable)
{
	struct omap_overlay *ovl = omap_dss_get_overlay(plane);

	if ((ovl->caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
		return;

	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28);
}

/* Write the overlay's 8-bit global alpha into its byte lane of
 * DISPC_GLOBAL_ALPHA, when the overlay supports global alpha. */
static void dispc_ovl_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
{
	static const unsigned shifts[] = { 0, 8, 16, 24, };
	int shift;
	struct omap_overlay *ovl = omap_dss_get_overlay(plane);

	if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
		return;

	shift = shifts[plane];
	REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, shift + 7, shift);
}

/* DMA stride controls: signed increment between pixels / at end of
 * each row. */
static void dispc_ovl_set_pix_inc(enum omap_plane plane, s32 inc)
{
	dispc_write_reg(DISPC_OVL_PIXEL_INC(plane), inc);
}

static void dispc_ovl_set_row_inc(enum omap_plane plane, s32 inc)
{
	dispc_write_reg(DISPC_OVL_ROW_INC(plane), inc);
}
/*
 * Translate an omap_color_mode into the 4-bit FORMAT field of
 * OVL_ATTRIBUTES (bits 4:1). The encoding differs between the GFX
 * pipe (supports CLUT modes, no YUV) and the video pipes (support
 * YUV/NV12, no CLUT). BUG()s on a mode the pipe cannot take.
 */
static void dispc_ovl_set_color_mode(enum omap_plane plane,
		enum omap_color_mode color_mode)
{
	u32 m = 0;
	if (plane != OMAP_DSS_GFX) {
		switch (color_mode) {
		case OMAP_DSS_COLOR_NV12:
			m = 0x0; break;
		case OMAP_DSS_COLOR_RGBX16:
			m = 0x1; break;
		case OMAP_DSS_COLOR_RGBA16:
			m = 0x2; break;
		case OMAP_DSS_COLOR_RGB12U:
			m = 0x4; break;
		case OMAP_DSS_COLOR_ARGB16:
			m = 0x5; break;
		case OMAP_DSS_COLOR_RGB16:
			m = 0x6; break;
		case OMAP_DSS_COLOR_ARGB16_1555:
			m = 0x7; break;
		case OMAP_DSS_COLOR_RGB24U:
			m = 0x8; break;
		case OMAP_DSS_COLOR_RGB24P:
			m = 0x9; break;
		case OMAP_DSS_COLOR_YUV2:
			m = 0xa; break;
		case OMAP_DSS_COLOR_UYVY:
			m = 0xb; break;
		case OMAP_DSS_COLOR_ARGB32:
			m = 0xc; break;
		case OMAP_DSS_COLOR_RGBA32:
			m = 0xd; break;
		case OMAP_DSS_COLOR_RGBX32:
			m = 0xe; break;
		case OMAP_DSS_COLOR_XRGB16_1555:
			m = 0xf; break;
		default:
			BUG(); break;
		}
	} else {
		switch (color_mode) {
		case OMAP_DSS_COLOR_CLUT1:
			m = 0x0; break;
		case OMAP_DSS_COLOR_CLUT2:
			m = 0x1; break;
		case OMAP_DSS_COLOR_CLUT4:
			m = 0x2; break;
		case OMAP_DSS_COLOR_CLUT8:
			m = 0x3; break;
		case OMAP_DSS_COLOR_RGB12U:
			m = 0x4; break;
		case OMAP_DSS_COLOR_ARGB16:
			m = 0x5; break;
		case OMAP_DSS_COLOR_RGB16:
			m = 0x6; break;
		case OMAP_DSS_COLOR_ARGB16_1555:
			m = 0x7; break;
		case OMAP_DSS_COLOR_RGB24U:
			m = 0x8; break;
		case OMAP_DSS_COLOR_RGB24P:
			m = 0x9; break;
		case OMAP_DSS_COLOR_RGBX16:
			m = 0xa; break;
		case OMAP_DSS_COLOR_RGBA16:
			m = 0xb; break;
		case OMAP_DSS_COLOR_ARGB32:
			m = 0xc; break;
		case OMAP_DSS_COLOR_RGBA32:
			m = 0xd; break;
		case OMAP_DSS_COLOR_RGBX32:
			m = 0xe; break;
		case OMAP_DSS_COLOR_XRGB16_1555:
			m = 0xf; break;
		default:
			BUG(); break;
		}
	}

	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
/*
 * Route an overlay to an output channel. On SoCs with a second LCD
 * manager the routing is split across two fields: the classic
 * LCD/DIGIT select bit (CHANNELOUT) plus a CHANNELOUT2 field in bits
 * 31:30 that selects LCD2.
 */
void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
{
	int shift;
	u32 val;
	int chan = 0, chan2 = 0;

	switch (plane) {
	case OMAP_DSS_GFX:
		shift = 8;
		break;
	case OMAP_DSS_VIDEO1:
	case OMAP_DSS_VIDEO2:
	case OMAP_DSS_VIDEO3:
		shift = 16;
		break;
	default:
		BUG();
		return;
	}

	val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
	if (dss_has_feature(FEAT_MGR_LCD2)) {
		switch (channel) {
		case OMAP_DSS_CHANNEL_LCD:
			chan = 0;
			chan2 = 0;
			break;
		case OMAP_DSS_CHANNEL_DIGIT:
			chan = 1;
			chan2 = 0;
			break;
		case OMAP_DSS_CHANNEL_LCD2:
			chan = 0;
			chan2 = 1;
			break;
		default:
			BUG();
		}

		val = FLD_MOD(val, chan, shift, shift);
		val = FLD_MOD(val, chan2, 31, 30);
	} else {
		val = FLD_MOD(val, channel, shift, shift);
	}
	dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
}
static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
{
int shift;
u32 val;
enum omap_channel channel;
switch (plane) {
case OMAP_DSS_GFX:
shift = 8;
break;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
case OMAP_DSS_VIDEO3:
shift = 16;
break;
default:
BUG();
}
val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
if (dss_has_feature(FEAT_MGR_LCD2)) {
if (FLD_GET(val, 31, 30) == 0)
channel = FLD_GET(val, shift, shift);
else
channel = OMAP_DSS_CHANNEL_LCD2;
} else {
channel = FLD_GET(val, shift, shift);
}
return channel;
}
/* Program the 2-bit BURSTSIZE field of OVL_ATTRIBUTES; the field sits
 * at bit 6 for GFX and bit 14 for the video pipes. */
static void dispc_ovl_set_burst_size(enum omap_plane plane,
		enum omap_burst_size burst_size)
{
	static const unsigned shifts[] = { 6, 14, 14, 14, };
	int shift;

	shift = shifts[plane];
	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), burst_size, shift + 1, shift);
}

static void dispc_configure_burst_sizes(void)
{
	int i;
	const int burst_size = BURST_SIZE_X8;

	/* Configure burst size always to maximum size */
	for (i = 0; i < omap_dss_get_num_overlays(); ++i)
		dispc_ovl_set_burst_size(i, burst_size);
}

/* Burst size in bytes for any plane. */
static u32 dispc_ovl_get_burst_size(enum omap_plane plane)
{
	unsigned unit = dss_feat_get_burst_size_unit();

	/* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */
	return unit * 8;
}
/*
 * Enable/disable the palette/gamma table (DISPC_CONFIG bit 9). Only
 * disabling is actually supported here; an enable request is rejected
 * with a warning.
 */
void dispc_enable_gamma_table(bool enable)
{
	/*
	 * This is partially implemented to support only disabling of
	 * the gamma table.
	 */
	if (enable) {
		DSSWARN("Gamma table enabling for TV not yet supported");
		return;
	}

	REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
}
/*
 * Enable/disable Color Phase Rotation (CPR) for an LCD manager.
 * CPR only exists on the LCD managers; other channels are ignored.
 */
static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
{
	u16 config_reg;

	switch (channel) {
	case OMAP_DSS_CHANNEL_LCD:
		config_reg = DISPC_CONFIG;
		break;
	case OMAP_DSS_CHANNEL_LCD2:
		config_reg = DISPC_CONFIG2;
		break;
	default:
		return;
	}

	REG_FLD_MOD(config_reg, enable, 15, 15);
}
/*
 * Program the 3x3 CPR coefficient matrix for an LCD manager.
 * Each coefficient is a 10-bit field; three coefficients are packed
 * per register (bits 31:22, 20:11 and 9:0).  Non-LCD channels have no
 * CPR block, so they are silently ignored.
 */
static void dispc_mgr_set_cpr_coef(enum omap_channel channel,
		struct omap_dss_cpr_coefs *coefs)
{
	u32 coef_r, coef_g, coef_b;

	if (!dispc_mgr_is_lcd(channel))
		return;

	coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) |
		FLD_VAL(coefs->rb, 9, 0);
	coef_g = FLD_VAL(coefs->gr, 31, 22) | FLD_VAL(coefs->gg, 20, 11) |
		FLD_VAL(coefs->gb, 9, 0);
	coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) |
		FLD_VAL(coefs->bb, 9, 0);

	dispc_write_reg(DISPC_CPR_COEF_R(channel), coef_r);
	dispc_write_reg(DISPC_CPR_COEF_G(channel), coef_g);
	dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b);
}
/*
 * Enable/disable YUV->RGB color-space conversion (ATTRIBUTES bit 9)
 * for a video pipeline.  The GFX pipeline has no color converter.
 */
static void dispc_ovl_set_vid_color_conv(enum omap_plane plane, bool enable)
{
	BUG_ON(plane == OMAP_DSS_GFX);

	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, 9, 9);
}
/* Enable/disable bit replication for an overlay (expands narrow RGB). */
static void dispc_ovl_enable_replication(enum omap_plane plane, bool enable)
{
	/* GFX: bit 5; VID1..VID3: bit 10 */
	static const unsigned repl_bit[] = { 5, 10, 10, 10 };
	const int bit = repl_bit[plane];

	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
}
/* Program the display size of an LCD manager (value stored is size-1). */
void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
{
	/* Both size fields are 11 bits wide */
	BUG_ON((width > (1 << 11)) || (height > (1 << 11)));

	dispc_write_reg(DISPC_SIZE_MGR(channel),
			FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0));
}
/* Program the display size of the DIGIT (TV) manager. */
void dispc_set_digit_size(u16 width, u16 height)
{
	/* Both size fields are 11 bits wide (value programmed is size-1) */
	BUG_ON((width > (1 << 11)) || (height > (1 << 11)));

	dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT),
			FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0));
}
/*
 * Read each overlay's FIFO size from hardware and cache it (in bytes)
 * in dispc.fifo_size[].  The register reports the size in buffer
 * units, whose byte size is a per-SoC feature.
 */
static void dispc_read_plane_fifo_sizes(void)
{
	u32 size;
	int plane;
	u8 start, end;
	u32 unit;

	/* bytes per buffer unit for this SoC */
	unit = dss_feat_get_buffer_size_unit();

	/* bit positions of the FIFO size field vary per SoC */
	dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);

	for (plane = 0; plane < dss_feat_get_num_ovls(); ++plane) {
		size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(plane), start, end);
		size *= unit;
		dispc.fifo_size[plane] = size;
	}
}
/*
 * Return the overlay's FIFO size in bytes, as cached by
 * dispc_read_plane_fifo_sizes().
 */
static u32 dispc_ovl_get_fifo_size(enum omap_plane plane)
{
	return dispc.fifo_size[plane];
}
/*
 * Program the low/high FIFO thresholds for an overlay.
 *
 * @low and @high are in bytes and must be multiples of the SoC's
 * buffer unit; the hardware fields hold the values in buffer units.
 * Field positions are SoC-dependent and queried from dss_features.
 */
void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high)
{
	u8 hi_start, hi_end, lo_start, lo_end;
	u32 unit;

	unit = dss_feat_get_buffer_size_unit();

	/* thresholds must be buffer-unit aligned */
	WARN_ON(low % unit != 0);
	WARN_ON(high % unit != 0);

	/* convert bytes -> buffer units for the register fields */
	low /= unit;
	high /= unit;

	dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
	dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);

	DSSDBG("fifo(%d) threshold (bytes), old %u/%u, new %u/%u\n",
			plane,
			REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
				lo_start, lo_end) * unit,
			REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
				hi_start, hi_end) * unit,
			low * unit, high * unit);

	dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
			FLD_VAL(high, hi_start, hi_end) |
			FLD_VAL(low, lo_start, lo_end));
}
/*
 * Enable/disable FIFO merge (DISPC_CONFIG bit 14), which combines the
 * overlay FIFOs into one larger FIFO.  On SoCs without the feature,
 * asking to enable it is a bug; disabling is a harmless no-op.
 */
void dispc_enable_fifomerge(bool enable)
{
	if (!dss_has_feature(FEAT_FIFO_MERGE)) {
		WARN_ON(enable);
		return;
	}

	DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
	REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
}
/*
 * Compute suitable low/high FIFO thresholds (in bytes) for an overlay.
 *
 * With @use_fifomerge the high threshold is derived from the combined
 * size of all overlay FIFOs; the low threshold always uses only the
 * overlay's own FIFO so it is valid in both modes.
 */
void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
		u32 *fifo_low, u32 *fifo_high, bool use_fifomerge)
{
	/*
	 * All sizes are in bytes. Both the buffer and burst are made of
	 * buffer_units, and the fifo thresholds must be buffer_unit aligned.
	 */
	unsigned buf_unit = dss_feat_get_buffer_size_unit();
	unsigned ovl_fifo_size, total_fifo_size, burst_size;
	int i;

	burst_size = dispc_ovl_get_burst_size(plane);
	ovl_fifo_size = dispc_ovl_get_fifo_size(plane);

	if (use_fifomerge) {
		/* merged FIFO: sum all overlay FIFOs */
		total_fifo_size = 0;
		for (i = 0; i < omap_dss_get_num_overlays(); ++i)
			total_fifo_size += dispc_ovl_get_fifo_size(i);
	} else {
		total_fifo_size = ovl_fifo_size;
	}

	/*
	 * We use the same low threshold for both fifomerge and non-fifomerge
	 * cases, but for fifomerge we calculate the high threshold using the
	 * combined fifo size
	 */
	if (dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
		/* OMAP3 DSI errata: keep extra burst-size headroom */
		*fifo_low = ovl_fifo_size - burst_size * 2;
		*fifo_high = total_fifo_size - burst_size;
	} else {
		*fifo_low = ovl_fifo_size - burst_size;
		*fifo_high = total_fifo_size - buf_unit;
	}
}
/*
 * Program the FIR scaler increments for one color component.
 * RGB/luma increments go to DISPC_OVL_FIR with SoC-dependent field
 * widths; chroma (UV) increments go to DISPC_OVL_FIR2 with fixed
 * 13-bit fields (28:16 and 12:0).
 */
static void dispc_ovl_set_fir(enum omap_plane plane,
		int hinc, int vinc,
		enum omap_color_component color_comp)
{
	u32 val;

	if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
		u8 hinc_start, hinc_end, vinc_start, vinc_end;

		dss_feat_get_reg_field(FEAT_REG_FIRHINC,
				&hinc_start, &hinc_end);
		dss_feat_get_reg_field(FEAT_REG_FIRVINC,
				&vinc_start, &vinc_end);
		val = FLD_VAL(vinc, vinc_start, vinc_end) |
			FLD_VAL(hinc, hinc_start, hinc_end);

		dispc_write_reg(DISPC_OVL_FIR(plane), val);
	} else {
		val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
		dispc_write_reg(DISPC_OVL_FIR2(plane), val);
	}
}
/* Program the field-0 scaler accumulator (initial phase) of a plane. */
static void dispc_ovl_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
{
	u8 hor_start, hor_end, vert_start, vert_end;

	/* accumulator field widths are SoC-dependent */
	dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
	dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);

	dispc_write_reg(DISPC_OVL_ACCU0(plane),
			FLD_VAL(vaccu, vert_start, vert_end) |
			FLD_VAL(haccu, hor_start, hor_end));
}
/* Program the field-1 scaler accumulator (initial phase) of a plane. */
static void dispc_ovl_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
{
	u8 hor_start, hor_end, vert_start, vert_end;

	/* accumulator field widths are SoC-dependent */
	dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
	dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);

	dispc_write_reg(DISPC_OVL_ACCU1(plane),
			FLD_VAL(vaccu, vert_start, vert_end) |
			FLD_VAL(haccu, hor_start, hor_end));
}
/* Program the field-0 chroma (UV) scaler accumulator of a plane. */
static void dispc_ovl_set_vid_accu2_0(enum omap_plane plane, int haccu,
		int vaccu)
{
	/* ACCU2 fields are fixed width: vertical 26:16, horizontal 10:0 */
	dispc_write_reg(DISPC_OVL_ACCU2_0(plane),
			FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0));
}
/* Program the field-1 chroma (UV) scaler accumulator of a plane. */
static void dispc_ovl_set_vid_accu2_1(enum omap_plane plane, int haccu,
		int vaccu)
{
	/* ACCU2 fields are fixed width: vertical 26:16, horizontal 10:0 */
	dispc_write_reg(DISPC_OVL_ACCU2_1(plane),
			FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0));
}
/*
 * Compute and program FIR increments and coefficients for one color
 * component.  Increments are 1024 * in/out (1024 == 1.0 fixed point).
 * NOTE(review): assumes out_width/out_height are non-zero — callers
 * appear to guarantee this; confirm before reuse.
 */
static void dispc_ovl_set_scale_param(enum omap_plane plane,
		u16 orig_width, u16 orig_height,
		u16 out_width, u16 out_height,
		bool five_taps, u8 rotation,
		enum omap_color_component color_comp)
{
	int fir_hinc, fir_vinc;

	fir_hinc = 1024 * orig_width / out_width;
	fir_vinc = 1024 * orig_height / out_height;

	dispc_ovl_set_scale_coef(plane, fir_hinc, fir_vinc, five_taps,
			color_comp);
	dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
}
/*
 * Program the RGB/luma scaling path of an overlay: FIR parameters,
 * resize enable/config bits in ATTRIBUTES, and the field accumulators
 * needed for interlaced output.
 */
static void dispc_ovl_set_scaling_common(enum omap_plane plane,
		u16 orig_width, u16 orig_height,
		u16 out_width, u16 out_height,
		bool ilace, bool five_taps,
		bool fieldmode, enum omap_color_mode color_mode,
		u8 rotation)
{
	int accu0 = 0;
	int accu1 = 0;
	u32 l;

	dispc_ovl_set_scale_param(plane, orig_width, orig_height,
				out_width, out_height, five_taps,
				rotation, DISPC_COLOR_COMPONENT_RGB_Y);
	l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));

	/* RESIZEENABLE and VERTICALTAPS */
	l &= ~((0x3 << 5) | (0x1 << 21));
	l |= (orig_width != out_width) ? (1 << 5) : 0;   /* H resize */
	l |= (orig_height != out_height) ? (1 << 6) : 0; /* V resize */
	l |= five_taps ? (1 << 21) : 0;

	/* VRESIZECONF and HRESIZECONF */
	if (dss_has_feature(FEAT_RESIZECONF)) {
		l &= ~(0x3 << 7);
		/* bit set means downscaling on that axis */
		l |= (orig_width <= out_width) ? 0 : (1 << 7);
		l |= (orig_height <= out_height) ? 0 : (1 << 8);
	}

	/* LINEBUFFERSPLIT */
	if (dss_has_feature(FEAT_LINEBUFFERSPLIT)) {
		l &= ~(0x1 << 22);
		l |= five_taps ? (1 << 22) : 0;
	}

	dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l);

	/*
	 * field 0 = even field = bottom field
	 * field 1 = odd field = top field
	 */
	if (ilace && !fieldmode) {
		/* offset the bottom field by half the vertical increment */
		accu1 = 0;
		accu0 = ((1024 * orig_height / out_height) / 2) & 0x3ff;
		if (accu0 >= 1024/2) {
			accu1 = 1024/2;
			accu0 -= accu1;
		}
	}

	dispc_ovl_set_vid_accu0(plane, 0, accu0);
	dispc_ovl_set_vid_accu1(plane, 0, accu1);
}
/*
 * Program the chroma (UV) scaling path of an overlay for YUV formats.
 * Adjusts the chroma source size for the format's subsampling, then
 * programs FIR2, the resampling enable bit and the chroma accumulators.
 * No-op on SoCs without separate UV handling; resets chroma resampling
 * for RGB formats.
 */
static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
		u16 orig_width, u16 orig_height,
		u16 out_width, u16 out_height,
		bool ilace, bool five_taps,
		bool fieldmode, enum omap_color_mode color_mode,
		u8 rotation)
{
	int scale_x = out_width != orig_width;
	int scale_y = out_height != orig_height;

	if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
		return;
	if ((color_mode != OMAP_DSS_COLOR_YUV2 &&
			color_mode != OMAP_DSS_COLOR_UYVY &&
			color_mode != OMAP_DSS_COLOR_NV12)) {
		/* reset chroma resampling for RGB formats  */
		REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
		return;
	}
	switch (color_mode) {
	case OMAP_DSS_COLOR_NV12:
		/* UV is subsampled by 2 vertically*/
		orig_height >>= 1;
		/* UV is subsampled by 2 horz.*/
		orig_width >>= 1;
		break;
	case OMAP_DSS_COLOR_YUV2:
	case OMAP_DSS_COLOR_UYVY:
		/*For YUV422 with 90/270 rotation,
		 *we don't upsample chroma
		 */
		if (rotation == OMAP_DSS_ROT_0 ||
			rotation == OMAP_DSS_ROT_180)
			/* UV is subsampled by 2 hrz*/
			orig_width >>= 1;
		/* must use FIR for YUV422 if rotated */
		if (rotation != OMAP_DSS_ROT_0)
			scale_x = scale_y = true;
		break;
	default:
		BUG();
	}

	/* re-check after the subsampling adjustments above */
	if (out_width != orig_width)
		scale_x = true;
	if (out_height != orig_height)
		scale_y = true;

	dispc_ovl_set_scale_param(plane, orig_width, orig_height,
			out_width, out_height, five_taps,
				rotation, DISPC_COLOR_COMPONENT_UV);

	/* ATTRIBUTES2 bit 8: chroma resampling enable */
	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane),
		(scale_x || scale_y) ? 1 : 0, 8, 8);
	/* set H scaling */
	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
	/* set V scaling */
	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);

	/* 0x80 = half-pixel initial chroma phase */
	dispc_ovl_set_vid_accu2_0(plane, 0x80, 0);
	dispc_ovl_set_vid_accu2_1(plane, 0x80, 0);
}
/*
 * Configure the full scaling pipeline of a video overlay: first the
 * RGB/luma path, then the chroma (UV) path for YUV formats.
 */
static void dispc_ovl_set_scaling(enum omap_plane plane,
		u16 orig_width, u16 orig_height,
		u16 out_width, u16 out_height,
		bool ilace, bool five_taps,
		bool fieldmode, enum omap_color_mode color_mode,
		u8 rotation)
{
	/* only the VID pipelines can scale */
	BUG_ON(plane == OMAP_DSS_GFX);

	dispc_ovl_set_scaling_common(plane, orig_width, orig_height,
			out_width, out_height, ilace, five_taps,
			fieldmode, color_mode, rotation);

	dispc_ovl_set_scaling_uv(plane, orig_width, orig_height,
			out_width, out_height, ilace, five_taps,
			fieldmode, color_mode, rotation);
}
/*
 * Program the VIDROT (ATTRIBUTES bits 13:12) and ROWREPEAT (bit 18)
 * fields for VRFB-rotated YUV422 content.  Mirroring remaps the
 * rotation code (0<->180 swap); 90/270 rotations need row repeat.
 * For non-YUV formats the fields are simply cleared.
 */
static void dispc_ovl_set_rotation_attrs(enum omap_plane plane, u8 rotation,
		bool mirroring, enum omap_color_mode color_mode)
{
	bool row_repeat = false;
	int vidrot = 0;

	if (color_mode == OMAP_DSS_COLOR_YUV2 ||
			color_mode == OMAP_DSS_COLOR_UYVY) {

		if (mirroring) {
			/* mirrored: 0 and 180 swap their rotation codes */
			switch (rotation) {
			case OMAP_DSS_ROT_0:
				vidrot = 2;
				break;
			case OMAP_DSS_ROT_90:
				vidrot = 1;
				break;
			case OMAP_DSS_ROT_180:
				vidrot = 0;
				break;
			case OMAP_DSS_ROT_270:
				vidrot = 3;
				break;
			}
		} else {
			switch (rotation) {
			case OMAP_DSS_ROT_0:
				vidrot = 0;
				break;
			case OMAP_DSS_ROT_90:
				vidrot = 1;
				break;
			case OMAP_DSS_ROT_180:
				vidrot = 2;
				break;
			case OMAP_DSS_ROT_270:
				vidrot = 3;
				break;
			}
		}

		if (rotation == OMAP_DSS_ROT_90 || rotation == OMAP_DSS_ROT_270)
			row_repeat = true;
		else
			row_repeat = false;
	}

	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12);
	if (dss_has_feature(FEAT_ROWREPEATENABLE))
		REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
			row_repeat ? 1 : 0, 18, 18);
}
/*
 * Return the bits-per-pixel of a pixel format.  For NV12 this is the
 * bpp of the Y plane only (the UV plane is handled separately).
 *
 * Fix: the original fell off the end of this value-returning function
 * after BUG() on an unknown mode, which is undefined behavior if BUG()
 * is not marked noreturn in the current config; return 0 explicitly.
 */
static int color_mode_to_bpp(enum omap_color_mode color_mode)
{
	switch (color_mode) {
	case OMAP_DSS_COLOR_CLUT1:
		return 1;
	case OMAP_DSS_COLOR_CLUT2:
		return 2;
	case OMAP_DSS_COLOR_CLUT4:
		return 4;
	case OMAP_DSS_COLOR_CLUT8:
	case OMAP_DSS_COLOR_NV12:
		return 8;
	case OMAP_DSS_COLOR_RGB12U:
	case OMAP_DSS_COLOR_RGB16:
	case OMAP_DSS_COLOR_ARGB16:
	case OMAP_DSS_COLOR_YUV2:
	case OMAP_DSS_COLOR_UYVY:
	case OMAP_DSS_COLOR_RGBA16:
	case OMAP_DSS_COLOR_RGBX16:
	case OMAP_DSS_COLOR_ARGB16_1555:
	case OMAP_DSS_COLOR_XRGB16_1555:
		return 16;
	case OMAP_DSS_COLOR_RGB24P:
		return 24;
	case OMAP_DSS_COLOR_RGB24U:
	case OMAP_DSS_COLOR_ARGB32:
	case OMAP_DSS_COLOR_RGBA32:
	case OMAP_DSS_COLOR_RGBX32:
		return 32;
	default:
		BUG();
		return 0;
	}
}
/*
 * Convert a pixel step to a DISPC pixel-increment register value.
 * @pixels: step in pixels (positive forward, negative backward; 0 is
 *          invalid), @ps: bytes per pixel.
 *
 * Fix: the original fell off the end of this value-returning function
 * after BUG() for pixels == 0, which is undefined behavior if BUG()
 * is not marked noreturn in the current config; return 0 explicitly.
 */
static s32 pixinc(int pixels, u8 ps)
{
	if (pixels == 1)
		return 1;
	else if (pixels > 1)
		return 1 + (pixels - 1) * ps;
	else if (pixels < 0)
		return 1 - (-pixels + 1) * ps;

	BUG();
	return 0;
}
/*
 * Compute base-address offsets and row/pixel increments for a frame
 * accessed through the VRFB rotation engine.  With VRFB the rotation
 * itself is done by the VRFB address space, so all rotations use the
 * same linear walk; only YUV422 width halving and the interlace field
 * offset differ.
 *
 * @offset0/@offset1: byte offsets for field 0 / field 1 base addresses
 * @row_inc/@pix_inc: DISPC row and pixel increment register values
 */
static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
		u16 screen_width,
		u16 width, u16 height,
		enum omap_color_mode color_mode, bool fieldmode,
		unsigned int field_offset,
		unsigned *offset0, unsigned *offset1,
		s32 *row_inc, s32 *pix_inc)
{
	u8 ps;

	/* FIXME CLUT formats */
	switch (color_mode) {
	case OMAP_DSS_COLOR_CLUT1:
	case OMAP_DSS_COLOR_CLUT2:
	case OMAP_DSS_COLOR_CLUT4:
	case OMAP_DSS_COLOR_CLUT8:
		BUG();
		return;
	case OMAP_DSS_COLOR_YUV2:
	case OMAP_DSS_COLOR_UYVY:
		/* through VRFB, YUV422 is accessed 4 bytes per macropixel */
		ps = 4;
		break;
	default:
		ps = color_mode_to_bpp(color_mode) / 8;
		break;
	}

	DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width,
			width, height);

	/*
	 * field 0 = even field = bottom field
	 * field 1 = odd field = top field
	 */
	switch (rotation + mirror * 4) {
	case OMAP_DSS_ROT_0:
	case OMAP_DSS_ROT_180:
		/*
		 * If the pixel format is YUV or UYVY divide the width
		 * of the image by 2 for 0 and 180 degree rotation.
		 */
		if (color_mode == OMAP_DSS_COLOR_YUV2 ||
			color_mode == OMAP_DSS_COLOR_UYVY)
			width = width >> 1;
		/* fall through - shares the forward-walk setup below */
	case OMAP_DSS_ROT_90:
	case OMAP_DSS_ROT_270:
		*offset1 = 0;
		if (field_offset)
			*offset0 = field_offset * screen_width * ps;
		else
			*offset0 = 0;

		*row_inc = pixinc(1 + (screen_width - width) +
				(fieldmode ? screen_width : 0),
				ps);
		*pix_inc = pixinc(1, ps);
		break;

	case OMAP_DSS_ROT_0 + 4:
	case OMAP_DSS_ROT_180 + 4:
		/* If the pixel format is YUV or UYVY divide the width
		 * of the image by 2  for 0 degree and 180 degree
		 */
		if (color_mode == OMAP_DSS_COLOR_YUV2 ||
			color_mode == OMAP_DSS_COLOR_UYVY)
			width = width >> 1;
		/* fall through - shares the mirrored-walk setup below */
	case OMAP_DSS_ROT_90 + 4:
	case OMAP_DSS_ROT_270 + 4:
		*offset1 = 0;
		if (field_offset)
			*offset0 = field_offset * screen_width * ps;
		else
			*offset0 = 0;
		*row_inc = pixinc(1 - (screen_width + width) -
				(fieldmode ? screen_width : 0),
				ps);
		*pix_inc = pixinc(1, ps);
		break;

	default:
		BUG();
	}
}
/*
 * Compute base-address offsets and row/pixel increments for software
 * (DMA-based) rotation and mirroring of a linear framebuffer.  Each of
 * the eight rotation+mirror combinations walks the source buffer in a
 * different order purely via offset0/offset1 and the increments.
 *
 * @offset0/@offset1: byte offsets for field 0 / field 1 base addresses
 * @row_inc/@pix_inc: DISPC row and pixel increment register values
 */
static void calc_dma_rotation_offset(u8 rotation, bool mirror,
		u16 screen_width,
		u16 width, u16 height,
		enum omap_color_mode color_mode, bool fieldmode,
		unsigned int field_offset,
		unsigned *offset0, unsigned *offset1,
		s32 *row_inc, s32 *pix_inc)
{
	u8 ps;
	u16 fbw, fbh;

	/* FIXME CLUT formats */
	switch (color_mode) {
	case OMAP_DSS_COLOR_CLUT1:
	case OMAP_DSS_COLOR_CLUT2:
	case OMAP_DSS_COLOR_CLUT4:
	case OMAP_DSS_COLOR_CLUT8:
		BUG();
		return;
	default:
		ps = color_mode_to_bpp(color_mode) / 8;
		break;
	}

	DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width,
			width, height);

	/* width & height are overlay sizes, convert to fb sizes */

	if (rotation == OMAP_DSS_ROT_0 || rotation == OMAP_DSS_ROT_180) {
		fbw = width;
		fbh = height;
	} else {
		/* 90/270: the framebuffer is stored transposed */
		fbw = height;
		fbh = width;
	}

	/*
	 * field 0 = even field = bottom field
	 * field 1 = odd field = top field
	 */
	switch (rotation + mirror * 4) {
	case OMAP_DSS_ROT_0:
		/* start at top-left, walk rows forward */
		*offset1 = 0;
		if (field_offset)
			*offset0 = *offset1 + field_offset * screen_width * ps;
		else
			*offset0 = *offset1;
		*row_inc = pixinc(1 + (screen_width - fbw) +
				(fieldmode ? screen_width : 0),
				ps);
		*pix_inc = pixinc(1, ps);
		break;
	case OMAP_DSS_ROT_90:
		/* start at bottom-left, walk columns upward */
		*offset1 = screen_width * (fbh - 1) * ps;
		if (field_offset)
			*offset0 = *offset1 + field_offset * ps;
		else
			*offset0 = *offset1;
		*row_inc = pixinc(screen_width * (fbh - 1) + 1 +
				(fieldmode ? 1 : 0), ps);
		*pix_inc = pixinc(-screen_width, ps);
		break;
	case OMAP_DSS_ROT_180:
		/* start at bottom-right, walk rows backward */
		*offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
		if (field_offset)
			*offset0 = *offset1 - field_offset * screen_width * ps;
		else
			*offset0 = *offset1;
		*row_inc = pixinc(-1 -
				(screen_width - fbw) -
				(fieldmode ? screen_width : 0),
				ps);
		*pix_inc = pixinc(-1, ps);
		break;
	case OMAP_DSS_ROT_270:
		/* start at top-right, walk columns downward */
		*offset1 = (fbw - 1) * ps;
		if (field_offset)
			*offset0 = *offset1 - field_offset * ps;
		else
			*offset0 = *offset1;
		*row_inc = pixinc(-screen_width * (fbh - 1) - 1 -
				(fieldmode ? 1 : 0), ps);
		*pix_inc = pixinc(screen_width, ps);
		break;

	/* mirroring */
	case OMAP_DSS_ROT_0 + 4:
		/* start at top-right, walk each row backward */
		*offset1 = (fbw - 1) * ps;
		if (field_offset)
			*offset0 = *offset1 + field_offset * screen_width * ps;
		else
			*offset0 = *offset1;
		*row_inc = pixinc(screen_width * 2 - 1 +
				(fieldmode ? screen_width : 0),
				ps);
		*pix_inc = pixinc(-1, ps);
		break;

	case OMAP_DSS_ROT_90 + 4:
		/* start at top-left, walk columns downward */
		*offset1 = 0;
		if (field_offset)
			*offset0 = *offset1 + field_offset * ps;
		else
			*offset0 = *offset1;
		*row_inc = pixinc(-screen_width * (fbh - 1) + 1 +
				(fieldmode ? 1 : 0),
				ps);
		*pix_inc = pixinc(screen_width, ps);
		break;

	case OMAP_DSS_ROT_180 + 4:
		/* start at bottom-left, walk each row forward */
		*offset1 = screen_width * (fbh - 1) * ps;
		if (field_offset)
			*offset0 = *offset1 - field_offset * screen_width * ps;
		else
			*offset0 = *offset1;
		*row_inc = pixinc(1 - screen_width * 2 -
				(fieldmode ? screen_width : 0),
				ps);
		*pix_inc = pixinc(1, ps);
		break;

	case OMAP_DSS_ROT_270 + 4:
		/* start at bottom-right, walk columns upward */
		*offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
		if (field_offset)
			*offset0 = *offset1 - field_offset * ps;
		else
			*offset0 = *offset1;
		*row_inc = pixinc(screen_width * (fbh - 1) - 1 -
				(fieldmode ? 1 : 0),
				ps);
		*pix_inc = pixinc(-screen_width, ps);
		break;

	default:
		BUG();
	}
}
/*
 * Compute the minimum DISPC functional clock needed to run the
 * five-tap scaler for the given source/output sizes.  Returns the
 * pixel clock when no downscaling is needed, and 0 when the required
 * rate cannot be computed (ppl == out_width with >2x vertical
 * downscale, which would divide by zero).
 */
static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
		u16 height, u16 out_width, u16 out_height,
		enum omap_color_mode color_mode)
{
	u32 fclk = 0;
	u64 tmp, pclk = dispc_mgr_pclk_rate(channel);

	if (height <= out_height && width <= out_width)
		return (unsigned long) pclk;

	if (height > out_height) {
		struct omap_dss_device *dssdev = dispc_mgr_get_device(channel);
		unsigned int ppl = dssdev->panel.timings.x_res;

		tmp = pclk * height * out_width;
		do_div(tmp, 2 * out_height * ppl);
		fclk = tmp;

		if (height > 2 * out_height) {
			if (ppl == out_width)
				return 0;

			tmp = pclk * (height - 2 * out_height) * out_width;
			do_div(tmp, 2 * out_height * (ppl - out_width));
			fclk = max(fclk, (u32) tmp);
		}
	}

	if (width > out_width) {
		tmp = pclk * width;
		do_div(tmp, out_width);
		fclk = max(fclk, (u32) tmp);

		/* RGB24 unpacked needs twice the clock */
		if (color_mode == OMAP_DSS_COLOR_RGB24U)
			fclk <<= 1;
	}

	return fclk;
}
/*
 * Compute the minimum DISPC functional clock for three-tap scaling,
 * derived from the pixel clock and the horizontal/vertical downscale
 * factors.  The formula differs per SoC generation.
 */
static unsigned long calc_fclk(enum omap_channel channel, u16 width,
		u16 height, u16 out_width, u16 out_height)
{
	unsigned int hf, vf;
	unsigned long pclk = dispc_mgr_pclk_rate(channel);

	/*
	 * FIXME how to determine the 'A' factor
	 * for the no downscaling case ?
	 */

	/* horizontal downscale factor, rounded up to 1..4 */
	if (width > 3 * out_width)
		hf = 4;
	else if (width > 2 * out_width)
		hf = 3;
	else if (width > out_width)
		hf = 2;
	else
		hf = 1;

	/* vertical downscale factor: 1 or 2 */
	if (height > out_height)
		vf = 2;
	else
		vf = 1;

	if (cpu_is_omap24xx()) {
		if (vf > 1 && hf > 1)
			return pclk * 4;
		else
			return pclk * 2;
	} else if (cpu_is_omap34xx()) {
		return pclk * vf * hf;
	} else {
		/* OMAP4+: scale by the horizontal ratio only */
		if (hf > 1)
			return DIV_ROUND_UP(pclk, out_width) * width;
		else
			return pclk;
	}
}
/*
 * Validate a requested scaling configuration and decide whether the
 * five-tap filter can be used.
 *
 * Checks overlay scaling capability, the SoC's min/max scale limits
 * and line width, and that the required functional clock does not
 * exceed the current DISPC fclk.
 *
 * @five_taps: in/out; may be cleared if only three-tap mode fits.
 * Returns 0 on success, -EINVAL when the configuration is impossible.
 */
static int dispc_ovl_calc_scaling(enum omap_plane plane,
		enum omap_channel channel, u16 width, u16 height,
		u16 out_width, u16 out_height,
		enum omap_color_mode color_mode, bool *five_taps)
{
	struct omap_overlay *ovl = omap_dss_get_overlay(plane);
	const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
	const int maxsinglelinewidth =
				dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
	unsigned long fclk = 0;

	/* 1:1 needs no scaler at all */
	if (width == out_width && height == out_height)
		return 0;

	if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
		return -EINVAL;

	/* hardware limits: at most maxdownscale down, 8x up */
	if (out_width < width / maxdownscale ||
			out_width > width * 8)
		return -EINVAL;

	if (out_height < height / maxdownscale ||
			out_height > height * 8)
		return -EINVAL;

	if (cpu_is_omap24xx()) {
		/* OMAP2 has no five-tap mode */
		if (width > maxsinglelinewidth)
			DSSERR("Cannot scale max input width exceeded");
		*five_taps = false;
		fclk = calc_fclk(channel, width, height, out_width,
								out_height);
	} else if (cpu_is_omap34xx()) {
		if (width > (maxsinglelinewidth * 2)) {
			DSSERR("Cannot setup scaling");
			DSSERR("width exceeds maximum width possible");
			return -EINVAL;
		}
		fclk = calc_fclk_five_taps(channel, width, height, out_width,
						out_height, color_mode);
		if (width > maxsinglelinewidth) {
			/* wide input: five taps only fits for <2x vertical
			 * downscale; otherwise fall back is impossible */
			if (height > out_height && height < out_height * 2)
				*five_taps = false;
			else {
				DSSERR("cannot setup scaling with five taps");
				return -EINVAL;
			}
		}
		if (!*five_taps)
			fclk = calc_fclk(channel, width, height, out_width,
					out_height);
	} else {
		if (width > maxsinglelinewidth) {
			DSSERR("Cannot scale width exceeds max line width");
			return -EINVAL;
		}
		fclk = calc_fclk(channel, width, height, out_width,
				out_height);
	}

	DSSDBG("required fclk rate = %lu Hz\n", fclk);
	DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());

	if (!fclk || fclk > dispc_fclk_rate()) {
		DSSERR("failed to set up scaling, "
			"required fclk rate = %lu Hz, "
			"current fclk rate = %lu Hz\n",
			fclk, dispc_fclk_rate());
		return -EINVAL;
	}

	return 0;
}
/*
 * Fully configure one overlay from an omap_overlay_info: color mode,
 * base addresses (with rotation/interlace offsets), increments,
 * position, sizes, scaling, rotation attributes, z-order and alpha.
 *
 * @ilace: output is interlaced; @replication: enable bit replication.
 * Returns 0 on success or a negative errno if the color mode or the
 * requested scaling is not supported.
 *
 * NOTE(review): mutates oi->height and oi->pos_y in the interlace
 * path — callers appear to pass a scratch copy; confirm before reuse.
 */
int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
		bool ilace, bool replication)
{
	struct omap_overlay *ovl = omap_dss_get_overlay(plane);
	bool five_taps = true;
	bool fieldmode = 0;
	int r, cconv = 0;
	unsigned offset0, offset1;
	s32 row_inc;
	s32 pix_inc;
	u16 frame_height = oi->height;
	unsigned int field_offset = 0;
	u16 outw, outh;
	enum omap_channel channel;

	channel = dispc_ovl_get_channel_out(plane);

	DSSDBG("dispc_ovl_setup %d, pa %x, pa_uv %x, sw %d, %d,%d, %dx%d -> "
		"%dx%d, cmode %x, rot %d, mir %d, ilace %d chan %d repl %d\n",
		plane, oi->paddr, oi->p_uv_addr,
		oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
		oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
		oi->mirror, ilace, channel, replication);

	if (oi->paddr == 0)
		return -EINVAL;

	/* out size of 0 means "no scaling" */
	outw = oi->out_width == 0 ? oi->width : oi->out_width;
	outh = oi->out_height == 0 ? oi->height : oi->out_height;

	/* fieldmode: source already holds both fields interleaved */
	if (ilace && oi->height == outh)
		fieldmode = 1;

	if (ilace) {
		/* per-field dimensions are half the frame dimensions */
		if (fieldmode)
			oi->height /= 2;
		oi->pos_y /= 2;
		outh /= 2;

		DSSDBG("adjusting for ilace: height %d, pos_y %d, "
				"out_height %d\n",
				oi->height, oi->pos_y, outh);
	}

	if (!dss_feat_color_mode_supported(plane, oi->color_mode))
		return -EINVAL;

	r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height,
			outw, outh, oi->color_mode,
			&five_taps);
	if (r)
		return r;

	/* YUV formats need the color space converter */
	if (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
			oi->color_mode == OMAP_DSS_COLOR_UYVY ||
			oi->color_mode == OMAP_DSS_COLOR_NV12)
		cconv = 1;

	if (ilace && !fieldmode) {
		/*
		 * when downscaling the bottom field may have to start several
		 * source lines below the top field. Unfortunately ACCUI
		 * registers will only hold the fractional part of the offset
		 * so the integer part must be added to the base address of the
		 * bottom field.
		 */
		if (!oi->height || oi->height == outh)
			field_offset = 0;
		else
			field_offset = oi->height / outh / 2;
	}

	/* Fields are independent but interleaved in memory. */
	if (fieldmode)
		field_offset = 1;

	if (oi->rotation_type == OMAP_DSS_ROT_DMA)
		calc_dma_rotation_offset(oi->rotation, oi->mirror,
				oi->screen_width, oi->width, frame_height,
				oi->color_mode, fieldmode, field_offset,
				&offset0, &offset1, &row_inc, &pix_inc);
	else
		calc_vrfb_rotation_offset(oi->rotation, oi->mirror,
				oi->screen_width, oi->width, frame_height,
				oi->color_mode, fieldmode, field_offset,
				&offset0, &offset1, &row_inc, &pix_inc);

	DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
			offset0, offset1, row_inc, pix_inc);

	dispc_ovl_set_color_mode(plane, oi->color_mode);

	/* BA0/BA1: field 0 / field 1 base addresses */
	dispc_ovl_set_ba0(plane, oi->paddr + offset0);
	dispc_ovl_set_ba1(plane, oi->paddr + offset1);

	if (OMAP_DSS_COLOR_NV12 == oi->color_mode) {
		dispc_ovl_set_ba0_uv(plane, oi->p_uv_addr + offset0);
		dispc_ovl_set_ba1_uv(plane, oi->p_uv_addr + offset1);
	}

	dispc_ovl_set_row_inc(plane, row_inc);
	dispc_ovl_set_pix_inc(plane, pix_inc);

	DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width,
			oi->height, outw, outh);

	dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y);

	dispc_ovl_set_pic_size(plane, oi->width, oi->height);

	if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) {
		dispc_ovl_set_scaling(plane, oi->width, oi->height,
				   outw, outh,
				   ilace, five_taps, fieldmode,
				   oi->color_mode, oi->rotation);
		dispc_ovl_set_vid_size(plane, outw, outh);
		dispc_ovl_set_vid_color_conv(plane, cconv);
	}

	dispc_ovl_set_rotation_attrs(plane, oi->rotation, oi->mirror,
			oi->color_mode);

	dispc_ovl_set_zorder(plane, oi->zorder);
	dispc_ovl_set_pre_mult_alpha(plane, oi->pre_mult_alpha);
	dispc_ovl_setup_global_alpha(plane, oi->global_alpha);

	dispc_ovl_enable_replication(plane, replication);

	return 0;
}
/* Enable/disable an overlay (ATTRIBUTES bit 0).  Always returns 0. */
int dispc_ovl_enable(enum omap_plane plane, bool enable)
{
	const int on = enable ? 1 : 0;

	DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);

	REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), on, 0, 0);

	return 0;
}
/* ISR that signals a completion; used to wait for frame-done IRQs. */
static void dispc_disable_isr(void *data, u32 mask)
{
	struct completion *done = data;

	complete(done);
}
/* Raw LCD output enable bit; callers handle frame-done waiting. */
static void _enable_lcd_out(enum omap_channel channel, bool enable)
{
	const u16 ctrl = (channel == OMAP_DSS_CHANNEL_LCD2) ?
			DISPC_CONTROL2 : DISPC_CONTROL;

	REG_FLD_MOD(ctrl, enable ? 1 : 0, 0, 0);
	/* read back to flush the posted write */
	dispc_read_reg(ctrl);
}
/*
 * Enable/disable an LCD manager's output.  On disable of a running
 * manager, block (up to 100 ms) for the FRAMEDONE interrupt so the
 * DSS is fully idle before clocks may be cut.
 */
static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable)
{
	struct completion frame_done_completion;
	bool is_on;
	int r;
	u32 irq;

	/* When we disable LCD output, we need to wait until frame is done.
	 * Otherwise the DSS is still working, and turning off the clocks
	 * prevents DSS from going to OFF mode */
	is_on = channel == OMAP_DSS_CHANNEL_LCD2 ?
			REG_GET(DISPC_CONTROL2, 0, 0) :
			REG_GET(DISPC_CONTROL, 0, 0);

	irq = channel == OMAP_DSS_CHANNEL_LCD2 ? DISPC_IRQ_FRAMEDONE2 :
			DISPC_IRQ_FRAMEDONE;

	if (!enable && is_on) {
		/* arm the completion before touching the enable bit */
		init_completion(&frame_done_completion);

		r = omap_dispc_register_isr(dispc_disable_isr,
				&frame_done_completion, irq);

		if (r)
			DSSERR("failed to register FRAMEDONE isr\n");
	}

	_enable_lcd_out(channel, enable);

	if (!enable && is_on) {
		if (!wait_for_completion_timeout(&frame_done_completion,
					msecs_to_jiffies(100)))
			DSSERR("timeout waiting for FRAME DONE\n");

		r = omap_dispc_unregister_isr(dispc_disable_isr,
				&frame_done_completion, irq);

		if (r)
			DSSERR("failed to unregister FRAMEDONE isr\n");
	}
}
/* Raw DIGIT (TV) output enable bit; callers handle IRQ waiting. */
static void _enable_digit_out(bool enable)
{
	REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
	/* flush posted write */
	dispc_read_reg(DISPC_CONTROL);
}
/*
 * Enable/disable the DIGIT (TV) output, synchronizing on the relevant
 * interrupts so the hardware is really started/stopped on return.
 * While enabling, the expected spurious SYNC_LOST_DIGIT interrupt is
 * temporarily masked out of the error set.
 */
static void dispc_mgr_enable_digit_out(bool enable)
{
	struct completion frame_done_completion;
	enum dss_hdmi_venc_clk_source_select src;
	int r, i;
	u32 irq_mask;
	int num_irqs;

	/* no-op if the output is already in the requested state */
	if (REG_GET(DISPC_CONTROL, 1, 1) == enable)
		return;

	src = dss_get_hdmi_venc_clk_source();

	if (enable) {
		unsigned long flags;
		/* When we enable digit output, we'll get an extra digit
		 * sync lost interrupt, that we need to ignore */
		spin_lock_irqsave(&dispc.irq_lock, flags);
		dispc.irq_error_mask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
		_omap_dispc_set_irqs();
		spin_unlock_irqrestore(&dispc.irq_lock, flags);
	}

	/* When we disable digit output, we need to wait until fields are done.
	 * Otherwise the DSS is still working, and turning off the clocks
	 * prevents DSS from going to OFF mode. And when enabling, we need to
	 * wait for the extra sync losts */
	init_completion(&frame_done_completion);

	if (src == DSS_HDMI_M_PCLK && enable == false) {
		/* HDMI path: a single FRAMEDONETV marks completion */
		irq_mask = DISPC_IRQ_FRAMEDONETV;
		num_irqs = 1;
	} else {
		irq_mask = DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
		/* XXX I understand from TRM that we should only wait for the
		 * current field to complete. But it seems we have to wait for
		 * both fields */
		num_irqs = 2;
	}

	r = omap_dispc_register_isr(dispc_disable_isr, &frame_done_completion,
			irq_mask);
	if (r)
		DSSERR("failed to register %x isr\n", irq_mask);

	_enable_digit_out(enable);

	for (i = 0; i < num_irqs; ++i) {
		if (!wait_for_completion_timeout(&frame_done_completion,
					msecs_to_jiffies(100)))
			DSSERR("timeout waiting for digit out to %s\n",
					enable ? "start" : "stop");
	}

	r = omap_dispc_unregister_isr(dispc_disable_isr, &frame_done_completion,
			irq_mask);
	if (r)
		DSSERR("failed to unregister %x isr\n", irq_mask);

	if (enable) {
		unsigned long flags;
		/* restore SYNC_LOST_DIGIT in the error mask and clear any
		 * latched occurrence before re-enabling IRQs */
		spin_lock_irqsave(&dispc.irq_lock, flags);
		dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST_DIGIT;
		dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
		_omap_dispc_set_irqs();
		spin_unlock_irqrestore(&dispc.irq_lock, flags);
	}
}
bool dispc_mgr_is_enabled(enum omap_channel channel)
{
if (channel == OMAP_DSS_CHANNEL_LCD)
return !!REG_GET(DISPC_CONTROL, 0, 0);
else if (channel == OMAP_DSS_CHANNEL_DIGIT)
return !!REG_GET(DISPC_CONTROL, 1, 1);
else if (channel == OMAP_DSS_CHANNEL_LCD2)
return !!REG_GET(DISPC_CONTROL2, 0, 0);
else
BUG();
}
/*
 * Enable/disable a manager's output, dispatching to the LCD or DIGIT
 * path; both paths synchronize with the hardware before returning.
 */
void dispc_mgr_enable(enum omap_channel channel, bool enable)
{
	if (dispc_mgr_is_lcd(channel))
		dispc_mgr_enable_lcd_out(channel, enable);
	else if (channel == OMAP_DSS_CHANNEL_DIGIT)
		dispc_mgr_enable_digit_out(enable);
	else
		BUG();
}
/* Set the polarity of the LCD enable signal (DISPC_CONTROL bit 29). */
void dispc_lcd_enable_signal_polarity(bool act_high)
{
	/* silently ignored on SoCs without the control bit */
	if (dss_has_feature(FEAT_LCDENABLEPOL))
		REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
}
/* Drive the LCD enable signal (DISPC_CONTROL bit 28). */
void dispc_lcd_enable_signal(bool enable)
{
	/* silently ignored on SoCs without the control bit */
	if (dss_has_feature(FEAT_LCDENABLESIGNAL))
		REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
}
/* Enable/disable the free-running pixel clock (DISPC_CONTROL bit 27). */
void dispc_pck_free_enable(bool enable)
{
	/* silently ignored on SoCs without the control bit */
	if (dss_has_feature(FEAT_PCKFREEENABLE))
		REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
}
/* Enable/disable FIFO handcheck (CONFIG bit 16) for a manager. */
void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
	const u16 cfg = (channel == OMAP_DSS_CHANNEL_LCD2) ?
			DISPC_CONFIG2 : DISPC_CONFIG;

	REG_FLD_MOD(cfg, enable ? 1 : 0, 16, 16);
}
/*
 * Select passive (STN) or active (TFT) matrix mode for an LCD manager
 * (CONTROL/CONTROL2 bit 3).
 */
void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
		enum omap_lcd_display_type type)
{
	int mode;

	switch (type) {
	case OMAP_DSS_LCD_DISPLAY_STN:
		mode = 0;
		break;

	case OMAP_DSS_LCD_DISPLAY_TFT:
		mode = 1;
		break;

	default:
		BUG();
		return;
	}

	if (channel == OMAP_DSS_CHANNEL_LCD2)
		REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3);
	else
		REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
}
/* Select the palette/gamma load mode (DISPC_CONFIG bits 2:1). */
void dispc_set_loadmode(enum omap_dss_load_mode mode)
{
	REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1);
}
/* Set the background (default) color shown where no overlay covers. */
static void dispc_mgr_set_default_color(enum omap_channel channel, u32 color)
{
	dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
}
/*
 * Configure transparency color keying for a manager: the key type
 * (source vs destination) goes into a per-channel CONFIG bit and the
 * key color into the TRANS_COLOR register.
 */
static void dispc_mgr_set_trans_key(enum omap_channel ch,
		enum omap_dss_trans_key_type type,
		u32 trans_key)
{
	/* key-type bit position differs per channel */
	if (ch == OMAP_DSS_CHANNEL_LCD)
		REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
		REG_FLD_MOD(DISPC_CONFIG, type, 13, 13);
	else /* OMAP_DSS_CHANNEL_LCD2 */
		REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11);

	dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
}
/* Enable/disable transparency color keying for a manager. */
static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
{
	/* enable-bit position differs per channel */
	if (ch == OMAP_DSS_CHANNEL_LCD)
		REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
		REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
	else /* OMAP_DSS_CHANNEL_LCD2 */
		REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
}
/*
 * Enable/disable fixed-zorder alpha blending for a manager.  Only
 * meaningful on SoCs with the fixed-zorder alpha feature; LCD2 has no
 * such bit and is silently ignored.
 */
static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch,
		bool enable)
{
	if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
		return;

	if (ch == OMAP_DSS_CHANNEL_LCD)
		REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
		REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
}
/*
 * Apply an omap_overlay_manager_info to a manager: default color,
 * transparency keying, fixed-zorder alpha and (when available) CPR.
 */
void dispc_mgr_setup(enum omap_channel channel,
		struct omap_overlay_manager_info *info)
{
	dispc_mgr_set_default_color(channel, info->default_color);
	dispc_mgr_set_trans_key(channel, info->trans_key_type, info->trans_key);
	dispc_mgr_enable_trans_key(channel, info->trans_enabled);
	dispc_mgr_enable_alpha_fixed_zorder(channel,
			info->partial_alpha_enabled);
	if (dss_has_feature(FEAT_CPR)) {
		dispc_mgr_enable_cpr(channel, info->cpr_enable);
		dispc_mgr_set_cpr_coef(channel, &info->cpr_coefs);
	}
}
/*
 * Program the TFT interface width (12/16/18/24 data lines) for an LCD
 * manager via the 2-bit TFTDATALINES field (CONTROL bits 9:8).
 */
void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
{
	int code;

	switch (data_lines) {
	case 12:
		code = 0;
		break;
	case 16:
		code = 1;
		break;
	case 18:
		code = 2;
		break;
	case 24:
		code = 3;
		break;
	default:
		BUG();
		return;
	}

	if (channel == OMAP_DSS_CHANNEL_LCD2)
		REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8);
	else
		REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
}
/*
 * Select the I/O pad mode by driving the GPOUT0/GPOUT1 bits
 * (DISPC_CONTROL bits 15 and 16): reset, RFBI, or bypass.
 */
void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
{
	u32 l;
	int gpout0, gpout1;

	switch (mode) {
	case DSS_IO_PAD_MODE_RESET:
		gpout0 = 0;
		gpout1 = 0;
		break;
	case DSS_IO_PAD_MODE_RFBI:
		gpout0 = 1;
		gpout1 = 0;
		break;
	case DSS_IO_PAD_MODE_BYPASS:
		gpout0 = 1;
		gpout1 = 1;
		break;
	default:
		BUG();
		return;
	}

	/* update both bits in a single read-modify-write */
	l = dispc_read_reg(DISPC_CONTROL);
	l = FLD_MOD(l, gpout0, 15, 15);
	l = FLD_MOD(l, gpout1, 16, 16);
	dispc_write_reg(DISPC_CONTROL, l);
}
/* Enable/disable stall mode (CONTROL bit 11) for an LCD manager. */
void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
{
	const u16 ctrl = (channel == OMAP_DSS_CHANNEL_LCD2) ?
			DISPC_CONTROL2 : DISPC_CONTROL;

	REG_FLD_MOD(ctrl, enable, 11, 11);
}
/*
 * Validate LCD sync/porch values against the hardware field limits.
 * OMAP2 and early OMAP3 revisions have narrower timing fields than
 * OMAP3 ES3.0+.  Returns true when every value fits.
 */
static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
		int vsw, int vfp, int vbp)
{
	if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) {
		if (hsw < 1 || hsw > 64 ||
				hfp < 1 || hfp > 256 ||
				hbp < 1 || hbp > 256 ||
				vsw < 1 || vsw > 64 ||
				vfp < 0 || vfp > 255 ||
				vbp < 0 || vbp > 255)
			return false;
	} else {
		if (hsw < 1 || hsw > 256 ||
				hfp < 1 || hfp > 4096 ||
				hbp < 1 || hbp > 4096 ||
				vsw < 1 || vsw > 256 ||
				vfp < 0 || vfp > 4095 ||
				vbp < 0 || vbp > 4095)
			return false;
	}

	return true;
}
/*
 * Public wrapper: validate a full omap_video_timings structure against
 * the silicon's horizontal/vertical timing field limits.
 */
bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
{
	return _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
			timings->hbp, timings->vsw,
			timings->vfp, timings->vbp);
}
/*
 * Program DISPC_TIMING_H/V for @channel from raw sync/porch values.
 * The register fields hold (value - 1) for the sync widths and the
 * horizontal porches, while the vertical porches are programmed as-is.
 * Field widths differ between early silicon (OMAP2, pre-ES3.0
 * OMAP3430) and later revisions, hence the two packing layouts.
 */
static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
		int hfp, int hbp, int vsw, int vfp, int vbp)
{
	u32 timing_h, timing_v;

	if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) {
		timing_h = FLD_VAL(hsw-1, 5, 0) | FLD_VAL(hfp-1, 15, 8) |
			FLD_VAL(hbp-1, 27, 20);
		timing_v = FLD_VAL(vsw-1, 5, 0) | FLD_VAL(vfp, 15, 8) |
			FLD_VAL(vbp, 27, 20);
	} else {
		timing_h = FLD_VAL(hsw-1, 7, 0) | FLD_VAL(hfp-1, 19, 8) |
			FLD_VAL(hbp-1, 31, 20);
		timing_v = FLD_VAL(vsw-1, 7, 0) | FLD_VAL(vfp, 19, 8) |
			FLD_VAL(vbp, 31, 20);
	}

	dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
	dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
}
/* change name to mode? */
/*
 * Program the LCD timings and active size for @channel, then log the
 * resulting line/frame rates.  BUG()s if the timings exceed the
 * silicon limits.
 */
void dispc_mgr_set_lcd_timings(enum omap_channel channel,
		struct omap_video_timings *timings)
{
	unsigned xtot, ytot;
	unsigned long ht, vt;

	if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
				timings->hbp, timings->vsw,
				timings->vfp, timings->vbp))
		BUG();

	_dispc_mgr_set_lcd_timings(channel, timings->hsw, timings->hfp,
			timings->hbp, timings->vsw, timings->vfp,
			timings->vbp);

	dispc_mgr_set_lcd_size(channel, timings->x_res, timings->y_res);

	/* totals include blanking; pixel_clock appears to be in kHz
	 * (hence the * 1000 to get Hz) - confirm against callers */
	xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
	ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;

	ht = (timings->pixel_clock * 1000) / xtot;
	vt = (timings->pixel_clock * 1000) / xtot / ytot;

	DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res,
			timings->y_res);
	DSSDBG("pck %u\n", timings->pixel_clock);
	DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
			timings->hsw, timings->hfp, timings->hbp,
			timings->vsw, timings->vfp, timings->vbp);
	DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
}
/*
 * Write the logic clock divider (lck_div, bits 23:16) and pixel clock
 * divider (pck_div, bits 7:0) for @channel.  A zero divider would be
 * programmed verbatim and is illegal, hence the BUG_ON checks.
 */
static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
		u16 pck_div)
{
	BUG_ON(lck_div < 1);
	BUG_ON(pck_div < 1);

	dispc_write_reg(DISPC_DIVISORo(channel),
			FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
}
/*
 * Read back the logic (bits 23:16) and pixel (bits 7:0) clock
 * dividers currently programmed for @channel.
 */
static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
		int *pck_div)
{
	u32 div = dispc_read_reg(DISPC_DIVISORo(channel));

	*lck_div = FLD_GET(div, 23, 16);
	*pck_div = FLD_GET(div, 7, 0);
}
/*
 * Return the DISPC functional clock rate, taking the currently
 * selected clock source into account: the plain DSS fclk, or one of
 * the two DSI PLL HSDIV outputs.  An unknown source is a bug.
 */
unsigned long dispc_fclk_rate(void)
{
	struct platform_device *dsidev;
	unsigned long r = 0;

	switch (dss_get_dispc_clk_source()) {
	case OMAP_DSS_CLK_SRC_FCK:
		r = clk_get_rate(dispc.dss_clk);
		break;
	case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
		dsidev = dsi_get_dsidev_from_id(0);
		r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
		break;
	case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
		dsidev = dsi_get_dsidev_from_id(1);
		r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
		break;
	default:
		BUG();
	}

	return r;
}
/*
 * Return the logic clock rate for an LCD manager: the selected source
 * clock divided by the LCD divider read from DISPC_DIVISORo.
 * NOTE(review): a divider field of 0 would divide by zero here; the
 * programming path BUG_ONs lck_div < 1, so this presumably cannot
 * happen after setup - confirm for the pre-setup case.
 */
unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
{
	struct platform_device *dsidev;
	int lcd;
	unsigned long r;
	u32 l;

	l = dispc_read_reg(DISPC_DIVISORo(channel));

	lcd = FLD_GET(l, 23, 16);

	switch (dss_get_lcd_clk_source(channel)) {
	case OMAP_DSS_CLK_SRC_FCK:
		r = clk_get_rate(dispc.dss_clk);
		break;
	case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
		dsidev = dsi_get_dsidev_from_id(0);
		r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
		break;
	case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
		dsidev = dsi_get_dsidev_from_id(1);
		r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
		break;
	default:
		BUG();
	}

	return r / lcd;
}
/*
 * Return the pixel clock rate for @channel.  For LCD managers this is
 * the logic clock divided by the programmed pixel divider; for the
 * DIGIT (TV) path the rate comes from the attached encoder (VENC or
 * HDMI).  An unknown display type is a bug.
 *
 * Fix: the default case previously fell off the end of this non-void
 * function after BUG() (undefined return value / compiler warning on
 * configurations where BUG() is not marked noreturn); return 0
 * explicitly.
 */
unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
{
	unsigned long r;

	if (dispc_mgr_is_lcd(channel)) {
		int pcd;
		u32 l;

		l = dispc_read_reg(DISPC_DIVISORo(channel));

		pcd = FLD_GET(l, 7, 0);

		r = dispc_mgr_lclk_rate(channel);

		return r / pcd;
	} else {
		struct omap_dss_device *dssdev =
			dispc_mgr_get_device(channel);

		switch (dssdev->type) {
		case OMAP_DISPLAY_TYPE_VENC:
			return venc_get_pixel_clock();
		case OMAP_DISPLAY_TYPE_HDMI:
			return hdmi_get_pixel_clock();
		default:
			BUG();
			return 0;
		}
	}
}
/*
 * Dump the DISPC clock tree (sources, rates, dividers) to a seq_file
 * for debugfs.  Takes a runtime-PM reference while touching registers;
 * LCD2 information is printed only on SoCs that have that manager.
 */
void dispc_dump_clocks(struct seq_file *s)
{
	int lcd, pcd;
	u32 l;
	enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
	enum omap_dss_clk_source lcd_clk_src;

	if (dispc_runtime_get())
		return;

	seq_printf(s, "- DISPC -\n");

	seq_printf(s, "dispc fclk source = %s (%s)\n",
			dss_get_generic_clk_source_name(dispc_clk_src),
			dss_feat_get_clk_source_name(dispc_clk_src));

	seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());

	if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
		seq_printf(s, "- DISPC-CORE-CLK -\n");
		l = dispc_read_reg(DISPC_DIVISOR);
		lcd = FLD_GET(l, 23, 16);

		seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
				(dispc_fclk_rate()/lcd), lcd);
	}

	seq_printf(s, "- LCD1 -\n");

	lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD);

	seq_printf(s, "lcd1_clk source = %s (%s)\n",
			dss_get_generic_clk_source_name(lcd_clk_src),
			dss_feat_get_clk_source_name(lcd_clk_src));

	dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd);

	seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
			dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd);
	seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
			dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd);

	if (dss_has_feature(FEAT_MGR_LCD2)) {
		seq_printf(s, "- LCD2 -\n");

		lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2);

		seq_printf(s, "lcd2_clk source = %s (%s)\n",
				dss_get_generic_clk_source_name(lcd_clk_src),
				dss_feat_get_clk_source_name(lcd_clk_src));

		dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd);

		seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
				dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD2), lcd);
		seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
				dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
	}

	dispc_runtime_put();
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
/*
 * Dump the collected per-IRQ statistics to a seq_file and reset them.
 * A snapshot is taken under irq_stats_lock so counters stay consistent
 * while printing; conditional entries are only printed when the SoC
 * actually has the corresponding hardware (VID3 overlay, LCD2 manager).
 */
void dispc_dump_irqs(struct seq_file *s)
{
	unsigned long flags;
	struct dispc_irq_stats stats;

	spin_lock_irqsave(&dispc.irq_stats_lock, flags);

	stats = dispc.irq_stats;
	memset(&dispc.irq_stats, 0, sizeof(dispc.irq_stats));
	dispc.irq_stats.last_reset = jiffies;

	spin_unlock_irqrestore(&dispc.irq_stats_lock, flags);

	seq_printf(s, "period %u ms\n",
			jiffies_to_msecs(jiffies - stats.last_reset));

	seq_printf(s, "irqs %d\n", stats.irq_count);
/* print one stats line per IRQ bit; ffs() maps the bit to its slot */
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);

	PIS(FRAMEDONE);
	PIS(VSYNC);
	PIS(EVSYNC_EVEN);
	PIS(EVSYNC_ODD);
	PIS(ACBIAS_COUNT_STAT);
	PIS(PROG_LINE_NUM);
	PIS(GFX_FIFO_UNDERFLOW);
	PIS(GFX_END_WIN);
	PIS(PAL_GAMMA_MASK);
	PIS(OCP_ERR);
	PIS(VID1_FIFO_UNDERFLOW);
	PIS(VID1_END_WIN);
	PIS(VID2_FIFO_UNDERFLOW);
	PIS(VID2_END_WIN);
	if (dss_feat_get_num_ovls() > 3) {
		PIS(VID3_FIFO_UNDERFLOW);
		PIS(VID3_END_WIN);
	}
	PIS(SYNC_LOST);
	PIS(SYNC_LOST_DIGIT);
	PIS(WAKEUP);
	if (dss_has_feature(FEAT_MGR_LCD2)) {
		PIS(FRAMEDONE2);
		PIS(VSYNC2);
		PIS(ACBIAS_COUNT_STAT2);
		PIS(SYNC_LOST2);
	}
#undef PIS
}
#endif
/*
 * Dump all DISPC registers to a seq_file for debugfs: first the common
 * registers, then per-manager, per-overlay and per-overlay coefficient
 * registers.  Conditional dumps depend on the SoC feature set.  Takes
 * a runtime-PM reference for the duration of the register reads.
 */
void dispc_dump_regs(struct seq_file *s)
{
	int i, j;
	const char *mgr_names[] = {
		[OMAP_DSS_CHANNEL_LCD]		= "LCD",
		[OMAP_DSS_CHANNEL_DIGIT]	= "TV",
		[OMAP_DSS_CHANNEL_LCD2]		= "LCD2",
	};
	const char *ovl_names[] = {
		[OMAP_DSS_GFX]		= "GFX",
		[OMAP_DSS_VIDEO1]	= "VID1",
		[OMAP_DSS_VIDEO2]	= "VID2",
		[OMAP_DSS_VIDEO3]	= "VID3",
	};
	const char **p_names;

#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r))

	if (dispc_runtime_get())
		return;

	/* DISPC common registers */
	DUMPREG(DISPC_REVISION);
	DUMPREG(DISPC_SYSCONFIG);
	DUMPREG(DISPC_SYSSTATUS);
	DUMPREG(DISPC_IRQSTATUS);
	DUMPREG(DISPC_IRQENABLE);
	DUMPREG(DISPC_CONTROL);
	DUMPREG(DISPC_CONFIG);
	DUMPREG(DISPC_CAPABLE);
	DUMPREG(DISPC_LINE_STATUS);
	DUMPREG(DISPC_LINE_NUMBER);
	if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
			dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
		DUMPREG(DISPC_GLOBAL_ALPHA);
	if (dss_has_feature(FEAT_MGR_LCD2)) {
		DUMPREG(DISPC_CONTROL2);
		DUMPREG(DISPC_CONFIG2);
	}

#undef DUMPREG

#define DISPC_REG(i, name) name(i)
#define DUMPREG(i, r) seq_printf(s, "%s(%s)%*s %08x\n", #r, p_names[i], \
	48 - strlen(#r) - strlen(p_names[i]), " ", \
	dispc_read_reg(DISPC_REG(i, r)))

	p_names = mgr_names;

	/* DISPC channel specific registers */
	for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
		DUMPREG(i, DISPC_DEFAULT_COLOR);
		DUMPREG(i, DISPC_TRANS_COLOR);
		DUMPREG(i, DISPC_SIZE_MGR);

		if (i == OMAP_DSS_CHANNEL_DIGIT)
			continue;

		/* NOTE(review): DEFAULT_COLOR/TRANS_COLOR/SIZE_MGR are
		 * dumped a second time for LCD channels below - looks
		 * redundant but is harmless (debug output only) */
		DUMPREG(i, DISPC_DEFAULT_COLOR);
		DUMPREG(i, DISPC_TRANS_COLOR);
		DUMPREG(i, DISPC_TIMING_H);
		DUMPREG(i, DISPC_TIMING_V);
		DUMPREG(i, DISPC_POL_FREQ);
		DUMPREG(i, DISPC_DIVISORo);
		DUMPREG(i, DISPC_SIZE_MGR);

		DUMPREG(i, DISPC_DATA_CYCLE1);
		DUMPREG(i, DISPC_DATA_CYCLE2);
		DUMPREG(i, DISPC_DATA_CYCLE3);

		if (dss_has_feature(FEAT_CPR)) {
			DUMPREG(i, DISPC_CPR_COEF_R);
			DUMPREG(i, DISPC_CPR_COEF_G);
			DUMPREG(i, DISPC_CPR_COEF_B);
		}
	}

	p_names = ovl_names;

	for (i = 0; i < dss_feat_get_num_ovls(); i++) {
		DUMPREG(i, DISPC_OVL_BA0);
		DUMPREG(i, DISPC_OVL_BA1);
		DUMPREG(i, DISPC_OVL_POSITION);
		DUMPREG(i, DISPC_OVL_SIZE);
		DUMPREG(i, DISPC_OVL_ATTRIBUTES);
		DUMPREG(i, DISPC_OVL_FIFO_THRESHOLD);
		DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS);
		DUMPREG(i, DISPC_OVL_ROW_INC);
		DUMPREG(i, DISPC_OVL_PIXEL_INC);
		if (dss_has_feature(FEAT_PRELOAD))
			DUMPREG(i, DISPC_OVL_PRELOAD);

		if (i == OMAP_DSS_GFX) {
			/* GFX-only registers; the scaler registers below
			 * exist only on the video pipelines */
			DUMPREG(i, DISPC_OVL_WINDOW_SKIP);
			DUMPREG(i, DISPC_OVL_TABLE_BA);
			continue;
		}

		DUMPREG(i, DISPC_OVL_FIR);
		DUMPREG(i, DISPC_OVL_PICTURE_SIZE);
		DUMPREG(i, DISPC_OVL_ACCU0);
		DUMPREG(i, DISPC_OVL_ACCU1);
		if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
			DUMPREG(i, DISPC_OVL_BA0_UV);
			DUMPREG(i, DISPC_OVL_BA1_UV);
			DUMPREG(i, DISPC_OVL_FIR2);
			DUMPREG(i, DISPC_OVL_ACCU2_0);
			DUMPREG(i, DISPC_OVL_ACCU2_1);
		}
		if (dss_has_feature(FEAT_ATTR2))
			DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
		if (dss_has_feature(FEAT_PRELOAD))
			DUMPREG(i, DISPC_OVL_PRELOAD);
	}

#undef DISPC_REG
#undef DUMPREG

#define DISPC_REG(plane, name, i) name(plane, i)
#define DUMPREG(plane, name, i) \
	seq_printf(s, "%s_%d(%s)%*s %08x\n", #name, i, p_names[plane], \
	46 - strlen(#name) - strlen(p_names[plane]), " ", \
	dispc_read_reg(DISPC_REG(plane, name, i)))

	/* Video pipeline coefficient registers */
	p_names = ovl_names;

	/* start from OMAP_DSS_VIDEO1 */
	for (i = 1; i < dss_feat_get_num_ovls(); i++) {
		for (j = 0; j < 8; j++)
			DUMPREG(i, DISPC_OVL_FIR_COEF_H, j);

		for (j = 0; j < 8; j++)
			DUMPREG(i, DISPC_OVL_FIR_COEF_HV, j);

		for (j = 0; j < 5; j++)
			DUMPREG(i, DISPC_OVL_CONV_COEF, j);

		if (dss_has_feature(FEAT_FIR_COEF_V)) {
			for (j = 0; j < 8; j++)
				DUMPREG(i, DISPC_OVL_FIR_COEF_V, j);
		}

		if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
			for (j = 0; j < 8; j++)
				DUMPREG(i, DISPC_OVL_FIR_COEF_H2, j);

			for (j = 0; j < 8; j++)
				DUMPREG(i, DISPC_OVL_FIR_COEF_HV2, j);

			for (j = 0; j < 8; j++)
				DUMPREG(i, DISPC_OVL_FIR_COEF_V2, j);
		}
	}

	dispc_runtime_put();

#undef DISPC_REG
#undef DUMPREG
}
/*
 * Compose and write DISPC_POL_FREQ for @channel from the individual
 * polarity/enable flags and the AC-bias settings:
 *   onoff (17), rf (16), ieo (15), ipc (14), ihs (13), ivs (12),
 *   acbi (11:8), acb (7:0).
 */
static void _dispc_mgr_set_pol_freq(enum omap_channel channel, bool onoff,
		bool rf, bool ieo, bool ipc, bool ihs, bool ivs, u8 acbi,
		u8 acb)
{
	u32 l = 0;

	DSSDBG("onoff %d rf %d ieo %d ipc %d ihs %d ivs %d acbi %d acb %d\n",
			onoff, rf, ieo, ipc, ihs, ivs, acbi, acb);

	l |= FLD_VAL(onoff, 17, 17);
	l |= FLD_VAL(rf, 16, 16);
	l |= FLD_VAL(ieo, 15, 15);
	l |= FLD_VAL(ipc, 14, 14);
	l |= FLD_VAL(ihs, 13, 13);
	l |= FLD_VAL(ivs, 12, 12);
	l |= FLD_VAL(acbi, 11, 8);
	l |= FLD_VAL(acb, 7, 0);

	dispc_write_reg(DISPC_POL_FREQ(channel), l);
}
/*
 * Public wrapper: unpack the OMAP_DSS_LCD_* flag bits from @config
 * into individual booleans and program DISPC_POL_FREQ.
 */
void dispc_mgr_set_pol_freq(enum omap_channel channel,
		enum omap_panel_config config, u8 acbi, u8 acb)
{
	_dispc_mgr_set_pol_freq(channel, (config & OMAP_DSS_LCD_ONOFF) != 0,
			(config & OMAP_DSS_LCD_RF) != 0,
			(config & OMAP_DSS_LCD_IEO) != 0,
			(config & OMAP_DSS_LCD_IPC) != 0,
			(config & OMAP_DSS_LCD_IHS) != 0,
			(config & OMAP_DSS_LCD_IVS) != 0,
			acbi, acb);
}
/* with fck as input clock rate, find dispc dividers that produce req_pck */
/*
 * Exhaustive search over the logic divider (1..255) and the pixel
 * divider (feature-dependent range) for the pair whose resulting pixel
 * clock is closest to @req_pck; exact matches short-circuit the
 * search.  For passive (non-TFT) panels the minimum pixel divider is
 * raised to 3.  Results (dividers plus the resulting lck/pck rates)
 * are written into @cinfo.
 */
void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
		struct dispc_clock_info *cinfo)
{
	u16 pcd_min, pcd_max;
	unsigned long best_pck;
	u16 best_ld, cur_ld;
	u16 best_pd, cur_pd;

	pcd_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD);
	pcd_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD);

	if (!is_tft)
		pcd_min = 3;

	best_pck = 0;
	best_ld = 0;
	best_pd = 0;

	for (cur_ld = 1; cur_ld <= 255; ++cur_ld) {
		unsigned long lck = fck / cur_ld;

		for (cur_pd = pcd_min; cur_pd <= pcd_max; ++cur_pd) {
			unsigned long pck = lck / cur_pd;
			long old_delta = abs(best_pck - req_pck);
			long new_delta = abs(pck - req_pck);

			if (best_pck == 0 || new_delta < old_delta) {
				best_pck = pck;
				best_ld = cur_ld;
				best_pd = cur_pd;

				if (pck == req_pck)
					goto found;
			}

			/* pck only decreases as cur_pd grows; once we
			 * are below the target, larger pcd won't help */
			if (pck < req_pck)
				break;
		}

		/* even the fastest pck at this lck is already too slow:
		 * larger logic dividers can only make it worse */
		if (lck / pcd_min < req_pck)
			break;
	}

found:
	cinfo->lck_div = best_ld;
	cinfo->pck_div = best_pd;
	cinfo->lck = fck / cinfo->lck_div;
	cinfo->pck = cinfo->lck / cinfo->pck_div;
}
/*
 * Given the DISPC fclk rate and the dividers already stored in @cinfo,
 * validate the dividers (both must be 1..255) and fill in the
 * resulting lck/pck rates.  Returns 0 on success, -EINVAL on an
 * out-of-range divider.
 */
int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
		struct dispc_clock_info *cinfo)
{
	if (cinfo->lck_div == 0 || cinfo->lck_div > 255)
		return -EINVAL;

	if (cinfo->pck_div == 0 || cinfo->pck_div > 255)
		return -EINVAL;

	cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
	cinfo->pck = cinfo->lck / cinfo->pck_div;

	return 0;
}
/*
 * Program the dividers from @cinfo into DISPC_DIVISORo(@channel).
 * Always returns 0; kept int presumably for API symmetry with the
 * other clock-div functions.
 */
int dispc_mgr_set_clock_div(enum omap_channel channel,
		struct dispc_clock_info *cinfo)
{
	DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
	DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);

	dispc_mgr_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div);

	return 0;
}
/*
 * Read the current dividers for @channel from the hardware and compute
 * the resulting lck/pck rates into @cinfo.  Always returns 0.
 * NOTE(review): a zero divider field would divide by zero here;
 * presumably the hardware is always programmed with valid dividers
 * before this is called - confirm.
 */
int dispc_mgr_get_clock_div(enum omap_channel channel,
		struct dispc_clock_info *cinfo)
{
	unsigned long fck;

	fck = dispc_fclk_rate();

	cinfo->lck_div = REG_GET(DISPC_DIVISORo(channel), 23, 16);
	cinfo->pck_div = REG_GET(DISPC_DIVISORo(channel), 7, 0);

	cinfo->lck = fck / cinfo->lck_div;
	cinfo->pck = cinfo->lck / cinfo->pck_div;

	return 0;
}
/* dispc.irq_lock has to be locked by the caller */
/*
 * Recompute and program DISPC_IRQENABLE as the union of the current
 * error mask and every registered ISR's mask.  Status bits for newly
 * enabled interrupts are cleared first so stale events do not fire
 * the moment they are unmasked.
 */
static void _omap_dispc_set_irqs(void)
{
	u32 mask;
	u32 old_mask;
	int i;
	struct omap_dispc_isr_data *isr_data;

	mask = dispc.irq_error_mask;

	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
		isr_data = &dispc.registered_isr[i];

		if (isr_data->isr == NULL)
			continue;

		mask |= isr_data->mask;
	}

	old_mask = dispc_read_reg(DISPC_IRQENABLE);
	/* clear the irqstatus for newly enabled irqs */
	dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);

	dispc_write_reg(DISPC_IRQENABLE, mask);
}
/*
 * Register @isr to be called with @arg whenever any interrupt in
 * @mask fires.  An exact duplicate (isr, arg, mask) registration is
 * rejected with -EINVAL; -EBUSY when all DISPC_MAX_NR_ISRS slots are
 * taken.  On success the hardware IRQ enable mask is refreshed.
 */
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
{
	int i;
	int ret;
	unsigned long flags;
	struct omap_dispc_isr_data *isr_data;

	if (isr == NULL)
		return -EINVAL;

	spin_lock_irqsave(&dispc.irq_lock, flags);

	/* check for duplicate entry */
	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
		isr_data = &dispc.registered_isr[i];
		if (isr_data->isr == isr && isr_data->arg == arg &&
				isr_data->mask == mask) {
			ret = -EINVAL;
			goto err;
		}
	}

	isr_data = NULL;
	ret = -EBUSY;

	/* claim the first free slot */
	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
		isr_data = &dispc.registered_isr[i];

		if (isr_data->isr != NULL)
			continue;

		isr_data->isr = isr;
		isr_data->arg = arg;
		isr_data->mask = mask;
		ret = 0;

		break;
	}

	if (ret)
		goto err;

	_omap_dispc_set_irqs();

	spin_unlock_irqrestore(&dispc.irq_lock, flags);

	return 0;
err:
	spin_unlock_irqrestore(&dispc.irq_lock, flags);

	return ret;
}
EXPORT_SYMBOL(omap_dispc_register_isr);
/*
 * Remove a previously registered (isr, arg, mask) triple.  Returns 0
 * and refreshes the hardware IRQ mask on success, -EINVAL if no
 * matching registration exists.
 */
int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
{
	int i;
	unsigned long flags;
	int ret = -EINVAL;
	struct omap_dispc_isr_data *isr_data;

	spin_lock_irqsave(&dispc.irq_lock, flags);

	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
		isr_data = &dispc.registered_isr[i];
		if (isr_data->isr != isr || isr_data->arg != arg ||
				isr_data->mask != mask)
			continue;

		/* found the correct isr */

		isr_data->isr = NULL;
		isr_data->arg = NULL;
		isr_data->mask = 0;

		ret = 0;
		break;
	}

	if (ret == 0)
		_omap_dispc_set_irqs();

	spin_unlock_irqrestore(&dispc.irq_lock, flags);

	return ret;
}
EXPORT_SYMBOL(omap_dispc_unregister_isr);
#ifdef DEBUG
/*
 * Debug helper: print the names of the error interrupts present in
 * @status.  Silent when none of the bits in the current error mask
 * are set; feature-gated bits are only checked where the hardware
 * has them.
 */
static void print_irq_status(u32 status)
{
	if ((status & dispc.irq_error_mask) == 0)
		return;

	printk(KERN_DEBUG "DISPC IRQ: 0x%x: ", status);

#define PIS(x) \
	if (status & DISPC_IRQ_##x) \
		printk(#x " ");
	PIS(GFX_FIFO_UNDERFLOW);
	PIS(OCP_ERR);
	PIS(VID1_FIFO_UNDERFLOW);
	PIS(VID2_FIFO_UNDERFLOW);
	if (dss_feat_get_num_ovls() > 3)
		PIS(VID3_FIFO_UNDERFLOW);
	PIS(SYNC_LOST);
	PIS(SYNC_LOST_DIGIT);
	if (dss_has_feature(FEAT_MGR_LCD2))
		PIS(SYNC_LOST2);
#undef PIS

	printk("\n");
}
#endif
/* Called from dss.c. Note that we don't touch clocks here,
 * but we presume they are on because we got an IRQ. However,
 * an irq handler may turn the clocks off, so we may not have
 * clock later in the function. */
/*
 * Top-half DISPC interrupt handler: snapshot and ack the status, call
 * every registered ISR whose mask matches, then mask out and defer any
 * unhandled error interrupts to the error worker.
 *
 * Fix: the ISR-array dereference was corrupted by a mis-encoded
 * character ("&reg..." had become the '\xae' (registered-trademark)
 * glyph), which does not compile; restored to &registered_isr[i].
 */
static irqreturn_t omap_dispc_irq_handler(int irq, void *arg)
{
	int i;
	u32 irqstatus, irqenable;
	u32 handledirqs = 0;
	u32 unhandled_errors;
	struct omap_dispc_isr_data *isr_data;
	struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];

	spin_lock(&dispc.irq_lock);

	irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
	irqenable = dispc_read_reg(DISPC_IRQENABLE);

	/* IRQ is not for us */
	if (!(irqstatus & irqenable)) {
		spin_unlock(&dispc.irq_lock);
		return IRQ_NONE;
	}

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spin_lock(&dispc.irq_stats_lock);
	dispc.irq_stats.irq_count++;
	dss_collect_irq_stats(irqstatus, dispc.irq_stats.irqs);
	spin_unlock(&dispc.irq_stats_lock);
#endif

#ifdef DEBUG
	if (dss_debug)
		print_irq_status(irqstatus);
#endif

	/* Ack the interrupt. Do it here before clocks are possibly turned
	 * off */
	dispc_write_reg(DISPC_IRQSTATUS, irqstatus);
	/* flush posted write */
	dispc_read_reg(DISPC_IRQSTATUS);

	/* make a copy and unlock, so that isrs can unregister
	 * themselves */
	memcpy(registered_isr, dispc.registered_isr,
			sizeof(registered_isr));

	spin_unlock(&dispc.irq_lock);

	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
		isr_data = &registered_isr[i];

		if (!isr_data->isr)
			continue;

		if (isr_data->mask & irqstatus) {
			isr_data->isr(isr_data->arg, irqstatus);
			handledirqs |= isr_data->mask;
		}
	}

	spin_lock(&dispc.irq_lock);

	unhandled_errors = irqstatus & ~handledirqs & dispc.irq_error_mask;

	if (unhandled_errors) {
		dispc.error_irqs |= unhandled_errors;

		/* mask the error until the worker re-enables it, so a
		 * stuck error cannot flood the CPU with interrupts */
		dispc.irq_error_mask &= ~unhandled_errors;
		_omap_dispc_set_irqs();

		schedule_work(&dispc.error_work);
	}

	spin_unlock(&dispc.irq_lock);

	return IRQ_HANDLED;
}
/*
 * Deferred (workqueue) handler for error interrupts collected by the
 * IRQ top-half: disables overlays that underflowed their FIFO,
 * restarts outputs that lost sync (with video overlays disabled), and
 * shuts everything down on an OCP error.  Finally re-enables the
 * error interrupts that were masked by the top-half.
 *
 * Fix: the loop that disabled the video overlays of a SYNC_LOST
 * manager reused the outer manager-loop index 'i', clobbering it and
 * cutting the manager iteration short (later managers' SYNC_LOST
 * errors were silently skipped).  It now uses its own index.
 */
static void dispc_error_worker(struct work_struct *work)
{
	int i;
	u32 errors;
	unsigned long flags;
	static const unsigned fifo_underflow_bits[] = {
		DISPC_IRQ_GFX_FIFO_UNDERFLOW,
		DISPC_IRQ_VID1_FIFO_UNDERFLOW,
		DISPC_IRQ_VID2_FIFO_UNDERFLOW,
		DISPC_IRQ_VID3_FIFO_UNDERFLOW,
	};

	static const unsigned sync_lost_bits[] = {
		DISPC_IRQ_SYNC_LOST,
		DISPC_IRQ_SYNC_LOST_DIGIT,
		DISPC_IRQ_SYNC_LOST2,
	};

	spin_lock_irqsave(&dispc.irq_lock, flags);
	errors = dispc.error_irqs;
	dispc.error_irqs = 0;
	spin_unlock_irqrestore(&dispc.irq_lock, flags);

	dispc_runtime_get();

	for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
		struct omap_overlay *ovl;
		unsigned bit;

		ovl = omap_dss_get_overlay(i);
		bit = fifo_underflow_bits[i];

		if (bit & errors) {
			DSSERR("FIFO UNDERFLOW on %s, disabling the overlay\n",
					ovl->name);
			dispc_ovl_enable(ovl->id, false);
			dispc_mgr_go(ovl->manager->id);
			mdelay(50);
		}
	}

	for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
		struct omap_overlay_manager *mgr;
		unsigned bit;

		mgr = omap_dss_get_overlay_manager(i);
		bit = sync_lost_bits[i];

		if (bit & errors) {
			struct omap_dss_device *dssdev = mgr->device;
			bool enable;
			int j;

			DSSERR("SYNC_LOST on channel %s, restarting the output "
					"with video overlays disabled\n",
					mgr->name);

			enable = dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
			dssdev->driver->disable(dssdev);

			/* dedicated index: reusing 'i' here clobbered
			 * the manager-loop counter */
			for (j = 0; j < omap_dss_get_num_overlays(); ++j) {
				struct omap_overlay *ovl;

				ovl = omap_dss_get_overlay(j);

				if (ovl->id != OMAP_DSS_GFX &&
						ovl->manager == mgr)
					dispc_ovl_enable(ovl->id, false);
			}

			dispc_mgr_go(mgr->id);
			mdelay(50);

			if (enable)
				dssdev->driver->enable(dssdev);
		}
	}

	if (errors & DISPC_IRQ_OCP_ERR) {
		DSSERR("OCP_ERR\n");
		for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
			struct omap_overlay_manager *mgr;

			mgr = omap_dss_get_overlay_manager(i);
			if (mgr->device && mgr->device->driver)
				mgr->device->driver->disable(mgr->device);
		}
	}

	spin_lock_irqsave(&dispc.irq_lock, flags);
	dispc.irq_error_mask |= errors;
	_omap_dispc_set_irqs();
	spin_unlock_irqrestore(&dispc.irq_lock, flags);

	dispc_runtime_put();
}
/*
 * Block (uninterruptibly) until any interrupt in @irqmask fires or
 * @timeout jiffies elapse.  Returns 0 on success, -ETIMEDOUT on
 * timeout, or the ISR-registration error.  Uses a GCC nested function
 * as the completion-firing ISR.
 * NOTE(review): wait_for_completion_timeout() never returns
 * -ERESTARTSYS, so that branch appears to be dead code carried over
 * from the interruptible variant below - confirm before removing.
 */
int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
{
	void dispc_irq_wait_handler(void *data, u32 mask)
	{
		complete((struct completion *)data);
	}

	int r;
	DECLARE_COMPLETION_ONSTACK(completion);

	r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
			irqmask);

	if (r)
		return r;

	timeout = wait_for_completion_timeout(&completion, timeout);

	omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);

	if (timeout == 0)
		return -ETIMEDOUT;

	if (timeout == -ERESTARTSYS)
		return -ERESTARTSYS;

	return 0;
}
/*
 * Like omap_dispc_wait_for_irq_timeout() but interruptible by signals:
 * returns 0 on success, -ETIMEDOUT on timeout, -ERESTARTSYS when a
 * signal interrupted the wait, or the ISR-registration error.  The
 * negative return of wait_for_completion_interruptible_timeout() is
 * stored in the unsigned 'timeout' and compared after implicit
 * conversion, which is well-defined here.
 */
int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
		unsigned long timeout)
{
	void dispc_irq_wait_handler(void *data, u32 mask)
	{
		complete((struct completion *)data);
	}

	int r;
	DECLARE_COMPLETION_ONSTACK(completion);

	r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
			irqmask);

	if (r)
		return r;

	timeout = wait_for_completion_interruptible_timeout(&completion,
			timeout);

	omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);

	if (timeout == 0)
		return -ETIMEDOUT;

	if (timeout == -ERESTARTSYS)
		return -ERESTARTSYS;

	return 0;
}
#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
/*
 * Synthesize a VSYNC event in software: invoke every registered ISR
 * whose mask includes DISPC_IRQ_VSYNC, as if the hardware interrupt
 * had fired.  Must be called from interrupt context (WARN otherwise),
 * since the ISRs expect it.
 */
void dispc_fake_vsync_irq(void)
{
	u32 irqstatus = DISPC_IRQ_VSYNC;
	int i;

	WARN_ON(!in_interrupt());

	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
		struct omap_dispc_isr_data *isr_data;
		isr_data = &dispc.registered_isr[i];

		if (!isr_data->isr)
			continue;

		if (isr_data->mask & irqstatus)
			isr_data->isr(isr_data->arg, irqstatus);
	}
}
#endif
/*
 * One-time IRQ setup: clear the ISR table, build the error mask
 * (extended with LCD2/VID3 bits where the hardware has them), ack any
 * pending status, and program the enable mask.
 */
static void _omap_dispc_initialize_irq(void)
{
	unsigned long flags;

	spin_lock_irqsave(&dispc.irq_lock, flags);

	memset(dispc.registered_isr, 0, sizeof(dispc.registered_isr));

	dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
	if (dss_has_feature(FEAT_MGR_LCD2))
		dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
	if (dss_feat_get_num_ovls() > 3)
		dispc.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW;

	/* there's SYNC_LOST_DIGIT waiting after enabling the DSS,
	 * so clear it */
	dispc_write_reg(DISPC_IRQSTATUS, dispc_read_reg(DISPC_IRQSTATUS));

	_omap_dispc_set_irqs();

	spin_unlock_irqrestore(&dispc.irq_lock, flags);
}
/* Allow the DISPC interface to enter smart-idle (SIDLEMODE = 2). */
void dispc_enable_sidle(void)
{
	REG_FLD_MOD(DISPC_SYSCONFIG, 2, 4, 3);	/* SIDLEMODE: smart idle */
}
/* Keep the DISPC interface out of idle (SIDLEMODE = 1, no idle). */
void dispc_disable_sidle(void)
{
	REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3);	/* SIDLEMODE: no idle */
}
/*
 * One-time hardware configuration after probe: select the core clock
 * divider, enable functional clock gating where supported, load the
 * colour conversion coefficients, set frame-only load mode, and read
 * back the FIFO sizes / burst sizes / z-order plane setup.
 */
static void _omap_dispc_initial_config(void)
{
	u32 l;

	/* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
	if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
		l = dispc_read_reg(DISPC_DIVISOR);
		/* Use DISPC_DIVISOR.LCD, instead of DISPC_DIVISOR1.LCD */
		l = FLD_MOD(l, 1, 0, 0);
		l = FLD_MOD(l, 1, 23, 16);
		dispc_write_reg(DISPC_DIVISOR, l);
	}

	/* FUNCGATED */
	if (dss_has_feature(FEAT_FUNCGATED))
		REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);

	_dispc_setup_color_conv_coef();

	dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);

	dispc_read_plane_fifo_sizes();

	dispc_configure_burst_sizes();

	dispc_ovl_enable_zorder_planes();
}
/* DISPC HW IP initialisation */
/*
 * Platform probe for the DISPC IP: map registers, request the (shared)
 * IRQ, grab the functional clock, enable runtime PM, and perform the
 * initial hardware/IRQ configuration under a runtime-PM reference.
 * Returns 0 on success or a negative errno; devm-managed resources
 * (ioremap, irq) need no explicit cleanup on the error paths.
 */
static int omap_dispchw_probe(struct platform_device *pdev)
{
	u32 rev;
	int r = 0;
	struct resource *dispc_mem;
	struct clk *clk;

	dispc.pdev = pdev;

	spin_lock_init(&dispc.irq_lock);

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spin_lock_init(&dispc.irq_stats_lock);
	dispc.irq_stats.last_reset = jiffies;
#endif

	INIT_WORK(&dispc.error_work, dispc_error_worker);

	dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
	if (!dispc_mem) {
		DSSERR("can't get IORESOURCE_MEM DISPC\n");
		return -EINVAL;
	}

	dispc.base = devm_ioremap(&pdev->dev, dispc_mem->start,
				  resource_size(dispc_mem));
	if (!dispc.base) {
		DSSERR("can't ioremap DISPC\n");
		return -ENOMEM;
	}

	dispc.irq = platform_get_irq(dispc.pdev, 0);
	if (dispc.irq < 0) {
		DSSERR("platform_get_irq failed\n");
		return -ENODEV;
	}

	r = devm_request_irq(&pdev->dev, dispc.irq, omap_dispc_irq_handler,
			     IRQF_SHARED, "OMAP DISPC", dispc.pdev);
	if (r < 0) {
		DSSERR("request_irq failed\n");
		return r;
	}

	clk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(clk)) {
		DSSERR("can't get fck\n");
		r = PTR_ERR(clk);
		return r;
	}

	dispc.dss_clk = clk;

	pm_runtime_enable(&pdev->dev);

	r = dispc_runtime_get();
	if (r)
		goto err_runtime_get;

	_omap_dispc_initial_config();

	_omap_dispc_initialize_irq();

	rev = dispc_read_reg(DISPC_REVISION);
	dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
		FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));

	dispc_runtime_put();

	return 0;

err_runtime_get:
	pm_runtime_disable(&pdev->dev);
	clk_put(dispc.dss_clk);
	return r;
}
/* Platform remove: undo runtime-PM enable and drop the fck reference
 * (everything else was devm-managed in probe). */
static int omap_dispchw_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);

	clk_put(dispc.dss_clk);

	return 0;
}
/* Runtime-PM suspend: save DISPC register context, then release the
 * parent DSS runtime reference. */
static int dispc_runtime_suspend(struct device *dev)
{
	dispc_save_context();
	dss_runtime_put();

	return 0;
}
/* Runtime-PM resume: re-acquire the parent DSS runtime reference and
 * restore the saved register context.  Propagates dss_runtime_get()
 * failures. */
static int dispc_runtime_resume(struct device *dev)
{
	int r;

	r = dss_runtime_get();
	if (r < 0)
		return r;

	dispc_restore_context();

	return 0;
}
/* Runtime-PM callbacks: register context is saved/restored around the
 * parent DSS clock gating. */
static const struct dev_pm_ops dispc_pm_ops = {
	.runtime_suspend = dispc_runtime_suspend,
	.runtime_resume = dispc_runtime_resume,
};
/* Platform driver binding for the "omapdss_dispc" device. */
static struct platform_driver omap_dispchw_driver = {
	.probe          = omap_dispchw_probe,
	.remove         = omap_dispchw_remove,
	.driver         = {
		.name   = "omapdss_dispc",
		.owner  = THIS_MODULE,
		.pm	= &dispc_pm_ops,
	},
};
/* Register the DISPC platform driver (called from DSS core init). */
int dispc_init_platform_driver(void)
{
	return platform_driver_register(&omap_dispchw_driver);
}
/*
 * Unregister the DISPC platform driver.
 * Fix: the original used "return platform_driver_unregister(...)" -
 * returning a void expression from a void function is a C constraint
 * violation (accepted only as a GCC extension); call it plainly.
 */
void dispc_uninit_platform_driver(void)
{
	platform_driver_unregister(&omap_dispchw_driver);
}
| gpl-2.0 |
civato/V30B-SithLord | arch/mips/lasat/reset.c | 4841 | 1629 | /*
* Thomas Horsten <thh@lasat.com>
* Copyright (C) 2000 LASAT Networks A/S.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Reset the LASAT board.
*/
#include <linux/kernel.h>
#include <linux/pm.h>
#include <asm/reboot.h>
#include <asm/system.h>
#include <asm/lasat/lasat.h>
#include "picvue.h"
#include "prom.h"
static void lasat_machine_restart(char *command);
static void lasat_machine_halt(void);
/* Used to set machine to boot in service mode via /proc interface */
int lasat_boot_to_service;
/*
 * Restart hook (installed as _machine_restart).  With interrupts off,
 * optionally plants two magic words in uncached (KSEG1) memory so the
 * boot monitor comes up in service mode, then pokes the board's reset
 * register and spins until the hardware resets.  @command is ignored.
 */
static void lasat_machine_restart(char *command)
{
	local_irq_disable();

	if (lasat_boot_to_service) {
		/* board-defined magic cookies, presumably read back by
		 * the boot monitor - addresses/values are fixed by the
		 * firmware contract */
		*(volatile unsigned int *)0xa0000024 = 0xdeadbeef;
		*(volatile unsigned int *)0xa00000fc = 0xfedeabba;
	}
	*lasat_misc->reset_reg = 0xbedead;
	for (;;) ;
}
/* Halt hook: disable interrupts, drop into the PROM monitor, and spin
 * forever in case it ever returns. */
static void lasat_machine_halt(void)
{
	local_irq_disable();

	prom_monitor();
	for (;;) ;
}
/* Install the LASAT restart/halt handlers; power-off reuses halt since
 * the board has no software power switch visible here. */
void lasat_reboot_setup(void)
{
	_machine_restart = lasat_machine_restart;
	_machine_halt = lasat_machine_halt;
	pm_power_off = lasat_machine_halt;
}
| gpl-2.0 |
sudosurootdev/kernel_samsung_u8500 | arch/mips/lasat/reset.c | 4841 | 1629 | /*
* Thomas Horsten <thh@lasat.com>
* Copyright (C) 2000 LASAT Networks A/S.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Reset the LASAT board.
*/
#include <linux/kernel.h>
#include <linux/pm.h>
#include <asm/reboot.h>
#include <asm/system.h>
#include <asm/lasat/lasat.h>
#include "picvue.h"
#include "prom.h"
static void lasat_machine_restart(char *command);
static void lasat_machine_halt(void);
/* Used to set machine to boot in service mode via /proc interface */
int lasat_boot_to_service;
/*
 * Restart hook (installed as _machine_restart).  With interrupts off,
 * optionally plants two magic words in uncached (KSEG1) memory so the
 * boot monitor comes up in service mode, then pokes the board's reset
 * register and spins until the hardware resets.  @command is ignored.
 */
static void lasat_machine_restart(char *command)
{
	local_irq_disable();

	if (lasat_boot_to_service) {
		/* board-defined magic cookies, presumably read back by
		 * the boot monitor - addresses/values are fixed by the
		 * firmware contract */
		*(volatile unsigned int *)0xa0000024 = 0xdeadbeef;
		*(volatile unsigned int *)0xa00000fc = 0xfedeabba;
	}
	*lasat_misc->reset_reg = 0xbedead;
	for (;;) ;
}
/* Halt hook: disable interrupts, drop into the PROM monitor, and spin
 * forever in case it ever returns. */
static void lasat_machine_halt(void)
{
	local_irq_disable();

	prom_monitor();
	for (;;) ;
}
/* Install the LASAT restart/halt handlers; power-off reuses halt since
 * the board has no software power switch visible here. */
void lasat_reboot_setup(void)
{
	_machine_restart = lasat_machine_restart;
	_machine_halt = lasat_machine_halt;
	pm_power_off = lasat_machine_halt;
}
| gpl-2.0 |
PriceElectronics/linux-imx | arch/mips/jz4740/prom.c | 7913 | 1508 | /*
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
* JZ4740 SoC prom code
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/serial_reg.h>
#include <asm/bootinfo.h>
#include <asm/mach-jz4740/base.h>
/*
 * Build the kernel command line (arcs_cmdline) by joining the
 * firmware-provided argv[1..argc-1] with single spaces, truncated to
 * COMMAND_LINE_SIZE - 1 characters and always NUL-terminated.
 *
 * Fix: the separating space is now counted against the remaining
 * budget ('count').  Previously each space was appended without
 * decrementing 'count', so with several arguments whose combined
 * length approached the limit the writes could run past the end of
 * arcs_cmdline.
 */
static __init void jz4740_init_cmdline(int argc, char *argv[])
{
	unsigned int count = COMMAND_LINE_SIZE - 1;
	int i;
	char *dst = &(arcs_cmdline[0]);
	char *src;

	for (i = 1; i < argc && count; ++i) {
		src = argv[i];
		while (*src && count) {
			*dst++ = *src++;
			--count;
		}
		/* argument separator, bounded by the same budget */
		if (count) {
			*dst++ = ' ';
			--count;
		}
	}

	/* strip the trailing separator, if one was written */
	if (i > 1 && dst != &(arcs_cmdline[0]) && dst[-1] == ' ')
		--dst;

	*dst = 0;
}
/* Early PROM init: fw_arg0/fw_arg1 carry argc/argv from the
 * bootloader; build the command line from them and record the
 * machine type. */
void __init prom_init(void)
{
	jz4740_init_cmdline((int)fw_arg0, (char **)fw_arg1);
	mips_machtype = MACH_INGENIC_JZ4740;
}
/* Nothing to reclaim: this platform reserves no PROM memory. */
void __init prom_free_prom_memory(void)
{
}
#define UART_REG(_reg) ((void __iomem *)CKSEG1ADDR(JZ4740_UART0_BASE_ADDR + (_reg << 2)))
/*
 * Early-console character output: busy-poll UART0's line status
 * register until the transmitter is completely empty (UART_LSR_TEMT),
 * then write @c to the TX register.
 */
void prom_putchar(char c)
{
	uint8_t lsr;

	do {
		lsr = readb(UART_REG(UART_LSR));
	} while ((lsr & UART_LSR_TEMT) == 0);

	writeb(c, UART_REG(UART_TX));
}
| gpl-2.0 |
zarboz/brick_kernel_msm8960 | drivers/parport/parport_cs.c | 8169 | 5882 | /*======================================================================
A driver for PCMCIA parallel port adapters
(specifically, for the Quatech SPP-100 EPP card: other cards will
probably require driver tweaks)
parport_cs.c 1.29 2002/10/11 06:57:41
The contents of this file are subject to the Mozilla Public
License Version 1.1 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of
the License at http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS
IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
implied. See the License for the specific language governing
rights and limitations under the License.
The initial developer of the original code is David A. Hinds
<dahinds@users.sourceforge.net>. Portions created by David A. Hinds
are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
Alternatively, the contents of this file may be used under the
terms of the GNU General Public License version 2 (the "GPL"), in
which case the provisions of the GPL are applicable instead of the
above. If you wish to allow the use of your version of this file
only under the terms of the GPL and not to allow others to use
your version of this file under the MPL, indicate your decision
by deleting the provisions above and replace them with the notice
and other provisions required by the GPL. If you do not delete
the provisions above, a recipient may use your version of this
file under either the MPL or the GPL.
======================================================================*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
/*====================================================================*/
/* Module parameters */
MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
MODULE_DESCRIPTION("PCMCIA parallel port card driver");
MODULE_LICENSE("Dual MPL/GPL");
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
INT_MODULE_PARM(epp_mode, 1);
/*====================================================================*/
#define FORCE_EPP_MODE 0x08
typedef struct parport_info_t {
struct pcmcia_device *p_dev;
int ndev;
struct parport *port;
} parport_info_t;
static void parport_detach(struct pcmcia_device *p_dev);
static int parport_config(struct pcmcia_device *link);
static void parport_cs_release(struct pcmcia_device *);
static int parport_probe(struct pcmcia_device *link)
{
parport_info_t *info;
dev_dbg(&link->dev, "parport_attach()\n");
/* Create new parport device */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) return -ENOMEM;
link->priv = info;
info->p_dev = link;
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
return parport_config(link);
} /* parport_attach */
/*
 * Detach callback: the card was removed or the driver is unloading.
 * Releases the parallel port resources, then frees the per-socket state
 * allocated in parport_probe().
 */
static void parport_detach(struct pcmcia_device *link)
{
	dev_dbg(&link->dev, "parport_detach\n");

	parport_cs_release(link);

	kfree(link->priv);
} /* parport_detach */
static int parport_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
return pcmcia_request_io(p_dev);
}
/*
 * Configure the socket and register the parallel port.
 *
 * Walks the card's configuration table to claim two 8-bit I/O windows,
 * enables the device, then hands the I/O bases and IRQ to the generic
 * parport_pc probe.  Returns 0 on success, -ENODEV on any failure
 * (partially-acquired resources are undone via parport_cs_release()).
 */
static int parport_config(struct pcmcia_device *link)
{
	parport_info_t *info = link->priv;
	struct parport *p;
	int ret;

	dev_dbg(&link->dev, "parport_config\n");

	/* Setting this bit in the config index selects EPP mode on the
	 * Quatech-style cards this driver targets. */
	if (epp_mode)
		link->config_index |= FORCE_EPP_MODE;

	ret = pcmcia_loop_config(link, parport_config_check, NULL);
	if (ret)
		goto failed;

	/* An interrupt line is mandatory for parport_pc. */
	if (!link->irq)
		goto failed;
	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

	/* resource[0] is the SPP register block, resource[1] the second
	 * (ECP/EPP) window claimed by parport_config_check(). */
	p = parport_pc_probe_port(link->resource[0]->start,
				  link->resource[1]->start,
				  link->irq, PARPORT_DMA_NONE,
				  &link->dev, IRQF_SHARED);
	if (p == NULL) {
		printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at "
		       "0x%3x, irq %u failed\n",
		       (unsigned int) link->resource[0]->start,
		       link->irq);
		goto failed;
	}

	p->modes |= PARPORT_MODE_PCSPP;
	if (epp_mode)
		p->modes |= PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP;
	/* Record success so parport_cs_release() knows to unregister. */
	info->ndev = 1;
	info->port = p;

	return 0;

failed:
	parport_cs_release(link);
	return -ENODEV;
} /* parport_config */
/*
 * Undo parport_config(): unregister the parallel port (if one was set
 * up) and release the PCMCIA socket resources.  Safe to call even when
 * configuration never completed, since ndev is only set on success.
 */
static void parport_cs_release(struct pcmcia_device *link)
{
	parport_info_t *info = link->priv;

	dev_dbg(&link->dev, "parport_release\n");

	if (info->ndev != 0)
		parport_pc_unregister_port(info->port);
	info->ndev = 0;

	pcmcia_disable_device(link);
} /* parport_cs_release */
/* Cards this driver binds to: any card advertising PCMCIA function ID 3
 * (parallel port), the Elan SP230 serial+parallel combo (function 1),
 * and manf/card 0x0137/0x0003 - presumably the Quatech SPP-100 named in
 * the file header; confirm against the card's CIS. */
static const struct pcmcia_device_id parport_ids[] = {
	PCMCIA_DEVICE_FUNC_ID(3),
	PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
	PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003),
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, parport_ids);
/* PCMCIA driver glue: probe/remove are called by the PCMCIA core on
 * card insertion/removal for IDs matching parport_ids[]. */
static struct pcmcia_driver parport_cs_driver = {
	.owner		= THIS_MODULE,
	.name		= "parport_cs",
	.probe		= parport_probe,
	.remove		= parport_detach,
	.id_table	= parport_ids,
};
/* Module init: register the driver with the PCMCIA core. */
static int __init init_parport_cs(void)
{
	return pcmcia_register_driver(&parport_cs_driver);
}

/* Module exit: unregister; the core detaches any bound sockets first. */
static void __exit exit_parport_cs(void)
{
	pcmcia_unregister_driver(&parport_cs_driver);
}

module_init(init_parport_cs);
module_exit(exit_parport_cs);
| gpl-2.0 |
TMartinPPC/maru_kernel_flo | fs/mbcache.c | 9961 | 16783 | /*
* linux/fs/mbcache.c
* (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/
/*
* Filesystem Meta Information Block Cache (mbcache)
*
* The mbcache caches blocks of block devices that need to be located
* by their device/block number, as well as by other criteria (such
* as the block's contents).
*
* There can only be one cache entry in a cache per device and block number.
* Additional indexes need not be unique in this sense. The number of
* additional indexes (=other criteria) can be hardwired at compile time
* or specified at cache create time.
*
* Each cache entry is of fixed size. An entry may be `valid' or `invalid'
* in the cache. A valid entry is in the main hash tables of the cache,
* and may also be in the lru list. An invalid entry is not in any hashes
* or lists.
*
* A valid cache entry is only in the lru list if no handles refer to it.
* Invalid cache entries will be freed when the last handle to the cache
* entry is released. Entries that cannot be freed immediately are put
* back on the lru list.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>
#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
printk(KERN_DEBUG f); \
printk("\n"); \
} while (0)
#define mb_assert(c) do { if (!(c)) \
printk(KERN_ERR "assertion " #c " failed\n"); \
} while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
printk(KERN_ERR f); \
printk("\n"); \
} while(0)
#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif
/*
* Global data: list of all mbcache's, lru list, and a spinlock for
* accessing cache data structures on SMP machines. The lru list is
* global across all mbcaches.
*/
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
/*
* What the mbcache registers as to get shrunk dynamically.
*/
static int mb_cache_shrink_fn(struct shrinker *shrink,
struct shrink_control *sc);
static struct shrinker mb_cache_shrinker = {
.shrink = mb_cache_shrink_fn,
.seeks = DEFAULT_SEEKS,
};
/*
 * An entry is "valid" (visible to lookups) exactly while it sits on a
 * block hash chain; __mb_cache_entry_unhash() reinitializes the list
 * head, so emptiness is a reliable test.
 */
static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return list_empty(&ce->e_block_list) ? 0 : 1;
}
/*
 * Remove an entry from both hash chains, making it invalid.  Safe to
 * call on an already-unhashed entry.  Caller holds mb_cache_spinlock.
 */
static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	if (!__mb_cache_entry_is_hashed(ce))
		return;

	list_del_init(&ce->e_block_list);
	list_del(&ce->e_index.o_list);
}
/*
 * Free an invalid cache entry and decrement its cache's entry count.
 * The entry must have no users or waiters (asserted below) and must
 * already be unhashed.  gfp_mask is accepted for the call convention
 * but unused here.
 */
static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}
/*
 * Drop one reference to a cache entry and release mb_cache_spinlock,
 * which the caller must hold on entry (hence __releases).
 *
 * On the last reference: a still-hashed (valid) entry is parked on the
 * global LRU list for reuse; an unhashed (invalid) entry is freed, with
 * the actual free done outside the lock.
 */
static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	__releases(mb_cache_spinlock)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	/* A writer's count is 1 + MB_CACHE_WRITER; shed the writer part
	 * first so the decrement below works for readers and writers. */
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}
/*
* mb_cache_shrink_fn() memory pressure callback
*
* This function is called by the kernel memory management when memory
* gets low.
*
* @shrink: (ignored)
* @sc: shrink_control passed from reclaim
*
* Returns the number of objects which are present in the cache.
*/
static int
mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(free_list);
	struct mb_cache *cache;
	struct mb_cache_entry *entry, *tmp;
	int count = 0;
	int nr_to_scan = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
	/* Detach up to nr_to_scan idle entries from the global LRU and
	 * unhash them, so they can be freed after the lock is dropped.
	 * Entries currently in use are never on the LRU, so they are
	 * implicitly skipped. */
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	}
	/* Total population across all registered caches, reported back
	 * to the VM (scaled by vfs_cache_pressure below). */
	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
		mb_debug("cache %s (%d)", cache->c_name,
			  atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	spin_unlock(&mb_cache_spinlock);
	/* Free outside the spinlock. */
	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
	}
	return (count / 100) * sysctl_vfs_cache_pressure;
}
/*
* mb_cache_create() create a new cache
*
* All entries in one cache are equal size. Cache entries may be from
* multiple devices. If this is the first mbcache created, registers
* the cache with kernel memory management. Returns NULL if no more
* memory was available.
*
* @name: name of the cache (informal)
* @bucket_bits: log2(number of hash buckets)
*/
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	/* NOTE(review): the name pointer is stored, not copied - the
	 * caller must keep it alive for the cache's lifetime (it is also
	 * used as the kmem_cache name below). */
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	/* Two hash tables: one keyed by (bdev, block), one by the extra
	 * index key. */
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
	                              GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
	                              GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
	 * chains won't grow too long.
	 */
	cache->c_max_entries = bucket_count << 4;

	/* Publish the cache on the global list under the global lock. */
	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	/* kfree(NULL) is a no-op, so this path also covers a failed
	 * c_block_hash allocation. */
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}
/*
* mb_cache_shrink()
*
* Removes all cache entries of a device from the cache. All cache entries
* currently in use cannot be freed, and thus remain in the cache. All others
* are freed.
*
* @bdev: which device's cache entries to shrink
*/
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	/* Only unused entries live on the LRU, so in-use entries for this
	 * bdev are naturally left alone, as documented above. */
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	/* Free the detached entries outside the spinlock. */
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}
/*
* mb_cache_destroy()
*
* Shrinks the cache to its minimum possible size (hopefully 0 entries),
* and then destroys it. If this was the last mbcache, un-registers the
* mbcache from kernel memory management.
*/
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	/* Detach all of this cache's idle entries from the global LRU. */
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	/* Unpublish the cache so the shrinker no longer sees it. */
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	/* Entries still referenced by handles at this point can only
	 * leak; report them. */
	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			  cache->c_name,
			  atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}
/*
* mb_cache_entry_alloc()
*
* Allocates a new cache entry. The new entry will not be valid initially,
* and thus cannot be looked up yet. It should be filled with data, and
* then inserted into the cache using mb_cache_entry_insert(). Returns NULL
* if no more memory was available.
*/
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce = NULL;

	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		/* At capacity: recycle the oldest idle entry instead of
		 * growing the slab footprint.
		 * NOTE(review): the LRU list is global across caches, so
		 * the victim may belong to a *different* cache; e_cache
		 * and the per-cache entry counts are not adjusted here.
		 * Harmless while only one mbcache exists - confirm if
		 * multiple caches are ever created. */
		spin_lock(&mb_cache_spinlock);
		if (!list_empty(&mb_cache_lru_list)) {
			ce = list_entry(mb_cache_lru_list.next,
					struct mb_cache_entry, e_lru_list);
			list_del_init(&ce->e_lru_list);
			__mb_cache_entry_unhash(ce);
		}
		spin_unlock(&mb_cache_spinlock);
	}
	if (!ce) {
		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
		if (!ce)
			return NULL;
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_queued = 0;
	}
	/* Return the entry held for exclusive access ("single writer"). */
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}
/*
* mb_cache_entry_insert()
*
* Inserts an entry that was allocated using mb_cache_entry_alloc() into
* the cache. After this, the cache entry can be looked up, but is not yet
* in the lru list as the caller still holds a handle to it. Returns 0 on
* success, or -EBUSY if a cache entry for that device + inode exists
* already (this may happen after a failed lookup, but when another process
* has inserted the same cache entry in the meantime).
*
* @bdev: device the cache entry belongs to
* @block: block number
* @key: lookup key
*/
/*
 * Insert a freshly allocated entry into both hash tables under the
 * global lock.  Fails with -EBUSY if an entry for the same device and
 * block already exists (see the block comment above for when that can
 * legitimately happen).
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	/* Duplicate check.  The cursor gets its own name: the original
	 * code shadowed the 'ce' parameter here, which was legal but an
	 * invitation for bugs. */
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *lce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (lce->e_bdev == bdev && lce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	ce->e_index.o_key = key;
	/* Second hash chain: keyed by the additional index value. */
	bucket = hash_long(key, cache->c_bucket_bits);
	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}
/*
* mb_cache_entry_release()
*
* Release a handle to a cache entry. When the last handle to a cache entry
* is released it is either freed (if it is invalid) or otherwise inserted
* in to the lru list.
*/
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	/* The helper releases mb_cache_spinlock before returning. */
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}
/*
* mb_cache_entry_free()
*
* This is equivalent to the sequence mb_cache_entry_takeout() --
* mb_cache_entry_release().
*/
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	/* Invalidate first, so the release path frees the entry instead
	 * of parking it on the LRU; the helper also drops the lock. */
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}
/*
* mb_cache_entry_get()
*
* Get a cache entry by device / block number. (There can only be one entry
* in the cache per device and block.) Returns NULL if no such cache entry
* exists. The returned cache entry is locked for exclusive access ("single
* writer").
*/
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			/* Take it off the LRU: it is about to be in use. */
			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Exclusive access: sleep (lock dropped around
			 * schedule()) until all current users are gone. */
			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;

			/* The entry may have been invalidated while we
			 * slept; drop it and report no match. */
			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				return NULL;
			}
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
/*
 * Walk an index hash chain from 'l' to 'head' looking for an entry that
 * matches (bdev, key).  Called and returns with mb_cache_spinlock held,
 * but may drop and retake it while sleeping for a writer to finish.
 * Returns the entry held for shared (reader) access, NULL if no match,
 * or ERR_PTR(-EAGAIN) if the matched entry was invalidated while we
 * slept (the chain position is then stale and the caller must retry).
 */
static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_index.o_list);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			/* Sleep while a writer holds the entry; the lock is
			 * dropped around schedule(). */
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				/* Release drops the lock; retake it to keep
				 * the "returns locked" contract. */
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}
/*
* mb_cache_entry_find_first()
*
* Find the first cache entry on a given device with a certain key in
* an additional index. Additional matches can be found with
* mb_cache_entry_find_next(). Returns NULL if no match was found. The
* returned cache entry is locked for shared access ("multiple readers").
*
* @cache: the cache to search
* @bdev: the device the cache entry should belong to
* @key: the key in the index
*/
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	/* Start at the head of this key's index hash chain; the helper
	 * returns with the lock still held. */
	l = cache->c_index_hash[bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
/*
* mb_cache_entry_find_next()
*
* Find the next cache entry on a given device with a certain key in an
* additional index. Returns NULL if no match could be found. The previous
* entry is atomatically released, so that mb_cache_entry_find_next() can
* be called like this:
*
* entry = mb_cache_entry_find_first();
* while (entry) {
* ...
* entry = mb_cache_entry_find_next(entry, ...);
* }
*
* @prev: The previous match
* @bdev: the device the cache entry should belong to
* @key: the key in the index
*/
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	/* Resume the chain walk just past the previous match. */
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	/* Drop the previous entry's reference; this also releases the
	 * spinlock (see __mb_cache_entry_release_unlock). */
	__mb_cache_entry_release_unlock(prev);
	return ce;
}
#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */
/* Module init: hook the cache into memory reclaim. */
static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

/* Module exit: detach from memory reclaim. */
static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)
| gpl-2.0 |
cooks8/android_kernel_samsung_jf | drivers/mtd/maps/scx200_docflash.c | 12777 | 5559 | /* linux/drivers/mtd/maps/scx200_docflash.c
Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
National Semiconductor SCx200 flash mapped with DOCCS
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/pci.h>
#include <linux/scx200.h>
#define NAME "scx200_docflash"
MODULE_AUTHOR("Christer Weinigel <wingel@hack.org>");
MODULE_DESCRIPTION("NatSemi SCx200 DOCCS Flash Driver");
MODULE_LICENSE("GPL");
static int probe = 0; /* Don't autoprobe */
static unsigned size = 0x1000000; /* 16 MiB the whole ISA address space */
static unsigned width = 8; /* Default to 8 bits wide */
static char *flashtype = "cfi_probe";
module_param(probe, int, 0);
MODULE_PARM_DESC(probe, "Probe for a BIOS mapping");
module_param(size, int, 0);
MODULE_PARM_DESC(size, "Size of the flash mapping");
module_param(width, int, 0);
MODULE_PARM_DESC(width, "Data width of the flash mapping (8/16)");
module_param(flashtype, charp, 0);
MODULE_PARM_DESC(flashtype, "Type of MTD probe to do");
/* Physical address window claimed for the DOCCS flash mapping. */
static struct resource docmem = {
	.flags = IORESOURCE_MEM,
	.name  = "NatSemi SCx200 DOCCS Flash",
};

/* The MTD device created by the probe, NULL until init succeeds. */
static struct mtd_info *mymtd;

/* Fixed partition layout; the two ~0 fields are computed at init time
 * from the detected flash size (see init_scx200_docflash()). */
static struct mtd_partition partition_info[] = {
	{
		.name   = "DOCCS Boot kernel",
		.offset = 0,
		.size   = 0xc0000
	},
	{
		.name   = "DOCCS Low BIOS",
		.offset = 0xc0000,
		.size   = 0x40000
	},
	{
		.name   = "DOCCS File system",
		.offset = 0x100000,
		.size   = ~0	/* calculate from flash size */
	},
	{
		.name   = "DOCCS High BIOS",
		.offset = ~0,	/* calculate from flash size */
		.size   = 0x80000
	},
};
#define NUM_PARTITIONS ARRAY_SIZE(partition_info)

/* MTD map descriptor; size/bankwidth/phys/virt filled in at init. */
static struct map_info scx200_docflash_map = {
	.name = "NatSemi SCx200 DOCCS Flash",
};
/*
 * Probe/configure the SCx200 DOCCS flash window and register it as an
 * MTD device.
 *
 * With probe=1 the BIOS-programmed DOCCS mapping is reused; otherwise a
 * window of `size` bytes is allocated high in the physical address
 * space and programmed into the bridge.  Returns 0 on success or a
 * negative errno.
 *
 * Fix vs. the original: in the non-probe branch, pci_dev_put(bridge)
 * was called at the top of the branch, *before* the two
 * pci_write_config_dword() accesses through `bridge` - a use after the
 * reference was dropped.  The put now happens after the last config
 * access, with puts added on the branch's early error returns so no
 * reference leaks.
 */
static int __init init_scx200_docflash(void)
{
	unsigned u;
	unsigned base;
	unsigned ctrl;
	unsigned pmr;
	struct pci_dev *bridge;

	printk(KERN_DEBUG NAME ": NatSemi SCx200 DOCCS Flash Driver\n");

	if ((bridge = pci_get_device(PCI_VENDOR_ID_NS,
				     PCI_DEVICE_ID_NS_SCx200_BRIDGE,
				     NULL)) == NULL)
		return -ENODEV;

	/* check that we have found the configuration block */
	if (!scx200_cb_present()) {
		pci_dev_put(bridge);
		return -ENODEV;
	}

	if (probe) {
		/* Try to use the present flash mapping if any */
		pci_read_config_dword(bridge, SCx200_DOCCS_BASE, &base);
		pci_read_config_dword(bridge, SCx200_DOCCS_CTRL, &ctrl);
		pci_dev_put(bridge);

		pmr = inl(scx200_cb_base + SCx200_PMR);

		/* Mapping must be enabled (CTRL[26:24] all set) and have
		 * a non-zero size field. */
		if (base == 0
		    || (ctrl & 0x07000000) != 0x07000000
		    || (ctrl & 0x0007ffff) == 0)
			return -ENODEV;

		size = ((ctrl&0x1fff)<<13) + (1<<13);

		/* size must be a power of two */
		for (u = size; u > 1; u >>= 1)
			;
		if (u != 1)
			return -ENODEV;

		/* PMR bit 6 selects a 16-bit data path. */
		if (pmr & (1<<6))
			width = 16;
		else
			width = 8;

		docmem.start = base;
		docmem.end = base + size;

		if (request_resource(&iomem_resource, &docmem)) {
			printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
			return -ENOMEM;
		}
	} else {
		/* size must be a power of two */
		for (u = size; u > 1; u >>= 1)
			;
		if (u != 1) {
			printk(KERN_ERR NAME ": invalid size for flash mapping\n");
			pci_dev_put(bridge);
			return -EINVAL;
		}

		if (width != 8 && width != 16) {
			printk(KERN_ERR NAME ": invalid bus width for flash mapping\n");
			pci_dev_put(bridge);
			return -EINVAL;
		}

		if (allocate_resource(&iomem_resource, &docmem,
				      size,
				      0xc0000000, 0xffffffff,
				      size, NULL, NULL)) {
			printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
			pci_dev_put(bridge);
			return -ENOMEM;
		}

		/* CTRL[26:24] enables the mapping, low bits encode the
		 * window size in 8 KiB units. */
		ctrl = 0x07000000 | ((size-1) >> 13);

		printk(KERN_INFO "DOCCS BASE=0x%08lx, CTRL=0x%08lx\n", (long)docmem.start, (long)ctrl);

		pci_write_config_dword(bridge, SCx200_DOCCS_BASE, docmem.start);
		pci_write_config_dword(bridge, SCx200_DOCCS_CTRL, ctrl);

		/* Last use of the bridge device - drop the reference now,
		 * not before the config writes above. */
		pci_dev_put(bridge);

		pmr = inl(scx200_cb_base + SCx200_PMR);

		if (width == 8) {
			pmr &= ~(1<<6);
		} else {
			pmr |= (1<<6);
		}
		outl(pmr, scx200_cb_base + SCx200_PMR);
	}

	printk(KERN_INFO NAME ": DOCCS mapped at %pR, width %d\n",
	       &docmem, width);

	scx200_docflash_map.size = size;
	if (width == 8)
		scx200_docflash_map.bankwidth = 1;
	else
		scx200_docflash_map.bankwidth = 2;

	simple_map_init(&scx200_docflash_map);

	scx200_docflash_map.phys = docmem.start;
	scx200_docflash_map.virt = ioremap(docmem.start, scx200_docflash_map.size);
	if (!scx200_docflash_map.virt) {
		printk(KERN_ERR NAME ": failed to ioremap the flash\n");
		release_resource(&docmem);
		return -EIO;
	}

	mymtd = do_map_probe(flashtype, &scx200_docflash_map);
	if (!mymtd) {
		printk(KERN_ERR NAME ": unable to detect flash\n");
		iounmap(scx200_docflash_map.virt);
		release_resource(&docmem);
		return -ENXIO;
	}

	if (size < mymtd->size)
		printk(KERN_WARNING NAME ": warning, flash mapping is smaller than flash size\n");

	mymtd->owner = THIS_MODULE;

	/* Size-dependent partitions: High BIOS is the last 512 KiB, the
	 * file system fills the gap between 1 MiB and the High BIOS. */
	partition_info[3].offset = mymtd->size-partition_info[3].size;
	partition_info[2].size = partition_info[3].offset-partition_info[2].offset;

	mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
	return 0;
}
/*
 * Module exit: tear down whatever init_scx200_docflash() set up.
 * Each step is guarded so a partially-failed init is handled too.
 */
static void __exit cleanup_scx200_docflash(void)
{
	/* Unregister and destroy the MTD device, if one was probed. */
	if (mymtd != NULL) {
		mtd_device_unregister(mymtd);
		map_destroy(mymtd);
	}
	/* Undo the ioremap and give the physical window back. */
	if (scx200_docflash_map.virt != NULL) {
		iounmap(scx200_docflash_map.virt);
		release_resource(&docmem);
	}
}
module_init(init_scx200_docflash);
module_exit(cleanup_scx200_docflash);
/*
Local variables:
compile-command: "make -k -C ../../.. SUBDIRS=drivers/mtd/maps modules"
c-basic-offset: 8
End:
*/
| gpl-2.0 |
FrostBite-Android/android_kernel_samsung_smdk4412 | drivers/media/video/cx88/cx88-cards.c | 2538 | 93244 | /*
*
* device driver for Conexant 2388x based TV cards
* card-specific stuff.
*
* (c) 2003 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "cx88.h"
#include "tea5767.h"
static unsigned int tuner[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
static unsigned int radio[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
static unsigned int card[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
module_param_array(tuner, int, NULL, 0444);
module_param_array(radio, int, NULL, 0444);
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(tuner,"tuner type");
MODULE_PARM_DESC(radio,"radio tuner type");
MODULE_PARM_DESC(card,"card type");
static unsigned int latency = UNSET;
module_param(latency,int,0444);
MODULE_PARM_DESC(latency,"pci latency timer");
static int disable_ir;
module_param(disable_ir, int, 0444);
MODULE_PARM_DESC(disable_ir, "Disable IR support");
#define info_printk(core, fmt, arg...) \
printk(KERN_INFO "%s: " fmt, core->name , ## arg)
#define warn_printk(core, fmt, arg...) \
printk(KERN_WARNING "%s: " fmt, core->name , ## arg)
#define err_printk(core, fmt, arg...) \
printk(KERN_ERR "%s: " fmt, core->name , ## arg)
/* ------------------------------------------------------------------ */
/* board config info */
/* If radio_type !=UNSET, radio_addr should be specified
*/
static const struct cx88_board cx88_boards[] = {
[CX88_BOARD_UNKNOWN] = {
.name = "UNKNOWN/GENERIC",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 0,
},{
.type = CX88_VMUX_COMPOSITE2,
.vmux = 1,
},{
.type = CX88_VMUX_COMPOSITE3,
.vmux = 2,
},{
.type = CX88_VMUX_COMPOSITE4,
.vmux = 3,
}},
},
[CX88_BOARD_HAUPPAUGE] = {
.name = "Hauppauge WinTV 34xxx models",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xff00, // internal decoder
},{
.type = CX88_VMUX_DEBUG,
.vmux = 0,
.gpio0 = 0xff01, // mono from tuner chip
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xff02,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xff02,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xff01,
},
},
[CX88_BOARD_GDI] = {
.name = "GDI Black Gold",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
}},
},
[CX88_BOARD_PIXELVIEW] = {
.name = "PixelView",
.tuner_type = TUNER_PHILIPS_PAL,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xff00, // internal decoder
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xff10,
},
},
[CX88_BOARD_ATI_WONDER_PRO] = {
.name = "ATI TV Wonder Pro",
.tuner_type = TUNER_PHILIPS_4IN1,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x03ff,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x03fe,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x03fe,
}},
},
[CX88_BOARD_WINFAST2000XP_EXPERT] = {
.name = "Leadtek Winfast 2000XP Expert",
.tuner_type = TUNER_PHILIPS_4IN1,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00F5e700,
.gpio1 = 0x00003004,
.gpio2 = 0x00F5e700,
.gpio3 = 0x02000000,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00F5c700,
.gpio1 = 0x00003004,
.gpio2 = 0x00F5c700,
.gpio3 = 0x02000000,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00F5c700,
.gpio1 = 0x00003004,
.gpio2 = 0x00F5c700,
.gpio3 = 0x02000000,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x00F5d700,
.gpio1 = 0x00003004,
.gpio2 = 0x00F5d700,
.gpio3 = 0x02000000,
},
},
[CX88_BOARD_AVERTV_STUDIO_303] = {
.name = "AverTV Studio 303 (M126)",
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio1 = 0xe09f,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio1 = 0xe05f,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio1 = 0xe05f,
}},
.radio = {
.gpio1 = 0xe0df,
.type = CX88_RADIO,
},
},
[CX88_BOARD_MSI_TVANYWHERE_MASTER] = {
// added gpio values thanks to Michal
// values for PAL from DScaler
.name = "MSI TV-@nywhere Master",
.tuner_type = TUNER_MT2032,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER_NTSC,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x000040bf,
.gpio1 = 0x000080c0,
.gpio2 = 0x0000ff40,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000040bf,
.gpio1 = 0x000080c0,
.gpio2 = 0x0000ff40,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000040bf,
.gpio1 = 0x000080c0,
.gpio2 = 0x0000ff40,
}},
.radio = {
.type = CX88_RADIO,
.vmux = 3,
.gpio0 = 0x000040bf,
.gpio1 = 0x000080c0,
.gpio2 = 0x0000ff20,
},
},
[CX88_BOARD_WINFAST_DV2000] = {
.name = "Leadtek Winfast DV2000",
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0035e700,
.gpio1 = 0x00003004,
.gpio2 = 0x0035e700,
.gpio3 = 0x02000000,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0035c700,
.gpio1 = 0x00003004,
.gpio2 = 0x0035c700,
.gpio3 = 0x02000000,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0035c700,
.gpio1 = 0x0035c700,
.gpio2 = 0x02000000,
.gpio3 = 0x02000000,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x0035d700,
.gpio1 = 0x00007004,
.gpio2 = 0x0035d700,
.gpio3 = 0x02000000,
},
},
[CX88_BOARD_LEADTEK_PVR2000] = {
// gpio values for PAL version from regspy by DScaler
.name = "Leadtek PVR 2000",
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0000bde2,
.audioroute = 1,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0000bde6,
.audioroute = 1,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0000bde6,
.audioroute = 1,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x0000bd62,
.audioroute = 1,
},
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_IODATA_GVVCP3PCI] = {
.name = "IODATA GV-VCP3/PCI",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 0,
},{
.type = CX88_VMUX_COMPOSITE2,
.vmux = 1,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
}},
},
[CX88_BOARD_PROLINK_PLAYTVPVR] = {
.name = "Prolink PlayTV PVR",
.tuner_type = TUNER_PHILIPS_FM1236_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xbff0,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xbff3,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xbff3,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xbff0,
},
},
[CX88_BOARD_ASUS_PVR_416] = {
.name = "ASUS PVR-416",
.tuner_type = TUNER_PHILIPS_FM1236_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0000fde6,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0000fde6, // 0x0000fda6 L,R RCA audio in?
.audioroute = 1,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x0000fde2,
},
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_MSI_TVANYWHERE] = {
.name = "MSI TV-@nywhere",
.tuner_type = TUNER_MT2032,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00000fbf,
.gpio2 = 0x0000fc08,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00000fbf,
.gpio2 = 0x0000fc68,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00000fbf,
.gpio2 = 0x0000fc68,
}},
},
[CX88_BOARD_KWORLD_DVB_T] = {
.name = "KWorld/VStream XPert DVB-T",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1] = {
.name = "DViCO FusionHDTV DVB-T1",
.tuner_type = TUNER_ABSENT, /* No analog tuner */
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000027df,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000027df,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_KWORLD_LTV883] = {
.name = "KWorld LTV883RF",
.tuner_type = TUNER_TNF_8831BGFF,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x07f8,
},{
.type = CX88_VMUX_DEBUG,
.vmux = 0,
.gpio0 = 0x07f9, // mono from tuner chip
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000007fa,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000007fa,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x000007f8,
},
},
[CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q] = {
.name = "DViCO FusionHDTV 3 Gold-Q",
.tuner_type = TUNER_MICROTUNE_4042FI5,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
/*
GPIO[0] resets DT3302 DTV receiver
0 - reset asserted
1 - normal operation
GPIO[1] mutes analog audio output connector
0 - enable selected source
1 - mute
GPIO[2] selects source for analog audio output connector
0 - analog audio input connector on tab
1 - analog DAC output from CX23881 chip
GPIO[3] selects RF input connector on tuner module
0 - RF connector labeled CABLE
1 - RF connector labeled ANT
GPIO[4] selects high RF for QAM256 mode
0 - normal RF
1 - high RF
*/
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0f0d,
},{
.type = CX88_VMUX_CABLE,
.vmux = 0,
.gpio0 = 0x0f05,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0f00,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0f00,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_DVB_T1] = {
.name = "Hauppauge Nova-T DVB-T",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_CONEXANT_DVB_T1] = {
.name = "Conexant DVB-T reference design",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PROVIDEO_PV259] = {
.name = "Provideo PV259",
.tuner_type = TUNER_PHILIPS_FQ1216ME,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.audioroute = 1,
}},
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS] = {
.name = "DViCO FusionHDTV DVB-T Plus",
.tuner_type = TUNER_ABSENT, /* No analog tuner */
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000027df,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000027df,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DNTV_LIVE_DVB_T] = {
.name = "digitalnow DNTV Live! DVB-T",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00000700,
.gpio2 = 0x00000101,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00000700,
.gpio2 = 0x00000101,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PCHDTV_HD3000] = {
.name = "pcHDTV HD3000 HDTV",
.tuner_type = TUNER_THOMSON_DTT761X,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
/* GPIO[2] = audio source for analog audio out connector
* 0 = analog audio input connector
* 1 = CX88 audio DACs
*
* GPIO[7] = input to CX88's audio/chroma ADC
* 0 = FM 10.7 MHz IF
* 1 = Sound 4.5 MHz IF
*
* GPIO[1,5,6] = Oren 51132 pins 27,35,28 respectively
*
* GPIO[16] = Remote control input
*/
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00008484,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00008400,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00008400,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x00008404,
},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_ROSLYN] = {
// entry added by Kaustubh D. Bhalerao <bhalerao.1@osu.edu>
// GPIO values obtained from regspy, courtesy Sean Covel
.name = "Hauppauge WinTV 28xxx (Roslyn) models",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xed1a,
.gpio2 = 0x00ff,
},{
.type = CX88_VMUX_DEBUG,
.vmux = 0,
.gpio0 = 0xff01,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xff02,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xed92,
.gpio2 = 0x00ff,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xed96,
.gpio2 = 0x00ff,
},
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_DIGITALLOGIC_MEC] = {
.name = "Digital-Logic MICROSPACE Entertainment Center (MEC)",
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00009d80,
.audioroute = 1,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00009d76,
.audioroute = 1,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00009d76,
.audioroute = 1,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x00009d00,
.audioroute = 1,
},
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_IODATA_GVBCTV7E] = {
.name = "IODATA GV/BCTV7E",
.tuner_type = TUNER_PHILIPS_FQ1286,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 1,
.gpio1 = 0x0000e03f,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 2,
.gpio1 = 0x0000e07f,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 3,
.gpio1 = 0x0000e07f,
}}
},
[CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO] = {
.name = "PixelView PlayTV Ultra Pro (Stereo)",
/* May be also TUNER_YMEC_TVF_5533MF for NTSC/M or PAL/M */
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
/* Some variants use a tda9874 and so need the tvaudio module. */
.audio_chip = V4L2_IDENT_TVAUDIO,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xbf61, /* internal decoder */
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xbf63,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xbf63,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xbf60,
},
},
[CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_T] = {
.name = "DViCO FusionHDTV 3 Gold-T",
.tuner_type = TUNER_THOMSON_DTT761X,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x97ed,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x97e9,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x97e9,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_ADSTECH_DVB_T_PCI] = {
.name = "ADS Tech Instant TV DVB-T PCI",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1] = {
.name = "TerraTec Cinergy 1400 DVB-T",
.tuner_type = TUNER_ABSENT,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 2,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD] = {
.name = "DViCO FusionHDTV 5 Gold",
.tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H062F */
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x87fd,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x87f9,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x87f9,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_AVERMEDIA_ULTRATV_MC_550] = {
.name = "AverMedia UltraTV Media Center PCI 550",
.tuner_type = TUNER_PHILIPS_FM1236_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 0,
.gpio0 = 0x0000cd73,
.audioroute = 1,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 1,
.gpio0 = 0x0000cd73,
.audioroute = 1,
},{
.type = CX88_VMUX_TELEVISION,
.vmux = 3,
.gpio0 = 0x0000cdb3,
.audioroute = 1,
}},
.radio = {
.type = CX88_RADIO,
.vmux = 2,
.gpio0 = 0x0000cdf3,
.audioroute = 1,
},
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_KWORLD_VSTREAM_EXPERT_DVD] = {
/* Alexander Wold <awold@bigfoot.com> */
.name = "Kworld V-Stream Xpert DVD",
.tuner_type = UNSET,
.input = {{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x03000000,
.gpio1 = 0x01000000,
.gpio2 = 0x02000000,
.gpio3 = 0x00100000,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x03000000,
.gpio1 = 0x01000000,
.gpio2 = 0x02000000,
.gpio3 = 0x00100000,
}},
},
[CX88_BOARD_ATI_HDTVWONDER] = {
.name = "ATI HDTV Wonder",
.tuner_type = TUNER_PHILIPS_TUV1236D,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00000ff7,
.gpio1 = 0x000000ff,
.gpio2 = 0x00000001,
.gpio3 = 0x00000000,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00000ffe,
.gpio1 = 0x000000ff,
.gpio2 = 0x00000001,
.gpio3 = 0x00000000,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00000ffe,
.gpio1 = 0x000000ff,
.gpio2 = 0x00000001,
.gpio3 = 0x00000000,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_WINFAST_DTV1000] = {
.name = "WinFast DTV1000-T",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_AVERTV_303] = {
.name = "AVerTV 303 (M126)",
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00ff,
.gpio1 = 0xe09f,
.gpio2 = 0x0010,
.gpio3 = 0x0000,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00ff,
.gpio1 = 0xe05f,
.gpio2 = 0x0010,
.gpio3 = 0x0000,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00ff,
.gpio1 = 0xe05f,
.gpio2 = 0x0010,
.gpio3 = 0x0000,
}},
},
[CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1] = {
.name = "Hauppauge Nova-S-Plus DVB-S",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.audio_chip = V4L2_IDENT_WM8775,
.i2sinputcntl = 2,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
/* 2: Line-In */
.audioroute = 2,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
/* 2: Line-In */
.audioroute = 2,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
/* 2: Line-In */
.audioroute = 2,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_NOVASE2_S1] = {
.name = "Hauppauge Nova-SE2 DVB-S",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_KWORLD_DVBS_100] = {
.name = "KWorld DVB-S 100",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.audio_chip = V4L2_IDENT_WM8775,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
/* 2: Line-In */
.audioroute = 2,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
/* 2: Line-In */
.audioroute = 2,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
/* 2: Line-In */
.audioroute = 2,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_HVR1100] = {
.name = "Hauppauge WinTV-HVR1100 DVB-T/Hybrid",
.tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
}},
/* fixme: Add radio support */
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_HVR1100LP] = {
.name = "Hauppauge WinTV-HVR1100 DVB-T/Hybrid (Low Profile)",
.tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
}},
/* fixme: Add radio support */
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DNTV_LIVE_DVB_T_PRO] = {
.name = "digitalnow DNTV Live! DVB-T Pro",
.tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE |
TDA9887_PORT2_ACTIVE,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xf80808,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xf80808,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xf80808,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xf80808,
},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_KWORLD_DVB_T_CX22702] = {
/* Kworld V-stream Xpert DVB-T with Thomson tuner */
/* DTT 7579 Conexant CX22702-19 Conexant CX2388x */
/* Manenti Marco <marco_manenti@colman.it> */
.name = "KWorld/VStream XPert DVB-T with cx22702",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL] = {
.name = "DViCO FusionHDTV DVB-T Dual Digital",
.tuner_type = TUNER_ABSENT, /* No analog tuner */
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000067df,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000067df,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT] = {
.name = "KWorld HardwareMpegTV XPert",
.tuner_type = TUNER_PHILIPS_TDA8290,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x3de2,
.gpio2 = 0x00ff,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x3de6,
.audioroute = 1,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x3de6,
.audioroute = 1,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x3de6,
.gpio2 = 0x00ff,
},
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID] = {
.name = "DViCO FusionHDTV DVB-T Hybrid",
.tuner_type = TUNER_THOMSON_FE6600,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0000a75f,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0000a75b,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0000a75b,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PCHDTV_HD5500] = {
.name = "pcHDTV HD5500 HDTV",
.tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x87fd,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x87f9,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x87f9,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_KWORLD_MCE200_DELUXE] = {
/* FIXME: tested TV input only, disabled composite,
svideo and radio until they can be tested also. */
.name = "Kworld MCE 200 Deluxe",
.tuner_type = TUNER_TENA_9533_DI,
.radio_type = UNSET,
.tda9887_conf = TDA9887_PRESENT,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0000BDE6
}},
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_PIXELVIEW_PLAYTV_P7000] = {
/* FIXME: SVideo, Composite and FM inputs are untested */
.name = "PixelView PlayTV P7000",
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE |
TDA9887_PORT2_ACTIVE,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x5da6,
}},
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_NPGTECH_REALTV_TOP10FM] = {
.name = "NPG Tech Real TV FM Top 10",
.tuner_type = TUNER_TNF_5335MF, /* Actually a TNF9535 */
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0788,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x078b,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x078b,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x074a,
},
},
[CX88_BOARD_WINFAST_DTV2000H] = {
.name = "WinFast DTV2000 H",
.tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00017304,
.gpio1 = 0x00008203,
.gpio2 = 0x00017304,
.gpio3 = 0x02000000,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0001d701,
.gpio1 = 0x0000b207,
.gpio2 = 0x0001d701,
.gpio3 = 0x02000000,
}, {
.type = CX88_VMUX_COMPOSITE2,
.vmux = 2,
.gpio0 = 0x0001d503,
.gpio1 = 0x0000b207,
.gpio2 = 0x0001d503,
.gpio3 = 0x02000000,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 3,
.gpio0 = 0x0001d701,
.gpio1 = 0x0000b207,
.gpio2 = 0x0001d701,
.gpio3 = 0x02000000,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x00015702,
.gpio1 = 0x0000f207,
.gpio2 = 0x00015702,
.gpio3 = 0x02000000,
},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_WINFAST_DTV2000H_J] = {
.name = "WinFast DTV2000 H rev. J",
.tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00017300,
.gpio1 = 0x00008207,
.gpio2 = 0x00000000,
.gpio3 = 0x02000000,
},{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00018300,
.gpio1 = 0x0000f207,
.gpio2 = 0x00017304,
.gpio3 = 0x02000000,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00018301,
.gpio1 = 0x0000f207,
.gpio2 = 0x00017304,
.gpio3 = 0x02000000,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00018301,
.gpio1 = 0x0000f207,
.gpio2 = 0x00017304,
.gpio3 = 0x02000000,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x00015702,
.gpio1 = 0x0000f207,
.gpio2 = 0x00015702,
.gpio3 = 0x02000000,
},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_GENIATECH_DVBS] = {
.name = "Geniatech DVB-S",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_HVR3000] = {
.name = "Hauppauge WinTV-HVR3000 TriMode Analog/DVB-S/DVB-T",
.tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.audio_chip = V4L2_IDENT_WM8775,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x84bf,
/* 1: TV Audio / FM Mono */
.audioroute = 1,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x84bf,
/* 2: Line-In */
.audioroute = 2,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x84bf,
/* 2: Line-In */
.audioroute = 2,
}},
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x84bf,
/* 4: FM Stereo (untested) */
.audioroute = 8,
},
.mpeg = CX88_MPEG_DVB,
.num_frontends = 2,
},
[CX88_BOARD_NORWOOD_MICRO] = {
.name = "Norwood Micro TV Tuner",
.tuner_type = TUNER_TNF_5335MF,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0709,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x070b,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x070b,
}},
},
[CX88_BOARD_TE_DTV_250_OEM_SWANN] = {
.name = "Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM",
.tuner_type = TUNER_LG_PAL_NEW_TAPC,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x003fffff,
.gpio1 = 0x00e00000,
.gpio2 = 0x003fffff,
.gpio3 = 0x02000000,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x003fffff,
.gpio1 = 0x00e00000,
.gpio2 = 0x003fffff,
.gpio3 = 0x02000000,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x003fffff,
.gpio1 = 0x00e00000,
.gpio2 = 0x003fffff,
.gpio3 = 0x02000000,
}},
},
[CX88_BOARD_HAUPPAUGE_HVR1300] = {
.name = "Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder",
.tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.audio_chip = V4L2_IDENT_WM8775,
/*
* gpio0 as reported by Mike Crash <mike AT mikecrash.com>
*/
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xef88,
/* 1: TV Audio / FM Mono */
.audioroute = 1,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xef88,
/* 2: Line-In */
.audioroute = 2,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xef88,
/* 2: Line-In */
.audioroute = 2,
}},
.mpeg = CX88_MPEG_DVB | CX88_MPEG_BLACKBIRD,
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xef88,
/* 4: FM Stereo (untested) */
.audioroute = 8,
},
},
[CX88_BOARD_SAMSUNG_SMT_7020] = {
.name = "Samsung SMT 7020 DVB-S",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_ADSTECH_PTV_390] = {
.name = "ADS Tech Instant Video PCI",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DEBUG,
.vmux = 3,
.gpio0 = 0x04ff,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x07fa,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x07fa,
}},
},
[CX88_BOARD_PINNACLE_PCTV_HD_800i] = {
.name = "Pinnacle PCTV HD 800i",
.tuner_type = TUNER_XC5000,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x04fb,
.gpio1 = 0x10ff,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x04fb,
.gpio1 = 0x10ef,
.audioroute = 1,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x04fb,
.gpio1 = 0x10ef,
.audioroute = 1,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO] = {
.name = "DViCO FusionHDTV 5 PCI nano",
/* xc3008 tuner, digital only for now */
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x000027df, /* Unconfirmed */
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000027df, /* Unconfirmed */
.audioroute = 1,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000027df, /* Unconfirmed */
.audioroute = 1,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PINNACLE_HYBRID_PCTV] = {
.name = "Pinnacle Hybrid PCTV",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
.radio_type = TUNER_XC2028,
.radio_addr = 0x61,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x004ff,
.gpio1 = 0x010ff,
.gpio2 = 0x00001,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x004fb,
.gpio1 = 0x010ef,
.audioroute = 1,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x004fb,
.gpio1 = 0x010ef,
.audioroute = 1,
} },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x004ff,
.gpio1 = 0x010ff,
.gpio2 = 0x0ff,
},
.mpeg = CX88_MPEG_DVB,
},
/* Terry Wu <terrywu2009@gmail.com> */
/* TV Audio : set GPIO 2, 18, 19 value to 0, 1, 0 */
/* FM Audio : set GPIO 2, 18, 19 value to 0, 0, 0 */
/* Line-in Audio : set GPIO 2, 18, 19 value to 0, 1, 1 */
/* Mute Audio : set GPIO 2 value to 1 */
[CX88_BOARD_WINFAST_TV2000_XP_GLOBAL] = {
.name = "Leadtek TV2000 XP Global",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
.radio_type = TUNER_XC2028,
.radio_addr = 0x61,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x0000,
.gpio2 = 0x0C04, /* pin 18 = 1, pin 19 = 0 */
.gpio3 = 0x0000,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x0000,
.gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
.gpio3 = 0x0000,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x0000,
.gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
.gpio3 = 0x0000,
} },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x0000,
.gpio2 = 0x0C00, /* pin 18 = 0, pin 19 = 0 */
.gpio3 = 0x0000,
},
},
[CX88_BOARD_POWERCOLOR_REAL_ANGEL] = {
.name = "PowerColor RA330", /* Long names may confuse LIRC. */
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
.input = { {
.type = CX88_VMUX_DEBUG,
.vmux = 3, /* Due to the way the cx88 driver is written, */
.gpio0 = 0x00ff, /* there is no way to deactivate audio pass- */
.gpio1 = 0xf39d, /* through without this entry. Furthermore, if */
.gpio3 = 0x0000, /* the TV mux entry is first, you get audio */
}, { /* from the tuner on boot for a little while. */
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00ff,
.gpio1 = 0xf35d,
.gpio3 = 0x0000,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00ff,
.gpio1 = 0xf37d,
.gpio3 = 0x0000,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000ff,
.gpio1 = 0x0f37d,
.gpio3 = 0x00000,
} },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x000ff,
.gpio1 = 0x0f35d,
.gpio3 = 0x00000,
},
},
[CX88_BOARD_GENIATECH_X8000_MT] = {
/* Also PowerColor Real Angel 330 and Geniatech X800 OEM */
.name = "Geniatech X8000-MT DVBT",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00000000,
.gpio1 = 0x00e3e341,
.gpio2 = 0x00000000,
.gpio3 = 0x00000000,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00000000,
.gpio1 = 0x00e3e361,
.gpio2 = 0x00000000,
.gpio3 = 0x00000000,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00000000,
.gpio1 = 0x00e3e361,
.gpio2 = 0x00000000,
.gpio3 = 0x00000000,
} },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x00000000,
.gpio1 = 0x00e3e341,
.gpio2 = 0x00000000,
.gpio3 = 0x00000000,
},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO] = {
.name = "DViCO FusionHDTV DVB-T PRO",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
.radio_type = UNSET,
.radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000067df,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000067df,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD] = {
.name = "DViCO FusionHDTV 7 Gold",
.tuner_type = TUNER_XC5000,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x10df,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x16d9,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x16d9,
}},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PROLINK_PV_8000GT] = {
.name = "Prolink Pixelview MPEG 8000GT",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0ff,
.gpio2 = 0x0cfb,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio2 = 0x0cfb,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio2 = 0x0cfb,
} },
.radio = {
.type = CX88_RADIO,
.gpio2 = 0x0cfb,
},
},
[CX88_BOARD_PROLINK_PV_GLOBAL_XTREME] = {
.name = "Prolink Pixelview Global Extreme",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x04fb,
.gpio1 = 0x04080,
.gpio2 = 0x0cf7,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x04fb,
.gpio1 = 0x04080,
.gpio2 = 0x0cfb,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x04fb,
.gpio1 = 0x04080,
.gpio2 = 0x0cfb,
} },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x04ff,
.gpio1 = 0x04080,
.gpio2 = 0x0cf7,
},
},
/* Both radio, analog and ATSC work with this board.
However, for analog to work, s5h1409 gate should be open,
otherwise, tuner-xc3028 won't be detected.
A proper fix require using the newer i2c methods to add
tuner-xc3028 without doing an i2c probe.
*/
[CX88_BOARD_KWORLD_ATSC_120] = {
.name = "Kworld PlusTV HD PCI 120 (ATSC 120)",
.tuner_type = TUNER_XC2028,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x000000ff,
.gpio1 = 0x0000f35d,
.gpio2 = 0x00000000,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000000ff,
.gpio1 = 0x0000f37e,
.gpio2 = 0x00000000,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000000ff,
.gpio1 = 0x0000f37e,
.gpio2 = 0x00000000,
} },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x000000ff,
.gpio1 = 0x0000f35d,
.gpio2 = 0x00000000,
},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_HVR4000] = {
.name = "Hauppauge WinTV-HVR4000 DVB-S/S2/T/Hybrid",
.tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.audio_chip = V4L2_IDENT_WM8775,
/*
* GPIO0 (WINTV2000)
*
* Analogue SAT DVB-T
* Antenna 0xc4bf 0xc4bb
* Composite 0xc4bf 0xc4bb
* S-Video 0xc4bf 0xc4bb
* Composite1 0xc4ff 0xc4fb
* S-Video1 0xc4ff 0xc4fb
*
* BIT VALUE FUNCTION GP{x}_IO
* 0 1 I:?
* 1 1 I:?
* 2 1 O:MPEG PORT 0=DVB-T 1=DVB-S
* 3 1 I:?
* 4 1 I:?
* 5 1 I:?
* 6 0 O:INPUT SELECTOR 0=INTERNAL 1=EXPANSION
* 7 1 O:DVB-T DEMOD RESET LOW
*
* BIT VALUE FUNCTION GP{x}_OE
* 8 0 I
* 9 0 I
* a 1 O
* b 0 I
* c 0 I
* d 0 I
* e 1 O
* f 1 O
*
* WM8775 ADC
*
* 1: TV Audio / FM Mono
* 2: Line-In
* 3: Line-In Expansion
* 4: FM Stereo
*/
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xc4bf,
/* 1: TV Audio / FM Mono */
.audioroute = 1,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xc4bf,
/* 2: Line-In */
.audioroute = 2,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xc4bf,
/* 2: Line-In */
.audioroute = 2,
} },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xc4bf,
/* 4: FM Stereo */
.audioroute = 8,
},
.mpeg = CX88_MPEG_DVB,
.num_frontends = 2,
},
[CX88_BOARD_HAUPPAUGE_HVR4000LITE] = {
.name = "Hauppauge WinTV-HVR4000(Lite) DVB-S/S2",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_TEVII_S420] = {
.name = "TeVii S420 DVB-S",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_TEVII_S460] = {
.name = "TeVii S460 DVB-S/S2",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_TEVII_S464] = {
.name = "TeVii S464 DVB-S/S2",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_OMICOM_SS4_PCI] = {
.name = "Omicom SS4 DVB-S/S2 PCI",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_TBS_8910] = {
.name = "TBS 8910 DVB-S",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_TBS_8920] = {
.name = "TBS 8920 DVB-S/S2",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
.gpio0 = 0x8080,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PROF_6200] = {
.name = "Prof 6200 DVB-S",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PROF_7300] = {
.name = "PROF 7300 DVB-S/S2",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_SATTRADE_ST4200] = {
.name = "SATTRADE ST4200 DVB-S/S2",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII] = {
.name = "Terratec Cinergy HT PCI MKII",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
.radio_type = TUNER_XC2028,
.radio_addr = 0x61,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x004ff,
.gpio1 = 0x010ff,
.gpio2 = 0x00001,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x004fb,
.gpio1 = 0x010ef,
.audioroute = 1,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x004fb,
.gpio1 = 0x010ef,
.audioroute = 1,
} },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x004ff,
.gpio1 = 0x010ff,
.gpio2 = 0x0ff,
},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_IRONLY] = {
.name = "Hauppauge WinTV-IR Only",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
[CX88_BOARD_WINFAST_DTV1800H] = {
.name = "Leadtek WinFast DTV1800 Hybrid",
.tuner_type = TUNER_XC2028,
.radio_type = TUNER_XC2028,
.tuner_addr = 0x61,
.radio_addr = 0x61,
/*
* GPIO setting
*
* 2: mute (0=off,1=on)
* 12: tuner reset pin
* 13: audio source (0=tuner audio,1=line in)
* 14: FM (0=on,1=off ???)
*/
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x6040, /* pin 13 = 0, pin 14 = 1 */
.gpio2 = 0x0000,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
.gpio2 = 0x0000,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
.gpio2 = 0x0000,
} },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x6000, /* pin 13 = 0, pin 14 = 0 */
.gpio2 = 0x0000,
},
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PROF_7301] = {
.name = "Prof 7301 DVB-S/S2",
.tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_TWINHAN_VP1027_DVBS] = {
.name = "Twinhan VP-1027 DVB-S",
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
.mpeg = CX88_MPEG_DVB,
},
};
/* ------------------------------------------------------------------ */
/* PCI subsystem IDs */
static const struct cx88_subid cx88_subids[] = {
{
.subvendor = 0x0070,
.subdevice = 0x3400,
.card = CX88_BOARD_HAUPPAUGE,
},{
.subvendor = 0x0070,
.subdevice = 0x3401,
.card = CX88_BOARD_HAUPPAUGE,
},{
.subvendor = 0x14c7,
.subdevice = 0x0106,
.card = CX88_BOARD_GDI,
},{
.subvendor = 0x14c7,
.subdevice = 0x0107, /* with mpeg encoder */
.card = CX88_BOARD_GDI,
},{
.subvendor = PCI_VENDOR_ID_ATI,
.subdevice = 0x00f8,
.card = CX88_BOARD_ATI_WONDER_PRO,
}, {
.subvendor = PCI_VENDOR_ID_ATI,
.subdevice = 0x00f9,
.card = CX88_BOARD_ATI_WONDER_PRO,
}, {
.subvendor = 0x107d,
.subdevice = 0x6611,
.card = CX88_BOARD_WINFAST2000XP_EXPERT,
},{
.subvendor = 0x107d,
.subdevice = 0x6613, /* NTSC */
.card = CX88_BOARD_WINFAST2000XP_EXPERT,
},{
.subvendor = 0x107d,
.subdevice = 0x6620,
.card = CX88_BOARD_WINFAST_DV2000,
},{
.subvendor = 0x107d,
.subdevice = 0x663b,
.card = CX88_BOARD_LEADTEK_PVR2000,
},{
.subvendor = 0x107d,
.subdevice = 0x663c,
.card = CX88_BOARD_LEADTEK_PVR2000,
},{
.subvendor = 0x1461,
.subdevice = 0x000b,
.card = CX88_BOARD_AVERTV_STUDIO_303,
},{
.subvendor = 0x1462,
.subdevice = 0x8606,
.card = CX88_BOARD_MSI_TVANYWHERE_MASTER,
},{
.subvendor = 0x10fc,
.subdevice = 0xd003,
.card = CX88_BOARD_IODATA_GVVCP3PCI,
},{
.subvendor = 0x1043,
.subdevice = 0x4823, /* with mpeg encoder */
.card = CX88_BOARD_ASUS_PVR_416,
},{
.subvendor = 0x17de,
.subdevice = 0x08a6,
.card = CX88_BOARD_KWORLD_DVB_T,
},{
.subvendor = 0x18ac,
.subdevice = 0xd810,
.card = CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q,
},{
.subvendor = 0x18ac,
.subdevice = 0xd820,
.card = CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_T,
},{
.subvendor = 0x18ac,
.subdevice = 0xdb00,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1,
},{
.subvendor = 0x0070,
.subdevice = 0x9002,
.card = CX88_BOARD_HAUPPAUGE_DVB_T1,
},{
.subvendor = 0x14f1,
.subdevice = 0x0187,
.card = CX88_BOARD_CONEXANT_DVB_T1,
},{
.subvendor = 0x1540,
.subdevice = 0x2580,
.card = CX88_BOARD_PROVIDEO_PV259,
},{
.subvendor = 0x18ac,
.subdevice = 0xdb10,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS,
},{
.subvendor = 0x1554,
.subdevice = 0x4811,
.card = CX88_BOARD_PIXELVIEW,
},{
.subvendor = 0x7063,
.subdevice = 0x3000, /* HD-3000 card */
.card = CX88_BOARD_PCHDTV_HD3000,
},{
.subvendor = 0x17de,
.subdevice = 0xa8a6,
.card = CX88_BOARD_DNTV_LIVE_DVB_T,
},{
.subvendor = 0x0070,
.subdevice = 0x2801,
.card = CX88_BOARD_HAUPPAUGE_ROSLYN,
},{
.subvendor = 0x14f1,
.subdevice = 0x0342,
.card = CX88_BOARD_DIGITALLOGIC_MEC,
},{
.subvendor = 0x10fc,
.subdevice = 0xd035,
.card = CX88_BOARD_IODATA_GVBCTV7E,
},{
.subvendor = 0x1421,
.subdevice = 0x0334,
.card = CX88_BOARD_ADSTECH_DVB_T_PCI,
},{
.subvendor = 0x153b,
.subdevice = 0x1166,
.card = CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1,
},{
.subvendor = 0x18ac,
.subdevice = 0xd500,
.card = CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD,
},{
.subvendor = 0x1461,
.subdevice = 0x8011,
.card = CX88_BOARD_AVERMEDIA_ULTRATV_MC_550,
},{
.subvendor = PCI_VENDOR_ID_ATI,
.subdevice = 0xa101,
.card = CX88_BOARD_ATI_HDTVWONDER,
},{
.subvendor = 0x107d,
.subdevice = 0x665f,
.card = CX88_BOARD_WINFAST_DTV1000,
},{
.subvendor = 0x1461,
.subdevice = 0x000a,
.card = CX88_BOARD_AVERTV_303,
},{
.subvendor = 0x0070,
.subdevice = 0x9200,
.card = CX88_BOARD_HAUPPAUGE_NOVASE2_S1,
},{
.subvendor = 0x0070,
.subdevice = 0x9201,
.card = CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1,
},{
.subvendor = 0x0070,
.subdevice = 0x9202,
.card = CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1,
},{
.subvendor = 0x17de,
.subdevice = 0x08b2,
.card = CX88_BOARD_KWORLD_DVBS_100,
},{
.subvendor = 0x0070,
.subdevice = 0x9400,
.card = CX88_BOARD_HAUPPAUGE_HVR1100,
},{
.subvendor = 0x0070,
.subdevice = 0x9402,
.card = CX88_BOARD_HAUPPAUGE_HVR1100,
},{
.subvendor = 0x0070,
.subdevice = 0x9800,
.card = CX88_BOARD_HAUPPAUGE_HVR1100LP,
},{
.subvendor = 0x0070,
.subdevice = 0x9802,
.card = CX88_BOARD_HAUPPAUGE_HVR1100LP,
},{
.subvendor = 0x0070,
.subdevice = 0x9001,
.card = CX88_BOARD_HAUPPAUGE_DVB_T1,
},{
.subvendor = 0x1822,
.subdevice = 0x0025,
.card = CX88_BOARD_DNTV_LIVE_DVB_T_PRO,
},{
.subvendor = 0x17de,
.subdevice = 0x08a1,
.card = CX88_BOARD_KWORLD_DVB_T_CX22702,
},{
.subvendor = 0x18ac,
.subdevice = 0xdb50,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL,
},{
.subvendor = 0x18ac,
.subdevice = 0xdb54,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL,
/* Re-branded DViCO: DigitalNow DVB-T Dual */
},{
.subvendor = 0x18ac,
.subdevice = 0xdb11,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS,
/* Re-branded DViCO: UltraView DVB-T Plus */
}, {
.subvendor = 0x18ac,
.subdevice = 0xdb30,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO,
}, {
.subvendor = 0x17de,
.subdevice = 0x0840,
.card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT,
},{
.subvendor = 0x1421,
.subdevice = 0x0305,
.card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT,
},{
.subvendor = 0x18ac,
.subdevice = 0xdb40,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID,
},{
.subvendor = 0x18ac,
.subdevice = 0xdb44,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID,
},{
.subvendor = 0x7063,
.subdevice = 0x5500,
.card = CX88_BOARD_PCHDTV_HD5500,
},{
.subvendor = 0x17de,
.subdevice = 0x0841,
.card = CX88_BOARD_KWORLD_MCE200_DELUXE,
},{
.subvendor = 0x1822,
.subdevice = 0x0019,
.card = CX88_BOARD_DNTV_LIVE_DVB_T_PRO,
},{
.subvendor = 0x1554,
.subdevice = 0x4813,
.card = CX88_BOARD_PIXELVIEW_PLAYTV_P7000,
},{
.subvendor = 0x14f1,
.subdevice = 0x0842,
.card = CX88_BOARD_NPGTECH_REALTV_TOP10FM,
},{
.subvendor = 0x107d,
.subdevice = 0x665e,
.card = CX88_BOARD_WINFAST_DTV2000H,
},{
.subvendor = 0x107d,
.subdevice = 0x6f2b,
.card = CX88_BOARD_WINFAST_DTV2000H_J,
},{
.subvendor = 0x18ac,
.subdevice = 0xd800, /* FusionHDTV 3 Gold (original revision) */
.card = CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q,
},{
.subvendor = 0x14f1,
.subdevice = 0x0084,
.card = CX88_BOARD_GENIATECH_DVBS,
},{
.subvendor = 0x0070,
.subdevice = 0x1404,
.card = CX88_BOARD_HAUPPAUGE_HVR3000,
}, {
.subvendor = 0x18ac,
.subdevice = 0xdc00,
.card = CX88_BOARD_SAMSUNG_SMT_7020,
}, {
.subvendor = 0x18ac,
.subdevice = 0xdccd,
.card = CX88_BOARD_SAMSUNG_SMT_7020,
},{
.subvendor = 0x1461,
.subdevice = 0xc111, /* AverMedia M150-D */
/* This board is known to work with the ASUS PVR416 config */
.card = CX88_BOARD_ASUS_PVR_416,
},{
.subvendor = 0xc180,
.subdevice = 0xc980,
.card = CX88_BOARD_TE_DTV_250_OEM_SWANN,
},{
.subvendor = 0x0070,
.subdevice = 0x9600,
.card = CX88_BOARD_HAUPPAUGE_HVR1300,
},{
.subvendor = 0x0070,
.subdevice = 0x9601,
.card = CX88_BOARD_HAUPPAUGE_HVR1300,
},{
.subvendor = 0x0070,
.subdevice = 0x9602,
.card = CX88_BOARD_HAUPPAUGE_HVR1300,
},{
.subvendor = 0x107d,
.subdevice = 0x6632,
.card = CX88_BOARD_LEADTEK_PVR2000,
},{
.subvendor = 0x12ab,
.subdevice = 0x2300, /* Club3D Zap TV2100 */
.card = CX88_BOARD_KWORLD_DVB_T_CX22702,
},{
.subvendor = 0x0070,
.subdevice = 0x9000,
.card = CX88_BOARD_HAUPPAUGE_DVB_T1,
},{
.subvendor = 0x0070,
.subdevice = 0x1400,
.card = CX88_BOARD_HAUPPAUGE_HVR3000,
},{
.subvendor = 0x0070,
.subdevice = 0x1401,
.card = CX88_BOARD_HAUPPAUGE_HVR3000,
},{
.subvendor = 0x0070,
.subdevice = 0x1402,
.card = CX88_BOARD_HAUPPAUGE_HVR3000,
},{
.subvendor = 0x1421,
.subdevice = 0x0341, /* ADS Tech InstantTV DVB-S */
.card = CX88_BOARD_KWORLD_DVBS_100,
},{
.subvendor = 0x1421,
.subdevice = 0x0390,
.card = CX88_BOARD_ADSTECH_PTV_390,
},{
.subvendor = 0x11bd,
.subdevice = 0x0051,
.card = CX88_BOARD_PINNACLE_PCTV_HD_800i,
}, {
.subvendor = 0x18ac,
.subdevice = 0xd530,
.card = CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO,
}, {
.subvendor = 0x12ab,
.subdevice = 0x1788,
.card = CX88_BOARD_PINNACLE_HYBRID_PCTV,
}, {
.subvendor = 0x14f1,
.subdevice = 0xea3d,
.card = CX88_BOARD_POWERCOLOR_REAL_ANGEL,
}, {
.subvendor = 0x107d,
.subdevice = 0x6f18,
.card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
}, {
.subvendor = 0x14f1,
.subdevice = 0x8852,
.card = CX88_BOARD_GENIATECH_X8000_MT,
}, {
.subvendor = 0x18ac,
.subdevice = 0xd610,
.card = CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD,
}, {
.subvendor = 0x1554,
.subdevice = 0x4935,
.card = CX88_BOARD_PROLINK_PV_8000GT,
}, {
.subvendor = 0x1554,
.subdevice = 0x4976,
.card = CX88_BOARD_PROLINK_PV_GLOBAL_XTREME,
}, {
.subvendor = 0x17de,
.subdevice = 0x08c1,
.card = CX88_BOARD_KWORLD_ATSC_120,
}, {
.subvendor = 0x0070,
.subdevice = 0x6900,
.card = CX88_BOARD_HAUPPAUGE_HVR4000,
}, {
.subvendor = 0x0070,
.subdevice = 0x6904,
.card = CX88_BOARD_HAUPPAUGE_HVR4000,
}, {
.subvendor = 0x0070,
.subdevice = 0x6902,
.card = CX88_BOARD_HAUPPAUGE_HVR4000,
}, {
.subvendor = 0x0070,
.subdevice = 0x6905,
.card = CX88_BOARD_HAUPPAUGE_HVR4000LITE,
}, {
.subvendor = 0x0070,
.subdevice = 0x6906,
.card = CX88_BOARD_HAUPPAUGE_HVR4000LITE,
}, {
.subvendor = 0xd420,
.subdevice = 0x9022,
.card = CX88_BOARD_TEVII_S420,
}, {
.subvendor = 0xd460,
.subdevice = 0x9022,
.card = CX88_BOARD_TEVII_S460,
}, {
.subvendor = 0xd464,
.subdevice = 0x9022,
.card = CX88_BOARD_TEVII_S464,
}, {
.subvendor = 0xA044,
.subdevice = 0x2011,
.card = CX88_BOARD_OMICOM_SS4_PCI,
}, {
.subvendor = 0x8910,
.subdevice = 0x8888,
.card = CX88_BOARD_TBS_8910,
}, {
.subvendor = 0x8920,
.subdevice = 0x8888,
.card = CX88_BOARD_TBS_8920,
}, {
.subvendor = 0xb022,
.subdevice = 0x3022,
.card = CX88_BOARD_PROF_6200,
}, {
.subvendor = 0xB033,
.subdevice = 0x3033,
.card = CX88_BOARD_PROF_7300,
}, {
.subvendor = 0xb200,
.subdevice = 0x4200,
.card = CX88_BOARD_SATTRADE_ST4200,
}, {
.subvendor = 0x153b,
.subdevice = 0x1177,
.card = CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII,
}, {
.subvendor = 0x0070,
.subdevice = 0x9290,
.card = CX88_BOARD_HAUPPAUGE_IRONLY,
}, {
.subvendor = 0x107d,
.subdevice = 0x6654,
.card = CX88_BOARD_WINFAST_DTV1800H,
}, {
/* PVR2000 PAL Model [107d:6630] */
.subvendor = 0x107d,
.subdevice = 0x6630,
.card = CX88_BOARD_LEADTEK_PVR2000,
}, {
/* PVR2000 PAL Model [107d:6638] */
.subvendor = 0x107d,
.subdevice = 0x6638,
.card = CX88_BOARD_LEADTEK_PVR2000,
}, {
/* PVR2000 NTSC Model [107d:6631] */
.subvendor = 0x107d,
.subdevice = 0x6631,
.card = CX88_BOARD_LEADTEK_PVR2000,
}, {
/* PVR2000 NTSC Model [107d:6637] */
.subvendor = 0x107d,
.subdevice = 0x6637,
.card = CX88_BOARD_LEADTEK_PVR2000,
}, {
/* PVR2000 NTSC Model [107d:663d] */
.subvendor = 0x107d,
.subdevice = 0x663d,
.card = CX88_BOARD_LEADTEK_PVR2000,
}, {
/* DV2000 NTSC Model [107d:6621] */
.subvendor = 0x107d,
.subdevice = 0x6621,
.card = CX88_BOARD_WINFAST_DV2000,
}, {
/* TV2000 XP Global [107d:6618] */
.subvendor = 0x107d,
.subdevice = 0x6618,
.card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
}, {
.subvendor = 0xb034,
.subdevice = 0x3034,
.card = CX88_BOARD_PROF_7301,
}, {
.subvendor = 0x1822,
.subdevice = 0x0023,
.card = CX88_BOARD_TWINHAN_VP1027_DVBS,
},
};
/* ----------------------------------------------------------------------- */
/* some leadtek specific stuff */
/*
 * Parse a Leadtek EEPROM: validate the signature bytes, then select the
 * analog tuner model from the subsystem-ID low byte stored at offset 6.
 * Only touches core->board.tuner_type.
 */
static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
	/* Bytes 4/5 must be PCI vendor 0x107d (Leadtek), byte 7 the 0x66xx
	 * device-ID high byte — otherwise the EEPROM is not one we know. */
	if (eeprom_data[4] != 0x7d ||
	    eeprom_data[5] != 0x10 ||
	    eeprom_data[7] != 0x66) {
		warn_printk(core, "Leadtek eeprom invalid.\n");
		return;
	}
	/* Terry Wu <terrywu2009@gmail.com> */
	switch (eeprom_data[6]) {
	case 0x13: /* SSID 6613 for TV2000 XP Expert NTSC Model */
	case 0x21: /* SSID 6621 for DV2000 NTSC Model */
	case 0x31: /* SSID 6631 for PVR2000 NTSC Model */
	case 0x37: /* SSID 6637 for PVR2000 NTSC Model */
	case 0x3d: /* SSID 663d for PVR2000 NTSC Model */
		core->board.tuner_type = TUNER_PHILIPS_FM1236_MK3;
		break;
	default:
		/* all other (PAL) models */
		core->board.tuner_type = TUNER_PHILIPS_FM1216ME_MK3;
		break;
	}
	info_printk(core, "Leadtek Winfast 2000XP Expert config: "
		    "tuner=%d, eeprom[0]=0x%02x\n",
		    core->board.tuner_type, eeprom_data[0]);
}
/*
 * Parse a Hauppauge EEPROM via tveeprom: copy the tuner type, the
 * supported tuner formats and the radio presence into the board config,
 * then warn if the reported model number is not in the known-good list.
 */
static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
	struct tveeprom tv;
	tveeprom_hauppauge_analog(&core->i2c_client, &tv, eeprom_data);
	core->board.tuner_type = tv.tuner_type;
	core->tuner_formats = tv.tuner_formats;
	core->board.radio.type = tv.has_radio ? CX88_RADIO : 0;
	/* Make sure we support the board model */
	switch (tv.model)
	{
	case 14009: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in) */
	case 14019: /* WinTV-HVR3000 (Retail, IR Blaster, b/panel video, 3.5mm audio in) */
	case 14029: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in - 880 bridge) */
	case 14109: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in - low profile) */
	case 14129: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in - 880 bridge - LP) */
	case 14559: /* WinTV-HVR3000 (OEM, no IR, b/panel video, 3.5mm audio in) */
	case 14569: /* WinTV-HVR3000 (OEM, no IR, no back panel video) */
	case 14659: /* WinTV-HVR3000 (OEM, no IR, b/panel video, RCA audio in - Low profile) */
	case 14669: /* WinTV-HVR3000 (OEM, no IR, no b/panel video - Low profile) */
	case 28552: /* WinTV-PVR 'Roslyn' (No IR) */
	case 34519: /* WinTV-PCI-FM */
	case 69009:
		/* WinTV-HVR4000 (DVBS/S2/T, Video and IR, back panel inputs) */
	case 69100: /* WinTV-HVR4000LITE (DVBS/S2, IR) */
	case 69500: /* WinTV-HVR4000LITE (DVBS/S2, No IR) */
	case 69559:
		/* WinTV-HVR4000 (DVBS/S2/T, Video no IR, back panel inputs) */
	case 69569: /* WinTV-HVR4000 (DVBS/S2/T, Video no IR) */
	case 90002: /* Nova-T-PCI (9002) */
	case 92001: /* Nova-S-Plus (Video and IR) */
	case 92002: /* Nova-S-Plus (Video and IR) */
	case 90003: /* Nova-T-PCI (9002 No RF out) */
	case 90500: /* Nova-T-PCI (oem) */
	case 90501: /* Nova-T-PCI (oem/IR) */
	case 92000: /* Nova-SE2 (OEM, No Video or IR) */
	case 92900: /* WinTV-IROnly (No analog or digital Video inputs) */
	case 94009: /* WinTV-HVR1100 (Video and IR Retail) */
	case 94501: /* WinTV-HVR1100 (Video and IR OEM) */
	case 96009: /* WinTV-HVR1300 (PAL Video, MPEG Video and IR RX) */
	case 96019: /* WinTV-HVR1300 (PAL Video, MPEG Video and IR RX/TX) */
	case 96559: /* WinTV-HVR1300 (PAL Video, MPEG Video no IR) */
	case 96569: /* WinTV-HVR1300 () */
	case 96659: /* WinTV-HVR1300 () */
	case 98559: /* WinTV-HVR1100LP (Video no IR, Retail - Low Profile) */
		/* known */
		break;
	/*
	 * NOTE(review): CX88_BOARD_SAMSUNG_SMT_7020 is a board enum value,
	 * but this switch otherwise compares tveeprom model numbers —
	 * this case looks suspect; confirm the intended value.
	 */
	case CX88_BOARD_SAMSUNG_SMT_7020:
		cx_set(MO_GP0_IO, 0x008989FF);
		break;
	default:
		warn_printk(core, "warning: unknown hauppauge model #%d\n",
			    tv.model);
		break;
	}
	info_printk(core, "hauppauge eeprom: model=%d\n", tv.model);
}
/* ----------------------------------------------------------------------- */
/* some GDI (was: Modular Technology) specific stuff */
/*
 * GDI tuner lookup table, indexed by EEPROM byte 0x0d (see gdi_eeprom()
 * below).  The table is sparse: unlisted indices are zero-initialized
 * and thus have a NULL .name, which gdi_eeprom() treats as "unknown".
 */
static const struct {
	int id;			/* tuner-core tuner type (TUNER_ABSENT if unsupported) */
	int fm;			/* non-zero if the model has an FM radio */
	const char *name;	/* human-readable tuner name for the log */
} gdi_tuner[] = {
	[ 0x01 ] = { .id = TUNER_ABSENT,
		     .name = "NTSC_M" },
	[ 0x02 ] = { .id = TUNER_ABSENT,
		     .name = "PAL_B" },
	[ 0x03 ] = { .id = TUNER_ABSENT,
		     .name = "PAL_I" },
	[ 0x04 ] = { .id = TUNER_ABSENT,
		     .name = "PAL_D" },
	[ 0x05 ] = { .id = TUNER_ABSENT,
		     .name = "SECAM" },
	[ 0x10 ] = { .id = TUNER_ABSENT,
		     .fm = 1,
		     .name = "TEMIC_4049" },
	[ 0x11 ] = { .id = TUNER_TEMIC_4136FY5,
		     .name = "TEMIC_4136" },
	[ 0x12 ] = { .id = TUNER_ABSENT,
		     .name = "TEMIC_4146" },
	[ 0x20 ] = { .id = TUNER_PHILIPS_FQ1216ME,
		     .fm = 1,
		     .name = "PHILIPS_FQ1216_MK3" },
	[ 0x21 ] = { .id = TUNER_ABSENT, .fm = 1,
		     .name = "PHILIPS_FQ1236_MK3" },
	[ 0x22 ] = { .id = TUNER_ABSENT,
		     .name = "PHILIPS_FI1236_MK3" },
	[ 0x23 ] = { .id = TUNER_ABSENT,
		     .name = "PHILIPS_FI1216_MK3" },
};
/*
 * Configure tuner and radio type from a GDI EEPROM: byte 0x0d indexes
 * the gdi_tuner[] table above.  Unknown indices are logged and ignored.
 */
static void gdi_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
	unsigned int idx = eeprom_data[0x0d];
	const char *name = NULL;

	if (idx < ARRAY_SIZE(gdi_tuner))
		name = gdi_tuner[idx].name;

	info_printk(core, "GDI: tuner=%s\n", name ? name : "unknown");
	if (!name)
		return;

	core->board.tuner_type = gdi_tuner[idx].id;
	core->board.radio.type = gdi_tuner[idx].fm ? CX88_RADIO : 0;
}
/* ------------------------------------------------------------------- */
/* some DViCO specific stuff */
/*
 * xc2028/xc3028 tuner callback for DViCO boards.  Only the reset
 * command is supported; it pulses the tuner reset line on GPIO0.
 * Returns 0 on success, -EINVAL for any other command.
 */
static int cx88_dvico_xc2028_callback(struct cx88_core *core,
				      int command, int arg)
{
	if (command != XC2028_TUNER_RESET)
		return -EINVAL;

	if (core->boardnr == CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO) {
		/* GPIO-4 xc3028 tuner */
		cx_set(MO_GP0_IO, 0x00001000);
		cx_clear(MO_GP0_IO, 0x00000010);
		msleep(100);
		cx_set(MO_GP0_IO, 0x00000010);
		msleep(100);
	} else {
		cx_write(MO_GP0_IO, 0x101000);
		mdelay(5);
		cx_set(MO_GP0_IO, 0x101010);
	}

	return 0;
}
/* ----------------------------------------------------------------------- */
/* some Geniatech specific stuff */
/*
 * xc3028 tuner callback for Geniatech-style boards.  Handles only the
 * reset command: first an input-dependent GP1 write (skipped for radio),
 * then a fixed reset pulse on GP1.  The exact write/delay order is part
 * of the hardware sequence — do not reorder.
 * Returns 0 on success, -EINVAL for unknown commands.
 */
static int cx88_xc3028_geniatech_tuner_callback(struct cx88_core *core,
						int command, int mode)
{
	switch (command) {
	case XC2028_TUNER_RESET:
		/* pre-reset GP1 setup depends on the active input */
		switch (INPUT(core->input).type) {
		case CX88_RADIO:
			break;
		case CX88_VMUX_DVB:
			cx_write(MO_GP1_IO, 0x030302);
			mdelay(50);
			break;
		default:
			cx_write(MO_GP1_IO, 0x030301);
			mdelay(50);
		}
		/* reset pulse: high, low, high */
		cx_write(MO_GP1_IO, 0x101010);
		mdelay(50);
		cx_write(MO_GP1_IO, 0x101000);
		mdelay(50);
		cx_write(MO_GP1_IO, 0x101010);
		mdelay(50);
		return 0;
	}
	return -EINVAL;
}
/*
 * xc3028 tuner callback for the Leadtek WinFast 1800H family: pulse the
 * tuner reset line (GPIO 12, bit 0x10 on GP1).  Only the reset command
 * is handled; anything else yields -EINVAL.
 */
static int cx88_xc3028_winfast1800h_callback(struct cx88_core *core,
					     int command, int arg)
{
	if (command != XC2028_TUNER_RESET)
		return -EINVAL;

	/* GPIO 12 (xc3028 tuner reset) */
	cx_set(MO_GP1_IO, 0x1010);
	mdelay(50);
	cx_clear(MO_GP1_IO, 0x10);
	mdelay(50);
	cx_set(MO_GP1_IO, 0x10);
	mdelay(50);

	return 0;
}
/* ------------------------------------------------------------------- */
/* some Prolink PV-8000GT specific stuff */
/*
 * xc3028 tuner callback for the Prolink PV-8000GT / Global Xtreme:
 * pulse the reset sequence on GP2.  Only XC2028_TUNER_RESET is
 * supported; other commands return -EINVAL.
 */
static int cx88_pv_8000gt_callback(struct cx88_core *core,
				   int command, int arg)
{
	if (command != XC2028_TUNER_RESET)
		return -EINVAL;

	cx_write(MO_GP2_IO, 0xcf7);
	mdelay(50);
	cx_write(MO_GP2_IO, 0xef5);
	mdelay(50);
	cx_write(MO_GP2_IO, 0xcf7);

	return 0;
}
/* ----------------------------------------------------------------------- */
/* some DViCO specific stuff */
/*
 * One-shot i2c initialization for the DViCO FusionHDTV DVB-T Hybrid:
 * writes a fixed register sequence to the device at i2c address 0x45
 * (presumably the demodulator — verify against the board schematic).
 * Stops at the first failed transfer and logs a warning.
 */
static void dvico_fusionhdtv_hybrid_init(struct cx88_core *core)
{
	struct i2c_msg msg = { .addr = 0x45, .flags = 0 };
	int i, err;
	/* twelve 5-byte register writes plus one final 2-byte message */
	static u8 init_bufs[13][5] = {
		{ 0x10, 0x00, 0x20, 0x01, 0x03 },
		{ 0x10, 0x10, 0x01, 0x00, 0x21 },
		{ 0x10, 0x10, 0x10, 0x00, 0xCA },
		{ 0x10, 0x10, 0x12, 0x00, 0x08 },
		{ 0x10, 0x10, 0x13, 0x00, 0x0A },
		{ 0x10, 0x10, 0x16, 0x01, 0xC0 },
		{ 0x10, 0x10, 0x22, 0x01, 0x3D },
		{ 0x10, 0x10, 0x73, 0x01, 0x2E },
		{ 0x10, 0x10, 0x72, 0x00, 0xC5 },
		{ 0x10, 0x10, 0x71, 0x01, 0x97 },
		{ 0x10, 0x10, 0x70, 0x00, 0x0F },
		{ 0x10, 0x10, 0xB0, 0x00, 0x01 },
		{ 0x03, 0x0C },
	};
	for (i = 0; i < ARRAY_SIZE(init_bufs); i++) {
		msg.buf = init_bufs[i];
		msg.len = (i != 12 ? 5 : 2);	/* last buffer is only 2 bytes */
		err = i2c_transfer(&core->i2c_adap, &msg, 1);
		if (err != 1) {
			warn_printk(core, "dvico_fusionhdtv_hybrid_init buf %d "
				    "failed (err = %d)!\n", i, err);
			return;
		}
	}
}
/*
 * Top-level xc2028/xc3028 tuner callback: first dispatch to a
 * board-specific handler when one exists, otherwise run the generic
 * reset sequence (extra GP0/GP2 setup for radio, then a reset pulse on
 * GP1).  The write/delay order is part of the hardware sequence.
 * Returns 0 on success, -EINVAL for unhandled commands.
 */
static int cx88_xc2028_tuner_callback(struct cx88_core *core,
				      int command, int arg)
{
	/* Board-specific callbacks */
	switch (core->boardnr) {
	case CX88_BOARD_POWERCOLOR_REAL_ANGEL:
	case CX88_BOARD_GENIATECH_X8000_MT:
	case CX88_BOARD_KWORLD_ATSC_120:
		return cx88_xc3028_geniatech_tuner_callback(core,
							command, arg);
	case CX88_BOARD_PROLINK_PV_8000GT:
	case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
		return cx88_pv_8000gt_callback(core, command, arg);
	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
	case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
		return cx88_dvico_xc2028_callback(core, command, arg);
	case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
	case CX88_BOARD_WINFAST_DTV1800H:
		return cx88_xc3028_winfast1800h_callback(core, command, arg);
	}

	/* generic fallback */
	switch (command) {
	case XC2028_TUNER_RESET:
		switch (INPUT(core->input).type) {
		case CX88_RADIO:
			info_printk(core, "setting GPIO to radio!\n");
			cx_write(MO_GP0_IO, 0x4ff);
			mdelay(250);
			cx_write(MO_GP2_IO, 0xff);
			mdelay(250);
			break;
		case CX88_VMUX_DVB:	/* Digital TV*/
		default:		/* Analog TV */
			info_printk(core, "setting GPIO to TV!\n");
			break;
		}
		/* reset pulse on GP1: high, low, high */
		cx_write(MO_GP1_IO, 0x101010);
		mdelay(250);
		cx_write(MO_GP1_IO, 0x101000);
		mdelay(250);
		cx_write(MO_GP1_IO, 0x101010);
		mdelay(250);
		return 0;
	}
	return -EINVAL;
}
/* ----------------------------------------------------------------------- */
/* Tuner callback function. Currently only needed for the Pinnacle   *
 * PCTV HD 800i with an xc5000 silicon tuner. This is used for both  *
 * analog tuner attach (tuner-core.c) and dvb tuner attach (cx88-dvb.c) */
/*
 * xc5000 tuner callback, used for both analog tuner attach
 * (tuner-core.c) and dvb tuner attach (cx88-dvb.c).  Command 0 is the
 * reset request from the xc5000 driver; everything else is rejected.
 *
 * Consistency fix: the 7_GOLD error path used a raw printk while the
 * 800i path used err_printk — both now report via err_printk.
 */
static int cx88_xc5000_tuner_callback(struct cx88_core *core,
				      int command, int arg)
{
	switch (core->boardnr) {
	case CX88_BOARD_PINNACLE_PCTV_HD_800i:
		if (command == 0) { /* This is the reset command from xc5000 */

			/* djh - According to the engineer at PCTV Systems,
			   the xc5000 reset pin is supposed to be on GPIO12.
			   However, despite three nights of effort, pulling
			   that GPIO low didn't reset the xc5000.  While
			   pulling MO_SRST_IO low does reset the xc5000, this
			   also results in the s5h1409 being reset as well.
			   This causes tuning to always fail since the internal
			   state of the s5h1409 does not match the driver's
			   state.  Given that the only two conditions in which
			   the driver performs a reset is during firmware load
			   and powering down the chip, I am taking out the
			   reset.  We know that the chip is being reset
			   when the cx88 comes online, and not being able to
			   do power management for this board is worse than
			   not having any tuning at all. */
			return 0;
		} else {
			err_printk(core, "xc5000: unknown tuner "
				   "callback command.\n");
			return -EINVAL;
		}
		break;
	case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
		if (command == 0) { /* This is the reset command from xc5000 */
			/* pulse the reset line low, then high again */
			cx_clear(MO_GP0_IO, 0x00000010);
			msleep(10);
			cx_set(MO_GP0_IO, 0x00000010);
			return 0;
		} else {
			err_printk(core, "xc5000: unknown tuner "
				   "callback command.\n");
			return -EINVAL;
		}
		break;
	}
	return 0; /* Should never be here */
}
/*
 * Generic tuner callback entry point handed to the tuner drivers.
 * @priv is the i2c_algo_bit_data whose ->data points back at the
 * cx88_core.  Dispatches to the xc2028/xc3028 or xc5000 handler based
 * on the configured tuner type; returns -EINVAL for anything else.
 */
int cx88_tuner_callback(void *priv, int component, int command, int arg)
{
	struct i2c_algo_bit_data *bit_data = priv;
	struct cx88_core *core;

	if (!bit_data) {
		printk(KERN_ERR "cx88: Error - i2c private data undefined.\n");
		return -EINVAL;
	}

	core = bit_data->data;
	if (!core) {
		printk(KERN_ERR "cx88: Error - device struct undefined.\n");
		return -EINVAL;
	}

	if (component != DVB_FRONTEND_COMPONENT_TUNER)
		return -EINVAL;

	if (core->board.tuner_type == TUNER_XC2028) {
		info_printk(core, "Calling XC2028/3028 callback\n");
		return cx88_xc2028_tuner_callback(core, command, arg);
	}

	if (core->board.tuner_type == TUNER_XC5000) {
		info_printk(core, "Calling XC5000 callback\n");
		return cx88_xc5000_tuner_callback(core, command, arg);
	}

	err_printk(core, "Error: Calling callback for tuner %d\n",
		   core->board.tuner_type);
	return -EINVAL;
}
EXPORT_SYMBOL(cx88_tuner_callback);
/* ----------------------------------------------------------------------- */
/*
 * Print help for an unrecognized board: explain why autodetection
 * failed (missing vs. unknown PCI subsystem ID) and dump the full list
 * of valid card=<n> choices.
 */
static void cx88_card_list(struct cx88_core *core, struct pci_dev *pci)
{
	int n;

	if (pci->subsystem_vendor != 0 || pci->subsystem_device != 0) {
		printk(KERN_ERR
		       "%s: Your board isn't known (yet) to the driver. You can\n"
		       "%s: try to pick one of the existing card configs via\n"
		       "%s: card=<n> insmod option. Updating to the latest\n"
		       "%s: version might help as well.\n",
		       core->name,core->name,core->name,core->name);
	} else {
		printk(KERN_ERR
		       "%s: Your board has no valid PCI Subsystem ID and thus can't\n"
		       "%s: be autodetected. Please pass card=<n> insmod option to\n"
		       "%s: workaround that. Redirect complaints to the vendor of\n"
		       "%s: the TV card. Best regards,\n"
		       "%s: -- tux\n",
		       core->name,core->name,core->name,core->name,core->name);
	}

	err_printk(core, "Here is a list of valid choices for the card=<n> "
		   "insmod option:\n");
	for (n = 0; n < ARRAY_SIZE(cx88_boards); n++)
		printk(KERN_ERR "%s: card=%d -> %s\n",
		       core->name, n, cx88_boards[n].name);
}
/*
 * Board-specific GPIO setup that must run before the i2c bus is
 * scanned — typically bringing demods/tuners out of reset so they
 * answer on the bus.  Write/delay order is part of each hardware
 * sequence; do not reorder.
 */
static void cx88_card_setup_pre_i2c(struct cx88_core *core)
{
	switch (core->boardnr) {
	case CX88_BOARD_HAUPPAUGE_HVR1300:
		/*
		 * Bring the 702 demod up before i2c scanning/attach or devices are hidden
		 * We leave here with the 702 on the bus
		 *
		 * "reset the IR receiver on GPIO[3]"
		 * Reported by Mike Crash <mike AT mikecrash.com>
		 */
		cx_write(MO_GP0_IO, 0x0000ef88);
		udelay(1000);
		cx_clear(MO_GP0_IO, 0x00000088);
		udelay(50);
		cx_set(MO_GP0_IO, 0x00000088); /* 702 out of reset */
		udelay(1000);
		break;
	case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
	case CX88_BOARD_PROLINK_PV_8000GT:
		/* same GP2 reset pulse as cx88_pv_8000gt_callback() */
		cx_write(MO_GP2_IO, 0xcf7);
		mdelay(50);
		cx_write(MO_GP2_IO, 0xef5);
		mdelay(50);
		cx_write(MO_GP2_IO, 0xcf7);
		msleep(10);
		break;
	case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
		/* Enable the xc5000 tuner */
		cx_set(MO_GP0_IO, 0x00001010);
		break;
	case CX88_BOARD_HAUPPAUGE_HVR3000:
	case CX88_BOARD_HAUPPAUGE_HVR4000:
		/* Init GPIO */
		cx_write(MO_GP0_IO, core->board.input[0].gpio0);
		udelay(1000);
		cx_clear(MO_GP0_IO, 0x00000080);
		udelay(50);
		cx_set(MO_GP0_IO, 0x00000080); /* 702 out of reset */
		udelay(1000);
		break;
	case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
	case CX88_BOARD_WINFAST_DTV1800H:
		/* GPIO 12 (xc3028 tuner reset) */
		cx_set(MO_GP1_IO, 0x1010);
		mdelay(50);
		cx_clear(MO_GP1_IO, 0x10);
		mdelay(50);
		cx_set(MO_GP1_IO, 0x10);
		mdelay(50);
		break;
	case CX88_BOARD_TWINHAN_VP1027_DVBS:
		cx_write(MO_GP0_IO, 0x00003230);
		cx_write(MO_GP0_IO, 0x00003210);
		msleep(1);
		cx_write(MO_GP0_IO, 0x00001230);
		break;
	}
}
/*
* Sets board-dependent xc3028 configuration
*/
/*
 * Fill in the board-dependent xc3028 control structure: common defaults
 * first (default firmware, 64-byte i2c chunks), then per-board demod
 * and MTS overrides.
 */
void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
{
	/* defaults shared by every xc3028 board */
	memset(ctl, 0, sizeof(*ctl));
	ctl->fname = XC2028_DEFAULT_FIRMWARE;
	ctl->max_len = 64;

	switch (core->boardnr) {
	case CX88_BOARD_POWERCOLOR_REAL_ANGEL:
		/* Now works with firmware version 2.7 */
		if (core->i2c_algo.udelay < 16)
			core->i2c_algo.udelay = 16;
		break;
	case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
	case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
	case CX88_BOARD_PROLINK_PV_8000GT:
		/* Those boards use non-MTS firmware */
		break;
	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
	case CX88_BOARD_WINFAST_DTV1800H:
		ctl->demod = XC3028_FE_ZARLINK456;
		break;
	case CX88_BOARD_PINNACLE_HYBRID_PCTV:
	case CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII:
		ctl->demod = XC3028_FE_ZARLINK456;
		ctl->mts = 1;
		break;
	case CX88_BOARD_KWORLD_ATSC_120:
	case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
		ctl->demod = XC3028_FE_OREN538;
		break;
	case CX88_BOARD_GENIATECH_X8000_MT:
		/* FIXME: For this board, the xc3028 never recovers after being
		   powered down (the reset GPIO probably is not set properly).
		   We don't have access to the hardware so we cannot determine
		   which GPIO is used for xc3028, so just disable power xc3028
		   power management for now */
		ctl->disable_power_mgmt = 1;
		break;
	default:
		ctl->demod = XC3028_FE_OREN538;
		ctl->mts = 1;
		break;
	}
}
EXPORT_SYMBOL_GPL(cx88_setup_xc3028);
/*
 * Final per-board setup: read and parse the configuration EEPROM where
 * present, run board-specific GPIO/reset sequences, then configure the
 * radio/TV tuner and related sub-devices.
 */
static void cx88_card_setup(struct cx88_core *core)
{
static u8 eeprom[256];
struct tuner_setup tun_setup;
unsigned int mode_mask = T_RADIO | T_ANALOG_TV;
memset(&tun_setup, 0, sizeof(tun_setup));
/* i2c_rc == 0: the i2c bus is available - read the board EEPROM
 * at address 0xa0 >> 1 = 0x50 */
if (0 == core->i2c_rc) {
core->i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&core->i2c_client, eeprom, sizeof(eeprom));
}
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE:
case CX88_BOARD_HAUPPAUGE_ROSLYN:
if (0 == core->i2c_rc)
hauppauge_eeprom(core, eeprom+8);
break;
case CX88_BOARD_GDI:
if (0 == core->i2c_rc)
gdi_eeprom(core, eeprom);
break;
case CX88_BOARD_LEADTEK_PVR2000:
case CX88_BOARD_WINFAST_DV2000:
case CX88_BOARD_WINFAST2000XP_EXPERT:
if (0 == core->i2c_rc)
leadtek_eeprom(core, eeprom);
break;
case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
case CX88_BOARD_HAUPPAUGE_DVB_T1:
case CX88_BOARD_HAUPPAUGE_HVR1100:
case CX88_BOARD_HAUPPAUGE_HVR1100LP:
case CX88_BOARD_HAUPPAUGE_HVR3000:
case CX88_BOARD_HAUPPAUGE_HVR1300:
case CX88_BOARD_HAUPPAUGE_HVR4000:
case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
case CX88_BOARD_HAUPPAUGE_IRONLY:
if (0 == core->i2c_rc)
hauppauge_eeprom(core, eeprom);
break;
case CX88_BOARD_KWORLD_DVBS_100:
cx_write(MO_GP0_IO, 0x000007f8);
cx_write(MO_GP1_IO, 0x00000001);
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
/* GPIO0:0 is hooked to demod reset */
/* GPIO0:4 is hooked to xc3028 reset */
cx_write(MO_GP0_IO, 0x00111100);
msleep(1);
cx_write(MO_GP0_IO, 0x00111111);
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
/* GPIO0:6 is hooked to FX2 reset pin */
cx_set(MO_GP0_IO, 0x00004040);
cx_clear(MO_GP0_IO, 0x00000040);
msleep(1000);
cx_set(MO_GP0_IO, 0x00004040);
/* FALLTHROUGH */
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID:
/* GPIO0:0 is hooked to mt352 reset pin */
cx_set(MO_GP0_IO, 0x00000101);
cx_clear(MO_GP0_IO, 0x00000001);
msleep(1);
cx_set(MO_GP0_IO, 0x00000101);
if (0 == core->i2c_rc &&
core->boardnr == CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID)
dvico_fusionhdtv_hybrid_init(core);
break;
case CX88_BOARD_KWORLD_DVB_T:
case CX88_BOARD_DNTV_LIVE_DVB_T:
cx_set(MO_GP0_IO, 0x00000707);
cx_set(MO_GP2_IO, 0x00000101);
cx_clear(MO_GP2_IO, 0x00000001);
msleep(1);
cx_clear(MO_GP0_IO, 0x00000007);
cx_set(MO_GP2_IO, 0x00000101);
break;
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
cx_write(MO_GP0_IO, 0x00080808);
break;
case CX88_BOARD_ATI_HDTVWONDER:
if (0 == core->i2c_rc) {
/* enable tuner */
int i;
static const u8 buffer [][2] = {
{0x10,0x12},
{0x13,0x04},
{0x16,0x00},
{0x14,0x04},
{0x17,0x00}
};
core->i2c_client.addr = 0x0a;
/* each 2-byte pair is one register write to the tuner at 0x0a */
for (i = 0; i < ARRAY_SIZE(buffer); i++)
if (2 != i2c_master_send(&core->i2c_client,
buffer[i],2))
warn_printk(core, "Unable to enable "
"tuner(%i).\n", i);
}
break;
case CX88_BOARD_MSI_TVANYWHERE_MASTER:
{
/* configure the tea5767 FM radio chip via the tuner sub-device */
struct v4l2_priv_tun_config tea5767_cfg;
struct tea5767_ctrl ctl;
memset(&ctl, 0, sizeof(ctl));
ctl.high_cut = 1;
ctl.st_noise = 1;
ctl.deemph_75 = 1;
ctl.xtal_freq = TEA5767_HIGH_LO_13MHz;
tea5767_cfg.tuner = TUNER_TEA5767;
tea5767_cfg.priv = &ctl;
call_all(core, tuner, s_config, &tea5767_cfg);
break;
}
case CX88_BOARD_TEVII_S420:
case CX88_BOARD_TEVII_S460:
case CX88_BOARD_TEVII_S464:
case CX88_BOARD_OMICOM_SS4_PCI:
case CX88_BOARD_TBS_8910:
case CX88_BOARD_TBS_8920:
case CX88_BOARD_PROF_6200:
case CX88_BOARD_PROF_7300:
case CX88_BOARD_PROF_7301:
case CX88_BOARD_SATTRADE_ST4200:
cx_write(MO_GP0_IO, 0x8000);
msleep(100);
cx_write(MO_SRST_IO, 0);
msleep(10);
cx_write(MO_GP0_IO, 0x8080);
msleep(100);
cx_write(MO_SRST_IO, 1);
msleep(100);
break;
} /*end switch() */
/* Setup tuners */
if ((core->board.radio_type != UNSET)) {
tun_setup.mode_mask = T_RADIO;
tun_setup.type = core->board.radio_type;
tun_setup.addr = core->board.radio_addr;
tun_setup.tuner_callback = cx88_tuner_callback;
call_all(core, tuner, s_type_addr, &tun_setup);
mode_mask &= ~T_RADIO;
}
if (core->board.tuner_type != TUNER_ABSENT) {
tun_setup.mode_mask = mode_mask;
tun_setup.type = core->board.tuner_type;
tun_setup.addr = core->board.tuner_addr;
tun_setup.tuner_callback = cx88_tuner_callback;
call_all(core, tuner, s_type_addr, &tun_setup);
}
if (core->board.tda9887_conf) {
struct v4l2_priv_tun_config tda9887_cfg;
tda9887_cfg.tuner = TUNER_TDA9887;
tda9887_cfg.priv = &core->board.tda9887_conf;
call_all(core, tuner, s_config, &tda9887_cfg);
}
if (core->board.tuner_type == TUNER_XC2028) {
struct v4l2_priv_tun_config xc2028_cfg;
struct xc2028_ctrl ctl;
/* Fills device-dependent initialization parameters */
cx88_setup_xc3028(core, &ctl);
/* Sends parameters to xc2028/3028 tuner */
memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
xc2028_cfg.tuner = TUNER_XC2028;
xc2028_cfg.priv = &ctl;
info_printk(core, "Asking xc2028/3028 to load firmware %s\n",
ctl.fname);
call_all(core, tuner, s_config, &xc2028_cfg);
}
/* request power-off state (s_power 0) from all sub-devices */
call_all(core, core, s_power, 0);
}
/* ------------------------------------------------------------------ */
/*
 * Apply host-bridge quirk workarounds and the "latency" insmod option.
 *
 * @name: device name used as printk prefix
 * @pci:  the cx88 PCI device
 *
 * Known-problematic bridges get the TBFX/VSFX bits set in the chip's
 * device-control register; an explicit latency (from the quirk table
 * or the module parameter) is written to PCI_LATENCY_TIMER.
 * Always returns 0.
 */
static int cx88_pci_quirks(const char *name, struct pci_dev *pci)
{
	unsigned int lat = UNSET;
	u8 ctrl = 0;
	u8 value;

	/* check pci quirks */
	if (pci_pci_problems & PCIPCI_TRITON) {
		printk(KERN_INFO "%s: quirk: PCIPCI_TRITON -- set TBFX\n",
		       name);
		ctrl |= CX88X_EN_TBFX;
	}
	if (pci_pci_problems & PCIPCI_NATOMA) {
		printk(KERN_INFO "%s: quirk: PCIPCI_NATOMA -- set TBFX\n",
		       name);
		ctrl |= CX88X_EN_TBFX;
	}
	if (pci_pci_problems & PCIPCI_VIAETBF) {
		printk(KERN_INFO "%s: quirk: PCIPCI_VIAETBF -- set TBFX\n",
		       name);
		ctrl |= CX88X_EN_TBFX;
	}
	if (pci_pci_problems & PCIPCI_VSFX) {
		printk(KERN_INFO "%s: quirk: PCIPCI_VSFX -- set VSFX\n",
		       name);
		ctrl |= CX88X_EN_VSFX;
	}
#ifdef PCIPCI_ALIMAGIK
	if (pci_pci_problems & PCIPCI_ALIMAGIK) {
		printk(KERN_INFO "%s: quirk: PCIPCI_ALIMAGIK -- latency fixup\n",
		       name);
		lat = 0x0A;
	}
#endif

	/* check insmod options */
	if (UNSET != latency)
		lat = latency;

	/* apply stuff */
	if (ctrl) {
		pci_read_config_byte(pci, CX88X_DEVCTRL, &value);
		value |= ctrl;
		pci_write_config_byte(pci, CX88X_DEVCTRL, value);
	}
	if (UNSET != lat) {
		/* Bug fix: print and write the merged value 'lat'.  The old
		 * code used the raw module parameter 'latency', which is
		 * still UNSET when only the ALIMAGIK quirk requested a
		 * latency, so a bogus value was written to the timer. */
		printk(KERN_INFO "%s: setting pci latency timer to %d\n",
		       name, lat);
		pci_write_config_byte(pci, PCI_LATENCY_TIMER, lat);
	}
	return 0;
}
/*
 * Reserve the chip's MMIO window (PCI BAR 0) under this core's name.
 * Returns 0 on success, -EBUSY (after logging) when the region is
 * already claimed.
 */
int cx88_get_resources(const struct cx88_core *core, struct pci_dev *pci)
{
	if (!request_mem_region(pci_resource_start(pci,0),
				pci_resource_len(pci,0),
				core->name)) {
		printk(KERN_ERR
		       "%s/%d: Can't get MMIO memory @ 0x%llx, subsystem: %04x:%04x\n",
		       core->name, PCI_FUNC(pci->devfn),
		       (unsigned long long)pci_resource_start(pci, 0),
		       pci->subsystem_vendor, pci->subsystem_device);
		return -EBUSY;
	}
	return 0;
}
/* Allocate and initialize the cx88 core struct. One should hold the
 * devlist mutex before calling this.
 *
 * Registers the v4l2 device, reserves and maps the MMIO window,
 * identifies the board (insmod option, PCI subsystem id table, or
 * unknown), resets the hardware, brings up i2c and probes the tuner
 * sub-devices.  Returns the new core (refcount 1) or NULL on failure.
 */
struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
{
	struct cx88_core *core;
	int i;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (core == NULL)
		return NULL;

	/* the caller gets the initial reference */
	atomic_inc(&core->refcount);
	core->pci_bus = pci->bus->number;
	core->pci_slot = PCI_SLOT(pci->devfn);
	core->pci_irqmask = PCI_INT_RISC_RD_BERRINT | PCI_INT_RISC_WR_BERRINT |
			    PCI_INT_BRDG_BERRINT | PCI_INT_SRC_DMA_BERRINT |
			    PCI_INT_DST_DMA_BERRINT | PCI_INT_IPB_DMA_BERRINT;
	mutex_init(&core->lock);

	core->nr = nr;
	sprintf(core->name, "cx88[%d]", core->nr);

	strcpy(core->v4l2_dev.name, core->name);
	if (v4l2_device_register(NULL, &core->v4l2_dev)) {
		kfree(core);
		return NULL;
	}

	if (0 != cx88_get_resources(core, pci)) {
		v4l2_device_unregister(&core->v4l2_dev);
		kfree(core);
		return NULL;
	}

	/* PCI stuff */
	cx88_pci_quirks(core->name, pci);
	core->lmmio = ioremap(pci_resource_start(pci, 0),
			      pci_resource_len(pci, 0));
	core->bmmio = (u8 __iomem *)core->lmmio;

	if (core->lmmio == NULL) {
		/* Bug fix: undo cx88_get_resources() and the v4l2 device
		 * registration; the old code leaked both on this path. */
		release_mem_region(pci_resource_start(pci, 0),
				   pci_resource_len(pci, 0));
		v4l2_device_unregister(&core->v4l2_dev);
		kfree(core);
		return NULL;
	}

	/* board config: insmod option first, then PCI subsystem id table */
	core->boardnr = UNSET;
	if (card[core->nr] < ARRAY_SIZE(cx88_boards))
		core->boardnr = card[core->nr];
	for (i = 0; UNSET == core->boardnr && i < ARRAY_SIZE(cx88_subids); i++)
		if (pci->subsystem_vendor == cx88_subids[i].subvendor &&
		    pci->subsystem_device == cx88_subids[i].subdevice)
			core->boardnr = cx88_subids[i].card;
	if (UNSET == core->boardnr) {
		core->boardnr = CX88_BOARD_UNKNOWN;
		cx88_card_list(core, pci);
	}

	memcpy(&core->board, &cx88_boards[core->boardnr], sizeof(core->board));

	if (!core->board.num_frontends && (core->board.mpeg & CX88_MPEG_DVB))
		core->board.num_frontends = 1;

	info_printk(core, "subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n",
		    pci->subsystem_vendor, pci->subsystem_device, core->board.name,
		    core->boardnr, card[core->nr] == core->boardnr ?
		    "insmod option" : "autodetected",
		    core->board.num_frontends);

	/* insmod options may override the tuner types from the board table */
	if (tuner[core->nr] != UNSET)
		core->board.tuner_type = tuner[core->nr];
	if (radio[core->nr] != UNSET)
		core->board.radio_type = radio[core->nr];

	info_printk(core, "TV tuner type %d, Radio tuner type %d\n",
		    core->board.tuner_type, core->board.radio_type);

	/* init hardware */
	cx88_reset(core);
	cx88_card_setup_pre_i2c(core);
	cx88_i2c_init(core, pci);

	/* load tuner module, if needed */
	if (TUNER_ABSENT != core->board.tuner_type) {
		/* Ignore 0x6b and 0x6f on cx88 boards.
		 * FusionHDTV5 RT Gold has an ir receiver at 0x6b
		 * and an RTC at 0x6f which can get corrupted if probed. */
		static const unsigned short tv_addrs[] = {
			0x42, 0x43, 0x4a, 0x4b,		/* tda8290 */
			0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
			0x68, 0x69, 0x6a, 0x6c, 0x6d, 0x6e,
			I2C_CLIENT_END
		};
		int has_demod = (core->board.tda9887_conf & TDA9887_PRESENT);

		/* I don't trust the radio_type as is stored in the card
		   definitions, so we just probe for it.
		   The radio_type is sometimes missing, or set to UNSET but
		   later code configures a tea5767.
		 */
		v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
				    "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
		if (has_demod)
			v4l2_i2c_new_subdev(&core->v4l2_dev,
					    &core->i2c_adap, "tuner",
					    0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
		if (core->board.tuner_addr == ADDR_UNSET) {
			v4l2_i2c_new_subdev(&core->v4l2_dev,
					    &core->i2c_adap, "tuner",
					    0, has_demod ? tv_addrs + 4 : tv_addrs);
		} else {
			v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
					    "tuner", core->board.tuner_addr, NULL);
		}
	}

	cx88_card_setup(core);
	if (!disable_ir) {
		cx88_i2c_init_ir(core);
		cx88_ir_init(core, pci);
	}

	return core;
}
| gpl-2.0 |
profglavcho/mt6735-kernel-3.10.61 | drivers/media/usb/gspca/tv8532.c | 2538 | 9997 | /*
* Quickcam cameras initialization data
*
* V4L2 by Jean-Francois Moine <http://moinejf.free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#define MODULE_NAME "tv8532"
#include "gspca.h"
MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("TV8532 USB Camera Driver");
MODULE_LICENSE("GPL");
/* specific webcam descriptor (per-device driver state) */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
__u8 packet; /* isoc packets still expected for the current frame
              * (set and counted down in sd_pkt_scan) */
};
/* supported frame formats: raw Bayer (SBGGR8) at 176x144 and 352x288;
 * .priv = 1 marks the small mode and is tested in sd_start() */
static const struct v4l2_pix_format sif_mode[] = {
{176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 176 * 144,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
};
/* TV-8532A (ICM532A) registers (LE) */
#define R00_PART_CONTROL 0x00
#define LATENT_CHANGE 0x80
#define EXPO_CHANGE 0x04
#define R01_TIMING_CONTROL_LOW 0x01
#define CMD_EEprom_Open 0x30
#define CMD_EEprom_Close 0x29
#define R03_TABLE_ADDR 0x03
#define R04_WTRAM_DATA_L 0x04
#define R05_WTRAM_DATA_M 0x05
#define R06_WTRAM_DATA_H 0x06
#define R07_TABLE_LEN 0x07
#define R08_RAM_WRITE_ACTION 0x08
#define R0C_AD_WIDTHL 0x0c
#define R0D_AD_WIDTHH 0x0d
#define R0E_AD_HEIGHTL 0x0e
#define R0F_AD_HEIGHTH 0x0f
#define R10_AD_COL_BEGINL 0x10
#define R11_AD_COL_BEGINH 0x11
#define MIRROR 0x04 /* [10] */
#define R14_AD_ROW_BEGINL 0x14
#define R15_AD_ROWBEGINH 0x15
#define R1C_AD_EXPOSE_TIMEL 0x1c
#define R20_GAIN_G1L 0x20
#define R21_GAIN_G1H 0x21
#define R22_GAIN_RL 0x22
#define R23_GAIN_RH 0x23
#define R24_GAIN_BL 0x24
#define R25_GAIN_BH 0x25
#define R26_GAIN_G2L 0x26
#define R27_GAIN_G2H 0x27
#define R28_QUANT 0x28
#define R29_LINE 0x29
#define R2C_POLARITY 0x2c
#define R2D_POINT 0x2d
#define R2E_POINTH 0x2e
#define R2F_POINTB 0x2f
#define R30_POINTBH 0x30
#define R31_UPD 0x31
#define R2A_HIGH_BUDGET 0x2a
#define R2B_LOW_BUDGET 0x2b
#define R34_VID 0x34
#define R35_VIDH 0x35
#define R36_PID 0x36
#define R37_PIDH 0x37
#define R39_Test1 0x39 /* GPIO */
#define R3B_Test3 0x3b /* GPIO */
#define R83_AD_IDH 0x83
#define R91_AD_SLOPEREG 0x91
#define R94_AD_BITCONTROL 0x94
static const u8 eeprom_data[][3] = {
/* dataH dataM dataL */
{0x01, 0x00, 0x01},
{0x01, 0x80, 0x11},
{0x05, 0x00, 0x14},
{0x05, 0x00, 0x1c},
{0x0d, 0x00, 0x1e},
{0x05, 0x00, 0x1f},
{0x05, 0x05, 0x19},
{0x05, 0x01, 0x1b},
{0x05, 0x09, 0x1e},
{0x0d, 0x89, 0x2e},
{0x05, 0x89, 0x2f},
{0x05, 0x0d, 0xd9},
{0x05, 0x09, 0xf1},
};
/* write 1 byte to register @index via a vendor control request;
 * errors from usb_control_msg are silently ignored here */
static void reg_w1(struct gspca_dev *gspca_dev,
__u16 index, __u8 value)
{
gspca_dev->usb_buf[0] = value;
usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
0x02, /* vendor-specific request */
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, /* value */
index, gspca_dev->usb_buf, 1, 500); /* 500 ms timeout */
}
/* write 2 bytes (low byte first) at register @index via a vendor
 * control request; errors from usb_control_msg are silently ignored */
static void reg_w2(struct gspca_dev *gspca_dev,
u16 index, u16 value)
{
gspca_dev->usb_buf[0] = value;
gspca_dev->usb_buf[1] = value >> 8;
usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
0x02, /* vendor-specific request */
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, /* value */
index, gspca_dev->usb_buf, 2, 500); /* 500 ms timeout */
}
/* upload the eeprom_data table into the sensor's table RAM: open the
 * EEPROM window, write each (H,M,L) triplet at its index, record the
 * table length, then close the window again */
static void tv_8532WriteEEprom(struct gspca_dev *gspca_dev)
{
int i;
reg_w1(gspca_dev, R01_TIMING_CONTROL_LOW, CMD_EEprom_Open);
for (i = 0; i < ARRAY_SIZE(eeprom_data); i++) {
reg_w1(gspca_dev, R03_TABLE_ADDR, i);
reg_w1(gspca_dev, R04_WTRAM_DATA_L, eeprom_data[i][2]);
reg_w1(gspca_dev, R05_WTRAM_DATA_M, eeprom_data[i][1]);
reg_w1(gspca_dev, R06_WTRAM_DATA_H, eeprom_data[i][0]);
reg_w1(gspca_dev, R08_RAM_WRITE_ACTION, 0); /* commit this entry */
}
reg_w1(gspca_dev, R07_TABLE_LEN, i); /* i == number of entries written */
reg_w1(gspca_dev, R01_TIMING_CONTROL_LOW, CMD_EEprom_Close);
}
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
		     const struct usb_device_id *id)
{
	struct cam *cam = &gspca_dev->cam;

	/* advertise the two SIF-class Bayer frame formats */
	cam->cam_mode = sif_mode;
	cam->nmodes = ARRAY_SIZE(sif_mode);
	return 0;
}
/* program the sensor acquisition window, exposure and AD control
 * registers; the final write sets LATENT_CHANGE | EXPO_CHANGE (0x84)
 * in R00 to apply the new values */
static void tv_8532_setReg(struct gspca_dev *gspca_dev)
{
reg_w1(gspca_dev, R3B_Test3, 0x0a); /* Test0Sel = 10 */
/******************************************************/
reg_w1(gspca_dev, R0E_AD_HEIGHTL, 0x90);
reg_w1(gspca_dev, R0F_AD_HEIGHTH, 0x01);
reg_w2(gspca_dev, R1C_AD_EXPOSE_TIMEL, 0x018f); /* matches the control default */
reg_w1(gspca_dev, R10_AD_COL_BEGINL, 0x44);
/* begin active line */
reg_w1(gspca_dev, R11_AD_COL_BEGINH, 0x00);
/* mirror and digital gain */
reg_w1(gspca_dev, R14_AD_ROW_BEGINL, 0x0a);
reg_w1(gspca_dev, R94_AD_BITCONTROL, 0x02);
reg_w1(gspca_dev, R91_AD_SLOPEREG, 0x00);
reg_w1(gspca_dev, R00_PART_CONTROL, LATENT_CHANGE | EXPO_CHANGE);
/* = 0x84 */
}
/* this function is called at probe and resume time:
 * (re)load the sensor table RAM; always succeeds */
static int sd_init(struct gspca_dev *gspca_dev)
{
tv_8532WriteEEprom(gspca_dev);
return 0;
}
/* write a new 16-bit exposure time and apply it via R00
 * (LATENT_CHANGE | EXPO_CHANGE) */
static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
reg_w2(gspca_dev, R1C_AD_EXPOSE_TIMEL, val);
reg_w1(gspca_dev, R00_PART_CONTROL, LATENT_CHANGE | EXPO_CHANGE);
/* 0x84 */
}
/* apply the same 16-bit gain to all four Bayer channels (G1, R, B, G2) */
static void setgain(struct gspca_dev *gspca_dev, s32 val)
{
reg_w2(gspca_dev, R20_GAIN_G1L, val);
reg_w2(gspca_dev, R22_GAIN_RL, val);
reg_w2(gspca_dev, R24_GAIN_BL, val);
reg_w2(gspca_dev, R26_GAIN_G2L, val);
}
/* -- start the camera --
 * program width/quantization, the per-mode line register, the pointer
 * registers and the acquisition window, then pulse R31_UPD to make the
 * new settings take effect */
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
reg_w1(gspca_dev, R0C_AD_WIDTHL, 0xe8); /* 0x20; 0x0c */
reg_w1(gspca_dev, R0D_AD_WIDTHH, 0x03);
/************************************************/
reg_w1(gspca_dev, R28_QUANT, 0x90);
/* 0x72 compressed mode 0x28 */
if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) {
/* 176x144 */
reg_w1(gspca_dev, R29_LINE, 0x41);
/* CIF - 2 lines/packet */
} else {
/* 352x288 */
reg_w1(gspca_dev, R29_LINE, 0x81);
/* CIF - 2 lines/packet */
}
/************************************************/
reg_w1(gspca_dev, R2C_POLARITY, 0x10); /* slow clock */
reg_w1(gspca_dev, R2D_POINT, 0x14);
reg_w1(gspca_dev, R2E_POINTH, 0x01);
reg_w1(gspca_dev, R2F_POINTB, 0x12);
reg_w1(gspca_dev, R30_POINTBH, 0x01);
tv_8532_setReg(gspca_dev);
/************************************************/
reg_w1(gspca_dev, R31_UPD, 0x01); /* update registers */
msleep(200);
reg_w1(gspca_dev, R31_UPD, 0x00); /* end update */
gspca_dev->empty_packet = 0; /* check the empty packets */
sd->packet = 0; /* ignore the first packets */
return 0;
}
/* stop streaming: switch the Test0Sel pin back to GPIO mode */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
reg_w1(gspca_dev, R3B_Test3, 0x0b); /* Test0Sel = 11 = GPIO */
}
/* split each isoc packet into its two Bayer lines and feed them to the
 * frame assembler; an empty isoc packet marks the start of a new frame
 * (height/2 data packets follow, 2 lines per packet) */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* isoc packet */
int len) /* iso packet length */
{
struct sd *sd = (struct sd *) gspca_dev;
int packet_type0, packet_type1;
packet_type0 = packet_type1 = INTER_PACKET;
if (gspca_dev->empty_packet) {
gspca_dev->empty_packet = 0;
sd->packet = gspca_dev->height / 2; /* 2 lines per packet */
packet_type0 = FIRST_PACKET;
} else if (sd->packet == 0)
return; /* 2 more lines in 352x288 ! */
sd->packet--;
if (sd->packet == 0)
packet_type1 = LAST_PACKET;
/* each packet contains:
 * - header 2 bytes
 * - RGRG line
 * - 4 bytes
 * - GBGB line
 * - 4 bytes
 */
gspca_frame_add(gspca_dev, packet_type0,
data + 2, gspca_dev->width);
gspca_frame_add(gspca_dev, packet_type1,
data + gspca_dev->width + 5, gspca_dev->width);
}
/* V4L2 control handler: push exposure/gain changes to the sensor */
static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);

	gspca_dev->usb_err = 0;

	/* hardware is only touched while streaming */
	if (!gspca_dev->streaming)
		return 0;

	if (ctrl->id == V4L2_CID_EXPOSURE)
		setexposure(gspca_dev, ctrl->val);
	else if (ctrl->id == V4L2_CID_GAIN)
		setgain(gspca_dev, ctrl->val);

	return gspca_dev->usb_err;
}
static const struct v4l2_ctrl_ops sd_ctrl_ops = {
.s_ctrl = sd_s_ctrl,
};
/* create the exposure and gain V4L2 controls; the exposure default
 * 0x18f mirrors the value programmed in tv_8532_setReg() */
static int sd_init_controls(struct gspca_dev *gspca_dev)
{
struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
gspca_dev->vdev.ctrl_handler = hdl;
v4l2_ctrl_handler_init(hdl, 2);
v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_EXPOSURE, 0, 0x18f, 1, 0x18f);
v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_GAIN, 0, 0x7ff, 1, 0x100);
if (hdl->error) {
pr_err("Could not initialize controls\n");
return hdl->error;
}
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.config = sd_config,
.init = sd_init,
.init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
};
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x046d, 0x0920)},
{USB_DEVICE(0x046d, 0x0921)},
{USB_DEVICE(0x0545, 0x808b)},
{USB_DEVICE(0x0545, 0x8333)},
{USB_DEVICE(0x0923, 0x010f)},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
/* delegate to the gspca core, passing our descriptor and the size
 * of the per-device state (struct sd) */
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
THIS_MODULE);
}
static struct usb_driver sd_driver = {
.name = MODULE_NAME,
.id_table = device_table,
.probe = sd_probe,
.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
.reset_resume = gspca_resume,
#endif
};
module_usb_driver(sd_driver);
| gpl-2.0 |
floft/rpi-linux | drivers/pps/clients/pps_parport.c | 4074 | 6204 | /*
* pps_parport.c -- kernel parallel port PPS client
*
*
* Copyright (C) 2009 Alexander Gordeev <lasaine@lvk.cs.msu.su>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* TODO:
* implement echo over SEL pin
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irqnr.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/parport.h>
#include <linux/pps_kernel.h>
#define DRVDESC "parallel port PPS client"
/* module parameters */
#define CLEAR_WAIT_MAX 100
#define CLEAR_WAIT_MAX_ERRORS 5
static unsigned int clear_wait = 100;
MODULE_PARM_DESC(clear_wait,
"Maximum number of port reads when polling for signal clear,"
" zero turns clear edge capture off entirely");
module_param(clear_wait, uint, 0);
/* internal per port structure */
struct pps_client_pp {
struct pardevice *pardev; /* parport device */
struct pps_device *pps; /* PPS device */
unsigned int cw; /* port clear timeout */
unsigned int cw_err; /* number of timeouts */
};
/* sample the parallel-port ACK status line; nonzero while asserted */
static inline int signal_is_set(struct parport *port)
{
	return !!(port->ops->read_status(port) & PARPORT_STATUS_ACK);
}
/* parport interrupt handler: timestamp the assert edge immediately,
 * then (when clear capture is enabled) busy-poll the port with local
 * interrupts off to catch the clear edge as well */
static void parport_irq(void *handle)
{
struct pps_event_time ts_assert, ts_clear;
struct pps_client_pp *dev = handle;
struct parport *port = dev->pardev->port;
unsigned int i;
unsigned long flags;
/* first of all we get the time stamp... */
pps_get_ts(&ts_assert);
if (dev->cw == 0)
/* clear edge capture disabled */
goto out_assert;
/* try capture the clear edge */
/* We have to disable interrupts here. The idea is to prevent
 * other interrupts on the same processor to introduce random
 * lags while polling the port. Reading from IO port is known
 * to take approximately 1us while other interrupt handlers can
 * take much more potentially.
 *
 * Interrupts won't be disabled for a long time because the
 * number of polls is limited by clear_wait parameter which is
 * kept rather low. So it should never be an issue.
 */
local_irq_save(flags);
/* check the signal (no signal means the pulse is lost this time) */
if (!signal_is_set(port)) {
local_irq_restore(flags);
dev_err(dev->pps->dev, "lost the signal\n");
goto out_assert;
}
/* poll the port until the signal is unset */
for (i = dev->cw; i; i--)
if (!signal_is_set(port)) {
pps_get_ts(&ts_clear);
local_irq_restore(flags);
dev->cw_err = 0; /* a good capture resets the error counter */
goto out_both;
}
local_irq_restore(flags);
/* timeout */
dev->cw_err++;
/* too many consecutive timeouts: give up on clear edge capture */
if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) {
dev_err(dev->pps->dev, "disabled clear edge capture after %d"
" timeouts\n", dev->cw_err);
dev->cw = 0;
dev->cw_err = 0;
}
out_assert:
/* fire assert event */
pps_event(dev->pps, &ts_assert,
PPS_CAPTUREASSERT, NULL);
return;
out_both:
/* fire assert event */
pps_event(dev->pps, &ts_assert,
PPS_CAPTUREASSERT, NULL);
/* fire clear event */
pps_event(dev->pps, &ts_clear,
PPS_CAPTURECLEAR, NULL);
return;
}
/* called by parport for each detected port: allocate per-port state,
 * register and claim the port exclusively, create the PPS source and
 * enable the port interrupt; errors unwind through the goto chain */
static void parport_attach(struct parport *port)
{
struct pps_client_pp *device;
struct pps_source_info info = {
.name = KBUILD_MODNAME,
.path = "",
.mode = PPS_CAPTUREBOTH | \
PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
PPS_ECHOASSERT | PPS_ECHOCLEAR | \
PPS_CANWAIT | PPS_TSFMT_TSPEC,
.owner = THIS_MODULE,
.dev = NULL
};
device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL);
if (!device) {
pr_err("memory allocation failed, not attaching\n");
return;
}
device->pardev = parport_register_device(port, KBUILD_MODNAME,
NULL, NULL, parport_irq, PARPORT_FLAG_EXCL, device);
if (!device->pardev) {
pr_err("couldn't register with %s\n", port->name);
goto err_free;
}
if (parport_claim_or_block(device->pardev) < 0) {
pr_err("couldn't claim %s\n", port->name);
goto err_unregister_dev;
}
device->pps = pps_register_source(&info,
PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
if (device->pps == NULL) {
pr_err("couldn't register PPS source\n");
goto err_release_dev;
}
/* start with the configured clear-edge polling budget */
device->cw = clear_wait;
port->ops->enable_irq(port);
pr_info("attached to %s\n", port->name);
return;
err_release_dev:
parport_release(device->pardev);
err_unregister_dev:
parport_unregister_device(device->pardev);
err_free:
kfree(device);
}
/* called by parport when a port goes away: identify our pardevice by
 * its name, then tear everything down in reverse order of
 * parport_attach() */
static void parport_detach(struct parport *port)
{
struct pardevice *pardev = port->cad;
struct pps_client_pp *device;
/* FIXME: oooh, this is ugly! */
if (strcmp(pardev->name, KBUILD_MODNAME))
/* not our port */
return;
device = pardev->private;
port->ops->disable_irq(port);
pps_unregister_source(device->pps);
parport_release(pardev);
parport_unregister_device(pardev);
kfree(device);
}
static struct parport_driver pps_parport_driver = {
.name = KBUILD_MODNAME,
.attach = parport_attach,
.detach = parport_detach,
};
/* module stuff */

/*
 * Module init: validate the clear_wait parameter and register with the
 * parport subsystem (which then calls parport_attach for each port).
 */
static int __init pps_parport_init(void)
{
	int ret;

	pr_info(DRVDESC "\n");

	/* reject an out-of-range module parameter up front */
	if (clear_wait > CLEAR_WAIT_MAX) {
		/* fix: the message previously read "should be not greater
		 * then", which was ungrammatical */
		pr_err("clear_wait value must not be greater"
				" than %d\n", CLEAR_WAIT_MAX);
		return -EINVAL;
	}

	ret = parport_register_driver(&pps_parport_driver);
	if (ret) {
		pr_err("unable to register with parport\n");
		return ret;
	}

	return  0;
}
/* module unload: unregister the parport driver */
static void __exit pps_parport_exit(void)
{
parport_unregister_driver(&pps_parport_driver);
}
module_init(pps_parport_init);
module_exit(pps_parport_exit);
MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
MODULE_DESCRIPTION(DRVDESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
Red680812/X920D | arch/arm/mach-exynos/platsmp.c | 4330 | 4783 | /* linux/arch/arm/mach-exynos4/platsmp.c
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Cloned from linux/arch/arm/mach-vexpress/platsmp.c
*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <mach/hardware.h>
#include <mach/regs-clock.h>
#include <mach/regs-pmu.h>
#include <plat/cpu.h>
extern void exynos4_secondary_startup(void);
#define CPU1_BOOT_REG (samsung_rev() == EXYNOS4210_REV_1_1 ? \
S5P_INFORM5 : S5P_VA_SYSRAM)
/*
* control for which core is the next to come out of the secondary
* boot "holding pen"
*/
volatile int __cpuinitdata pen_release = -1;
/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
pen_release = val;
smp_wmb(); /* order the store before the cache maintenance below */
__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); /* push past the outer (L2) cache too */
}
static void __iomem *scu_base_addr(void)
{
	/* the SCU registers live at a fixed static mapping */
	return (void __iomem *)S5P_VA_SCU;
}
static DEFINE_SPINLOCK(boot_lock);
/* runs on the secondary CPU early in its bring-up, before it enters
 * the scheduler */
void __cpuinit platform_secondary_init(unsigned int cpu)
{
/*
 * if any interrupts are already enabled for the primary
 * core (e.g. timer irq), then they will not have been enabled
 * for us: do so
 */
gic_secondary_init(0);
/*
 * let the primary processor know we're out of the
 * pen, then head off into the C entry point
 */
write_pen_release(-1);
/*
 * Synchronise with the boot thread.
 */
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
}
/*
 * Power up and release a secondary CPU.  Returns 0 on success,
 * -ETIMEDOUT if the core never powers on, -ENOSYS if it powers on but
 * never leaves the holding pen.
 */
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(cpu_logical_map(cpu));

	if (!(__raw_readl(S5P_ARM_CORE1_STATUS) & S5P_CORE_LOCAL_PWR_EN)) {
		__raw_writel(S5P_CORE_LOCAL_PWR_EN,
			     S5P_ARM_CORE1_CONFIGURATION);

		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
		while ((__raw_readl(S5P_ARM_CORE1_STATUS)
			& S5P_CORE_LOCAL_PWR_EN) != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout-- == 0)
				break;

			mdelay(1);
		}

		/*
		 * Bug fix: re-check the power status instead of testing
		 * "timeout == 0".  The old check could never fire on a real
		 * timeout (the final "timeout--" wraps the counter to
		 * ULONG_MAX) and fired spuriously if the core powered up
		 * exactly on the last poll.  Also add the missing newline
		 * to the error message.
		 */
		if ((__raw_readl(S5P_ARM_CORE1_STATUS)
		     & S5P_CORE_LOCAL_PWR_EN) != S5P_CORE_LOCAL_PWR_EN) {
			printk(KERN_ERR "cpu1 power enable failed\n");
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}
	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();

		__raw_writel(virt_to_phys(exynos4_secondary_startup),
			     CPU1_BOOT_REG);
		gic_raise_softirq(cpumask_of(cpu), 1);

		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
void __iomem *scu_base = scu_base_addr();
unsigned int i, ncores;
if (soc_is_exynos5250())
ncores = 2; /* exynos5250 is hard-wired to two cores here */
else
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
/* sanity check */
if (ncores > nr_cpu_ids) {
pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
ncores, nr_cpu_ids);
ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
set_cpu_possible(i, true);
/* cross-CPU calls (IPIs) are delivered through the GIC */
set_smp_cross_call(gic_raise_softirq);
}
/* prepare for SMP boot: enable the SCU (skipped on exynos5250, which
 * takes the hard-wired dual-core path above) and publish the secondary
 * entry point for the boot monitor */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
if (!soc_is_exynos5250())
scu_enable(scu_base_addr());
/*
 * Write the address of secondary startup into the
 * system-wide flags register. The boot monitor waits
 * until it receives a soft interrupt, and then the
 * secondary CPU branches to this address.
 */
__raw_writel(virt_to_phys(exynos4_secondary_startup),
CPU1_BOOT_REG);
}
| gpl-2.0 |
Pulshen/XKernel | drivers/bluetooth/dtl1_cs.c | 4586 | 13698 | /*
*
* A driver for Nokia Connectivity Card DTL-1 devices
*
* Copyright (C) 2001-2002 Marcel Holtmann <marcel@holtmann.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The initial developer of the original code is David A. Hinds
* <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/io.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
/* ======================== Module parameters ======================== */
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth driver for Nokia Connectivity Card DTL-1");
MODULE_LICENSE("GPL");
/* ======================== Local structures ======================== */
typedef struct dtl1_info_t {
struct pcmcia_device *p_dev; /* underlying PCMCIA device */
struct hci_dev *hdev; /* Bluetooth HCI device */
spinlock_t lock; /* For serializing operations */
unsigned long flowmask; /* HCI flow mask */
int ri_latch; /* NOTE(review): RI line latch? not used in this chunk - confirm */
struct sk_buff_head txq; /* outgoing frames, drained by dtl1_write_wakeup() */
unsigned long tx_state; /* XMIT_* flag bits */
unsigned long rx_state; /* RECV_WAIT_* receive state */
unsigned long rx_count; /* receive byte counter (rx path not shown here) */
struct sk_buff *rx_skb; /* frame currently being reassembled */
} dtl1_info_t;
static int dtl1_config(struct pcmcia_device *link);
static void dtl1_release(struct pcmcia_device *link);
static void dtl1_detach(struct pcmcia_device *p_dev);
/* Transmit states */
#define XMIT_SENDING 1
#define XMIT_WAKEUP 2
#define XMIT_WAITING 8
/* Receiver States */
#define RECV_WAIT_NSH 0
#define RECV_WAIT_DATA 1
typedef struct {
u8 type;
u8 zero;
u16 len;
} __packed nsh_t; /* Nokia Specific Header */
#define NSHL 4 /* Nokia Specific Header Length */
/* ======================== Interrupt handling ======================== */
/* Push up to @fifo_size bytes of @buf into the UART transmit FIFO.
 * Returns the number of bytes actually written; 0 when the transmit
 * holding register is not yet empty. */
static int dtl1_write(unsigned int iobase, int fifo_size, __u8 *buf, int len)
{
	int sent = 0;

	/* Only start a burst once the Tx FIFO has drained */
	if (!(inb(iobase + UART_LSR) & UART_LSR_THRE))
		return 0;

	/* Feed bytes until either the FIFO budget or the frame runs out */
	while (sent < len && fifo_size-- > 0)
		outb(buf[sent++], iobase + UART_TX);

	return sent;
}
/* Drain info->txq to the UART, one FIFO burst per frame.
 * Concurrent callers are serialized through the XMIT_SENDING/XMIT_WAKEUP
 * bits: only one context runs the loop, others just request another pass. */
static void dtl1_write_wakeup(dtl1_info_t *info)
{
	if (!info) {
		BT_ERR("Unknown device");
		return;
	}

	/* TX is flow-controlled off: remember there is work and bail out */
	if (test_bit(XMIT_WAITING, &(info->tx_state))) {
		set_bit(XMIT_WAKEUP, &(info->tx_state));
		return;
	}

	/* Another context already owns the send loop: ask for one more pass */
	if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) {
		set_bit(XMIT_WAKEUP, &(info->tx_state));
		return;
	}

	do {
		register unsigned int iobase = info->p_dev->resource[0]->start;
		register struct sk_buff *skb;
		register int len;

		clear_bit(XMIT_WAKEUP, &(info->tx_state));

		/* NOTE(review): returning here leaves XMIT_SENDING set, but
		 * the card is gone at this point anyway. */
		if (!pcmcia_dev_present(info->p_dev))
			return;

		if (!(skb = skb_dequeue(&(info->txq))))
			break;

		/* Send frame */
		len = dtl1_write(iobase, 32, skb->data, skb->len);

		if (len == skb->len) {
			/* Whole frame fit into the FIFO: wait for the card
			 * before sending the next one. */
			set_bit(XMIT_WAITING, &(info->tx_state));
			kfree_skb(skb);
		} else {
			/* Partial write: requeue the remainder at the head
			 * so it goes out first. */
			skb_pull(skb, len);
			skb_queue_head(&(info->txq), skb);
		}

		info->hdev->stat.byte_tx += len;

	} while (test_bit(XMIT_WAKEUP, &(info->tx_state)));

	clear_bit(XMIT_SENDING, &(info->tx_state));
}
/* Handle a Nokia control packet (NSH type 0x80) from the card.
 * The first payload byte is the flow mask; a 0 -> non-0 transition of
 * its low three bits re-enables transmission. Consumes @skb. */
static void dtl1_control(dtl1_info_t *info, struct sk_buff *skb)
{
	u8 flowmask;
	int i;

	/* Guard against an empty payload before reading the flow mask;
	 * dtl1_receive() can deliver a zero-length packet if the header
	 * advertised len == 0. */
	if (skb->len < 1) {
		kfree_skb(skb);
		return;
	}
	flowmask = *(u8 *)skb->data;

	printk(KERN_INFO "Bluetooth: Nokia control data =");
	for (i = 0; i < skb->len; i++) {
		printk(" %02x", skb->data[i]);
	}
	printk("\n");

	/* transition to active state */
	if (((info->flowmask & 0x07) == 0) && ((flowmask & 0x07) != 0)) {
		clear_bit(XMIT_WAITING, &(info->tx_state));
		dtl1_write_wakeup(info);
	}

	info->flowmask = flowmask;

	kfree_skb(skb);
}
/* Pull bytes out of the UART RX FIFO and reassemble NSH-framed HCI
 * packets. Runs from the interrupt handler while UART_LSR_DR is set.
 * State machine: RECV_WAIT_NSH collects the 4-byte header, then
 * RECV_WAIT_DATA collects len (+1 pad if odd) payload bytes. */
static void dtl1_receive(dtl1_info_t *info)
{
	unsigned int iobase;
	nsh_t *nsh;
	int boguscount = 0;

	if (!info) {
		BT_ERR("Unknown device");
		return;
	}

	iobase = info->p_dev->resource[0]->start;

	do {
		info->hdev->stat.byte_rx++;

		/* Allocate packet */
		if (info->rx_skb == NULL)
			if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
				BT_ERR("Can't allocate mem for new packet");
				info->rx_state = RECV_WAIT_NSH;
				info->rx_count = NSHL;
				return;
			}

		/* Append one byte; the NSH always sits at the skb head */
		*skb_put(info->rx_skb, 1) = inb(iobase + UART_RX);
		nsh = (nsh_t *)info->rx_skb->data;

		info->rx_count--;

		if (info->rx_count == 0) {
			switch (info->rx_state) {
			case RECV_WAIT_NSH:
				/* Header complete: payload is padded to an
				 * even number of bytes on the wire */
				info->rx_state = RECV_WAIT_DATA;
				info->rx_count = nsh->len + (nsh->len & 0x0001);
				break;
			case RECV_WAIT_DATA:
				bt_cb(info->rx_skb)->pkt_type = nsh->type;

				/* remove PAD byte if it exists */
				if (nsh->len & 0x0001) {
					info->rx_skb->tail--;
					info->rx_skb->len--;
				}

				/* remove NSH */
				skb_pull(info->rx_skb, NSHL);

				switch (bt_cb(info->rx_skb)->pkt_type) {
				case 0x80:
					/* control data for the Nokia Card */
					dtl1_control(info, info->rx_skb);
					break;
				case 0x82:
				case 0x83:
				case 0x84:
					/* send frame to the HCI layer */
					info->rx_skb->dev = (void *) info->hdev;
					/* low nibble is the HCI packet type */
					bt_cb(info->rx_skb)->pkt_type &= 0x0f;
					hci_recv_frame(info->rx_skb);
					break;
				default:
					/* unknown packet */
					BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
					kfree_skb(info->rx_skb);
					break;
				}

				/* Restart the state machine for the next frame */
				info->rx_state = RECV_WAIT_NSH;
				info->rx_count = NSHL;
				info->rx_skb = NULL;
				break;
			}
		}

		/* Make sure we don't stay here too long */
		if (boguscount++ > 32)
			break;

	} while (inb(iobase + UART_LSR) & UART_LSR_DR);
}
/* Shared IRQ handler: dispatch UART interrupt causes (line status,
 * receive, transmit-ready) and track Ring Indicator transitions, which
 * the card appears to use as a transmit flow-control signal. */
static irqreturn_t dtl1_interrupt(int irq, void *dev_inst)
{
	dtl1_info_t *info = dev_inst;
	unsigned int iobase;
	unsigned char msr;
	int boguscount = 0;
	int iir, lsr;
	irqreturn_t r = IRQ_NONE;

	if (!info || !info->hdev)
		/* our irq handler is shared */
		return IRQ_NONE;

	iobase = info->p_dev->resource[0]->start;

	spin_lock(&(info->lock));

	/* Service interrupt causes until the IIR reports none pending */
	iir = inb(iobase + UART_IIR) & UART_IIR_ID;
	while (iir) {
		r = IRQ_HANDLED;

		/* Clear interrupt */
		lsr = inb(iobase + UART_LSR);

		switch (iir) {
		case UART_IIR_RLSI:
			BT_ERR("RLSI");
			break;
		case UART_IIR_RDI:
			/* Receive interrupt */
			dtl1_receive(info);
			break;
		case UART_IIR_THRI:
			if (lsr & UART_LSR_THRE) {
				/* Transmitter ready for data */
				dtl1_write_wakeup(info);
			}
			break;
		default:
			BT_ERR("Unhandled IIR=%#x", iir);
			break;
		}

		/* Make sure we don't stay here too long */
		if (boguscount++ > 100)
			break;

		iir = inb(iobase + UART_IIR) & UART_IIR_ID;
	}

	/* A change on the RI modem line un-throttles transmission */
	msr = inb(iobase + UART_MSR);

	if (info->ri_latch ^ (msr & UART_MSR_RI)) {
		info->ri_latch = msr & UART_MSR_RI;
		clear_bit(XMIT_WAITING, &(info->tx_state));
		dtl1_write_wakeup(info);
		r = IRQ_HANDLED;
	}

	spin_unlock(&(info->lock));

	return r;
}
/* ======================== HCI interface ======================== */


/* HCI core open hook: mark the device running; no hardware action. */
static int dtl1_hci_open(struct hci_dev *hdev)
{
	set_bit(HCI_RUNNING, &hdev->flags);

	return 0;
}
/* HCI core flush hook: discard any frames still queued for the UART. */
static int dtl1_hci_flush(struct hci_dev *hdev)
{
	dtl1_info_t *info = hdev->driver_data;

	/* Drop TX queue */
	skb_queue_purge(&info->txq);

	return 0;
}
/* HCI core close hook: flush pending TX, but only on the first close. */
static int dtl1_hci_close(struct hci_dev *hdev)
{
	if (test_and_clear_bit(HCI_RUNNING, &hdev->flags))
		dtl1_hci_flush(hdev);

	return 0;
}
/* HCI core transmit hook: wrap @skb in a Nokia Specific Header, pad
 * odd-length payloads to an even size, queue the new frame and kick the
 * transmitter. Consumes @skb on success; returns 0 or a negative errno. */
static int dtl1_hci_send_frame(struct sk_buff *skb)
{
	dtl1_info_t *info;
	struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
	struct sk_buff *s;
	nsh_t nsh;

	if (!hdev) {
		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
		return -ENODEV;
	}

	info = (dtl1_info_t *)(hdev->driver_data);

	/* Map the HCI packet type onto the card's NSH type codes */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		nsh.type = 0x81;
		break;
	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		nsh.type = 0x82;
		break;
	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		nsh.type = 0x83;
		break;
	default:
		return -EILSEQ;
	}

	nsh.zero = 0;
	nsh.len = skb->len;

	/* Room for header, payload and an optional PAD byte */
	s = bt_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC);
	if (!s)
		return -ENOMEM;

	skb_reserve(s, NSHL);
	skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len);
	if (skb->len & 0x0001)
		*skb_put(s, 1) = 0;	/* PAD */

	/* Prepend skb with Nokia frame header and queue */
	memcpy(skb_push(s, NSHL), &nsh, NSHL);

	skb_queue_tail(&(info->txq), s);

	dtl1_write_wakeup(info);

	kfree_skb(skb);

	return 0;
}
/* No per-device destruct work; all cleanup happens in dtl1_close(). */
static void dtl1_hci_destruct(struct hci_dev *hdev)
{
}

/* No driver-specific ioctls are implemented. */
static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;
}
/* ======================== Card services HCI interaction ======================== */

/* Bring up the UART and register a new HCI device for the card.
 * Called from dtl1_config() after I/O and IRQ resources are set up.
 * Returns 0 on success or a negative errno. */
static int dtl1_open(dtl1_info_t *info)
{
	unsigned long flags;
	unsigned int iobase = info->p_dev->resource[0]->start;
	struct hci_dev *hdev;

	spin_lock_init(&(info->lock));

	skb_queue_head_init(&(info->txq));

	info->rx_state = RECV_WAIT_NSH;
	info->rx_count = NSHL;
	info->rx_skb = NULL;

	/* Start out flow-controlled; cleared on the first RI transition
	 * or control packet from the card. */
	set_bit(XMIT_WAITING, &(info->tx_state));

	/* Initialize HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		BT_ERR("Can't allocate HCI device");
		return -ENOMEM;
	}

	info->hdev = hdev;

	hdev->bus = HCI_PCCARD;
	hdev->driver_data = info;
	SET_HCIDEV_DEV(hdev, &info->p_dev->dev);

	hdev->open = dtl1_hci_open;
	hdev->close = dtl1_hci_close;
	hdev->flush = dtl1_hci_flush;
	hdev->send = dtl1_hci_send_frame;
	hdev->destruct = dtl1_hci_destruct;
	hdev->ioctl = dtl1_hci_ioctl;

	hdev->owner = THIS_MODULE;

	spin_lock_irqsave(&(info->lock), flags);

	/* Reset UART */
	outb(0, iobase + UART_MCR);

	/* Turn off interrupts */
	outb(0, iobase + UART_IER);

	/* Initialize UART: 8N1, modem control lines asserted */
	outb(UART_LCR_WLEN8, iobase + UART_LCR);	/* Reset DLAB */
	outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR);

	/* Snapshot RI so the interrupt handler can detect transitions */
	info->ri_latch = inb(info->p_dev->resource[0]->start + UART_MSR)
			 & UART_MSR_RI;

	/* Turn on interrupts */
	outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);

	spin_unlock_irqrestore(&(info->lock), flags);

	/* Timeout before it is safe to send the first HCI packet */
	msleep(2000);

	/* Register HCI device */
	if (hci_register_dev(hdev) < 0) {
		BT_ERR("Can't register HCI device");
		info->hdev = NULL;
		hci_free_dev(hdev);
		return -ENODEV;
	}

	return 0;
}
/* Shut down the UART and unregister/free the HCI device.
 * Safe to call when dtl1_open() never succeeded (info->hdev == NULL). */
static int dtl1_close(dtl1_info_t *info)
{
	unsigned long flags;
	unsigned int iobase = info->p_dev->resource[0]->start;
	struct hci_dev *hdev = info->hdev;

	if (!hdev)
		return -ENODEV;

	dtl1_hci_close(hdev);

	spin_lock_irqsave(&(info->lock), flags);

	/* Reset UART */
	outb(0, iobase + UART_MCR);

	/* Turn off interrupts */
	outb(0, iobase + UART_IER);

	spin_unlock_irqrestore(&(info->lock), flags);

	if (hci_unregister_dev(hdev) < 0)
		BT_ERR("Can't unregister HCI device %s", hdev->name);

	hci_free_dev(hdev);

	return 0;
}
/* PCMCIA probe callback: allocate per-card state, link it to the PCMCIA
 * device and kick off resource configuration. */
static int dtl1_probe(struct pcmcia_device *link)
{
	dtl1_info_t *info;

	/* Create new info device */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->p_dev = link;
	link->priv = info;

	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;

	return dtl1_config(link);
}
/* PCMCIA remove callback: tear down the device and free driver state. */
static void dtl1_detach(struct pcmcia_device *link)
{
	dtl1_info_t *info = link->priv;

	dtl1_release(link);
	kfree(info);
}
/* pcmcia_loop_config() callback: accept only configurations with no
 * second I/O window and a first window of at least 8 ports (the UART
 * register bank).
 *
 * Fix: the second clause previously tested resource[1]->end < 8; since
 * end is unsigned, "(end) || (end < 8)" on the same resource is always
 * true, so every configuration was rejected. The size check belongs on
 * resource[0]. */
static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data)
{
	if ((p_dev->resource[1]->end) || (p_dev->resource[0]->end < 8))
		return -ENODEV;

	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;

	return pcmcia_request_io(p_dev);
}
/* Claim I/O and IRQ resources for the card and open the HCI device.
 * On any failure, everything acquired so far is released and -ENODEV is
 * returned (matching the original behavior). */
static int dtl1_config(struct pcmcia_device *link)
{
	dtl1_info_t *info = link->priv;
	int ret;

	/* Look for a generic full-sized (8 port) I/O window */
	link->resource[0]->end = 8;
	if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0)
		goto failed;

	ret = pcmcia_request_irq(link, dtl1_interrupt);
	if (ret)
		goto failed;

	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

	if (dtl1_open(info))
		goto failed;

	return 0;

failed:
	dtl1_release(link);
	return -ENODEV;
}
/* Undo dtl1_config(): close the HCI side, then release PCMCIA resources. */
static void dtl1_release(struct pcmcia_device *link)
{
	dtl1_info_t *info = link->priv;

	dtl1_close(info);

	pcmcia_disable_device(link);
}
/* Cards this driver binds to: Nokia DTL-1/DTL-4 and Socket CF variants */
static struct pcmcia_device_id dtl1_ids[] = {
	PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-1", 0xe1bfdd64, 0xe168480d),
	PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-4", 0xe1bfdd64, 0x9102bc82),
	PCMCIA_DEVICE_PROD_ID12("Socket", "CF", 0xb38bcc2e, 0x44ebf863),
	PCMCIA_DEVICE_PROD_ID12("Socket", "CF+ Personal Network Card", 0xb38bcc2e, 0xe732bae3),
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, dtl1_ids);

static struct pcmcia_driver dtl1_driver = {
	.owner		= THIS_MODULE,
	.name		= "dtl1_cs",
	.probe		= dtl1_probe,
	.remove		= dtl1_detach,
	.id_table	= dtl1_ids,
};

/* Module init/exit: just (un)register the PCMCIA driver */
static int __init init_dtl1_cs(void)
{
	return pcmcia_register_driver(&dtl1_driver);
}


static void __exit exit_dtl1_cs(void)
{
	pcmcia_unregister_driver(&dtl1_driver);
}

module_init(init_dtl1_cs);
module_exit(exit_dtl1_cs);
| gpl-2.0 |
spanish33/android_kernel_oneplus_msm8974 | net/wireless/sysfs.c | 5354 | 3466 | /*
* This file provides /sys/class/ieee80211/<wiphy name>/
* and some default attributes.
*
* Copyright 2005-2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
*
* This file is GPLv2 as found in COPYING.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/nl80211.h>
#include <linux/rtnetlink.h>
#include <net/cfg80211.h>
#include "sysfs.h"
#include "core.h"
/* Map the struct device embedded in a wiphy back to its registered device */
static inline struct cfg80211_registered_device *dev_to_rdev(
	struct device *dev)
{
	return container_of(dev, struct cfg80211_registered_device, wiphy.dev);
}

/* Generate a read-only sysfs show handler that printf-formats a single
 * member of the registered device. */
#define SHOW_FMT(name, fmt, member)					\
static ssize_t name ## _show(struct device *dev,			\
			     struct device_attribute *attr,		\
			     char *buf)					\
{									\
	return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member);	\
}

SHOW_FMT(index, "%d", wiphy_idx);
SHOW_FMT(macaddress, "%pM", wiphy.perm_addr);
SHOW_FMT(address_mask, "%pM", wiphy.addr_mask);
/* Show the wiphy's device name (e.g. "phy0"). */
static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	struct device *wdev = &dev_to_rdev(dev)->wiphy.dev;

	return sprintf(buf, "%s\n", dev_name(wdev));
}
/* List all MAC addresses of the wiphy, one "%pM" per line; falls back
 * to the permanent address when no explicit address list exists. */
static ssize_t addresses_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy;
	char *pos = buf;
	int i;

	if (!wiphy->addresses)
		return sprintf(pos, "%pM\n", wiphy->perm_addr);

	for (i = 0; i < wiphy->n_addresses; i++)
		pos += sprintf(pos, "%pM\n", &wiphy->addresses[i].addr);

	return pos - buf;
}
/* Attributes exposed under /sys/class/ieee80211/<phy>/ */
static struct device_attribute ieee80211_dev_attrs[] = {
	__ATTR_RO(index),
	__ATTR_RO(macaddress),
	__ATTR_RO(address_mask),
	__ATTR_RO(addresses),
	__ATTR_RO(name),
	{}
};

/* Device release callback: frees the whole registered device once the
 * last reference on the embedded struct device is dropped. */
static void wiphy_dev_release(struct device *dev)
{
	struct cfg80211_registered_device *rdev = dev_to_rdev(dev);

	cfg80211_dev_free(rdev);
}

#ifdef CONFIG_HOTPLUG
/* Uevent hook: currently adds no environment variables. */
static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	/* TODO, we probably need stuff here */
	return 0;
}
#endif
/* Class suspend hook: record the suspend time (used to age scan results
 * on resume) and forward to the driver's suspend op under RTNL. */
static int wiphy_suspend(struct device *dev, pm_message_t state)
{
	struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
	int ret = 0;

	rdev->suspend_at = get_seconds();

	if (rdev->ops->suspend) {
		rtnl_lock();
		/* Only call into the driver while it is still registered */
		if (rdev->wiphy.registered)
			ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
		rtnl_unlock();
	}

	return ret;
}
/* Class resume hook: age cached BSS entries by the time spent suspended,
 * then forward to the driver's resume op under RTNL. */
static int wiphy_resume(struct device *dev)
{
	struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
	int ret = 0;

	/* Age scan results with time spent in suspend */
	spin_lock_bh(&rdev->bss_lock);
	cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
	spin_unlock_bh(&rdev->bss_lock);

	if (rdev->ops->resume) {
		rtnl_lock();
		/* Only call into the driver while it is still registered */
		if (rdev->wiphy.registered)
			ret = rdev->ops->resume(&rdev->wiphy);
		rtnl_unlock();
	}

	return ret;
}
/* Report the network namespace the wiphy is registered in, so sysfs can
 * scope the device to that namespace. */
static const void *wiphy_namespace(struct device *d)
{
	return wiphy_net(container_of(d, struct wiphy, dev));
}
/* The ieee80211 device class; one device is created per registered wiphy. */
struct class ieee80211_class = {
	.name = "ieee80211",
	.owner = THIS_MODULE,
	.dev_release = wiphy_dev_release,
	.dev_attrs = ieee80211_dev_attrs,
#ifdef CONFIG_HOTPLUG
	.dev_uevent = wiphy_uevent,
#endif
	.suspend = wiphy_suspend,
	.resume = wiphy_resume,
	.ns_type = &net_ns_type_operations,
	.namespace = wiphy_namespace,
};

/* Register the class; called at cfg80211 init time. */
int wiphy_sysfs_init(void)
{
	return class_register(&ieee80211_class);
}

/* Unregister the class; called on cfg80211 unload. */
void wiphy_sysfs_exit(void)
{
	class_unregister(&ieee80211_class);
}
| gpl-2.0 |
yexihu/kernel-msm | drivers/media/dvb/frontends/or51132.c | 8426 | 17383 | /*
* Support for OR51132 (pcHDTV HD-3000) - VSB/QAM
*
*
* Copyright (C) 2007 Trent Piepho <xyzzy@speakeasy.org>
*
* Copyright (C) 2005 Kirk Lapray <kirk_lapray@bigfoot.com>
*
* Based on code from Jack Kelliher (kelliher@xmission.com)
* Copyright (C) 2002 & pcHDTV, inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
/*
* This driver needs two external firmware files. Please copy
* "dvb-fe-or51132-vsb.fw" and "dvb-fe-or51132-qam.fw" to
* /usr/lib/hotplug/firmware/ or /lib/firmware/
* (depending on configuration of firmware hotplug).
*/
#define OR51132_VSB_FIRMWARE "dvb-fe-or51132-vsb.fw"
#define OR51132_QAM_FIRMWARE "dvb-fe-or51132-qam.fw"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include "dvb_math.h"
#include "dvb_frontend.h"
#include "or51132.h"
/* Module-level debug switch; enables dprintk() output when non-zero. */
static int debug;
#define dprintk(args...) \
	do { \
		if (debug) printk(KERN_DEBUG "or51132: " args); \
	} while (0)


/* Per-demod driver state, hung off dvb_frontend.demodulator_priv. */
struct or51132_state
{
	struct i2c_adapter* i2c;

	/* Configuration settings */
	const struct or51132_config* config;

	struct dvb_frontend frontend;

	/* Demodulator private data */
	fe_modulation_t current_modulation;	/* -1 until first tune */
	u32 snr; /* Result of last SNR calculation (8.24 fixed point) */

	/* Tuner private data */
	u32 current_frequency;	/* -1 until first tune */
};
/* Write buffer to demod. Returns 0 on success, -EREMOTEIO on I2C failure. */
static int or51132_writebuf(struct or51132_state *state, const u8 *buf, int len)
{
	int err;
	struct i2c_msg msg = { .addr = state->config->demod_address,
			       .flags = 0, .buf = (u8*)buf, .len = len };

	/* msleep(20); */ /* doesn't appear to be necessary */
	if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
		printk(KERN_WARNING "or51132: I2C write (addr 0x%02x len %d) error: %d\n",
		       msg.addr, msg.len, err);
		return -EREMOTEIO;
	}
	return 0;
}

/* Write constant bytes, e.g. or51132_writebytes(state, 0x04, 0x42, 0x00);
   Less code and more efficient that loading a buffer on the stack with
   the bytes to send and then calling or51132_writebuf() on that. */
#define or51132_writebytes(state, data...)  \
	({ static const u8 _data[] = {data}; \
	   or51132_writebuf(state, _data, sizeof(_data)); })
/* Read data from demod into buffer. Returns 0 on success,
   -EREMOTEIO on I2C failure. */
static int or51132_readbuf(struct or51132_state *state, u8 *buf, int len)
{
	int err;
	struct i2c_msg msg = { .addr = state->config->demod_address,
			       .flags = I2C_M_RD, .buf = buf, .len = len };

	/* msleep(20); */ /* doesn't appear to be necessary */
	if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
		printk(KERN_WARNING "or51132: I2C read (addr 0x%02x len %d) error: %d\n",
		       msg.addr, msg.len, err);
		return -EREMOTEIO;
	}
	return 0;
}
/* Reads a 16-bit demod register. Returns <0 on error.
   The value comes back little-endian (low byte first). */
static int or51132_readreg(struct or51132_state *state, u8 reg)
{
	u8 buf[2] = { 0x04, reg };
	struct i2c_msg msg[2] = {
		{.addr = state->config->demod_address, .flags = 0,
		 .buf = buf, .len = 2 },
		{.addr = state->config->demod_address, .flags = I2C_M_RD,
		 .buf = buf, .len = 2 }};
	int err;

	if ((err = i2c_transfer(state->i2c, msg, 2)) != 2) {
		printk(KERN_WARNING "or51132: I2C error reading register %d: %d\n",
		       reg, err);
		return -EREMOTEIO;
	}
	return buf[0] | (buf[1] << 8);
}
/* Upload the two-part firmware image to the demod, start it, then read
 * back and log the microcode version to confirm it is really running.
 * @fw layout: two little-endian u32 sizes at offsets 0 and 4, followed
 * by image A and image B. Returns 0 on success or a negative I2C error.
 * (Also removes the stray ';' that followed the original function body.) */
static int or51132_load_firmware (struct dvb_frontend* fe, const struct firmware *fw)
{
	struct or51132_state* state = fe->demodulator_priv;
	static const u8 run_buf[] = {0x7F,0x01};
	u8 rec_buf[8];
	u32 firmwareAsize, firmwareBsize;
	int i,ret;

	dprintk("Firmware is %Zd bytes\n",fw->size);

	/* Get size of firmware A and B */
	firmwareAsize = le32_to_cpu(*((__le32*)fw->data));
	dprintk("FirmwareA is %i bytes\n",firmwareAsize);
	firmwareBsize = le32_to_cpu(*((__le32*)(fw->data+4)));
	dprintk("FirmwareB is %i bytes\n",firmwareBsize);

	/* Upload firmware */
	if ((ret = or51132_writebuf(state, &fw->data[8], firmwareAsize))) {
		printk(KERN_WARNING "or51132: load_firmware error 1\n");
		return ret;
	}
	if ((ret = or51132_writebuf(state, &fw->data[8+firmwareAsize],
				    firmwareBsize))) {
		printk(KERN_WARNING "or51132: load_firmware error 2\n");
		return ret;
	}

	/* The "run" command is issued twice, matching the original
	 * vendor init sequence. */
	if ((ret = or51132_writebuf(state, run_buf, 2))) {
		printk(KERN_WARNING "or51132: load_firmware error 3\n");
		return ret;
	}
	if ((ret = or51132_writebuf(state, run_buf, 2))) {
		printk(KERN_WARNING "or51132: load_firmware error 4\n");
		return ret;
	}

	/* 50ms for operation to begin */
	msleep(50);

	/* Read back ucode version to be sure we loaded correctly and are really up and running */
	/* Get uCode version */
	if ((ret = or51132_writebytes(state, 0x10, 0x10, 0x00))) {
		printk(KERN_WARNING "or51132: load_firmware error a\n");
		return ret;
	}
	if ((ret = or51132_writebytes(state, 0x04, 0x17))) {
		printk(KERN_WARNING "or51132: load_firmware error b\n");
		return ret;
	}
	if ((ret = or51132_writebytes(state, 0x00, 0x00))) {
		printk(KERN_WARNING "or51132: load_firmware error c\n");
		return ret;
	}
	for (i=0;i<4;i++) {
		/* Once upon a time, this command might have had something
		   to do with getting the firmware version, but it's
		   not used anymore:
		   {0x04,0x00,0x30,0x00,i+1} */
		/* Read 8 bytes, two bytes at a time */
		if ((ret = or51132_readbuf(state, &rec_buf[i*2], 2))) {
			printk(KERN_WARNING
			       "or51132: load_firmware error d - %d\n",i);
			return ret;
		}
	}

	printk(KERN_WARNING
	       "or51132: Version: %02X%02X%02X%02X-%02X%02X%02X%02X (%02X%01X-%01X-%02X%01X-%01X)\n",
	       rec_buf[1],rec_buf[0],rec_buf[3],rec_buf[2],
	       rec_buf[5],rec_buf[4],rec_buf[7],rec_buf[6],
	       rec_buf[3],rec_buf[2]>>4,rec_buf[2]&0x0f,
	       rec_buf[5],rec_buf[4]>>4,rec_buf[4]&0x0f);

	if ((ret = or51132_writebytes(state, 0x10, 0x00, 0x00))) {
		printk(KERN_WARNING "or51132: load_firmware error e\n");
		return ret;
	}
	return 0;
}
/* Nothing to do at init time; all setup happens on the first tune. */
static int or51132_init(struct dvb_frontend* fe)
{
	return 0;
}

/* BER reporting is not implemented; always report 0. */
static int or51132_read_ber(struct dvb_frontend* fe, u32* ber)
{
	*ber = 0;
	return 0;
}

/* Uncorrected-block reporting is not implemented; always report 0. */
static int or51132_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
	*ucblocks = 0;
	return 0;
}

/* No low-power handling; nothing to do. */
static int or51132_sleep(struct dvb_frontend* fe)
{
	return 0;
}
/* Program the demod's Receiver 1 and Receiver 6 registers for the
 * modulation stored in state->current_modulation. Returns 0 on success,
 * -EINVAL for unsupported modulations, -EREMOTEIO on I2C failure. */
static int or51132_setmode(struct dvb_frontend* fe)
{
	struct or51132_state* state = fe->demodulator_priv;
	u8 cmd_buf1[3] = {0x04, 0x01, 0x5f};	/* Receiver 1 write command */
	u8 cmd_buf2[3] = {0x1c, 0x00, 0 };	/* Receiver 6 write command */

	dprintk("setmode %d\n",(int)state->current_modulation);

	switch (state->current_modulation) {
	case VSB_8:
		/* Auto CH, Auto NTSC rej, MPEGser, MPEG2tr, phase noise-high */
		cmd_buf1[2] = 0x50;
		/* REC MODE inv IF spectrum, Normal */
		cmd_buf2[1] = 0x03;
		/* Channel MODE ATSC/VSB8 */
		cmd_buf2[2] = 0x06;
		break;
	/* All QAM modes are:
	   Auto-deinterleave; MPEGser, MPEG2tr, phase noise-high
	   REC MODE Normal Carrier Lock */
	case QAM_AUTO:
		/* Channel MODE Auto QAM64/256 */
		cmd_buf2[2] = 0x4f;
		break;
	case QAM_256:
		/* Channel MODE QAM256 */
		cmd_buf2[2] = 0x45;
		break;
	case QAM_64:
		/* Channel MODE QAM64 */
		cmd_buf2[2] = 0x43;
		break;
	default:
		printk(KERN_WARNING
		       "or51132: setmode: Modulation set to unsupported value (%d)\n",
		       state->current_modulation);
		return -EINVAL;
	}

	/* Set Receiver 1 register */
	if (or51132_writebuf(state, cmd_buf1, 3)) {
		printk(KERN_WARNING "or51132: set_mode error 1\n");
		return -EREMOTEIO;
	}
	dprintk("set #1 to %02x\n", cmd_buf1[2]);

	/* Set operation mode in Receiver 6 register */
	if (or51132_writebuf(state, cmd_buf2, 3)) {
		printk(KERN_WARNING "or51132: set_mode error 2\n");
		return -EREMOTEIO;
	}
	dprintk("set #6 to 0x%02x%02x\n", cmd_buf2[1], cmd_buf2[2]);

	return 0;
}
/* Some modulations use the same firmware. This classifies modulations
   by the firmware they use. */
#define MOD_FWCLASS_UNKNOWN	0
#define MOD_FWCLASS_VSB		1
#define MOD_FWCLASS_QAM		2
static int modulation_fw_class(fe_modulation_t modulation)
{
	/* Map each supported modulation onto the firmware image it needs */
	if (modulation == VSB_8)
		return MOD_FWCLASS_VSB;
	if (modulation == QAM_AUTO || modulation == QAM_64 ||
	    modulation == QAM_256)
		return MOD_FWCLASS_QAM;
	return MOD_FWCLASS_UNKNOWN;
}
/* .set_frontend: upload the right firmware (only when the modulation
 * class changed), program the demod mode and drive the tuner.
 * Returns 0 on success or a negative error. */
static int or51132_set_parameters(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	int ret;
	struct or51132_state* state = fe->demodulator_priv;
	const struct firmware *fw;
	const char *fwname;
	int clock_mode;

	/* Upload new firmware only if we need a different one */
	if (modulation_fw_class(state->current_modulation) !=
	    modulation_fw_class(p->modulation)) {
		switch (modulation_fw_class(p->modulation)) {
		case MOD_FWCLASS_VSB:
			dprintk("set_parameters VSB MODE\n");
			fwname = OR51132_VSB_FIRMWARE;

			/* Set non-punctured clock for VSB */
			clock_mode = 0;
			break;
		case MOD_FWCLASS_QAM:
			dprintk("set_parameters QAM MODE\n");
			fwname = OR51132_QAM_FIRMWARE;

			/* Set punctured clock for QAM */
			clock_mode = 1;
			break;
		default:
			printk("or51132: Modulation type(%d) UNSUPPORTED\n",
			       p->modulation);
			return -1;
		}
		printk("or51132: Waiting for firmware upload(%s)...\n",
		       fwname);
		ret = request_firmware(&fw, fwname, state->i2c->dev.parent);
		if (ret) {
			printk(KERN_WARNING "or51132: No firmware up"
			       "loaded(timeout or file not found?)\n");
			return ret;
		}
		ret = or51132_load_firmware(fe, fw);
		release_firmware(fw);
		if (ret) {
			printk(KERN_WARNING "or51132: Writing firmware to "
			       "device failed!\n");
			return ret;
		}
		printk("or51132: Firmware upload complete.\n");
		state->config->set_ts_params(fe, clock_mode);
	}
	/* Change only if we are actually changing the modulation */
	if (state->current_modulation != p->modulation) {
		state->current_modulation = p->modulation;
		or51132_setmode(fe);
	}

	if (fe->ops.tuner_ops.set_params) {
		fe->ops.tuner_ops.set_params(fe);
		/* NOTE(review): the I2C gate is only ever closed here,
		 * never opened - presumably set_params opens it; confirm
		 * against the tuner driver. */
		if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
	}

	/* Set to current mode (re-applied unconditionally after tuning) */
	or51132_setmode(fe);

	/* Update current frequency */
	state->current_frequency = p->frequency;
	return 0;
}
/* .get_frontend: read the current modulation back from the receiver
 * status register; frequency is reported from the cached tune value. */
static int or51132_get_parameters(struct dvb_frontend* fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct or51132_state* state = fe->demodulator_priv;
	int status;
	int retry = 1;

start:
	/* Receiver Status */
	if ((status = or51132_readreg(state, 0x00)) < 0) {
		printk(KERN_WARNING "or51132: get_parameters: error reading receiver status\n");
		return -EREMOTEIO;
	}
	/* Low status byte encodes the active channel mode */
	switch(status&0xff) {
	case 0x06:
		p->modulation = VSB_8;
		break;
	case 0x43:
		p->modulation = QAM_64;
		break;
	case 0x45:
		p->modulation = QAM_256;
		break;
	default:
		/* Retry the read once before giving up */
		if (retry--)
			goto start;
		printk(KERN_WARNING "or51132: unknown status 0x%02x\n",
		       status&0xff);
		return -EREMOTEIO;
	}

	/* FIXME: Read frequency from frontend, take AFC into account */
	p->frequency = state->current_frequency;

	/* FIXME: How to read inversion setting? Receiver 6 register? */
	p->inversion = INVERSION_AUTO;

	return 0;
}
/* .read_status: the demod exposes a single "receiver lock" bit, so all
 * FE_HAS_* flags are reported set or cleared together. */
static int or51132_read_status(struct dvb_frontend* fe, fe_status_t* status)
{
	struct or51132_state* state = fe->demodulator_priv;
	int reg;

	/* Receiver Status */
	if ((reg = or51132_readreg(state, 0x00)) < 0) {
		printk(KERN_WARNING "or51132: read_status: error reading receiver status: %d\n", reg);
		*status = 0;
		return -EREMOTEIO;
	}
	dprintk("%s: read_status %04x\n", __func__, reg);

	if (reg & 0x0100) /* Receiver Lock */
		*status = FE_HAS_SIGNAL|FE_HAS_CARRIER|FE_HAS_VITERBI|
			  FE_HAS_SYNC|FE_HAS_LOCK;
	else
		*status = 0;
	return 0;
}
/* Calculate SNR estimation (scaled by 2^24)
8-VSB SNR and QAM equations from Oren datasheets
For 8-VSB:
SNR[dB] = 10 * log10(897152044.8282 / MSE^2 ) - K
Where K = 0 if NTSC rejection filter is OFF; and
K = 3 if NTSC rejection filter is ON
For QAM64:
SNR[dB] = 10 * log10(897152044.8282 / MSE^2 )
For QAM256:
SNR[dB] = 10 * log10(907832426.314266 / MSE^2 )
We re-write the snr equation as:
SNR * 2^24 = 10*(c - 2*intlog10(MSE))
Where for QAM256, c = log10(907832426.314266) * 2^24
and for 8-VSB and QAM64, c = log10(897152044.8282) * 2^24 */
/* Evaluate SNR*2^24 = 10*(c - 2*intlog10(mse)); see the derivation in
 * the comment block above. Returns 0 for no signal or negative SNR. */
static u32 calculate_snr(u32 mse, u32 c)
{
	u32 mse_log;

	if (!mse)	/* No signal */
		return 0;

	mse_log = 2 * intlog10(mse);
	/* A negative SNR is possible in theory, but the demod loses lock
	   long before the signal gets that bad; the API is unsigned, so
	   just clamp to 0. */
	if (mse_log > c)
		return 0;

	return 10 * (c - mse_log);
}
/* .read_snr: compute SNR from the post-equalizer MSE register using the
 * constants derived in the comment above calculate_snr(). state->snr
 * keeps the full 8.24 fixed-point value; the API receives the top 8.8. */
static int or51132_read_snr(struct dvb_frontend* fe, u16* snr)
{
	struct or51132_state* state = fe->demodulator_priv;
	int noise, reg;
	u32 c, usK = 0;
	int retry = 1;

start:
	/* SNR after Equalizer */
	noise = or51132_readreg(state, 0x02);
	if (noise < 0) {
		printk(KERN_WARNING "or51132: read_snr: error reading equalizer\n");
		return -EREMOTEIO;
	}
	dprintk("read_snr noise (%d)\n", noise);

	/* Read status, contains modulation type for QAM_AUTO and
	   NTSC filter for VSB */
	reg = or51132_readreg(state, 0x00);
	if (reg < 0) {
		printk(KERN_WARNING "or51132: read_snr: error reading receiver status\n");
		return -EREMOTEIO;
	}

	switch (reg&0xff) {
	case 0x06:
		/* 8-VSB: subtract 3 dB (in 8.24) when the NTSC rejection
		   filter is on (status bit 12) */
		if (reg & 0x1000) usK = 3 << 24;
		/* Fall through to QAM64 case */
	case 0x43:
		c = 150204167;
		break;
	case 0x45:
		c = 150290396;
		break;
	default:
		printk(KERN_WARNING "or51132: unknown status 0x%02x\n", reg&0xff);
		if (retry--) goto start;
		return -EREMOTEIO;
	}
	dprintk("%s: modulation %02x, NTSC rej O%s\n", __func__,
		reg&0xff, reg&0x1000?"n":"ff");

	/* Calculate SNR using noise, c, and NTSC rejection correction */
	state->snr = calculate_snr(noise, c) - usK;
	*snr = (state->snr) >> 16;

	dprintk("%s: noise = 0x%08x, snr = %d.%02d dB\n", __func__, noise,
		state->snr >> 24, (((state->snr>>8) & 0xffff) * 100) >> 16);

	return 0;
}
/* .read_signal_strength: derive a 0-65535 "strength" from the SNR. */
static int or51132_read_signal_strength(struct dvb_frontend* fe, u16* strength)
{
	/* Calculate Strength from SNR up to 35dB */
	/* Even though the SNR can go higher than 35dB, there is some comfort */
	/* factor in having a range of strong signals that can show at 100% */
	struct or51132_state* state = (struct or51132_state*) fe->demodulator_priv;
	u16 snr;
	int ret;

	/* Side effect: refreshes state->snr for use below */
	ret = fe->ops.read_snr(fe, &snr);
	if (ret != 0)
		return ret;

	/* Rather than use the 8.8 value snr, use state->snr which is 8.24 */
	/* scale the range 0 - 35*2^24 into 0 - 65535 */
	/* (35 * 2^24) / 65535 ~= 8960, hence the divisor and threshold */
	if (state->snr >= 8960 * 0x10000)
		*strength = 0xffff;
	else
		*strength = state->snr / 8960;

	return 0;
}
/* .get_tune_settings: the demod wants a generous settling delay; no
 * frequency stepping or drift compensation is used. */
static int or51132_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fe_tune_settings)
{
	fe_tune_settings->min_delay_ms = 500;
	fe_tune_settings->max_drift = 0;
	fe_tune_settings->step_size = 0;

	return 0;
}
/* Free the demod state allocated in or51132_attach(). */
static void or51132_release(struct dvb_frontend* fe)
{
	struct or51132_state* state = fe->demodulator_priv;
	kfree(state);
}
static struct dvb_frontend_ops or51132_ops;
/* Allocate and initialize a frontend instance for one OR51132 demod.
 * Returns the embedded dvb_frontend, or NULL on allocation failure. */
struct dvb_frontend* or51132_attach(const struct or51132_config* config,
				    struct i2c_adapter* i2c)
{
	struct or51132_state* state;

	/* Allocate zeroed memory for the internal state */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	/* Record configuration and bus; mark the tuning state invalid so
	 * the first tune always uploads firmware and sets the mode. */
	state->config = config;
	state->i2c = i2c;
	state->current_frequency = -1;
	state->current_modulation = -1;

	/* Hook up the frontend ops and hand the frontend to the caller */
	memcpy(&state->frontend.ops, &or51132_ops, sizeof(state->frontend.ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;
}
/* Frontend operations: ATSC 8-VSB and ITU-T J.83 Annex B QAM64/256. */
static struct dvb_frontend_ops or51132_ops = {
	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
	.info = {
		.name			= "Oren OR51132 VSB/QAM Frontend",
		.frequency_min		= 44000000,
		.frequency_max		= 958000000,
		.frequency_stepsize	= 166666,
		.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
			FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO |
			FE_CAN_8VSB
	},

	.release = or51132_release,

	.init = or51132_init,
	.sleep = or51132_sleep,

	.set_frontend = or51132_set_parameters,
	.get_frontend = or51132_get_parameters,
	.get_tune_settings = or51132_get_tune_settings,

	.read_status = or51132_read_status,
	.read_ber = or51132_read_ber,
	.read_signal_strength = or51132_read_signal_strength,
	.read_snr = or51132_read_snr,
	.read_ucblocks = or51132_read_ucblocks,
};
/* Module parameters and metadata */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");

MODULE_DESCRIPTION("OR51132 ATSC [pcHDTV HD-3000] (8VSB & ITU J83 AnnexB FEC QAM64/256) Demodulator Driver");
MODULE_AUTHOR("Kirk Lapray");
MODULE_AUTHOR("Trent Piepho");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(or51132_attach);
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
Schischu/android_kernel_samsung_chagallwifi | fs/configfs/file.c | 12266 | 9648 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* file.c - operations for regular (text) files.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <linux/configfs.h>
#include "configfs_internal.h"
/*
* A simple attribute can only be 4096 characters. Why 4k? Because the
* original code limited it to PAGE_SIZE. That's a bad idea, though,
* because an attribute of 16k on ia64 won't work on x86. So we limit to
* 4k, our minimum common page size.
*/
#define SIMPLE_ATTR_SIZE 4096
/* Per-open-file state for a configfs attribute file (file->private_data). */
struct configfs_buffer {
	size_t count;		/* number of valid bytes in ->page */
	loff_t pos;		/* position (not referenced in the code visible here) */
	char * page;		/* one page staging the attribute's text */
	struct configfs_item_operations * ops;	/* owning item's show/store hooks */
	struct mutex mutex;	/* serializes reads/writes through this buffer */
	int needs_read_fill;	/* nonzero => refill ->page via show() on next read */
};
/**
* fill_read_buffer - allocate and fill buffer from item.
* @dentry: dentry pointer.
* @buffer: data buffer for file.
*
* Allocate @buffer->page, if it hasn't been already, then call the
* config_item's show() method to fill the buffer with this attribute's
* data.
* This is called only once, on the file's first read.
*/
static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer)
{
	struct configfs_attribute * attr = to_attr(dentry);
	struct config_item * item = to_item(dentry->d_parent);
	struct configfs_item_operations * ops = buffer->ops;
	int ret = 0;
	ssize_t count;

	/* lazily allocate the single zeroed page backing the attribute */
	if (!buffer->page)
		buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
	if (!buffer->page)
		return -ENOMEM;

	count = ops->show_attribute(item,attr,buffer->page);
	/* cleared even if show() fails: a failed fill is not retried until
	 * the next write (or fresh read from offset 0) sets it again */
	buffer->needs_read_fill = 0;
	/* a show() that wrote past one page has already corrupted memory */
	BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE);
	if (count >= 0)
		buffer->count = count;
	else
		ret = count;	/* propagate show()'s negative errno */
	return ret;
}
/**
* configfs_read_file - read an attribute.
* @file: file pointer.
* @buf: buffer to fill.
* @count: number of bytes to read.
* @ppos: starting offset in file.
*
* Userspace wants to read an attribute file. The attribute descriptor
* is in the file's ->d_fsdata. The target item is in the directory's
* ->d_fsdata.
*
* We call fill_read_buffer() to allocate and fill the buffer from the
* item's show() method exactly once (if the read is happening from
* the beginning of the file). That should fill the entire buffer with
* all the data the item has to offer for that attribute.
* We then call flush_read_buffer() to copy the buffer to userspace
* in the increments specified.
*/
static ssize_t
configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct configfs_buffer * buffer = file->private_data;	/* set up in check_perm() */
	ssize_t retval = 0;

	mutex_lock(&buffer->mutex);
	/* regenerate the attribute text only when needed (first read, or
	 * after an intervening write invalidated the cached page) */
	if (buffer->needs_read_fill) {
		if ((retval = fill_read_buffer(file->f_path.dentry,buffer)))
			goto out;
	}
	pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
		 __func__, count, *ppos, buffer->page);
	/* hand the requested slice of the cached page to userspace */
	retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
					 buffer->count);
out:
	mutex_unlock(&buffer->mutex);
	return retval;
}
/**
* fill_write_buffer - copy buffer from userspace.
* @buffer: data buffer for file.
* @buf: data from user.
* @count: number of bytes in @userbuf.
*
* Allocate @buffer->page if it hasn't been already, then
* copy the user-supplied buffer into it.
*/
static int
fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count)
{
	int error;

	/* lazily allocate the one-page staging buffer */
	if (!buffer->page)
		buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
	if (!buffer->page)
		return -ENOMEM;

	/* silently truncate to the 4k attribute limit, leaving room for
	 * the terminating NUL added below */
	if (count >= SIMPLE_ATTR_SIZE)
		count = SIMPLE_ATTR_SIZE - 1;
	error = copy_from_user(buffer->page,buf,count);
	/* content changed: the next read must call show() again */
	buffer->needs_read_fill = 1;
	/* if buf is assumed to contain a string, terminate it by \0,
	 * so e.g. sscanf() can scan the string easily */
	buffer->page[count] = 0;
	return error ? -EFAULT : count;
}
/**
* flush_write_buffer - push buffer to config_item.
* @dentry: dentry to the attribute
* @buffer: data buffer for file.
* @count: number of bytes
*
* Get the correct pointers for the config_item and the attribute we're
* dealing with, then call the store() method for the attribute,
* passing the buffer that we acquired in fill_write_buffer().
*/
/*
 * flush_write_buffer - push the staged user data to the config_item.
 * @dentry: dentry of the attribute file
 * @buffer: per-open buffer whose ->page holds the data
 * @count: number of bytes to hand to the store() hook
 *
 * Resolves the attribute and its owning item from the dentry, then
 * invokes the item's store_attribute() with the staged page.
 */
static int
flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count)
{
	struct configfs_item_operations * ops = buffer->ops;
	struct config_item * target = to_item(dentry->d_parent);
	struct configfs_attribute * attr = to_attr(dentry);

	return ops->store_attribute(target, attr, buffer->page, count);
}
/**
* configfs_write_file - write an attribute.
* @file: file pointer
* @buf: data to write
* @count: number of bytes
* @ppos: starting offset
*
* Similar to configfs_read_file(), though working in the opposite direction.
* We allocate and fill the data from the user in fill_write_buffer(),
* then push it to the config_item in flush_write_buffer().
* There is no easy way for us to know if userspace is only doing a partial
* write, so we don't support them. We expect the entire buffer to come
* on the first write.
* Hint: if you're writing a value, first read the file, modify only the
* the value you're changing, then write entire buffer back.
*/
static ssize_t
configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct configfs_buffer * buffer = file->private_data;
	ssize_t len;

	mutex_lock(&buffer->mutex);
	/* stage the user data in buffer->page ... */
	len = fill_write_buffer(buffer, buf, count);
	/* ... then push it to the item.  NOTE(review): the original,
	 * untruncated 'count' is passed here rather than 'len' — confirm
	 * this matches the intended semantics for oversized writes. */
	if (len > 0)
		len = flush_write_buffer(file->f_path.dentry, buffer, count);
	if (len > 0)
		*ppos += len;
	mutex_unlock(&buffer->mutex);
	return len;
}
/*
 * check_perm - validate an open() of an attribute file and set up state.
 *
 * Pins the target config_item, takes a module reference on the
 * attribute's owner, checks that each requested access mode is backed
 * both by the inode permissions and by a matching show/store operation,
 * then allocates the configfs_buffer stashed in file->private_data.
 * The references taken here are dropped in configfs_release().
 */
static int check_perm(struct inode * inode, struct file * file)
{
	struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
	struct configfs_attribute * attr = to_attr(file->f_path.dentry);
	struct configfs_buffer * buffer;
	struct configfs_item_operations * ops = NULL;
	int error = 0;

	if (!item || !attr)
		goto Einval;

	/* Grab the module reference for this attribute if we have one */
	if (!try_module_get(attr->ca_owner)) {
		error = -ENODEV;
		goto Done;
	}

	if (item->ci_type)
		ops = item->ci_type->ct_item_ops;
	else
		goto Eaccess;

	/* File needs write support.
	 * The inode's perms must say it's ok,
	 * and we must have a store method.
	 */
	if (file->f_mode & FMODE_WRITE) {
		if (!(inode->i_mode & S_IWUGO) || !ops->store_attribute)
			goto Eaccess;
	}

	/* File needs read support.
	 * The inode's perms must say it's ok, and there
	 * must be a show method for it.
	 */
	if (file->f_mode & FMODE_READ) {
		if (!(inode->i_mode & S_IRUGO) || !ops->show_attribute)
			goto Eaccess;
	}

	/* No error? Great, allocate a buffer for the file, and store
	 * it in file->private_data for easy access.
	 */
	buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
	if (!buffer) {
		error = -ENOMEM;
		goto Enomem;
	}
	mutex_init(&buffer->mutex);
	buffer->needs_read_fill = 1;	/* first read must call show() */
	buffer->ops = ops;
	file->private_data = buffer;
	goto Done;
 Einval:
	error = -EINVAL;
	goto Done;
 Eaccess:
	error = -EACCES;
	/* deliberate fall-through: the module reference is held here */
 Enomem:
	module_put(attr->ca_owner);
 Done:
	/* on any failure, drop the item reference taken at the top */
	if (error && item)
		config_item_put(item);
	return error;
}
/* open(): validate permissions and allocate the per-open buffer. */
static int configfs_open_file(struct inode * inode, struct file * filp)
{
	return check_perm(inode,filp);
}
/*
 * release(): drop the item and module references taken in check_perm()
 * and free the per-open buffer together with its staging page.
 */
static int configfs_release(struct inode * inode, struct file * filp)
{
	struct config_item * item = to_item(filp->f_path.dentry->d_parent);
	struct configfs_attribute * attr = to_attr(filp->f_path.dentry);
	struct module * owner = attr->ca_owner;	/* saved before dropping refs */
	struct configfs_buffer * buffer = filp->private_data;

	if (item)
		config_item_put(item);
	/* After this point, attr should not be accessed. */
	module_put(owner);

	if (buffer) {
		if (buffer->page)
			free_page((unsigned long)buffer->page);
		mutex_destroy(&buffer->mutex);
		kfree(buffer);
	}
	return 0;
}
/* File operations shared by all regular configfs attribute files. */
const struct file_operations configfs_file_operations = {
	.read		= configfs_read_file,
	.write		= configfs_write_file,
	.llseek		= generic_file_llseek,
	.open		= configfs_open_file,
	.release	= configfs_release,
};
/*
 * configfs_add_file - create the dirent for an attribute file.
 * @dir: directory dentry of the owning item
 * @attr: attribute to expose
 * @type: CONFIGFS_* dirent type flags
 *
 * Returns 0 or the negative errno from configfs_make_dirent().
 */
int configfs_add_file(struct dentry * dir, const struct configfs_attribute * attr, int type)
{
	struct configfs_dirent * parent_sd = dir->d_fsdata;
	umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;	/* regular file */
	int error = 0;

	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
	error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
	mutex_unlock(&dir->d_inode->i_mutex);

	return error;
}
/**
* configfs_create_file - create an attribute file for an item.
* @item: item we're creating for.
* @attr: atrribute descriptor.
*/
int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr)
{
	/* all three are required: attribute files hang off the item's
	 * already-created dentry */
	BUG_ON(!item || !item->ci_dentry || !attr);
	return configfs_add_file(item->ci_dentry, attr,
				 CONFIGFS_ITEM_ATTR);
}
| gpl-2.0 |
friedrich420/S4-TW-AEL-Kernel-v4Plus | drivers/video/aty/radeon_i2c.c | 12778 | 4019 | #include "radeonfb.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <asm/io.h>
#include <video/radeon.h>
#include "../edid.h"
/*
 * i2c-algo-bit callback: drive the DDC clock line.  The line behaves
 * open-drain style: enabling the output drives it low, disabling the
 * output lets the external pull-up raise it.
 */
static void radeon_gpio_setscl(void* data, int state)
{
	struct radeon_i2c_chan *chan = data;
	struct radeonfb_info *rinfo = chan->rinfo;	/* used by INREG/OUTREG */
	u32 val;

	val = INREG(chan->ddc_reg) & ~(VGA_DDC_CLK_OUT_EN);
	if (!state)
		val |= VGA_DDC_CLK_OUT_EN;	/* drive low */
	OUTREG(chan->ddc_reg, val);
	(void)INREG(chan->ddc_reg);	/* posting read: flush the write */
}
/*
 * i2c-algo-bit callback: drive the DDC data line (same open-drain
 * scheme as radeon_gpio_setscl above).
 */
static void radeon_gpio_setsda(void* data, int state)
{
	struct radeon_i2c_chan *chan = data;
	struct radeonfb_info *rinfo = chan->rinfo;	/* used by INREG/OUTREG */
	u32 val;

	val = INREG(chan->ddc_reg) & ~(VGA_DDC_DATA_OUT_EN);
	if (!state)
		val |= VGA_DDC_DATA_OUT_EN;	/* drive low */
	OUTREG(chan->ddc_reg, val);
	(void)INREG(chan->ddc_reg);	/* posting read: flush the write */
}
/* i2c-algo-bit callback: sample the DDC clock input, returns 0 or 1. */
static int radeon_gpio_getscl(void* data)
{
	struct radeon_i2c_chan *chan = data;
	struct radeonfb_info *rinfo = chan->rinfo;	/* needed by INREG */

	return (INREG(chan->ddc_reg) & VGA_DDC_CLK_INPUT) != 0;
}
/* i2c-algo-bit callback: sample the DDC data input, returns 0 or 1. */
static int radeon_gpio_getsda(void* data)
{
	struct radeon_i2c_chan *chan = data;
	struct radeonfb_info *rinfo = chan->rinfo;	/* needed by INREG */

	return (INREG(chan->ddc_reg) & VGA_DDC_DATA_INPUT) != 0;
}
/*
 * radeon_setup_i2c_bus - register one bit-banged DDC bus.
 * @chan: channel with ->rinfo and ->ddc_reg already initialized
 * @name: short suffix used in the adapter name
 *
 * Wires the GPIO callbacks into the i2c-bit algorithm, releases both
 * lines so the bus starts idle, then registers the adapter.
 * Returns 0 on success or the error from i2c_bit_add_bus().
 */
static int radeon_setup_i2c_bus(struct radeon_i2c_chan *chan, const char *name)
{
	int rc;

	snprintf(chan->adapter.name, sizeof(chan->adapter.name),
		 "radeonfb %s", name);
	chan->adapter.owner = THIS_MODULE;
	chan->adapter.algo_data = &chan->algo;
	chan->adapter.dev.parent = &chan->rinfo->pdev->dev;
	chan->algo.setsda = radeon_gpio_setsda;
	chan->algo.setscl = radeon_gpio_setscl;
	chan->algo.getsda = radeon_gpio_getsda;
	chan->algo.getscl = radeon_gpio_getscl;
	chan->algo.udelay = 10;		/* half-bit delay in microseconds */
	chan->algo.timeout = 20;
	chan->algo.data = chan;		/* passed back to the callbacks above */

	i2c_set_adapdata(&chan->adapter, chan);

	/* Raise SCL and SDA */
	radeon_gpio_setsda(chan, 1);
	radeon_gpio_setscl(chan, 1);
	udelay(20);

	rc = i2c_bit_add_bus(&chan->adapter);
	if (rc == 0)
		dev_dbg(&chan->rinfo->pdev->dev, "I2C bus %s registered.\n", name);
	else
		dev_warn(&chan->rinfo->pdev->dev, "Failed to register I2C bus %s.\n", name);
	return rc;
}
/* Create the four DDC buses: monitor-ID, DVI, VGA and second CRT. */
void radeon_create_i2c_busses(struct radeonfb_info *rinfo)
{
	rinfo->i2c[0].rinfo = rinfo;
	rinfo->i2c[0].ddc_reg = GPIO_MONID;
#ifndef CONFIG_PPC
	/* expose the MONID bus for hwmon probing on non-PPC systems */
	rinfo->i2c[0].adapter.class = I2C_CLASS_HWMON;
#endif
	radeon_setup_i2c_bus(&rinfo->i2c[0], "monid");

	rinfo->i2c[1].rinfo = rinfo;
	rinfo->i2c[1].ddc_reg = GPIO_DVI_DDC;
	radeon_setup_i2c_bus(&rinfo->i2c[1], "dvi");

	rinfo->i2c[2].rinfo = rinfo;
	rinfo->i2c[2].ddc_reg = GPIO_VGA_DDC;
	radeon_setup_i2c_bus(&rinfo->i2c[2], "vga");

	rinfo->i2c[3].rinfo = rinfo;
	rinfo->i2c[3].ddc_reg = GPIO_CRT2_DDC;
	radeon_setup_i2c_bus(&rinfo->i2c[3], "crt2");
}
/*
 * Tear down every DDC bus created by radeon_create_i2c_busses().
 * A NULL ->rinfo marks a channel that was never (or is no longer)
 * registered, so its adapter is skipped.
 */
void radeon_delete_i2c_busses(struct radeonfb_info *rinfo)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (rinfo->i2c[i].rinfo)
			i2c_del_adapter(&rinfo->i2c[i].adapter);
		rinfo->i2c[i].rinfo = NULL;
	}
}
/*
 * radeon_probe_i2c_connector - probe one DDC port for a display.
 * @conn: 1-based connector index into rinfo->i2c[]
 * @out_edid: if non-NULL, receives the raw EDID block (may be NULL)
 *
 * Returns the detected monitor type: MT_NONE when no EDID answers,
 * MT_LCD/MT_DFP for digital panels, MT_CRT otherwise.
 */
int radeon_probe_i2c_connector(struct radeonfb_info *rinfo, int conn,
			       u8 **out_edid)
{
	u8 *edid;

	edid = fb_ddc_read(&rinfo->i2c[conn-1].adapter);

	if (out_edid)
		*out_edid = edid;
	if (!edid) {
		pr_debug("radeonfb: I2C (port %d) ... not found\n", conn);
		return MT_NONE;
	}
	/* EDID input-definition byte (offset 0x14): bit 7 => digital */
	if (edid[0x14] & 0x80) {
		/* Fix detection using BIOS tables */
		if (rinfo->is_mobility /*&& conn == ddc_dvi*/ &&
		    (INREG(LVDS_GEN_CNTL) & LVDS_ON)) {
			pr_debug("radeonfb: I2C (port %d) ... found LVDS panel\n", conn);
			return MT_LCD;
		} else {
			pr_debug("radeonfb: I2C (port %d) ... found TMDS panel\n", conn);
			return MT_DFP;
		}
	}
	pr_debug("radeonfb: I2C (port %d) ... found CRT display\n", conn);
	return MT_CRT;
}
| gpl-2.0 |
Clumsy-Kernel-Development/HTC_10_Kernel | sound/isa/gus/gus_mem.c | 14058 | 9900 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* GUS's memory allocation routines / bottom layer
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/slab.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/gus.h>
#include <sound/info.h>
#ifdef CONFIG_SND_DEBUG
static void snd_gf1_mem_info_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer);
#endif
/* Take (xup == 0) or drop (xup != 0) the allocator's memory mutex. */
void snd_gf1_mem_lock(struct snd_gf1_mem * alloc, int xup)
{
	if (xup)
		mutex_unlock(&alloc->memory_mutex);
	else
		mutex_lock(&alloc->memory_mutex);
}
/*
 * snd_gf1_mem_xalloc - copy @block into a fresh list node and insert it
 * into the allocator's list, which is kept sorted by onboard address.
 *
 * The caller is responsible for locking alloc->memory_mutex where
 * concurrent access is possible; this function never touches the lock.
 *
 * Returns the inserted node, or NULL if kmalloc fails.
 */
static struct snd_gf1_mem_block *snd_gf1_mem_xalloc(struct snd_gf1_mem * alloc,
						    struct snd_gf1_mem_block * block)
{
	struct snd_gf1_mem_block *pblock, *nblock;

	nblock = kmalloc(sizeof(struct snd_gf1_mem_block), GFP_KERNEL);
	if (nblock == NULL)
		return NULL;
	*nblock = *block;
	pblock = alloc->first;
	while (pblock) {
		if (pblock->ptr > nblock->ptr) {
			/* insert in front of pblock to keep ->ptr order */
			nblock->prev = pblock->prev;
			nblock->next = pblock;
			pblock->prev = nblock;
			if (pblock == alloc->first)
				alloc->first = nblock;
			else
				nblock->prev->next = nblock;
			/*
			 * Fix: the old code did mutex_unlock() on a lock
			 * this function never took (the caller unlocks it
			 * again -> double unlock) and returned NULL even
			 * though the node had been linked in, making a
			 * successful insertion look like a failure.
			 */
			return nblock;
		}
		pblock = pblock->next;
	}
	/* append at the tail (also covers the empty-list case) */
	nblock->next = NULL;
	if (alloc->last == NULL) {
		nblock->prev = NULL;
		alloc->first = alloc->last = nblock;
	} else {
		nblock->prev = alloc->last;
		alloc->last->next = nblock;
		alloc->last = nblock;
	}
	return nblock;
}
/*
 * snd_gf1_mem_xfree - drop one reference to @block; when the last
 * reference goes away, unlink it from the allocator's list and free it.
 *
 * The caller is responsible for locking alloc->memory_mutex where
 * concurrent access is possible (snd_gf1_mem_free() holds it;
 * snd_gf1_mem_done() runs at teardown).  Always returns 0.
 */
int snd_gf1_mem_xfree(struct snd_gf1_mem * alloc, struct snd_gf1_mem_block * block)
{
	if (block->share) {	/* ok.. shared block */
		block->share--;
		/*
		 * Fix: do not mutex_unlock() here.  This function never
		 * takes the lock, so releasing it on behalf of the caller
		 * caused a double unlock from snd_gf1_mem_free() and an
		 * unlock of an unheld mutex from snd_gf1_mem_done().
		 */
		return 0;
	}
	/* unlink from the doubly linked list, fixing up head and tail */
	if (alloc->first == block) {
		alloc->first = block->next;
		if (block->next)
			block->next->prev = NULL;
	} else {
		block->prev->next = block->next;
		if (block->next)
			block->next->prev = block->prev;
	}
	if (alloc->last == block) {
		alloc->last = block->prev;
		if (block->prev)
			block->prev->next = NULL;
	} else {
		block->next->prev = block->prev;
		if (block->prev)
			block->prev->next = block->next;
	}
	kfree(block->name);
	kfree(block);
	return 0;
}
/* Return the block whose onboard address equals @address, or NULL. */
static struct snd_gf1_mem_block *snd_gf1_mem_look(struct snd_gf1_mem * alloc,
						  unsigned int address)
{
	struct snd_gf1_mem_block *walk;

	for (walk = alloc->first; walk != NULL; walk = walk->next)
		if (walk->ptr == address)
			return walk;
	return NULL;
}
/* Return a block carrying the given non-zero 4-word share id, or NULL. */
static struct snd_gf1_mem_block *snd_gf1_mem_share(struct snd_gf1_mem * alloc,
						   unsigned int *share_id)
{
	struct snd_gf1_mem_block *walk;

	/* an all-zero id means "not shared" and never matches anything */
	if (!(share_id[0] | share_id[1] | share_id[2] | share_id[3]))
		return NULL;
	for (walk = alloc->first; walk != NULL; walk = walk->next)
		if (memcmp(share_id, walk->share_id,
			   sizeof(walk->share_id)) == 0)
			return walk;
	return NULL;
}
/*
 * snd_gf1_mem_find - locate space for @size bytes of onboard memory.
 * @block: filled in with the chosen ->ptr/->size plus default metadata
 * @w_16: search the 16-bit banks instead of the 8-bit ones
 * @align: required address alignment (forced to >= 2 for 16-bit)
 *
 * Scans the gaps between existing allocations (the block list is sorted
 * by address), never letting a candidate cross a bank boundary, then
 * falls back to the start of a completely free bank.  Returns 0 on
 * success or -ENOMEM.  Caller is expected to hold the allocator lock.
 */
static int snd_gf1_mem_find(struct snd_gf1_mem * alloc,
			    struct snd_gf1_mem_block * block,
			    unsigned int size, int w_16, int align)
{
	struct snd_gf1_bank_info *info = w_16 ? alloc->banks_16 : alloc->banks_8;
	unsigned int idx, boundary;
	int size1;
	struct snd_gf1_mem_block *pblock;
	unsigned int ptr1, ptr2;

	if (w_16 && align < 2)
		align = 2;
	/* preset metadata for a fresh, unshared, driver-owned block */
	block->flags = w_16 ? SNDRV_GF1_MEM_BLOCK_16BIT : 0;
	block->owner = SNDRV_GF1_MEM_OWNER_DRIVER;
	block->share = 0;
	block->share_id[0] = block->share_id[1] =
	block->share_id[2] = block->share_id[3] = 0;
	block->name = NULL;
	block->prev = block->next = NULL;
	for (pblock = alloc->first, idx = 0; pblock; pblock = pblock->next) {
		/* advance idx to the bank holding the end of this block */
		while (pblock->ptr >= (boundary = info[idx].address + info[idx].size))
			idx++;
		while (pblock->ptr + pblock->size >= (boundary = info[idx].address + info[idx].size))
			idx++;
		ptr2 = boundary;	/* candidate gap must not cross the bank end */
		if (pblock->next) {
			if (pblock->ptr + pblock->size == pblock->next->ptr)
				continue;	/* adjacent blocks: no gap at all */
			if (pblock->next->ptr < boundary)
				ptr2 = pblock->next->ptr;
		}
		ptr1 = ALIGN(pblock->ptr + pblock->size, align);
		if (ptr1 >= ptr2)
			continue;	/* alignment consumed the whole gap */
		size1 = ptr2 - ptr1;
		if ((int)size <= size1) {
			block->ptr = ptr1;
			block->size = size;
			return 0;
		}
	}
	/* no usable gap between blocks: try the remaining whole banks */
	while (++idx < 4) {
		if (size <= info[idx].size) {
			/* I assume that bank address is already aligned.. */
			block->ptr = info[idx].address;
			block->size = size;
			return 0;
		}
	}
	return -ENOMEM;
}
/*
 * snd_gf1_mem_alloc - allocate (or share) a region of onboard memory.
 * @alloc: the card's memory allocator
 * @owner: SNDRV_GF1_MEM_OWNER_* tag recorded on the block
 * @name: human-readable name (duplicated for the block)
 * @size: requested size in bytes
 * @w_16: nonzero to allocate from the 16-bit banks
 * @align: required alignment of the onboard address
 * @share_id: optional 4-word id; an existing block with the same id and
 *            size is reused with its reference count bumped
 *
 * Returns the block, or NULL when no space (or kernel memory) is left.
 */
struct snd_gf1_mem_block *snd_gf1_mem_alloc(struct snd_gf1_mem * alloc, int owner,
					    char *name, int size, int w_16, int align,
					    unsigned int *share_id)
{
	struct snd_gf1_mem_block block, *nblock;

	snd_gf1_mem_lock(alloc, 0);
	if (share_id != NULL) {
		nblock = snd_gf1_mem_share(alloc, share_id);
		if (nblock != NULL) {
			if (size != (int)nblock->size) {
				/* TODO: remove in the future */
				snd_printk(KERN_ERR "snd_gf1_mem_alloc - share: sizes differ\n");
				goto __std;
			}
			nblock->share++;
			snd_gf1_mem_lock(alloc, 1);
			/*
			 * Fix: return the shared block.  Returning NULL
			 * here reported every successful share as an
			 * allocation failure even though the reference
			 * count had already been incremented.
			 */
			return nblock;
		}
	}
      __std:
	if (snd_gf1_mem_find(alloc, &block, size, w_16, align) < 0) {
		snd_gf1_mem_lock(alloc, 1);
		return NULL;
	}
	if (share_id != NULL)
		memcpy(&block.share_id, share_id, sizeof(block.share_id));
	block.owner = owner;
	/* NOTE(review): a NULL from kstrdup() is stored silently, and the
	 * string leaks if snd_gf1_mem_xalloc() fails - pre-existing. */
	block.name = kstrdup(name, GFP_KERNEL);
	nblock = snd_gf1_mem_xalloc(alloc, &block);
	snd_gf1_mem_lock(alloc, 1);
	return nblock;
}
/*
 * snd_gf1_mem_free - free the block at onboard @address.
 * Returns the result of snd_gf1_mem_xfree(), or -EINVAL when no block
 * lives at that address.
 */
int snd_gf1_mem_free(struct snd_gf1_mem * alloc, unsigned int address)
{
	struct snd_gf1_mem_block *found;
	int result = -EINVAL;

	snd_gf1_mem_lock(alloc, 0);
	found = snd_gf1_mem_look(alloc, address);
	if (found != NULL)
		result = snd_gf1_mem_xfree(alloc, found);
	snd_gf1_mem_lock(alloc, 1);
	return result;
}
/*
 * snd_gf1_mem_init - set up the onboard memory allocator for a card.
 *
 * Pre-reserves the regions used by the hardware itself (the InterWave
 * LFO area in enhanced mode and the 4-byte default voice block) and, in
 * debug builds, registers the "gusmem" proc file.  Returns 0 or -ENOMEM.
 */
int snd_gf1_mem_init(struct snd_gus_card * gus)
{
	struct snd_gf1_mem *alloc;
	struct snd_gf1_mem_block block;
#ifdef CONFIG_SND_DEBUG
	struct snd_info_entry *entry;
#endif

	alloc = &gus->gf1.mem_alloc;
	mutex_init(&alloc->memory_mutex);
	alloc->first = alloc->last = NULL;
	if (!gus->gf1.memory)
		return 0;	/* card has no onboard memory */

	memset(&block, 0, sizeof(block));
	block.owner = SNDRV_GF1_MEM_OWNER_DRIVER;
	if (gus->gf1.enh_mode) {
		block.ptr = 0;
		block.size = 1024;
		/* NOTE(review): kstrdup result unchecked and leaked on
		 * xalloc failure - pre-existing */
		block.name = kstrdup("InterWave LFOs", GFP_KERNEL);
		if (snd_gf1_mem_xalloc(alloc, &block) == NULL)
			return -ENOMEM;
	}
	block.ptr = gus->gf1.default_voice_address;
	block.size = 4;
	block.name = kstrdup("Voice default (NULL's)", GFP_KERNEL);
	if (snd_gf1_mem_xalloc(alloc, &block) == NULL)
		return -ENOMEM;
#ifdef CONFIG_SND_DEBUG
	if (! snd_card_proc_new(gus->card, "gusmem", &entry))
		snd_info_set_text_ops(entry, gus, snd_gf1_mem_info_read);
#endif
	return 0;
}
int snd_gf1_mem_done(struct snd_gus_card * gus)
{
struct snd_gf1_mem *alloc;
struct snd_gf1_mem_block *block, *nblock;
alloc = &gus->gf1.mem_alloc;
block = alloc->first;
while (block) {
nblock = block->next;
snd_gf1_mem_xfree(alloc, block);
block = nblock;
}
return 0;
}
#ifdef CONFIG_SND_DEBUG
/*
 * snd_gf1_mem_info_read - dump allocator state to the "gusmem" proc
 * file: bank layout, every block (share info, flags, owner) and a
 * total/used/free summary.
 */
static void snd_gf1_mem_info_read(struct snd_info_entry *entry,
				  struct snd_info_buffer *buffer)
{
	struct snd_gus_card *gus;
	struct snd_gf1_mem *alloc;
	struct snd_gf1_mem_block *block;
	unsigned int total, used;
	int i;

	gus = entry->private_data;
	alloc = &gus->gf1.mem_alloc;
	mutex_lock(&alloc->memory_mutex);
	snd_iprintf(buffer, "8-bit banks : \n ");
	for (i = 0; i < 4; i++)
		snd_iprintf(buffer, "0x%06x (%04ik)%s", alloc->banks_8[i].address, alloc->banks_8[i].size >> 10, i + 1 < 4 ? "," : "");
	snd_iprintf(buffer, "\n"
		    "16-bit banks : \n ");
	/* 'total' is accumulated from the 16-bit bank view */
	for (i = total = 0; i < 4; i++) {
		snd_iprintf(buffer, "0x%06x (%04ik)%s", alloc->banks_16[i].address, alloc->banks_16[i].size >> 10, i + 1 < 4 ? "," : "");
		total += alloc->banks_16[i].size;
	}
	snd_iprintf(buffer, "\n");
	used = 0;
	for (block = alloc->first, i = 0; block; block = block->next, i++) {
		used += block->size;
		snd_iprintf(buffer, "Block %i at 0x%lx onboard 0x%x size %i (0x%x):\n", i, (long) block, block->ptr, block->size, block->size);
		if (block->share ||
		    block->share_id[0] || block->share_id[1] ||
		    block->share_id[2] || block->share_id[3])
			snd_iprintf(buffer, " Share : %i [id0 0x%x] [id1 0x%x] [id2 0x%x] [id3 0x%x]\n",
				    block->share,
				    block->share_id[0], block->share_id[1],
				    block->share_id[2], block->share_id[3]);
		snd_iprintf(buffer, " Flags :%s\n",
			    block->flags & SNDRV_GF1_MEM_BLOCK_16BIT ? " 16-bit" : "");
		snd_iprintf(buffer, " Owner : ");
		switch (block->owner) {
		case SNDRV_GF1_MEM_OWNER_DRIVER:
			snd_iprintf(buffer, "driver - %s\n", block->name);
			break;
		case SNDRV_GF1_MEM_OWNER_WAVE_SIMPLE:
			snd_iprintf(buffer, "SIMPLE wave\n");
			break;
		case SNDRV_GF1_MEM_OWNER_WAVE_GF1:
			snd_iprintf(buffer, "GF1 wave\n");
			break;
		case SNDRV_GF1_MEM_OWNER_WAVE_IWFFFF:
			snd_iprintf(buffer, "IWFFFF wave\n");
			break;
		default:
			snd_iprintf(buffer, "unknown\n");
		}
	}
	snd_iprintf(buffer, " Total: memory = %i, used = %i, free = %i\n",
		    total, used, total - used);
	mutex_unlock(&alloc->memory_mutex);
#if 0
	ultra_iprintf(buffer, " Verify: free = %i, max 8-bit block = %i, max 16-bit block = %i\n",
		      ultra_memory_free_size(card, &card->gf1.mem_alloc),
		      ultra_memory_free_block(card, &card->gf1.mem_alloc, 0),
		      ultra_memory_free_block(card, &card->gf1.mem_alloc, 1));
#endif
}
#endif
| gpl-2.0 |
loongson-community/linux-3A | sound/isa/gus/gus_mem.c | 14058 | 9900 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* GUS's memory allocation routines / bottom layer
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/slab.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/gus.h>
#include <sound/info.h>
#ifdef CONFIG_SND_DEBUG
static void snd_gf1_mem_info_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer);
#endif
/* Take (xup == 0) or drop (xup != 0) the allocator's memory mutex. */
void snd_gf1_mem_lock(struct snd_gf1_mem * alloc, int xup)
{
	if (xup)
		mutex_unlock(&alloc->memory_mutex);
	else
		mutex_lock(&alloc->memory_mutex);
}
/*
 * snd_gf1_mem_xalloc - copy @block into a fresh list node and insert it
 * into the allocator's list, which is kept sorted by onboard address.
 *
 * The caller is responsible for locking alloc->memory_mutex where
 * concurrent access is possible; this function never touches the lock.
 *
 * Returns the inserted node, or NULL if kmalloc fails.
 */
static struct snd_gf1_mem_block *snd_gf1_mem_xalloc(struct snd_gf1_mem * alloc,
						    struct snd_gf1_mem_block * block)
{
	struct snd_gf1_mem_block *pblock, *nblock;

	nblock = kmalloc(sizeof(struct snd_gf1_mem_block), GFP_KERNEL);
	if (nblock == NULL)
		return NULL;
	*nblock = *block;
	pblock = alloc->first;
	while (pblock) {
		if (pblock->ptr > nblock->ptr) {
			/* insert in front of pblock to keep ->ptr order */
			nblock->prev = pblock->prev;
			nblock->next = pblock;
			pblock->prev = nblock;
			if (pblock == alloc->first)
				alloc->first = nblock;
			else
				nblock->prev->next = nblock;
			/*
			 * Fix: the old code did mutex_unlock() on a lock
			 * this function never took (the caller unlocks it
			 * again -> double unlock) and returned NULL even
			 * though the node had been linked in, making a
			 * successful insertion look like a failure.
			 */
			return nblock;
		}
		pblock = pblock->next;
	}
	/* append at the tail (also covers the empty-list case) */
	nblock->next = NULL;
	if (alloc->last == NULL) {
		nblock->prev = NULL;
		alloc->first = alloc->last = nblock;
	} else {
		nblock->prev = alloc->last;
		alloc->last->next = nblock;
		alloc->last = nblock;
	}
	return nblock;
}
/*
 * snd_gf1_mem_xfree - drop one reference to @block; when the last
 * reference goes away, unlink it from the allocator's list and free it.
 *
 * The caller is responsible for locking alloc->memory_mutex where
 * concurrent access is possible (snd_gf1_mem_free() holds it;
 * snd_gf1_mem_done() runs at teardown).  Always returns 0.
 */
int snd_gf1_mem_xfree(struct snd_gf1_mem * alloc, struct snd_gf1_mem_block * block)
{
	if (block->share) {	/* ok.. shared block */
		block->share--;
		/*
		 * Fix: do not mutex_unlock() here.  This function never
		 * takes the lock, so releasing it on behalf of the caller
		 * caused a double unlock from snd_gf1_mem_free() and an
		 * unlock of an unheld mutex from snd_gf1_mem_done().
		 */
		return 0;
	}
	/* unlink from the doubly linked list, fixing up head and tail */
	if (alloc->first == block) {
		alloc->first = block->next;
		if (block->next)
			block->next->prev = NULL;
	} else {
		block->prev->next = block->next;
		if (block->next)
			block->next->prev = block->prev;
	}
	if (alloc->last == block) {
		alloc->last = block->prev;
		if (block->prev)
			block->prev->next = NULL;
	} else {
		block->next->prev = block->prev;
		if (block->prev)
			block->prev->next = block->next;
	}
	kfree(block->name);
	kfree(block);
	return 0;
}
/* Return the block whose onboard address equals @address, or NULL. */
static struct snd_gf1_mem_block *snd_gf1_mem_look(struct snd_gf1_mem * alloc,
						  unsigned int address)
{
	struct snd_gf1_mem_block *walk;

	for (walk = alloc->first; walk != NULL; walk = walk->next)
		if (walk->ptr == address)
			return walk;
	return NULL;
}
/* Return a block carrying the given non-zero 4-word share id, or NULL. */
static struct snd_gf1_mem_block *snd_gf1_mem_share(struct snd_gf1_mem * alloc,
						   unsigned int *share_id)
{
	struct snd_gf1_mem_block *walk;

	/* an all-zero id means "not shared" and never matches anything */
	if (!(share_id[0] | share_id[1] | share_id[2] | share_id[3]))
		return NULL;
	for (walk = alloc->first; walk != NULL; walk = walk->next)
		if (memcmp(share_id, walk->share_id,
			   sizeof(walk->share_id)) == 0)
			return walk;
	return NULL;
}
/*
 * snd_gf1_mem_find - locate space for @size bytes of onboard memory.
 * @block: filled in with the chosen ->ptr/->size plus default metadata
 * @w_16: search the 16-bit banks instead of the 8-bit ones
 * @align: required address alignment (forced to >= 2 for 16-bit)
 *
 * Scans the gaps between existing allocations (the block list is sorted
 * by address), never letting a candidate cross a bank boundary, then
 * falls back to the start of a completely free bank.  Returns 0 on
 * success or -ENOMEM.  Caller is expected to hold the allocator lock.
 */
static int snd_gf1_mem_find(struct snd_gf1_mem * alloc,
			    struct snd_gf1_mem_block * block,
			    unsigned int size, int w_16, int align)
{
	struct snd_gf1_bank_info *info = w_16 ? alloc->banks_16 : alloc->banks_8;
	unsigned int idx, boundary;
	int size1;
	struct snd_gf1_mem_block *pblock;
	unsigned int ptr1, ptr2;

	if (w_16 && align < 2)
		align = 2;
	/* preset metadata for a fresh, unshared, driver-owned block */
	block->flags = w_16 ? SNDRV_GF1_MEM_BLOCK_16BIT : 0;
	block->owner = SNDRV_GF1_MEM_OWNER_DRIVER;
	block->share = 0;
	block->share_id[0] = block->share_id[1] =
	block->share_id[2] = block->share_id[3] = 0;
	block->name = NULL;
	block->prev = block->next = NULL;
	for (pblock = alloc->first, idx = 0; pblock; pblock = pblock->next) {
		/* advance idx to the bank holding the end of this block */
		while (pblock->ptr >= (boundary = info[idx].address + info[idx].size))
			idx++;
		while (pblock->ptr + pblock->size >= (boundary = info[idx].address + info[idx].size))
			idx++;
		ptr2 = boundary;	/* candidate gap must not cross the bank end */
		if (pblock->next) {
			if (pblock->ptr + pblock->size == pblock->next->ptr)
				continue;	/* adjacent blocks: no gap at all */
			if (pblock->next->ptr < boundary)
				ptr2 = pblock->next->ptr;
		}
		ptr1 = ALIGN(pblock->ptr + pblock->size, align);
		if (ptr1 >= ptr2)
			continue;	/* alignment consumed the whole gap */
		size1 = ptr2 - ptr1;
		if ((int)size <= size1) {
			block->ptr = ptr1;
			block->size = size;
			return 0;
		}
	}
	/* no usable gap between blocks: try the remaining whole banks */
	while (++idx < 4) {
		if (size <= info[idx].size) {
			/* I assume that bank address is already aligned.. */
			block->ptr = info[idx].address;
			block->size = size;
			return 0;
		}
	}
	return -ENOMEM;
}
/*
 * snd_gf1_mem_alloc - allocate (or share) a region of onboard memory.
 * @alloc: the card's memory allocator
 * @owner: SNDRV_GF1_MEM_OWNER_* tag recorded on the block
 * @name: human-readable name (duplicated for the block)
 * @size: requested size in bytes
 * @w_16: nonzero to allocate from the 16-bit banks
 * @align: required alignment of the onboard address
 * @share_id: optional 4-word id; an existing block with the same id and
 *            size is reused with its reference count bumped
 *
 * Returns the block, or NULL when no space (or kernel memory) is left.
 */
struct snd_gf1_mem_block *snd_gf1_mem_alloc(struct snd_gf1_mem * alloc, int owner,
					    char *name, int size, int w_16, int align,
					    unsigned int *share_id)
{
	struct snd_gf1_mem_block block, *nblock;

	snd_gf1_mem_lock(alloc, 0);
	if (share_id != NULL) {
		nblock = snd_gf1_mem_share(alloc, share_id);
		if (nblock != NULL) {
			if (size != (int)nblock->size) {
				/* TODO: remove in the future */
				snd_printk(KERN_ERR "snd_gf1_mem_alloc - share: sizes differ\n");
				goto __std;
			}
			nblock->share++;
			snd_gf1_mem_lock(alloc, 1);
			/*
			 * Fix: return the shared block.  Returning NULL
			 * here reported every successful share as an
			 * allocation failure even though the reference
			 * count had already been incremented.
			 */
			return nblock;
		}
	}
      __std:
	if (snd_gf1_mem_find(alloc, &block, size, w_16, align) < 0) {
		snd_gf1_mem_lock(alloc, 1);
		return NULL;
	}
	if (share_id != NULL)
		memcpy(&block.share_id, share_id, sizeof(block.share_id));
	block.owner = owner;
	/* NOTE(review): a NULL from kstrdup() is stored silently, and the
	 * string leaks if snd_gf1_mem_xalloc() fails - pre-existing. */
	block.name = kstrdup(name, GFP_KERNEL);
	nblock = snd_gf1_mem_xalloc(alloc, &block);
	snd_gf1_mem_lock(alloc, 1);
	return nblock;
}
/*
 * snd_gf1_mem_free - free the block at onboard @address.
 * Returns the result of snd_gf1_mem_xfree(), or -EINVAL when no block
 * lives at that address.
 */
int snd_gf1_mem_free(struct snd_gf1_mem * alloc, unsigned int address)
{
	struct snd_gf1_mem_block *found;
	int result = -EINVAL;

	snd_gf1_mem_lock(alloc, 0);
	found = snd_gf1_mem_look(alloc, address);
	if (found != NULL)
		result = snd_gf1_mem_xfree(alloc, found);
	snd_gf1_mem_lock(alloc, 1);
	return result;
}
/*
 * snd_gf1_mem_init - set up the onboard memory allocator for a card.
 *
 * Pre-reserves the regions used by the hardware itself (the InterWave
 * LFO area in enhanced mode and the 4-byte default voice block) and, in
 * debug builds, registers the "gusmem" proc file.  Returns 0 or -ENOMEM.
 */
int snd_gf1_mem_init(struct snd_gus_card * gus)
{
	struct snd_gf1_mem *alloc;
	struct snd_gf1_mem_block block;
#ifdef CONFIG_SND_DEBUG
	struct snd_info_entry *entry;
#endif

	alloc = &gus->gf1.mem_alloc;
	mutex_init(&alloc->memory_mutex);
	alloc->first = alloc->last = NULL;
	if (!gus->gf1.memory)
		return 0;	/* card has no onboard memory */

	memset(&block, 0, sizeof(block));
	block.owner = SNDRV_GF1_MEM_OWNER_DRIVER;
	if (gus->gf1.enh_mode) {
		block.ptr = 0;
		block.size = 1024;
		/* NOTE(review): kstrdup result unchecked and leaked on
		 * xalloc failure - pre-existing */
		block.name = kstrdup("InterWave LFOs", GFP_KERNEL);
		if (snd_gf1_mem_xalloc(alloc, &block) == NULL)
			return -ENOMEM;
	}
	block.ptr = gus->gf1.default_voice_address;
	block.size = 4;
	block.name = kstrdup("Voice default (NULL's)", GFP_KERNEL);
	if (snd_gf1_mem_xalloc(alloc, &block) == NULL)
		return -ENOMEM;
#ifdef CONFIG_SND_DEBUG
	if (! snd_card_proc_new(gus->card, "gusmem", &entry))
		snd_info_set_text_ops(entry, gus, snd_gf1_mem_info_read);
#endif
	return 0;
}
int snd_gf1_mem_done(struct snd_gus_card * gus)
{
struct snd_gf1_mem *alloc;
struct snd_gf1_mem_block *block, *nblock;
alloc = &gus->gf1.mem_alloc;
block = alloc->first;
while (block) {
nblock = block->next;
snd_gf1_mem_xfree(alloc, block);
block = nblock;
}
return 0;
}
#ifdef CONFIG_SND_DEBUG
/*
 * snd_gf1_mem_info_read - dump allocator state to the "gusmem" proc
 * file: bank layout, every block (share info, flags, owner) and a
 * total/used/free summary.
 */
static void snd_gf1_mem_info_read(struct snd_info_entry *entry,
				  struct snd_info_buffer *buffer)
{
	struct snd_gus_card *gus;
	struct snd_gf1_mem *alloc;
	struct snd_gf1_mem_block *block;
	unsigned int total, used;
	int i;

	gus = entry->private_data;
	alloc = &gus->gf1.mem_alloc;
	mutex_lock(&alloc->memory_mutex);
	snd_iprintf(buffer, "8-bit banks : \n ");
	for (i = 0; i < 4; i++)
		snd_iprintf(buffer, "0x%06x (%04ik)%s", alloc->banks_8[i].address, alloc->banks_8[i].size >> 10, i + 1 < 4 ? "," : "");
	snd_iprintf(buffer, "\n"
		    "16-bit banks : \n ");
	/* 'total' is accumulated from the 16-bit bank view */
	for (i = total = 0; i < 4; i++) {
		snd_iprintf(buffer, "0x%06x (%04ik)%s", alloc->banks_16[i].address, alloc->banks_16[i].size >> 10, i + 1 < 4 ? "," : "");
		total += alloc->banks_16[i].size;
	}
	snd_iprintf(buffer, "\n");
	used = 0;
	for (block = alloc->first, i = 0; block; block = block->next, i++) {
		used += block->size;
		snd_iprintf(buffer, "Block %i at 0x%lx onboard 0x%x size %i (0x%x):\n", i, (long) block, block->ptr, block->size, block->size);
		if (block->share ||
		    block->share_id[0] || block->share_id[1] ||
		    block->share_id[2] || block->share_id[3])
			snd_iprintf(buffer, " Share : %i [id0 0x%x] [id1 0x%x] [id2 0x%x] [id3 0x%x]\n",
				    block->share,
				    block->share_id[0], block->share_id[1],
				    block->share_id[2], block->share_id[3]);
		snd_iprintf(buffer, " Flags :%s\n",
			    block->flags & SNDRV_GF1_MEM_BLOCK_16BIT ? " 16-bit" : "");
		snd_iprintf(buffer, " Owner : ");
		switch (block->owner) {
		case SNDRV_GF1_MEM_OWNER_DRIVER:
			snd_iprintf(buffer, "driver - %s\n", block->name);
			break;
		case SNDRV_GF1_MEM_OWNER_WAVE_SIMPLE:
			snd_iprintf(buffer, "SIMPLE wave\n");
			break;
		case SNDRV_GF1_MEM_OWNER_WAVE_GF1:
			snd_iprintf(buffer, "GF1 wave\n");
			break;
		case SNDRV_GF1_MEM_OWNER_WAVE_IWFFFF:
			snd_iprintf(buffer, "IWFFFF wave\n");
			break;
		default:
			snd_iprintf(buffer, "unknown\n");
		}
	}
	snd_iprintf(buffer, " Total: memory = %i, used = %i, free = %i\n",
		    total, used, total - used);
	mutex_unlock(&alloc->memory_mutex);
#if 0
	ultra_iprintf(buffer, " Verify: free = %i, max 8-bit block = %i, max 16-bit block = %i\n",
		      ultra_memory_free_size(card, &card->gf1.mem_alloc),
		      ultra_memory_free_block(card, &card->gf1.mem_alloc, 0),
		      ultra_memory_free_block(card, &card->gf1.mem_alloc, 1));
#endif
}
#endif
| gpl-2.0 |
ion-storm/Unleashed-N4 | sound/isa/gus/gus_mem.c | 14058 | 9900 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* GUS's memory allocation routines / bottom layer
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/slab.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/gus.h>
#include <sound/info.h>
#ifdef CONFIG_SND_DEBUG
static void snd_gf1_mem_info_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer);
#endif
/*
 * Take (xup == 0) or release (xup != 0) the allocator's mutex.
 */
void snd_gf1_mem_lock(struct snd_gf1_mem * alloc, int xup)
{
	if (xup)
		mutex_unlock(&alloc->memory_mutex);
	else
		mutex_lock(&alloc->memory_mutex);
}
/*
 * Insert a copy of @block into the allocator's address-ordered list.
 *
 * The caller must hold alloc->memory_mutex.  Returns the newly linked
 * list node, or NULL if the node itself could not be allocated.
 */
static struct snd_gf1_mem_block *snd_gf1_mem_xalloc(struct snd_gf1_mem * alloc,
						    struct snd_gf1_mem_block * block)
{
	struct snd_gf1_mem_block *pblock, *nblock;

	nblock = kmalloc(sizeof(struct snd_gf1_mem_block), GFP_KERNEL);
	if (nblock == NULL)
		return NULL;
	*nblock = *block;
	pblock = alloc->first;
	while (pblock) {
		if (pblock->ptr > nblock->ptr) {
			/* insert in front of pblock to keep the list sorted */
			nblock->prev = pblock->prev;
			nblock->next = pblock;
			pblock->prev = nblock;
			if (pblock == alloc->first)
				alloc->first = nblock;
			else
				nblock->prev->next = nblock;
			/*
			 * Fix: the node was successfully linked, so report
			 * success.  The old code returned NULL here (making
			 * callers treat a good insertion as a failure) and
			 * dropped a mutex this function never acquired.
			 */
			return nblock;
		}
		pblock = pblock->next;
	}
	/* append at the tail (or start an empty list) */
	nblock->next = NULL;
	if (alloc->last == NULL) {
		nblock->prev = NULL;
		alloc->first = alloc->last = nblock;
	} else {
		nblock->prev = alloc->last;
		alloc->last->next = nblock;
		alloc->last = nblock;
	}
	return nblock;
}
/*
 * Unlink @block from the allocator list and free it (including its name).
 *
 * If the block is shared, only the share count is dropped and the block
 * itself stays in the list.
 *
 * NOTE(review): on the shared path this releases alloc->memory_mutex even
 * though the function did not take it; callers such as snd_gf1_mem_free()
 * unlock again afterwards, which looks like a double unlock, while
 * snd_gf1_mem_done() calls in without holding the lock at all — confirm
 * the intended locking contract.
 */
int snd_gf1_mem_xfree(struct snd_gf1_mem * alloc, struct snd_gf1_mem_block * block)
{
	if (block->share) {	/* ok.. shared block */
		block->share--;
		mutex_unlock(&alloc->memory_mutex);
		return 0;
	}
	/* detach from the head pointer / left neighbour */
	if (alloc->first == block) {
		alloc->first = block->next;
		if (block->next)
			block->next->prev = NULL;
	} else {
		block->prev->next = block->next;
		if (block->next)
			block->next->prev = block->prev;
	}
	/* detach from the tail pointer (partly redundant with the above) */
	if (alloc->last == block) {
		alloc->last = block->prev;
		if (block->prev)
			block->prev->next = NULL;
	} else {
		block->next->prev = block->prev;
		if (block->prev)
			block->prev->next = block->next;
	}
	kfree(block->name);
	kfree(block);
	return 0;
}
/*
 * Look up the block that starts exactly at on-board @address, or NULL.
 */
static struct snd_gf1_mem_block *snd_gf1_mem_look(struct snd_gf1_mem * alloc,
						  unsigned int address)
{
	struct snd_gf1_mem_block *cur;

	for (cur = alloc->first; cur != NULL; cur = cur->next)
		if (cur->ptr == address)
			return cur;
	return NULL;
}
/*
 * Find an existing block with the given 4-word share id.
 * An all-zero id means "not shared" and never matches.
 */
static struct snd_gf1_mem_block *snd_gf1_mem_share(struct snd_gf1_mem * alloc,
						   unsigned int *share_id)
{
	struct snd_gf1_mem_block *cur;

	if (share_id[0] == 0 && share_id[1] == 0 &&
	    share_id[2] == 0 && share_id[3] == 0)
		return NULL;
	for (cur = alloc->first; cur; cur = cur->next) {
		if (memcmp(share_id, cur->share_id,
			   sizeof(cur->share_id)) == 0)
			return cur;
	}
	return NULL;
}
/*
 * Find a free region of @size bytes (aligned to @align) in on-board GUS
 * memory and fill in @block->ptr / @block->size on success.
 *
 * The search walks the (address-sorted) block list and examines the gaps
 * between neighbouring blocks, clamped to the bank boundaries taken from
 * the 8-bit or 16-bit bank table depending on @w_16.  Returns 0 on
 * success, -ENOMEM when no large-enough gap exists.
 */
static int snd_gf1_mem_find(struct snd_gf1_mem * alloc,
			    struct snd_gf1_mem_block * block,
			    unsigned int size, int w_16, int align)
{
	struct snd_gf1_bank_info *info = w_16 ? alloc->banks_16 : alloc->banks_8;
	unsigned int idx, boundary;
	int size1;
	struct snd_gf1_mem_block *pblock;
	unsigned int ptr1, ptr2;

	/* 16-bit samples require at least 2-byte alignment */
	if (w_16 && align < 2)
		align = 2;
	/* pre-initialize the candidate block descriptor */
	block->flags = w_16 ? SNDRV_GF1_MEM_BLOCK_16BIT : 0;
	block->owner = SNDRV_GF1_MEM_OWNER_DRIVER;
	block->share = 0;
	block->share_id[0] = block->share_id[1] =
	block->share_id[2] = block->share_id[3] = 0;
	block->name = NULL;
	block->prev = block->next = NULL;
	for (pblock = alloc->first, idx = 0; pblock; pblock = pblock->next) {
		/* advance idx to the bank containing the end of pblock */
		while (pblock->ptr >= (boundary = info[idx].address + info[idx].size))
			idx++;
		while (pblock->ptr + pblock->size >= (boundary = info[idx].address + info[idx].size))
			idx++;
		ptr2 = boundary;
		if (pblock->next) {
			if (pblock->ptr + pblock->size == pblock->next->ptr)
				continue;	/* no gap after this block */
			if (pblock->next->ptr < boundary)
				ptr2 = pblock->next->ptr;
		}
		/* gap is [ptr1, ptr2); ptr1 rounded up for alignment */
		ptr1 = ALIGN(pblock->ptr + pblock->size, align);
		if (ptr1 >= ptr2)
			continue;
		size1 = ptr2 - ptr1;
		if ((int)size <= size1) {
			block->ptr = ptr1;
			block->size = size;
			return 0;
		}
	}
	/*
	 * No gap between existing blocks fits; try whole (untouched) banks.
	 * NOTE(review): the pre-increment skips bank info[idx] itself (and
	 * bank 0 when the list is empty) — confirm this is intended.
	 */
	while (++idx < 4) {
		if (size <= info[idx].size) {
			/* I assume that bank address is already aligned.. */
			block->ptr = info[idx].address;
			block->size = size;
			return 0;
		}
	}
	return -ENOMEM;
}
/*
 * Allocate @size bytes of on-board GUS memory for @owner.
 *
 * If @share_id is given and a block with the same id already exists (and
 * has the same size), the existing block's share count is bumped and that
 * block is returned.  Returns the block on success, NULL on failure.
 */
struct snd_gf1_mem_block *snd_gf1_mem_alloc(struct snd_gf1_mem * alloc, int owner,
					    char *name, int size, int w_16, int align,
					    unsigned int *share_id)
{
	struct snd_gf1_mem_block block, *nblock;

	snd_gf1_mem_lock(alloc, 0);
	if (share_id != NULL) {
		nblock = snd_gf1_mem_share(alloc, share_id);
		if (nblock != NULL) {
			if (size != (int)nblock->size) {
				/* TODO: remove in the future */
				snd_printk(KERN_ERR "snd_gf1_mem_alloc - share: sizes differ\n");
				goto __std;
			}
			nblock->share++;
			snd_gf1_mem_lock(alloc, 1);
			/*
			 * Fix: return the shared block.  The old code
			 * returned NULL here, so a successful share hit was
			 * reported to the caller as an allocation failure.
			 */
			return nblock;
		}
	}
      __std:
	if (snd_gf1_mem_find(alloc, &block, size, w_16, align) < 0) {
		snd_gf1_mem_lock(alloc, 1);
		return NULL;
	}
	if (share_id != NULL)
		memcpy(&block.share_id, share_id, sizeof(block.share_id));
	block.owner = owner;
	block.name = kstrdup(name, GFP_KERNEL);
	nblock = snd_gf1_mem_xalloc(alloc, &block);
	snd_gf1_mem_lock(alloc, 1);
	return nblock;
}
/*
 * Free the block that starts at on-board @address.
 * Returns 0 on success, -EINVAL if no block starts there.
 */
int snd_gf1_mem_free(struct snd_gf1_mem * alloc, unsigned int address)
{
	struct snd_gf1_mem_block *found;
	int err = -EINVAL;

	snd_gf1_mem_lock(alloc, 0);
	found = snd_gf1_mem_look(alloc, address);
	if (found)
		err = snd_gf1_mem_xfree(alloc, found);
	snd_gf1_mem_lock(alloc, 1);
	return err;
}
/*
 * Initialize the on-board memory allocator for @gus and reserve the
 * regions used by the hardware (InterWave LFOs on enhanced mode chips,
 * plus the default-voice scratch area).  Also registers the "gusmem"
 * proc entry in debug builds.  Returns 0 or -ENOMEM.
 */
int snd_gf1_mem_init(struct snd_gus_card * gus)
{
	struct snd_gf1_mem *alloc;
	struct snd_gf1_mem_block block;
#ifdef CONFIG_SND_DEBUG
	struct snd_info_entry *entry;
#endif

	alloc = &gus->gf1.mem_alloc;
	mutex_init(&alloc->memory_mutex);
	alloc->first = alloc->last = NULL;
	if (!gus->gf1.memory)
		return 0;	/* no on-board memory present */
	memset(&block, 0, sizeof(block));
	block.owner = SNDRV_GF1_MEM_OWNER_DRIVER;
	if (gus->gf1.enh_mode) {
		block.ptr = 0;
		block.size = 1024;
		block.name = kstrdup("InterWave LFOs", GFP_KERNEL);
		if (snd_gf1_mem_xalloc(alloc, &block) == NULL) {
			/* fix: don't leak the kstrdup'd name on failure */
			kfree(block.name);
			return -ENOMEM;
		}
	}
	block.ptr = gus->gf1.default_voice_address;
	block.size = 4;
	block.name = kstrdup("Voice default (NULL's)", GFP_KERNEL);
	if (snd_gf1_mem_xalloc(alloc, &block) == NULL) {
		/* fix: don't leak the kstrdup'd name on failure */
		kfree(block.name);
		return -ENOMEM;
	}
#ifdef CONFIG_SND_DEBUG
	if (! snd_card_proc_new(gus->card, "gusmem", &entry))
		snd_info_set_text_ops(entry, gus, snd_gf1_mem_info_read);
#endif
	return 0;
}
/*
 * Tear down the allocator: release every remaining block.
 */
int snd_gf1_mem_done(struct snd_gus_card * gus)
{
	struct snd_gf1_mem *alloc = &gus->gf1.mem_alloc;
	struct snd_gf1_mem_block *cur = alloc->first;

	while (cur) {
		/* grab the successor first; xfree releases 'cur' */
		struct snd_gf1_mem_block *next = cur->next;

		snd_gf1_mem_xfree(alloc, cur);
		cur = next;
	}
	return 0;
}
#ifdef CONFIG_SND_DEBUG
/*
 * /proc "gusmem" read handler: print the 8/16-bit bank layout and every
 * allocated on-board memory block with its size, owner and share info.
 */
static void snd_gf1_mem_info_read(struct snd_info_entry *entry,
				  struct snd_info_buffer *buffer)
{
	struct snd_gus_card *gus;
	struct snd_gf1_mem *alloc;
	struct snd_gf1_mem_block *block;
	unsigned int total, used;
	int i;

	gus = entry->private_data;
	alloc = &gus->gf1.mem_alloc;
	mutex_lock(&alloc->memory_mutex);
	snd_iprintf(buffer, "8-bit banks : \n ");
	for (i = 0; i < 4; i++)
		snd_iprintf(buffer, "0x%06x (%04ik)%s", alloc->banks_8[i].address, alloc->banks_8[i].size >> 10, i + 1 < 4 ? "," : "");
	snd_iprintf(buffer, "\n"
		    "16-bit banks : \n ");
	/* NOTE(review): 'total' sums only the 16-bit banks while 'used'
	 * counts all blocks — confirm this is the intended report. */
	for (i = total = 0; i < 4; i++) {
		snd_iprintf(buffer, "0x%06x (%04ik)%s", alloc->banks_16[i].address, alloc->banks_16[i].size >> 10, i + 1 < 4 ? "," : "");
		total += alloc->banks_16[i].size;
	}
	snd_iprintf(buffer, "\n");
	used = 0;
	/* walk the sorted block list and describe each block */
	for (block = alloc->first, i = 0; block; block = block->next, i++) {
		used += block->size;
		snd_iprintf(buffer, "Block %i at 0x%lx onboard 0x%x size %i (0x%x):\n", i, (long) block, block->ptr, block->size, block->size);
		if (block->share ||
		    block->share_id[0] || block->share_id[1] ||
		    block->share_id[2] || block->share_id[3])
			snd_iprintf(buffer, " Share : %i [id0 0x%x] [id1 0x%x] [id2 0x%x] [id3 0x%x]\n",
				    block->share,
				    block->share_id[0], block->share_id[1],
				    block->share_id[2], block->share_id[3]);
		snd_iprintf(buffer, " Flags :%s\n",
			    block->flags & SNDRV_GF1_MEM_BLOCK_16BIT ? " 16-bit" : "");
		snd_iprintf(buffer, " Owner : ");
		switch (block->owner) {
		case SNDRV_GF1_MEM_OWNER_DRIVER:
			snd_iprintf(buffer, "driver - %s\n", block->name);
			break;
		case SNDRV_GF1_MEM_OWNER_WAVE_SIMPLE:
			snd_iprintf(buffer, "SIMPLE wave\n");
			break;
		case SNDRV_GF1_MEM_OWNER_WAVE_GF1:
			snd_iprintf(buffer, "GF1 wave\n");
			break;
		case SNDRV_GF1_MEM_OWNER_WAVE_IWFFFF:
			snd_iprintf(buffer, "IWFFFF wave\n");
			break;
		default:
			snd_iprintf(buffer, "unknown\n");
		}
	}
	snd_iprintf(buffer, " Total: memory = %i, used = %i, free = %i\n",
		    total, used, total - used);
	mutex_unlock(&alloc->memory_mutex);
#if 0
	ultra_iprintf(buffer, " Verify: free = %i, max 8-bit block = %i, max 16-bit block = %i\n",
		      ultra_memory_free_size(card, &card->gf1.mem_alloc),
		      ultra_memory_free_block(card, &card->gf1.mem_alloc, 0),
		      ultra_memory_free_block(card, &card->gf1.mem_alloc, 1));
#endif
}
#endif
| gpl-2.0 |
sqlfocus/linux | sound/soc/fsl/imx-pcm-fiq.c | 235 | 9452 | /*
* imx-pcm-fiq.c -- ALSA Soc Audio Layer
*
* Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
*
* This code is based on code copyrighted by Freescale,
* Liam Girdwood, Javier Martin and probably others.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/dmaengine_pcm.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <asm/fiq.h>
#include <linux/platform_data/asoc-imx-ssi.h>
#include "imx-ssi.h"
#include "imx-pcm.h"
/* Per-substream state for the FIQ-driven PCM transport. */
struct imx_pcm_runtime_data {
	unsigned int period;	/* period size in bytes */
	int periods;		/* number of periods in the ring buffer */
	unsigned long offset;	/* last position read from the FIQ registers */
	struct hrtimer hrt;	/* poll timer emulating period interrupts */
	int poll_time_ns;	/* timer interval: one period's duration */
	struct snd_pcm_substream *substream;
	atomic_t playing;	/* playback direction currently running */
	atomic_t capturing;	/* capture direction currently running */
};
/*
 * Periodic hrtimer callback polling the transfer progress.
 *
 * The current position lives in banked FIQ registers — r8 for playback,
 * r9 for capture, offset in the low 16 bits (presumably maintained by
 * the FIQ assembly handler, which is outside this file — confirm).
 * Report a period elapsed and re-arm unless both directions stopped.
 */
static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
{
	struct imx_pcm_runtime_data *iprtd =
		container_of(hrt, struct imx_pcm_runtime_data, hrt);
	struct snd_pcm_substream *substream = iprtd->substream;
	struct pt_regs regs;

	/* self-cancel once neither playback nor capture is active */
	if (!atomic_read(&iprtd->playing) && !atomic_read(&iprtd->capturing))
		return HRTIMER_NORESTART;

	get_fiq_regs(&regs);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		iprtd->offset = regs.ARM_r8 & 0xffff;
	else
		iprtd->offset = regs.ARM_r9 & 0xffff;

	snd_pcm_period_elapsed(substream);
	hrtimer_forward_now(hrt, ns_to_ktime(iprtd->poll_time_ns));

	return HRTIMER_RESTART;
}
/* FIQ handler descriptor used with claim_fiq()/release_fiq(). */
static struct fiq_handler fh = {
	.name = DRV_NAME,
};
/*
 * hw_params: cache period geometry, derive the polling interval (one
 * period's duration in ns) and attach the preallocated DMA buffer.
 */
static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct imx_pcm_runtime_data *iprtd =
		substream->runtime->private_data;
	/* integer division first, exactly as before */
	int ns_per_frame = 1000000000 / params_rate(params);

	iprtd->periods = params_periods(params);
	iprtd->period = params_period_bytes(params);
	iprtd->offset = 0;
	iprtd->poll_time_ns = ns_per_frame * params_period_size(params);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	return 0;
}
/*
 * prepare: program the buffer end (buffer size - 1, in the upper 16
 * bits) into the FIQ register for the stream's direction.
 */
static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct imx_pcm_runtime_data *iprtd =
		substream->runtime->private_data;
	unsigned long end = iprtd->period * iprtd->periods - 1;
	struct pt_regs regs;

	get_fiq_regs(&regs);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		regs.ARM_r8 = end << 16;
	else
		regs.ARM_r9 = end << 16;
	set_fiq_regs(&regs);

	return 0;
}
static int imx_pcm_fiq;
/*
 * trigger: track which directions are running, start the polling
 * hrtimer and gate the FIQ.  The FIQ is shared by playback and
 * capture, so it is only disabled once both are stopped.
 */
static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			atomic_set(&iprtd->playing, 1);
		else
			atomic_set(&iprtd->capturing, 1);
		hrtimer_start(&iprtd->hrt, ns_to_ktime(iprtd->poll_time_ns),
			      HRTIMER_MODE_REL);
		enable_fiq(imx_pcm_fiq);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			atomic_set(&iprtd->playing, 0);
		else
			atomic_set(&iprtd->capturing, 0);
		/* the hrtimer stops itself via the callback's NORESTART path */
		if (!atomic_read(&iprtd->playing) &&
		    !atomic_read(&iprtd->capturing))
			disable_fiq(imx_pcm_fiq);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * pointer: report the position last snapshotted from the FIQ registers,
 * converted from bytes to frames.
 */
static snd_pcm_uframes_t snd_imx_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct imx_pcm_runtime_data *iprtd =
		substream->runtime->private_data;

	return bytes_to_frames(substream->runtime, iprtd->offset);
}
/* Capabilities of the FIQ-based transport: 16-bit LE only, fixed-size
 * preallocated buffer, mmap-able, pause/resume supported. */
static struct snd_pcm_hardware snd_imx_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
	.period_bytes_min = 128,
	.period_bytes_max = 16 * 1024,
	.periods_min = 4,
	.periods_max = 255,
	.fifo_size = 0,
};
/*
 * open: allocate the per-stream runtime data, initialise the polling
 * hrtimer and apply the transport's constraints and capabilities.
 */
static int snd_imx_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd;
	int ret;

	iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL);
	if (iprtd == NULL)
		return -ENOMEM;
	runtime->private_data = iprtd;

	iprtd->substream = substream;

	atomic_set(&iprtd->playing, 0);
	atomic_set(&iprtd->capturing, 0);
	hrtimer_init(&iprtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	iprtd->hrt.function = snd_hrtimer_callback;

	/* buffer size must be a whole number of periods */
	ret = snd_pcm_hw_constraint_integer(substream->runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0) {
		kfree(iprtd);
		return ret;
	}

	snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);
	return 0;
}
/*
 * close: stop the polling timer and release the per-stream state.
 */
static int snd_imx_close(struct snd_pcm_substream *substream)
{
	struct imx_pcm_runtime_data *iprtd =
		substream->runtime->private_data;

	hrtimer_cancel(&iprtd->hrt);
	kfree(iprtd);

	return 0;
}
/*
 * mmap: map the write-combined DMA buffer straight into user space.
 */
static int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
			    struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;

	ret = dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
			  runtime->dma_addr, runtime->dma_bytes);

	pr_debug("%s: ret: %d %p %pad 0x%08zx\n", __func__, ret,
		 runtime->dma_area,
		 &runtime->dma_addr,
		 runtime->dma_bytes);
	return ret;
}
/* PCM operations for the FIQ-driven transport. */
static struct snd_pcm_ops imx_pcm_ops = {
	.open		= snd_imx_open,
	.close		= snd_imx_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= snd_imx_pcm_hw_params,
	.prepare	= snd_imx_pcm_prepare,
	.trigger	= snd_imx_pcm_trigger,
	.pointer	= snd_imx_pcm_pointer,
	.mmap		= snd_imx_pcm_mmap,
};
/*
 * Preallocate the fixed-size, write-combined DMA buffer for one stream
 * direction.  Returns 0 or -ENOMEM.
 */
static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_dma_buffer *buf =
		&pcm->streams[stream].substream->dma_buffer;
	const size_t size = IMX_SSI_DMABUF_SIZE;

	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;
	buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
	if (!buf->area)
		return -ENOMEM;
	buf->bytes = size;

	return 0;
}
/*
 * Set the card's DMA mask and preallocate buffers for whichever stream
 * directions exist on this PCM.
 */
static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int stream;
	int ret;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	for (stream = SNDRV_PCM_STREAM_PLAYBACK;
	     stream <= SNDRV_PCM_STREAM_CAPTURE; stream++) {
		if (!pcm->streams[stream].substream)
			continue;
		ret = imx_pcm_preallocate_dma_buffer(pcm, stream);
		if (ret)
			return ret;
	}

	return 0;
}
static int ssi_irq = 0;
/*
 * pcm_new callback: allocate the DMA buffers via imx_pcm_new() and hand
 * their addresses to the FIQ assembly routine, then install that routine
 * as the active FIQ handler.
 */
static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_pcm *pcm = rtd->pcm;
	struct snd_pcm_substream *substream;
	int ret;

	ret = imx_pcm_new(rtd);
	if (ret)
		return ret;

	substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	if (substream) {
		struct snd_dma_buffer *buf = &substream->dma_buffer;

		/* tell the FIQ code where the playback buffer lives */
		imx_ssi_fiq_tx_buffer = (unsigned long)buf->area;
	}

	substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
	if (substream) {
		struct snd_dma_buffer *buf = &substream->dma_buffer;

		/* tell the FIQ code where the capture buffer lives */
		imx_ssi_fiq_rx_buffer = (unsigned long)buf->area;
	}

	/* copy the FIQ assembly into the FIQ vector */
	set_fiq_handler(&imx_ssi_fiq_start,
			&imx_ssi_fiq_end - &imx_ssi_fiq_start);

	return 0;
}
/*
 * Free the preallocated DMA buffers of both stream directions, if any.
 */
static void imx_pcm_free(struct snd_pcm *pcm)
{
	int stream;

	for (stream = 0; stream < 2; stream++) {
		struct snd_pcm_substream *substream =
			pcm->streams[stream].substream;
		struct snd_dma_buffer *buf;

		if (!substream)
			continue;

		buf = &substream->dma_buffer;
		if (!buf->area)
			continue;

		dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
		buf->area = NULL;
	}
}
/*
 * pcm_free callback: detach the SSI interrupt from FIQ mode, release
 * the FIQ handler and free the preallocated DMA buffers.
 */
static void imx_pcm_fiq_free(struct snd_pcm *pcm)
{
	mxc_set_irq_fiq(ssi_irq, 0);
	release_fiq(&fh);
	imx_pcm_free(pcm);
}
/* ASoC platform driver wiring up the FIQ-based PCM callbacks. */
static struct snd_soc_platform_driver imx_soc_platform_fiq = {
	.ops		= &imx_pcm_ops,
	.pcm_new	= imx_pcm_fiq_new,
	.pcm_free	= imx_pcm_fiq_free,
};
/*
 * Claim the FIQ, route the SSI interrupt to it and register the
 * FIQ-based PCM platform driver.
 *
 * Returns 0 on success or a negative error code; on registration
 * failure the FIQ claim and irq routing are rolled back.
 */
int imx_pcm_fiq_init(struct platform_device *pdev,
		     struct imx_pcm_fiq_params *params)
{
	int ret;

	ret = claim_fiq(&fh);
	if (ret) {
		dev_err(&pdev->dev, "failed to claim fiq: %d", ret);
		return ret;
	}

	mxc_set_irq_fiq(params->irq, 1);
	ssi_irq = params->irq;

	imx_pcm_fiq = params->irq;

	imx_ssi_fiq_base = (unsigned long)params->base;

	/* NOTE(review): asymmetric TX/RX burst sizes — presumably tuned
	 * for the SSI FIFO watermarks; confirm against the SSI setup. */
	params->dma_params_tx->maxburst = 4;
	params->dma_params_rx->maxburst = 6;

	ret = snd_soc_register_platform(&pdev->dev, &imx_soc_platform_fiq);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
	mxc_set_irq_fiq(ssi_irq, 0);
	release_fiq(&fh);

	return ret;
}
/* Counterpart of imx_pcm_fiq_init(): unregister the platform driver. */
void imx_pcm_fiq_exit(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
}
EXPORT_SYMBOL_GPL(imx_pcm_fiq_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
varigit/kernel-VAR-SOM-AMxx | drivers/mfd/db8500-prcmu.c | 235 | 84353 | /*
* Copyright (C) STMicroelectronics 2009
* Copyright (C) ST-Ericsson SA 2010
*
* License Terms: GNU General Public License v2
* Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
* Author: Sundar Iyer <sundar.iyer@stericsson.com>
* Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
*
* U8500 PRCM Unit interface driver
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/irq.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/machine.h>
#include <linux/cpufreq.h>
#include <linux/platform_data/ux500_wdt.h>
#include <linux/platform_data/db8500_thermal.h>
#include "dbx500-prcmu-regs.h"
/* Index of different voltages to be used when accessing AVSData */
#define PRCM_AVS_BASE 0x2FC
#define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0)
#define PRCM_AVS_VBB_MAX_OPP (PRCM_AVS_BASE + 0x1)
#define PRCM_AVS_VBB_100_OPP (PRCM_AVS_BASE + 0x2)
#define PRCM_AVS_VBB_50_OPP (PRCM_AVS_BASE + 0x3)
#define PRCM_AVS_VARM_MAX_OPP (PRCM_AVS_BASE + 0x4)
#define PRCM_AVS_VARM_100_OPP (PRCM_AVS_BASE + 0x5)
#define PRCM_AVS_VARM_50_OPP (PRCM_AVS_BASE + 0x6)
#define PRCM_AVS_VARM_RET (PRCM_AVS_BASE + 0x7)
#define PRCM_AVS_VAPE_100_OPP (PRCM_AVS_BASE + 0x8)
#define PRCM_AVS_VAPE_50_OPP (PRCM_AVS_BASE + 0x9)
#define PRCM_AVS_VMOD_100_OPP (PRCM_AVS_BASE + 0xA)
#define PRCM_AVS_VMOD_50_OPP (PRCM_AVS_BASE + 0xB)
#define PRCM_AVS_VSAFE (PRCM_AVS_BASE + 0xC)
#define PRCM_AVS_VOLTAGE 0
#define PRCM_AVS_VOLTAGE_MASK 0x3f
#define PRCM_AVS_ISSLOWSTARTUP 6
#define PRCM_AVS_ISSLOWSTARTUP_MASK (1 << PRCM_AVS_ISSLOWSTARTUP)
#define PRCM_AVS_ISMODEENABLE 7
#define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE)
#define PRCM_BOOT_STATUS 0xFFF
#define PRCM_ROMCODE_A2P 0xFFE
#define PRCM_ROMCODE_P2A 0xFFD
#define PRCM_XP70_CUR_PWR_STATE 0xFFC /* 4 BYTES */
#define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */
#define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */
#define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0)
#define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1)
#define PRCM_MBOX_HEADER_REQ_MB2 (_PRCM_MBOX_HEADER + 0x2)
#define PRCM_MBOX_HEADER_REQ_MB3 (_PRCM_MBOX_HEADER + 0x3)
#define PRCM_MBOX_HEADER_REQ_MB4 (_PRCM_MBOX_HEADER + 0x4)
#define PRCM_MBOX_HEADER_REQ_MB5 (_PRCM_MBOX_HEADER + 0x5)
#define PRCM_MBOX_HEADER_ACK_MB0 (_PRCM_MBOX_HEADER + 0x8)
/* Req Mailboxes */
#define PRCM_REQ_MB0 0xFDC /* 12 bytes */
#define PRCM_REQ_MB1 0xFD0 /* 12 bytes */
#define PRCM_REQ_MB2 0xFC0 /* 16 bytes */
#define PRCM_REQ_MB3 0xE4C /* 372 bytes */
#define PRCM_REQ_MB4 0xE48 /* 4 bytes */
#define PRCM_REQ_MB5 0xE44 /* 4 bytes */
/* Ack Mailboxes */
#define PRCM_ACK_MB0 0xE08 /* 52 bytes */
#define PRCM_ACK_MB1 0xE04 /* 4 bytes */
#define PRCM_ACK_MB2 0xE00 /* 4 bytes */
#define PRCM_ACK_MB3 0xDFC /* 4 bytes */
#define PRCM_ACK_MB4 0xDF8 /* 4 bytes */
#define PRCM_ACK_MB5 0xDF4 /* 4 bytes */
/* Mailbox 0 headers */
#define MB0H_POWER_STATE_TRANS 0
#define MB0H_CONFIG_WAKEUPS_EXE 1
#define MB0H_READ_WAKEUP_ACK 3
#define MB0H_CONFIG_WAKEUPS_SLEEP 4
#define MB0H_WAKEUP_EXE 2
#define MB0H_WAKEUP_SLEEP 5
/* Mailbox 0 REQs */
#define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0)
#define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x1)
#define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x2)
#define PRCM_REQ_MB0_DO_NOT_WFI (PRCM_REQ_MB0 + 0x3)
#define PRCM_REQ_MB0_WAKEUP_8500 (PRCM_REQ_MB0 + 0x4)
#define PRCM_REQ_MB0_WAKEUP_4500 (PRCM_REQ_MB0 + 0x8)
/* Mailbox 0 ACKs */
#define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0)
#define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1)
#define PRCM_ACK_MB0_WAKEUP_0_8500 (PRCM_ACK_MB0 + 0x4)
#define PRCM_ACK_MB0_WAKEUP_0_4500 (PRCM_ACK_MB0 + 0x8)
#define PRCM_ACK_MB0_WAKEUP_1_8500 (PRCM_ACK_MB0 + 0x1C)
#define PRCM_ACK_MB0_WAKEUP_1_4500 (PRCM_ACK_MB0 + 0x20)
#define PRCM_ACK_MB0_EVENT_4500_NUMBERS 20
/* Mailbox 1 headers */
#define MB1H_ARM_APE_OPP 0x0
#define MB1H_RESET_MODEM 0x2
#define MB1H_REQUEST_APE_OPP_100_VOLT 0x3
#define MB1H_RELEASE_APE_OPP_100_VOLT 0x4
#define MB1H_RELEASE_USB_WAKEUP 0x5
#define MB1H_PLL_ON_OFF 0x6
/* Mailbox 1 Requests */
#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0)
#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1)
#define PRCM_REQ_MB1_PLL_ON_OFF (PRCM_REQ_MB1 + 0x4)
#define PLL_SOC0_OFF 0x1
#define PLL_SOC0_ON 0x2
#define PLL_SOC1_OFF 0x4
#define PLL_SOC1_ON 0x8
/* Mailbox 1 ACKs */
#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0)
#define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1)
#define PRCM_ACK_MB1_APE_VOLTAGE_STATUS (PRCM_ACK_MB1 + 0x2)
#define PRCM_ACK_MB1_DVFS_STATUS (PRCM_ACK_MB1 + 0x3)
/* Mailbox 2 headers */
#define MB2H_DPS 0x0
#define MB2H_AUTO_PWR 0x1
/* Mailbox 2 REQs */
#define PRCM_REQ_MB2_SVA_MMDSP (PRCM_REQ_MB2 + 0x0)
#define PRCM_REQ_MB2_SVA_PIPE (PRCM_REQ_MB2 + 0x1)
#define PRCM_REQ_MB2_SIA_MMDSP (PRCM_REQ_MB2 + 0x2)
#define PRCM_REQ_MB2_SIA_PIPE (PRCM_REQ_MB2 + 0x3)
#define PRCM_REQ_MB2_SGA (PRCM_REQ_MB2 + 0x4)
#define PRCM_REQ_MB2_B2R2_MCDE (PRCM_REQ_MB2 + 0x5)
#define PRCM_REQ_MB2_ESRAM12 (PRCM_REQ_MB2 + 0x6)
#define PRCM_REQ_MB2_ESRAM34 (PRCM_REQ_MB2 + 0x7)
#define PRCM_REQ_MB2_AUTO_PM_SLEEP (PRCM_REQ_MB2 + 0x8)
#define PRCM_REQ_MB2_AUTO_PM_IDLE (PRCM_REQ_MB2 + 0xC)
/* Mailbox 2 ACKs */
#define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0)
#define HWACC_PWR_ST_OK 0xFE
/* Mailbox 3 headers */
#define MB3H_ANC 0x0
#define MB3H_SIDETONE 0x1
#define MB3H_SYSCLK 0xE
/* Mailbox 3 Requests */
#define PRCM_REQ_MB3_ANC_FIR_COEFF (PRCM_REQ_MB3 + 0x0)
#define PRCM_REQ_MB3_ANC_IIR_COEFF (PRCM_REQ_MB3 + 0x20)
#define PRCM_REQ_MB3_ANC_SHIFTER (PRCM_REQ_MB3 + 0x60)
#define PRCM_REQ_MB3_ANC_WARP (PRCM_REQ_MB3 + 0x64)
#define PRCM_REQ_MB3_SIDETONE_FIR_GAIN (PRCM_REQ_MB3 + 0x68)
#define PRCM_REQ_MB3_SIDETONE_FIR_COEFF (PRCM_REQ_MB3 + 0x6C)
#define PRCM_REQ_MB3_SYSCLK_MGT (PRCM_REQ_MB3 + 0x16C)
/* Mailbox 4 headers */
#define MB4H_DDR_INIT 0x0
#define MB4H_MEM_ST 0x1
#define MB4H_HOTDOG 0x12
#define MB4H_HOTMON 0x13
#define MB4H_HOT_PERIOD 0x14
#define MB4H_A9WDOG_CONF 0x16
#define MB4H_A9WDOG_EN 0x17
#define MB4H_A9WDOG_DIS 0x18
#define MB4H_A9WDOG_LOAD 0x19
#define MB4H_A9WDOG_KICK 0x20
/* Mailbox 4 Requests */
#define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE (PRCM_REQ_MB4 + 0x0)
#define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE (PRCM_REQ_MB4 + 0x1)
#define PRCM_REQ_MB4_ESRAM0_ST (PRCM_REQ_MB4 + 0x3)
#define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 0x0)
#define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 0x0)
#define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 0x1)
#define PRCM_REQ_MB4_HOTMON_CONFIG (PRCM_REQ_MB4 + 0x2)
#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 0x0)
#define HOTMON_CONFIG_LOW BIT(0)
#define HOTMON_CONFIG_HIGH BIT(1)
#define PRCM_REQ_MB4_A9WDOG_0 (PRCM_REQ_MB4 + 0x0)
#define PRCM_REQ_MB4_A9WDOG_1 (PRCM_REQ_MB4 + 0x1)
#define PRCM_REQ_MB4_A9WDOG_2 (PRCM_REQ_MB4 + 0x2)
#define PRCM_REQ_MB4_A9WDOG_3 (PRCM_REQ_MB4 + 0x3)
#define A9WDOG_AUTO_OFF_EN BIT(7)
#define A9WDOG_AUTO_OFF_DIS 0
#define A9WDOG_ID_MASK 0xf
/* Mailbox 5 Requests */
#define PRCM_REQ_MB5_I2C_SLAVE_OP (PRCM_REQ_MB5 + 0x0)
#define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1)
#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2)
#define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3)
#define PRCMU_I2C_WRITE(slave) (((slave) << 1) | BIT(6))
#define PRCMU_I2C_READ(slave) (((slave) << 1) | BIT(0) | BIT(6))
#define PRCMU_I2C_STOP_EN BIT(3)
/* Mailbox 5 ACKs */
#define PRCM_ACK_MB5_I2C_STATUS (PRCM_ACK_MB5 + 0x1)
#define PRCM_ACK_MB5_I2C_VAL (PRCM_ACK_MB5 + 0x3)
#define I2C_WR_OK 0x1
#define I2C_RD_OK 0x2
#define NUM_MB 8
#define MBOX_BIT BIT
#define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1)
/*
* Wakeups/IRQs
*/
#define WAKEUP_BIT_RTC BIT(0)
#define WAKEUP_BIT_RTT0 BIT(1)
#define WAKEUP_BIT_RTT1 BIT(2)
#define WAKEUP_BIT_HSI0 BIT(3)
#define WAKEUP_BIT_HSI1 BIT(4)
#define WAKEUP_BIT_CA_WAKE BIT(5)
#define WAKEUP_BIT_USB BIT(6)
#define WAKEUP_BIT_ABB BIT(7)
#define WAKEUP_BIT_ABB_FIFO BIT(8)
#define WAKEUP_BIT_SYSCLK_OK BIT(9)
#define WAKEUP_BIT_CA_SLEEP BIT(10)
#define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
#define WAKEUP_BIT_SIDE_TONE_OK BIT(12)
#define WAKEUP_BIT_ANC_OK BIT(13)
#define WAKEUP_BIT_SW_ERROR BIT(14)
#define WAKEUP_BIT_AC_SLEEP_ACK BIT(15)
#define WAKEUP_BIT_ARM BIT(17)
#define WAKEUP_BIT_HOTMON_LOW BIT(18)
#define WAKEUP_BIT_HOTMON_HIGH BIT(19)
#define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
#define WAKEUP_BIT_GPIO0 BIT(23)
#define WAKEUP_BIT_GPIO1 BIT(24)
#define WAKEUP_BIT_GPIO2 BIT(25)
#define WAKEUP_BIT_GPIO3 BIT(26)
#define WAKEUP_BIT_GPIO4 BIT(27)
#define WAKEUP_BIT_GPIO5 BIT(28)
#define WAKEUP_BIT_GPIO6 BIT(29)
#define WAKEUP_BIT_GPIO7 BIT(30)
#define WAKEUP_BIT_GPIO8 BIT(31)
/* Cached PRCMU firmware version; 'valid' flags whether 'version' has been
 * populated (presumably during firmware probing — code outside this view). */
static struct {
	bool valid;
	struct prcmu_fw_version version;
} fw_info;

/* irq domain mapping the PRCMU wakeup bits onto Linux irq numbers */
static struct irq_domain *db8500_irq_domain;
/*
* This vector maps irq numbers to the bits in the bit field used in
* communication with the PRCMU firmware.
*
* The reason for having this is to keep the irq numbers contiguous even though
* the bits in the bit field are not. (The bits also have a tendency to move
* around, to further complicate matters.)
*/
#define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name))
#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
#define IRQ_PRCMU_RTC 0
#define IRQ_PRCMU_RTT0 1
#define IRQ_PRCMU_RTT1 2
#define IRQ_PRCMU_HSI0 3
#define IRQ_PRCMU_HSI1 4
#define IRQ_PRCMU_CA_WAKE 5
#define IRQ_PRCMU_USB 6
#define IRQ_PRCMU_ABB 7
#define IRQ_PRCMU_ABB_FIFO 8
#define IRQ_PRCMU_ARM 9
#define IRQ_PRCMU_MODEM_SW_RESET_REQ 10
#define IRQ_PRCMU_GPIO0 11
#define IRQ_PRCMU_GPIO1 12
#define IRQ_PRCMU_GPIO2 13
#define IRQ_PRCMU_GPIO3 14
#define IRQ_PRCMU_GPIO4 15
#define IRQ_PRCMU_GPIO5 16
#define IRQ_PRCMU_GPIO6 17
#define IRQ_PRCMU_GPIO7 18
#define IRQ_PRCMU_GPIO8 19
#define IRQ_PRCMU_CA_SLEEP 20
#define IRQ_PRCMU_HOTMON_LOW 21
#define IRQ_PRCMU_HOTMON_HIGH 22
#define NUM_PRCMU_WAKEUPS 23
static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = {
IRQ_ENTRY(RTC),
IRQ_ENTRY(RTT0),
IRQ_ENTRY(RTT1),
IRQ_ENTRY(HSI0),
IRQ_ENTRY(HSI1),
IRQ_ENTRY(CA_WAKE),
IRQ_ENTRY(USB),
IRQ_ENTRY(ABB),
IRQ_ENTRY(ABB_FIFO),
IRQ_ENTRY(CA_SLEEP),
IRQ_ENTRY(ARM),
IRQ_ENTRY(HOTMON_LOW),
IRQ_ENTRY(HOTMON_HIGH),
IRQ_ENTRY(MODEM_SW_RESET_REQ),
IRQ_ENTRY(GPIO0),
IRQ_ENTRY(GPIO1),
IRQ_ENTRY(GPIO2),
IRQ_ENTRY(GPIO3),
IRQ_ENTRY(GPIO4),
IRQ_ENTRY(GPIO5),
IRQ_ENTRY(GPIO6),
IRQ_ENTRY(GPIO7),
IRQ_ENTRY(GPIO8)
};
#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
WAKEUP_ENTRY(RTC),
WAKEUP_ENTRY(RTT0),
WAKEUP_ENTRY(RTT1),
WAKEUP_ENTRY(HSI0),
WAKEUP_ENTRY(HSI1),
WAKEUP_ENTRY(USB),
WAKEUP_ENTRY(ABB),
WAKEUP_ENTRY(ABB_FIFO),
WAKEUP_ENTRY(ARM)
};
/*
* mb0_transfer - state needed for mailbox 0 communication.
* @lock: The transaction lock.
* @dbb_events_lock: A lock used to handle concurrent access to (parts of)
* the request data.
* @mask_work: Work structure used for (un)masking wakeup interrupts.
* @req: Request data that need to persist between requests.
*/
static struct {
spinlock_t lock;
spinlock_t dbb_irqs_lock;
struct work_struct mask_work;
struct mutex ac_wake_lock;
struct completion ac_wake_work;
struct {
u32 dbb_irqs;
u32 dbb_wakeups;
u32 abb_events;
} req;
} mb0_transfer;
/*
* mb1_transfer - state needed for mailbox 1 communication.
* @lock: The transaction lock.
* @work: The transaction completion structure.
* @ape_opp: The current APE OPP.
* @ack: Reply ("acknowledge") data.
*/
static struct {
struct mutex lock;
struct completion work;
u8 ape_opp;
struct {
u8 header;
u8 arm_opp;
u8 ape_opp;
u8 ape_voltage_status;
} ack;
} mb1_transfer;
/*
* mb2_transfer - state needed for mailbox 2 communication.
* @lock: The transaction lock.
* @work: The transaction completion structure.
* @auto_pm_lock: The autonomous power management configuration lock.
* @auto_pm_enabled: A flag indicating whether autonomous PM is enabled.
* @req: Request data that need to persist between requests.
* @ack: Reply ("acknowledge") data.
*/
static struct {
struct mutex lock;
struct completion work;
spinlock_t auto_pm_lock;
bool auto_pm_enabled;
struct {
u8 status;
} ack;
} mb2_transfer;
/*
* mb3_transfer - state needed for mailbox 3 communication.
* @lock: The request lock.
* @sysclk_lock: A lock used to handle concurrent sysclk requests.
* @sysclk_work: Work structure used for sysclk requests.
*/
static struct {
spinlock_t lock;
struct mutex sysclk_lock;
struct completion sysclk_work;
} mb3_transfer;
/*
* mb4_transfer - state needed for mailbox 4 communication.
* @lock: The transaction lock.
* @work: The transaction completion structure.
*/
static struct {
struct mutex lock;
struct completion work;
} mb4_transfer;
/*
* mb5_transfer - state needed for mailbox 5 communication.
* @lock: The transaction lock.
* @work: The transaction completion structure.
* @ack: Reply ("acknowledge") data.
*/
static struct {
struct mutex lock;
struct completion work;
struct {
u8 status;
u8 value;
} ack;
} mb5_transfer;
static atomic_t ac_wake_req_state = ATOMIC_INIT(0);
/* Spinlocks */
static DEFINE_SPINLOCK(prcmu_lock);
static DEFINE_SPINLOCK(clkout_lock);
/* Global var to runtime determine TCDM base for v2 or v1 */
static __iomem void *tcdm_base;
static __iomem void *prcmu_base;
/* Descriptor for one PRCM_<clk>_MGT register-managed clock. */
struct clk_mgt {
	u32 offset;	/* PRCM_<clk>_MGT register offset */
	u32 pllsw;	/* PLLSW bits; initialised to 0 in the table below —
			 * presumably filled in at runtime (not in this view) */
	int branch;	/* PLL routing: PLL_RAW / PLL_FIX / PLL_DIV */
	bool clk38div;	/* whether the divided 38.4 MHz source applies */
};
enum {
PLL_RAW,
PLL_FIX,
PLL_DIV
};
static DEFINE_SPINLOCK(clk_mgt_lock);
#define CLK_MGT_ENTRY(_name, _branch, _clk38div)[PRCMU_##_name] = \
{ (PRCM_##_name##_MGT), 0 , _branch, _clk38div}
static struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
CLK_MGT_ENTRY(SGACLK, PLL_DIV, false),
CLK_MGT_ENTRY(UARTCLK, PLL_FIX, true),
CLK_MGT_ENTRY(MSP02CLK, PLL_FIX, true),
CLK_MGT_ENTRY(MSP1CLK, PLL_FIX, true),
CLK_MGT_ENTRY(I2CCLK, PLL_FIX, true),
CLK_MGT_ENTRY(SDMMCCLK, PLL_DIV, true),
CLK_MGT_ENTRY(SLIMCLK, PLL_FIX, true),
CLK_MGT_ENTRY(PER1CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER2CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER3CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER5CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER6CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER7CLK, PLL_DIV, true),
CLK_MGT_ENTRY(LCDCLK, PLL_FIX, true),
CLK_MGT_ENTRY(BMLCLK, PLL_DIV, true),
CLK_MGT_ENTRY(HSITXCLK, PLL_DIV, true),
CLK_MGT_ENTRY(HSIRXCLK, PLL_DIV, true),
CLK_MGT_ENTRY(HDMICLK, PLL_FIX, false),
CLK_MGT_ENTRY(APEATCLK, PLL_DIV, true),
CLK_MGT_ENTRY(APETRACECLK, PLL_DIV, true),
CLK_MGT_ENTRY(MCDECLK, PLL_DIV, true),
CLK_MGT_ENTRY(IPI2CCLK, PLL_FIX, true),
CLK_MGT_ENTRY(DSIALTCLK, PLL_FIX, false),
CLK_MGT_ENTRY(DMACLK, PLL_DIV, true),
CLK_MGT_ENTRY(B2R2CLK, PLL_DIV, true),
CLK_MGT_ENTRY(TVCLK, PLL_FIX, true),
CLK_MGT_ENTRY(SSPCLK, PLL_FIX, true),
CLK_MGT_ENTRY(RNGCLK, PLL_FIX, true),
CLK_MGT_ENTRY(UICCCLK, PLL_FIX, false),
};
/* Per-DSI-clock bits in the PRCM_DSI_PLLOUT_SEL register. */
struct dsiclk {
	u32 divsel_mask;	/* mask of the DIVSEL field */
	u32 divsel_shift;	/* shift of the DIVSEL field */
	u32 divsel;		/* last requested divider selection */
};

static struct dsiclk dsiclk[2] = {
	{
		.divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_MASK,
		.divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_SHIFT,
		.divsel = PRCM_DSI_PLLOUT_SEL_PHI,
	},
	{
		.divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_MASK,
		.divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_SHIFT,
		.divsel = PRCM_DSI_PLLOUT_SEL_PHI,
	}
};

/* Per-DSI-escape-clock bits in the PRCM_DSITVCLK_DIV register. */
struct dsiescclk {
	u32 en;			/* enable bit */
	u32 div_mask;		/* mask of the divider field */
	u32 div_shift;		/* shift of the divider field */
};

static struct dsiescclk dsiescclk[3] = {
	{
		.en = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_SHIFT,
	},
	{
		.en = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_SHIFT,
	},
	{
		.en = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_SHIFT,
	}
};

/*
 * Used by MCDE to setup all necessary PRCMU registers
 */
#define PRCMU_RESET_DSIPLL		0x00004000
#define PRCMU_UNCLAMP_DSIPLL		0x00400800

/* Field layout of the PRCM_*CLK_MGT clock settings written below. */
#define PRCMU_CLK_PLL_DIV_SHIFT		0
#define PRCMU_CLK_PLL_SW_SHIFT		5
#define PRCMU_CLK_38			(1 << 9)
#define PRCMU_CLK_38_SRC		(1 << 10)
#define PRCMU_CLK_38_DIV		(1 << 11)

/* PLLDIV=12, PLLSW=4 (PLLDDR) */
#define PRCMU_DSI_CLOCK_SETTING		0x0000008C

/* DPI 50000000 Hz */
#define PRCMU_DPI_CLOCK_SETTING		((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
					 (16 << PRCMU_CLK_PLL_DIV_SHIFT))
#define PRCMU_DSI_LP_CLOCK_SETTING	0x00000E00

/* D=101, N=1, R=4, SELDIV2=0 */
#define PRCMU_PLLDSI_FREQ_SETTING	0x00040165

#define PRCMU_ENABLE_PLLDSI		0x00000001
#define PRCMU_DISABLE_PLLDSI		0x00000000
#define PRCMU_RELEASE_RESET_DSS		0x0000400C
#define PRCMU_DSI_PLLOUT_SEL_SETTING	0x00000202

/* ESC clk, div0=1, div1=1, div2=3 */
#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV	0x07030101
#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV	0x00030101
#define PRCMU_DSI_RESET_SW		0x00000007

#define PRCMU_PLLDSI_LOCKP_LOCKED	0x3
/*
 * db8500_prcmu_enable_dsipll - power up and start the DSI PLL.
 *
 * Performs the full MCDE bring-up sequence: release reset, unclamp,
 * program frequency/output selection, enable escape clocks, start the
 * PLL, then poll (up to ~1 ms) for lock.  The register write order is
 * hardware-mandated; do not reorder.  Always returns 0, even if the
 * PLL did not report lock within the polling window.
 */
int db8500_prcmu_enable_dsipll(void)
{
	int i;

	/* Clear DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
	/* Unclamp DSIPLL in/out */
	writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);

	/* Set DSI PLL FREQ */
	writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
	writel(PRCMU_DSI_PLLOUT_SEL_SETTING, PRCM_DSI_PLLOUT_SEL);
	/* Enable Escape clocks */
	writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);

	/* Start DSI PLL */
	writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
	/* Reset DSI PLL */
	writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
	/* Poll for lock: up to 10 x 100 us. */
	for (i = 0; i < 10; i++) {
		if ((readl(PRCM_PLLDSI_LOCKP) & PRCMU_PLLDSI_LOCKP_LOCKED)
					== PRCMU_PLLDSI_LOCKP_LOCKED)
			break;
		udelay(100);
	}
	/* Set DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
	return 0;
}
/*
 * db8500_prcmu_disable_dsipll - stop the DSI PLL and its escape clocks.
 *
 * Counterpart of db8500_prcmu_enable_dsipll().  Always returns 0.
 */
int db8500_prcmu_disable_dsipll(void)
{
	/* Disable dsi pll */
	writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
	/* Disable  escapeclock */
	writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
	return 0;
}
/*
 * db8500_prcmu_set_display_clocks - program the MCDE display clocks.
 *
 * Writes the fixed HDMI/TV/LCD clock settings under clk_mgt_lock and
 * the PRCM hardware semaphore (shared with the PRCMU firmware).
 * Always returns 0.
 */
int db8500_prcmu_set_display_clocks(void)
{
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	writel(PRCMU_DSI_CLOCK_SETTING, prcmu_base + PRCM_HDMICLK_MGT);
	writel(PRCMU_DSI_LP_CLOCK_SETTING, prcmu_base + PRCM_TVCLK_MGT);
	writel(PRCMU_DPI_CLOCK_SETTING, prcmu_base + PRCM_LCDCLK_MGT);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}
/* Read a PRCMU register at offset @reg from prcmu_base. */
u32 db8500_prcmu_read(unsigned int reg)
{
	return readl(prcmu_base + reg);
}

/* Write @value to the PRCMU register at offset @reg (serialized). */
void db8500_prcmu_write(unsigned int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&prcmu_lock, flags);
	writel(value, (prcmu_base + reg));
	spin_unlock_irqrestore(&prcmu_lock, flags);
}

/*
 * Read-modify-write the PRCMU register at offset @reg: only the bits
 * set in @mask are replaced by the corresponding bits of @value.
 */
void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&prcmu_lock, flags);
	val = readl(prcmu_base + reg);
	val = ((val & ~mask) | (value & mask));
	writel(val, (prcmu_base + reg));
	spin_unlock_irqrestore(&prcmu_lock, flags);
}
/*
 * prcmu_get_fw_version - return the PRCMU firmware version info.
 *
 * Returns NULL if the version has not (yet) been read from the
 * firmware (fw_info is populated elsewhere in this file).
 */
struct prcmu_fw_version *prcmu_get_fw_version(void)
{
	return fw_info.valid ? &fw_info.version : NULL;
}

/* True if the firmware's AVS data enables the ARM MAX OPP mode. */
bool prcmu_has_arm_maxopp(void)
{
	return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
		PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
}
/**
 * prcmu_get_boot_status - PRCMU boot status checking
 * Returns: the current PRCMU boot status
 */
int prcmu_get_boot_status(void)
{
	/* Boot status byte is published by the firmware in TCDM. */
	return readb(tcdm_base + PRCM_BOOT_STATUS);
}
/**
 * prcmu_set_rc_a2p - This function is used to run few power state sequences
 * @val: Value to be set, i.e. transition requested
 * Returns: 0 on success, -EINVAL on invalid argument
 *
 * This function is used to run the following power state sequences -
 * any state to ApReset,  ApDeepSleep to ApExecute, ApExecute to ApDeepsleep
 */
int prcmu_set_rc_a2p(enum romcode_write val)
{
	/* Only the DS..XP70-reset range is a valid romcode request. */
	if (val < RDY_2_DS || val > RDY_2_XP70_RST)
		return -EINVAL;
	writeb(val, (tcdm_base + PRCM_ROMCODE_A2P));
	return 0;
}
/**
 * prcmu_get_rc_p2a - This function is used to get power state sequences
 * Returns: the power transition that has last happened
 *
 * This function can return the following transitions-
 * any state to ApReset,  ApDeepSleep to ApExecute, ApExecute to ApDeepsleep
 */
enum romcode_read prcmu_get_rc_p2a(void)
{
	return readb(tcdm_base + PRCM_ROMCODE_P2A);
}

/**
 * prcmu_get_current_mode - Return the current XP70 power mode
 * Returns: Returns the current AP(ARM) power mode: init,
 * apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
 */
enum ap_pwrst prcmu_get_xp70_current_state(void)
{
	return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE);
}
/**
 * prcmu_config_clkout - Configure one of the programmable clock outputs.
 * @clkout: The CLKOUT number (0 or 1).
 * @source: The clock to be used (one of the PRCMU_CLKSRC_*).
 * @div: The divider to be applied.
 *
 * Configures one of the programmable clock outputs (CLKOUTs).
 * @div should be in the range [1,63] to request a configuration, or 0 to
 * inform that the configuration is no longer requested.
 */
int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
{
	/* Reference count of active requests per CLKOUT. */
	static int requests[2];
	int r = 0;
	unsigned long flags;
	u32 val;
	u32 bits;
	u32 mask;
	u32 div_mask;

	BUG_ON(clkout > 1);
	BUG_ON(div > 63);
	BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009));

	/* Dropping a request that was never made is an error. */
	if (!div && !requests[clkout])
		return -EINVAL;

	switch (clkout) {
	case 0:
		div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
		mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
		bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
			(div << PRCM_CLKOCR_CLKODIV0_SHIFT));
		break;
	case 1:
		div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
		mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
			PRCM_CLKOCR_CLK1TYPE);
		bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
			(div << PRCM_CLKOCR_CLKODIV1_SHIFT));
		break;
	}
	bits &= mask;

	spin_lock_irqsave(&clkout_lock, flags);

	val = readl(PRCM_CLKOCR);
	if (val & div_mask) {
		if (div) {
			/* Already configured: must match the existing setup. */
			if ((val & mask) != bits) {
				r = -EBUSY;
				goto unlock_and_return;
			}
		} else {
			/* Dropping: source bits must match what was set. */
			if ((val & mask & ~div_mask) != bits) {
				r = -EINVAL;
				goto unlock_and_return;
			}
		}
	}
	writel((bits | (val & ~mask)), PRCM_CLKOCR);
	requests[clkout] += (div ? 1 : -1);

unlock_and_return:
	spin_unlock_irqrestore(&clkout_lock, flags);

	return r;
}
/*
 * db8500_prcmu_set_power_state - request an AP power state transition.
 * @state: target state (PRCMU_AP_SLEEP .. PRCMU_AP_DEEP_IDLE)
 * @keep_ulp_clk: keep the ULP clock running in the low-power state
 * @keep_ap_pll: keep the AP PLL running in the low-power state
 *
 * Fills in mailbox 0 and rings the doorbell; does not wait for the
 * transition itself.  Always returns 0.
 */
int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
{
	unsigned long flags;

	BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state));

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	/* Wait until mailbox 0 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
		cpu_relax();

	writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
	writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE));
	writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE));
	writeb((keep_ulp_clk ? 1 : 0),
		(tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE));
	writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI));
	writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);

	return 0;
}

/* Read back the result of the last power state transition request. */
u8 db8500_prcmu_get_power_state_result(void)
{
	return readb(tcdm_base + PRCM_ACK_MB0_AP_PWRSTTR_STATUS);
}
/* This function should only be called while mb0_transfer.lock is held. */
static void config_wakeups(void)
{
	const u8 header[2] = {
		MB0H_CONFIG_WAKEUPS_EXE,
		MB0H_CONFIG_WAKEUPS_SLEEP
	};
	/* Last values sent to the firmware, to skip redundant messages. */
	static u32 last_dbb_events;
	static u32 last_abb_events;
	u32 dbb_events;
	u32 abb_events;
	unsigned int i;

	dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
	/* AC wake/sleep acks are always enabled. */
	dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK);

	abb_events = mb0_transfer.req.abb_events;

	if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
		return;

	/* Send the config twice: once for execute, once for sleep. */
	for (i = 0; i < 2; i++) {
		while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
			cpu_relax();
		writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500));
		writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500));
		writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
		writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
	}
	last_dbb_events = dbb_events;
	last_abb_events = abb_events;
}
/*
 * db8500_prcmu_enable_wakeups - set the DBB wakeup sources.
 * @wakeups: bitmask of PRCMU wakeup indices (must be within VALID_WAKEUPS)
 *
 * Translates the index mask into hardware wakeup bits and pushes the
 * new configuration to the firmware.
 */
void db8500_prcmu_enable_wakeups(u32 wakeups)
{
	unsigned long flags;
	u32 bits;
	int i;

	BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));

	for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
		if (wakeups & BIT(i))
			bits |= prcmu_wakeup_bit[i];
	}

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	mb0_transfer.req.dbb_wakeups = bits;
	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

/* Select which ABB events the firmware should report. */
void db8500_prcmu_config_abb_event_readout(u32 abb_events)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	mb0_transfer.req.abb_events = abb_events;
	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

/* Return the currently active ABB event buffer (double-buffered in TCDM). */
void db8500_prcmu_get_abb_event_buffer(void __iomem **buf)
{
	if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
		*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500);
	else
		*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500);
}
/**
 * db8500_prcmu_set_arm_opp - set the appropriate ARM OPP
 * @opp: The new ARM operating point to which transition is to be made
 * Returns: 0 on success, non-zero on failure
 *
 * This function sets the the operating point of the ARM.
 */
int db8500_prcmu_set_arm_opp(u8 opp)
{
	int r;

	if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK)
		return -EINVAL;

	r = 0;

	mutex_lock(&mb1_transfer.lock);

	/* Wait until mailbox 1 is free, then send the request. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
	writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	/* The ack must echo both the header and the requested OPP. */
	if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
		(mb1_transfer.ack.arm_opp != opp))
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}
/**
 * db8500_prcmu_get_arm_opp - get the current ARM OPP
 *
 * Returns: the current ARM OPP
 */
int db8500_prcmu_get_arm_opp(void)
{
	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP);
}

/**
 * db8500_prcmu_get_ddr_opp - get the current DDR OPP
 *
 * Returns: the current DDR OPP
 */
int db8500_prcmu_get_ddr_opp(void)
{
	return readb(PRCM_DDR_SUBSYS_APE_MINBW);
}
/**
 * db8500_set_ddr_opp - set the appropriate DDR OPP
 * @opp: The new DDR operating point to which transition is to be made
 * Returns: 0 on success, non-zero on failure
 *
 * This function sets the operating point of the DDR.
 */
static bool enable_set_ddr_opp;	/* set elsewhere when the HW revision allows it */
int db8500_prcmu_set_ddr_opp(u8 opp)
{
	if (opp < DDR_100_OPP || opp > DDR_25_OPP)
		return -EINVAL;
	/* Changing the DDR OPP can hang the hardware pre-v21 */
	if (enable_set_ddr_opp)
		writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW);

	return 0;
}
/* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */
static void request_even_slower_clocks(bool enable)
{
	u32 clock_reg[] = {
		PRCM_ACLK_MGT,
		PRCM_DMACLK_MGT
	};
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	for (i = 0; i < ARRAY_SIZE(clock_reg); i++) {
		u32 val;
		u32 div;

		val = readl(prcmu_base + clock_reg[i]);
		div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK);
		if (enable) {
			/* Doubling must stay within the 5-bit divider field. */
			if ((div <= 1) || (div > 15)) {
				pr_err("prcmu: Bad clock divider %d in %s\n",
					div, __func__);
				goto unlock_and_return;
			}
			div <<= 1;
		} else {
			/* div <= 2 means the clock was never slowed down. */
			if (div <= 2)
				goto unlock_and_return;
			div >>= 1;
		}
		val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) |
			(div & PRCM_CLK_MGT_CLKPLLDIV_MASK));
		writel(val, prcmu_base + clock_reg[i]);
	}

unlock_and_return:
	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);
}
/**
 * db8500_set_ape_opp - set the appropriate APE OPP
 * @opp: The new APE operating point to which transition is to be made
 * Returns: 0 on success, non-zero on failure
 *
 * This function sets the operating point of the APE.
 */
int db8500_prcmu_set_ape_opp(u8 opp)
{
	int r = 0;

	if (opp == mb1_transfer.ape_opp)
		return 0;

	mutex_lock(&mb1_transfer.lock);

	/* Leaving PARTLY_25: restore the halved clocks first. */
	if (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)
		request_even_slower_clocks(false);

	/* Only transitions to/from 100% need a firmware message. */
	if ((opp != APE_100_OPP) && (mb1_transfer.ape_opp != APE_100_OPP))
		goto skip_message;

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
	/* PARTLY_25 is implemented as APE_50 plus slower clocks. */
	writeb(((opp == APE_50_PARTLY_25_OPP) ? APE_50_OPP : opp),
		(tcdm_base + PRCM_REQ_MB1_APE_OPP));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
		(mb1_transfer.ack.ape_opp != opp))
		r = -EIO;

skip_message:
	/* (Re)apply the halved clocks on success, or roll back on failure. */
	if ((!r && (opp == APE_50_PARTLY_25_OPP)) ||
		(r && (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)))
		request_even_slower_clocks(true);
	if (!r)
		mb1_transfer.ape_opp = opp;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}
/**
 * db8500_prcmu_get_ape_opp - get the current APE OPP
 *
 * Returns: the current APE OPP
 */
int db8500_prcmu_get_ape_opp(void)
{
	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
}
/**
 * db8500_prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage
 * @enable: true to request the higher voltage, false to drop a request.
 *
 * Calls to this function to enable and disable requests must be balanced.
 */
int db8500_prcmu_request_ape_opp_100_voltage(bool enable)
{
	int r = 0;
	u8 header;
	/* Reference count of outstanding voltage requests. */
	static unsigned int requests;

	mutex_lock(&mb1_transfer.lock);

	if (enable) {
		/* Only the first request needs a firmware message. */
		if (0 != requests++)
			goto unlock_and_return;
		header = MB1H_REQUEST_APE_OPP_100_VOLT;
	} else {
		if (requests == 0) {
			r = -EIO;
			goto unlock_and_return;
		} else if (1 != requests--) {
			/* Not the last release: nothing to send. */
			goto unlock_and_return;
		}
		header = MB1H_RELEASE_APE_OPP_100_VOLT;
	}

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	if ((mb1_transfer.ack.header != header) ||
		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
		r = -EIO;

unlock_and_return:
	mutex_unlock(&mb1_transfer.lock);

	return r;
}
/**
 * prcmu_release_usb_wakeup_state - release the state required by a USB wakeup
 *
 * This function releases the power state requirements of a USB wakeup.
 */
int prcmu_release_usb_wakeup_state(void)
{
	int r = 0;

	mutex_lock(&mb1_transfer.lock);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_RELEASE_USB_WAKEUP,
		(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}
/*
 * request_pll - switch a SoC PLL on or off via mailbox 1.
 * @clock: PRCMU_PLLSOC0 or PRCMU_PLLSOC1
 * @enable: true to turn the PLL on, false to turn it off
 *
 * Returns 0 on success, -EINVAL for other clocks, -EIO if the
 * firmware's ack header does not match.
 */
static int request_pll(u8 clock, bool enable)
{
	int r = 0;

	if (clock == PRCMU_PLLSOC0)
		clock = (enable ? PLL_SOC0_ON : PLL_SOC0_OFF);
	else if (clock == PRCMU_PLLSOC1)
		clock = (enable ? PLL_SOC1_ON : PLL_SOC1_OFF);
	else
		return -EINVAL;

	mutex_lock(&mb1_transfer.lock);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_PLL_ON_OFF, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(clock, (tcdm_base + PRCM_REQ_MB1_PLL_ON_OFF));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	if (mb1_transfer.ack.header != MB1H_PLL_ON_OFF)
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}
/**
 * db8500_prcmu_set_epod - set the state of a EPOD (power domain)
 * @epod_id: The EPOD to set
 * @epod_state: The new EPOD state
 *
 * This function sets the state of a EPOD (power domain). It may not be called
 * from interrupt context.
 */
int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state)
{
	int r = 0;
	bool ram_retention = false;
	int i;

	/* check argument */
	BUG_ON(epod_id >= NUM_EPOD_ID);

	/* set flag if retention is possible */
	switch (epod_id) {
	case EPOD_ID_SVAMMDSP:
	case EPOD_ID_SIAMMDSP:
	case EPOD_ID_ESRAM12:
	case EPOD_ID_ESRAM34:
		ram_retention = true;
		break;
	}

	/* check argument */
	BUG_ON(epod_state > EPOD_STATE_ON);
	BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);

	/* get lock */
	mutex_lock(&mb2_transfer.lock);

	/* wait for mailbox */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
		cpu_relax();

	/* fill in mailbox: all other EPODs stay unchanged */
	for (i = 0; i < NUM_EPOD_ID; i++)
		writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i));
	writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id));

	writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));

	writel(MBOX_BIT(2), PRCM_MBOX_CPU_SET);

	/*
	 * The current firmware version does not handle errors correctly,
	 * and we cannot recover if there is an error.
	 * This is expected to change when the firmware is updated.
	 */
	if (!wait_for_completion_timeout(&mb2_transfer.work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
		goto unlock_and_return;
	}

	if (mb2_transfer.ack.status != HWACC_PWR_ST_OK)
		r = -EIO;

unlock_and_return:
	mutex_unlock(&mb2_transfer.lock);
	return r;
}
/**
 * prcmu_configure_auto_pm - Configure autonomous power management.
 * @sleep: Configuration for ApSleep.
 * @idle: Configuration for ApIdle.
 */
void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
	struct prcmu_auto_pm_config *idle)
{
	u32 sleep_cfg;
	u32 idle_cfg;
	unsigned long flags;

	BUG_ON((sleep == NULL) || (idle == NULL));

	/* Pack the config into the 32-bit layout the firmware expects:
	 * [sva_en:4][sia_en:4][sva_pwr:8][sia_pwr:8][sva_pol:4][sia_pol:4] */
	sleep_cfg = (sleep->sva_auto_pm_enable & 0xF);
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF));
	sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF));
	sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF));
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF));
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF));

	idle_cfg = (idle->sva_auto_pm_enable & 0xF);
	idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF));
	idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF));
	idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF));
	idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF));
	idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF));

	spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags);

	/*
	 * The autonomous power management configuration is done through
	 * fields in mailbox 2, but these fields are only used as shared
	 * variables - i.e. there is no need to send a message.
	 */
	writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP));
	writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE));

	mb2_transfer.auto_pm_enabled =
		((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON));

	spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags);
}
EXPORT_SYMBOL(prcmu_configure_auto_pm);

/* True if any SVA/SIA autonomous PM mode was enabled by the last config. */
bool prcmu_is_auto_pm_enabled(void)
{
	return mb2_transfer.auto_pm_enabled;
}
/*
 * request_sysclk - ask the firmware to turn SysClk on or off (mailbox 3).
 *
 * Returns 0 on success, -EIO if enabling timed out (20 s).
 */
static int request_sysclk(bool enable)
{
	int r;
	unsigned long flags;

	r = 0;

	mutex_lock(&mb3_transfer.sysclk_lock);

	spin_lock_irqsave(&mb3_transfer.lock, flags);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
		cpu_relax();

	writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));

	writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
	writel(MBOX_BIT(3), PRCM_MBOX_CPU_SET);

	spin_unlock_irqrestore(&mb3_transfer.lock, flags);

	/*
	 * The firmware only sends an ACK if we want to enable the
	 * SysClk, and it succeeds.
	 */
	if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	}

	mutex_unlock(&mb3_transfer.sysclk_lock);

	return r;
}

/* Start or stop the timer clock (TIMCLK) via the PRCM_TCR register. */
static int request_timclk(bool enable)
{
	u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);

	if (!enable)
		val |= PRCM_TCR_STOP_TIMERS;
	writel(val, PRCM_TCR);
	return 0;
}
/*
 * request_clock - gate or ungate a register-controlled clock.
 *
 * On disable, the current PLL-switch bits are saved in clk_mgt[] so
 * they can be restored on the next enable.  Protected by clk_mgt_lock
 * and the PRCM HW semaphore.  Always returns 0.
 */
static int request_clock(u8 clock, bool enable)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	val = readl(prcmu_base + clk_mgt[clock].offset);
	if (enable) {
		val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
	} else {
		clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
		val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
	}
	writel(val, prcmu_base + clk_mgt[clock].offset);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}

/*
 * request_sga_clock - SGA clock needs the ICN-2 clock-gating bypass
 * set while enabled; it is cleared again after a successful disable.
 */
static int request_sga_clock(u8 clock, bool enable)
{
	u32 val;
	int ret;

	if (enable) {
		val = readl(PRCM_CGATING_BYPASS);
		writel(val | PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
	}

	ret = request_clock(clock, enable);

	if (!ret && !enable) {
		val = readl(PRCM_CGATING_BYPASS);
		writel(val & ~PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
	}

	return ret;
}
/* True when both DSI PLL lock bits are set. */
static inline bool plldsi_locked(void)
{
	return (readl(PRCM_PLLDSI_LOCKP) &
		(PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
		 PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3)) ==
		(PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
		 PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3);
}

/*
 * request_plldsi - enable or disable the DSI PLL.
 *
 * On enable: unclamp, start the PLL, poll (up to ~1 ms) for lock, then
 * release its reset.  If lock is not achieved the PLL is clamped and
 * disabled again and -EAGAIN is returned.  On disable: assert reset,
 * stop the PLL and clamp it.
 */
static int request_plldsi(bool enable)
{
	int r = 0;
	u32 val;

	writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
		PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI), (enable ?
		PRCM_MMIP_LS_CLAMP_CLR : PRCM_MMIP_LS_CLAMP_SET));

	val = readl(PRCM_PLLDSI_ENABLE);
	if (enable)
		val |= PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
	else
		val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
	writel(val, PRCM_PLLDSI_ENABLE);

	if (enable) {
		unsigned int i;
		bool locked = plldsi_locked();

		for (i = 10; !locked && (i > 0); --i) {
			udelay(100);
			locked = plldsi_locked();
		}
		if (locked) {
			writel(PRCM_APE_RESETN_DSIPLL_RESETN,
				PRCM_APE_RESETN_SET);
		} else {
			/* Roll back: clamp and disable the PLL again. */
			writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
				PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI),
				PRCM_MMIP_LS_CLAMP_SET);
			val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
			writel(val, PRCM_PLLDSI_ENABLE);
			r = -EAGAIN;
		}
	} else {
		writel(PRCM_APE_RESETN_DSIPLL_RESETN, PRCM_APE_RESETN_CLR);
	}
	return r;
}
/*
 * request_dsiclk - enable or disable DSI clock @n by programming its
 * divider-select field (OFF when disabling, the last requested divider
 * when enabling).  Always returns 0.
 */
static int request_dsiclk(u8 n, bool enable)
{
	u32 val;

	val = readl(PRCM_DSI_PLLOUT_SEL);
	val &= ~dsiclk[n].divsel_mask;
	val |= ((enable ? dsiclk[n].divsel : PRCM_DSI_PLLOUT_SEL_OFF) <<
		dsiclk[n].divsel_shift);
	writel(val, PRCM_DSI_PLLOUT_SEL);
	return 0;
}
/*
 * request_dsiescclk - enable or disable DSI escape clock @n by setting
 * or clearing its enable bit in PRCM_DSITVCLK_DIV.  Always returns 0.
 */
static int request_dsiescclk(u8 n, bool enable)
{
	u32 val;

	val = readl(PRCM_DSITVCLK_DIV);
	/*
	 * Use a plain if/else instead of the original conditional
	 * expression used for its side effects (non-idiomatic and
	 * warning-prone); behavior is unchanged.
	 */
	if (enable)
		val |= dsiescclk[n].en;
	else
		val &= ~dsiescclk[n].en;
	writel(val, PRCM_DSITVCLK_DIV);
	return 0;
}
/**
 * db8500_prcmu_request_clock() - Request for a clock to be enabled or disabled.
 * @clock:      The clock for which the request is made.
 * @enable:     Whether the clock should be enabled (true) or disabled (false).
 *
 * This function should only be used by the clock implementation.
 * Do not use it from any other place!
 */
int db8500_prcmu_request_clock(u8 clock, bool enable)
{
	/* Dispatch to the handler for this clock class. */
	if (clock == PRCMU_SGACLK)
		return request_sga_clock(clock, enable);
	else if (clock < PRCMU_NUM_REG_CLOCKS)
		return request_clock(clock, enable);
	else if (clock == PRCMU_TIMCLK)
		return request_timclk(enable);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return request_dsiclk((clock - PRCMU_DSI0CLK), enable);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return request_dsiescclk((clock - PRCMU_DSI0ESCCLK), enable);
	else if (clock == PRCMU_PLLDSI)
		return request_plldsi(enable);
	else if (clock == PRCMU_SYSCLK)
		return request_sysclk(enable);
	else if ((clock == PRCMU_PLLSOC0) || (clock == PRCMU_PLLSOC1))
		return request_pll(clock, enable);
	else
		return -EINVAL;
}
/*
 * pll_rate - compute a PLL's output rate from its FREQ register.
 * @reg: the PRCM_PLL*_FREQ register to read
 * @src_rate: the PLL input (reference) rate in Hz
 * @branch: which output branch (PLL_RAW, PLL_FIX or PLL_DIV)
 *
 * rate = src_rate * D / (N * R), halved again for SELDIV2 and for the
 * FIX branch (or the DIV branch with DIV2EN on the SOC0/ARM/DDR PLLs).
 */
static unsigned long pll_rate(void __iomem *reg, unsigned long src_rate,
	int branch)
{
	u64 rate;
	u32 val;
	u32 d;
	u32 div = 1;

	val = readl(reg);

	rate = src_rate;
	rate *= ((val & PRCM_PLL_FREQ_D_MASK) >> PRCM_PLL_FREQ_D_SHIFT);

	d = ((val & PRCM_PLL_FREQ_N_MASK) >> PRCM_PLL_FREQ_N_SHIFT);
	if (d > 1)
		div *= d;

	d = ((val & PRCM_PLL_FREQ_R_MASK) >> PRCM_PLL_FREQ_R_SHIFT);
	if (d > 1)
		div *= d;

	if (val & PRCM_PLL_FREQ_SELDIV2)
		div *= 2;

	if ((branch == PLL_FIX) || ((branch == PLL_DIV) &&
		(val & PRCM_PLL_FREQ_DIV2EN) &&
		((reg == PRCM_PLLSOC0_FREQ) ||
		 (reg == PRCM_PLLARM_FREQ) ||
		 (reg == PRCM_PLLDDR_FREQ))))
		div *= 2;

	(void)do_div(rate, div);

	return (unsigned long)rate;
}
/* The 38.4 MHz root (reference) clock rate in Hz. */
#define ROOT_CLOCK_RATE 38400000

/*
 * clock_rate - compute the current rate of a register-controlled clock.
 *
 * Resolves the clock's source (root clock or one of the PLLs, using
 * the saved pllsw bits if the clock is gated), then applies the
 * divider.  Returns 0 for an unknown source or a zero divider.
 */
static unsigned long clock_rate(u8 clock)
{
	u32 val;
	u32 pllsw;
	unsigned long rate = ROOT_CLOCK_RATE;

	val = readl(prcmu_base + clk_mgt[clock].offset);

	if (val & PRCM_CLK_MGT_CLK38) {
		if (clk_mgt[clock].clk38div && (val & PRCM_CLK_MGT_CLK38DIV))
			rate /= 2;
		return rate;
	}

	/* Fold in the pllsw bits saved while the clock was gated. */
	val |= clk_mgt[clock].pllsw;

	pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);

	if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC0)
		rate = pll_rate(PRCM_PLLSOC0_FREQ, rate, clk_mgt[clock].branch);
	else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC1)
		rate = pll_rate(PRCM_PLLSOC1_FREQ, rate, clk_mgt[clock].branch);
	else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_DDR)
		rate = pll_rate(PRCM_PLLDDR_FREQ, rate, clk_mgt[clock].branch);
	else
		return 0;

	if ((clock == PRCMU_SGACLK) &&
		(val & PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN)) {
		/* SGA has a special divide-by-2.5 mode. */
		u64 r = (rate * 10);

		(void)do_div(r, 25);
		return (unsigned long)r;
	}
	val &= PRCM_CLK_MGT_CLKPLLDIV_MASK;
	if (val)
		return rate / val;
	else
		return 0;
}
/*
 * armss_rate - compute the current ARM subsystem clock rate.
 *
 * Either the external ARMCLKFIX path (derived from the DDR PLL with
 * two extra dividers) or the ARM PLL, depending on PRCM_ARM_CHGCLKREQ.
 *
 * NOTE(review): on the ARMCLKFIX path, the PRCM_ARMCLKFIX_MGT divider
 * field is used without a zero check ("rate /= r") — presumably the
 * hardware guarantees a non-zero divider here; confirm against the
 * register spec.
 */
static unsigned long armss_rate(void)
{
	u32 r;
	unsigned long rate;

	r = readl(PRCM_ARM_CHGCLKREQ);

	if (r & PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ) {
		/* External ARMCLKFIX clock */

		rate = pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, PLL_FIX);

		/* Check PRCM_ARM_CHGCLKREQ divider */
		if (!(r & PRCM_ARM_CHGCLKREQ_PRCM_ARM_DIVSEL))
			rate /= 2;

		/* Check PRCM_ARMCLKFIX_MGT divider */
		r = readl(PRCM_ARMCLKFIX_MGT);
		r &= PRCM_CLK_MGT_CLKPLLDIV_MASK;
		rate /= r;

	} else {/* ARM PLL */
		rate = pll_rate(PRCM_PLLARM_FREQ, ROOT_CLOCK_RATE, PLL_DIV);
	}

	return rate;
}
/*
 * dsiclk_rate - compute the current rate of DSI clock @n.
 *
 * Reads the divider selection from hardware (falling back to the last
 * requested value if the output is off) and divides the DSI PLL rate
 * accordingly (PHI, PHI/2 or PHI/4).
 */
static unsigned long dsiclk_rate(u8 n)
{
	u32 divsel;
	u32 div = 1;

	divsel = readl(PRCM_DSI_PLLOUT_SEL);
	divsel = ((divsel & dsiclk[n].divsel_mask) >> dsiclk[n].divsel_shift);

	if (divsel == PRCM_DSI_PLLOUT_SEL_OFF)
		divsel = dsiclk[n].divsel;
	else
		dsiclk[n].divsel = divsel;

	switch (divsel) {
	case PRCM_DSI_PLLOUT_SEL_PHI_4:
		div *= 2;
		/* fall through */
	case PRCM_DSI_PLLOUT_SEL_PHI_2:
		div *= 2;
		/* fall through */
	case PRCM_DSI_PLLOUT_SEL_PHI:
		return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
			PLL_RAW) / div;
	default:
		return 0;
	}
}

/* Rate of DSI escape clock @n: TVCLK divided by its (non-zero) divider. */
static unsigned long dsiescclk_rate(u8 n)
{
	u32 div;

	div = readl(PRCM_DSITVCLK_DIV);
	div = ((div & dsiescclk[n].div_mask) >> (dsiescclk[n].div_shift));
	return clock_rate(PRCMU_TVCLK) / max((u32)1, div);
}
/*
 * prcmu_clock_rate - return the current rate (Hz) of any PRCMU clock.
 *
 * Dispatches to the appropriate rate computation per clock class;
 * returns 0 for unknown clocks.
 */
unsigned long prcmu_clock_rate(u8 clock)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		return clock_rate(clock);
	else if (clock == PRCMU_TIMCLK)
		return ROOT_CLOCK_RATE / 16;
	else if (clock == PRCMU_SYSCLK)
		return ROOT_CLOCK_RATE;
	else if (clock == PRCMU_PLLSOC0)
		return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_PLLSOC1)
		return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_ARMSS)
		return armss_rate();
	else if (clock == PRCMU_PLLDDR)
		return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_PLLDSI)
		return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
			PLL_RAW);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return dsiclk_rate(clock - PRCMU_DSI0CLK);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return dsiescclk_rate(clock - PRCMU_DSI0ESCCLK);
	else
		return 0;
}
/*
 * clock_source_rate - rate of the source feeding a clk_mgt clock.
 * @clk_mgt_val: the PRCM_*_MGT register value (with pllsw bits folded in)
 * @branch: the clock's PLL branch
 *
 * Returns the root clock rate, the selected PLL's rate, or 0 if the
 * source selection is unrecognized.
 */
static unsigned long clock_source_rate(u32 clk_mgt_val, int branch)
{
	if (clk_mgt_val & PRCM_CLK_MGT_CLK38)
		return ROOT_CLOCK_RATE;
	clk_mgt_val &= PRCM_CLK_MGT_CLKPLLSW_MASK;
	if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC0)
		return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, branch);
	else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC1)
		return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, branch);
	else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_DDR)
		return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, branch);
	else
		return 0;
}
/*
 * clock_divider - smallest divider giving at most @rate from @src_rate.
 *
 * Returns src_rate / rate rounded up, with a minimum of 1.
 */
static u32 clock_divider(unsigned long src_rate, unsigned long rate)
{
	u32 d = (src_rate / rate);

	if (d == 0)
		return 1;

	/* Round up: bump the divider if the result still exceeds @rate. */
	if ((src_rate / d) > rate)
		d++;

	return d;
}
/*
 * round_clock_rate - round @rate to a rate achievable by @clock.
 *
 * Computes the divider from the clock's current source rate, clamps it
 * to what the hardware supports (2 for the 38.4 MHz path with clk38div,
 * 31 for the PLL divider), and handles SGA's divide-by-2.5 mode.
 */
static long round_clock_rate(u8 clock, unsigned long rate)
{
	u32 val;
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	val = readl(prcmu_base + clk_mgt[clock].offset);
	src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
		clk_mgt[clock].branch);
	div = clock_divider(src_rate, rate);
	if (val & PRCM_CLK_MGT_CLK38) {
		if (clk_mgt[clock].clk38div) {
			if (div > 2)
				div = 2;
		} else {
			div = 1;
		}
	} else if ((clock == PRCMU_SGACLK) && (div == 3)) {
		/* SGA can do src_rate * 10 / 25 (divide by 2.5). */
		u64 r = (src_rate * 10);

		(void)do_div(r, 25);
		if (r <= rate)
			return (unsigned long)r;
	}
	rounded_rate = (src_rate / min(div, (u32)31));

	return rounded_rate;
}
/* CPU FREQ table, may be changed due to if MAX_OPP is supported. */
static struct cpufreq_frequency_table db8500_cpufreq_table[] = {
	{ .frequency = 200000, .driver_data = ARM_EXTCLK,},	/* kHz */
	{ .frequency = 400000, .driver_data = ARM_50_OPP,},
	{ .frequency = 800000, .driver_data = ARM_100_OPP,},
	{ .frequency = CPUFREQ_TABLE_END,}, /* To be used for MAX_OPP. */
	{ .frequency = CPUFREQ_TABLE_END,},
};
/*
 * round_armss_rate - round @rate (Hz) to a supported ARMSS rate.
 *
 * Scans the cpufreq table (kHz entries) for an exact match; if none is
 * found, the last table entry's frequency is returned.
 */
static long round_armss_rate(unsigned long rate)
{
	long freq = 0;
	int idx;

	/* cpufreq table frequencies is in KHz. */
	rate = rate / 1000;

	/* Find the corresponding arm opp from the cpufreq table. */
	for (idx = 0;
	     db8500_cpufreq_table[idx].frequency != CPUFREQ_TABLE_END;
	     idx++) {
		freq = db8500_cpufreq_table[idx].frequency;
		if (freq == rate)
			break;
	}

	/* Return the last valid value, even if a match was not found. */
	return freq * 1000;
}
/* Allowed DSI PLL VCO range in Hz. */
#define MIN_PLL_VCO_RATE 600000000ULL
#define MAX_PLL_VCO_RATE 1680640000ULL

/*
 * round_plldsi_rate - closest achievable DSI PLL rate not above @rate.
 *
 * Tries each feedback divider R (7..1) with the multiplier D clamped to
 * [6,255], skipping combinations outside the VCO range, and keeps the
 * candidate with the smallest remainder below @rate.
 */
static long round_plldsi_rate(unsigned long rate)
{
	long rounded_rate = 0;
	unsigned long src_rate;
	unsigned long rem;
	u32 r;

	src_rate = clock_rate(PRCMU_HDMICLK);
	rem = rate;

	for (r = 7; (rem > 0) && (r > 0); r--) {
		u64 d;

		d = (r * rate);
		(void)do_div(d, src_rate);
		if (d < 6)
			d = 6;
		else if (d > 255)
			d = 255;
		d *= src_rate;
		if (((2 * d) < (r * MIN_PLL_VCO_RATE)) ||
			((r * MAX_PLL_VCO_RATE) < (2 * d)))
			continue;
		(void)do_div(d, r);
		if (rate < d) {
			if (rounded_rate == 0)
				rounded_rate = (long)d;
			break;
		}
		if ((rate - d) < rem) {
			rem = (rate - d);
			rounded_rate = (long)d;
		}
	}
	return rounded_rate;
}
/* Round @rate for a DSI clock: PLL rate divided by 1, 2 or 4. */
static long round_dsiclk_rate(unsigned long rate)
{
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	src_rate = pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
		PLL_RAW);
	div = clock_divider(src_rate, rate);
	rounded_rate = (src_rate / ((div > 2) ? 4 : div));

	return rounded_rate;
}

/* Round @rate for a DSI escape clock: TVCLK over an 8-bit divider. */
static long round_dsiescclk_rate(unsigned long rate)
{
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	src_rate = clock_rate(PRCMU_TVCLK);
	div = clock_divider(src_rate, rate);
	rounded_rate = (src_rate / min(div, (u32)255));

	return rounded_rate;
}

/*
 * prcmu_round_clock_rate - round @rate to a rate achievable by @clock.
 *
 * For clocks with no configurable divider the current rate is returned.
 */
long prcmu_round_clock_rate(u8 clock, unsigned long rate)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		return round_clock_rate(clock, rate);
	else if (clock == PRCMU_ARMSS)
		return round_armss_rate(rate);
	else if (clock == PRCMU_PLLDSI)
		return round_plldsi_rate(rate);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return round_dsiclk_rate(rate);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return round_dsiescclk_rate(rate);
	else
		return (long)prcmu_clock_rate(clock);
}
/*
 * set_clock_rate() - Program the divider of a PRCMU-managed register clock.
 * @clock: index into the clk_mgt[] register table (< PRCMU_NUM_REG_CLOCKS).
 * @rate:  requested rate in Hz.
 *
 * The clock management register is shared with other masters, so the PRCM
 * hardware semaphore is held while it is modified.
 */
static void set_clock_rate(u8 clock, unsigned long rate)
{
	u32 val;
	u32 div;
	unsigned long src_rate;
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	val = readl(prcmu_base + clk_mgt[clock].offset);
	/* Source rate follows the currently selected PLL source (PLLSW). */
	src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
		clk_mgt[clock].branch);
	div = clock_divider(src_rate, rate);
	if (val & PRCM_CLK_MGT_CLK38) {
		/* 38.4 MHz mode: only an optional divide-by-two exists. */
		if (clk_mgt[clock].clk38div) {
			if (div > 1)
				val |= PRCM_CLK_MGT_CLK38DIV;
			else
				val &= ~PRCM_CLK_MGT_CLK38DIV;
		}
	} else if (clock == PRCMU_SGACLK) {
		val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK |
			PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN);
		/*
		 * The SGA clock has an extra divide-by-2.5 option.  Use it
		 * instead of divide-by-3 when the resulting rate does not
		 * exceed the requested one.
		 */
		if (div == 3) {
			u64 r = (src_rate * 10);

			(void)do_div(r, 25);
			if (r <= rate) {
				val |= PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN;
				div = 0;
			}
		}
		val |= min(div, (u32)31);	/* 5-bit divider field */
	} else {
		/* Plain 5-bit integer divider. */
		val &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
		val |= min(div, (u32)31);
	}
	writel(val, prcmu_base + clk_mgt[clock].offset);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);
}
/*
 * set_armss_rate() - Set the ARM subsystem rate by switching to the OPP
 * whose cpufreq-table frequency matches @rate exactly.
 *
 * Returns the result of the OPP change request, or -EINVAL if @rate does
 * not correspond to any table entry.
 */
static int set_armss_rate(unsigned long rate)
{
	unsigned long khz = rate / 1000;	/* cpufreq table is in kHz */
	int idx;

	for (idx = 0;
	     db8500_cpufreq_table[idx].frequency != CPUFREQ_TABLE_END;
	     idx++) {
		if (db8500_cpufreq_table[idx].frequency == khz)
			return db8500_prcmu_set_arm_opp(
				db8500_cpufreq_table[idx].driver_data);
	}
	return -EINVAL;
}
/*
 * set_plldsi_rate() - Program the DSI PLL for the rate closest to @rate.
 * @rate: requested rate in Hz.
 *
 * Uses the same search as round_plldsi_rate(): for each relation divider R
 * (7..1) compute the clamped multiplication factor D and reject candidates
 * whose VCO frequency is out of spec.  The winning (D, R) pair is written
 * to the PLLDSI frequency register with N fixed to 1.
 *
 * Returns 0 on success, -EINVAL if no valid setting exists.
 */
static int set_plldsi_rate(unsigned long rate)
{
	unsigned long src_rate;
	unsigned long rem;	/* smallest deficit (rate - candidate) so far */
	u32 pll_freq = 0;	/* encoded D/R register value; 0 = none found */
	u32 r;

	src_rate = clock_rate(PRCMU_HDMICLK);
	rem = rate;

	for (r = 7; (rem > 0) && (r > 0); r--) {
		u64 d;
		u64 hwrate;

		d = (r * rate);
		(void)do_div(d, src_rate);
		/* Clamp the multiplication factor to the hardware limits. */
		if (d < 6)
			d = 6;
		else if (d > 255)
			d = 255;
		hwrate = (d * src_rate);
		/* Reject settings that would drive the VCO out of spec. */
		if (((2 * hwrate) < (r * MIN_PLL_VCO_RATE)) ||
			((r * MAX_PLL_VCO_RATE) < (2 * hwrate)))
			continue;
		(void)do_div(hwrate, r);
		if (rate < hwrate) {
			/* Overshoot: keep only as fallback when nothing fit. */
			if (pll_freq == 0)
				pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
					(r << PRCM_PLL_FREQ_R_SHIFT));
			break;
		}
		if ((rate - hwrate) < rem) {
			rem = (rate - hwrate);
			pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
				(r << PRCM_PLL_FREQ_R_SHIFT));
		}
	}

	if (pll_freq == 0)
		return -EINVAL;

	/* Input divider N is always 1. */
	pll_freq |= (1 << PRCM_PLL_FREQ_N_SHIFT);
	writel(pll_freq, PRCM_PLLDSI_FREQ);

	return 0;
}
/*
 * set_dsiclk_rate() - Select the PLLDSI output tap for DSI clock @n so
 * its rate gets as close as possible to @rate.
 */
static void set_dsiclk_rate(u8 n, unsigned long rate)
{
	u32 divider;
	u32 sel;

	divider = clock_divider(pll_rate(PRCM_PLLDSI_FREQ,
			clock_rate(PRCMU_HDMICLK), PLL_RAW), rate);

	/* Map the divider onto the PHI / PHI/2 / PHI/4 output taps. */
	if (divider == 1)
		dsiclk[n].divsel = PRCM_DSI_PLLOUT_SEL_PHI;
	else if (divider == 2)
		dsiclk[n].divsel = PRCM_DSI_PLLOUT_SEL_PHI_2;
	else
		dsiclk[n].divsel = PRCM_DSI_PLLOUT_SEL_PHI_4;

	sel = readl(PRCM_DSI_PLLOUT_SEL);
	sel &= ~dsiclk[n].divsel_mask;
	sel |= (dsiclk[n].divsel << dsiclk[n].divsel_shift);
	writel(sel, PRCM_DSI_PLLOUT_SEL);
}
/*
 * set_dsiescclk_rate() - Program the 8-bit divider for DSI escape clock @n.
 */
static void set_dsiescclk_rate(u8 n, unsigned long rate)
{
	u32 divider = clock_divider(clock_rate(PRCMU_TVCLK), rate);
	u32 reg;

	/* The divider field is 8 bits wide. */
	if (divider > 255)
		divider = 255;

	reg = readl(PRCM_DSITVCLK_DIV);
	reg &= ~dsiescclk[n].div_mask;
	reg |= (divider << dsiescclk[n].div_shift);
	writel(reg, PRCM_DSITVCLK_DIV);
}
/*
 * prcmu_set_clock_rate() - Set the rate of @clock, dispatching to the
 * per-clock helper.  Requests for clocks with no adjustable rate are
 * silently ignored.  Returns 0 or a negative errno.
 */
int prcmu_set_clock_rate(u8 clock, unsigned long rate)
{
	if (clock < PRCMU_NUM_REG_CLOCKS) {
		set_clock_rate(clock, rate);
		return 0;
	}
	if (clock == PRCMU_ARMSS)
		return set_armss_rate(rate);
	if (clock == PRCMU_PLLDSI)
		return set_plldsi_rate(rate);
	if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		set_dsiclk_rate((clock - PRCMU_DSI0CLK), rate);
	else if ((clock >= PRCMU_DSI0ESCCLK) && (clock <= PRCMU_DSI2ESCCLK))
		set_dsiescclk_rate((clock - PRCMU_DSI0ESCCLK), rate);
	return 0;
}
/*
 * db8500_prcmu_config_esram0_deep_sleep() - Configure the ESRAM0 state to
 * use in deep sleep via mailbox 4.
 * @state: ESRAM0_DEEP_SLEEP_STATE_OFF or ESRAM0_DEEP_SLEEP_STATE_RET.
 *
 * Blocks until the PRCMU firmware acknowledges the request.
 * Returns 0 on success, -EINVAL on an out-of-range @state.
 */
int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
	if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
	    (state < ESRAM0_DEEP_SLEEP_STATE_OFF))
		return -EINVAL;

	mutex_lock(&mb4_transfer.lock);

	/* Wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
	/* DDR states for sleep/idle and deep idle are fixed here. */
	writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON),
	       (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE));
	writeb(DDR_PWR_STATE_ON,
	       (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
	writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}
/*
 * db8500_prcmu_config_hotdog() - Configure the "hotdog" thermal shutdown
 * threshold via mailbox 4.  Blocks until the firmware acknowledges.
 */
int db8500_prcmu_config_hotdog(u8 threshold)
{
	mutex_lock(&mb4_transfer.lock);

	/* Wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
	writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}
/*
 * db8500_prcmu_config_hotmon() - Configure the thermal monitor low/high
 * alarm thresholds via mailbox 4.  Both alarms are enabled.  Blocks until
 * the firmware acknowledges.
 */
int db8500_prcmu_config_hotmon(u8 low, u8 high)
{
	mutex_lock(&mb4_transfer.lock);

	/* Wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
	writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH));
	writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH),
		(tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
	writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}
/*
 * config_hot_period() - Set the thermal measurement period via mailbox 4.
 * @val: period in 32 kHz cycles; 0xFFFF stops the measurements.
 *
 * Blocks until the firmware acknowledges.  Always returns 0.
 */
static int config_hot_period(u16 val)
{
	mutex_lock(&mb4_transfer.lock);

	/* Wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
	writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}
/*
 * db8500_prcmu_start_temp_sense() - Start periodic thermal measurements.
 * @cycles32k: measurement period in 32 kHz cycles; must not be 0xFFFF,
 *             which is reserved as the "stop" request.
 */
int db8500_prcmu_start_temp_sense(u16 cycles32k)
{
	return (cycles32k == 0xFFFF) ?
		-EINVAL : config_hot_period(cycles32k);
}
/*
 * db8500_prcmu_stop_temp_sense() - Stop periodic thermal measurements.
 * 0xFFFF is the reserved "stop" period value.
 */
int db8500_prcmu_stop_temp_sense(void)
{
	return config_hot_period(0xFFFF);
}
/*
 * prcmu_a9wdog() - Send an A9 watchdog command through mailbox 4.
 * @cmd:   MB4H_A9WDOG_* command header.
 * @d0-@d3: command-specific payload bytes.
 *
 * Blocks until the firmware acknowledges.  Always returns 0.
 */
static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
{
	mutex_lock(&mb4_transfer.lock);

	/* Wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(d0, (tcdm_base + PRCM_REQ_MB4_A9WDOG_0));
	writeb(d1, (tcdm_base + PRCM_REQ_MB4_A9WDOG_1));
	writeb(d2, (tcdm_base + PRCM_REQ_MB4_A9WDOG_2));
	writeb(d3, (tcdm_base + PRCM_REQ_MB4_A9WDOG_3));

	writeb(cmd, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}
/*
 * db8500_prcmu_config_a9wdog() - Configure A9 watchdog @num.
 * @num:            watchdog id, 1..15.
 * @sleep_auto_off: whether the watchdog switches off automatically in sleep.
 */
int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
{
	u8 auto_off;

	BUG_ON(num == 0 || num > 0xf);
	auto_off = sleep_auto_off ? A9WDOG_AUTO_OFF_EN : A9WDOG_AUTO_OFF_DIS;
	return prcmu_a9wdog(MB4H_A9WDOG_CONF, num, 0, 0, auto_off);
}
EXPORT_SYMBOL(db8500_prcmu_config_a9wdog);
/* Enable A9 watchdog @id. */
int db8500_prcmu_enable_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_EN, id, 0, 0, 0);
}
EXPORT_SYMBOL(db8500_prcmu_enable_a9wdog);
/* Disable A9 watchdog @id. */
int db8500_prcmu_disable_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_DIS, id, 0, 0, 0);
}
EXPORT_SYMBOL(db8500_prcmu_disable_a9wdog);
/* Kick (pet) A9 watchdog @id so it does not expire. */
int db8500_prcmu_kick_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_KICK, id, 0, 0, 0);
}
EXPORT_SYMBOL(db8500_prcmu_kick_a9wdog);
/*
 * db8500_prcmu_load_a9wdog() - Load A9 watchdog @id with @timeout.
 * @timeout: 28-bit timeout value, in ms.
 *
 * The 32-bit mailbox payload is packed as: bits 0-3 = watchdog id,
 * bits 4-31 = the 28-bit timeout, split over the four payload bytes.
 */
int db8500_prcmu_load_a9wdog(u8 id, u32 timeout)
{
	return prcmu_a9wdog(MB4H_A9WDOG_LOAD,
			    (id & A9WDOG_ID_MASK) |
			    /*
			     * Put the lowest 28 bits of timeout at
			     * offset 4. Four first bits are used for id.
			     */
			    (u8)((timeout << 4) & 0xf0),
			    (u8)((timeout >> 4) & 0xff),
			    (u8)((timeout >> 12) & 0xff),
			    (u8)((timeout >> 20) & 0xff));
}
EXPORT_SYMBOL(db8500_prcmu_load_a9wdog);
/**
 * prcmu_abb_read() - Read register value(s) from the ABB.
 * @slave:	The I2C slave address.
 * @reg:	The (start) register address.
 * @value:	The read out value(s).
 * @size:	The number of registers to read.
 *
 * Reads register value(s) from the ABB over the PRCMU-internal I2C,
 * using mailbox 5.  @size has to be 1 for the current firmware version.
 *
 * Returns 0 on success, -EINVAL on bad @size, -EIO on firmware timeout
 * (20 s) or I2C failure.
 */
int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
{
	int r;

	if (size != 1)
		return -EINVAL;

	mutex_lock(&mb5_transfer.lock);

	/* Wait until mailbox 5 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
		cpu_relax();

	writeb(0, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB5));
	writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
	writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
	writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
	writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));

	writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);

	if (!wait_for_completion_timeout(&mb5_transfer.work,
				msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	} else {
		r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
	}

	/* The read value is only valid on a successful transfer. */
	if (!r)
		*value = mb5_transfer.ack.value;

	mutex_unlock(&mb5_transfer.lock);

	return r;
}
/**
 * prcmu_abb_write_masked() - Write masked register value(s) to the ABB.
 * @slave:	The I2C slave address.
 * @reg:	The (start) register address.
 * @value:	The value(s) to write.
 * @mask:	The mask(s) to use.
 * @size:	The number of registers to write.
 *
 * Writes masked register value(s) to the ABB over the PRCMU-internal I2C,
 * using mailbox 5.  For each @value, only the bits set to 1 in the
 * corresponding @mask will be written. The other bits are not changed.
 * @size has to be 1 for the current firmware version.
 *
 * Returns 0 on success, -EINVAL on bad @size, -EIO on firmware timeout
 * (20 s) or I2C failure.
 */
int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, u8 size)
{
	int r;

	if (size != 1)
		return -EINVAL;

	mutex_lock(&mb5_transfer.lock);

	/* Wait until mailbox 5 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
		cpu_relax();

	/* The inverted mask is carried in the request header byte. */
	writeb(~*mask, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB5));
	writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
	writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
	writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
	writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));

	writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);

	if (!wait_for_completion_timeout(&mb5_transfer.work,
				msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	} else {
		r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
	}

	mutex_unlock(&mb5_transfer.lock);

	return r;
}
/**
 * prcmu_abb_write() - Write register value(s) to the ABB.
 * @slave:	The I2C slave address.
 * @reg:	The (start) register address.
 * @value:	The value(s) to write.
 * @size:	The number of registers to write.
 *
 * Unmasked write: delegates to prcmu_abb_write_masked() with every mask
 * bit set, so all bits of the register are written.
 * @size has to be 1 for the current firmware version.
 */
int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
{
	u8 all_bits = 0xff;

	return prcmu_abb_write_masked(slave, reg, value, &all_bits, size);
}
/**
 * prcmu_ac_wake_req - should be called whenever ARM wants to wakeup Modem
 *
 * Performs the hostaccess_req handshake with the modem via the
 * PRCM_HOSTACCESS_REQ register and waits (up to 5 s) for the wake ack
 * delivered through mailbox 0.  Returns 0 on success or if host access
 * was already requested, -EFAULT on ack timeout.
 */
int prcmu_ac_wake_req(void)
{
	u32 val;
	int ret = 0;

	mutex_lock(&mb0_transfer.ac_wake_lock);

	val = readl(PRCM_HOSTACCESS_REQ);
	/* Already requested: nothing to do. */
	if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)
		goto unlock_and_return;

	atomic_set(&ac_wake_req_state, 1);

	/*
	 * Force Modem Wake-up before hostaccess_req ping-pong.
	 * It prevents Modem to enter in Sleep while acking the hostaccess
	 * request. The 31us delay has been calculated by HWI.
	 */
	val |= PRCM_HOSTACCESS_REQ_WAKE_REQ;
	writel(val, PRCM_HOSTACCESS_REQ);

	udelay(31);

	val |= PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ;
	writel(val, PRCM_HOSTACCESS_REQ);

	if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
			msecs_to_jiffies(5000))) {
#if defined(CONFIG_DBX500_PRCMU_DEBUG)
		db8500_prcmu_debug_dump(__func__, true, true);
#endif
		pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n",
			__func__);
		ret = -EFAULT;
	}

unlock_and_return:
	mutex_unlock(&mb0_transfer.ac_wake_lock);
	return ret;
}
/**
 * prcmu_ac_sleep_req - called when ARM no longer needs to talk to modem
 *
 * Drops the hostaccess request and waits (up to 5 s) for the sleep ack
 * delivered through mailbox 0.  No-op if host access was not requested.
 */
void prcmu_ac_sleep_req(void)
{
	u32 val;

	mutex_lock(&mb0_transfer.ac_wake_lock);

	val = readl(PRCM_HOSTACCESS_REQ);
	/* Not currently requested: nothing to drop. */
	if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ))
		goto unlock_and_return;

	writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
		PRCM_HOSTACCESS_REQ);

	if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
			msecs_to_jiffies(5000))) {
		pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n",
			__func__);
	}

	atomic_set(&ac_wake_req_state, 0);

unlock_and_return:
	mutex_unlock(&mb0_transfer.ac_wake_lock);
}
bool db8500_prcmu_is_ac_wake_requested(void)
{
return (atomic_read(&ac_wake_req_state) != 0);
}
/**
 * db8500_prcmu_system_reset - System reset
 * @reset_code: reason code saved for retrieval after restart.
 *
 * Saves the reset reason code in TCDM and then sets the APE_SOFTRST
 * register which fires interrupt to fw.  Does not return in the normal
 * case: the firmware resets the system.
 */
void db8500_prcmu_system_reset(u16 reset_code)
{
	writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON));
	writel(1, PRCM_APE_SOFTRST);
}
/**
 * db8500_prcmu_get_reset_code - Retrieve SW reset reason code
 *
 * Retrieves the reset reason code stored by prcmu_system_reset() before
 * last restart.  The value survives the reset because it lives in TCDM.
 */
u16 db8500_prcmu_get_reset_code(void)
{
	return readw(tcdm_base + PRCM_SW_RST_REASON);
}
/**
 * db8500_prcmu_modem_reset - ask the PRCMU to reset modem
 *
 * Sends MB1H_RESET_MODEM through mailbox 1 and waits for the firmware
 * acknowledgement.
 */
void db8500_prcmu_modem_reset(void)
{
	mutex_lock(&mb1_transfer.lock);

	/* Wait until mailbox 1 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	/*
	 * No need to check return from PRCMU as modem should go in reset state
	 * This state is already managed by upper layer
	 */

	mutex_unlock(&mb1_transfer.lock);
}
/*
 * ack_dbb_wakeup() - Acknowledge a DBB wakeup to the PRCMU firmware by
 * sending MB0H_READ_WAKEUP_ACK through mailbox 0.  Called from the
 * threaded IRQ handler after wakeup events have been dispatched.
 */
static void ack_dbb_wakeup(void)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	/* Wait until mailbox 0 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
		cpu_relax();

	writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
	writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
/*
 * print_unknown_header_warning() - Log an unrecognized mailbox header.
 * @n:      mailbox number the header was read from.
 * @header: the unrecognized header value.
 *
 * Uses pr_warn() instead of the deprecated pr_warning() alias (the alias
 * was removed from the kernel); behavior is otherwise identical.
 */
static inline void print_unknown_header_warning(u8 n, u8 header)
{
	pr_warn("prcmu: Unknown message header (%d) in mailbox %d.\n",
		header, n);
}
/*
 * read_mailbox_0() - Handle wakeup events delivered through mailbox 0.
 *
 * The wakeup event word is double-buffered; the read pointer's LSB selects
 * which copy is current.  AC-wake/sleep acks and sysclk-ok events complete
 * their respective waiters directly; the remaining events are filtered by
 * the enabled dbb_irqs mask and dispatched as PRCMU interrupts.
 *
 * Returns true for wakeup headers so the threaded handler runs and acks
 * the wakeup (see prcmu_irq_thread_fn()/ack_dbb_wakeup()).
 */
static bool read_mailbox_0(void)
{
	bool r;
	u32 ev;
	unsigned int n;
	u8 header;

	header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0);
	switch (header) {
	case MB0H_WAKEUP_EXE:
	case MB0H_WAKEUP_SLEEP:
		/* LSB of the read pointer selects the valid event buffer. */
		if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
			ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500);
		else
			ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500);

		if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK))
			complete(&mb0_transfer.ac_wake_work);
		if (ev & WAKEUP_BIT_SYSCLK_OK)
			complete(&mb3_transfer.sysclk_work);

		/* Only dispatch events clients have actually enabled. */
		ev &= mb0_transfer.req.dbb_irqs;

		for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
			if (ev & prcmu_irq_bit[n])
				generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
		}
		r = true;
		break;
	default:
		print_unknown_header_warning(0, header);
		r = false;
		break;
	}
	writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR);
	return r;
}
/*
 * read_mailbox_1() - Collect the ARM/APE OPP acknowledgement from
 * mailbox 1 and wake the waiting requester.
 *
 * NOTE(review): the ack header is read back from the *request* header
 * location (PRCM_MBOX_HEADER_REQ_MB1) — presumably the firmware leaves
 * the request header in place; confirm against the firmware spec.
 */
static bool read_mailbox_1(void)
{
	mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1);
	mb1_transfer.ack.arm_opp = readb(tcdm_base +
		PRCM_ACK_MB1_CURRENT_ARM_OPP);
	mb1_transfer.ack.ape_opp = readb(tcdm_base +
		PRCM_ACK_MB1_CURRENT_APE_OPP);
	mb1_transfer.ack.ape_voltage_status = readb(tcdm_base +
		PRCM_ACK_MB1_APE_VOLTAGE_STATUS);
	writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR);
	complete(&mb1_transfer.work);
	return false;
}
/*
 * read_mailbox_2() - Collect the DPS status from mailbox 2 and wake the
 * waiting requester.
 */
static bool read_mailbox_2(void)
{
	mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS);
	writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR);
	complete(&mb2_transfer.work);
	return false;
}
/* Mailbox 3 carries no ack payload; just clear the interrupt. */
static bool read_mailbox_3(void)
{
	writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR);
	return false;
}
/*
 * read_mailbox_4() - Acknowledge mailbox 4 requests (memory state,
 * thermal and A9 watchdog commands).  The waiter is only completed for
 * known headers; unknown headers are logged and left pending.
 */
static bool read_mailbox_4(void)
{
	bool known;
	u8 header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4);

	switch (header) {
	case MB4H_MEM_ST:
	case MB4H_HOTDOG:
	case MB4H_HOTMON:
	case MB4H_HOT_PERIOD:
	case MB4H_A9WDOG_CONF:
	case MB4H_A9WDOG_EN:
	case MB4H_A9WDOG_DIS:
	case MB4H_A9WDOG_LOAD:
	case MB4H_A9WDOG_KICK:
		known = true;
		break;
	default:
		known = false;
		print_unknown_header_warning(4, header);
		break;
	}

	writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR);
	if (known)
		complete(&mb4_transfer.work);
	return false;
}
/*
 * read_mailbox_5() - Collect the ABB I2C transfer status and value from
 * mailbox 5 and wake the waiting requester.
 */
static bool read_mailbox_5(void)
{
	mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS);
	mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL);
	writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR);
	complete(&mb5_transfer.work);
	return false;
}
/* Mailbox 6 is unused; just clear the interrupt. */
static bool read_mailbox_6(void)
{
	writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR);
	return false;
}
/* Mailbox 7 is unused; just clear the interrupt. */
static bool read_mailbox_7(void)
{
	writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR);
	return false;
}
/*
 * Per-mailbox handlers, indexed by mailbox number.  Each returns true if
 * the threaded IRQ handler must run afterwards.
 */
static bool (* const read_mailbox[NUM_MB])(void) = {
	read_mailbox_0,
	read_mailbox_1,
	read_mailbox_2,
	read_mailbox_3,
	read_mailbox_4,
	read_mailbox_5,
	read_mailbox_6,
	read_mailbox_7
};
/*
 * prcmu_irq_handler() - Hard IRQ handler for the PRCMU interrupt.
 *
 * Reads the pending mailbox bits and dispatches each to its handler.
 * Returns IRQ_WAKE_THREAD if any handler requested the threaded part
 * (currently only mailbox 0 wakeups), IRQ_NONE if no mailbox was pending.
 */
static irqreturn_t prcmu_irq_handler(int irq, void *data)
{
	u32 bits;
	u8 n;
	irqreturn_t r;

	bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
	if (unlikely(!bits))
		return IRQ_NONE;

	r = IRQ_HANDLED;
	/* Clear each bit as its mailbox is serviced; stop when none remain. */
	for (n = 0; bits; n++) {
		if (bits & MBOX_BIT(n)) {
			bits -= MBOX_BIT(n);
			if (read_mailbox[n]())
				r = IRQ_WAKE_THREAD;
		}
	}
	return r;
}
/* Threaded IRQ part: acknowledge the DBB wakeup to the firmware. */
static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
{
	ack_dbb_wakeup();
	return IRQ_HANDLED;
}
/*
 * prcmu_mask_work() - Deferred work that pushes the current wakeup mask
 * to the PRCMU firmware (config_wakeups() sends a mailbox request, which
 * cannot be done from the irq_chip mask/unmask callbacks directly).
 */
static void prcmu_mask_work(struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
/*
 * prcmu_irq_mask() - irq_chip mask callback: clear the wakeup bit and
 * schedule the firmware update (except for the CA_SLEEP wakeup, which
 * must not trigger a mailbox transfer here).
 */
static void prcmu_irq_mask(struct irq_data *d)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);

	mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->hwirq];

	spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);

	if (d->irq != IRQ_PRCMU_CA_SLEEP)
		schedule_work(&mb0_transfer.mask_work);
}
/*
 * prcmu_irq_unmask() - irq_chip unmask callback: set the wakeup bit and
 * schedule the firmware update (same CA_SLEEP exception as masking).
 */
static void prcmu_irq_unmask(struct irq_data *d)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);

	mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->hwirq];

	spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);

	if (d->irq != IRQ_PRCMU_CA_SLEEP)
		schedule_work(&mb0_transfer.mask_work);
}
/* No-op ack callback: the mailbox IRQs need no per-irq acknowledgement. */
static void noop(struct irq_data *d)
{
}

/* irq_chip for the virtual PRCMU wakeup interrupts. */
static struct irq_chip prcmu_irq_chip = {
	.name		= "prcmu",
	.irq_disable	= prcmu_irq_mask,
	.irq_ack	= noop,
	.irq_mask	= prcmu_irq_mask,
	.irq_unmask	= prcmu_irq_unmask,
};
/*
 * fw_project_name() - Map a PRCMU firmware project id to a printable name.
 * Returns "Unknown" for ids not in the table.
 */
static __init char *fw_project_name(u32 project)
{
	static const struct {
		u32 project;
		char *name;
	} project_names[] = {
		{ PRCMU_FW_PROJECT_U8500,	"U8500" },
		{ PRCMU_FW_PROJECT_U8400,	"U8400" },
		{ PRCMU_FW_PROJECT_U9500,	"U9500" },
		{ PRCMU_FW_PROJECT_U8500_MBB,	"U8500 MBB" },
		{ PRCMU_FW_PROJECT_U8500_C1,	"U8500 C1" },
		{ PRCMU_FW_PROJECT_U8500_C2,	"U8500 C2" },
		{ PRCMU_FW_PROJECT_U8500_C3,	"U8500 C3" },
		{ PRCMU_FW_PROJECT_U8500_C4,	"U8500 C4" },
		{ PRCMU_FW_PROJECT_U9500_MBL,	"U9500 MBL" },
		{ PRCMU_FW_PROJECT_U8500_MBL,	"U8500 MBL" },
		{ PRCMU_FW_PROJECT_U8500_MBL2,	"U8500 MBL2" },
		{ PRCMU_FW_PROJECT_U8520,	"U8520 MBL" },
		{ PRCMU_FW_PROJECT_U8420,	"U8420" },
		{ PRCMU_FW_PROJECT_U9540,	"U9540" },
		{ PRCMU_FW_PROJECT_A9420,	"A9420" },
		{ PRCMU_FW_PROJECT_L8540,	"L8540" },
		{ PRCMU_FW_PROJECT_L8580,	"L8580" },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(project_names); i++) {
		if (project_names[i].project == project)
			return project_names[i].name;
	}
	return "Unknown";
}
/*
 * db8500_irq_map() - irq_domain map callback: wire each virtual PRCMU
 * wakeup interrupt to the prcmu irq_chip with simple-irq flow handling.
 */
static int db8500_irq_map(struct irq_domain *d, unsigned int virq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &prcmu_irq_chip,
				handle_simple_irq);
	set_irq_flags(virq, IRQF_VALID);

	return 0;
}

static struct irq_domain_ops db8500_irq_ops = {
	.map    = db8500_irq_map,
	.xlate  = irq_domain_xlate_twocell,
};
/*
 * db8500_irq_init() - Create the irq domain for the PRCMU wakeup
 * interrupts and pre-create a mapping for every wakeup.
 * @np:       device-tree node (if non-NULL, IRQ numbers are allocated
 *            dynamically and @irq_base is ignored).
 * @irq_base: legacy (non-DT) base for the virtual IRQ numbers.
 */
static int db8500_irq_init(struct device_node *np, int irq_base)
{
	int i;

	/* In the device tree case, just take some IRQs */
	if (np)
		irq_base = 0;

	db8500_irq_domain = irq_domain_add_simple(
		np, NUM_PRCMU_WAKEUPS, irq_base,
		&db8500_irq_ops, NULL);

	if (!db8500_irq_domain) {
		pr_err("Failed to create irqdomain\n");
		return -ENOSYS;
	}

	/* All wakeups will be used, so create mappings for all */
	for (i = 0; i < NUM_PRCMU_WAKEUPS; i++)
		irq_create_mapping(db8500_irq_domain, i);

	return 0;
}
/*
 * dbx500_fw_version_init() - Read and decode the PRCMU firmware version.
 * @pdev:           the PRCMU platform device (provides the "prcmu-tcpm"
 *                  memory resource).
 * @version_offset: offset of the version word within the TCPM region.
 *
 * The 32-bit version word is decoded as: bits 0-7 project id, 8-15 API
 * version, 16-23 function version, 24-31 errata level.  On any failure
 * the global fw_info simply stays invalid (fw_info.valid == false).
 */
static void dbx500_fw_version_init(struct platform_device *pdev,
			u32 version_offset)
{
	struct resource *res;
	void __iomem *tcpm_base;
	u32 version;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "prcmu-tcpm");
	if (!res) {
		dev_err(&pdev->dev,
			"Error: no prcmu tcpm memory region provided\n");
		return;
	}
	tcpm_base = ioremap(res->start, resource_size(res));
	if (!tcpm_base) {
		dev_err(&pdev->dev, "no prcmu tcpm mem region provided\n");
		return;
	}

	version = readl(tcpm_base + version_offset);
	fw_info.version.project = (version & 0xFF);
	fw_info.version.api_version = (version >> 8) & 0xFF;
	fw_info.version.func_version = (version >> 16) & 0xFF;
	fw_info.version.errata = (version >> 24) & 0xFF;
	strncpy(fw_info.version.project_name,
		fw_project_name(fw_info.version.project),
		PRCMU_FW_PROJECT_NAME_LEN);
	/*
	 * strncpy() does not NUL-terminate when the source string is
	 * PRCMU_FW_PROJECT_NAME_LEN or longer; force termination so the
	 * %s below (and later consumers) cannot read past the buffer.
	 */
	fw_info.version.project_name[PRCMU_FW_PROJECT_NAME_LEN - 1] = '\0';
	fw_info.valid = true;
	pr_info("PRCMU firmware: %s(%d), version %d.%d.%d\n",
		fw_info.version.project_name,
		fw_info.version.project,
		fw_info.version.api_version,
		fw_info.version.func_version,
		fw_info.version.errata);
	iounmap(tcpm_base);
}
/*
 * db8500_prcmu_early_init() - Early boot setup: map the PRCMU registers
 * and initialize all mailbox locks and completions before the platform
 * device probes.
 * @phy_base: physical base address of the PRCMU register block.
 * @size:     size of the register block.
 */
void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
{
	/*
	 * This is a temporary remap to bring up the clocks. It is
	 * subsequently replaced with a real remap. After the merge of
	 * the mailbox subsystem all of this early code goes away, and the
	 * clock driver can probe independently. An early initcall will
	 * still be needed, but it can be diverted into drivers/clk/ux500.
	 */
	prcmu_base = ioremap(phy_base, size);
	if (!prcmu_base)
		pr_err("%s: ioremap() of prcmu registers failed!\n", __func__);

	spin_lock_init(&mb0_transfer.lock);
	spin_lock_init(&mb0_transfer.dbb_irqs_lock);
	mutex_init(&mb0_transfer.ac_wake_lock);
	init_completion(&mb0_transfer.ac_wake_work);
	mutex_init(&mb1_transfer.lock);
	init_completion(&mb1_transfer.work);
	mb1_transfer.ape_opp = APE_NO_CHANGE;
	mutex_init(&mb2_transfer.lock);
	init_completion(&mb2_transfer.work);
	spin_lock_init(&mb2_transfer.auto_pm_lock);
	spin_lock_init(&mb3_transfer.lock);
	mutex_init(&mb3_transfer.sysclk_lock);
	init_completion(&mb3_transfer.sysclk_work);
	mutex_init(&mb4_transfer.lock);
	init_completion(&mb4_transfer.work);
	mutex_init(&mb5_transfer.lock);
	init_completion(&mb5_transfer.work);

	INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
}
/*
 * init_prcm_registers() - Drop the forced A9 peripheral/AXI clock enables
 * left set by pre-kernel (boot) code.
 */
static void __init init_prcm_registers(void)
{
	u32 val;

	val = readl(PRCM_A9PL_FORCE_CLKEN);
	val &= ~(PRCM_A9PL_FORCE_CLKEN_PRCM_A9PL_FORCE_CLKEN |
		PRCM_A9PL_FORCE_CLKEN_PRCM_A9AXI_FORCE_CLKEN);
	writel(val, (PRCM_A9PL_FORCE_CLKEN));
}
/*
 * Power domain switches (ePODs) modeled as regulators for the DB8500 SoC
 */

/* Consumers of the VAPE (application engine) power domain. */
static struct regulator_consumer_supply db8500_vape_consumers[] = {
	REGULATOR_SUPPLY("v-ape", NULL),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.4"),
	/* "v-mmc" changed to "vcore" in the mainline kernel */
	REGULATOR_SUPPLY("vcore", "sdi0"),
	REGULATOR_SUPPLY("vcore", "sdi1"),
	REGULATOR_SUPPLY("vcore", "sdi2"),
	REGULATOR_SUPPLY("vcore", "sdi3"),
	REGULATOR_SUPPLY("vcore", "sdi4"),
	REGULATOR_SUPPLY("v-dma", "dma40.0"),
	REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"),
	/* "v-uart" changed to "vcore" in the mainline kernel */
	REGULATOR_SUPPLY("vcore", "uart0"),
	REGULATOR_SUPPLY("vcore", "uart1"),
	REGULATOR_SUPPLY("vcore", "uart2"),
	REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
	REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
};

/* Consumers of the VSMPS2 supply. */
static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
	REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"),
	/* AV8100 regulator */
	REGULATOR_SUPPLY("hdmi_1v8", "0-0070"),
};

/* Consumers of the combined B2R2/MCDE power switch. */
static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
	REGULATOR_SUPPLY("vsupply", "b2r2_bus"),
	REGULATOR_SUPPLY("vsupply", "mcde"),
};

/* SVA MMDSP regulator switch */
static struct regulator_consumer_supply db8500_svammdsp_consumers[] = {
	REGULATOR_SUPPLY("sva-mmdsp", "cm_control"),
};

/* SVA pipe regulator switch */
static struct regulator_consumer_supply db8500_svapipe_consumers[] = {
	REGULATOR_SUPPLY("sva-pipe", "cm_control"),
};

/* SIA MMDSP regulator switch */
static struct regulator_consumer_supply db8500_siammdsp_consumers[] = {
	REGULATOR_SUPPLY("sia-mmdsp", "cm_control"),
};

/* SIA pipe regulator switch */
static struct regulator_consumer_supply db8500_siapipe_consumers[] = {
	REGULATOR_SUPPLY("sia-pipe", "cm_control"),
};

/* SGA (graphics accelerator) power switch. */
static struct regulator_consumer_supply db8500_sga_consumers[] = {
	REGULATOR_SUPPLY("v-mali", NULL),
};

/* ESRAM1 and 2 regulator switch */
static struct regulator_consumer_supply db8500_esram12_consumers[] = {
	REGULATOR_SUPPLY("esram12", "cm_control"),
};

/* ESRAM3 and 4 regulator switch */
static struct regulator_consumer_supply db8500_esram34_consumers[] = {
	REGULATOR_SUPPLY("v-esram34", "mcde"),
	REGULATOR_SUPPLY("esram34", "cm_control"),
	REGULATOR_SUPPLY("lcla_esram", "dma40.0"),
};
/*
 * Init data for the DB8500 power-domain regulators.  All of them only
 * support on/off control (REGULATOR_CHANGE_STATUS); entries without
 * consumer_supplies are controlled via other kernel interfaces.
 */
static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
	[DB8500_REGULATOR_VAPE] = {
		.constraints = {
			.name = "db8500-vape",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
			.always_on = true,
		},
		.consumer_supplies = db8500_vape_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
	},
	[DB8500_REGULATOR_VARM] = {
		.constraints = {
			.name = "db8500-varm",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VMODEM] = {
		.constraints = {
			.name = "db8500-vmodem",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VPLL] = {
		.constraints = {
			.name = "db8500-vpll",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VSMPS1] = {
		.constraints = {
			.name = "db8500-vsmps1",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VSMPS2] = {
		.constraints = {
			.name = "db8500-vsmps2",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_vsmps2_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_vsmps2_consumers),
	},
	[DB8500_REGULATOR_VSMPS3] = {
		.constraints = {
			.name = "db8500-vsmps3",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VRF1] = {
		.constraints = {
			.name = "db8500-vrf1",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sva-mmdsp",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_svammdsp_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_svammdsp_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
		.constraints = {
			/* "ret" means "retention" */
			.name = "db8500-sva-mmdsp-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sva-pipe",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_svapipe_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_svapipe_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sia-mmdsp",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_siammdsp_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_siammdsp_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
		.constraints = {
			.name = "db8500-sia-mmdsp-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sia-pipe",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_siapipe_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_siapipe_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SGA] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sga",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_sga_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_sga_consumers),

	},
	[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-b2r2-mcde",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_b2r2_mcde_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers),
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12] = {
		/*
		 * esram12 is set in retention and supplied by Vsafe when Vape is off,
		 * no need to hold Vape
		 */
		.constraints = {
			.name = "db8500-esram12",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_esram12_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_esram12_consumers),
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
		.constraints = {
			.name = "db8500-esram12-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34] = {
		/*
		 * esram34 is set in retention and supplied by Vsafe when Vape is off,
		 * no need to hold Vape
		 */
		.constraints = {
			.name = "db8500-esram34",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_esram34_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_esram34_consumers),
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
		.constraints = {
			.name = "db8500-esram34-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
};
/* Platform data for the ux500 watchdog MFD cell. */
static struct ux500_wdt_data db8500_wdt_pdata = {
	.timeout = 600, /* 10 minutes */
	.has_28_bits_resolution = true,
};
/*
 * Thermal Sensor
 */

/* IRQ resources handed to the db8500-thermal MFD cell. */
static struct resource db8500_thsens_resources[] = {
	{
		.name = "IRQ_HOTMON_LOW",
		.start  = IRQ_PRCMU_HOTMON_LOW,
		.end    = IRQ_PRCMU_HOTMON_LOW,
		.flags  = IORESOURCE_IRQ,
	},
	{
		.name = "IRQ_HOTMON_HIGH",
		.start  = IRQ_PRCMU_HOTMON_HIGH,
		.end    = IRQ_PRCMU_HOTMON_HIGH,
		.flags  = IORESOURCE_IRQ,
	},
};

/*
 * Trip points (in millidegrees C): three active cpufreq-cooling trips at
 * 70/75/80 C and a critical trip at 85 C.
 */
static struct db8500_thsens_platform_data db8500_thsens_data = {
	.trip_points[0] = {
		.temp = 70000,
		.type = THERMAL_TRIP_ACTIVE,
		.cdev_name = {
			[0] = "thermal-cpufreq-0",
		},
	},
	.trip_points[1] = {
		.temp = 75000,
		.type = THERMAL_TRIP_ACTIVE,
		.cdev_name = {
			[0] = "thermal-cpufreq-0",
		},
	},
	.trip_points[2] = {
		.temp = 80000,
		.type = THERMAL_TRIP_ACTIVE,
		.cdev_name = {
			[0] = "thermal-cpufreq-0",
		},
	},
	.trip_points[3] = {
		.temp = 85000,
		.type = THERMAL_TRIP_CRITICAL,
	},
	.num_trips = 4,
};
/* MFD sub-devices registered on every dbx500 variant. */
static const struct mfd_cell common_prcmu_devs[] = {
	{
		.name = "ux500_wdt",
		.platform_data = &db8500_wdt_pdata,
		.pdata_size = sizeof(db8500_wdt_pdata),
		.id = -1,
	},
};

/* MFD sub-devices specific to the DB8500 (skipped on u8540 for now). */
static const struct mfd_cell db8500_prcmu_devs[] = {
	{
		.name = "db8500-prcmu-regulators",
		.of_compatible = "stericsson,db8500-prcmu-regulator",
		.platform_data = &db8500_regulators,
		.pdata_size = sizeof(db8500_regulators),
	},
	{
		.name = "cpufreq-ux500",
		.of_compatible = "stericsson,cpufreq-ux500",
		.platform_data = &db8500_cpufreq_table,
		.pdata_size = sizeof(db8500_cpufreq_table),
	},
	{
		.name = "cpuidle-dbx500",
		.of_compatible = "stericsson,cpuidle-dbx500",
	},
	{
		.name = "db8500-thermal",
		.num_resources = ARRAY_SIZE(db8500_thsens_resources),
		.resources = db8500_thsens_resources,
		.platform_data = &db8500_thsens_data,
		.pdata_size = sizeof(db8500_thsens_data),
	},
};
/*
 * db8500_prcmu_update_cpufreq() - Expose the 1 GHz MAX OPP entry in the
 * cpufreq table when the chip actually supports it.
 */
static void db8500_prcmu_update_cpufreq(void)
{
	if (!prcmu_has_arm_maxopp())
		return;

	db8500_cpufreq_table[3].frequency = 1000000;
	db8500_cpufreq_table[3].driver_data = ARM_MAX_OPP;
}
/*
 * db8500_prcmu_register_ab8500() - Register the AB8500 PMIC core as an
 * MFD child of the PRCMU.
 * @parent: the PRCMU device.
 * @pdata:  AB8500 platform data passed through to the child.
 * @irq:    the AB8500 interrupt line.
 *
 * Returns the result of mfd_add_devices().
 */
static int db8500_prcmu_register_ab8500(struct device *parent,
					struct ab8500_platform_data *pdata,
					int irq)
{
	struct resource ab8500_resource = DEFINE_RES_IRQ(irq);
	struct mfd_cell ab8500_cell = {
		.name = "ab8500-core",
		.of_compatible = "stericsson,ab8500",
		.id = AB8500_VERSION_AB8500,
		.platform_data = pdata,
		.pdata_size = sizeof(struct ab8500_platform_data),
		.resources = &ab8500_resource,
		.num_resources = 1,
	};

	return mfd_add_devices(parent, 0, &ab8500_cell, 1, NULL, 0, NULL);
}
/**
 * db8500_prcmu_probe - probe the DB8500 PRCMU platform device
 * @pdev: platform device carrying the "prcmu" and "prcmu-tcdm" memory
 *        resources, the mailbox IRQ and the PRCMU platform data
 *
 * Maps the PRCMU registers and the TCDM, initializes the PRCM registers
 * and the firmware-version info, hooks up the mailbox interrupt and
 * registers the MFD sub-devices (watchdog, regulators, cpufreq, cpuidle,
 * thermal and the AB8500 core).
 *
 * Returns 0 on success or a negative errno.
 */
static int db8500_prcmu_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct prcmu_pdata *pdata = dev_get_platdata(&pdev->dev);
	int irq = 0, err = 0;
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu");
	if (!res) {
		dev_err(&pdev->dev, "no prcmu memory region provided\n");
		return -ENOENT;
	}
	prcmu_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!prcmu_base) {
		dev_err(&pdev->dev,
			"failed to ioremap prcmu register memory\n");
		return -ENOENT;
	}
	init_prcm_registers();
	dbx500_fw_version_init(pdev, pdata->version_offset);
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm");
	if (!res) {
		dev_err(&pdev->dev, "no prcmu tcdm region provided\n");
		return -ENOENT;
	}
	tcdm_base = devm_ioremap(&pdev->dev, res->start,
			resource_size(res));
	/* Fix: the TCDM mapping was previously used without a NULL check,
	 * unlike the "prcmu" mapping above. */
	if (!tcdm_base) {
		dev_err(&pdev->dev,
			"failed to ioremap prcmu-tcdm register memory\n");
		return -ENOENT;
	}

	/* Clean up the mailbox interrupts after pre-kernel code. */
	writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "no prcmu irq provided\n");
		return -ENOENT;
	}
	err = request_threaded_irq(irq, prcmu_irq_handler,
		prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
	if (err < 0) {
		pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
		err = -EBUSY;
		goto no_irq_return;
	}

	db8500_irq_init(np, pdata->irq_base);

	prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);

	db8500_prcmu_update_cpufreq();

	err = mfd_add_devices(&pdev->dev, 0, common_prcmu_devs,
			      ARRAY_SIZE(common_prcmu_devs), NULL, 0, db8500_irq_domain);
	if (err) {
		pr_err("prcmu: Failed to add subdevices\n");
		/* Consistency: use the common exit path like the error
		 * branches below (behavior unchanged — it just returns err). */
		goto no_irq_return;
	}

	/* TODO: Remove restriction when clk definitions are available. */
	if (!of_machine_is_compatible("st-ericsson,u8540")) {
		err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
				      ARRAY_SIZE(db8500_prcmu_devs), NULL, 0,
				      db8500_irq_domain);
		if (err) {
			mfd_remove_devices(&pdev->dev);
			pr_err("prcmu: Failed to add subdevices\n");
			goto no_irq_return;
		}
	}

	err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata,
					   pdata->ab_irq);
	if (err) {
		mfd_remove_devices(&pdev->dev);
		pr_err("prcmu: Failed to add ab8500 subdevice\n");
		goto no_irq_return;
	}

	pr_info("DB8500 PRCMU initialized\n");

no_irq_return:
	return err;
}
/* Device-tree match table: binds to "stericsson,db8500-prcmu" nodes. */
static const struct of_device_id db8500_prcmu_match[] = {
	{ .compatible = "stericsson,db8500-prcmu"},
	{ },
};
/* Platform driver glue; no .remove — the PRCMU is never unbound. */
static struct platform_driver db8500_prcmu_driver = {
	.driver = {
		.name = "db8500-prcmu",
		.owner = THIS_MODULE,
		.of_match_table = db8500_prcmu_match,
	},
	.probe = db8500_prcmu_probe,
};
/*
 * Registered with core_initcall so the PRCMU is available early in
 * boot — presumably because other drivers depend on its services.
 */
static int __init db8500_prcmu_init(void)
{
	return platform_driver_register(&db8500_prcmu_driver);
}

core_initcall(db8500_prcmu_init);

MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>");
MODULE_DESCRIPTION("DB8500 PRCM Unit driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
percy-g2/android_kernel_sony_u8500_OLD | drivers/md/raid5.c | 491 | 171102 | /*
* raid5.c : Multiple Devices driver for Linux
* Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
* Copyright (C) 1999, 2000 Ingo Molnar
* Copyright (C) 2002, 2003 H. Peter Anvin
*
* RAID-4/5/6 management functions.
* Thanks to Penguin Computing for making the RAID-6 development possible
* by donating a test server!
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* You should have received a copy of the GNU General Public License
* (for example /usr/src/linux/COPYING); if not, write to the Free
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* BITMAP UNPLUGGING:
*
* The sequencing for updating the bitmap reliably is a little
* subtle (and I got it wrong the first time) so it deserves some
* explanation.
*
* We group bitmap updates into batches. Each batch has a number.
* We may write out several batches at once, but that isn't very important.
* conf->seq_write is the number of the last batch successfully written.
* conf->seq_flush is the number of the last batch that was closed to
* new additions.
* When we discover that we will need to write to any block in a stripe
* (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
* the number of the batch it will be in. This is seq_flush+1.
* When we are ready to do a write, if that batch hasn't been written yet,
* we plug the array and queue the stripe for later.
* When an unplug happens, we increment bm_flush, thus closing the current
* batch.
* When we notice that bm_flush > bm_write, we write out all pending updates
* to the bitmap, and advance bm_write to where bm_flush was.
* This may occasionally write a bit out twice, but is sure never to
* miss any bits.
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"
/*
 * Stripe cache
 */

/* Number of stripe heads allocated to the cache up front. */
#define NR_STRIPES 256
/* One page per device per stripe unit. */
#define STRIPE_SIZE PAGE_SIZE
/* log2 of sectors per stripe unit (PAGE_SHIFT minus log2(512)). */
#define STRIPE_SHIFT (PAGE_SHIFT - 9)
#define STRIPE_SECTORS (STRIPE_SIZE>>9)
/* Wake raid5d when fewer than this many preread-active stripes remain. */
#define IO_THRESHOLD 1
#define BYPASS_THRESHOLD 1
/* The stripe hash table fills exactly one page. */
#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK (NR_HASH - 1)
/* Hash a stripe's starting sector to its bucket in conf->stripe_hashtbl. */
#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap. There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)

/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA 1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
/* On SMP, assert that conf->device_lock is held where expected. */
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#ifdef DEBUG
/* In debug builds, expand the inline keywords to nothing so that no
 * function is inlined and all symbols stay visible. */
#define inline
#define __inline__
#endif

/* Rate-limited printk to avoid flooding the log on repeated errors. */
#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
/*
* We maintain a biased count of active stripes in the bottom 16 bits of
* bi_phys_segments, and a count of processed stripes in the upper 16 bits
*/
/* Low 16 bits of bi_phys_segments: the biased active-stripe count. */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	unsigned int segments = bio->bi_phys_segments;

	return segments & 0xffff;
}
/* High 16 bits of bi_phys_segments: the processed-stripe count. */
static inline int raid5_bi_hw_segments(struct bio *bio)
{
	unsigned int segments = bio->bi_phys_segments;

	return (segments >> 16) & 0xffff;
}
/* Decrement the active-stripe count and return its new value.
 * The decrement touches the whole word, but callers only ever take
 * the value down within the low 16 bits. */
static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	bio->bi_phys_segments--;
	return bio->bi_phys_segments & 0xffff;
}
/* Decrement the processed-stripe count (upper 16 bits) and return its
 * new value; the lower 16 bits are preserved unchanged. */
static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short hw = (bio->bi_phys_segments >> 16) & 0xffff;

	hw--;
	bio->bi_phys_segments = (hw << 16) | (bio->bi_phys_segments & 0xffff);
	return hw;
}
/* Store @cnt in the upper 16 bits, keeping the lower 16 bits intact. */
static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	bio->bi_phys_segments = (bio->bi_phys_segments & 0xffff) | (cnt << 16);
}
/* Find first data disk in a raid6 stripe */
/* First data disk of a raid6 stripe: DDF layouts always begin at
 * device 0; md layouts begin just after the Q block (wrapping to 0
 * when Q is the last device). */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		return 0;
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	return sh->qd_idx + 1;
}
/* Advance circularly to the next device index in [0, raid_disks). */
static inline int raid6_next_disk(int disk, int raid_disks)
{
	int next = disk + 1;

	return (next < raid_disks) ? next : 0;
}
/* When walking through the disks in a raid5, starting at raid6_d0,
* We need to map each disk to a 'slot', where the data disks are slot
* 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
* is raid_disks-1. This help does that mapping.
*/
/*
 * Map device index @idx to its syndrome slot.  P maps to slot
 * syndrome_disks, Q to syndrome_disks+1, data disks to the running
 * *count.  The placement of the (*count)++ increments encodes the
 * layout difference: DDF counts P and Q in the rotation (increment
 * before the P/Q checks), md does not (increment only for data disks).
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
/* Complete a singly-linked (via bi_next) chain of bios with success,
 * detaching each from the chain and zeroing bi_size before bio_endio(). */
static void return_io(struct bio *return_bi)
{
	while (return_bi) {
		struct bio *next = return_bi->bi_next;

		return_bi->bi_next = NULL;
		return_bi->bi_size = 0;
		bio_endio(return_bi, 0);
		return_bi = next;
	}
}
static void print_raid5_conf (raid5_conf_t *conf);
/* Nonzero when any asynchronous stripe operation (check, reconstruct,
 * biofill or compute) is still in flight for @sh. */
static int stripe_operations_active(struct stripe_head *sh)
{
	if (sh->check_state || sh->reconstruct_state)
		return 1;
	return test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
/*
 * Drop one reference to @sh; caller must hold conf->device_lock.
 * When the last reference is dropped, route the stripe to the right
 * list: delayed_list, bitmap_list (its bitmap batch not yet written),
 * or handle_list when it needs servicing; otherwise it becomes
 * inactive (free) unless it is part of an expansion.
 */
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state))
				list_add_tail(&sh->lru, &conf->delayed_list);
			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0)
				/* its bitmap batch hasn't been written yet */
				list_add_tail(&sh->lru, &conf->bitmap_list);
			else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				/* below IO_THRESHOLD: let raid5d start prereads */
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}
/* Locked wrapper around __release_stripe(): takes conf->device_lock
 * (irq-safe) for callers that don't already hold it. */
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
/* Unhash @sh; hlist_del_init leaves sh->hash safe to re-add later. */
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}
/* Hash @sh by its starting sector; caller must hold conf->device_lock. */
static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
/* find an idle stripe, make sure it is unhashed, and return it.
 * Returns NULL when the inactive list is empty.  The stripe is
 * removed from the hash and counted as active.  Caller must hold
 * conf->device_lock.
 */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
/* Release every per-device page of @sh back to the page allocator,
 * tolerating slots that were never populated. */
static void shrink_buffers(struct stripe_head *sh)
{
	int num = sh->raid_conf->pool_size;
	int i;

	for (i = 0; i < num; i++) {
		struct page *p = sh->dev[i].page;

		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}
/* Allocate one page per device for @sh.  Returns 0 on success, 1 on
 * allocation failure (pages allocated so far are left in place for
 * the caller to clean up via shrink_buffers). */
static int grow_buffers(struct stripe_head *sh)
{
	int num = sh->raid_conf->pool_size;
	int i;

	for (i = 0; i < num; i++) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			return 1;
		sh->dev[i].page = page;
	}
	return 0;
}
static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
struct stripe_head *sh);
/*
 * Re-initialize a free stripe head for @sector (geometry chosen by
 * @previous: pre- or post-reshape) and re-hash it.  The stripe must be
 * idle: zero refcount, no pending handling or async ops, and every
 * device free of queued bios — any leftover I/O is a bug.
 * Caller must hold conf->device_lock.
 */
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* a recycled stripe must not carry stale I/O state */
		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}
/*
 * Look up a cached stripe by sector and reshape generation.  Returns
 * NULL when not cached.  Caller must hold conf->device_lock.
 */
static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
/*
* Need to check if array has failed when deciding whether to:
* - start an array
* - remove non-faulty devices
* - add a spare
* - allow a reshape
* This determination is simple when no reshape is happening.
* However if there is a reshape, we need to carefully check
* both the before and after sections.
* This is because some failed devices may only affect one
* of the two sections, and some non-in_sync devices may
* be insync in the section most affected by failed devices.
*/
/*
 * Returns 1 if the array has lost more devices than it can tolerate
 * (see the block comment above for why both the pre- and post-reshape
 * geometries must be checked independently during a reshape).
 */
static int has_failed(raid5_conf_t *conf)
{
	int degraded;
	int i;

	/* no reshape in progress: a simple degraded-count comparison */
	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	/* Pass 1: the 'previous' (pre-reshape) geometry. */
	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in-sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (degraded > conf->max_degraded)
		return 1;

	/* Pass 2: the 'new' (post-reshape) geometry. */
	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}
/*
 * Get a referenced stripe head for @sector, finding it in the cache or
 * initializing a free one.  Waits for any quiesce (unless @noquiesce)
 * and, unless @noblock, sleeps until a stripe becomes available —
 * so may return NULL only when @noblock is set.  @previous selects
 * the pre-reshape geometry.
 */
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				/* Cache exhausted: block new allocations and
				 * wait until usage falls below 3/4 of the
				 * cache, to batch wakeups under pressure. */
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    );
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			/* Found in cache: take it off whatever list it is
			 * on (unless already referenced by someone else). */
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				    && !test_bit(STRIPE_EXPANDING, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);
/*
 * Issue the reads/writes that have been scheduled on @sh: for each
 * device flagged R5_Wantwrite/R5_Wantread, build the embedded per-device
 * bio and submit it to the member rdev.  Devices whose rdev is missing
 * or Faulty are skipped (a skipped write degrades the stripe).
 */
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		/* decide direction from the Want* flags, consuming them */
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw & WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		/* pin the rdev (nr_pending) under RCU; treat Faulty as absent */
		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			/* (re)initialize the embedded single-vec bio */
			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			/* a rewrite of a previously-failed read counts as
			 * a corrected error */
			if ((rw & WRITE) &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					&rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw & WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
/*
 * Copy the overlap between @bio and the stripe page at @sector, one bio
 * segment at a time, using the async memcpy API.  @frombio selects the
 * direction (bio -> page when nonzero).  Copies are chained onto @tx and
 * the last descriptor is returned.
 */
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	/* signed byte offset of the bio start relative to the stripe page */
	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bvl->bv_len;
		int clen;
		int b_offset = 0;

		/* skip the part of this segment before the page start */
		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		/* clip the copy to the end of the stripe page */
		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bvl->bv_offset;
			bio_page = bvl->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset +=  len;
	}

	return tx;
}
/*
 * Async callback after a biofill run: for each device whose fill
 * completed (R5_Wantfill), walk its read list and collect bios whose
 * last stripe-reference was dropped, then complete them outside the
 * lock via return_io().
 */
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	spin_lock_irq(&conf->device_lock);
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	spin_unlock_irq(&conf->device_lock);
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
/*
 * Start a biofill: for each device flagged R5_Wantfill, move its toread
 * list to dev->read and schedule async copies from the stripe page into
 * the waiting bios; ops_complete_biofill() runs when the chain finishes.
 */
static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			/* claim the pending reads under the device lock */
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
					dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}
/* Mark compute target @target (ignored when negative) as up to date,
 * consuming its R5_Wantcompute flag (which must have been set). */
static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	set_bit(R5_UPTODATE, &tgt->flags);
	clear_bit(R5_Wantcompute, &tgt->flags);
}
/*
 * Async callback after a compute run: mark the one or two recomputed
 * targets up to date and advance a parity-check state machine that was
 * waiting on this compute, then hand the stripe back for handling.
 */
static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
struct raid5_percpu *percpu)
{
return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}
/*
 * RAID5 compute: rebuild the single target block (sh->ops.target) by
 * XOR-ing all other device pages into its page.  With only one source,
 * the XOR degenerates to a memcpy.  Returns the submitted descriptor;
 * ops_complete_compute() fires on completion.
 */
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
/* set_syndrome_sources - populate source buffers for gen_syndrome
* @srcs - (struct page *) array of size sh->disks
* @sh - stripe_head to parse
*
* Populates srcs in proper layout order for the stripe and returns the
* 'count' of sources to be used in a call to async_gen_syndrome. The P
* destination buffer is recorded in srcs[count] and the Q destination
* is recorded in srcs[count+1]].
*/
/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome. The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	/* DDF layouts include P and Q in the rotation, md layouts do not */
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}
/*
 * RAID6 compute with exactly one failed block: regenerate Q with a
 * syndrome computation, or regenerate a data/P block by XOR-ing all
 * other non-Q pages.  Exactly one of ops.target/ops.target2 must be
 * valid (>= 0).
 */
static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		/* rebuilding Q: run a full syndrome over the data disks */
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}
/*
 * RAID6 compute with two failed blocks.  The pair (faila, failb) is
 * expressed in syndrome-slot order (faila < failb) and dispatched to
 * the matching recovery strategy: P+Q -> full syndrome regeneration;
 * D+Q -> XOR the data block back from P, then regenerate Q; D+P or
 * D+D -> the dedicated raid6 recovery routines.
 */
static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks ; i++)
		blocks[i] = NULL;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			/* first op: plain XOR rebuild of D (no callback —
			 * completion fires on the final syndrome below) */
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}
/* Async callback after a prexor run; only traces — the reconstruct
 * chain that depends on it carries the real completion work. */
static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}
/*
 * Read-modify-write step 1: XOR the blocks about to be drained
 * (R5_Wantdrain) into the parity page, subtracting the old data from
 * the existing parity (ASYNC_TX_XOR_DROP_DST keeps the dest as a
 * source).  Chained after @tx; returns the new tail descriptor.
 */
static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
/*
 * Drain queued writes into the stripe pages: for each device flagged
 * R5_Wantdrain, move dev->towrite to dev->written (under sh->lock) and
 * schedule async copies from the bios into the page.  FUA bios set
 * R5_WantFUA so the eventual disk write preserves the flag.  Chained
 * after @tx; returns the new tail descriptor.
 */
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				if (wbi->bi_rw & REQ_FUA)
					set_bit(R5_WantFUA, &dev->flags);
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}
/*
 * Async callback after a reconstruct (parity generation) run: mark the
 * written blocks and parity device(s) up to date, propagate FUA to all
 * of them if any written block wanted it, and advance the
 * reconstruct_state machine to the matching *_result state.
 */
static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;
	bool fua = false;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* did any drained block request FUA semantics? */
	for (i = disks; i--; )
		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx) {
			set_bit(R5_UPTODATE, &dev->flags);
			if (fua)
				set_bit(R5_WantFUA, &dev->flags);
		}
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
/*
 * RAID5 parity generation: XOR into the parity page either the newly
 * written blocks (read-modify-write: prexor already subtracted the old
 * data, so the dest is kept as a source) or all non-parity blocks
 * (reconstruct-write: dest is zeroed first).  Completion is reported
 * through ops_complete_reconstruct().
 */
static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}
/*
 * RAID6 parity generation: regenerate both P and Q from the data disks
 * in one syndrome run, chained after @tx; ops_complete_reconstruct()
 * fires on completion.
 */
static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}
/*
 * ops_complete_check - async_tx callback for parity-check operations.
 * @stripe_head_ref: the stripe_head that was being checked
 *
 * Advances the check state machine to "result available", flags the
 * stripe for re-handling, and drops the reference taken when the check
 * was submitted.
 */
static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
/*
 * ops_run_check_p - verify the P (xor) parity of a stripe asynchronously.
 * @sh:     stripe to check
 * @percpu: per-cpu scratch buffers for the xor source list
 *
 * Xors all data blocks against the existing parity page; the zero-sum
 * result lands in sh->ops.zero_sum_result and ops_complete_check() is
 * invoked when the validation finishes.
 */
static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;
	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		/* parity devices are not xor sources here */
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}
	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);
	/* reference dropped by ops_complete_check() */
	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}
/*
 * ops_run_check_pq - validate the RAID6 syndrome (Q, and optionally P).
 * @sh:     stripe to check
 * @percpu: per-cpu scratch buffers (source list and spare page)
 * @checkp: non-zero to validate P as well as Q; zero checks Q only
 *
 * Result is stored in sh->ops.zero_sum_result; completion goes through
 * ops_complete_check().
 */
static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;
	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);
	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		/* NULL P slot tells async_syndrome_val to skip the P check */
		srcs[count] = NULL;
	/* reference dropped by ops_complete_check() */
	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
/*
 * __raid_run_ops - dispatch the asynchronous operations requested for a stripe.
 * @sh:          stripe to operate on
 * @ops_request: bitmask of STRIPE_OP_* flags selecting which ops to run
 *
 * Runs the requested operations in dependency order (biofill, compute,
 * prexor, biodrain, reconstruct, check), chaining them through a single
 * async_tx descriptor.  Runs with preemption disabled (get_cpu) because
 * the per-cpu scribble/spare buffers must not migrate mid-chain.
 */
static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;
	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}
	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			/* raid6: one or two missing blocks to recompute */
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}
	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);
	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}
	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}
	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}
	/* biofill/biodrain consumed pending bios; wake anyone who was
	 * blocked waiting for an overlapping request to clear
	 */
	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}
#ifdef CONFIG_MULTICORE_RAID456
/*
 * async_run_ops - async_schedule() trampoline for __raid_run_ops.
 *
 * Copies the request out of the stripe, releases the REQ_PENDING lock
 * so the next raid_run_ops() caller can stage its request, then runs
 * the ops and drops the stripe reference taken by raid_run_ops().
 */
static void async_run_ops(void *param, async_cookie_t cookie)
{
	struct stripe_head *sh = param;
	unsigned long ops_request = sh->ops.request;
	clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
	wake_up(&sh->ops.wait_for_ops);
	__raid_run_ops(sh, ops_request);
	release_stripe(sh);
}
/*
 * raid_run_ops - multicore variant: queue stripe operations to the
 * async_schedule machinery instead of running them inline.
 */
static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	/* since handle_stripe can be called outside of raid5d context
	 * we need to ensure sh->ops.request is de-staged before another
	 * request arrives
	 */
	wait_event(sh->ops.wait_for_ops,
		   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
	sh->ops.request = ops_request;
	/* reference dropped by async_run_ops() */
	atomic_inc(&sh->count);
	async_schedule(async_run_ops, sh);
}
#else
#define raid_run_ops __raid_run_ops
#endif
/*
 * grow_one_stripe - allocate and initialise one stripe_head for the cache.
 * @conf: array configuration owning the stripe cache
 *
 * Returns 1 on success, 0 on allocation failure.  The new stripe is
 * placed on the inactive list via release_stripe().
 */
static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	/* stripe_head has a trailing r5dev array sized by pool_size */
	memset(sh, 0, sizeof(*sh) + (conf->pool_size-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);
#ifdef CONFIG_MULTICORE_RAID456
	init_waitqueue_head(&sh->ops.wait_for_ops);
#endif
	if (grow_buffers(sh)) {
		/* partial page allocation: undo and bail */
		shrink_buffers(sh);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}
/*
 * grow_stripes - create the stripe_head slab cache and populate it.
 * @conf: array configuration
 * @num:  number of stripes to pre-allocate
 *
 * Returns 0 on success, 1 on failure (cache creation or stripe
 * allocation).  Two cache names are prepared because resize_stripes()
 * alternates between them when re-creating the cache.
 *
 * Use snprintf rather than sprintf: cache_name[] is a fixed-size buffer
 * and mdname()/%p output length is not under this function's control,
 * so an unbounded sprintf risks overflowing it.
 */
static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);
	if (conf->mddev->gendisk)
		snprintf(conf->cache_name[0], sizeof(conf->cache_name[0]),
			"raid%d-%s", conf->level, mdname(conf->mddev));
	else
		snprintf(conf->cache_name[0], sizeof(conf->cache_name[0]),
			"raid%d-%p", conf->level, conf->mddev);
	snprintf(conf->cache_name[1], sizeof(conf->cache_name[1]),
		"%s-alt", conf->cache_name[0]);
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}
/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	/* one page pointer plus one address-conversion slot per entry */
	return (sizeof(struct page *) + sizeof(addr_conv_t)) * (num + 2);
}
/*
 * resize_stripes - grow every cached stripe_head to hold 'newsize' devices.
 * @conf:    array configuration
 * @newsize: new device count per stripe (must exceed conf->pool_size)
 *
 * Returns 0 on success or a negative errno.  Shrinking is never done.
 */
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;
	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */
	err = md_allow_write(conf->mddev);
	if (err)
		return err;
	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;
	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;
		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);
#ifdef CONFIG_MULTICORE_RAID456
		init_waitqueue_head(&nsh->ops.wait_for_ops);
#endif
		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    );
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		/* move pages from the old stripe; new slots get filled
		 * in step 4
		 */
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);
	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;
	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;
		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);
		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();
	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);
		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section pass, GFP_NOIO no longer needed */
	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
/*
 * drop_one_stripe - free one inactive stripe_head from the cache.
 * @conf: array configuration
 *
 * Returns 1 if a stripe was freed, 0 when the inactive list is empty.
 */
static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	/* inactive stripes must have no outstanding references */
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}
/*
 * shrink_stripes - free the entire stripe cache and its slab.
 * @conf: array configuration
 */
static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;
	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
/*
 * raid5_end_read_request - bio completion handler for stripe reads.
 * @bi:    completed bio (one of the per-device sh->dev[i].req bios)
 * @error: bio error code (success is judged via BIO_UPTODATE instead)
 *
 * On success, marks the block up to date and clears any prior read-error
 * state (a successful re-read after rewrite means the error was
 * corrected).  On failure, accounts the error against the rdev and either
 * schedules a retry (R5_ReadError) or fails the device via md_error().
 */
static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;
	/* find which device's embedded bio this is */
	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;
	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}
	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			/* a previously failed block read back fine after
			 * the rewrite - report the correction
			 */
			rdev = conf->disks[i].rdev;
			printk_rl(KERN_INFO "md/raid:%s: read error corrected"
				  " (%lu sectors at %llu on %s)\n",
				  mdname(conf->mddev), STRIPE_SECTORS,
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdevname(rdev->bdev, b));
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
		int retry = 0;
		rdev = conf->disks[i].rdev;
		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (conf->mddev->degraded >= conf->max_degraded)
			/* no redundancy left to reconstruct from */
			printk_rl(KERN_WARNING
				  "md/raid:%s: read error not correctable "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk_rl(KERN_WARNING
				  "md/raid:%s: read error NOT corrected!! "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "md/raid:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			/* give up on this device */
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
/*
 * raid5_end_write_request - bio completion handler for stripe writes.
 * @bi:    completed bio (one of the per-device sh->dev[i].req bios)
 * @error: bio error code (success is judged via BIO_UPTODATE instead)
 *
 * A failed write fails the whole device (md_error); the stripe is then
 * flagged for re-handling so the state machine can react.
 */
static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	/* find which device's embedded bio this is */
	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;
	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}
	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
/*
 * raid5_build_block - (re)initialise the per-device state of one stripe slot.
 * @sh:       owning stripe
 * @i:        device index within the stripe
 * @previous: use the pre-reshape geometry when computing dev->sector
 *
 * Sets up the embedded single-vector bio pointing at the device's page
 * and records the corresponding array sector.
 */
static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];
	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;
	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;
	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i, previous);
}
/*
 * error - mark a member device as Faulty after an unrecoverable failure.
 * @mddev: the md array
 * @rdev:  the failing member device
 *
 * If the device was In_sync, the degraded count is bumped under
 * device_lock and any running recovery is interrupted.  The superblock
 * is flagged for update via MD_CHANGE_DEVS.
 */
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = mddev->private;
	pr_debug("raid456: error called\n");
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery was running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	}
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid:%s: Disk failure on %s, disabling device.\n"
	       "md/raid:%s: Operation continuing on %d devices.\n",
	       mdname(mddev),
	       bdevname(rdev->bdev, b),
	       mdname(mddev),
	       conf->raid_disks - mddev->degraded);
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
/*
 * raid5_compute_sector - map an array sector onto a device and device sector.
 * @conf:     array configuration
 * @r_sector: array ('big') sector to map
 * @previous: use the pre-reshape geometry (disks/chunk/algorithm)
 * @dd_idx:   out: index of the data disk holding this sector
 * @sh:       optional: receives pd_idx/qd_idx/ddf_layout for the stripe
 *
 * Returns the sector number within the chosen device.  The parity
 * placement depends on the array level (4/5/6) and the layout algorithm;
 * the per-algorithm arithmetic below must not be reordered.
 */
static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	sector_t stripe, stripe2;
	sector_t chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;
	/* First compute the information on this sector */
	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number;
	*dd_idx = sector_div(stripe, data_disks);
	stripe2 = stripe;
	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = ~0;
	switch(conf->level) {
	case 4:
		/* raid4: parity is always the last device */
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			BUG();
		}
		break;
	case 6:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;
		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but or
			 * of blocks for computing Q is different.
			 */
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;
		case ALGORITHM_ROTATING_N_RESTART:
			/* Same a left_asymmetric, by first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			stripe2 += 1;
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;
		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;
		default:
			BUG();
		}
		break;
	}
	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
/*
 * compute_blocknr - inverse of raid5_compute_sector.
 * @sh:       stripe containing the block
 * @i:        device index within the stripe
 * @previous: use the pre-reshape geometry
 *
 * Maps a (stripe, device) pair back to the array ('big') sector it holds.
 * Returns 0 for parity devices or if the round-trip self-check against
 * raid5_compute_sector() fails (which indicates a layout bug).
 */
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	sector_t chunk_number;
	int dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;
	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	if (i == sh->pd_idx)
		return 0;
	/* undo the per-algorithm dd_idx shift applied by
	 * raid5_compute_sector so 'i' becomes a data-disk ordinal
	 */
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Like left_symmetric, but P is before Q */
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else {
				/* D D Q P D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 1);
			}
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			BUG();
		}
		break;
	}
	chunk_number = stripe * data_disks + i;
	r_sector = chunk_number * sectors_per_chunk + chunk_offset;
	/* verify the result by mapping it forward again */
	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
		|| sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
		       mdname(conf->mddev));
		return 0;
	}
	return r_sector;
}
/*
 * schedule_reconstruction - stage the async operations for a stripe write.
 * @sh:     stripe being written
 * @s:      per-handle state; ops_request and locked count are updated
 * @rcw:    non-zero for reconstruct-write, zero for read-modify-write
 * @expand: non-zero when this is a reshape expansion (no bios to drain)
 *
 * Sets sh->reconstruct_state, requests the drain/prexor/reconstruct ops,
 * and locks the devices that will be written (including parity), so they
 * stay stable while the async operations are in flight.
 */
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;
	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;
		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		/* all data plus parity locked means a full-stripe write */
		if (s->locked + conf->max_degraded == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
	} else {
		/* read-modify-write path: raid6 never does rmw, and the
		 * old parity must already be available to xor against
		 */
		BUG_ON(level == 6);
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;
			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
	}
	/* keep the parity disk(s) locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;
	if (level == 6) {
		int qd_idx = sh->qd_idx;
		struct r5dev *dev = &sh->dev[qd_idx];
		set_bit(R5_LOCKED, &dev->flags);
		clear_bit(R5_UPTODATE, &dev->flags);
		s->locked++;
	}
	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}
/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
/*
 * add_stripe_bio - attach a bio to a stripe device's read or write chain.
 * @sh:       target stripe
 * @bi:       bio to attach (sector-sorted insertion into the chain)
 * @dd_idx:   device index within the stripe
 * @forwrite: non-zero to queue on towrite, zero for toread
 *
 * Returns 1 on success; returns 0 and sets R5_Overlap when the bio
 * overlaps an already-queued request (caller must wait and retry).
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite=0;
	pr_debug("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);
	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	/* find the insertion point, rejecting any overlap with the
	 * requests already queued before or after it
	 */
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;
	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);
	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}
	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;
 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}
static void end_reshape(raid5_conf_t *conf);
/*
 * stripe_set_idx - fill in pd_idx/qd_idx (and ddf_layout) for a stripe.
 * @stripe:   stripe number (consumed by sector_div)
 * @conf:     array configuration
 * @previous: use the pre-reshape geometry
 * @sh:       stripe_head whose parity indices are set
 *
 * Reconstructs an array sector for the stripe and runs it through
 * raid5_compute_sector() purely for its side effect on @sh.
 */
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh)
{
	int sectors_per_chunk =
		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
	int dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	raid5_compute_sector(conf,
			     stripe * (disks - conf->max_degraded)
			     *sectors_per_chunk + chunk_offset,
			     previous,
			     &dd_idx, sh);
}
/*
 * handle_failed_stripe - abort all I/O on a stripe that can no longer
 * be serviced (too many failed devices).
 * @conf:      array configuration
 * @sh:        the failed stripe
 * @s:         per-handle state (to_read/to_write counters are decremented)
 * @disks:     number of devices in the stripe
 * @return_bi: chain that collects the bios to be completed with error
 *
 * Fails pending writes, written-but-unacknowledged writes, and (for
 * devices that are dead or errored) pending reads; completed bios are
 * pushed onto *return_bi with BIO_UPTODATE cleared.
 */
static void
handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks,
				struct bio **return_bi)
{
	int i;
	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			mdk_rdev_t *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				/* multiple read failures in one stripe */
				md_error(conf->mddev, rdev);
			rcu_read_unlock();
		}
		spin_lock_irq(&conf->device_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		if (bi) {
			s->to_write--;
			bitmap_end = 1;
		}
		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);
		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}
		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_phys_segments(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		spin_unlock_irq(&conf->device_lock);
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
	}
	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
/* fetch_block5 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill5 to continue
 */
static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
			int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *failed_dev = &sh->dev[s->failed_num];
	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed &&
	      (failed_dev->toread ||
	       (failed_dev->towrite &&
		!test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
		/* We would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		if ((s->uptodate == disks - 1) &&
		    (s->failed && disk_idx == s->failed_num)) {
			/* all other blocks are available: reconstruct the
			 * failed one by xor instead of reading it
			 */
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1;
			s->req_compute = 1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 1; /* uptodate + compute == disks */
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n", disk_idx,
				s->syncing);
		}
	}
	return 0;
}
/**
 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
 * @sh:    stripe to fill
 * @s:     per-handle state accumulated so far
 * @disks: number of devices in the stripe
 */
static void handle_stripe_fill5(struct stripe_head *sh,
			struct stripe_head_state *s, int disks)
{
	int i;
	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block5(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}
/* fetch_block6 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill6 to continue
 */
static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
			 struct r6_state *r6s, int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	/* up to two failed devices in raid6 */
	struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
				  &sh->dev[r6s->failed_num[1]] };
	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed >= 1 &&
	      (fdev[0]->toread || s->to_write)) ||
	     (s->failed >= 2 &&
	      (fdev[1]->toread || s->to_write)))) {
		/* we would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
		BUG_ON(test_bit(R5_Wantread, &dev->flags));
		if ((s->uptodate == disks - 1) &&
		    (s->failed && (disk_idx == r6s->failed_num[0] ||
				   disk_idx == r6s->failed_num[1]))) {
			/* have disk failed, and we're requested to fetch it;
			 * do compute it
			 */
			pr_debug("Computing stripe %llu block %d\n",
			       (unsigned long long)sh->sector, disk_idx);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1; /* no 2nd target */
			s->req_compute = 1;
			s->uptodate++;
			return 1;
		} else if (s->uptodate == disks-2 && s->failed >= 2) {
			/* Computing 2-failure is *very* expensive; only
			 * do it if failed >= 2
			 */
			int other;
			/* find the other not-uptodate block */
			for (other = disks; other--; ) {
				if (other == disk_idx)
					continue;
				if (!test_bit(R5_UPTODATE,
					      &sh->dev[other].flags))
					break;
			}
			BUG_ON(other < 0);
			pr_debug("Computing stripe %llu blocks %d,%d\n",
			       (unsigned long long)sh->sector,
			       disk_idx, other);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
			set_bit(R5_Wantcompute, &sh->dev[other].flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = other;
			s->uptodate += 2;
			s->req_compute = 1;
			return 1;
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n",
				disk_idx, s->syncing);
		}
	}
	return 0;
}
/**
 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
 *
 * RAID6 counterpart of handle_stripe_fill5(): walk the member devices and
 * let fetch_block6() schedule reads or (single/dual failure) computes for
 * any block that is still needed.  Skipped while a compute, check, or
 * reconstruct operation is changing the stripe contents.  Always re-arms
 * STRIPE_HANDLE so the stripe is looked at again.
 */
static void handle_stripe_fill6(struct stripe_head *sh,
			struct stripe_head_state *s, struct r6_state *r6s,
			int disks)
{
	int idx;

	/* Only look for fetch candidates while the stripe is quiescent:
	 * no check or reconstruct pending, and no compute running.
	 */
	if (!sh->check_state && !sh->reconstruct_state &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) {
		for (idx = disks - 1; idx >= 0; idx--) {
			if (fetch_block6(sh, s, r6s, idx, disks))
				break;
		}
	}
	set_bit(STRIPE_HANDLE, &sh->state);
}
/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
static void handle_stripe_clean_event(raid5_conf_t *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
{
	int i;
	struct r5dev *dev;

	for (i = disks; i--; )
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
				test_bit(R5_UPTODATE, &dev->flags)) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				int bitmap_end = 0;
				pr_debug("Return write for disc %d\n", i);
				/* device_lock protects the written bio chain
				 * and the shared return_bi list
				 */
				spin_lock_irq(&conf->device_lock);
				wbi = dev->written;
				dev->written = NULL;
				/* walk all bios that fall inside this
				 * stripe's sector range; a bio whose
				 * phys_segments count drops to zero is
				 * fully written and can be completed
				 */
				while (wbi && wbi->bi_sector <
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (!raid5_dec_bi_phys_segments(wbi)) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
						*return_bi = wbi;
					}
					wbi = wbi2;
				}
				/* no more queued writes for this device:
				 * the bitmap bits for this range can be
				 * cleared (unless the stripe is degraded)
				 */
				if (dev->towrite == NULL)
					bitmap_end = 1;
				spin_unlock_irq(&conf->device_lock);
				if (bitmap_end)
					bitmap_endwrite(conf->mddev->bitmap,
							sh->sector,
							STRIPE_SECTORS,
					 !test_bit(STRIPE_DEGRADED, &sh->state),
							0);
			}
		}

	/* last full-stripe write completed - maybe unthrottle the daemon */
	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
/* handle_stripe_dirtying5 - decide how to service pending writes on a
 * RAID5 stripe: read-modify-write (read old data + parity) versus
 * reconstruct-write (read all blocks not being overwritten), pick the
 * cheaper strategy, schedule the pre-reads it needs, and once everything
 * is in place kick off the reconstruction.
 */
static void handle_stripe_dirtying5(raid5_conf_t *conf,
		struct stripe_head *sh,	struct stripe_head_state *s, int disks)
{
	/* rmw/rcw count the blocks each strategy would have to read;
	 * a block that cannot be read (device not in-sync) costs 2*disks
	 * so that strategy is effectively disqualified.
	 */
	int rmw = 0, rcw = 0, i;
	for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks;  /* cannot read it */
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		    test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags)) rcw++;
			else
				rcw += 2*disks;
		}
	}
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		(unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			    test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				/* pre-reads are only issued while the stripe
				 * is in the preread-active phase; otherwise
				 * delay it to allow write batching
				 */
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for r-m-w\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	if (rcw <= rmw && rcw > 0)
		/* want reconstruct write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			    test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid_run_ops only
	 * handles the case where compute block and reconstruct are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction(sh, s, rcw == 0, 0);
}
/* handle_stripe_dirtying6 - service pending writes on a RAID6 stripe.
 * RAID6 always uses reconstruct-write (no read-modify-write path here):
 * every data block that is not being fully overwritten must be read or
 * computed before P and Q can be regenerated.
 */
static void handle_stripe_dirtying6(raid5_conf_t *conf,
		struct stripe_head *sh,	struct stripe_head_state *s,
		struct r6_state *r6s, int disks)
{
	int rcw = 0, pd_idx = sh->pd_idx, i;
	int qd_idx = sh->qd_idx;

	set_bit(STRIPE_HANDLE, &sh->state);
	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* check if we haven't enough data */
		if (!test_bit(R5_OVERWRITE, &dev->flags) &&
		    i != pd_idx && i != qd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			/* rcw counts the blocks still missing; the write is
			 * scheduled only once it reaches zero
			 */
			rcw++;
			if (!test_bit(R5_Insync, &dev->flags))
				continue; /* it's a failed drive */

			/* pre-reads only while preread-active, else delay
			 * the stripe to allow write batching
			 */
			if (
			  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				pr_debug("Read_old stripe %llu "
					"block %d for Reconstruct\n",
				     (unsigned long long)sh->sector, i);
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantread, &dev->flags);
				s->locked++;
			} else {
				pr_debug("Request delayed stripe %llu "
					"block %d for Reconstruct\n",
				     (unsigned long long)sh->sector, i);
				set_bit(STRIPE_DELAYED, &sh->state);
				set_bit(STRIPE_HANDLE, &sh->state);
			}
		}
	}
	/* now if nothing is locked, and if we have enough data, we can start a
	 * write request
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    s->locked == 0 && rcw == 0 &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
		schedule_reconstruction(sh, s, 1, 0);
	}
}
/* handle_parity_checks5 - state machine that verifies and, if necessary,
 * repairs the parity block of a RAID5 stripe.  Driven repeatedly by
 * handle_stripe5(); sh->check_state records where we are between calls.
 */
static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks)
{
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			/* the zero-sum check destroys the parity buffer */
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
			s->uptodate--;
			break;
		}
		dev = &sh->dev[s->failed_num];
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;
		if (!dev)
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		/* write the recomputed block (failed data or parity) back */
		set_bit(R5_LOCKED, &dev->flags);
		s->locked++;
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		 */
		if (s->failed)
			break;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				/* recompute parity from data blocks */
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				sh->ops.target2 = -1;
				s->uptodate++;
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
/* handle_parity_checks6 - state machine that verifies and, if necessary,
 * repairs the P and/or Q parity blocks of a RAID6 stripe.  Which of the
 * run states is used depends on how many devices have failed and whether
 * Q was one of them; sh->check_state records progress between calls.
 */
static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
				  struct stripe_head_state *s,
				  struct r6_state *r6s, int disks)
{
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct r5dev *dev;

	set_bit(STRIPE_HANDLE, &sh->state);

	BUG_ON(s->failed > 2);

	/* Want to check and possibly repair P and Q.
	 * However there could be one 'failed' device, in which
	 * case we can only check one of them, possibly using the
	 * other to generate missing data
	 */

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are < 2 failures */
		if (s->failed == r6s->q_failed) {
			/* The only possible failed device holds Q, so it
			 * makes sense to check P (If anything else were failed,
			 * we would have used P to recreate it).
			 */
			sh->check_state = check_state_run;
		}
		if (!r6s->q_failed && s->failed < 2) {
			/* Q is not failed, and we didn't use it to generate
			 * anything, so it makes sense to check it
			 */
			if (sh->check_state == check_state_run)
				sh->check_state = check_state_run_pq;
			else
				sh->check_state = check_state_run_q;
		}

		/* discard potentially stale zero_sum_result */
		sh->ops.zero_sum_result = 0;

		if (sh->check_state == check_state_run) {
			/* async_xor_zero_sum destroys the contents of P */
			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
			s->uptodate--;
		}
		if (sh->check_state >= check_state_run &&
		    sh->check_state <= check_state_run_pq) {
			/* async_syndrome_zero_sum preserves P and Q, so
			 * no need to mark them !uptodate here
			 */
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			break;
		}

		/* we have 2-disk failure */
		BUG_ON(s->failed != 2);
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* now write out any block on a failed drive,
		 * or P or Q if they were recomputed
		 */
		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
		if (s->failed == 2) {
			dev = &sh->dev[r6s->failed_num[1]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (s->failed >= 1) {
			dev = &sh->dev[r6s->failed_num[0]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
			dev = &sh->dev[pd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
			dev = &sh->dev[qd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		clear_bit(STRIPE_DEGRADED, &sh->state);

		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
	case check_state_run_q:
	case check_state_run_pq:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if (sh->ops.zero_sum_result == 0) {
			/* both parities are correct */
			if (!s->failed)
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				/* in contrast to the raid5 case we can validate
				 * parity, but still have a failure to write
				 * back
				 */
				sh->check_state = check_state_compute_result;
				/* Returning at this point means that we may go
				 * off and bring p and/or q uptodate again so
				 * we make sure to check zero_sum_result again
				 * to verify if p or q need writeback
				 */
			}
		} else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				/* recompute whichever of P/Q mismatched;
				 * target/target2 select one or both blocks
				 */
				int *target = &sh->ops.target;

				sh->ops.target = -1;
				sh->ops.target2 = -1;
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[pd_idx].flags);
					*target = pd_idx;
					target = &sh->ops.target2;
					s->uptodate++;
				}
				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[qd_idx].flags);
					*target = qd_idx;
					s->uptodate++;
				}
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
/* handle_stripe_expansion - copy the data blocks of a fully-read source
 * stripe into their new destination stripes during a reshape.  Copies are
 * issued as chained async_memcpy descriptors; a destination stripe is
 * marked EXPAND_READY once all of its blocks have been filled in.
 * r6s is non-NULL for RAID6 (so the Q block is also skipped).
 */
static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
				struct r6_state *r6s)
{
	int i;

	/* We have read all the blocks in this stripe and now we need to
	 * copy some of them into a target stripe for expand.
	 */
	struct dma_async_tx_descriptor *tx = NULL;
	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	for (i = 0; i < sh->disks; i++)
		if (i != sh->pd_idx && i != sh->qd_idx) {
			int dd_idx, j;
			struct stripe_head *sh2;
			struct async_submit_ctl submit;

			/* map this block to its sector/slot in the new,
			 * post-reshape layout
			 */
			sector_t bn = compute_blocknr(sh, i, 1);
			sector_t s = raid5_compute_sector(conf, bn, 0,
							  &dd_idx, NULL);
			sh2 = get_active_stripe(conf, s, 0, 1, 1);
			if (sh2 == NULL)
				/* so far only the early blocks of this stripe
				 * have been requested.  When later blocks
				 * get requested, we will try again
				 */
				continue;
			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
			   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
				/* must have already done this block */
				release_stripe(sh2);
				continue;
			}

			/* place all the copies on one channel */
			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
			tx = async_memcpy(sh2->dev[dd_idx].page,
					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
					  &submit);

			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
			/* if every data block of the destination stripe has
			 * now been filled in, it is ready to be written out
			 */
			for (j = 0; j < conf->raid_disks; j++)
				if (j != sh2->pd_idx &&
				    (!r6s || j != sh2->qd_idx) &&
				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
					break;
			if (j == conf->raid_disks) {
				set_bit(STRIPE_EXPAND_READY, &sh2->state);
				set_bit(STRIPE_HANDLE, &sh2->state);
			}
			release_stripe(sh2);

		}
	/* done submitting copies, wait for them to complete */
	if (tx) {
		async_tx_ack(tx);
		dma_wait_for_async_tx(tx);
	}
}
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read request which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */

static void handle_stripe5(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	struct bio *return_bi = NULL;
	struct stripe_head_state s;
	struct r5dev *dev;
	mdk_rdev_t *blocked_rdev = NULL;
	int prexor;
	int dec_preread_active = 0;

	memset(&s, 0, sizeof(s));
	pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
		 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
		 atomic_read(&sh->count), sh->pd_idx, sh->check_state,
		 sh->reconstruct_state);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);

	/* Now to look around and see what can be done */
	rcu_read_lock();
	for (i=disks; i--; ) {
		mdk_rdev_t *rdev;

		dev = &sh->dev[i];

		pr_debug("check %d: state 0x%lx toread %p read %p write %p "
			"written %p\n", i, dev->flags, dev->toread, dev->read,
			dev->towrite, dev->written);

		/* maybe we can request a biofill operation
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;

		if (test_bit(R5_Wantfill, &dev->flags))
			s.to_fill++;
		else if (dev->toread)
			s.to_read++;
		if (dev->towrite) {
			s.to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s.non_overwrite++;
		}
		if (dev->written)
			s.written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		/* a Blocked rdev must be waited for (outside the stripe
		 * lock, at the bottom) before we touch the stripe; take a
		 * reference so it cannot go away meanwhile
		 */
		if (blocked_rdev == NULL &&
		    rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			blocked_rdev = rdev;
			atomic_inc(&rdev->nr_pending);
		}
		clear_bit(R5_Insync, &dev->flags);
		if (!rdev)
			/* Not in-sync */;
		else if (test_bit(In_sync, &rdev->flags))
			set_bit(R5_Insync, &dev->flags);
		else {
			/* could be in-sync depending on recovery/reshape status */
			if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
				set_bit(R5_Insync, &dev->flags);
		}
		if (!test_bit(R5_Insync, &dev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (test_bit(R5_ReadError, &dev->flags))
			clear_bit(R5_Insync, &dev->flags);
		if (!test_bit(R5_Insync, &dev->flags)) {
			s.failed++;
			s.failed_num = i;
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto unlock;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(blocked_rdev, conf->mddev);
		blocked_rdev = NULL;
	}

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
		" to_write=%d failed=%d failed_num=%d\n",
		s.locked, s.uptodate, s.to_read, s.to_write,
		s.failed, s.failed_num);
	/* check if the array has lost two devices and, if so, some requests might
	 * need to be failed
	 */
	if (s.failed > 1 && s.to_read+s.to_write+s.written)
		handle_failed_stripe(conf, sh, &s, disks, &return_bi);
	if (s.failed > 1 && s.syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		s.syncing = 0;
	}

	/* might be able to return some write requests if the parity block
	 * is safe, or on a failed drive
	 */
	dev = &sh->dev[sh->pd_idx];
	if ( s.written &&
	     ((test_bit(R5_Insync, &dev->flags) &&
	       !test_bit(R5_LOCKED, &dev->flags) &&
	       test_bit(R5_UPTODATE, &dev->flags)) ||
	       (s.failed == 1 && s.failed_num == sh->pd_idx)))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite ||
	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
		handle_stripe_fill5(sh, &s, disks);

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	prexor = 0;
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
		prexor = 1;
	if (sh->reconstruct_state == reconstruct_state_drain_result ||
	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
		sh->reconstruct_state = reconstruct_state_idle;

		/* All the 'written' buffers and the parity block are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		for (i = disks; i--; ) {
			dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
				(i == sh->pd_idx || dev->written)) {
				pr_debug("Writing block %d\n", i);
				set_bit(R5_Wantwrite, &dev->flags);
				if (prexor)
					continue;
				if (!test_bit(R5_Insync, &dev->flags) ||
				    (i == sh->pd_idx && s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			dec_preread_active = 1;
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+xor) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying5(conf, sh, &s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state)))
		handle_parity_checks5(conf, sh, &s, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drive is just a ReadError, then we might need to progress
	 * the repair/check process
	 */
	if (s.failed == 1 && !conf->mddev->ro &&
	    test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
	    && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
	    && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
		) {
		dev = &sh->dev[s.failed_num];
		if (!test_bit(R5_ReWrite, &dev->flags)) {
			/* first attempt: rewrite the bad block in place */
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_ReWrite, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			s.locked++;
		} else {
			/* let's read it back */
			set_bit(R5_Wantread, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			s.locked++;
		}
	}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		struct stripe_head *sh2
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh2->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh2);
			goto unlock;
		}
		if (sh2)
			release_stripe(sh2);

		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		/* Need to write out all blocks after computing parity */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, NULL);

 unlock:
	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	ops_run_io(sh, &s);

	if (dec_preread_active) {
		/* We delay this until after ops_run_io so that if make_request
		 * is waiting on a flush, it won't continue until the writes
		 * have actually been submitted.
		 */
		atomic_dec(&conf->preread_active_stripes);
		if (atomic_read(&conf->preread_active_stripes) <
		    IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);
	}
	return_io(return_bi);
}
/* handle_stripe6 - RAID6 counterpart of handle_stripe5().  Takes the
 * stripe lock, surveys the per-device state under RCU, then dispatches
 * the appropriate sub-handlers: fail outstanding requests, return clean
 * writes, schedule fills, service dirtying, run parity checks, progress
 * read-error recovery, and drive reshape/expansion.  I/O and async ops
 * are issued after the lock is dropped.
 *
 * Fix: the opening pr_debug format string previously contained a stray
 * "\n," in the middle of the message, splitting one debug line in two
 * and starting the second line with a comma.
 */
static void handle_stripe6(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks;
	struct bio *return_bi = NULL;
	int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
	struct stripe_head_state s;
	struct r6_state r6s;
	struct r5dev *dev, *pdev, *qdev;
	mdk_rdev_t *blocked_rdev = NULL;
	int dec_preread_active = 0;

	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
		"pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
	       (unsigned long long)sh->sector, sh->state,
	       atomic_read(&sh->count), pd_idx, qd_idx,
	       sh->check_state, sh->reconstruct_state);
	memset(&s, 0, sizeof(s));

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
	/* Now to look around and see what can be done */

	rcu_read_lock();
	for (i=disks; i--; ) {
		mdk_rdev_t *rdev;
		dev = &sh->dev[i];

		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
			i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) {
			s.compute++;
			BUG_ON(s.compute > 2);
		}

		if (test_bit(R5_Wantfill, &dev->flags)) {
			s.to_fill++;
		} else if (dev->toread)
			s.to_read++;
		if (dev->towrite) {
			s.to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s.non_overwrite++;
		}
		if (dev->written)
			s.written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		/* a Blocked rdev must be waited for (outside the stripe
		 * lock, at the bottom) before we touch the stripe; take a
		 * reference so it cannot go away meanwhile
		 */
		if (blocked_rdev == NULL &&
		    rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			blocked_rdev = rdev;
			atomic_inc(&rdev->nr_pending);
		}
		clear_bit(R5_Insync, &dev->flags);
		if (!rdev)
			/* Not in-sync */;
		else if (test_bit(In_sync, &rdev->flags))
			set_bit(R5_Insync, &dev->flags);
		else {
			/* in sync if before recovery_offset */
			if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
				set_bit(R5_Insync, &dev->flags);
		}
		if (!test_bit(R5_Insync, &dev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (test_bit(R5_ReadError, &dev->flags))
			clear_bit(R5_Insync, &dev->flags);
		if (!test_bit(R5_Insync, &dev->flags)) {
			if (s.failed < 2)
				r6s.failed_num[s.failed] = i;
			s.failed++;
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto unlock;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(blocked_rdev, conf->mddev);
		blocked_rdev = NULL;
	}

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
	       " to_write=%d failed=%d failed_num=%d,%d\n",
	       s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
	       r6s.failed_num[0], r6s.failed_num[1]);
	/* check if the array has lost >2 devices and, if so, some requests
	 * might need to be failed
	 */
	if (s.failed > 2 && s.to_read+s.to_write+s.written)
		handle_failed_stripe(conf, sh, &s, disks, &return_bi);
	if (s.failed > 2 && s.syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		s.syncing = 0;
	}

	/*
	 * might be able to return some write requests if the parity blocks
	 * are safe, or on a failed drive
	 */
	pdev = &sh->dev[pd_idx];
	r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
	qdev = &sh->dev[qd_idx];
	r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == qd_idx);

	if ( s.written &&
	     ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
			     && !test_bit(R5_LOCKED, &pdev->flags)
			     && test_bit(R5_UPTODATE, &pdev->flags)))) &&
	     ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
			     && !test_bit(R5_LOCKED, &qdev->flags)
			     && test_bit(R5_UPTODATE, &qdev->flags)))))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
		handle_stripe_fill6(sh, &s, &r6s, disks);

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	if (sh->reconstruct_state == reconstruct_state_drain_result) {

		sh->reconstruct_state = reconstruct_state_idle;
		/* All the 'written' buffers and the parity blocks are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags));
		for (i = disks; i--; ) {
			dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
			    (i == sh->pd_idx || i == qd_idx ||
			     dev->written)) {
				pr_debug("Writing block %d\n", i);
				BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
				set_bit(R5_Wantwrite, &dev->flags);
				if (!test_bit(R5_Insync, &dev->flags) ||
				    ((i == sh->pd_idx || i == qd_idx) &&
				      s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			dec_preread_active = 1;
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state)))
		handle_parity_checks6(conf, sh, &s, &r6s, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drives are just a ReadError, then we might need
	 * to progress the repair/check process
	 */
	if (s.failed <= 2 && !conf->mddev->ro)
		for (i = 0; i < s.failed; i++) {
			dev = &sh->dev[r6s.failed_num[i]];
			if (test_bit(R5_ReadError, &dev->flags)
			    && !test_bit(R5_LOCKED, &dev->flags)
			    && test_bit(R5_UPTODATE, &dev->flags)
				) {
				if (!test_bit(R5_ReWrite, &dev->flags)) {
					/* first attempt: rewrite in place */
					set_bit(R5_Wantwrite, &dev->flags);
					set_bit(R5_ReWrite, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				} else {
					/* let's read it back */
					set_bit(R5_Wantread, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				}
			}
		}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		struct stripe_head *sh2
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh2->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh2);
			goto unlock;
		}
		if (sh2)
			release_stripe(sh2);

		/* Need to write out all blocks after computing P&Q */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, &r6s);

 unlock:
	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	ops_run_io(sh, &s);

	if (dec_preread_active) {
		/* We delay this until after ops_run_io so that if make_request
		 * is waiting on a flush, it won't continue until the writes
		 * have actually been submitted.
		 */
		atomic_dec(&conf->preread_active_stripes);
		if (atomic_read(&conf->preread_active_stripes) <
		    IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);
	}

	return_io(return_bi);
}
/*
 * Dispatch a stripe to the level-specific handler: RAID6 stripes carry
 * two parity blocks (P and Q) and need separate processing logic.
 */
static void handle_stripe(struct stripe_head *sh)
{
	if (sh->raid_conf->level != 6)
		handle_stripe5(sh);
	else
		handle_stripe6(sh);
}
/*
 * Promote all stripes on the delayed list to the hold list, but only
 * while the number of preread-active stripes is below IO_THRESHOLD.
 * Each promoted stripe has STRIPE_DELAYED cleared and is counted as
 * preread-active if it was not already.
 */
static void raid5_activate_delayed(raid5_conf_t *conf)
{
	if (atomic_read(&conf->preread_active_stripes) >= IO_THRESHOLD)
		return;

	while (!list_empty(&conf->delayed_list)) {
		struct stripe_head *sh =
			list_entry(conf->delayed_list.next,
				   struct stripe_head, lru);

		list_del_init(&sh->lru);
		clear_bit(STRIPE_DELAYED, &sh->state);
		if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			atomic_inc(&conf->preread_active_stripes);
		list_add_tail(&sh->lru, &conf->hold_list);
	}
}
/*
 * Release every stripe currently parked on conf->bitmap_list (stripes
 * whose writes were held back pending a bitmap flush).  The list is
 * spliced onto a local head so each stripe can be handed to
 * __release_stripe() one at a time.
 */
static void activate_bit_delay(raid5_conf_t *conf)
{
	/* device_lock is held */
	struct list_head head;
	/* splice the whole bitmap_list onto the local head */
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		/* take a reference that __release_stripe() will drop */
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}
/*
 * Report whether the array is congested.  Reads and writes are treated
 * alike: the only measure is how busy the stripe cache is.
 */
int md_raid5_congested(mddev_t *mddev, int bits)
{
	raid5_conf_t *conf = mddev->private;

	/* congested if the cache is blocked, quiesced, or fully in use */
	return conf->inactive_blocked ||
	       conf->quiesce ||
	       list_empty_careful(&conf->inactive_list);
}
EXPORT_SYMBOL_GPL(md_raid5_congested);
/* Congestion callback: the device is congested if either the generic md
 * layer or the raid5 stripe cache reports congestion.
 */
static int raid5_congested(void *data, int bits)
{
	mddev_t *mddev = data;

	if (mddev_congested(mddev, bits))
		return 1;
	return md_raid5_congested(mddev, bits);
}
/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	unsigned int chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	int room;

	/* writes are always allowed to merge */
	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len;

	/* during a reshape, honour the smaller of the two chunk sizes */
	chunk_sectors = mddev->chunk_sectors;
	if (mddev->new_chunk_sectors < chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;

	/* bytes remaining in this chunk after the existing bio sectors */
	room = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (room < 0)
		room = 0;
	/* an empty bio may always take at least one vector */
	if (bio_sectors == 0 && room <= biovec->bv_len)
		return biovec->bv_len;
	return room;
}
/* Return non-zero when the whole bio fits inside a single chunk
 * (using the smaller of the current and new chunk size, so the test
 * is safe during a reshape).
 */
static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bio->bi_size >> 9;
	unsigned int offset;

	if (mddev->new_chunk_sectors < chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;

	offset = sector & (chunk_sectors - 1);
	return offset + bio_sectors <= chunk_sectors;
}
/*
 * Push a failed aligned read onto the head of the retry LIFO (O(1), so
 * it is safe from interrupt context) and wake raid5d, which will later
 * pick it up via remove_bio_from_retry().
 */
static void add_bio_to_retry(struct bio *bi, raid5_conf_t *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	md_wakeup_thread(conf->mddev->thread);
}
/*
 * Fetch the next aligned read to retry.  A partially-processed bio
 * stashed in ->retry_read_aligned takes priority over the LIFO list.
 * Returns NULL when there is nothing to retry.
 */
static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
{
	struct bio *bi = conf->retry_read_aligned;

	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}

	bi = conf->retry_read_aligned_list;
	if (!bi)
		return NULL;

	conf->retry_read_aligned_list = bi->bi_next;
	bi->bi_next = NULL;
	/*
	 * this sets the active strip count to 1 and the processed
	 * strip count to zero (upper 8 bits)
	 */
	bi->bi_phys_segments = 1; /* biased count of active stripes */
	return bi;
}
/*
 * The "raid5_align_endio" should check if the read succeeded and if it
 * did, call bio_endio on the original bio (having bio_put the new bio
 * first).
 * If the read failed..
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio* raid_bi = bi->bi_private;
	mddev_t *mddev;
	raid5_conf_t *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	mdk_rdev_t *rdev;
	bio_put(bi);
	/* chunk_aligned_read() stashed the target rdev in bi_next */
	rdev = (void*)raid_bi->bi_next;
	raid_bi->bi_next = NULL;
	mddev = rdev->mddev;
	conf = mddev->private;
	/* drop the reference taken when the aligned read was issued */
	rdev_dec_pending(rdev, conf->mddev);
	if (!error && uptodate) {
		/* success: complete the original bio directly */
		bio_endio(raid_bi, 0);
		/* last aligned read may be holding up a quiesce */
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}
	/* failure: queue the original bio for stripe-by-stripe retry */
	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
	add_bio_to_retry(raid_bi, conf);
}
/*
 * Check whether this bio can be submitted to its target device's queue
 * as one request: it must fit within the queue's max-sectors and
 * max-segments limits, and the queue must not define a merge_bvec_fn
 * (which we cannot honour at this point).
 */
static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size >> 9) > queue_max_sectors(q))
		return 0;

	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > queue_max_segments(q))
		return 0;

	/* it's too hard to apply the merge_bvec_fn at this stage,
	 * just just give up
	 */
	if (q->merge_bvec_fn)
		return 0;

	return 1;
}
/*
 * Try to service a chunk-aligned read by bypassing the stripe cache and
 * issuing a cloned bio directly to the one underlying device holding
 * the data.  Returns 1 if the read was dispatched this way, 0 if the
 * caller must fall back to the normal stripe path.
 */
static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
{
	raid5_conf_t *conf = mddev->private;
	int dd_idx;
	struct bio* align_bi;
	mdk_rdev_t *rdev;
	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
		return 0;
	}
	/*
	 * use bio_clone_mddev to make a copy of the bio
	 */
	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
	if (!align_bi)
		return 0;
	/*
	 * set bi_end_io to a new function, and set bi_private to the
	 * original bio.
	 */
	align_bi->bi_end_io  = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 * compute position
	 */
	align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
						    0,
						    &dd_idx, NULL);
	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
	if (rdev && test_bit(In_sync, &rdev->flags)) {
		/* pin the rdev so it cannot be removed under us */
		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		/* stash the rdev in bi_next; raid5_align_endio() retrieves it */
		raid_bio->bi_next = (void*)rdev;
		align_bi->bi_bdev =  rdev->bdev;
		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
		align_bi->bi_sector += rdev->data_offset;
		if (!bio_fits_rdev(align_bi)) {
			/* too big in some way */
			bio_put(align_bi);
			rdev_dec_pending(rdev, mddev);
			return 0;
		}
		/* do not start new aligned reads while the array is quiesced */
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);
		generic_make_request(align_bi);
		return 1;
	} else {
		rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}
}
/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded. In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o. The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		  __func__,
		  list_empty(&conf->handle_list) ? "empty" : "busy",
		  list_empty(&conf->hold_list) ? "empty" : "busy",
		  atomic_read(&conf->pending_full_writes), conf->bypass_count);
	if (!list_empty(&conf->handle_list)) {
		/* normal case: take the head of the handle list */
		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			/* count a bypass only while the hold-list head is stable */
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {
		/* hold list has waited long enough (or nothing can bypass it) */
		sh = list_entry(conf->hold_list.next,
				typeof(*sh), lru);
		conf->bypass_count -= conf->bypass_threshold;
		if (conf->bypass_count < 0)
			conf->bypass_count = 0;
	} else
		return NULL;
	list_del_init(&sh->lru);
	/* caller gets the sole reference to the stripe */
	atomic_inc(&sh->count);
	BUG_ON(atomic_read(&sh->count) != 1);
	return sh;
}
/*
 * Main entry point for I/O submitted to a raid4/5/6 array.  The bio is
 * split into STRIPE_SECTORS-sized pieces; each piece is attached to the
 * appropriate stripe_head and the stripe is scheduled for handling.
 * Completion is signalled asynchronously via bio_endio() once all
 * attached stripes have finished with the bio.
 */
static int make_request(mddev_t *mddev, struct bio * bi)
{
	raid5_conf_t *conf = mddev->private;
	int dd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);
	int remaining;
	int plugged;
	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bi);
		return 0;
	}
	md_write_start(mddev, bi);
	/* fast path: aligned reads can bypass the stripe cache entirely,
	 * but only while no reshape is in progress
	 */
	if (rw == READ &&
	     mddev->reshape_position == MaxSector &&
	     chunk_aligned_read(mddev,bi))
		return 0;
	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
	plugged = mddev_check_plugged(mddev);
	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);
		int disks, data_disks;
		int previous;
	retry:
		previous = 0;
		disks = conf->raid_disks;
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		if (unlikely(conf->reshape_progress != MaxSector)) {
			/* spinlock is needed as reshape_progress may be
			 * 64bit on a 32bit platform, and so it might be
			 * possible to see a half-updated value
			 * Of course reshape_progress could change after
			 * the lock is dropped, so once we get a reference
			 * to the stripe that we think it is, we will have
			 * to check again.
			 */
			spin_lock_irq(&conf->device_lock);
			if (mddev->delta_disks < 0
			    ? logical_sector < conf->reshape_progress
			    : logical_sector >= conf->reshape_progress) {
				/* sector is still in the old layout */
				disks = conf->previous_raid_disks;
				previous = 1;
			} else {
				if (mddev->delta_disks < 0
				    ? logical_sector < conf->reshape_safe
				    : logical_sector >= conf->reshape_safe) {
					/* in the window being actively reshaped:
					 * back off and retry later
					 */
					spin_unlock_irq(&conf->device_lock);
					schedule();
					goto retry;
				}
			}
			spin_unlock_irq(&conf->device_lock);
		}
		data_disks = disks - conf->max_degraded;
		new_sector = raid5_compute_sector(conf, logical_sector,
						  previous,
						  &dd_idx, NULL);
		pr_debug("raid456: make_request, sector %llu logical %llu\n",
			(unsigned long long)new_sector,
			(unsigned long long)logical_sector);
		sh = get_active_stripe(conf, new_sector, previous,
				       (bi->bi_rw&RWA_MASK), 0);
		if (sh) {
			if (unlikely(previous)) {
				/* expansion might have moved on while waiting for a
				 * stripe, so we must do the range check again.
				 * Expansion could still move past after this
				 * test, but as we are holding a reference to
				 * 'sh', we know that if that happens,
				 *  STRIPE_EXPANDING will get set and the expansion
				 * won't proceed until we finish with the stripe.
				 */
				int must_retry = 0;
				spin_lock_irq(&conf->device_lock);
				if (mddev->delta_disks < 0
				    ? logical_sector >= conf->reshape_progress
				    : logical_sector < conf->reshape_progress)
					/* mismatch, need to try again */
					must_retry = 1;
				spin_unlock_irq(&conf->device_lock);
				if (must_retry) {
					release_stripe(sh);
					schedule();
					goto retry;
				}
			}
			if (bio_data_dir(bi) == WRITE &&
			    logical_sector >= mddev->suspend_lo &&
			    logical_sector < mddev->suspend_hi) {
				release_stripe(sh);
				/* As the suspend_* range is controlled by
				 * userspace, we want an interruptible
				 * wait.
				 */
				flush_signals(current);
				prepare_to_wait(&conf->wait_for_overlap,
						&w, TASK_INTERRUPTIBLE);
				if (logical_sector >= mddev->suspend_lo &&
				    logical_sector < mddev->suspend_hi)
					schedule();
				goto retry;
			}
			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
			    !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
				/* Stripe is busy expanding or
				 * add failed due to overlap.  Flush everything
				 * and wait a while
				 */
				md_wakeup_thread(mddev->thread);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			set_bit(STRIPE_HANDLE, &sh->state);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if ((bi->bi_rw & REQ_SYNC) &&
			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh);
		} else {
			/* cannot get stripe for read-ahead, just give-up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}
	}
	if (!plugged)
		md_wakeup_thread(mddev->thread);
	/* drop the bias taken above; if no stripe holds the bio, finish it */
	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(bi);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0) {
		if ( rw == WRITE )
			md_write_end(mddev);
		bio_endio(bi, 0);
	}
	return 0;
}
/* forward declaration: reshape_request() below needs the size helper */
static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
/*
 * Advance an in-progress reshape by one chunk.  Returns the number of
 * sectors of progress made (or, when restarting, the number of sectors
 * to skip).  Metadata checkpoints are written whenever continuing would
 * risk overwriting not-yet-saved source data, and at least every 10s.
 */
static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
{
	/* reshaping is quite different to recovery/resync so it is
	 * handled quite separately ... here.
	 *
	 * On each call to sync_request, we gather one chunk worth of
	 * destination stripes and flag them as expanding.
	 * Then we find all the source stripes and request reads.
	 * As the reads complete, handle_stripe will copy the data
	 * into the destination stripe and release that stripe.
	 */
	raid5_conf_t *conf = mddev->private;
	struct stripe_head *sh;
	sector_t first_sector, last_sector;
	int raid_disks = conf->previous_raid_disks;
	int data_disks = raid_disks - conf->max_degraded;
	int new_data_disks = conf->raid_disks - conf->max_degraded;
	int i;
	int dd_idx;
	sector_t writepos, readpos, safepos;
	sector_t stripe_addr;
	int reshape_sectors;
	struct list_head stripes;
	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->delta_disks < 0 &&
		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
			sector_nr = raid5_size(mddev, 0, 0)
				- conf->reshape_progress;
		} else if (mddev->delta_disks >= 0 &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		sector_div(sector_nr, new_data_disks);
		if (sector_nr) {
			mddev->curr_resync_completed = sector_nr;
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			*skipped = 1;
			return sector_nr;
		}
	}
	/* We need to process a full chunk at a time.
	 * If old and new chunk sizes differ, we need to process the
	 * largest of these
	 */
	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
		reshape_sectors = mddev->new_chunk_sectors;
	else
		reshape_sectors = mddev->chunk_sectors;
	/* we update the metadata when there is more than 3Meg
	 * in the block range (that is rather arbitrary, should
	 * probably be time based) or when the data about to be
	 * copied would over-write the source of the data at
	 * the front of the range.
	 * i.e. one new_stripe along from reshape_progress new_maps
	 * to after where reshape_safe old_maps to
	 */
	writepos = conf->reshape_progress;
	sector_div(writepos, new_data_disks);
	readpos = conf->reshape_progress;
	sector_div(readpos, data_disks);
	safepos = conf->reshape_safe;
	sector_div(safepos, data_disks);
	if (mddev->delta_disks < 0) {
		/* shrinking: reshape runs from the end towards the start */
		writepos -= min_t(sector_t, reshape_sectors, writepos);
		readpos += reshape_sectors;
		safepos += reshape_sectors;
	} else {
		writepos += reshape_sectors;
		readpos -= min_t(sector_t, reshape_sectors, readpos);
		safepos -= min_t(sector_t, reshape_sectors, safepos);
	}
	/* 'writepos' is the most advanced device address we might write.
	 * 'readpos' is the least advanced device address we might read.
	 * 'safepos' is the least address recorded in the metadata as having
	 *     been reshaped.
	 * If 'readpos' is behind 'writepos', then there is no way that we can
	 * ensure safety in the face of a crash - that must be done by userspace
	 * making a backup of the data.  So in that case there is no particular
	 * rush to update metadata.
	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
	 * update the metadata to advance 'safepos' to match 'readpos' so that
	 * we can be safe in the event of a crash.
	 * So we insist on updating metadata if safepos is behind writepos and
	 * readpos is beyond writepos.
	 * In any case, update the metadata every 10 seconds.
	 * Maybe that number should be configurable, but I'm not sure it is
	 * worth it.... maybe it could be a multiple of safemode_delay???
	 */
	if ((mddev->delta_disks < 0
	     ? (safepos > writepos && readpos < writepos)
	     : (safepos < writepos && readpos > writepos)) ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes)==0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = sector_nr;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->flags == 0 ||
			   kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}
	if (mddev->delta_disks < 0) {
		BUG_ON(conf->reshape_progress == 0);
		stripe_addr = writepos;
		BUG_ON((mddev->dev_sectors &
			~((sector_t)reshape_sectors - 1))
		       - reshape_sectors - stripe_addr
		       != sector_nr);
	} else {
		BUG_ON(writepos != sector_nr + reshape_sectors);
		stripe_addr = sector_nr;
	}
	/* gather one chunk worth of destination stripes and mark them
	 * as expanding; they are released only after the sources are marked
	 */
	INIT_LIST_HEAD(&stripes);
	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
		int j;
		int skipped_disk = 0;
		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
		set_bit(STRIPE_EXPANDING, &sh->state);
		atomic_inc(&conf->reshape_stripes);
		/* If any of this stripe is beyond the end of the old
		 * array, then we need to zero those blocks
		 */
		for (j=sh->disks; j--;) {
			sector_t s;
			if (j == sh->pd_idx)
				continue;
			if (conf->level == 6 &&
			    j == sh->qd_idx)
				continue;
			s = compute_blocknr(sh, j, 0);
			if (s < raid5_size(mddev, 0, 0)) {
				skipped_disk = 1;
				continue;
			}
			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
			set_bit(R5_Expanded, &sh->dev[j].flags);
			set_bit(R5_UPTODATE, &sh->dev[j].flags);
		}
		if (!skipped_disk) {
			set_bit(STRIPE_EXPAND_READY, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
		list_add(&sh->lru, &stripes);
	}
	spin_lock_irq(&conf->device_lock);
	if (mddev->delta_disks < 0)
		conf->reshape_progress -= reshape_sectors * new_data_disks;
	else
		conf->reshape_progress += reshape_sectors * new_data_disks;
	spin_unlock_irq(&conf->device_lock);
	/* Ok, those stripe are ready. We can start scheduling
	 * reads on the source stripes.
	 * The source stripes are determined by mapping the first and last
	 * block on the destination stripes.
	 */
	first_sector =
		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
				     1, &dd_idx, NULL);
	last_sector =
		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
					    * new_data_disks - 1),
				     1, &dd_idx, NULL);
	if (last_sector >= mddev->dev_sectors)
		last_sector = mddev->dev_sectors - 1;
	while (first_sector <= last_sector) {
		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
		set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);
		first_sector += STRIPE_SECTORS;
	}
	/* Now that the sources are clearly marked, we can release
	 * the destination stripes
	 */
	while (!list_empty(&stripes)) {
		sh = list_entry(stripes.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		release_stripe(sh);
	}
	/* If this takes us to the resync_max point where we have to pause,
	 * then we need to write out the superblock.
	 */
	sector_nr += reshape_sectors;
	if ((sector_nr - mddev->curr_resync_completed) * 2
	    >= mddev->resync_max - mddev->curr_resync_completed) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes) == 0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = sector_nr;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
			   || kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}
	return reshape_sectors;
}
/* FIXME go_faster isn't used */
/*
 * Handle one step of resync/recovery/check for the array.  Processes a
 * single stripe (STRIPE_SECTORS) per call, except when a reshape is in
 * progress, in which case the work is delegated to reshape_request().
 * Returns the number of sectors synced (or skipped).
 */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	raid5_conf_t *conf = mddev->private;
	struct stripe_head *sh;
	sector_t max_sector = mddev->dev_sectors;
	sector_t sync_blocks;
	int still_degraded = 0;
	int i;
	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
			end_reshape(conf);
			return 0;
		}
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);
		return 0;
	}
	/* Allow raid5_quiesce to complete */
	wait_event(conf->wait_for_overlap, conf->quiesce != 2);
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);
	/* No need to check resync_max as we never do more than one
	 * stripe, and as resync_max will always be on a chunk boundary,
	 * if the check in md_do_sync didn't fire, there is no chance
	 * of overstepping resync_max here
	 */
	/* if there is too many failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= conf->max_degraded &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = mddev->dev_sectors - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}
	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
	/* first try without blocking; on failure, block but don't swamp
	 * the cache, and rate-limit with a short sleep
	 */
	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	/* Need to check if array will still be degraded after recovery/resync
	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->disks[i].rdev == NULL)
			still_degraded = 1;
	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);
	handle_stripe(sh);
	release_stripe(sh);
	return STRIPE_SECTORS;
}
/*
 * Retry a failed aligned read through the normal stripe cache, one
 * stripe at a time.  Returns the number of stripes handled; if a stripe
 * cannot be obtained, progress is recorded in the bio so the retry can
 * resume later from the same point.
 */
static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
{
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we allow ever large chunks).
	 * So we do one stripe head at a time and record in
	 * ->bi_hw_segments how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
	 */
	struct stripe_head *sh;
	int dd_idx;
	sector_t sector, logical_sector, last_sector;
	int scnt = 0;
	int remaining;
	int handled = 0;
	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	sector = raid5_compute_sector(conf, logical_sector,
				      0, &dd_idx, NULL);
	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS,
		     sector += STRIPE_SECTORS,
		     scnt++) {
		if (scnt < raid5_bi_hw_segments(raid_bio))
			/* already done this stripe */
			continue;
		/* non-blocking attempt: we must not sleep in raid5d */
		sh = get_active_stripe(conf, sector, 0, 1, 0);
		if (!sh) {
			/* failed to get a stripe - must wait */
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}
		/* the original aligned read failed; flag the block so the
		 * stripe machinery re-reads and repairs it
		 */
		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
			release_stripe(sh);
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}
		handle_stripe(sh);
		release_stripe(sh);
		handled++;
	}
	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(raid_bio);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0)
		bio_endio(raid_bio, 0);
	/* the aligned read is no longer outstanding; unblock quiesce */
	if (atomic_dec_and_test(&conf->active_aligned_reads))
		wake_up(&conf->wait_for_stripe);
	return handled;
}
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d(mddev_t *mddev)
{
	struct stripe_head *sh;
	raid5_conf_t *conf = mddev->private;
	int handled;
	struct blk_plug plug;
	pr_debug("+++ raid5d active\n");
	md_check_recovery(mddev);
	blk_start_plug(&plug);
	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct bio *bio;
		if (atomic_read(&mddev->plug_cnt) == 0 &&
		    !list_empty(&conf->bitmap_list)) {
			/* Now is a good time to flush some bitmap updates */
			conf->seq_flush++;
			/* device_lock must be dropped around bitmap_unplug() */
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = conf->seq_flush;
			activate_bit_delay(conf);
		}
		if (atomic_read(&mddev->plug_cnt) == 0)
			raid5_activate_delayed(conf);
		/* process any aligned-read retries queued by raid5_align_endio */
		while ((bio = remove_bio_from_retry(conf))) {
			int ok;
			spin_unlock_irq(&conf->device_lock);
			ok = retry_aligned_read(conf, bio);
			spin_lock_irq(&conf->device_lock);
			if (!ok)
				break;
			handled++;
		}
		sh = __get_priority_stripe(conf);
		if (!sh)
			break;
		spin_unlock_irq(&conf->device_lock);
		handled++;
		handle_stripe(sh);
		release_stripe(sh);
		cond_resched();
		spin_lock_irq(&conf->device_lock);
	}
	pr_debug("%d stripes handled\n", handled);
	spin_unlock_irq(&conf->device_lock);
	async_tx_issue_pending_all();
	blk_finish_plug(&plug);
	pr_debug("--- raid5d inactive\n");
}
/* sysfs show handler: current size of the stripe cache */
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;

	if (!conf)
		return 0;
	return sprintf(page, "%d\n", conf->max_nr_stripes);
}
/*
 * Resize the stripe cache to @size entries (valid range 17..32768).
 * Shrinking releases stripes immediately; growing first calls
 * md_allow_write() and then allocates until the target is reached or
 * memory runs out.  Returns 0 on success or a negative errno.
 */
int
raid5_set_cache_size(mddev_t *mddev, int size)
{
	raid5_conf_t *conf = mddev->private;
	int err;

	if (size <= 16 || size > 32768)
		return -EINVAL;

	/* shrink: drop stripes until the target is met or none can be freed */
	while (size < conf->max_nr_stripes && drop_one_stripe(conf))
		conf->max_nr_stripes--;

	err = md_allow_write(mddev);
	if (err)
		return err;

	/* grow: allocate stripes until the target is met or allocation fails */
	while (size > conf->max_nr_stripes && grow_one_stripe(conf))
		conf->max_nr_stripes++;

	return 0;
}
EXPORT_SYMBOL(raid5_set_cache_size);
/* sysfs store handler: parse a decimal count and resize the stripe cache */
static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev->private;
	unsigned long val;
	int rc;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;
	if (strict_strtoul(page, 10, &val))
		return -EINVAL;

	rc = raid5_set_cache_size(mddev, val);
	return rc ? rc : len;
}
/* sysfs attribute: stripe_cache_size (read/write) */
static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);
/* sysfs show handler: current preread bypass threshold */
static ssize_t
raid5_show_preread_threshold(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;

	if (!conf)
		return 0;
	return sprintf(page, "%d\n", conf->bypass_threshold);
}
/* sysfs store handler: set the preread bypass threshold; must not
 * exceed the stripe cache size.
 */
static ssize_t
raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev->private;
	unsigned long val;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;
	/* reject unparsable input or a threshold beyond the cache size */
	if (strict_strtoul(page, 10, &val) || val > conf->max_nr_stripes)
		return -EINVAL;

	conf->bypass_threshold = val;
	return len;
}
/* sysfs attribute: preread_bypass_threshold (read/write) */
static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
					S_IRUGO | S_IWUSR,
					raid5_show_preread_threshold,
					raid5_store_preread_threshold);
/* sysfs show handler: number of stripes currently in use */
static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;

	if (!conf)
		return 0;
	return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
}
/* sysfs attribute: stripe_cache_active (read-only) */
static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
/* all raid5 sysfs attributes, exported under md/<array>/ */
static struct attribute *raid5_attrs[] =  {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	&raid5_preread_bypass_threshold.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};
/*
 * Compute the usable size of the array in sectors.  @sectors and
 * @raid_disks of zero mean "use the current values"; the result is
 * rounded down to a whole number of chunks (both old and new chunk
 * size) and excludes parity capacity.
 */
static sector_t
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	raid5_conf_t *conf = mddev->private;
	int data_disks;

	if (!sectors)
		sectors = mddev->dev_sectors;
	if (!raid_disks)
		/* size is defined by the smallest of previous and new size */
		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);

	/* round down to whole chunks for both layouts */
	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);

	data_disks = raid_disks - conf->max_degraded;
	return sectors * data_disks;
}
/*
 * Free the per-cpu spare pages and scribble buffers and the percpu
 * allocation itself.  The hotplug notifier is unregistered while CPUs
 * are held online so no callback can race with the teardown.
 */
static void raid5_free_percpu(raid5_conf_t *conf)
{
	struct raid5_percpu *percpu;
	unsigned long cpu;
	if (!conf->percpu)
		return;
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		percpu = per_cpu_ptr(conf->percpu, cpu);
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
	}
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();
	free_percpu(conf->percpu);
}
/*
 * Tear down a raid5 configuration: release all cached stripes first,
 * then the per-cpu resources, then the disk and hash tables, and
 * finally the conf structure itself.
 */
static void free_conf(raid5_conf_t *conf)
{
	shrink_stripes(conf);
	raid5_free_percpu(conf);
	kfree(conf->disks);
	kfree(conf->stripe_hashtbl);
	kfree(conf);
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU hotplug callback: allocate the per-cpu scribble buffer (and, for
 * RAID6, a spare page) when a CPU comes up, and free them when it goes
 * down.  Returns NOTIFY_OK, or an errno-wrapped notifier result if the
 * allocations fail.
 */
static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
	long cpu = (long)hcpu;
	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* spare_page is only needed for RAID6 parity recomputation */
		if (conf->level == 6 && !percpu->spare_page)
			percpu->spare_page = alloc_page(GFP_KERNEL);
		if (!percpu->scribble)
			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
		if (!percpu->scribble ||
		    (conf->level == 6 && !percpu->spare_page)) {
			/* undo any partial allocation */
			safe_put_page(percpu->spare_page);
			kfree(percpu->scribble);
			pr_err("%s: failed memory allocation for cpu%ld\n",
			       __func__, cpu);
			return notifier_from_errno(-ENOMEM);
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
		percpu->spare_page = NULL;
		percpu->scribble = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif
/*
 * Allocate the per-cpu working state: a scribble buffer for every
 * present CPU and, for RAID6, a spare page.  Registers a hotplug
 * notifier so later CPUs get the same treatment.  Returns 0 on success
 * or -ENOMEM; on failure the caller is expected to clean up via
 * raid5_free_percpu().
 */
static int raid5_alloc_percpu(raid5_conf_t *conf)
{
	unsigned long cpu;
	struct page *spare_page;
	struct raid5_percpu __percpu *allcpus;
	void *scribble;
	int err;
	allcpus = alloc_percpu(struct raid5_percpu);
	if (!allcpus)
		return -ENOMEM;
	conf->percpu = allcpus;
	get_online_cpus();
	err = 0;
	for_each_present_cpu(cpu) {
		if (conf->level == 6) {
			spare_page = alloc_page(GFP_KERNEL);
			if (!spare_page) {
				err = -ENOMEM;
				break;
			}
			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
		}
		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
		if (!scribble) {
			err = -ENOMEM;
			break;
		}
		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
	}
#ifdef CONFIG_HOTPLUG_CPU
	conf->cpu_notify.notifier_call = raid456_cpu_notify;
	conf->cpu_notify.priority = 0;
	/* only register the notifier if all allocations succeeded */
	if (err == 0)
		err = register_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();
	return err;
}
/*
 * Allocate and initialise a raid5_conf_t for @mddev based on its "new_*"
 * geometry fields.  Validates level/layout/chunk size, sets up the stripe
 * hash table, per-CPU scratch space, the disk_info array and the stripe
 * cache, and starts the raid5d management thread.
 *
 * Returns the new conf, or ERR_PTR(-EIO/-EINVAL/-ENOMEM) on failure.
 */
static raid5_conf_t *setup_conf(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int raid_disk, memory, max_disks;
	mdk_rdev_t *rdev;
	struct disk_info *disk;
	/* This personality only handles levels 4, 5 and 6. */
	if (mddev->new_level != 5
	    && mddev->new_level != 4
	    && mddev->new_level != 6) {
		printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
		       mdname(mddev), mddev->new_level);
		return ERR_PTR(-EIO);
	}
	if ((mddev->new_level == 5
	     && !algorithm_valid_raid5(mddev->new_layout)) ||
	    (mddev->new_level == 6
	     && !algorithm_valid_raid6(mddev->new_layout))) {
		printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
		       mdname(mddev), mddev->new_layout);
		return ERR_PTR(-EIO);
	}
	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
		printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
		       mdname(mddev), mddev->raid_disks);
		return ERR_PTR(-EINVAL);
	}
	/* Chunk size must be a non-zero power of two and a multiple of
	 * PAGE_SIZE in bytes. */
	if (!mddev->new_chunk_sectors ||
	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
	    !is_power_of_2(mddev->new_chunk_sectors)) {
		printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
		       mdname(mddev), mddev->new_chunk_sectors << 9);
		return ERR_PTR(-EINVAL);
	}
	conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
	if (conf == NULL)
		goto abort;
	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->hold_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);
	atomic_set(&conf->active_aligned_reads, 0);
	conf->bypass_threshold = BYPASS_THRESHOLD;
	conf->raid_disks = mddev->raid_disks;
	if (mddev->reshape_position == MaxSector)
		conf->previous_raid_disks = mddev->raid_disks;
	else
		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
	/* The disk array must span the larger of old and new geometries so
	 * a mid-reshape array can address every member. */
	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
	conf->scribble_len = scribble_len(max_disks);
	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;
	conf->mddev = mddev;
	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;
	conf->level = mddev->new_level;
	if (raid5_alloc_percpu(conf) != 0)
		goto abort;
	pr_debug("raid456: run(%s) called.\n", mdname(mddev));
	/* Bind each member rdev into its disk_info slot. */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= max_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;
		disk->rdev = rdev;
		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "md/raid:%s: device %s operational as raid"
			       " disk %d\n",
			       mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
		} else if (rdev->saved_raid_disk != raid_disk)
			/* Cannot rely on bitmap to complete recovery */
			conf->fullsync = 1;
	}
	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->level = mddev->new_level;
	if (conf->level == 6)
		conf->max_degraded = 2;
	else
		conf->max_degraded = 1;
	conf->algorithm = mddev->new_layout;
	conf->max_nr_stripes = NR_STRIPES;
	conf->reshape_progress = mddev->reshape_position;
	if (conf->reshape_progress != MaxSector) {
		/* Mid-reshape: remember the old geometry as well. */
		conf->prev_chunk_sectors = mddev->chunk_sectors;
		conf->prev_algo = mddev->layout;
	}
	/* Rough footprint of the stripe cache, in kB, for the log message. */
	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
		       "md/raid:%s: couldn't allocate %dkB for buffers\n",
		       mdname(mddev), memory);
		goto abort;
	} else
		printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
		       mdname(mddev), memory);
	conf->thread = md_register_thread(raid5d, mddev, NULL);
	if (!conf->thread) {
		printk(KERN_ERR
		       "md/raid:%s: couldn't allocate thread.\n",
		       mdname(mddev));
		goto abort;
	}
	return conf;
abort:
	if (conf) {
		free_conf(conf);
		return ERR_PTR(-EIO);
	} else
		return ERR_PTR(-ENOMEM);
}
/*
 * Return 1 if slot @raid_disk holds only parity blocks (never data) under
 * layout @algo in an array of @raid_disks devices, 0 otherwise.  Only the
 * "fixed parity" layouts have such dedicated slots.
 */
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
	int last = raid_disks - 1;

	if (algo == ALGORITHM_PARITY_0)
		/* parity occupies the first max_degraded slots */
		return raid_disk < max_degraded;
	if (algo == ALGORITHM_PARITY_N)
		/* parity occupies the last max_degraded slots */
		return raid_disk >= raid_disks - max_degraded;
	if (algo == ALGORITHM_PARITY_0_6)
		/* P on the first device, Q on the last */
		return raid_disk == 0 || raid_disk == last;
	if (algo == ALGORITHM_LEFT_ASYMMETRIC_6 ||
	    algo == ALGORITHM_RIGHT_ASYMMETRIC_6 ||
	    algo == ALGORITHM_LEFT_SYMMETRIC_6 ||
	    algo == ALGORITHM_RIGHT_SYMMETRIC_6)
		/* raid5-compatible layouts keep Q on the last device */
		return raid_disk == last;
	return 0;
}
/*
 * Personality ->run() entry point: bring up a raid4/5/6 array.  Validates
 * (and, if present, prepares to continue) a reshape, builds or reuses the
 * conf, computes the degraded count, refuses unstartable dirty degraded
 * arrays, and configures the request queue limits.
 *
 * Returns 0 on success or a -errno.
 */
static int run(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int working_disks = 0;
	int dirty_parity_disks = 0;
	mdk_rdev_t *rdev;
	sector_t reshape_offset = 0;
	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	if (mddev->reshape_position != MaxSector) {
		/* Check that we can continue the reshape.
		 * Currently only disks can change, it must
		 * increase, and we must be past the point where
		 * a stripe over-writes itself
		 */
		sector_t here_new, here_old;
		int old_disks;
		int max_degraded = (mddev->level == 6 ? 2 : 1);
		if (mddev->new_level != mddev->level) {
			printk(KERN_ERR "md/raid:%s: unsupported reshape "
			       "required - aborting.\n",
			       mdname(mddev));
			return -EINVAL;
		}
		old_disks = mddev->raid_disks - mddev->delta_disks;
		/* reshape_position must be on a new-stripe boundary, and one
		 * further up in new geometry must map after here in old
		 * geometry.
		 */
		here_new = mddev->reshape_position;
		if (sector_div(here_new, mddev->new_chunk_sectors *
			       (mddev->raid_disks - max_degraded))) {
			printk(KERN_ERR "md/raid:%s: reshape_position not "
			       "on a stripe boundary\n", mdname(mddev));
			return -EINVAL;
		}
		reshape_offset = here_new * mddev->new_chunk_sectors;
		/* here_new is the stripe we will write to */
		here_old = mddev->reshape_position;
		sector_div(here_old, mddev->chunk_sectors *
			   (old_disks-max_degraded));
		/* here_old is the first stripe that we might need to read
		 * from */
		if (mddev->delta_disks == 0) {
			/* We cannot be sure it is safe to start an in-place
			 * reshape. It is only safe if user-space is monitoring
			 * and taking constant backups.
			 * mdadm always starts a situation like this in
			 * readonly mode so it can take control before
			 * allowing any writes. So just check for that.
			 */
			if ((here_new * mddev->new_chunk_sectors !=
			     here_old * mddev->chunk_sectors) ||
			    mddev->ro == 0) {
				printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
				       " in read-only mode - aborting\n",
				       mdname(mddev));
				return -EINVAL;
			}
		} else if (mddev->delta_disks < 0
			   ? (here_new * mddev->new_chunk_sectors <=
			      here_old * mddev->chunk_sectors)
			   : (here_new * mddev->new_chunk_sectors >=
			      here_old * mddev->chunk_sectors)) {
			/* Reading from the same stripe as writing to - bad */
			printk(KERN_ERR "md/raid:%s: reshape_position too early for "
			       "auto-recovery - aborting.\n",
			       mdname(mddev));
			return -EINVAL;
		}
		printk(KERN_INFO "md/raid:%s: reshape will continue\n",
		       mdname(mddev));
		/* OK, we should be able to continue; */
	} else {
		BUG_ON(mddev->level != mddev->new_level);
		BUG_ON(mddev->layout != mddev->new_layout);
		BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
		BUG_ON(mddev->delta_disks != 0);
	}
	/* A takeover path may already have built a conf in mddev->private. */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;
	if (IS_ERR(conf))
		return PTR_ERR(conf);
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;
	/*
	 * 0 for a fully functional array, 1 or 2 for a degraded array.
	 */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk < 0)
			continue;
		if (test_bit(In_sync, &rdev->flags)) {
			working_disks++;
			continue;
		}
		/* This disc is not fully in-sync. However if it
		 * just stored parity (beyond the recovery_offset),
		 * then we don't need to be concerned about the
		 * array being dirty.
		 * When reshape goes 'backwards', we never have
		 * partially completed devices, so we only need
		 * to worry about reshape going forwards.
		 */
		/* Hack because v0.91 doesn't store recovery_offset properly. */
		if (mddev->major_version == 0 &&
		    mddev->minor_version > 90)
			rdev->recovery_offset = reshape_offset;
		if (rdev->recovery_offset < reshape_offset) {
			/* We need to check old and new layout */
			if (!only_parity(rdev->raid_disk,
					 conf->algorithm,
					 conf->raid_disks,
					 conf->max_degraded))
				continue;
		}
		if (!only_parity(rdev->raid_disk,
				 conf->prev_algo,
				 conf->previous_raid_disks,
				 conf->max_degraded))
			continue;
		dirty_parity_disks++;
	}
	mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
			   - working_disks);
	if (has_failed(conf)) {
		printk(KERN_ERR "md/raid:%s: not enough operational devices"
		       " (%d/%d failed)\n",
		       mdname(mddev), mddev->degraded, conf->raid_disks);
		goto abort;
	}
	/* device size must be a multiple of chunk size */
	mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
	mddev->resync_max_sectors = mddev->dev_sectors;
	if (mddev->degraded > dirty_parity_disks &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			printk(KERN_WARNING
			       "md/raid:%s: starting dirty degraded array"
			       " - data corruption possible.\n",
			       mdname(mddev));
		else {
			printk(KERN_ERR
			       "md/raid:%s: cannot start dirty degraded array.\n",
			       mdname(mddev));
			goto abort;
		}
	}
	if (mddev->degraded == 0)
		printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
		       " devices, algorithm %d\n", mdname(mddev), conf->level,
		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
		       mddev->new_layout);
	else
		printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
		       " out of %d devices, algorithm %d\n",
		       mdname(mddev), conf->level,
		       mddev->raid_disks - mddev->degraded,
		       mddev->raid_disks, mddev->new_layout);
	print_raid5_conf(conf);
	if (conf->reshape_progress != MaxSector) {
		/* A reshape was in progress: restart it from the checkpoint. */
		conf->reshape_safe = conf->reshape_progress;
		atomic_set(&conf->reshape_stripes, 0);
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
							"reshape");
	}
	/* Ok, everything is just fine now */
	if (mddev->to_remove == &raid5_attrs_group)
		mddev->to_remove = NULL;
	else if (mddev->kobj.sd &&
	    sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
		printk(KERN_WARNING
		       "raid5: failed to create sysfs attributes for %s\n",
		       mdname(mddev));
	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
	if (mddev->queue) {
		int chunk_size;
		/* read-ahead size must cover two whole stripes, which
		 * is 2 * (datadisks) * chunksize where 'n' is the
		 * number of raid devices
		 */
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
		mddev->queue->backing_dev_info.congested_data = mddev;
		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
		chunk_size = mddev->chunk_sectors << 9;
		blk_queue_io_min(mddev->queue, chunk_size);
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks - conf->max_degraded));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
	}
	return 0;
abort:
	md_unregister_thread(&mddev->thread);
	if (conf) {
		print_raid5_conf(conf);
		free_conf(conf);
	}
	mddev->private = NULL;
	printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
	return -EIO;
}
/*
 * Personality ->stop() entry point: stop the raid5d thread, detach the
 * congestion callback and free the conf.  to_remove defers sysfs group
 * removal to md core (we can't remove it from our own context).
 */
static int stop(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;
	md_unregister_thread(&mddev->thread);
	if (mddev->queue)
		mddev->queue->backing_dev_info.congested_fn = NULL;
	free_conf(conf);
	mddev->private = NULL;
	mddev->to_remove = &raid5_attrs_group;
	return 0;
}
#ifdef DEBUG
/* Debug helper: dump one stripe_head (sector, parity index, state,
 * refcount and the per-device cache pages/flags) to @seq. */
static void print_sh(struct seq_file *seq, struct stripe_head *sh)
{
	int i;
	seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
		   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
	seq_printf(seq, "sh %llu,  count %d.\n",
		   (unsigned long long)sh->sector, atomic_read(&sh->count));
	seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
	for (i = 0; i < sh->disks; i++) {
		seq_printf(seq, "(cache%d: %p %ld) ",
			   i, sh->dev[i].page, sh->dev[i].flags);
	}
	seq_printf(seq, "\n");
}
/* Debug helper: walk the stripe hash table under device_lock and dump
 * every stripe belonging to @conf. */
static void printall(struct seq_file *seq, raid5_conf_t *conf)
{
	struct stripe_head *sh;
	struct hlist_node *hn;
	int i;
	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < NR_HASH; i++) {
		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
			/* hash buckets may be shared; filter other arrays */
			if (sh->raid_conf != conf)
				continue;
			print_sh(seq, sh);
		}
	}
	spin_unlock_irq(&conf->device_lock);
}
#endif
/*
 * Personality ->status() entry point: emit the /proc/mdstat line fragment
 * (level, chunk size in kB, algorithm, and a [UUU_] per-device map where
 * 'U' is in-sync and '_' missing or out of sync).
 */
static void status(struct seq_file *seq, mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;
	int i;
	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
		mddev->chunk_sectors / 2, mddev->layout);
	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf (seq, "%s",
			       conf->disks[i].rdev &&
			       test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf (seq, "]");
#ifdef DEBUG
	seq_printf (seq, "\n");
	printall(seq, conf);
#endif
}
/* Dump the conf's level, disk counts and each member device (operational
 * flag + bdev name) to the kernel log at KERN_DEBUG. */
static void print_raid5_conf (raid5_conf_t *conf)
{
	int i;
	struct disk_info *tmp;
	printk(KERN_DEBUG "RAID conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
	       conf->raid_disks,
	       conf->raid_disks - conf->mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}
/*
 * Personality ->spare_active() entry point: mark every fully recovered,
 * non-faulty device In_sync and drop mddev->degraded accordingly.
 * Returns the number of devices newly activated.
 */
static int raid5_spare_active(mddev_t *mddev)
{
	int i;
	raid5_conf_t *conf = mddev->private;
	struct disk_info *tmp;
	int count = 0;
	unsigned long flags;
	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		/* test_and_set avoids double-counting a device already
		 * marked In_sync by a concurrent path. */
		if (tmp->rdev
		    && tmp->rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);
	print_raid5_conf(conf);
	return count;
}
/*
 * Personality ->hot_remove_disk() entry point: detach the rdev in slot
 * @number.  Refuses (-EBUSY) if the device is still in sync with I/O
 * pending, or if it is non-faulty and recovery is still possible.
 * Uses synchronize_rcu() to make sure no reader still holds p->rdev
 * before the removal is final.
 */
static int raid5_remove_disk(mddev_t *mddev, int number)
{
	raid5_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct disk_info *p = conf->disks + number;
	print_raid5_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		/* Slots beyond the (post-reshape) array are never in sync. */
		if (number >= conf->raid_disks &&
		    conf->reshape_progress == MaxSector)
			clear_bit(In_sync, &rdev->flags);
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * isn't possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    !has_failed(conf) &&
		    number < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:
	print_raid5_conf(conf);
	return err;
}
/*
 * Personality ->hot_add_disk() entry point: place @rdev into a free slot,
 * preferring its previously occupied slot (saved_raid_disk) so a bitmap-
 * based partial recovery remains possible.  Returns 0 on success,
 * -EEXIST if no slot is free, -EINVAL if the array has already failed.
 */
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	raid5_conf_t *conf = mddev->private;
	int err = -EEXIST;
	int disk;
	struct disk_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;
	if (has_failed(conf))
		/* no point adding a device */
		return -EINVAL;
	/* A pre-assigned raid_disk restricts us to exactly that slot. */
	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;
	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		disk = rdev->saved_raid_disk;
	else
		disk = first;
	for ( ; disk <= last ; disk++)
		if ((p=conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			/* Different slot than before: bitmap can't help. */
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	print_raid5_conf(conf);
	return err;
}
/*
 * Personality ->resize() entry point: change the per-device size to
 * @sectors (rounded down to a chunk multiple) and update the exported
 * array size.  If the array grew, schedule a resync of the new space.
 * Returns 0 or -EINVAL if the requested size is too small.
 */
static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	md_set_array_sectors(mddev, raid5_size(mddev, sectors,
					       mddev->raid_disks));
	if (mddev->array_sectors >
	    raid5_size(mddev, sectors, mddev->raid_disks))
		return -EINVAL;
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		/* The grown region has never been synced. */
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
/* Return 1 if the stripe cache is big enough for a reshape with the
 * current/new chunk sizes, 0 (after logging the requirement) if not. */
static int check_stripe_cache(mddev_t *mddev)
{
	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
	raid5_conf_t *conf = mddev->private;
	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes ||
	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes) {
		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
		       mdname(mddev),
		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
			/ STRIPE_SIZE)*4);
		return 0;
	}
	return 1;
}
/*
 * Shared ->check_reshape() helper for raid4/5/6: validate that the
 * requested geometry change is possible (no bitmap, array not failed,
 * enough devices if shrinking, stripe cache big enough), then resize the
 * stripe cache for the new disk count.  Returns 0 or a -errno.
 */
static int check_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;
	if (mddev->delta_disks == 0 &&
	    mddev->new_layout == mddev->layout &&
	    mddev->new_chunk_sectors == mddev->chunk_sectors)
		return 0; /* nothing to do */
	if (mddev->bitmap)
		/* Cannot grow a bitmap yet */
		return -EBUSY;
	if (has_failed(conf))
		return -EINVAL;
	if (mddev->delta_disks < 0) {
		/* We might be able to shrink, but the devices must
		 * be made bigger first.
		 * For raid6, 4 is the minimum size.
		 * Otherwise 2 is the minimum
		 */
		int min = 2;
		if (mddev->level == 6)
			min = 4;
		if (mddev->raid_disks + mddev->delta_disks < min)
			return -EINVAL;
	}
	if (!check_stripe_cache(mddev))
		return -ENOSPC;
	return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
}
/*
 * Personality ->start_reshape() entry point: switch conf to the new
 * geometry under device_lock, add available spares (when growing), adjust
 * ->degraded, and kick off the "reshape" sync thread.  Returns 0, or a
 * -errno if preconditions fail or the thread can't be started (in which
 * case the geometry change is rolled back).
 */
static int raid5_start_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;
	mdk_rdev_t *rdev;
	int spares = 0;
	unsigned long flags;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;
	if (!check_stripe_cache(mddev))
		return -ENOSPC;
	/* Count usable spares (present but not active, not faulty). */
	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (!test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))
			spares++;
	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 * of that size
		 */
		return -EINVAL;
	/* Refuse to reduce size of the array.  Any reductions in
	 * array size must be through explicit setting of array_size
	 * attribute.
	 */
	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
	    < mddev->array_sectors) {
		printk(KERN_ERR "md/raid:%s: array size must be reduced "
		       "before number of disks\n", mdname(mddev));
		return -EINVAL;
	}
	atomic_set(&conf->reshape_stripes, 0);
	/* Flip conf to the new geometry atomically w.r.t. stripe handling. */
	spin_lock_irq(&conf->device_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->prev_chunk_sectors = conf->chunk_sectors;
	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->prev_algo = conf->algorithm;
	conf->algorithm = mddev->new_layout;
	/* Shrinking reshapes run from the end of the array backwards. */
	if (mddev->delta_disks < 0)
		conf->reshape_progress = raid5_size(mddev, 0, 0);
	else
		conf->reshape_progress = 0;
	conf->reshape_safe = conf->reshape_progress;
	conf->generation++;
	spin_unlock_irq(&conf->device_lock);
	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 * Don't add devices if we are reducing the number of
	 * devices in the array.  This is because it is not possible
	 * to correctly record the "partially reconstructed" state of
	 * such devices during the reshape and confusion could result.
	 */
	if (mddev->delta_disks >= 0) {
		int added_devices = 0;
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk < 0 &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (raid5_add_disk(mddev, rdev) == 0) {
					char nm[20];
					if (rdev->raid_disk
					    >= conf->previous_raid_disks) {
						set_bit(In_sync, &rdev->flags);
						added_devices++;
					} else
						rdev->recovery_offset = 0;
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						/* Failure here is OK */;
				}
			} else if (rdev->raid_disk >= conf->previous_raid_disks
				   && !test_bit(Faulty, &rdev->flags)) {
				/* This is a spare that was manually added */
				set_bit(In_sync, &rdev->flags);
				added_devices++;
			}
		/* When a reshape changes the number of devices,
		 * ->degraded is measured against the larger of the
		 * pre and post number of devices.
		 */
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
			- added_devices;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"reshape");
	if (!mddev->sync_thread) {
		/* Roll back the geometry change. */
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		return -EAGAIN;
	}
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;
}
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'
 */
static void end_reshape(raid5_conf_t *conf)
{
	/* Only commit the new geometry if the reshape wasn't interrupted. */
	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		spin_lock_irq(&conf->device_lock);
		conf->previous_raid_disks = conf->raid_disks;
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
		 */
		if (conf->mddev->queue) {
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}
/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
		if (mddev->delta_disks > 0) {
			/* Grew: export the new, larger capacity. */
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			/* Shrank: recompute degraded and detach the devices
			 * that no longer belong to the array. */
			mddev->degraded = conf->raid_disks;
			for (d = 0; d < conf->raid_disks ; d++)
				if (conf->disks[d].rdev &&
				    test_bit(In_sync,
					     &conf->disks[d].rdev->flags))
					mddev->degraded--;
			for (d = conf->raid_disks ;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				mdk_rdev_t *rdev = conf->disks[d].rdev;
				if (rdev && raid5_remove_disk(mddev, d) == 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					sysfs_remove_link(&mddev->kobj, nm);
					rdev->raid_disk = -1;
				}
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
	}
}
/*
 * Personality ->quiesce() entry point.
 * state 1: block new writes and wait for all active stripes and aligned
 *          reads to drain; state 0: resume normal operation;
 * state 2: just wake a reshape that was paused by a suspend.
 */
static void raid5_quiesce(mddev_t *mddev, int state)
{
	raid5_conf_t *conf = mddev->private;
	switch(state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;
	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		conf->quiesce = 1;
		spin_unlock_irq(&conf->device_lock);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;
	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
/*
 * Take over a single-zone raid0 array as raid4 or raid5 (@level) in the
 * PARITY_N layout, adding one device slot for the parity disk.  Returns
 * the new conf from setup_conf(), or ERR_PTR(-EINVAL) for multi-zone
 * raid0 arrays.
 */
static void *raid45_takeover_raid0(mddev_t *mddev, int level)
{
	struct raid0_private_data *raid0_priv = mddev->private;
	sector_t sectors;
	/* for raid0 takeover only one zone is supported */
	if (raid0_priv->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	/* Per-device size = zone size / number of devices. */
	sectors = raid0_priv->strip_zone[0].zone_end;
	sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	return setup_conf(mddev);
}
/*
 * Take over a 2-device raid1 array as a 2-device raid5.  Picks the
 * largest chunk size <= 64K that divides the array size; fails with
 * ERR_PTR(-EINVAL) for other raid1 shapes or unsuitable sizes.
 */
static void *raid5_takeover_raid1(mddev_t *mddev)
{
	int chunksect;
	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	/* Should check if there are write-behind devices? */
	chunksect = 64*2; /* 64K by default */
	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;
	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);
	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;
	return setup_conf(mddev);
}
/*
 * Take over a raid6 array that uses one of the raid5-compatible *_6
 * layouts (Q on the last device) as a raid5 array, dropping the Q device.
 * Returns the new conf, or ERR_PTR(-EINVAL) for unsupported layouts.
 */
static void *raid5_takeover_raid6(mddev_t *mddev)
{
	int new_layout;

	/* Map each *_6 raid6 layout onto its raid5 equivalent. */
	if (mddev->layout == ALGORITHM_LEFT_ASYMMETRIC_6)
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
	else if (mddev->layout == ALGORITHM_RIGHT_ASYMMETRIC_6)
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
	else if (mddev->layout == ALGORITHM_LEFT_SYMMETRIC_6)
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
	else if (mddev->layout == ALGORITHM_RIGHT_SYMMETRIC_6)
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
	else if (mddev->layout == ALGORITHM_PARITY_0_6)
		new_layout = ALGORITHM_PARITY_0;
	else if (mddev->layout == ALGORITHM_PARITY_N)
		new_layout = ALGORITHM_PARITY_N;
	else
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}
/*
 * raid5 ->check_reshape() entry point: validate a requested layout/chunk
 * change.  For 2-device arrays the change is applied immediately (no data
 * moves); otherwise the new values are recorded and check_reshape() sizes
 * the stripe cache for a later reshape pass.  Returns 0 or a -errno.
 */
static int raid5_check_reshape(mddev_t *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as not restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	raid5_conf_t *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;
	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}
	/* They look valid */
	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk ;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}
/*
 * raid6 ->check_reshape() entry point: validate the new layout and chunk
 * size, then defer to the shared check_reshape().  Unlike raid5 there is
 * no immediate-change shortcut.  Returns 0 or a -errno.
 */
static int raid6_check_reshape(mddev_t *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;
	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}
	/* They look valid */
	return check_reshape(mddev);
}
static void *raid5_takeover(mddev_t *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	switch (mddev->level) {
	case 0:
		return raid45_takeover_raid0(mddev, 5);
	case 1:
		return raid5_takeover_raid1(mddev);
	case 4:
		/* raid4 is raid5 with a fixed-parity layout */
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	case 6:
		return raid5_takeover_raid6(mddev);
	}
	return ERR_PTR(-EINVAL);
}
static void *raid4_takeover(mddev_t *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	/* Only the PARITY_N raid5 layout matches raid4's geometry. */
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}
static struct mdk_personality raid5_personality;
static void *raid6_takeover(mddev_t *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;
	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	/* 253 + 1 Q device keeps us within the 254-device limit. */
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);
	/* Map each raid5 layout to its *_6 raid6 equivalent. */
	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}
/* md personality operations for level-6 arrays. */
static struct mdk_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
};
/* md personality operations for level-5 arrays. */
static struct mdk_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
};
/* md personality operations for level-4 arrays (shares raid5's code). */
static struct mdk_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
};
/* Module init: register the three personalities this module provides. */
static int __init raid5_init(void)
{
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}
/* Module exit: unregister all three personalities registered in raid5_init(). */
static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}
module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
/* Aliases so module autoloading finds this driver by level or by name. */
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");
/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");
| gpl-2.0 |
followtheart/linux | arch/s390/kernel/perf_event.c | 1515 | 8136 | /*
* Performance event support for s390x
*
* Copyright IBM Corp. 2012, 2013
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/
#define KMSG_COMPONENT "perf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sysinfo.h>
/*
 * Report the PMU name: the CPU-Measurement Facility string when either
 * the counter or the sampling facility is present, a generic name
 * otherwise.
 */
const char *perf_pmu_name(void)
{
	if (!cpum_cf_avail() && !cpum_sf_avail())
		return "pmu";
	return "CPU-Measurement Facilities (CPU-MF)";
}
EXPORT_SYMBOL(perf_pmu_name);
/*
 * Total number of hardware counters: the sum of the counter-facility
 * and sampling-facility maxima, counting only facilities that are
 * actually available on this machine.
 */
int perf_num_counters(void)
{
	int counters = 0;

	if (cpum_cf_avail())
		counters += PERF_CPUM_CF_MAX_CTR;
	if (cpum_sf_avail())
		counters += PERF_CPUM_SF_MAX_CTR;

	return counters;
}
EXPORT_SYMBOL(perf_num_counters);
/*
 * Fetch the SIE control block pointer that KVM stashes in the first
 * empty slot of the stack frame addressed by r15, or NULL if there is
 * no stack frame.
 */
static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
{
	struct stack_frame *frame = (struct stack_frame *) regs->gprs[15];

	return frame ? (struct kvm_s390_sie_block *) frame->empty1[0] : NULL;
}
/*
 * True when the sample interrupted a KVM guest: in that case the host
 * PSW points at the sie_exit label (the instruction after SIE).  Always
 * false for user-mode samples and for kernels built without KVM.
 */
static bool is_in_guest(struct pt_regs *regs)
{
	if (user_mode(regs))
		return false;
#if IS_ENABLED(CONFIG_KVM)
	return instruction_pointer(regs) == (unsigned long) &sie_exit;
#else
	return false;
#endif
}
/* Problem-state (user-mode) bit of the guest's PSW; callers must only
 * use this when is_in_guest() is true, since sie_block() is dereferenced
 * unconditionally. */
static unsigned long guest_is_user_mode(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
}

/* Instruction address from the guest's PSW, masked to a valid address. */
static unsigned long instruction_pointer_guest(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN;
}
/*
 * Instruction pointer for a perf sample: taken from the guest PSW when
 * the sample hit inside SIE, from the host pt_regs otherwise.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (is_in_guest(regs))
		return instruction_pointer_guest(regs);
	return instruction_pointer(regs);
}
/* Map the guest PSW problem-state bit to the matching PERF_RECORD_MISC
 * guest flag. */
static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
{
	if (guest_is_user_mode(regs))
		return PERF_RECORD_MISC_GUEST_USER;
	return PERF_RECORD_MISC_GUEST_KERNEL;
}
static unsigned long perf_misc_flags_sf(struct pt_regs *regs)
{
struct perf_sf_sde_regs *sde_regs;
unsigned long flags;
sde_regs = (struct perf_sf_sde_regs *) ®s->int_parm_long;
if (sde_regs->in_guest)
flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
: PERF_RECORD_MISC_GUEST_KERNEL;
else
flags = user_mode(regs) ? PERF_RECORD_MISC_USER
: PERF_RECORD_MISC_KERNEL;
return flags;
}
/*
 * Compute PERF_RECORD_MISC flags for a sample.  A pt_regs synthesized
 * by the cpum_sf PMU (identified by the interruption code/parameter and
 * a zero r15) carries the flags itself; otherwise fall back to the
 * guest check and the PSW problem-state bit.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	/* pt_regs created by cpum_sf: 0x1407 measurement-alert with the
	 * program-request-alert subcode and no stack pointer. */
	if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA &&
	    !regs->gprs[15])
		return perf_misc_flags_sf(regs);

	if (is_in_guest(regs))
		return perf_misc_guest_flags(regs);

	if (user_mode(regs))
		return PERF_RECORD_MISC_USER;
	return PERF_RECORD_MISC_KERNEL;
}
/* Dump the counter facility state (versions, authorization and control
 * masks) for the current CPU; silently does nothing if the query
 * instruction fails. */
static void print_debug_cf(void)
{
	struct cpumf_ctr_info cf_info;
	int cpu = smp_processor_id();

	memset(&cf_info, 0, sizeof(cf_info));
	if (!qctri(&cf_info))
		pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
			cpu, cf_info.cfvn, cf_info.csvn,
			cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
}
/* Dump the sampling facility state for the current CPU: the common
 * rate limits plus per-mode (basic/diagnostic) details, each printed
 * only if that mode is available. */
static void print_debug_sf(void)
{
	struct hws_qsi_info_block si;
	int cpu = smp_processor_id();

	memset(&si, 0, sizeof(si));
	if (qsi(&si))
		return;

	pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n",
		cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate,
		si.cpu_speed);

	if (si.as)
		pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i"
			" bsdes=%i tear=%016lx dear=%016lx\n", cpu,
			si.as, si.es, si.cs, si.bsdes, si.tear, si.dear);
	if (si.ad)
		pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i"
			" dsdes=%i tear=%016lx dear=%016lx\n", cpu,
			si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear);
}
/* perf core debug hook: print the CPU-MF state of the current CPU.
 * Runs with local interrupts disabled around the facility queries. */
void perf_event_print_debug(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (cpum_cf_avail())
		print_debug_cf();
	if (cpum_sf_avail())
		print_debug_sf();
	local_irq_restore(flags);
}
/* Service level infrastructure */

/* Emit one line describing the counter facility into the service-level
 * seq_file; nothing is printed if the query fails. */
static void sl_print_counter(struct seq_file *m)
{
	struct cpumf_ctr_info ci;

	memset(&ci, 0, sizeof(ci));
	if (qctri(&ci))
		return;

	seq_printf(m, "CPU-MF: Counter facility: version=%u.%u "
		   "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl);
}
/* Emit sampling-facility details into the service-level seq_file:
 * rate limits first, then one line per available mode. */
static void sl_print_sampling(struct seq_file *m)
{
	struct hws_qsi_info_block si;

	memset(&si, 0, sizeof(si));
	if (qsi(&si))
		return;

	/* Skip entirely if neither basic nor diagnostic sampling exists. */
	if (!si.as && !si.ad)
		return;

	seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu"
		   " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate,
		   si.cpu_speed);
	if (si.as)
		seq_printf(m, "CPU-MF: Sampling facility: mode=basic"
			   " sample_size=%u\n", si.bsdes);
	if (si.ad)
		seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic"
			   " sample_size=%u\n", si.dsdes);
}
/* service_level callback: report whichever CPU-MF facilities exist. */
static void service_level_perf_print(struct seq_file *m,
				     struct service_level *sl)
{
	if (cpum_cf_avail())
		sl_print_counter(m);
	if (cpum_sf_avail())
		sl_print_sampling(m);
}

/* Descriptor registered with the s390 service-level reporting code. */
static struct service_level service_level_perf = {
	.seq_print = service_level_perf_print,
};
/* Hook the CPU-MF report into the service-level infrastructure at boot. */
static int __init service_level_perf_register(void)
{
	return register_service_level(&service_level_perf);
}
arch_initcall(service_level_perf_register);
/* See also arch/s390/kernel/traps.c */

/*
 * Walk kernel stack frames in the range [low, high) starting at sp,
 * recording each saved return address (gprs[8] of the frame) into the
 * perf callchain.  Frames are chained via back_chain; a zero back_chain
 * may mean an interrupt frame (a pt_regs directly after the last stack
 * frame), in which case the walk continues on the interrupted stack via
 * regs->gprs[15].  Returns the stack pointer at which the walk left the
 * given range, so the caller can resume on another stack.
 */
static unsigned long __store_trace(struct perf_callchain_entry *entry,
				   unsigned long sp,
				   unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			/* Backchain must move strictly upward within range. */
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			perf_callchain_store(entry,
					     sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}
/*
 * Record the kernel callchain for a perf sample: start from the sampled
 * r15, walk the async (interrupt) stack first, then continue on the
 * task's own stack.  User-mode samples have no kernel chain and are
 * ignored.
 */
void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long head;
	struct stack_frame *head_sf;

	if (user_mode(regs))
		return;

	head = regs->gprs[15];
	head_sf = (struct stack_frame *) head;

	/* Nothing to walk without a valid first frame/backchain. */
	if (!head_sf || !head_sf->back_chain)
		return;

	head = head_sf->back_chain;
	head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
			     S390_lowcore.async_stack);

	__store_trace(entry, head, S390_lowcore.thread_info,
		      S390_lowcore.thread_info + THREAD_SIZE);
}
/* Perf definitions for PMU event attributes in sysfs */

/* sysfs "show" helper for CPU-MF event attributes: prints the event
 * number and name in the canonical "event=0x...,name=..." format. */
ssize_t cpumf_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%04llx,name=%s\n",
		       pmu_attr->id, attr->attr.name);
}
/* Reserve/release functions for sharing perf hardware */
static DEFINE_SPINLOCK(perf_hw_owner_lock);
static void *perf_sampling_owner;	/* return address of current owner, NULL if free */
/*
 * Claim exclusive use of the sampling facility.  The caller's return
 * address is recorded as the owner for diagnostics.  Returns 0 on
 * success or -EBUSY if another user already holds the facility.
 */
int perf_reserve_sampling(void)
{
	int err;

	err = 0;
	spin_lock(&perf_hw_owner_lock);
	if (perf_sampling_owner) {
		pr_warn("The sampling facility is already reserved by %p\n",
			perf_sampling_owner);
		err = -EBUSY;
	} else
		perf_sampling_owner = __builtin_return_address(0);
	spin_unlock(&perf_hw_owner_lock);
	return err;
}
EXPORT_SYMBOL(perf_reserve_sampling);
/* Release the sampling facility; warns (but proceeds) if it was not
 * actually reserved. */
void perf_release_sampling(void)
{
	spin_lock(&perf_hw_owner_lock);
	WARN_ON(!perf_sampling_owner);
	perf_sampling_owner = NULL;
	spin_unlock(&perf_hw_owner_lock);
}
EXPORT_SYMBOL(perf_release_sampling);
| gpl-2.0 |
byungbok/webcon-kernel | drivers/hwmon/ds620.c | 2283 | 8095 | /*
* ds620.c - Support for temperature sensor and thermostat DS620
*
* Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de>
*
* based on ds1621.c by Christian W. Zuckschwerdt <zany@triq.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/i2c/ds620.h>
/*
* Many DS620 constants specified below
* 15 14 13 12 11 10 09 08
* |Done|NVB |THF |TLF |R1 |R0 |AUTOC|1SHOT|
*
* 07 06 05 04 03 02 01 00
* |PO2 |PO1 |A2 |A1 |A0 | | | |
*/
#define DS620_REG_CONFIG_DONE 0x8000
#define DS620_REG_CONFIG_NVB 0x4000
#define DS620_REG_CONFIG_THF 0x2000
#define DS620_REG_CONFIG_TLF 0x1000
#define DS620_REG_CONFIG_R1 0x0800
#define DS620_REG_CONFIG_R0 0x0400
#define DS620_REG_CONFIG_AUTOC 0x0200
#define DS620_REG_CONFIG_1SHOT 0x0100
#define DS620_REG_CONFIG_PO2 0x0080
#define DS620_REG_CONFIG_PO1 0x0040
#define DS620_REG_CONFIG_A2 0x0020
#define DS620_REG_CONFIG_A1 0x0010
#define DS620_REG_CONFIG_A0 0x0008
/* The DS620 registers */
static const u8 DS620_REG_TEMP[3] = {
0xAA, /* input, word, RO */
0xA2, /* min, word, RW */
0xA0, /* max, word, RW */
};
#define DS620_REG_CONF 0xAC /* word, RW */
#define DS620_COM_START 0x51 /* no data */
#define DS620_COM_STOP 0x22 /* no data */
/* Each client has this additional data */
struct ds620_data {
	struct device *hwmon_dev;	/* registered hwmon class device */
	struct mutex update_lock;	/* protects the cached fields below */
	char valid;			/* !=0 if following fields are valid */
	unsigned long last_updated;	/* In jiffies */

	s16 temp[3];			/* Register values, word: input, min, max */
};
/*
 * Program the DS620 configuration register: continuous conversion,
 * thermostat-pin (PO) mode taken from platform data, highest
 * resolution; then kick off the first conversion.  The register is
 * only rewritten if anything actually changed.
 */
static void ds620_init_client(struct i2c_client *client)
{
	struct ds620_platform_data *ds620_info = client->dev.platform_data;
	u16 conf, new_conf;

	new_conf = conf =
		i2c_smbus_read_word_swapped(client, DS620_REG_CONF);

	/* switch to continuous conversion mode */
	new_conf &= ~DS620_REG_CONFIG_1SHOT;
	/* already high at power-on, but don't trust the BIOS! */
	new_conf |= DS620_REG_CONFIG_PO2;
	/* thermostat mode according to platform data */
	if (ds620_info && ds620_info->pomode == 1)
		new_conf &= ~DS620_REG_CONFIG_PO1; /* PO_LOW */
	else if (ds620_info && ds620_info->pomode == 2)
		new_conf |= DS620_REG_CONFIG_PO1; /* PO_HIGH */
	else
		new_conf &= ~DS620_REG_CONFIG_PO2; /* always low */
	/* with highest precision */
	new_conf |= DS620_REG_CONFIG_R1 | DS620_REG_CONFIG_R0;

	if (conf != new_conf)
		i2c_smbus_write_word_swapped(client, DS620_REG_CONF, new_conf);

	/* start conversion */
	i2c_smbus_write_byte(client, DS620_COM_START);
}
/*
 * Refresh the cached temperature registers (input/min/max) if the cache
 * is older than 1.5 s or not yet valid.  Returns the data pointer on
 * success or an ERR_PTR on I2C failure; serialized by update_lock.
 */
static struct ds620_data *ds620_update_client(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct ds620_data *data = i2c_get_clientdata(client);
	struct ds620_data *ret = data;

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
	    || !data->valid) {
		int i;
		int res;

		dev_dbg(&client->dev, "Starting ds620 update\n");

		for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
			res = i2c_smbus_read_word_swapped(client,
							  DS620_REG_TEMP[i]);
			if (res < 0) {
				/* leave cache marked stale; report the error */
				ret = ERR_PTR(res);
				goto abort;
			}

			data->temp[i] = res;
		}

		data->last_updated = jiffies;
		data->valid = 1;
	}
abort:
	mutex_unlock(&data->update_lock);

	return ret;
}
/* sysfs show: convert the raw register word to millidegrees Celsius.
 * (raw / 8) * 625 / 10 == raw * 7.8125, i.e. the register holds the
 * temperature left-justified with 0.0625 degC resolution. */
static ssize_t show_temp(struct device *dev, struct device_attribute *da,
			 char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct ds620_data *data = ds620_update_client(dev);

	if (IS_ERR(data))
		return PTR_ERR(data);

	return sprintf(buf, "%d\n", ((data->temp[attr->index] / 8) * 625) / 10);
}
/* sysfs store for the min/max limits: inverse of the show_temp()
 * conversion (millidegrees -> raw register word), written through to
 * the chip and mirrored into the cache under update_lock.
 * NOTE(review): the value is not clamped to the chip's representable
 * range before conversion — out-of-range input wraps; verify against
 * newer upstream versions of this driver. */
static ssize_t set_temp(struct device *dev, struct device_attribute *da,
			const char *buf, size_t count)
{
	int res;
	long val;

	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct i2c_client *client = to_i2c_client(dev);
	struct ds620_data *data = i2c_get_clientdata(client);

	res = kstrtol(buf, 10, &val);

	if (res)
		return res;

	val = (val * 10 / 625) * 8;

	mutex_lock(&data->update_lock);
	data->temp[attr->index] = val;
	i2c_smbus_write_word_swapped(client, DS620_REG_TEMP[attr->index],
				     data->temp[attr->index]);
	mutex_unlock(&data->update_lock);
	return count;
}
/* sysfs show for the THF/TLF alarm bits (attr->index is the bit mask).
 * Reading an alarm also acknowledges it: the flag is cleared in the
 * configuration register, while the value reported to userspace is the
 * state as it was before clearing. */
static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
			  char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct ds620_data *data = ds620_update_client(dev);
	struct i2c_client *client = to_i2c_client(dev);
	u16 conf, new_conf;
	int res;

	if (IS_ERR(data))
		return PTR_ERR(data);

	/* reset alarms if necessary */
	res = i2c_smbus_read_word_swapped(client, DS620_REG_CONF);
	if (res < 0)
		return res;

	new_conf = conf = res;
	new_conf &= ~attr->index;
	if (conf != new_conf) {
		res = i2c_smbus_write_word_swapped(client, DS620_REG_CONF,
						   new_conf);
		if (res < 0)
			return res;
	}

	return sprintf(buf, "%d\n", !!(conf & attr->index));
}
/* Standard hwmon attributes; the last argument is the index into
 * data->temp[] for temperatures, or the alarm bit mask for alarms. */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp, 1);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp, 2);
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL,
			  DS620_REG_CONFIG_TLF);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL,
			  DS620_REG_CONFIG_THF);

static struct attribute *ds620_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_min.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
	NULL
};

static const struct attribute_group ds620_group = {
	.attrs = ds620_attributes,
};
/*
 * I2C probe: allocate per-client state (devm-managed), initialize the
 * chip, create the sysfs attribute group, and register the hwmon
 * device.  On hwmon registration failure the sysfs group is removed
 * again before returning the error.
 */
static int ds620_probe(struct i2c_client *client,
		       const struct i2c_device_id *id)
{
	struct ds620_data *data;
	int err;

	data = devm_kzalloc(&client->dev, sizeof(struct ds620_data),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	i2c_set_clientdata(client, data);
	mutex_init(&data->update_lock);

	/* Initialize the DS620 chip */
	ds620_init_client(client);

	/* Register sysfs hooks */
	err = sysfs_create_group(&client->dev.kobj, &ds620_group);
	if (err)
		return err;

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto exit_remove_files;
	}

	dev_info(&client->dev, "temperature sensor found\n");

	return 0;

exit_remove_files:
	sysfs_remove_group(&client->dev.kobj, &ds620_group);
	return err;
}
/* I2C remove: tear down in reverse probe order — hwmon device first,
 * then the sysfs group.  The data struct itself is devm-managed. */
static int ds620_remove(struct i2c_client *client)
{
	struct ds620_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &ds620_group);

	return 0;
}
/* Supported device names for I2C core matching. */
static const struct i2c_device_id ds620_id[] = {
	{"ds620", 0},
	{}
};

MODULE_DEVICE_TABLE(i2c, ds620_id);

/* This is the driver that will be inserted */
static struct i2c_driver ds620_driver = {
	.class = I2C_CLASS_HWMON,
	.driver = {
		.name = "ds620",
	},
	.probe = ds620_probe,
	.remove = ds620_remove,
	.id_table = ds620_id,
};

module_i2c_driver(ds620_driver);

MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("DS620 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Hundsbuah/SGP771_SGP712 | drivers/media/usb/gspca/stk014.c | 2539 | 11081 | /*
* Syntek DV4000 (STK014) subdriver
*
* Copyright (C) 2008 Jean-Francois Moine (http://moinejf.free.fr)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "stk014"
#include "gspca.h"
#include "jpeg.h"
MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("Syntek DV4000 (STK014) USB Camera Driver");
MODULE_LICENSE("GPL");
#define QUALITY 50
/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	u8 jpeg_hdr[JPEG_HDR_SZ];	/* per-frame JPEG header, built in sd_start() */
};
/* Supported frame formats; .priv selects the camera mode in sd_start()
 * (1 = 320x240, 0 = 640x480). */
static const struct v4l2_pix_format vga_mode[] = {
	{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 320,
		.sizeimage = 320 * 240 * 3 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.priv = 1},
	{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage = 640 * 480 * 3 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG,
		.priv = 0},
};
/* -- read a register -- */
/* Vendor control-in transfer of one byte from the given register.
 * Returns 0 and latches the error in gspca_dev->usb_err on failure, or
 * if a previous USB error is already pending. */
static u8 reg_r(struct gspca_dev *gspca_dev,
			__u16 index)
{
	struct usb_device *dev = gspca_dev->dev;
	int ret;

	if (gspca_dev->usb_err < 0)
		return 0;
	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
			0x00,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0x00,
			index,
			gspca_dev->usb_buf, 1,
			500);
	if (ret < 0) {
		pr_err("reg_r err %d\n", ret);
		gspca_dev->usb_err = ret;
		return 0;
	}
	return gspca_dev->usb_buf[0];
}
/* -- write a register -- */
/* Vendor control-out transfer writing 'value' to the given register.
 * A failure is latched in gspca_dev->usb_err; subsequent calls are
 * no-ops while an error is pending. */
static void reg_w(struct gspca_dev *gspca_dev,
			__u16 index, __u16 value)
{
	struct usb_device *dev = gspca_dev->dev;
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			0x01,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			value,
			index,
			NULL,
			0,
			500);
	if (ret < 0) {
		pr_err("reg_w err %d\n", ret);
		gspca_dev->usb_err = ret;
	}
}
/* -- get a bulk value (4 bytes) -- */
/* Program the bulk-read address/length registers (0x634..0x63b), issue
 * the command (0x630 = 5), then read 4 bytes from bulk endpoint 5 into
 * usb_buf.  Errors are latched in gspca_dev->usb_err. */
static void rcv_val(struct gspca_dev *gspca_dev,
			int ads)
{
	struct usb_device *dev = gspca_dev->dev;
	int alen, ret;

	reg_w(gspca_dev, 0x634, (ads >> 16) & 0xff);
	reg_w(gspca_dev, 0x635, (ads >> 8) & 0xff);
	reg_w(gspca_dev, 0x636, ads & 0xff);
	reg_w(gspca_dev, 0x637, 0);
	reg_w(gspca_dev, 0x638, 4);	/* len & 0xff */
	reg_w(gspca_dev, 0x639, 0);	/* len >> 8 */
	reg_w(gspca_dev, 0x63a, 0);
	reg_w(gspca_dev, 0x63b, 0);
	reg_w(gspca_dev, 0x630, 5);
	if (gspca_dev->usb_err < 0)
		return;
	ret = usb_bulk_msg(dev,
			usb_rcvbulkpipe(dev, 0x05),
			gspca_dev->usb_buf,
			4,		/* length */
			&alen,
			500);		/* timeout in milliseconds */
	if (ret < 0) {
		pr_err("rcv_val err %d\n", ret);
		gspca_dev->usb_err = ret;
	}
}
/* -- send a bulk value -- */
/* Write a 32-bit big-endian value to address 'ads' through bulk
 * endpoint 6.  The parameter address 0x003f08 is special: it uses a
 * sequence number read from register 0x705, which is advanced by 4
 * (mod 0x40) after a successful transfer. */
static void snd_val(struct gspca_dev *gspca_dev,
			int ads,
			unsigned int val)
{
	struct usb_device *dev = gspca_dev->dev;
	int alen, ret;
	__u8 seq = 0;

	if (ads == 0x003f08) {
		reg_r(gspca_dev, 0x0704);
		seq = reg_r(gspca_dev, 0x0705);
		reg_r(gspca_dev, 0x0650);
		reg_w(gspca_dev, 0x654, seq);
	} else {
		reg_w(gspca_dev, 0x654, (ads >> 16) & 0xff);
	}
	reg_w(gspca_dev, 0x655, (ads >> 8) & 0xff);
	reg_w(gspca_dev, 0x656, ads & 0xff);
	reg_w(gspca_dev, 0x657, 0);
	reg_w(gspca_dev, 0x658, 0x04);	/* size */
	reg_w(gspca_dev, 0x659, 0);
	reg_w(gspca_dev, 0x65a, 0);
	reg_w(gspca_dev, 0x65b, 0);
	reg_w(gspca_dev, 0x650, 5);
	if (gspca_dev->usb_err < 0)
		return;
	/* payload is big-endian */
	gspca_dev->usb_buf[0] = val >> 24;
	gspca_dev->usb_buf[1] = val >> 16;
	gspca_dev->usb_buf[2] = val >> 8;
	gspca_dev->usb_buf[3] = val;
	ret = usb_bulk_msg(dev,
			usb_sndbulkpipe(dev, 6),
			gspca_dev->usb_buf,
			4,
			&alen,
			500);	/* timeout in milliseconds */
	if (ret < 0) {
		pr_err("snd_val err %d\n", ret);
		gspca_dev->usb_err = ret;
	} else {
		if (ads == 0x003f08) {
			seq += 4;
			seq &= 0x3f;
			reg_w(gspca_dev, 0x705, seq);
		}
	}
}
/* set a camera parameter */
/* Thin wrapper: camera parameters all live at address 0x003f08; the
 * parameter id and value are packed into the 32-bit payload. */
static void set_par(struct gspca_dev *gspca_dev,
		   int parval)
{
	snd_val(gspca_dev, 0x003f08, parval);
}
/* Send the brightness control: parameter id 0x06 ("whiteness"), with
 * the control value placed in bits 16..23. */
static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
	set_par(gspca_dev, 0x06000000 + (val << 16));
}
/* Send the contrast control: parameter id 0x07, value in bits 16..23. */
static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
	set_par(gspca_dev, 0x07000000 + (val << 16));
}
/* Send the saturation control: parameter id 0x08, value in bits 16..23. */
static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
	set_par(gspca_dev, 0x08000000 + (val << 16));
}
/* Select the anti-flicker light frequency (val == 1 -> 50 Hz, else 60 Hz). */
static void setlightfreq(struct gspca_dev *gspca_dev, s32 val)
{
	if (val == 1)
		set_par(gspca_dev, 0x33640000);		/* 50 Hz */
	else
		set_par(gspca_dev, 0x33780000);		/* 60 Hz */
}
/* this function is called at probe time */
/* Fill in the supported frame formats for the gspca core. */
static int sd_config(struct gspca_dev *gspca_dev,
			const struct usb_device_id *id)
{
	gspca_dev->cam.cam_mode = vga_mode;
	gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
	return 0;
}
/* this function is called at probe and resume time */
/* Sanity-check that the device responds: register 0x0740 is expected
 * to read back as 0xff; anything else is reported as -EIO. */
static int sd_init(struct gspca_dev *gspca_dev)
{
	u8 ret;

	/* check if the device responds */
	usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1);
	ret = reg_r(gspca_dev, 0x0740);
	if (gspca_dev->usb_err >= 0) {
		if (ret != 0xff) {
			pr_err("init reg: 0x%02x\n", ret);
			gspca_dev->usb_err = -EIO;
		}
	}
	return gspca_dev->usb_err;
}
/* -- start the camera -- */
/* Build the JPEG header, select the camera mode for the requested
 * resolution, switch to the streaming altsetting, run the start-up
 * register/parameter sequence and start the video flow.  The exact
 * order of these writes follows the captured vendor protocol. */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int ret, value;

	/* create the JPEG header */
	jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
			0x22);		/* JPEG 411 */
	jpeg_set_qual(sd->jpeg_hdr, QUALITY);

	/* work on alternate 1 */
	usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1);

	set_par(gspca_dev, 0x10000000);
	set_par(gspca_dev, 0x00000000);
	set_par(gspca_dev, 0x8002e001);
	set_par(gspca_dev, 0x14000000);
	if (gspca_dev->width > 320)
		value = 0x8002e001;		/* 640x480 */
	else
		value = 0x4001f000;		/* 320x240 */
	set_par(gspca_dev, value);
	ret = usb_set_interface(gspca_dev->dev,
					gspca_dev->iface,
					gspca_dev->alt);
	if (ret < 0) {
		pr_err("set intf %d %d failed\n",
			gspca_dev->iface, gspca_dev->alt);
		gspca_dev->usb_err = ret;
		goto out;
	}
	reg_r(gspca_dev, 0x0630);
	rcv_val(gspca_dev, 0x000020);	/* << (value ff ff ff ff) */
	reg_r(gspca_dev, 0x0650);
	snd_val(gspca_dev, 0x000020, 0xffffffff);
	reg_w(gspca_dev, 0x0620, 0);
	reg_w(gspca_dev, 0x0630, 0);
	reg_w(gspca_dev, 0x0640, 0);
	reg_w(gspca_dev, 0x0650, 0);
	reg_w(gspca_dev, 0x0660, 0);
	set_par(gspca_dev, 0x09800000);		/* Red ? */
	set_par(gspca_dev, 0x0a800000);		/* Green ? */
	set_par(gspca_dev, 0x0b800000);		/* Blue ? */
	set_par(gspca_dev, 0x0d030000);		/* Gamma ? */

	/* start the video flow */
	set_par(gspca_dev, 0x01000000);
	set_par(gspca_dev, 0x01000000);
	if (gspca_dev->usb_err >= 0)
		PDEBUG(D_STREAM, "camera started alt: 0x%02x",
				gspca_dev->alt);
out:
	return gspca_dev->usb_err;
}
/* Stop streaming: send the stop parameter (twice, mirroring the start
 * sequence), drop back to altsetting 1 and quiesce the bulk registers. */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	struct usb_device *dev = gspca_dev->dev;

	set_par(gspca_dev, 0x02000000);
	set_par(gspca_dev, 0x02000000);
	usb_set_interface(dev, gspca_dev->iface, 1);
	reg_r(gspca_dev, 0x0630);
	rcv_val(gspca_dev, 0x000020);	/* << (value ff ff ff ff) */
	reg_r(gspca_dev, 0x0650);
	snd_val(gspca_dev, 0x000020, 0xffffffff);
	reg_w(gspca_dev, 0x0620, 0);
	reg_w(gspca_dev, 0x0630, 0);
	reg_w(gspca_dev, 0x0640, 0);
	reg_w(gspca_dev, 0x0650, 0);
	reg_w(gspca_dev, 0x0660, 0);
	PDEBUG(D_STREAM, "camera stopped");
}
/* Isochronous packet parser: the 0xff 0xfe marker opens a new frame,
 * so the previous frame is closed with an EOI (ff d9), the software
 * JPEG header is emitted, and the 12-byte camera header is skipped
 * before the JPEG payload is appended. */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,			/* isoc packet */
			int len)			/* iso packet length */
{
	struct sd *sd = (struct sd *) gspca_dev;
	static unsigned char ffd9[] = {0xff, 0xd9};

	/* a frame starts with:
	 *	- 0xff 0xfe
	 *	- 0x08 0x00	- length (little endian ?!)
	 *	- 4 bytes = size of whole frame (BE - including header)
	 *	- 0x00 0x0c
	 *	- 0xff 0xd8
	 *	- ..	JPEG image with escape sequences (ff 00)
	 *		(without ending - ff d9)
	 */
	if (data[0] == 0xff && data[1] == 0xfe) {
		gspca_frame_add(gspca_dev, LAST_PACKET,
				ffd9, 2);

		/* put the JPEG 411 header */
		gspca_frame_add(gspca_dev, FIRST_PACKET,
			sd->jpeg_hdr, JPEG_HDR_SZ);

		/* beginning of the frame */
#define STKHDRSZ 12
		data += STKHDRSZ;
		len -= STKHDRSZ;
	}
	gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
/* V4L2 control handler: apply a control change to the hardware.
 * Controls set while not streaming are only stored by the control
 * framework and applied later. */
static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);

	gspca_dev->usb_err = 0;

	if (!gspca_dev->streaming)
		return 0;

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		setbrightness(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_CONTRAST:
		setcontrast(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_SATURATION:
		setcolors(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_POWER_LINE_FREQUENCY:
		setlightfreq(gspca_dev, ctrl->val);
		break;
	}
	return gspca_dev->usb_err;
}
/* Control operations shared by all controls created in sd_init_controls(). */
static const struct v4l2_ctrl_ops sd_ctrl_ops = {
	.s_ctrl = sd_s_ctrl,
};
/* Create the V4L2 controls: brightness/contrast/saturation (0..255,
 * default 127) and the power-line frequency menu (default 50 Hz). */
static int sd_init_controls(struct gspca_dev *gspca_dev)
{
	struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

	gspca_dev->vdev.ctrl_handler = hdl;
	v4l2_ctrl_handler_init(hdl, 4);
	v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
	v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_CONTRAST, 0, 255, 1, 127);
	v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_SATURATION, 0, 255, 1, 127);
	v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
			V4L2_CID_POWER_LINE_FREQUENCY,
			V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 1,
			V4L2_CID_POWER_LINE_FREQUENCY_50HZ);

	if (hdl->error) {
		pr_err("Could not initialize controls\n");
		return hdl->error;
	}
	return 0;
}
/* sub-driver description */
/* Callback table handed to the gspca framework. */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.config = sd_config,
	.init = sd_init,
	.init_controls = sd_init_controls,
	.start = sd_start,
	.stopN = sd_stopN,
	.pkt_scan = sd_pkt_scan,
};
/* -- module initialisation -- */
/* USB ids handled by this sub-driver (Syntek DV4000). */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x05e1, 0x0893)},
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
/* Delegate probing to the gspca core with our descriptor and state size. */
static int sd_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
				THIS_MODULE);
}
/* USB driver glue; suspend/resume are provided by the gspca core. */
static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
	.reset_resume = gspca_resume,
#endif
};

module_usb_driver(sd_driver);
| gpl-2.0 |
bergwolf/rhel6 | drivers/scsi/mac_scsi.c | 4331 | 15018 | /*
* Generic Macintosh NCR5380 driver
*
* Copyright 1998, Michael Schmitz <mschmitz@lbl.gov>
*
* derived in part from:
*/
/*
* Generic Generic NCR5380 driver
*
* Copyright 1995, Russell King
*
* ALPHA RELEASE 1.
*
* For more information, please consult
*
* NCR 5380 Family
* SCSI Protocol Controller
* Databook
*
* NCR Microelectronics
* 1635 Aeroplaza Drive
* Colorado Springs, CO 80916
* 1+ (719) 578-3400
* 1+ (800) 334-5454
*/
/*
* $Log: mac_NCR5380.c,v $
*/
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_via.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "mac_scsi.h"
/* These control the behaviour of the generic 5380 core */
#define AUTOSENSE
#define PSEUDO_DMA
#include "NCR5380.h"
#if 0
#define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION)
#else
#define NDEBUG (NDEBUG_ABORT)
#endif
#define RESET_BOOT
#define DRIVER_SETUP
extern void via_scsi_clear(void);
#ifdef RESET_BOOT
static void mac_scsi_reset_boot(struct Scsi_Host *instance);
#endif
/* Command-line overrides (-1 means "not given"; see mac_scsi_setup()). */
static int setup_called = 0;		/* guard: parse mac5380= only once */
static int setup_can_queue = -1;
static int setup_cmd_per_lun = -1;
static int setup_sg_tablesize = -1;
static int setup_use_pdma = -1;
#ifdef SUPPORT_TAGS
static int setup_use_tagged_queuing = -1;
#endif
static int setup_hostid = -1;

/* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms,
 * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more
 * need ten times the standard value... */
#define TOSHIBA_DELAY

#ifdef TOSHIBA_DELAY
#define	AFTER_RESET_DELAY	(5*HZ/2)
#else
#define	AFTER_RESET_DELAY	(HZ/2)
#endif

/* VIA-relative addresses of the 5380 register block and the DRQ/non-DRQ
 * pseudo-DMA windows; assigned per machine model in macscsi_detect(). */
static volatile unsigned char *mac_scsi_regp = NULL;
static volatile unsigned char *mac_scsi_drq  = NULL;
static volatile unsigned char *mac_scsi_nodrq = NULL;
/*
 * NCR 5380 register access functions
 */
/* The 5380 registers are spaced 16 bytes apart in the address window,
 * hence the (reg << 4) offset in both variants below.  The "#if 0"
 * versions additionally toggle the hostdata ctrl word around each
 * access for debugging. */

#if 0
/* Debug versions */
#define CTRL(p,v) (*ctrl = (v))

static char macscsi_read(struct Scsi_Host *instance, int reg)
{
  int iobase = instance->io_port;
  int i;
  int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;

  CTRL(iobase, 0);
  i = in_8(iobase + (reg<<4));
  CTRL(iobase, 0x40);

  return i;
}

static void macscsi_write(struct Scsi_Host *instance, int reg, int value)
{
  int iobase = instance->io_port;
  int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;

  CTRL(iobase, 0);
  out_8(iobase + (reg<<4), value);
  CTRL(iobase, 0x40);
}
#else

/* Fast versions */
static __inline__ char macscsi_read(struct Scsi_Host *instance, int reg)
{
  return in_8(instance->io_port + (reg<<4));
}

static __inline__ void macscsi_write(struct Scsi_Host *instance, int reg, int value)
{
  out_8(instance->io_port + (reg<<4), value);
}
#endif
/*
 * Function : mac_scsi_setup(char *str)
 *
 * Purpose : booter command line initialization of the overrides array,
 *
 * Inputs : str - comma delimited list of options
 *
 */
/* Parses up to six integers from "mac5380=..." into the setup_* globals:
 * can_queue, cmd_per_lun, sg_tablesize, hostid, [use_tags,] use_pdma.
 * A repeated option line or a bad count prints usage and is ignored. */
static int __init mac_scsi_setup(char *str) {
#ifdef DRIVER_SETUP
	int ints[7];
	
	(void)get_options( str, ARRAY_SIZE(ints), ints);
	
	if (setup_called++ || ints[0] < 1 || ints[0] > 6) {
	    printk(KERN_WARNING "scsi: <mac5380>"
		" Usage: mac5380=<can_queue>[,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>,<use_pdma>]\n");
	    printk(KERN_ALERT "scsi: <mac5380> Bad Penguin parameters?\n");
	    return 0;
	}
	    
	if (ints[0] >= 1) {
		if (ints[1] > 0)
			/* no limits on this, just > 0 */
			setup_can_queue = ints[1];
	}
	if (ints[0] >= 2) {
		if (ints[2] > 0)
			setup_cmd_per_lun = ints[2];
	}
	if (ints[0] >= 3) {
		if (ints[3] >= 0) {
			setup_sg_tablesize = ints[3];
			/* Must be <= SG_ALL (255) */
			if (setup_sg_tablesize > SG_ALL)
				setup_sg_tablesize = SG_ALL;
		}
	}
	if (ints[0] >= 4) {
		/* Must be between 0 and 7 */
		if (ints[4] >= 0 && ints[4] <= 7)
			setup_hostid = ints[4];
		else if (ints[4] > 7)
			printk(KERN_WARNING "mac_scsi_setup: invalid host ID %d !\n", ints[4] );
	}
#ifdef SUPPORT_TAGS
	if (ints[0] >= 5) {
		if (ints[5] >= 0)
			setup_use_tagged_queuing = !!ints[5];
	}
	
	if (ints[0] == 6) {
	    if (ints[6] >= 0)
		setup_use_pdma = ints[6];
	}
#else
	if (ints[0] == 5) {
	    if (ints[5] >= 0)
		setup_use_pdma = ints[5];
	}
#endif /* SUPPORT_TAGS */
	
#endif /* DRIVER_SETUP */
	return 1;
}

__setup("mac5380=", mac_scsi_setup);
/*
* If you want to find the instance with (k)gdb ...
*/
#if NDEBUG
static struct Scsi_Host *default_instance;
#endif
/*
* Function : int macscsi_detect(struct scsi_host_template * tpnt)
*
* Purpose : initializes mac NCR5380 driver based on the
* command line / compile time port and irq definitions.
*
* Inputs : tpnt - template for this SCSI adapter.
*
* Returns : 1 if a host adapter was found, 0 if not.
*
*/
int macscsi_detect(struct scsi_host_template * tpnt)
{
static int called = 0;
int flags = 0;
struct Scsi_Host *instance;
if (!MACH_IS_MAC || called)
return( 0 );
if (macintosh_config->scsi_type != MAC_SCSI_OLD)
return( 0 );
/* setup variables */
tpnt->can_queue =
(setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE;
tpnt->cmd_per_lun =
(setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN;
tpnt->sg_tablesize =
(setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE;
if (setup_hostid >= 0)
tpnt->this_id = setup_hostid;
else {
/* use 7 as default */
tpnt->this_id = 7;
}
#ifdef SUPPORT_TAGS
if (setup_use_tagged_queuing < 0)
setup_use_tagged_queuing = USE_TAGGED_QUEUING;
#endif
/* Once we support multiple 5380s (e.g. DuoDock) we'll do
something different here */
instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
#if NDEBUG
default_instance = instance;
#endif
if (macintosh_config->ident == MAC_MODEL_IIFX) {
mac_scsi_regp = via1+0x8000;
mac_scsi_drq = via1+0xE000;
mac_scsi_nodrq = via1+0xC000;
/* The IIFX should be able to do true DMA, but pseudo-dma doesn't work */
flags = FLAG_NO_PSEUDO_DMA;
} else {
mac_scsi_regp = via1+0x10000;
mac_scsi_drq = via1+0x6000;
mac_scsi_nodrq = via1+0x12000;
}
if (! setup_use_pdma)
flags = FLAG_NO_PSEUDO_DMA;
instance->io_port = (unsigned long) mac_scsi_regp;
instance->irq = IRQ_MAC_SCSI;
#ifdef RESET_BOOT
mac_scsi_reset_boot(instance);
#endif
NCR5380_init(instance, flags);
instance->n_io_port = 255;
((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
if (instance->irq != SCSI_IRQ_NONE)
if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW,
"ncr5380", instance)) {
printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = SCSI_IRQ_NONE;
}
printk(KERN_INFO "scsi%d: generic 5380 at port %lX irq", instance->host_no, instance->io_port);
if (instance->irq == SCSI_IRQ_NONE)
printk (KERN_INFO "s disabled");
else
printk (KERN_INFO " %d", instance->irq);
printk(KERN_INFO " options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
instance->can_queue, instance->cmd_per_lun, MACSCSI_PUBLIC_RELEASE);
printk(KERN_INFO "\nscsi%d:", instance->host_no);
NCR5380_print_options(instance);
printk("\n");
called = 1;
return 1;
}
/*
 * Undo macscsi_detect(): release the IRQ (if one was successfully
 * requested) before tearing down the NCR5380 core state.
 */
int macscsi_release (struct Scsi_Host *shpnt)
{
	if (shpnt->irq != SCSI_IRQ_NONE)
		free_irq(shpnt->irq, shpnt);
	NCR5380_exit(shpnt);
	return 0;
}
#ifdef RESET_BOOT
/*
 * Our 'bus reset on boot' function: pulse RST on the SCSI bus once at
 * driver init to bring all targets to a known state.
 */
static void mac_scsi_reset_boot(struct Scsi_Host *instance)
{
	unsigned long end;

	NCR5380_local_declare();
	NCR5380_setup(instance);

	/*
	 * Do a SCSI reset to clean up the bus during initialization. No messing
	 * with the queues, interrupts, or locks necessary here.
	 */
	printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." );

	/* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */
	disable_irq(IRQ_MAC_SCSI);

	/* get in phase */
	NCR5380_write( TARGET_COMMAND_REG,
		      PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));

	/* assert RST */
	NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST );
	/* The min. reset hold time is 25us, so 50us gives a safe margin */
	udelay( 50 );
	/* reset RST and interrupt */
	NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
	NCR5380_read( RESET_PARITY_INTERRUPT_REG );

	/* busy-wait out the post-reset settle time for the targets */
	for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); )
		barrier();

	/* switch on SCSI IRQ again */
	enable_irq(IRQ_MAC_SCSI);

	/* NOTE(review): continuation printk carries KERN_INFO; KERN_CONT
	 * would be correct here -- confirm before changing. */
	printk(KERN_INFO " done\n" );
}
#endif
/* Host adapter description hook: this driver has nothing extra to report. */
const char *macscsi_info(struct Scsi_Host *spnt)
{
	static const char info[] = "";

	return info;
}
/*
   Pseudo-DMA: (Ove Edlund)
   The code attempts to catch bus errors that occur if one for example
   "trips over the cable".
   XXX: Since bus errors in the PDMA routines never happen on my
   computer, the bus error code is untested.
   If the code works as intended, a bus error results in Pseudo-DMA
   being disabled, meaning that the driver switches to slow handshake.
   If bus errors are NOT extremely rare, this has to be changed.
*/

/*
 * CP_IO_TO_MEM(s, d, len): m68k inline-assembly copy of 'len' bytes from
 * the fixed I/O address 's' (not incremented) to memory at 'd'.
 * Transfers are done in 32-byte bursts of longword moves where possible,
 * with byte moves for the unaligned head and tail.  On success 'len' is
 * left at 0; a bus error on any access jumps to the .fixup stub, which
 * sets 'len' to 1 so the caller can detect the failure.
 */
#define CP_IO_TO_MEM(s,d,len)				\
__asm__ __volatile__					\
    ("    cmp.w   #4,%2\n"				\
     "    bls     8f\n"					\
     "    move.w  %1,%%d0\n"				\
     "    neg.b   %%d0\n"				\
     "    and.w   #3,%%d0\n"				\
     "    sub.w   %%d0,%2\n"				\
     "    bra     2f\n"					\
     " 1: move.b (%0),(%1)+\n"				\
     " 2: dbf     %%d0,1b\n"				\
     "    move.w  %2,%%d0\n"				\
     "    lsr.w   #5,%%d0\n"				\
     "    bra     4f\n"					\
     " 3: move.l (%0),(%1)+\n"				\
     "31: move.l (%0),(%1)+\n"				\
     "32: move.l (%0),(%1)+\n"				\
     "33: move.l (%0),(%1)+\n"				\
     "34: move.l (%0),(%1)+\n"				\
     "35: move.l (%0),(%1)+\n"				\
     "36: move.l (%0),(%1)+\n"				\
     "37: move.l (%0),(%1)+\n"				\
     " 4: dbf     %%d0,3b\n"				\
     "    move.w  %2,%%d0\n"				\
     "    lsr.w   #2,%%d0\n"				\
     "    and.w   #7,%%d0\n"				\
     "    bra     6f\n"					\
     " 5: move.l (%0),(%1)+\n"				\
     " 6: dbf     %%d0,5b\n"				\
     "    and.w   #3,%2\n"				\
     "    bra     8f\n"					\
     " 7: move.b (%0),(%1)+\n"				\
     " 8: dbf     %2,7b\n"				\
     "    moveq.l #0, %2\n"				\
     " 9: \n"						\
     ".section .fixup,\"ax\"\n"				\
     "    .even\n"					\
     "90: moveq.l #1, %2\n"				\
     "    jra 9b\n"					\
     ".previous\n"					\
     ".section __ex_table,\"a\"\n"			\
     "   .align 4\n"					\
     "   .long  1b,90b\n"				\
     "   .long  3b,90b\n"				\
     "   .long 31b,90b\n"				\
     "   .long 32b,90b\n"				\
     "   .long 33b,90b\n"				\
     "   .long 34b,90b\n"				\
     "   .long 35b,90b\n"				\
     "   .long 36b,90b\n"				\
     "   .long 37b,90b\n"				\
     "   .long  5b,90b\n"				\
     "   .long  7b,90b\n"				\
     ".previous"					\
     : "=a"(s), "=a"(d), "=d"(len)			\
     : "0"(s), "1"(d), "2"(len)				\
     : "d0")
/*
 * Pseudo-DMA read: transfer 'len' bytes from the 5380 DRQ window into
 * 'dst'.  Returns 0 on success, -1 on phase mismatch or bus error.
 */
static int macscsi_pread (struct Scsi_Host *instance,
			  unsigned char *dst, int len)
{
	unsigned char *d;
	volatile unsigned char *s;

	NCR5380_local_declare();
	NCR5380_setup(instance);

	/* NOTE(review): read side uses DRQ base + 0x60, write side uses the
	 * base address -- presumably per MacOS behaviour; confirm. */
	s = mac_scsi_drq+0x60;
	d = dst;

	/* These conditions are derived from MacOS */
	while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)
	       && !(NCR5380_read(STATUS_REG) & SR_REQ))
		;

	if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)
	    && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
		printk(KERN_ERR "Error in macscsi_pread\n");
		return -1;
	}

	/* CP_IO_TO_MEM leaves len == 0 on success, non-zero on bus error */
	CP_IO_TO_MEM(s, d, len);

	if (len != 0) {
		printk(KERN_NOTICE "Bus error in macscsi_pread\n");
		return -1;
	}

	return 0;
}
/*
 * CP_MEM_TO_IO(s, d, len): mirror of CP_IO_TO_MEM for writes -- copies
 * 'len' bytes from memory at 's' to the fixed I/O address 'd' (not
 * incremented), longword bursts plus byte head/tail.  On success 'len'
 * ends up 0; a bus error takes the .fixup path and sets 'len' to 1.
 */
#define CP_MEM_TO_IO(s,d,len)				\
__asm__ __volatile__					\
    ("    cmp.w   #4,%2\n"				\
     "    bls     8f\n"					\
     "    move.w  %0,%%d0\n"				\
     "    neg.b   %%d0\n"				\
     "    and.w   #3,%%d0\n"				\
     "    sub.w   %%d0,%2\n"				\
     "    bra     2f\n"					\
     " 1: move.b (%0)+,(%1)\n"				\
     " 2: dbf     %%d0,1b\n"				\
     "    move.w  %2,%%d0\n"				\
     "    lsr.w   #5,%%d0\n"				\
     "    bra     4f\n"					\
     " 3: move.l (%0)+,(%1)\n"				\
     "31: move.l (%0)+,(%1)\n"				\
     "32: move.l (%0)+,(%1)\n"				\
     "33: move.l (%0)+,(%1)\n"				\
     "34: move.l (%0)+,(%1)\n"				\
     "35: move.l (%0)+,(%1)\n"				\
     "36: move.l (%0)+,(%1)\n"				\
     "37: move.l (%0)+,(%1)\n"				\
     " 4: dbf     %%d0,3b\n"				\
     "    move.w  %2,%%d0\n"				\
     "    lsr.w   #2,%%d0\n"				\
     "    and.w   #7,%%d0\n"				\
     "    bra     6f\n"					\
     " 5: move.l (%0)+,(%1)\n"				\
     " 6: dbf     %%d0,5b\n"				\
     "    and.w   #3,%2\n"				\
     "    bra     8f\n"					\
     " 7: move.b (%0)+,(%1)\n"				\
     " 8: dbf     %2,7b\n"				\
     "    moveq.l #0, %2\n"				\
     " 9: \n"						\
     ".section .fixup,\"ax\"\n"				\
     "    .even\n"					\
     "90: moveq.l #1, %2\n"				\
     "    jra 9b\n"					\
     ".previous\n"					\
     ".section __ex_table,\"a\"\n"			\
     "   .align 4\n"					\
     "   .long  1b,90b\n"				\
     "   .long  3b,90b\n"				\
     "   .long 31b,90b\n"				\
     "   .long 32b,90b\n"				\
     "   .long 33b,90b\n"				\
     "   .long 34b,90b\n"				\
     "   .long 35b,90b\n"				\
     "   .long 36b,90b\n"				\
     "   .long 37b,90b\n"				\
     "   .long  5b,90b\n"				\
     "   .long  7b,90b\n"				\
     ".previous"					\
     : "=a"(s), "=a"(d), "=d"(len)			\
     : "0"(s), "1"(d), "2"(len)				\
     : "d0")
/*
 * Pseudo-DMA write: transfer 'len' bytes from 'src' to the 5380 DRQ
 * window.  Returns 0 on success, -1 on lost DRQ or bus error.
 */
static int macscsi_pwrite (struct Scsi_Host *instance,
			   unsigned char *src, int len)
{
	unsigned char *s;
	volatile unsigned char *d;

	NCR5380_local_declare();
	NCR5380_setup(instance);

	s = src;
	d = mac_scsi_drq;

	/* These conditions are derived from MacOS */
	while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)
	       && (!(NCR5380_read(STATUS_REG) & SR_REQ)
		   || (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)))
		;

	if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) {
		printk(KERN_ERR "Error in macscsi_pwrite\n");
		return -1;
	}

	/* CP_MEM_TO_IO leaves len == 0 on success, non-zero on bus error */
	CP_MEM_TO_IO(s, d, len);

	if (len != 0) {
		printk(KERN_NOTICE "Bus error in macscsi_pwrite\n");
		return -1;
	}

	return 0;
}
#include "NCR5380.c"
/*
 * SCSI host template for this driver; consumed by the generic
 * "scsi_module.c" registration glue included below.  The can_queue,
 * this_id, sg_tablesize and cmd_per_lun defaults here may be overridden
 * in macscsi_detect() from the mac5380= command-line options.
 */
static struct scsi_host_template driver_template = {
	.proc_name			= "Mac5380",
	.proc_info			= macscsi_proc_info,
	.name				= "Macintosh NCR5380 SCSI",
	.detect				= macscsi_detect,
	.release			= macscsi_release,
	.info				= macscsi_info,
	.queuecommand			= macscsi_queue_command,
	.eh_abort_handler		= macscsi_abort,
	.eh_bus_reset_handler		= macscsi_bus_reset,
	.can_queue			= CAN_QUEUE,
	.this_id			= 7,
	.sg_tablesize			= SG_ALL,
	.cmd_per_lun			= CMD_PER_LUN,
	.use_clustering			= DISABLE_CLUSTERING
};
#include "scsi_module.c"
| gpl-2.0 |
MattCrystal/shiny-octo-happiness | drivers/scsi/bfa/bfa_ioc.c | 4843 | 138699 | /*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
BFA_TRC_FILE(CNA, IOC);
/*
* IOC local definitions
*/
#define BFA_IOC_TOV 3000 /* msecs */
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV BFA_TIMER_FREQ
#define bfa_ioc_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
#define bfa_hb_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
/*
* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
*/
#define bfa_ioc_firmware_lock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
readl((__ioc)->ioc_regs.hfn_mbox_cmd))
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
/*
* forward declarations
*/
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/*
* IOC state machine definitions/declarations
*/
enum ioc_event {
IOC_E_RESET = 1, /* IOC reset request */
IOC_E_ENABLE = 2, /* IOC enable request */
IOC_E_DISABLE = 3, /* IOC disable request */
IOC_E_DETACH = 4, /* driver detach cleanup */
IOC_E_ENABLED = 5, /* f/w enabled */
IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
IOC_E_DISABLED = 7, /* f/w disabled */
IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
IOC_E_HBFAIL = 9, /* heartbeat failure */
IOC_E_HWERROR = 10, /* hardware error interrupt */
IOC_E_TIMEOUT = 11, /* timeout */
IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
/*
 * Maps each IOC state-machine handler to the externally visible
 * bfa_ioc_state enum value it represents.
 */
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
/*
* IOCPF state machine definitions/declarations
*/
#define bfa_iocpf_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
#define bfa_iocpf_poll_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
#define bfa_sem_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
/*
* Forward declareations for iocpf state machine
*/
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);
/*
* IOCPF state machine events
*/
enum iocpf_event {
IOCPF_E_ENABLE = 1, /* IOCPF enable request */
IOCPF_E_DISABLE = 2, /* IOCPF disable request */
IOCPF_E_STOP = 3, /* stop on driver detach */
IOCPF_E_FWREADY = 4, /* f/w initialization done */
IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
};
/*
* IOCPF states
*/
enum bfa_iocpf_state {
BFA_IOCPF_RESET = 1, /* IOC is in reset state */
BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */
BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */
BFA_IOCPF_READY = 4, /* IOCPF is initialized */
BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */
BFA_IOCPF_FAIL = 6, /* IOCPF failed */
BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */
BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */
BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
/*
 * Maps each IOCPF state-machine handler to the externally visible
 * bfa_iocpf_state enum value it represents.  Note that several internal
 * states (e.g. fwcheck/mismatch, the *_sync variants) map to the same
 * external state.
 */
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/*
* IOC State Machine
*/
/*
 * Beginning state. IOC uninit state.  No entry action required.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}
/*
 * IOC is in uninit state: the only legal event is a reset request.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	if (event == IOC_E_RESET)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
	else
		bfa_sm_fault(ioc, event);
}
/*
 * Reset entry actions -- initialize state machine.  Also resets the
 * IOCPF sub-state machine.
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
/*
 * IOC is in reset state: accept enable, disable and detach requests.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	if (event == IOC_E_ENABLE)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
	else if (event == IOC_E_DISABLE)
		bfa_ioc_disable_comp(ioc);	/* nothing running; ack at once */
	else if (event == IOC_E_DETACH)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	else
		bfa_sm_fault(ioc, event);
}
/* Entry action: kick the IOCPF sub-state machine to start enabling. */
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		/* PFFAILED came from iocpf; don't echo the failure back */
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		/* already enabling; ignore */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action: arm the IOC timer and send the getattr mailbox request. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}
/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		/* PFFAILED originated in iocpf; don't echo it back */
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		/* already enabled; ignore */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Entry action for the operational state: complete the enable callback,
 * notify registered listeners, start heartbeat monitoring and log/AEN
 * the enable.
 */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}
/*
 * IOC is operational: heartbeat is being monitored.  Failures either
 * retry (auto_recover) or park in the fail state.
 */
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* already enabled; ignore */
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		/* PFFAILED originated in iocpf; don't echo it back */
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Entry action: forward the disable to the IOCPF sub-state machine and
 * log/AEN the disable.
 */
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}
/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC disable completion entry: run the disable-completion notification.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}
/*
 * IOC is disabled: may be re-enabled, re-disabled (just ack the caller)
 * or detached.
 */
static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	if (event == IOC_E_ENABLE) {
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
	} else if (event == IOC_E_DISABLE) {
		ioc->cbfn->disable_cbfn(ioc->bfa);
	} else if (event == IOC_E_DETACH) {
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
	} else {
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action: trace only; the IOCPF sub-FSM drives the actual retry. */
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}
/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		/* PFFAILED originated in iocpf; don't echo it back */
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		/* retry already in progress; ignore */
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action: trace only; failure was already notified on transition. */
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}
/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* cannot enable a failed IOC; report failure immediately */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action: trace only. */
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}
/*
 * Unrecoverable hardware failure (e.g. PCI mapping / semaphore error):
 * enable fails immediately, disable just acks, detach resets the FSM.
 */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	if (event == IOC_E_ENABLE)
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	else if (event == IOC_E_DISABLE)
		ioc->cbfn->disable_cbfn(ioc->bfa);
	else if (event == IOC_E_DETACH)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	else
		bfa_sm_fault(ioc, event);
}
/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine.  Clears the
 * fw-mismatch "already notified" latch and snapshots the global
 * auto-recover policy.
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}
/*
 * Beginning state: IOCPF is in reset.  An enable request moves on to the
 * firmware version check; a stop request is silently absorbed.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	if (event == IOCPF_E_ENABLE)
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
	else if (event != IOCPF_E_STOP)
		bfa_sm_fault(ioc, event);
}
/*
 * Semaphore should be acquired for version check.
 *
 * Entry action: serialize on the init semaphore, and if the running
 * firmware is neither uninitialized nor a normal boot image, scrub the
 * firmware header from shared memory and force both port fwstates back
 * to BFI_IOC_UNINIT before requesting the h/w semaphore.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, pgoff, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
	if (fwstate == BFI_IOC_UNINIT) {
		/* nothing to clean up; drop init sem and go get h/w sem */
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		/* normal boot image already running; leave it alone */
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				/* other function still syncing: release sem
				 * and retry after the sem timer fires */
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			/* version mismatch with running firmware */
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	/* re-check the firmware version when the timer fires */
	bfa_iocpf_timer_start(iocpf->ioc);
}
/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		/* periodic re-check of the running firmware version */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Awaiting semaphore for h/w initialzation.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			/* peers not done syncing: release sem, retry on timer */
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action: reset the fwinit poll counter and start h/w init. */
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}
/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		/* firmware never came ready: release sem and report failure */
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Entry action: start the response timer and send the firmware IOC
 * ENABLE mailbox command.
 */
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);

	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}
/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		/* on INITFAIL the ioc sm already knows; only report timeouts */
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action: tell the parent IOC state machine that enable completed. */
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}
/*
 * IOCPF is up: route disable and the two failure notifications to their
 * respective exit states.
 */
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	if (event == IOCPF_E_DISABLE)
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
	else if (event == IOCPF_E_GETATTRFAIL)
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
	else if (event == IOCPF_E_FAIL)
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
	else
		bfa_sm_fault(ioc, event);
}
/* Entry action for "disabling": arm timer and send DISABLE to firmware. */
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
    bfa_iocpf_timer_start(iocpf->ioc);
    bfa_ioc_send_disable(iocpf->ioc);
}
/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
    struct bfa_ioc_s *ioc = iocpf->ioc;

    bfa_trc(ioc, event);

    switch (event) {
    case IOCPF_E_FWRSP_DISABLE:
        bfa_iocpf_timer_stop(ioc);
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
        break;

    case IOCPF_E_FAIL:
        bfa_iocpf_timer_stop(ioc);
        /*
         * !!! fall through !!!
         */
    case IOCPF_E_TIMEOUT:
        /* firmware never answered: force the fwstate to FAIL */
        writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
        break;

    case IOCPF_E_FWRSP_ENABLE:
        /* stale enable response -- ignore */
        break;

    default:
        bfa_sm_fault(ioc, event);
    }
}
/* Entry action: start acquiring the h/w semaphore for the disable sync. */
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
    bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
    struct bfa_ioc_s *ioc = iocpf->ioc;

    bfa_trc(ioc, event);

    switch (event) {
    case IOCPF_E_SEMLOCKED:
        /* leave the sync group first, then release the semaphore */
        bfa_ioc_sync_leave(ioc);
        writel(1, ioc->ioc_regs.ioc_sem_reg);
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
        break;

    case IOCPF_E_SEM_ERROR:
        /* semaphore register read back all-ones: h/w is gone */
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
        bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
        break;

    case IOCPF_E_FAIL:
        /* already on the way down; nothing more to do */
        break;

    default:
        bfa_sm_fault(ioc, event);
    }
}
/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
    /* drop any queued mailbox commands before reporting DISABLED */
    bfa_ioc_mbox_flush(iocpf->ioc);
    bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}
/*
 * IOC PF is disabled; wait for a re-enable or a final stop.
 */
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
    struct bfa_ioc_s *ioc = iocpf->ioc;

    bfa_trc(ioc, event);

    if (event == IOCPF_E_ENABLE) {
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
    } else if (event == IOCPF_E_STOP) {
        bfa_ioc_firmware_unlock(ioc);
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
    } else {
        bfa_sm_fault(ioc, event);
    }
}
/* Entry action: save the firmware trace, then grab the h/w semaphore. */
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
    bfa_ioc_debug_save_ftrc(iocpf->ioc);
    bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
    struct bfa_ioc_s *ioc = iocpf->ioc;

    bfa_trc(ioc, event);

    switch (event) {
    case IOCPF_E_SEMLOCKED:
        /* mark failure, leave sync group, then release the semaphore */
        bfa_ioc_notify_fail(ioc);
        bfa_ioc_sync_leave(ioc);
        writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
        writel(1, ioc->ioc_regs.ioc_sem_reg);
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
        break;

    case IOCPF_E_SEM_ERROR:
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
        bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
        break;

    case IOCPF_E_DISABLE:
        /* still waiting for the semaphore; stop the retry timer */
        bfa_sem_timer_stop(ioc);
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
        break;

    case IOCPF_E_STOP:
        bfa_sem_timer_stop(ioc);
        bfa_ioc_firmware_unlock(ioc);
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
        break;

    case IOCPF_E_FAIL:
        /* already failed; ignore */
        break;

    default:
        bfa_sm_fault(ioc, event);
    }
}
/* Entry action for "initfail": just leave a trace marker. */
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
    bfa_trc(iocpf->ioc, 0);
}
/*
 * Hardware initialization failed; wait for a disable or a final stop.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
    struct bfa_ioc_s *ioc = iocpf->ioc;

    bfa_trc(ioc, event);

    if (event == IOCPF_E_DISABLE) {
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
    } else if (event == IOCPF_E_STOP) {
        bfa_ioc_firmware_unlock(ioc);
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
    } else {
        bfa_sm_fault(ioc, event);
    }
}
/* Entry action: stop firmware, flush the mailbox, acquire the semaphore. */
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
    /*
     * Mark IOC as failed in hardware and stop firmware.
     */
    bfa_ioc_lpu_stop(iocpf->ioc);

    /*
     * Flush any queued up mailbox requests.
     */
    bfa_ioc_mbox_flush(iocpf->ioc);

    bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * IOC has failed; synchronize the failure with the other functions
 * sharing the chip, then either park in the fail state or auto-recover.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
    struct bfa_ioc_s *ioc = iocpf->ioc;

    bfa_trc(ioc, event);

    switch (event) {
    case IOCPF_E_SEMLOCKED:
        bfa_ioc_sync_ack(ioc);
        bfa_ioc_notify_fail(ioc);
        if (!iocpf->auto_recover) {
            /* no recovery: park in fail state and release semaphore */
            bfa_ioc_sync_leave(ioc);
            writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
            writel(1, ioc->ioc_regs.ioc_sem_reg);
            bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
        } else {
            /* re-init once every function has acked the failure */
            if (bfa_ioc_sync_complete(ioc))
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
            else {
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
            }
        }
        break;

    case IOCPF_E_SEM_ERROR:
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
        bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
        break;

    case IOCPF_E_DISABLE:
        bfa_sem_timer_stop(ioc);
        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
        break;

    case IOCPF_E_FAIL:
        break;

    default:
        bfa_sm_fault(ioc, event);
    }
}
/* Entry action for "fail": just leave a trace marker. */
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
    bfa_trc(iocpf->ioc, 0);
}
/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
    struct bfa_ioc_s *ioc = iocpf->ioc;

    bfa_trc(ioc, event);

    /* the only legal event here is a disable request */
    if (event != IOCPF_E_DISABLE) {
        bfa_sm_fault(ioc, event);
        return;
    }
    bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
}
/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
    struct list_head *entry;
    struct bfa_ioc_notify_s *cb;

    /* walk the registration queue and invoke every callback */
    list_for_each(entry, &ioc->notify_q) {
        cb = (struct bfa_ioc_notify_s *)entry;
        cb->cbfn(cb->cbarg, event);
    }
}
/* Disable completed: run the driver callback and notify registrants. */
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
    ioc->cbfn->disable_cbfn(ioc->bfa);
    bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
/*
 * Spin-acquire a h/w semaphore register (bit 0 clear == acquired).
 * Gives up after BFA_SEM_SPINCNT attempts; returns BFA_TRUE on success.
 */
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
#define BFA_SEM_SPINCNT 3000
    int spins;
    u32 val = readl(sem_reg);

    for (spins = 0; (val & 1) && (spins < BFA_SEM_SPINCNT); spins++) {
        udelay(2);
        val = readl(sem_reg);
    }

    return (val & 1) ? BFA_FALSE : BFA_TRUE;
}
/*
 * Try to acquire the IOC h/w semaphore.  Sends IOCPF_E_SEMLOCKED on
 * success, IOCPF_E_SEM_ERROR when the register reads back all-ones
 * (dead/removed device), otherwise schedules a retry via the sem timer.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
    u32 r32;

    /*
     * First read to the semaphore register will return 0, subsequent reads
     * will return 1. Semaphore is released by writing 1 to the register
     */
    r32 = readl(ioc->ioc_regs.ioc_sem_reg);
    if (r32 == ~0) {
        WARN_ON(r32 == ~0);
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
        return;
    }
    if (!(r32 & 1)) {
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
        return;
    }

    /* semaphore busy: retry when the timer fires */
    bfa_sem_timer_start(ioc);
}
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
    u32 pss_ctl;
    int i;
#define PSS_LMEM_INIT_TIME 10000

    pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
    pss_ctl &= ~__PSS_LMEM_RESET;
    pss_ctl |= __PSS_LMEM_INIT_EN;

    /*
     * i2c workaround 12.5khz clock
     */
    pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
    writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

    /*
     * wait for memory initialization to be complete
     */
    i = 0;
    do {
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        i++;
    } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

    /*
     * If memory initialization is not successful, IOC timeout will catch
     * such failures.
     */
    WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
    bfa_trc(ioc, pss_ctl);

    /* clear the init-enable/done bits now that init has finished */
    pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
    writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/* Release LPU 0 from reset so the downloaded firmware starts executing. */
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
    u32 pss_ctl;

    /*
     * Take processor out of reset.
     */
    pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
    pss_ctl &= ~__PSS_LPU0_RESET;
    writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/* Hold both LPUs in reset, halting firmware execution. */
static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
    u32 pss_ctl;

    /*
     * Put processors in reset.
     */
    pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
    pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
    writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
 * Read the firmware image header currently resident in shared memory
 * (SMEM) so the caller can compare versions/signatures.
 *
 * @param[in]  ioc    IOC instance
 * @param[out] fwhdr  receives the image header words read from SMEM
 *
 * Fix: the original declared and assigned a local 'pgoff' that was
 * never used; the dead local is removed.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
    u32 pgnum;
    u32 loff = 0;
    int i;
    u32 *fwsig = (u32 *) fwhdr;

    /* select the SMEM page containing the image header */
    pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
    writel(pgnum, ioc->ioc_regs.host_page_num_fn);

    /* copy the header out word by word */
    for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
         i++) {
        fwsig[i] =
            bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
        loff += sizeof(u32);
    }
}
/*
 * Compare the given firmware header's MD5 signature byte-by-byte
 * against the driver's bundled image.  Returns BFA_TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
    struct bfi_ioc_image_hdr_s *drv_hdr;
    int idx = 0;

    drv_hdr = (struct bfi_ioc_image_hdr_s *)
        bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

    while (idx < BFI_IOC_MD5SUM_SZ) {
        if (fwhdr->md5sum[idx] != drv_hdr->md5sum[idx]) {
            /* trace the first mismatching byte and both values */
            bfa_trc(ioc, idx);
            bfa_trc(ioc, fwhdr->md5sum[idx]);
            bfa_trc(ioc, drv_hdr->md5sum[idx]);
            return BFA_FALSE;
        }
        idx++;
    }

    bfa_trc(ioc, fwhdr->md5sum[0]);
    return BFA_TRUE;
}
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
    struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

    bfa_ioc_fwver_get(ioc, &fwhdr);
    drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
        bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

    if (fwhdr.signature != drv_fwhdr->signature) {
        bfa_trc(ioc, fwhdr.signature);
        bfa_trc(ioc, drv_fwhdr->signature);
        return BFA_FALSE;
    }

    /* bootenv in the header is byte-swapped relative to host order */
    if (swab32(fwhdr.bootenv) != boot_env) {
        bfa_trc(ioc, fwhdr.bootenv);
        bfa_trc(ioc, boot_env);
        return BFA_FALSE;
    }

    /* finally require an exact MD5 signature match */
    return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
    u32 r32;

    r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
    if (r32)
        /* ack/clear the stale mailbox event */
        writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
/*
 * Bring the IOC hardware to an initialized state, reusing an already
 * valid running firmware image where possible.
 *
 * @param[in] ioc    IOC instance
 * @param[in] force  when true, treat firmware state as UNINIT and reboot
 */
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
    enum bfi_ioc_state ioc_fwstate;
    bfa_boolean_t fwvalid;
    u32 boot_type;
    u32 boot_env;

    ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

    if (force)
        ioc_fwstate = BFI_IOC_UNINIT;

    bfa_trc(ioc, ioc_fwstate);

    boot_type = BFI_FWBOOT_TYPE_NORMAL;
    boot_env = BFI_FWBOOT_ENV_OS;

    /*
     * check if firmware is valid
     */
    fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
        BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

    if (!fwvalid) {
        /* no usable firmware: download and boot a fresh image */
        bfa_ioc_boot(ioc, boot_type, boot_env);
        bfa_ioc_poll_fwinit(ioc);
        return;
    }

    /*
     * If hardware initialization is in progress (initialized by other IOC),
     * just wait for an initialization completion interrupt.
     */
    if (ioc_fwstate == BFI_IOC_INITING) {
        bfa_ioc_poll_fwinit(ioc);
        return;
    }

    /*
     * If IOC function is disabled and firmware version is same,
     * just re-enable IOC.
     *
     * If option rom, IOC must not be in operational state. With
     * convergence, IOC will be in operational state when 2nd driver
     * is loaded.
     */
    if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
        /*
         * When using MSI-X any pending firmware ready event should
         * be flushed. Otherwise MSI-X interrupts are not delivered.
         */
        bfa_ioc_msgflush(ioc);
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
        return;
    }

    /*
     * Initialize the h/w for any other states.
     */
    bfa_ioc_boot(ioc, boot_type, boot_env);
    bfa_ioc_poll_fwinit(ioc);
}
/* Timer callback: forward a timeout event into the IOC state machine. */
static void
bfa_ioc_timeout(void *ioc_arg)
{
    struct bfa_ioc_s *ioc = ioc_arg;

    bfa_trc(ioc, 0);
    bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
/*
 * Write a host-to-firmware message into the h/w mailbox and ring the
 * doorbell.  The caller must ensure the mailbox is free.
 *
 * @param[in] ioc      IOC instance
 * @param[in] ioc_msg  message buffer (at most BFI_IOC_MSGLEN_MAX bytes)
 * @param[in] len      message length in bytes
 */
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
    u32 *msgp = (u32 *) ioc_msg;
    u32 i;

    bfa_trc(ioc, msgp[0]);
    bfa_trc(ioc, len);

    WARN_ON(len > BFI_IOC_MSGLEN_MAX);

    /*
     * first write msg to mailbox registers
     */
    for (i = 0; i < len / sizeof(u32); i++)
        writel(cpu_to_le32(msgp[i]),
            ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

    /* zero-fill the remainder of the mailbox area */
    for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
        writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

    /*
     * write 1 to mailbox CMD to trigger LPU event
     */
    writel(1, ioc->ioc_regs.hfn_mbox_cmd);
    (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
/*
 * Build and send the IOC ENABLE mailbox request to firmware, carrying
 * the PCI function class code and the current time.
 */
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
    struct bfi_ioc_ctrl_req_s enable_req;
    struct timeval tv;

    bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
            bfa_ioc_portid(ioc));
    enable_req.clscode = cpu_to_be16(ioc->clscode);
    do_gettimeofday(&tv);
    /*
     * Host-to-device conversion: the field is sent to firmware in
     * big-endian order, so the correct helper is cpu_to_be32().  The
     * original used be32_to_cpu(), which performs the identical byte
     * swap but has the wrong direction semantically (and trips sparse
     * endianness checking).
     */
    enable_req.tv_sec = cpu_to_be32(tv.tv_sec);
    bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
/* Build and send the IOC DISABLE mailbox request to firmware. */
static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
    struct bfi_ioc_ctrl_req_s disable_req;

    bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
            bfa_ioc_portid(ioc));
    bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
/*
 * Ask firmware to DMA its attribute block into the host buffer
 * previously claimed via bfa_ioc_mem_claim().
 */
static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
    struct bfi_ioc_getattr_req_s attr_req;

    bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
            bfa_ioc_portid(ioc));
    bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
    bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
/*
 * Heartbeat timer callback: recover the IOC if the firmware heartbeat
 * counter has not advanced since the previous sample; otherwise poll
 * the mailbox and re-arm the timer.
 */
static void
bfa_ioc_hb_check(void *cbarg)
{
    struct bfa_ioc_s *ioc = cbarg;
    u32 beats;

    beats = readl(ioc->ioc_regs.heartbeat);
    if (ioc->hb_count == beats) {
        /* no progress since last sample -- firmware is stuck */
        bfa_ioc_recover(ioc);
        return;
    }
    ioc->hb_count = beats;

    bfa_ioc_mbox_poll(ioc);
    bfa_hb_timer_start(ioc);
}
/* Seed the heartbeat counter from h/w and start heartbeat monitoring. */
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
    ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
    bfa_hb_timer_start(ioc);
}
/*
 * Initiate a full firmware download into SMEM, then record the boot
 * type, device mode and boot environment words at their fixed offsets.
 *
 * @param[in] ioc        IOC instance
 * @param[in] boot_type  normal vs memtest boot
 * @param[in] boot_env   host environment (OS) identifier
 *
 * Fixes: the image size was re-queried via bfa_cb_image_get_size() on
 * every loop iteration although it is loop-invariant (now hoisted), and
 * the local 'pgoff' was assigned but never used (now removed).
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
            u32 boot_env)
{
    u32 *fwimg;
    u32 fwimg_size;
    u32 pgnum;
    u32 loff = 0;
    u32 chunkno = 0;
    u32 i;
    u32 asicmode;

    fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
    bfa_trc(ioc, fwimg_size);

    fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

    pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
    writel(pgnum, ioc->ioc_regs.host_page_num_fn);

    for (i = 0; i < fwimg_size; i++) {
        /* fetch the next image chunk when we cross a chunk boundary */
        if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
            chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
            fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
                    BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
        }

        /*
         * write smem
         */
        bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
                  fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

        loff += sizeof(u32);

        /*
         * handle page offset wrap around
         */
        loff = PSS_SMEM_PGOFF(loff);
        if (loff == 0) {
            pgnum++;
            writel(pgnum, ioc->ioc_regs.host_page_num_fn);
        }
    }

    /* restore the page register to page 0 */
    writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
           ioc->ioc_regs.host_page_num_fn);

    /*
     * Set boot type and device mode at the end.
     */
    asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
                ioc->port0_mode, ioc->port1_mode);
    bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
            swab32(asicmode));
    bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
            swab32(boot_type));
    bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
            swab32(boot_env));
}
/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
    struct bfi_ioc_attr_s *attr = ioc->attr;

    /* attribute block arrives big-endian; convert in place */
    attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
    attr->card_type = be32_to_cpu(attr->card_type);
    attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
    ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);

    bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/*
 * Attach time initialization of mbox logic: empty the command queue
 * and reset every message-class handler slot.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
    struct bfa_ioc_mbox_mod_s *mbox = &ioc->mbox_mod;
    int class_id = 0;

    INIT_LIST_HEAD(&mbox->cmd_q);
    while (class_id < BFI_MC_MAX) {
        mbox->mbhdlr[class_id].cbfn = NULL;
        mbox->mbhdlr[class_id].cbarg = ioc->bfa;
        class_id++;
    }
}
/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
    struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
    struct bfa_mbox_cmd_s *cmd;
    u32 stat;

    /*
     * If no command pending, do nothing
     */
    if (list_empty(&mod->cmd_q))
        return;

    /*
     * If previous command is not yet fetched by firmware, do nothing
     */
    stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
    if (stat)
        return;

    /*
     * Enqueue command to firmware.
     */
    bfa_q_deq(&mod->cmd_q, &cmd);
    bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/*
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
{
    struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
    struct bfa_mbox_cmd_s *cmd;

    /* dequeue every queued command without sending it */
    while (!list_empty(&mod->cmd_q))
        bfa_q_deq(&mod->cmd_q, &cmd);
}
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 *
 * @return BFA_STATUS_OK on success, BFA_STATUS_FAILED if the init
 * semaphore could not be acquired.
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
    u32 pgnum, loff;
    __be32 r32;
    int i, len;
    u32 *buf = tbuf;

    pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
    loff = PSS_SMEM_PGOFF(soff);
    bfa_trc(ioc, pgnum);
    bfa_trc(ioc, loff);
    bfa_trc(ioc, sz);

    /*
     * Hold semaphore to serialize pll init and fwtrc.
     */
    if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
        bfa_trc(ioc, 0);
        return BFA_STATUS_FAILED;
    }

    writel(pgnum, ioc->ioc_regs.host_page_num_fn);

    len = sz/sizeof(u32);
    bfa_trc(ioc, len);
    for (i = 0; i < len; i++) {
        /* SMEM words are big-endian; convert to host order */
        r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
        buf[i] = be32_to_cpu(r32);
        loff += sizeof(u32);

        /*
         * handle page offset wrap around
         */
        loff = PSS_SMEM_PGOFF(loff);
        if (loff == 0) {
            pgnum++;
            writel(pgnum, ioc->ioc_regs.host_page_num_fn);
        }
    }

    /* restore the page register to page 0 */
    writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
           ioc->ioc_regs.host_page_num_fn);

    /*
     * release semaphore.
     */
    readl(ioc->ioc_regs.ioc_init_sem_reg);
    writel(1, ioc->ioc_regs.ioc_init_sem_reg);

    bfa_trc(ioc, pgnum);
    return BFA_STATUS_OK;
}
/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 *
 * @return BFA_STATUS_OK on success, BFA_STATUS_FAILED if the init
 * semaphore could not be acquired.
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
    int i, len;
    u32 pgnum, loff;

    pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
    loff = PSS_SMEM_PGOFF(soff);
    bfa_trc(ioc, pgnum);
    bfa_trc(ioc, loff);
    bfa_trc(ioc, sz);

    /*
     * Hold semaphore to serialize pll init and fwtrc.
     */
    if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
        bfa_trc(ioc, 0);
        return BFA_STATUS_FAILED;
    }

    writel(pgnum, ioc->ioc_regs.host_page_num_fn);

    len = sz/sizeof(u32); /* len in words */
    bfa_trc(ioc, len);
    for (i = 0; i < len; i++) {
        bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
        loff += sizeof(u32);

        /*
         * handle page offset wrap around
         */
        loff = PSS_SMEM_PGOFF(loff);
        if (loff == 0) {
            pgnum++;
            writel(pgnum, ioc->ioc_regs.host_page_num_fn);
        }
    }

    /* restore the page register to page 0 */
    writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
           ioc->ioc_regs.host_page_num_fn);

    /*
     * release semaphore.
     */
    readl(ioc->ioc_regs.ioc_init_sem_reg);
    writel(1, ioc->ioc_regs.ioc_init_sem_reg);
    bfa_trc(ioc, pgnum);
    return BFA_STATUS_OK;
}
/*
 * Heartbeat failure: notify the driver and registered modules, save the
 * firmware trace, log and raise an AEN.
 */
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
    struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

    /*
     * Notify driver and common modules registered for notification.
     */
    ioc->cbfn->hbfail_cbfn(ioc->bfa);
    bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
    bfa_ioc_debug_save_ftrc(ioc);

    BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
        "Heart Beat of IOC has failed\n");
    bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
}
/*
 * Running firmware does not match the driver's bundled image: fail the
 * pending enable, log, and raise an AEN.
 */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
    struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

    /*
     * Provide enable completion callback.
     */
    ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
    BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
        "Running firmware version is incompatible "
        "with the driver version\n");
    bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
}
/*
 * Initialize the chip PLL and LPU local memory under the init semaphore.
 *
 * NOTE(review): the result of bfa_ioc_pll_init_asic() is not checked
 * here and BFA_STATUS_OK is returned unconditionally -- confirm whether
 * the asic-specific init can fail in a way callers should see.
 */
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
    /*
     * Hold semaphore so that nobody can access the chip during init.
     */
    bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
    bfa_ioc_pll_init_asic(ioc);
    ioc->pllinit = BFA_TRUE;
    /*
     * Initialize LMEM
     */
    bfa_ioc_lmem_init(ioc);

    /*
     * release semaphore.
     */
    readl(ioc->ioc_regs.ioc_init_sem_reg);
    writel(1, ioc->ioc_regs.ioc_init_sem_reg);

    return BFA_STATUS_OK;
}
/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
    bfa_ioc_stats(ioc, ioc_boots);

    if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
        return;

    /*
     * Initialize IOC state of all functions on a chip reset.
     */
    if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
        writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
        writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
    } else {
        writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
        writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
    }

    /* flush stale mailbox events, download the image, start the LPU */
    bfa_ioc_msgflush(ioc);
    bfa_ioc_download_fw(ioc, boot_type, boot_env);
    bfa_ioc_lpu_start(ioc);
}
/*
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
    /* module-wide flag consulted when a failure is detected */
    bfa_auto_recover = auto_recover;
}
/* Return BFA_TRUE when the IOC FSM is in the operational state. */
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
    return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
return ((r32 != BFI_IOC_UNINIT) &&
(r32 != BFI_IOC_INITING) &&
(r32 != BFI_IOC_MEMTEST));
}
/*
 * Fetch a pending firmware-to-host mailbox message, if any.
 *
 * @param[in]	ioc	IOC instance
 * @param[out]	mbmsg	buffer receiving the message words
 *
 * @return BFA_TRUE if a message was read, BFA_FALSE if the mailbox
 * held nothing.
 */
bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
    __be32 *msgp = mbmsg;
    u32 r32;
    int i;

    r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
    if ((r32 & 1) == 0)
        return BFA_FALSE;

    /*
     * read the MBOX msg
     */
    for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
         i++) {
        r32 = readl(ioc->ioc_regs.lpu_mbox +
                  i * sizeof(u32));
        msgp[i] = cpu_to_be32(r32);
    }

    /*
     * turn off mailbox interrupt by clearing mailbox status
     */
    writel(1, ioc->ioc_regs.lpu_mbox_cmd);
    readl(ioc->ioc_regs.lpu_mbox_cmd);

    return BFA_TRUE;
}
/*
 * Dispatch an IOC-class firmware message to the appropriate handler or
 * IOCPF state-machine event.
 *
 * @param[in] ioc  IOC instance
 * @param[in] m    received mailbox message
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
    union bfi_ioc_i2h_msg_u *msg;
    struct bfa_iocpf_s *iocpf = &ioc->iocpf;

    msg = (union bfi_ioc_i2h_msg_u *) m;

    bfa_ioc_stats(ioc, ioc_isrs);

    switch (msg->mh.msg_id) {
    case BFI_IOC_I2H_HBEAT:
        /* heartbeat is counted via the hb timer, nothing to do here */
        break;

    case BFI_IOC_I2H_ENABLE_REPLY:
        /* record the port mode/capabilities firmware settled on */
        ioc->port_mode = ioc->port_mode_cfg =
                (enum bfa_mode_s)msg->fw_event.port_mode;
        ioc->ad_cap_bm = msg->fw_event.cap_bm;
        bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
        break;

    case BFI_IOC_I2H_DISABLE_REPLY:
        bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
        break;

    case BFI_IOC_I2H_GETATTR_REPLY:
        bfa_ioc_getattr_reply(ioc);
        break;

    default:
        bfa_trc(ioc, msg->mh.msg_id);
        WARN_ON(1);
    }
}
/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
           struct bfa_timer_mod_s *timer_mod)
{
    ioc->bfa = bfa;
    ioc->cbfn = cbfn;
    ioc->timer_mod = timer_mod;
    ioc->fcmode = BFA_FALSE;
    ioc->pllinit = BFA_FALSE;
    ioc->dbg_fwsave_once = BFA_TRUE;
    ioc->iocpf.ioc = ioc;

    bfa_ioc_mbox_attach(ioc);
    INIT_LIST_HEAD(&ioc->notify_q);

    /* start the FSM in uninit and immediately drive it to reset */
    bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
    bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
    bfa_fsm_send_event(ioc, IOC_E_DETACH);
    /* drop any remaining notification registrations */
    INIT_LIST_HEAD(&ioc->notify_q);
}
/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
        enum bfi_pcifn_class clscode)
{
    ioc->clscode	= clscode;
    ioc->pcidev	= *pcidev;

    /*
     * Initialize IOC and device personality
     */
    ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
    ioc->asic_mode  = BFI_ASIC_MODE_FC;

    switch (pcidev->device_id) {
    case BFA_PCI_DEVICE_ID_FC_8G1P:
    case BFA_PCI_DEVICE_ID_FC_8G2P:
        /* Crossbow FC HBAs */
        ioc->asic_gen = BFI_ASIC_GEN_CB;
        ioc->fcmode = BFA_TRUE;
        ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
        ioc->ad_cap_bm = BFA_CM_HBA;
        break;

    case BFA_PCI_DEVICE_ID_CT:
        /* Catapult CNA: both ports Ethernet */
        ioc->asic_gen = BFI_ASIC_GEN_CT;
        ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
        ioc->asic_mode  = BFI_ASIC_MODE_ETH;
        ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
        ioc->ad_cap_bm = BFA_CM_CNA;
        break;

    case BFA_PCI_DEVICE_ID_CT_FC:
        /* Catapult in FC mode */
        ioc->asic_gen = BFI_ASIC_GEN_CT;
        ioc->fcmode = BFA_TRUE;
        ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
        ioc->ad_cap_bm = BFA_CM_HBA;
        break;

    case BFA_PCI_DEVICE_ID_CT2:
        /* Catapult-2: personality depends on class code / subsystem id */
        ioc->asic_gen = BFI_ASIC_GEN_CT2;
        if (clscode == BFI_PCIFN_CLASS_FC &&
            pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
            ioc->asic_mode  = BFI_ASIC_MODE_FC16;
            ioc->fcmode = BFA_TRUE;
            ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
            ioc->ad_cap_bm = BFA_CM_HBA;
        } else {
            ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
            ioc->asic_mode  = BFI_ASIC_MODE_ETH;
            if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
                ioc->port_mode =
                ioc->port_mode_cfg = BFA_MODE_CNA;
                ioc->ad_cap_bm = BFA_CM_CNA;
            } else {
                ioc->port_mode =
                ioc->port_mode_cfg = BFA_MODE_NIC;
                ioc->ad_cap_bm = BFA_CM_NIC;
            }
        }
        break;

    default:
        WARN_ON(1);
    }

    /*
     * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
     */
    if (ioc->asic_gen == BFI_ASIC_GEN_CB)
        bfa_ioc_set_cb_hwif(ioc);
    else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
        bfa_ioc_set_ct_hwif(ioc);
    else {
        WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
        bfa_ioc_set_ct2_hwif(ioc);
        bfa_ioc_ct2_poweron(ioc);
    }

    bfa_ioc_map_port(ioc);
    bfa_ioc_reg_init(ioc);
}
/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
{
    /*
     * dma memory for firmware attribute
     */
    ioc->attr_dma.kva = dm_kva;
    ioc->attr_dma.pa = dm_pa;
    ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
/* Request IOC enable; completion is reported via the enable callback. */
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
    bfa_ioc_stats(ioc, ioc_enables);
    /* re-arm the one-shot firmware-trace save for this enable cycle */
    ioc->dbg_fwsave_once = BFA_TRUE;

    bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
/* Request IOC disable; completion is reported via the disable callback. */
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
    bfa_ioc_stats(ioc, ioc_disables);
    bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before call bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
    ioc->dbg_fwsave	    = dbg_fwsave;
    /* trace saving is only meaningful when auto-recovery is enabled */
    ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
/*
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
    struct bfa_ioc_mbox_mod_s *mbox = &ioc->mbox_mod;
    int cls = 0;

    while (cls < BFI_MC_MAX) {
        mbox->mbhdlr[cls].cbfn = mcfuncs[cls];
        cls++;
    }
}
/*
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
            bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
    struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;

    mod->mbhdlr[mc].cbfn	= cbfn;
    mod->mbhdlr[mc].cbarg	= cbarg;
}
/*
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in]	ioc	IOC instance
 * @param[i]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
    struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
    u32			stat;

    /*
     * If a previous command is pending, queue new command
     */
    if (!list_empty(&mod->cmd_q)) {
        list_add_tail(&cmd->qe, &mod->cmd_q);
        return;
    }

    /*
     * If mailbox is busy, queue command for poll timer
     */
    stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
    if (stat) {
        list_add_tail(&cmd->qe, &mod->cmd_q);
        return;
    }

    /*
     * mailbox is free -- queue command to firmware
     */
    bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/*
 * Handle mailbox interrupts: fetch any pending firmware message,
 * dispatch it to the per-message-class handler, then try to send
 * queued host-to-firmware commands.
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
    struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
    struct bfi_mbmsg_s		m;
    int				mc;

    if (bfa_ioc_msgget(ioc, &m)) {
        /*
         * Treat IOC message class as special.
         */
        mc = m.mh.msg_class;
        if (mc == BFI_MC_IOC) {
            bfa_ioc_isr(ioc, &m);
            return;
        }

        /*
         * Bounds fix: mbhdlr[] has exactly BFI_MC_MAX entries (see
         * bfa_ioc_mbox_attach/bfa_ioc_mbox_register), so the original
         * 'mc > BFI_MC_MAX' test let mc == BFI_MC_MAX read one element
         * past the end of the array.
         */
        if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
            return;

        mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
    }

    bfa_ioc_lpu_read_stat(ioc);

    /*
     * Try to send pending mailbox commands
     */
    bfa_ioc_mbox_poll(ioc);
}
/* Hardware error interrupt: snapshot hb count and fail the IOC FSM. */
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
    bfa_ioc_stats(ioc, ioc_hbfails);
    ioc->stats.hb_count = ioc->hb_count;
    bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/*
* return true if IOC is disabled
*/
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
/*
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
    /* true while in reset, or while the PF FSM is checking/mismatched */
    return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
        bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
        bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}
#define bfa_ioc_state_disabled(__sm) \
(((__sm) == BFI_IOC_UNINIT) || \
((__sm) == BFI_IOC_INITING) || \
((__sm) == BFI_IOC_HWINIT) || \
((__sm) == BFI_IOC_DISABLED) || \
((__sm) == BFI_IOC_FAIL) || \
((__sm) == BFI_IOC_CFG_DISABLED))
/*
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
    u32	ioc_state;

    if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
        return BFA_FALSE;

    ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
    if (!bfa_ioc_state_disabled(ioc_state))
        return BFA_FALSE;

    /* single-port 8G1P has no partner function to check */
    if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
        ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
        if (!bfa_ioc_state_disabled(ioc_state))
            return BFA_FALSE;
    }

    return BFA_TRUE;
}
/*
 * Reset IOC fwstate registers.
 */
void
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
{
    writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
    writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
}
/* Manufacturer string used for the adapter model/manufacturer fields. */
#define BFA_MFG_NAME "Brocade"

/*
 * Fill in adapter-level attributes from the firmware attribute block
 * and derived IOC state.
 *
 * @param[in]  ioc      IOC instance
 * @param[out] ad_attr  receives the adapter attributes
 */
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
             struct bfa_adapter_attr_s *ad_attr)
{
    struct bfi_ioc_attr_s	*ioc_attr;

    ioc_attr = ioc->attr;

    bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
    bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
    bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
    bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
    memcpy(&ad_attr->vpd, &ioc_attr->vpd,
              sizeof(struct bfa_mfg_vpd_s));

    ad_attr->nports = bfa_ioc_get_nports(ioc);
    ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

    bfa_ioc_get_adapter_model(ioc, ad_attr->model);
    /* For now, model descr uses same model string */
    bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

    ad_attr->card_type = ioc_attr->card_type;
    ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

    if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
        ad_attr->prototype = 1;
    else
        ad_attr->prototype = 0;

    ad_attr->pwwn = ioc->attr->pwwn;
    ad_attr->mac  = bfa_ioc_get_mac(ioc);

    ad_attr->pcie_gen = ioc_attr->pcie_gen;
    ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
    ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
    ad_attr->asic_rev = ioc_attr->asic_rev;

    bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

    ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
    /* trunking requires multiple FC ports on a non-mezzanine card */
    ad_attr->trunk_capable = (ad_attr->nports > 1) &&
                  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
}
/*
 * Classify this IOC as LL (Ethernet), FC, or FCoE based on its PCI
 * function class and port mode.
 */
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
    if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
        return BFA_IOC_TYPE_LL;

    WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);

    if (ioc->attr->port_mode == BFI_PORT_MODE_FC)
        return BFA_IOC_TYPE_FC;
    return BFA_IOC_TYPE_FCoE;
}
/* Copy the adapter serial number from the firmware attribute block. */
void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
    memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
    memcpy((void *)serial_num,
            (void *)ioc->attr->brcd_serialnum,
            BFA_ADAPTER_SERIAL_NUM_LEN);
}
/* Copy the firmware version string from the firmware attribute block. */
void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
    memset((void *)fw_ver, 0, BFA_VERSION_LEN);
    memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
/*
 * Format the ASIC revision as a "Rev-<c>" string into chip_rev
 * (BFA_IOC_CHIP_REV_LEN bytes, NUL terminated).
 */
void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
    WARN_ON(!chip_rev);

    memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

    memcpy(chip_rev, "Rev-", 4);
    chip_rev[4] = ioc->attr->asic_rev;
    chip_rev[5] = '\0';
}
/* Copy the option-ROM version string from the firmware attribute block. */
void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
    memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
    memcpy(optrom_ver, ioc->attr->optrom_version,
              BFA_VERSION_LEN);
}
/* Report the fixed manufacturer name string. */
void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
    memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
    memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
/* Build the adapter model string as "<mfg>-<card_type>". */
void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
    struct bfi_ioc_attr_s	*ioc_attr;

    WARN_ON(!model);
    memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

    ioc_attr = ioc->attr;

    snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
        BFA_MFG_NAME, ioc_attr->card_type);
}
/*
 * Map the IOC state machine (and, for transitional states, the IOCPF
 * sub-state machine) to the externally visible bfa_ioc_state.
 */
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	/*
	 * While enabling or in a failure state, the IOCPF sub-state
	 * carries finer detail; refine the reported state from it.
	 */
	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;
		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;
		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;
		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;
		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;
		default:
			/* other sub-states: keep the base IOC state */
			break;
		}
	}
	return ioc_st;
}
/*
 * Populate the caller-supplied IOC attribute structure with the
 * current state, port identity, capability and PCI information.
 */
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	memset(ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	/* Identity and state. */
	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;
	ioc_attr->port_mode = ioc->port_mode;
	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;
	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	/* Adapter-level attributes. */
	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	/* PCI attributes. */
	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
/*
 * Return the MAC address for this IOC: FCoE functions use the
 * dedicated FCoE MAC, all other types use the base MAC.
 */
mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	return (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE) ?
		ioc->attr->fcoe_mac : ioc->attr->mac;
}
/*
 * Return the manufacturing (factory) MAC adjusted for this PCI
 * function, using the per-model addressing scheme.
 */
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t mac = ioc->attr->mfg_mac;

	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type)) {
		/* Old models: add the PCI function to the last octet. */
		mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	} else {
		bfa_mfg_increment_wwn_mac(&mac.mac[MAC_ADDRLEN - 3],
					  bfa_ioc_pcifn(ioc));
	}

	return mac;
}
/*
* Send AEN notification
*/
/*
 * Post an IOC-category AEN (asynchronous event notification) to the
 * driver AEN queue for delivery to management applications.
 */
void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	struct bfa_aen_entry_s *aen_entry;
	enum bfa_ioc_type_e ioc_type;

	/* May yield a NULL entry; silently drop the event in that case. */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	/* Fill in identity data appropriate to the IOC personality. */
	ioc_type = bfa_ioc_get_type(ioc);
	switch (ioc_type) {
	case BFA_IOC_TYPE_FC:
		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
		break;
	case BFA_IOC_TYPE_FCoE:
		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	case BFA_IOC_TYPE_LL:
		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	default:
		/* unreachable for valid ioc_type values */
		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
		break;
	}

	/* Send the AEN notification */
	aen_entry->aen_data.ioc.ioc_type = ioc_type;
	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
				  BFA_AEN_CAT_IOC, event);
}
/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
/*
 * Retrieve saved firmware trace from a prior IOC failure.
 *
 * On entry *trclen is the caller's buffer size; on success it is
 * updated to the number of bytes actually copied.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int len;

	/* Nothing was saved from a prior failure. */
	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	/* Clamp to the amount actually saved. */
	len = *trclen;
	if (len > ioc->dbg_fwsave_len)
		len = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, len);
	*trclen = len;
	return BFA_STATUS_OK;
}
/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
/*
 * Read the current firmware trace from shared memory.
 *
 * On entry *trclen is the caller's buffer size; it is updated to the
 * number of bytes requested from smem (at most BFA_DBG_FWTRC_LEN).
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int len;
	bfa_status_t status;

	bfa_trc(ioc, *trclen);

	len = *trclen;
	if (len > BFA_DBG_FWTRC_LEN)
		len = BFA_DBG_FWTRC_LEN;

	status = bfa_ioc_smem_read(ioc, trcdata, loff, len);
	*trclen = len;
	return status;
}
/*
 * Queue a DBG_SYNC mailbox command asking the firmware to flush its
 * trace/smem state before a host-side read.
 */
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->clscode = cpu_to_be16(ioc->clscode);
	bfa_ioc_mbox_queue(ioc, &cmd);
}
/*
 * Send a firmware sync command and give it time to take effect.
 */
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 spin;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * The fw_sync mailbox command has no response, and interrupts
	 * may be disabled when we are called, so we cannot wait for an
	 * acknowledgement. Instead, spin for a bounded number of
	 * iterations while the command is still pending and then move
	 * on; the firmware may have crashed and never consume it.
	 */
	for (spin = 1000; bfa_ioc_mbox_cmd_pending(ioc) && spin > 0; spin--)
		;
}
/*
* Dump firmware smem
*/
/*
 * Dump a chunk of firmware shared memory (smem).
 *
 * The caller drives this iteratively: *offset is the position to read
 * from and *buflen the requested size; on success both are updated
 * (offset advances and wraps to 0 at the end of smem, buflen reflects
 * the bytes actually read). Returns BFA_STATUS_EINVAL for an
 * out-of-range offset; both outputs are zeroed on any failure.
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
		     u32 *offset, int *buflen)
{
	u32 loff;
	int dlen;
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * First smem read, sync smem before proceeding
	 * No need to sync before reading every chunk.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	/* Clamp the final chunk to the end of smem. */
	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	/* Advance the cursor; wrap to 0 once the whole smem is dumped. */
	*offset += dlen;
	if (*offset >= smem_len)
		*offset = 0;
	*buflen = dlen;

	return status;
}
/*
* Firmware statistics
*/
/*
 * Read the firmware statistics block for this port from shared memory.
 * Serialized against concurrent stat operations via ioc->stats_busy.
 */
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
	u32 off = BFI_IOC_FWSTATS_OFF +
		  BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	ioc->stats_busy = BFA_TRUE;
	status = bfa_ioc_smem_read(ioc, stats, off,
				   sizeof(struct bfa_fw_stats_s));
	ioc->stats_busy = BFA_FALSE;

	return status;
}
/*
 * Zero the firmware statistics block for this port in shared memory.
 * Serialized against concurrent stat operations via ioc->stats_busy.
 */
bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
	u32 off = BFI_IOC_FWSTATS_OFF +
		  BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	ioc->stats_busy = BFA_TRUE;
	status = bfa_ioc_smem_clr(ioc, off, sizeof(struct bfa_fw_stats_s));
	ioc->stats_busy = BFA_FALSE;

	return status;
}
/*
* Save firmware trace if configured.
*/
/*
 * Save firmware trace if configured. Only the first failure after a
 * (re)enable is captured (one-shot via dbg_fwsave_once).
 */
static void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
	int len;

	if (!ioc->dbg_fwsave_once)
		return;
	ioc->dbg_fwsave_once = BFA_FALSE;

	if (!ioc->dbg_fwsave_len)
		return;

	len = ioc->dbg_fwsave_len;
	bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &len);
}
/*
* Firmware failure detected. Start recovery actions.
*/
/*
 * Firmware failure detected. Record heartbeat stats and kick the IOC
 * state machine into failure recovery.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	ioc->stats.hb_count = ioc->hb_count;
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
/*
* BFA IOC PF private functions
*/
/*
 * IOCPF timer expiry: forward a TIMEOUT event to the IOCPF state machine.
 */
static void
bfa_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
/*
 * Hardware semaphore retry timer expiry: retry the semaphore acquire.
 */
static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
/*
 * Poll the firmware state register: fire FWREADY when the firmware
 * reaches DISABLED, time out after BFA_IOC_TOV, otherwise re-arm the
 * poll timer.
 */
static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	bfa_trc(ioc, fwstate);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_iocpf_timeout(ioc);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		bfa_iocpf_poll_timer_start(ioc);
	}
}
/*
 * Poll timer expiry: sample the firmware state again.
 */
static void
bfa_iocpf_poll_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = ioc_arg;

	bfa_ioc_poll_fwinit(ioc);
}
/*
* bfa timer function
*/
/*
 * bfa timer function
 *
 * Called once per BFA_TIMER_FREQ tick: age every active timer, move
 * the expired ones onto a private list, then run their callbacks.
 * Callbacks are run off the main queue so they may safely re-arm or
 * stop timers.
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
	struct list_head *qh = &mod->timer_q;
	struct list_head *qe, *qe_next;
	struct bfa_timer_s *elem;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	/* First pass: decrement timeouts, collect expired timers. */
	qe = bfa_q_next(qh);
	while (qe != qh) {
		/* Save the successor before qe may be unlinked below. */
		qe_next = bfa_q_next(qe);

		elem = (struct bfa_timer_s *) qe;
		if (elem->timeout <= BFA_TIMER_FREQ) {
			elem->timeout = 0;
			list_del(&elem->qe);
			list_add_tail(&elem->qe, &timedout_q);
		} else {
			elem->timeout -= BFA_TIMER_FREQ;
		}

		qe = qe_next;	/* go to next elem */
	}

	/*
	 * Pop all the timeout entries
	 */
	while (!list_empty(&timedout_q)) {
		bfa_q_deq(&timedout_q, &elem);
		elem->timercb(elem->arg);
	}
}
/*
* Should be called with lock protection
*/
/*
 * Arm a timer: after ~timeout, timercb(arg) will be invoked from
 * bfa_timer_beat(). Should be called with lock protection; the timer
 * must not already be queued.
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		void (*timercb) (void *), void *arg, unsigned int timeout)
{
	WARN_ON(timercb == NULL);
	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timercb = timercb;
	timer->arg = arg;
	timer->timeout = timeout;

	list_add_tail(&timer->qe, &mod->timer_q);
}
/*
* Should be called with lock protection
*/
/*
 * Disarm a running timer. Should be called with lock protection; the
 * timer must currently be queued.
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	WARN_ON(list_empty(&timer->qe));

	list_del(&timer->qe);
}
/*
* ASIC block related
*/
/*
 * Convert an ASIC-block configuration received from firmware from
 * big-endian (wire) to host byte order, in place.
 */
static void
bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
{
	struct bfa_ablk_cfg_inst_s *cfg_inst;
	int i, j;
	u16 be16;
	u32 be32;

	for (i = 0; i < BFA_ABLK_MAX; i++) {
		cfg_inst = &cfg->inst[i];
		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
			be16 = cfg_inst->pf_cfg[j].pers;
			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
			be16 = cfg_inst->pf_cfg[j].num_qpairs;
			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
			be16 = cfg_inst->pf_cfg[j].num_vectors;
			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
			/*
			 * BUG FIX: bw is a 32-bit field, but it was being
			 * converted with be16_to_cpu(), which corrupts the
			 * bandwidth value; use the 32-bit conversion.
			 */
			be32 = cfg_inst->pf_cfg[j].bw;
			cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
		}
	}
}
/*
 * Mailbox ISR for ASIC-block responses: copy back any result data,
 * clear the busy flag and invoke the completion callback (if any).
 */
static void
bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
	bfa_ablk_cbfn_t cbfn;

	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
	bfa_trc(ablk->ioc, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_ABLK_I2H_QUERY:
		/* Copy the DMA'd config into the caller's buffer. */
		if (rsp->status == BFA_STATUS_OK) {
			memcpy(ablk->cfg, ablk->dma_addr.kva,
			       sizeof(struct bfa_ablk_cfg_s));
			bfa_ablk_config_swap(ablk->cfg);
			ablk->cfg = NULL;
		}
		break;

	case BFI_ABLK_I2H_ADPT_CONFIG:
	case BFI_ABLK_I2H_PORT_CONFIG:
		/* update config port mode */
		ablk->ioc->port_mode_cfg = rsp->port_mode;
		/* fall through -- no further response data */

	case BFI_ABLK_I2H_PF_DELETE:
	case BFI_ABLK_I2H_PF_UPDATE:
	case BFI_ABLK_I2H_OPTROM_ENABLE:
	case BFI_ABLK_I2H_OPTROM_DISABLE:
		/* No-op */
		break;

	case BFI_ABLK_I2H_PF_CREATE:
		/* Hand the newly assigned PCI function id back. */
		*(ablk->pcifn) = rsp->pcifn;
		ablk->pcifn = NULL;
		break;

	default:
		WARN_ON(1);
	}

	/* Complete the request: release busy state, then call back. */
	ablk->busy = BFA_FALSE;
	if (ablk->cbfn) {
		cbfn = ablk->cbfn;
		ablk->cbfn = NULL;
		cbfn(ablk->cbarg, rsp->status);
	}
}
/*
 * IOC event notification for the ASIC-block module: on IOC disable or
 * failure, fail any request that is still in flight.
 */
static void
bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;

	bfa_trc(ablk->ioc, event);

	switch (event) {
	case BFA_IOC_E_ENABLED:
		/* No request can be outstanding across an enable. */
		WARN_ON(ablk->busy != BFA_FALSE);
		break;

	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		/* Fail any pending requests */
		ablk->pcifn = NULL;
		if (ablk->busy) {
			if (ablk->cbfn)
				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
			ablk->cbfn = NULL;
			ablk->busy = BFA_FALSE;
		}
		break;

	default:
		WARN_ON(1);
		break;
	}
}
/*
 * Return the DMA memory (in bytes, DMA-aligned) needed by the
 * ASIC-block module.
 */
u32
bfa_ablk_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
}
/*
 * Claim the DMA buffer (virtual and physical addresses) allocated for
 * the ASIC-block module.
 */
void
bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
{
	ablk->dma_addr.kva = dma_kva;
	ablk->dma_addr.pa = dma_pa;
}
/*
 * Attach the ASIC-block module to an IOC: register its mailbox ISR
 * and subscribe to IOC state-change notifications.
 */
void
bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
{
	ablk->ioc = ioc;

	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
	bfa_q_qe_init(&ablk->ioc_notify);
	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
}
/*
 * Query the ASIC-block configuration from firmware into *ablk_cfg via
 * DMA; cbfn(cbarg, status) is invoked on completion. Only one ablk
 * request may be in flight at a time.
 */
bfa_status_t
bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
	       bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_query_s *req;

	WARN_ON(!ablk_cfg);

	/* Reject if the IOC is down or another request is in flight. */
	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	/* Record completion context and mark the module busy. */
	ablk->cfg = ablk_cfg;
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	/* Build and queue the query mailbox command. */
	req = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
	bfi_h2i_set(req->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
		    bfa_ioc_portid(ablk->ioc));
	bfa_dma_be_addr_set(req->addr, ablk->dma_addr.pa);
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
/*
 * Create a PCI function with the given personality and bandwidth on
 * the given port; the assigned function id is written to *pcifn on
 * completion and cbfn(cbarg, status) is invoked.
 */
bfa_status_t
bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
		   u8 port, enum bfi_pcifn_class personality, int bw,
		   bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *req;

	/* Reject if the IOC is down or another request is in flight. */
	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	/* Record completion context and mark the module busy. */
	ablk->pcifn = pcifn;
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	/* Build and queue the PF create mailbox command. */
	req = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(req->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
		    bfa_ioc_portid(ablk->ioc));
	req->pers = cpu_to_be16((u16)personality);
	req->bw = cpu_to_be32(bw);
	req->port = port;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
/*
 * Delete the given PCI function; cbfn(cbarg, status) is invoked on
 * completion.
 */
bfa_status_t
bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
		   bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *req;

	/* Reject if the IOC is down or another request is in flight. */
	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	/* Record completion context and mark the module busy. */
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	/* Build and queue the PF delete mailbox command. */
	req = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(req->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
		    bfa_ioc_portid(ablk->ioc));
	req->pcifn = (u8)pcifn;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
/*
 * Reconfigure the whole adapter (mode and PF/VF limits);
 * cbfn(cbarg, status) is invoked on completion.
 */
bfa_status_t
bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
			int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn,
			void *cbarg)
{
	struct bfi_ablk_h2i_cfg_req_s *req;

	/* Reject if the IOC is down or another request is in flight. */
	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	/* Record completion context and mark the module busy. */
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	/* Build and queue the adapter config mailbox command. */
	req = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
	bfi_h2i_set(req->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
		    bfa_ioc_portid(ablk->ioc));
	req->mode = (u8)mode;
	req->max_pf = (u8)max_pf;
	req->max_vf = (u8)max_vf;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
/*
 * Reconfigure a single port (mode and PF/VF limits);
 * cbfn(cbarg, status) is invoked on completion.
 */
bfa_status_t
bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
		     int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn,
		     void *cbarg)
{
	struct bfi_ablk_h2i_cfg_req_s *req;

	/* Reject if the IOC is down or another request is in flight. */
	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	/* Record completion context and mark the module busy. */
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	/* Build and queue the port config mailbox command. */
	req = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
	bfi_h2i_set(req->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
		    bfa_ioc_portid(ablk->ioc));
	req->port = (u8)port;
	req->mode = (u8)mode;
	req->max_pf = (u8)max_pf;
	req->max_vf = (u8)max_vf;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
/*
 * Update the bandwidth allocation of an existing PCI function;
 * cbfn(cbarg, status) is invoked on completion.
 */
bfa_status_t
bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
		   bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *req;

	/* Reject if the IOC is down or another request is in flight. */
	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	/* Record completion context and mark the module busy. */
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	/* Build and queue the PF update mailbox command. */
	req = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(req->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
		    bfa_ioc_portid(ablk->ioc));
	req->pcifn = (u8)pcifn;
	req->bw = cpu_to_be32(bw);
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
/*
 * Enable the option ROM; cbfn(cbarg, status) is invoked on completion.
 */
bfa_status_t
bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_optrom_s *req;

	/* Reject if the IOC is down or another request is in flight. */
	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	/* Record completion context and mark the module busy. */
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	/* Build and queue the optrom enable mailbox command. */
	req = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
	bfi_h2i_set(req->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
		    bfa_ioc_portid(ablk->ioc));
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
/*
 * Disable the option ROM; cbfn(cbarg, status) is invoked on completion.
 */
bfa_status_t
bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_optrom_s *req;

	/* Reject if the IOC is down or another request is in flight. */
	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	/* Record completion context and mark the module busy. */
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	/* Build and queue the optrom disable mailbox command. */
	req = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
	bfi_h2i_set(req->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
		    bfa_ioc_portid(ablk->ioc));
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
/*
* SFP module specific
*/
/* forward declarations */
static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
enum bfa_port_speed portspeed);
/*
 * Complete a pending SFP "show" request: invoke the caller's callback
 * with the final status, then release the lock and callback slot.
 */
static void
bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
{
	bfa_trc(sfp, sfp->lock);

	if (sfp->cbfn)
		sfp->cbfn(sfp->cbarg, sfp->status);
	/* Release after the callback so it observes the in-progress state. */
	sfp->lock = 0;
	sfp->cbfn = NULL;
}
/*
 * Complete a pending SFP state query: deliver the media type and/or
 * speed-validation result to the state-query callback, then release
 * the query lock.
 */
static void
bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
{
	bfa_trc(sfp, sfp->portspeed);

	/* Media-type query pending? */
	if (sfp->media) {
		bfa_sfp_media_get(sfp);
		if (sfp->state_query_cbfn)
			sfp->state_query_cbfn(sfp->state_query_cbarg,
					      sfp->status);
		sfp->media = NULL;
	}

	/* Speed-validation query pending? */
	if (sfp->portspeed) {
		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
		if (sfp->state_query_cbfn)
			sfp->state_query_cbfn(sfp->state_query_cbarg,
					      sfp->status);
		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
	}

	sfp->state_query_lock = 0;
	sfp->state_query_cbfn = NULL;
}
/*
* IOC event handler.
*/
/*
 * IOC event handler for the SFP module: when the IOC is disabled or
 * fails, fail any in-flight show or state-query request.
 */
static void
bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
{
	struct bfa_sfp_s *sfp = sfp_arg;

	bfa_trc(sfp, event);
	bfa_trc(sfp, sfp->lock);
	bfa_trc(sfp, sfp->state_query_lock);

	/* Only IOC teardown events require action. */
	if (event != BFA_IOC_E_DISABLED && event != BFA_IOC_E_FAILED)
		return;

	if (sfp->lock) {
		sfp->status = BFA_STATUS_IOC_FAILURE;
		bfa_cb_sfp_show(sfp);
	}
	if (sfp->state_query_lock) {
		sfp->status = BFA_STATUS_IOC_FAILURE;
		bfa_cb_sfp_state_query(sfp);
	}
}
/*
* SFP's State Change Notification post to AEN
*/
/*
 * SFP's State Change Notification post to AEN
 *
 * Translates a firmware SFP SCN into a port-category AEN entry and
 * queues it for delivery.
 */
static void
bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
{
	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
	struct bfa_aen_entry_s *aen_entry;
	enum bfa_port_aen_event aen_evt = 0;

	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
		((u64)rsp->event));

	/* May yield a NULL entry; silently drop the event in that case. */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);

	switch (rsp->event) {
	case BFA_SFP_SCN_INSERTED:
		aen_evt = BFA_PORT_AEN_SFP_INSERT;
		break;
	case BFA_SFP_SCN_REMOVED:
		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
		break;
	case BFA_SFP_SCN_FAILED:
		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
		break;
	case BFA_SFP_SCN_UNSUPPORT:
		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
		break;
	case BFA_SFP_SCN_POM:
		aen_evt = BFA_PORT_AEN_SFP_POM;
		aen_entry->aen_data.port.level = rsp->pomlvl;
		break;
	default:
		/* NOTE(review): unknown events still post with aen_evt
		 * left at 0 — presumably acceptable, but verify. */
		bfa_trc(sfp, rsp->event);
		WARN_ON(1);
	}

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
				  BFA_AEN_CAT_PORT, aen_evt);
}
/*
* SFP get data send
*/
/*
 * SFP get data send
 *
 * Queue the SHOW mailbox command built up in sfp->mbcmd (memtype and
 * SG list have already been filled in by bfa_sfp_getdata()).
 */
static void
bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	bfa_trc(sfp, req->memtype);

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
		    bfa_ioc_portid(sfp->ioc));

	/* send mbox cmd */
	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
}
/*
* SFP is valid, read sfp data
*/
/*
 * SFP is valid, read sfp data
 *
 * Start an SFP EEPROM read of the given memory region into the
 * module's DMA buffer. Takes the sfp->lock (must not already be held).
 */
static void
bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	WARN_ON(sfp->lock != 0);
	bfa_trc(sfp, sfp->state);

	sfp->lock = 1;
	sfp->memtype = memtype;
	req->memtype = memtype;

	/* Setup SG list */
	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);

	bfa_sfp_getdata_send(sfp);
}
/*
* SFP scn handler
*/
/*
 * SFP scn handler
 *
 * Process a firmware state-change notification: update the cached SFP
 * state, post an AEN, and kick off an EEPROM re-read where the new
 * state makes the data stale.
 */
static void
bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;

	switch (rsp->event) {
	case BFA_SFP_SCN_INSERTED:
		sfp->state = BFA_SFP_STATE_INSERTED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_REMOVED:
		sfp->state = BFA_SFP_STATE_REMOVED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_FAILED:
		sfp->state = BFA_SFP_STATE_FAILED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_UNSUPPORT:
		sfp->state = BFA_SFP_STATE_UNSUPPORT;
		bfa_sfp_scn_aen_post(sfp, rsp);
		/* Re-read EEPROM unless a read is already in flight. */
		if (!sfp->lock)
			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
		break;
	case BFA_SFP_SCN_POM:
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_VALID:
		sfp->state = BFA_SFP_STATE_VALID;
		/* Re-read EEPROM unless a read is already in flight. */
		if (!sfp->lock)
			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
		break;
	default:
		bfa_trc(sfp, rsp->event);
		WARN_ON(1);
	}
}
/*
* SFP show complete
*/
/*
 * SFP show complete
 *
 * Completion handler for a SHOW mailbox command: validate the result,
 * copy the EEPROM image to the caller's buffer for DIAGEXT requests,
 * and complete any show and/or state-query callbacks.
 */
static void
bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;

	if (!sfp->lock) {
		/*
		 * receiving response after ioc failure
		 */
		bfa_trc(sfp, sfp->lock);
		return;
	}

	bfa_trc(sfp, rsp->status);
	if (rsp->status == BFA_STATUS_OK) {
		sfp->data_valid = 1;
		/* Derive the request status from the cached SFP state. */
		if (sfp->state == BFA_SFP_STATE_VALID)
			sfp->status = BFA_STATUS_OK;
		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
			sfp->status = BFA_STATUS_SFP_UNSUPP;
		else
			bfa_trc(sfp, sfp->state);
	} else {
		sfp->data_valid = 0;
		sfp->status = rsp->status;
		/* sfpshow shouldn't change sfp state */
	}

	bfa_trc(sfp, sfp->memtype);
	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
		bfa_trc(sfp, sfp->data_valid);
		if (sfp->data_valid) {
			u32 size = sizeof(struct sfp_mem_s);
			u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
			memcpy(des, sfp->dbuf_kva, size);
		}
		/*
		 * Queue completion callback.
		 */
		bfa_cb_sfp_show(sfp);
	} else
		sfp->lock = 0;

	bfa_trc(sfp, sfp->state_query_lock);
	if (sfp->state_query_lock) {
		sfp->state = rsp->state;
		/* Complete callback */
		bfa_cb_sfp_state_query(sfp);
	}
}
/*
* SFP query fw sfp state
*/
/*
 * SFP query fw sfp state
 *
 * Ask firmware for the current SFP state; the answer is delivered
 * through bfa_sfp_show_comp() -> bfa_cb_sfp_state_query().
 */
static void
bfa_sfp_state_query(struct bfa_sfp_s *sfp)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	/* Should not be doing query if not in _INIT state */
	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
	WARN_ON(sfp->state_query_lock != 0);
	bfa_trc(sfp, sfp->state);

	sfp->state_query_lock = 1;
	req->memtype = 0;

	/* Piggy-back on a full EEPROM read unless one is in flight. */
	if (!sfp->lock)
		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
}
/*
 * Classify the SFP media type (copper / electrical / long-wave /
 * short-wave) from the transceiver code bytes of the cached EEPROM
 * data, writing the result through sfp->media.
 */
static void
bfa_sfp_media_get(struct bfa_sfp_s *sfp)
{
	enum bfa_defs_sfp_media_e *media = sfp->media;

	*media = BFA_SFP_MEDIA_UNKNOWN;

	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
		*media = BFA_SFP_MEDIA_UNSUPPORT;
	else if (sfp->state == BFA_SFP_STATE_VALID) {
		union sfp_xcvr_e10g_code_u e10g;
		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
		/* FC transmitter technology bits span xcvr bytes 4-5. */
		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
				(sfpmem->srlid_base.xcvr[5] >> 1);

		e10g.b = sfpmem->srlid_base.xcvr[0];
		bfa_trc(sfp, e10g.b);
		bfa_trc(sfp, xmtr_tech);
		/* check fc transmitter tech */
		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
		    (xmtr_tech & SFP_XMTR_TECH_CA))
			*media = BFA_SFP_MEDIA_CU;
		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
			*media = BFA_SFP_MEDIA_EL;
		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
			 (xmtr_tech & SFP_XMTR_TECH_LC))
			*media = BFA_SFP_MEDIA_LW;
		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
			 (xmtr_tech & SFP_XMTR_TECH_SA))
			*media = BFA_SFP_MEDIA_SW;
		/* Check 10G Ethernet Compliance code */
		else if (e10g.r.e10g_sr)
			*media = BFA_SFP_MEDIA_SW;
		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
			*media = BFA_SFP_MEDIA_LW;
		else if (e10g.r.e10g_unall)
			*media = BFA_SFP_MEDIA_UNKNOWN;
		else
			bfa_trc(sfp, 0);
	} else
		bfa_trc(sfp, sfp->state);
}
/*
 * Check whether the requested port speed is supported by the SFP,
 * based on the FC-3 and 10G-Ethernet compliance codes in the cached
 * EEPROM data.
 */
static bfa_status_t
bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
{
	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;

	/* 10G is decided purely by the 10G Ethernet compliance bits. */
	if (portspeed == BFA_PORT_SPEED_10GBPS) {
		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
			return BFA_STATUS_OK;
		bfa_trc(sfp, e10g.b);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	/* FC speeds: match the requested rate against the FC-3 codes. */
	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
		return BFA_STATUS_OK;

	bfa_trc(sfp, portspeed);
	bfa_trc(sfp, fc3.b);
	bfa_trc(sfp, e10g.b);
	return BFA_STATUS_UNSUPP_SPEED;
}
/*
* SFP hmbox handler
*/
/*
 * SFP hmbox handler
 *
 * Dispatch firmware-to-host SFP mailbox messages to the appropriate
 * completion or state-change handler.
 */
void
bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
{
	struct bfa_sfp_s *sfp = sfparg;

	switch (msg->mh.msg_id) {
	case BFI_SFP_I2H_SHOW:
		bfa_sfp_show_comp(sfp, msg);
		break;

	case BFI_SFP_I2H_SCN:
		bfa_sfp_scn(sfp, msg);
		break;

	default:
		/* unexpected message id */
		bfa_trc(sfp, msg->mh.msg_id);
		WARN_ON(1);
	}
}
/*
* Return DMA memory needed by sfp module.
*/
/*
 * Return DMA memory needed by sfp module (bytes, DMA-aligned).
 */
u32
bfa_sfp_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}
/*
* Attach virtual and physical memory for SFP.
*/
/*
 * Attach virtual and physical memory for SFP.
 *
 * Initializes all module state, registers the mailbox ISR and
 * subscribes to IOC state-change notifications.
 */
void
bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
	       struct bfa_trc_mod_s *trcmod)
{
	sfp->dev = dev;
	sfp->ioc = ioc;
	sfp->trcmod = trcmod;

	/* No request in flight yet. */
	sfp->cbfn = NULL;
	sfp->cbarg = NULL;
	sfp->sfpmem = NULL;
	sfp->lock = 0;
	sfp->data_valid = 0;
	sfp->state = BFA_SFP_STATE_INIT;
	sfp->state_query_lock = 0;
	sfp->state_query_cbfn = NULL;
	sfp->state_query_cbarg = NULL;
	sfp->media = NULL;
	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
	sfp->is_elb = BFA_FALSE;

	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
	bfa_q_qe_init(&sfp->ioc_notify);
	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
}
/*
* Claim Memory for SFP
*/
/*
 * Claim Memory for SFP
 *
 * Record the DMA buffer (kernel virtual and physical address) used
 * for EEPROM reads and zero it.
 */
void
bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
{
	sfp->dbuf_kva = dm_kva;
	sfp->dbuf_pa = dm_pa;
	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
	/*
	 * Dead code removed: the original advanced dm_kva/dm_pa past the
	 * claimed region, but both are pass-by-value parameters, so the
	 * increments had no effect on the caller.
	 */
}
/*
* Show SFP eeprom content
*
* @param[in] sfp - bfa sfp module
*
* @param[out] sfpmem - sfp eeprom data
*
*/
/*
 * Show SFP eeprom content
 *
 * @param[in]  sfp    - bfa sfp module
 * @param[out] sfpmem - sfp eeprom data, filled in on completion
 *
 * cbfn(cbarg, status) is invoked when the read finishes.
 */
bfa_status_t
bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
	     bfa_cb_sfp_t cbfn, void *cbarg)
{
	/* Reject if the IOC is down or a read is already in flight. */
	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}
	if (sfp->lock) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_DEVBUSY;
	}

	/* Record completion context and start the EEPROM read. */
	sfp->sfpmem = sfpmem;
	sfp->cbfn = cbfn;
	sfp->cbarg = cbarg;

	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
	return BFA_STATUS_OK;
}
/*
* Return SFP Media type
*
* @param[in] sfp - bfa sfp module
*
* @param[out] media - port speed from user
*
*/
/*
 * Return SFP Media type
 *
 * @param[in]  sfp   - bfa sfp module
 * @param[out] media - media type, written synchronously or via cbfn
 *
 * If the SFP state is not yet known, a firmware state query is
 * started and BFA_STATUS_SFP_NOT_READY is returned; the result is
 * then delivered through cbfn(cbarg, status).
 */
bfa_status_t
bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
	      bfa_cb_sfp_t cbfn, void *cbarg)
{
	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	sfp->media = media;

	/* State already known: answer synchronously. */
	if (sfp->state != BFA_SFP_STATE_INIT) {
		bfa_sfp_media_get(sfp);
		return BFA_STATUS_OK;
	}

	/* State unknown: query firmware, unless a query is in flight. */
	if (sfp->state_query_lock) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_DEVBUSY;
	}

	sfp->state_query_cbfn = cbfn;
	sfp->state_query_cbarg = cbarg;
	bfa_sfp_state_query(sfp);
	return BFA_STATUS_SFP_NOT_READY;
}
/*
* Check if user set port speed is allowed by the SFP
*
* @param[in] sfp - bfa sfp module
* @param[in] portspeed - port speed from user
*
*/
/*
 * Check if user set port speed is allowed by the SFP
 *
 * @param[in] sfp - bfa sfp module
 * @param[in] portspeed - port speed from user
 *
 * May answer synchronously, or return BFA_STATUS_SFP_NOT_READY and
 * deliver the verdict later through cbfn(cbarg, status) once the SFP
 * state has been queried from firmware.
 */
bfa_status_t
bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
	      bfa_cb_sfp_t cbfn, void *cbarg)
{
	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);

	if (!bfa_ioc_is_operational(sfp->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* For Mezz card, all speed is allowed */
	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
		return BFA_STATUS_OK;

	/* Check SFP state */
	sfp->portspeed = portspeed;
	if (sfp->state == BFA_SFP_STATE_INIT) {
		/* State unknown yet: query firmware and answer via cbfn. */
		if (sfp->state_query_lock) {
			bfa_trc(sfp, 0);
			return BFA_STATUS_DEVBUSY;
		} else {
			sfp->state_query_cbfn = cbfn;
			sfp->state_query_cbarg = cbarg;
			bfa_sfp_state_query(sfp);
			return BFA_STATUS_SFP_NOT_READY;
		}
	}

	if (sfp->state == BFA_SFP_STATE_REMOVED ||
	    sfp->state == BFA_SFP_STATE_FAILED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_NO_SFP_DEV;
	}

	if (sfp->state == BFA_SFP_STATE_INSERTED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
	}

	/* For eloopback, all speed is allowed */
	if (sfp->is_elb)
		return BFA_STATUS_OK;

	return bfa_sfp_speed_valid(sfp, portspeed);
}
/*
* Flash module specific
*/
/*
* FLASH DMA buffer should be big enough to hold both MFG block and
* asic block(64k) at the same time and also should be 2k aligned to
* avoid write segement to cross sector boundary.
*/
#define BFA_FLASH_SEG_SZ 2048
#define BFA_FLASH_DMA_BUF_SZ \
BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
/*
 * Post an audit-category AEN for a flash partition operation.
 */
static void
bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
			 int inst, int type)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	struct bfa_aen_entry_s *aen_entry;

	/* May yield a NULL entry; silently drop the event in that case. */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
	aen_entry->aen_data.audit.partition_inst = inst;
	aen_entry->aen_data.audit.partition_type = type;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
				  BFA_AEN_CAT_AUDIT, event);
}
/*
 * Complete the current flash operation: clear the busy flag and invoke
 * the completion callback (if any) with the final status.
 */
static void
bfa_flash_cb(struct bfa_flash_s *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}
/*
 * IOC event handler for the flash module: on IOC disable or failure,
 * fail any operation still in flight.
 */
static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_flash_s *flash = cbarg;

	bfa_trc(flash, event);

	/* Only IOC teardown events require action. */
	if (event != BFA_IOC_E_DISABLED && event != BFA_IOC_E_FAILED)
		return;

	if (flash->op_busy) {
		flash->status = BFA_STATUS_IOC_FAILURE;
		flash->cbfn(flash->cbarg, flash->status);
		flash->op_busy = 0;
	}
}
/*
* Send flash attribute query request.
*
* @param[in] cbarg - callback argument
*/
/*
 * Send flash attribute query request.
 *
 * The response is DMA'd into flash->dbuf and processed in
 * bfa_flash_intr().
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_query_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_query_req_s *msg =
		(struct bfi_flash_query_req_s *) flash->mb.msg;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
		     flash->dbuf_pa);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
* Send flash write request.
*
* @param[in] cbarg - callback argument
*/
/*
 * Send flash write request.
 *
 * Writes the next chunk (up to BFA_FLASH_DMA_BUF_SZ bytes) of the
 * user buffer and advances flash->offset / flash->residue.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_write_send(struct bfa_flash_s *flash)
{
	struct bfi_flash_write_req_s *msg =
		(struct bfi_flash_write_req_s *) flash->mb.msg;
	u32 len;

	/*
	 * NOTE(review): be32_to_cpu() is used here on host-to-chip
	 * fields; presumably relies on the swap being symmetric with
	 * cpu_to_be32() — confirm against the firmware interface.
	 */
	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);

	/* Chunk size: remaining bytes, capped at the DMA buffer size. */
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	      flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == flash->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);

	flash->residue -= len;
	flash->offset += len;
}
/*
* Send flash read request.
*
* @param[in] cbarg - callback argument
*/
/*
 * Send flash read request.
 *
 * Requests the next chunk (up to BFA_FLASH_DMA_BUF_SZ bytes) into the
 * DMA buffer; offset/residue bookkeeping happens on completion.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_read_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_read_req_s *msg =
		(struct bfi_flash_read_req_s *) flash->mb.msg;
	u32 len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);

	/* Chunk size: remaining bytes, capped at the DMA buffer size. */
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	      flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
* Send flash erase request.
*
* @param[in] cbarg - callback argument
*/
/*
 * Send flash erase request.
 *
 * Erases the whole partition identified by flash->type/instance.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_erase_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_erase_req_s *msg =
		(struct bfi_flash_erase_req_s *) flash->mb.msg;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
 * Process flash response messages upon receiving interrupts.
 *
 * Dispatches on the I2H message id: completes a pending query/erase
 * operation, continues a multi-fragment write/read until residue is
 * drained, or posts an AEN for unsolicited firmware events.
 *
 * @param[in] flasharg - flash structure
 * @param[in] msg - message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
{
	struct bfa_flash_s *flash = flasharg;
	u32	status;
	/* overlay the response-specific layouts on the raw mailbox msg */
	union {
		struct bfi_flash_query_rsp_s *query;
		struct bfi_flash_erase_rsp_s *erase;
		struct bfi_flash_write_rsp_s *write;
		struct bfi_flash_read_rsp_s *read;
		struct bfi_flash_event_s *event;
		struct bfi_mbmsg_s *msg;
	} m;
	m.msg = msg;
	bfa_trc(flash, msg->mh.msg_id);
	/* only unsolicited EVENT messages are valid without a pending op */
	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
		/* receiving response after ioc failure */
		bfa_trc(flash, 0x9999);
		return;
	}
	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_OK) {
			u32	i;
			struct bfa_flash_attr_s *attr, *f;
			/* swap the DMA'd attribute block into the user buffer
			 * field by field (firmware writes big-endian) */
			attr = (struct bfa_flash_attr_s *) flash->ubuf;
			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			bfa_trc(flash, attr->status);
			bfa_trc(flash, attr->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_ERASE_RSP:
		status = be32_to_cpu(m.erase->status);
		bfa_trc(flash, status);
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		bfa_trc(flash, status);
		/* complete on error or last fragment, else send the next */
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			bfa_trc(flash, flash->offset);
			bfa_flash_write_send(flash);
		}
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			/* copy this fragment out of the DMA buffer, then
			 * request the next one if data remains */
			u32 len = be32_to_cpu(m.read->length);
			bfa_trc(flash, flash->offset);
			bfa_trc(flash, len);
			memcpy(flash->ubuf + flash->offset,
				flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else
				bfa_flash_read_send(flash);
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
		/* boot version response intentionally ignored here */
		break;
	case BFI_FLASH_I2H_EVENT:
		/* unsolicited firmware event: surface config errors as AENs */
		status = be32_to_cpu(m.event->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_BAD_FWCFG)
			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
		else if (status == BFA_STATUS_INVALID_VENDOR) {
			u32 param;
			param = be32_to_cpu(m.event->param);
			bfa_trc(flash, param);
			bfa_ioc_aen_post(flash->ioc,
				BFA_IOC_AEN_INVALID_VENDOR);
		}
		break;
	default:
		WARN_ON(1);
	}
}
/*
 * Flash memory info API.
 *
 * Returns the DMA buffer size the flash module requires, rounded up to
 * the DMA alignment; a minimal-configuration driver uses no flash and
 * therefore needs no memory.
 *
 * @param[in] mincfg - minimal cfg variable
 */
u32
bfa_flash_meminfo(bfa_boolean_t mincfg)
{
	/* min driver doesn't need flash */
	return mincfg ? 0 : BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ,
					BFA_DMA_ALIGN_SZ);
}
/*
 * Flash attach API.
 *
 * Resets the flash state, registers the flash mailbox handler and the
 * IOC event notification, and clears the DMA buffer pointers when the
 * minimal-configuration driver (which never uses flash) is attaching.
 *
 * @param[in] flash - flash structure
 * @param[in] ioc - ioc structure
 * @param[in] dev - device structure
 * @param[in] trcmod - trace module
 * @param[in] mincfg - minimal configuration flag
 */
void
bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
	struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
	flash->ioc = ioc;
	flash->trcmod = trcmod;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;
	/* receive BFI_MC_FLASH mailbox messages via bfa_flash_intr() */
	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_q_qe_init(&flash->ioc_notify);
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
	/* min driver doesn't need flash */
	if (mincfg) {
		flash->dbuf_kva = NULL;
		flash->dbuf_pa = 0;
	}
}
/*
 * Claim memory for flash
 *
 * Records and zeroes the DMA buffer; a no-op for the minimal driver
 * which never issues flash commands.
 *
 * @param[in] flash - flash structure
 * @param[in] dm_kva - pointer to virtual memory address
 * @param[in] dm_pa - physical memory address
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
	bfa_boolean_t mincfg)
{
	if (mincfg)
		return;

	flash->dbuf_kva = dm_kva;
	flash->dbuf_pa = dm_pa;
	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
	/*
	 * The old trailing dm_kva/dm_pa increments were dead stores to
	 * by-value parameters and have been removed.
	 */
}
/*
 * Get flash attribute.
 *
 * Starts an asynchronous attribute query; 'cbfn' is invoked on
 * completion (from the mailbox interrupt path) and 'attr' is filled in
 * by then.  Only one flash operation may be in flight at a time.
 *
 * @param[in] flash - flash structure
 * @param[in] attr - flash attribute structure
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
	bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;
	/* single outstanding operation per flash instance */
	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}
	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;
	bfa_flash_query_send(flash);
	return BFA_STATUS_OK;
}
/*
 * Erase flash partition.
 *
 * Starts an asynchronous partition erase and posts an audit AEN for it;
 * 'cbfn' is invoked on completion from the mailbox interrupt path.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
	u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;
	/* single outstanding operation per flash instance */
	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}
	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	bfa_flash_erase_send(flash);
	/* audit-log the destructive operation */
	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
				instance, type);
	return BFA_STATUS_OK;
}
/*
 * Update flash partition.
 *
 * Starts an asynchronous, fragmented write of 'buf' to the partition;
 * the interrupt handler keeps sending fragments until 'len' bytes are
 * written, then invokes 'cbfn'.  The manufacturing partition is
 * write-protected by this API.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
	u8 instance, void *buf, u32 len, u32 offset,
	bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	bfa_trc(flash, len);
	bfa_trc(flash, offset);
	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;
	/*
	 * 'len' must be in word (4-byte) boundary
	 * 'offset' must be in sector (16kb) boundary
	 */
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;
	/* never allow the manufacturing partition to be rewritten */
	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;
	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}
	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;
	bfa_flash_write_send(flash);
	return BFA_STATUS_OK;
}
/*
 * Read flash partition.
 *
 * Starts an asynchronous, fragmented read into 'buf'; the interrupt
 * handler keeps requesting fragments until 'len' bytes have been
 * copied, then invokes 'cbfn'.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
	u8 instance, void *buf, u32 len, u32 offset,
	bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	bfa_trc(flash, len);
	bfa_trc(flash, offset);
	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;
	/*
	 * 'len' must be in word (4-byte) boundary
	 * 'offset' must be in sector (16kb) boundary
	 */
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;
	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}
	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;
	bfa_flash_read_send(flash);
	return BFA_STATUS_OK;
}
/*
 * DIAG module specific
 */
#define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
#define CT2_BFA_DIAG_MEMTEST_TOV	(9*30*1000)	/* 270000 msec = 4.5 min (CT2 ASICs) */
/*
 * IOC event handler: on IOC disable/failure, fail any in-flight fwping,
 * temperature-sensor, or blocking (memtest) diag operation with
 * BFA_STATUS_IOC_FAILURE and release its lock/timer.
 */
static void
bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
{
	struct bfa_diag_s *diag = diag_arg;
	bfa_trc(diag, event);
	bfa_trc(diag, diag->block);
	bfa_trc(diag, diag->fwping.lock);
	bfa_trc(diag, diag->tsensor.lock);
	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		/* fail a pending fwping */
		if (diag->fwping.lock) {
			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
			diag->fwping.cbfn(diag->fwping.cbarg,
				diag->fwping.status);
			diag->fwping.lock = 0;
		}
		/* fail a pending temperature query */
		if (diag->tsensor.lock) {
			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
			diag->tsensor.cbfn(diag->tsensor.cbarg,
				diag->tsensor.status);
			diag->tsensor.lock = 0;
		}
		/* fail a blocking diag op (memtest) and stop its timer */
		if (diag->block) {
			if (diag->timer_active) {
				bfa_timer_stop(&diag->timer);
				diag->timer_active = 0;
			}
			diag->status = BFA_STATUS_IOC_FAILURE;
			diag->cbfn(diag->cbarg, diag->status);
			diag->block = 0;
		}
		break;
	default:
		break;
	}
}
/*
 * Memtest timer callback: the memtest firmware has had its full
 * timeout to run, so read the result block out of shared memory,
 * byte-swap it, reset the IOC firmware state, and complete the
 * blocking diag operation.
 */
static void
bfa_diag_memtest_done(void *cbarg)
{
	struct bfa_diag_s *diag = cbarg;
	struct bfa_ioc_s  *ioc = diag->ioc;
	struct bfa_diag_memtest_result *res = diag->result;
	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
	u32	pgnum, pgoff, i;
	/* select the smem page holding the result block */
	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
			sizeof(u32)); i++) {
		/* read test result from smem */
		*((u32 *) res + i) =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
	/* Reset IOC fwstates to BFI_IOC_UNINIT */
	bfa_ioc_reset_fwstate(ioc);
	/* result fields are byte-swapped relative to the host */
	res->status = swab32(res->status);
	bfa_trc(diag, res->status);
	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
		diag->status = BFA_STATUS_OK;
	else {
		diag->status = BFA_STATUS_MEMTEST_FAILED;
		res->addr = swab32(res->addr);
		res->exp = swab32(res->exp);
		res->act = swab32(res->act);
		res->err_status = swab32(res->err_status);
		res->err_status1 = swab32(res->err_status1);
		res->err_addr = swab32(res->err_addr);
		bfa_trc(diag, res->addr);
		bfa_trc(diag, res->exp);
		bfa_trc(diag, res->act);
		bfa_trc(diag, res->err_status);
		bfa_trc(diag, res->err_status1);
		bfa_trc(diag, res->err_addr);
	}
	diag->timer_active = 0;
	diag->cbfn(diag->cbarg, diag->status);
	diag->block = 0;
}
/*
 * Firmware ping
 */

/*
 * Perform DMA test directly.
 *
 * Fills the fwping DMA buffer with the test pattern and queues a
 * fwping mailbox command pointing the firmware at it.
 */
static void
diag_fwping_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_fwping_req_s *fwping_req;
	u32	*word;
	u32	nwords, idx;

	bfa_trc(diag, diag->fwping.dbuf_pa);

	/* fill DMA area with the test pattern */
	word = (u32 *)diag->fwping.dbuf_kva;
	nwords = BFI_DIAG_DMA_BUF_SZ >> 2;
	for (idx = 0; idx < nwords; idx++)
		word[idx] = diag->fwping.data;

	/* build the mailbox request: SG list, loop count, data pattern */
	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
			diag->fwping.dbuf_pa);
	fwping_req->count = cpu_to_be32(diag->fwping.count);
	fwping_req->data = diag->fwping.data;
	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
			bfa_ioc_portid(diag->ioc));

	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
}
static void
diag_fwping_comp(struct bfa_diag_s *diag,
struct bfi_diag_fwping_rsp_s *diag_rsp)
{
u32 rsp_data = diag_rsp->data;
u8 rsp_dma_status = diag_rsp->dma_status;
bfa_trc(diag, rsp_data);
bfa_trc(diag, rsp_dma_status);
if (rsp_dma_status == BFA_STATUS_OK) {
u32 i, pat;
pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
diag->fwping.data;
/* Check mbox data */
if (diag->fwping.data != rsp_data) {
bfa_trc(diag, rsp_data);
diag->fwping.result->dmastatus =
BFA_STATUS_DATACORRUPTED;
diag->fwping.status = BFA_STATUS_DATACORRUPTED;
diag->fwping.cbfn(diag->fwping.cbarg,
diag->fwping.status);
diag->fwping.lock = 0;
return;
}
/* Check dma pattern */
for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
bfa_trc(diag, i);
bfa_trc(diag, pat);
bfa_trc(diag,
*((u32 *)diag->fwping.dbuf_kva + i));
diag->fwping.result->dmastatus =
BFA_STATUS_DATACORRUPTED;
diag->fwping.status = BFA_STATUS_DATACORRUPTED;
diag->fwping.cbfn(diag->fwping.cbarg,
diag->fwping.status);
diag->fwping.lock = 0;
return;
}
}
diag->fwping.result->dmastatus = BFA_STATUS_OK;
diag->fwping.status = BFA_STATUS_OK;
diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
diag->fwping.lock = 0;
} else {
diag->fwping.status = BFA_STATUS_HDMA_FAILED;
diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
diag->fwping.lock = 0;
}
}
/*
 * Temperature Sensor
 */
static void
diag_tempsensor_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_ts_req_s *msg;
	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
	/* NOTE(review): traces whatever is currently in the mailbox temp
	 * field (uninitialized/stale on first use) — presumably only a
	 * debug aid; confirm before relying on this trace value. */
	bfa_trc(diag, msg->temp);
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
		bfa_ioc_portid(diag->ioc));
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
}
/*
 * Temperature sensor completion handler: copy the firmware response
 * into the caller's result structure and invoke the callback.  Junction
 * temperature always succeeds (it is a register read); an on-board
 * sensor read failure is reported as DEVBUSY with temperature 0.
 */
static void
diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
{
	if (!diag->tsensor.lock) {
		/* receiving response after ioc failure */
		bfa_trc(diag, diag->tsensor.lock);
		return;
	}
	/*
	 * ASIC junction tempsensor is a reg read operation
	 * it will always return OK
	 */
	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
	diag->tsensor.temp->ts_junc = rsp->ts_junc;
	diag->tsensor.temp->ts_brd = rsp->ts_brd;
	diag->tsensor.temp->status = BFA_STATUS_OK;
	/* board sensor data is only valid when the board reports one */
	if (rsp->ts_brd) {
		if (rsp->status == BFA_STATUS_OK) {
			diag->tsensor.temp->brd_temp =
				be16_to_cpu(rsp->brd_temp);
		} else {
			bfa_trc(diag, rsp->status);
			diag->tsensor.temp->brd_temp = 0;
			diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
		}
	}
	bfa_trc(diag, rsp->ts_junc);
	bfa_trc(diag, rsp->temp);
	bfa_trc(diag, rsp->ts_brd);
	bfa_trc(diag, rsp->brd_temp);
	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
	diag->tsensor.lock = 0;
}
/*
 * LED Test command
 *
 * Builds and queues the ledtest mailbox request.
 * NOTE(review): converts and rewrites the caller's ledtest->freq in
 * place (N blinks per 10 sec -> hardware ontime value).
 */
static void
diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	struct bfi_diag_ledtest_req_s *msg;
	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
		bfa_ioc_portid(diag->ioc));
	/*
	 * convert the freq from N blinks per 10 sec to
	 * crossbow ontime value. We do it here because division is needed
	 */
	if (ledtest->freq)
		ledtest->freq = 500 / ledtest->freq;
	/* clamp to the minimum non-zero ontime */
	if (ledtest->freq == 0)
		ledtest->freq = 1;
	bfa_trc(diag, ledtest->freq);
	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
	msg->cmd = (u8) ledtest->cmd;
	msg->color = (u8) ledtest->color;
	msg->portid = bfa_ioc_portid(diag->ioc);
	msg->led = ledtest->led;
	msg->freq = cpu_to_be16(ledtest->freq);
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
}
/*
 * LED test completion handler: simply releases the ledtest lock; the
 * response payload is unused.
 */
static void
diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
{
	bfa_trc(diag, diag->ledtest.lock);
	diag->ledtest.lock = BFA_FALSE;
	/* no bfa_cb_queue is needed because driver is not waiting */
}
/*
 * Port beaconing
 *
 * Builds and queues a port-beacon mailbox request.
 *
 * @param[in] diag - diag structure
 * @param[in] beacon - beacon on/off
 * @param[in] sec - beaconing duration in seconds
 */
static void
diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
{
	struct bfi_diag_portbeacon_req_s *msg;
	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
		bfa_ioc_portid(diag->ioc));
	msg->beacon = beacon;
	msg->period = cpu_to_be32(sec);
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
}
/*
 * Port beacon completion handler: the firmware signals that beaconing
 * has ended, so clear the state and notify the driver's beacon
 * callback (if registered) that beaconing is off.
 */
static void
diag_portbeacon_comp(struct bfa_diag_s *diag)
{
	bfa_trc(diag, diag->beacon.state);
	diag->beacon.state = BFA_FALSE;
	if (diag->cbfn_beacon)
		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
}
/*
 * Diag hmbox handler
 *
 * Dispatches firmware-to-host diag mailbox messages to the matching
 * completion handler; unknown message ids trigger a kernel warning.
 */
void
bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_diag_s *diag = diagarg;
	switch (msg->mh.msg_id) {
	case BFI_DIAG_I2H_PORTBEACON:
		diag_portbeacon_comp(diag);
		break;
	case BFI_DIAG_I2H_FWPING:
		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_TEMPSENSOR:
		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
		break;
	case BFI_DIAG_I2H_LEDTEST:
		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
		break;
	default:
		bfa_trc(diag, msg->mh.msg_id);
		WARN_ON(1);
	}
}
/*
 * Gen RAM Test
 *
 * Boots the memtest firmware image (destructive: the adapter must be
 * disabled first) and arms a timer after which the result is collected
 * by bfa_diag_memtest_done().
 *
 * @param[in] *diag - diag data struct
 * @param[in] *memtest - mem test params input from upper layer,
 * @param[in] pattern - mem test pattern
 * @param[in] *result - mem test result
 * @param[in] cbfn - mem test callback function
 * @param[in] cbarg - callback function arg
 *
 * @param[out]
 */
bfa_status_t
bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
	u32 pattern, struct bfa_diag_memtest_result *result,
	bfa_cb_diag_t cbfn, void *cbarg)
{
	u32	memtest_tov;
	bfa_trc(diag, pattern);
	/* destructive test: the adapter must already be disabled */
	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
		return BFA_STATUS_ADAPTER_ENABLED;
	/* check to see if there is another destructive diag cmd running */
	if (diag->block) {
		bfa_trc(diag, diag->block);
		return BFA_STATUS_DEVBUSY;
	} else
		diag->block = 1;
	diag->result = result;
	diag->cbfn = cbfn;
	diag->cbarg = cbarg;
	/* download memtest code and take LPU0 out of reset */
	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
	/* CT2 ASICs need a much longer memtest timeout */
	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
			bfa_diag_memtest_done, diag, memtest_tov);
	diag->timer_active = 1;
	return BFA_STATUS_OK;
}
/*
 * DIAG firmware ping command
 *
 * Starts an asynchronous PCIe DMA loop test; diag_fwping_comp()
 * validates the echoed pattern and invokes 'cbfn'.
 *
 * @param[in] *diag - diag data struct
 * @param[in] cnt - dma loop count for testing PCIE
 * @param[in] data - data pattern to pass in fw
 * @param[in] *result - pt to bfa_diag_fwping_result_t data struct
 * @param[in] cbfn - callback function
 * @param[in] *cbarg - callback function arg
 *
 * @param[out]
 */
bfa_status_t
bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	bfa_trc(diag, cnt);
	bfa_trc(diag, data);
	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;
	/* fwping is not supported on CT2 Ethernet functions */
	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
		return BFA_STATUS_CMD_NOTSUPP;
	/* check to see if there is another destructive diag cmd running */
	if (diag->block || diag->fwping.lock) {
		bfa_trc(diag, diag->block);
		bfa_trc(diag, diag->fwping.lock);
		return BFA_STATUS_DEVBUSY;
	}
	/* Initialization */
	diag->fwping.lock = 1;
	diag->fwping.cbfn = cbfn;
	diag->fwping.cbarg = cbarg;
	diag->fwping.result = result;
	diag->fwping.data = data;
	diag->fwping.count = cnt;
	/* Init test results */
	diag->fwping.result->data = 0;
	diag->fwping.result->status = BFA_STATUS_OK;
	/* kick off the first ping */
	diag_fwping_send(diag);
	return BFA_STATUS_OK;
}
/*
 * Read Temperature Sensor
 *
 * Starts an asynchronous temperature query; diag_tempsensor_comp()
 * fills 'result' and invokes 'cbfn'.
 *
 * @param[in] *diag - diag data struct
 * @param[in] *result - pt to bfa_diag_temp_t data struct
 * @param[in] cbfn - callback function
 * @param[in] *cbarg - callback function arg
 *
 * @param[out]
 */
bfa_status_t
bfa_diag_tsensor_query(struct bfa_diag_s *diag,
		struct bfa_diag_results_tempsensor_s *result,
		bfa_cb_diag_t cbfn, void *cbarg)
{
	/* check to see if there is a destructive diag cmd running */
	if (diag->block || diag->tsensor.lock) {
		bfa_trc(diag, diag->block);
		bfa_trc(diag, diag->tsensor.lock);
		return BFA_STATUS_DEVBUSY;
	}
	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;
	/* Init diag mod params */
	diag->tsensor.lock = 1;
	diag->tsensor.temp = result;
	diag->tsensor.cbfn = cbfn;
	diag->tsensor.cbarg = cbarg;
	/* Send msg to fw */
	diag_tempsensor_send(diag);
	return BFA_STATUS_OK;
}
/*
 * LED Test command
 *
 * Starts a fire-and-forget LED test; the lock is released when the
 * firmware acknowledges (diag_ledtest_comp()).  Mutually exclusive
 * with port beaconing.
 *
 * @param[in] *diag - diag data struct
 * @param[in] *ledtest - pt to ledtest data structure
 *
 * @param[out]
 */
bfa_status_t
bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	bfa_trc(diag, ledtest->cmd);
	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;
	/* beacon and ledtest drive the same LEDs — disallow overlap */
	if (diag->beacon.state)
		return BFA_STATUS_BEACON_ON;
	if (diag->ledtest.lock)
		return BFA_STATUS_LEDTEST_OP;
	/* Send msg to fw */
	diag->ledtest.lock = BFA_TRUE;
	diag_ledtest_send(diag, ledtest);
	return BFA_STATUS_OK;
}
/*
 * Port beaconing command
 *
 * Turns port/link beaconing on or off, notifies the driver's beacon
 * callback, and forwards the request to the firmware.  Mutually
 * exclusive with the LED test.
 *
 * @param[in] *diag - diag data struct
 * @param[in] beacon - port beaconing 1:ON 0:OFF
 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
 * @param[in] sec - beaconing duration in seconds
 *
 * @param[out]
 */
bfa_status_t
bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
	bfa_boolean_t link_e2e_beacon, uint32_t sec)
{
	bfa_trc(diag, beacon);
	bfa_trc(diag, link_e2e_beacon);
	bfa_trc(diag, sec);
	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;
	if (diag->ledtest.lock)
		return BFA_STATUS_LEDTEST_OP;
	if (diag->beacon.state && beacon)	/* beacon already on */
		return BFA_STATUS_BEACON_ON;
	diag->beacon.state	= beacon;
	diag->beacon.link_e2e	= link_e2e_beacon;
	if (diag->cbfn_beacon)
		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
	/* Send msg to fw */
	diag_portbeacon_send(diag, beacon, sec);
	return BFA_STATUS_OK;
}
/*
 * Return DMA memory needed by diag module (the fwping buffer, rounded
 * up to the DMA alignment).
 */
u32
bfa_diag_meminfo(void)
{
	u32 dma_buf_sz = BFI_DIAG_DMA_BUF_SZ;

	return BFA_ROUNDUP(dma_buf_sz, BFA_DMA_ALIGN_SZ);
}
/*
 * Attach virtual and physical memory for Diag.
 *
 * Resets the diag state, registers the diag mailbox handler and the
 * IOC event notification, and records the driver's beacon callback.
 */
void
bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
{
	diag->dev = dev;
	diag->ioc = ioc;
	diag->trcmod = trcmod;
	diag->block = 0;
	diag->cbfn = NULL;
	diag->cbarg = NULL;
	diag->result = NULL;
	diag->cbfn_beacon = cbfn_beacon;
	/* receive BFI_MC_DIAG mailbox messages via bfa_diag_intr() */
	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
	bfa_q_qe_init(&diag->ioc_notify);
	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
}
/*
 * Claim the diag DMA buffer (used by fwping) and zero it.
 *
 * @param[in] diag - diag structure
 * @param[in] dm_kva - DMA buffer virtual address
 * @param[in] dm_pa - DMA buffer physical address
 */
void
bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
{
	diag->fwping.dbuf_kva = dm_kva;
	diag->fwping.dbuf_pa = dm_pa;
	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
}
/*
 * PHY module specific
 */
#define BFA_PHY_DMA_BUF_SZ	0x02000	/* 8k dma buffer */
#define BFA_PHY_LOCK_STATUS	0x018878	/* phy semaphore status reg (BAR0 offset) */
/*
 * Copy 'sz' bytes of 32-bit words from 'ibuf' to 'obuf', converting
 * each word from big-endian (firmware order) to CPU order.
 */
static void
bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
{
	int idx = 0;
	int nwords = sz >> 2;

	while (idx < nwords) {
		obuf[idx] = be32_to_cpu(ibuf[idx]);
		idx++;
	}
}
static bfa_boolean_t
bfa_phy_present(struct bfa_phy_s *phy)
{
return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
}
/*
 * IOC event handler: on IOC disable/failure, fail any in-flight phy
 * operation with BFA_STATUS_IOC_FAILURE and release the busy flag.
 */
static void
bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_phy_s *phy = cbarg;
	bfa_trc(phy, event);
	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (phy->op_busy) {
			phy->status = BFA_STATUS_IOC_FAILURE;
			phy->cbfn(phy->cbarg, phy->status);
			phy->op_busy = 0;
		}
		break;
	default:
		break;
	}
}
/*
 * Send phy attribute query request.
 *
 * The firmware DMAs a struct bfa_phy_attr_s into the phy DMA buffer;
 * bfa_phy_intr() byte-swaps it into the user buffer on completion.
 *
 * @param[in] cbarg - callback argument (phy structure)
 */
static void
bfa_phy_query_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_query_req_s *msg =
			(struct bfi_phy_query_req_s *) phy->mb.msg;
	msg->instance = phy->instance;
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
 * Send phy write request.
 *
 * Stages at most BFA_PHY_DMA_BUF_SZ bytes of the user buffer into the
 * DMA buffer (byte-swapped per 16-bit word) and advances phy->offset /
 * phy->residue for the next fragment.
 *
 * @param[in] cbarg - callback argument (phy structure)
 */
static void
bfa_phy_write_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_write_req_s *msg =
			(struct bfi_phy_write_req_s *) phy->mb.msg;
	u32	len;
	u16	*buf, *dbuf;
	int	i, sz;

	msg->instance = phy->instance;
	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
			phy->residue : BFA_PHY_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == phy->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);

	/*
	 * Stage user data into the DMA buffer, swapped per 16-bit word.
	 * NOTE(review): the original copied in the opposite direction
	 * (DMA buffer -> user buffer), which clobbered the caller's data
	 * and DMA'd stale bytes to the firmware; the read path in
	 * bfa_phy_intr() confirms this user->DMA direction is intended.
	 */
	buf = (u16 *) (phy->ubuf + phy->offset);
	dbuf = (u16 *)phy->dbuf_kva;
	sz = len >> 1;
	for (i = 0; i < sz; i++)
		dbuf[i] = cpu_to_be16(buf[i]);

	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);

	phy->residue -= len;
	phy->offset += len;
}
/*
 * Send phy read request.
 *
 * Requests at most BFA_PHY_DMA_BUF_SZ bytes per mailbox command;
 * bfa_phy_intr() byte-swaps the returned data into the user buffer and
 * issues the next fragment until phy->residue reaches zero.
 *
 * @param[in] cbarg - callback argument (phy structure)
 */
static void
bfa_phy_read_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_read_req_s *msg =
			(struct bfi_phy_read_req_s *) phy->mb.msg;
	u32	len;
	msg->instance = phy->instance;
	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
			phy->residue : BFA_PHY_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
 * Send phy stats request.
 *
 * The firmware DMAs a struct bfa_phy_stats_s into the phy DMA buffer;
 * bfa_phy_intr() byte-swaps it into the user buffer on completion.
 *
 * @param[in] cbarg - callback argument (phy structure)
 */
static void
bfa_phy_stats_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_stats_req_s *msg =
			(struct bfi_phy_stats_req_s *) phy->mb.msg;
	msg->instance = phy->instance;
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
 * Phy memory info API.
 *
 * Returns the DMA buffer size the phy module requires, rounded up to
 * the DMA alignment; a minimal-configuration driver uses no phy and
 * therefore needs no memory.
 *
 * @param[in] mincfg - minimal cfg variable
 */
u32
bfa_phy_meminfo(bfa_boolean_t mincfg)
{
	/* min driver doesn't need phy */
	return mincfg ? 0 : BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * Phy attach API.
 *
 * Resets the phy state, registers the phy mailbox handler and the IOC
 * event notification, and clears the DMA buffer pointers when the
 * minimal-configuration driver (which never uses the phy) is attaching.
 *
 * @param[in] phy - phy structure
 * @param[in] ioc - ioc structure
 * @param[in] dev - device structure
 * @param[in] trcmod - trace module
 * @param[in] mincfg - minimal configuration flag
 */
void
bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
	phy->ioc = ioc;
	phy->trcmod = trcmod;
	phy->cbfn = NULL;
	phy->cbarg = NULL;
	phy->op_busy = 0;
	/* receive BFI_MC_PHY mailbox messages via bfa_phy_intr() */
	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
	bfa_q_qe_init(&phy->ioc_notify);
	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
	/* min driver doesn't need phy */
	if (mincfg) {
		phy->dbuf_kva = NULL;
		phy->dbuf_pa = 0;
	}
}
/*
 * Claim memory for phy
 *
 * Records and zeroes the DMA buffer; a no-op for the minimal driver
 * which never issues phy commands.
 *
 * @param[in] phy - phy structure
 * @param[in] dm_kva - pointer to virtual memory address
 * @param[in] dm_pa - physical memory address
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
	bfa_boolean_t mincfg)
{
	if (mincfg)
		return;

	phy->dbuf_kva = dm_kva;
	phy->dbuf_pa = dm_pa;
	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
	/*
	 * The old trailing dm_kva/dm_pa increments were dead stores to
	 * by-value parameters and have been removed.
	 */
}
bfa_boolean_t
bfa_phy_busy(struct bfa_ioc_s *ioc)
{
void __iomem *rb;
rb = bfa_ioc_bar0(ioc);
return readl(rb + BFA_PHY_LOCK_STATUS);
}
/*
 * Get phy attribute.
 *
 * Starts an asynchronous attribute query; 'cbfn' is invoked on
 * completion (from the mailbox interrupt path) and 'attr' is filled in
 * by then.  Requires a present phy, an operational IOC, and neither a
 * pending phy op nor a hardware phy-semaphore lock.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] attr - phy attribute structure
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
	bfa_trc(phy, instance);
	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;
	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;
	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}
	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->ubuf = (uint8_t *) attr;
	bfa_phy_query_send(phy);
	return BFA_STATUS_OK;
}
/*
 * Get phy stats.
 *
 * Starts an asynchronous stats query; 'cbfn' is invoked on completion
 * and 'stats' is filled in by then.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] stats - pointer to phy stats
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
		struct bfa_phy_stats_s *stats,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
	bfa_trc(phy, instance);
	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;
	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;
	/* busy if a phy op is pending or the hw semaphore is held */
	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}
	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->ubuf = (u8 *) stats;
	bfa_phy_stats_send(phy);
	return BFA_STATUS_OK;
}
/*
 * Update phy image.
 *
 * Starts an asynchronous, fragmented write of 'buf' to the phy image;
 * the interrupt handler keeps sending fragments until 'len' bytes are
 * written, then invokes 'cbfn'.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
		void *buf, u32 len, u32 offset,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
	bfa_trc(phy, instance);
	bfa_trc(phy, len);
	bfa_trc(phy, offset);
	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;
	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;
	/* 'len' must be in word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FAILED;
	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}
	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->residue = len;
	phy->offset = 0;
	phy->addr_off = offset;
	phy->ubuf = buf;
	bfa_phy_write_send(phy);
	return BFA_STATUS_OK;
}
/*
 * Read phy image.
 *
 * Starts an asynchronous, fragmented read into 'buf'; the interrupt
 * handler keeps requesting fragments until 'len' bytes have been
 * copied, then invokes 'cbfn'.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
		void *buf, u32 len, u32 offset,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
	bfa_trc(phy, instance);
	bfa_trc(phy, len);
	bfa_trc(phy, offset);
	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;
	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;
	/* 'len' must be in word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FAILED;
	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}
	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->residue = len;
	phy->offset = 0;
	phy->addr_off = offset;
	phy->ubuf = buf;
	bfa_phy_read_send(phy);
	return BFA_STATUS_OK;
}
/*
 * Process phy response messages upon receiving interrupts.
 *
 * Dispatches on the I2H message id: completes a pending query/stats
 * operation (byte-swapping the DMA'd structure into the user buffer),
 * or continues a multi-fragment write/read until residue is drained.
 *
 * @param[in] phyarg - phy structure
 * @param[in] msg - message structure
 */
void
bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_phy_s *phy = phyarg;
	u32	status;
	/* overlay the response-specific layouts on the raw mailbox msg */
	union {
		struct bfi_phy_query_rsp_s *query;
		struct bfi_phy_stats_rsp_s *stats;
		struct bfi_phy_write_rsp_s *write;
		struct bfi_phy_read_rsp_s *read;
		struct bfi_mbmsg_s *msg;
	} m;
	m.msg = msg;
	bfa_trc(phy, msg->mh.msg_id);
	if (!phy->op_busy) {
		/* receiving response after ioc failure */
		bfa_trc(phy, 0x9999);
		return;
	}
	switch (msg->mh.msg_id) {
	case BFI_PHY_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		bfa_trc(phy, status);
		if (status == BFA_STATUS_OK) {
			/* byte-swap the DMA'd attribute block */
			struct bfa_phy_attr_s *attr =
				(struct bfa_phy_attr_s *) phy->ubuf;
			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
					sizeof(struct bfa_phy_attr_s));
			bfa_trc(phy, attr->status);
			bfa_trc(phy, attr->length);
		}
		phy->status = status;
		phy->op_busy = 0;
		if (phy->cbfn)
			phy->cbfn(phy->cbarg, phy->status);
		break;
	case BFI_PHY_I2H_STATS_RSP:
		status = be32_to_cpu(m.stats->status);
		bfa_trc(phy, status);
		if (status == BFA_STATUS_OK) {
			/* byte-swap the DMA'd stats block */
			struct bfa_phy_stats_s *stats =
				(struct bfa_phy_stats_s *) phy->ubuf;
			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
				sizeof(struct bfa_phy_stats_s));
			bfa_trc(phy, stats->status);
		}
		phy->status = status;
		phy->op_busy = 0;
		if (phy->cbfn)
			phy->cbfn(phy->cbarg, phy->status);
		break;
	case BFI_PHY_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		bfa_trc(phy, status);
		/* complete on error or last fragment, else send the next */
		if (status != BFA_STATUS_OK || phy->residue == 0) {
			phy->status = status;
			phy->op_busy = 0;
			if (phy->cbfn)
				phy->cbfn(phy->cbarg, phy->status);
		} else {
			bfa_trc(phy, phy->offset);
			bfa_phy_write_send(phy);
		}
		break;
	case BFI_PHY_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		bfa_trc(phy, status);
		if (status != BFA_STATUS_OK) {
			phy->status = status;
			phy->op_busy = 0;
			if (phy->cbfn)
				phy->cbfn(phy->cbarg, phy->status);
		} else {
			/* swap this fragment out of the DMA buffer, then
			 * request the next one if data remains */
			u32 len = be32_to_cpu(m.read->length);
			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
			u16 *dbuf = (u16 *)phy->dbuf_kva;
			int i, sz = len >> 1;
			bfa_trc(phy, phy->offset);
			bfa_trc(phy, len);
			for (i = 0; i < sz; i++)
				buf[i] = be16_to_cpu(dbuf[i]);
			phy->residue -= len;
			phy->offset += len;
			if (phy->residue == 0) {
				phy->status = status;
				phy->op_busy = 0;
				if (phy->cbfn)
					phy->cbfn(phy->cbarg, phy->status);
			} else
				bfa_phy_read_send(phy);
		}
		break;
	default:
		WARN_ON(1);
	}
}
/*
 * DCONF module specific
 */
BFA_MODULE(dconf);

/*
 * DCONF state machine events
 */
enum bfa_dconf_event {
	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash done */
	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
	BFA_DCONF_SM_TIMEOUT		= 4,	/* update timer fired */
	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
};

/* forward declaration of DCONF state machine states */
static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);

/* forward declaration of dconf helper functions */
static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
static void bfa_dconf_timer(void *cbarg);
static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
/*
 * Beginning state of the dconf module. Waiting for an event to start.
 */
static void
bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_status_t bfa_status;

	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_INIT:
		/* in min-cfg mode dconf is unused; report completion now */
		if (dconf->min_cfg) {
			bfa_trc(dconf->bfa, dconf->min_cfg);
			bfa_fsm_send_event(&dconf->bfa->iocfc,
					IOCFC_E_DCONF_DONE);
			return;
		}
		/* kick off the flash read, guarded by a timeout timer */
		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
					BFA_FLASH_PART_DRV, dconf->instance,
					dconf->dconf,
					sizeof(struct bfa_dconf_s), 0,
					bfa_dconf_init_cb, dconf->bfa);
		if (bfa_status != BFA_STATUS_OK) {
			/* read could not be started - fail the init */
			bfa_timer_stop(&dconf->timer);
			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
			return;
		}
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		/* fall through - nothing else to do in uninit state */
	case BFA_DCONF_SM_IOCDISABLE:
	case BFA_DCONF_SM_WR:
	case BFA_DCONF_SM_FLASH_COMP:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * Read flash for dconf entries and make a call back to the driver once done.
 */
static void
bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_FLASH_COMP:
		/* flash read finished in time - module is ready */
		bfa_timer_stop(&dconf->timer);
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		break;
	case BFA_DCONF_SM_TIMEOUT:
		/* flash read timed out - go ready but flag the IOC failure */
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_timer_stop(&dconf->timer);
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_timer_stop(&dconf->timer);
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * DCONF Module is in ready state. Has completed the initialization.
 */
static void
bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_WR:
		/*
		 * An entry changed; defer the flash write via the update
		 * timer so multiple changes can be coalesced (dirty state).
		 */
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		break;
	case BFA_DCONF_SM_INIT:
	case BFA_DCONF_SM_IOCDISABLE:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * Entries are dirty; they will be written back to flash when the update
 * timer expires (or immediately on module exit).
 */
static void
bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_TIMEOUT:
		/* coalescing window over - start the flash write */
		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
		bfa_dconf_flash_write(dconf);
		break;
	case BFA_DCONF_SM_WR:
		/* another change - restart the coalescing timer */
		bfa_timer_stop(&dconf->timer);
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		break;
	case BFA_DCONF_SM_EXIT:
		/* flush the dirty entries before leaving */
		bfa_timer_stop(&dconf->timer);
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
		bfa_dconf_flash_write(dconf);
		break;
	case BFA_DCONF_SM_FLASH_COMP:
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		/* IOC going down with unsynced entries - remember that */
		bfa_timer_stop(&dconf->timer);
		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * Final sync of the dconf entries to flash during module exit.
 */
static void
bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_IOCDISABLE:
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_timer_stop(&dconf->timer);
		/* fall through - finish the exit just like on timeout */
	case BFA_DCONF_SM_TIMEOUT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * A flash write is in flight; wait for its completion.
 */
static void
bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_FLASH_COMP:
		/* write finished - back to ready */
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		break;
	case BFA_DCONF_SM_WR:
		/* changed again while syncing - go dirty with a new timer */
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		/* wait for the in-flight write, then finish the exit */
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * IOC went down while entries were dirty; resync once it comes back.
 */
static void
bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_INIT:
		/* IOC back up - resume the dirty/coalescing cycle */
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
* Compute and return memory needed by DRV_CFG module.
*/
static void
bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
struct bfa_s *bfa)
{
struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
if (cfg->drvcfg.min_cfg)
bfa_mem_kva_setup(meminfo, dconf_kva,
sizeof(struct bfa_dconf_hdr_s));
else
bfa_mem_kva_setup(meminfo, dconf_kva,
sizeof(struct bfa_dconf_s));
}
/*
 * Attach the dconf module: bind it to its bfa instance, carve its buffer
 * out of the pre-allocated kva area and put the state machine in uninit.
 */
static void
bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	dconf->bfad = bfad;
	dconf->bfa = bfa;
	dconf->instance = bfa->ioc.port_id;
	bfa_trc(bfa, dconf->instance);

	/* claim the kva reserved by bfa_dconf_meminfo() */
	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
	if (cfg->drvcfg.min_cfg) {
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
		dconf->min_cfg = BFA_TRUE;
	} else {
		dconf->min_cfg = BFA_FALSE;
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
	}

	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
}
/*
 * Completion callback for the initial flash read started in
 * bfa_dconf_sm_uninit(). Validates/repairs the header and signals the
 * iocfc state machine that dconf initialization is done.
 */
static void
bfa_dconf_init_cb(void *arg, bfa_status_t status)
{
	struct bfa_s *bfa = arg;
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
	if (status == BFA_STATUS_OK) {
		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
		/* repair a missing/stale header in the flash image */
		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
	}
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
}
/*
 * Start the dconf module by feeding INIT to its state machine.
 */
void
bfa_dconf_modinit(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
}

/* dconf needs no work on start - required by the BFA module interface */
static void
bfa_dconf_start(struct bfa_s *bfa)
{
}

/* dconf needs no work on stop - required by the BFA module interface */
static void
bfa_dconf_stop(struct bfa_s *bfa)
{
}
/* Update-timer expiry: tell the state machine to sync to flash. */
static void bfa_dconf_timer(void *cbarg)
{
	struct bfa_dconf_mod_s *dconf = cbarg;

	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
}

/* Propagate an IOC disable to the dconf state machine. */
static void
bfa_dconf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
}

/* dconf needs no work on detach - required by the BFA module interface */
static void
bfa_dconf_detach(struct bfa_s *bfa)
{
}
/*
 * Start an asynchronous write of the dconf entries to the DRV flash
 * partition; bfa_dconf_cbfn() is invoked when the write completes.
 *
 * Returns the status of starting the write (not of the write itself).
 */
static bfa_status_t
bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
{
	bfa_status_t bfa_status;

	bfa_trc(dconf->bfa, 0);

	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
				BFA_FLASH_PART_DRV, dconf->instance,
				dconf->dconf, sizeof(struct bfa_dconf_s), 0,
				bfa_dconf_cbfn, dconf);
	/* a failure to even start the write is unexpected - warn loudly */
	if (bfa_status != BFA_STATUS_OK)
		WARN_ON(bfa_status);
	bfa_trc(dconf->bfa, bfa_status);

	return bfa_status;
}
/*
 * Mark the dconf entries dirty so they get written back to flash.
 *
 * Fails when the IOC is down with unsynced entries or when running in
 * min-cfg mode (where dconf is not used).
 */
bfa_status_t
bfa_dconf_update(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	bfa_trc(dconf->bfa, 0);
	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
		return BFA_STATUS_FAILED;

	if (dconf->min_cfg) {
		bfa_trc(dconf->bfa, dconf->min_cfg);
		return BFA_STATUS_FAILED;
	}

	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
	return BFA_STATUS_OK;
}
/* Flash-write completion callback: warn on error and notify the SM. */
static void
bfa_dconf_cbfn(void *arg, bfa_status_t status)
{
	struct bfa_dconf_mod_s *dconf = arg;

	WARN_ON(status);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
}

/* Shut down the dconf module (flushes dirty entries via the SM). */
void
bfa_dconf_modexit(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
}
| gpl-2.0 |
DeqingSun/Glass_kernel | drivers/s390/block/dasd.c | 4843 | 96607 | /*
* File...........: linux/drivers/s390/block/dasd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2009
*/
#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd:"
#include "dasd_int.h"
/*
* SECTION: Constant definitions to be used within this file
*/
#define DASD_CHANQ_MAX_SIZE 4
#define DASD_SLEEPON_START_TAG (void *) 1
#define DASD_SLEEPON_END_TAG (void *) 2
/*
* SECTION: exported variables of dasd.c
*/
debug_info_t *dasd_debug_area;
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
" Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
/*
* SECTION: prototypes for static functions of dasd.c
*/
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
/*
* SECTION: Operations on the device structure.
*/
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
/*
 * Allocate memory for a new device structure.
 *
 * Returns the initialized device or an ERR_PTR on allocation failure;
 * partial allocations are rolled back before returning.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	/* carve both areas into chunk lists for cqr allocation */
	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);

	return device;
}
/*
 * Free memory of a device structure (counterpart of dasd_alloc_device).
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}
/*
 * Allocate memory for a new block device structure.
 *
 * Returns the initialized block or an ERR_PTR on allocation failure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;
	spin_lock_init(&block->profile.lock);

	return block;
}
/*
 * Free memory of a block structure (counterpart of dasd_alloc_block).
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
/*
 * Make a new device known to the system: take a long-term reference and
 * allocate the block request queue, if any.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			/* drop the reference taken above on failure */
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
/*
 * Let the system forget about a device: release discipline modules, the
 * request queue and the reference taken in dasd_state_new_to_known().
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}
/*
 * Create a debugfs subdirectory "name" below base_dentry.
 *
 * Returns the new dentry, or NULL when the parent is missing or the
 * directory could not be created.
 */
static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *entry = NULL;

	if (base_dentry) {
		entry = debugfs_create_dir(name, base_dentry);
		if (IS_ERR(entry))
			entry = NULL;
	}
	return entry;
}
/*
 * Bring the device to the BASIC state: register the gendisk (if there is
 * a block device), set up debugfs/profiling entries and the per-device
 * debug feature area.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}
/*
 * Take the device back from BASIC to KNOWN: terminate any running i/o,
 * free the gendisk and the debugfs/profiling/debug-feature resources.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		if (device->block->debugfs_dentry)
			debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	if (device->debugfs_dentry)
		debugfs_remove(device->debugfs_dentry);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd disciplines
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			/* -EAGAIN keeps BASIC; any other error -> UNFMT */
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			/* partition scan failed - fall back to BASIC */
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;

		rc = dasd_flush_block_queue(block);
		if (rc) {
			/* flush failed - stay in READY */
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		/* forget the detected format */
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}
/*
 * Back to basic: leave the UNFMT (unformatted) state.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue. Sends KOBJ_CHANGE uevents so udev sees the state change.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		/* raw-track access has no partitions - one event suffices */
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		/* announce the disk and each of its partitions */
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}
/*
 * Stop the requeueing of requests again: drop from ONLINE to READY and
 * send KOBJ_CHANGE uevents for the disk and its partitions.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}
/*
 * Device startup state changes: step the device from its current state
 * towards the (higher) target state, one transition per function call
 * in sequence. Stops at the first transition that fails.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	/* an unformatted device cannot be brought beyond UNFMT */
	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}
/*
 * Device shutdown state changes: step the device from its current state
 * down towards the (lower) target state, one transition per function
 * call in sequence. Stops at the first transition that fails.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}
/*
 * This is the main startup/shutdown routine: drive the device state
 * towards device->target and wake up waiters when it is reached.
 * Caller holds device->state_mutex.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		/* transition pending - discipline will re-trigger us */
		return;
	if (rc)
		/* transition failed - give up and stay where we are */
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call do do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);

	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	/* drop the reference taken in dasd_kick_device() */
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	/* hold a reference until do_kick_device() has run */
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
/*
 * dasd_reload_device will schedule a call do do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);

	device->discipline->reload(device);
	/* drop the reference taken in dasd_reload_device() */
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	/* hold a reference until do_reload_device() has run */
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	schedule_work(&device->reload_device);
}
EXPORT_SYMBOL(dasd_reload_device);
/*
 * dasd_restore_device will schedule a call do do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);

	device->cdev->drv->restore(device->cdev);
	/* drop the reference taken in dasd_restore_device() */
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	/* hold a reference until do_restore_device() has run */
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
	schedule_work(&device->restore_device);
}
/*
 * Set the target state for a device and starts the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		/* wake waiters if we already are at the new target */
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
/*
 * Bring a single device online and wait until the state change has
 * finished.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	/* wait condition: state change has reached its target */
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
/*
* SECTION: device operation (interrupt handler, start i/o, term i/o ...)
*/
unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info dasd_global_profile_data;
static struct dentry *dasd_global_profile_dentry;
static struct dentry *dasd_debugfs_global_entry;
/*
 * Add profiling information for cqr before execution.
 *
 * Counts the request in the global, per-block and per-device profiles.
 * Fix: the per-block READ counter was incremented without checking
 * block->profile.data for NULL, dereferencing a NULL pointer for READ
 * requests while block profiling is off; the check is now nested the
 * same way as in the per-device section below.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	if (dasd_global_profile_level) {
		dasd_global_profile_data.dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile_data.dasd_read_nr_req[counter]++;
	}

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}
/*
 * Add profiling information for cqr after execution.
 */

/* find the histogram bucket (0..30) for a value: bucket i covers
 * values with value >> (2+i) == 0 */
#define dasd_profile_counter(value, index)			   \
{								   \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		;						   \
}

/*
 * Fold one completed request into a profile data record. All *_ind
 * arguments are pre-computed histogram bucket indices.
 */
static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
			memset(data, 0, sizeof(*data));
			getnstimeofday(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	/* read requests are additionally counted in the read histograms */
	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}
/*
 * Account a completed request in the global, per-block and per-device
 * profiles: derive the phase durations from the cqr clocks, bucket them
 * and add them to each enabled profile.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	/* nothing to account without a full set of timestamps */
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	/* TOD clock deltas; >> 12 converts to microseconds */
	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	if (dasd_global_profile_level) {
		dasd_profile_end_add_data(&dasd_global_profile_data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}

	spin_lock(&block->profile.lock);
	if (block->profile.data)
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data)
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&device->profile.lock);
}
/*
 * Clear all collected statistics of a profile and restart its start-of-day
 * time stamp.  A profile that is currently switched off is left alone.
 */
void dasd_profile_reset(struct dasd_profile *profile)
{
 struct dasd_profile_info *data;

 spin_lock_bh(&profile->lock);
 data = profile->data;
 if (data) {
  memset(data, 0, sizeof(*data));
  getnstimeofday(&data->starttod);
 }
 spin_unlock_bh(&profile->lock);
}
/*
 * Zero the global (driver-wide) statistics and restart its start-of-day
 * time stamp.  NOTE(review): unlike the per-profile variants this takes
 * no lock — presumably racing updates are tolerated here; confirm.
 */
void dasd_global_profile_reset(void)
{
 memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
 getnstimeofday(&dasd_global_profile_data.starttod);
}
int dasd_profile_on(struct dasd_profile *profile)
{
struct dasd_profile_info *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock_bh(&profile->lock);
if (profile->data) {
spin_unlock_bh(&profile->lock);
kfree(data);
return 0;
}
getnstimeofday(&data->starttod);
profile->data = data;
spin_unlock_bh(&profile->lock);
return 0;
}
/*
 * Disable statistics gathering for a profile and free its counters.
 * Safe to call when the profile is already off (kfree(NULL) is a no-op).
 */
void dasd_profile_off(struct dasd_profile *profile)
{
 spin_lock_bh(&profile->lock);
 kfree(profile->data);
 profile->data = NULL;
 spin_unlock_bh(&profile->lock);
}
/*
 * Copy a user-space buffer into a freshly vmalloc'ed, NUL-terminated
 * kernel string, stripping a single trailing newline if present.
 * Returns the buffer (caller must vfree() it) or ERR_PTR on failure.
 *
 * Fix: guard the trailing-newline check against user_len == 0 — a
 * zero-length write would otherwise read/write buffer[-1] out of
 * bounds.
 */
char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
 char *buffer;

 buffer = vmalloc(user_len + 1);
 if (buffer == NULL)
  return ERR_PTR(-ENOMEM);
 if (copy_from_user(buffer, user_buf, user_len) != 0) {
  vfree(buffer);
  return ERR_PTR(-EFAULT);
 }
 /* got the string, now strip linefeed. */
 if (user_len && buffer[user_len - 1] == '\n')
  buffer[user_len - 1] = 0;
 else
  buffer[user_len] = 0;
 return buffer;
}
/*
 * debugfs write handler for a per-block/per-device "statistics" file.
 * Accepts the commands "reset", "on" and "off" (leading whitespace
 * ignored, trailing junk after the keyword is ignored by the strncmp
 * matching).  Returns the consumed length or a negative errno.
 */
static ssize_t dasd_stats_write(struct file *file,
    const char __user *user_buf,
    size_t user_len, loff_t *pos)
{
 char *buffer, *str;
 int rc;
 struct seq_file *m = (struct seq_file *)file->private_data;
 struct dasd_profile *prof = m->private;
 /* cap the amount of user data we are willing to copy */
 if (user_len > 65536)
  user_len = 65536;
 buffer = dasd_get_user_string(user_buf, user_len);
 if (IS_ERR(buffer))
  return PTR_ERR(buffer);
 str = skip_spaces(buffer);
 rc = user_len;
 if (strncmp(str, "reset", 5) == 0) {
  dasd_profile_reset(prof);
 } else if (strncmp(str, "on", 2) == 0) {
  rc = dasd_profile_on(prof);
  if (!rc)
   rc = user_len;
 } else if (strncmp(str, "off", 3) == 0) {
  dasd_profile_off(prof);
 } else
  rc = -EINVAL;
 vfree(buffer);
 return rc;
}
/*
 * Emit the 32 buckets of one histogram as a space-separated line.
 * "array" must point to at least 32 counters.
 */
static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
 unsigned int idx;

 for (idx = 0; idx < 32; idx++)
  seq_printf(m, "%u ", array[idx]);
 seq_putc(m, '\n');
}
/*
 * Render one statistics snapshot into a seq_file: totals first, then
 * each histogram on its own labelled line, followed by the read-only
 * subset in the same order.  The label strings form the user-visible
 * debugfs format and must not change.
 */
static void dasd_stats_seq_print(struct seq_file *m,
     struct dasd_profile_info *data)
{
 seq_printf(m, "start_time %ld.%09ld\n",
     data->starttod.tv_sec, data->starttod.tv_nsec);
 seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
 seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
 seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
 seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
 seq_printf(m, "histogram_sectors ");
 dasd_stats_array(m, data->dasd_io_secs);
 seq_printf(m, "histogram_io_times ");
 dasd_stats_array(m, data->dasd_io_times);
 seq_printf(m, "histogram_io_times_weighted ");
 dasd_stats_array(m, data->dasd_io_timps);
 seq_printf(m, "histogram_time_build_to_ssch ");
 dasd_stats_array(m, data->dasd_io_time1);
 seq_printf(m, "histogram_time_ssch_to_irq ");
 dasd_stats_array(m, data->dasd_io_time2);
 seq_printf(m, "histogram_time_ssch_to_irq_weighted ");
 dasd_stats_array(m, data->dasd_io_time2ps);
 seq_printf(m, "histogram_time_irq_to_end ");
 dasd_stats_array(m, data->dasd_io_time3);
 seq_printf(m, "histogram_ccw_queue_length ");
 dasd_stats_array(m, data->dasd_io_nr_req);
 seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
 seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
 seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
 seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
 seq_printf(m, "histogram_read_sectors ");
 dasd_stats_array(m, data->dasd_read_secs);
 seq_printf(m, "histogram_read_times ");
 dasd_stats_array(m, data->dasd_read_times);
 seq_printf(m, "histogram_read_time_build_to_ssch ");
 dasd_stats_array(m, data->dasd_read_time1);
 seq_printf(m, "histogram_read_time_ssch_to_irq ");
 dasd_stats_array(m, data->dasd_read_time2);
 seq_printf(m, "histogram_read_time_irq_to_end ");
 dasd_stats_array(m, data->dasd_read_time3);
 seq_printf(m, "histogram_read_ccw_queue_length ");
 dasd_stats_array(m, data->dasd_read_nr_req);
}
/*
 * seq_file show callback for a per-block/per-device statistics file.
 * Prints the snapshot under the profile lock, or "disabled" when the
 * profile is switched off.
 */
static int dasd_stats_show(struct seq_file *m, void *v)
{
 struct dasd_profile *profile = m->private;
 struct dasd_profile_info *data;

 spin_lock_bh(&profile->lock);
 data = profile->data;
 if (data)
  dasd_stats_seq_print(m, data);
 else
  seq_printf(m, "disabled\n");
 spin_unlock_bh(&profile->lock);
 return 0;
}
/*
 * debugfs open callback: hand the dasd_profile stored in i_private to
 * the single_open seq_file machinery.
 */
static int dasd_stats_open(struct inode *inode, struct file *file)
{
 return single_open(file, dasd_stats_show, inode->i_private);
}
/* File operations for a per-block/per-device "statistics" debugfs file. */
static const struct file_operations dasd_stats_raw_fops = {
 .owner  = THIS_MODULE,
 .open  = dasd_stats_open,
 .read  = seq_read,
 .llseek  = seq_lseek,
 .release = single_release,
 .write  = dasd_stats_write,
};
/*
 * debugfs write handler for the driver-wide "statistics" file.
 * Accepts "reset", "on" (resets and enables global-only profiling) and
 * "off".  Returns the consumed length or a negative errno.
 */
static ssize_t dasd_stats_global_write(struct file *file,
           const char __user *user_buf,
           size_t user_len, loff_t *pos)
{
 char *buffer, *str;
 ssize_t rc;
 /* cap the amount of user data we are willing to copy */
 if (user_len > 65536)
  user_len = 65536;
 buffer = dasd_get_user_string(user_buf, user_len);
 if (IS_ERR(buffer))
  return PTR_ERR(buffer);
 str = skip_spaces(buffer);
 rc = user_len;
 if (strncmp(str, "reset", 5) == 0) {
  dasd_global_profile_reset();
 } else if (strncmp(str, "on", 2) == 0) {
  dasd_global_profile_reset();
  dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
 } else if (strncmp(str, "off", 3) == 0) {
  dasd_global_profile_level = DASD_PROFILE_OFF;
 } else
  rc = -EINVAL;
 vfree(buffer);
 return rc;
}
/*
 * seq_file show callback for the driver-wide statistics file: print the
 * global snapshot, or "disabled" when global profiling is off.
 */
static int dasd_stats_global_show(struct seq_file *m, void *v)
{
 if (dasd_global_profile_level)
  dasd_stats_seq_print(m, &dasd_global_profile_data);
 else
  seq_printf(m, "disabled\n");
 return 0;
}
/* debugfs open callback for the driver-wide statistics file. */
static int dasd_stats_global_open(struct inode *inode, struct file *file)
{
 return single_open(file, dasd_stats_global_show, NULL);
}
/* File operations for the driver-wide "statistics" debugfs file. */
static const struct file_operations dasd_stats_global_fops = {
 .owner  = THIS_MODULE,
 .open  = dasd_stats_global_open,
 .read  = seq_read,
 .llseek  = seq_lseek,
 .release = single_release,
 .write  = dasd_stats_global_write,
};
/*
 * Create the "statistics" debugfs file for a profile below base_dentry.
 * Profiling itself stays off (data == NULL) until enabled via a write.
 * Silently does nothing when there is no parent dentry or the file
 * cannot be created.
 */
static void dasd_profile_init(struct dasd_profile *profile,
         struct dentry *base_dentry)
{
 struct dentry *de;
 umode_t mode = S_IRUSR | S_IWUSR | S_IFREG;

 if (!base_dentry)
  return;
 profile->dentry = NULL;
 profile->data = NULL;
 de = debugfs_create_file("statistics", mode, base_dentry,
     profile, &dasd_stats_raw_fops);
 if (de && !IS_ERR(de))
  profile->dentry = de;
}
/*
 * Tear down a profile: switch statistics gathering off (freeing the
 * counters) and remove its debugfs file, if one was created.
 */
static void dasd_profile_exit(struct dasd_profile *profile)
{
 struct dentry *de = profile->dentry;

 dasd_profile_off(profile);
 if (de) {
  profile->dentry = NULL;
  debugfs_remove(de);
 }
}
/*
 * Remove the global debugfs hierarchy ("dasd/global/statistics") and
 * switch global profiling off.  Children are removed before their
 * parent directories.
 */
static void dasd_statistics_removeroot(void)
{
 dasd_global_profile_level = DASD_PROFILE_OFF;
 if (dasd_global_profile_dentry) {
  debugfs_remove(dasd_global_profile_dentry);
  dasd_global_profile_dentry = NULL;
 }
 if (dasd_debugfs_global_entry)
  debugfs_remove(dasd_debugfs_global_entry);
 if (dasd_debugfs_root_entry)
  debugfs_remove(dasd_debugfs_root_entry);
}
/*
 * Create the debugfs hierarchy "dasd/global/statistics".  On any
 * failure everything created so far is torn down again and only a
 * debug-feature event is logged — statistics then stay unavailable.
 */
static void dasd_statistics_createroot(void)
{
 umode_t mode;
 struct dentry *pde;
 dasd_debugfs_root_entry = NULL;
 dasd_debugfs_global_entry = NULL;
 dasd_global_profile_dentry = NULL;
 pde = debugfs_create_dir("dasd", NULL);
 if (!pde || IS_ERR(pde))
  goto error;
 dasd_debugfs_root_entry = pde;
 pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
 if (!pde || IS_ERR(pde))
  goto error;
 dasd_debugfs_global_entry = pde;
 mode = (S_IRUSR | S_IWUSR | S_IFREG);
 pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry,
      NULL, &dasd_stats_global_fops);
 if (!pde || IS_ERR(pde))
  goto error;
 dasd_global_profile_dentry = pde;
 return;
error:
 DBF_EVENT(DBF_ERR, "%s",
    "Creation of the dasd debugfs interface failed");
 dasd_statistics_removeroot();
 return;
}
#else
/* CONFIG_DASD_PROFILE is not set: all profiling hooks become no-ops. */
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
/* Stub: no debugfs statistics hierarchy without profiling support. */
static void dasd_statistics_createroot(void)
{
 return;
}
static void dasd_statistics_removeroot(void)
{
 return;
}
/* Shown instead of real data when a statistics file is read. */
int dasd_stats_generic_show(struct seq_file *m, void *v)
{
 seq_printf(m, "Statistics are not activated in this kernel\n");
 return 0;
}
static void dasd_profile_init(struct dasd_profile *profile,
         struct dentry *base_dentry)
{
 return;
}
static void dasd_profile_exit(struct dasd_profile *profile)
{
 return;
}
/* Always "succeeds" so callers need no #ifdefs. */
int dasd_profile_on(struct dasd_profile *profile)
{
 return 0;
}
#endif    /* CONFIG_DASD_PROFILE */
/*
* Allocate memory for a channel program with 'cplength' channel
* command words and 'datasize' additional space. There are two
* variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
* memory and 2) dasd_smalloc_request uses the static ccw memory
* that gets allocated for each device.
*/
/*
 * Allocate a ccw request with room for "cplength" channel command words
 * and "datasize" bytes of payload, all from the kmalloc pools (DMA-able
 * for the channel program and data).  Takes a reference on "device";
 * returns the request or ERR_PTR(-ENOMEM).
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
       int datasize,
       struct dasd_device *device)
{
 struct dasd_ccw_req *cqr;

 /* Sanity checks */
 BUG_ON(datasize > PAGE_SIZE ||
        (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
 if (!cqr)
  return ERR_PTR(-ENOMEM);
 cqr->cpaddr = NULL;
 cqr->data = NULL;
 if (cplength > 0) {
  cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
          GFP_ATOMIC | GFP_DMA);
  if (!cqr->cpaddr)
   goto out_free_cqr;
 }
 if (datasize > 0) {
  cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
  if (!cqr->data)
   goto out_free_cpaddr;
 }
 cqr->magic = magic;
 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 dasd_get_device(device);
 return cqr;

out_free_cpaddr:
 kfree(cqr->cpaddr);
out_free_cqr:
 kfree(cqr);
 return ERR_PTR(-ENOMEM);
}
/*
 * Allocate a ccw request from the device's static ccw chunk pool
 * instead of kmalloc: request header, channel program and data area are
 * carved out of one contiguous chunk.  Takes a reference on "device";
 * returns the request or ERR_PTR(-ENOMEM).
 */
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
       int datasize,
       struct dasd_device *device)
{
 unsigned long flags;
 struct dasd_ccw_req *cqr;
 char *data;
 int size;
 /* round the header size up to an 8-byte boundary */
 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
 if (cplength > 0)
  size += cplength * sizeof(struct ccw1);
 if (datasize > 0)
  size += datasize;
 spin_lock_irqsave(&device->mem_lock, flags);
 cqr = (struct dasd_ccw_req *)
  dasd_alloc_chunk(&device->ccw_chunks, size);
 spin_unlock_irqrestore(&device->mem_lock, flags);
 if (cqr == NULL)
  return ERR_PTR(-ENOMEM);
 memset(cqr, 0, sizeof(struct dasd_ccw_req));
 /* channel program and data follow the aligned header */
 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
 cqr->cpaddr = NULL;
 if (cplength > 0) {
  cqr->cpaddr = (struct ccw1 *) data;
  data += cplength*sizeof(struct ccw1);
  memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
 }
 cqr->data = NULL;
 if (datasize > 0) {
  cqr->data = data;
  memset(cqr->data, 0, datasize);
 }
 cqr->magic = magic;
 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 dasd_get_device(device);
 return cqr;
}
/*
* Free memory of a channel program. This function needs to free all the
* idal lists that might have been created by dasd_set_cda and the
* struct dasd_ccw_req itself.
*/
/*
 * Free a kmalloc-allocated ccw request (counterpart of
 * dasd_kmalloc_request) and drop the device reference taken there.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
 struct ccw1 *ccw;
 /* Clear any idals used for the request. */
 /* NOTE(review): dereferences cqr->cpaddr unconditionally — assumes
  * every request freed here has at least one CCW; confirm callers. */
 ccw = cqr->cpaddr;
 do {
  clear_normalized_cda(ccw);
 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
 kfree(cqr->cpaddr);
 kfree(cqr->data);
 kfree(cqr);
 dasd_put_device(device);
}
/*
 * Return a chunk-pool-allocated ccw request (counterpart of
 * dasd_smalloc_request) and drop the device reference taken there.
 */
void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
 unsigned long flags;
 spin_lock_irqsave(&device->mem_lock, flags);
 dasd_free_chunk(&device->ccw_chunks, cqr);
 spin_unlock_irqrestore(&device->mem_lock, flags);
 dasd_put_device(device);
}
/*
* Check discipline magic in cqr.
*/
/*
 * Sanity check: verify that the request's magic matches the EBCDIC name
 * of the start device's discipline.  Returns 0 if ok, -EINVAL for a
 * NULL request or a magic mismatch.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
 struct dasd_device *device;
 if (cqr == NULL)
  return -EINVAL;
 device = cqr->startdev;
 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
  /* NOTE(review): message prints discipline->name although the
   * comparison uses ebcname — presumably the ASCII variant is
   * meant for readability; confirm. */
  DBF_DEV_EVENT(DBF_WARNING, device,
         " dasd_ccw_req 0x%08x magic doesn't match"
         " discipline 0x%08x",
         cqr->magic,
         *(unsigned int *) device->discipline->name);
  return -EINVAL;
 }
 return 0;
}
/*
* Terminate the current i/o and set the request to clear_pending.
* Timer keeps device runnig.
* ccw_device_clear can fail if the i/o subsystem
* is in a bad mood.
*/
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Retries ccw_device_clear up to 5 times while the request is still
 * DASD_CQR_IN_IO; an unexpected return code is a driver bug (BUG()).
 * Returns the last ccw_device_clear result.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
 struct dasd_device *device;
 int retries, rc;
 char errorstring[ERRORLENGTH];
 /* Check the cqr */
 rc = dasd_check_cqr(cqr);
 if (rc)
  return rc;
 retries = 0;
 device = (struct dasd_device *) cqr->startdev;
 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
  rc = ccw_device_clear(device->cdev, (long) cqr);
  switch (rc) {
  case 0: /* termination successful */
   cqr->status = DASD_CQR_CLEAR_PENDING;
   cqr->stopclk = get_clock();
   cqr->starttime = 0;
   DBF_DEV_EVENT(DBF_DEBUG, device,
          "terminate cqr %p successful",
          cqr);
   break;
  case -ENODEV:
   DBF_DEV_EVENT(DBF_ERR, device, "%s",
          "device gone, retry");
   break;
  case -EIO:
   DBF_DEV_EVENT(DBF_ERR, device, "%s",
          "I/O error, retry");
   break;
  case -EINVAL:
  case -EBUSY:
   DBF_DEV_EVENT(DBF_ERR, device, "%s",
          "device busy, retry later");
   break;
  default:
   /* internal error 10 - unknown rc*/
   snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
   dev_err(&device->cdev->dev, "An error occurred in the "
    "DASD device driver, reason=%s\n", errorstring);
   BUG();
   break;
  }
  retries++;
 }
 /* kick the tasklet so queued requests make progress */
 dasd_schedule_device_bh(device);
 return rc;
}
/*
* Start the i/o. This start_IO can fail if the channel is really busy.
* In that case set up a timer to start the request later.
*/
/*
 * Start the i/o for a queued request via ccw_device_start (command
 * mode) or ccw_device_tm_start (transport mode, cpmode == 1).  May fail
 * transiently (-EBUSY/-ETIMEDOUT: caller retries later via timer) or
 * permanently (stolen lock, retries exhausted).  Returns the start rc,
 * also stored in cqr->intrc.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
 struct dasd_device *device;
 int rc;
 char errorstring[ERRORLENGTH];
 /* Check the cqr */
 rc = dasd_check_cqr(cqr);
 if (rc) {
  cqr->intrc = rc;
  return rc;
 }
 device = (struct dasd_device *) cqr->startdev;
 /* refuse to start while another host holds the reserve, unless the
  * request explicitly allows running under a stolen lock */
 if (((cqr->block &&
       test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
      test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
     !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
  DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
         "because of stolen lock", cqr);
  cqr->status = DASD_CQR_ERROR;
  cqr->intrc = -EPERM;
  return -EPERM;
 }
 if (cqr->retries < 0) {
  /* internal error 14 - start_IO run out of retries */
  sprintf(errorstring, "14 %p", cqr);
  dev_err(&device->cdev->dev, "An error occurred in the DASD "
   "device driver, reason=%s\n", errorstring);
  cqr->status = DASD_CQR_ERROR;
  return -EIO;
 }
 cqr->startclk = get_clock();
 cqr->starttime = jiffies;
 cqr->retries--;
 /* restrict the path mask to operational paths; path verification
  * requests keep their own lpm */
 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
  cqr->lpm &= device->path_data.opm;
  if (!cqr->lpm)
   cqr->lpm = device->path_data.opm;
 }
 if (cqr->cpmode == 1) {
  rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
      (long) cqr, cqr->lpm);
 } else {
  rc = ccw_device_start(device->cdev, cqr->cpaddr,
          (long) cqr, cqr->lpm, 0);
 }
 switch (rc) {
 case 0:
  cqr->status = DASD_CQR_IN_IO;
  break;
 case -EBUSY:
  DBF_DEV_EVENT(DBF_WARNING, device, "%s",
         "start_IO: device busy, retry later");
  break;
 case -ETIMEDOUT:
  DBF_DEV_EVENT(DBF_WARNING, device, "%s",
         "start_IO: request timeout, retry later");
  break;
 case -EACCES:
  /* -EACCES indicates that the request used only a subset of the
   * available paths and all these paths are gone. If the lpm of
   * this request was only a subset of the opm (e.g. the ppm) then
   * we just do a retry with all available paths.
   * If we already use the full opm, something is amiss, and we
   * need a full path verification.
   */
  if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
   DBF_DEV_EVENT(DBF_WARNING, device,
          "start_IO: selected paths gone (%x)",
          cqr->lpm);
  } else if (cqr->lpm != device->path_data.opm) {
   cqr->lpm = device->path_data.opm;
   DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
          "start_IO: selected paths gone,"
          " retry on all paths");
  } else {
   DBF_DEV_EVENT(DBF_WARNING, device, "%s",
          "start_IO: all paths in opm gone,"
          " do path verification");
   dasd_generic_last_path_gone(device);
   device->path_data.opm = 0;
   device->path_data.ppm = 0;
   device->path_data.npm = 0;
   device->path_data.tbvpm =
    ccw_device_get_path_mask(device->cdev);
  }
  break;
 case -ENODEV:
  DBF_DEV_EVENT(DBF_WARNING, device, "%s",
         "start_IO: -ENODEV device gone, retry");
  break;
 case -EIO:
  DBF_DEV_EVENT(DBF_WARNING, device, "%s",
         "start_IO: -EIO device gone, retry");
  break;
 case -EINVAL:
  /* most likely caused in power management context */
  DBF_DEV_EVENT(DBF_WARNING, device, "%s",
         "start_IO: -EINVAL device currently "
         "not accessible");
  break;
 default:
  /* internal error 11 - unknown rc */
  snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
  dev_err(&device->cdev->dev,
   "An error occurred in the DASD device driver, "
   "reason=%s\n", errorstring);
  BUG();
  break;
 }
 cqr->intrc = rc;
 return rc;
}
/*
* Timeout function for dasd devices. This is used for different purposes
* 1) missing interrupt handler for normal operation
* 2) delayed start of request where start_IO failed with -EBUSY
* 3) timeout for missing state change interrupts
* The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
* DASD_CQR_QUEUED for 2) and 3).
*/
/*
 * Timer callback for dasd devices: lift the DASD_STOPPED_PENDING stop
 * bit under the ccw device lock and kick the tasklet so the queue is
 * processed again.
 */
static void dasd_device_timeout(unsigned long ptr)
{
 struct dasd_device *device = (struct dasd_device *) ptr;
 unsigned long flags;

 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 /* re-activate request queue */
 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 dasd_schedule_device_bh(device);
}
/*
* Setup timeout for a device in jiffies.
*/
/*
 * (Re)arm the device timer to fire "expires" jiffies from now;
 * an expires of 0 cancels it instead.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
 if (expires)
  mod_timer(&device->timer, jiffies + expires);
 else
  del_timer(&device->timer);
}
/*
* Clear timeout for a device.
*/
/* Cancel the device timer (no-op if it is not pending). */
void dasd_device_clear_timer(struct dasd_device *device)
{
 del_timer(&device->timer);
}
/*
 * Recovery path used when the interrupt handler receives an irb error
 * pointer: after validating that intparm really is an in-flight request
 * belonging to this device, requeue it (DASD_CQR_QUEUED) so it will be
 * retried by the tasklet.
 */
static void dasd_handle_killed_request(struct ccw_device *cdev,
           unsigned long intparm)
{
 struct dasd_ccw_req *cqr;
 struct dasd_device *device;
 if (!intparm)
  return;
 cqr = (struct dasd_ccw_req *) intparm;
 if (cqr->status != DASD_CQR_IN_IO) {
  DBF_EVENT_DEVID(DBF_DEBUG, cdev,
    "invalid status in handle_killed_request: "
    "%02x", cqr->status);
  return;
 }
 device = dasd_device_from_cdev_locked(cdev);
 if (IS_ERR(device)) {
  DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
    "unable to get device from cdev");
  return;
 }
 /* cross-check request, device and discipline magic */
 if (!cqr->startdev ||
     device != cqr->startdev ||
     strncmp(cqr->startdev->discipline->ebcname,
      (char *) &cqr->magic, 4)) {
  DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
    "invalid device in request");
  dasd_put_device(device);
  return;
 }
 /* Schedule request to be retried. */
 cqr->status = DASD_CQR_QUEUED;
 dasd_device_clear_timer(device);
 dasd_schedule_device_bh(device);
 dasd_put_device(device);
}
/*
 * React to a state change interrupt: trigger an extended error
 * reporting SNSS, lift the pending stop bit and kick both the device
 * and (if present) the block layer tasklet.
 */
void dasd_generic_handle_state_change(struct dasd_device *device)
{
 /* First of all start sense subsystem status request. */
 dasd_eer_snss(device);
 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
 dasd_schedule_device_bh(device);
 if (device->block)
  dasd_schedule_block_bh(device->block);
}
/*
* Interrupt handler for "normal" ssch-io based dasd devices.
*/
/*
 * Interrupt handler for "normal" ssch-io based dasd devices: classify
 * the irb, record sense data for abnormal conditions, complete or
 * requeue the interrupted request and — on clean completion — start the
 * next queued request directly from interrupt context (fast path).
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
        struct irb *irb)
{
 struct dasd_ccw_req *cqr, *next;
 struct dasd_device *device;
 unsigned long long now;
 int expires;
 /* an error pointer instead of an irb: kill/retry the request */
 if (IS_ERR(irb)) {
  switch (PTR_ERR(irb)) {
  case -EIO:
   break;
  case -ETIMEDOUT:
   DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
     "request timed out\n", __func__);
   break;
  default:
   DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
     "unknown error %ld\n", __func__,
     PTR_ERR(irb));
  }
  dasd_handle_killed_request(cdev, intparm);
  return;
 }
 now = get_clock();
 cqr = (struct dasd_ccw_req *) intparm;
 /* check for conditions that should be handled immediately */
 if (!cqr ||
     !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
       scsw_cstat(&irb->scsw) == 0)) {
  if (cqr)
   memcpy(&cqr->irb, irb, sizeof(*irb));
  device = dasd_device_from_cdev_locked(cdev);
  if (IS_ERR(device))
   return;
  /* ignore unsolicited interrupts for DIAG discipline */
  if (device->discipline == dasd_diag_discipline_pointer) {
   dasd_put_device(device);
   return;
  }
  device->discipline->dump_sense_dbf(device, irb, "int");
  if (device->features & DASD_FEATURE_ERPLOG)
   device->discipline->dump_sense(device, cqr, irb);
  device->discipline->check_for_device_change(device, cqr, irb);
  dasd_put_device(device);
 }
 if (!cqr)
  return;
 device = (struct dasd_device *) cqr->startdev;
 if (!device ||
     strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
  DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
    "invalid device in request");
  return;
 }
 /* Check for clear pending */
 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
     scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
  cqr->status = DASD_CQR_CLEARED;
  dasd_device_clear_timer(device);
  wake_up(&dasd_flush_wq);
  dasd_schedule_device_bh(device);
  return;
 }
 /* check status - the request might have been killed by dyn detach */
 if (cqr->status != DASD_CQR_IN_IO) {
  DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
         "status %02x", dev_name(&cdev->dev), cqr->status);
  return;
 }
 next = NULL;
 expires = 0;
 if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
     scsw_cstat(&irb->scsw) == 0) {
  /* request was completed successfully */
  cqr->status = DASD_CQR_SUCCESS;
  cqr->stopclk = now;
  /* Start first request on queue if possible -> fast_io. */
  if (cqr->devlist.next != &device->ccw_queue) {
   next = list_entry(cqr->devlist.next,
       struct dasd_ccw_req, devlist);
  }
 } else {  /* error */
  /*
   * If we don't want complex ERP for this request, then just
   * reset this and retry it in the fastpath
   */
  if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
      cqr->retries > 0) {
   if (cqr->lpm == device->path_data.opm)
    DBF_DEV_EVENT(DBF_DEBUG, device,
           "default ERP in fastpath "
           "(%i retries left)",
           cqr->retries);
   if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
    cqr->lpm = device->path_data.opm;
   cqr->status = DASD_CQR_QUEUED;
   next = cqr;
  } else
   cqr->status = DASD_CQR_ERROR;
 }
 /* fast path: fire the next request straight from irq context */
 if (next && (next->status == DASD_CQR_QUEUED) &&
     (!device->stopped)) {
  if (device->discipline->start_IO(next) == 0)
   expires = next->expires;
 }
 if (expires != 0)
  dasd_device_set_timer(device, expires);
 else
  dasd_device_clear_timer(device);
 dasd_schedule_device_bh(device);
}
/*
 * Unit-check handler: for an online, fully initialized device whose
 * discipline supports it, record the sense data and let the discipline
 * inspect the irb for device changes.  Always asks cio to retry.
 */
enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
 struct dasd_device *device = dasd_device_from_cdev_locked(cdev);

 if (IS_ERR(device))
  return UC_TODO_RETRY;
 if (!test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
     device->state == device->target &&
     device->discipline->check_for_device_change) {
  if (device->discipline->dump_sense_dbf)
   device->discipline->dump_sense_dbf(device, irb, "uc");
  device->discipline->check_for_device_change(device, NULL, irb);
 }
 dasd_put_device(device);
 return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
/*
* If we have an error on a dasd_block layer request then we cancel
* and return all further requests from the same dasd_block as well.
*/
static void __dasd_device_recovery(struct dasd_device *device,
struct dasd_ccw_req *ref_cqr)
{
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
/*
* only requeue request that came from the dasd_block layer
*/
if (!ref_cqr->block)
return;
list_for_each_safe(l, n, &device->ccw_queue) {
cqr = list_entry(l, struct dasd_ccw_req, devlist);
if (cqr->status == DASD_CQR_QUEUED &&
ref_cqr->block == cqr->block) {
cqr->status = DASD_CQR_CLEARED;
}
}
};
/*
* Remove those ccw requests from the queue that need to be returned
* to the upper layer.
*/
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
struct list_head *final_queue)
{
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
/* Process request with final status. */
list_for_each_safe(l, n, &device->ccw_queue) {
cqr = list_entry(l, struct dasd_ccw_req, devlist);
/* Stop list processing at the first non-final request. */
if (cqr->status == DASD_CQR_QUEUED ||
cqr->status == DASD_CQR_IN_IO ||
cqr->status == DASD_CQR_CLEAR_PENDING)
break;
if (cqr->status == DASD_CQR_ERROR) {
__dasd_device_recovery(device, cqr);
}
/* Rechain finished requests to final queue */
list_move_tail(&cqr->devlist, final_queue);
}
}
/*
* the cqrs from the final queue are returned to the upper layer
* by setting a dasd_block state and calling the callback function
*/
/*
 * The cqrs from the final queue are returned to the upper layer by
 * translating the device-level status into the block-level status
 * (SUCCESS->DONE, ERROR->NEED_ERP, CLEARED->TERMINATED) and invoking
 * the request's callback under the block queue lock.  Any other status
 * at this point is a driver bug (BUG()).
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
           struct list_head *final_queue)
{
 struct list_head *l, *n;
 struct dasd_ccw_req *cqr;
 struct dasd_block *block;
 void (*callback)(struct dasd_ccw_req *, void *data);
 void *callback_data;
 char errorstring[ERRORLENGTH];
 list_for_each_safe(l, n, final_queue) {
  cqr = list_entry(l, struct dasd_ccw_req, devlist);
  list_del_init(&cqr->devlist);
  block = cqr->block;
  /* snapshot the callback before the status transition */
  callback = cqr->callback;
  callback_data = cqr->callback_data;
  if (block)
   spin_lock_bh(&block->queue_lock);
  switch (cqr->status) {
  case DASD_CQR_SUCCESS:
   cqr->status = DASD_CQR_DONE;
   break;
  case DASD_CQR_ERROR:
   cqr->status = DASD_CQR_NEED_ERP;
   break;
  case DASD_CQR_CLEARED:
   cqr->status = DASD_CQR_TERMINATED;
   break;
  default:
   /* internal error 12 - wrong cqr status*/
   snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
   dev_err(&device->cdev->dev,
    "An error occurred in the DASD device driver, "
    "reason=%s\n", errorstring);
   BUG();
  }
  if (cqr->callback != NULL)
   (callback)(cqr, callback_data);
  if (block)
   spin_unlock_bh(&block->queue_lock);
 }
}
/*
* Take a look at the first request on the ccw queue and check
* if it reached its expire time. If so, terminate the IO.
*/
/*
 * Take a look at the first request on the ccw queue and check if it
 * reached its expire time.  If so, terminate the IO; when termination
 * fails, extend the deadline and retry the termination in 5 seconds.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
 struct dasd_ccw_req *cqr;
 if (list_empty(&device->ccw_queue))
  return;
 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
     (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
  if (device->discipline->term_IO(cqr) != 0) {
   /* Hmpf, try again in 5 sec */
   dev_err(&device->cdev->dev,
    "cqr %p timed out (%lus) but cannot be "
    "ended, retrying in 5 s\n",
    cqr, (cqr->expires/HZ));
   cqr->expires += 5*HZ;
   dasd_device_set_timer(device, 5*HZ);
  } else {
   dev_err(&device->cdev->dev,
    "cqr %p timed out (%lus), %i retries "
    "remaining\n", cqr, (cqr->expires/HZ),
    cqr->retries);
  }
 }
}
/*
* Take a look at the first request on the ccw queue and check
* if it needs to be started.
*/
/*
 * Take a look at the first request on the ccw queue and check if it
 * needs to be started: start it, arm the expiry timer on success, or
 * schedule a retry (-EACCES: immediately via tasklet, otherwise in
 * half a second).  A stopped device returns the request to the upper
 * layer instead, unless it is a path verification request and only
 * the disconnect/unresumed stop bits are set.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
 struct dasd_ccw_req *cqr;
 int rc;
 if (list_empty(&device->ccw_queue))
  return;
 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 if (cqr->status != DASD_CQR_QUEUED)
  return;
 /* when device is stopped, return request to previous layer
  * exception: only the disconnect or unresumed bits are set and the
  * cqr is a path verification request
  */
 if (device->stopped &&
     !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
       && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
  cqr->intrc = -EAGAIN;
  cqr->status = DASD_CQR_CLEARED;
  dasd_schedule_device_bh(device);
  return;
 }
 rc = device->discipline->start_IO(cqr);
 if (rc == 0)
  dasd_device_set_timer(device, cqr->expires);
 else if (rc == -EACCES) {
  dasd_schedule_device_bh(device);
 } else
  /* Hmpf, try again in 1/2 sec */
  dasd_device_set_timer(device, 50);
}
/*
 * If paths are waiting to be verified (tbvpm set) and the device is not
 * stopped for reasons other than disconnect/unresumed, run the
 * discipline's path verification; on failure retry in half a second,
 * on success clear the to-be-verified path mask.
 *
 * Fixes: removed the stray ';' after the function body (an empty
 * file-scope declaration, invalid in strict C90); restructured with
 * guard clauses for readability — behavior is unchanged.
 */
static void __dasd_device_check_path_events(struct dasd_device *device)
{
 int rc;

 if (!device->path_data.tbvpm)
  return;
 if (device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
  return;
 rc = device->discipline->verify_path(device, device->path_data.tbvpm);
 if (rc)
  dasd_device_set_timer(device, 50);
 else
  device->path_data.tbvpm = 0;
}
/*
* Go through all request on the dasd_device request queue,
* terminate them on the cdev if necessary, and return them to the
* submitting layer via callback.
* Note:
* Make sure that all 'submitting layers' still exist when
* this function is called!. In other words, when 'device' is a base
* device then all block layer requests must have been removed before
* via dasd_flush_block_queue.
*/
/*
 * Go through all requests on the dasd_device request queue, terminate
 * them on the cdev if necessary, and return them to the submitting
 * layer via callback.  Returns 0 or the first term_IO failure code.
 * Note:
 * Make sure that all 'submitting layers' still exist when this function
 * is called!  In other words, when 'device' is a base device then all
 * block layer requests must have been removed before via
 * dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
 struct dasd_ccw_req *cqr, *n;
 int rc;
 struct list_head flush_queue;
 INIT_LIST_HEAD(&flush_queue);
 spin_lock_irq(get_ccwdev_lock(device->cdev));
 rc = 0;
 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
  /* Check status and move request to flush_queue */
  switch (cqr->status) {
  case DASD_CQR_IN_IO:
   rc = device->discipline->term_IO(cqr);
   if (rc) {
    /* unable to terminate request */
    dev_err(&device->cdev->dev,
     "Flushing the DASD request queue "
     "failed for request %p\n", cqr);
    /* stop flush processing */
    goto finished;
   }
   break;
  case DASD_CQR_QUEUED:
   cqr->stopclk = get_clock();
   cqr->status = DASD_CQR_CLEARED;
   break;
  default: /* no need to modify the others */
   break;
  }
  list_move_tail(&cqr->devlist, &flush_queue);
 }
finished:
 spin_unlock_irq(get_ccwdev_lock(device->cdev));
 /*
  * After this point all requests must be in state CLEAR_PENDING,
  * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
  * one of the others.
  */
 list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
  wait_event(dasd_flush_wq,
      (cqr->status != DASD_CQR_CLEAR_PENDING));
 /*
  * Now set each request back to TERMINATED, DONE or NEED_ERP
  * and call the callback function of flushed requests
  */
 __dasd_device_process_final_queue(device, &flush_queue);
 return rc;
}
/*
* Acquire the device lock and process queues for the device.
*/
/*
 * Tasklet body: acquire the device lock and process the ccw queue —
 * expire overdue requests, collect finished ones, check path events,
 * run the callbacks of finished requests outside the lock, then start
 * the queue head.  Drops the reference taken by
 * dasd_schedule_device_bh.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
 struct list_head final_queue;
 atomic_set (&device->tasklet_scheduled, 0);
 INIT_LIST_HEAD(&final_queue);
 spin_lock_irq(get_ccwdev_lock(device->cdev));
 /* Check expire time of first request on the ccw queue. */
 __dasd_device_check_expire(device);
 /* find final requests on ccw queue */
 __dasd_device_process_ccw_queue(device, &final_queue);
 __dasd_device_check_path_events(device);
 spin_unlock_irq(get_ccwdev_lock(device->cdev));
 /* Now call the callback function of requests with final status */
 __dasd_device_process_final_queue(device, &final_queue);
 spin_lock_irq(get_ccwdev_lock(device->cdev));
 /* Now check if the head of the ccw queue needs to be started. */
 __dasd_device_start_head(device);
 spin_unlock_irq(get_ccwdev_lock(device->cdev));
 dasd_put_device(device);
}
/*
* Schedules a call to dasd_tasklet over the device tasklet.
*/
/*
 * Schedule a call to dasd_device_tasklet.  The atomic cmpxchg makes
 * this idempotent while a run is pending; the device reference taken
 * here is released by the tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
 /* Protect against rescheduling. */
 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
  return;
 dasd_get_device(device);
 tasklet_hi_schedule(&device->tasklet);
}
/* OR the given stop bits into the device's stop mask. */
void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
 device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
/*
 * Clear the given stop bits; if the device becomes fully unstopped,
 * wake sleepers waiting in the sleep_on paths.
 */
void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
 device->stopped &= ~bits;
 if (!device->stopped)
  wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
/*
* Queue a request to the head of the device ccw_queue.
* Start the I/O if possible.
*/
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* kick the tasklet instead of starting I/O here, to keep ordering */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/*
* Queue a request to the tail of the device ccw_queue.
* Start the I/O if possible.
*/
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* kick the tasklet instead of starting I/O here, to keep ordering */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/*
* Wakeup helper for the 'sleep_on' functions.
*/
void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	/* Mark the request as finished under the ccw device lock ... */
	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	cqr->callback_data = DASD_SLEEPON_END_TAG;
	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	/* ... then wake the sleepers polling _wait_for_wakeup(). */
	wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
/*
 * Wait condition for the sleep_on functions: true once dasd_wakeup_cb()
 * has tagged the request as ended.
 */
static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	int done;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	done = (cqr->callback_data == DASD_SLEEPON_END_TAG);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return done;
}
/*
* checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
*/
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	/* A freshly filled request needs no recovery. */
	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		/* a non-NULL 'refers' marks this cqr as an ERP request */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}
/*
 * Tell the _dasd_sleep_on() loop whether the request still needs work.
 */
static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
	/* without ERP the loop only runs while the request is still filled */
	if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
		return (cqr->status == DASD_CQR_FILLED);
	/* an unfinished ERP chain keeps the loop going */
	if (cqr->refers)
		return 1;
	return (cqr->status != DASD_CQR_DONE) &&
		(cqr->status != DASD_CQR_FAILED);
}
/*
 * Queue @maincqr on its device, wait for completion and drive any error
 * recovery (ERP) requests that get chained to it.  Returns 0 on success,
 * the request's intrc if set, or -EIO.
 */
static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	/*
	 * Keep processing the head of the local queue until
	 * __dasd_sleep_on_loop_condition() says the chain is finished.
	 */
	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;
		/* refuse requests not allowed to run under a stolen lock */
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !(device->stopped));
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		/* the default completion callback wakes us via generic_waitq */
		if (!cqr->callback)
			cqr->callback = dasd_wakeup_cb;

		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_clock();
	/* don't log sense data for an interrupted wait */
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}
/*
* Queue a request to the tail of the device ccw_queue and wait for
* it's completion.
*/
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 0);	/* 0 = not interruptible */
}
/*
* Queue a request to the tail of the device ccw_queue and wait
* interruptible for it's completion.
*/
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 1);	/* 1 = interruptible by signals */
}
/*
* Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
* for eckd devices) the currently running request has to be terminated
* and be put back to status queued, before the special request is added
* to the head of the queue. Then the special request is waited on normally.
*/
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return 0;
	/* the head of the device queue is the request currently in progress */
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	rc = device->discipline->term_IO(cqr);
	if (!rc)
		/*
		 * CQR terminated because a more important request is pending.
		 * Undo decreasing of retry counter because this is
		 * not an error case.
		 */
		cqr->retries++;
	return rc;
}
/*
 * Terminate the request currently running on @cqr->startdev (if any),
 * queue @cqr directly behind it and wait for @cqr's completion.
 */
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	/* refuse requests not allowed to run under a stolen lock */
	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		cqr->status = DASD_CQR_FAILED;
		cqr->intrc = -EPERM;
		return -EIO;
	}
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = DASD_SLEEPON_START_TAG;
	cqr->status = DASD_CQR_QUEUED;
	/*
	 * add new request as second
	 * first the terminated cqr needs to be finished
	 */
	list_add(&cqr->devlist, device->ccw_queue.next);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}
/*
* Cancels a request that was started with dasd_sleep_on_req.
* This is useful to timeout requests. The request will be
* terminated if it is currently in i/o.
* Returns 1 if the request has been terminated.
* 0 if there was no need to terminate the request (not started yet)
* negative error code if termination failed
* Cancellation of a request is an asynchronous operation! The calling
* function has to wait until the request is properly returned via callback.
*/
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			cqr->stopclk = get_clock();
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* let the tasklet return the cancelled request to its owner */
	dasd_schedule_device_bh(device);
	return rc;
}
/*
* SECTION: Operations of the dasd_block layer.
*/
/*
* Timeout function for dasd_block. This is used when the block layer
* is waiting for something that may not come reliably, (e.g. a state
* change interrupt)
*/
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	/* timer callback; 'ptr' carries the dasd_block */
	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	/* have the block tasklet retry whatever was waiting */
	dasd_schedule_block_bh(block);
}
/*
* Setup timeout for a dasd_block in jiffies.
*/
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (!expires) {
		/* expires == 0 cancels a pending timeout */
		del_timer(&block->timer);
		return;
	}
	mod_timer(&block->timer, jiffies + expires);
}
/*
* Clear timeout for a dasd_block.
*/
void dasd_block_clear_timer(struct dasd_block *block)
{
	/* del_timer is safe to call even when no timer is pending */
	del_timer(&block->timer);
}
/*
* Process finished error recovery ccw.
*/
static void __dasd_process_erp(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status != DASD_CQR_DONE)
		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
	else
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	/* run the discipline's post-action to unwind the ERP request */
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}
/*
* Fetch requests from the block device queue.
*/
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;

	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;
	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY) {
		/* device not ready yet: fail everything that is queued */
		while ((req = blk_fetch_request(block->request_queue)))
			__blk_end_request_all(req, -EIO);
		return;
	}
	/* Now we try to fetch requests from the request queue */
	while ((req = blk_peek_request(queue))) {
		/* reject writes on read-only devices */
		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be build right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_device_set_stop_bits(basedev,
							  DASD_STOPPED_PENDING);
				spin_unlock_irqrestore(
					get_ccwdev_lock(basedev->cdev), flags);
				/* dasd_block_timeout() clears the stop bit */
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blk_start_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}
static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
struct request *req;
int status;
int error = 0;
req = (struct request *) cqr->callback_data;
dasd_profile_end(cqr->block, cqr, req);
status = cqr->block->base->discipline->free_cp(cqr, req);
if (status <= 0)
error = status ? status : -EIO;
__blk_end_request_all(req, error);
}
/*
* Process ccw request queue.
*/
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		/* skip requests that are still in flight */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			/* the handler may reshuffle the list - start over */
			goto restart;
		}

		/*  Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			if (IS_ERR(erp_fn(cqr)))
				continue;
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request  */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			/* quiesce the device until EER handling resumes it */
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}
/* Completion callback: have the block tasklet pick up the returned cqr. */
static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}
/*
 * Submit all startable (DASD_CQR_FILLED) requests from the block ccw
 * queue to the dasd_device layer, unless the device is stopped.
 */
static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We allways begin with the first requests on the queue, as some
	 * of previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* fail requests not allowed to run under a stolen lock */
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}
/*
* Central dasd_block layer routine. Takes requests from the generic
* block layer request queue, creates ccw requests, enqueues them on
* a dasd_device and processes ccw requests that have been returned.
*/
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Allow the tasklet to be scheduled again before we start working. */
	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	/* queue_lock is taken inside request_queue_lock here */
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	/* Drop the reference taken in dasd_schedule_block_bh(). */
	dasd_put_device(block->base);
}
/* Flush callback: wake whoever waits in dasd_flush_block_queue(). */
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}
/*
* Go through all request on the dasd_block request queue, cancel them
* on the respective dasd_device, and return them to the generic
* block layer.
*/
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		/* wait until the device layer has returned the request */
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}
/*
* Schedules a call to dasd_tasklet over the device tasklet.
*/
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Only the first caller gets to schedule; later ones are no-ops. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) == 0) {
		/* life cycle of block is bound to it's base device */
		dasd_get_device(block->base);
		tasklet_hi_schedule(&block->tasklet);
	}
}
/*
* SECTION: external block device operations
* (request queue handling, open, release, etc.)
*/
/*
* Dasd request queue function. Called from ll_rw_blk.c
*/
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block = queue->queuedata;

	spin_lock(&block->queue_lock);
	/* pull new requests over to the ccw queue ... */
	__dasd_process_request_queue(block);
	/* ... and start whatever is now startable at the head */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}
/*
* Allocate and initialize request queue and default I/O scheduler.
*/
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					      &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	/* replace the default elevator with the deadline scheduler */
	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}
/*
* Allocate and initialize request queue.
*/
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	if (block->base->features & DASD_FEATURE_USERAW) {
		/*
		 * the max_blocks value for raw_track access is 256
		 * it is higher than the native ECKD value because we
		 * only need one ccw per track
		 * so the max_hw_sectors are
		 * 2048 x 512B = 1024kB = 16 tracks
		 */
		max = 2048;
	} else {
		max = block->base->discipline->max_blocks << block->s2b_shift;
	}
	blk_queue_logical_block_size(block->request_queue,
				     block->bp_block);
	blk_queue_max_hw_sectors(block->request_queue, max);
	/* NOTE(review): -1L relies on the block layer clamping the segment
	 * count to its own maximum - confirm against the target kernel */
	blk_queue_max_segments(block->request_queue, -1L);
	/* with page sized segments we can translate each segement into
	 * one idaw/tidaw
	 */
	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
}
/*
* Deactivate and free request queue.
*/
static void dasd_free_queue(struct dasd_block *block)
{
	/* nothing to do when no queue was ever allocated */
	if (!block->request_queue)
		return;
	blk_cleanup_queue(block->request_queue);
	block->request_queue = NULL;
}
/*
* Flush request on the request queue.
*/
static void dasd_flush_request_queue(struct dasd_block *block)
{
	struct request *req;

	if (!block->request_queue)
		return;
	/* fail all requests still sitting in the block device queue */
	spin_lock_irq(&block->request_queue_lock);
	while ((req = blk_fetch_request(block->request_queue)))
		__blk_end_request_all(req, -EIO);
	spin_unlock_irq(&block->request_queue_lock);
}
/*
 * Block device open: take an open count and a discipline module
 * reference; refuse devices that are offline, unrecognized, in
 * probeonly mode, or read-only when opened for writing.
 */
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
	struct dasd_device *base;
	int rc;

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;

	atomic_inc(&base->block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		dev_info(&base->cdev->dev,
			 "Accessing the DASD failed because it is in "
			 "probeonly mode\n");
		rc = -EPERM;
		goto out;
	}

	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	if ((mode & FMODE_WRITE) &&
	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
	     (base->features & DASD_FEATURE_READONLY))) {
		rc = -EROFS;
		goto out;
	}

	dasd_put_device(base);
	return 0;

	/* error paths: undo the module reference and/or the open count */
out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&base->block->open_count);
	dasd_put_device(base);
	return rc;
}
static int dasd_release(struct gendisk *disk, fmode_t mode)
{
	struct dasd_device *base = dasd_device_from_gendisk(disk);

	if (!base)
		return -ENODEV;
	/* undo the open count / module reference taken in dasd_open() */
	atomic_dec(&base->block->open_count);
	module_put(base->discipline->owner);
	dasd_put_device(base);
	return 0;
}
/*
* Return disk geometry.
*/
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_device *base = dasd_device_from_gendisk(bdev->bd_disk);
	int rc = -EINVAL;

	if (!base)
		return -ENODEV;
	/* geometry comes from the discipline; without one we cannot answer */
	if (base->discipline && base->discipline->fill_geometry) {
		base->discipline->fill_geometry(base->block, geo);
		geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
		rc = 0;
	}
	dasd_put_device(base);
	return rc;
}
/* Block device operations for DASD block devices. */
const struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_ioctl,	/* same handler serves the compat path */
	.getgeo		= dasd_getgeo,
};
/*******************************************************************************
* end of block device operations
*/
/* Module teardown: release all global driver resources. */
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
	dasd_statistics_removeroot();
}
/*
* SECTION: common functions for ccw_driver use
*/
/*
* Is the device read-only?
* Note that this function does not report the setting of the
* readonly device attribute, but how it is configured in z/VM.
*/
int dasd_device_is_ro(struct dasd_device *device)
{
	struct ccw_dev_id dev_id;
	struct diag210 diag_data;
	int rc;

	/* only meaningful under z/VM */
	if (!MACHINE_IS_VM)
		return 0;
	ccw_device_get_id(device->cdev, &dev_id);
	memset(&diag_data, 0, sizeof(diag_data));
	diag_data.vrdcdvno = dev_id.devno;
	diag_data.vrdclen = sizeof(diag_data);
	rc = diag210(&diag_data);
	/* NOTE(review): rc 2 is treated like success here - presumably a
	 * truncated-but-usable reply; confirm against diag210 semantics */
	if (rc == 0 || rc == 2) {
		return diag_data.vrdcvfla & 0x80;
	} else {
		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
			  dev_id.devno, rc);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(dasd_device_is_ro);
/* async helper: set a ccw device online, warn on failure */
static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int rc;

	rc = ccw_device_set_online(cdev);
	if (rc)
		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
			   dev_name(&cdev->dev), rc);
}
/*
* Initial attempt at a probe function. this can be simplified once
* the other detection code is gone.
*/
int dasd_generic_probe(struct ccw_device *cdev,
		       struct dasd_discipline *discipline)
{
	int ret;

	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_generic_probe: could not add "
				"sysfs entries");
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
		/* onlining may block, so run it asynchronously */
		async_schedule(dasd_generic_auto_online, cdev);
	return 0;
}
/*
* This will one day be called from a global not_oper handler.
* It is also used by driver_unregister during module unload.
*/
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
}
/*
* Activate a device. This is called from dasd_{eckd,fba}_probe() when either
* the device is detected for the first time and is supposed to be used
* or the user has started activation through sysfs.
*/
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	/* the USEDIAG feature routes I/O through the DIAG discipline */
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			pr_warning("%s Setting the DASD online failed because "
				   "of missing DIAG discipline\n",
				   dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	/* pin both discipline modules while the device is online */
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warning("%s Setting the DASD online with discipline %s "
			   "failed with rc=%i\n",
			   dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		/* onlining did not get past device recognition - undo it */
		pr_warning("%s Setting the DASD online failed because of a "
			   "missing discipline\n", dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));

	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}
/*
 * Take a device offline: refuse while it is opened by anyone else,
 * then tear down the device and its block structure.
 */
int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warning("%s: The DASD cannot be set offline "
					   "with open count %i\n",
					   dev_name(&cdev->dev), open_count);
			else
				pr_warning("%s: The DASD cannot be set offline "
					   "while it is in use\n",
					   dev_name(&cdev->dev));
			/* back out: allow the device to be used again */
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
	return 0;
}
/*
 * Handle loss of the last operational channel path: requeue in-flight
 * requests and stop the device until a path comes back.
 */
int dasd_generic_last_path_gone(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	dev_warn(&device->cdev->dev, "No operational channel path is left "
		 "for the device\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
	/* First of all call extended error reporting. */
	dasd_eer_write(device, NULL, DASD_EER_NOPATH);

	if (device->state < DASD_STATE_BASIC)
		return 0;
	/* Device is active. We want to keep it. */
	list_for_each_entry(cqr, &device->ccw_queue, devlist)
		if ((cqr->status == DASD_CQR_IN_IO) ||
		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
			/* requeue for a retry once a path is back */
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries++;
		}
	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
/*
 * A channel path became usable again: lift the disconnect stop bit and
 * restart request processing (or resume the device after suspend).
 */
int dasd_generic_path_operational(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "A channel path to the device has become "
		 "operational\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
	if (device->stopped & DASD_UNRESUMED_PM) {
		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
		dasd_restore_device(device);
		return 1;
	}
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
/* Dispatch CIO notifications (device gone/boxed/no-path/operational). */
int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		/* forget all known paths */
		device->path_data.opm = 0;
		device->path_data.ppm = 0;
		device->path_data.npm = 0;
		ret = dasd_generic_last_path_gone(device);
		break;
	case CIO_OPER:
		ret = 1;
		if (device->path_data.opm)
			ret = dasd_generic_path_operational(device);
		break;
	}
	dasd_put_device(device);
	return ret;
}
/* Per-path event handler: update the path masks for each of 8 chpids. */
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	int chp;
	__u8 oldopm, eventlpm;
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;
	for (chp = 0; chp < 8; chp++) {
		/* logical path mask bit for this channel path */
		eventlpm = 0x80 >> chp;
		if (path_event[chp] & PE_PATH_GONE) {
			oldopm = device->path_data.opm;
			device->path_data.opm &= ~eventlpm;
			device->path_data.ppm &= ~eventlpm;
			device->path_data.npm &= ~eventlpm;
			if (oldopm && !device->path_data.opm)
				dasd_generic_last_path_gone(device);
		}
		if (path_event[chp] & PE_PATH_AVAILABLE) {
			device->path_data.opm &= ~eventlpm;
			device->path_data.ppm &= ~eventlpm;
			device->path_data.npm &= ~eventlpm;
			/* mark the path "to be verified" and kick the bh */
			device->path_data.tbvpm |= eventlpm;
			dasd_schedule_device_bh(device);
		}
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);
/*
 * Merge newly verified paths into the operational path mask; going from
 * "no path" to "some path" means the device just became operational.
 */
int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (device->path_data.opm || !lpm) {
		device->path_data.opm |= lpm;
		return 0;
	}
	device->path_data.opm = lpm;
	dasd_generic_path_operational(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
/*
 * Power-management freeze: stop new I/O, terminate in-flight requests
 * and requeue them at the head of the ccw queue for after resume.
 */
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head freeze_queue;
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* mark device as suspended */
	set_bit(DASD_FLAG_SUSPENDED, &device->flags);

	if (device->discipline->freeze)
		rc = device->discipline->freeze(device);
	/* NOTE(review): rc from freeze() is overwritten below before it is
	 * ever used - the discipline's freeze result is effectively ignored */

	/* disallow new I/O  */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
	/* clear active requests */
	INIT_LIST_HEAD(&freeze_queue);
	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &freeze_queue);
	}

	spin_unlock_irq(get_ccwdev_lock(cdev));

	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
		/* wait until terminated requests have fully cleared */
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
		if (cqr->status == DASD_CQR_CLEARED)
			cqr->status = DASD_CQR_QUEUED;
	}
	/* move freeze_queue to start of the ccw_queue */
	spin_lock_irq(get_ccwdev_lock(cdev));
	list_splice_tail(&freeze_queue, &device->ccw_queue);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
/*
 * Power-management restore callback: re-enable I/O and let the
 * discipline re-validate the device.  A failed discipline restore is
 * not returned to the caller; the device is put into the
 * DASD_UNRESUMED_PM stop state instead.
 */
int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
	dasd_schedule_device_bh(device);

	/*
	 * call discipline restore function
	 * if device is stopped do nothing e.g. for disconnected devices
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * if the resume failed for the DASD we put it in
		 * an UNRESUMED stop state
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
/*
 * Build a single-CCW request that executes Read Device Characteristics
 * (RDC) into @rdc_buffer.  An indirect data address list (IDAL) is
 * used when the buffer cannot be addressed directly by the CCW.
 *
 * Returns the filled request, or an ERR_PTR on allocation failure.
 */
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long *idaw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed*/
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
		/* buffer needs indirect addressing: build an IDA word
		 * list in the request's data area and point the CCW there */
		idaw = (unsigned long *) (cqr->data);
		ccw->cda = (__u32)(addr_t) idaw;
		ccw->flags = CCW_FLAG_IDA;
		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
	} else {
		ccw->cda = (__u32)(addr_t) rdc_buffer;
		ccw->flags = 0;
	}
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;	/* allow 10s before the request expires */
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/*
 * Execute a synchronous Read Device Characteristics request and store
 * the result in @rdc_buffer.  Returns 0 on success or a negative
 * error code.
 */
int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	rc = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
/*
* In command mode and transport mode we need to look for sense
* data in different places. The sense data itself is allways
* an array of 32 bytes, so we can unify the sense data access
* for both modes.
*/
/*
 * Locate the 32-byte sense data within an IRB.  Transport-mode and
 * command-mode interrupts keep the sense bytes in different places;
 * this helper unifies the lookup for both.  Returns NULL when no
 * sense data is present.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb;

	if (!scsw_is_tm(&irb->scsw) || irb->scsw.tm.fcxs != 0x01)
		/* command mode: sense lives in the ECW when concurrent
		 * sense is flagged */
		return irb->esw.esw0.erw.cons ? irb->ecw : NULL;

	/* transport mode: dig the sense out of the TSB, if valid */
	if (!irb->scsw.tm.tcw)
		return NULL;
	tsb = tcw_get_tsb((struct tcw *)(unsigned long) irb->scsw.tm.tcw);
	if (!tsb || tsb->length != 64 || !tsb->flags)
		return NULL;
	switch (tsb->flags & 0x07) {
	case 1:	/* tsa_iostat */
		return tsb->tsa.iostat.sense;
	case 2:	/* tsa_ddpc */
		return tsb->tsa.ddpc.sense;
	default:
		/* currently we don't use interrogate data */
		return NULL;
	}
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
/*
 * Module initialization: set up wait queues, the common debug area and
 * the driver subsystems (devmap, gendisk, parameter parsing, EER,
 * procfs).  On any failure dasd_exit() tears down whatever part was
 * already initialized.
 */
static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	/* parse module/kernel parameters after devmap is ready */
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}
module_init(dasd_init);
module_exit(dasd_exit);

/*
 * Exported interfaces -- presumably consumed by the DASD discipline
 * modules (ECKD/FBA/DIAG) and related drivers; verify against their
 * users before changing.
 */
EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);
| gpl-2.0 |
ArthySundaram/chromeos-kvm | drivers/platform/x86/fujitsu-tablet.c | 4843 | 10510 | /*
* Copyright (C) 2006-2012 Robert Gerlach <khnz@gmx.de>
* Copyright (C) 2005-2006 Jan Rychter <jan@rychter.com>
*
* You can redistribute and/or modify this program under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#define MODULENAME "fujitsu-tablet"
#define ACPI_FUJITSU_CLASS "fujitsu"
#define INVERT_TABLET_MODE_BIT 0x01
#define FORCE_TABLET_MODE_IF_UNDOCK 0x02
#define KEYMAP_LEN 16
static const struct acpi_device_id fujitsu_ids[] = {
{ .id = "FUJ02BD" },
{ .id = "FUJ02BF" },
{ .id = "" }
};
struct fujitsu_config {
unsigned short keymap[KEYMAP_LEN];
unsigned int quirks;
};
static unsigned short keymap_Lifebook_Tseries[KEYMAP_LEN] __initconst = {
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_SCROLLDOWN,
KEY_SCROLLUP,
KEY_DIRECTION,
KEY_LEFTCTRL,
KEY_BRIGHTNESSUP,
KEY_BRIGHTNESSDOWN,
KEY_BRIGHTNESS_ZERO,
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_LEFTALT
};
static unsigned short keymap_Lifebook_U810[KEYMAP_LEN] __initconst = {
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_PROG1,
KEY_PROG2,
KEY_DIRECTION,
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_UP,
KEY_DOWN,
KEY_RESERVED,
KEY_RESERVED,
KEY_LEFTCTRL,
KEY_LEFTALT
};
static unsigned short keymap_Stylistic_Tseries[KEYMAP_LEN] __initconst = {
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_PRINT,
KEY_BACKSPACE,
KEY_SPACE,
KEY_ENTER,
KEY_BRIGHTNESSUP,
KEY_BRIGHTNESSDOWN,
KEY_DOWN,
KEY_UP,
KEY_SCROLLUP,
KEY_SCROLLDOWN,
KEY_LEFTCTRL,
KEY_LEFTALT
};
static unsigned short keymap_Stylistic_ST5xxx[KEYMAP_LEN] __initconst = {
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_MAIL,
KEY_DIRECTION,
KEY_ESC,
KEY_ENTER,
KEY_BRIGHTNESSUP,
KEY_BRIGHTNESSDOWN,
KEY_DOWN,
KEY_UP,
KEY_SCROLLUP,
KEY_SCROLLDOWN,
KEY_LEFTCTRL,
KEY_LEFTALT
};
static struct {
struct input_dev *idev;
struct fujitsu_config config;
unsigned long prev_keymask;
char phys[21];
int irq;
int io_base;
int io_length;
} fujitsu;
/* Read port base+2 -- presumably acknowledges/clears the pending event
 * (called after handling an interrupt); verify against hardware docs */
static u8 fujitsu_ack(void)
{
	return inb(fujitsu.io_base + 2);
}

/* Status byte: bit 0 is tested as "event pending" in the IRQ handler,
 * bit 1 as "busy" in fujitsu_reset() */
static u8 fujitsu_status(void)
{
	return inb(fujitsu.io_base + 6);
}

/* Indexed register read: write the register address, read the data port */
static u8 fujitsu_read_register(const u8 addr)
{
	outb(addr, fujitsu.io_base);
	return inb(fujitsu.io_base + 4);
}
/*
 * Read the dock/tablet-mode state register (0xdd) and report the
 * SW_DOCK and SW_TABLET_MODE switches to the input layer, applying
 * the per-model quirks.
 */
static void fujitsu_send_state(void)
{
	unsigned int quirks = fujitsu.config.quirks;
	int state = fujitsu_read_register(0xdd);
	int docked = state & 0x02;
	int tablet;

	if ((quirks & FORCE_TABLET_MODE_IF_UNDOCK) && !docked) {
		/* quirk: an undocked unit is always in tablet mode */
		tablet = 1;
	} else {
		tablet = state & 0x01;
		if (quirks & INVERT_TABLET_MODE_BIT)
			tablet = !tablet;
	}

	input_report_switch(fujitsu.idev, SW_DOCK, docked);
	input_report_switch(fujitsu.idev, SW_TABLET_MODE, tablet);
	input_sync(fujitsu.idev);
}
/*
 * Drain a pending event and wait (up to 50 * 20ms) for the busy bit
 * (0x02) to clear, then push the current dock/tablet state to the
 * input layer.
 */
static void fujitsu_reset(void)
{
	int timeout = 50;

	fujitsu_ack();

	while ((fujitsu_status() & 0x02) && (--timeout))
		msleep(20);

	fujitsu_send_state();
}
/*
 * Allocate, configure and register the input device that reports the
 * tablet buttons and the dock/tablet-mode switches.
 *
 * Fix: the original code registered the SW_DOCK and SW_TABLET_MODE
 * capabilities twice; each capability only needs to be set once.
 *
 * Returns 0 on success or a negative error code; on failure the
 * allocated device is freed.
 */
static int __devinit input_fujitsu_setup(struct device *parent,
					 const char *name, const char *phys)
{
	struct input_dev *idev;
	int error;
	int i;

	idev = input_allocate_device();
	if (!idev)
		return -ENOMEM;

	idev->dev.parent = parent;
	idev->phys = phys;
	idev->name = name;
	idev->id.bustype = BUS_HOST;
	idev->id.vendor  = 0x1734;	/* Fujitsu Siemens Computer GmbH */
	idev->id.product = 0x0001;
	idev->id.version = 0x0101;

	idev->keycode = fujitsu.config.keymap;
	idev->keycodesize = sizeof(fujitsu.config.keymap[0]);
	idev->keycodemax = ARRAY_SIZE(fujitsu.config.keymap);

	__set_bit(EV_REP, idev->evbit);

	for (i = 0; i < ARRAY_SIZE(fujitsu.config.keymap); i++)
		if (fujitsu.config.keymap[i])
			input_set_capability(idev, EV_KEY,
					     fujitsu.config.keymap[i]);

	input_set_capability(idev, EV_MSC, MSC_SCAN);

	input_set_capability(idev, EV_SW, SW_DOCK);
	input_set_capability(idev, EV_SW, SW_TABLET_MODE);

	error = input_register_device(idev);
	if (error) {
		input_free_device(idev);
		return error;
	}

	fujitsu.idev = idev;
	return 0;
}
/* Unregister (and thereby release) the input device */
static void input_fujitsu_remove(void)
{
	input_unregister_device(fujitsu.idev);
}
/*
 * Shared IRQ handler: refresh the dock/tablet switches, read the
 * 16-bit key mask from registers 0xde/0xdf, and report press/release
 * events for every changed bit, then acknowledge the event.
 */
static irqreturn_t fujitsu_interrupt(int irq, void *dev_id)
{
	unsigned long keymask, changed;
	unsigned int keycode;
	int pressed;
	int i;

	/* not our interrupt unless the event bit is set */
	if (unlikely(!(fujitsu_status() & 0x01)))
		return IRQ_NONE;

	fujitsu_send_state();

	keymask = fujitsu_read_register(0xde);
	keymask |= fujitsu_read_register(0xdf) << 8;
	/* invert -- the hardware apparently reports pressed keys as
	 * 0-bits; after the XOR a set bit means "pressed" */
	keymask ^= 0xffff;

	changed = keymask ^ fujitsu.prev_keymask;
	if (changed) {
		fujitsu.prev_keymask = keymask;

		for_each_set_bit(i, &changed, KEYMAP_LEN) {
			keycode = fujitsu.config.keymap[i];
			pressed = keymask & changed & BIT(i);

			if (pressed)
				input_event(fujitsu.idev, EV_MSC, MSC_SCAN, i);

			input_report_key(fujitsu.idev, keycode, pressed);
			input_sync(fujitsu.idev);
		}
	}

	fujitsu_ack();
	return IRQ_HANDLED;
}
/* DMI match callback: install the keymap attached to the DMI entry */
static int __devinit fujitsu_dmi_default(const struct dmi_system_id *dmi)
{
	printk(KERN_INFO MODULENAME ": %s\n", dmi->ident);
	memcpy(fujitsu.config.keymap, dmi->driver_data,
	       sizeof(fujitsu.config.keymap));
	return 1;
}

/* Stylistic models additionally need both tablet-mode quirks */
static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
{
	fujitsu_dmi_default(dmi);
	fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
	fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
	return 1;
}
static struct dmi_system_id dmi_ids[] __initconst = {
{
.callback = fujitsu_dmi_default,
.ident = "Fujitsu Siemens P/T Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK")
},
.driver_data = keymap_Lifebook_Tseries
},
{
.callback = fujitsu_dmi_default,
.ident = "Fujitsu Lifebook T Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook T")
},
.driver_data = keymap_Lifebook_Tseries
},
{
.callback = fujitsu_dmi_stylistic,
.ident = "Fujitsu Siemens Stylistic T Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "Stylistic T")
},
.driver_data = keymap_Stylistic_Tseries
},
{
.callback = fujitsu_dmi_default,
.ident = "Fujitsu LifeBook U810",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook U810")
},
.driver_data = keymap_Lifebook_U810
},
{
.callback = fujitsu_dmi_stylistic,
.ident = "Fujitsu Siemens Stylistic ST5xxx Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "STYLISTIC ST5")
},
.driver_data = keymap_Stylistic_ST5xxx
},
{
.callback = fujitsu_dmi_stylistic,
.ident = "Fujitsu Siemens Stylistic ST5xxx Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "Stylistic ST5")
},
.driver_data = keymap_Stylistic_ST5xxx
},
{
.callback = fujitsu_dmi_default,
.ident = "Unknown (using defaults)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, ""),
DMI_MATCH(DMI_PRODUCT_NAME, "")
},
.driver_data = keymap_Lifebook_Tseries
},
{ NULL }
};
/*
 * acpi_walk_resources() callback: collect the IRQ number and I/O port
 * window from the device's _CRS.  At the END_TAG, succeed only if
 * both were found.
 */
static acpi_status __devinit
fujitsu_walk_resources(struct acpi_resource *res, void *data)
{
	switch (res->type) {
	case ACPI_RESOURCE_TYPE_IRQ:
		fujitsu.irq = res->data.irq.interrupts[0];
		return AE_OK;

	case ACPI_RESOURCE_TYPE_IO:
		fujitsu.io_base = res->data.io.minimum;
		fujitsu.io_length = res->data.io.address_length;
		return AE_OK;

	case ACPI_RESOURCE_TYPE_END_TAG:
		/* end of the resource list: were both resources seen? */
		return (fujitsu.irq && fujitsu.io_base) ? AE_OK
							: AE_NOT_FOUND;

	default:
		return AE_ERROR;
	}
}
/*
 * ACPI add callback: discover the IRQ and I/O resources from _CRS,
 * set up the input device, claim the I/O region, reset the controller
 * and install the interrupt handler.  Partially acquired resources
 * are released on any failure.
 */
static int __devinit acpi_fujitsu_add(struct acpi_device *adev)
{
	acpi_status status;
	int error;

	if (!adev)
		return -EINVAL;

	status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
				     fujitsu_walk_resources, NULL);
	if (ACPI_FAILURE(status) || !fujitsu.irq || !fujitsu.io_base)
		return -ENODEV;

	sprintf(acpi_device_name(adev), "Fujitsu %s", acpi_device_hid(adev));
	sprintf(acpi_device_class(adev), "%s", ACPI_FUJITSU_CLASS);

	snprintf(fujitsu.phys, sizeof(fujitsu.phys),
		 "%s/input0", acpi_device_hid(adev));

	error = input_fujitsu_setup(&adev->dev,
				    acpi_device_name(adev), fujitsu.phys);
	if (error)
		return error;

	if (!request_region(fujitsu.io_base, fujitsu.io_length, MODULENAME)) {
		input_fujitsu_remove();
		return -EBUSY;
	}

	fujitsu_reset();

	/* dev_id is the handler itself: a unique cookie for the shared IRQ */
	error = request_irq(fujitsu.irq, fujitsu_interrupt,
			    IRQF_SHARED, MODULENAME, fujitsu_interrupt);
	if (error) {
		release_region(fujitsu.io_base, fujitsu.io_length);
		input_fujitsu_remove();
		return error;
	}

	return 0;
}
/* ACPI remove callback: tear down in reverse order of acpi_fujitsu_add() */
static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type)
{
	free_irq(fujitsu.irq, fujitsu_interrupt);
	release_region(fujitsu.io_base, fujitsu.io_length);
	input_fujitsu_remove();
	return 0;
}

/* Re-sync controller and switch state after suspend/resume */
static int acpi_fujitsu_resume(struct acpi_device *adev)
{
	fujitsu_reset();
	return 0;
}

static struct acpi_driver acpi_fujitsu_driver = {
	.name	= MODULENAME,
	.class	= "hotkey",
	.ids	= fujitsu_ids,
	.ops	= {
		.add	= acpi_fujitsu_add,
		.remove	= acpi_fujitsu_remove,
		.resume	= acpi_fujitsu_resume,
	}
};
/*
 * Module init: pick the keymap/quirks for this machine via DMI, then
 * register the ACPI driver.  Returns 0 on success or the negative
 * error code from driver registration.
 */
static int __init fujitsu_module_init(void)
{
	dmi_check_system(dmi_ids);
	return acpi_bus_register_driver(&acpi_fujitsu_driver);
}
/* Module teardown: unregistering the driver triggers the remove path */
static void __exit fujitsu_module_exit(void)
{
	acpi_bus_unregister_driver(&acpi_fujitsu_driver);
}

module_init(fujitsu_module_init);
module_exit(fujitsu_module_exit);

MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>");
MODULE_DESCRIPTION("Fujitsu tablet pc extras driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.4");

MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
| gpl-2.0 |
SlimRoms/kernel_lge_geeb | arch/alpha/boot/main.c | 7915 | 4373 | /*
* arch/alpha/boot/main.c
*
* Copyright (C) 1994, 1995 Linus Torvalds
*
* This file is the bootloader for the Linux/AXP kernel
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <generated/utsrelease.h>
#include <linux/mm.h>
#include <asm/console.h>
#include <asm/hwrpb.h>
#include <asm/pgtable.h>
#include <stdarg.h>
#include "ksize.h"
extern int vsprintf(char *, const char *, va_list);
extern unsigned long switch_to_osf_pal(unsigned long nr,
struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
unsigned long *vptb);
struct hwrpb_struct *hwrpb = INIT_HWRPB;
static struct pcb_struct pcb_va[1];
/*
* Find a physical address of a virtual object..
*
* This is easy using the virtual page table address.
*/
/*
 * Translate a virtual address into a physical address by looking up
 * its PTE in the virtually mapped page table @vptb.  Pages are 8 kB
 * (13 offset bits) and the page frame number sits in the upper half
 * of the 64-bit PTE.
 */
static inline void *
find_pa(unsigned long *vptb, void *ptr)
{
	unsigned long vaddr = (unsigned long) ptr;
	unsigned long pte = vptb[vaddr >> 13];
	unsigned long pfn = pte >> 32;

	return (void *) ((pfn << 13) | (vaddr & 0x1fff));
}
/*
* This function moves into OSF/1 pal-code, and has a temporary
* PCB for that. The kernel proper should replace this PCB with
* the real one as soon as possible.
*
* The page table muckery in here depends on the fact that the boot
* code has the L1 page table identity-map itself in the second PTE
* in the L1 page table. Thus the L1-page is virtually addressable
* itself (through three levels) at virtual address 0x200802000.
*/
#define VPTB ((unsigned long *) 0x200000000)
#define L1 ((unsigned long *) 0x200802000)
/*
 * Switch the CPU from the console PAL-code to the OSF/1 PAL-code that
 * the kernel expects, using a temporary dummy PCB.  Halts the machine
 * if the switch fails.
 */
void
pal_init(void)
{
	unsigned long i, rev;
	struct percpu_struct * percpu;
	struct pcb_struct * pcb_pa;

	/* Create the dummy PCB. */
	pcb_va->ksp = 0;
	pcb_va->usp = 0;
	pcb_va->ptbr = L1[1] >> 32;	/* PFN of the L1 page table */
	pcb_va->asn = 0;
	pcb_va->pcc = 0;
	pcb_va->unique = 0;
	pcb_va->flags = 1;
	pcb_va->res1 = 0;
	pcb_va->res2 = 0;
	/* the PAL switch needs the PCB's physical address */
	pcb_pa = find_pa(VPTB, pcb_va);

	/*
	 * a0 = 2 (OSF)
	 * a1 = return address, but we give the asm the vaddr of the PCB
	 * a2 = physical addr of PCB
	 * a3 = new virtual page table pointer
	 * a4 = KSP (but the asm sets it)
	 */
	srm_printk("Switching to OSF PAL-code .. ");

	i = switch_to_osf_pal(2, pcb_va, pcb_pa, VPTB);
	if (i) {
		srm_printk("failed, code %ld\n", i);
		__halt();
	}

	percpu = (struct percpu_struct *)
		(INIT_HWRPB->processor_offset + (unsigned long) INIT_HWRPB);
	rev = percpu->pal_revision = percpu->palcode_avail[2];

	srm_printk("Ok (rev %lx)\n", rev);

	tbia(); /* do it directly in case we are SMP */
}
/* Open the SRM boot device named by the BOOTED_DEV environment variable;
 * returns a device handle or a negative console error code */
static inline long openboot(void)
{
	char bootdev[256];
	long result;

	result = callback_getenv(ENV_BOOTED_DEV, bootdev, 255);
	if (result < 0)
		return result;
	return callback_open(bootdev, result & 255);
}

/* Close an SRM console device handle */
static inline long close(long dev)
{
	return callback_close(dev);
}

/*
 * Read @count bytes of the kernel image from @dev to @addr.  The last
 * argument of callback_read (boot_size/512 + 1) skips past this
 * bootloader -- presumably the starting disk block; verify against the
 * SRM console documentation.  A BOOTED_FILE specification is reported
 * but not honoured.
 */
static inline long load(long dev, unsigned long addr, unsigned long count)
{
	char bootfile[256];
	extern char _end;
	long result, boot_size = &_end - (char *) BOOT_ADDR;

	result = callback_getenv(ENV_BOOTED_FILE, bootfile, 255);
	if (result < 0)
		return result;
	result &= 255;
	bootfile[result] = '\0';
	if (result)
		srm_printk("Boot file specification (%s) not implemented\n",
			   bootfile);
	return callback_read(dev, count, (void *)addr, boot_size/512 + 1);
}
/*
* Start the kernel.
*/
/*
 * Jump into the loaded kernel: load the stack pointer ($30) and
 * "return" through $26 into the kernel entry point.  Never returns.
 */
static void runkernel(void)
{
	__asm__ __volatile__(
		"bis %1,%1,$30\n\t"	/* $30 (sp) = initial kernel stack */
		"bis %0,%0,$26\n\t"	/* $26 (ra) = kernel entry point */
		"ret ($26)"
		: /* no outputs: it doesn't even return */
		: "r" (START_ADDR),
		  "r" (PAGE_SIZE + INIT_STACK));
}
/*
 * Bootloader entry: verify the page size, switch PAL-code, load the
 * kernel image from the boot device, pass the boot flags to the
 * kernel via ZERO_PGE and jump into it.  Halts on unrecoverable
 * errors (after runkernel() control should never come back).
 */
void start_kernel(void)
{
	long i;
	long dev;
	int nbytes;
	char envval[256];

	srm_printk("Linux/AXP bootloader for Linux " UTS_RELEASE "\n");
	if (INIT_HWRPB->pagesize != 8192) {
		srm_printk("Expected 8kB pages, got %ldkB\n", INIT_HWRPB->pagesize >> 10);
		return;
	}
	pal_init();
	dev = openboot();
	if (dev < 0) {
		srm_printk("Unable to open boot device: %016lx\n", dev);
		return;
	}
	dev &= 0xffffffff;	/* keep only the 32-bit device handle */
	srm_printk("Loading vmlinux ...");
	i = load(dev, START_ADDR, KERNEL_SIZE);
	close(dev);
	if (i != KERNEL_SIZE) {
		srm_printk("Failed (%lx)\n", i);
		return;
	}

	/* fetch the boot flags and hand them to the kernel via ZERO_PGE */
	nbytes = callback_getenv(ENV_BOOTED_OSFLAGS, envval, sizeof(envval));
	if (nbytes < 0) {
		nbytes = 0;
	}
	envval[nbytes] = '\0';
	strcpy((char *)ZERO_PGE, envval);

	srm_printk(" Ok\nNow booting the kernel\n");
	runkernel();
	/* should never get here; spin a while, then halt */
	for (i = 0 ; i < 0x100000000 ; i++)
		/* nothing */;
	__halt();
}
| gpl-2.0 |
uniquejainakshay/Linux_Kernel | drivers/isdn/hardware/eicon/message.c | 8171 | 446143 | /*
*
Copyright (c) Eicon Networks, 2002.
*
This source file is supplied for the use with
Eicon Networks range of DIVA Server Adapters.
*
Eicon File Revision : 2.1
*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
*
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
*
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include "platform.h"
#include "di_defs.h"
#include "pc.h"
#include "capi20.h"
#include "divacapi.h"
#include "mdm_msg.h"
#include "divasync.h"
#define FILE_ "MESSAGE.C"
#define dprintf
/*------------------------------------------------------------------*/
/* This is options supported for all adapters that are server by */
/* XDI driver. Allo it is not necessary to ask it from every adapter*/
/* and it is not necessary to save it separate for every adapter */
/* Macrose defined here have only local meaning */
/*------------------------------------------------------------------*/
static dword diva_xdi_extended_features = 0;
#define DIVA_CAPI_USE_CMA 0x00000001
#define DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR 0x00000002
#define DIVA_CAPI_XDI_PROVIDES_NO_CANCEL 0x00000004
#define DIVA_CAPI_XDI_PROVIDES_RX_DMA 0x00000008
/*
CAPI can request to process all return codes self only if:
protocol code supports this && xdi supports this
*/
#define DIVA_CAPI_SUPPORTS_NO_CANCEL(__a__) (((__a__)->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL) && ((__a__)->manufacturer_features & MANUFACTURER_FEATURE_OK_FC_LABEL) && (diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_NO_CANCEL))
/*------------------------------------------------------------------*/
/* local function prototypes */
/*------------------------------------------------------------------*/
static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci);
static void set_group_ind_mask(PLCI *plci);
static void clear_group_ind_mask_bit(PLCI *plci, word b);
static byte test_group_ind_mask_bit(PLCI *plci, word b);
void AutomaticLaw(DIVA_CAPI_ADAPTER *);
word CapiRelease(word);
word CapiRegister(word);
word api_put(APPL *, CAPI_MSG *);
static word api_parse(byte *, word, byte *, API_PARSE *);
static void api_save_msg(API_PARSE *in, byte *format, API_SAVE *out);
static void api_load_msg(API_SAVE *in, API_PARSE *out);
word api_remove_start(void);
void api_remove_complete(void);
static void plci_remove(PLCI *);
static void diva_get_extended_adapter_features(DIVA_CAPI_ADAPTER *a);
static void diva_ask_for_xdi_sdram_bar(DIVA_CAPI_ADAPTER *, IDI_SYNC_REQ *);
void callback(ENTITY *);
static void control_rc(PLCI *, byte, byte, byte, byte, byte);
static void data_rc(PLCI *, byte);
static void data_ack(PLCI *, byte);
static void sig_ind(PLCI *);
static void SendInfo(PLCI *, dword, byte **, byte);
static void SendSetupInfo(APPL *, PLCI *, dword, byte **, byte);
static void SendSSExtInd(APPL *, PLCI *plci, dword Id, byte **parms);
static void VSwitchReqInd(PLCI *plci, dword Id, byte **parms);
static void nl_ind(PLCI *);
static byte connect_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte disconnect_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte disconnect_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte listen_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte info_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte info_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte alert_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte facility_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte facility_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_b3_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte disconnect_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte disconnect_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte data_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte data_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte reset_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte reset_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_b3_t90_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte select_b_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte manufacturer_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte manufacturer_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static word get_plci(DIVA_CAPI_ADAPTER *);
static void add_p(PLCI *, byte, byte *);
static void add_s(PLCI *plci, byte code, API_PARSE *p);
static void add_ss(PLCI *plci, byte code, API_PARSE *p);
static void add_ie(PLCI *plci, byte code, byte *p, word p_length);
static void add_d(PLCI *, word, byte *);
static void add_ai(PLCI *, API_PARSE *);
static word add_b1(PLCI *, API_PARSE *, word, word);
static word add_b23(PLCI *, API_PARSE *);
static word add_modem_b23(PLCI *plci, API_PARSE *bp_parms);
static void sig_req(PLCI *, byte, byte);
static void nl_req_ncci(PLCI *, byte, byte);
static void send_req(PLCI *);
static void send_data(PLCI *);
static word plci_remove_check(PLCI *);
static void listen_check(DIVA_CAPI_ADAPTER *);
static byte AddInfo(byte **, byte **, byte *, byte *);
static byte getChannel(API_PARSE *);
static void IndParse(PLCI *, word *, byte **, byte);
static byte ie_compare(byte *, byte *);
static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *);
static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word);
/*
XON protocol helpers
*/
static void channel_flow_control_remove(PLCI *plci);
static void channel_x_off(PLCI *plci, byte ch, byte flag);
static void channel_x_on(PLCI *plci, byte ch);
static void channel_request_xon(PLCI *plci, byte ch);
static void channel_xmit_xon(PLCI *plci);
static int channel_can_xon(PLCI *plci, byte ch);
static void channel_xmit_extended_xon(PLCI *plci);
static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type, dword info_mask, byte setupParse);
static word AdvCodecSupport(DIVA_CAPI_ADAPTER *, PLCI *, APPL *, byte);
static void CodecIdCheck(DIVA_CAPI_ADAPTER *, PLCI *);
static void SetVoiceChannel(PLCI *, byte *, DIVA_CAPI_ADAPTER *);
static void VoiceChannelOff(PLCI *plci);
static void adv_voice_write_coefs(PLCI *plci, word write_command);
static void adv_voice_clear_config(PLCI *plci);
static word get_b1_facilities(PLCI *plci, byte b1_resource);
static byte add_b1_facilities(PLCI *plci, byte b1_resource, word b1_facilities);
static void adjust_b1_facilities(PLCI *plci, byte new_b1_resource, word new_b1_facilities);
static word adjust_b_process(dword Id, PLCI *plci, byte Rc);
static void adjust_b1_resource(dword Id, PLCI *plci, API_SAVE *bp_msg, word b1_facilities, word internal_command);
static void adjust_b_restore(dword Id, PLCI *plci, byte Rc);
static void reset_b3_command(dword Id, PLCI *plci, byte Rc);
static void select_b_command(dword Id, PLCI *plci, byte Rc);
static void fax_connect_ack_command(dword Id, PLCI *plci, byte Rc);
static void fax_edata_ack_command(dword Id, PLCI *plci, byte Rc);
static void fax_connect_info_command(dword Id, PLCI *plci, byte Rc);
static void fax_adjust_b23_command(dword Id, PLCI *plci, byte Rc);
static void fax_disconnect_command(dword Id, PLCI *plci, byte Rc);
static void hold_save_command(dword Id, PLCI *plci, byte Rc);
static void retrieve_restore_command(dword Id, PLCI *plci, byte Rc);
static void init_b1_config(PLCI *plci);
static void clear_b1_config(PLCI *plci);
static void dtmf_command(dword Id, PLCI *plci, byte Rc);
static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
static void dtmf_confirmation(dword Id, PLCI *plci);
static void dtmf_indication(dword Id, PLCI *plci, byte *msg, word length);
static void dtmf_parameter_write(PLCI *plci);
static void mixer_set_bchannel_id_esc(PLCI *plci, byte bchannel_id);
static void mixer_set_bchannel_id(PLCI *plci, byte *chi);
static void mixer_clear_config(PLCI *plci);
static void mixer_notify_update(PLCI *plci, byte others);
static void mixer_command(dword Id, PLCI *plci, byte Rc);
static byte mixer_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
static void mixer_indication_coefs_set(dword Id, PLCI *plci);
static void mixer_indication_xconnect_from(dword Id, PLCI *plci, byte *msg, word length);
static void mixer_indication_xconnect_to(dword Id, PLCI *plci, byte *msg, word length);
static void mixer_remove(PLCI *plci);
static void ec_command(dword Id, PLCI *plci, byte Rc);
static byte ec_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
static void ec_indication(dword Id, PLCI *plci, byte *msg, word length);
static void rtp_connect_b3_req_command(dword Id, PLCI *plci, byte Rc);
static void rtp_connect_b3_res_command(dword Id, PLCI *plci, byte Rc);
static int diva_get_dma_descriptor(PLCI *plci, dword *dma_magic);
static void diva_free_dma_descriptor(PLCI *plci, int nr);
/*------------------------------------------------------------------*/
/* external function prototypes */
/*------------------------------------------------------------------*/
extern byte MapController(byte);
extern byte UnMapController(byte);
#define MapId(Id)(((Id) & 0xffffff00L) | MapController((byte)(Id)))
#define UnMapId(Id)(((Id) & 0xffffff00L) | UnMapController((byte)(Id)))
void sendf(APPL *, word, dword, word, byte *, ...);
void *TransmitBufferSet(APPL *appl, dword ref);
void *TransmitBufferGet(APPL *appl, void *p);
void TransmitBufferFree(APPL *appl, void *p);
void *ReceiveBufferGet(APPL *appl, int Num);
int fax_head_line_time(char *buffer);
/*------------------------------------------------------------------*/
/* Global data definitions */
/*------------------------------------------------------------------*/
extern byte max_adapter;
extern byte max_appl;
extern DIVA_CAPI_ADAPTER *adapter;
extern APPL *application;
/* set once api_remove_start() has begun tearing down all adapters */
static byte remove_started = false;
/* placeholder PLCI used for on-board codec listen entries */
static PLCI dummy_plci;
/* CAPI message dispatch table: command code, api_parse() format string
   of the parameter area, and the handler function.  Scanned linearly by
   api_put(); a command may appear more than once with alternative
   parameter formats. */
static struct _ftable {
	word command;
	byte *format;
	byte (*function)(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
} ftable[] = {
	{_DATA_B3_R, "dwww", data_b3_req},
	{_DATA_B3_I | RESPONSE, "w", data_b3_res},
	{_INFO_R, "ss", info_req},
	{_INFO_I | RESPONSE, "", info_res},
	{_CONNECT_R, "wsssssssss", connect_req},
	{_CONNECT_I | RESPONSE, "wsssss", connect_res},
	{_CONNECT_ACTIVE_I | RESPONSE, "", connect_a_res},
	{_DISCONNECT_R, "s", disconnect_req},
	{_DISCONNECT_I | RESPONSE, "", disconnect_res},
	{_LISTEN_R, "dddss", listen_req},
	{_ALERT_R, "s", alert_req},
	{_FACILITY_R, "ws", facility_req},
	{_FACILITY_I | RESPONSE, "ws", facility_res},
	{_CONNECT_B3_R, "s", connect_b3_req},
	{_CONNECT_B3_I | RESPONSE, "ws", connect_b3_res},
	{_CONNECT_B3_ACTIVE_I | RESPONSE, "", connect_b3_a_res},
	{_DISCONNECT_B3_R, "s", disconnect_b3_req},
	{_DISCONNECT_B3_I | RESPONSE, "", disconnect_b3_res},
	{_RESET_B3_R, "s", reset_b3_req},
	{_RESET_B3_I | RESPONSE, "", reset_b3_res},
	/* two alternative parameter formats for CONNECT_B3_T90_ACT_RES */
	{_CONNECT_B3_T90_ACTIVE_I | RESPONSE, "ws", connect_b3_t90_a_res},
	{_CONNECT_B3_T90_ACTIVE_I | RESPONSE, "", connect_b3_t90_a_res},
	{_SELECT_B_REQ, "s", select_b_req},
	{_MANUFACTURER_R, "dws", manufacturer_req},
	/* two alternative parameter formats for MANUFACTURER_RES */
	{_MANUFACTURER_I | RESPONSE, "dws", manufacturer_res},
	{_MANUFACTURER_I | RESPONSE, "", manufacturer_res}
};
/* Bearer Capability IE contents indexed by CIP value (0..28); each
   entry is a length-prefixed byte string.  Column [1] is used when the
   adapter's a->u_law flag is set, column [0] otherwise (see the
   cip_bc[...][a->u_law] lookup in connect_req()).  Empty strings mean
   no BC is added for that CIP value. */
static byte *cip_bc[29][2] = {
	{ "", "" },                           /* 0 */
	{ "\x03\x80\x90\xa3", "\x03\x80\x90\xa2" }, /* 1 */
	{ "\x02\x88\x90", "\x02\x88\x90" },   /* 2 */
	{ "\x02\x89\x90", "\x02\x89\x90" },   /* 3 */
	{ "\x03\x90\x90\xa3", "\x03\x90\x90\xa2" }, /* 4 */
	{ "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 5 */
	{ "\x02\x98\x90", "\x02\x98\x90" },   /* 6 */
	{ "\x04\x88\xc0\xc6\xe6", "\x04\x88\xc0\xc6\xe6" }, /* 7 */
	{ "\x04\x88\x90\x21\x8f", "\x04\x88\x90\x21\x8f" }, /* 8 */
	{ "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 9 */
	{ "", "" },                           /* 10 */
	{ "", "" },                           /* 11 */
	{ "", "" },                           /* 12 */
	{ "", "" },                           /* 13 */
	{ "", "" },                           /* 14 */
	{ "", "" },                           /* 15 */
	{ "\x03\x80\x90\xa3", "\x03\x80\x90\xa2" }, /* 16 */
	{ "\x03\x90\x90\xa3", "\x03\x90\x90\xa2" }, /* 17 */
	{ "\x02\x88\x90", "\x02\x88\x90" },   /* 18 */
	{ "\x02\x88\x90", "\x02\x88\x90" },   /* 19 */
	{ "\x02\x88\x90", "\x02\x88\x90" },   /* 20 */
	{ "\x02\x88\x90", "\x02\x88\x90" },   /* 21 */
	{ "\x02\x88\x90", "\x02\x88\x90" },   /* 22 */
	{ "\x02\x88\x90", "\x02\x88\x90" },   /* 23 */
	{ "\x02\x88\x90", "\x02\x88\x90" },   /* 24 */
	{ "\x02\x88\x90", "\x02\x88\x90" },   /* 25 */
	{ "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 26 */
	{ "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 27 */
	{ "\x02\x88\x90", "\x02\x88\x90" }    /* 28 */
};
/* High Layer Compatibility IE contents indexed by CIP value (0..28),
   length-prefixed; an empty string means no HLC is added for that CIP
   (see the add_p(plci, HLC, cip_hlc[...]) call in connect_req()). */
static byte *cip_hlc[29] = {
	"",                 /* 0 */
	"",                 /* 1 */
	"",                 /* 2 */
	"",                 /* 3 */
	"",                 /* 4 */
	"",                 /* 5 */
	"",                 /* 6 */
	"",                 /* 7 */
	"",                 /* 8 */
	"",                 /* 9 */
	"",                 /* 10 */
	"",                 /* 11 */
	"",                 /* 12 */
	"",                 /* 13 */
	"",                 /* 14 */
	"",                 /* 15 */
	"\x02\x91\x81",     /* 16 */
	"\x02\x91\x84",     /* 17 */
	"\x02\x91\xa1",     /* 18 */
	"\x02\x91\xa4",     /* 19 */
	"\x02\x91\xa8",     /* 20 */
	"\x02\x91\xb1",     /* 21 */
	"\x02\x91\xb2",     /* 22 */
	"\x02\x91\xb5",     /* 23 */
	"\x02\x91\xb8",     /* 24 */
	"\x02\x91\xc1",     /* 25 */
	"\x02\x91\x81",     /* 26 */
	"\x03\x91\xe0\x01", /* 27 */
	"\x03\x91\xe0\x02"  /* 28 */
};
/*------------------------------------------------------------------*/
/* V.120 terminal-adaptation header: bit masks for the single header
   octet and the two pre-built header constants used when sending. */
#define V120_HEADER_LENGTH 1
#define V120_HEADER_EXTEND_BIT 0x80
#define V120_HEADER_BREAK_BIT 0x40
#define V120_HEADER_C1_BIT 0x04
#define V120_HEADER_C2_BIT 0x08
#define V120_HEADER_FLUSH_COND (V120_HEADER_BREAK_BIT | V120_HEADER_C1_BIT | V120_HEADER_C2_BIT)
/* default header octet prepended to outgoing V.120 frames */
static byte v120_default_header[] =
{
	0x83 /* Ext, BR , res, res, C2 , C1 , B , F */
};
/* header octet with the break bit set, used to signal a break */
static byte v120_break_header[] =
{
	0xc3 | V120_HEADER_BREAK_BIT /* Ext, BR , res, res, C2 , C1 , B , F */
};
/*------------------------------------------------------------------*/
/* API_PUT function */
/*------------------------------------------------------------------*/
/*
 * Entry point for CAPI messages from applications.
 *
 * Validates the message header, locates the target adapter and PLCI,
 * and either copies the message into the PLCI's wrap-around input
 * queue (when a request is already in flight or the queue is non-empty)
 * or parses it against ftable[] and dispatches it to the matching
 * handler immediately.
 *
 * Returns 0 on success, _BAD_MSG for malformed messages or unknown
 * controllers, _QUEUE_FULL when the message cannot be queued.
 */
word api_put(APPL *appl, CAPI_MSG *msg)
{
	word i, j, k, l, n;
	word ret;
	byte c;
	byte controller;
	DIVA_CAPI_ADAPTER *a;
	PLCI *plci;
	NCCI *ncci_ptr;
	word ncci;
	CAPI_MSG *m;
	API_PARSE msg_parms[MAX_MSG_PARMS + 1];
	if (msg->header.length < sizeof(msg->header) ||
	    msg->header.length > MAX_MSG_SIZE) {
		dbug(1, dprintf("bad len"));
		return _BAD_MSG;
	}
	controller = (byte)((msg->header.controller & 0x7f) - 1);
	/* controller starts with 0 up to (max_adapter - 1) */
	if (controller >= max_adapter)
	{
		dbug(1, dprintf("invalid ctrl"));
		return _BAD_MSG;
	}
	a = &adapter[controller];
	plci = NULL;
	if ((msg->header.plci != 0) && (msg->header.plci <= a->max_plci) && !a->adapter_disabled)
	{
		dbug(1, dprintf("plci=%x", msg->header.plci));
		plci = &a->plci[msg->header.plci - 1];
		ncci = GET_WORD(&msg->header.ncci);
		/* the PLCI must be active (or the message be a disconnect
		   response) and the NCCI, if given, must belong to it */
		if (plci->Id
		    && (plci->appl
			|| (plci->State == INC_CON_PENDING)
			|| (plci->State == INC_CON_ALERT)
			|| (msg->header.command == (_DISCONNECT_I | RESPONSE)))
		    && ((ncci == 0)
			|| (msg->header.command == (_DISCONNECT_B3_I | RESPONSE))
			|| ((ncci < MAX_NCCI + 1) && (a->ncci_plci[ncci] == plci->Id))))
		{
			/* compute the free space 'i' in the wrap-around
			   msg_in_queue relative to write position 'j' */
			i = plci->msg_in_read_pos;
			j = plci->msg_in_write_pos;
			if (j >= i)
			{
				if (j + msg->header.length + MSG_IN_OVERHEAD <= MSG_IN_QUEUE_SIZE)
					i += MSG_IN_QUEUE_SIZE - j;
				else
					j = 0;
			}
			else
			{
				n = (((CAPI_MSG *)(plci->msg_in_queue))->header.length + MSG_IN_OVERHEAD + 3) & 0xfffc;
				if (i > MSG_IN_QUEUE_SIZE - n)
					i = MSG_IN_QUEUE_SIZE - n + 1;
				i -= j;
			}
			if (i <= ((msg->header.length + MSG_IN_OVERHEAD + 3) & 0xfffc))
			{
				dbug(0, dprintf("Q-FULL1(msg) - len=%d write=%d read=%d wrap=%d free=%d",
						msg->header.length, plci->msg_in_write_pos,
						plci->msg_in_read_pos, plci->msg_in_wrap_pos, i));
				return _QUEUE_FULL;
			}
			/* 'c' becomes true when the message must be queued
			   instead of being processed immediately */
			c = false;
			if ((((byte *) msg) < ((byte *)(plci->msg_in_queue)))
			    || (((byte *) msg) >= ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
			{
				if (plci->msg_in_write_pos != plci->msg_in_read_pos)
					c = true;
			}
			if (msg->header.command == _DATA_B3_R)
			{
				if (msg->header.length < 20)
				{
					dbug(1, dprintf("DATA_B3 REQ wrong length %d", msg->header.length));
					return _BAD_MSG;
				}
				/* count pending DATA_B3 ('n') and ack-pending ('l')
				   including the requests still in the queue */
				ncci_ptr = &(a->ncci[ncci]);
				n = ncci_ptr->data_pending;
				l = ncci_ptr->data_ack_pending;
				k = plci->msg_in_read_pos;
				while (k != plci->msg_in_write_pos)
				{
					if (k == plci->msg_in_wrap_pos)
						k = 0;
					if ((((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.command == _DATA_B3_R)
					    && (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.ncci == ncci))
					{
						n++;
						if (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->info.data_b3_req.Flags & 0x0004)
							l++;
					}
					k += (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.length +
					      MSG_IN_OVERHEAD + 3) & 0xfffc;
				}
				if ((n >= MAX_DATA_B3) || (l >= MAX_DATA_ACK))
				{
					dbug(0, dprintf("Q-FULL2(data) - pending=%d/%d ack_pending=%d/%d",
							ncci_ptr->data_pending, n, ncci_ptr->data_ack_pending, l));
					return _QUEUE_FULL;
				}
				if (plci->req_in || plci->internal_command)
				{
					/* a message that already lives inside the queue
					   cannot be re-queued */
					if ((((byte *) msg) >= ((byte *)(plci->msg_in_queue)))
					    && (((byte *) msg) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
					{
						dbug(0, dprintf("Q-FULL3(requeue)"));
						return _QUEUE_FULL;
					}
					c = true;
				}
			}
			else
			{
				if (plci->req_in || plci->internal_command)
					c = true;
				else
				{
					plci->command = msg->header.command;
					plci->number = msg->header.number;
				}
			}
			if (c)
			{
				/* copy the message into the ring queue; the sending
				   APPL pointer is stored behind the padded message */
				dbug(1, dprintf("enqueue msg(0x%04x,0x%x,0x%x) - len=%d write=%d read=%d wrap=%d free=%d",
						msg->header.command, plci->req_in, plci->internal_command,
						msg->header.length, plci->msg_in_write_pos,
						plci->msg_in_read_pos, plci->msg_in_wrap_pos, i));
				if (j == 0)
					plci->msg_in_wrap_pos = plci->msg_in_write_pos;
				m = (CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]);
				for (i = 0; i < msg->header.length; i++)
					((byte *)(plci->msg_in_queue))[j++] = ((byte *) msg)[i];
				if (m->header.command == _DATA_B3_R)
				{
					m->info.data_b3_req.Data = (dword)(long)(TransmitBufferSet(appl, m->info.data_b3_req.Data));
				}
				j = (j + 3) & 0xfffc;
				*((APPL **)(&((byte *)(plci->msg_in_queue))[j])) = appl;
				plci->msg_in_write_pos = j + MSG_IN_OVERHEAD;
				return 0;
			}
		}
		else
		{
			plci = NULL;
		}
	}
	dbug(1, dprintf("com=%x", msg->header.command));
	/* immediate dispatch: find the command in ftable[] and validate
	   the parameter area against its format string */
	for (j = 0; j < MAX_MSG_PARMS + 1; j++) msg_parms[j].length = 0;
	for (i = 0, ret = _BAD_MSG; i < ARRAY_SIZE(ftable); i++) {
		if (ftable[i].command == msg->header.command) {
			/* break loop if the message is correct, otherwise continue scan */
			/* (for example: CONNECT_B3_T90_ACT_RES has two specifications) */
			if (!api_parse(msg->info.b, (word)(msg->header.length - 12), ftable[i].format, msg_parms)) {
				ret = 0;
				break;
			}
			for (j = 0; j < MAX_MSG_PARMS + 1; j++) msg_parms[j].length = 0;
		}
	}
	if (ret) {
		dbug(1, dprintf("BAD_MSG"));
		if (plci) plci->command = 0;
		return ret;
	}
	c = ftable[i].function(GET_DWORD(&msg->header.controller),
			       msg->header.number,
			       a,
			       plci,
			       appl,
			       msg_parms);
	channel_xmit_extended_xon(plci);
	/* handler return codes: 1 = send request now, 2 = reset request queue */
	if (c == 1) send_req(plci);
	if (c == 2 && plci) plci->req_in = plci->req_in_start = plci->req_out = 0;
	if (plci && !plci->req_in) plci->command = 0;
	return 0;
}
/*------------------------------------------------------------------*/
/* api_parse function, check the format of api messages */
/*------------------------------------------------------------------*/
/*
 * Parse a CAPI message parameter area according to a format string and
 * fill 'parms' with pointers into 'msg'.
 *
 * Format characters: 'b' = byte, 'w' = word, 'd' = dword,
 * 's' = length-prefixed struct (a first byte of 0xff marks an escaped,
 * word-sized length in the following two bytes).
 *
 * 'parms' may be NULL to perform a pure format/length check; the rest
 * of the function already guarded parms accesses with "if (parms)" but
 * the 's' case dereferenced parms unconditionally — fixed here by
 * computing the struct length into a local first.
 *
 * Returns true if the parameters run past 'length' (malformed message),
 * false on success.  The parms list is terminated with a NULL info.
 */
static word api_parse(byte *msg, word length, byte *format, API_PARSE *parms)
{
	word i;
	word p;
	word n;
	for (i = 0, p = 0; format[i]; i++) {
		if (parms)
		{
			parms[i].info = &msg[p];
		}
		switch (format[i]) {
		case 'b':
			p += 1;
			break;
		case 'w':
			p += 2;
			break;
		case 'd':
			p += 4;
			break;
		case 's':
			if (msg[p] == 0xff) {
				/* escaped struct: word length in the next two bytes */
				n = msg[p + 1] + (msg[p + 2] << 8);
				if (parms) {
					parms[i].info += 2;
					parms[i].length = n;
				}
				p += (n + 3);
			}
			else {
				n = msg[p];
				if (parms)
					parms[i].length = n;
				p += (n + 1);
			}
			break;
		}
		if (p > length) return true;
	}
	if (parms) parms[i].info = NULL;
	return false;
}
/*
 * Copy the parsed parameters described by 'format' from 'in' into the
 * flat buffer of 'out', rebuilding out->parms[] to point at the copies.
 * The saved parameter list is terminated with a NULL info entry.
 */
static void api_save_msg(API_PARSE *in, byte *format, API_SAVE *out)
{
	word idx, copy_len = 0;
	byte *dst = out->info;
	for (idx = 0; format[idx] != '\0'; idx++)
	{
		word k;
		out->parms[idx].info = dst;
		out->parms[idx].length = in[idx].length;
		switch (format[idx])
		{
		case 'b':
			copy_len = 1;
			break;
		case 'w':
			copy_len = 2;
			break;
		case 'd':
			copy_len = 4;
			break;
		case 's':
			/* struct: copy the length byte plus the contents */
			copy_len = in[idx].length + 1;
			break;
		}
		for (k = 0; k < copy_len; k++)
			dst[k] = in[idx].info[k];
		dst += copy_len;
	}
	out->parms[idx].info = NULL;
	out->parms[idx].length = 0;
}
/*
 * Restore a saved parameter list into 'out', copying entries up to and
 * including the terminating NULL-info entry.
 */
static void api_load_msg(API_SAVE *in, API_PARSE *out)
{
	word n = 0;
	for (;;)
	{
		out[n].info = in->parms[n].info;
		out[n].length = in->parms[n].length;
		if (!out[n].info)
			break;
		n++;
	}
}
/*------------------------------------------------------------------*/
/* CAPI remove function */
/*------------------------------------------------------------------*/
/*
 * Begin (or continue) removal of all PLCIs on all adapters.
 * On the first call every active PLCI is asked to remove itself; on
 * subsequent calls the adapters are polled.  Returns 1 while work is
 * still pending, 0 once everything is idle (in which case
 * api_remove_complete() has been called).
 */
word api_remove_start(void)
{
	word adp;
	word idx;
	if (!remove_started) {
		/* first pass: trigger removal of every active PLCI */
		remove_started = true;
		for (adp = 0; adp < max_adapter; adp++) {
			if (!adapter[adp].request)
				continue;
			for (idx = 0; idx < adapter[adp].max_plci; idx++) {
				if (adapter[adp].plci[idx].Sig.Id)
					plci_remove(&adapter[adp].plci[idx]);
			}
		}
		return 1;
	}
	/* later passes: report whether any PLCI is still active */
	for (adp = 0; adp < max_adapter; adp++) {
		if (!adapter[adp].request)
			continue;
		for (idx = 0; idx < adapter[adp].max_plci; idx++) {
			if (adapter[adp].plci[idx].Sig.Id)
				return 1;
		}
	}
	api_remove_complete();
	return 0;
}
/*------------------------------------------------------------------*/
/* internal command queue */
/*------------------------------------------------------------------*/
/* Reset the per-PLCI internal command queue to the empty state. */
static void init_internal_command_queue(PLCI *plci)
{
	word slot;
	dbug(1, dprintf("%s,%d: init_internal_command_queue",
			(char *)(FILE_), __LINE__));
	plci->internal_command = 0;
	slot = 0;
	while (slot < MAX_INTERNAL_COMMAND_LEVELS)
		plci->internal_command_queue[slot++] = NULL;
}
/*
 * Queue an internal command handler for the PLCI.  If no internal
 * command is currently running the handler is installed in slot 0 and
 * invoked immediately with Rc == OK; otherwise it is appended to the
 * first free queue slot and run later by next_internal_command().
 */
static void start_internal_command(dword Id, PLCI *plci, t_std_internal_command command_function)
{
	word slot;
	dbug(1, dprintf("[%06lx] %s,%d: start_internal_command",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	if (plci->internal_command != 0)
	{
		/* busy: append to the first free slot after the running one */
		slot = 1;
		while (plci->internal_command_queue[slot] != NULL)
			slot++;
		plci->internal_command_queue[slot] = command_function;
		return;
	}
	plci->internal_command_queue[0] = command_function;
	(*command_function)(Id, plci, OK);
}
/*
 * Advance the internal command queue: clear the finished command and
 * run queued handlers until one of them starts an asynchronous
 * operation (sets plci->internal_command again) or the queue is empty.
 * Handlers may enqueue further commands while running, so the queue is
 * re-shifted on every iteration rather than iterated once.
 */
static void next_internal_command(dword Id, PLCI *plci)
{
	word i;
	dbug(1, dprintf("[%06lx] %s,%d: next_internal_command",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	plci->internal_command = 0;
	plci->internal_command_queue[0] = NULL;
	while (plci->internal_command_queue[1] != NULL)
	{
		/* shift the queue down by one entry */
		for (i = 0; i < MAX_INTERNAL_COMMAND_LEVELS - 1; i++)
			plci->internal_command_queue[i] = plci->internal_command_queue[i + 1];
		plci->internal_command_queue[MAX_INTERNAL_COMMAND_LEVELS - 1] = NULL;
		(*(plci->internal_command_queue[0]))(Id, plci, OK);
		/* handler started an async operation: stop until it completes */
		if (plci->internal_command != 0)
			return;
		plci->internal_command_queue[0] = NULL;
	}
}
/*------------------------------------------------------------------*/
/* NCCI allocate/remove function */
/*------------------------------------------------------------------*/
static dword ncci_mapping_bug = 0;
/*
 * Allocate (or verify) the NCCI that maps network-layer channel 'ch'
 * of the PLCI's adapter.  If force_ncci is non-zero that value is
 * used; otherwise the channel number itself is preferred, falling back
 * to the first free NCCI.  Inconsistent mapping state is logged and
 * counted in ncci_mapping_bug.  Returns the NCCI value used.
 */
static word get_ncci(PLCI *plci, byte ch, word force_ncci)
{
	DIVA_CAPI_ADAPTER *a;
	word ncci, i, j, k;
	a = plci->adapter;
	if (!ch || a->ch_ncci[ch])
	{
		/* channel 0 or channel already mapped: report and reuse ch */
		ncci_mapping_bug++;
		dbug(1, dprintf("NCCI mapping exists %ld %02x %02x %02x-%02x",
				ncci_mapping_bug, ch, force_ncci, a->ncci_ch[a->ch_ncci[ch]], a->ch_ncci[ch]));
		ncci = ch;
	}
	else
	{
		if (force_ncci)
			ncci = force_ncci;
		else
		{
			if ((ch < MAX_NCCI + 1) && !a->ncci_ch[ch])
				ncci = ch;
			else
			{
				/* channel number taken: search the first free NCCI */
				ncci = 1;
				while ((ncci < MAX_NCCI + 1) && a->ncci_ch[ncci])
					ncci++;
				if (ncci == MAX_NCCI + 1)
				{
					/* table full: scan for a channel with duplicate
					   mappings to produce a useful debug message.
					   NOTE(review): 'i' is never advanced inside this
					   scan loop — verify the loop termination against
					   the upstream driver source. */
					ncci_mapping_bug++;
					i = 1;
					do
					{
						j = 1;
						while ((j < MAX_NCCI + 1) && (a->ncci_ch[j] != i))
							j++;
						k = j;
						if (j < MAX_NCCI + 1)
						{
							do
							{
								j++;
							} while ((j < MAX_NCCI + 1) && (a->ncci_ch[j] != i));
						}
					} while ((i < MAX_NL_CHANNEL + 1) && (j < MAX_NCCI + 1));
					if (i < MAX_NL_CHANNEL + 1)
					{
						dbug(1, dprintf("NCCI mapping overflow %ld %02x %02x %02x-%02x-%02x",
								ncci_mapping_bug, ch, force_ncci, i, k, j));
					}
					else
					{
						dbug(1, dprintf("NCCI mapping overflow %ld %02x %02x",
								ncci_mapping_bug, ch, force_ncci));
					}
					ncci = ch;
				}
			}
			a->ncci_plci[ncci] = plci->Id;
			a->ncci_state[ncci] = IDLE;
			/* insert the new NCCI into the PLCI's ring list */
			if (!plci->ncci_ring_list)
				plci->ncci_ring_list = ncci;
			else
				a->ncci_next[ncci] = a->ncci_next[plci->ncci_ring_list];
			a->ncci_next[plci->ncci_ring_list] = (byte) ncci;
		}
		a->ncci_ch[ncci] = ch;
		a->ch_ncci[ch] = (byte) ncci;
		dbug(1, dprintf("NCCI mapping established %ld %02x %02x %02x-%02x",
				ncci_mapping_bug, ch, force_ncci, ch, ncci));
	}
	return (ncci);
}
/*
 * Clear the application receive-buffer slots (appl->DataNCCI[]) that
 * still reference data indications for 'ncci' on this PLCI.
 * Factored out of ncci_free_receive_buffers(), where the identical
 * loop was duplicated in both the single-NCCI and all-NCCI branches.
 */
static void ncci_clear_appl_receive_buffers(PLCI *plci, word ncci)
{
	DIVA_CAPI_ADAPTER *a = plci->adapter;
	APPL *appl = plci->appl;
	word i, ncci_code;
	ncci_code = ncci | (((word) a->Id) << 8);
	for (i = 0; i < appl->MaxBuffer; i++)
	{
		if ((appl->DataNCCI[i] == ncci_code)
		    && (((byte)(appl->DataFlags[i] >> 8)) == plci->Id))
		{
			appl->DataNCCI[i] = 0;
		}
	}
}
/*
 * Release the receive buffers of one NCCI, or of every NCCI owned by
 * the PLCI when ncci == 0.  A missing plci->appl is a mapping
 * inconsistency and is logged via ncci_mapping_bug.
 */
static void ncci_free_receive_buffers(PLCI *plci, word ncci)
{
	DIVA_CAPI_ADAPTER *a;
	dword Id;
	a = plci->adapter;
	Id = (((dword) ncci) << 16) | (((word)(plci->Id)) << 8) | a->Id;
	if (ncci)
	{
		if (a->ncci_plci[ncci] == plci->Id)
		{
			if (!plci->appl)
			{
				ncci_mapping_bug++;
				dbug(1, dprintf("NCCI mapping appl expected %ld %08lx",
						ncci_mapping_bug, Id));
			}
			else
				ncci_clear_appl_receive_buffers(plci, ncci);
		}
	}
	else
	{
		/* ncci == 0: walk every NCCI owned by this PLCI */
		for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
		{
			if (a->ncci_plci[ncci] == plci->Id)
			{
				if (!plci->appl)
				{
					ncci_mapping_bug++;
					dbug(1, dprintf("NCCI mapping no appl %ld %08lx",
							ncci_mapping_bug, Id));
				}
				else
					ncci_clear_appl_receive_buffers(plci, ncci);
			}
		}
	}
}
/*
 * Flush all pending outgoing DATA_B3 buffers of an NCCI and reset its
 * data and data-ack counters.  The buffer currently handed to the
 * hardware (plci->data_sent_ptr) is not freed here.
 */
static void cleanup_ncci_data(PLCI *plci, word ncci)
{
	NCCI *queue;
	if (!ncci || (plci->adapter->ncci_plci[ncci] != plci->Id))
		return;
	queue = &(plci->adapter->ncci[ncci]);
	if (plci->appl)
	{
		while (queue->data_pending != 0)
		{
			/* free the buffer unless it is the one still in flight */
			if (!plci->data_sent || (queue->DBuffer[queue->data_out].P != plci->data_sent_ptr))
				TransmitBufferFree(plci->appl, queue->DBuffer[queue->data_out].P);
			queue->data_out++;
			if (queue->data_out == MAX_DATA_B3)
				queue->data_out = 0;
			queue->data_pending--;
		}
	}
	queue->data_out = 0;
	queue->data_pending = 0;
	queue->data_ack_out = 0;
	queue->data_ack_pending = 0;
}
/*
 * Release the NCCI-to-channel mapping of one NCCI (or of every NCCI
 * owned by the PLCI when ncci == 0), flushing pending data first.
 * When preserve_ncci is set only the channel mapping is cleared and
 * the NCCI itself (state, owner, ring list) is kept.
 */
static void ncci_remove(PLCI *plci, word ncci, byte preserve_ncci)
{
	DIVA_CAPI_ADAPTER *a;
	dword Id;
	word i;
	a = plci->adapter;
	Id = (((dword) ncci) << 16) | (((word)(plci->Id)) << 8) | a->Id;
	if (!preserve_ncci)
		ncci_free_receive_buffers(plci, ncci);
	if (ncci)
	{
		if (a->ncci_plci[ncci] != plci->Id)
		{
			ncci_mapping_bug++;
			dbug(1, dprintf("NCCI mapping doesn't exist %ld %08lx %02x",
					ncci_mapping_bug, Id, preserve_ncci));
		}
		else
		{
			cleanup_ncci_data(plci, ncci);
			dbug(1, dprintf("NCCI mapping released %ld %08lx %02x %02x-%02x",
					ncci_mapping_bug, Id, preserve_ncci, a->ncci_ch[ncci], ncci));
			a->ch_ncci[a->ncci_ch[ncci]] = 0;
			if (!preserve_ncci)
			{
				a->ncci_ch[ncci] = 0;
				a->ncci_plci[ncci] = 0;
				a->ncci_state[ncci] = IDLE;
				/* unlink the NCCI from the PLCI's ring list */
				i = plci->ncci_ring_list;
				while ((i != 0) && (a->ncci_next[i] != plci->ncci_ring_list) && (a->ncci_next[i] != ncci))
					i = a->ncci_next[i];
				if ((i != 0) && (a->ncci_next[i] == ncci))
				{
					if (i == ncci)
						plci->ncci_ring_list = 0;
					else if (plci->ncci_ring_list == ncci)
						plci->ncci_ring_list = i;
					a->ncci_next[i] = a->ncci_next[ncci];
				}
				a->ncci_next[ncci] = 0;
			}
		}
	}
	else
	{
		/* ncci == 0: release every NCCI owned by this PLCI */
		for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
		{
			if (a->ncci_plci[ncci] == plci->Id)
			{
				cleanup_ncci_data(plci, ncci);
				dbug(1, dprintf("NCCI mapping released %ld %08lx %02x %02x-%02x",
						ncci_mapping_bug, Id, preserve_ncci, a->ncci_ch[ncci], ncci));
				a->ch_ncci[a->ncci_ch[ncci]] = 0;
				if (!preserve_ncci)
				{
					a->ncci_ch[ncci] = 0;
					a->ncci_plci[ncci] = 0;
					a->ncci_state[ncci] = IDLE;
					a->ncci_next[ncci] = 0;
				}
			}
		}
		if (!preserve_ncci)
			plci->ncci_ring_list = 0;
	}
}
/*------------------------------------------------------------------*/
/* PLCI remove function */
/*------------------------------------------------------------------*/
/*
 * Discard all messages still waiting in the PLCI's msg_in_queue,
 * returning the transmit buffers of queued DATA_B3 requests to the
 * application, then reset the queue positions to 'empty'.
 */
static void plci_free_msg_in_queue(PLCI *plci)
{
	word i;
	if (plci->appl)
	{
		i = plci->msg_in_read_pos;
		while (i != plci->msg_in_write_pos)
		{
			if (i == plci->msg_in_wrap_pos)
				i = 0;
			if (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->header.command == _DATA_B3_R)
			{
				TransmitBufferFree(plci->appl,
						   (byte *)(long)(((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->info.data_b3_req.Data));
			}
			/* advance by the padded size of this queue entry */
			i += (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->header.length +
			      MSG_IN_OVERHEAD + 3) & 0xfffc;
		}
	}
	plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
	plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
	plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
}
/*
 * Start removal of a PLCI: flush its internal command queue, send a
 * HANGUP on the signaling side (or a network-layer REMOVE for a
 * D-channel X.25 PLCI), release all NCCIs and queued messages, and
 * detach the application.
 */
static void plci_remove(PLCI *plci)
{
	if (!plci) {
		dbug(1, dprintf("plci_remove(no plci)"));
		return;
	}
	init_internal_command_queue(plci);
	dbug(1, dprintf("plci_remove(%x,tel=%x)", plci->Id, plci->tel));
	if (plci_remove_check(plci))
	{
		return;
	}
	if (plci->Sig.Id == 0xff)
	{
		/* Sig.Id 0xff marks a D-channel X.25 PLCI: remove the NL side */
		dbug(1, dprintf("D-channel X.25 plci->NL.Id:%0x", plci->NL.Id));
		if (plci->NL.Id && !plci->nl_remove_id)
		{
			nl_req_ncci(plci, REMOVE, 0);
			send_req(plci);
		}
	}
	else
	{
		/* hang up only if something is still active or in flight */
		if (!plci->sig_remove_id
		    && (plci->Sig.Id
			|| (plci->req_in != plci->req_out)
			|| (plci->nl_req || plci->sig_req)))
		{
			sig_req(plci, HANGUP, 0);
			send_req(plci);
		}
	}
	ncci_remove(plci, 0, false);
	plci_free_msg_in_queue(plci);
	plci->channels = 0;
	plci->appl = NULL;
	if ((plci->State == INC_CON_PENDING) || (plci->State == INC_CON_ALERT))
		plci->State = OUTG_DIS_PENDING;
}
/*------------------------------------------------------------------*/
/* Application Group function helpers */
/*------------------------------------------------------------------*/
/* Set every bit in the group-optimization indication mask. */
static void set_group_ind_mask(PLCI *plci)
{
	word idx = 0;
	while (idx < C_IND_MASK_DWORDS)
		plci->group_optimization_mask_table[idx++] = 0xffffffffL;
}
/* Clear bit 'b' in the group-optimization indication mask. */
static void clear_group_ind_mask_bit(PLCI *plci, word b)
{
	dword bit = 1L << (b & 0x1f);
	plci->group_optimization_mask_table[b >> 5] &= ~bit;
}
/* Return non-zero if bit 'b' of the group-optimization mask is set. */
static byte test_group_ind_mask_bit(PLCI *plci, word b)
{
	dword bit = 1L << (b & 0x1f);
	return ((plci->group_optimization_mask_table[b >> 5] & bit) != 0);
}
/*------------------------------------------------------------------*/
/* c_ind_mask operations for arbitrary MAX_APPL */
/*------------------------------------------------------------------*/
/* Clear the whole c_ind_mask (no application waiting for the call). */
static void clear_c_ind_mask(PLCI *plci)
{
	word idx = 0;
	while (idx < C_IND_MASK_DWORDS)
		plci->c_ind_mask_table[idx++] = 0;
}
/* Return non-zero if no bit of the c_ind_mask is set. */
static byte c_ind_mask_empty(PLCI *plci)
{
	word idx;
	for (idx = 0; idx < C_IND_MASK_DWORDS; idx++)
	{
		if (plci->c_ind_mask_table[idx] != 0)
			return false;
	}
	return true;
}
/* Set bit 'b' in the c_ind_mask. */
static void set_c_ind_mask_bit(PLCI *plci, word b)
{
	dword bit = 1L << (b & 0x1f);
	plci->c_ind_mask_table[b >> 5] |= bit;
}
/* Clear bit 'b' in the c_ind_mask. */
static void clear_c_ind_mask_bit(PLCI *plci, word b)
{
	dword bit = 1L << (b & 0x1f);
	plci->c_ind_mask_table[b >> 5] &= ~bit;
}
/* Return non-zero if bit 'b' of the c_ind_mask is set. */
static byte test_c_ind_mask_bit(PLCI *plci, word b)
{
	dword bit = 1L << (b & 0x1f);
	return ((plci->c_ind_mask_table[b >> 5] & bit) != 0);
}
/*
 * Print the c_ind_mask bit table to the debug log, four dwords per
 * line in hex.  Each dword is rendered backwards into 'buf' (least
 * significant nibble first, building from the end of the buffer).
 */
static void dump_c_ind_mask(PLCI *plci)
{
	static char hex_digit_table[0x10] =
		{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
	word i, j, k;
	dword d;
	char *p;
	char buf[40];
	for (i = 0; i < C_IND_MASK_DWORDS; i += 4)
	{
		p = buf + 36;
		*p = '\0';
		for (j = 0; j < 4; j++)
		{
			if (i + j < C_IND_MASK_DWORDS)
			{
				d = plci->c_ind_mask_table[i + j];
				/* emit 8 hex digits, lowest nibble first */
				for (k = 0; k < 8; k++)
				{
					*(--p) = hex_digit_table[d & 0xf];
					d >>= 4;
				}
			}
			else if (i != 0)
			{
				/* pad missing columns of later rows with blanks */
				for (k = 0; k < 8; k++)
					*(--p) = ' ';
			}
			*(--p) = ' ';
		}
		dbug(1, dprintf("c_ind_mask =%s", (char *) p));
	}
}
#define dump_plcis(a)
/*------------------------------------------------------------------*/
/* translation function for each message */
/*------------------------------------------------------------------*/
/*
 * Handle CONNECT_REQ from an application: allocate a PLCI, decode the
 * optional B-channel information (ChannelID 0..4, explicit CHI,
 * channel-map form), build the signaling parameters (BC/HLC/LLC, OAD,
 * CPN, ...) and issue ASSIGN/CALL_REQ (or LISTEN_REQ for DCE-side
 * permanent connections).
 *
 * Returns 2 when a CONNECT_CONF has been sent here, false when the
 * request was queued via send_req() (confirmation follows later).
 */
static byte connect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word ch;
	word i;
	word Info;
	byte LinkLayer;
	API_PARSE *ai;
	API_PARSE *bp;
	API_PARSE ai_parms[5];
	word channel = 0;
	dword ch_mask;
	byte m;
	static byte esc_chi[35] = {0x02, 0x18, 0x01};
	static byte lli[2] = {0x01, 0x00};
	byte noCh = 0;
	word dir = 0;
	byte *p_chi = "";
	for (i = 0; i < 5; i++) ai_parms[i].length = 0;
	dbug(1, dprintf("connect_req(%d)", parms->length));
	Info = _WRONG_IDENTIFIER;
	if (a)
	{
		if (a->adapter_disabled)
		{
			/* adapter down: confirm OK but disconnect immediately */
			dbug(1, dprintf("adapter disabled"));
			Id = ((word)1 << 8) | a->Id;
			sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
			sendf(appl, _DISCONNECT_I, Id, 0, "w", _L1_ERROR);
			return false;
		}
		Info = _OUT_OF_PLCI;
		if ((i = get_plci(a)))
		{
			Info = 0;
			plci = &a->plci[i - 1];
			plci->appl = appl;
			plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
			/* check 'external controller' bit for codec support */
			if (Id & EXT_CONTROLLER)
			{
				if (AdvCodecSupport(a, plci, appl, 0))
				{
					plci->Id = 0;
					sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", _WRONG_IDENTIFIER);
					return 2;
				}
			}
			ai = &parms[9];
			bp = &parms[5];
			ch = 0;
			if (bp->length)LinkLayer = bp->info[3];
			else LinkLayer = 0;
			if (ai->length)
			{
				/* decode the additional-info B-channel selection */
				ch = 0xffff;
				if (!api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
				{
					ch = 0;
					if (ai_parms[0].length)
					{
						ch = GET_WORD(ai_parms[0].info + 1);
						if (ch > 4) ch = 0; /* safety -> ignore ChannelID */
						if (ch == 4) /* explizit CHI in message */
						{
							/* check length of B-CH struct */
							if ((ai_parms[0].info)[3] >= 1)
							{
								if ((ai_parms[0].info)[4] == CHI)
								{
									p_chi = &((ai_parms[0].info)[5]);
								}
								else
								{
									p_chi = &((ai_parms[0].info)[3]);
								}
								if (p_chi[0] > 35) /* check length of channel ID */
								{
									Info = _WRONG_MESSAGE_FORMAT;
								}
							}
							else Info = _WRONG_MESSAGE_FORMAT;
						}
						if (ch == 3 && ai_parms[0].length >= 7 && ai_parms[0].length <= 36)
						{
							/* channel-map form: validate the mask bytes and
							   remember the first selected channel */
							dir = GET_WORD(ai_parms[0].info + 3);
							ch_mask = 0;
							m = 0x3f;
							for (i = 0; i + 5 <= ai_parms[0].length; i++)
							{
								if (ai_parms[0].info[i + 5] != 0)
								{
									if ((ai_parms[0].info[i + 5] | m) != 0xff)
										Info = _WRONG_MESSAGE_FORMAT;
									else
									{
										if (ch_mask == 0)
											channel = i;
										ch_mask |= 1L << i;
									}
								}
								m = 0;
							}
							if (ch_mask == 0)
								Info = _WRONG_MESSAGE_FORMAT;
							if (!Info)
							{
								if ((ai_parms[0].length == 36) || (ch_mask != ((dword)(1L << channel))))
								{
									esc_chi[0] = (byte)(ai_parms[0].length - 2);
									for (i = 0; i + 5 <= ai_parms[0].length; i++)
										esc_chi[i + 3] = ai_parms[0].info[i + 5];
								}
								else
									esc_chi[0] = 2;
								esc_chi[2] = (byte)channel;
								plci->b_channel = (byte)channel; /* not correct for ETSI ch 17..31 */
								add_p(plci, LLI, lli);
								add_p(plci, ESC, esc_chi);
								plci->State = LOCAL_CONNECT;
								if (!dir) plci->call_dir |= CALL_DIR_FORCE_OUTG_NL; /* dir 0=DTE, 1=DCE */
							}
						}
					}
				}
				else Info = _WRONG_MESSAGE_FORMAT;
			}
			dbug(1, dprintf("ch=%x,dir=%x,p_ch=%d", ch, dir, channel));
			plci->command = _CONNECT_R;
			plci->number = Number;
			/* x.31 or D-ch free SAPI in LinkLayer? */
			if (ch == 1 && LinkLayer != 3 && LinkLayer != 12) noCh = true;
			if ((ch == 0 || ch == 2 || noCh || ch == 3 || ch == 4) && !Info)
			{
				/* B-channel used for B3 connections (ch==0), or no B channel */
				/* is used (ch==2) or perm. connection (3) is used do a CALL */
				if (noCh) Info = add_b1(plci, &parms[5], 2, 0); /* no resource */
				else Info = add_b1(plci, &parms[5], ch, 0);
				add_s(plci, OAD, &parms[2]);
				add_s(plci, OSA, &parms[4]);
				add_s(plci, BC, &parms[6]);
				add_s(plci, LLC, &parms[7]);
				add_s(plci, HLC, &parms[8]);
				if (a->Info_Mask[appl->Id - 1] & 0x200)
				{
					/* early B3 connect (CIP mask bit 9) no release after a disc */
					add_p(plci, LLI, "\x01\x01");
				}
				/* default BC/HLC from the CIP value tables */
				if (GET_WORD(parms[0].info) < 29) {
					add_p(plci, BC, cip_bc[GET_WORD(parms[0].info)][a->u_law]);
					add_p(plci, HLC, cip_hlc[GET_WORD(parms[0].info)]);
				}
				add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
				sig_req(plci, ASSIGN, DSIG_ID);
			}
			else if (ch == 1) {
				/* D-Channel used for B3 connections */
				plci->Sig.Id = 0xff;
				Info = 0;
			}
			if (!Info && ch != 2 && !noCh) {
				Info = add_b23(plci, &parms[5]);
				if (!Info) {
					if (!(plci->tel && !plci->adv_nl))nl_req_ncci(plci, ASSIGN, 0);
				}
			}
			if (!Info)
			{
				if (ch == 0 || ch == 2 || ch == 3 || noCh || ch == 4)
				{
					if (plci->spoofed_msg == SPOOFING_REQUIRED)
					{
						/* adapter not ready yet: save the message and
						   replay it once the PLCI is unblocked */
						api_save_msg(parms, "wsssssssss", &plci->saved_msg);
						plci->spoofed_msg = CALL_REQ;
						plci->internal_command = BLOCK_PLCI;
						plci->command = 0;
						dbug(1, dprintf("Spoof"));
						send_req(plci);
						return false;
					}
					if (ch == 4)add_p(plci, CHI, p_chi);
					add_s(plci, CPN, &parms[1]);
					add_s(plci, DSA, &parms[3]);
					if (noCh) add_p(plci, ESC, "\x02\x18\xfd"); /* D-channel, no B-L3 */
					add_ai(plci, &parms[9]);
					if (!dir)sig_req(plci, CALL_REQ, 0);
					else
					{
						/* DCE side of a permanent connection: listen */
						plci->command = PERM_LIST_REQ;
						plci->appl = appl;
						sig_req(plci, LISTEN_REQ, 0);
						send_req(plci);
						return false;
					}
				}
				send_req(plci);
				return false;
			}
			plci->Id = 0;
		}
	}
	sendf(appl,
	      _CONNECT_R | CONFIRM,
	      Id,
	      Number,
	      "w", Info);
	return 2;
}
/*
 * Handle CONNECT_RES from an application: accept, reject or ignore an
 * incoming call depending on the Reject word and the PLCI state, and
 * notify the other applications that were offered the same call.
 * Returns 1 (send_req() the built request) except when no PLCI exists.
 */
static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word i, Info;
	word Reject;
	/* cause values indexed by Reject 2..8 for the REJECT escape */
	static byte cau_t[] = {0, 0, 0x90, 0x91, 0xac, 0x9d, 0x86, 0xd8, 0x9b};
	static byte esc_t[] = {0x03, 0x08, 0x00, 0x00};
	API_PARSE *ai;
	API_PARSE ai_parms[5];
	word ch = 0;
	if (!plci) {
		dbug(1, dprintf("connect_res(no plci)"));
		return 0; /* no plci, no send */
	}
	dbug(1, dprintf("connect_res(State=0x%x)", plci->State));
	for (i = 0; i < 5; i++) ai_parms[i].length = 0;
	ai = &parms[5];
	dbug(1, dprintf("ai->length=%d", ai->length));
	if (ai->length)
	{
		/* pick up the B-channel selection from the additional info */
		if (!api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
		{
			dbug(1, dprintf("ai_parms[0].length=%d/0x%x", ai_parms[0].length, GET_WORD(ai_parms[0].info + 1)));
			ch = 0;
			if (ai_parms[0].length)
			{
				ch = GET_WORD(ai_parms[0].info + 1);
				dbug(1, dprintf("BCH-I=0x%x", ch));
			}
		}
	}
	if (plci->State == INC_CON_CONNECTED_ALERT)
	{
		dbug(1, dprintf("Connected Alert Call_Res"));
		if (a->Info_Mask[appl->Id - 1] & 0x200)
		{
			/* early B3 connect (CIP mask bit 9) no release after a disc */
			add_p(plci, LLI, "\x01\x01");
		}
		add_s(plci, CONN_NR, &parms[2]);
		add_s(plci, LLC, &parms[4]);
		add_ai(plci, &parms[5]);
		plci->State = INC_CON_ACCEPT;
		sig_req(plci, CALL_RES, 0);
		return 1;
	}
	else if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) {
		/* this application no longer waits for the call */
		clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
		dump_c_ind_mask(plci);
		Reject = GET_WORD(parms[0].info);
		dbug(1, dprintf("Reject=0x%x", Reject));
		if (Reject)
		{
			if (c_ind_mask_empty(plci))
			{
				/* last offered application rejected: hang up with
				   the mapped cause value */
				if ((Reject & 0xff00) == 0x3400)
				{
					esc_t[2] = ((byte)(Reject & 0x00ff)) | 0x80;
					add_p(plci, ESC, esc_t);
					add_ai(plci, &parms[5]);
					sig_req(plci, REJECT, 0);
				}
				else if (Reject == 1 || Reject > 9)
				{
					add_ai(plci, &parms[5]);
					sig_req(plci, HANGUP, 0);
				}
				else
				{
					esc_t[2] = cau_t[(Reject&0x000f)];
					add_p(plci, ESC, esc_t);
					add_ai(plci, &parms[5]);
					sig_req(plci, REJECT, 0);
				}
				plci->appl = appl;
			}
			else
			{
				sendf(appl, _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
			}
		}
		else {
			/* call accepted by this application */
			plci->appl = appl;
			if (Id & EXT_CONTROLLER) {
				if (AdvCodecSupport(a, plci, appl, 0)) {
					dbug(1, dprintf("connect_res(error from AdvCodecSupport)"));
					sig_req(plci, HANGUP, 0);
					return 1;
				}
				if (plci->tel == ADV_VOICE && a->AdvCodecPLCI)
				{
					Info = add_b23(plci, &parms[1]);
					if (Info)
					{
						dbug(1, dprintf("connect_res(error from add_b23)"));
						sig_req(plci, HANGUP, 0);
						return 1;
					}
					if (plci->adv_nl)
					{
						nl_req_ncci(plci, ASSIGN, 0);
					}
				}
			}
			else
			{
				plci->tel = 0;
				if (ch != 2)
				{
					Info = add_b23(plci, &parms[1]);
					if (Info)
					{
						dbug(1, dprintf("connect_res(error from add_b23 2)"));
						sig_req(plci, HANGUP, 0);
						return 1;
					}
				}
				nl_req_ncci(plci, ASSIGN, 0);
			}
			if (plci->spoofed_msg == SPOOFING_REQUIRED)
			{
				/* adapter not ready yet: save and replay later */
				api_save_msg(parms, "wsssss", &plci->saved_msg);
				plci->spoofed_msg = CALL_RES;
				plci->internal_command = BLOCK_PLCI;
				plci->command = 0;
				dbug(1, dprintf("Spoof"));
			}
			else
			{
				add_b1(plci, &parms[1], ch, plci->B1_facilities);
				if (a->Info_Mask[appl->Id - 1] & 0x200)
				{
					/* early B3 connect (CIP mask bit 9) no release after a disc */
					add_p(plci, LLI, "\x01\x01");
				}
				add_s(plci, CONN_NR, &parms[2]);
				add_s(plci, LLC, &parms[4]);
				add_ai(plci, &parms[5]);
				plci->State = INC_CON_ACCEPT;
				sig_req(plci, CALL_RES, 0);
			}
			/* tell all other offered applications the call is taken */
			for (i = 0; i < max_appl; i++) {
				if (test_c_ind_mask_bit(plci, i)) {
					sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
				}
			}
		}
	}
	return 1;
}
/* CONNECT_ACTIVE_RES requires no action; nothing further is sent. */
static byte connect_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			  PLCI *plci, APPL *appl, API_PARSE *msg)
{
	dbug(1, dprintf("connect_a_res"));
	return false;
}
/*
 * Handle DISCONNECT_REQ: send HANGUP on the signaling side (or REMOVE
 * on the network layer for D-channel X.25 PLCIs) and notify the other
 * applications still being offered the call.
 * Returns 1 when a request was built for send_req(), false when the
 * confirmation has already been sent here.
 */
static byte disconnect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			   PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word Info;
	word i;
	dbug(1, dprintf("disconnect_req"));
	Info = _WRONG_IDENTIFIER;
	if (plci)
	{
		if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT)
		{
			/* incoming call not yet answered: withdraw it from all
			   other offered applications */
			clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
			plci->appl = appl;
			for (i = 0; i < max_appl; i++)
			{
				if (test_c_ind_mask_bit(plci, i))
					sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
			}
			plci->State = OUTG_DIS_PENDING;
		}
		if (plci->Sig.Id && plci->appl)
		{
			Info = 0;
			if (plci->Sig.Id != 0xff)
			{
				if (plci->State != INC_DIS_PENDING)
				{
					add_ai(plci, &msg[0]);
					sig_req(plci, HANGUP, 0);
					plci->State = OUTG_DIS_PENDING;
					return 1;
				}
			}
			else
			{
				/* D-channel X.25 PLCI: remove the network layer */
				if (plci->NL.Id && !plci->nl_remove_id)
				{
					mixer_remove(plci);
					nl_req_ncci(plci, REMOVE, 0);
					sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", 0);
					sendf(appl, _DISCONNECT_I, Id, 0, "w", 0);
					plci->State = INC_DIS_PENDING;
				}
				return 1;
			}
		}
	}
	if (!appl) return false;
	sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", Info);
	return false;
}
/*
 * Handle DISCONNECT_RES: release remaining receive buffers and remove
 * the PLCI once no application and no B-channel still references it.
 */
static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			   PLCI *plci, APPL *appl, API_PARSE *msg)
{
	dbug(1, dprintf("disconnect_res"));
	if (plci)
	{
		/* clear ind mask bit, just in case of collsion of */
		/* DISCONNECT_IND and CONNECT_RES */
		clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
		ncci_free_receive_buffers(plci, 0);
		if (plci_remove_check(plci))
		{
			return 0;
		}
		if (plci->State == INC_DIS_PENDING
		    || plci->State == SUSPENDING) {
			if (c_ind_mask_empty(plci)) {
				/* no application waits for this call anymore */
				if (plci->State != SUSPENDING) plci->State = IDLE;
				dbug(1, dprintf("chs=%d", plci->channels));
				if (!plci->channels) {
					plci_remove(plci);
				}
			}
		}
	}
	return 0;
}
/*
 * LISTEN_REQ handler: store the application's Info and CIP masks on the
 * adapter and, for the external controller, switch the codec listen
 * state on or off.  Always confirms, then re-evaluates the adapter's
 * global listen state.
 */
static byte listen_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
		       PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word Info;
	byte i;
	dbug(1, dprintf("listen_req(Appl=0x%x)", appl->Id));
	Info = _WRONG_IDENTIFIER;
	if (a) {
		Info = 0;
		a->Info_Mask[appl->Id - 1] = GET_DWORD(parms[0].info);
		a->CIP_Mask[appl->Id - 1] = GET_DWORD(parms[1].info);
		dbug(1, dprintf("CIP_MASK=0x%lx", GET_DWORD(parms[1].info)));
		if (a->Info_Mask[appl->Id - 1] & 0x200) { /* early B3 connect provides */
			a->Info_Mask[appl->Id - 1] |= 0x10; /* call progression infos */
		}
		/* check if external controller listen and switch listen on or off*/
		if (Id&EXT_CONTROLLER && GET_DWORD(parms[1].info)) {
			if (a->profile.Global_Options & ON_BOARD_CODEC) {
				dummy_plci.State = IDLE;
				a->codec_listen[appl->Id - 1] = &dummy_plci;
				/* copy listening OAD/OSA numbers; length byte in
				   element 0, digits limited to 21 bytes
				   (assumes TelOAD/TelOSA hold >= 23 bytes —
				   NOTE(review): confirm against declaration) */
				a->TelOAD[0] = (byte)(parms[3].length);
				for (i = 1; parms[3].length >= i && i < 22; i++) {
					a->TelOAD[i] = parms[3].info[i];
				}
				a->TelOAD[i] = 0;
				a->TelOSA[0] = (byte)(parms[4].length);
				for (i = 1; parms[4].length >= i && i < 22; i++) {
					a->TelOSA[i] = parms[4].info[i];
				}
				a->TelOSA[i] = 0;
			}
			else Info = 0x2002; /* wrong controller, codec not supported */
		}
		else { /* clear listen */
			a->codec_listen[appl->Id - 1] = (PLCI *)0;
		}
	}
	sendf(appl,
	      _LISTEN_R | CONFIRM,
	      Id,
	      Number,
	      "w", Info);
	if (a) listen_check(a);
	return false;
}
/*
 * INFO_REQ handler: transports additional call information.
 *
 * With an existing PLCI this supports overlap sending (CPN/KEY),
 * user-to-user info (UUI) and facility requests.  Without a PLCI but
 * with keypad/UUI/facility data, a temporary PLCI is assigned for a
 * null-call-reference facility (NCR_FACILITY).
 */
static byte info_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
		     PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word i;
	API_PARSE *ai;
	PLCI *rc_plci = NULL;
	API_PARSE ai_parms[5];
	word Info = 0;
	dbug(1, dprintf("info_req"));
	for (i = 0; i < 5; i++) ai_parms[i].length = 0;
	ai = &msg[1];
	if (ai->length)
	{
		if (api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
		{
			dbug(1, dprintf("AddInfo wrong"));
			Info = _WRONG_MESSAGE_FORMAT;
		}
	}
	if (!a) Info = _WRONG_STATE;
	if (!Info && plci)
	{ /* no fac, with CPN, or KEY */
		rc_plci = plci;
		if (!ai_parms[3].length && plci->State && (msg[0].length || ai_parms[1].length))
		{
			/* overlap sending option */
			dbug(1, dprintf("OvlSnd"));
			add_s(plci, CPN, &msg[0]);
			add_s(plci, KEY, &ai_parms[1]);
			sig_req(plci, INFO_REQ, 0);
			send_req(plci);
			return false;
		}
		if (plci->State && ai_parms[2].length)
		{
			/* User_Info option */
			dbug(1, dprintf("UUI"));
			add_s(plci, UUI, &ai_parms[2]);
			sig_req(plci, USER_DATA, 0);
		}
		else if (plci->State && ai_parms[3].length)
		{
			/* Facility option */
			dbug(1, dprintf("FAC"));
			add_s(plci, CPN, &msg[0]);
			add_ai(plci, &msg[1]);
			sig_req(plci, FACILITY_REQ, 0);
		}
		else
		{
			Info = _WRONG_STATE;
		}
	}
	else if ((ai_parms[1].length || ai_parms[2].length || ai_parms[3].length) && !Info)
	{
		/* NCR_Facility option -> send UUI and Keypad too */
		dbug(1, dprintf("NCR_FAC"));
		if ((i = get_plci(a)))
		{
			rc_plci = &a->plci[i - 1];
			appl->NullCREnable = true;
			rc_plci->internal_command = C_NCR_FAC_REQ;
			rc_plci->appl = appl;
			add_p(rc_plci, CAI, "\x01\x80");
			add_p(rc_plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
			sig_req(rc_plci, ASSIGN, DSIG_ID);
			send_req(rc_plci);
		}
		else
		{
			Info = _OUT_OF_PLCI;
		}
		if (!Info)
		{
			add_s(rc_plci, CPN, &msg[0]);
			add_ai(rc_plci, &msg[1]);
			sig_req(rc_plci, NCR_FACILITY, 0);
			send_req(rc_plci);
			return false;
			/* for application controlled supplementary services */
		}
	}
	if (!rc_plci)
	{
		Info = _WRONG_MESSAGE_FORMAT;
	}
	if (!Info)
	{
		send_req(rc_plci);
	}
	else
	{ /* appl is not assigned to a PLCI or error condition */
		dbug(1, dprintf("localInfoCon"));
		sendf(appl,
		      _INFO_R | CONFIRM,
		      Id,
		      Number,
		      "w", Info);
	}
	return false;
}
/*
 * INFO_RES handler: no response processing required; traced and dropped.
 */
static byte info_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
		     PLCI *plci, APPL *appl, API_PARSE *msg)
{
	dbug(1, dprintf("info_res"));
	return false;
}
/*
 * ALERT_REQ handler: signal CALL_ALERT for an incoming call.
 *
 * Only a PLCI in state INC_CON_PENDING is moved to INC_CON_ALERT; a
 * repeated alert is answered with _ALERT_IGNORED, any other state with
 * _WRONG_STATE, and a missing PLCI with _WRONG_IDENTIFIER.  The return
 * value is 1 exactly when a CALL_ALERT request was queued.
 */
static byte alert_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
		      PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word Info = _WRONG_IDENTIFIER;
	byte ret = false;

	dbug(1, dprintf("alert_req"));
	if (plci) {
		if (plci->State == INC_CON_ALERT) {
			Info = _ALERT_IGNORED;
		} else if (plci->State == INC_CON_PENDING) {
			Info = 0;
			plci->State = INC_CON_ALERT;
			add_ai(plci, &msg[0]);
			sig_req(plci, CALL_ALERT, 0);
			ret = 1;
		} else {
			Info = _WRONG_STATE;
		}
	}
	sendf(appl,
	      _ALERT_R | CONFIRM,
	      Id,
	      Number,
	      "w", Info);
	return ret;
}
/*
 * FACILITY_REQ handler: dispatch on the facility selector.
 *
 * SELECTOR_HANDSET          -> advanced codec / hook support
 * SELECTOR_SU_SERV          -> supplementary services, sub-dispatched on
 *                              the SS request code (hold/retrieve,
 *                              suspend/resume, conference, ECT/3PTY,
 *                              call deflection/forwarding, CCBS, MWI...)
 * SELECTOR_DTMF / _LINE_INTERCONNECT / _ECHO_CANCELLER -> delegated to
 *                              the respective sub-handlers.
 *
 * Returns false after sending the FACILITY confirm locally, 1/false as
 * dictated by the delegated handlers.  Many SS branches allocate a
 * temporary PLCI (get_plci) and issue ASSIGN + S_SERVICE requests; the
 * pending internal_command routes the adapter's answer back later.
 *
 * Fix: the ss_parms initialization loop now clears all 11 entries of
 * the array (it previously cleared only 9, although parse formats such
 * as S_MWI_ACTIVATE's "wbwdwwwssss" use 11 slots).
 */
static byte facility_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			 PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word Info = 0;
	word i = 0;
	word selector;
	word SSreq;
	long relatedPLCIvalue;
	DIVA_CAPI_ADAPTER *relatedadapter;
	byte *SSparms = "";
	byte RCparms[] = "\x05\x00\x00\x02\x00\x00";
	byte SSstruct[] = "\x09\x00\x00\x06\x00\x00\x00\x00\x00\x00";
	API_PARSE *parms;
	API_PARSE ss_parms[11];
	PLCI *rplci;
	byte cai[15];
	dword d;
	API_PARSE dummy;
	dbug(1, dprintf("facility_req"));
	/* clear every parse slot, not just the first 9 */
	for (i = 0; i < 11; i++) ss_parms[i].length = 0;
	parms = &msg[1];
	if (!a)
	{
		dbug(1, dprintf("wrong Ctrl"));
		Info = _WRONG_IDENTIFIER;
	}
	selector = GET_WORD(msg[0].info);
	if (!Info)
	{
		switch (selector)
		{
		case SELECTOR_HANDSET:
			Info = AdvCodecSupport(a, plci, appl, HOOK_SUPPORT);
			break;
		case SELECTOR_SU_SERV:
			if (!msg[1].length)
			{
				Info = _WRONG_MESSAGE_FORMAT;
				break;
			}
			SSreq = GET_WORD(&(msg[1].info[1]));
			PUT_WORD(&RCparms[1], SSreq);
			SSparms = RCparms;
			switch (SSreq)
			{
			case S_GET_SUPPORTED_SERVICES:
				/* query via a temporary PLCI; fall back to a
				   static minimal mask if none is available */
				if ((i = get_plci(a)))
				{
					rplci = &a->plci[i - 1];
					rplci->appl = appl;
					add_p(rplci, CAI, "\x01\x80");
					add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
					sig_req(rplci, ASSIGN, DSIG_ID);
					send_req(rplci);
				}
				else
				{
					PUT_DWORD(&SSstruct[6], MASK_TERMINAL_PORTABILITY);
					SSparms = (byte *)SSstruct;
					break;
				}
				rplci->internal_command = GETSERV_REQ_PEND;
				rplci->number = Number;
				rplci->appl = appl;
				sig_req(rplci, S_SUPPORTED, 0);
				send_req(rplci);
				return false;
				break;
			case S_LISTEN:
				if (parms->length == 7)
				{
					if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
					{
						dbug(1, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
				}
				else
				{
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				a->Notification_Mask[appl->Id - 1] = GET_DWORD(ss_parms[2].info);
				if (a->Notification_Mask[appl->Id - 1] & SMASK_MWI) /* MWI active? */
				{
					if ((i = get_plci(a)))
					{
						rplci = &a->plci[i - 1];
						rplci->appl = appl;
						add_p(rplci, CAI, "\x01\x80");
						add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
						sig_req(rplci, ASSIGN, DSIG_ID);
						send_req(rplci);
					}
					else
					{
						break;
					}
					rplci->internal_command = GET_MWI_STATE;
					rplci->number = Number;
					sig_req(rplci, MWI_POLL, 0);
					send_req(rplci);
				}
				break;
			case S_HOLD:
				api_parse(&parms->info[1], (word)parms->length, "ws", ss_parms);
				if (plci && plci->State && plci->SuppState == IDLE)
				{
					plci->SuppState = HOLD_REQUEST;
					plci->command = C_HOLD_REQ;
					add_s(plci, CAI, &ss_parms[1]);
					sig_req(plci, CALL_HOLD, 0);
					send_req(plci);
					return false;
				}
				else Info = 0x3010; /* wrong state */
				break;
			case S_RETRIEVE:
				if (plci && plci->State && plci->SuppState == CALL_HELD)
				{
					if (Id & EXT_CONTROLLER)
					{
						if (AdvCodecSupport(a, plci, appl, 0))
						{
							Info = 0x3010; /* wrong state */
							break;
						}
					}
					else plci->tel = 0;
					plci->SuppState = RETRIEVE_REQUEST;
					plci->command = C_RETRIEVE_REQ;
					if (plci->spoofed_msg == SPOOFING_REQUIRED)
					{
						plci->spoofed_msg = CALL_RETRIEVE;
						plci->internal_command = BLOCK_PLCI;
						plci->command = 0;
						dbug(1, dprintf("Spoof"));
						return false;
					}
					else
					{
						sig_req(plci, CALL_RETRIEVE, 0);
						send_req(plci);
						return false;
					}
				}
				else Info = 0x3010; /* wrong state */
				break;
			case S_SUSPEND:
				if (parms->length)
				{
					if (api_parse(&parms->info[1], (word)parms->length, "wbs", ss_parms))
					{
						dbug(1, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
				}
				if (plci && plci->State)
				{
					add_s(plci, CAI, &ss_parms[2]);
					plci->command = SUSPEND_REQ;
					sig_req(plci, SUSPEND, 0);
					plci->State = SUSPENDING;
					send_req(plci);
				}
				else Info = 0x3010; /* wrong state */
				break;
			case S_RESUME:
				/* resume always needs a fresh PLCI */
				if (!(i = get_plci(a)))
				{
					Info = _OUT_OF_PLCI;
					break;
				}
				rplci = &a->plci[i - 1];
				rplci->appl = appl;
				rplci->number = Number;
				rplci->tel = 0;
				rplci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
				/* check 'external controller' bit for codec support */
				if (Id & EXT_CONTROLLER)
				{
					if (AdvCodecSupport(a, rplci, appl, 0))
					{
						rplci->Id = 0;
						Info = 0x300A;
						break;
					}
				}
				if (parms->length)
				{
					if (api_parse(&parms->info[1], (word)parms->length, "wbs", ss_parms))
					{
						dbug(1, dprintf("format wrong"));
						rplci->Id = 0;
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
				}
				dummy.length = 0;
				dummy.info = "\x00";
				add_b1(rplci, &dummy, 0, 0);
				if (a->Info_Mask[appl->Id - 1] & 0x200)
				{
					/* early B3 connect (CIP mask bit 9) no release after a disc */
					add_p(rplci, LLI, "\x01\x01");
				}
				add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
				sig_req(rplci, ASSIGN, DSIG_ID);
				send_req(rplci);
				add_s(rplci, CAI, &ss_parms[2]);
				rplci->command = RESUME_REQ;
				sig_req(rplci, RESUME, 0);
				rplci->State = RESUMING;
				send_req(rplci);
				break;
			case S_CONF_BEGIN: /* Request */
			case S_CONF_DROP:
			case S_CONF_ISOLATE:
			case S_CONF_REATTACH:
				if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
				{
					dbug(1, dprintf("format wrong"));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (plci && plci->State && ((plci->SuppState == IDLE) || (plci->SuppState == CALL_HELD)))
				{
					d = GET_DWORD(ss_parms[2].info);
					if (d >= 0x80)
					{
						dbug(1, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
					plci->ptyState = (byte)SSreq;
					plci->command = 0;
					cai[0] = 2;
					switch (SSreq)
					{
					case S_CONF_BEGIN:
						cai[1] = CONF_BEGIN;
						plci->internal_command = CONF_BEGIN_REQ_PEND;
						break;
					case S_CONF_DROP:
						cai[1] = CONF_DROP;
						plci->internal_command = CONF_DROP_REQ_PEND;
						break;
					case S_CONF_ISOLATE:
						cai[1] = CONF_ISOLATE;
						plci->internal_command = CONF_ISOLATE_REQ_PEND;
						break;
					case S_CONF_REATTACH:
						cai[1] = CONF_REATTACH;
						plci->internal_command = CONF_REATTACH_REQ_PEND;
						break;
					}
					cai[2] = (byte)d; /* Conference Size resp. PartyId */
					add_p(plci, CAI, cai);
					sig_req(plci, S_SERVICE, 0);
					send_req(plci);
					return false;
				}
				else Info = 0x3010; /* wrong state */
				break;
			case S_ECT:
			case S_3PTY_BEGIN:
			case S_3PTY_END:
			case S_CONF_ADD:
				if (parms->length == 7)
				{
					if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
					{
						dbug(1, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
				}
				else if (parms->length == 8) /* workaround for the T-View-S */
				{
					if (api_parse(&parms->info[1], (word)parms->length, "wbdb", ss_parms))
					{
						dbug(1, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
				}
				else
				{
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (!msg[1].length)
				{
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (!plci)
				{
					Info = _WRONG_IDENTIFIER;
					break;
				}
				/* locate the second (related) PLCI of the pair */
				relatedPLCIvalue = GET_DWORD(ss_parms[2].info);
				relatedPLCIvalue &= 0x0000FFFF;
				dbug(1, dprintf("PTY/ECT/addCONF,relPLCI=%lx", relatedPLCIvalue));
				/* controller starts with 0 up to (max_adapter - 1) */
				if (((relatedPLCIvalue & 0x7f) == 0)
				    || (MapController((byte)(relatedPLCIvalue & 0x7f)) == 0)
				    || (MapController((byte)(relatedPLCIvalue & 0x7f)) > max_adapter))
				{
					if (SSreq == S_3PTY_END)
					{
						dbug(1, dprintf("wrong Controller use 2nd PLCI=PLCI"));
						rplci = plci;
					}
					else
					{
						Info = 0x3010; /* wrong state */
						break;
					}
				}
				else
				{
					relatedadapter = &adapter[MapController((byte)(relatedPLCIvalue & 0x7f)) - 1];
					relatedPLCIvalue >>= 8;
					/* find PLCI PTR*/
					for (i = 0, rplci = NULL; i < relatedadapter->max_plci; i++)
					{
						if (relatedadapter->plci[i].Id == (byte)relatedPLCIvalue)
						{
							rplci = &relatedadapter->plci[i];
						}
					}
					if (!rplci || !relatedPLCIvalue)
					{
						if (SSreq == S_3PTY_END)
						{
							dbug(1, dprintf("use 2nd PLCI=PLCI"));
							rplci = plci;
						}
						else
						{
							Info = 0x3010; /* wrong state */
							break;
						}
					}
				}
				/*
				  dbug(1, dprintf("rplci:%x", rplci));
				  dbug(1, dprintf("plci:%x", plci));
				  dbug(1, dprintf("rplci->ptyState:%x", rplci->ptyState));
				  dbug(1, dprintf("plci->ptyState:%x", plci->ptyState));
				  dbug(1, dprintf("SSreq:%x", SSreq));
				  dbug(1, dprintf("rplci->internal_command:%x", rplci->internal_command));
				  dbug(1, dprintf("rplci->appl:%x", rplci->appl));
				  dbug(1, dprintf("rplci->Id:%x", rplci->Id));
				*/
				/* send PTY/ECT req, cannot check all states because of US stuff */
				if (!rplci->internal_command && rplci->appl)
				{
					plci->command = 0;
					rplci->relatedPTYPLCI = plci;
					plci->relatedPTYPLCI = rplci;
					rplci->ptyState = (byte)SSreq;
					if (SSreq == S_ECT)
					{
						rplci->internal_command = ECT_REQ_PEND;
						cai[1] = ECT_EXECUTE;
						rplci->vswitchstate = 0;
						rplci->vsprot = 0;
						rplci->vsprotdialect = 0;
						plci->vswitchstate = 0;
						plci->vsprot = 0;
						plci->vsprotdialect = 0;
					}
					else if (SSreq == S_CONF_ADD)
					{
						rplci->internal_command = CONF_ADD_REQ_PEND;
						cai[1] = CONF_ADD;
					}
					else
					{
						rplci->internal_command = PTY_REQ_PEND;
						cai[1] = (byte)(SSreq - 3);
					}
					rplci->number = Number;
					if (plci != rplci) /* explicit invocation */
					{
						cai[0] = 2;
						cai[2] = plci->Sig.Id;
						dbug(1, dprintf("explicit invocation"));
					}
					else
					{
						dbug(1, dprintf("implicit invocation"));
						cai[0] = 1;
					}
					add_p(rplci, CAI, cai);
					sig_req(rplci, S_SERVICE, 0);
					send_req(rplci);
					return false;
				}
				else
				{
					dbug(0, dprintf("Wrong line"));
					Info = 0x3010; /* wrong state */
					break;
				}
				break;
			case S_CALL_DEFLECTION:
				if (api_parse(&parms->info[1], (word)parms->length, "wbwss", ss_parms))
				{
					dbug(1, dprintf("format wrong"));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (!plci)
				{
					Info = _WRONG_IDENTIFIER;
					break;
				}
				/* reuse unused screening indicator */
				ss_parms[3].info[3] = (byte)GET_WORD(&(ss_parms[2].info[0]));
				plci->command = 0;
				plci->internal_command = CD_REQ_PEND;
				appl->CDEnable = true;
				cai[0] = 1;
				cai[1] = CALL_DEFLECTION;
				add_p(plci, CAI, cai);
				add_p(plci, CPN, ss_parms[3].info);
				sig_req(plci, S_SERVICE, 0);
				send_req(plci);
				return false;
				break;
			case S_CALL_FORWARDING_START:
				if (api_parse(&parms->info[1], (word)parms->length, "wbdwwsss", ss_parms))
				{
					dbug(1, dprintf("format wrong"));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if ((i = get_plci(a)))
				{
					rplci = &a->plci[i - 1];
					rplci->appl = appl;
					add_p(rplci, CAI, "\x01\x80");
					add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
					sig_req(rplci, ASSIGN, DSIG_ID);
					send_req(rplci);
				}
				else
				{
					Info = _OUT_OF_PLCI;
					break;
				}
				/* reuse unused screening indicator */
				rplci->internal_command = CF_START_PEND;
				rplci->appl = appl;
				rplci->number = Number;
				appl->S_Handle = GET_DWORD(&(ss_parms[2].info[0]));
				cai[0] = 2;
				cai[1] = 0x70 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
				cai[2] = (byte)GET_WORD(&(ss_parms[4].info[0])); /* Basic Service */
				add_p(rplci, CAI, cai);
				add_p(rplci, OAD, ss_parms[5].info);
				add_p(rplci, CPN, ss_parms[6].info);
				sig_req(rplci, S_SERVICE, 0);
				send_req(rplci);
				return false;
				break;
			case S_INTERROGATE_DIVERSION:
			case S_INTERROGATE_NUMBERS:
			case S_CALL_FORWARDING_STOP:
			case S_CCBS_REQUEST:
			case S_CCBS_DEACTIVATE:
			case S_CCBS_INTERROGATE:
				/* each variant has its own parameter format */
				switch (SSreq)
				{
				case S_INTERROGATE_NUMBERS:
					if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
					{
						dbug(0, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
					}
					break;
				case S_CCBS_REQUEST:
				case S_CCBS_DEACTIVATE:
					if (api_parse(&parms->info[1], (word)parms->length, "wbdw", ss_parms))
					{
						dbug(0, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
					}
					break;
				case S_CCBS_INTERROGATE:
					if (api_parse(&parms->info[1], (word)parms->length, "wbdws", ss_parms))
					{
						dbug(0, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
					}
					break;
				default:
					if (api_parse(&parms->info[1], (word)parms->length, "wbdwws", ss_parms))
					{
						dbug(0, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
					break;
				}
				if (Info) break;
				if ((i = get_plci(a)))
				{
					rplci = &a->plci[i - 1];
					switch (SSreq)
					{
					case S_INTERROGATE_DIVERSION: /* use cai with S_SERVICE below */
						cai[1] = 0x60 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
						rplci->internal_command = INTERR_DIVERSION_REQ_PEND; /* move to rplci if assigned */
						break;
					case S_INTERROGATE_NUMBERS: /* use cai with S_SERVICE below */
						cai[1] = DIVERSION_INTERROGATE_NUM; /* Function */
						rplci->internal_command = INTERR_NUMBERS_REQ_PEND; /* move to rplci if assigned */
						break;
					case S_CALL_FORWARDING_STOP:
						rplci->internal_command = CF_STOP_PEND;
						cai[1] = 0x80 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
						break;
					case S_CCBS_REQUEST:
						cai[1] = CCBS_REQUEST;
						rplci->internal_command = CCBS_REQUEST_REQ_PEND;
						break;
					case S_CCBS_DEACTIVATE:
						cai[1] = CCBS_DEACTIVATE;
						rplci->internal_command = CCBS_DEACTIVATE_REQ_PEND;
						break;
					case S_CCBS_INTERROGATE:
						cai[1] = CCBS_INTERROGATE;
						rplci->internal_command = CCBS_INTERROGATE_REQ_PEND;
						break;
					default:
						cai[1] = 0;
						break;
					}
					rplci->appl = appl;
					rplci->number = Number;
					add_p(rplci, CAI, "\x01\x80");
					add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
					sig_req(rplci, ASSIGN, DSIG_ID);
					send_req(rplci);
				}
				else
				{
					Info = _OUT_OF_PLCI;
					break;
				}
				appl->S_Handle = GET_DWORD(&(ss_parms[2].info[0]));
				switch (SSreq)
				{
				case S_INTERROGATE_NUMBERS:
					cai[0] = 1;
					add_p(rplci, CAI, cai);
					break;
				case S_CCBS_REQUEST:
				case S_CCBS_DEACTIVATE:
					cai[0] = 3;
					PUT_WORD(&cai[2], GET_WORD(&(ss_parms[3].info[0])));
					add_p(rplci, CAI, cai);
					break;
				case S_CCBS_INTERROGATE:
					cai[0] = 3;
					PUT_WORD(&cai[2], GET_WORD(&(ss_parms[3].info[0])));
					add_p(rplci, CAI, cai);
					add_p(rplci, OAD, ss_parms[4].info);
					break;
				default:
					cai[0] = 2;
					cai[2] = (byte)GET_WORD(&(ss_parms[4].info[0])); /* Basic Service */
					add_p(rplci, CAI, cai);
					add_p(rplci, OAD, ss_parms[5].info);
					break;
				}
				sig_req(rplci, S_SERVICE, 0);
				send_req(rplci);
				return false;
				break;
			case S_MWI_ACTIVATE:
				if (api_parse(&parms->info[1], (word)parms->length, "wbwdwwwssss", ss_parms))
				{
					dbug(1, dprintf("format wrong"));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (!plci)
				{
					if ((i = get_plci(a)))
					{
						rplci = &a->plci[i - 1];
						rplci->appl = appl;
						rplci->cr_enquiry = true;
						add_p(rplci, CAI, "\x01\x80");
						add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
						sig_req(rplci, ASSIGN, DSIG_ID);
						send_req(rplci);
					}
					else
					{
						Info = _OUT_OF_PLCI;
						break;
					}
				}
				else
				{
					rplci = plci;
					rplci->cr_enquiry = false;
				}
				rplci->command = 0;
				rplci->internal_command = MWI_ACTIVATE_REQ_PEND;
				rplci->appl = appl;
				rplci->number = Number;
				cai[0] = 13;
				cai[1] = ACTIVATION_MWI; /* Function */
				PUT_WORD(&cai[2], GET_WORD(&(ss_parms[2].info[0]))); /* Basic Service */
				PUT_DWORD(&cai[4], GET_DWORD(&(ss_parms[3].info[0]))); /* Number of Messages */
				PUT_WORD(&cai[8], GET_WORD(&(ss_parms[4].info[0]))); /* Message Status */
				PUT_WORD(&cai[10], GET_WORD(&(ss_parms[5].info[0]))); /* Message Reference */
				PUT_WORD(&cai[12], GET_WORD(&(ss_parms[6].info[0]))); /* Invocation Mode */
				add_p(rplci, CAI, cai);
				add_p(rplci, CPN, ss_parms[7].info); /* Receiving User Number */
				add_p(rplci, OAD, ss_parms[8].info); /* Controlling User Number */
				add_p(rplci, OSA, ss_parms[9].info); /* Controlling User Provided Number */
				add_p(rplci, UID, ss_parms[10].info); /* Time */
				sig_req(rplci, S_SERVICE, 0);
				send_req(rplci);
				return false;
			case S_MWI_DEACTIVATE:
				if (api_parse(&parms->info[1], (word)parms->length, "wbwwss", ss_parms))
				{
					dbug(1, dprintf("format wrong"));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (!plci)
				{
					if ((i = get_plci(a)))
					{
						rplci = &a->plci[i - 1];
						rplci->appl = appl;
						rplci->cr_enquiry = true;
						add_p(rplci, CAI, "\x01\x80");
						add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
						sig_req(rplci, ASSIGN, DSIG_ID);
						send_req(rplci);
					}
					else
					{
						Info = _OUT_OF_PLCI;
						break;
					}
				}
				else
				{
					rplci = plci;
					rplci->cr_enquiry = false;
				}
				rplci->command = 0;
				rplci->internal_command = MWI_DEACTIVATE_REQ_PEND;
				rplci->appl = appl;
				rplci->number = Number;
				cai[0] = 5;
				cai[1] = DEACTIVATION_MWI; /* Function */
				PUT_WORD(&cai[2], GET_WORD(&(ss_parms[2].info[0]))); /* Basic Service */
				PUT_WORD(&cai[4], GET_WORD(&(ss_parms[3].info[0]))); /* Invocation Mode */
				add_p(rplci, CAI, cai);
				add_p(rplci, CPN, ss_parms[4].info); /* Receiving User Number */
				add_p(rplci, OAD, ss_parms[5].info); /* Controlling User Number */
				sig_req(rplci, S_SERVICE, 0);
				send_req(rplci);
				return false;
			default:
				Info = 0x300E; /* not supported */
				break;
			}
			break; /* case SELECTOR_SU_SERV: end */
		case SELECTOR_DTMF:
			return (dtmf_request(Id, Number, a, plci, appl, msg));
		case SELECTOR_LINE_INTERCONNECT:
			return (mixer_request(Id, Number, a, plci, appl, msg));
		case PRIV_SELECTOR_ECHO_CANCELLER:
			appl->appl_flags |= APPL_FLAG_PRIV_EC_SPEC;
			return (ec_request(Id, Number, a, plci, appl, msg));
		case SELECTOR_ECHO_CANCELLER:
			appl->appl_flags &= ~APPL_FLAG_PRIV_EC_SPEC;
			return (ec_request(Id, Number, a, plci, appl, msg));
		case SELECTOR_V42BIS:
		default:
			Info = _FACILITY_NOT_SUPPORTED;
			break;
		} /* end of switch (selector) */
	}
	dbug(1, dprintf("SendFacRc"));
	sendf(appl,
	      _FACILITY_R | CONFIRM,
	      Id,
	      Number,
	      "wws", Info, selector, SSparms);
	return false;
}
/*
 * FACILITY_RES handler: no response processing required; traced and
 * dropped.
 */
static byte facility_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			 PLCI *plci, APPL *appl, API_PARSE *msg)
{
	dbug(1, dprintf("facility_res"));
	return false;
}
/*
 * CONNECT_B3_REQ handler: establish the logical (layer 3) connection.
 *
 * For X.25-style protocols (B3_prot 2/3) the NCPI may carry a PVC or
 * facilities; for T.30 fax (B3_prot 5) the NCPI is translated into the
 * T30_INFO connect-info buffer (resolution, data format, SUB/SEP/PWD,
 * non-standard facilities); for RTP the NCPI is forwarded as a UDATA
 * reconfigure.  Returns 1 when an N_CONNECT/N_RESET was queued, false
 * when an internal command sequence was started or the confirm was
 * already sent.
 */
static byte connect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			   PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word Info = 0;
	byte req;
	byte len;
	word w;
	word fax_control_bits, fax_feature_bits, fax_info_change;
	API_PARSE *ncpi;
	byte pvc[2];
	API_PARSE fax_parms[9];
	word i;
	dbug(1, dprintf("connect_b3_req"));
	if (plci)
	{
		if ((plci->State == IDLE) || (plci->State == OUTG_DIS_PENDING)
		    || (plci->State == INC_DIS_PENDING) || (plci->SuppState != IDLE))
		{
			Info = _WRONG_STATE;
		}
		else
		{
			/* local reply if assign unsuccessful
			   or B3 protocol allows only one layer 3 connection
			   and already connected
			   or B2 protocol not any LAPD
			   and connect_b3_req contradicts originate/answer direction */
			if (!plci->NL.Id
			    || (((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE))
				&& ((plci->channels != 0)
				    || (((plci->B2_prot != B2_SDLC) && (plci->B2_prot != B2_LAPD) && (plci->B2_prot != B2_LAPD_FREE_SAPI_SEL))
					&& ((plci->call_dir & CALL_DIR_ANSWER) && !(plci->call_dir & CALL_DIR_FORCE_OUTG_NL))))))
			{
				dbug(1, dprintf("B3 already connected=%d or no NL.Id=0x%x, dir=%d sstate=0x%x",
						plci->channels, plci->NL.Id, plci->call_dir, plci->SuppState));
				Info = _WRONG_STATE;
				sendf(appl,
				      _CONNECT_B3_R | CONFIRM,
				      Id,
				      Number,
				      "w", Info);
				return false;
			}
			plci->requested_options_conn = 0;
			req = N_CONNECT;
			ncpi = &parms[0];
			if (plci->B3_prot == 2 || plci->B3_prot == 3)
			{
				if (ncpi->length > 2)
				{
					/* check for PVC */
					if (ncpi->info[2] || ncpi->info[3])
					{
						pvc[0] = ncpi->info[3];
						pvc[1] = ncpi->info[2];
						add_d(plci, 2, pvc);
						req = N_RESET;
					}
					else
					{
						if (ncpi->info[1] & 1) req = N_CONNECT | N_D_BIT;
						add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
					}
				}
			}
			else if (plci->B3_prot == 5)
			{
				if (plci->NL.Id && !plci->nl_remove_id)
				{
					fax_control_bits = GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low);
					fax_feature_bits = GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->feature_bits_low);
					if (!(fax_control_bits & T30_CONTROL_BIT_MORE_DOCUMENTS)
					    || (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS))
					{
						/* rebuild the T30_INFO buffer from the NCPI */
						len = offsetof(T30_INFO, universal_6);
						fax_info_change = false;
						if (ncpi->length >= 4)
						{
							w = GET_WORD(&ncpi->info[3]);
							if ((w & 0x0001) != ((word)(((T30_INFO *)(plci->fax_connect_info_buffer))->resolution & 0x0001)))
							{
								((T30_INFO *)(plci->fax_connect_info_buffer))->resolution =
									(byte)((((T30_INFO *)(plci->fax_connect_info_buffer))->resolution & ~T30_RESOLUTION_R8_0770_OR_200) |
									       ((w & 0x0001) ? T30_RESOLUTION_R8_0770_OR_200 : 0));
								fax_info_change = true;
							}
							fax_control_bits &= ~(T30_CONTROL_BIT_REQUEST_POLLING | T30_CONTROL_BIT_MORE_DOCUMENTS);
							if (w & 0x0002) /* Fax-polling request */
								fax_control_bits |= T30_CONTROL_BIT_REQUEST_POLLING;
							if ((w & 0x0004) /* Request to send / poll another document */
							    && (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_MORE_DOCUMENTS))
							{
								fax_control_bits |= T30_CONTROL_BIT_MORE_DOCUMENTS;
							}
							if (ncpi->length >= 6)
							{
								w = GET_WORD(&ncpi->info[5]);
								if (((byte) w) != ((T30_INFO *)(plci->fax_connect_info_buffer))->data_format)
								{
									((T30_INFO *)(plci->fax_connect_info_buffer))->data_format = (byte) w;
									fax_info_change = true;
								}
								if ((a->man_profile.private_options & (1L << PRIVATE_FAX_SUB_SEP_PWD))
								    && (GET_WORD(&ncpi->info[5]) & 0x8000)) /* Private SEP/SUB/PWD enable */
								{
									plci->requested_options_conn |= (1L << PRIVATE_FAX_SUB_SEP_PWD);
								}
								if ((a->man_profile.private_options & (1L << PRIVATE_FAX_NONSTANDARD))
								    && (GET_WORD(&ncpi->info[5]) & 0x4000)) /* Private non-standard facilities enable */
								{
									plci->requested_options_conn |= (1L << PRIVATE_FAX_NONSTANDARD);
								}
								fax_control_bits &= ~(T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_SEL_POLLING |
										      T30_CONTROL_BIT_ACCEPT_PASSWORD);
								if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
								    & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
								{
									if (api_parse(&ncpi->info[1], ncpi->length, "wwwwsss", fax_parms))
										Info = _WRONG_MESSAGE_FORMAT;
									else
									{
										if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
										    & (1L << PRIVATE_FAX_SUB_SEP_PWD))
										{
											fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_PASSWORD;
											if (fax_control_bits & T30_CONTROL_BIT_ACCEPT_POLLING)
												fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING;
										}
										/* station id (max 20 bytes) */
										w = fax_parms[4].length;
										if (w > 20)
											w = 20;
										((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = (byte) w;
										for (i = 0; i < w; i++)
											((T30_INFO *)(plci->fax_connect_info_buffer))->station_id[i] = fax_parms[4].info[1 + i];
										((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
										len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
										/* append SUB/SEP and PWD as length-prefixed strings */
										w = fax_parms[5].length;
										if (w > 20)
											w = 20;
										plci->fax_connect_info_buffer[len++] = (byte) w;
										for (i = 0; i < w; i++)
											plci->fax_connect_info_buffer[len++] = fax_parms[5].info[1 + i];
										w = fax_parms[6].length;
										if (w > 20)
											w = 20;
										plci->fax_connect_info_buffer[len++] = (byte) w;
										for (i = 0; i < w; i++)
											plci->fax_connect_info_buffer[len++] = fax_parms[6].info[1 + i];
										if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
										    & (1L << PRIVATE_FAX_NONSTANDARD))
										{
											if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
											{
												dbug(1, dprintf("non-standard facilities info missing or wrong format"));
												plci->fax_connect_info_buffer[len++] = 0;
											}
											else
											{
												if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
													plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
												plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
												for (i = 0; i < fax_parms[7].length; i++)
													plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
											}
										}
									}
								}
							}
							else
							{
								len = offsetof(T30_INFO, universal_6);
							}
							fax_info_change = true;
						}
						if (fax_control_bits != GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low))
						{
							PUT_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low, fax_control_bits);
							fax_info_change = true;
						}
					}
					/* NOTE(review): if the MORE_DOCUMENTS condition
					   above is false, 'len' and 'fax_info_change'
					   reach this point uninitialized — confirm
					   whether that path can occur in practice */
					if (Info == GOOD)
					{
						plci->fax_connect_info_length = len;
						if (fax_info_change)
						{
							if (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS)
							{
								start_internal_command(Id, plci, fax_connect_info_command);
								return false;
							}
							else
							{
								start_internal_command(Id, plci, fax_adjust_b23_command);
								return false;
							}
						}
					}
				}
				else Info = _WRONG_STATE;
			}
			else if (plci->B3_prot == B3_RTP)
			{
				/* forward the NCPI as an RTP reconfigure UDATA */
				plci->internal_req_buffer[0] = ncpi->length + 1;
				plci->internal_req_buffer[1] = UDATA_REQUEST_RTP_RECONFIGURE;
				for (w = 0; w < ncpi->length; w++)
					plci->internal_req_buffer[2 + w] = ncpi->info[1 + w];
				start_internal_command(Id, plci, rtp_connect_b3_req_command);
				return false;
			}
			if (!Info)
			{
				nl_req_ncci(plci, req, 0);
				return 1;
			}
		}
	}
	else Info = _WRONG_IDENTIFIER;
	sendf(appl,
	      _CONNECT_B3_R | CONFIRM,
	      Id,
	      Number,
	      "w", Info);
	return false;
}
/*
 * CONNECT_B3_RES handler: the application accepts (Reject == 0) or
 * rejects an incoming layer 3 connection.  Rejection triggers N_DISC;
 * acceptance sends N_CONNECT_ACK, with special handling for fax
 * (optional non-standard facilities response) and RTP (NCPI forwarded
 * as reconfigure).
 */
static byte connect_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			   PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word ncci;
	API_PARSE *ncpi;
	byte req;
	word w;
	API_PARSE fax_parms[9];
	word i;
	byte len;
	dbug(1, dprintf("connect_b3_res"));
	ncci = (word)(Id >> 16);
	if (plci && ncci) {
		if (a->ncci_state[ncci] == INC_CON_PENDING) {
			/* Reject parameter != 0: disconnect the NCCI */
			if (GET_WORD(&parms[0].info[0]) != 0)
			{
				a->ncci_state[ncci] = OUTG_REJ_PENDING;
				channel_request_xon(plci, a->ncci_ch[ncci]);
				channel_xmit_xon(plci);
				cleanup_ncci_data(plci, ncci);
				nl_req_ncci(plci, N_DISC, (byte)ncci);
				return 1;
			}
			a->ncci_state[ncci] = INC_ACT_PENDING;
			req = N_CONNECT_ACK;
			ncpi = &parms[1];
			if ((plci->B3_prot == 4) || (plci->B3_prot == 5) || (plci->B3_prot == 7))
			{
				if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
				    & (1L << PRIVATE_FAX_NONSTANDARD))
				{
					if (((plci->B3_prot == 4) || (plci->B3_prot == 5))
					    && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
					    && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
					{
						/* append the NSF response behind the
						   station id / SUB / PWD fields of the
						   connect-info buffer */
						len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
						if (plci->fax_connect_info_length < len)
						{
							((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
							((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
						}
						if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
						{
							dbug(1, dprintf("non-standard facilities info missing or wrong format"));
						}
						else
						{
							if (plci->fax_connect_info_length <= len)
								plci->fax_connect_info_buffer[len] = 0;
							len += 1 + plci->fax_connect_info_buffer[len];
							if (plci->fax_connect_info_length <= len)
								plci->fax_connect_info_buffer[len] = 0;
							len += 1 + plci->fax_connect_info_buffer[len];
							if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
								plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
							plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
							for (i = 0; i < fax_parms[7].length; i++)
								plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
						}
						plci->fax_connect_info_length = len;
						((T30_INFO *)(plci->fax_connect_info_buffer))->code = 0;
						start_internal_command(Id, plci, fax_connect_ack_command);
						return false;
					}
				}
				nl_req_ncci(plci, req, (byte)ncci);
				if ((plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
				    && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
				{
					if (plci->B3_prot == 4)
						sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
					else
						sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
					plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
				}
			}
			else if (plci->B3_prot == B3_RTP)
			{
				plci->internal_req_buffer[0] = ncpi->length + 1;
				plci->internal_req_buffer[1] = UDATA_REQUEST_RTP_RECONFIGURE;
				for (w = 0; w < ncpi->length; w++)
					plci->internal_req_buffer[2 + w] = ncpi->info[1+w];
				start_internal_command(Id, plci, rtp_connect_b3_res_command);
				return false;
			}
			else
			{
				/* X.25-style: optional facilities in the NCPI */
				if (ncpi->length > 2) {
					if (ncpi->info[1] & 1) req = N_CONNECT_ACK | N_D_BIT;
					add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
				}
				nl_req_ncci(plci, req, (byte)ncci);
				sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
				if (plci->adjust_b_restore)
				{
					plci->adjust_b_restore = false;
					start_internal_command(Id, plci, adjust_b_restore);
				}
			}
			return 1;
		}
	}
	return false;
}
/*
 * CONNECT_B3_ACTIVE_RES handler: acknowledge of CONNECT_B3_ACTIVE_IND.
 * Moves the NCCI (and, except in the connected-alert case, the PLCI)
 * to CONNECTED and re-enables flow control on the data channel.
 */
static byte connect_b3_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			     PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word ncci = (word)(Id >> 16);

	dbug(1, dprintf("connect_b3_a_res(ncci=0x%x)", ncci));
	if (!plci || !ncci)
		return false;
	if ((plci->State == IDLE) || (plci->State == INC_DIS_PENDING)
	    || (plci->State == OUTG_DIS_PENDING))
		return false;
	if (a->ncci_state[ncci] == INC_ACT_PENDING) {
		a->ncci_state[ncci] = CONNECTED;
		if (plci->State != INC_CON_CONNECTED_ALERT)
			plci->State = CONNECTED;
		channel_request_xon(plci, a->ncci_ch[ncci]);
		channel_xmit_xon(plci);
	}
	return false;
}
/*
 * DISCONNECT_B3_REQ handler: release the logical (layer 3) connection.
 *
 * If transparent/T.30 data is still pending on the NCCI the disconnect
 * is deferred (plci->send_disc remembers the NCCI); otherwise the NCCI
 * data is cleaned up and N_DISC is issued immediately.  Returns 1 when
 * a request was queued, false after the confirm was sent locally.
 */
static byte disconnect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			      PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word Info;
	word ncci;
	API_PARSE *ncpi;
	dbug(1, dprintf("disconnect_b3_req"));
	Info = _WRONG_IDENTIFIER;
	ncci = (word)(Id >> 16);
	if (plci && ncci)
	{
		Info = _WRONG_STATE;
		if ((a->ncci_state[ncci] == CONNECTED)
		    || (a->ncci_state[ncci] == OUTG_CON_PENDING)
		    || (a->ncci_state[ncci] == INC_CON_PENDING)
		    || (a->ncci_state[ncci] == INC_ACT_PENDING))
		{
			a->ncci_state[ncci] = OUTG_DIS_PENDING;
			channel_request_xon(plci, a->ncci_ch[ncci]);
			channel_xmit_xon(plci);
			if (a->ncci[ncci].data_pending
			    && ((plci->B3_prot == B3_TRANSPARENT)
				|| (plci->B3_prot == B3_T30)
				|| (plci->B3_prot == B3_T30_WITH_EXTENSIONS)))
			{
				/* defer the disconnect until pending data is out */
				plci->send_disc = (byte)ncci;
				plci->command = 0;
				return false;
			}
			else
			{
				cleanup_ncci_data(plci, ncci);
				if (plci->B3_prot == 2 || plci->B3_prot == 3)
				{
					/* X.25-style: optional facilities in the NCPI */
					ncpi = &parms[0];
					if (ncpi->length > 3)
					{
						add_d(plci, (word)(ncpi->length - 3), (byte *)&(ncpi->info[4]));
					}
				}
				nl_req_ncci(plci, N_DISC, (byte)ncci);
			}
			return 1;
		}
	}
	sendf(appl,
	      _DISCONNECT_B3_R | CONFIRM,
	      Id,
	      Number,
	      "w", Info);
	return false;
}
/*
 * DISCONNECT_B3_RESP: the application acknowledged DISCONNECT_B3_IND.
 * Releases the NCCI bookkeeping (inc_dis_ncci_table slot, receive buffers,
 * channel count) and removes the PLCI when it was the last channel.  For
 * fax protocols with paper format support a pending EDATA-based disconnect
 * sequence is started instead (returns 1 in that case).
 */
static byte disconnect_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *parms)
{
word ncci;
word i;
ncci = (word)(Id >> 16);
dbug(1, dprintf("disconnect_b3_res(ncci=0x%x", ncci));
if (plci && ncci) {
plci->requested_options_conn = 0;
plci->fax_connect_info_length = 0;
plci->ncpi_state = 0x00;
/* force outgoing NL direction again unless an X.25/LAPD style stack is used */
if (((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE))
&& ((plci->B2_prot != B2_LAPD) && (plci->B2_prot != B2_LAPD_FREE_SAPI_SEL)))
{
plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
}
/* find this NCCI in the pending-disconnect table */
for (i = 0; i < MAX_CHANNELS_PER_PLCI && plci->inc_dis_ncci_table[i] != (byte)ncci; i++);
if (i < MAX_CHANNELS_PER_PLCI) {
if (plci->channels)plci->channels--;
/* compact the table: shift the remaining entries down */
for (; i < MAX_CHANNELS_PER_PLCI - 1; i++) plci->inc_dis_ncci_table[i] = plci->inc_dis_ncci_table[i + 1];
plci->inc_dis_ncci_table[MAX_CHANNELS_PER_PLCI - 1] = 0;
ncci_free_receive_buffers(plci, ncci);
if ((plci->State == IDLE || plci->State == SUSPENDING) && !plci->channels) {
if (plci->State == SUSPENDING) {
/* suspend completed: report the supplementary service result and hangup */
sendf(plci->appl,
_FACILITY_I,
Id & 0xffffL,
0,
"ws", (word)3, "\x03\x04\x00\x00");
sendf(plci->appl, _DISCONNECT_I, Id & 0xffffL, 0, "w", 0);
}
plci_remove(plci);
plci->State = IDLE;
}
}
else
{
/* fax (T.30) with paper format feature: run the EDATA disconnect sequence */
if ((a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
&& ((plci->B3_prot == 4) || (plci->B3_prot == 5))
&& (a->ncci_state[ncci] == INC_DIS_PENDING))
{
ncci_free_receive_buffers(plci, ncci);
nl_req_ncci(plci, N_EDATA, (byte)ncci);
plci->adapter->ncci_state[ncci] = IDLE;
start_internal_command(Id, plci, fax_disconnect_command);
return 1;
}
}
}
return false;
}
/*
 * DATA_B3_REQ: queue an outgoing data block on the NCCI.
 * The descriptor is appended to the per-NCCI ring buffer (DBuffer); when
 * flag bit 0x0004 (delivery confirmation) is set a matching entry is also
 * queued in the DataAck ring.  On error the transmit buffer is released
 * and a DATA_B3 confirm with the error code is sent here.
 */
static byte data_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *parms)
{
NCCI *ncci_ptr;
DATA_B3_DESC *data;
word Info;
word ncci;
word i;
dbug(1, dprintf("data_b3_req"));
Info = _WRONG_IDENTIFIER;
ncci = (word)(Id >> 16);
dbug(1, dprintf("ncci=0x%x, plci=0x%x", ncci, plci));
if (plci && ncci)
{
Info = _WRONG_STATE;
if ((a->ncci_state[ncci] == CONNECTED)
|| (a->ncci_state[ncci] == INC_ACT_PENDING))
{
/* queue data */
ncci_ptr = &(a->ncci[ncci]);
i = ncci_ptr->data_out + ncci_ptr->data_pending;
if (i >= MAX_DATA_B3)
i -= MAX_DATA_B3;
data = &(ncci_ptr->DBuffer[i]);
data->Number = Number;
/* if the message came from the internal msg_in_queue the data pointer
   is stored inline in the parameter, otherwise register a new buffer */
if ((((byte *)(parms[0].info)) >= ((byte *)(plci->msg_in_queue)))
&& (((byte *)(parms[0].info)) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
{
data->P = (byte *)(long)(*((dword *)(parms[0].info)));
}
else
data->P = TransmitBufferSet(appl, *(dword *)parms[0].info);
data->Length = GET_WORD(parms[1].info);
data->Handle = GET_WORD(parms[2].info);
data->Flags = GET_WORD(parms[3].info);
(ncci_ptr->data_pending)++;
/* check for delivery confirmation */
if (data->Flags & 0x0004)
{
i = ncci_ptr->data_ack_out + ncci_ptr->data_ack_pending;
if (i >= MAX_DATA_ACK)
i -= MAX_DATA_ACK;
ncci_ptr->DataAck[i].Number = data->Number;
ncci_ptr->DataAck[i].Handle = data->Handle;
(ncci_ptr->data_ack_pending)++;
}
send_data(plci);
return false;
}
}
if (appl)
{
if (plci)
{
/* failure path: release the transmit buffer referenced by the queued message */
if ((((byte *)(parms[0].info)) >= ((byte *)(plci->msg_in_queue)))
&& (((byte *)(parms[0].info)) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
{
TransmitBufferFree(appl, (byte *)(long)(*((dword *)(parms[0].info))));
}
}
sendf(appl,
_DATA_B3_R | CONFIRM,
Id,
Number,
"ww", GET_WORD(parms[2].info), Info);
}
return false;
}
/*
 * DATA_B3_RESP: the application returns a received data buffer (handle n).
 * Frees the DataNCCI slot, re-enables receive flow control on the channel
 * and, when delivery confirmation was requested for that buffer (flag bit
 * 4), sends N_DATA_ACK to the network layer (returning 1).
 */
static byte data_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *parms)
{
word n;
word ncci;
word NCCIcode;
dbug(1, dprintf("data_b3_res"));
ncci = (word)(Id >> 16);
if (plci && ncci) {
n = GET_WORD(parms[0].info);
dbug(1, dprintf("free(%d)", n));
NCCIcode = ncci | (((word) a->Id) << 8);
/* validate that handle n really belongs to this NCCI and PLCI */
if (n < appl->MaxBuffer &&
appl->DataNCCI[n] == NCCIcode &&
(byte)(appl->DataFlags[n] >> 8) == plci->Id) {
dbug(1, dprintf("found"));
appl->DataNCCI[n] = 0;
if (channel_can_xon(plci, a->ncci_ch[ncci])) {
channel_request_xon(plci, a->ncci_ch[ncci]);
}
channel_xmit_xon(plci);
if (appl->DataFlags[n] & 4) {
nl_req_ncci(plci, N_DATA_ACK, (byte)ncci);
return 1;
}
}
}
return false;
}
/*
 * RESET_B3_REQ: the application requests a reset of the logical (B3)
 * connection.  X.25 style protocols send N_RESET directly; the transparent
 * protocol runs the internal reset command.  The confirm is always sent
 * here; the reset indication follows asynchronously.
 */
static byte reset_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			 PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word result;
	word ncci_id;

	dbug(1, dprintf("reset_b3_req"));

	result = _WRONG_IDENTIFIER;
	ncci_id = (word)(Id >> 16);
	if (plci && ncci_id) {
		result = _WRONG_STATE;
		if ((plci->B3_prot == B3_ISO8208) || (plci->B3_prot == B3_X25_DCE)) {
			if (a->ncci_state[ncci_id] == CONNECTED) {
				/* X.25 style reset: issue N_RESET on the NCCI */
				nl_req_ncci(plci, N_RESET, (byte)ncci_id);
				send_req(plci);
				result = GOOD;
			}
		} else if (plci->B3_prot == B3_TRANSPARENT) {
			if (a->ncci_state[ncci_id] == CONNECTED) {
				/* transparent protocol: run the internal reset sequence */
				start_internal_command(Id, plci, reset_b3_command);
				result = GOOD;
			}
		}
	}
	/* reset_b3 must result in a reset_b3_con & reset_b3_Ind */
	sendf(appl,
	      _RESET_B3_R | CONFIRM,
	      Id,
	      Number,
	      "w", result);
	return false;
}
/*
 * RESET_B3_RESP: the application acknowledged a RESET_B3 indication.
 * Only the X.25 style protocols expect an acknowledge; when the NCCI is
 * waiting for it, move it back to CONNECTED and send N_RESET_ACK
 * (returning true so the request is transmitted).
 */
static byte reset_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			 PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word ncci_id;

	dbug(1, dprintf("reset_b3_res"));

	ncci_id = (word)(Id >> 16);
	if (!plci || !ncci_id)
		return false;

	if ((plci->B3_prot == B3_ISO8208) || (plci->B3_prot == B3_X25_DCE)) {
		if (a->ncci_state[ncci_id] == INC_RES_PENDING) {
			a->ncci_state[ncci_id] = CONNECTED;
			nl_req_ncci(plci, N_RESET_ACK, (byte)ncci_id);
			return true;
		}
	}
	return false;
}
/*
 * CONNECT_B3_T90_ACTIVE_RESP: acknowledge for the T.90NL active indication.
 * For a connect still pending, N_CONNECT_ACK is sent to the network layer
 * (optionally with NCPI data and the D-bit set) and 1 is returned so the
 * request goes out; otherwise the NCCI is simply marked CONNECTED.
 */
static byte connect_b3_t90_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *parms)
{
word ncci;
API_PARSE *ncpi;
byte req;
dbug(1, dprintf("connect_b3_t90_a_res"));
ncci = (word)(Id >> 16);
if (plci && ncci) {
if (a->ncci_state[ncci] == INC_ACT_PENDING) {
a->ncci_state[ncci] = CONNECTED;
}
else if (a->ncci_state[ncci] == INC_CON_PENDING) {
a->ncci_state[ncci] = CONNECTED;
req = N_CONNECT_ACK;
/* parms[0]==0 for CAPI original message definition! */
if (parms[0].info) {
ncpi = &parms[1];
if (ncpi->length > 2) {
/* NCPI byte 1 bit 0 requests the D-bit on the connect acknowledge */
if (ncpi->info[1] & 1) req = N_CONNECT_ACK | N_D_BIT;
add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
}
}
nl_req_ncci(plci, req, (byte)ncci);
return 1;
}
}
return false;
}
/*
 * SELECT_B_REQ: (re)select the B-protocol stack for an existing PLCI and
 * switch the external controller (codec) on or off as requested by the
 * EXT_CONTROLLER bit in the Id.  Only allowed while no B3 channel is open,
 * no supplementary service is in progress and no NL remove is pending.
 * Returns false; the confirm is either sent here (on error) or after the
 * internal select_b_command completes.
 */
static byte select_b_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *msg)
{
word Info = 0;
word i;
byte tel;
API_PARSE bp_parms[7];
if (!plci || !msg)
{
Info = _WRONG_IDENTIFIER;
}
else
{
dbug(1, dprintf("select_b_req[%d],PLCI=0x%x,Tel=0x%x,NL=0x%x,appl=0x%x,sstate=0x%x",
msg->length, plci->Id, plci->tel, plci->NL.Id, plci->appl, plci->SuppState));
dbug(1, dprintf("PlciState=0x%x", plci->State));
for (i = 0; i < 7; i++) bp_parms[i].length = 0;
/* check if no channel is open, no B3 connected only */
if ((plci->State == IDLE) || (plci->State == OUTG_DIS_PENDING) || (plci->State == INC_DIS_PENDING)
|| (plci->SuppState != IDLE) || plci->channels || plci->nl_remove_id)
{
Info = _WRONG_STATE;
}
/* check message format and fill bp_parms pointer */
else if (msg->length && api_parse(&msg->info[1], (word)msg->length, "wwwsss", bp_parms))
{
Info = _WRONG_MESSAGE_FORMAT;
}
else
{
if ((plci->State == INC_CON_PENDING) || (plci->State == INC_CON_ALERT)) /* send alert tone inband to the network, */
{ /* e.g. Qsig or RBS or Cornet-N or xess PRI */
if (Id & EXT_CONTROLLER)
{
sendf(appl, _SELECT_B_REQ | CONFIRM, Id, Number, "w", 0x2002); /* wrong controller */
return 0;
}
plci->State = INC_CON_CONNECTED_ALERT;
plci->appl = appl;
/* this application takes the call: drop it for all other listeners */
clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
dump_c_ind_mask(plci);
for (i = 0; i < max_appl; i++) /* disconnect the other appls */
{ /* its quasi a connect */
if (test_c_ind_mask_bit(plci, i))
sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
}
}
api_save_msg(msg, "s", &plci->saved_msg);
tel = plci->tel;
if (Id & EXT_CONTROLLER)
{
if (tel) /* external controller in use by this PLCI */
{
if (a->AdvSignalAppl && a->AdvSignalAppl != appl)
{
dbug(1, dprintf("Ext_Ctrl in use 1"));
Info = _WRONG_STATE;
}
}
else /* external controller NOT in use by this PLCI ? */
{
if (a->AdvSignalPLCI)
{
dbug(1, dprintf("Ext_Ctrl in use 2"));
Info = _WRONG_STATE;
}
else /* activate the codec */
{
dbug(1, dprintf("Ext_Ctrl start"));
if (AdvCodecSupport(a, plci, appl, 0))
{
dbug(1, dprintf("Error in codec procedures"));
Info = _WRONG_STATE;
}
else if (plci->spoofed_msg == SPOOFING_REQUIRED) /* wait until codec is active */
{
plci->spoofed_msg = AWAITING_SELECT_B;
plci->internal_command = BLOCK_PLCI; /* lock other commands */
plci->command = 0;
dbug(1, dprintf("continue if codec loaded"));
return false;
}
}
}
}
else /* external controller bit is OFF */
{
if (tel) /* external controller in use, need to switch off */
{
if (a->AdvSignalAppl == appl)
{
CodecIdCheck(a, plci);
plci->tel = 0;
plci->adv_nl = 0;
dbug(1, dprintf("Ext_Ctrl disable"));
}
else
{
dbug(1, dprintf("Ext_Ctrl not requested"));
}
}
}
if (!Info)
{
/* normalize the call direction flags before re-selecting the stack */
if (plci->call_dir & CALL_DIR_OUT)
plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
else if (plci->call_dir & CALL_DIR_IN)
plci->call_dir = CALL_DIR_IN | CALL_DIR_ANSWER;
start_internal_command(Id, plci, select_b_command);
return false;
}
}
}
sendf(appl, _SELECT_B_REQ | CONFIRM, Id, Number, "w", Info);
return false;
}
/*
 * MANUFACTURER_REQ: Eicon/Diva manufacturer-specific requests, identified
 * by _DI_MANU_ID.  Dispatches on the command word: assign a local PLCI
 * (_DI_ASSIGN_PLCI), pass raw IDI/signalling/DSP control requests through,
 * configure advanced codec / DTMF parameters, and negotiate private
 * options.  The confirm is sent at the end unless a branch returns early.
 */
static byte manufacturer_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *parms)
{
word command;
word i;
word ncci;
API_PARSE *m;
API_PARSE m_parms[5];
word codec;
byte req;
byte ch;
byte dir;
/* templates patched per request below (chi[1], lli[1] are overwritten) */
static byte chi[2] = {0x01, 0x00};
static byte lli[2] = {0x01, 0x00};
static byte codec_cai[2] = {0x01, 0x01};
static byte null_msg = {0};
static API_PARSE null_parms = { 0, &null_msg };
PLCI *v_plci;
word Info = 0;
dbug(1, dprintf("manufacturer_req"));
for (i = 0; i < 5; i++) m_parms[i].length = 0;
if (GET_DWORD(parms[0].info) != _DI_MANU_ID) {
Info = _WRONG_MESSAGE_FORMAT;
}
command = GET_WORD(parms[1].info);
m = &parms[2];
if (!Info)
{
switch (command) {
/* create a local (manufacturer) PLCI with a chosen codec and direction */
case _DI_ASSIGN_PLCI:
if (api_parse(&m->info[1], (word)m->length, "wbbs", m_parms)) {
Info = _WRONG_MESSAGE_FORMAT;
break;
}
codec = GET_WORD(m_parms[0].info);
ch = m_parms[1].info[0];
dir = m_parms[2].info[0];
if ((i = get_plci(a))) {
plci = &a->plci[i - 1];
plci->appl = appl;
plci->command = _MANUFACTURER_R;
plci->m_command = command;
plci->number = Number;
plci->State = LOCAL_CONNECT;
Id = (((word)plci->Id << 8) | plci->adapter->Id | 0x80);
dbug(1, dprintf("ManCMD,plci=0x%x", Id));
if ((ch == 1 || ch == 2) && (dir <= 2)) {
chi[1] = (byte)(0x80 | ch);
lli[1] = 0;
plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
switch (codec)
{
case 0:
Info = add_b1(plci, &m_parms[3], 0, 0);
break;
case 1:
add_p(plci, CAI, codec_cai);
break;
/* manual 'swich on' to the codec support without signalling */
/* first 'assign plci' with this function, then use */
case 2:
if (AdvCodecSupport(a, plci, appl, 0)) {
Info = _RESOURCE_ERROR;
}
else {
Info = add_b1(plci, &null_parms, 0, B1_FACILITY_LOCAL);
lli[1] = 0x10; /* local call codec stream */
}
break;
}
plci->State = LOCAL_CONNECT;
plci->manufacturer = true;
plci->command = _MANUFACTURER_R;
plci->m_command = command;
plci->number = Number;
if (!Info)
{
add_p(plci, LLI, lli);
add_p(plci, CHI, chi);
add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
sig_req(plci, ASSIGN, DSIG_ID);
if (!codec)
{
Info = add_b23(plci, &m_parms[3]);
if (!Info)
{
nl_req_ncci(plci, ASSIGN, 0);
send_req(plci);
}
}
if (!Info)
{
dbug(1, dprintf("dir=0x%x,spoof=0x%x", dir, plci->spoofed_msg));
if (plci->spoofed_msg == SPOOFING_REQUIRED)
{
/* adapter not ready yet: park the request until the codec is loaded */
api_save_msg(m_parms, "wbbs", &plci->saved_msg);
plci->spoofed_msg = AWAITING_MANUF_CON;
plci->internal_command = BLOCK_PLCI; /* reject other req meanwhile */
plci->command = 0;
send_req(plci);
return false;
}
if (dir == 1) {
sig_req(plci, CALL_REQ, 0);
}
else if (!dir) {
sig_req(plci, LISTEN_REQ, 0);
}
send_req(plci);
}
else
{
sendf(appl,
_MANUFACTURER_R | CONFIRM,
Id,
Number,
"dww", _DI_MANU_ID, command, Info);
return 2;
}
}
}
}
else Info = _OUT_OF_PLCI;
break;
/* pass a raw signalling request (CALL_REQ, LAW_REQ, HANGUP, ...) to IDI */
case _DI_IDI_CTRL:
if (!plci)
{
Info = _WRONG_IDENTIFIER;
break;
}
if (api_parse(&m->info[1], (word)m->length, "bs", m_parms)) {
Info = _WRONG_MESSAGE_FORMAT;
break;
}
req = m_parms[0].info[0];
plci->command = _MANUFACTURER_R;
plci->m_command = command;
plci->number = Number;
if (req == CALL_REQ)
{
plci->b_channel = getChannel(&m_parms[1]);
mixer_set_bchannel_id_esc(plci, plci->b_channel);
if (plci->spoofed_msg == SPOOFING_REQUIRED)
{
plci->spoofed_msg = CALL_REQ | AWAITING_MANUF_CON;
plci->internal_command = BLOCK_PLCI; /* reject other req meanwhile */
plci->command = 0;
break;
}
}
else if (req == LAW_REQ)
{
plci->cr_enquiry = true;
}
add_ss(plci, FTY, &m_parms[1]);
sig_req(plci, req, 0);
send_req(plci);
if (req == HANGUP)
{
/* hangup: disconnect all connected NCCIs and remove the NL entity */
if (plci->NL.Id && !plci->nl_remove_id)
{
if (plci->channels)
{
for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
{
if ((a->ncci_plci[ncci] == plci->Id) && (a->ncci_state[ncci] == CONNECTED))
{
a->ncci_state[ncci] = OUTG_DIS_PENDING;
cleanup_ncci_data(plci, ncci);
nl_req_ncci(plci, N_DISC, (byte)ncci);
}
}
}
mixer_remove(plci);
nl_req_ncci(plci, REMOVE, 0);
send_req(plci);
}
}
break;
case _DI_SIG_CTRL:
/* signalling control for loop activation B-channel */
if (!plci)
{
Info = _WRONG_IDENTIFIER;
break;
}
if (m->length) {
plci->command = _MANUFACTURER_R;
plci->number = Number;
add_ss(plci, FTY, m);
sig_req(plci, SIG_CTRL, 0);
send_req(plci);
}
else Info = _WRONG_MESSAGE_FORMAT;
break;
case _DI_RXT_CTRL:
/* activation control for receiver/transmitter B-channel */
if (!plci)
{
Info = _WRONG_IDENTIFIER;
break;
}
if (m->length) {
plci->command = _MANUFACTURER_R;
plci->number = Number;
add_ss(plci, FTY, m);
sig_req(plci, DSP_CTRL, 0);
send_req(plci);
}
else Info = _WRONG_MESSAGE_FORMAT;
break;
case _DI_ADV_CODEC:
case _DI_DSP_CTRL:
/* TEL_CTRL commands to support non standard adjustments: */
/* Ring on/off, Handset micro volume, external micro vol. */
/* handset+external speaker volume, receiver+transm. gain,*/
/* handsfree on (hookinfo off), set mixer command */
if (command == _DI_ADV_CODEC)
{
if (!a->AdvCodecPLCI) {
Info = _WRONG_STATE;
break;
}
v_plci = a->AdvCodecPLCI;
}
else
{
if (plci
&& (m->length >= 3)
&& (m->info[1] == 0x1c)
&& (m->info[2] >= 1))
{
if (m->info[3] == DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS)
{
/* cache mixer coefficients on the adapter, clipped to the buffer size */
if ((plci->tel != ADV_VOICE) || (plci != a->AdvSignalPLCI))
{
Info = _WRONG_STATE;
break;
}
a->adv_voice_coef_length = m->info[2] - 1;
if (a->adv_voice_coef_length > m->length - 3)
a->adv_voice_coef_length = (byte)(m->length - 3);
if (a->adv_voice_coef_length > ADV_VOICE_COEF_BUFFER_SIZE)
a->adv_voice_coef_length = ADV_VOICE_COEF_BUFFER_SIZE;
for (i = 0; i < a->adv_voice_coef_length; i++)
a->adv_voice_coef_buffer[i] = m->info[4 + i];
if (plci->B1_facilities & B1_FACILITY_VOICE)
adv_voice_write_coefs(plci, ADV_VOICE_WRITE_UPDATE);
break;
}
else if (m->info[3] == DSP_CTRL_SET_DTMF_PARAMETERS)
{
/* cache DTMF detector parameters on the PLCI, clipped to the buffer size */
if (!(a->manufacturer_features & MANUFACTURER_FEATURE_DTMF_PARAMETERS))
{
Info = _FACILITY_NOT_SUPPORTED;
break;
}
plci->dtmf_parameter_length = m->info[2] - 1;
if (plci->dtmf_parameter_length > m->length - 3)
plci->dtmf_parameter_length = (byte)(m->length - 3);
if (plci->dtmf_parameter_length > DTMF_PARAMETER_BUFFER_SIZE)
plci->dtmf_parameter_length = DTMF_PARAMETER_BUFFER_SIZE;
for (i = 0; i < plci->dtmf_parameter_length; i++)
plci->dtmf_parameter_buffer[i] = m->info[4 + i];
if (plci->B1_facilities & B1_FACILITY_DTMFR)
dtmf_parameter_write(plci);
break;
}
}
v_plci = plci;
}
if (!v_plci)
{
Info = _WRONG_IDENTIFIER;
break;
}
if (m->length) {
add_ss(v_plci, FTY, m);
sig_req(v_plci, TEL_CTRL, 0);
send_req(v_plci);
}
else Info = _WRONG_MESSAGE_FORMAT;
break;
/* remember the private options the application wants to enable */
case _DI_OPTIONS_REQUEST:
if (api_parse(&m->info[1], (word)m->length, "d", m_parms)) {
Info = _WRONG_MESSAGE_FORMAT;
break;
}
if (GET_DWORD(m_parms[0].info) & ~a->man_profile.private_options)
{
Info = _FACILITY_NOT_SUPPORTED;
break;
}
a->requested_options_table[appl->Id - 1] = GET_DWORD(m_parms[0].info);
break;
default:
Info = _WRONG_MESSAGE_FORMAT;
break;
}
}
sendf(appl,
_MANUFACTURER_R | CONFIRM,
Id,
Number,
"dww", _DI_MANU_ID, command, Info);
return false;
}
/*
 * MANUFACTURER_RESP: currently only _DI_NEGOTIATE_B3 is handled.  For a
 * fax connection (B3 protocol 4/5) that sent a NEGOTIATE_B3 indication,
 * the returned NCPI (including non-standard facilities) is spliced into
 * the T30 connect info buffer and the EDATA acknowledge is started.
 */
static byte manufacturer_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *msg)
{
word indication;
API_PARSE m_parms[3];
API_PARSE *ncpi;
API_PARSE fax_parms[9];
word i;
byte len;
dbug(1, dprintf("manufacturer_res"));
if ((msg[0].length == 0)
|| (msg[1].length == 0)
|| (GET_DWORD(msg[0].info) != _DI_MANU_ID))
{
return false;
}
indication = GET_WORD(msg[1].info);
switch (indication)
{
case _DI_NEGOTIATE_B3:
if (!plci)
break;
if (((plci->B3_prot != 4) && (plci->B3_prot != 5))
|| !(plci->ncpi_state & NCPI_NEGOTIATE_B3_SENT))
{
dbug(1, dprintf("wrong state for NEGOTIATE_B3 parameters"));
break;
}
if (api_parse(&msg[2].info[1], msg[2].length, "ws", m_parms))
{
dbug(1, dprintf("wrong format in NEGOTIATE_B3 parameters"));
break;
}
ncpi = &m_parms[1];
/* fixed part of the T30_INFO structure up to and including the station id */
len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
if (plci->fax_connect_info_length < len)
{
((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
}
if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
{
dbug(1, dprintf("non-standard facilities info missing or wrong format"));
}
else
{
/* skip the two length-prefixed strings already in the buffer, then
   append the non-standard facilities (fax_parms[7]) behind them */
if (plci->fax_connect_info_length <= len)
plci->fax_connect_info_buffer[len] = 0;
len += 1 + plci->fax_connect_info_buffer[len];
if (plci->fax_connect_info_length <= len)
plci->fax_connect_info_buffer[len] = 0;
len += 1 + plci->fax_connect_info_buffer[len];
if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
for (i = 0; i < fax_parms[7].length; i++)
plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
}
plci->fax_connect_info_length = len;
plci->fax_edata_ack_length = plci->fax_connect_info_length;
start_internal_command(Id, plci, fax_edata_ack_command);
break;
}
return false;
}
/*------------------------------------------------------------------*/
/* IDI callback function */
/*------------------------------------------------------------------*/
/*
 * IDI callback: entry point invoked by the hardware abstraction (IDI)
 * layer whenever a return code or an indication arrives for an entity.
 * e->user[0] holds the adapter index (bit 15 set selects the network
 * layer entity, clear the signalling entity) and e->user[1] the PLCI
 * index on that adapter.  After dispatching the RC/indication, queued
 * CAPI messages for the PLCI are drained and pending data/requests sent.
 *
 * Fix: the "RC cancelled" debug line passed two arguments (e->Id, ch)
 * but contained only one %02x conversion ("Id:0x02" was a typo for
 * "Id:%02x"), so ch was printed as the Id and the channel was dropped.
 */
void callback(ENTITY *e)
{
	DIVA_CAPI_ADAPTER *a;
	APPL *appl;
	PLCI *plci;
	CAPI_MSG *m;
	word i, j;
	byte rc;
	byte ch;
	byte req;
	byte global_req;
	int no_cancel_rc;

	dbug(1, dprintf("%x:CB(%x:Req=%x,Rc=%x,Ind=%x)",
			(e->user[0] + 1) & 0x7fff, e->Id, e->Req, e->Rc, e->Ind));
	a = &(adapter[(byte)e->user[0]]);
	plci = &(a->plci[e->user[1]]);
	no_cancel_rc = DIVA_CAPI_SUPPORTS_NO_CANCEL(a);

	/*
	  If new protocol code and new XDI is used then CAPI should work
	  fully in accordance with IDI cpec an look on callback field instead
	  of Rc field for return codes.
	*/
	if (((e->complete == 0xff) && no_cancel_rc) ||
	    (e->Rc && !no_cancel_rc)) {
		rc = e->Rc;
		ch = e->RcCh;
		req = e->Req;
		e->Rc = 0;
		if (e->user[0] & 0x8000)	/* network layer entity */
		{
			/*
			  If REMOVE request was sent then we have to wait until
			  return code with Id set to zero arrives.
			  All other return codes should be ignored.
			*/
			if (req == REMOVE)
			{
				if (e->Id)
				{
					dbug(1, dprintf("cancel RC in REMOVE state"));
					return;
				}
				channel_flow_control_remove(plci);
				for (i = 0; i < 256; i++)
				{
					if (a->FlowControlIdTable[i] == plci->nl_remove_id)
						a->FlowControlIdTable[i] = 0;
				}
				plci->nl_remove_id = 0;
				if (plci->rx_dma_descriptor > 0) {
					diva_free_dma_descriptor(plci, plci->rx_dma_descriptor - 1);
					plci->rx_dma_descriptor = 0;
				}
			}
			if (rc == OK_FC)
			{
				/* flow-controlled RC: record the Id so the final RC can be matched */
				a->FlowControlIdTable[ch] = e->Id;
				a->FlowControlSkipTable[ch] = 0;
				a->ch_flow_control[ch] |= N_OK_FC_PENDING;
				a->ch_flow_plci[ch] = plci->Id;
				plci->nl_req = 0;
			}
			else
			{
				/*
				  Cancel return codes self, if feature was requested
				*/
				if (no_cancel_rc && (a->FlowControlIdTable[ch] == e->Id) && e->Id) {
					a->FlowControlIdTable[ch] = 0;
					if ((rc == OK) && a->FlowControlSkipTable[ch]) {
						dbug(3, dprintf("XDI CAPI: RC cancelled Id:%02x, Ch:%02x", e->Id, ch));
						return;
					}
				}
				if (a->ch_flow_control[ch] & N_OK_FC_PENDING)
				{
					a->ch_flow_control[ch] &= ~N_OK_FC_PENDING;
					if (ch == e->ReqCh)
						plci->nl_req = 0;
				}
				else
					plci->nl_req = 0;
			}
			if (plci->nl_req)
				control_rc(plci, 0, rc, ch, 0, true);
			else
			{
				if (req == N_XON)
				{
					channel_x_on(plci, ch);
					if (plci->internal_command)
						control_rc(plci, req, rc, ch, 0, true);
				}
				else
				{
					if (plci->nl_global_req)
					{
						global_req = plci->nl_global_req;
						plci->nl_global_req = 0;
						if (rc != ASSIGN_OK) {
							e->Id = 0;
							if (plci->rx_dma_descriptor > 0) {
								diva_free_dma_descriptor(plci, plci->rx_dma_descriptor - 1);
								plci->rx_dma_descriptor = 0;
							}
						}
						channel_xmit_xon(plci);
						control_rc(plci, 0, rc, ch, global_req, true);
					}
					else if (plci->data_sent)
					{
						/* a transmitted data block completed: acknowledge it */
						channel_xmit_xon(plci);
						plci->data_sent = false;
						plci->NL.XNum = 1;
						data_rc(plci, ch);
						if (plci->internal_command)
							control_rc(plci, req, rc, ch, 0, true);
					}
					else
					{
						channel_xmit_xon(plci);
						control_rc(plci, req, rc, ch, 0, true);
					}
				}
			}
		}
		else	/* signalling entity */
		{
			/*
			  If REMOVE request was sent then we have to wait until
			  return code with Id set to zero arrives.
			  All other return codes should be ignored.
			*/
			if (req == REMOVE)
			{
				if (e->Id)
				{
					dbug(1, dprintf("cancel RC in REMOVE state"));
					return;
				}
				plci->sig_remove_id = 0;
			}
			plci->sig_req = 0;
			if (plci->sig_global_req)
			{
				global_req = plci->sig_global_req;
				plci->sig_global_req = 0;
				if (rc != ASSIGN_OK)
					e->Id = 0;
				channel_xmit_xon(plci);
				control_rc(plci, 0, rc, ch, global_req, false);
			}
			else
			{
				channel_xmit_xon(plci);
				control_rc(plci, req, rc, ch, 0, false);
			}
		}
		/*
		  Again: in accordance with IDI spec Rc and Ind can't be delivered in the
		  same callback. Also if new XDI and protocol code used then jump
		  direct to finish.
		*/
		if (no_cancel_rc) {
			channel_xmit_xon(plci);
			goto capi_callback_suffix;
		}
	}
	channel_xmit_xon(plci);
	if (e->Ind) {
		if (e->user[0] & 0x8000) {
			byte Ind = e->Ind & 0x0f;
			byte Ch = e->IndCh;
			/* a disconnect clears any receive flow control pending on the channel */
			if (((Ind == N_DISC) || (Ind == N_DISC_ACK)) &&
			    (a->ch_flow_plci[Ch] == plci->Id)) {
				if (a->ch_flow_control[Ch] & N_RX_FLOW_CONTROL_MASK) {
					dbug(3, dprintf("XDI CAPI: I: pending N-XON Ch:%02x", Ch));
				}
				a->ch_flow_control[Ch] &= ~N_RX_FLOW_CONTROL_MASK;
			}
			nl_ind(plci);
			if ((e->RNR != 1) &&
			    (a->ch_flow_plci[Ch] == plci->Id) &&
			    (a->ch_flow_control[Ch] & N_RX_FLOW_CONTROL_MASK)) {
				a->ch_flow_control[Ch] &= ~N_RX_FLOW_CONTROL_MASK;
				dbug(3, dprintf("XDI CAPI: I: remove faked N-XON Ch:%02x", Ch));
			}
		} else {
			sig_ind(plci);
		}
		e->Ind = 0;
	}
capi_callback_suffix:
	/* drain CAPI messages that were queued while a request was in progress */
	while (!plci->req_in
	       && !plci->internal_command
	       && (plci->msg_in_write_pos != plci->msg_in_read_pos))
	{
		j = (plci->msg_in_read_pos == plci->msg_in_wrap_pos) ? 0 : plci->msg_in_read_pos;
		i = (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]))->header.length + 3) & 0xfffc;
		m = (CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]);
		appl = *((APPL **)(&((byte *)(plci->msg_in_queue))[j + i]));
		dbug(1, dprintf("dequeue msg(0x%04x) - write=%d read=%d wrap=%d",
				m->header.command, plci->msg_in_write_pos, plci->msg_in_read_pos, plci->msg_in_wrap_pos));
		if (plci->msg_in_read_pos == plci->msg_in_wrap_pos)
		{
			plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
			plci->msg_in_read_pos = i + MSG_IN_OVERHEAD;
		}
		else
		{
			plci->msg_in_read_pos = j + i + MSG_IN_OVERHEAD;
		}
		if (plci->msg_in_read_pos == plci->msg_in_write_pos)
		{
			plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
			plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
		}
		else if (plci->msg_in_read_pos == plci->msg_in_wrap_pos)
		{
			plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
			plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
		}
		i = api_put(appl, m);
		if (i != 0)
		{
			if (m->header.command == _DATA_B3_R)
				TransmitBufferFree(appl, (byte *)(long)(m->info.data_b3_req.Data));
			dbug(1, dprintf("Error 0x%04x from msg(0x%04x)", i, m->header.command));
			break;
		}
		if (plci->li_notify_update)
		{
			plci->li_notify_update = false;
			mixer_notify_update(plci, false);
		}
	}
	send_data(plci);
	send_req(plci);
}
static void control_rc(PLCI *plci, byte req, byte rc, byte ch, byte global_req,
byte nl_rc)
{
dword Id;
dword rId;
word Number;
word Info = 0;
word i;
word ncci;
DIVA_CAPI_ADAPTER *a;
APPL *appl;
PLCI *rplci;
byte SSparms[] = "\x05\x00\x00\x02\x00\x00";
byte SSstruct[] = "\x09\x00\x00\x06\x00\x00\x00\x00\x00\x00";
if (!plci) {
dbug(0, dprintf("A: control_rc, no plci %02x:%02x:%02x:%02x:%02x", req, rc, ch, global_req, nl_rc));
return;
}
dbug(1, dprintf("req0_in/out=%d/%d", plci->req_in, plci->req_out));
if (plci->req_in != plci->req_out)
{
if (nl_rc || (global_req != ASSIGN) || (rc == ASSIGN_OK))
{
dbug(1, dprintf("req_1return"));
return;
}
/* cancel outstanding request on the PLCI after SIG ASSIGN failure */
}
plci->req_in = plci->req_in_start = plci->req_out = 0;
dbug(1, dprintf("control_rc"));
appl = plci->appl;
a = plci->adapter;
ncci = a->ch_ncci[ch];
if (appl)
{
Id = (((dword)(ncci ? ncci : ch)) << 16) | ((word)plci->Id << 8) | a->Id;
if (plci->tel && plci->SuppState != CALL_HELD) Id |= EXT_CONTROLLER;
Number = plci->number;
dbug(1, dprintf("Contr_RC-Id=%08lx,plci=%x,tel=%x, entity=0x%x, command=0x%x, int_command=0x%x", Id, plci->Id, plci->tel, plci->Sig.Id, plci->command, plci->internal_command));
dbug(1, dprintf("channels=0x%x", plci->channels));
if (plci_remove_check(plci))
return;
if (req == REMOVE && rc == ASSIGN_OK)
{
sig_req(plci, HANGUP, 0);
sig_req(plci, REMOVE, 0);
send_req(plci);
}
if (plci->command)
{
switch (plci->command)
{
case C_HOLD_REQ:
dbug(1, dprintf("HoldRC=0x%x", rc));
SSparms[1] = (byte)S_HOLD;
if (rc != OK)
{
plci->SuppState = IDLE;
Info = 0x2001;
}
sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", Info, 3, SSparms);
break;
case C_RETRIEVE_REQ:
dbug(1, dprintf("RetrieveRC=0x%x", rc));
SSparms[1] = (byte)S_RETRIEVE;
if (rc != OK)
{
plci->SuppState = CALL_HELD;
Info = 0x2001;
}
sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", Info, 3, SSparms);
break;
case _INFO_R:
dbug(1, dprintf("InfoRC=0x%x", rc));
if (rc != OK) Info = _WRONG_STATE;
sendf(appl, _INFO_R | CONFIRM, Id, Number, "w", Info);
break;
case _CONNECT_R:
dbug(1, dprintf("Connect_R=0x%x/0x%x/0x%x/0x%x", req, rc, global_req, nl_rc));
if (plci->State == INC_DIS_PENDING)
break;
if (plci->Sig.Id != 0xff)
{
if (((global_req == ASSIGN) && (rc != ASSIGN_OK))
|| (!nl_rc && (req == CALL_REQ) && (rc != OK)))
{
dbug(1, dprintf("No more IDs/Call_Req failed"));
sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
plci_remove(plci);
plci->State = IDLE;
break;
}
if (plci->State != LOCAL_CONNECT) plci->State = OUTG_CON_PENDING;
sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
}
else /* D-ch activation */
{
if (rc != ASSIGN_OK)
{
dbug(1, dprintf("No more IDs/X.25 Call_Req failed"));
sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
plci_remove(plci);
plci->State = IDLE;
break;
}
sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
sendf(plci->appl, _CONNECT_ACTIVE_I, Id, 0, "sss", "", "", "");
plci->State = INC_ACT_PENDING;
}
break;
case _CONNECT_I | RESPONSE:
if (plci->State != INC_DIS_PENDING)
plci->State = INC_CON_ACCEPT;
break;
case _DISCONNECT_R:
if (plci->State == INC_DIS_PENDING)
break;
if (plci->Sig.Id != 0xff)
{
plci->State = OUTG_DIS_PENDING;
sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", 0);
}
break;
case SUSPEND_REQ:
break;
case RESUME_REQ:
break;
case _CONNECT_B3_R:
if (rc != OK)
{
sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", _WRONG_IDENTIFIER);
break;
}
ncci = get_ncci(plci, ch, 0);
Id = (Id & 0xffff) | (((dword) ncci) << 16);
plci->channels++;
if (req == N_RESET)
{
a->ncci_state[ncci] = INC_ACT_PENDING;
sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
}
else
{
a->ncci_state[ncci] = OUTG_CON_PENDING;
sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
}
break;
case _CONNECT_B3_I | RESPONSE:
break;
case _RESET_B3_R:
/* sendf(appl, _RESET_B3_R | CONFIRM, Id, Number, "w", 0);*/
break;
case _DISCONNECT_B3_R:
sendf(appl, _DISCONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
break;
case _MANUFACTURER_R:
break;
case PERM_LIST_REQ:
if (rc != OK)
{
Info = _WRONG_IDENTIFIER;
sendf(plci->appl, _CONNECT_R | CONFIRM, Id, Number, "w", Info);
plci_remove(plci);
}
else
sendf(plci->appl, _CONNECT_R | CONFIRM, Id, Number, "w", Info);
break;
default:
break;
}
plci->command = 0;
}
else if (plci->internal_command)
{
switch (plci->internal_command)
{
case BLOCK_PLCI:
return;
case GET_MWI_STATE:
if (rc == OK) /* command supported, wait for indication */
{
return;
}
plci_remove(plci);
break;
/* Get Supported Services */
case GETSERV_REQ_PEND:
if (rc == OK) /* command supported, wait for indication */
{
break;
}
PUT_DWORD(&SSstruct[6], MASK_TERMINAL_PORTABILITY);
sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", 0, 3, SSstruct);
plci_remove(plci);
break;
case INTERR_DIVERSION_REQ_PEND: /* Interrogate Parameters */
case INTERR_NUMBERS_REQ_PEND:
case CF_START_PEND: /* Call Forwarding Start pending */
case CF_STOP_PEND: /* Call Forwarding Stop pending */
case CCBS_REQUEST_REQ_PEND:
case CCBS_DEACTIVATE_REQ_PEND:
case CCBS_INTERROGATE_REQ_PEND:
switch (plci->internal_command)
{
case INTERR_DIVERSION_REQ_PEND:
SSparms[1] = S_INTERROGATE_DIVERSION;
break;
case INTERR_NUMBERS_REQ_PEND:
SSparms[1] = S_INTERROGATE_NUMBERS;
break;
case CF_START_PEND:
SSparms[1] = S_CALL_FORWARDING_START;
break;
case CF_STOP_PEND:
SSparms[1] = S_CALL_FORWARDING_STOP;
break;
case CCBS_REQUEST_REQ_PEND:
SSparms[1] = S_CCBS_REQUEST;
break;
case CCBS_DEACTIVATE_REQ_PEND:
SSparms[1] = S_CCBS_DEACTIVATE;
break;
case CCBS_INTERROGATE_REQ_PEND:
SSparms[1] = S_CCBS_INTERROGATE;
break;
}
if (global_req == ASSIGN)
{
dbug(1, dprintf("AssignDiversion_RC=0x%x/0x%x", req, rc));
return;
}
if (!plci->appl) break;
if (rc == ISDN_GUARD_REJ)
{
Info = _CAPI_GUARD_ERROR;
}
else if (rc != OK)
{
Info = _SUPPLEMENTARY_SERVICE_NOT_SUPPORTED;
}
sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0x7,
plci->number, "wws", Info, (word)3, SSparms);
if (Info) plci_remove(plci);
break;
/* 3pty conference pending */
case PTY_REQ_PEND:
if (!plci->relatedPTYPLCI) break;
rplci = plci->relatedPTYPLCI;
SSparms[1] = plci->ptyState;
rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
if (rplci->tel) rId |= EXT_CONTROLLER;
if (rc != OK)
{
Info = 0x300E; /* not supported */
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
}
sendf(rplci->appl,
_FACILITY_R | CONFIRM,
rId,
plci->number,
"wws", Info, (word)3, SSparms);
break;
/* Explicit Call Transfer pending */
case ECT_REQ_PEND:
dbug(1, dprintf("ECT_RC=0x%x/0x%x", req, rc));
if (!plci->relatedPTYPLCI) break;
rplci = plci->relatedPTYPLCI;
SSparms[1] = S_ECT;
rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
if (rplci->tel) rId |= EXT_CONTROLLER;
if (rc != OK)
{
Info = 0x300E; /* not supported */
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
}
sendf(rplci->appl,
_FACILITY_R | CONFIRM,
rId,
plci->number,
"wws", Info, (word)3, SSparms);
break;
case _MANUFACTURER_R:
dbug(1, dprintf("_Manufacturer_R=0x%x/0x%x", req, rc));
if ((global_req == ASSIGN) && (rc != ASSIGN_OK))
{
dbug(1, dprintf("No more IDs"));
sendf(appl, _MANUFACTURER_R | CONFIRM, Id, Number, "dww", _DI_MANU_ID, _MANUFACTURER_R, _OUT_OF_PLCI);
plci_remove(plci); /* after codec init, internal codec commands pending */
}
break;
case _CONNECT_R:
dbug(1, dprintf("_Connect_R=0x%x/0x%x", req, rc));
if ((global_req == ASSIGN) && (rc != ASSIGN_OK))
{
dbug(1, dprintf("No more IDs"));
sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
plci_remove(plci); /* after codec init, internal codec commands pending */
}
break;
case PERM_COD_HOOK: /* finished with Hook_Ind */
return;
case PERM_COD_CALL:
dbug(1, dprintf("***Codec Connect_Pending A, Rc = 0x%x", rc));
plci->internal_command = PERM_COD_CONN_PEND;
return;
case PERM_COD_ASSIGN:
dbug(1, dprintf("***Codec Assign A, Rc = 0x%x", rc));
if (rc != ASSIGN_OK) break;
sig_req(plci, CALL_REQ, 0);
send_req(plci);
plci->internal_command = PERM_COD_CALL;
return;
/* Null Call Reference Request pending */
case C_NCR_FAC_REQ:
dbug(1, dprintf("NCR_FAC=0x%x/0x%x", req, rc));
if (global_req == ASSIGN)
{
if (rc == ASSIGN_OK)
{
return;
}
else
{
sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", _WRONG_STATE);
appl->NullCREnable = false;
plci_remove(plci);
}
}
else if (req == NCR_FACILITY)
{
if (rc == OK)
{
sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", 0);
}
else
{
sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", _WRONG_STATE);
appl->NullCREnable = false;
}
plci_remove(plci);
}
break;
case HOOK_ON_REQ:
if (plci->channels)
{
if (a->ncci_state[ncci] == CONNECTED)
{
a->ncci_state[ncci] = OUTG_DIS_PENDING;
cleanup_ncci_data(plci, ncci);
nl_req_ncci(plci, N_DISC, (byte)ncci);
}
break;
}
break;
case HOOK_OFF_REQ:
if (plci->State == INC_DIS_PENDING)
break;
sig_req(plci, CALL_REQ, 0);
send_req(plci);
plci->State = OUTG_CON_PENDING;
break;
case MWI_ACTIVATE_REQ_PEND:
case MWI_DEACTIVATE_REQ_PEND:
if (global_req == ASSIGN && rc == ASSIGN_OK)
{
dbug(1, dprintf("MWI_REQ assigned"));
return;
}
else if (rc != OK)
{
if (rc == WRONG_IE)
{
Info = 0x2007; /* Illegal message parameter coding */
dbug(1, dprintf("MWI_REQ invalid parameter"));
}
else
{
Info = 0x300B; /* not supported */
dbug(1, dprintf("MWI_REQ not supported"));
}
/* 0x3010: Request not allowed in this state */
PUT_WORD(&SSparms[4], 0x300E); /* SS not supported */
}
if (plci->internal_command == MWI_ACTIVATE_REQ_PEND)
{
PUT_WORD(&SSparms[1], S_MWI_ACTIVATE);
}
else PUT_WORD(&SSparms[1], S_MWI_DEACTIVATE);
if (plci->cr_enquiry)
{
sendf(plci->appl,
_FACILITY_R | CONFIRM,
Id & 0xf,
plci->number,
"wws", Info, (word)3, SSparms);
if (rc != OK) plci_remove(plci);
}
else
{
sendf(plci->appl,
_FACILITY_R | CONFIRM,
Id,
plci->number,
"wws", Info, (word)3, SSparms);
}
break;
case CONF_BEGIN_REQ_PEND:
case CONF_ADD_REQ_PEND:
case CONF_SPLIT_REQ_PEND:
case CONF_DROP_REQ_PEND:
case CONF_ISOLATE_REQ_PEND:
case CONF_REATTACH_REQ_PEND:
dbug(1, dprintf("CONF_RC=0x%x/0x%x", req, rc));
if ((plci->internal_command == CONF_ADD_REQ_PEND) && (!plci->relatedPTYPLCI)) break;
rplci = plci;
rId = Id;
switch (plci->internal_command)
{
case CONF_BEGIN_REQ_PEND:
SSparms[1] = S_CONF_BEGIN;
break;
case CONF_ADD_REQ_PEND:
SSparms[1] = S_CONF_ADD;
rplci = plci->relatedPTYPLCI;
rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
break;
case CONF_SPLIT_REQ_PEND:
SSparms[1] = S_CONF_SPLIT;
break;
case CONF_DROP_REQ_PEND:
SSparms[1] = S_CONF_DROP;
break;
case CONF_ISOLATE_REQ_PEND:
SSparms[1] = S_CONF_ISOLATE;
break;
case CONF_REATTACH_REQ_PEND:
SSparms[1] = S_CONF_REATTACH;
break;
}
if (rc != OK)
{
Info = 0x300E; /* not supported */
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
}
sendf(rplci->appl,
_FACILITY_R | CONFIRM,
rId,
plci->number,
"wws", Info, (word)3, SSparms);
break;
case VSWITCH_REQ_PEND:
if (rc != OK)
{
if (plci->relatedPTYPLCI)
{
plci->relatedPTYPLCI->vswitchstate = 0;
plci->relatedPTYPLCI->vsprot = 0;
plci->relatedPTYPLCI->vsprotdialect = 0;
}
plci->vswitchstate = 0;
plci->vsprot = 0;
plci->vsprotdialect = 0;
}
else
{
if (plci->relatedPTYPLCI &&
plci->vswitchstate == 1 &&
plci->relatedPTYPLCI->vswitchstate == 3) /* join complete */
plci->vswitchstate = 3;
}
break;
/* Call Deflection Request pending (SSCT) */
case CD_REQ_PEND:
SSparms[1] = S_CALL_DEFLECTION;
if (rc != OK)
{
Info = 0x300E; /* not supported */
plci->appl->CDEnable = 0;
}
sendf(plci->appl, _FACILITY_R | CONFIRM, Id,
plci->number, "wws", Info, (word)3, SSparms);
break;
case RTP_CONNECT_B3_REQ_COMMAND_2:
if (rc == OK)
{
ncci = get_ncci(plci, ch, 0);
Id = (Id & 0xffff) | (((dword) ncci) << 16);
plci->channels++;
a->ncci_state[ncci] = OUTG_CON_PENDING;
}
default:
if (plci->internal_command_queue[0])
{
(*(plci->internal_command_queue[0]))(Id, plci, rc);
if (plci->internal_command)
return;
}
break;
}
next_internal_command(Id, plci);
}
}
else /* appl==0 */
{
Id = ((word)plci->Id << 8) | plci->adapter->Id;
if (plci->tel) Id |= EXT_CONTROLLER;
switch (plci->internal_command)
{
case BLOCK_PLCI:
return;
case START_L1_SIG_ASSIGN_PEND:
case REM_L1_SIG_ASSIGN_PEND:
if (global_req == ASSIGN)
{
break;
}
else
{
dbug(1, dprintf("***L1 Req rem PLCI"));
plci->internal_command = 0;
sig_req(plci, REMOVE, 0);
send_req(plci);
}
break;
/* Call Deflection Request pending, just no appl ptr assigned */
case CD_REQ_PEND:
SSparms[1] = S_CALL_DEFLECTION;
if (rc != OK)
{
Info = 0x300E; /* not supported */
}
for (i = 0; i < max_appl; i++)
{
if (application[i].CDEnable)
{
if (!application[i].Id) application[i].CDEnable = 0;
else
{
sendf(&application[i], _FACILITY_R | CONFIRM, Id,
plci->number, "wws", Info, (word)3, SSparms);
if (Info) application[i].CDEnable = 0;
}
}
}
plci->internal_command = 0;
break;
case PERM_COD_HOOK: /* finished with Hook_Ind */
return;
case PERM_COD_CALL:
plci->internal_command = PERM_COD_CONN_PEND;
dbug(1, dprintf("***Codec Connect_Pending, Rc = 0x%x", rc));
return;
case PERM_COD_ASSIGN:
dbug(1, dprintf("***Codec Assign, Rc = 0x%x", rc));
plci->internal_command = 0;
if (rc != ASSIGN_OK) break;
plci->internal_command = PERM_COD_CALL;
sig_req(plci, CALL_REQ, 0);
send_req(plci);
return;
case LISTEN_SIG_ASSIGN_PEND:
if (rc == ASSIGN_OK)
{
plci->internal_command = 0;
dbug(1, dprintf("ListenCheck, new SIG_ID = 0x%x", plci->Sig.Id));
add_p(plci, ESC, "\x02\x18\x00"); /* support call waiting */
sig_req(plci, INDICATE_REQ, 0);
send_req(plci);
}
else
{
dbug(1, dprintf("ListenCheck failed (assignRc=0x%x)", rc));
a->listen_active--;
plci_remove(plci);
plci->State = IDLE;
}
break;
case USELAW_REQ:
if (global_req == ASSIGN)
{
if (rc == ASSIGN_OK)
{
sig_req(plci, LAW_REQ, 0);
send_req(plci);
dbug(1, dprintf("Auto-Law assigned"));
}
else
{
dbug(1, dprintf("Auto-Law assign failed"));
a->automatic_law = 3;
plci->internal_command = 0;
a->automatic_lawPLCI = NULL;
}
break;
}
else if (req == LAW_REQ && rc == OK)
{
dbug(1, dprintf("Auto-Law initiated"));
a->automatic_law = 2;
plci->internal_command = 0;
}
else
{
dbug(1, dprintf("Auto-Law not supported"));
a->automatic_law = 3;
plci->internal_command = 0;
sig_req(plci, REMOVE, 0);
send_req(plci);
a->automatic_lawPLCI = NULL;
}
break;
}
plci_remove_check(plci);
}
}
/*
 * Data transmit completion: the card reports that the DATA_B3 block queued
 * on B-channel 'ch' has been sent.  Free the transmit buffer, and if the
 * channel still maps to an NCCI owned by this PLCI, confirm the oldest
 * pending DATA_B3_R to the application and advance the ring indices.
 */
static void data_rc(PLCI *plci, byte ch)
{
	dword Id;
	DIVA_CAPI_ADAPTER *a;
	NCCI *ncci_ptr;
	DATA_B3_DESC *data;
	word ncci;

	if (!plci->appl)
		return;

	/* hand the just-transmitted buffer back to the application pool */
	TransmitBufferFree(plci->appl, plci->data_sent_ptr);

	a = plci->adapter;
	ncci = a->ch_ncci[ch];
	if (!ncci || (a->ncci_plci[ncci] != plci->Id))
		return;

	ncci_ptr = &(a->ncci[ncci]);
	dbug(1, dprintf("data_out=%d, data_pending=%d", ncci_ptr->data_out, ncci_ptr->data_pending));
	if (!ncci_ptr->data_pending)
		return;

	data = &(ncci_ptr->DBuffer[ncci_ptr->data_out]);
	/* send the confirm only if Flags bit 2 is clear and the NCCI is still up */
	if (!(data->Flags & 4) && a->ncci_state[ncci])
	{
		Id = (((dword)ncci) << 16) | ((word)plci->Id << 8) | a->Id;
		if (plci->tel)
			Id |= EXT_CONTROLLER;
		sendf(plci->appl, _DATA_B3_R | CONFIRM, Id, data->Number,
		      "ww", data->Handle, 0);
	}
	/* consume one entry of the circular DBuffer queue */
	ncci_ptr->data_out++;
	if (ncci_ptr->data_out == MAX_DATA_B3)
		ncci_ptr->data_out = 0;
	ncci_ptr->data_pending--;
}
/*
 * Delayed data acknowledge from the card for B-channel 'ch': confirm the
 * oldest queued DATA_B3_R (from the DataAck ring) to the application,
 * provided the NCCI is still active and owned by this PLCI, then advance
 * the acknowledge ring.
 */
static void data_ack(PLCI *plci, byte ch)
{
	dword Id;
	DIVA_CAPI_ADAPTER *a;
	NCCI *ncci_ptr;
	word ncci;

	a = plci->adapter;
	ncci = a->ch_ncci[ch];
	ncci_ptr = &(a->ncci[ncci]);
	if (!ncci_ptr->data_ack_pending)
		return;

	if (a->ncci_state[ncci] && (a->ncci_plci[ncci] == plci->Id))
	{
		Id = (((dword)ncci) << 16) | ((word)plci->Id << 8) | a->Id;
		if (plci->tel)
			Id |= EXT_CONTROLLER;
		sendf(plci->appl, _DATA_B3_R | CONFIRM, Id,
		      ncci_ptr->DataAck[ncci_ptr->data_ack_out].Number,
		      "ww", ncci_ptr->DataAck[ncci_ptr->data_ack_out].Handle, 0);
	}
	/* consume one entry of the circular DataAck queue */
	ncci_ptr->data_ack_out++;
	if (ncci_ptr->data_ack_out == MAX_DATA_ACK)
		ncci_ptr->data_ack_out = 0;
	ncci_ptr->data_ack_pending--;
}
static void sig_ind(PLCI *plci)
{
dword x_Id;
dword Id;
dword rId;
word i;
word cip;
dword cip_mask;
byte *ie;
DIVA_CAPI_ADAPTER *a;
API_PARSE saved_parms[MAX_MSG_PARMS + 1];
#define MAXPARMSIDS 31
byte *parms[MAXPARMSIDS];
byte *add_i[4];
byte *multi_fac_parms[MAX_MULTI_IE];
byte *multi_pi_parms[MAX_MULTI_IE];
byte *multi_ssext_parms[MAX_MULTI_IE];
byte *multi_CiPN_parms[MAX_MULTI_IE];
byte *multi_vswitch_parms[MAX_MULTI_IE];
byte ai_len;
byte *esc_chi = "";
byte *esc_law = "";
byte *pty_cai = "";
byte *esc_cr = "";
byte *esc_profile = "";
byte facility[256];
PLCI *tplci = NULL;
byte chi[] = "\x02\x18\x01";
byte voice_cai[] = "\x06\x14\x00\x00\x00\x00\x08";
byte resume_cau[] = "\x05\x05\x00\x02\x00\x00";
/* ESC_MSGTYPE must be the last but one message, a new IE has to be */
/* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */
/* SMSG is situated at the end because its 0 (for compatibility reasons */
/* (see Info_Mask Bit 4, first IE. then the message type) */
word parms_id[] =
{MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA,
UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW,
RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR,
CST, ESC_PROFILE, 0xff, ESC_MSGTYPE, SMSG};
/* 14 FTY repl by ESC_CHI */
/* 18 PI repl by ESC_LAW */
/* removed OAD changed to 0xff for future use, OAD is multiIE now */
word multi_fac_id[] = {1, FTY};
word multi_pi_id[] = {1, PI};
word multi_CiPN_id[] = {1, OAD};
word multi_ssext_id[] = {1, ESC_SSEXT};
word multi_vswitch_id[] = {1, ESC_VSWITCH};
byte *cau;
word ncci;
byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00";
byte force_mt_info = false;
byte dir;
dword d;
word w;
a = plci->adapter;
Id = ((word)plci->Id << 8) | a->Id;
PUT_WORD(&SS_Ind[4], 0x0000);
if (plci->sig_remove_id)
{
plci->Sig.RNR = 2; /* discard */
dbug(1, dprintf("SIG discard while remove pending"));
return;
}
if (plci->tel && plci->SuppState != CALL_HELD) Id |= EXT_CONTROLLER;
dbug(1, dprintf("SigInd-Id=%08lx,plci=%x,tel=%x,state=0x%x,channels=%d,Discflowcl=%d",
Id, plci->Id, plci->tel, plci->State, plci->channels, plci->hangup_flow_ctrl_timer));
if (plci->Sig.Ind == CALL_HOLD_ACK && plci->channels)
{
plci->Sig.RNR = 1;
return;
}
if (plci->Sig.Ind == HANGUP && plci->channels)
{
plci->Sig.RNR = 1;
plci->hangup_flow_ctrl_timer++;
/* recover the network layer after timeout */
if (plci->hangup_flow_ctrl_timer == 100)
{
dbug(1, dprintf("Exceptional disc"));
plci->Sig.RNR = 0;
plci->hangup_flow_ctrl_timer = 0;
for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
{
if (a->ncci_plci[ncci] == plci->Id)
{
cleanup_ncci_data(plci, ncci);
if (plci->channels)plci->channels--;
if (plci->appl)
sendf(plci->appl, _DISCONNECT_B3_I, (((dword) ncci) << 16) | Id, 0, "ws", 0, "");
}
}
if (plci->appl)
sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", 0);
plci_remove(plci);
plci->State = IDLE;
}
return;
}
/* do first parse the info with no OAD in, because OAD will be converted */
/* first the multiple facility IE, then mult. progress ind. */
/* then the parameters for the info_ind + conn_ind */
IndParse(plci, multi_fac_id, multi_fac_parms, MAX_MULTI_IE);
IndParse(plci, multi_pi_id, multi_pi_parms, MAX_MULTI_IE);
IndParse(plci, multi_ssext_id, multi_ssext_parms, MAX_MULTI_IE);
IndParse(plci, multi_vswitch_id, multi_vswitch_parms, MAX_MULTI_IE);
IndParse(plci, parms_id, parms, 0);
IndParse(plci, multi_CiPN_id, multi_CiPN_parms, MAX_MULTI_IE);
esc_chi = parms[14];
esc_law = parms[18];
pty_cai = parms[24];
esc_cr = parms[25];
esc_profile = parms[27];
if (esc_cr[0] && plci)
{
if (plci->cr_enquiry && plci->appl)
{
plci->cr_enquiry = false;
/* d = MANU_ID */
/* w = m_command */
/* b = total length */
/* b = indication type */
/* b = length of all IEs */
/* b = IE1 */
/* S = IE1 length + cont. */
/* b = IE2 */
/* S = IE2 length + cont. */
sendf(plci->appl,
_MANUFACTURER_I,
Id,
0,
"dwbbbbSbS", _DI_MANU_ID, plci->m_command,
2 + 1 + 1 + esc_cr[0] + 1 + 1 + esc_law[0], plci->Sig.Ind, 1 + 1 + esc_cr[0] + 1 + 1 + esc_law[0], ESC, esc_cr, ESC, esc_law);
}
}
/* create the additional info structure */
add_i[1] = parms[15]; /* KEY of additional info */
add_i[2] = parms[11]; /* UUI of additional info */
ai_len = AddInfo(add_i, multi_fac_parms, esc_chi, facility);
/* the ESC_LAW indicates if u-Law or a-Law is actually used by the card */
/* indication returns by the card if requested by the function */
/* AutomaticLaw() after driver init */
if (a->automatic_law < 4)
{
if (esc_law[0]) {
if (esc_law[2]) {
dbug(0, dprintf("u-Law selected"));
a->u_law = 1;
}
else {
dbug(0, dprintf("a-Law selected"));
a->u_law = 0;
}
a->automatic_law = 4;
if (plci == a->automatic_lawPLCI) {
plci->internal_command = 0;
sig_req(plci, REMOVE, 0);
send_req(plci);
a->automatic_lawPLCI = NULL;
}
}
if (esc_profile[0])
{
dbug(1, dprintf("[%06x] CardProfile: %lx %lx %lx %lx %lx",
UnMapController(a->Id), GET_DWORD(&esc_profile[6]),
GET_DWORD(&esc_profile[10]), GET_DWORD(&esc_profile[14]),
GET_DWORD(&esc_profile[18]), GET_DWORD(&esc_profile[46])));
a->profile.Global_Options &= 0x000000ffL;
a->profile.B1_Protocols &= 0x000003ffL;
a->profile.B2_Protocols &= 0x00001fdfL;
a->profile.B3_Protocols &= 0x000000b7L;
a->profile.Global_Options &= GET_DWORD(&esc_profile[6]) |
GL_BCHANNEL_OPERATION_SUPPORTED;
a->profile.B1_Protocols &= GET_DWORD(&esc_profile[10]);
a->profile.B2_Protocols &= GET_DWORD(&esc_profile[14]);
a->profile.B3_Protocols &= GET_DWORD(&esc_profile[18]);
a->manufacturer_features = GET_DWORD(&esc_profile[46]);
a->man_profile.private_options = 0;
if (a->manufacturer_features & MANUFACTURER_FEATURE_ECHO_CANCELLER)
{
a->man_profile.private_options |= 1L << PRIVATE_ECHO_CANCELLER;
a->profile.Global_Options |= GL_ECHO_CANCELLER_SUPPORTED;
}
if (a->manufacturer_features & MANUFACTURER_FEATURE_RTP)
a->man_profile.private_options |= 1L << PRIVATE_RTP;
a->man_profile.rtp_primary_payloads = GET_DWORD(&esc_profile[50]);
a->man_profile.rtp_additional_payloads = GET_DWORD(&esc_profile[54]);
if (a->manufacturer_features & MANUFACTURER_FEATURE_T38)
a->man_profile.private_options |= 1L << PRIVATE_T38;
if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_SUB_SEP_PWD)
a->man_profile.private_options |= 1L << PRIVATE_FAX_SUB_SEP_PWD;
if (a->manufacturer_features & MANUFACTURER_FEATURE_V18)
a->man_profile.private_options |= 1L << PRIVATE_V18;
if (a->manufacturer_features & MANUFACTURER_FEATURE_DTMF_TONE)
a->man_profile.private_options |= 1L << PRIVATE_DTMF_TONE;
if (a->manufacturer_features & MANUFACTURER_FEATURE_PIAFS)
a->man_profile.private_options |= 1L << PRIVATE_PIAFS;
if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
a->man_profile.private_options |= 1L << PRIVATE_FAX_PAPER_FORMATS;
if (a->manufacturer_features & MANUFACTURER_FEATURE_VOWN)
a->man_profile.private_options |= 1L << PRIVATE_VOWN;
if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_NONSTANDARD)
a->man_profile.private_options |= 1L << PRIVATE_FAX_NONSTANDARD;
}
else
{
a->profile.Global_Options &= 0x0000007fL;
a->profile.B1_Protocols &= 0x000003dfL;
a->profile.B2_Protocols &= 0x00001adfL;
a->profile.B3_Protocols &= 0x000000b7L;
a->manufacturer_features &= MANUFACTURER_FEATURE_HARDDTMF;
}
if (a->manufacturer_features & (MANUFACTURER_FEATURE_HARDDTMF |
MANUFACTURER_FEATURE_SOFTDTMF_SEND | MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
{
a->profile.Global_Options |= GL_DTMF_SUPPORTED;
}
a->manufacturer_features &= ~MANUFACTURER_FEATURE_OOB_CHANNEL;
dbug(1, dprintf("[%06x] Profile: %lx %lx %lx %lx %lx",
UnMapController(a->Id), a->profile.Global_Options,
a->profile.B1_Protocols, a->profile.B2_Protocols,
a->profile.B3_Protocols, a->manufacturer_features));
}
/* codec plci for the handset/hook state support is just an internal id */
if (plci != a->AdvCodecPLCI)
{
force_mt_info = SendMultiIE(plci, Id, multi_fac_parms, FTY, 0x20, 0);
force_mt_info |= SendMultiIE(plci, Id, multi_pi_parms, PI, 0x210, 0);
SendSSExtInd(NULL, plci, Id, multi_ssext_parms);
SendInfo(plci, Id, parms, force_mt_info);
VSwitchReqInd(plci, Id, multi_vswitch_parms);
}
/* switch the codec to the b-channel */
if (esc_chi[0] && plci && !plci->SuppState) {
plci->b_channel = esc_chi[esc_chi[0]]&0x1f;
mixer_set_bchannel_id_esc(plci, plci->b_channel);
dbug(1, dprintf("storeChannel=0x%x", plci->b_channel));
if (plci->tel == ADV_VOICE && plci->appl) {
SetVoiceChannel(a->AdvCodecPLCI, esc_chi, a);
}
}
if (plci->appl) plci->appl->Number++;
switch (plci->Sig.Ind) {
/* Response to Get_Supported_Services request */
case S_SUPPORTED:
dbug(1, dprintf("S_Supported"));
if (!plci->appl) break;
if (pty_cai[0] == 4)
{
PUT_DWORD(&CF_Ind[6], GET_DWORD(&pty_cai[1]));
}
else
{
PUT_DWORD(&CF_Ind[6], MASK_TERMINAL_PORTABILITY | MASK_HOLD_RETRIEVE);
}
PUT_WORD(&CF_Ind[1], 0);
PUT_WORD(&CF_Ind[4], 0);
sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0x7, plci->number, "wws", 0, 3, CF_Ind);
plci_remove(plci);
break;
/* Supplementary Service rejected */
case S_SERVICE_REJ:
dbug(1, dprintf("S_Reject=0x%x", pty_cai[5]));
if (!pty_cai[0]) break;
switch (pty_cai[5])
{
case ECT_EXECUTE:
case THREE_PTY_END:
case THREE_PTY_BEGIN:
if (!plci->relatedPTYPLCI) break;
tplci = plci->relatedPTYPLCI;
rId = ((word)tplci->Id << 8) | tplci->adapter->Id;
if (tplci->tel) rId |= EXT_CONTROLLER;
if (pty_cai[5] == ECT_EXECUTE)
{
PUT_WORD(&SS_Ind[1], S_ECT);
plci->vswitchstate = 0;
plci->relatedPTYPLCI->vswitchstate = 0;
}
else
{
PUT_WORD(&SS_Ind[1], pty_cai[5] + 3);
}
if (pty_cai[2] != 0xff)
{
PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
}
else
{
PUT_WORD(&SS_Ind[4], 0x300E);
}
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
break;
case CALL_DEFLECTION:
if (pty_cai[2] != 0xff)
{
PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
}
else
{
PUT_WORD(&SS_Ind[4], 0x300E);
}
PUT_WORD(&SS_Ind[1], pty_cai[5]);
for (i = 0; i < max_appl; i++)
{
if (application[i].CDEnable)
{
if (application[i].Id) sendf(&application[i], _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
application[i].CDEnable = false;
}
}
break;
case DEACTIVATION_DIVERSION:
case ACTIVATION_DIVERSION:
case DIVERSION_INTERROGATE_CFU:
case DIVERSION_INTERROGATE_CFB:
case DIVERSION_INTERROGATE_CFNR:
case DIVERSION_INTERROGATE_NUM:
case CCBS_REQUEST:
case CCBS_DEACTIVATE:
case CCBS_INTERROGATE:
if (!plci->appl) break;
if (pty_cai[2] != 0xff)
{
PUT_WORD(&Interr_Err_Ind[4], 0x3600 | (word)pty_cai[2]);
}
else
{
PUT_WORD(&Interr_Err_Ind[4], 0x300E);
}
switch (pty_cai[5])
{
case DEACTIVATION_DIVERSION:
dbug(1, dprintf("Deact_Div"));
Interr_Err_Ind[0] = 0x9;
Interr_Err_Ind[3] = 0x6;
PUT_WORD(&Interr_Err_Ind[1], S_CALL_FORWARDING_STOP);
break;
case ACTIVATION_DIVERSION:
dbug(1, dprintf("Act_Div"));
Interr_Err_Ind[0] = 0x9;
Interr_Err_Ind[3] = 0x6;
PUT_WORD(&Interr_Err_Ind[1], S_CALL_FORWARDING_START);
break;
case DIVERSION_INTERROGATE_CFU:
case DIVERSION_INTERROGATE_CFB:
case DIVERSION_INTERROGATE_CFNR:
dbug(1, dprintf("Interr_Div"));
Interr_Err_Ind[0] = 0xa;
Interr_Err_Ind[3] = 0x7;
PUT_WORD(&Interr_Err_Ind[1], S_INTERROGATE_DIVERSION);
break;
case DIVERSION_INTERROGATE_NUM:
dbug(1, dprintf("Interr_Num"));
Interr_Err_Ind[0] = 0xa;
Interr_Err_Ind[3] = 0x7;
PUT_WORD(&Interr_Err_Ind[1], S_INTERROGATE_NUMBERS);
break;
case CCBS_REQUEST:
dbug(1, dprintf("CCBS Request"));
Interr_Err_Ind[0] = 0xd;
Interr_Err_Ind[3] = 0xa;
PUT_WORD(&Interr_Err_Ind[1], S_CCBS_REQUEST);
break;
case CCBS_DEACTIVATE:
dbug(1, dprintf("CCBS Deactivate"));
Interr_Err_Ind[0] = 0x9;
Interr_Err_Ind[3] = 0x6;
PUT_WORD(&Interr_Err_Ind[1], S_CCBS_DEACTIVATE);
break;
case CCBS_INTERROGATE:
dbug(1, dprintf("CCBS Interrogate"));
Interr_Err_Ind[0] = 0xb;
Interr_Err_Ind[3] = 0x8;
PUT_WORD(&Interr_Err_Ind[1], S_CCBS_INTERROGATE);
break;
}
PUT_DWORD(&Interr_Err_Ind[6], plci->appl->S_Handle);
sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "ws", 3, Interr_Err_Ind);
plci_remove(plci);
break;
case ACTIVATION_MWI:
case DEACTIVATION_MWI:
if (pty_cai[5] == ACTIVATION_MWI)
{
PUT_WORD(&SS_Ind[1], S_MWI_ACTIVATE);
}
else PUT_WORD(&SS_Ind[1], S_MWI_DEACTIVATE);
if (pty_cai[2] != 0xff)
{
PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
}
else
{
PUT_WORD(&SS_Ind[4], 0x300E);
}
if (plci->cr_enquiry)
{
sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "ws", 3, SS_Ind);
plci_remove(plci);
}
else
{
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
}
break;
case CONF_ADD: /* ERROR */
case CONF_BEGIN:
case CONF_DROP:
case CONF_ISOLATE:
case CONF_REATTACH:
CONF_Ind[0] = 9;
CONF_Ind[3] = 6;
switch (pty_cai[5])
{
case CONF_BEGIN:
PUT_WORD(&CONF_Ind[1], S_CONF_BEGIN);
plci->ptyState = 0;
break;
case CONF_DROP:
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
PUT_WORD(&CONF_Ind[1], S_CONF_DROP);
plci->ptyState = CONNECTED;
break;
case CONF_ISOLATE:
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
PUT_WORD(&CONF_Ind[1], S_CONF_ISOLATE);
plci->ptyState = CONNECTED;
break;
case CONF_REATTACH:
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
PUT_WORD(&CONF_Ind[1], S_CONF_REATTACH);
plci->ptyState = CONNECTED;
break;
case CONF_ADD:
PUT_WORD(&CONF_Ind[1], S_CONF_ADD);
plci->relatedPTYPLCI = NULL;
tplci = plci->relatedPTYPLCI;
if (tplci) tplci->ptyState = CONNECTED;
plci->ptyState = CONNECTED;
break;
}
if (pty_cai[2] != 0xff)
{
PUT_WORD(&CONF_Ind[4], 0x3600 | (word)pty_cai[2]);
}
else
{
PUT_WORD(&CONF_Ind[4], 0x3303); /* Time-out: network did not respond
within the required time */
}
PUT_DWORD(&CONF_Ind[6], 0x0);
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
break;
}
break;
/* Supplementary Service indicates success */
case S_SERVICE:
dbug(1, dprintf("Service_Ind"));
PUT_WORD(&CF_Ind[4], 0);
switch (pty_cai[5])
{
case THREE_PTY_END:
case THREE_PTY_BEGIN:
case ECT_EXECUTE:
if (!plci->relatedPTYPLCI) break;
tplci = plci->relatedPTYPLCI;
rId = ((word)tplci->Id << 8) | tplci->adapter->Id;
if (tplci->tel) rId |= EXT_CONTROLLER;
if (pty_cai[5] == ECT_EXECUTE)
{
PUT_WORD(&SS_Ind[1], S_ECT);
if (plci->vswitchstate != 3)
{
plci->ptyState = IDLE;
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
}
dbug(1, dprintf("ECT OK"));
sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
}
else
{
switch (plci->ptyState)
{
case S_3PTY_BEGIN:
plci->ptyState = CONNECTED;
dbug(1, dprintf("3PTY ON"));
break;
case S_3PTY_END:
plci->ptyState = IDLE;
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
dbug(1, dprintf("3PTY OFF"));
break;
}
PUT_WORD(&SS_Ind[1], pty_cai[5] + 3);
sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
}
break;
case CALL_DEFLECTION:
PUT_WORD(&SS_Ind[1], pty_cai[5]);
for (i = 0; i < max_appl; i++)
{
if (application[i].CDEnable)
{
if (application[i].Id) sendf(&application[i], _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
application[i].CDEnable = false;
}
}
break;
case DEACTIVATION_DIVERSION:
case ACTIVATION_DIVERSION:
if (!plci->appl) break;
PUT_WORD(&CF_Ind[1], pty_cai[5] + 2);
PUT_DWORD(&CF_Ind[6], plci->appl->S_Handle);
sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "ws", 3, CF_Ind);
plci_remove(plci);
break;
case DIVERSION_INTERROGATE_CFU:
case DIVERSION_INTERROGATE_CFB:
case DIVERSION_INTERROGATE_CFNR:
case DIVERSION_INTERROGATE_NUM:
case CCBS_REQUEST:
case CCBS_DEACTIVATE:
case CCBS_INTERROGATE:
if (!plci->appl) break;
switch (pty_cai[5])
{
case DIVERSION_INTERROGATE_CFU:
case DIVERSION_INTERROGATE_CFB:
case DIVERSION_INTERROGATE_CFNR:
dbug(1, dprintf("Interr_Div"));
PUT_WORD(&pty_cai[1], S_INTERROGATE_DIVERSION);
pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
break;
case DIVERSION_INTERROGATE_NUM:
dbug(1, dprintf("Interr_Num"));
PUT_WORD(&pty_cai[1], S_INTERROGATE_NUMBERS);
pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
break;
case CCBS_REQUEST:
dbug(1, dprintf("CCBS Request"));
PUT_WORD(&pty_cai[1], S_CCBS_REQUEST);
pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
break;
case CCBS_DEACTIVATE:
dbug(1, dprintf("CCBS Deactivate"));
PUT_WORD(&pty_cai[1], S_CCBS_DEACTIVATE);
pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
break;
case CCBS_INTERROGATE:
dbug(1, dprintf("CCBS Interrogate"));
PUT_WORD(&pty_cai[1], S_CCBS_INTERROGATE);
pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
break;
}
PUT_WORD(&pty_cai[4], 0); /* Supplementary Service Reason */
PUT_DWORD(&pty_cai[6], plci->appl->S_Handle);
sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "wS", 3, pty_cai);
plci_remove(plci);
break;
case ACTIVATION_MWI:
case DEACTIVATION_MWI:
if (pty_cai[5] == ACTIVATION_MWI)
{
PUT_WORD(&SS_Ind[1], S_MWI_ACTIVATE);
}
else PUT_WORD(&SS_Ind[1], S_MWI_DEACTIVATE);
if (plci->cr_enquiry)
{
sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "ws", 3, SS_Ind);
plci_remove(plci);
}
else
{
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
}
break;
case MWI_INDICATION:
if (pty_cai[0] >= 0x12)
{
PUT_WORD(&pty_cai[3], S_MWI_INDICATE);
pty_cai[2] = pty_cai[0] - 2; /* len Parameter */
pty_cai[5] = pty_cai[0] - 5; /* Supplementary Service-specific parameter len */
if (plci->appl && (a->Notification_Mask[plci->appl->Id - 1] & SMASK_MWI))
{
if (plci->internal_command == GET_MWI_STATE) /* result on Message Waiting Listen */
{
sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "wS", 3, &pty_cai[2]);
plci_remove(plci);
return;
}
else sendf(plci->appl, _FACILITY_I, Id, 0, "wS", 3, &pty_cai[2]);
pty_cai[0] = 0;
}
else
{
for (i = 0; i < max_appl; i++)
{
if (a->Notification_Mask[i]&SMASK_MWI)
{
sendf(&application[i], _FACILITY_I, Id & 0x7, 0, "wS", 3, &pty_cai[2]);
pty_cai[0] = 0;
}
}
}
if (!pty_cai[0])
{ /* acknowledge */
facility[2] = 0; /* returncode */
}
else facility[2] = 0xff;
}
else
{
/* reject */
facility[2] = 0xff; /* returncode */
}
facility[0] = 2;
facility[1] = MWI_RESPONSE; /* Function */
add_p(plci, CAI, facility);
add_p(plci, ESC, multi_ssext_parms[0]); /* remembered parameter -> only one possible */
sig_req(plci, S_SERVICE, 0);
send_req(plci);
plci->command = 0;
next_internal_command(Id, plci);
break;
case CONF_ADD: /* OK */
case CONF_BEGIN:
case CONF_DROP:
case CONF_ISOLATE:
case CONF_REATTACH:
case CONF_PARTYDISC:
CONF_Ind[0] = 9;
CONF_Ind[3] = 6;
switch (pty_cai[5])
{
case CONF_BEGIN:
PUT_WORD(&CONF_Ind[1], S_CONF_BEGIN);
if (pty_cai[0] == 6)
{
d = pty_cai[6];
PUT_DWORD(&CONF_Ind[6], d); /* PartyID */
}
else
{
PUT_DWORD(&CONF_Ind[6], 0x0);
}
break;
case CONF_ISOLATE:
PUT_WORD(&CONF_Ind[1], S_CONF_ISOLATE);
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
break;
case CONF_REATTACH:
PUT_WORD(&CONF_Ind[1], S_CONF_REATTACH);
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
break;
case CONF_DROP:
PUT_WORD(&CONF_Ind[1], S_CONF_DROP);
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
break;
case CONF_ADD:
PUT_WORD(&CONF_Ind[1], S_CONF_ADD);
d = pty_cai[6];
PUT_DWORD(&CONF_Ind[6], d); /* PartyID */
tplci = plci->relatedPTYPLCI;
if (tplci) tplci->ptyState = CONNECTED;
break;
case CONF_PARTYDISC:
CONF_Ind[0] = 7;
CONF_Ind[3] = 4;
PUT_WORD(&CONF_Ind[1], S_CONF_PARTYDISC);
d = pty_cai[6];
PUT_DWORD(&CONF_Ind[4], d); /* PartyID */
break;
}
plci->ptyState = CONNECTED;
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
break;
case CCBS_INFO_RETAIN:
case CCBS_ERASECALLLINKAGEID:
case CCBS_STOP_ALERTING:
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
switch (pty_cai[5])
{
case CCBS_INFO_RETAIN:
PUT_WORD(&CONF_Ind[1], S_CCBS_INFO_RETAIN);
break;
case CCBS_STOP_ALERTING:
PUT_WORD(&CONF_Ind[1], S_CCBS_STOP_ALERTING);
break;
case CCBS_ERASECALLLINKAGEID:
PUT_WORD(&CONF_Ind[1], S_CCBS_ERASECALLLINKAGEID);
CONF_Ind[0] = 7;
CONF_Ind[3] = 4;
CONF_Ind[6] = 0;
CONF_Ind[7] = 0;
break;
}
w = pty_cai[6];
PUT_WORD(&CONF_Ind[4], w); /* PartyID */
if (plci->appl && (a->Notification_Mask[plci->appl->Id - 1] & SMASK_CCBS))
{
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
}
else
{
for (i = 0; i < max_appl; i++)
if (a->Notification_Mask[i] & SMASK_CCBS)
sendf(&application[i], _FACILITY_I, Id & 0x7, 0, "ws", 3, CONF_Ind);
}
break;
}
break;
case CALL_HOLD_REJ:
cau = parms[7];
if (cau)
{
i = _L3_CAUSE | cau[2];
if (cau[2] == 0) i = 0x3603;
}
else
{
i = 0x3603;
}
PUT_WORD(&SS_Ind[1], S_HOLD);
PUT_WORD(&SS_Ind[4], i);
if (plci->SuppState == HOLD_REQUEST)
{
plci->SuppState = IDLE;
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
}
break;
case CALL_HOLD_ACK:
if (plci->SuppState == HOLD_REQUEST)
{
plci->SuppState = CALL_HELD;
CodecIdCheck(a, plci);
start_internal_command(Id, plci, hold_save_command);
}
break;
case CALL_RETRIEVE_REJ:
cau = parms[7];
if (cau)
{
i = _L3_CAUSE | cau[2];
if (cau[2] == 0) i = 0x3603;
}
else
{
i = 0x3603;
}
PUT_WORD(&SS_Ind[1], S_RETRIEVE);
PUT_WORD(&SS_Ind[4], i);
if (plci->SuppState == RETRIEVE_REQUEST)
{
plci->SuppState = CALL_HELD;
CodecIdCheck(a, plci);
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
}
break;
case CALL_RETRIEVE_ACK:
PUT_WORD(&SS_Ind[1], S_RETRIEVE);
if (plci->SuppState == RETRIEVE_REQUEST)
{
plci->SuppState = IDLE;
plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
plci->b_channel = esc_chi[esc_chi[0]]&0x1f;
if (plci->tel)
{
mixer_set_bchannel_id_esc(plci, plci->b_channel);
dbug(1, dprintf("RetrChannel=0x%x", plci->b_channel));
SetVoiceChannel(a->AdvCodecPLCI, esc_chi, a);
if (plci->B2_prot == B2_TRANSPARENT && plci->B3_prot == B3_TRANSPARENT)
{
dbug(1, dprintf("Get B-ch"));
start_internal_command(Id, plci, retrieve_restore_command);
}
else
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
}
else
start_internal_command(Id, plci, retrieve_restore_command);
}
break;
case INDICATE_IND:
if (plci->State != LISTENING) {
sig_req(plci, HANGUP, 0);
send_req(plci);
break;
}
cip = find_cip(a, parms[4], parms[6]);
cip_mask = 1L << cip;
dbug(1, dprintf("cip=%d,cip_mask=%lx", cip, cip_mask));
clear_c_ind_mask(plci);
if (!remove_started && !a->adapter_disabled)
{
set_c_ind_mask_bit(plci, MAX_APPL);
group_optimization(a, plci);
for (i = 0; i < max_appl; i++) {
if (application[i].Id
&& (a->CIP_Mask[i] & 1 || a->CIP_Mask[i] & cip_mask)
&& CPN_filter_ok(parms[0], a, i)
&& test_group_ind_mask_bit(plci, i)) {
dbug(1, dprintf("storedcip_mask[%d]=0x%lx", i, a->CIP_Mask[i]));
set_c_ind_mask_bit(plci, i);
dump_c_ind_mask(plci);
plci->State = INC_CON_PENDING;
plci->call_dir = (plci->call_dir & ~(CALL_DIR_OUT | CALL_DIR_ORIGINATE)) |
CALL_DIR_IN | CALL_DIR_ANSWER;
if (esc_chi[0]) {
plci->b_channel = esc_chi[esc_chi[0]] & 0x1f;
mixer_set_bchannel_id_esc(plci, plci->b_channel);
}
/* if a listen on the ext controller is done, check if hook states */
/* are supported or if just a on board codec must be activated */
if (a->codec_listen[i] && !a->AdvSignalPLCI) {
if (a->profile.Global_Options & HANDSET)
plci->tel = ADV_VOICE;
else if (a->profile.Global_Options & ON_BOARD_CODEC)
plci->tel = CODEC;
if (plci->tel) Id |= EXT_CONTROLLER;
a->codec_listen[i] = plci;
}
sendf(&application[i], _CONNECT_I, Id, 0,
"wSSSSSSSbSSSSS", cip, /* CIP */
parms[0], /* CalledPartyNumber */
multi_CiPN_parms[0], /* CallingPartyNumber */
parms[2], /* CalledPartySubad */
parms[3], /* CallingPartySubad */
parms[4], /* BearerCapability */
parms[5], /* LowLC */
parms[6], /* HighLC */
ai_len, /* nested struct add_i */
add_i[0], /* B channel info */
add_i[1], /* keypad facility */
add_i[2], /* user user data */
add_i[3], /* nested facility */
multi_CiPN_parms[1] /* second CiPN(SCR) */
);
SendSSExtInd(&application[i],
plci,
Id,
multi_ssext_parms);
SendSetupInfo(&application[i],
plci,
Id,
parms,
SendMultiIE(plci, Id, multi_pi_parms, PI, 0x210, true));
}
}
clear_c_ind_mask_bit(plci, MAX_APPL);
dump_c_ind_mask(plci);
}
if (c_ind_mask_empty(plci)) {
sig_req(plci, HANGUP, 0);
send_req(plci);
plci->State = IDLE;
}
plci->notifiedcall = 0;
a->listen_active--;
listen_check(a);
break;
case CALL_PEND_NOTIFY:
plci->notifiedcall = 1;
listen_check(a);
break;
case CALL_IND:
case CALL_CON:
if (plci->State == ADVANCED_VOICE_SIG || plci->State == ADVANCED_VOICE_NOSIG)
{
if (plci->internal_command == PERM_COD_CONN_PEND)
{
if (plci->State == ADVANCED_VOICE_NOSIG)
{
dbug(1, dprintf("***Codec OK"));
if (a->AdvSignalPLCI)
{
tplci = a->AdvSignalPLCI;
if (tplci->spoofed_msg)
{
dbug(1, dprintf("***Spoofed Msg(0x%x)", tplci->spoofed_msg));
tplci->command = 0;
tplci->internal_command = 0;
x_Id = ((word)tplci->Id << 8) | tplci->adapter->Id | 0x80;
switch (tplci->spoofed_msg)
{
case CALL_RES:
tplci->command = _CONNECT_I | RESPONSE;
api_load_msg(&tplci->saved_msg, saved_parms);
add_b1(tplci, &saved_parms[1], 0, tplci->B1_facilities);
if (tplci->adapter->Info_Mask[tplci->appl->Id - 1] & 0x200)
{
/* early B3 connect (CIP mask bit 9) no release after a disc */
add_p(tplci, LLI, "\x01\x01");
}
add_s(tplci, CONN_NR, &saved_parms[2]);
add_s(tplci, LLC, &saved_parms[4]);
add_ai(tplci, &saved_parms[5]);
tplci->State = INC_CON_ACCEPT;
sig_req(tplci, CALL_RES, 0);
send_req(tplci);
break;
case AWAITING_SELECT_B:
dbug(1, dprintf("Select_B continue"));
start_internal_command(x_Id, tplci, select_b_command);
break;
case AWAITING_MANUF_CON: /* Get_Plci per Manufacturer_Req to ext controller */
if (!tplci->Sig.Id)
{
dbug(1, dprintf("No SigID!"));
sendf(tplci->appl, _MANUFACTURER_R | CONFIRM, x_Id, tplci->number, "dww", _DI_MANU_ID, _MANUFACTURER_R, _OUT_OF_PLCI);
plci_remove(tplci);
break;
}
tplci->command = _MANUFACTURER_R;
api_load_msg(&tplci->saved_msg, saved_parms);
dir = saved_parms[2].info[0];
if (dir == 1) {
sig_req(tplci, CALL_REQ, 0);
}
else if (!dir) {
sig_req(tplci, LISTEN_REQ, 0);
}
send_req(tplci);
sendf(tplci->appl, _MANUFACTURER_R | CONFIRM, x_Id, tplci->number, "dww", _DI_MANU_ID, _MANUFACTURER_R, 0);
break;
case (CALL_REQ | AWAITING_MANUF_CON):
sig_req(tplci, CALL_REQ, 0);
send_req(tplci);
break;
case CALL_REQ:
if (!tplci->Sig.Id)
{
dbug(1, dprintf("No SigID!"));
sendf(tplci->appl, _CONNECT_R | CONFIRM, tplci->adapter->Id, 0, "w", _OUT_OF_PLCI);
plci_remove(tplci);
break;
}
tplci->command = _CONNECT_R;
api_load_msg(&tplci->saved_msg, saved_parms);
add_s(tplci, CPN, &saved_parms[1]);
add_s(tplci, DSA, &saved_parms[3]);
add_ai(tplci, &saved_parms[9]);
sig_req(tplci, CALL_REQ, 0);
send_req(tplci);
break;
case CALL_RETRIEVE:
tplci->command = C_RETRIEVE_REQ;
sig_req(tplci, CALL_RETRIEVE, 0);
send_req(tplci);
break;
}
tplci->spoofed_msg = 0;
if (tplci->internal_command == 0)
next_internal_command(x_Id, tplci);
}
}
next_internal_command(Id, plci);
break;
}
dbug(1, dprintf("***Codec Hook Init Req"));
plci->internal_command = PERM_COD_HOOK;
add_p(plci, FTY, "\x01\x09"); /* Get Hook State*/
sig_req(plci, TEL_CTRL, 0);
send_req(plci);
}
}
else if (plci->command != _MANUFACTURER_R /* old style permanent connect */
&& plci->State != INC_ACT_PENDING)
{
mixer_set_bchannel_id_esc(plci, plci->b_channel);
if (plci->tel == ADV_VOICE && plci->SuppState == IDLE) /* with permanent codec switch on immediately */
{
chi[2] = plci->b_channel;
SetVoiceChannel(a->AdvCodecPLCI, chi, a);
}
sendf(plci->appl, _CONNECT_ACTIVE_I, Id, 0, "Sss", parms[21], "", "");
plci->State = INC_ACT_PENDING;
}
break;
case TEL_CTRL:
ie = multi_fac_parms[0]; /* inspect the facility hook indications */
if (plci->State == ADVANCED_VOICE_SIG && ie[0]) {
switch (ie[1] & 0x91) {
case 0x80: /* hook off */
case 0x81:
if (plci->internal_command == PERM_COD_HOOK)
{
dbug(1, dprintf("init:hook_off"));
plci->hook_state = ie[1];
next_internal_command(Id, plci);
break;
}
else /* ignore doubled hook indications */
{
if (((plci->hook_state) & 0xf0) == 0x80)
{
dbug(1, dprintf("ignore hook"));
break;
}
plci->hook_state = ie[1]&0x91;
}
/* check for incoming call pending */
/* and signal '+'.Appl must decide */
/* with connect_res if call must */
/* accepted or not */
for (i = 0, tplci = NULL; i < max_appl; i++) {
if (a->codec_listen[i]
&& (a->codec_listen[i]->State == INC_CON_PENDING
|| a->codec_listen[i]->State == INC_CON_ALERT)) {
tplci = a->codec_listen[i];
tplci->appl = &application[i];
}
}
/* no incoming call, do outgoing call */
/* and signal '+' if outg. setup */
if (!a->AdvSignalPLCI && !tplci) {
if ((i = get_plci(a))) {
a->AdvSignalPLCI = &a->plci[i - 1];
tplci = a->AdvSignalPLCI;
tplci->tel = ADV_VOICE;
PUT_WORD(&voice_cai[5], a->AdvSignalAppl->MaxDataLength);
if (a->Info_Mask[a->AdvSignalAppl->Id - 1] & 0x200) {
/* early B3 connect (CIP mask bit 9) no release after a disc */
add_p(tplci, LLI, "\x01\x01");
}
add_p(tplci, CAI, voice_cai);
add_p(tplci, OAD, a->TelOAD);
add_p(tplci, OSA, a->TelOSA);
add_p(tplci, SHIFT | 6, NULL);
add_p(tplci, SIN, "\x02\x01\x00");
add_p(tplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
sig_req(tplci, ASSIGN, DSIG_ID);
a->AdvSignalPLCI->internal_command = HOOK_OFF_REQ;
a->AdvSignalPLCI->command = 0;
tplci->appl = a->AdvSignalAppl;
tplci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
send_req(tplci);
}
}
if (!tplci) break;
Id = ((word)tplci->Id << 8) | a->Id;
Id |= EXT_CONTROLLER;
sendf(tplci->appl,
_FACILITY_I,
Id,
0,
"ws", (word)0, "\x01+");
break;
case 0x90: /* hook on */
case 0x91:
if (plci->internal_command == PERM_COD_HOOK)
{
dbug(1, dprintf("init:hook_on"));
plci->hook_state = ie[1] & 0x91;
next_internal_command(Id, plci);
break;
}
else /* ignore doubled hook indications */
{
if (((plci->hook_state) & 0xf0) == 0x90) break;
plci->hook_state = ie[1] & 0x91;
}
/* hangup the adv. voice call and signal '-' to the appl */
if (a->AdvSignalPLCI) {
Id = ((word)a->AdvSignalPLCI->Id << 8) | a->Id;
if (plci->tel) Id |= EXT_CONTROLLER;
sendf(a->AdvSignalAppl,
_FACILITY_I,
Id,
0,
"ws", (word)0, "\x01-");
a->AdvSignalPLCI->internal_command = HOOK_ON_REQ;
a->AdvSignalPLCI->command = 0;
sig_req(a->AdvSignalPLCI, HANGUP, 0);
send_req(a->AdvSignalPLCI);
}
break;
}
}
break;
case RESUME:
clear_c_ind_mask_bit(plci, (word)(plci->appl->Id - 1));
PUT_WORD(&resume_cau[4], GOOD);
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau);
break;
case SUSPEND:
clear_c_ind_mask(plci);
if (plci->NL.Id && !plci->nl_remove_id) {
mixer_remove(plci);
nl_req_ncci(plci, REMOVE, 0);
}
if (!plci->sig_remove_id) {
plci->internal_command = 0;
sig_req(plci, REMOVE, 0);
}
send_req(plci);
if (!plci->channels) {
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, "\x05\x04\x00\x02\x00\x00");
sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", 0);
}
break;
case SUSPEND_REJ:
break;
case HANGUP:
plci->hangup_flow_ctrl_timer = 0;
if (plci->manufacturer && plci->State == LOCAL_CONNECT) break;
cau = parms[7];
if (cau) {
i = _L3_CAUSE | cau[2];
if (cau[2] == 0) i = 0;
else if (cau[2] == 8) i = _L1_ERROR;
else if (cau[2] == 9 || cau[2] == 10) i = _L2_ERROR;
else if (cau[2] == 5) i = _CAPI_GUARD_ERROR;
}
else {
i = _L3_ERROR;
}
if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT)
{
for (i = 0; i < max_appl; i++)
{
if (test_c_ind_mask_bit(plci, i))
sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
}
}
else
{
clear_c_ind_mask(plci);
}
if (!plci->appl)
{
if (plci->State == LISTENING)
{
plci->notifiedcall = 0;
a->listen_active--;
}
plci->State = INC_DIS_PENDING;
if (c_ind_mask_empty(plci))
{
plci->State = IDLE;
if (plci->NL.Id && !plci->nl_remove_id)
{
mixer_remove(plci);
nl_req_ncci(plci, REMOVE, 0);
}
if (!plci->sig_remove_id)
{
plci->internal_command = 0;
sig_req(plci, REMOVE, 0);
}
send_req(plci);
}
}
else
{
/* collision of DISCONNECT or CONNECT_RES with HANGUP can */
/* result in a second HANGUP! Don't generate another */
/* DISCONNECT */
if (plci->State != IDLE && plci->State != INC_DIS_PENDING)
{
if (plci->State == RESUMING)
{
PUT_WORD(&resume_cau[4], i);
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau);
}
plci->State = INC_DIS_PENDING;
sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", i);
}
}
break;
case SSEXT_IND:
SendSSExtInd(NULL, plci, Id, multi_ssext_parms);
break;
case VSWITCH_REQ:
VSwitchReqInd(plci, Id, multi_vswitch_parms);
break;
case VSWITCH_IND:
if (plci->relatedPTYPLCI &&
plci->vswitchstate == 3 &&
plci->relatedPTYPLCI->vswitchstate == 3 &&
parms[MAXPARMSIDS - 1][0])
{
add_p(plci->relatedPTYPLCI, SMSG, parms[MAXPARMSIDS - 1]);
sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
send_req(plci->relatedPTYPLCI);
}
else VSwitchReqInd(plci, Id, multi_vswitch_parms);
break;
}
}
/*
 * Forward the info elements of an incoming SETUP to one application as
 * CAPI INFO_IND messages, honouring the application's CAPI Info_Mask.
 * Returns nothing; the caller chains the Info_Sent_Flag result of
 * SendMultiIE() into the last parameter so that the final "message type
 * Setup" escape element is only reported when something was sent before.
 */
static void SendSetupInfo(APPL *appl, PLCI *plci, dword Id, byte **parms, byte Info_Sent_Flag)
{
	word idx;
	word number;      /* CAPI info number (Q.931 IE identifier)        */
	word mask = 0;    /* Info_Mask bit that enables this info number   */
	byte *element;

	dbug(1, dprintf("SetupInfo"));
	for (idx = 0; idx < MAXPARMSIDS; idx++) {
		byte *ie = parms[idx];

		number = 0;
		element = ie;
		if (ie[0]) {
			switch (idx) {
			case 0: /* called party number */
				dbug(1, dprintf("CPN "));
				number = 0x0070;
				mask = 0x80;
				Info_Sent_Flag = true;
				break;
			case 8: /* display */
				dbug(1, dprintf("display(%d)", idx));
				number = 0x0028;
				mask = 0x04;
				Info_Sent_Flag = true;
				break;
			case 16: /* channel identification */
				dbug(1, dprintf("CHI"));
				number = 0x0018;
				mask = 0x100;
				Info_Sent_Flag = true;
				/* side effect: latch the B-channel id on the PLCI */
				mixer_set_bchannel_id(plci, element);
				break;
			case 19: /* redirected number */
				dbug(1, dprintf("RDN"));
				number = 0x0074;
				mask = 0x400;
				Info_Sent_Flag = true;
				break;
			case 20: /* redirected number extended */
				dbug(1, dprintf("RDX"));
				number = 0x0073;
				mask = 0x400;
				Info_Sent_Flag = true;
				break;
			case 22: /* redirecting number */
				dbug(1, dprintf("RIN"));
				number = 0x0076;
				mask = 0x400;
				Info_Sent_Flag = true;
				break;
			default:
				number = 0;
				break;
			}
		}
		if (idx == MAXPARMSIDS - 2) { /* to indicate the message type "Setup" */
			number = 0x8000 | 5;
			mask = 0x10;
			element = "";
		}
		/* deliver only if something was sent so far and the
		   application subscribed to this class of information */
		if (!Info_Sent_Flag || !number)
			continue;
		if (plci->adapter->Info_Mask[appl->Id - 1] & mask)
			sendf(appl, _INFO_I, Id, 0, "wS", number, element);
	}
}
/*
 * Parse the info elements of a signalling indication and forward them as
 * CAPI INFO_IND messages.  Depending on the PLCI state the elements are
 * delivered to the controlling application, broadcast to all listening
 * applications (overlap receiving), or broadcast on behalf of the
 * controller (NCR_FACILITY).  'iesent' tells whether a previous stage
 * (SendMultiIE) already delivered something; it widens the mask for the
 * final escape/message-type element.
 */
static void SendInfo(PLCI *plci, dword Id, byte **parms, byte iesent)
{
	word i;
	word j;
	word k;
	byte *ie;
	word Info_Number;     /* CAPI info number for the current element      */
	byte *Info_Element;   /* payload forwarded with the INFO_IND           */
	word Info_Mask = 0;   /* Info_Mask bit gating delivery                 */
	/* NOTE(review): static buffers - assumes the CAPI message engine is
	   single-threaded/serialized, as elsewhere in this driver */
	static byte charges[5] = {4, 0, 0, 0, 0};
	static byte cause[] = {0x02, 0x80, 0x00};
	APPL *appl;
	dbug(1, dprintf("InfoParse "));
	if (
		!plci->appl
		&& !plci->State
		&& plci->Sig.Ind != NCR_FACILITY
		)
	{
		/* nobody can receive this info - skip parsing entirely */
		dbug(1, dprintf("NoParse "));
		return;
	}
	cause[2] = 0; /* forget any escape cause from a previous indication */
	for (i = 0; i < MAXPARMSIDS; i++) {
		ie = parms[i];
		Info_Number = 0;
		Info_Element = ie;
		if (ie[0]) {
			switch (i) {
			case 0: /* called party number */
				dbug(1, dprintf("CPN "));
				Info_Number = 0x0070;
				Info_Mask = 0x80;
				break;
			case 7: /* ESC_CAU - remember it, but send nothing yet
				   (mask 0); case 17 below decides what to emit */
				dbug(1, dprintf("cau(0x%x)", ie[2]));
				Info_Number = 0x0008;
				Info_Mask = 0x00;
				cause[2] = ie[2];
				Info_Element = NULL;
				break;
			case 8: /* display */
				dbug(1, dprintf("display(%d)", i));
				Info_Number = 0x0028;
				Info_Mask = 0x04;
				break;
			case 9: /* Date display */
				dbug(1, dprintf("date(%d)", i));
				Info_Number = 0x0029;
				Info_Mask = 0x02;
				break;
			case 10: /* charges: skip digits up to the first byte with
				    bit 7 set, then copy at most 4 charge bytes */
				for (j = 0; j < 4; j++) charges[1 + j] = 0;
				for (j = 0; j < ie[0] && !(ie[1 + j] & 0x80); j++);
				for (k = 1, j++; j < ie[0] && k <= 4; j++, k++) charges[k] = ie[1 + j];
				Info_Number = 0x4000;
				Info_Mask = 0x40;
				Info_Element = charges;
				break;
			case 11: /* user user info */
				dbug(1, dprintf("uui"));
				Info_Number = 0x007E;
				Info_Mask = 0x08;
				break;
			case 12: /* congestion receiver ready */
				dbug(1, dprintf("clRDY"));
				Info_Number = 0x00B0;
				Info_Mask = 0x08;
				Info_Element = "";
				break;
			case 13: /* congestion receiver not ready */
				dbug(1, dprintf("clNRDY"));
				Info_Number = 0x00BF;
				Info_Mask = 0x08;
				Info_Element = "";
				break;
			case 15: /* Keypad Facility */
				dbug(1, dprintf("KEY"));
				Info_Number = 0x002C;
				Info_Mask = 0x20;
				break;
			case 16: /* Channel Id - also latch the B-channel on the PLCI */
				dbug(1, dprintf("CHI"));
				Info_Number = 0x0018;
				Info_Mask = 0x100;
				mixer_set_bchannel_id(plci, Info_Element);
				break;
			case 17: /* if no 1tr6 cause, send full cause, else esc_cause */
				dbug(1, dprintf("q9cau(0x%x)", ie[2]));
				if (!cause[2] || cause[2] < 0x80) break; /* eg. layer 1 error */
				Info_Number = 0x0008;
				Info_Mask = 0x01;
				if (cause[2] != ie[2]) Info_Element = cause;
				break;
			case 19: /* Redirected Number */
				dbug(1, dprintf("RDN"));
				Info_Number = 0x0074;
				Info_Mask = 0x400;
				break;
			case 22: /* Redirecting Number */
				dbug(1, dprintf("RIN"));
				Info_Number = 0x0076;
				Info_Mask = 0x400;
				break;
			case 23: /* Notification Indicator */
				dbug(1, dprintf("NI"));
				Info_Number = (word)NI;
				Info_Mask = 0x210;
				break;
			case 26: /* Call State */
				dbug(1, dprintf("CST"));
				Info_Number = (word)CST;
				Info_Mask = 0x01; /* do with cause i.e. for now */
				break;
			case MAXPARMSIDS - 2: /* Escape Message Type, must be the last indication */
				dbug(1, dprintf("ESC/MT[0x%x]", ie[3]));
				Info_Number = 0x8000 | ie[3];
				/* if something was already delivered, always report
				   the message type; otherwise only with mask 0x10 */
				if (iesent) Info_Mask = 0xffff;
				else Info_Mask = 0x10;
				Info_Element = "";
				break;
			default:
				Info_Number = 0;
				Info_Mask = 0;
				Info_Element = "";
				break;
			}
		}
		if (plci->Sig.Ind == NCR_FACILITY) /* check controller broadcast */
		{
			/* controller-level indication: broadcast to every
			   registered application that subscribed to this info */
			for (j = 0; j < max_appl; j++)
			{
				appl = &application[j];
				if (Info_Number
				    && appl->Id
				    && plci->adapter->Info_Mask[appl->Id - 1] & Info_Mask)
				{
					dbug(1, dprintf("NCR_Ind"));
					iesent = true;
					/* Id & 0x0f: controller address only, no PLCI */
					sendf(&application[j], _INFO_I, Id & 0x0f, 0, "wS", Info_Number, Info_Element);
				}
			}
		}
		else if (!plci->appl)
		{ /* overlap receiving broadcast - only selected info numbers
		     go to all applications still in the indication mask */
			if (Info_Number == CPN
			    || Info_Number == KEY
			    || Info_Number == NI
			    || Info_Number == DSP
			    || Info_Number == UUI)
			{
				for (j = 0; j < max_appl; j++)
				{
					if (test_c_ind_mask_bit(plci, j))
					{
						dbug(1, dprintf("Ovl_Ind"));
						iesent = true;
						sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
					}
				}
			}
		} /* all other signalling states */
		else if (Info_Number
			 && plci->adapter->Info_Mask[plci->appl->Id - 1] & Info_Mask)
		{
			dbug(1, dprintf("Std_Ind"));
			iesent = true;
			sendf(plci->appl, _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
		}
	}
}
/*
 * Forward every occurrence of a repeatable info element (e.g. PI, NI)
 * as a CAPI INFO_IND.  Delivery targets mirror SendInfo(): controller
 * broadcast for NCR_FACILITY, listening-application broadcast while no
 * application owns the PLCI, otherwise the owning application.
 * Returns nonzero when at least one INFO_IND was sent.
 */
static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type,
			dword info_mask, byte setupParse)
{
	word idx;
	word ap;
	byte sent = 0;

	if (!plci->appl
	    && !plci->State
	    && plci->Sig.Ind != NCR_FACILITY
	    && !setupParse)
	{
		/* nobody can receive this info - skip parsing */
		dbug(1, dprintf("NoM-IEParse "));
		return 0;
	}
	dbug(1, dprintf("M-IEParse "));
	for (idx = 0; idx < MAX_MULTI_IE; idx++)
	{
		byte *element = parms[idx];

		if (!element[0])
			continue; /* empty slot: nothing to deliver */
		dbug(1, dprintf("[Ind0x%x]:IE=0x%x", plci->Sig.Ind, ie_type));
		if (plci->Sig.Ind == NCR_FACILITY) /* check controller broadcast */
		{
			for (ap = 0; ap < max_appl; ap++)
			{
				APPL *target = &application[ap];

				if (target->Id
				    && (plci->adapter->Info_Mask[target->Id - 1] & (word)info_mask))
				{
					sent = true;
					dbug(1, dprintf("Mlt_NCR_Ind"));
					/* Id & 0x0f: controller address only */
					sendf(target, _INFO_I, Id & 0x0f, 0, "wS",
					      (word)ie_type, element);
				}
			}
		}
		else if (!plci->appl)
		{ /* overlap receiving broadcast */
			for (ap = 0; ap < max_appl; ap++)
			{
				if (test_c_ind_mask_bit(plci, ap))
				{
					sent = true;
					dbug(1, dprintf("Mlt_Ovl_Ind"));
					sendf(&application[ap], _INFO_I, Id, 0, "wS",
					      (word)ie_type, element);
				}
			}
		} /* all other signalling states */
		else if (plci->adapter->Info_Mask[plci->appl->Id - 1] & (word)info_mask)
		{
			sent = true;
			dbug(1, dprintf("Mlt_Std_Ind"));
			sendf(plci->appl, _INFO_I, Id, 0, "wS",
			      (word)ie_type, element);
		}
	}
	return sent;
}
/*
 * Forward supplementary-service-extension indications to an application
 * as MANUFACTURER_IND messages.
 *
 * If 'appl' is non-NULL it is the explicit target (broadcast caller has
 * already chosen the application); otherwise the PLCI's owning
 * application receives the indication.  Each delivered IE is marked
 * consumed (length set to 0) so it cannot be sent twice.
 *
 * Fixes vs. original: removed the stray ';' after the function body
 * (a C constraint violation), braced the multi-line 'for' body, and
 * folded the two duplicated sendf() branches into one.
 */
static void SendSSExtInd(APPL *appl, PLCI *plci, dword Id, byte **parms)
{
	word i;
	/* Format of multi_ssext_parms[i][]:
	   0 byte length
	   1 byte SSEXTIE
	   2 byte SSEXT_REQ/SSEXT_IND
	   3 byte length
	   4 word SSExtCommand
	   6... Params
	*/
	if (plci
	    && plci->State
	    && plci->Sig.Ind != NCR_FACILITY)
	{
		for (i = 0; i < MAX_MULTI_IE; i++)
		{
			APPL *target;

			if (parms[i][0] < 6)
				continue; /* too short to carry an SSExtCommand */
			if (parms[i][2] == SSEXT_REQ)
				continue; /* requests are not forwarded, only indications */
			/* explicit target takes precedence over the PLCI owner */
			target = appl ? appl : plci->appl;
			if (!target)
				continue;
			parms[i][0] = 0; /* kill it - consume so it is sent only once */
			sendf(target, _MANUFACTURER_I,
			      Id,
			      0,
			      "dwS",
			      _DI_MANU_ID,
			      _DI_SSEXT_CTRL,
			      &parms[i][3]);
		}
	}
}
static void nl_ind(PLCI *plci)
{
byte ch;
word ncci;
dword Id;
DIVA_CAPI_ADAPTER *a;
word NCCIcode;
APPL *APPLptr;
word count;
word Num;
word i, ncpi_state;
byte len, ncci_state;
word msg;
word info = 0;
word fax_feature_bits;
byte fax_send_edata_ack;
static byte v120_header_buffer[2 + 3];
static word fax_info[] = {
0, /* T30_SUCCESS */
_FAX_NO_CONNECTION, /* T30_ERR_NO_DIS_RECEIVED */
_FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_NO_RESPONSE */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_RESPONSE */
_FAX_PROTOCOL_ERROR, /* T30_ERR_TOO_MANY_REPEATS */
_FAX_PROTOCOL_ERROR, /* T30_ERR_UNEXPECTED_MESSAGE */
_FAX_REMOTE_ABORT, /* T30_ERR_UNEXPECTED_DCN */
_FAX_LOCAL_ABORT, /* T30_ERR_DTC_UNSUPPORTED */
_FAX_TRAINING_ERROR, /* T30_ERR_ALL_RATES_FAILED */
_FAX_TRAINING_ERROR, /* T30_ERR_TOO_MANY_TRAINS */
_FAX_PARAMETER_ERROR, /* T30_ERR_RECEIVE_CORRUPTED */
_FAX_REMOTE_ABORT, /* T30_ERR_UNEXPECTED_DISC */
_FAX_LOCAL_ABORT, /* T30_ERR_APPLICATION_DISC */
_FAX_REMOTE_REJECT, /* T30_ERR_INCOMPATIBLE_DIS */
_FAX_LOCAL_ABORT, /* T30_ERR_INCOMPATIBLE_DCS */
_FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_NO_COMMAND */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_COMMAND */
_FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_COMMAND_TOO_LONG */
_FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_RESPONSE_TOO_LONG */
_FAX_NO_CONNECTION, /* T30_ERR_NOT_IDENTIFIED */
_FAX_PROTOCOL_ERROR, /* T30_ERR_SUPERVISORY_TIMEOUT */
_FAX_PARAMETER_ERROR, /* T30_ERR_TOO_LONG_SCAN_LINE */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_PAGE_AFTER_MPS */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_PAGE_AFTER_CFR */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_FTT */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_EOM */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_MPS */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCN_AFTER_MCF */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCN_AFTER_RTN */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_CFR */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_EOP */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_EOM */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_MPS */
0x331d, /* T30_ERR_SUB_SEP_UNSUPPORTED */
0x331e, /* T30_ERR_PWD_UNSUPPORTED */
0x331f, /* T30_ERR_SUB_SEP_PWD_UNSUPPORTED */
_FAX_PROTOCOL_ERROR, /* T30_ERR_INVALID_COMMAND_FRAME */
_FAX_PARAMETER_ERROR, /* T30_ERR_UNSUPPORTED_PAGE_CODING */
_FAX_PARAMETER_ERROR, /* T30_ERR_INVALID_PAGE_CODING */
_FAX_REMOTE_REJECT, /* T30_ERR_INCOMPATIBLE_PAGE_CONFIG */
_FAX_LOCAL_ABORT, /* T30_ERR_TIMEOUT_FROM_APPLICATION */
_FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_NO_REACTION_ON_MARK */
_FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_TRAINING_TIMEOUT */
_FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_UNEXPECTED_V21 */
_FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_PRIMARY_CTS_ON */
_FAX_LOCAL_ABORT, /* T30_ERR_V34FAX_TURNAROUND_POLLING */
_FAX_LOCAL_ABORT /* T30_ERR_V34FAX_V8_INCOMPATIBILITY */
};
byte dtmf_code_buffer[CAPIDTMF_RECV_DIGIT_BUFFER_SIZE + 1];
static word rtp_info[] = {
GOOD, /* RTP_SUCCESS */
0x3600 /* RTP_ERR_SSRC_OR_PAYLOAD_CHANGE */
};
static dword udata_forwarding_table[0x100 / sizeof(dword)] =
{
0x0020301e, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000
};
ch = plci->NL.IndCh;
a = plci->adapter;
ncci = a->ch_ncci[ch];
Id = (((dword)(ncci ? ncci : ch)) << 16) | (((word) plci->Id) << 8) | a->Id;
if (plci->tel) Id |= EXT_CONTROLLER;
APPLptr = plci->appl;
dbug(1, dprintf("NL_IND-Id(NL:0x%x)=0x%08lx,plci=%x,tel=%x,state=0x%x,ch=0x%x,chs=%d,Ind=%x",
plci->NL.Id, Id, plci->Id, plci->tel, plci->State, ch, plci->channels, plci->NL.Ind & 0x0f));
/* in the case if no connect_active_Ind was sent to the appl we wait for */
if (plci->nl_remove_id)
{
plci->NL.RNR = 2; /* discard */
dbug(1, dprintf("NL discard while remove pending"));
return;
}
if ((plci->NL.Ind & 0x0f) == N_CONNECT)
{
if (plci->State == INC_DIS_PENDING
|| plci->State == OUTG_DIS_PENDING
|| plci->State == IDLE)
{
plci->NL.RNR = 2; /* discard */
dbug(1, dprintf("discard n_connect"));
return;
}
if (plci->State < INC_ACT_PENDING)
{
plci->NL.RNR = 1; /* flow control */
channel_x_off(plci, ch, N_XON_CONNECT_IND);
return;
}
}
if (!APPLptr) /* no application or invalid data */
{ /* while reloading the DSP */
dbug(1, dprintf("discard1"));
plci->NL.RNR = 2;
return;
}
if (((plci->NL.Ind & 0x0f) == N_UDATA)
&& (((plci->B2_prot != B2_SDLC) && ((plci->B1_resource == 17) || (plci->B1_resource == 18)))
|| (plci->B2_prot == 7)
|| (plci->B3_prot == 7)))
{
plci->ncpi_buffer[0] = 0;
ncpi_state = plci->ncpi_state;
if (plci->NL.complete == 1)
{
byte *data = &plci->NL.RBuffer->P[0];
if ((plci->NL.RBuffer->length >= 12)
&& ((*data == DSP_UDATA_INDICATION_DCD_ON)
|| (*data == DSP_UDATA_INDICATION_CTS_ON)))
{
word conn_opt, ncpi_opt = 0x00;
/* HexDump ("MDM N_UDATA:", plci->NL.RBuffer->length, data); */
if (*data == DSP_UDATA_INDICATION_DCD_ON)
plci->ncpi_state |= NCPI_MDM_DCD_ON_RECEIVED;
if (*data == DSP_UDATA_INDICATION_CTS_ON)
plci->ncpi_state |= NCPI_MDM_CTS_ON_RECEIVED;
data++; /* indication code */
data += 2; /* timestamp */
if ((*data == DSP_CONNECTED_NORM_V18) || (*data == DSP_CONNECTED_NORM_VOWN))
ncpi_state &= ~(NCPI_MDM_DCD_ON_RECEIVED | NCPI_MDM_CTS_ON_RECEIVED);
data++; /* connected norm */
conn_opt = GET_WORD(data);
data += 2; /* connected options */
PUT_WORD(&(plci->ncpi_buffer[1]), (word)(GET_DWORD(data) & 0x0000FFFF));
if (conn_opt & DSP_CONNECTED_OPTION_MASK_V42)
{
ncpi_opt |= MDM_NCPI_ECM_V42;
}
else if (conn_opt & DSP_CONNECTED_OPTION_MASK_MNP)
{
ncpi_opt |= MDM_NCPI_ECM_MNP;
}
else
{
ncpi_opt |= MDM_NCPI_TRANSPARENT;
}
if (conn_opt & DSP_CONNECTED_OPTION_MASK_COMPRESSION)
{
ncpi_opt |= MDM_NCPI_COMPRESSED;
}
PUT_WORD(&(plci->ncpi_buffer[3]), ncpi_opt);
plci->ncpi_buffer[0] = 4;
plci->ncpi_state |= NCPI_VALID_CONNECT_B3_IND | NCPI_VALID_CONNECT_B3_ACT | NCPI_VALID_DISC_B3_IND;
}
}
if (plci->B3_prot == 7)
{
if (((a->ncci_state[ncci] == INC_ACT_PENDING) || (a->ncci_state[ncci] == OUTG_CON_PENDING))
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
{
a->ncci_state[ncci] = INC_ACT_PENDING;
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
}
}
if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
& ((1L << PRIVATE_V18) | (1L << PRIVATE_VOWN)))
|| !(ncpi_state & NCPI_MDM_DCD_ON_RECEIVED)
|| !(ncpi_state & NCPI_MDM_CTS_ON_RECEIVED))
{
plci->NL.RNR = 2;
return;
}
}
if (plci->NL.complete == 2)
{
if (((plci->NL.Ind & 0x0f) == N_UDATA)
&& !(udata_forwarding_table[plci->RData[0].P[0] >> 5] & (1L << (plci->RData[0].P[0] & 0x1f))))
{
switch (plci->RData[0].P[0])
{
case DTMF_UDATA_INDICATION_FAX_CALLING_TONE:
if (plci->dtmf_rec_active & DTMF_LISTEN_ACTIVE_FLAG)
sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", SELECTOR_DTMF, "\x01X");
break;
case DTMF_UDATA_INDICATION_ANSWER_TONE:
if (plci->dtmf_rec_active & DTMF_LISTEN_ACTIVE_FLAG)
sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", SELECTOR_DTMF, "\x01Y");
break;
case DTMF_UDATA_INDICATION_DIGITS_RECEIVED:
dtmf_indication(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
break;
case DTMF_UDATA_INDICATION_DIGITS_SENT:
dtmf_confirmation(Id, plci);
break;
case UDATA_INDICATION_MIXER_TAP_DATA:
capidtmf_recv_process_block(&(plci->capidtmf_state), plci->RData[0].P + 1, (word)(plci->RData[0].PLength - 1));
i = capidtmf_indication(&(plci->capidtmf_state), dtmf_code_buffer + 1);
if (i != 0)
{
dtmf_code_buffer[0] = DTMF_UDATA_INDICATION_DIGITS_RECEIVED;
dtmf_indication(Id, plci, dtmf_code_buffer, (word)(i + 1));
}
break;
case UDATA_INDICATION_MIXER_COEFS_SET:
mixer_indication_coefs_set(Id, plci);
break;
case UDATA_INDICATION_XCONNECT_FROM:
mixer_indication_xconnect_from(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
break;
case UDATA_INDICATION_XCONNECT_TO:
mixer_indication_xconnect_to(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
break;
case LEC_UDATA_INDICATION_DISABLE_DETECT:
ec_indication(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
break;
default:
break;
}
}
else
{
if ((plci->RData[0].PLength != 0)
&& ((plci->B2_prot == B2_V120_ASYNC)
|| (plci->B2_prot == B2_V120_ASYNC_V42BIS)
|| (plci->B2_prot == B2_V120_BIT_TRANSPARENT)))
{
sendf(plci->appl, _DATA_B3_I, Id, 0,
"dwww",
plci->RData[1].P,
(plci->NL.RNum < 2) ? 0 : plci->RData[1].PLength,
plci->RNum,
plci->RFlags);
}
else
{
sendf(plci->appl, _DATA_B3_I, Id, 0,
"dwww",
plci->RData[0].P,
plci->RData[0].PLength,
plci->RNum,
plci->RFlags);
}
}
return;
}
fax_feature_bits = 0;
if ((plci->NL.Ind & 0x0f) == N_CONNECT ||
(plci->NL.Ind & 0x0f) == N_CONNECT_ACK ||
(plci->NL.Ind & 0x0f) == N_DISC ||
(plci->NL.Ind & 0x0f) == N_EDATA ||
(plci->NL.Ind & 0x0f) == N_DISC_ACK)
{
info = 0;
plci->ncpi_buffer[0] = 0;
switch (plci->B3_prot) {
case 0: /*XPARENT*/
case 1: /*T.90 NL*/
break; /* no network control protocol info - jfr */
case 2: /*ISO8202*/
case 3: /*X25 DCE*/
for (i = 0; i < plci->NL.RLength; i++) plci->ncpi_buffer[4 + i] = plci->NL.RBuffer->P[i];
plci->ncpi_buffer[0] = (byte)(i + 3);
plci->ncpi_buffer[1] = (byte)(plci->NL.Ind & N_D_BIT ? 1 : 0);
plci->ncpi_buffer[2] = 0;
plci->ncpi_buffer[3] = 0;
break;
case 4: /*T.30 - FAX*/
case 5: /*T.30 - FAX*/
if (plci->NL.RLength >= sizeof(T30_INFO))
{
dbug(1, dprintf("FaxStatus %04x", ((T30_INFO *)plci->NL.RBuffer->P)->code));
len = 9;
PUT_WORD(&(plci->ncpi_buffer[1]), ((T30_INFO *)plci->NL.RBuffer->P)->rate_div_2400 * 2400);
fax_feature_bits = GET_WORD(&((T30_INFO *)plci->NL.RBuffer->P)->feature_bits_low);
i = (((T30_INFO *)plci->NL.RBuffer->P)->resolution & T30_RESOLUTION_R8_0770_OR_200) ? 0x0001 : 0x0000;
if (plci->B3_prot == 5)
{
if (!(fax_feature_bits & T30_FEATURE_BIT_ECM))
i |= 0x8000; /* This is not an ECM connection */
if (fax_feature_bits & T30_FEATURE_BIT_T6_CODING)
i |= 0x4000; /* This is a connection with MMR compression */
if (fax_feature_bits & T30_FEATURE_BIT_2D_CODING)
i |= 0x2000; /* This is a connection with MR compression */
if (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS)
i |= 0x0004; /* More documents */
if (fax_feature_bits & T30_FEATURE_BIT_POLLING)
i |= 0x0002; /* Fax-polling indication */
}
dbug(1, dprintf("FAX Options %04x %04x", fax_feature_bits, i));
PUT_WORD(&(plci->ncpi_buffer[3]), i);
PUT_WORD(&(plci->ncpi_buffer[5]), ((T30_INFO *)plci->NL.RBuffer->P)->data_format);
plci->ncpi_buffer[7] = ((T30_INFO *)plci->NL.RBuffer->P)->pages_low;
plci->ncpi_buffer[8] = ((T30_INFO *)plci->NL.RBuffer->P)->pages_high;
plci->ncpi_buffer[len] = 0;
if (((T30_INFO *)plci->NL.RBuffer->P)->station_id_len)
{
plci->ncpi_buffer[len] = 20;
for (i = 0; i < T30_MAX_STATION_ID_LENGTH; i++)
plci->ncpi_buffer[++len] = ((T30_INFO *)plci->NL.RBuffer->P)->station_id[i];
}
if (((plci->NL.Ind & 0x0f) == N_DISC) || ((plci->NL.Ind & 0x0f) == N_DISC_ACK))
{
if (((T30_INFO *)plci->NL.RBuffer->P)->code < ARRAY_SIZE(fax_info))
info = fax_info[((T30_INFO *)plci->NL.RBuffer->P)->code];
else
info = _FAX_PROTOCOL_ERROR;
}
if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
& ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
{
i = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + ((T30_INFO *)plci->NL.RBuffer->P)->head_line_len;
while (i < plci->NL.RBuffer->length)
plci->ncpi_buffer[++len] = plci->NL.RBuffer->P[i++];
}
plci->ncpi_buffer[0] = len;
fax_feature_bits = GET_WORD(&((T30_INFO *)plci->NL.RBuffer->P)->feature_bits_low);
PUT_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->feature_bits_low, fax_feature_bits);
plci->ncpi_state |= NCPI_VALID_CONNECT_B3_IND;
if (((plci->NL.Ind & 0x0f) == N_CONNECT_ACK)
|| (((plci->NL.Ind & 0x0f) == N_CONNECT)
&& (fax_feature_bits & T30_FEATURE_BIT_POLLING))
|| (((plci->NL.Ind & 0x0f) == N_EDATA)
&& ((((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_TRAIN_OK)
|| (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DIS)
|| (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DTC))))
{
plci->ncpi_state |= NCPI_VALID_CONNECT_B3_ACT;
}
if (((plci->NL.Ind & 0x0f) == N_DISC)
|| ((plci->NL.Ind & 0x0f) == N_DISC_ACK)
|| (((plci->NL.Ind & 0x0f) == N_EDATA)
&& (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_EOP_CAPI)))
{
plci->ncpi_state |= NCPI_VALID_CONNECT_B3_ACT | NCPI_VALID_DISC_B3_IND;
}
}
break;
case B3_RTP:
if (((plci->NL.Ind & 0x0f) == N_DISC) || ((plci->NL.Ind & 0x0f) == N_DISC_ACK))
{
if (plci->NL.RLength != 0)
{
info = rtp_info[plci->NL.RBuffer->P[0]];
plci->ncpi_buffer[0] = plci->NL.RLength - 1;
for (i = 1; i < plci->NL.RLength; i++)
plci->ncpi_buffer[i] = plci->NL.RBuffer->P[i];
}
}
break;
}
plci->NL.RNR = 2;
}
switch (plci->NL.Ind & 0x0f) {
case N_EDATA:
if ((plci->B3_prot == 4) || (plci->B3_prot == 5))
{
dbug(1, dprintf("EDATA ncci=0x%x state=%d code=%02x", ncci, a->ncci_state[ncci],
((T30_INFO *)plci->NL.RBuffer->P)->code));
fax_send_edata_ack = (((T30_INFO *)(plci->fax_connect_info_buffer))->operating_mode == T30_OPERATING_MODE_CAPI_NEG);
if ((plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
&& (plci->nsf_control_bits & (T30_NSF_CONTROL_BIT_NEGOTIATE_IND | T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
&& (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DIS)
&& (a->ncci_state[ncci] == OUTG_CON_PENDING)
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_NEGOTIATE_B3_SENT))
{
((T30_INFO *)(plci->fax_connect_info_buffer))->code = ((T30_INFO *)plci->NL.RBuffer->P)->code;
sendf(plci->appl, _MANUFACTURER_I, Id, 0, "dwbS", _DI_MANU_ID, _DI_NEGOTIATE_B3,
(byte)(plci->ncpi_buffer[0] + 1), plci->ncpi_buffer);
plci->ncpi_state |= NCPI_NEGOTIATE_B3_SENT;
if (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP)
fax_send_edata_ack = false;
}
if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
{
switch (((T30_INFO *)plci->NL.RBuffer->P)->code)
{
case EDATA_T30_DIS:
if ((a->ncci_state[ncci] == OUTG_CON_PENDING)
&& !(GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low) & T30_CONTROL_BIT_REQUEST_POLLING)
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
{
a->ncci_state[ncci] = INC_ACT_PENDING;
if (plci->B3_prot == 4)
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
else
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
}
break;
case EDATA_T30_TRAIN_OK:
if ((a->ncci_state[ncci] == INC_ACT_PENDING)
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
{
if (plci->B3_prot == 4)
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
else
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
}
break;
case EDATA_T30_EOP_CAPI:
if (a->ncci_state[ncci] == CONNECTED)
{
sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", GOOD, plci->ncpi_buffer);
a->ncci_state[ncci] = INC_DIS_PENDING;
plci->ncpi_state = 0;
fax_send_edata_ack = false;
}
break;
}
}
else
{
switch (((T30_INFO *)plci->NL.RBuffer->P)->code)
{
case EDATA_T30_TRAIN_OK:
if ((a->ncci_state[ncci] == INC_ACT_PENDING)
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
{
if (plci->B3_prot == 4)
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
else
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
}
break;
}
}
if (fax_send_edata_ack)
{
((T30_INFO *)(plci->fax_connect_info_buffer))->code = ((T30_INFO *)plci->NL.RBuffer->P)->code;
plci->fax_edata_ack_length = 1;
start_internal_command(Id, plci, fax_edata_ack_command);
}
}
else
{
dbug(1, dprintf("EDATA ncci=0x%x state=%d", ncci, a->ncci_state[ncci]));
}
break;
case N_CONNECT:
if (!a->ch_ncci[ch])
{
ncci = get_ncci(plci, ch, 0);
Id = (Id & 0xffff) | (((dword) ncci) << 16);
}
dbug(1, dprintf("N_CONNECT: ch=%d state=%d plci=%lx plci_Id=%lx plci_State=%d",
ch, a->ncci_state[ncci], a->ncci_plci[ncci], plci->Id, plci->State));
msg = _CONNECT_B3_I;
if (a->ncci_state[ncci] == IDLE)
plci->channels++;
else if (plci->B3_prot == 1)
msg = _CONNECT_B3_T90_ACTIVE_I;
a->ncci_state[ncci] = INC_CON_PENDING;
if (plci->B3_prot == 4)
sendf(plci->appl, msg, Id, 0, "s", "");
else
sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
break;
case N_CONNECT_ACK:
dbug(1, dprintf("N_connect_Ack"));
if (plci->internal_command_queue[0]
&& ((plci->adjust_b_state == ADJUST_B_CONNECT_2)
|| (plci->adjust_b_state == ADJUST_B_CONNECT_3)
|| (plci->adjust_b_state == ADJUST_B_CONNECT_4)))
{
(*(plci->internal_command_queue[0]))(Id, plci, 0);
if (!plci->internal_command)
next_internal_command(Id, plci);
break;
}
msg = _CONNECT_B3_ACTIVE_I;
if (plci->B3_prot == 1)
{
if (a->ncci_state[ncci] != OUTG_CON_PENDING)
msg = _CONNECT_B3_T90_ACTIVE_I;
a->ncci_state[ncci] = INC_ACT_PENDING;
sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
}
else if ((plci->B3_prot == 4) || (plci->B3_prot == 5) || (plci->B3_prot == 7))
{
if ((a->ncci_state[ncci] == OUTG_CON_PENDING)
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
{
a->ncci_state[ncci] = INC_ACT_PENDING;
if (plci->B3_prot == 4)
sendf(plci->appl, msg, Id, 0, "s", "");
else
sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
}
}
else
{
a->ncci_state[ncci] = INC_ACT_PENDING;
sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
}
if (plci->adjust_b_restore)
{
plci->adjust_b_restore = false;
start_internal_command(Id, plci, adjust_b_restore);
}
break;
case N_DISC:
case N_DISC_ACK:
if (plci->internal_command_queue[0]
&& ((plci->internal_command == FAX_DISCONNECT_COMMAND_1)
|| (plci->internal_command == FAX_DISCONNECT_COMMAND_2)
|| (plci->internal_command == FAX_DISCONNECT_COMMAND_3)))
{
(*(plci->internal_command_queue[0]))(Id, plci, 0);
if (!plci->internal_command)
next_internal_command(Id, plci);
}
ncci_state = a->ncci_state[ncci];
ncci_remove(plci, ncci, false);
/* with N_DISC or N_DISC_ACK the IDI frees the respective */
/* channel, so we cannot store the state in ncci_state! The */
/* information which channel we received a N_DISC is thus */
/* stored in the inc_dis_ncci_table buffer. */
for (i = 0; plci->inc_dis_ncci_table[i]; i++);
plci->inc_dis_ncci_table[i] = (byte) ncci;
/* need a connect_b3_ind before a disconnect_b3_ind with FAX */
if (!plci->channels
&& (plci->B1_resource == 16)
&& (plci->State <= CONNECTED))
{
len = 9;
i = ((T30_INFO *)plci->fax_connect_info_buffer)->rate_div_2400 * 2400;
PUT_WORD(&plci->ncpi_buffer[1], i);
PUT_WORD(&plci->ncpi_buffer[3], 0);
i = ((T30_INFO *)plci->fax_connect_info_buffer)->data_format;
PUT_WORD(&plci->ncpi_buffer[5], i);
PUT_WORD(&plci->ncpi_buffer[7], 0);
plci->ncpi_buffer[len] = 0;
plci->ncpi_buffer[0] = len;
if (plci->B3_prot == 4)
sendf(plci->appl, _CONNECT_B3_I, Id, 0, "s", "");
else
{
if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
& ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
{
plci->ncpi_buffer[++len] = 0;
plci->ncpi_buffer[++len] = 0;
plci->ncpi_buffer[++len] = 0;
plci->ncpi_buffer[0] = len;
}
sendf(plci->appl, _CONNECT_B3_I, Id, 0, "S", plci->ncpi_buffer);
}
sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", info, plci->ncpi_buffer);
plci->ncpi_state = 0;
sig_req(plci, HANGUP, 0);
send_req(plci);
plci->State = OUTG_DIS_PENDING;
/* disc here */
}
else if ((a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
&& ((plci->B3_prot == 4) || (plci->B3_prot == 5))
&& ((ncci_state == INC_DIS_PENDING) || (ncci_state == IDLE)))
{
if (ncci_state == IDLE)
{
if (plci->channels)
plci->channels--;
if ((plci->State == IDLE || plci->State == SUSPENDING) && !plci->channels) {
if (plci->State == SUSPENDING) {
sendf(plci->appl,
_FACILITY_I,
Id & 0xffffL,
0,
"ws", (word)3, "\x03\x04\x00\x00");
sendf(plci->appl, _DISCONNECT_I, Id & 0xffffL, 0, "w", 0);
}
plci_remove(plci);
plci->State = IDLE;
}
}
}
else if (plci->channels)
{
sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", info, plci->ncpi_buffer);
plci->ncpi_state = 0;
if ((ncci_state == OUTG_REJ_PENDING)
&& ((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE)))
{
sig_req(plci, HANGUP, 0);
send_req(plci);
plci->State = OUTG_DIS_PENDING;
}
}
break;
case N_RESET:
a->ncci_state[ncci] = INC_RES_PENDING;
sendf(plci->appl, _RESET_B3_I, Id, 0, "S", plci->ncpi_buffer);
break;
case N_RESET_ACK:
a->ncci_state[ncci] = CONNECTED;
sendf(plci->appl, _RESET_B3_I, Id, 0, "S", plci->ncpi_buffer);
break;
case N_UDATA:
if (!(udata_forwarding_table[plci->NL.RBuffer->P[0] >> 5] & (1L << (plci->NL.RBuffer->P[0] & 0x1f))))
{
plci->RData[0].P = plci->internal_ind_buffer + (-((int)(long)(plci->internal_ind_buffer)) & 3);
plci->RData[0].PLength = INTERNAL_IND_BUFFER_SIZE;
plci->NL.R = plci->RData;
plci->NL.RNum = 1;
return;
}
case N_BDATA:
case N_DATA:
if (((a->ncci_state[ncci] != CONNECTED) && (plci->B2_prot == 1)) /* transparent */
|| (a->ncci_state[ncci] == IDLE)
|| (a->ncci_state[ncci] == INC_DIS_PENDING))
{
plci->NL.RNR = 2;
break;
}
if ((a->ncci_state[ncci] != CONNECTED)
&& (a->ncci_state[ncci] != OUTG_DIS_PENDING)
&& (a->ncci_state[ncci] != OUTG_REJ_PENDING))
{
dbug(1, dprintf("flow control"));
plci->NL.RNR = 1; /* flow control */
channel_x_off(plci, ch, 0);
break;
}
NCCIcode = ncci | (((word)a->Id) << 8);
/* count all buffers within the Application pool */
/* belonging to the same NCCI. If this is below the */
/* number of buffers available per NCCI we accept */
/* this packet, otherwise we reject it */
count = 0;
Num = 0xffff;
for (i = 0; i < APPLptr->MaxBuffer; i++) {
if (NCCIcode == APPLptr->DataNCCI[i]) count++;
if (!APPLptr->DataNCCI[i] && Num == 0xffff) Num = i;
}
if (count >= APPLptr->MaxNCCIData || Num == 0xffff)
{
dbug(3, dprintf("Flow-Control"));
plci->NL.RNR = 1;
if (++(APPLptr->NCCIDataFlowCtrlTimer) >=
(word)((a->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL) ? 40 : 2000))
{
plci->NL.RNR = 2;
dbug(3, dprintf("DiscardData"));
} else {
channel_x_off(plci, ch, 0);
}
break;
}
else
{
APPLptr->NCCIDataFlowCtrlTimer = 0;
}
plci->RData[0].P = ReceiveBufferGet(APPLptr, Num);
if (!plci->RData[0].P) {
plci->NL.RNR = 1;
channel_x_off(plci, ch, 0);
break;
}
APPLptr->DataNCCI[Num] = NCCIcode;
APPLptr->DataFlags[Num] = (plci->Id << 8) | (plci->NL.Ind >> 4);
dbug(3, dprintf("Buffer(%d), Max = %d", Num, APPLptr->MaxBuffer));
plci->RNum = Num;
plci->RFlags = plci->NL.Ind >> 4;
plci->RData[0].PLength = APPLptr->MaxDataLength;
plci->NL.R = plci->RData;
if ((plci->NL.RLength != 0)
&& ((plci->B2_prot == B2_V120_ASYNC)
|| (plci->B2_prot == B2_V120_ASYNC_V42BIS)
|| (plci->B2_prot == B2_V120_BIT_TRANSPARENT)))
{
plci->RData[1].P = plci->RData[0].P;
plci->RData[1].PLength = plci->RData[0].PLength;
plci->RData[0].P = v120_header_buffer + (-((unsigned long)v120_header_buffer) & 3);
if ((plci->NL.RBuffer->P[0] & V120_HEADER_EXTEND_BIT) || (plci->NL.RLength == 1))
plci->RData[0].PLength = 1;
else
plci->RData[0].PLength = 2;
if (plci->NL.RBuffer->P[0] & V120_HEADER_BREAK_BIT)
plci->RFlags |= 0x0010;
if (plci->NL.RBuffer->P[0] & (V120_HEADER_C1_BIT | V120_HEADER_C2_BIT))
plci->RFlags |= 0x8000;
plci->NL.RNum = 2;
}
else
{
if ((plci->NL.Ind & 0x0f) == N_UDATA)
plci->RFlags |= 0x0010;
else if ((plci->B3_prot == B3_RTP) && ((plci->NL.Ind & 0x0f) == N_BDATA))
plci->RFlags |= 0x0001;
plci->NL.RNum = 1;
}
break;
case N_DATA_ACK:
data_ack(plci, ch);
break;
default:
plci->NL.RNR = 2;
break;
}
}
/*------------------------------------------------------------------*/
/* find a free PLCI */
/*------------------------------------------------------------------*/
static word get_plci(DIVA_CAPI_ADAPTER *a)
{
	word idx, ch;
	PLCI *plci;

	dump_plcis(a);
	/* find the first unused PLCI slot (Id == 0) */
	for (idx = 0; idx < a->max_plci && a->plci[idx].Id; idx++);
	if (idx == a->max_plci) {
		dbug(1, dprintf("get_plci: out of PLCIs"));
		return 0;
	}
	plci = &a->plci[idx];
	plci->Id = (byte)(idx + 1);

	/* reset the signalling and network layer entity ids and requests */
	plci->Sig.Id = 0;
	plci->NL.Id = 0;
	plci->sig_req = 0;
	plci->nl_req = 0;
	plci->sig_global_req = 0;
	plci->sig_remove_id = 0;
	plci->nl_global_req = 0;
	plci->nl_remove_id = 0;

	/* reset CAPI level state */
	plci->appl = NULL;
	plci->relatedPTYPLCI = NULL;
	plci->State = IDLE;
	plci->SuppState = IDLE;
	plci->channels = 0;
	plci->tel = 0;
	plci->B1_resource = 0;
	plci->B2_prot = 0;
	plci->B3_prot = 0;
	plci->command = 0;
	plci->m_command = 0;
	init_internal_command_queue(plci);
	plci->number = 0;

	/* reset request buffer and message queue positions */
	plci->req_in_start = 0;
	plci->req_in = 0;
	plci->req_out = 0;
	plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
	plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
	plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;

	plci->data_sent = false;
	plci->send_disc = 0;
	plci->adv_nl = 0;
	plci->manufacturer = false;
	plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
	plci->spoofed_msg = 0;
	plci->ptyState = 0;
	plci->cr_enquiry = false;
	plci->hangup_flow_ctrl_timer = 0;
	plci->ncci_ring_list = 0;
	for (ch = 0; ch < MAX_CHANNELS_PER_PLCI; ch++)
		plci->inc_dis_ncci_table[ch] = 0;
	clear_c_ind_mask(plci);
	set_group_ind_mask(plci);
	plci->fax_connect_info_length = 0;
	plci->nsf_control_bits = 0;
	plci->ncpi_state = 0x00;
	plci->ncpi_buffer[0] = 0;
	plci->requested_options_conn = 0;
	plci->requested_options = 0;
	plci->notifiedcall = 0;
	plci->vswitchstate = 0;
	plci->vsprot = 0;
	plci->vsprotdialect = 0;
	init_b1_config(plci);
	dbug(1, dprintf("get_plci(%x)", plci->Id));
	/* PLCI numbers handed to callers are 1-based; 0 means "none free" */
	return idx + 1;
}
/*------------------------------------------------------------------*/
/* put a parameter in the parameter buffer */
/*------------------------------------------------------------------*/
static void add_p(PLCI *plci, byte code, byte *p)
{
	/* a parameter is a length-prefixed byte structure; NULL means empty */
	add_ie(plci, code, p, (word)(p ? p[0] : 0));
}
/*------------------------------------------------------------------*/
/* put a structure in the parameter buffer */
/*------------------------------------------------------------------*/
static void add_s(PLCI *plci, byte code, API_PARSE *p)
{
	/* forward a parsed structure to the IE writer; ignore NULL */
	if (!p)
		return;
	add_ie(plci, code, p->info, (word)p->length);
}
/*------------------------------------------------------------------*/
/* put multiple structures in the parameter buffer */
/*------------------------------------------------------------------*/
static void add_ss(PLCI *plci, byte code, API_PARSE *p)
{
	byte pos;

	if (!p)
		return;
	dbug(1, dprintf("add_ss(%x,len=%d)", code, p->length));
	/* walk the embedded IE list: info[pos-1] = code, info[pos] = length */
	pos = 2;
	while (pos < (byte)p->length) {
		dbug(1, dprintf("add_ss_ie(%x,len=%d)", p->info[pos - 1], p->info[pos]));
		add_ie(plci, p->info[pos - 1], (byte *)&(p->info[pos]), (word)p->info[pos]);
		pos += p->info[pos] + 2;
	}
}
/*------------------------------------------------------------------*/
/* return the channel number sent by the application in a esc_chi */
/*------------------------------------------------------------------*/
static byte getChannel(API_PARSE *p)
{
	byte pos;

	if (!p)
		return 0;
	/* scan the embedded IE list for an ESC element of length 2 whose
	   first content byte is CHI; the following byte is the channel */
	for (pos = 2; pos < (byte)p->length; pos += p->info[pos] + 2) {
		if (p->info[pos] == 2 && p->info[pos - 1] == ESC && p->info[pos + 1] == CHI)
			return p->info[pos + 2];
	}
	return 0;
}
/*------------------------------------------------------------------*/
/* put an information element in the parameter buffer */
/*------------------------------------------------------------------*/
static void add_ie(PLCI *plci, byte code, byte *p, word p_length)
{
	word n;

	/* empty optional IEs (bit 7 clear) are suppressed entirely */
	if (!(code & 0x80) && !p_length)
		return;
	/* first IE of a request: skip two bytes at the buffer start;
	   otherwise back up over the trailing zero written by the
	   previous element so the IEs stay contiguous */
	if (plci->req_in == plci->req_in_start)
		plci->req_in += 2;
	else
		plci->req_in--;
	plci->RBuffer[plci->req_in++] = code;
	if (p) {
		plci->RBuffer[plci->req_in++] = (byte)p_length;
		for (n = 0; n < p_length; n++)
			plci->RBuffer[plci->req_in++] = p[1 + n];
	}
	/* terminating zero; overwritten if another IE follows */
	plci->RBuffer[plci->req_in++] = 0;
}
/*------------------------------------------------------------------*/
/* put a unstructured data into the buffer */
/*------------------------------------------------------------------*/
static void add_d(PLCI *plci, word length, byte *p)
{
	word n;

	/* position req_in the same way add_ie() does: reserve two bytes at
	   the start of a request, otherwise back up over the previous
	   element's trailing zero */
	if (plci->req_in == plci->req_in_start)
		plci->req_in += 2;
	else
		plci->req_in--;
	/* raw copy - no code/length framing around the data */
	for (n = 0; n < length; n++)
		plci->RBuffer[plci->req_in++] = p[n];
}
/*------------------------------------------------------------------*/
/* put parameters from the Additional Info parameter in the */
/* parameter buffer */
/*------------------------------------------------------------------*/
static void add_ai(PLCI *plci, API_PARSE *ai)
{
	word n;
	API_PARSE ai_parms[5];

	for (n = 0; n < 5; n++)
		ai_parms[n].length = 0;
	/* nothing to add when the Additional Info is absent or malformed */
	if (!ai->length
	    || api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
		return;
	/* ai_parms[0] (B channel info) is not forwarded here */
	add_s(plci, KEY, &ai_parms[1]);
	add_s(plci, UUI, &ai_parms[2]);
	add_ss(plci, FTY, &ai_parms[3]);
}
/*------------------------------------------------------------------*/
/* put parameter for b1 protocol in the parameter buffer */
/*------------------------------------------------------------------*/
/*
 * Select the B1 (layer 1) hardware resource for this connection, build the
 * CAI (channel allocation information) element in cai[] and queue it with
 * add_p().  Returns 0 on success or a CAPI error code (_WRONG_MESSAGE_FORMAT,
 * _B1_NOT_SUPPORTED, _B1_PARM_NOT_SUPPORTED).
 */
static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
		   word b1_facilities)
{
	API_PARSE bp_parms[8];
	API_PARSE mdm_cfg[9];
	API_PARSE global_config[2];
	byte cai[256];
	/* B1 protocol number -> hardware resource id, indices 0..9 */
	byte resource[] = {5, 9, 13, 12, 16, 39, 9, 17, 17, 18};
	byte voice_cai[] = "\x06\x14\x00\x00\x00\x00\x08";
	word i;
	API_PARSE mdm_cfg_v18[4];
	word j, n, w;
	dword d;
	for (i = 0; i < 8; i++) bp_parms[i].length = 0;
	for (i = 0; i < 2; i++) global_config[i].length = 0;
	dbug(1, dprintf("add_b1"));
	api_save_msg(bp, "s", &plci->B_protocol);
	/* b_channel_info == 2: call without a B channel - no resource needed */
	if (b_channel_info == 2) {
		plci->B1_resource = 0;
		adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
		add_p(plci, CAI, "\x01\x00");
		dbug(1, dprintf("Cai=1,0 (no resource)"));
		return 0;
	}
	/* telephony handling: permanent codec needs no CAI at all */
	if (plci->tel == CODEC_PERMANENT) return 0;
	else if (plci->tel == CODEC) {
		plci->B1_resource = 1;
		adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
		add_p(plci, CAI, "\x01\x01");
		dbug(1, dprintf("Cai=1,1 (Codec)"));
		return 0;
	}
	else if (plci->tel == ADV_VOICE) {
		plci->B1_resource = add_b1_facilities(plci, 9, (word)(b1_facilities | B1_FACILITY_VOICE));
		adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities | B1_FACILITY_VOICE));
		voice_cai[1] = plci->B1_resource;
		PUT_WORD(&voice_cai[5], plci->appl->MaxDataLength);
		add_p(plci, CAI, voice_cai);
		dbug(1, dprintf("Cai=1,0x%x (AdvVoice)", voice_cai[1]));
		return 0;
	}
	/* re-derive originate/answer from the stored out/in direction bits */
	plci->call_dir &= ~(CALL_DIR_ORIGINATE | CALL_DIR_ANSWER);
	if (plci->call_dir & CALL_DIR_OUT)
		plci->call_dir |= CALL_DIR_ORIGINATE;
	else if (plci->call_dir & CALL_DIR_IN)
		plci->call_dir |= CALL_DIR_ANSWER;
	/* empty B protocol: default to resource 5 */
	if (!bp->length) {
		plci->B1_resource = 0x5;
		adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
		add_p(plci, CAI, "\x01\x05");
		return 0;
	}
	dbug(1, dprintf("b_prot_len=%d", (word)bp->length));
	if (bp->length > 256) return _WRONG_MESSAGE_FORMAT;
	/* accept "wwwsssb" (trailing global config byte), plain "wwwsss",
	   or "wwwssss" B protocol layouts */
	if (api_parse(&bp->info[1], (word)bp->length, "wwwsssb", bp_parms))
	{
		bp_parms[6].length = 0;
		if (api_parse(&bp->info[1], (word)bp->length, "wwwsss", bp_parms))
		{
			dbug(1, dprintf("b-form.!"));
			return _WRONG_MESSAGE_FORMAT;
		}
	}
	else if (api_parse(&bp->info[1], (word)bp->length, "wwwssss", bp_parms))
	{
		dbug(1, dprintf("b-form.!"));
		return _WRONG_MESSAGE_FORMAT;
	}
	if (bp_parms[6].length)
	{
		if (api_parse(&bp_parms[6].info[1], (word)bp_parms[6].length, "w", global_config))
		{
			return _WRONG_MESSAGE_FORMAT;
		}
		/* the global configuration word may force the call direction */
		switch (GET_WORD(global_config[0].info))
		{
		case 1:
			plci->call_dir = (plci->call_dir & ~CALL_DIR_ANSWER) | CALL_DIR_ORIGINATE;
			break;
		case 2:
			plci->call_dir = (plci->call_dir & ~CALL_DIR_ORIGINATE) | CALL_DIR_ANSWER;
			break;
		}
	}
	dbug(1, dprintf("call_dir=%04x", plci->call_dir));
	/* RTP: private option, resource 31, B1 config copied into the CAI */
	if ((GET_WORD(bp_parms[0].info) == B1_RTP)
	    && (plci->adapter->man_profile.private_options & (1L << PRIVATE_RTP)))
	{
		plci->B1_resource = add_b1_facilities(plci, 31, (word)(b1_facilities & ~B1_FACILITY_VOICE));
		adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
		cai[1] = plci->B1_resource;
		cai[2] = 0;
		cai[3] = 0;
		cai[4] = 0;
		PUT_WORD(&cai[5], plci->appl->MaxDataLength);
		for (i = 0; i < bp_parms[3].length; i++)
			cai[7 + i] = bp_parms[3].info[1 + i];
		cai[0] = 6 + bp_parms[3].length;
		add_p(plci, CAI, cai);
		return 0;
	}
	/* PIAFS: private option, fixed hardware resource 35 */
	if ((GET_WORD(bp_parms[0].info) == B1_PIAFS)
	    && (plci->adapter->man_profile.private_options & (1L << PRIVATE_PIAFS)))
	{
		plci->B1_resource = add_b1_facilities(plci, 35/* PIAFS HARDWARE FACILITY */, (word)(b1_facilities & ~B1_FACILITY_VOICE));
		adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
		cai[1] = plci->B1_resource;
		cai[2] = 0;
		cai[3] = 0;
		cai[4] = 0;
		PUT_WORD(&cai[5], plci->appl->MaxDataLength);
		cai[0] = 6;
		add_p(plci, CAI, cai);
		return 0;
	}
	/* reject protocols the adapter profile does not advertise; B1 protocol
	   3 (V.110 sync) may fall back to HDLC for rate 0 / 56000 */
	if ((GET_WORD(bp_parms[0].info) >= 32)
	    || (!((1L << GET_WORD(bp_parms[0].info)) & plci->adapter->profile.B1_Protocols)
		&& ((GET_WORD(bp_parms[0].info) != 3)
		    || !((1L << B1_HDLC) & plci->adapter->profile.B1_Protocols)
		    || ((bp_parms[3].length != 0) && (GET_WORD(&bp_parms[3].info[1]) != 0) && (GET_WORD(&bp_parms[3].info[1]) != 56000)))))
	{
		return _B1_NOT_SUPPORTED;
	}
	/* NOTE(review): resource[] has only 10 entries, but the check above
	   admits any protocol < 32 enabled in profile.B1_Protocols - verify
	   the profile never advertises B1 protocols >= 10 here */
	plci->B1_resource = add_b1_facilities(plci, resource[GET_WORD(bp_parms[0].info)],
					      (word)(b1_facilities & ~B1_FACILITY_VOICE));
	adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
	cai[0] = 6;
	cai[1] = plci->B1_resource;
	for (i = 2; i < sizeof(cai); i++) cai[i] = 0;
	if ((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
	    || (GET_WORD(bp_parms[0].info) == B1_MODEM_ASYNC)
	    || (GET_WORD(bp_parms[0].info) == B1_MODEM_SYNC_HDLC))
	{ /* B1 - modem */
		for (i = 0; i < 7; i++) mdm_cfg[i].length = 0;
		if (bp_parms[3].length)
		{
			/* basic modem B1 configuration: six words */
			if (api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwww", mdm_cfg))
			{
				return (_WRONG_MESSAGE_FORMAT);
			}
			cai[2] = 0; /* Bit rate for adaptation */
			dbug(1, dprintf("MDM Max Bit Rate:<%d>", GET_WORD(mdm_cfg[0].info)));
			PUT_WORD(&cai[13], 0); /* Min Tx speed */
			PUT_WORD(&cai[15], GET_WORD(mdm_cfg[0].info)); /* Max Tx speed */
			PUT_WORD(&cai[17], 0); /* Min Rx speed */
			PUT_WORD(&cai[19], GET_WORD(mdm_cfg[0].info)); /* Max Rx speed */
			cai[3] = 0; /* Async framing parameters */
			switch (GET_WORD(mdm_cfg[2].info))
			{ /* Parity */
			case 1: /* odd parity */
				cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_ODD);
				dbug(1, dprintf("MDM: odd parity"));
				break;
			case 2: /* even parity */
				cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_EVEN);
				dbug(1, dprintf("MDM: even parity"));
				break;
			default:
				dbug(1, dprintf("MDM: no parity"));
				break;
			}
			switch (GET_WORD(mdm_cfg[3].info))
			{ /* stop bits */
			case 1: /* 2 stop bits */
				cai[3] |= DSP_CAI_ASYNC_TWO_STOP_BITS;
				dbug(1, dprintf("MDM: 2 stop bits"));
				break;
			default:
				dbug(1, dprintf("MDM: 1 stop bit"));
				break;
			}
			switch (GET_WORD(mdm_cfg[1].info))
			{ /* char length */
			case 5:
				cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_5;
				dbug(1, dprintf("MDM: 5 bits"));
				break;
			case 6:
				cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_6;
				dbug(1, dprintf("MDM: 6 bits"));
				break;
			case 7:
				cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_7;
				dbug(1, dprintf("MDM: 7 bits"));
				break;
			default:
				dbug(1, dprintf("MDM: 8 bits"));
				break;
			}
			cai[7] = 0; /* Line taking options */
			cai[8] = 0; /* Modulation negotiation options */
			cai[9] = 0; /* Modulation options */
			/* reverse direction when originate and out bits disagree */
			if (((plci->call_dir & CALL_DIR_ORIGINATE) != 0) ^ ((plci->call_dir & CALL_DIR_OUT) != 0))
			{
				cai[9] |= DSP_CAI_MODEM_REVERSE_DIRECTION;
				dbug(1, dprintf("MDM: Reverse direction"));
			}
			if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_DISABLE_RETRAIN)
			{
				cai[9] |= DSP_CAI_MODEM_DISABLE_RETRAIN;
				dbug(1, dprintf("MDM: Disable retrain"));
			}
			if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_DISABLE_RING_TONE)
			{
				cai[7] |= DSP_CAI_MODEM_DISABLE_CALLING_TONE | DSP_CAI_MODEM_DISABLE_ANSWER_TONE;
				dbug(1, dprintf("MDM: Disable ring tone"));
			}
			if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_GUARD_1800)
			{
				cai[8] |= DSP_CAI_MODEM_GUARD_TONE_1800HZ;
				dbug(1, dprintf("MDM: 1800 guard tone"));
			}
			else if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_GUARD_550)
			{
				cai[8] |= DSP_CAI_MODEM_GUARD_TONE_550HZ;
				dbug(1, dprintf("MDM: 550 guard tone"));
			}
			if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_V100)
			{
				cai[8] |= DSP_CAI_MODEM_NEGOTIATE_V100;
				dbug(1, dprintf("MDM: V100"));
			}
			else if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_MOD_CLASS)
			{
				cai[8] |= DSP_CAI_MODEM_NEGOTIATE_IN_CLASS;
				dbug(1, dprintf("MDM: IN CLASS"));
			}
			else if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_DISABLED)
			{
				cai[8] |= DSP_CAI_MODEM_NEGOTIATE_DISABLED;
				dbug(1, dprintf("MDM: DISABLED"));
			}
			cai[0] = 20;
			if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_V18))
			    && (GET_WORD(mdm_cfg[5].info) & 0x8000)) /* Private V.18 enable */
			{
				plci->requested_options |= 1L << PRIVATE_V18;
			}
			if (GET_WORD(mdm_cfg[5].info) & 0x4000) /* Private VOWN enable */
				plci->requested_options |= 1L << PRIVATE_VOWN;
			/* extended (private) modem configuration: optional trailing
			   structure with modulation masks and speed limits */
			if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
			    & ((1L << PRIVATE_V18) | (1L << PRIVATE_VOWN)))
			{
				if (!api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwwws", mdm_cfg))
				{
					i = 27;
					if (mdm_cfg[6].length >= 4)
					{
						d = GET_DWORD(&mdm_cfg[6].info[1]);
						cai[7] |= (byte) d; /* line taking options */
						cai[9] |= (byte)(d >> 8); /* modulation options */
						cai[++i] = (byte)(d >> 16); /* vown modulation options */
						cai[++i] = (byte)(d >> 24);
						if (mdm_cfg[6].length >= 8)
						{
							d = GET_DWORD(&mdm_cfg[6].info[5]);
							cai[10] |= (byte) d; /* disabled modulations mask */
							cai[11] |= (byte)(d >> 8);
							if (mdm_cfg[6].length >= 12)
							{
								d = GET_DWORD(&mdm_cfg[6].info[9]);
								cai[12] = (byte) d; /* enabled modulations mask */
								cai[++i] = (byte)(d >> 8); /* vown enabled modulations */
								cai[++i] = (byte)(d >> 16);
								cai[++i] = (byte)(d >> 24);
								cai[++i] = 0;
								if (mdm_cfg[6].length >= 14)
								{
									w = GET_WORD(&mdm_cfg[6].info[13]);
									if (w != 0)
										PUT_WORD(&cai[13], w); /* min tx speed */
									if (mdm_cfg[6].length >= 16)
									{
										w = GET_WORD(&mdm_cfg[6].info[15]);
										if (w != 0)
											PUT_WORD(&cai[15], w); /* max tx speed */
										if (mdm_cfg[6].length >= 18)
										{
											w = GET_WORD(&mdm_cfg[6].info[17]);
											if (w != 0)
												PUT_WORD(&cai[17], w); /* min rx speed */
											if (mdm_cfg[6].length >= 20)
											{
												w = GET_WORD(&mdm_cfg[6].info[19]);
												if (w != 0)
													PUT_WORD(&cai[19], w); /* max rx speed */
												if (mdm_cfg[6].length >= 22)
												{
													w = GET_WORD(&mdm_cfg[6].info[21]);
													cai[23] = (byte)(-((short) w)); /* transmit level */
													if (mdm_cfg[6].length >= 24)
													{
														w = GET_WORD(&mdm_cfg[6].info[23]);
														cai[22] |= (byte) w; /* info options mask */
														cai[21] |= (byte)(w >> 8); /* disabled symbol rates */
													}
												}
											}
										}
									}
								}
							}
						}
					}
					/* cai[27] holds the length of the vown extension */
					cai[27] = i - 27;
					i++;
					/* optional V.18 configuration: three substructures
					   appended verbatim after the vown extension */
					if (!api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwwwss", mdm_cfg))
					{
						if (!api_parse(&mdm_cfg[7].info[1], (word)mdm_cfg[7].length, "sss", mdm_cfg_v18))
						{
							for (n = 0; n < 3; n++)
							{
								cai[i] = (byte)(mdm_cfg_v18[n].length);
								for (j = 1; j < ((word)(cai[i] + 1)); j++)
									cai[i + j] = mdm_cfg_v18[n].info[j];
								i += cai[i] + 1;
							}
						}
					}
					cai[0] = (byte)(i - 1);
				}
			}
		}
	}
	if (GET_WORD(bp_parms[0].info) == 2 || /* V.110 async */
	    GET_WORD(bp_parms[0].info) == 3)   /* V.110 sync */
	{
		if (bp_parms[3].length) {
			dbug(1, dprintf("V.110,%d", GET_WORD(&bp_parms[3].info[1])));
			switch (GET_WORD(&bp_parms[3].info[1])) { /* Rate */
			case 0:
			case 56000:
				if (GET_WORD(bp_parms[0].info) == 3) { /* V.110 sync 56k */
					dbug(1, dprintf("56k sync HSCX"));
					cai[1] = 8;
					cai[2] = 0;
					cai[3] = 0;
				}
				else if (GET_WORD(bp_parms[0].info) == 2) {
					dbug(1, dprintf("56k async DSP"));
					cai[2] = 9;
				}
				break;
			case 50:    cai[2] = 1;  break;
			case 75:    cai[2] = 1;  break;
			case 110:   cai[2] = 1;  break;
			case 150:   cai[2] = 1;  break;
			case 200:   cai[2] = 1;  break;
			case 300:   cai[2] = 1;  break;
			case 600:   cai[2] = 1;  break;
			case 1200:  cai[2] = 2;  break;
			case 2400:  cai[2] = 3;  break;
			case 4800:  cai[2] = 4;  break;
			case 7200:  cai[2] = 10; break;
			case 9600:  cai[2] = 5;  break;
			case 12000: cai[2] = 13; break;
			case 24000: cai[2] = 0;  break;
			case 14400: cai[2] = 11; break;
			case 19200: cai[2] = 6;  break;
			case 28800: cai[2] = 12; break;
			case 38400: cai[2] = 7;  break;
			case 48000: cai[2] = 8;  break;
			case 76:    cai[2] = 15; break; /* 75/1200     */
			case 1201:  cai[2] = 14; break; /* 1200/75     */
			case 56001: cai[2] = 9;  break; /* V.110 56000 */
			default:
				return _B1_PARM_NOT_SUPPORTED;
			}
			cai[3] = 0;
			if (cai[1] == 13) /* v.110 async */
			{
				/* async framing parameters from the B1 configuration */
				if (bp_parms[3].length >= 8)
				{
					switch (GET_WORD(&bp_parms[3].info[3]))
					{ /* char length */
					case 5:
						cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_5;
						break;
					case 6:
						cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_6;
						break;
					case 7:
						cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_7;
						break;
					}
					switch (GET_WORD(&bp_parms[3].info[5]))
					{ /* Parity */
					case 1: /* odd parity */
						cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_ODD);
						break;
					case 2: /* even parity */
						cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_EVEN);
						break;
					}
					switch (GET_WORD(&bp_parms[3].info[7]))
					{ /* stop bits */
					case 1: /* 2 stop bits */
						cai[3] |= DSP_CAI_ASYNC_TWO_STOP_BITS;
						break;
					}
				}
			}
		}
		else if (cai[1] == 8 || GET_WORD(bp_parms[0].info) == 3) {
			dbug(1, dprintf("V.110 default 56k sync"));
			cai[1] = 8;
			cai[2] = 0;
			cai[3] = 0;
		}
		else {
			dbug(1, dprintf("V.110 default 9600 async"));
			cai[2] = 5;
		}
	}
	PUT_WORD(&cai[5], plci->appl->MaxDataLength);
	dbug(1, dprintf("CAI[%d]=%x,%x,%x,%x,%x,%x", cai[0], cai[1], cai[2], cai[3], cai[4], cai[5], cai[6]));
/* HexDump ("CAI", sizeof(cai), &cai[0]); */
	add_p(plci, CAI, cai);
	return 0;
}
/*------------------------------------------------------------------*/
/* put parameter for b2 and B3 protocol in the parameter buffer */
/*------------------------------------------------------------------*/
static word add_b23(PLCI *plci, API_PARSE *bp)
{
word i, fax_control_bits;
byte pos, len;
byte SAPI = 0x40; /* default SAPI 16 for x.31 */
API_PARSE bp_parms[8];
API_PARSE *b1_config;
API_PARSE *b2_config;
API_PARSE b2_config_parms[8];
API_PARSE *b3_config;
API_PARSE b3_config_parms[6];
API_PARSE global_config[2];
static byte llc[3] = {2,0,0};
static byte dlc[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
static byte nlc[256];
static byte lli[12] = {1,1};
const byte llc2_out[] = {1,2,4,6,2,0,0,0, X75_V42BIS,V120_L2,V120_V42BIS,V120_L2,6};
const byte llc2_in[] = {1,3,4,6,3,0,0,0, X75_V42BIS,V120_L2,V120_V42BIS,V120_L2,6};
const byte llc3[] = {4,3,2,2,6,6,0};
const byte header[] = {0,2,3,3,0,0,0};
for (i = 0; i < 8; i++) bp_parms[i].length = 0;
for (i = 0; i < 6; i++) b2_config_parms[i].length = 0;
for (i = 0; i < 5; i++) b3_config_parms[i].length = 0;
lli[0] = 1;
lli[1] = 1;
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)
lli[1] |= 2;
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL)
lli[1] |= 4;
if ((lli[1] & 0x02) && (diva_xdi_extended_features & DIVA_CAPI_USE_CMA)) {
lli[1] |= 0x10;
if (plci->rx_dma_descriptor <= 0) {
plci->rx_dma_descriptor = diva_get_dma_descriptor(plci, &plci->rx_dma_magic);
if (plci->rx_dma_descriptor >= 0)
plci->rx_dma_descriptor++;
}
if (plci->rx_dma_descriptor > 0) {
lli[0] = 6;
lli[1] |= 0x40;
lli[2] = (byte)(plci->rx_dma_descriptor - 1);
lli[3] = (byte)plci->rx_dma_magic;
lli[4] = (byte)(plci->rx_dma_magic >> 8);
lli[5] = (byte)(plci->rx_dma_magic >> 16);
lli[6] = (byte)(plci->rx_dma_magic >> 24);
}
}
if (DIVA_CAPI_SUPPORTS_NO_CANCEL(plci->adapter)) {
lli[1] |= 0x20;
}
dbug(1, dprintf("add_b23"));
api_save_msg(bp, "s", &plci->B_protocol);
if (!bp->length && plci->tel)
{
plci->adv_nl = true;
dbug(1, dprintf("Default adv.Nl"));
add_p(plci, LLI, lli);
plci->B2_prot = 1 /*XPARENT*/;
plci->B3_prot = 0 /*XPARENT*/;
llc[1] = 2;
llc[2] = 4;
add_p(plci, LLC, llc);
dlc[0] = 2;
PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
add_p(plci, DLC, dlc);
return 0;
}
if (!bp->length) /*default*/
{
dbug(1, dprintf("ret default"));
add_p(plci, LLI, lli);
plci->B2_prot = 0 /*X.75 */;
plci->B3_prot = 0 /*XPARENT*/;
llc[1] = 1;
llc[2] = 4;
add_p(plci, LLC, llc);
dlc[0] = 2;
PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
add_p(plci, DLC, dlc);
return 0;
}
dbug(1, dprintf("b_prot_len=%d", (word)bp->length));
if ((word)bp->length > 256) return _WRONG_MESSAGE_FORMAT;
if (api_parse(&bp->info[1], (word)bp->length, "wwwsssb", bp_parms))
{
bp_parms[6].length = 0;
if (api_parse(&bp->info[1], (word)bp->length, "wwwsss", bp_parms))
{
dbug(1, dprintf("b-form.!"));
return _WRONG_MESSAGE_FORMAT;
}
}
else if (api_parse(&bp->info[1], (word)bp->length, "wwwssss", bp_parms))
{
dbug(1, dprintf("b-form.!"));
return _WRONG_MESSAGE_FORMAT;
}
if (plci->tel == ADV_VOICE) /* transparent B on advanced voice */
{
if (GET_WORD(bp_parms[1].info) != 1
|| GET_WORD(bp_parms[2].info) != 0) return _B2_NOT_SUPPORTED;
plci->adv_nl = true;
}
else if (plci->tel) return _B2_NOT_SUPPORTED;
if ((GET_WORD(bp_parms[1].info) == B2_RTP)
&& (GET_WORD(bp_parms[2].info) == B3_RTP)
&& (plci->adapter->man_profile.private_options & (1L << PRIVATE_RTP)))
{
add_p(plci, LLI, lli);
plci->B2_prot = (byte) GET_WORD(bp_parms[1].info);
plci->B3_prot = (byte) GET_WORD(bp_parms[2].info);
llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ? 14 : 13;
llc[2] = 4;
add_p(plci, LLC, llc);
dlc[0] = 2;
PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
dlc[3] = 3; /* Addr A */
dlc[4] = 1; /* Addr B */
dlc[5] = 7; /* modulo mode */
dlc[6] = 7; /* window size */
dlc[7] = 0; /* XID len Lo */
dlc[8] = 0; /* XID len Hi */
for (i = 0; i < bp_parms[4].length; i++)
dlc[9 + i] = bp_parms[4].info[1 + i];
dlc[0] = (byte)(8 + bp_parms[4].length);
add_p(plci, DLC, dlc);
for (i = 0; i < bp_parms[5].length; i++)
nlc[1 + i] = bp_parms[5].info[1 + i];
nlc[0] = (byte)(bp_parms[5].length);
add_p(plci, NLC, nlc);
return 0;
}
if ((GET_WORD(bp_parms[1].info) >= 32)
|| (!((1L << GET_WORD(bp_parms[1].info)) & plci->adapter->profile.B2_Protocols)
&& ((GET_WORD(bp_parms[1].info) != B2_PIAFS)
|| !(plci->adapter->man_profile.private_options & (1L << PRIVATE_PIAFS)))))
{
return _B2_NOT_SUPPORTED;
}
if ((GET_WORD(bp_parms[2].info) >= 32)
|| !((1L << GET_WORD(bp_parms[2].info)) & plci->adapter->profile.B3_Protocols))
{
return _B3_NOT_SUPPORTED;
}
if ((GET_WORD(bp_parms[1].info) != B2_SDLC)
&& ((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
|| (GET_WORD(bp_parms[0].info) == B1_MODEM_ASYNC)
|| (GET_WORD(bp_parms[0].info) == B1_MODEM_SYNC_HDLC)))
{
return (add_modem_b23(plci, bp_parms));
}
add_p(plci, LLI, lli);
plci->B2_prot = (byte)GET_WORD(bp_parms[1].info);
plci->B3_prot = (byte)GET_WORD(bp_parms[2].info);
if (plci->B2_prot == 12) SAPI = 0; /* default SAPI D-channel */
if (bp_parms[6].length)
{
if (api_parse(&bp_parms[6].info[1], (word)bp_parms[6].length, "w", global_config))
{
return _WRONG_MESSAGE_FORMAT;
}
switch (GET_WORD(global_config[0].info))
{
case 1:
plci->call_dir = (plci->call_dir & ~CALL_DIR_ANSWER) | CALL_DIR_ORIGINATE;
break;
case 2:
plci->call_dir = (plci->call_dir & ~CALL_DIR_ORIGINATE) | CALL_DIR_ANSWER;
break;
}
}
dbug(1, dprintf("call_dir=%04x", plci->call_dir));
if (plci->B2_prot == B2_PIAFS)
llc[1] = PIAFS_CRC;
else
/* IMPLEMENT_PIAFS */
{
llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ?
llc2_out[GET_WORD(bp_parms[1].info)] : llc2_in[GET_WORD(bp_parms[1].info)];
}
llc[2] = llc3[GET_WORD(bp_parms[2].info)];
add_p(plci, LLC, llc);
dlc[0] = 2;
PUT_WORD(&dlc[1], plci->appl->MaxDataLength +
header[GET_WORD(bp_parms[2].info)]);
b1_config = &bp_parms[3];
nlc[0] = 0;
if (plci->B3_prot == 4
|| plci->B3_prot == 5)
{
for (i = 0; i < sizeof(T30_INFO); i++) nlc[i] = 0;
nlc[0] = sizeof(T30_INFO);
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
((T30_INFO *)&nlc[1])->operating_mode = T30_OPERATING_MODE_CAPI;
((T30_INFO *)&nlc[1])->rate_div_2400 = 0xff;
if (b1_config->length >= 2)
{
((T30_INFO *)&nlc[1])->rate_div_2400 = (byte)(GET_WORD(&b1_config->info[1]) / 2400);
}
}
b2_config = &bp_parms[4];
if (llc[1] == PIAFS_CRC)
{
if (plci->B3_prot != B3_TRANSPARENT)
{
return _B_STACK_NOT_SUPPORTED;
}
if (b2_config->length && api_parse(&b2_config->info[1], (word)b2_config->length, "bwww", b2_config_parms)) {
return _WRONG_MESSAGE_FORMAT;
}
PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
dlc[3] = 0; /* Addr A */
dlc[4] = 0; /* Addr B */
dlc[5] = 0; /* modulo mode */
dlc[6] = 0; /* window size */
if (b2_config->length >= 7) {
dlc[7] = 7;
dlc[8] = 0;
dlc[9] = b2_config_parms[0].info[0]; /* PIAFS protocol Speed configuration */
dlc[10] = b2_config_parms[1].info[0]; /* V.42bis P0 */
dlc[11] = b2_config_parms[1].info[1]; /* V.42bis P0 */
dlc[12] = b2_config_parms[2].info[0]; /* V.42bis P1 */
dlc[13] = b2_config_parms[2].info[1]; /* V.42bis P1 */
dlc[14] = b2_config_parms[3].info[0]; /* V.42bis P2 */
dlc[15] = b2_config_parms[3].info[1]; /* V.42bis P2 */
dlc[0] = 15;
if (b2_config->length >= 8) { /* PIAFS control abilities */
dlc[7] = 10;
dlc[16] = 2; /* Length of PIAFS extension */
dlc[17] = PIAFS_UDATA_ABILITIES; /* control (UDATA) ability */
dlc[18] = b2_config_parms[4].info[0]; /* value */
dlc[0] = 18;
}
}
else /* default values, 64K, variable, no compression */
{
dlc[7] = 7;
dlc[8] = 0;
dlc[9] = 0x03; /* PIAFS protocol Speed configuration */
dlc[10] = 0x03; /* V.42bis P0 */
dlc[11] = 0; /* V.42bis P0 */
dlc[12] = 0; /* V.42bis P1 */
dlc[13] = 0; /* V.42bis P1 */
dlc[14] = 0; /* V.42bis P2 */
dlc[15] = 0; /* V.42bis P2 */
dlc[0] = 15;
}
add_p(plci, DLC, dlc);
}
else
if ((llc[1] == V120_L2) || (llc[1] == V120_V42BIS))
{
if (plci->B3_prot != B3_TRANSPARENT)
return _B_STACK_NOT_SUPPORTED;
dlc[0] = 6;
PUT_WORD(&dlc[1], GET_WORD(&dlc[1]) + 2);
dlc[3] = 0x08;
dlc[4] = 0x01;
dlc[5] = 127;
dlc[6] = 7;
if (b2_config->length != 0)
{
if ((llc[1] == V120_V42BIS) && api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbwww", b2_config_parms)) {
return _WRONG_MESSAGE_FORMAT;
}
dlc[3] = (byte)((b2_config->info[2] << 3) | ((b2_config->info[1] >> 5) & 0x04));
dlc[4] = (byte)((b2_config->info[1] << 1) | 0x01);
if (b2_config->info[3] != 128)
{
dbug(1, dprintf("1D-dlc= %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
return _B2_PARM_NOT_SUPPORTED;
}
dlc[5] = (byte)(b2_config->info[3] - 1);
dlc[6] = b2_config->info[4];
if (llc[1] == V120_V42BIS) {
if (b2_config->length >= 10) {
dlc[7] = 6;
dlc[8] = 0;
dlc[9] = b2_config_parms[4].info[0];
dlc[10] = b2_config_parms[4].info[1];
dlc[11] = b2_config_parms[5].info[0];
dlc[12] = b2_config_parms[5].info[1];
dlc[13] = b2_config_parms[6].info[0];
dlc[14] = b2_config_parms[6].info[1];
dlc[0] = 14;
dbug(1, dprintf("b2_config_parms[4].info[0] [1]: %x %x", b2_config_parms[4].info[0], b2_config_parms[4].info[1]));
dbug(1, dprintf("b2_config_parms[5].info[0] [1]: %x %x", b2_config_parms[5].info[0], b2_config_parms[5].info[1]));
dbug(1, dprintf("b2_config_parms[6].info[0] [1]: %x %x", b2_config_parms[6].info[0], b2_config_parms[6].info[1]));
}
else {
dlc[6] = 14;
}
}
}
}
else
{
if (b2_config->length)
{
dbug(1, dprintf("B2-Config"));
if (llc[1] == X75_V42BIS) {
if (api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbwww", b2_config_parms))
{
return _WRONG_MESSAGE_FORMAT;
}
}
else {
if (api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbs", b2_config_parms))
{
return _WRONG_MESSAGE_FORMAT;
}
}
/* if B2 Protocol is LAPD, b2_config structure is different */
if (llc[1] == 6)
{
dlc[0] = 4;
if (b2_config->length >= 1) dlc[2] = b2_config->info[1]; /* TEI */
else dlc[2] = 0x01;
if ((b2_config->length >= 2) && (plci->B2_prot == 12))
{
SAPI = b2_config->info[2]; /* SAPI */
}
dlc[1] = SAPI;
if ((b2_config->length >= 3) && (b2_config->info[3] == 128))
{
dlc[3] = 127; /* Mode */
}
else
{
dlc[3] = 7; /* Mode */
}
if (b2_config->length >= 4) dlc[4] = b2_config->info[4]; /* Window */
else dlc[4] = 1;
dbug(1, dprintf("D-dlc[%d]=%x,%x,%x,%x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
if (b2_config->length > 5) return _B2_PARM_NOT_SUPPORTED;
}
else
{
dlc[0] = (byte)(b2_config_parms[4].length + 6);
dlc[3] = b2_config->info[1];
dlc[4] = b2_config->info[2];
if (b2_config->info[3] != 8 && b2_config->info[3] != 128) {
dbug(1, dprintf("1D-dlc= %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
return _B2_PARM_NOT_SUPPORTED;
}
dlc[5] = (byte)(b2_config->info[3] - 1);
dlc[6] = b2_config->info[4];
if (dlc[6] > dlc[5]) {
dbug(1, dprintf("2D-dlc= %x %x %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4], dlc[5], dlc[6]));
return _B2_PARM_NOT_SUPPORTED;
}
if (llc[1] == X75_V42BIS) {
if (b2_config->length >= 10) {
dlc[7] = 6;
dlc[8] = 0;
dlc[9] = b2_config_parms[4].info[0];
dlc[10] = b2_config_parms[4].info[1];
dlc[11] = b2_config_parms[5].info[0];
dlc[12] = b2_config_parms[5].info[1];
dlc[13] = b2_config_parms[6].info[0];
dlc[14] = b2_config_parms[6].info[1];
dlc[0] = 14;
dbug(1, dprintf("b2_config_parms[4].info[0] [1]: %x %x", b2_config_parms[4].info[0], b2_config_parms[4].info[1]));
dbug(1, dprintf("b2_config_parms[5].info[0] [1]: %x %x", b2_config_parms[5].info[0], b2_config_parms[5].info[1]));
dbug(1, dprintf("b2_config_parms[6].info[0] [1]: %x %x", b2_config_parms[6].info[0], b2_config_parms[6].info[1]));
}
else {
dlc[6] = 14;
}
}
else {
PUT_WORD(&dlc[7], (word)b2_config_parms[4].length);
for (i = 0; i < b2_config_parms[4].length; i++)
dlc[11 + i] = b2_config_parms[4].info[1 + i];
}
}
}
}
add_p(plci, DLC, dlc);
b3_config = &bp_parms[5];
if (b3_config->length)
{
if (plci->B3_prot == 4
|| plci->B3_prot == 5)
{
if (api_parse(&b3_config->info[1], (word)b3_config->length, "wwss", b3_config_parms))
{
return _WRONG_MESSAGE_FORMAT;
}
i = GET_WORD((byte *)(b3_config_parms[0].info));
((T30_INFO *)&nlc[1])->resolution = (byte)(((i & 0x0001) ||
((plci->B3_prot == 4) && (((byte)(GET_WORD((byte *)b3_config_parms[1].info))) != 5))) ? T30_RESOLUTION_R8_0770_OR_200 : 0);
((T30_INFO *)&nlc[1])->data_format = (byte)(GET_WORD((byte *)b3_config_parms[1].info));
fax_control_bits = T30_CONTROL_BIT_ALL_FEATURES;
if ((((T30_INFO *)&nlc[1])->rate_div_2400 != 0) && (((T30_INFO *)&nlc[1])->rate_div_2400 <= 6))
fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_V34FAX;
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
{
if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
& (1L << PRIVATE_FAX_PAPER_FORMATS))
{
((T30_INFO *)&nlc[1])->resolution |= T30_RESOLUTION_R8_1540 |
T30_RESOLUTION_R16_1540_OR_400 | T30_RESOLUTION_300_300 |
T30_RESOLUTION_INCH_BASED | T30_RESOLUTION_METRIC_BASED;
}
((T30_INFO *)&nlc[1])->recording_properties =
T30_RECORDING_WIDTH_ISO_A3 |
(T30_RECORDING_LENGTH_UNLIMITED << 2) |
(T30_MIN_SCANLINE_TIME_00_00_00 << 4);
}
if (plci->B3_prot == 5)
{
if (i & 0x0002) /* Accept incoming fax-polling requests */
fax_control_bits |= T30_CONTROL_BIT_ACCEPT_POLLING;
if (i & 0x2000) /* Do not use MR compression */
fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_2D_CODING;
if (i & 0x4000) /* Do not use MMR compression */
fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_T6_CODING;
if (i & 0x8000) /* Do not use ECM */
fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_ECM;
if (plci->fax_connect_info_length != 0)
{
((T30_INFO *)&nlc[1])->resolution = ((T30_INFO *)plci->fax_connect_info_buffer)->resolution;
((T30_INFO *)&nlc[1])->data_format = ((T30_INFO *)plci->fax_connect_info_buffer)->data_format;
((T30_INFO *)&nlc[1])->recording_properties = ((T30_INFO *)plci->fax_connect_info_buffer)->recording_properties;
fax_control_bits |= GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low) &
(T30_CONTROL_BIT_REQUEST_POLLING | T30_CONTROL_BIT_MORE_DOCUMENTS);
}
}
/* copy station id to NLC */
for (i = 0; i < T30_MAX_STATION_ID_LENGTH; i++)
{
if (i < b3_config_parms[2].length)
{
((T30_INFO *)&nlc[1])->station_id[i] = ((byte *)b3_config_parms[2].info)[1 + i];
}
else
{
((T30_INFO *)&nlc[1])->station_id[i] = ' ';
}
}
((T30_INFO *)&nlc[1])->station_id_len = T30_MAX_STATION_ID_LENGTH;
/* copy head line to NLC */
if (b3_config_parms[3].length)
{
pos = (byte)(fax_head_line_time(&(((T30_INFO *)&nlc[1])->station_id[T30_MAX_STATION_ID_LENGTH])));
if (pos != 0)
{
if (CAPI_MAX_DATE_TIME_LENGTH + 2 + b3_config_parms[3].length > CAPI_MAX_HEAD_LINE_SPACE)
pos = 0;
else
{
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
len = (byte)b3_config_parms[2].length;
if (len > 20)
len = 20;
if (CAPI_MAX_DATE_TIME_LENGTH + 2 + len + 2 + b3_config_parms[3].length <= CAPI_MAX_HEAD_LINE_SPACE)
{
for (i = 0; i < len; i++)
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ((byte *)b3_config_parms[2].info)[1 + i];
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
}
}
}
len = (byte)b3_config_parms[3].length;
if (len > CAPI_MAX_HEAD_LINE_SPACE - pos)
len = (byte)(CAPI_MAX_HEAD_LINE_SPACE - pos);
((T30_INFO *)&nlc[1])->head_line_len = (byte)(pos + len);
nlc[0] += (byte)(pos + len);
for (i = 0; i < len; i++)
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ((byte *)b3_config_parms[3].info)[1 + i];
} else
((T30_INFO *)&nlc[1])->head_line_len = 0;
plci->nsf_control_bits = 0;
if (plci->B3_prot == 5)
{
if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_FAX_SUB_SEP_PWD))
&& (GET_WORD((byte *)b3_config_parms[1].info) & 0x8000)) /* Private SUB/SEP/PWD enable */
{
plci->requested_options |= 1L << PRIVATE_FAX_SUB_SEP_PWD;
}
if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_FAX_NONSTANDARD))
&& (GET_WORD((byte *)b3_config_parms[1].info) & 0x4000)) /* Private non-standard facilities enable */
{
plci->requested_options |= 1L << PRIVATE_FAX_NONSTANDARD;
}
if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
& ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
{
if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
& (1L << PRIVATE_FAX_SUB_SEP_PWD))
{
fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_PASSWORD;
if (fax_control_bits & T30_CONTROL_BIT_ACCEPT_POLLING)
fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING;
}
len = nlc[0];
pos = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
if (pos < plci->fax_connect_info_length)
{
for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
nlc[++len] = plci->fax_connect_info_buffer[pos++];
}
else
nlc[++len] = 0;
if (pos < plci->fax_connect_info_length)
{
for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
nlc[++len] = plci->fax_connect_info_buffer[pos++];
}
else
nlc[++len] = 0;
if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
& (1L << PRIVATE_FAX_NONSTANDARD))
{
if ((pos < plci->fax_connect_info_length) && (plci->fax_connect_info_buffer[pos] != 0))
{
if ((plci->fax_connect_info_buffer[pos] >= 3) && (plci->fax_connect_info_buffer[pos + 1] >= 2))
plci->nsf_control_bits = GET_WORD(&plci->fax_connect_info_buffer[pos + 2]);
for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
nlc[++len] = plci->fax_connect_info_buffer[pos++];
}
else
{
if (api_parse(&b3_config->info[1], (word)b3_config->length, "wwsss", b3_config_parms))
{
dbug(1, dprintf("non-standard facilities info missing or wrong format"));
nlc[++len] = 0;
}
else
{
if ((b3_config_parms[4].length >= 3) && (b3_config_parms[4].info[1] >= 2))
plci->nsf_control_bits = GET_WORD(&b3_config_parms[4].info[2]);
nlc[++len] = (byte)(b3_config_parms[4].length);
for (i = 0; i < b3_config_parms[4].length; i++)
nlc[++len] = b3_config_parms[4].info[1 + i];
}
}
}
nlc[0] = len;
if ((plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
&& (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
{
((T30_INFO *)&nlc[1])->operating_mode = T30_OPERATING_MODE_CAPI_NEG;
}
}
}
PUT_WORD(&(((T30_INFO *)&nlc[1])->control_bits_low), fax_control_bits);
len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
for (i = 0; i < len; i++)
plci->fax_connect_info_buffer[i] = nlc[1 + i];
((T30_INFO *) plci->fax_connect_info_buffer)->head_line_len = 0;
i += ((T30_INFO *)&nlc[1])->head_line_len;
while (i < nlc[0])
plci->fax_connect_info_buffer[len++] = nlc[++i];
plci->fax_connect_info_length = len;
}
else
{
nlc[0] = 14;
if (b3_config->length != 16)
return _B3_PARM_NOT_SUPPORTED;
for (i = 0; i < 12; i++) nlc[1 + i] = b3_config->info[1 + i];
if (GET_WORD(&b3_config->info[13]) != 8 && GET_WORD(&b3_config->info[13]) != 128)
return _B3_PARM_NOT_SUPPORTED;
nlc[13] = b3_config->info[13];
if (GET_WORD(&b3_config->info[15]) >= nlc[13])
return _B3_PARM_NOT_SUPPORTED;
nlc[14] = b3_config->info[15];
}
}
else
{
if (plci->B3_prot == 4
|| plci->B3_prot == 5 /*T.30 - FAX*/) return _B3_PARM_NOT_SUPPORTED;
}
add_p(plci, NLC, nlc);
return 0;
}
/*----------------------------------------------------------------*/
/* make the same as add_b23, but only for the modem related */
/* L2 and L3 B-Chan protocol. */
/* */
/* Enabled L2 and L3 Configurations: */
/* If L1 == Modem all negotiation */
/* only L2 == Modem with full negotiation is allowed */
/* If L1 == Modem async or sync */
/* only L2 == Transparent is allowed */
/* L3 == Modem or L3 == Transparent are allowed */
/* B2 Configuration for modem: */
/* word : enable/disable compression, bitoptions */
/* B3 Configuration for modem: */
/* empty */
/*----------------------------------------------------------------*/
static word add_modem_b23(PLCI *plci, API_PARSE *bp_parms)
{
/* Build and attach the LLI/LLC/DLC parameter elements for a modem
 * B-channel protocol stack (allowed L1/L2/L3 combinations are listed
 * in the comment block above).  Returns 0 on success or a CAPI error
 * word (_B_STACK_NOT_SUPPORTED / _WRONG_MESSAGE_FORMAT).
 * NOTE(review): lli/llc/dlc are static scratch buffers shared by all
 * calls -- presumably serialized by the CAPI layer; verify. */
static byte lli[12] = {1,1};
static byte llc[3] = {2,0,0};
static byte dlc[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
API_PARSE mdm_config[2];
word i;
word b2_config = 0;
/* clear per-call state in the static scratch areas */
for (i = 0; i < 2; i++) mdm_config[i].length = 0;
for (i = 0; i < sizeof(dlc); i++) dlc[i] = 0;
/* L1 "modem all negotiation" only works with L2 EC/compression;
 * any other modem L1 only works with L2 transparent */
if (((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
&& (GET_WORD(bp_parms[1].info) != B2_MODEM_EC_COMPRESSION))
|| ((GET_WORD(bp_parms[0].info) != B1_MODEM_ALL_NEGOTIATE)
&& (GET_WORD(bp_parms[1].info) != B2_TRANSPARENT)))
{
return (_B_STACK_NOT_SUPPORTED);
}
/* only L3 modem or L3 transparent are allowed */
if ((GET_WORD(bp_parms[2].info) != B3_MODEM)
&& (GET_WORD(bp_parms[2].info) != B3_TRANSPARENT))
{
return (_B_STACK_NOT_SUPPORTED);
}
plci->B2_prot = (byte) GET_WORD(bp_parms[1].info);
plci->B3_prot = (byte) GET_WORD(bp_parms[2].info);
/* optional B2 configuration: one word of compression/EC bit options */
if ((GET_WORD(bp_parms[1].info) == B2_MODEM_EC_COMPRESSION) && bp_parms[4].length)
{
if (api_parse(&bp_parms[4].info[1],
(word)bp_parms[4].length, "w",
mdm_config))
{
return (_WRONG_MESSAGE_FORMAT);
}
b2_config = GET_WORD(mdm_config[0].info);
}
/* OK, L2 is modem */
/* build the LLI element: byte 0 = length, byte 1 = flag bits */
lli[0] = 1;
lli[1] = 1;
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)
lli[1] |= 2;
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL)
lli[1] |= 4;
/* when CMA is available, try to set up an RX DMA descriptor and
 * append its number plus a 32-bit magic to the LLI element */
if ((lli[1] & 0x02) && (diva_xdi_extended_features & DIVA_CAPI_USE_CMA)) {
lli[1] |= 0x10;
if (plci->rx_dma_descriptor <= 0) {
plci->rx_dma_descriptor = diva_get_dma_descriptor(plci, &plci->rx_dma_magic);
if (plci->rx_dma_descriptor >= 0)
plci->rx_dma_descriptor++;
}
if (plci->rx_dma_descriptor > 0) {
lli[1] |= 0x40;
lli[0] = 6;
lli[2] = (byte)(plci->rx_dma_descriptor - 1);
lli[3] = (byte)plci->rx_dma_magic;
lli[4] = (byte)(plci->rx_dma_magic >> 8);
lli[5] = (byte)(plci->rx_dma_magic >> 16);
lli[6] = (byte)(plci->rx_dma_magic >> 24);
}
}
if (DIVA_CAPI_SUPPORTS_NO_CANCEL(plci->adapter)) {
lli[1] |= 0x20;
}
/* LLC: V.42 role depends on call direction */
llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ?
/*V42*/ 10 : /*V42_IN*/ 9;
llc[2] = 4; /* pass L3 always transparent */
add_p(plci, LLI, lli);
add_p(plci, LLC, llc);
/* DLC: word max data length followed by protocol parameters */
i = 1;
PUT_WORD(&dlc[i], plci->appl->MaxDataLength);
i += 2;
if (GET_WORD(bp_parms[1].info) == B2_MODEM_EC_COMPRESSION)
{
if (bp_parms[4].length)
{
dbug(1, dprintf("MDM b2_config=%02x", b2_config));
dlc[i++] = 3; /* Addr A */
dlc[i++] = 1; /* Addr B */
dlc[i++] = 7; /* modulo mode */
dlc[i++] = 7; /* window size */
dlc[i++] = 0; /* XID len Lo */
dlc[i++] = 0; /* XID len Hi */
/* translate the CAPI bit options into DLC protocol flags */
if (b2_config & MDM_B2_DISABLE_V42bis)
{
dlc[i] |= DLC_MODEMPROT_DISABLE_V42_V42BIS;
}
if (b2_config & MDM_B2_DISABLE_MNP)
{
dlc[i] |= DLC_MODEMPROT_DISABLE_MNP_MNP5;
}
if (b2_config & MDM_B2_DISABLE_TRANS)
{
dlc[i] |= DLC_MODEMPROT_REQUIRE_PROTOCOL;
}
if (b2_config & MDM_B2_DISABLE_V42)
{
dlc[i] |= DLC_MODEMPROT_DISABLE_V42_DETECT;
}
if (b2_config & MDM_B2_DISABLE_COMP)
{
dlc[i] |= DLC_MODEMPROT_DISABLE_COMPRESSION;
}
i++;
}
}
else
{
/* L2 transparent: disable every modem protocol feature */
dlc[i++] = 3; /* Addr A */
dlc[i++] = 1; /* Addr B */
dlc[i++] = 7; /* modulo mode */
dlc[i++] = 7; /* window size */
dlc[i++] = 0; /* XID len Lo */
dlc[i++] = 0; /* XID len Hi */
dlc[i++] = DLC_MODEMPROT_DISABLE_V42_V42BIS |
DLC_MODEMPROT_DISABLE_MNP_MNP5 |
DLC_MODEMPROT_DISABLE_V42_DETECT |
DLC_MODEMPROT_DISABLE_COMPRESSION;
}
dlc[0] = (byte)(i - 1);
/* HexDump ("DLC", sizeof(dlc), &dlc[0]); */
add_p(plci, DLC, dlc);
return (0);
}
/*------------------------------------------------------------------*/
/* send a request for the signaling entity */
/*------------------------------------------------------------------*/
static void sig_req(PLCI *plci, byte req, byte Id)
{
/* Queue a request for the signaling entity into the PLCI request
 * ring buffer (RBuffer).  send_req() later dequeues it and hands it
 * to the adapter.  Entry layout: word length of the parameter area,
 * the parameters (added earlier via add_p), then sig-id, request
 * code and channel byte. */
if (!plci) return;
if (plci->adapter->adapter_disabled) return;
dbug(1, dprintf("sig_req(%x)", req));
if (req == REMOVE)
plci->sig_remove_id = plci->Sig.Id;
/* no parameters were queued since the last request: reserve the
 * 2-byte length field and write an empty parameter list */
if (plci->req_in == plci->req_in_start) {
plci->req_in += 2;
plci->RBuffer[plci->req_in++] = 0;
}
/* patch the parameter-area length in front of this entry */
PUT_WORD(&plci->RBuffer[plci->req_in_start], plci->req_in-plci->req_in_start - 2);
plci->RBuffer[plci->req_in++] = Id; /* sig/nl flag */
plci->RBuffer[plci->req_in++] = req; /* request */
plci->RBuffer[plci->req_in++] = 0; /* channel */
plci->req_in_start = plci->req_in;
}
/*------------------------------------------------------------------*/
/* send a request for the network layer entity */
/*------------------------------------------------------------------*/
static void nl_req_ncci(PLCI *plci, byte req, byte ncci)
{
/* Queue a request for the network-layer entity into the PLCI request
 * ring buffer; same entry layout as sig_req() but with the sig/nl
 * flag fixed to 1 and the channel taken from the NCCI mapping. */
if (!plci) return;
if (plci->adapter->adapter_disabled) return;
dbug(1, dprintf("nl_req %02x %02x %02x", plci->Id, req, ncci));
if (req == REMOVE)
{
/* remember the NL id being removed and drop all NCCI state first */
plci->nl_remove_id = plci->NL.Id;
ncci_remove(plci, 0, (byte)(ncci != 0));
ncci = 0;
}
/* no parameters queued since the last request: reserve the length
 * field and write an empty parameter list */
if (plci->req_in == plci->req_in_start) {
plci->req_in += 2;
plci->RBuffer[plci->req_in++] = 0;
}
PUT_WORD(&plci->RBuffer[plci->req_in_start], plci->req_in-plci->req_in_start - 2);
plci->RBuffer[plci->req_in++] = 1; /* sig/nl flag */
plci->RBuffer[plci->req_in++] = req; /* request */
plci->RBuffer[plci->req_in++] = plci->adapter->ncci_ch[ncci]; /* channel */
plci->req_in_start = plci->req_in;
}
static void send_req(PLCI *plci)
{
/* Dequeue the next buffered request from plci->RBuffer (queued by
 * sig_req()/nl_req_ncci()) and pass it to the adapter -- unless a
 * signaling or network-layer request is still outstanding. */
ENTITY *e;
word l;
/* word i; */
if (!plci) return;
if (plci->adapter->adapter_disabled) return;
channel_xmit_xon(plci);
/* if nothing to do, return */
if (plci->req_in == plci->req_out) return;
dbug(1, dprintf("send_req(in=%d,out=%d)", plci->req_in, plci->req_out));
/* only one request per entity may be in flight at a time */
if (plci->nl_req || plci->sig_req) return;
/* entry layout: word length, <length> parameter bytes, then
 * sig/nl flag byte, request byte, channel byte */
l = GET_WORD(&plci->RBuffer[plci->req_out]);
plci->req_out += 2;
plci->XData[0].P = &plci->RBuffer[plci->req_out];
plci->req_out += l;
if (plci->RBuffer[plci->req_out] == 1)
{
/* network-layer request */
e = &plci->NL;
plci->req_out++;
e->Req = plci->nl_req = plci->RBuffer[plci->req_out++];
e->ReqCh = plci->RBuffer[plci->req_out++];
if (!(e->Id & 0x1f))
{
/* entity not assigned yet: rewrite the entry tail in place into
 * a CAI element carrying the signaling id, so the adapter can
 * associate the new NL entity with the signaling entity */
e->Id = NL_ID;
plci->RBuffer[plci->req_out - 4] = CAI;
plci->RBuffer[plci->req_out - 3] = 1;
plci->RBuffer[plci->req_out - 2] = (plci->Sig.Id == 0xff) ? 0 : plci->Sig.Id;
plci->RBuffer[plci->req_out - 1] = 0;
l += 3;
plci->nl_global_req = plci->nl_req;
}
dbug(1, dprintf("%x:NLREQ(%x:%x:%x)", plci->adapter->Id, e->Id, e->Req, e->ReqCh));
}
else
{
/* signaling request; a non-zero flag byte carries the sig id */
e = &plci->Sig;
if (plci->RBuffer[plci->req_out])
e->Id = plci->RBuffer[plci->req_out];
plci->req_out++;
e->Req = plci->sig_req = plci->RBuffer[plci->req_out++];
e->ReqCh = plci->RBuffer[plci->req_out++];
if (!(e->Id & 0x1f))
plci->sig_global_req = plci->sig_req;
dbug(1, dprintf("%x:SIGREQ(%x:%x:%x)", plci->adapter->Id, e->Id, e->Req, e->ReqCh));
}
plci->XData[0].PLength = l;
e->X = plci->XData;
plci->adapter->request(e);
dbug(1, dprintf("send_ok"));
}
static void send_data(PLCI *plci)
{
/* Walk the ring list of NCCIs attached to this PLCI and submit the
 * next pending DATA_B3 (or a deferred N_DISC) to the network layer.
 * Stops as soon as an NL request is outstanding or the ring has been
 * traversed once; the traversal position is kept in ncci_ring_list. */
DIVA_CAPI_ADAPTER *a;
DATA_B3_DESC *data;
NCCI *ncci_ptr;
word ncci;
if (!plci->nl_req && plci->ncci_ring_list)
{
a = plci->adapter;
ncci = plci->ncci_ring_list;
do
{
ncci = a->ncci_next[ncci];
ncci_ptr = &(a->ncci[ncci]);
/* skip channels that are flow-controlled (OK-FC pending) */
if (!(a->ncci_ch[ncci]
&& (a->ch_flow_control[a->ncci_ch[ncci]] & N_OK_FC_PENDING)))
{
if (ncci_ptr->data_pending)
{
if ((a->ncci_state[ncci] == CONNECTED)
|| (a->ncci_state[ncci] == INC_ACT_PENDING)
|| (plci->send_disc == ncci))
{
data = &(ncci_ptr->DBuffer[ncci_ptr->data_out]);
if ((plci->B2_prot == B2_V120_ASYNC)
|| (plci->B2_prot == B2_V120_ASYNC_V42BIS)
|| (plci->B2_prot == B2_V120_BIT_TRANSPARENT))
{
/* V.120 needs a one-byte terminal-adaption header in
 * front of the payload (break vs. default header) */
plci->NData[1].P = TransmitBufferGet(plci->appl, data->P);
plci->NData[1].PLength = data->Length;
if (data->Flags & 0x10)
plci->NData[0].P = v120_break_header;
else
plci->NData[0].P = v120_default_header;
plci->NData[0].PLength = 1;
plci->NL.XNum = 2;
plci->NL.Req = plci->nl_req = (byte)((data->Flags & 0x07) << 4 | N_DATA);
}
else
{
plci->NData[0].P = TransmitBufferGet(plci->appl, data->P);
plci->NData[0].PLength = data->Length;
/* map DATA_B3 flags to the NL request code */
if (data->Flags & 0x10)
plci->NL.Req = plci->nl_req = (byte)N_UDATA;
else if ((plci->B3_prot == B3_RTP) && (data->Flags & 0x01))
plci->NL.Req = plci->nl_req = (byte)N_BDATA;
else
plci->NL.Req = plci->nl_req = (byte)((data->Flags & 0x07) << 4 | N_DATA);
}
plci->NL.X = plci->NData;
plci->NL.ReqCh = a->ncci_ch[ncci];
dbug(1, dprintf("%x:DREQ(%x:%x)", a->Id, plci->NL.Id, plci->NL.Req));
plci->data_sent = true;
plci->data_sent_ptr = data->P;
a->request(&plci->NL);
}
else {
/* NCCI no longer in a sendable state: drop its queued data */
cleanup_ncci_data(plci, ncci);
}
}
else if (plci->send_disc == ncci)
{
/* dprintf("N_DISC"); */
/* queue drained: send the deferred disconnect for this NCCI */
plci->NData[0].PLength = 0;
plci->NL.ReqCh = a->ncci_ch[ncci];
plci->NL.Req = plci->nl_req = N_DISC;
a->request(&plci->NL);
plci->command = _DISCONNECT_B3_R;
plci->send_disc = 0;
}
}
} while (!plci->nl_req && (ncci != plci->ncci_ring_list));
plci->ncci_ring_list = ncci;
}
}
static void listen_check(DIVA_CAPI_ADAPTER *a)
{
	word idx, plci_id;
	PLCI *plci;
	byte notified_calls = 0;

	dbug(1, dprintf("listen_check(%d,%d)", a->listen_active, a->max_listen));
	if (remove_started || a->adapter_disabled)
		return;

	/* count PLCIs currently carrying a notified call; they extend the
	   number of listen PLCIs we keep assigned */
	for (idx = 0; idx < a->max_plci; idx++) {
		if (a->plci[idx].notifiedcall)
			notified_calls++;
	}
	dbug(1, dprintf("listen_check(%d)", notified_calls));

	/* assign additional listen PLCIs until the target count is reached */
	for (idx = a->listen_active; idx < ((word)(a->max_listen + notified_calls)); idx++) {
		plci_id = get_plci(a);
		if (!plci_id)
			continue;
		a->listen_active++;
		plci = &a->plci[plci_id - 1];
		plci->State = LISTENING;
		add_p(plci, OAD, "\x01\xfd");
		add_p(plci, KEY, "\x04\x43\x41\x32\x30");
		add_p(plci, CAI, "\x01\xc0");
		add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
		add_p(plci, LLI, "\x01\xc4"); /* support Dummy CR FAC + MWI + SpoofNotify */
		add_p(plci, SHIFT | 6, NULL);
		add_p(plci, SIN, "\x02\x00\x00");
		plci->internal_command = LISTEN_SIG_ASSIGN_PEND; /* do indicate_req if OK */
		sig_req(plci, ASSIGN, DSIG_ID);
		send_req(plci);
	}
}
/*------------------------------------------------------------------*/
/* functions for all parameters sent in INDs */
/*------------------------------------------------------------------*/
static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize)
{
/* Scan the signaling indication buffer (plci->Sig.RBuffer) for the
 * information elements listed in parms_id and store pointers to each
 * found IE in parms[].  parms_id[0] holds the element count; for
 * multi-instance IEs (multiIEsize != 0) successive matches fill
 * consecutive parms slots.  Handles Q.931 codeset shift (locking and
 * non-locking) and ESC-coded elements, and rewrites OAD/CONN_NR/CAD
 * in place when the type-of-number octet has the extension bit set. */
word ploc; /* points to current location within packet */
byte w;
byte wlen;
byte codeset, lock;
byte *in;
word i;
word code;
word mIEindex = 0;
ploc = 0;
codeset = 0;
lock = 0;
in = plci->Sig.RBuffer->P;
/* preset all result slots to the empty IE */
for (i = 0; i < parms_id[0]; i++) /* multiIE parms_id contains just the 1st */
{ /* element but parms array is larger */
parms[i] = (byte *)"";
}
for (i = 0; i < multiIEsize; i++)
{
parms[i] = (byte *)"";
}
while (ploc < plci->Sig.RBuffer->length - 1) {
/* read information element id and length */
w = in[ploc];
if (w & 0x80) {
/* w &=0xf0; removed, cannot detect congestion levels */
/* upper 4 bit masked with w==SHIFT now */
/* single-octet IE: no length byte follows */
wlen = 0;
}
else {
wlen = (byte)(in[ploc + 1] + 1);
}
/* check if length valid (not exceeding end of packet) */
if ((ploc + wlen) > 270) return;
/* non-locking shifts apply to one IE only; restore afterwards */
if (lock & 0x80) lock &= 0x7f;
else codeset = lock;
if ((w & 0xf0) == SHIFT) {
codeset = in[ploc];
if (!(codeset & 0x08)) lock = (byte)(codeset & 7);
codeset &= 7;
lock |= 0x80;
}
else {
/* ESC-coded IEs get bit 0x800 set so they cannot collide with
 * plain IE ids; the codeset goes into the high byte */
if (w == ESC && wlen >= 3) code = in[ploc + 2] | 0x800;
else code = w;
code |= (codeset << 8);
for (i = 1; i < parms_id[0] + 1 && parms_id[i] != code; i++);
if (i < parms_id[0] + 1) {
if (!multiIEsize) { /* with multiIEs use next field index, */
mIEindex = i - 1; /* with normal IEs use same index like parms_id */
}
parms[mIEindex] = &in[ploc + 1];
dbug(1, dprintf("mIE[%d]=0x%x", *parms[mIEindex], in[ploc]));
if (parms_id[i] == OAD
|| parms_id[i] == CONN_NR
|| parms_id[i] == CAD) {
/* extension bit set in the type-of-number octet: rewrite the
 * IE in place so the number starts one byte earlier */
if (in[ploc + 2] & 0x80) {
in[ploc + 0] = (byte)(in[ploc + 1] + 1);
in[ploc + 1] = (byte)(in[ploc + 2] & 0x7f);
in[ploc + 2] = 0x80;
parms[mIEindex] = &in[ploc];
}
}
mIEindex++; /* effects multiIEs only */
}
}
ploc += (wlen + 1);
}
return;
}
/*------------------------------------------------------------------*/
/* try to match a cip from received BC and HLC */
/*------------------------------------------------------------------*/
static byte ie_compare(byte *ie1, byte *ie2)
{
	word idx;

	/* Compare two information elements: byte 0 is the content length,
	   followed by that many content bytes.  True only when both are
	   non-NULL, ie1 is non-empty, and length plus content match. */
	if (ie1 == NULL || ie2 == NULL)
		return false;
	if (ie1[0] == 0)
		return false;
	for (idx = 0; idx < (word)(ie1[0] + 1); idx++) {
		if (ie1[idx] != ie2[idx])
			return false;
	}
	return true;
}
static word find_cip(DIVA_CAPI_ADAPTER *a, byte *bc, byte *hlc)
{
	word low;	/* best match in CIP range 9..1 (BC only) */
	word high;	/* first match in CIP range 16..28 (BC and HLC) */

	/* search the basic CIP values from the top down; 0 means no match */
	for (low = 9; low && !ie_compare(bc, cip_bc[low][a->u_law]); low--)
		;
	/* search the extended CIP values that also require a matching HLC */
	for (high = 16; high < 29 &&
		     (!ie_compare(bc, cip_bc[high][a->u_law]) || !ie_compare(hlc, cip_hlc[high])); high++)
		;
	/* prefer the extended match; fall back to the basic one */
	return (high == 29) ? low : high;
}
static byte AddInfo(byte **add_i,
		    byte **fty_i,
		    byte *esc_chi,
		    byte *facility)
{
	byte ie_idx;
	byte out_pos;
	byte copy_idx;
	byte ie_len;
	byte total = 0;

	/* facility is a nested structure */
	/* FTY can be more than once */

	/* ESC_CHI present with all channel bits clear means the call uses
	   neither a B nor the D channel */
	if (esc_chi[0] && !(esc_chi[esc_chi[0]] & 0x7f))
		add_i[0] = (byte *)"\x02\x02\x00"; /* use neither b nor d channel */
	else
		add_i[0] = (byte *)"";

	if (!fty_i[0][0]) {
		add_i[3] = (byte *)"";
	} else {
		/* flatten every FTY IE into one facility array, each entry
		   prefixed with the 0x1c facility IE id */
		for (ie_idx = 0, out_pos = 1; ie_idx < MAX_MULTI_IE && fty_i[ie_idx][0]; ie_idx++) {
			dbug(1, dprintf("AddIFac[%d]", fty_i[ie_idx][0]));
			total += fty_i[ie_idx][0];
			total += 2;
			ie_len = fty_i[ie_idx][0];
			facility[out_pos++] = 0x1c; /* copy fac IE */
			for (copy_idx = 0; copy_idx <= ie_len; copy_idx++, out_pos++)
				facility[out_pos] = fty_i[ie_idx][copy_idx];
		}
		facility[0] = total;
		add_i[3] = facility;
	}
	/* total length of all four additional-info parts plus their
	   four length bytes */
	total = add_i[0][0] + add_i[1][0] + add_i[2][0] + add_i[3][0];
	total += 4;
	return (total);
}
/*------------------------------------------------------------------*/
/* voice and codec features */
/*------------------------------------------------------------------*/
static void SetVoiceChannel(PLCI *plci, byte *chi, DIVA_CAPI_ADAPTER *a)
{
	byte voice_chi[] = "\x02\x18\x01";
	byte b_chan;

	/* take the B-channel number from the last octet of the CHI IE;
	   fall back to channel 1 when none is coded */
	b_chan = chi[chi[0]] & 0x3;
	dbug(1, dprintf("ExtDevON(Ch=0x%x)", b_chan));
	if (b_chan)
		voice_chi[2] = b_chan;
	else
		voice_chi[2] = 1;
	add_p(plci, FTY, "\x02\x01\x07"); /* B On, default on 1 */
	add_p(plci, ESC, voice_chi); /* Channel */
	sig_req(plci, TEL_CTRL, 0);
	send_req(plci);
	if (a->AdvSignalPLCI)
		adv_voice_write_coefs(a->AdvSignalPLCI, ADV_VOICE_WRITE_ACTIVATION);
}
static void VoiceChannelOff(PLCI *plci)
{
	/* switch the external voice device off and clear any
	   advanced-voice coefficient configuration */
	dbug(1, dprintf("ExtDevOFF"));
	add_p(plci, FTY, "\x02\x01\x08"); /* B Off */
	sig_req(plci, TEL_CTRL, 0);
	send_req(plci);
	if (plci->adapter->AdvSignalPLCI != NULL)
		adv_voice_clear_config(plci->adapter->AdvSignalPLCI);
}
static word AdvCodecSupport(DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl,
byte hook_listen)
{
/* Claim the adapter's codec hardware for the given application.
 * Returns 0 on success, 0x2001 when the codec is owned by another
 * application or no PLCI is available, 0x300B when the hardware has
 * no suitable codec support.  Updates the adapter-global codec
 * bookkeeping (AdvSignalAppl / AdvSignalPLCI / AdvCodecPLCI). */
word j;
PLCI *splci;
/* check if hardware supports handset with hook states (adv.codec) */
/* or if just a on board codec is supported */
/* the advanced codec plci is just for internal use */
/* diva Pro with on-board codec: */
if (a->profile.Global_Options & HANDSET)
{
/* new call, but hook states are already signalled */
if (a->AdvCodecFLAG)
{
if (a->AdvSignalAppl != appl || a->AdvSignalPLCI)
{
dbug(1, dprintf("AdvSigPlci=0x%x", a->AdvSignalPLCI));
return 0x2001; /* codec in use by another application */
}
if (plci != NULL)
{
a->AdvSignalPLCI = plci;
plci->tel = ADV_VOICE;
}
return 0; /* adv codec still used */
}
/* allocate an internal PLCI that permanently owns the codec */
if ((j = get_plci(a)))
{
splci = &a->plci[j - 1];
splci->tel = CODEC_PERMANENT;
/* hook_listen indicates if a facility_req with handset/hook support */
/* was sent. Otherwise if just a call on an external device was made */
/* the codec will be used but the hook info will be discarded (just */
/* the external controller is in use */
if (hook_listen) splci->State = ADVANCED_VOICE_SIG;
else
{
splci->State = ADVANCED_VOICE_NOSIG;
if (plci)
{
plci->spoofed_msg = SPOOFING_REQUIRED;
}
/* indicate D-ch connect if */
} /* codec is connected OK */
if (plci != NULL)
{
a->AdvSignalPLCI = plci;
plci->tel = ADV_VOICE;
}
a->AdvSignalAppl = appl;
a->AdvCodecFLAG = true;
a->AdvCodecPLCI = splci;
add_p(splci, CAI, "\x01\x15");
add_p(splci, LLI, "\x01\x00");
add_p(splci, ESC, "\x02\x18\x00");
add_p(splci, UID, "\x06\x43\x61\x70\x69\x32\x30");
splci->internal_command = PERM_COD_ASSIGN;
dbug(1, dprintf("Codec Assign"));
sig_req(splci, ASSIGN, DSIG_ID);
send_req(splci);
}
else
{
return 0x2001; /* wrong state, no more plcis */
}
}
else if (a->profile.Global_Options & ON_BOARD_CODEC)
{
if (hook_listen) return 0x300B; /* Facility not supported */
/* no hook with SCOM */
if (plci != NULL) plci->tel = CODEC;
dbug(1, dprintf("S/SCOM codec"));
/* first time we use the scom-s codec we must shut down the internal */
/* handset application of the card. This can be done by an assign with */
/* a cai with the 0x80 bit set. Assign return code is 'out of resource'*/
if (!a->scom_appl_disable) {
if ((j = get_plci(a))) {
splci = &a->plci[j - 1];
add_p(splci, CAI, "\x01\x80");
add_p(splci, UID, "\x06\x43\x61\x70\x69\x32\x30");
sig_req(splci, ASSIGN, 0xC0); /* 0xc0 is the TEL_ID */
send_req(splci);
a->scom_appl_disable = true;
}
else{
return 0x2001; /* wrong state, no more plcis */
}
}
}
else return 0x300B; /* Facility not supported */
return 0;
}
static void CodecIdCheck(DIVA_CAPI_ADAPTER *a, PLCI *plci)
{
	/* Release the advanced codec if this PLCI currently owns it; a
	   temporary (no-signaling) codec PLCI is removed entirely. */
	dbug(1, dprintf("CodecIdCheck"));
	if (a->AdvSignalPLCI != plci)
		return;
	dbug(1, dprintf("PLCI owns codec"));
	VoiceChannelOff(a->AdvCodecPLCI);
	if (a->AdvCodecPLCI->State == ADVANCED_VOICE_NOSIG) {
		dbug(1, dprintf("remove temp codec PLCI"));
		plci_remove(a->AdvCodecPLCI);
		a->AdvCodecFLAG = 0;
		a->AdvCodecPLCI = NULL;
		a->AdvSignalAppl = NULL;
	}
	a->AdvSignalPLCI = NULL;
}
/* -------------------------------------------------------------------
Ask for physical address of card on PCI bus
------------------------------------------------------------------- */
static void diva_ask_for_xdi_sdram_bar(DIVA_CAPI_ADAPTER *a,
				       IDI_SYNC_REQ *preq) {
	ENTITY *e;

	/* Query XDI for the adapter's SDRAM base address register; leave
	   sdram_bar at 0 when the feature is not available. */
	a->sdram_bar = 0;
	if (!(diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR))
		return;
	e = (ENTITY *)preq;
	e->user[0] = a->Id - 1;
	preq->xdi_sdram_bar.info.bar = 0;
	preq->xdi_sdram_bar.Req = 0;
	preq->xdi_sdram_bar.Rc = IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR;
	(*(a->request))(e);
	a->sdram_bar = preq->xdi_sdram_bar.info.bar;
	dbug(3, dprintf("A(%d) SDRAM BAR = %08x", a->Id, a->sdram_bar));
}
/* -------------------------------------------------------------------
Ask XDI about extended features
------------------------------------------------------------------- */
static void diva_get_extended_adapter_features(DIVA_CAPI_ADAPTER *a) {
/* Query XDI once (globally) for its extended feature set and cache
 * the result in the global diva_xdi_extended_features flags; then
 * ask for this adapter's SDRAM BAR.  The local buffer must be large
 * enough to act both as an IDI_SYNC_REQ and as an ENTITY, hence the
 * sized-to-the-larger-of-the-two declaration below. */
IDI_SYNC_REQ *preq;
char buffer[((sizeof(preq->xdi_extended_features) + 4) > sizeof(ENTITY)) ? (sizeof(preq->xdi_extended_features) + 4) : sizeof(ENTITY)];
char features[4];
preq = (IDI_SYNC_REQ *)&buffer[0];
if (!diva_xdi_extended_features) {
ENTITY *e = (ENTITY *)preq;
/* bit 31 marks "already queried" so this runs only once */
diva_xdi_extended_features |= 0x80000000;
e->user[0] = a->Id - 1;
preq->xdi_extended_features.Req = 0;
preq->xdi_extended_features.Rc = IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES;
preq->xdi_extended_features.info.buffer_length_in_bytes = sizeof(features);
preq->xdi_extended_features.info.features = &features[0];
(*(a->request))(e);
if (features[0] & DIVA_XDI_EXTENDED_FEATURES_VALID) {
/*
Check features located in the byte '0'
*/
if (features[0] & DIVA_XDI_EXTENDED_FEATURE_CMA) {
diva_xdi_extended_features |= DIVA_CAPI_USE_CMA;
}
if (features[0] & DIVA_XDI_EXTENDED_FEATURE_RX_DMA) {
diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_RX_DMA;
dbug(1, dprintf("XDI provides RxDMA"));
}
if (features[0] & DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR) {
diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR;
}
if (features[0] & DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC) {
diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_NO_CANCEL;
dbug(3, dprintf("XDI provides NO_CANCEL_RC feature"));
}
}
}
diva_ask_for_xdi_sdram_bar(a, preq);
}
/*------------------------------------------------------------------*/
/* automatic law */
/*------------------------------------------------------------------*/
/* called from OS specific part after init time to get the Law */
/* a-law (Euro) and u-law (us,japan) use different BCs in the Setup message */
/*
 * Start the automatic law (a-law vs. u-law) detection for an adapter.
 * Runs at most once per adapter: allocates a signaling PLCI, marks it
 * as the automatic-law PLCI and issues an ASSIGN with the USELAW_REQ
 * internal command.  Also triggers the one-time extended feature query.
 */
void AutomaticLaw(DIVA_CAPI_ADAPTER *a)
{
	word plci_idx;
	PLCI *p;

	if (a->automatic_law)
		return;		/* already started for this adapter */
	plci_idx = get_plci(a);
	if (plci_idx == 0)
		return;		/* no free PLCI available */
	diva_get_extended_adapter_features(a);
	p = &a->plci[plci_idx - 1];
	a->automatic_lawPLCI = p;
	a->automatic_law = 1;
	add_p(p, CAI, "\x01\x80");
	add_p(p, UID, "\x06\x43\x61\x70\x69\x32\x30");
	p->internal_command = USELAW_REQ;
	p->command = 0;
	p->number = 0;
	sig_req(p, ASSIGN, DSIG_ID);
	send_req(p);
}
/* called from OS specific part if an application sends an Capi20Release */
/*
 * Release every resource held on behalf of application 'Id' on all
 * adapters: listen/notification masks, pending incoming call offers,
 * PLCIs owned by the application and the advanced-voice codec.  If the
 * last application releases and dynamic L1 is configured, a layer-1
 * down request is issued.  Called from the OS specific part on
 * CAPI_RELEASE.  Returns GOOD or _WRONG_APPL_ID.
 */
word CapiRelease(word Id)
{
	word i, j, appls_found;
	PLCI *plci;
	APPL *this;
	DIVA_CAPI_ADAPTER *a;
	if (!Id)
	{
		dbug(0, dprintf("A: CapiRelease(Id==0)"));
		return (_WRONG_APPL_ID);
	}
	this = &application[Id - 1];           /* get application pointer */
	/* count the registered applications (needed for the L1-down decision) */
	for (i = 0, appls_found = 0; i < max_appl; i++)
	{
		if (application[i].Id)         /* an application has been found */
		{
			appls_found++;
		}
	}
	for (i = 0; i < max_adapter; i++)      /* scan all adapters... */
	{
		a = &adapter[i];
		if (a->request)
		{
			/* clear all per-application state kept by this adapter */
			a->Info_Mask[Id - 1] = 0;
			a->CIP_Mask[Id - 1] = 0;
			a->Notification_Mask[Id - 1] = 0;
			a->codec_listen[Id - 1] = NULL;
			a->requested_options_table[Id - 1] = 0;
			for (j = 0; j < a->max_plci; j++) /* and all PLCIs connected */
			{ /* with this application */
				plci = &a->plci[j];
				if (plci->Id) /* the PLCI may own no application yet */
				{ /* if it is not yet connected */
					if (plci->State == INC_CON_PENDING
					    || plci->State == INC_CON_ALERT)
					{
						/* withdraw the pending incoming call offer; if no
						   other application is still offered the call, hang up */
						if (test_c_ind_mask_bit(plci, (word)(Id - 1)))
						{
							clear_c_ind_mask_bit(plci, (word)(Id - 1));
							if (c_ind_mask_empty(plci))
							{
								sig_req(plci, HANGUP, 0);
								send_req(plci);
								plci->State = OUTG_DIS_PENDING;
							}
						}
					}
					/* for any other state just drop the indication bit and
					   remove unowned PLCIs when nobody is left */
					if (test_c_ind_mask_bit(plci, (word)(Id - 1)))
					{
						clear_c_ind_mask_bit(plci, (word)(Id - 1));
						if (c_ind_mask_empty(plci))
						{
							if (!plci->appl)
							{
								plci_remove(plci);
								plci->State = IDLE;
							}
						}
					}
					/* PLCIs owned by the released application are removed */
					if (plci->appl == this)
					{
						plci->appl = NULL;
						plci_remove(plci);
						plci->State = IDLE;
					}
				}
			}
			listen_check(a);
			if (a->flag_dynamic_l1_down)
			{
				if (appls_found == 1) /* last application does a capi release */
				{
					if ((j = get_plci(a)))
					{
						/* assign a temporary signaling PLCI and request
						   layer-1 deactivation */
						plci = &a->plci[j - 1];
						plci->command = 0;
						add_p(plci, OAD, "\x01\xfd");
						add_p(plci, CAI, "\x01\x80");
						add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
						add_p(plci, SHIFT | 6, NULL);
						add_p(plci, SIN, "\x02\x00\x00");
						plci->internal_command = REM_L1_SIG_ASSIGN_PEND;
						sig_req(plci, ASSIGN, DSIG_ID);
						add_p(plci, FTY, "\x02\xff\x06"); /* l1 down */
						sig_req(plci, SIG_CTRL, 0);
						send_req(plci);
					}
				}
			}
			if (a->AdvSignalAppl == this)
			{
				/* release the advanced-voice codec owned by this application */
				this->NullCREnable = false;
				if (a->AdvCodecPLCI)
				{
					plci_remove(a->AdvCodecPLCI);
					a->AdvCodecPLCI->tel = 0;
					a->AdvCodecPLCI->adv_nl = 0;
				}
				a->AdvSignalAppl = NULL;
				a->AdvSignalPLCI = NULL;
				a->AdvCodecFLAG = 0;
				a->AdvCodecPLCI = NULL;
			}
		}
	}
	this->Id = 0;	/* finally mark the application slot as free */
	return GOOD;
}
/*
 * Check whether a PLCI in removal has completed removal on both the
 * network (NL) and signaling (Sig) entities.  Once both entity ids are
 * gone and no incoming-call indications are pending, all remaining
 * resources are freed and the PLCI returns to the IDLE pool.
 * Returns true when the PLCI is (now) completely removed.
 */
static word plci_remove_check(PLCI *plci)
{
	if (!plci) return true;
	if (!plci->NL.Id && c_ind_mask_empty(plci))
	{
		/* Sig.Id 0xff apparently marks a dead signaling entity -
		   treated the same as "already removed" (TODO confirm) */
		if (plci->Sig.Id == 0xff)
			plci->Sig.Id = 0;
		if (!plci->Sig.Id)
		{
			dbug(1, dprintf("plci_remove_complete(%x)", plci->Id));
			dbug(1, dprintf("tel=0x%x,Sig=0x%x", plci->tel, plci->Sig.Id));
			if (plci->Id)
			{
				/* free everything still attached to this PLCI */
				CodecIdCheck(plci->adapter, plci);
				clear_b1_config(plci);
				ncci_remove(plci, 0, false);
				plci_free_msg_in_queue(plci);
				channel_flow_control_remove(plci);
				plci->Id = 0;
				plci->State = IDLE;
				plci->channels = 0;
				plci->appl = NULL;
				plci->notifiedcall = 0;
			}
			listen_check(plci->adapter);
			return true;
		}
	}
	return false;
}
/*------------------------------------------------------------------*/
/*
 * Return nonzero while the network channel cannot accept another
 * request: either an NL request is outstanding, or flow control is
 * pending on the channel of the first NCCI in the ring list.
 * Only applicable for non-multiplexed protocols.
 */
static byte plci_nl_busy(PLCI *plci)
{
	if (plci->nl_req)
		return (1);
	if (plci->ncci_ring_list
	    && plci->adapter->ncci_ch[plci->ncci_ring_list]
	    && (plci->adapter->ch_flow_control[plci->adapter->ncci_ch[plci->ncci_ring_list]] & N_OK_FC_PENDING))
		return (1);
	return (0);
}
/*------------------------------------------------------------------*/
/* DTMF facilities */
/*------------------------------------------------------------------*/
/*
 * Mapping between the characters used in CAPI DTMF facility messages
 * and the tone codes exchanged with the card.  The mask fields select
 * which listen/send mode may use an entry; from the code below the
 * groups are: 0x01 = DTMF digits, 0x02 = MF digits, 0x04 = tones
 * (matches the mask shifting in dtmf_command()/dtmf_request()).
 */
static struct
{
	byte send_mask;		/* modes allowed to send this entry (0 = receive only) */
	byte listen_mask;	/* modes allowed to detect/report this entry */
	byte character;		/* character as it appears in the CAPI message */
	byte code;		/* tone code used towards the card */
} dtmf_digit_map[] =
{
	{ 0x01, 0x01, 0x23, DTMF_DIGIT_TONE_CODE_HASHMARK },
	{ 0x01, 0x01, 0x2a, DTMF_DIGIT_TONE_CODE_STAR },
	{ 0x01, 0x01, 0x30, DTMF_DIGIT_TONE_CODE_0 },
	{ 0x01, 0x01, 0x31, DTMF_DIGIT_TONE_CODE_1 },
	{ 0x01, 0x01, 0x32, DTMF_DIGIT_TONE_CODE_2 },
	{ 0x01, 0x01, 0x33, DTMF_DIGIT_TONE_CODE_3 },
	{ 0x01, 0x01, 0x34, DTMF_DIGIT_TONE_CODE_4 },
	{ 0x01, 0x01, 0x35, DTMF_DIGIT_TONE_CODE_5 },
	{ 0x01, 0x01, 0x36, DTMF_DIGIT_TONE_CODE_6 },
	{ 0x01, 0x01, 0x37, DTMF_DIGIT_TONE_CODE_7 },
	{ 0x01, 0x01, 0x38, DTMF_DIGIT_TONE_CODE_8 },
	{ 0x01, 0x01, 0x39, DTMF_DIGIT_TONE_CODE_9 },
	{ 0x01, 0x01, 0x41, DTMF_DIGIT_TONE_CODE_A },
	{ 0x01, 0x01, 0x42, DTMF_DIGIT_TONE_CODE_B },
	{ 0x01, 0x01, 0x43, DTMF_DIGIT_TONE_CODE_C },
	{ 0x01, 0x01, 0x44, DTMF_DIGIT_TONE_CODE_D },
	/* lowercase a-d may be sent but are never reported */
	{ 0x01, 0x00, 0x61, DTMF_DIGIT_TONE_CODE_A },
	{ 0x01, 0x00, 0x62, DTMF_DIGIT_TONE_CODE_B },
	{ 0x01, 0x00, 0x63, DTMF_DIGIT_TONE_CODE_C },
	{ 0x01, 0x00, 0x64, DTMF_DIGIT_TONE_CODE_D },
	/* tone signals (0x04 group) */
	{ 0x04, 0x04, 0x80, DTMF_SIGNAL_NO_TONE },
	{ 0x00, 0x04, 0x81, DTMF_SIGNAL_UNIDENTIFIED_TONE },
	{ 0x04, 0x04, 0x82, DTMF_SIGNAL_DIAL_TONE },
	{ 0x04, 0x04, 0x83, DTMF_SIGNAL_PABX_INTERNAL_DIAL_TONE },
	{ 0x04, 0x04, 0x84, DTMF_SIGNAL_SPECIAL_DIAL_TONE },
	{ 0x04, 0x04, 0x85, DTMF_SIGNAL_SECOND_DIAL_TONE },
	{ 0x04, 0x04, 0x86, DTMF_SIGNAL_RINGING_TONE },
	{ 0x04, 0x04, 0x87, DTMF_SIGNAL_SPECIAL_RINGING_TONE },
	{ 0x04, 0x04, 0x88, DTMF_SIGNAL_BUSY_TONE },
	{ 0x04, 0x04, 0x89, DTMF_SIGNAL_CONGESTION_TONE },
	{ 0x04, 0x04, 0x8a, DTMF_SIGNAL_SPECIAL_INFORMATION_TONE },
	{ 0x04, 0x04, 0x8b, DTMF_SIGNAL_COMFORT_TONE },
	{ 0x04, 0x04, 0x8c, DTMF_SIGNAL_HOLD_TONE },
	{ 0x04, 0x04, 0x8d, DTMF_SIGNAL_RECORD_TONE },
	{ 0x04, 0x04, 0x8e, DTMF_SIGNAL_CALLER_WAITING_TONE },
	{ 0x04, 0x04, 0x8f, DTMF_SIGNAL_CALL_WAITING_TONE },
	{ 0x04, 0x04, 0x90, DTMF_SIGNAL_PAY_TONE },
	{ 0x04, 0x04, 0x91, DTMF_SIGNAL_POSITIVE_INDICATION_TONE },
	{ 0x04, 0x04, 0x92, DTMF_SIGNAL_NEGATIVE_INDICATION_TONE },
	{ 0x04, 0x04, 0x93, DTMF_SIGNAL_WARNING_TONE },
	{ 0x04, 0x04, 0x94, DTMF_SIGNAL_INTRUSION_TONE },
	{ 0x04, 0x04, 0x95, DTMF_SIGNAL_CALLING_CARD_SERVICE_TONE },
	{ 0x04, 0x04, 0x96, DTMF_SIGNAL_PAYPHONE_RECOGNITION_TONE },
	{ 0x04, 0x04, 0x97, DTMF_SIGNAL_CPE_ALERTING_SIGNAL },
	{ 0x04, 0x04, 0x98, DTMF_SIGNAL_OFF_HOOK_WARNING_TONE },
	{ 0x04, 0x04, 0xbf, DTMF_SIGNAL_INTERCEPT_TONE },
	{ 0x04, 0x04, 0xc0, DTMF_SIGNAL_MODEM_CALLING_TONE },
	{ 0x04, 0x04, 0xc1, DTMF_SIGNAL_FAX_CALLING_TONE },
	{ 0x04, 0x04, 0xc2, DTMF_SIGNAL_ANSWER_TONE },
	{ 0x04, 0x04, 0xc3, DTMF_SIGNAL_REVERSED_ANSWER_TONE },
	{ 0x04, 0x04, 0xc4, DTMF_SIGNAL_ANSAM_TONE },
	{ 0x04, 0x04, 0xc5, DTMF_SIGNAL_REVERSED_ANSAM_TONE },
	{ 0x04, 0x04, 0xc6, DTMF_SIGNAL_BELL103_ANSWER_TONE },
	{ 0x04, 0x04, 0xc7, DTMF_SIGNAL_FAX_FLAGS },
	{ 0x04, 0x04, 0xc8, DTMF_SIGNAL_G2_FAX_GROUP_ID },
	{ 0x00, 0x04, 0xc9, DTMF_SIGNAL_HUMAN_SPEECH },
	{ 0x04, 0x04, 0xca, DTMF_SIGNAL_ANSWERING_MACHINE_390 },
	/* MF digits (0x02 group) */
	{ 0x02, 0x02, 0xf1, DTMF_MF_DIGIT_TONE_CODE_1 },
	{ 0x02, 0x02, 0xf2, DTMF_MF_DIGIT_TONE_CODE_2 },
	{ 0x02, 0x02, 0xf3, DTMF_MF_DIGIT_TONE_CODE_3 },
	{ 0x02, 0x02, 0xf4, DTMF_MF_DIGIT_TONE_CODE_4 },
	{ 0x02, 0x02, 0xf5, DTMF_MF_DIGIT_TONE_CODE_5 },
	{ 0x02, 0x02, 0xf6, DTMF_MF_DIGIT_TONE_CODE_6 },
	{ 0x02, 0x02, 0xf7, DTMF_MF_DIGIT_TONE_CODE_7 },
	{ 0x02, 0x02, 0xf8, DTMF_MF_DIGIT_TONE_CODE_8 },
	{ 0x02, 0x02, 0xf9, DTMF_MF_DIGIT_TONE_CODE_9 },
	{ 0x02, 0x02, 0xfa, DTMF_MF_DIGIT_TONE_CODE_0 },
	{ 0x02, 0x02, 0xfb, DTMF_MF_DIGIT_TONE_CODE_K1 },
	{ 0x02, 0x02, 0xfc, DTMF_MF_DIGIT_TONE_CODE_K2 },
	{ 0x02, 0x02, 0xfd, DTMF_MF_DIGIT_TONE_CODE_KP },
	{ 0x02, 0x02, 0xfe, DTMF_MF_DIGIT_TONE_CODE_S1 },
	{ 0x02, 0x02, 0xff, DTMF_MF_DIGIT_TONE_CODE_ST },
};
/* number of entries in dtmf_digit_map */
#define DTMF_DIGIT_MAP_ENTRIES ARRAY_SIZE(dtmf_digit_map)
/*
 * Send an N_UDATA request to enable (enable_mask != 0) or disable the
 * DTMF receiver on the network channel.  Pulse/pause times of 0 select
 * the default of 40 ms.  The software detector (capidtmf) is switched
 * in step with the hardware request.
 */
static void dtmf_enable_receiver(PLCI *plci, byte enable_mask)
{
	word min_digit_duration, min_gap_duration;
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_enable_receiver %02x",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__, enable_mask));
	if (enable_mask != 0)
	{
		min_digit_duration = (plci->dtmf_rec_pulse_ms == 0) ? 40 : plci->dtmf_rec_pulse_ms;
		min_gap_duration = (plci->dtmf_rec_pause_ms == 0) ? 40 : plci->dtmf_rec_pause_ms;
		/* request layout: opcode, min digit duration, min gap duration */
		plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_ENABLE_RECEIVER;
		PUT_WORD(&plci->internal_req_buffer[1], min_digit_duration);
		PUT_WORD(&plci->internal_req_buffer[3], min_gap_duration);
		plci->NData[0].PLength = 5;
		/* additionally pass the size of the indication buffer */
		PUT_WORD(&plci->internal_req_buffer[5], INTERNAL_IND_BUFFER_SIZE);
		plci->NData[0].PLength += 2;
		capidtmf_recv_enable(&(plci->capidtmf_state), min_digit_duration, min_gap_duration);
	}
	else
	{
		plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_DISABLE_RECEIVER;
		plci->NData[0].PLength = 1;
		capidtmf_recv_disable(&(plci->capidtmf_state));
	}
	/* hand the request to the adapter */
	plci->NData[0].P = plci->internal_req_buffer;
	plci->NL.X = plci->NData;
	plci->NL.ReqCh = 0;
	plci->NL.Req = plci->nl_req = (byte) N_UDATA;
	plci->adapter->request(&plci->NL);
}
/*
 * Build and send an N_UDATA SEND_DIGITS request: opcode, pulse/pause
 * times (0 selects the 40 ms default) followed by one tone code per
 * digit.  Characters not found in dtmf_digit_map are sent as '*'.
 */
static void dtmf_send_digits(PLCI *plci, byte *digit_buffer, word digit_count)
{
	word duration, idx, map_idx;

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_send_digits %d",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__, digit_count));
	plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_SEND_DIGITS;
	duration = plci->dtmf_send_pulse_ms ? plci->dtmf_send_pulse_ms : 40;
	PUT_WORD(&plci->internal_req_buffer[1], duration);
	duration = plci->dtmf_send_pause_ms ? plci->dtmf_send_pause_ms : 40;
	PUT_WORD(&plci->internal_req_buffer[3], duration);
	for (idx = 0; idx < digit_count; idx++)
	{
		/* translate the CAPI character into the card's tone code */
		for (map_idx = 0; map_idx < DTMF_DIGIT_MAP_ENTRIES; map_idx++)
		{
			if (digit_buffer[idx] == dtmf_digit_map[map_idx].character)
				break;
		}
		plci->internal_req_buffer[5 + idx] = (map_idx < DTMF_DIGIT_MAP_ENTRIES) ?
			dtmf_digit_map[map_idx].code : DTMF_DIGIT_TONE_CODE_STAR;
	}
	plci->NData[0].PLength = 5 + digit_count;
	plci->NData[0].P = plci->internal_req_buffer;
	plci->NL.X = plci->NData;
	plci->NL.ReqCh = 0;
	plci->NL.Req = plci->nl_req = (byte) N_UDATA;
	plci->adapter->request(&plci->NL);
}
/*
 * Reset all DTMF receiver settings of this PLCI and reinitialize the
 * software DTMF detector state.
 */
static void dtmf_rec_clear_config(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_rec_clear_config",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	plci->dtmf_rec_pause_ms = 0;
	plci->dtmf_rec_pulse_ms = 0;
	plci->dtmf_rec_active = 0;
	capidtmf_init(&(plci->capidtmf_state), plci->adapter->u_law);
}
/*
 * Reset all DTMF sender settings of this PLCI, including the count of
 * outstanding send requests.
 */
static void dtmf_send_clear_config(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_send_clear_config",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	plci->dtmf_send_pause_ms = 0;
	plci->dtmf_send_pulse_ms = 0;
	plci->dtmf_send_requests = 0;
}
/*
 * adjust_b hook: before a B-channel protocol switch all queued DTMF
 * send confirmations are flushed to the application.
 */
static void dtmf_prepare_switch(dword Id, PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_prepare_switch",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	for (;;)
	{
		if (plci->dtmf_send_requests == 0)
			break;
		dtmf_confirmation(Id, plci);
	}
}
/*
 * adjust_b hook: the DTMF facility has no configuration that needs
 * saving before a B-channel switch - trace only and report success.
 */
static word dtmf_save_config(dword Id, PLCI *plci, byte Rc)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_save_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	return (GOOD);
}
/*
 * adjust_b hook: re-enable the DTMF receiver after a B-channel
 * reconfiguration if the B1 facilities include the receiver.  Driven
 * through adjust_b_state; Rc carries the return code of the previous
 * step.  Returns GOOD or _WRONG_STATE.
 */
static word dtmf_restore_config(dword Id, PLCI *plci, byte Rc)
{
	word Info;
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_restore_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	Info = GOOD;
	if (plci->B1_facilities & B1_FACILITY_DTMFR)
	{
		switch (plci->adjust_b_state)
		{
		case ADJUST_B_RESTORE_DTMF_1:
			plci->internal_command = plci->adjust_b_command;
			if (plci_nl_busy(plci))
			{
				/* NL channel busy - stay in this state and retry */
				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
				break;
			}
			dtmf_enable_receiver(plci, plci->dtmf_rec_active);
			plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_2;
			break;
		case ADJUST_B_RESTORE_DTMF_2:
			/* result of the enable request */
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Reenable DTMF receiver failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _WRONG_STATE;
				break;
			}
			break;
		}
	}
	return (Info);
}
/*
 * State machine behind the DTMF facility operations started from
 * dtmf_request() via start_internal_command().  It is re-entered with
 * the return code (Rc) of the previous step until the operation
 * completes, then sends the FACILITY confirmation to the application.
 * NOTE: both switch levels use deliberate fall-through - the
 * TONE/MF/DTMF command variants only differ in which mask bit is
 * shifted into place, and the internal command steps run in sequence.
 */
static void dtmf_command(dword Id, PLCI *plci, byte Rc)
{
	word internal_command, Info;
	byte mask;
	byte result[4];
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_command %02x %04x %04x %d %d %d %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
			plci->dtmf_cmd, plci->dtmf_rec_pulse_ms, plci->dtmf_rec_pause_ms,
			plci->dtmf_send_pulse_ms, plci->dtmf_send_pause_ms));
	Info = GOOD;
	result[0] = 2;
	PUT_WORD(&result[1], DTMF_SUCCESS);
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	mask = 0x01;
	switch (plci->dtmf_cmd)
	{
	case DTMF_LISTEN_TONE_START:
		mask <<= 1;	/* fall through */
	case DTMF_LISTEN_MF_START:
		mask <<= 1;	/* fall through */
	case DTMF_LISTEN_START:
		switch (internal_command)
		{
		default:
			/* step 1: make sure the B1 resource includes the receiver */
			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
								  B1_FACILITY_DTMFR), DTMF_COMMAND_1);
			/* fall through */
		case DTMF_COMMAND_1:
			if (adjust_b_process(Id, plci, Rc) != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Load DTMF failed",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			if (plci->internal_command)
				return;	/* adjust_b still in progress */
			/* fall through */
		case DTMF_COMMAND_2:
			if (plci_nl_busy(plci))
			{
				plci->internal_command = DTMF_COMMAND_2;	/* retry later */
				return;
			}
			/* step 2: enable the receiver with the new mode bit added */
			plci->internal_command = DTMF_COMMAND_3;
			dtmf_enable_receiver(plci, (byte)(plci->dtmf_rec_active | mask));
			return;
		case DTMF_COMMAND_3:
			/* step 3: result of the enable request */
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Enable DTMF receiver failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			plci->tone_last_indication_code = DTMF_SIGNAL_NO_TONE;
			plci->dtmf_rec_active |= mask;
			break;
		}
		break;
	case DTMF_LISTEN_TONE_STOP:
		mask <<= 1;	/* fall through */
	case DTMF_LISTEN_MF_STOP:
		mask <<= 1;	/* fall through */
	case DTMF_LISTEN_STOP:
		switch (internal_command)
		{
		default:
			plci->dtmf_rec_active &= ~mask;
			if (plci->dtmf_rec_active)
				break;	/* another listen mode still active - keep receiver */
			/*
			  case DTMF_COMMAND_1:
			  if (plci->dtmf_rec_active)
			  {
			  if (plci_nl_busy (plci))
			  {
			  plci->internal_command = DTMF_COMMAND_1;
			  return;
			  }
			  plci->dtmf_rec_active &= ~mask;
			  plci->internal_command = DTMF_COMMAND_2;
			  dtmf_enable_receiver (plci, false);
			  return;
			  }
			  Rc = OK;
			  case DTMF_COMMAND_2:
			  if ((Rc != OK) && (Rc != OK_FC))
			  {
			  dbug (1, dprintf("[%06lx] %s,%d: Disable DTMF receiver failed %02x",
			  UnMapId (Id), (char far *)(FILE_), __LINE__, Rc));
			  Info = _FACILITY_NOT_SUPPORTED;
			  break;
			  }
			*/
			/* no listen mode left: release the DTMF resources again */
			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
								  ~(B1_FACILITY_DTMFX | B1_FACILITY_DTMFR)), DTMF_COMMAND_3);
			/* fall through */
		case DTMF_COMMAND_3:
			if (adjust_b_process(Id, plci, Rc) != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Unload DTMF failed",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			if (plci->internal_command)
				return;
			break;
		}
		break;
	case DTMF_SEND_TONE:
		mask <<= 1;	/* fall through */
	case DTMF_SEND_MF:
		mask <<= 1;	/* fall through */
	case DTMF_DIGITS_SEND:
		switch (internal_command)
		{
		default:
			/* step 1: load the sender (plus receiver if DSP DTMF
			   parameters are configured) */
			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
								  ((plci->dtmf_parameter_length != 0) ? B1_FACILITY_DTMFX | B1_FACILITY_DTMFR : B1_FACILITY_DTMFX)),
					   DTMF_COMMAND_1);
			/* fall through */
		case DTMF_COMMAND_1:
			if (adjust_b_process(Id, plci, Rc) != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Load DTMF failed",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			if (plci->internal_command)
				return;
			/* fall through */
		case DTMF_COMMAND_2:
			if (plci_nl_busy(plci))
			{
				plci->internal_command = DTMF_COMMAND_2;
				return;
			}
			/* remember the message number for the later confirmation */
			plci->dtmf_msg_number_queue[(plci->dtmf_send_requests)++] = plci->number;
			plci->internal_command = DTMF_COMMAND_3;
			dtmf_send_digits(plci, &plci->saved_msg.parms[3].info[1], plci->saved_msg.parms[3].length);
			return;
		case DTMF_COMMAND_3:
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Send DTMF digits failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				if (plci->dtmf_send_requests != 0)
					(plci->dtmf_send_requests)--;
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			/* success: the confirmation is sent later from
			   dtmf_confirmation() when the card reports completion */
			return;
		}
		break;
	}
	sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->number,
	      "wws", Info, SELECTOR_DTMF, result);
}
/*
 * Dispatch a FACILITY_REQ with the DTMF selector.  Validates the
 * request, answers the GET_SUPPORTED_*_CODES queries directly from
 * dtmf_digit_map, and starts dtmf_command() for listen/send commands.
 * A confirmation is sent immediately unless an internal command was
 * started (dtmf_command() confirms later in that case).  Always
 * returns false (message consumed).
 * NOTE: the command switch uses deliberate fall-through to shift the
 * DTMF/MF/tone mask bit into place.
 */
static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word Info;
	word i, j;
	byte mask;
	API_PARSE dtmf_parms[5];
	byte result[40];
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_request",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	Info = GOOD;
	result[0] = 2;
	PUT_WORD(&result[1], DTMF_SUCCESS);
	if (!(a->profile.Global_Options & GL_DTMF_SUPPORTED))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		Info = _FACILITY_NOT_SUPPORTED;
	}
	else if (api_parse(&msg[1].info[1], msg[1].length, "w", dtmf_parms))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		Info = _WRONG_MESSAGE_FORMAT;
	}
	else if ((GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_DETECT_CODES)
		 || (GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_SEND_CODES))
	{
		/* capability query - needs no PLCI, but requires that the
		   application requested the private tone option */
		if (!((a->requested_options_table[appl->Id - 1])
		      & (1L << PRIVATE_DTMF_TONE)))
		{
			dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
					UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
			PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
		}
		else
		{
			/* build a 32 byte (256 bit) map with one bit per
			   supported character */
			for (i = 0; i < 32; i++)
				result[4 + i] = 0;
			if (GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_DETECT_CODES)
			{
				for (i = 0; i < DTMF_DIGIT_MAP_ENTRIES; i++)
				{
					if (dtmf_digit_map[i].listen_mask != 0)
						result[4 + (dtmf_digit_map[i].character >> 3)] |= (1 << (dtmf_digit_map[i].character & 0x7));
				}
			}
			else
			{
				for (i = 0; i < DTMF_DIGIT_MAP_ENTRIES; i++)
				{
					if (dtmf_digit_map[i].send_mask != 0)
						result[4 + (dtmf_digit_map[i].character >> 3)] |= (1 << (dtmf_digit_map[i].character & 0x7));
				}
			}
			result[0] = 3 + 32;
			result[3] = 32;
		}
	}
	else if (plci == NULL)
	{
		dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		Info = _WRONG_IDENTIFIER;
	}
	else
	{
		/* listen/send commands need an established network channel */
		if (!plci->State
		    || !plci->NL.Id || plci->nl_remove_id)
		{
			dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
					UnMapId(Id), (char *)(FILE_), __LINE__));
			Info = _WRONG_STATE;
		}
		else
		{
			plci->command = 0;
			plci->dtmf_cmd = GET_WORD(dtmf_parms[0].info);
			mask = 0x01;
			switch (plci->dtmf_cmd)
			{
			case DTMF_LISTEN_TONE_START:
			case DTMF_LISTEN_TONE_STOP:
				mask <<= 1;	/* fall through */
			case DTMF_LISTEN_MF_START:
			case DTMF_LISTEN_MF_STOP:
				mask <<= 1;
				/* MF/tone listening requires the private tone option */
				if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[appl->Id - 1])
				      & (1L << PRIVATE_DTMF_TONE)))
				{
					dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
							UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
					PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
					break;
				}
				/* fall through */
			case DTMF_LISTEN_START:
			case DTMF_LISTEN_STOP:
				if (!(a->manufacturer_features & MANUFACTURER_FEATURE_HARDDTMF)
				    && !(a->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _FACILITY_NOT_SUPPORTED;
					break;
				}
				if (mask & DTMF_LISTEN_ACTIVE_FLAG)
				{
					/* optional pulse/pause times; absence or 0 selects
					   the defaults */
					if (api_parse(&msg[1].info[1], msg[1].length, "wwws", dtmf_parms))
					{
						plci->dtmf_rec_pulse_ms = 0;
						plci->dtmf_rec_pause_ms = 0;
					}
					else
					{
						plci->dtmf_rec_pulse_ms = GET_WORD(dtmf_parms[1].info);
						plci->dtmf_rec_pause_ms = GET_WORD(dtmf_parms[2].info);
					}
				}
				start_internal_command(Id, plci, dtmf_command);
				return (false);	/* dtmf_command() confirms later */
			case DTMF_SEND_TONE:
				mask <<= 1;	/* fall through */
			case DTMF_SEND_MF:
				mask <<= 1;
				if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[appl->Id - 1])
				      & (1L << PRIVATE_DTMF_TONE)))
				{
					dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
							UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
					PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
					break;
				}
				/* fall through */
			case DTMF_DIGITS_SEND:
				if (api_parse(&msg[1].info[1], msg[1].length, "wwws", dtmf_parms))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (mask & DTMF_LISTEN_ACTIVE_FLAG)
				{
					plci->dtmf_send_pulse_ms = GET_WORD(dtmf_parms[1].info);
					plci->dtmf_send_pause_ms = GET_WORD(dtmf_parms[2].info);
				}
				/* verify that every digit is sendable in the selected
				   mode; the outer loop stops early (j == ENTRIES) on the
				   first unknown digit */
				i = 0;
				j = 0;
				while ((i < dtmf_parms[3].length) && (j < DTMF_DIGIT_MAP_ENTRIES))
				{
					j = 0;
					while ((j < DTMF_DIGIT_MAP_ENTRIES)
					       && ((dtmf_parms[3].info[i + 1] != dtmf_digit_map[j].character)
						   || ((dtmf_digit_map[j].send_mask & mask) == 0)))
					{
						j++;
					}
					i++;
				}
				if (j == DTMF_DIGIT_MAP_ENTRIES)
				{
					dbug(1, dprintf("[%06lx] %s,%d: Incorrect DTMF digit %02x",
							UnMapId(Id), (char *)(FILE_), __LINE__, dtmf_parms[3].info[i]));
					PUT_WORD(&result[1], DTMF_INCORRECT_DIGIT);
					break;
				}
				if (plci->dtmf_send_requests >= ARRAY_SIZE(plci->dtmf_msg_number_queue))
				{
					dbug(1, dprintf("[%06lx] %s,%d: DTMF request overrun",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_STATE;
					break;
				}
				/* keep the message for dtmf_command() and start it */
				api_save_msg(dtmf_parms, "wwws", &plci->saved_msg);
				start_internal_command(Id, plci, dtmf_command);
				return (false);
			default:
				dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
						UnMapId(Id), (char *)(FILE_), __LINE__, plci->dtmf_cmd));
				PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
			}
		}
	}
	sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
	      "wws", Info, SELECTOR_DTMF, result);
	return (false);
}
/*
 * Confirm the oldest pending DTMF send request towards the application
 * and shift the queued message numbers down by one position.
 */
static void dtmf_confirmation(dword Id, PLCI *plci)
{
	word idx;
	byte result[4];

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_confirmation",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	result[0] = 2;
	PUT_WORD(&result[1], DTMF_SUCCESS);
	if (plci->dtmf_send_requests == 0)
		return;
	sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->dtmf_msg_number_queue[0],
	      "wws", GOOD, SELECTOR_DTMF, result);
	(plci->dtmf_send_requests)--;
	for (idx = 0; idx < plci->dtmf_send_requests; idx++)
		plci->dtmf_msg_number_queue[idx] = plci->dtmf_msg_number_queue[idx + 1];
}
/*
 * Process a DTMF/tone indication from the card: translate the tone
 * codes in msg[1..length-1] into CAPI characters (filtered through the
 * currently active listen masks) and forward them in place to the
 * application as FACILITY_I with the DTMF selector.
 * For tone listeners, a transition away from "no tone" whose first
 * report is not "unidentified tone" gets an 'unidentified tone'
 * character inserted first; when there is no room the tail of the
 * buffer is shifted up one byte (buffer grows by at most one).
 */
static void dtmf_indication(dword Id, PLCI *plci, byte *msg, word length)
{
	word i, j, n;
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_indication",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	n = 0;	/* number of characters written back into msg[1..] */
	for (i = 1; i < length; i++)
	{
		/* find a map entry with matching code and active listen mask */
		j = 0;
		while ((j < DTMF_DIGIT_MAP_ENTRIES)
		       && ((msg[i] != dtmf_digit_map[j].code)
			   || ((dtmf_digit_map[j].listen_mask & plci->dtmf_rec_active) == 0)))
		{
			j++;
		}
		if (j < DTMF_DIGIT_MAP_ENTRIES)
		{
			if ((dtmf_digit_map[j].listen_mask & DTMF_TONE_LISTEN_ACTIVE_FLAG)
			    && (plci->tone_last_indication_code == DTMF_SIGNAL_NO_TONE)
			    && (dtmf_digit_map[j].character != DTMF_SIGNAL_UNIDENTIFIED_TONE))
			{
				if (n + 1 == i)
				{
					/* no gap before the current byte - shift the tail up */
					for (i = length; i > n + 1; i--)
						msg[i] = msg[i - 1];
					length++;
					i++;
				}
				msg[++n] = DTMF_SIGNAL_UNIDENTIFIED_TONE;
			}
			plci->tone_last_indication_code = dtmf_digit_map[j].character;
			msg[++n] = dtmf_digit_map[j].character;
		}
	}
	if (n != 0)
	{
		msg[0] = (byte) n;	/* length byte of the facility structure */
		sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "wS", SELECTOR_DTMF, msg);
	}
}
/*------------------------------------------------------------------*/
/* DTMF parameters */
/*------------------------------------------------------------------*/
/*
 * Download the stored DSP DTMF parameters to the card via a TEL_CTRL
 * signaling request carrying an FTY information element.
 */
static void dtmf_parameter_write(PLCI *plci)
{
	word idx;
	byte parameter_buffer[DTMF_PARAMETER_BUFFER_SIZE + 2];

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_write",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	/* length byte covers the opcode plus the parameter bytes */
	parameter_buffer[0] = plci->dtmf_parameter_length + 1;
	parameter_buffer[1] = DSP_CTRL_SET_DTMF_PARAMETERS;
	idx = 0;
	while (idx < plci->dtmf_parameter_length)
	{
		parameter_buffer[2 + idx] = plci->dtmf_parameter_buffer[idx];
		idx++;
	}
	add_p(plci, FTY, parameter_buffer);
	sig_req(plci, TEL_CTRL, 0);
	send_req(plci);
}
/*
 * Forget any stored DSP DTMF parameters for this PLCI.
 */
static void dtmf_parameter_clear_config(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_clear_config",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	plci->dtmf_parameter_length = 0;
}
/*
 * adjust_b hook: DTMF parameters need no preparation before a
 * B-channel protocol switch - trace only.
 */
static void dtmf_parameter_prepare_switch(dword Id, PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_prepare_switch",
			UnMapId(Id), (char *)(FILE_), __LINE__));
}
/*
 * adjust_b hook: the DTMF parameters are already kept in
 * plci->dtmf_parameter_buffer, so nothing needs saving - trace only.
 */
static word dtmf_parameter_save_config(dword Id, PLCI *plci, byte Rc)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_save_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	return (GOOD);
}
/*
 * adjust_b hook: re-download the stored DSP DTMF parameters after a
 * B-channel reconfiguration when the receiver facility is active and
 * parameters are present.  Returns GOOD or _WRONG_STATE.
 */
static word dtmf_parameter_restore_config(dword Id, PLCI *plci, byte Rc)
{
	word Info;
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_restore_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	Info = GOOD;
	if ((plci->B1_facilities & B1_FACILITY_DTMFR)
	    && (plci->dtmf_parameter_length != 0))
	{
		switch (plci->adjust_b_state)
		{
		case ADJUST_B_RESTORE_DTMF_PARAMETER_1:
			plci->internal_command = plci->adjust_b_command;
			if (plci->sig_req)
			{
				/* signaling entity busy - stay in this state and retry */
				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_1;
				break;
			}
			dtmf_parameter_write(plci);
			plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_2;
			break;
		case ADJUST_B_RESTORE_DTMF_PARAMETER_2:
			/* result of the parameter download */
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Restore DTMF parameters failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _WRONG_STATE;
				break;
			}
			break;
		}
	}
	return (Info);
}
/*------------------------------------------------------------------*/
/* Line interconnect facilities */
/*------------------------------------------------------------------*/
/* Global line interconnect configuration: one entry per B-channel
   across all adapters (presumably allocated by the OS specific part -
   not visible here). */
LI_CONFIG *li_config_table;
word li_total_channels;	/* number of entries in li_config_table */
/*------------------------------------------------------------------*/
/* translate a CHI information element to a channel number */
/* returns 0xff - any channel */
/* 0xfe - chi wrong coding */
/* 0xfd - D-channel */
/* 0x00 - no channel */
/* else channel number / PRI: timeslot */
/* if channels is provided we accept more than one channel. */
/*------------------------------------------------------------------*/
/* Parse a Q.931 channel identification IE (chi[0] = length) as
   described in the comment block above.  When pchannelmap is given the
   full channel bitmap is returned through it as well. */
static byte chi_to_channel(byte *chi, dword *pchannelmap)
{
	int p;
	int i;
	dword map;
	byte excl;	/* 0x40 when the channel is marked exclusive */
	byte ofs;
	byte ch;
	if (pchannelmap) *pchannelmap = 0;
	if (!chi[0]) return 0xff;	/* empty IE: any channel */
	excl = 0;
	if (chi[1] & 0x20) {
		/* PRI interface */
		if (chi[0] == 1 && chi[1] == 0xac) return 0xfd; /* exclusive d-channel */
		/* advance to the end (ext bit 0x80 set) of the first octet group */
		for (i = 1; i < chi[0] && !(chi[i] & 0x80); i++);
		if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
		if ((chi[1] | 0xc8) != 0xe9) return 0xfe;
		if (chi[1] & 0x08) excl = 0x40;
		/* int. id present */
		if (chi[1] & 0x40) {
			p = i + 1;
			for (i = p; i < chi[0] && !(chi[i] & 0x80); i++);
			if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
		}
		/* coding standard, Number/Map, Channel Type */
		p = i + 1;
		for (i = p; i < chi[0] && !(chi[i] & 0x80); i++);
		if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
		if ((chi[p] | 0xd0) != 0xd3) return 0xfe;
		/* Number/Map */
		if (chi[p] & 0x10) {
			/* channel given as a slot map (3 or 4 octets) */
			if ((chi[0] - p) == 4) ofs = 0;
			else if ((chi[0] - p) == 3) ofs = 1;
			else return 0xfe;
			ch = 0;
			map = 0;
			for (i = 0; i < 4 && p < chi[0]; i++) {
				p++;
				ch += 8;
				map <<= 8;
				if (chi[p]) {
					/* ch ends up at the lowest bit set in the last
					   non-zero octet */
					for (ch = 0; !(chi[p] & (1 << ch)); ch++);
					map |= chi[p];
				}
			}
			ch += ofs;
			map <<= ofs;
		}
		else {
			/* channel given as a number (or list of numbers) */
			p = i + 1;
			ch = chi[p] & 0x3f;
			if (pchannelmap) {
				/* caller accepts multiple channels */
				if ((byte)(chi[0] - p) > 30) return 0xfe;
				map = 0;
				for (i = p; i <= chi[0]; i++) {
					if ((chi[i] & 0x7f) > 31) return 0xfe;
					map |= (1L << (chi[i] & 0x7f));
				}
			}
			else {
				/* only a single channel number is acceptable */
				if (p != chi[0]) return 0xfe;
				if (ch > 31) return 0xfe;
				map = (1L << ch);
			}
			if (chi[p] & 0x40) return 0xfe;	/* slot map coding flag not allowed here */
		}
		if (pchannelmap) *pchannelmap = map;
		else if (map != ((dword)(1L << ch))) return 0xfe;	/* multiple channels but no map wanted */
		return (byte)(excl | ch);
	}
	else { /* not PRI */
		for (i = 1; i < chi[0] && !(chi[i] & 0x80); i++);
		if (i != chi[0] || !(chi[i] & 0x80)) return 0xfe;
		if (chi[1] & 0x08) excl = 0x40;
		/* BRI: the info channel selection bits give the channel directly */
		switch (chi[1] | 0x98) {
		case 0x98: return 0;	/* no channel */
		case 0x99:		/* B1 */
			if (pchannelmap) *pchannelmap = 2;
			return excl | 1;
		case 0x9a:		/* B2 */
			if (pchannelmap) *pchannelmap = 4;
			return excl | 2;
		case 0x9b: return 0xff;	/* any channel */
		case 0x9c: return 0xfd; /* d-ch */
		default: return 0xfe;
		}
	}
}
/*
 * Assign a B-channel id (received as an escape value) to this PLCI and
 * update the ownership entries in li_config_table.  On BRI adapters
 * with an advanced-voice signaling PLCI, that PLCI is moved to the
 * respective other B-channel so both channels stay addressable for
 * line interconnects.  The first successful assignment clears the
 * mixer configuration of the PLCI.
 */
static void mixer_set_bchannel_id_esc(PLCI *plci, byte bchannel_id)
{
	DIVA_CAPI_ADAPTER *a;
	PLCI *splci;
	byte old_id;
	a = plci->adapter;
	old_id = plci->li_bchannel_id;
	if (a->li_pri)
	{
		/* PRI: release the previously owned slot, claim the new one */
		if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
			li_config_table[a->li_base + (old_id - 1)].plci = NULL;
		plci->li_bchannel_id = (bchannel_id & 0x1f) + 1;
		if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
			li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
	}
	else
	{
		/* BRI: only channel ids 1 and 2 are meaningful */
		if (((bchannel_id & 0x03) == 1) || ((bchannel_id & 0x03) == 2))
		{
			if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
				li_config_table[a->li_base + (old_id - 1)].plci = NULL;
			plci->li_bchannel_id = bchannel_id & 0x03;
			if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI != plci) && (a->AdvSignalPLCI->tel == ADV_VOICE))
			{
				/* move the advanced-voice signaling PLCI to the other
				   B-channel (3 - id maps 1<->2) if that slot is free */
				splci = a->AdvSignalPLCI;
				if (li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci == NULL)
				{
					if ((splci->li_bchannel_id != 0)
					    && (li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci == splci))
					{
						li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci = NULL;
					}
					splci->li_bchannel_id = 3 - plci->li_bchannel_id;
					li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci = splci;
					dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id_esc %d",
							(dword)((splci->Id << 8) | UnMapController(splci->adapter->Id)),
							(char *)(FILE_), __LINE__, splci->li_bchannel_id));
				}
			}
			if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
				li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
		}
	}
	if ((old_id == 0) && (plci->li_bchannel_id != 0)
	    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
	{
		/* first assignment: start from a clean mixer configuration */
		mixer_clear_config(plci);
	}
	dbug(1, dprintf("[%06lx] %s,%d: mixer_set_bchannel_id_esc %d %d",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__, bchannel_id, plci->li_bchannel_id));
}
/*
 * Derive the B-channel id for this PLCI from a CHI information element
 * and update li_config_table ownership.  Special chi_to_channel()
 * results (0xfd/0xfe/0xff, i.e. bit 0x80 set) leave the current
 * assignment unchanged.  Otherwise mirrors mixer_set_bchannel_id_esc().
 */
static void mixer_set_bchannel_id(PLCI *plci, byte *chi)
{
	DIVA_CAPI_ADAPTER *a;
	PLCI *splci;
	byte ch, old_id;
	a = plci->adapter;
	old_id = plci->li_bchannel_id;
	ch = chi_to_channel(chi, NULL);
	if (!(ch & 0x80))
	{
		if (a->li_pri)
		{
			/* PRI: release the old slot and claim the new one */
			if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
				li_config_table[a->li_base + (old_id - 1)].plci = NULL;
			plci->li_bchannel_id = (ch & 0x1f) + 1;
			if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
				li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
		}
		else
		{
			/* BRI: only channel ids 1 and 2 are meaningful */
			if (((ch & 0x1f) == 1) || ((ch & 0x1f) == 2))
			{
				if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
					li_config_table[a->li_base + (old_id - 1)].plci = NULL;
				plci->li_bchannel_id = ch & 0x1f;
				if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI != plci) && (a->AdvSignalPLCI->tel == ADV_VOICE))
				{
					/* move the advanced-voice signaling PLCI to the other
					   B-channel if that slot is free */
					splci = a->AdvSignalPLCI;
					if (li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci == NULL)
					{
						if ((splci->li_bchannel_id != 0)
						    && (li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci == splci))
						{
							li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci = NULL;
						}
						splci->li_bchannel_id = 3 - plci->li_bchannel_id;
						li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci = splci;
						dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
								(dword)((splci->Id << 8) | UnMapController(splci->adapter->Id)),
								(char *)(FILE_), __LINE__, splci->li_bchannel_id));
					}
				}
				if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
					li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
			}
		}
	}
	if ((old_id == 0) && (plci->li_bchannel_id != 0)
	    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
	{
		/* first assignment: start from a clean mixer configuration */
		mixer_clear_config(plci);
	}
	dbug(1, dprintf("[%06lx] %s,%d: mixer_set_bchannel_id %02x %d",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__, ch, plci->li_bchannel_id));
}
#define MIXER_MAX_DUMP_CHANNELS 34
/*
 * Recompute the full line-interconnect (LI) coefficient matrix from the
 * per-channel flag tables, then dump the result to the debug log.
 *
 * Phases:
 *  1. Derive per-channel involvement/conference bits from chflags and the
 *     pairwise flag_table entries.
 *  2. Seed coef_table with CH_CH for conference-flagged pairs and compute
 *     the transitive closure over all conference members.
 *  3. Re-derive conference flags from the closed coefficient matrix.
 *  4. Translate the remaining flag bits (interconnect/monitor/mix/pcconnect
 *     and the chflags monitor/mix/loop modes) into coefficient bits.
 *  5. Set the per-channel ACTIVE / TX_DATA / RX_DATA summary bits.
 *  6. Announcement channels: restrict their outgoing coefficients to PC_CH
 *     toward the announcement targets only.
 *  7. Debug dump of curchnl/channel/chflags and the flag/coef matrices
 *     (truncated to MIXER_MAX_DUMP_CHANNELS).
 *
 * Operates on the global li_config_table / li_total_channels state; `a` is
 * used only for the debug trace id.
 */
static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
{
	static char hex_digit_table[0x10] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
	word n, i, j;
	char *p;
	char hex_line[2 * MIXER_MAX_DUMP_CHANNELS + MIXER_MAX_DUMP_CHANNELS / 8 + 4];
	dbug(1, dprintf("[%06lx] %s,%d: mixer_calculate_coefs",
			(dword)(UnMapController(a->Id)), (char *)(FILE_), __LINE__));
	/* Phase 1: mark channels as INVOLVED/CONFERENCE from flags */
	for (i = 0; i < li_total_channels; i++)
	{
		li_config_table[i].channel &= LI_CHANNEL_ADDRESSES_SET;
		if (li_config_table[i].chflags != 0)
			li_config_table[i].channel |= LI_CHANNEL_INVOLVED;
		else
		{
			for (j = 0; j < li_total_channels; j++)
			{
				if (((li_config_table[i].flag_table[j]) != 0)
				    || ((li_config_table[j].flag_table[i]) != 0))
				{
					li_config_table[i].channel |= LI_CHANNEL_INVOLVED;
				}
				if (((li_config_table[i].flag_table[j] & LI_FLAG_CONFERENCE) != 0)
				    || ((li_config_table[j].flag_table[i] & LI_FLAG_CONFERENCE) != 0))
				{
					li_config_table[i].channel |= LI_CHANNEL_CONFERENCE;
				}
			}
		}
	}
	/* Phase 2a: reset coef matrix, seed CH_CH for conference pairs */
	for (i = 0; i < li_total_channels; i++)
	{
		for (j = 0; j < li_total_channels; j++)
		{
			li_config_table[i].coef_table[j] &= ~(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC);
			if (li_config_table[i].flag_table[j] & LI_FLAG_CONFERENCE)
				li_config_table[i].coef_table[j] |= LI_COEF_CH_CH;
		}
	}
	/* Phase 2b: transitive closure — every conference member hears every
	   channel reachable through any other conference member */
	for (n = 0; n < li_total_channels; n++)
	{
		if (li_config_table[n].channel & LI_CHANNEL_CONFERENCE)
		{
			for (i = 0; i < li_total_channels; i++)
			{
				if (li_config_table[i].channel & LI_CHANNEL_CONFERENCE)
				{
					for (j = 0; j < li_total_channels; j++)
					{
						li_config_table[i].coef_table[j] |=
							li_config_table[i].coef_table[n] & li_config_table[n].coef_table[j];
					}
				}
			}
		}
	}
	/* Phase 3: re-derive conference flags; suppress self-echo unless the
	   channel explicitly conferences with itself */
	for (i = 0; i < li_total_channels; i++)
	{
		if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
		{
			li_config_table[i].coef_table[i] &= ~LI_COEF_CH_CH;
			for (j = 0; j < li_total_channels; j++)
			{
				if (li_config_table[i].coef_table[j] & LI_COEF_CH_CH)
					li_config_table[i].flag_table[j] |= LI_FLAG_CONFERENCE;
			}
			if (li_config_table[i].flag_table[i] & LI_FLAG_CONFERENCE)
				li_config_table[i].coef_table[i] |= LI_COEF_CH_CH;
		}
	}
	/* Phase 4: map flag bits and chflag modes onto coefficient bits */
	for (i = 0; i < li_total_channels; i++)
	{
		if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
		{
			for (j = 0; j < li_total_channels; j++)
			{
				if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
					li_config_table[i].coef_table[j] |= LI_COEF_CH_CH;
				if (li_config_table[i].flag_table[j] & LI_FLAG_MONITOR)
					li_config_table[i].coef_table[j] |= LI_COEF_CH_PC;
				if (li_config_table[i].flag_table[j] & LI_FLAG_MIX)
					li_config_table[i].coef_table[j] |= LI_COEF_PC_CH;
				if (li_config_table[i].flag_table[j] & LI_FLAG_PCCONNECT)
					li_config_table[i].coef_table[j] |= LI_COEF_PC_PC;
			}
			if (li_config_table[i].chflags & LI_CHFLAG_MONITOR)
			{
				for (j = 0; j < li_total_channels; j++)
				{
					if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
					{
						li_config_table[i].coef_table[j] |= LI_COEF_CH_PC;
						if (li_config_table[j].chflags & LI_CHFLAG_MIX)
							li_config_table[i].coef_table[j] |= LI_COEF_PC_CH | LI_COEF_PC_PC;
					}
				}
			}
			if (li_config_table[i].chflags & LI_CHFLAG_MIX)
			{
				for (j = 0; j < li_total_channels; j++)
				{
					if (li_config_table[j].flag_table[i] & LI_FLAG_INTERCONNECT)
						li_config_table[j].coef_table[i] |= LI_COEF_PC_CH;
				}
			}
			/* Loop mode: forward everything received from i to every
			   channel i is interconnected with */
			if (li_config_table[i].chflags & LI_CHFLAG_LOOP)
			{
				for (j = 0; j < li_total_channels; j++)
				{
					if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
					{
						for (n = 0; n < li_total_channels; n++)
						{
							if (li_config_table[n].flag_table[i] & LI_FLAG_INTERCONNECT)
							{
								li_config_table[n].coef_table[j] |= LI_COEF_CH_CH;
								if (li_config_table[j].chflags & LI_CHFLAG_MIX)
								{
									li_config_table[n].coef_table[j] |= LI_COEF_PC_CH;
									if (li_config_table[n].chflags & LI_CHFLAG_MONITOR)
										li_config_table[n].coef_table[j] |= LI_COEF_CH_PC | LI_COEF_PC_PC;
								}
								else if (li_config_table[n].chflags & LI_CHFLAG_MONITOR)
									li_config_table[n].coef_table[j] |= LI_COEF_CH_PC;
							}
						}
					}
				}
			}
		}
	}
	/* Phase 5: per-channel ACTIVE / RX_DATA / TX_DATA summary; an involved
	   but inactive channel falls back to a plain PC<->CH path */
	for (i = 0; i < li_total_channels; i++)
	{
		if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
		{
			if (li_config_table[i].chflags & (LI_CHFLAG_MONITOR | LI_CHFLAG_MIX | LI_CHFLAG_LOOP))
				li_config_table[i].channel |= LI_CHANNEL_ACTIVE;
			if (li_config_table[i].chflags & LI_CHFLAG_MONITOR)
				li_config_table[i].channel |= LI_CHANNEL_RX_DATA;
			if (li_config_table[i].chflags & LI_CHFLAG_MIX)
				li_config_table[i].channel |= LI_CHANNEL_TX_DATA;
			for (j = 0; j < li_total_channels; j++)
			{
				if ((li_config_table[i].flag_table[j] &
				     (LI_FLAG_INTERCONNECT | LI_FLAG_PCCONNECT | LI_FLAG_CONFERENCE | LI_FLAG_MONITOR))
				    || (li_config_table[j].flag_table[i] &
					(LI_FLAG_INTERCONNECT | LI_FLAG_PCCONNECT | LI_FLAG_CONFERENCE | LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX)))
				{
					li_config_table[i].channel |= LI_CHANNEL_ACTIVE;
				}
				if (li_config_table[i].flag_table[j] & (LI_FLAG_PCCONNECT | LI_FLAG_MONITOR))
					li_config_table[i].channel |= LI_CHANNEL_RX_DATA;
				if (li_config_table[j].flag_table[i] & (LI_FLAG_PCCONNECT | LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX))
					li_config_table[i].channel |= LI_CHANNEL_TX_DATA;
			}
			if (!(li_config_table[i].channel & LI_CHANNEL_ACTIVE))
			{
				li_config_table[i].coef_table[i] |= LI_COEF_PC_CH | LI_COEF_CH_PC;
				li_config_table[i].channel |= LI_CHANNEL_TX_DATA | LI_CHANNEL_RX_DATA;
			}
		}
	}
	/* Phase 6: announcement sources only send PC audio to their targets */
	for (i = 0; i < li_total_channels; i++)
	{
		if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
		{
			j = 0;
			while ((j < li_total_channels) && !(li_config_table[i].flag_table[j] & LI_FLAG_ANNOUNCEMENT))
				j++;
			if (j < li_total_channels)
			{
				for (j = 0; j < li_total_channels; j++)
				{
					li_config_table[i].coef_table[j] &= ~(LI_COEF_CH_CH | LI_COEF_PC_CH);
					if (li_config_table[i].flag_table[j] & LI_FLAG_ANNOUNCEMENT)
						li_config_table[i].coef_table[j] |= LI_COEF_PC_CH;
				}
			}
		}
	}
	/* Phase 7: debug dump (first MIXER_MAX_DUMP_CHANNELS channels only) */
	n = li_total_channels;
	if (n > MIXER_MAX_DUMP_CHANNELS)
		n = MIXER_MAX_DUMP_CHANNELS;
	p = hex_line;
	for (j = 0; j < n; j++)
	{
		if ((j & 0x7) == 0)
			*(p++) = ' ';
		*(p++) = hex_digit_table[li_config_table[j].curchnl >> 4];
		*(p++) = hex_digit_table[li_config_table[j].curchnl & 0xf];
	}
	*p = '\0';
	dbug(1, dprintf("[%06lx] CURRENT %s",
			(dword)(UnMapController(a->Id)), (char *)hex_line));
	p = hex_line;
	for (j = 0; j < n; j++)
	{
		if ((j & 0x7) == 0)
			*(p++) = ' ';
		*(p++) = hex_digit_table[li_config_table[j].channel >> 4];
		*(p++) = hex_digit_table[li_config_table[j].channel & 0xf];
	}
	*p = '\0';
	dbug(1, dprintf("[%06lx] CHANNEL %s",
			(dword)(UnMapController(a->Id)), (char *)hex_line));
	p = hex_line;
	for (j = 0; j < n; j++)
	{
		if ((j & 0x7) == 0)
			*(p++) = ' ';
		*(p++) = hex_digit_table[li_config_table[j].chflags >> 4];
		*(p++) = hex_digit_table[li_config_table[j].chflags & 0xf];
	}
	*p = '\0';
	dbug(1, dprintf("[%06lx] CHFLAG %s",
			(dword)(UnMapController(a->Id)), (char *)hex_line));
	for (i = 0; i < n; i++)
	{
		p = hex_line;
		for (j = 0; j < n; j++)
		{
			if ((j & 0x7) == 0)
				*(p++) = ' ';
			*(p++) = hex_digit_table[li_config_table[i].flag_table[j] >> 4];
			*(p++) = hex_digit_table[li_config_table[i].flag_table[j] & 0xf];
		}
		*p = '\0';
		dbug(1, dprintf("[%06lx] FLAG[%02x]%s",
				(dword)(UnMapController(a->Id)), i, (char *)hex_line));
	}
	for (i = 0; i < n; i++)
	{
		p = hex_line;
		for (j = 0; j < n; j++)
		{
			if ((j & 0x7) == 0)
				*(p++) = ' ';
			*(p++) = hex_digit_table[li_config_table[i].coef_table[j] >> 4];
			*(p++) = hex_digit_table[li_config_table[i].coef_table[j] & 0xf];
		}
		*p = '\0';
		dbug(1, dprintf("[%06lx] COEF[%02x]%s",
				(dword)(UnMapController(a->Id)), i, (char *)hex_line));
	}
}
/*
 * PRI coefficient write program: one pass per coefficient direction.
 * `mask` selects the coefficient bit to program; `line_flags` is OR-ed
 * into the row/column selector byte sent to the card (to/from-PC flags).
 */
static struct
{
	byte mask;
	byte line_flags;
} mixer_write_prog_pri[] =
{
	{ LI_COEF_CH_CH, 0 },
	{ LI_COEF_CH_PC, MIXER_COEF_LINE_TO_PC_FLAG },
	{ LI_COEF_PC_CH, MIXER_COEF_LINE_FROM_PC_FLAG },
	{ LI_COEF_PC_PC, MIXER_COEF_LINE_TO_PC_FLAG | MIXER_COEF_LINE_FROM_PC_FLAG }
};
/*
 * BRI coefficient write program: fixed order of the 36 coefficient slots
 * expected by UDATA_REQUEST_SET_MIXER_COEFS_BRI. `from_ch`/`to_ch` index
 * into the (possibly swapped) channel map; `mask` selects the coefficient
 * bit tested; a non-zero `xconnect_override` is written verbatim instead
 * of the computed value when XCONNECT addresses are in use.
 */
static struct
{
	byte from_ch;
	byte to_ch;
	byte mask;
	byte xconnect_override;
} mixer_write_prog_bri[] =
{
	{ 0, 0, LI_COEF_CH_CH, 0x01 },  /* B to B */
	{ 1, 0, LI_COEF_CH_CH, 0x01 },  /* Alt B to B */
	{ 0, 0, LI_COEF_PC_CH, 0x80 },  /* PC to B */
	{ 1, 0, LI_COEF_PC_CH, 0x01 },  /* Alt PC to B */
	{ 2, 0, LI_COEF_CH_CH, 0x00 },  /* IC to B */
	{ 3, 0, LI_COEF_CH_CH, 0x00 },  /* Alt IC to B */
	{ 0, 0, LI_COEF_CH_PC, 0x80 },  /* B to PC */
	{ 1, 0, LI_COEF_CH_PC, 0x01 },  /* Alt B to PC */
	{ 0, 0, LI_COEF_PC_PC, 0x01 },  /* PC to PC */
	{ 1, 0, LI_COEF_PC_PC, 0x01 },  /* Alt PC to PC */
	{ 2, 0, LI_COEF_CH_PC, 0x00 },  /* IC to PC */
	{ 3, 0, LI_COEF_CH_PC, 0x00 },  /* Alt IC to PC */
	{ 0, 2, LI_COEF_CH_CH, 0x00 },  /* B to IC */
	{ 1, 2, LI_COEF_CH_CH, 0x00 },  /* Alt B to IC */
	{ 0, 2, LI_COEF_PC_CH, 0x00 },  /* PC to IC */
	{ 1, 2, LI_COEF_PC_CH, 0x00 },  /* Alt PC to IC */
	{ 2, 2, LI_COEF_CH_CH, 0x00 },  /* IC to IC */
	{ 3, 2, LI_COEF_CH_CH, 0x00 },  /* Alt IC to IC */
	{ 1, 1, LI_COEF_CH_CH, 0x01 },  /* Alt B to Alt B */
	{ 0, 1, LI_COEF_CH_CH, 0x01 },  /* B to Alt B */
	{ 1, 1, LI_COEF_PC_CH, 0x80 },  /* Alt PC to Alt B */
	{ 0, 1, LI_COEF_PC_CH, 0x01 },  /* PC to Alt B */
	{ 3, 1, LI_COEF_CH_CH, 0x00 },  /* Alt IC to Alt B */
	{ 2, 1, LI_COEF_CH_CH, 0x00 },  /* IC to Alt B */
	{ 1, 1, LI_COEF_CH_PC, 0x80 },  /* Alt B to Alt PC */
	{ 0, 1, LI_COEF_CH_PC, 0x01 },  /* B to Alt PC */
	{ 1, 1, LI_COEF_PC_PC, 0x01 },  /* Alt PC to Alt PC */
	{ 0, 1, LI_COEF_PC_PC, 0x01 },  /* PC to Alt PC */
	{ 3, 1, LI_COEF_CH_PC, 0x00 },  /* Alt IC to Alt PC */
	{ 2, 1, LI_COEF_CH_PC, 0x00 },  /* IC to Alt PC */
	{ 1, 3, LI_COEF_CH_CH, 0x00 },  /* Alt B to Alt IC */
	{ 0, 3, LI_COEF_CH_CH, 0x00 },  /* B to Alt IC */
	{ 1, 3, LI_COEF_PC_CH, 0x00 },  /* Alt PC to Alt IC */
	{ 0, 3, LI_COEF_PC_CH, 0x00 },  /* PC to Alt IC */
	{ 3, 3, LI_COEF_CH_CH, 0x00 },  /* Alt IC to Alt IC */
	{ 2, 3, LI_COEF_CH_CH, 0x00 }   /* IC to Alt IC */
};
/*
 * Index permutation into mixer_write_prog_bri used when the advanced-voice
 * coefficients belong to the *other* B channel (plci != AdvSignalPLCI):
 * maps each slot to its B/Alt-B swapped counterpart (first and second
 * halves of the 36-entry program exchanged).
 */
static byte mixer_swapped_index_bri[] =
{
	18,  /* B to B */
	19,  /* Alt B to B */
	20,  /* PC to B */
	21,  /* Alt PC to B */
	22,  /* IC to B */
	23,  /* Alt IC to B */
	24,  /* B to PC */
	25,  /* Alt B to PC */
	26,  /* PC to PC */
	27,  /* Alt PC to PC */
	28,  /* IC to PC */
	29,  /* Alt IC to PC */
	30,  /* B to IC */
	31,  /* Alt B to IC */
	32,  /* PC to IC */
	33,  /* Alt PC to IC */
	34,  /* IC to IC */
	35,  /* Alt IC to IC */
	0,   /* Alt B to Alt B */
	1,   /* B to Alt B */
	2,   /* Alt PC to Alt B */
	3,   /* PC to Alt B */
	4,   /* Alt IC to Alt B */
	5,   /* IC to Alt B */
	6,   /* Alt B to Alt PC */
	7,   /* B to Alt PC */
	8,   /* Alt PC to Alt PC */
	9,   /* PC to Alt PC */
	10,  /* Alt IC to Alt PC */
	11,  /* IC to Alt PC */
	12,  /* Alt B to Alt IC */
	13,  /* B to Alt IC */
	14,  /* Alt PC to Alt IC */
	15,  /* PC to Alt IC */
	16,  /* Alt IC to Alt IC */
	17   /* IC to Alt IC */
};
/*
 * XCONNECT write program: the four coefficient directions in the order
 * they are emitted into a UDATA_REQUEST_XCONNECT_TO request. `from_pc`
 * selects the source transfer address (PC vs. B side); `to_pc` selects
 * the destination port flag.
 */
static struct
{
	byte mask;
	byte from_pc;
	byte to_pc;
} xconnect_write_prog[] =
{
	{ LI_COEF_CH_CH, false, false },
	{ LI_COEF_CH_PC, false, true },
	{ LI_COEF_PC_CH, true, false },
	{ LI_COEF_PC_PC, true, true }
};
/*
 * Ask the card for the DMA transfer addresses of this PLCI's B channel by
 * sending a UDATA_REQUEST_XCONNECT_FROM for both the B-side port and the
 * PC-side port. On PRI the request is skipped (with a debug trace) when
 * the channel id has been wiped out; on BRI channel 0 is always queried.
 */
static void xconnect_query_addresses(PLCI *plci)
{
	DIVA_CAPI_ADAPTER *adapter;
	byte *out;
	word channel, w;

	dbug(1, dprintf("[%06lx] %s,%d: xconnect_query_addresses",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	adapter = plci->adapter;
	if (adapter->li_pri
	    && ((plci->li_bchannel_id == 0)
		|| (li_config_table[adapter->li_base + (plci->li_bchannel_id - 1)].plci != plci)))
	{
		dbug(1, dprintf("[%06x] %s,%d: Channel id wiped out",
				(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
				(char *)(FILE_), __LINE__));
		return;
	}
	channel = adapter->li_pri ? (word)(plci->li_bchannel_id - 1) : 0;
	out = plci->internal_req_buffer;
	*(out++) = UDATA_REQUEST_XCONNECT_FROM;
	/* first port selector: the B side of the channel */
	w = channel;
	*(out++) = (byte) w;
	*(out++) = (byte)(w >> 8);
	/* second port selector: the PC side of the same channel */
	w = channel | XCONNECT_CHANNEL_PORT_PC;
	*(out++) = (byte) w;
	*(out++) = (byte)(w >> 8);
	plci->NData[0].P = plci->internal_req_buffer;
	plci->NData[0].PLength = out - plci->internal_req_buffer;
	plci->NL.X = plci->NData;
	plci->NL.ReqCh = 0;
	plci->NL.Req = plci->nl_req = (byte) N_UDATA;
	plci->adapter->request(&plci->NL);
}
/*
 * Arm the coefficient write state machine: remember which internal command
 * to resume with and restart the channel cursor at 0. The actual work is
 * performed by subsequent xconnect_write_coefs_process() calls.
 */
static void xconnect_write_coefs(PLCI *plci, word internal_command)
{
	dbug(1, dprintf("[%06lx] %s,%d: xconnect_write_coefs %04x",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__, internal_command));
	/* rewind the per-channel write cursor and latch the resume command */
	plci->li_write_channel = 0;
	plci->li_write_command = internal_command;
}
/*
 * Incrementally program the mixer coefficients for this PLCI's B channel
 * into the card, resuming at plci->li_write_channel on each call.
 *
 * Two code paths:
 *  - XCONNECT-capable adapters: walk the coefficient row, emitting
 *    UDATA_REQUEST_XCONNECT_TO entries (transfer address + port + coef)
 *    for every channel whose requested (low nibble) and programmed (high
 *    nibble) coefficient states differ, then finish with a
 *    SET_MIXER_COEFS request carrying the TX/RX enable bits.
 *  - Legacy adapters: emit the full SET_MIXER_COEFS_PRI_SYNC/BRI matrix
 *    in one request.
 * The high nibble of coef_table entries is toggled as each coefficient is
 * queued, so the XOR of nibbles identifies still-pending work.
 *
 * Returns true while more calls are needed or the request was queued,
 * false if a previous write was rejected by the card.
 */
static byte xconnect_write_coefs_process(dword Id, PLCI *plci, byte Rc)
{
	DIVA_CAPI_ADAPTER *a;
	word w, n, i, j, r, s, to_ch;
	dword d;
	byte *p;
	struct xconnect_transfer_address_s *transfer_address;
	byte ch_map[MIXER_CHANNELS_BRI];
	dbug(1, dprintf("[%06x] %s,%d: xconnect_write_coefs_process %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->li_write_channel));
	a = plci->adapter;
	/* Bail out if the channel binding was lost in the meantime */
	if ((plci->li_bchannel_id == 0)
	    || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci))
	{
		dbug(1, dprintf("[%06x] %s,%d: Channel id wiped out",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		return (true);
	}
	i = a->li_base + (plci->li_bchannel_id - 1);
	j = plci->li_write_channel;
	p = plci->internal_req_buffer;
	/* If resuming, the previous request's return code must be OK */
	if (j != 0)
	{
		if ((Rc != OK) && (Rc != OK_FC))
		{
			dbug(1, dprintf("[%06lx] %s,%d: LI write coefs failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			return (false);
		}
	}
	if (li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
	{
		/* XCONNECT path: r = pending coefficient changes for channel j,
		   s = directions permitted by our own transfer addresses */
		r = 0;
		s = 0;
		if (j < li_total_channels)
		{
			if (li_config_table[i].channel & LI_CHANNEL_ADDRESSES_SET)
			{
				s = ((li_config_table[i].send_b.card_address.low | li_config_table[i].send_b.card_address.high) ?
				     (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_PC | LI_COEF_PC_PC)) &
					((li_config_table[i].send_pc.card_address.low | li_config_table[i].send_pc.card_address.high) ?
					 (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_PC_CH));
			}
			r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
			/* Skip channels with nothing pending or that cannot be
			   reached (no addresses, out-of-range BRI slot, foreign
			   card without DMA connect, incompatible directions) */
			while ((j < li_total_channels)
			       && ((r == 0)
				   || (!(li_config_table[j].channel & LI_CHANNEL_ADDRESSES_SET))
				   || (!li_config_table[j].adapter->li_pri
				       && (j >= li_config_table[j].adapter->li_base + MIXER_BCHANNELS_BRI))
				   || (((li_config_table[j].send_b.card_address.low != li_config_table[i].send_b.card_address.low)
					|| (li_config_table[j].send_b.card_address.high != li_config_table[i].send_b.card_address.high))
				       && (!(a->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)
					   || !(li_config_table[j].adapter->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)))
				   || ((li_config_table[j].adapter->li_base != a->li_base)
				       && !(r & s &
					    ((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
					     (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
					    ((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
					     (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC))))))
			{
				j++;
				if (j < li_total_channels)
					r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
			}
		}
		if (j < li_total_channels)
		{
			/* Build one XCONNECT_TO request covering as many pending
			   channels as fit into the internal request buffer */
			plci->internal_command = plci->li_write_command;
			if (plci_nl_busy(plci))
				return (true);
			to_ch = (a->li_pri) ? plci->li_bchannel_id - 1 : 0;
			*(p++) = UDATA_REQUEST_XCONNECT_TO;
			do
			{
				if (li_config_table[j].adapter->li_base != a->li_base)
				{
					/* Cross-card connection: restrict to directions
					   both sides have addresses for */
					r &= s &
						((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
						 (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
						((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
						 (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC));
				}
				n = 0;
				do
				{
					if (r & xconnect_write_prog[n].mask)
					{
						if (xconnect_write_prog[n].from_pc)
							transfer_address = &(li_config_table[j].send_pc);
						else
							transfer_address = &(li_config_table[j].send_b);
						/* 12-byte transfer address: low, high, offset */
						d = transfer_address->card_address.low;
						*(p++) = (byte) d;
						*(p++) = (byte)(d >> 8);
						*(p++) = (byte)(d >> 16);
						*(p++) = (byte)(d >> 24);
						d = transfer_address->card_address.high;
						*(p++) = (byte) d;
						*(p++) = (byte)(d >> 8);
						*(p++) = (byte)(d >> 16);
						*(p++) = (byte)(d >> 24);
						d = transfer_address->offset;
						*(p++) = (byte) d;
						*(p++) = (byte)(d >> 8);
						*(p++) = (byte)(d >> 16);
						*(p++) = (byte)(d >> 24);
						w = xconnect_write_prog[n].to_pc ? to_ch | XCONNECT_CHANNEL_PORT_PC : to_ch;
						*(p++) = (byte) w;
						*(p++) = (byte)(w >> 8);
						/* coefficient byte: 0x01 = off, else gain with
						   a-law/u-law conversion between the cards */
						w = ((li_config_table[i].coef_table[j] & xconnect_write_prog[n].mask) == 0) ? 0x01 :
							(li_config_table[i].adapter->u_law ?
							 (li_config_table[j].adapter->u_law ? 0x80 : 0x86) :
							 (li_config_table[j].adapter->u_law ? 0x7a : 0x80));
						*(p++) = (byte) w;
						*(p++) = (byte) 0;
						/* mark this coefficient as programmed */
						li_config_table[i].coef_table[j] ^= xconnect_write_prog[n].mask << 4;
					}
					n++;
				} while ((n < ARRAY_SIZE(xconnect_write_prog))
					 && ((p - plci->internal_req_buffer) + 16 < INTERNAL_REQ_BUFFER_SIZE));
				if (n == ARRAY_SIZE(xconnect_write_prog))
				{
					/* Channel fully handled: advance to the next
					   channel with pending, reachable work */
					do
					{
						j++;
						if (j < li_total_channels)
							r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
					} while ((j < li_total_channels)
						 && ((r == 0)
						     || (!(li_config_table[j].channel & LI_CHANNEL_ADDRESSES_SET))
						     || (!li_config_table[j].adapter->li_pri
							 && (j >= li_config_table[j].adapter->li_base + MIXER_BCHANNELS_BRI))
						     || (((li_config_table[j].send_b.card_address.low != li_config_table[i].send_b.card_address.low)
							  || (li_config_table[j].send_b.card_address.high != li_config_table[i].send_b.card_address.high))
							 && (!(a->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)
							     || !(li_config_table[j].adapter->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)))
						     || ((li_config_table[j].adapter->li_base != a->li_base)
							 && !(r & s &
							      ((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
							       (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
							      ((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
							       (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC))))));
				}
			} while ((j < li_total_channels)
				 && ((p - plci->internal_req_buffer) + 16 < INTERNAL_REQ_BUFFER_SIZE));
		}
		else if (j == li_total_channels)
		{
			/* All channels done: send the final SET_MIXER_COEFS request
			   with the TX/RX data-enable feature bits */
			plci->internal_command = plci->li_write_command;
			if (plci_nl_busy(plci))
				return (true);
			if (a->li_pri)
			{
				*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC;
				w = 0;
				if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
					w |= MIXER_FEATURE_ENABLE_TX_DATA;
				if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
					w |= MIXER_FEATURE_ENABLE_RX_DATA;
				*(p++) = (byte) w;
				*(p++) = (byte)(w >> 8);
			}
			else
			{
				*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_BRI;
				w = 0;
				if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)
				    && (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length))
				{
					w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
				}
				if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
					w |= MIXER_FEATURE_ENABLE_TX_DATA;
				if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
					w |= MIXER_FEATURE_ENABLE_RX_DATA;
				*(p++) = (byte) w;
				*(p++) = (byte)(w >> 8);
				/* swap B/Alt-B channel map if this PLCI owns channel 2 */
				for (j = 0; j < sizeof(ch_map); j += 2)
				{
					if (plci->li_bchannel_id == 2)
					{
						ch_map[j] = (byte)(j + 1);
						ch_map[j + 1] = (byte) j;
					}
					else
					{
						ch_map[j] = (byte) j;
						ch_map[j + 1] = (byte)(j + 1);
					}
				}
				for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
				{
					i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
					j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
					if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
					{
						*p = (mixer_write_prog_bri[n].xconnect_override != 0) ?
							mixer_write_prog_bri[n].xconnect_override :
							((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
						if ((i >= a->li_base + MIXER_BCHANNELS_BRI) || (j >= a->li_base + MIXER_BCHANNELS_BRI))
						{
							w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
							li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
						}
					}
					else
					{
						/* not involved: take value from the advanced-voice
						   coefficient buffer if one applies */
						*p = 0x00;
						if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI->tel == ADV_VOICE))
						{
							w = (plci == a->AdvSignalPLCI) ? n : mixer_swapped_index_bri[n];
							if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w < a->adv_voice_coef_length)
								*p = a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w];
						}
					}
					p++;
				}
			}
			j = li_total_channels + 1;
		}
	}
	else
	{
		/* Legacy path: no XCONNECT — write the full coefficient matrix */
		if (j <= li_total_channels)
		{
			plci->internal_command = plci->li_write_command;
			if (plci_nl_busy(plci))
				return (true);
			if (j < a->li_base)
				j = a->li_base;
			if (a->li_pri)
			{
				*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC;
				w = 0;
				if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
					w |= MIXER_FEATURE_ENABLE_TX_DATA;
				if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
					w |= MIXER_FEATURE_ENABLE_RX_DATA;
				*(p++) = (byte) w;
				*(p++) = (byte)(w >> 8);
				/* one column pass and one row pass per direction */
				for (n = 0; n < ARRAY_SIZE(mixer_write_prog_pri); n++)
				{
					*(p++) = (byte)((plci->li_bchannel_id - 1) | mixer_write_prog_pri[n].line_flags);
					for (j = a->li_base; j < a->li_base + MIXER_CHANNELS_PRI; j++)
					{
						w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
						if (w & mixer_write_prog_pri[n].mask)
						{
							*(p++) = (li_config_table[i].coef_table[j] & mixer_write_prog_pri[n].mask) ? 0x80 : 0x01;
							li_config_table[i].coef_table[j] ^= mixer_write_prog_pri[n].mask << 4;
						}
						else
							*(p++) = 0x00;
					}
					*(p++) = (byte)((plci->li_bchannel_id - 1) | MIXER_COEF_LINE_ROW_FLAG | mixer_write_prog_pri[n].line_flags);
					for (j = a->li_base; j < a->li_base + MIXER_CHANNELS_PRI; j++)
					{
						w = ((li_config_table[j].coef_table[i] & 0xf) ^ (li_config_table[j].coef_table[i] >> 4));
						if (w & mixer_write_prog_pri[n].mask)
						{
							*(p++) = (li_config_table[j].coef_table[i] & mixer_write_prog_pri[n].mask) ? 0x80 : 0x01;
							li_config_table[j].coef_table[i] ^= mixer_write_prog_pri[n].mask << 4;
						}
						else
							*(p++) = 0x00;
					}
				}
			}
			else
			{
				*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_BRI;
				w = 0;
				if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)
				    && (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length))
				{
					w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
				}
				if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
					w |= MIXER_FEATURE_ENABLE_TX_DATA;
				if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
					w |= MIXER_FEATURE_ENABLE_RX_DATA;
				*(p++) = (byte) w;
				*(p++) = (byte)(w >> 8);
				/* swap B/Alt-B channel map if this PLCI owns channel 2 */
				for (j = 0; j < sizeof(ch_map); j += 2)
				{
					if (plci->li_bchannel_id == 2)
					{
						ch_map[j] = (byte)(j + 1);
						ch_map[j + 1] = (byte) j;
					}
					else
					{
						ch_map[j] = (byte) j;
						ch_map[j + 1] = (byte)(j + 1);
					}
				}
				for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
				{
					i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
					j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
					if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
					{
						*p = ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
						w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
						li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
					}
					else
					{
						*p = 0x00;
						if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI->tel == ADV_VOICE))
						{
							w = (plci == a->AdvSignalPLCI) ? n : mixer_swapped_index_bri[n];
							if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w < a->adv_voice_coef_length)
								*p = a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w];
						}
					}
					p++;
				}
			}
			j = li_total_channels + 1;
		}
	}
	plci->li_write_channel = j;
	/* Dispatch the request if anything was queued into the buffer */
	if (p != plci->internal_req_buffer)
	{
		plci->NData[0].P = plci->internal_req_buffer;
		plci->NData[0].PLength = p - plci->internal_req_buffer;
		plci->NL.X = plci->NData;
		plci->NL.ReqCh = 0;
		plci->NL.Req = plci->nl_req = (byte) N_UDATA;
		plci->adapter->request(&plci->NL);
	}
	return (true);
}
/*
 * Queue a LI_REQ_SILENT_UPDATE facility request to the application(s) of
 * affected PLCIs so they re-evaluate their interconnect state.
 *
 * others == false: notify only this PLCI (if it still owns its channel).
 * others == true:  walk li_config_table and notify every PLCI with a
 *                  channel; plci->li_notify_update is set for the duration
 *                  so the walk does not re-notify the originator.
 *
 * A target's li_notify_update flag suppresses duplicate notifications; it
 * is cleared again unless api_put() reported _QUEUE_FULL (in which case
 * the pending flag keeps the update outstanding).
 */
static void mixer_notify_update(PLCI *plci, byte others)
{
	DIVA_CAPI_ADAPTER *a;
	word i, w;
	PLCI *notify_plci;
	byte msg[sizeof(CAPI_MSG_HEADER) + 6];
	dbug(1, dprintf("[%06lx] %s,%d: mixer_notify_update %d",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__, others));
	a = plci->adapter;
	if (a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED)
	{
		if (others)
			plci->li_notify_update = true;
		i = 0;
		do
		{
			notify_plci = NULL;
			if (others)
			{
				/* advance to the next channel owned by some PLCI */
				while ((i < li_total_channels) && (li_config_table[i].plci == NULL))
					i++;
				if (i < li_total_channels)
					notify_plci = li_config_table[i++].plci;
			}
			else
			{
				if ((plci->li_bchannel_id != 0)
				    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
				{
					notify_plci = plci;
				}
			}
			/* only notify live PLCIs with an application attached */
			if ((notify_plci != NULL)
			    && !notify_plci->li_notify_update
			    && (notify_plci->appl != NULL)
			    && (notify_plci->State)
			    && notify_plci->NL.Id && !notify_plci->nl_remove_id)
			{
				notify_plci->li_notify_update = true;
				((CAPI_MSG *) msg)->header.length = 18;
				((CAPI_MSG *) msg)->header.appl_id = notify_plci->appl->Id;
				((CAPI_MSG *) msg)->header.command = _FACILITY_R;
				((CAPI_MSG *) msg)->header.number = 0;
				((CAPI_MSG *) msg)->header.controller = notify_plci->adapter->Id;
				((CAPI_MSG *) msg)->header.plci = notify_plci->Id;
				((CAPI_MSG *) msg)->header.ncci = 0;
				((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
				((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
				PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
				((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
				w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
				if (w != _QUEUE_FULL)
				{
					if (w != 0)
					{
						dbug(1, dprintf("[%06lx] %s,%d: Interconnect notify failed %06x %d",
								(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
								(char *)(FILE_), __LINE__,
								(dword)((notify_plci->Id << 8) | UnMapController(notify_plci->adapter->Id)), w));
					}
					notify_plci->li_notify_update = false;
				}
			}
		} while (others && (notify_plci != NULL));
		if (others)
			plci->li_notify_update = false;
	}
}
static void mixer_clear_config(PLCI *plci)
{
DIVA_CAPI_ADAPTER *a;
word i, j;
dbug(1, dprintf("[%06lx] %s,%d: mixer_clear_config",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__));
plci->li_notify_update = false;
plci->li_plci_b_write_pos = 0;
plci->li_plci_b_read_pos = 0;
plci->li_plci_b_req_pos = 0;
a = plci->adapter;
if ((plci->li_bchannel_id != 0)
&& (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
{
i = a->li_base + (plci->li_bchannel_id - 1);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[j].flag_table[i] = 0;
li_config_table[i].flag_table[j] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
if (!a->li_pri)
{
li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
{
i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].flag_table[j] = 0;
li_config_table[j].flag_table[i] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
{
i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].flag_table[j] = 0;
li_config_table[j].flag_table[i] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
}
}
}
}
}
/*
 * Before a B-channel switch, drain the coefficient-set indication queue:
 * deliver at least one indication, then keep going until the read cursor
 * has caught up with the request cursor.
 */
static void mixer_prepare_switch(dword Id, PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: mixer_prepare_switch",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	mixer_indication_coefs_set(Id, plci);
	while (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos)
		mixer_indication_coefs_set(Id, plci);
}
/*
 * Snapshot step of the adjust-B sequence: keep only the requested
 * coefficient bits (low nibble) for this PLCI's channel, discarding the
 * "already programmed" state in the high nibble so everything is
 * rewritten after the switch. On BRI the default PC<->CH path markers are
 * re-asserted. Always returns GOOD.
 */
static word mixer_save_config(dword Id, PLCI *plci, byte Rc)
{
	DIVA_CAPI_ADAPTER *a;
	word ch, k;

	dbug(1, dprintf("[%06lx] %s,%d: mixer_save_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	a = plci->adapter;
	if ((plci->li_bchannel_id == 0)
	    || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci))
		return (GOOD);
	ch = a->li_base + (plci->li_bchannel_id - 1);
	for (k = 0; k < li_total_channels; k++)
	{
		li_config_table[ch].coef_table[k] &= 0xf;
		li_config_table[k].coef_table[ch] &= 0xf;
	}
	if (!a->li_pri)
		li_config_table[ch].coef_table[ch] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
	return (GOOD);
}
/*
 * Restore step of the adjust-B sequence: re-query XCONNECT addresses (if
 * the adapter supports them) and rewrite all mixer coefficients, driven by
 * the adjust_b_state machine. The switch cases deliberately fall through
 * so each call advances as far as the hardware allows.
 *
 * Returns GOOD on success/still-in-progress, or a CAPI error word
 * (_WRONG_STATE / _FACILITY_NOT_SUPPORTED) on failure.
 */
static word mixer_restore_config(dword Id, PLCI *plci, byte Rc)
{
	DIVA_CAPI_ADAPTER *a;
	word Info;
	dbug(1, dprintf("[%06lx] %s,%d: mixer_restore_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	Info = GOOD;
	a = plci->adapter;
	if ((plci->B1_facilities & B1_FACILITY_MIXER)
	    && (plci->li_bchannel_id != 0)
	    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
	{
		switch (plci->adjust_b_state)
		{
		case ADJUST_B_RESTORE_MIXER_1:
			if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
			{
				/* query transfer addresses first; stay in state 1
				   while the network layer is busy */
				plci->internal_command = plci->adjust_b_command;
				if (plci_nl_busy(plci))
				{
					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_1;
					break;
				}
				xconnect_query_addresses(plci);
				plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_2;
				break;
			}
			/* no XCONNECT: skip straight to coefficient writing */
			plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
			Rc = OK;
			/* fall through */
		case ADJUST_B_RESTORE_MIXER_2:
		case ADJUST_B_RESTORE_MIXER_3:
		case ADJUST_B_RESTORE_MIXER_4:
			/* states 2-4 wait for both the request confirmation (OK)
			   and the address indication (Rc == 0) in either order */
			if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Adjust B query addresses failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _WRONG_STATE;
				break;
			}
			if (Rc == OK)
			{
				if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_3;
				else if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_4)
					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
			}
			else if (Rc == 0)
			{
				if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_4;
				else if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_3)
					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
			}
			if (plci->adjust_b_state != ADJUST_B_RESTORE_MIXER_5)
			{
				plci->internal_command = plci->adjust_b_command;
				break;
			}
			/* fall through */
		case ADJUST_B_RESTORE_MIXER_5:
			xconnect_write_coefs(plci, plci->adjust_b_command);
			plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_6;
			Rc = OK;
			/* fall through */
		case ADJUST_B_RESTORE_MIXER_6:
			if (!xconnect_write_coefs_process(Id, plci, Rc))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Write mixer coefs failed",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			if (plci->internal_command)
				break;
			plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_7;
			/* fall through */
		case ADJUST_B_RESTORE_MIXER_7:
			break;
		}
	}
	return (Info);
}
/*
 * Internal-command handler for line-interconnect requests
 * (connect / disconnect / silent update). The MIXER_COMMAND_1..3 cases
 * deliberately fall through so one call can run several phases:
 *   1. load the mixer B1 facility (if the channel becomes involved),
 *   2. write the coefficient set to the card,
 *   3. unload the mixer facility (if the channel is no longer involved).
 * Finally the channel's curchnl state (and the advanced-voice IC mirror
 * entries on BRI) is synchronized with plci->li_channel_bits.
 */
static void mixer_command(dword Id, PLCI *plci, byte Rc)
{
	DIVA_CAPI_ADAPTER *a;
	word i, internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: mixer_command %02x %04x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
			plci->li_cmd));
	a = plci->adapter;
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (plci->li_cmd)
	{
	case LI_REQ_CONNECT:
	case LI_REQ_DISCONNECT:
	case LI_REQ_SILENT_UPDATE:
		switch (internal_command)
		{
		default:
			/* phase 0: request the mixer B1 facility */
			if (plci->li_channel_bits & LI_CHANNEL_INVOLVED)
			{
				adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
									  B1_FACILITY_MIXER), MIXER_COMMAND_1);
			}
			/* fall through */
		case MIXER_COMMAND_1:
			/* phase 1: wait for the B1 adjustment to complete */
			if (plci->li_channel_bits & LI_CHANNEL_INVOLVED)
			{
				if (adjust_b_process(Id, plci, Rc) != GOOD)
				{
					dbug(1, dprintf("[%06lx] %s,%d: Load mixer failed",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					break;
				}
				if (plci->internal_command)
					return;
			}
			plci->li_plci_b_req_pos = plci->li_plci_b_write_pos;
			if ((plci->li_channel_bits & LI_CHANNEL_INVOLVED)
			    || ((get_b1_facilities(plci, plci->B1_resource) & B1_FACILITY_MIXER)
				&& (add_b1_facilities(plci, plci->B1_resource, (word)(plci->B1_facilities &
										      ~B1_FACILITY_MIXER)) == plci->B1_resource)))
			{
				xconnect_write_coefs(plci, MIXER_COMMAND_2);
			}
			else
			{
				/* mixer not loadable: just flush the queued indications */
				do
				{
					mixer_indication_coefs_set(Id, plci);
				} while (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos);
			}
			/* fall through */
		case MIXER_COMMAND_2:
			/* phase 2: stream the coefficients to the card */
			if ((plci->li_channel_bits & LI_CHANNEL_INVOLVED)
			    || ((get_b1_facilities(plci, plci->B1_resource) & B1_FACILITY_MIXER)
				&& (add_b1_facilities(plci, plci->B1_resource, (word)(plci->B1_facilities &
										      ~B1_FACILITY_MIXER)) == plci->B1_resource)))
			{
				if (!xconnect_write_coefs_process(Id, plci, Rc))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Write mixer coefs failed",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					/* roll the write queue back to the last complete entry */
					if (plci->li_plci_b_write_pos != plci->li_plci_b_req_pos)
					{
						do
						{
							plci->li_plci_b_write_pos = (plci->li_plci_b_write_pos == 0) ?
								LI_PLCI_B_QUEUE_ENTRIES - 1 : plci->li_plci_b_write_pos - 1;
							i = (plci->li_plci_b_write_pos == 0) ?
								LI_PLCI_B_QUEUE_ENTRIES - 1 : plci->li_plci_b_write_pos - 1;
						} while ((plci->li_plci_b_write_pos != plci->li_plci_b_req_pos)
							 && !(plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG));
					}
					break;
				}
				if (plci->internal_command)
					return;
			}
			if (!(plci->li_channel_bits & LI_CHANNEL_INVOLVED))
			{
				adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
									  ~B1_FACILITY_MIXER), MIXER_COMMAND_3);
			}
			/* fall through */
		case MIXER_COMMAND_3:
			/* phase 3: release the mixer B1 facility if unused */
			if (!(plci->li_channel_bits & LI_CHANNEL_INVOLVED))
			{
				if (adjust_b_process(Id, plci, Rc) != GOOD)
				{
					dbug(1, dprintf("[%06lx] %s,%d: Unload mixer failed",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					break;
				}
				if (plci->internal_command)
					return;
			}
			break;
		}
		break;
	}
	/* Synchronize curchnl with the (possibly changed) channel bits */
	if ((plci->li_bchannel_id == 0)
	    || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci))
	{
		dbug(1, dprintf("[%06x] %s,%d: Channel id wiped out %d",
				UnMapId(Id), (char *)(FILE_), __LINE__, (int)(plci->li_bchannel_id)));
	}
	else
	{
		i = a->li_base + (plci->li_bchannel_id - 1);
		li_config_table[i].curchnl = plci->li_channel_bits;
		if (!a->li_pri && (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
		{
			/* advanced voice: mirror into the IC channel entries */
			i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
			li_config_table[i].curchnl = plci->li_channel_bits;
			if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
			{
				i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
				li_config_table[i].curchnl = plci->li_channel_bits;
			}
		}
	}
}
/*
 * Apply an old-LI-spec connect or disconnect between the requesting PLCI
 * (side A, on adapter 'a') and the peer PLCI addressed by plci_b_id
 * (side B), by editing the global li_config_table flag matrix.  The
 * hardware coefficients are derived from this matrix later by
 * mixer_calculate_coefs().
 *
 * connect  - true for LI_REQ_CONNECT, false for LI_REQ_DISCONNECT
 * li_flags - LI_FLAG_* request bits (conference/monitor/announcement/mix)
 */
static void li_update_connect(dword Id, DIVA_CAPI_ADAPTER *a, PLCI *plci,
			      dword plci_b_id, byte connect, dword li_flags)
{
	word i, ch_a, ch_a_v, ch_a_s, ch_b, ch_b_v, ch_b_s;
	PLCI *plci_b;
	DIVA_CAPI_ADAPTER *a_b;
	/* Resolve side B's adapter and PLCI from the packed CAPI id
	 * (controller in the low byte, PLCI index in the next byte). */
	a_b = &(adapter[MapController((byte)(plci_b_id & 0x7f)) - 1]);
	plci_b = &(a_b->plci[((plci_b_id >> 8) & 0xff) - 1]);
	/* Index of side A's B-channel in the flag matrix. */
	ch_a = a->li_base + (plci->li_bchannel_id - 1);
	if (!a->li_pri && (plci->tel == ADV_VOICE)
	    && (plci == a->AdvSignalPLCI) && (Id & EXT_CONTROLLER))
	{
		/* BRI advanced-voice: route through the IC (codec) channel
		 * pair; _v/_s presumably select the voice and slave-codec
		 * sides -- TODO confirm against MIXER_IC_CHANNEL_BASE layout */
		ch_a_v = ch_a + MIXER_IC_CHANNEL_BASE;
		ch_a_s = (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
			a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id) : ch_a_v;
	}
	else
	{
		ch_a_v = ch_a;
		ch_a_s = ch_a;
	}
	/* Same computation for side B on its own adapter. */
	ch_b = a_b->li_base + (plci_b->li_bchannel_id - 1);
	if (!a_b->li_pri && (plci_b->tel == ADV_VOICE)
	    && (plci_b == a_b->AdvSignalPLCI) && (plci_b_id & EXT_CONTROLLER))
	{
		ch_b_v = ch_b + MIXER_IC_CHANNEL_BASE;
		ch_b_s = (a_b->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
			a_b->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci_b->li_bchannel_id) : ch_b_v;
	}
	else
	{
		ch_b_v = ch_b;
		ch_b_s = ch_b;
	}
	if (connect)
	{
		/* A fresh connect resets side A's own monitor and
		 * announcement/mix couplings before applying new ones. */
		li_config_table[ch_a].flag_table[ch_a_v] &= ~LI_FLAG_MONITOR;
		li_config_table[ch_a].flag_table[ch_a_s] &= ~LI_FLAG_MONITOR;
		li_config_table[ch_a_v].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
		li_config_table[ch_a_s].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
	}
	/* Always drop the previous A<->B monitor/announcement/mix couplings. */
	li_config_table[ch_a].flag_table[ch_b_v] &= ~LI_FLAG_MONITOR;
	li_config_table[ch_a].flag_table[ch_b_s] &= ~LI_FLAG_MONITOR;
	li_config_table[ch_b_v].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
	li_config_table[ch_b_s].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
	if (ch_a_v == ch_b_v)
	{
		/* A and B are the same channel: just clear the local bits. */
		li_config_table[ch_a_v].flag_table[ch_b_v] &= ~LI_FLAG_CONFERENCE;
		li_config_table[ch_a_s].flag_table[ch_b_s] &= ~LI_FLAG_CONFERENCE;
	}
	else
	{
		/* Leaving a conference removes the conference coupling of the
		 * affected A channels towards every other channel (whole row
		 * resp. whole column of the matrix). */
		if (li_config_table[ch_a_v].flag_table[ch_b_v] & LI_FLAG_CONFERENCE)
		{
			for (i = 0; i < li_total_channels; i++)
			{
				if (i != ch_a_v)
					li_config_table[ch_a_v].flag_table[i] &= ~LI_FLAG_CONFERENCE;
			}
		}
		if (li_config_table[ch_a_s].flag_table[ch_b_v] & LI_FLAG_CONFERENCE)
		{
			for (i = 0; i < li_total_channels; i++)
			{
				if (i != ch_a_s)
					li_config_table[ch_a_s].flag_table[i] &= ~LI_FLAG_CONFERENCE;
			}
		}
		if (li_config_table[ch_b_v].flag_table[ch_a_v] & LI_FLAG_CONFERENCE)
		{
			for (i = 0; i < li_total_channels; i++)
			{
				if (i != ch_a_v)
					li_config_table[i].flag_table[ch_a_v] &= ~LI_FLAG_CONFERENCE;
			}
		}
		if (li_config_table[ch_b_v].flag_table[ch_a_s] & LI_FLAG_CONFERENCE)
		{
			for (i = 0; i < li_total_channels; i++)
			{
				if (i != ch_a_s)
					li_config_table[i].flag_table[ch_a_s] &= ~LI_FLAG_CONFERENCE;
			}
		}
	}
	/* Apply the newly requested couplings. */
	if (li_flags & LI_FLAG_CONFERENCE_A_B)
	{
		li_config_table[ch_b_v].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_b_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_b_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
	}
	if (li_flags & LI_FLAG_CONFERENCE_B_A)
	{
		li_config_table[ch_a_v].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_a_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_a_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
	}
	if (li_flags & LI_FLAG_MONITOR_A)
	{
		li_config_table[ch_a].flag_table[ch_a_v] |= LI_FLAG_MONITOR;
		li_config_table[ch_a].flag_table[ch_a_s] |= LI_FLAG_MONITOR;
	}
	if (li_flags & LI_FLAG_MONITOR_B)
	{
		li_config_table[ch_a].flag_table[ch_b_v] |= LI_FLAG_MONITOR;
		li_config_table[ch_a].flag_table[ch_b_s] |= LI_FLAG_MONITOR;
	}
	if (li_flags & LI_FLAG_ANNOUNCEMENT_A)
	{
		li_config_table[ch_a_v].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
		li_config_table[ch_a_s].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
	}
	if (li_flags & LI_FLAG_ANNOUNCEMENT_B)
	{
		li_config_table[ch_b_v].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
		li_config_table[ch_b_s].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
	}
	if (li_flags & LI_FLAG_MIX_A)
	{
		li_config_table[ch_a_v].flag_table[ch_a] |= LI_FLAG_MIX;
		li_config_table[ch_a_s].flag_table[ch_a] |= LI_FLAG_MIX;
	}
	if (li_flags & LI_FLAG_MIX_B)
	{
		li_config_table[ch_b_v].flag_table[ch_a] |= LI_FLAG_MIX;
		li_config_table[ch_b_s].flag_table[ch_a] |= LI_FLAG_MIX;
	}
	/* Keep a split voice/slave-codec channel pair conferenced together. */
	if (ch_a_v != ch_a_s)
	{
		li_config_table[ch_a_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_a_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
	}
	if (ch_b_v != ch_b_s)
	{
		li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
	}
}
/*
 * New-LI-spec counterpart of li_update_connect(): apply a connect or
 * disconnect between the requesting PLCI (side A) and the participant
 * addressed by plci_b_id (side B) in the li_config_table flag matrix.
 * Unlike the old spec, flags here are per participant (LI2_FLAG_*) and
 * monitor/mix/loop states are cleared and set on side B's channel.
 *
 * connect  - true for LI_REQ_CONNECT, false for LI_REQ_DISCONNECT
 * li_flags - LI2_FLAG_* participant bits
 */
static void li2_update_connect(dword Id, DIVA_CAPI_ADAPTER *a, PLCI *plci,
			       dword plci_b_id, byte connect, dword li_flags)
{
	word ch_a, ch_a_v, ch_a_s, ch_b, ch_b_v, ch_b_s;
	PLCI *plci_b;
	DIVA_CAPI_ADAPTER *a_b;
	/* Resolve side B's adapter and PLCI from the packed CAPI id. */
	a_b = &(adapter[MapController((byte)(plci_b_id & 0x7f)) - 1]);
	plci_b = &(a_b->plci[((plci_b_id >> 8) & 0xff) - 1]);
	ch_a = a->li_base + (plci->li_bchannel_id - 1);
	if (!a->li_pri && (plci->tel == ADV_VOICE)
	    && (plci == a->AdvSignalPLCI) && (Id & EXT_CONTROLLER))
	{
		/* BRI advanced-voice: use the IC (codec) companion channels;
		 * _v/_s presumably voice and slave codec -- TODO confirm */
		ch_a_v = ch_a + MIXER_IC_CHANNEL_BASE;
		ch_a_s = (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
			a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id) : ch_a_v;
	}
	else
	{
		ch_a_v = ch_a;
		ch_a_s = ch_a;
	}
	ch_b = a_b->li_base + (plci_b->li_bchannel_id - 1);
	if (!a_b->li_pri && (plci_b->tel == ADV_VOICE)
	    && (plci_b == a_b->AdvSignalPLCI) && (plci_b_id & EXT_CONTROLLER))
	{
		ch_b_v = ch_b + MIXER_IC_CHANNEL_BASE;
		ch_b_s = (a_b->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
			a_b->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci_b->li_bchannel_id) : ch_b_v;
	}
	else
	{
		ch_b_v = ch_b;
		ch_b_s = ch_b;
	}
	if (connect)
	{
		/* A fresh connect resets side B's monitor/mix/loop state. */
		li_config_table[ch_b].flag_table[ch_b_v] &= ~LI_FLAG_MONITOR;
		li_config_table[ch_b].flag_table[ch_b_s] &= ~LI_FLAG_MONITOR;
		li_config_table[ch_b_v].flag_table[ch_b] &= ~LI_FLAG_MIX;
		li_config_table[ch_b_s].flag_table[ch_b] &= ~LI_FLAG_MIX;
		li_config_table[ch_b].flag_table[ch_b] &= ~LI_FLAG_PCCONNECT;
		li_config_table[ch_b].chflags &= ~(LI_CHFLAG_MONITOR | LI_CHFLAG_MIX | LI_CHFLAG_LOOP);
	}
	/* Always drop the previous A<->B interconnect/conference couplings
	 * in both directions before applying the requested ones. */
	li_config_table[ch_b_v].flag_table[ch_a_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_b_s].flag_table[ch_a_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_b_v].flag_table[ch_a_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_b_s].flag_table[ch_a_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_a_v].flag_table[ch_b_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_a_v].flag_table[ch_b_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_a_s].flag_table[ch_b_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_a_s].flag_table[ch_b_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	/* Apply the requested participant couplings. */
	if (li_flags & LI2_FLAG_INTERCONNECT_A_B)
	{
		li_config_table[ch_b_v].flag_table[ch_a_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_s].flag_table[ch_a_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_v].flag_table[ch_a_s] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_INTERCONNECT;
	}
	if (li_flags & LI2_FLAG_INTERCONNECT_B_A)
	{
		li_config_table[ch_a_v].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_a_v].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_a_s].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
	}
	if (li_flags & LI2_FLAG_MONITOR_B)
	{
		li_config_table[ch_b].flag_table[ch_b_v] |= LI_FLAG_MONITOR;
		li_config_table[ch_b].flag_table[ch_b_s] |= LI_FLAG_MONITOR;
	}
	if (li_flags & LI2_FLAG_MIX_B)
	{
		li_config_table[ch_b_v].flag_table[ch_b] |= LI_FLAG_MIX;
		li_config_table[ch_b_s].flag_table[ch_b] |= LI_FLAG_MIX;
	}
	if (li_flags & LI2_FLAG_MONITOR_X)
		li_config_table[ch_b].chflags |= LI_CHFLAG_MONITOR;
	if (li_flags & LI2_FLAG_MIX_X)
		li_config_table[ch_b].chflags |= LI_CHFLAG_MIX;
	if (li_flags & LI2_FLAG_LOOP_B)
	{
		li_config_table[ch_b_v].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_s].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
	}
	if (li_flags & LI2_FLAG_LOOP_PC)
		li_config_table[ch_b].flag_table[ch_b] |= LI_FLAG_PCCONNECT;
	if (li_flags & LI2_FLAG_LOOP_X)
		li_config_table[ch_b].chflags |= LI_CHFLAG_LOOP;
	if (li_flags & LI2_FLAG_PCCONNECT_A_B)
		li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_PCCONNECT;
	if (li_flags & LI2_FLAG_PCCONNECT_B_A)
		li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_PCCONNECT;
	/* Keep a split voice/slave-codec channel pair conferenced together. */
	if (ch_a_v != ch_a_s)
	{
		li_config_table[ch_a_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_a_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
	}
	if (ch_b_v != ch_b_s)
	{
		li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
	}
}
/*
 * Validate the main (requesting) PLCI of a line interconnect request.
 * On success the PLCI is registered as owner of its B-channel's
 * li_config_table entry and GOOD is returned; otherwise a CAPI error
 * code (_WRONG_IDENTIFIER or _WRONG_STATE) is returned.
 */
static word li_check_main_plci(dword Id, PLCI *plci)
{
	if (plci == NULL) {
		dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		return (_WRONG_IDENTIFIER);
	}
	/* The PLCI must be active, have a network layer that is not being
	 * removed, and own a B-channel. */
	if ((plci->State == 0) || (plci->NL.Id == 0)
	    || (plci->nl_remove_id != 0) || (plci->li_bchannel_id == 0)) {
		dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		return (_WRONG_STATE);
	}
	li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci = plci;
	return (GOOD);
}
/*
 * Validate the peer ("B") PLCI of an old-spec line interconnect request.
 * Checks, in order: room for two more entries in the request queue, a
 * valid controller and PLCI index in plci_b_id, a usable PLCI state,
 * cross-controller capability (XCONNECT on both adapters) when the peer
 * lives on a different controller, and that the peer's B1 resource can
 * provide the mixer facility.  On success the peer is registered in
 * li_config_table and returned; on failure NULL is returned and a CAPI
 * error code is stored via PUT_WORD at p_result.
 */
static PLCI *li_check_plci_b(dword Id, PLCI *plci,
			     dword plci_b_id, word plci_b_write_pos, byte *p_result)
{
	byte ctlr_b;
	PLCI *plci_b;
	/* Free entries in the circular queue (read == write means empty);
	 * two slots are needed: one for the request, one for a terminator. */
	if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
	     LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2)
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
		return (NULL);
	}
	/* Controller number lives in the low 7 bits of the peer id. */
	ctlr_b = 0;
	if ((plci_b_id & 0x7f) != 0)
	{
		ctlr_b = MapController((byte)(plci_b_id & 0x7f));
		if ((ctlr_b > max_adapter) || ((ctlr_b != 0) && (adapter[ctlr_b - 1].request == NULL)))
			ctlr_b = 0;
	}
	/* PLCI index lives in bits 8..15 and must address an existing PLCI. */
	if ((ctlr_b == 0)
	    || (((plci_b_id >> 8) & 0xff) == 0)
	    || (((plci_b_id >> 8) & 0xff) > adapter[ctlr_b - 1].max_plci))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI invalid second PLCI %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _WRONG_IDENTIFIER);
		return (NULL);
	}
	plci_b = &(adapter[ctlr_b - 1].plci[((plci_b_id >> 8) & 0xff) - 1]);
	/* The peer must be active, not closing down, and own a B-channel. */
	if (!plci_b->State
	    || !plci_b->NL.Id || plci_b->nl_remove_id
	    || (plci_b->li_bchannel_id == 0))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI peer in wrong state %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
		return (NULL);
	}
	/* Register the peer as owner of its channel entry (old spec does
	 * this here; the new-spec variant requires it already set). */
	li_config_table[plci_b->adapter->li_base + (plci_b->li_bchannel_id - 1)].plci = plci_b;
	/* Different controllers are only allowed if both support XCONNECT. */
	if (((byte)(plci_b_id & ~EXT_CONTROLLER)) !=
	    ((byte)(UnMapController(plci->adapter->Id) & ~EXT_CONTROLLER))
	    && (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
		|| !(plci_b->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI not on same ctrl %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _WRONG_IDENTIFIER);
		return (NULL);
	}
	/* The peer's B1 resource must be able to carry the mixer facility. */
	if (!(get_b1_facilities(plci_b, add_b1_facilities(plci_b, plci_b->B1_resource,
							  (word)(plci_b->B1_facilities | B1_FACILITY_MIXER))) & B1_FACILITY_MIXER))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Interconnect peer cannot mix %d",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b->B1_resource));
		PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
		return (NULL);
	}
	return (plci_b);
}
/*
 * New-LI-spec counterpart of li_check_plci_b(): validate the participant
 * ("B") PLCI of a line interconnect request.  Differences from the old
 * spec: failures report _WRONG_STATE instead of
 * _REQUEST_NOT_ALLOWED_IN_THIS_STATE, and the peer must already be
 * registered as owner of its li_config_table entry (it is not registered
 * here).  Returns the peer PLCI, or NULL with a CAPI error code stored
 * via PUT_WORD at p_result.
 */
static PLCI *li2_check_plci_b(dword Id, PLCI *plci,
			      dword plci_b_id, word plci_b_write_pos, byte *p_result)
{
	byte ctlr_b;
	PLCI *plci_b;
	/* Need two free slots in the circular request queue. */
	if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
	     LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2)
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		PUT_WORD(p_result, _WRONG_STATE);
		return (NULL);
	}
	/* Controller number lives in the low 7 bits of the peer id. */
	ctlr_b = 0;
	if ((plci_b_id & 0x7f) != 0)
	{
		ctlr_b = MapController((byte)(plci_b_id & 0x7f));
		if ((ctlr_b > max_adapter) || ((ctlr_b != 0) && (adapter[ctlr_b - 1].request == NULL)))
			ctlr_b = 0;
	}
	/* PLCI index (bits 8..15) must address an existing PLCI. */
	if ((ctlr_b == 0)
	    || (((plci_b_id >> 8) & 0xff) == 0)
	    || (((plci_b_id >> 8) & 0xff) > adapter[ctlr_b - 1].max_plci))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI invalid second PLCI %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _WRONG_IDENTIFIER);
		return (NULL);
	}
	plci_b = &(adapter[ctlr_b - 1].plci[((plci_b_id >> 8) & 0xff) - 1]);
	/* Peer must be active, not closing down, own a B-channel, and
	 * already own its li_config_table entry. */
	if (!plci_b->State
	    || !plci_b->NL.Id || plci_b->nl_remove_id
	    || (plci_b->li_bchannel_id == 0)
	    || (li_config_table[plci_b->adapter->li_base + (plci_b->li_bchannel_id - 1)].plci != plci_b))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI peer in wrong state %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _WRONG_STATE);
		return (NULL);
	}
	/* Different controllers are only allowed if both support XCONNECT. */
	if (((byte)(plci_b_id & ~EXT_CONTROLLER)) !=
	    ((byte)(UnMapController(plci->adapter->Id) & ~EXT_CONTROLLER))
	    && (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
		|| !(plci_b->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI not on same ctrl %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _WRONG_IDENTIFIER);
		return (NULL);
	}
	/* The peer's B1 resource must be able to carry the mixer facility. */
	if (!(get_b1_facilities(plci_b, add_b1_facilities(plci_b, plci_b->B1_resource,
							  (word)(plci_b->B1_facilities | B1_FACILITY_MIXER))) & B1_FACILITY_MIXER))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Interconnect peer cannot mix %d",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b->B1_resource));
		PUT_WORD(p_result, _WRONG_STATE);
		return (NULL);
	}
	return (plci_b);
}
/*
 * CAPI FACILITY_REQ handler for the line interconnect (LI) selector.
 * Dispatches on the LI function word: query supported services, connect,
 * disconnect, and silent update.  Connect/disconnect support both the
 * old spec (single peer, fixed-length parameter) and the new spec
 * (variable participant list); the application's spec flavour is
 * remembered in appl->appl_flags (APPL_FLAG_OLD_LI_SPEC).
 *
 * Fix versus the original: the result-buffer overrun guards in the two
 * participant loops compared against sizeof(result), where result is a
 * byte pointer - that measures the pointer (4/8 bytes), so the unsigned
 * subtraction wrapped and the guard could never fire, allowing the loop
 * to write past the result buffer.  At those points result points to
 * plci->saved_msg.info, so the guards now bound against
 * sizeof(plci->saved_msg.info).
 *
 * Returns false in all cases (the request is consumed; any confirm is
 * sent from here, except for the deliberately silent update).
 */
static byte mixer_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word Info;
	word i;
	dword d, li_flags, plci_b_id;
	PLCI *plci_b;
	API_PARSE li_parms[3];
	API_PARSE li_req_parms[3];
	API_PARSE li_participant_struct[2];
	API_PARSE li_participant_parms[3];
	word participant_parms_pos;
	byte result_buffer[32];
	byte *result;
	word result_pos;
	word plci_b_write_pos;
	dbug(1, dprintf("[%06lx] %s,%d: mixer_request",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	Info = GOOD;
	result = result_buffer;
	result_buffer[0] = 0;
	if (!(a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		Info = _FACILITY_NOT_SUPPORTED;
	}
	else if (api_parse(&msg[1].info[1], msg[1].length, "ws", li_parms))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		Info = _WRONG_MESSAGE_FORMAT;
	}
	else
	{
		/* Default result: echo the LI function word, empty parameter. */
		result_buffer[0] = 3;
		PUT_WORD(&result_buffer[1], GET_WORD(li_parms[0].info));
		result_buffer[3] = 0;
		switch (GET_WORD(li_parms[0].info))
		{
		case LI_GET_SUPPORTED_SERVICES:
			if (appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
			{
				/* Old spec answer: service mask plus channel counts. */
				result_buffer[0] = 17;
				result_buffer[3] = 14;
				PUT_WORD(&result_buffer[4], GOOD);
				d = 0;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_CH)
					d |= LI_CONFERENCING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_PC)
					d |= LI_MONITORING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_CH)
					d |= LI_ANNOUNCEMENTS_SUPPORTED | LI_MIXING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
					d |= LI_CROSS_CONTROLLER_SUPPORTED;
				PUT_DWORD(&result_buffer[6], d);
				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
				{
					/* Cross-controller: count all mixable channels
					 * on all XCONNECT-capable adapters. */
					d = 0;
					for (i = 0; i < li_total_channels; i++)
					{
						if ((li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
						    && (li_config_table[i].adapter->li_pri
							|| (i < li_config_table[i].adapter->li_base + MIXER_BCHANNELS_BRI)))
						{
							d++;
						}
					}
				}
				else
				{
					d = a->li_pri ? a->li_channels : MIXER_BCHANNELS_BRI;
				}
				PUT_DWORD(&result_buffer[10], d / 2);
				PUT_DWORD(&result_buffer[14], d);
			}
			else
			{
				/* New spec answer: extended mask plus local and
				 * cross-controller channel counts. */
				result_buffer[0] = 25;
				result_buffer[3] = 22;
				PUT_WORD(&result_buffer[4], GOOD);
				d = LI2_ASYMMETRIC_SUPPORTED | LI2_B_LOOPING_SUPPORTED | LI2_X_LOOPING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_PC)
					d |= LI2_MONITORING_SUPPORTED | LI2_REMOTE_MONITORING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_CH)
					d |= LI2_MIXING_SUPPORTED | LI2_REMOTE_MIXING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_PC)
					d |= LI2_PC_LOOPING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
					d |= LI2_CROSS_CONTROLLER_SUPPORTED;
				PUT_DWORD(&result_buffer[6], d);
				d = a->li_pri ? a->li_channels : MIXER_BCHANNELS_BRI;
				PUT_DWORD(&result_buffer[10], d / 2);
				PUT_DWORD(&result_buffer[14], d - 1);
				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
				{
					d = 0;
					for (i = 0; i < li_total_channels; i++)
					{
						if ((li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
						    && (li_config_table[i].adapter->li_pri
							|| (i < li_config_table[i].adapter->li_base + MIXER_BCHANNELS_BRI)))
						{
							d++;
						}
					}
				}
				PUT_DWORD(&result_buffer[18], d / 2);
				PUT_DWORD(&result_buffer[22], d - 1);
			}
			break;
		case LI_REQ_CONNECT:
			if (li_parms[1].length == 8)
			{
				/* Old spec: exactly one peer id plus one flag dword. */
				appl->appl_flags |= APPL_FLAG_OLD_LI_SPEC;
				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "dd", li_req_parms))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				plci_b_id = GET_DWORD(li_req_parms[0].info) & 0xffff;
				li_flags = GET_DWORD(li_req_parms[1].info);
				Info = li_check_main_plci(Id, plci);
				result_buffer[0] = 9;
				result_buffer[3] = 6;
				PUT_DWORD(&result_buffer[4], plci_b_id);
				PUT_WORD(&result_buffer[8], GOOD);
				if (Info != GOOD)
					break;
				/* From here on the confirm is built in saved_msg. */
				result = plci->saved_msg.info;
				for (i = 0; i <= result_buffer[0]; i++)
					result[i] = result_buffer[i];
				plci_b_write_pos = plci->li_plci_b_write_pos;
				plci_b = li_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[8]);
				if (plci_b == NULL)
					break;
				li_update_connect(Id, a, plci, plci_b_id, true, li_flags);
				plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_LAST_FLAG;
				plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
				plci->li_plci_b_write_pos = plci_b_write_pos;
			}
			else
			{
				/* New spec: flag dword plus a participant struct list. */
				appl->appl_flags &= ~APPL_FLAG_OLD_LI_SPEC;
				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "ds", li_req_parms))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				li_flags = GET_DWORD(li_req_parms[0].info) & ~(LI2_FLAG_INTERCONNECT_A_B | LI2_FLAG_INTERCONNECT_B_A);
				Info = li_check_main_plci(Id, plci);
				result_buffer[0] = 7;
				result_buffer[3] = 4;
				PUT_WORD(&result_buffer[4], Info);
				result_buffer[6] = 0;
				if (Info != GOOD)
					break;
				result = plci->saved_msg.info;
				for (i = 0; i <= result_buffer[0]; i++)
					result[i] = result_buffer[i];
				plci_b_write_pos = plci->li_plci_b_write_pos;
				participant_parms_pos = 0;
				result_pos = 7;
				li2_update_connect(Id, a, plci, UnMapId(Id), true, li_flags);
				while (participant_parms_pos < li_req_parms[1].length)
				{
					/* Reserve a 7-byte per-participant result entry:
					 * length, peer id (dword), status (word). */
					result[result_pos] = 6;
					result_pos += 7;
					PUT_DWORD(&result[result_pos - 6], 0);
					PUT_WORD(&result[result_pos - 2], GOOD);
					/* NOTE(review): the remaining length passed here is
					 * li_parms[1].length, not li_req_parms[1].length as
					 * the loop condition uses -- verify intent */
					if (api_parse(&li_req_parms[1].info[1 + participant_parms_pos],
						      (word)(li_parms[1].length - participant_parms_pos), "s", li_participant_struct))
					{
						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
						break;
					}
					if (api_parse(&li_participant_struct[0].info[1],
						      li_participant_struct[0].length, "dd", li_participant_parms))
					{
						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
						break;
					}
					plci_b_id = GET_DWORD(li_participant_parms[0].info) & 0xffff;
					li_flags = GET_DWORD(li_participant_parms[1].info);
					PUT_DWORD(&result[result_pos - 6], plci_b_id);
					/* Stop before the next entry would overrun the result
					 * buffer.  (Was sizeof(result): the size of the
					 * pointer, so the guard could never fire.) */
					if (sizeof(plci->saved_msg.info) - result_pos < 7)
					{
						dbug(1, dprintf("[%06lx] %s,%d: LI result overrun",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_STATE);
						break;
					}
					plci_b = li2_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[result_pos - 2]);
					if (plci_b != NULL)
					{
						li2_update_connect(Id, a, plci, plci_b_id, true, li_flags);
						/* Queue a connect entry; participants with no
						 * interconnect/PC-connect flags are queued as
						 * disconnects. */
						plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id |
							((li_flags & (LI2_FLAG_INTERCONNECT_A_B | LI2_FLAG_INTERCONNECT_B_A |
								      LI2_FLAG_PCCONNECT_A_B | LI2_FLAG_PCCONNECT_B_A)) ? 0 : LI_PLCI_B_DISC_FLAG);
						plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
					}
					participant_parms_pos = (word)((&li_participant_struct[0].info[1 + li_participant_struct[0].length]) -
								       (&li_req_parms[1].info[1]));
				}
				/* Patch the final lengths into the confirm structs. */
				result[0] = (byte)(result_pos - 1);
				result[3] = (byte)(result_pos - 4);
				result[6] = (byte)(result_pos - 7);
				/* Mark the last queued entry, or queue a skip marker if
				 * nothing was queued in this request. */
				i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
				if ((plci_b_write_pos == plci->li_plci_b_read_pos)
				    || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
				{
					plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
					plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
				}
				else
					plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
				plci->li_plci_b_write_pos = plci_b_write_pos;
			}
			/* Recompute coefficients, confirm, and start the internal
			 * mixer command that programs the hardware. */
			mixer_calculate_coefs(a);
			plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
			mixer_notify_update(plci, true);
			sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
			      "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
			plci->command = 0;
			plci->li_cmd = GET_WORD(li_parms[0].info);
			start_internal_command(Id, plci, mixer_command);
			return (false);
		case LI_REQ_DISCONNECT:
			if (li_parms[1].length == 4)
			{
				/* Old spec: a single peer id. */
				appl->appl_flags |= APPL_FLAG_OLD_LI_SPEC;
				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "d", li_req_parms))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				plci_b_id = GET_DWORD(li_req_parms[0].info) & 0xffff;
				Info = li_check_main_plci(Id, plci);
				result_buffer[0] = 9;
				result_buffer[3] = 6;
				PUT_DWORD(&result_buffer[4], GET_DWORD(li_req_parms[0].info));
				PUT_WORD(&result_buffer[8], GOOD);
				if (Info != GOOD)
					break;
				result = plci->saved_msg.info;
				for (i = 0; i <= result_buffer[0]; i++)
					result[i] = result_buffer[i];
				plci_b_write_pos = plci->li_plci_b_write_pos;
				plci_b = li_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[8]);
				if (plci_b == NULL)
					break;
				li_update_connect(Id, a, plci, plci_b_id, false, 0);
				plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG | LI_PLCI_B_LAST_FLAG;
				plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
				plci->li_plci_b_write_pos = plci_b_write_pos;
			}
			else
			{
				/* New spec: a participant struct list. */
				appl->appl_flags &= ~APPL_FLAG_OLD_LI_SPEC;
				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "s", li_req_parms))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				Info = li_check_main_plci(Id, plci);
				result_buffer[0] = 7;
				result_buffer[3] = 4;
				PUT_WORD(&result_buffer[4], Info);
				result_buffer[6] = 0;
				if (Info != GOOD)
					break;
				result = plci->saved_msg.info;
				for (i = 0; i <= result_buffer[0]; i++)
					result[i] = result_buffer[i];
				plci_b_write_pos = plci->li_plci_b_write_pos;
				participant_parms_pos = 0;
				result_pos = 7;
				while (participant_parms_pos < li_req_parms[0].length)
				{
					result[result_pos] = 6;
					result_pos += 7;
					PUT_DWORD(&result[result_pos - 6], 0);
					PUT_WORD(&result[result_pos - 2], GOOD);
					/* NOTE(review): bound is li_parms[1].length while the
					 * loop iterates li_req_parms[0].length -- verify intent */
					if (api_parse(&li_req_parms[0].info[1 + participant_parms_pos],
						      (word)(li_parms[1].length - participant_parms_pos), "s", li_participant_struct))
					{
						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
						break;
					}
					if (api_parse(&li_participant_struct[0].info[1],
						      li_participant_struct[0].length, "d", li_participant_parms))
					{
						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
						break;
					}
					plci_b_id = GET_DWORD(li_participant_parms[0].info) & 0xffff;
					PUT_DWORD(&result[result_pos - 6], plci_b_id);
					/* Stop before the next entry would overrun the result
					 * buffer.  (Was sizeof(result): the size of the
					 * pointer, so the guard could never fire.) */
					if (sizeof(plci->saved_msg.info) - result_pos < 7)
					{
						dbug(1, dprintf("[%06lx] %s,%d: LI result overrun",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_STATE);
						break;
					}
					plci_b = li2_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[result_pos - 2]);
					if (plci_b != NULL)
					{
						li2_update_connect(Id, a, plci, plci_b_id, false, 0);
						plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG;
						plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
					}
					participant_parms_pos = (word)((&li_participant_struct[0].info[1 + li_participant_struct[0].length]) -
								       (&li_req_parms[0].info[1]));
				}
				result[0] = (byte)(result_pos - 1);
				result[3] = (byte)(result_pos - 4);
				result[6] = (byte)(result_pos - 7);
				i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
				if ((plci_b_write_pos == plci->li_plci_b_read_pos)
				    || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
				{
					plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
					plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
				}
				else
					plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
				plci->li_plci_b_write_pos = plci_b_write_pos;
			}
			mixer_calculate_coefs(a);
			plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
			mixer_notify_update(plci, true);
			sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
			      "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
			plci->command = 0;
			plci->li_cmd = GET_WORD(li_parms[0].info);
			start_internal_command(Id, plci, mixer_command);
			return (false);
		case LI_REQ_SILENT_UPDATE:
			/* Internal request: re-program coefficients without sending
			 * any confirm to the application. */
			if (!plci || !plci->State
			    || !plci->NL.Id || plci->nl_remove_id
			    || (plci->li_bchannel_id == 0)
			    || (li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci != plci))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				return (false);
			}
			plci_b_write_pos = plci->li_plci_b_write_pos;
			if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
			     LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2)
			{
				dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				return (false);
			}
			i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
			if ((plci_b_write_pos == plci->li_plci_b_read_pos)
			    || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
			{
				plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
				plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
			}
			else
				plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
			plci->li_plci_b_write_pos = plci_b_write_pos;
			plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
			plci->command = 0;
			plci->li_cmd = GET_WORD(li_parms[0].info);
			start_internal_command(Id, plci, mixer_command);
			return (false);
		default:
			dbug(1, dprintf("[%06lx] %s,%d: LI unknown request %04x",
					UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(li_parms[0].info)));
			Info = _FACILITY_NOT_SUPPORTED;
		}
	}
	sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
	      "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
	return (false);
}
/*
 * Coefficients have been programmed: drain the queued interconnect
 * results between read_pos and req_pos and report each as a FACILITY_I
 * line interconnect indication (connect-active or disconnect), in the
 * old- or new-spec layout depending on the application's flavour.
 * Entries marked LI_PLCI_B_SKIP_FLAG produce no indication; the drain
 * stops after an entry marked LI_PLCI_B_LAST_FLAG.
 */
static void mixer_indication_coefs_set(dword Id, PLCI *plci)
{
	dword d;
	byte result[12];
	dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_coefs_set",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	if (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos)
	{
		do
		{
			d = plci->li_plci_b_queue[plci->li_plci_b_read_pos];
			if (!(d & LI_PLCI_B_SKIP_FLAG))
			{
				if (plci->appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
				{
					if (d & LI_PLCI_B_DISC_FLAG)
					{
						/* Old spec disconnect: reason word only. */
						result[0] = 5;
						PUT_WORD(&result[1], LI_IND_DISCONNECT);
						result[3] = 2;
						PUT_WORD(&result[4], _LI_USER_INITIATED);
					}
					else
					{
						/* Old spec connect-active: peer PLCI id. */
						result[0] = 7;
						PUT_WORD(&result[1], LI_IND_CONNECT_ACTIVE);
						result[3] = 4;
						PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
					}
				}
				else
				{
					if (d & LI_PLCI_B_DISC_FLAG)
					{
						/* New spec disconnect: peer id plus reason. */
						result[0] = 9;
						PUT_WORD(&result[1], LI_IND_DISCONNECT);
						result[3] = 6;
						PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
						PUT_WORD(&result[8], _LI_USER_INITIATED);
					}
					else
					{
						/* New spec connect-active: peer id. */
						result[0] = 7;
						PUT_WORD(&result[1], LI_IND_CONNECT_ACTIVE);
						result[3] = 4;
						PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
					}
				}
				sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0,
				      "ws", SELECTOR_LINE_INTERCONNECT, result);
			}
			/* Advance the circular read position. */
			plci->li_plci_b_read_pos = (plci->li_plci_b_read_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ?
				0 : plci->li_plci_b_read_pos + 1;
		} while (!(d & LI_PLCI_B_LAST_FLAG) && (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos));
	}
}
/*
 * IDI indication: the adapter reports cross-connect transfer addresses.
 * Each 16-byte entry carries an 8-byte card address, a 4-byte offset and
 * a 2-byte channel number with port flags; the address is stored in the
 * channel's li_config_table entry (send_pc or send_b depending on the
 * PC-port flag) and the channel is marked LI_CHANNEL_ADDRESSES_SET.
 * A pending adjust_b mixer-restore command is then resumed, and the
 * other channels are notified of the update.
 *
 * Fix versus the original: removed the dead store "i = 1;" immediately
 * before the for loop, which re-initializes i itself.
 */
static void mixer_indication_xconnect_from(dword Id, PLCI *plci, byte *msg, word length)
{
	word i, j, ch;
	struct xconnect_transfer_address_s s, *p;
	DIVA_CAPI_ADAPTER *a;
	dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_xconnect_from %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, (int)length));
	a = plci->adapter;
	/* NOTE(review): entries are assumed complete; a truncated final
	 * entry (length != 1 + n*16) would be read past 'length'. */
	for (i = 1; i < length; i += 16)
	{
		/* Little-endian decode of card address (64 bit) and offset. */
		s.card_address.low = msg[i] | (msg[i + 1] << 8) | (((dword)(msg[i + 2])) << 16) | (((dword)(msg[i + 3])) << 24);
		s.card_address.high = msg[i + 4] | (msg[i + 5] << 8) | (((dword)(msg[i + 6])) << 16) | (((dword)(msg[i + 7])) << 24);
		s.offset = msg[i + 8] | (msg[i + 9] << 8) | (((dword)(msg[i + 10])) << 16) | (((dword)(msg[i + 11])) << 24);
		ch = msg[i + 12] | (msg[i + 13] << 8);
		j = ch & XCONNECT_CHANNEL_NUMBER_MASK;
		/* On BRI the two B-channels are swapped when this PLCI owns
		 * channel 2. */
		if (!a->li_pri && (plci->li_bchannel_id == 2))
			j = 1 - j;
		j += a->li_base;
		if (ch & XCONNECT_CHANNEL_PORT_PC)
			p = &(li_config_table[j].send_pc);
		else
			p = &(li_config_table[j].send_b);
		p->card_address.low = s.card_address.low;
		p->card_address.high = s.card_address.high;
		p->offset = s.offset;
		li_config_table[j].channel |= LI_CHANNEL_ADDRESSES_SET;
	}
	/* Resume an adjust_b command waiting for these addresses. */
	if (plci->internal_command_queue[0]
	    && ((plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
		|| (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_3)
		|| (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_4)))
	{
		(*(plci->internal_command_queue[0]))(Id, plci, 0);
		if (!plci->internal_command)
			next_internal_command(Id, plci);
	}
	mixer_notify_update(plci, true);
}
/*
 * IDI indication for the cross-connect "to" direction.  Only traced;
 * no action is taken here.
 */
static void mixer_indication_xconnect_to(dword Id, PLCI *plci, byte *msg, word length)
{
	dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_xconnect_to %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, (int) length));
}
/*
 * Queue a disconnect entry for the removed peer plci_b_id in the given
 * PLCI's line interconnect queue, so the application later receives a
 * disconnect indication.  Returns false if the queue is full, true on
 * success.
 */
static byte mixer_notify_source_removed(PLCI *plci, dword plci_b_id)
{
	word write_pos = plci->li_plci_b_write_pos;
	word read_pos = plci->li_plci_b_read_pos;
	word free_entries;

	/* Free slots in the circular queue (read == write means empty). */
	if (read_pos > write_pos)
		free_entries = read_pos - write_pos - 1;
	else
		free_entries = LI_PLCI_B_QUEUE_ENTRIES + read_pos - write_pos - 1;
	if (free_entries < 1) {
		dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
				(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
				(char *)(FILE_), __LINE__));
		return (false);
	}
	plci->li_plci_b_queue[write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG;
	if (write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1)
		write_pos = 0;
	else
		write_pos++;
	plci->li_plci_b_write_pos = write_pos;
	return (true);
}
/*
 * Tear down this PLCI's line-interconnect (mixer) state.
 * If the PLCI owns a B-channel entry in li_config_table and that channel
 * is involved in any interconnect, notify every peer PLCI that still has
 * a cross-connect with it (new-LI-spec applications only), then clear the
 * mixer configuration and recompute the coefficient matrix.  Finally the
 * li_config_table slot is released.
 */
static void mixer_remove(PLCI *plci)
{
	DIVA_CAPI_ADAPTER *a;
	PLCI *notify_plci;
	dword plci_b_id;
	word i, j;
	dbug(1, dprintf("[%06lx] %s,%d: mixer_remove",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	a = plci->adapter;
	plci_b_id = (plci->Id << 8) | UnMapController(plci->adapter->Id);
	if (a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED)
	{
		if ((plci->li_bchannel_id != 0)
		    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
		{
			i = a->li_base + (plci->li_bchannel_id - 1);
			if ((li_config_table[i].curchnl | li_config_table[i].channel) & LI_CHANNEL_INVOLVED)
			{
				/* Tell every interconnected peer that this source goes away. */
				for (j = 0; j < li_total_channels; j++)
				{
					if ((li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
					    || (li_config_table[j].flag_table[i] & LI_FLAG_INTERCONNECT))
					{
						notify_plci = li_config_table[j].plci;
						/* Only notify live PLCIs of applications using the
						   new LI spec; old-spec applications are skipped. */
						if ((notify_plci != NULL)
						    && (notify_plci != plci)
						    && (notify_plci->appl != NULL)
						    && !(notify_plci->appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
						    && (notify_plci->State)
						    && notify_plci->NL.Id && !notify_plci->nl_remove_id)
						{
							mixer_notify_source_removed(notify_plci, plci_b_id);
						}
					}
				}
				mixer_clear_config(plci);
				mixer_calculate_coefs(a);
				mixer_notify_update(plci, true);
			}
			/* Release the B-channel slot. */
			li_config_table[i].plci = NULL;
			plci->li_bchannel_id = 0;
		}
	}
}
/*------------------------------------------------------------------*/
/* Echo canceller facilities */
/*------------------------------------------------------------------*/
/*
 * Send the current echo-canceller (LEC) settings to the DSP.
 * Builds a 5-byte DSP_CTRL_SET_LEC_PARAMETERS structure (option word +
 * tail length word) and ships it via an FTY parameter on a TEL_CTRL
 * signalling request.  LEC_RESET_COEFFICIENTS is a one-shot flag and is
 * cleared from the cached options once it has been written out.
 *
 * Fix: the two PUT_WORD destinations had been corrupted by an HTML-entity
 * mangling ("&para" -> U+00B6), turning "&parameter_buffer[...]" into
 * "¶meter_buffer[...]" — an undeclared identifier.  Restored the
 * address-of expressions.
 */
static void ec_write_parameters(PLCI *plci)
{
	word w;
	byte parameter_buffer[6];
	dbug(1, dprintf("[%06lx] %s,%d: ec_write_parameters",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	parameter_buffer[0] = 5;	/* length of the FTY payload */
	parameter_buffer[1] = DSP_CTRL_SET_LEC_PARAMETERS;
	PUT_WORD(&parameter_buffer[2], plci->ec_idi_options);
	plci->ec_idi_options &= ~LEC_RESET_COEFFICIENTS;	/* one-shot flag */
	/* Tail length 0 means "use the default of 128". */
	w = (plci->ec_tail_length == 0) ? 128 : plci->ec_tail_length;
	PUT_WORD(&parameter_buffer[4], w);
	add_p(plci, FTY, parameter_buffer);
	sig_req(plci, TEL_CTRL, 0);
	send_req(plci);
}
/*
 * Reset the echo-canceller state of a PLCI to its defaults:
 * canceller enabled but manually disabled, non-linear processing on,
 * and the default tail length (0 = driver default).
 */
static void ec_clear_config(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: ec_clear_config",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	plci->ec_tail_length = 0;
	plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER
		| LEC_MANUAL_DISABLE
		| LEC_ENABLE_NONLINEAR_PROCESSING;
}
/*
 * Prepare the echo canceller for a B1-resource switch.
 * Nothing to do for the EC beyond tracing; kept for symmetry with the
 * other *_prepare_switch handlers called from adjust_b_process().
 */
static void ec_prepare_switch(dword Id, PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: ec_prepare_switch",
			UnMapId(Id), (char *)(FILE_), __LINE__));
}
/*
 * Save echo-canceller configuration before a B1-resource switch.
 * The EC settings live entirely in the PLCI (ec_idi_options,
 * ec_tail_length), so there is nothing to save; always returns GOOD.
 */
static word ec_save_config(dword Id, PLCI *plci, byte Rc)
{
	dbug(1, dprintf("[%06lx] %s,%d: ec_save_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	return (GOOD);
}
/*
 * Restore echo-canceller configuration after a B1-resource switch.
 * Two-step state machine driven by adjust_b_process():
 *   RESTORE_EC_1: once the signalling channel is free, rewrite the LEC
 *                 parameters and advance to RESTORE_EC_2;
 *   RESTORE_EC_2: check the request's return code.
 * Returns GOOD on success or _WRONG_STATE on failure.
 */
static word ec_restore_config(dword Id, PLCI *plci, byte Rc)
{
	word Info;
	dbug(1, dprintf("[%06lx] %s,%d: ec_restore_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	Info = GOOD;
	if (plci->B1_facilities & B1_FACILITY_EC)
	{
		switch (plci->adjust_b_state)
		{
		case ADJUST_B_RESTORE_EC_1:
			plci->internal_command = plci->adjust_b_command;
			if (plci->sig_req)
			{
				/* Signalling channel busy - stay in this state and retry. */
				plci->adjust_b_state = ADJUST_B_RESTORE_EC_1;
				break;
			}
			ec_write_parameters(plci);
			plci->adjust_b_state = ADJUST_B_RESTORE_EC_2;
			break;
		case ADJUST_B_RESTORE_EC_2:
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Restore EC failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _WRONG_STATE;
				break;
			}
			break;
		}
	}
	return (Info);
}
/*
 * Internal-command handler that executes a queued echo-canceller request
 * (plci->ec_cmd).  Re-entered with the result code Rc of each completed
 * step; the EC_COMMAND_* internal-command values act as resume points and
 * the switch cases deliberately fall through to start the next step.
 *
 * Enable/freeze/resume/reset: load the EC B1 facility (adjust_b1_resource),
 * then write the LEC parameters.  Disable: write parameters first (with
 * the reset/disable bits set by the caller), then unload the facility.
 * Finally a FACILITY confirmation is sent to the application, formatted
 * for either the private or the standard EC selector.
 */
static void ec_command(dword Id, PLCI *plci, byte Rc)
{
	word internal_command, Info;
	byte result[8];
	dbug(1, dprintf("[%06lx] %s,%d: ec_command %02x %04x %04x %04x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
			plci->ec_cmd, plci->ec_idi_options, plci->ec_tail_length));
	Info = GOOD;
	/* Pre-build the success confirmation in the format the application
	   expects (private spec: bare word; standard: cmd + nested result). */
	if (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
	{
		result[0] = 2;
		PUT_WORD(&result[1], EC_SUCCESS);
	}
	else
	{
		result[0] = 5;
		PUT_WORD(&result[1], plci->ec_cmd);
		result[3] = 2;
		PUT_WORD(&result[4], GOOD);
	}
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (plci->ec_cmd)
	{
	case EC_ENABLE_OPERATION:
	case EC_FREEZE_COEFFICIENTS:
	case EC_RESUME_COEFFICIENT_UPDATE:
	case EC_RESET_COEFFICIENTS:
		switch (internal_command)
		{
		default:
			/* First entry: make sure the EC facility is loaded. */
			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
								  B1_FACILITY_EC), EC_COMMAND_1);
			/* fall through */
		case EC_COMMAND_1:
			if (adjust_b_process(Id, plci, Rc) != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Load EC failed",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			if (plci->internal_command)
				return;	/* adjust_b still in progress - resume later */
			/* fall through */
		case EC_COMMAND_2:
			if (plci->sig_req)
			{
				plci->internal_command = EC_COMMAND_2;
				return;	/* signalling busy - retry at this step */
			}
			plci->internal_command = EC_COMMAND_3;
			ec_write_parameters(plci);
			return;
		case EC_COMMAND_3:
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Enable EC failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			break;
		}
		break;
	case EC_DISABLE_OPERATION:
		switch (internal_command)
		{
		default:
			/* fall through */
		case EC_COMMAND_1:
			if (plci->B1_facilities & B1_FACILITY_EC)
			{
				if (plci->sig_req)
				{
					plci->internal_command = EC_COMMAND_1;
					return;
				}
				plci->internal_command = EC_COMMAND_2;
				ec_write_parameters(plci);
				return;
			}
			/* EC facility not loaded: skip the parameter write. */
			Rc = OK;
			/* fall through */
		case EC_COMMAND_2:
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Disable EC failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			/* Unload the EC facility from the B1 resource. */
			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
								  ~B1_FACILITY_EC), EC_COMMAND_3);
			/* fall through */
		case EC_COMMAND_3:
			if (adjust_b_process(Id, plci, Rc) != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Unload EC failed",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			if (plci->internal_command)
				return;
			break;
		}
		break;
	}
	sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->number,
	      "wws", Info, (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
	      PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
}
/*
 * Entry point for an application's echo-canceller FACILITY request.
 * Validates the adapter capability, parses the request in either the
 * private-spec ("w" parameter) or standard-spec ("ws" parameter) layout,
 * updates plci->ec_idi_options / ec_tail_length from the option bits, and
 * starts ec_command() as internal command for the recognized operations.
 * When an operation is started, ec_command() sends the confirmation later
 * and this function returns false without sending one here; otherwise the
 * (error or immediate) confirmation is sent at the bottom.
 */
static byte ec_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word Info;
	word opt;
	API_PARSE ec_parms[3];
	byte result[16];
	dbug(1, dprintf("[%06lx] %s,%d: ec_request",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	Info = GOOD;
	result[0] = 0;
	if (!(a->man_profile.private_options & (1L << PRIVATE_ECHO_CANCELLER)))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		Info = _FACILITY_NOT_SUPPORTED;
	}
	else
	{
		if (appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
		{
			/* Private EC spec: single word (command) + optional option
			   and tail-length words appended in the same parameter. */
			if (api_parse(&msg[1].info[1], msg[1].length, "w", ec_parms))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _WRONG_MESSAGE_FORMAT;
			}
			else
			{
				if (plci == NULL)
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_IDENTIFIER;
				}
				else if (!plci->State || !plci->NL.Id || plci->nl_remove_id)
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_STATE;
				}
				else
				{
					plci->command = 0;
					plci->ec_cmd = GET_WORD(ec_parms[0].info);
					plci->ec_idi_options &= ~(LEC_MANUAL_DISABLE | LEC_RESET_COEFFICIENTS);
					result[0] = 2;
					PUT_WORD(&result[1], EC_SUCCESS);
					if (msg[1].length >= 4)
					{
						/* Option word present: map EC_* option bits to LEC_* flags. */
						opt = GET_WORD(&ec_parms[0].info[2]);
						plci->ec_idi_options &= ~(LEC_ENABLE_NONLINEAR_PROCESSING |
									  LEC_ENABLE_2100HZ_DETECTOR | LEC_REQUIRE_2100HZ_REVERSALS);
						if (!(opt & EC_DISABLE_NON_LINEAR_PROCESSING))
							plci->ec_idi_options |= LEC_ENABLE_NONLINEAR_PROCESSING;
						if (opt & EC_DETECT_DISABLE_TONE)
							plci->ec_idi_options |= LEC_ENABLE_2100HZ_DETECTOR;
						if (!(opt & EC_DO_NOT_REQUIRE_REVERSALS))
							plci->ec_idi_options |= LEC_REQUIRE_2100HZ_REVERSALS;
						if (msg[1].length >= 6)
						{
							plci->ec_tail_length = GET_WORD(&ec_parms[0].info[4]);
						}
					}
					switch (plci->ec_cmd)
					{
					case EC_ENABLE_OPERATION:
						plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
						start_internal_command(Id, plci, ec_command);
						return (false);	/* ec_command sends the confirmation */
					case EC_DISABLE_OPERATION:
						plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER |
							LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING |
							LEC_RESET_COEFFICIENTS;
						start_internal_command(Id, plci, ec_command);
						return (false);
					case EC_FREEZE_COEFFICIENTS:
						plci->ec_idi_options |= LEC_FREEZE_COEFFICIENTS;
						start_internal_command(Id, plci, ec_command);
						return (false);
					case EC_RESUME_COEFFICIENT_UPDATE:
						plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
						start_internal_command(Id, plci, ec_command);
						return (false);
					case EC_RESET_COEFFICIENTS:
						plci->ec_idi_options |= LEC_RESET_COEFFICIENTS;
						start_internal_command(Id, plci, ec_command);
						return (false);
					default:
						dbug(1, dprintf("[%06lx] %s,%d: EC unknown request %04x",
								UnMapId(Id), (char *)(FILE_), __LINE__, plci->ec_cmd));
						PUT_WORD(&result[1], EC_UNSUPPORTED_OPERATION);
					}
				}
			}
		}
		else
		{
			/* Standard EC spec: command word + nested parameter struct. */
			if (api_parse(&msg[1].info[1], msg[1].length, "ws", ec_parms))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _WRONG_MESSAGE_FORMAT;
			}
			else
			{
				if (GET_WORD(ec_parms[0].info) == EC_GET_SUPPORTED_SERVICES)
				{
					/* Capability query needs no PLCI; answer immediately. */
					result[0] = 11;
					PUT_WORD(&result[1], EC_GET_SUPPORTED_SERVICES);
					result[3] = 8;
					PUT_WORD(&result[4], GOOD);
					PUT_WORD(&result[6], 0x0007);
					PUT_WORD(&result[8], LEC_MAX_SUPPORTED_TAIL_LENGTH);
					PUT_WORD(&result[10], 0);
				}
				else if (plci == NULL)
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_IDENTIFIER;
				}
				else if (!plci->State || !plci->NL.Id || plci->nl_remove_id)
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_STATE;
				}
				else
				{
					plci->command = 0;
					plci->ec_cmd = GET_WORD(ec_parms[0].info);
					plci->ec_idi_options &= ~(LEC_MANUAL_DISABLE | LEC_RESET_COEFFICIENTS);
					result[0] = 5;
					PUT_WORD(&result[1], plci->ec_cmd);
					result[3] = 2;
					PUT_WORD(&result[4], GOOD);
					plci->ec_idi_options &= ~(LEC_ENABLE_NONLINEAR_PROCESSING |
								  LEC_ENABLE_2100HZ_DETECTOR | LEC_REQUIRE_2100HZ_REVERSALS);
					plci->ec_tail_length = 0;
					if (ec_parms[1].length >= 2)
					{
						/* Note: here NLP defaults OFF and must be requested,
						   unlike the private spec where it defaults on. */
						opt = GET_WORD(&ec_parms[1].info[1]);
						if (opt & EC_ENABLE_NON_LINEAR_PROCESSING)
							plci->ec_idi_options |= LEC_ENABLE_NONLINEAR_PROCESSING;
						if (opt & EC_DETECT_DISABLE_TONE)
							plci->ec_idi_options |= LEC_ENABLE_2100HZ_DETECTOR;
						if (!(opt & EC_DO_NOT_REQUIRE_REVERSALS))
							plci->ec_idi_options |= LEC_REQUIRE_2100HZ_REVERSALS;
						if (ec_parms[1].length >= 4)
						{
							plci->ec_tail_length = GET_WORD(&ec_parms[1].info[3]);
						}
					}
					switch (plci->ec_cmd)
					{
					case EC_ENABLE_OPERATION:
						plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
						start_internal_command(Id, plci, ec_command);
						return (false);
					case EC_DISABLE_OPERATION:
						plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER |
							LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING |
							LEC_RESET_COEFFICIENTS;
						start_internal_command(Id, plci, ec_command);
						return (false);
					default:
						dbug(1, dprintf("[%06lx] %s,%d: EC unknown request %04x",
								UnMapId(Id), (char *)(FILE_), __LINE__, plci->ec_cmd));
						PUT_WORD(&result[4], _FACILITY_SPECIFIC_FUNCTION_NOT_SUPP);
					}
				}
			}
		}
	}
	sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
	      "wws", Info, (appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
	      PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
	return (false);
}
/*
 * Forward an echo-canceller bypass indication from the DSP to the
 * application as a FACILITY indication.  msg[1] carries the LEC disable
 * reason; it is translated to the matching EC_BYPASS_* code and packed
 * in either the private-spec or standard-spec result layout.  Suppressed
 * while the canceller is manually disabled.
 */
static void ec_indication(dword Id, PLCI *plci, byte *msg, word length)
{
	byte result[8];
	dbug(1, dprintf("[%06lx] %s,%d: ec_indication",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	if (!(plci->ec_idi_options & LEC_MANUAL_DISABLE))
	{
		if (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
		{
			result[0] = 2;
			PUT_WORD(&result[1], 0);
			switch (msg[1])
			{
			case LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ:
				PUT_WORD(&result[1], EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ);
				break;
			case LEC_DISABLE_TYPE_REVERSED_2100HZ:
				PUT_WORD(&result[1], EC_BYPASS_DUE_TO_REVERSED_2100HZ);
				break;
			case LEC_DISABLE_RELEASED:
				PUT_WORD(&result[1], EC_BYPASS_RELEASED);
				break;
			}
		}
		else
		{
			/* Standard spec wraps the bypass code in an
			   EC_BYPASS_INDICATION structure. */
			result[0] = 5;
			PUT_WORD(&result[1], EC_BYPASS_INDICATION);
			result[3] = 2;
			PUT_WORD(&result[4], 0);
			switch (msg[1])
			{
			case LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ:
				PUT_WORD(&result[4], EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ);
				break;
			case LEC_DISABLE_TYPE_REVERSED_2100HZ:
				PUT_WORD(&result[4], EC_BYPASS_DUE_TO_REVERSED_2100HZ);
				break;
			case LEC_DISABLE_RELEASED:
				PUT_WORD(&result[4], EC_BYPASS_RELEASED);
				break;
			}
		}
		sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
		      PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
	}
}
/*------------------------------------------------------------------*/
/* Advanced voice */
/*------------------------------------------------------------------*/
/*
 * Program the advanced-voice (codec) mixer coefficients on a BRI adapter.
 * Builds a DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS FTY structure from the
 * adapter's cached coefficient buffer, optionally merges in the
 * line-interconnect coefficient matrix, and sends it via TEL_CTRL.
 * write_command selects activation/deactivation/update handling of the
 * li_config_table flag entries before the coefficients are emitted.
 *
 * NOTE(review): the exact DSP coefficient layout (old-coef block followed
 * by feature word and per-connection bytes) is assumed from the buffer
 * construction here - confirm against the DSP interface documentation.
 */
static void adv_voice_write_coefs(PLCI *plci, word write_command)
{
	DIVA_CAPI_ADAPTER *a;
	word i;
	byte *p;
	word w, n, j, k;
	byte ch_map[MIXER_CHANNELS_BRI];
	byte coef_buffer[ADV_VOICE_COEF_BUFFER_SIZE + 2];
	dbug(1, dprintf("[%06lx] %s,%d: adv_voice_write_coefs %d",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__, write_command));
	a = plci->adapter;
	p = coef_buffer + 1;	/* byte 0 receives the length at the end */
	*(p++) = DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS;
	/* Copy the cached old-style coefficients, padding the remainder
	   of the fixed block with 0x8000. */
	i = 0;
	while (i + sizeof(word) <= a->adv_voice_coef_length)
	{
		PUT_WORD(p, GET_WORD(a->adv_voice_coef_buffer + i));
		p += 2;
		i += 2;
	}
	while (i < ADV_VOICE_OLD_COEF_COUNT * sizeof(word))
	{
		PUT_WORD(p, 0x8000);
		p += 2;
		i += 2;
	}
	/* BRI only: late-claim a free B-channel slot if the other one is taken. */
	if (!a->li_pri && (plci->li_bchannel_id == 0))
	{
		if ((li_config_table[a->li_base].plci == NULL) && (li_config_table[a->li_base + 1].plci != NULL))
		{
			plci->li_bchannel_id = 1;
			li_config_table[a->li_base].plci = plci;
			dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
					(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
					(char *)(FILE_), __LINE__, plci->li_bchannel_id));
		}
		else if ((li_config_table[a->li_base].plci != NULL) && (li_config_table[a->li_base + 1].plci == NULL))
		{
			plci->li_bchannel_id = 2;
			li_config_table[a->li_base + 1].plci = plci;
			dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
					(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
					(char *)(FILE_), __LINE__, plci->li_bchannel_id));
		}
	}
	if (!a->li_pri && (plci->li_bchannel_id != 0)
	    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
	{
		i = a->li_base + (plci->li_bchannel_id - 1);
		switch (write_command)
		{
		case ADV_VOICE_WRITE_ACTIVATION:
			/* j = own IC channel, k = the other IC channel. */
			j = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
			k = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
			if (!(plci->B1_facilities & B1_FACILITY_MIXER))
			{
				li_config_table[j].flag_table[i] |= LI_FLAG_CONFERENCE | LI_FLAG_MIX;
				li_config_table[i].flag_table[j] |= LI_FLAG_CONFERENCE | LI_FLAG_MONITOR;
			}
			if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
			{
				li_config_table[k].flag_table[i] |= LI_FLAG_CONFERENCE | LI_FLAG_MIX;
				li_config_table[i].flag_table[k] |= LI_FLAG_CONFERENCE | LI_FLAG_MONITOR;
				li_config_table[k].flag_table[j] |= LI_FLAG_CONFERENCE;
				li_config_table[j].flag_table[k] |= LI_FLAG_CONFERENCE;
			}
			mixer_calculate_coefs(a);
			li_config_table[i].curchnl = li_config_table[i].channel;
			li_config_table[j].curchnl = li_config_table[j].channel;
			if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
				li_config_table[k].curchnl = li_config_table[k].channel;
			break;
		case ADV_VOICE_WRITE_DEACTIVATION:
			/* Clear all cross-connect flags for the B channel and its
			   IC channel(s), then recompute. */
			for (j = 0; j < li_total_channels; j++)
			{
				li_config_table[i].flag_table[j] = 0;
				li_config_table[j].flag_table[i] = 0;
			}
			k = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
			for (j = 0; j < li_total_channels; j++)
			{
				li_config_table[k].flag_table[j] = 0;
				li_config_table[j].flag_table[k] = 0;
			}
			if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
			{
				k = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
				for (j = 0; j < li_total_channels; j++)
				{
					li_config_table[k].flag_table[j] = 0;
					li_config_table[j].flag_table[k] = 0;
				}
			}
			mixer_calculate_coefs(a);
			break;
		}
		if (plci->B1_facilities & B1_FACILITY_MIXER)
		{
			/* Mixer active: emit the feature word plus one byte per
			   programmed connection, driven by mixer_write_prog_bri. */
			w = 0;
			if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length)
				w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
			if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
				w |= MIXER_FEATURE_ENABLE_TX_DATA;
			if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
				w |= MIXER_FEATURE_ENABLE_RX_DATA;
			*(p++) = (byte) w;
			*(p++) = (byte)(w >> 8);
			/* Map logical channel pairs so that index 0 is the own
			   B channel regardless of li_bchannel_id. */
			for (j = 0; j < sizeof(ch_map); j += 2)
			{
				ch_map[j] = (byte)(j + (plci->li_bchannel_id - 1));
				ch_map[j + 1] = (byte)(j + (2 - plci->li_bchannel_id));
			}
			for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
			{
				i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
				j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
				if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
				{
					*(p++) = ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
					/* Latch the written state into the upper nibble. */
					w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
					li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
				}
				else
				{
					*(p++) = (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + n < a->adv_voice_coef_length) ?
						a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + n] : 0x00;
				}
			}
		}
		else
		{
			/* No mixer: pass the remaining cached bytes through. */
			for (i = ADV_VOICE_NEW_COEF_BASE; i < a->adv_voice_coef_length; i++)
				*(p++) = a->adv_voice_coef_buffer[i];
		}
	}
	else
	{
		for (i = ADV_VOICE_NEW_COEF_BASE; i < a->adv_voice_coef_length; i++)
			*(p++) = a->adv_voice_coef_buffer[i];
	}
	coef_buffer[0] = (p - coef_buffer) - 1;	/* FTY payload length */
	add_p(plci, FTY, coef_buffer);
	sig_req(plci, TEL_CTRL, 0);
	send_req(plci);
}
static void adv_voice_clear_config(PLCI *plci)
{
DIVA_CAPI_ADAPTER *a;
word i, j;
dbug(1, dprintf("[%06lx] %s,%d: adv_voice_clear_config",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__));
a = plci->adapter;
if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
{
a->adv_voice_coef_length = 0;
if (!a->li_pri && (plci->li_bchannel_id != 0)
&& (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
{
i = a->li_base + (plci->li_bchannel_id - 1);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].flag_table[j] = 0;
li_config_table[j].flag_table[i] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].flag_table[j] = 0;
li_config_table[j].flag_table[i] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
{
i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].flag_table[j] = 0;
li_config_table[j].flag_table[i] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
}
}
}
}
/*
 * Prepare the advanced-voice path for a B1-resource switch.
 * Trace-only; kept for symmetry with the other *_prepare_switch handlers.
 */
static void adv_voice_prepare_switch(dword Id, PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: adv_voice_prepare_switch",
			UnMapId(Id), (char *)(FILE_), __LINE__));
}
/*
 * Save advanced-voice configuration before a B1-resource switch.
 * The configuration is cached in the adapter already, so there is
 * nothing to do; always returns GOOD.
 */
static word adv_voice_save_config(dword Id, PLCI *plci, byte Rc)
{
	dbug(1, dprintf("[%06lx] %s,%d: adv_voice_save_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	return (GOOD);
}
/*
 * Restore advanced-voice configuration after a B1-resource switch.
 * Mirrors ec_restore_config(): RESTORE_VOICE_1 rewrites the codec mixer
 * coefficients once the signalling channel is free, RESTORE_VOICE_2
 * checks the result.  Only active when this PLCI is the adapter's
 * advanced-voice signalling PLCI with the voice facility loaded.
 */
static word adv_voice_restore_config(dword Id, PLCI *plci, byte Rc)
{
	DIVA_CAPI_ADAPTER *a;
	word Info;
	dbug(1, dprintf("[%06lx] %s,%d: adv_voice_restore_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	Info = GOOD;
	a = plci->adapter;
	if ((plci->B1_facilities & B1_FACILITY_VOICE)
	    && (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
	{
		switch (plci->adjust_b_state)
		{
		case ADJUST_B_RESTORE_VOICE_1:
			plci->internal_command = plci->adjust_b_command;
			if (plci->sig_req)
			{
				/* Signalling busy - stay in this state and retry. */
				plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_1;
				break;
			}
			adv_voice_write_coefs(plci, ADV_VOICE_WRITE_UPDATE);
			plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_2;
			break;
		case ADJUST_B_RESTORE_VOICE_2:
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Restore voice config failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _WRONG_STATE;
				break;
			}
			break;
		}
	}
	return (Info);
}
/*------------------------------------------------------------------*/
/* B1 resource switching */
/*------------------------------------------------------------------*/
/*
 * Base facility bitmask per B1 resource number, indexed by resource.
 * Values are OR-combinations of the B1_FACILITY_* flags (DTMF, mixer,
 * local, EC, ...); get_b1_facilities() adds conditional bits on top of
 * these - presumably 0x12 = mixer+voice etc., per the B1_FACILITY_*
 * definitions elsewhere (verify against the header).
 */
static byte b1_facilities_table[] =
{
	0x00,  /* 0  No bchannel resources      */
	0x00,  /* 1  Codec (automatic law)      */
	0x00,  /* 2  Codec (A-law)              */
	0x00,  /* 3  Codec (y-law)              */
	0x00,  /* 4  HDLC for X.21              */
	0x00,  /* 5  HDLC                       */
	0x00,  /* 6  External Device 0          */
	0x00,  /* 7  External Device 1          */
	0x00,  /* 8  HDLC 56k                   */
	0x00,  /* 9  Transparent                */
	0x00,  /* 10 Loopback to network        */
	0x00,  /* 11 Test pattern to net        */
	0x00,  /* 12 Rate adaptation sync       */
	0x00,  /* 13 Rate adaptation async      */
	0x00,  /* 14 R-Interface                */
	0x00,  /* 15 HDLC 128k leased line      */
	0x00,  /* 16 FAX                        */
	0x00,  /* 17 Modem async                */
	0x00,  /* 18 Modem sync HDLC            */
	0x00,  /* 19 V.110 async HDLC           */
	0x12,  /* 20 Adv voice (Trans,mixer)    */
	0x00,  /* 21 Codec connected to IC      */
	0x0c,  /* 22 Trans,DTMF                 */
	0x1e,  /* 23 Trans,DTMF+mixer           */
	0x1f,  /* 24 Trans,DTMF+mixer+local     */
	0x13,  /* 25 Trans,mixer+local          */
	0x12,  /* 26 HDLC,mixer                 */
	0x12,  /* 27 HDLC 56k,mixer             */
	0x2c,  /* 28 Trans,LEC+DTMF             */
	0x3e,  /* 29 Trans,LEC+DTMF+mixer       */
	0x3f,  /* 30 Trans,LEC+DTMF+mixer+local */
	0x2c,  /* 31 RTP,LEC+DTMF               */
	0x3e,  /* 32 RTP,LEC+DTMF+mixer         */
	0x3f,  /* 33 RTP,LEC+DTMF+mixer+local   */
	0x00,  /* 34 Signaling task             */
	0x00,  /* 35 PIAFS                      */
	0x0c,  /* 36 Trans,DTMF+TONE            */
	0x1e,  /* 37 Trans,DTMF+TONE+mixer      */
	0x1f   /* 38 Trans,DTMF+TONE+mixer+local*/
};
/*
 * Return the B1 facility bitmask for a resource number.
 * Starts from the static b1_facilities_table and augments it:
 *  - transparent/adv-voice resources (9, 20, 25) gain soft-DTMF send and
 *    receive bits when the adapter supports them, unless the application
 *    requested the private DTMF-tone option;
 *  - modem resources (17, 18) gain both DTMF bits when the adapter has
 *    V.18 or VOWN support.
 */
static word get_b1_facilities(PLCI *plci, byte b1_resource)
{
	word facilities = b1_facilities_table[b1_resource];
	switch (b1_resource)
	{
	case 9:
	case 20:
	case 25:
		if (!(((plci->requested_options_conn | plci->requested_options) & (1L << PRIVATE_DTMF_TONE))
		      || (plci->appl && (plci->adapter->requested_options_table[plci->appl->Id - 1] & (1L << PRIVATE_DTMF_TONE)))))
		{
			if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_SEND)
				facilities |= B1_FACILITY_DTMFX;
			if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)
				facilities |= B1_FACILITY_DTMFR;
		}
		break;
	case 17:
	case 18:
		if (plci->adapter->manufacturer_features & (MANUFACTURER_FEATURE_V18 | MANUFACTURER_FEATURE_VOWN))
			facilities |= B1_FACILITY_DTMFX | B1_FACILITY_DTMFR;
		break;
	default:
		break;
	}
	return (facilities);
}
/*
 * Map a (resource, requested facilities) pair to the concrete B1 resource
 * number that provides those facilities.  Resources come in families
 * (plain / +mixer / +DTMF / +EC / +local variants); within the family of
 * the given resource the member matching the facility bits is chosen.
 * Resources outside the known families are returned unchanged.
 */
static byte add_b1_facilities(PLCI *plci, byte b1_resource, word b1_facilities)
{
	byte b;
	switch (b1_resource)
	{
	case 5:
	case 26:
		/* HDLC family: 26 is the mixer variant. */
		if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
			b = 26;
		else
			b = 5;
		break;
	case 8:
	case 27:
		/* HDLC 56k family: 27 is the mixer variant. */
		if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
			b = 27;
		else
			b = 8;
		break;
	case 9:
	case 20:
	case 22:
	case 23:
	case 24:
	case 25:
	case 28:
	case 29:
	case 30:
	case 36:
	case 37:
	case 38:
		/* Transparent family: pick by EC > DTMF-tone > hard/soft DTMF
		   > plain, then by local/mixer within each group. */
		if (b1_facilities & B1_FACILITY_EC)
		{
			if (b1_facilities & B1_FACILITY_LOCAL)
				b = 30;
			else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
				b = 29;
			else
				b = 28;
		}
		else if ((b1_facilities & (B1_FACILITY_DTMFX | B1_FACILITY_DTMFR | B1_FACILITY_MIXER))
			 && (((plci->requested_options_conn | plci->requested_options) & (1L << PRIVATE_DTMF_TONE))
			     || (plci->appl && (plci->adapter->requested_options_table[plci->appl->Id - 1] & (1L << PRIVATE_DTMF_TONE)))))
		{
			/* Application requested the private DTMF-tone option. */
			if (b1_facilities & B1_FACILITY_LOCAL)
				b = 38;
			else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
				b = 37;
			else
				b = 36;
		}
		else if (((plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_HARDDTMF)
			  && !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
			 || ((b1_facilities & B1_FACILITY_DTMFR)
			     && ((b1_facilities & B1_FACILITY_MIXER)
				 || !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)))
			 || ((b1_facilities & B1_FACILITY_DTMFX)
			     && ((b1_facilities & B1_FACILITY_MIXER)
				 || !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_SEND))))
		{
			/* DTMF must be done by the resource itself (hardware DTMF,
			   or soft DTMF not available / not combinable with mixer). */
			if (b1_facilities & B1_FACILITY_LOCAL)
				b = 24;
			else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
				b = 23;
			else
				b = 22;
		}
		else
		{
			if (b1_facilities & B1_FACILITY_LOCAL)
				b = 25;
			else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
				b = 20;
			else
				b = 9;
		}
		break;
	case 31:
	case 32:
	case 33:
		/* RTP family. */
		if (b1_facilities & B1_FACILITY_LOCAL)
			b = 33;
		else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
			b = 32;
		else
			b = 31;
		break;
	default:
		b = b1_resource;
	}
	dbug(1, dprintf("[%06lx] %s,%d: add_b1_facilities %d %04x %d %04x",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__,
			b1_resource, b1_facilities, b, get_b1_facilities(plci, b)));
	return (b);
}
/*
 * Commit a new B1 facility set on the PLCI.  The requested bits are
 * masked to what the new resource actually supports; any facility being
 * dropped relative to the current set has its per-facility state cleared
 * (EC, DTMF receive/send, mixer, advanced voice) before B1_facilities is
 * updated.
 */
static void adjust_b1_facilities(PLCI *plci, byte new_b1_resource, word new_b1_facilities)
{
	word removed_facilities;
	dbug(1, dprintf("[%06lx] %s,%d: adjust_b1_facilities %d %04x %04x",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__, new_b1_resource, new_b1_facilities,
			new_b1_facilities & get_b1_facilities(plci, new_b1_resource)));
	new_b1_facilities &= get_b1_facilities(plci, new_b1_resource);
	removed_facilities = plci->B1_facilities & ~new_b1_facilities;
	if (removed_facilities & B1_FACILITY_EC)
		ec_clear_config(plci);
	if (removed_facilities & B1_FACILITY_DTMFR)
	{
		dtmf_rec_clear_config(plci);
		dtmf_parameter_clear_config(plci);
	}
	if (removed_facilities & B1_FACILITY_DTMFX)
		dtmf_send_clear_config(plci);
	if (removed_facilities & B1_FACILITY_MIXER)
		mixer_clear_config(plci);
	if (removed_facilities & B1_FACILITY_VOICE)
		adv_voice_clear_config(plci);
	plci->B1_facilities = new_b1_facilities;
}
/*
 * Clear the pending "restore after user connect" marker set by
 * adjust_b_process() in the ADJUST_B_MODE_USER_CONNECT path.
 */
static void adjust_b_clear(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: adjust_b_clear",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	plci->adjust_b_restore = false;
}
/*
 * B-channel adjustment state machine.  Driven repeatedly (with the
 * result code Rc of the last completed card request) until it reaches
 * ADJUST_B_END or fails.  plci->adjust_b_mode selects which phases run:
 * save facility configs, remove L2/L3, switch the L1 resource, restore
 * facility configs, re-assign L2/L3, and optionally reconnect.
 *
 * The switch cases deliberately fall through: each phase either breaks
 * out (request pending / error) or falls into the next phase with Rc
 * preset to OK.  Whenever the machine must wait for a card response it
 * stores plci->adjust_b_command in plci->internal_command so it is
 * re-entered on completion.  Returns GOOD or a CAPI error word.
 */
static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
{
	word Info;
	byte b1_resource;
	NCCI *ncci_ptr;
	API_PARSE bp[2];
	dbug(1, dprintf("[%06lx] %s,%d: adjust_b_process %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	Info = GOOD;
	switch (plci->adjust_b_state)
	{
	case ADJUST_B_START:
		/* Pure L1 facility change with unchanged resource can be
		   committed immediately without touching the channel. */
		if ((plci->adjust_b_parms_msg == NULL)
		    && (plci->adjust_b_mode & ADJUST_B_MODE_SWITCH_L1)
		    && ((plci->adjust_b_mode & ~(ADJUST_B_MODE_SAVE | ADJUST_B_MODE_SWITCH_L1 |
						 ADJUST_B_MODE_NO_RESOURCE | ADJUST_B_MODE_RESTORE)) == 0))
		{
			b1_resource = (plci->adjust_b_mode == ADJUST_B_MODE_NO_RESOURCE) ?
				0 : add_b1_facilities(plci, plci->B1_resource, plci->adjust_b_facilities);
			if (b1_resource == plci->B1_resource)
			{
				adjust_b1_facilities(plci, b1_resource, plci->adjust_b_facilities);
				break;
			}
			if (plci->adjust_b_facilities & ~get_b1_facilities(plci, b1_resource))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Adjust B nonsupported facilities %d %d %04x",
						UnMapId(Id), (char *)(FILE_), __LINE__,
						plci->B1_resource, b1_resource, plci->adjust_b_facilities));
				Info = _WRONG_STATE;
				break;
			}
		}
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			mixer_prepare_switch(Id, plci);
			dtmf_prepare_switch(Id, plci);
			dtmf_parameter_prepare_switch(Id, plci);
			ec_prepare_switch(Id, plci);
			adv_voice_prepare_switch(Id, plci);
		}
		plci->adjust_b_state = ADJUST_B_SAVE_MIXER_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SAVE_MIXER_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			Info = mixer_save_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_SAVE_DTMF_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SAVE_DTMF_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			Info = dtmf_save_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_REMOVE_L23_1;
		/* fall through */
	case ADJUST_B_REMOVE_L23_1:
		if ((plci->adjust_b_mode & ADJUST_B_MODE_REMOVE_L23)
		    && plci->NL.Id && !plci->nl_remove_id)
		{
			plci->internal_command = plci->adjust_b_command;
			if (plci->adjust_b_ncci != 0)
			{
				/* Flush pending data and acks on the NCCI before removal. */
				ncci_ptr = &(plci->adapter->ncci[plci->adjust_b_ncci]);
				while (ncci_ptr->data_pending)
				{
					plci->data_sent_ptr = ncci_ptr->DBuffer[ncci_ptr->data_out].P;
					data_rc(plci, plci->adapter->ncci_ch[plci->adjust_b_ncci]);
				}
				while (ncci_ptr->data_ack_pending)
					data_ack(plci, plci->adapter->ncci_ch[plci->adjust_b_ncci]);
			}
			nl_req_ncci(plci, REMOVE,
				    (byte)((plci->adjust_b_mode & ADJUST_B_MODE_CONNECT) ? plci->adjust_b_ncci : 0));
			send_req(plci);
			plci->adjust_b_state = ADJUST_B_REMOVE_L23_2;
			break;
		}
		plci->adjust_b_state = ADJUST_B_REMOVE_L23_2;
		Rc = OK;
		/* fall through */
	case ADJUST_B_REMOVE_L23_2:
		if ((Rc != OK) && (Rc != OK_FC))
		{
			dbug(1, dprintf("[%06lx] %s,%d: Adjust B remove failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			Info = _WRONG_STATE;
			break;
		}
		if (plci->adjust_b_mode & ADJUST_B_MODE_REMOVE_L23)
		{
			if (plci_nl_busy(plci))
			{
				plci->internal_command = plci->adjust_b_command;
				break;
			}
		}
		plci->adjust_b_state = ADJUST_B_SAVE_EC_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SAVE_EC_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			Info = ec_save_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_SAVE_DTMF_PARAMETER_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SAVE_DTMF_PARAMETER_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			Info = dtmf_parameter_save_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_SAVE_VOICE_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SAVE_VOICE_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			Info = adv_voice_save_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_SWITCH_L1_1;
		/* fall through */
	case ADJUST_B_SWITCH_L1_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SWITCH_L1)
		{
			if (plci->sig_req)
			{
				plci->internal_command = plci->adjust_b_command;
				break;
			}
			if (plci->adjust_b_parms_msg != NULL)
				api_load_msg(plci->adjust_b_parms_msg, bp);
			else
				api_load_msg(&plci->B_protocol, bp);
			Info = add_b1(plci, bp,
				      (word)((plci->adjust_b_mode & ADJUST_B_MODE_NO_RESOURCE) ? 2 : 0),
				      plci->adjust_b_facilities);
			if (Info != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Adjust B invalid L1 parameters %d %04x",
						UnMapId(Id), (char *)(FILE_), __LINE__,
						plci->B1_resource, plci->adjust_b_facilities));
				break;
			}
			plci->internal_command = plci->adjust_b_command;
			sig_req(plci, RESOURCES, 0);
			send_req(plci);
			plci->adjust_b_state = ADJUST_B_SWITCH_L1_2;
			break;
		}
		plci->adjust_b_state = ADJUST_B_SWITCH_L1_2;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SWITCH_L1_2:
		if ((Rc != OK) && (Rc != OK_FC))
		{
			dbug(1, dprintf("[%06lx] %s,%d: Adjust B switch failed %02x %d %04x",
					UnMapId(Id), (char *)(FILE_), __LINE__,
					Rc, plci->B1_resource, plci->adjust_b_facilities));
			Info = _WRONG_STATE;
			break;
		}
		plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_RESTORE_VOICE_1:
	case ADJUST_B_RESTORE_VOICE_2:
		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
		{
			Info = adv_voice_restore_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_RESTORE_DTMF_PARAMETER_1:
	case ADJUST_B_RESTORE_DTMF_PARAMETER_2:
		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
		{
			Info = dtmf_parameter_restore_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_RESTORE_EC_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_RESTORE_EC_1:
	case ADJUST_B_RESTORE_EC_2:
		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
		{
			Info = ec_restore_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_ASSIGN_L23_1;
		/* fall through */
	case ADJUST_B_ASSIGN_L23_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_ASSIGN_L23)
		{
			if (plci_nl_busy(plci))
			{
				plci->internal_command = plci->adjust_b_command;
				break;
			}
			if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
				plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
			if (plci->adjust_b_parms_msg != NULL)
				api_load_msg(plci->adjust_b_parms_msg, bp);
			else
				api_load_msg(&plci->B_protocol, bp);
			Info = add_b23(plci, bp);
			if (Info != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Adjust B invalid L23 parameters %04x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Info));
				break;
			}
			plci->internal_command = plci->adjust_b_command;
			nl_req_ncci(plci, ASSIGN, 0);
			send_req(plci);
			plci->adjust_b_state = ADJUST_B_ASSIGN_L23_2;
			break;
		}
		plci->adjust_b_state = ADJUST_B_ASSIGN_L23_2;
		Rc = ASSIGN_OK;
		/* fall through */
	case ADJUST_B_ASSIGN_L23_2:
		if ((Rc != OK) && (Rc != OK_FC) && (Rc != ASSIGN_OK))
		{
			dbug(1, dprintf("[%06lx] %s,%d: Adjust B assign failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			Info = _WRONG_STATE;
			break;
		}
		if (plci->adjust_b_mode & ADJUST_B_MODE_ASSIGN_L23)
		{
			if (Rc != ASSIGN_OK)
			{
				/* Wait for the ASSIGN completion. */
				plci->internal_command = plci->adjust_b_command;
				break;
			}
		}
		if (plci->adjust_b_mode & ADJUST_B_MODE_USER_CONNECT)
		{
			/* Remaining restore happens when the user connects. */
			plci->adjust_b_restore = true;
			break;
		}
		plci->adjust_b_state = ADJUST_B_CONNECT_1;
		/* fall through */
	case ADJUST_B_CONNECT_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
		{
			plci->internal_command = plci->adjust_b_command;
			if (plci_nl_busy(plci))
				break;
			nl_req_ncci(plci, N_CONNECT, 0);
			send_req(plci);
			plci->adjust_b_state = ADJUST_B_CONNECT_2;
			break;
		}
		plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_CONNECT_2:
	case ADJUST_B_CONNECT_3:
	case ADJUST_B_CONNECT_4:
		if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
		{
			dbug(1, dprintf("[%06lx] %s,%d: Adjust B connect failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			Info = _WRONG_STATE;
			break;
		}
		/* Two completions are awaited (request rc and connect ind,
		   arriving in either order): CONNECT_2 -> 3 or 4 -> done. */
		if (Rc == OK)
		{
			if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
			{
				get_ncci(plci, (byte)(Id >> 16), plci->adjust_b_ncci);
				Id = (Id & 0xffff) | (((dword)(plci->adjust_b_ncci)) << 16);
			}
			if (plci->adjust_b_state == ADJUST_B_CONNECT_2)
				plci->adjust_b_state = ADJUST_B_CONNECT_3;
			else if (plci->adjust_b_state == ADJUST_B_CONNECT_4)
				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
		}
		else if (Rc == 0)
		{
			if (plci->adjust_b_state == ADJUST_B_CONNECT_2)
				plci->adjust_b_state = ADJUST_B_CONNECT_4;
			else if (plci->adjust_b_state == ADJUST_B_CONNECT_3)
				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
		}
		if (plci->adjust_b_state != ADJUST_B_RESTORE_DTMF_1)
		{
			plci->internal_command = plci->adjust_b_command;
			break;
		}
		Rc = OK;
		/* fall through */
	case ADJUST_B_RESTORE_DTMF_1:
	case ADJUST_B_RESTORE_DTMF_2:
		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
		{
			Info = dtmf_restore_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_RESTORE_MIXER_1:
	case ADJUST_B_RESTORE_MIXER_2:
	case ADJUST_B_RESTORE_MIXER_3:
	case ADJUST_B_RESTORE_MIXER_4:
	case ADJUST_B_RESTORE_MIXER_5:
	case ADJUST_B_RESTORE_MIXER_6:
	case ADJUST_B_RESTORE_MIXER_7:
		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
		{
			Info = mixer_restore_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_END;
		/* fall through */
	case ADJUST_B_END:
		break;
	}
	return (Info);
}
/*
 * Prime the adjust-B state machine for a B1 resource change.
 *
 * Id:               CAPI Id (PLCI in low word, NCCI in high word)
 * plci:             connection whose B1 resource is adjusted
 * bp_msg:           saved B protocol parameters, or NULL
 * b1_facilities:    requested B1 facility bits
 * internal_command: command to resume once the adjustment completes
 *
 * With no parameter message and no currently allocated B1 resource the
 * engine runs in save + no-resource mode; otherwise the full
 * save / switch-L1 / restore sequence is requested.  The actual work is
 * performed later by adjust_b_process().
 */
static void adjust_b1_resource(dword Id, PLCI *plci, API_SAVE *bp_msg, word b1_facilities, word internal_command)
{
	word mode;

	dbug(1, dprintf("[%06lx] %s,%d: adjust_b1_resource %d %04x",
	                UnMapId(Id), (char *)(FILE_), __LINE__,
	                plci->B1_resource, b1_facilities));
	if ((bp_msg == NULL) && (plci->B1_resource == 0))
		mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_NO_RESOURCE | ADJUST_B_MODE_SWITCH_L1;
	else
		mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_SWITCH_L1 | ADJUST_B_MODE_RESTORE;
	plci->adjust_b_parms_msg = bp_msg;
	plci->adjust_b_facilities = b1_facilities;
	plci->adjust_b_command = internal_command;
	plci->adjust_b_ncci = (word)(Id >> 16);
	plci->adjust_b_mode = mode;
	plci->adjust_b_state = ADJUST_B_START;
	dbug(1, dprintf("[%06lx] %s,%d: Adjust B1 resource %d %04x...",
	                UnMapId(Id), (char *)(FILE_), __LINE__,
	                plci->B1_resource, b1_facilities));
}
/*
 * adjust_b_restore - run the adjust-B state machine in restore-only mode.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: connection instance whose B-channel configuration is restored
 * Rc:   return code of the previously issued asynchronous request
 *
 * Follows the enqueued-command pattern used throughout this file: the
 * current internal_command is consumed and the switch deliberately falls
 * through from one state to the next when no asynchronous request had to
 * be issued in between.
 */
static void adjust_b_restore(dword Id, PLCI *plci, byte Rc)
{
word internal_command;
dbug(1, dprintf("[%06lx] %s,%d: adjust_b_restore %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
plci->command = 0;
if (plci->req_in != 0)
{
/* a request is still queued: re-enter at ADJUST_B_RESTORE_1 later */
plci->internal_command = ADJUST_B_RESTORE_1;
break;
}
Rc = OK;
/* fall through */
case ADJUST_B_RESTORE_1:
if ((Rc != OK) && (Rc != OK_FC))
{
dbug(1, dprintf("[%06lx] %s,%d: Adjust B enqueued failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
}
/* set up the adjust-B engine for a pure restore run */
plci->adjust_b_parms_msg = NULL;
plci->adjust_b_facilities = plci->B1_facilities;
plci->adjust_b_command = ADJUST_B_RESTORE_2;
plci->adjust_b_ncci = (word)(Id >> 16);
plci->adjust_b_mode = ADJUST_B_MODE_RESTORE;
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: Adjust B restore...",
UnMapId(Id), (char *)(FILE_), __LINE__));
/* fall through */
case ADJUST_B_RESTORE_2:
if (adjust_b_process(Id, plci, Rc) != GOOD)
{
dbug(1, dprintf("[%06lx] %s,%d: Adjust B restore failed",
UnMapId(Id), (char *)(FILE_), __LINE__));
}
if (plci->internal_command)
break;
break;
}
}
/*
 * reset_b3_command - perform a B3 reset by tearing down and re-assigning
 * L2/L3 and reconnecting via the adjust-B state machine.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: connection instance to reset
 * Rc:   return code of the previously issued asynchronous request
 *
 * On completion (or failure of adjust_b_process) a RESET_B3 indication is
 * sent to the application; the function returns early while an
 * asynchronous sub-command is still pending.
 */
static void reset_b3_command(dword Id, PLCI *plci, byte Rc)
{
word Info;
word internal_command;
dbug(1, dprintf("[%06lx] %s,%d: reset_b3_command %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
Info = GOOD;
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
plci->command = 0;
/* configure adjust-B: remove L2/L3, re-assign them, then reconnect */
plci->adjust_b_parms_msg = NULL;
plci->adjust_b_facilities = plci->B1_facilities;
plci->adjust_b_command = RESET_B3_COMMAND_1;
plci->adjust_b_ncci = (word)(Id >> 16);
plci->adjust_b_mode = ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_CONNECT;
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: Reset B3...",
UnMapId(Id), (char *)(FILE_), __LINE__));
/* fall through */
case RESET_B3_COMMAND_1:
Info = adjust_b_process(Id, plci, Rc);
if (Info != GOOD)
{
dbug(1, dprintf("[%06lx] %s,%d: Reset failed",
UnMapId(Id), (char *)(FILE_), __LINE__));
break;
}
if (plci->internal_command)
return;
break;
}
/* sendf (plci->appl, _RESET_B3_R | CONFIRM, Id, plci->number, "w", Info);*/
sendf(plci->appl, _RESET_B3_I, Id, 0, "s", "");
}
/*
 * select_b_command - handle SELECT_B_REQ: switch the B protocol of an
 * established connection via the adjust-B state machine.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: connection instance whose B protocol is changed
 * Rc:   return code of the previously issued asynchronous request
 *
 * The saved request message supplies the new B protocol parameters; an
 * empty parameter struct releases the B resource instead.  A confirm with
 * the resulting Info value is sent when the sequence completes.
 */
static void select_b_command(dword Id, PLCI *plci, byte Rc)
{
word Info;
word internal_command;
byte esc_chi[3];
dbug(1, dprintf("[%06lx] %s,%d: select_b_command %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
Info = GOOD;
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
plci->command = 0;
plci->adjust_b_parms_msg = &plci->saved_msg;
/* voice facility only on the PLCI that carries the advanced codec */
if ((plci->tel == ADV_VOICE) && (plci == plci->adapter->AdvSignalPLCI))
plci->adjust_b_facilities = plci->B1_facilities | B1_FACILITY_VOICE;
else
plci->adjust_b_facilities = plci->B1_facilities & ~B1_FACILITY_VOICE;
plci->adjust_b_command = SELECT_B_COMMAND_1;
plci->adjust_b_ncci = (word)(Id >> 16);
if (plci->saved_msg.parms[0].length == 0)
{
/* empty B protocol parameter: drop the resource, no restore */
plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_SWITCH_L1 |
ADJUST_B_MODE_NO_RESOURCE;
}
else
{
plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_SWITCH_L1 |
ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_USER_CONNECT | ADJUST_B_MODE_RESTORE;
}
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: Select B protocol...",
UnMapId(Id), (char *)(FILE_), __LINE__));
/* fall through */
case SELECT_B_COMMAND_1:
Info = adjust_b_process(Id, plci, Rc);
if (Info != GOOD)
{
dbug(1, dprintf("[%06lx] %s,%d: Select B protocol failed",
UnMapId(Id), (char *)(FILE_), __LINE__));
break;
}
if (plci->internal_command)
return;
if (plci->tel == ADV_VOICE)
{
/* build escape channel identification IE for the codec PLCI */
esc_chi[0] = 0x02;
esc_chi[1] = 0x18;
esc_chi[2] = plci->b_channel;
SetVoiceChannel(plci->adapter->AdvCodecPLCI, esc_chi, plci->adapter);
}
break;
}
sendf(plci->appl, _SELECT_B_REQ | CONFIRM, Id, plci->number, "w", Info);
}
/*
 * fax_connect_ack_command - send N_CONNECT_ACK with the fax connect info
 * buffer and, on success, deliver the pending CONNECT_B3_ACTIVE
 * indication to the application.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: fax connection instance
 * Rc:   return code of the previously issued asynchronous request
 */
static void fax_connect_ack_command(dword Id, PLCI *plci, byte Rc)
{
word internal_command;
dbug(1, dprintf("[%06lx] %s,%d: fax_connect_ack_command %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
plci->command = 0;
/* fall through */
case FAX_CONNECT_ACK_COMMAND_1:
if (plci_nl_busy(plci))
{
/* network layer busy: re-enter this state on the next callback */
plci->internal_command = FAX_CONNECT_ACK_COMMAND_1;
return;
}
plci->internal_command = FAX_CONNECT_ACK_COMMAND_2;
/* issue N_CONNECT_ACK carrying the fax connect info */
plci->NData[0].P = plci->fax_connect_info_buffer;
plci->NData[0].PLength = plci->fax_connect_info_length;
plci->NL.X = plci->NData;
plci->NL.ReqCh = 0;
plci->NL.Req = plci->nl_req = (byte) N_CONNECT_ACK;
plci->adapter->request(&plci->NL);
return;
case FAX_CONNECT_ACK_COMMAND_2:
if ((Rc != OK) && (Rc != OK_FC))
{
dbug(1, dprintf("[%06lx] %s,%d: FAX issue CONNECT ACK failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
break;
}
}
/* deliver CONNECT_B3_ACTIVE once, if valid NCPI data is available */
if ((plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
{
if (plci->B3_prot == 4)
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
else
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
}
}
/*
 * fax_edata_ack_command - acknowledge received fax EDATA by sending an
 * N_EDATA frame built from the fax connect info buffer.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: fax connection instance
 * Rc:   return code of the previously issued asynchronous request
 */
static void fax_edata_ack_command(dword Id, PLCI *plci, byte Rc)
{
word internal_command;
dbug(1, dprintf("[%06lx] %s,%d: fax_edata_ack_command %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
plci->command = 0;
/* fall through */
case FAX_EDATA_ACK_COMMAND_1:
if (plci_nl_busy(plci))
{
/* network layer busy: retry this state on the next callback */
plci->internal_command = FAX_EDATA_ACK_COMMAND_1;
return;
}
plci->internal_command = FAX_EDATA_ACK_COMMAND_2;
/* note: payload length is fax_edata_ack_length, not the full buffer */
plci->NData[0].P = plci->fax_connect_info_buffer;
plci->NData[0].PLength = plci->fax_edata_ack_length;
plci->NL.X = plci->NData;
plci->NL.ReqCh = 0;
plci->NL.Req = plci->nl_req = (byte) N_EDATA;
plci->adapter->request(&plci->NL);
return;
case FAX_EDATA_ACK_COMMAND_2:
if ((Rc != OK) && (Rc != OK_FC))
{
dbug(1, dprintf("[%06lx] %s,%d: FAX issue EDATA ACK failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
break;
}
}
}
/*
 * fax_connect_info_command - transmit fax connect info via N_EDATA and
 * then start the B3 connection with N_CONNECT.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: fax connection instance
 * Rc:   return code of the previously issued asynchronous request
 *
 * On failure a CONNECT_B3 confirm with _WRONG_STATE is sent; otherwise
 * the confirm is produced later by the N_CONNECT handling.
 */
static void fax_connect_info_command(dword Id, PLCI *plci, byte Rc)
{
word Info;
word internal_command;
dbug(1, dprintf("[%06lx] %s,%d: fax_connect_info_command %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
Info = GOOD;
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
plci->command = 0;
/* fall through */
case FAX_CONNECT_INFO_COMMAND_1:
if (plci_nl_busy(plci))
{
/* network layer busy: retry this state on the next callback */
plci->internal_command = FAX_CONNECT_INFO_COMMAND_1;
return;
}
plci->internal_command = FAX_CONNECT_INFO_COMMAND_2;
/* send the fax connect info as an N_EDATA frame */
plci->NData[0].P = plci->fax_connect_info_buffer;
plci->NData[0].PLength = plci->fax_connect_info_length;
plci->NL.X = plci->NData;
plci->NL.ReqCh = 0;
plci->NL.Req = plci->nl_req = (byte) N_EDATA;
plci->adapter->request(&plci->NL);
return;
case FAX_CONNECT_INFO_COMMAND_2:
if ((Rc != OK) && (Rc != OK_FC))
{
dbug(1, dprintf("[%06lx] %s,%d: FAX setting connect info failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
Info = _WRONG_STATE;
break;
}
if (plci_nl_busy(plci))
{
plci->internal_command = FAX_CONNECT_INFO_COMMAND_2;
return;
}
/* info accepted: now establish the B3 connection */
plci->command = _CONNECT_B3_R;
nl_req_ncci(plci, N_CONNECT, 0);
send_req(plci);
return;
}
sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
}
/*
 * fax_adjust_b23_command - re-assign L2/L3 for a fax connection via the
 * adjust-B state machine, then issue N_CONNECT to bring up B3.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: fax connection instance
 * Rc:   return code of the previously issued asynchronous request
 *
 * A CONNECT_B3 confirm is sent only when adjust_b_process fails; on the
 * success path the confirm comes from the N_CONNECT handling.
 */
static void fax_adjust_b23_command(dword Id, PLCI *plci, byte Rc)
{
word Info;
word internal_command;
dbug(1, dprintf("[%06lx] %s,%d: fax_adjust_b23_command %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
Info = GOOD;
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
plci->command = 0;
/* configure adjust-B: tear down and re-assign L2/L3 only */
plci->adjust_b_parms_msg = NULL;
plci->adjust_b_facilities = plci->B1_facilities;
plci->adjust_b_command = FAX_ADJUST_B23_COMMAND_1;
plci->adjust_b_ncci = (word)(Id >> 16);
plci->adjust_b_mode = ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_ASSIGN_L23;
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: FAX adjust B23...",
UnMapId(Id), (char *)(FILE_), __LINE__));
/* fall through */
case FAX_ADJUST_B23_COMMAND_1:
Info = adjust_b_process(Id, plci, Rc);
if (Info != GOOD)
{
dbug(1, dprintf("[%06lx] %s,%d: FAX adjust failed",
UnMapId(Id), (char *)(FILE_), __LINE__));
break;
}
if (plci->internal_command)
return;
/* fall through */
case FAX_ADJUST_B23_COMMAND_2:
if (plci_nl_busy(plci))
{
/* network layer busy: retry this state on the next callback */
plci->internal_command = FAX_ADJUST_B23_COMMAND_2;
return;
}
plci->command = _CONNECT_B3_R;
nl_req_ncci(plci, N_CONNECT, 0);
send_req(plci);
return;
}
sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
}
/*
 * fax_disconnect_command - track completion of fax disconnect EDATA
 * exchange through three sub-states.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: fax connection instance
 * Rc:   return code of the previously issued asynchronous request
 *
 * Unlike its siblings, the state transitions here test the saved
 * internal_command value (the state we were in), not the field itself.
 * Rc == OK advances 1/2 -> 2, Rc == 0 advances 1 -> 3; any other
 * non-OK_FC code aborts the sequence.
 */
static void fax_disconnect_command(dword Id, PLCI *plci, byte Rc)
{
word internal_command;
dbug(1, dprintf("[%06lx] %s,%d: fax_disconnect_command %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
plci->command = 0;
plci->internal_command = FAX_DISCONNECT_COMMAND_1;
return;
case FAX_DISCONNECT_COMMAND_1:
case FAX_DISCONNECT_COMMAND_2:
case FAX_DISCONNECT_COMMAND_3:
if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
{
dbug(1, dprintf("[%06lx] %s,%d: FAX disconnect EDATA failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
break;
}
if (Rc == OK)
{
if ((internal_command == FAX_DISCONNECT_COMMAND_1)
|| (internal_command == FAX_DISCONNECT_COMMAND_2))
{
plci->internal_command = FAX_DISCONNECT_COMMAND_2;
}
}
else if (Rc == 0)
{
if (internal_command == FAX_DISCONNECT_COMMAND_1)
plci->internal_command = FAX_DISCONNECT_COMMAND_3;
}
return;
}
}
/*
 * rtp_connect_b3_req_command - establish an RTP B3 connection:
 * N_CONNECT first, then the RTP parameters as an N_UDATA frame.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: RTP connection instance
 * Rc:   return code of the previously issued asynchronous request
 *
 * A CONNECT_B3 confirm is sent when the N_UDATA has been submitted or
 * when the sequence fails (_WRONG_STATE).
 */
static void rtp_connect_b3_req_command(dword Id, PLCI *plci, byte Rc)
{
word Info;
word internal_command;
dbug(1, dprintf("[%06lx] %s,%d: rtp_connect_b3_req_command %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
Info = GOOD;
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
plci->command = 0;
/* fall through */
case RTP_CONNECT_B3_REQ_COMMAND_1:
if (plci_nl_busy(plci))
{
/* network layer busy: retry this state on the next callback */
plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_1;
return;
}
plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_2;
nl_req_ncci(plci, N_CONNECT, 0);
send_req(plci);
return;
case RTP_CONNECT_B3_REQ_COMMAND_2:
if ((Rc != OK) && (Rc != OK_FC))
{
dbug(1, dprintf("[%06lx] %s,%d: RTP setting connect info failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
Info = _WRONG_STATE;
break;
}
if (plci_nl_busy(plci))
{
plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_2;
return;
}
plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_3;
/* internal_req_buffer: first byte is the length, payload follows */
plci->NData[0].PLength = plci->internal_req_buffer[0];
plci->NData[0].P = plci->internal_req_buffer + 1;
plci->NL.X = plci->NData;
plci->NL.ReqCh = 0;
plci->NL.Req = plci->nl_req = (byte) N_UDATA;
plci->adapter->request(&plci->NL);
/* break (not return): the confirm is sent below right away */
break;
case RTP_CONNECT_B3_REQ_COMMAND_3:
return;
}
sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
}
/*
 * rtp_connect_b3_res_command - answer an incoming RTP B3 connection:
 * N_CONNECT_ACK first, then CONNECT_B3_ACTIVE to the application and the
 * RTP parameters as an N_UDATA frame.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: RTP connection instance
 * Rc:   return code of the previously issued asynchronous request
 */
static void rtp_connect_b3_res_command(dword Id, PLCI *plci, byte Rc)
{
word internal_command;
dbug(1, dprintf("[%06lx] %s,%d: rtp_connect_b3_res_command %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
plci->command = 0;
/* fall through */
case RTP_CONNECT_B3_RES_COMMAND_1:
if (plci_nl_busy(plci))
{
/* network layer busy: retry this state on the next callback */
plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_1;
return;
}
plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_2;
nl_req_ncci(plci, N_CONNECT_ACK, (byte)(Id >> 16));
send_req(plci);
return;
case RTP_CONNECT_B3_RES_COMMAND_2:
if ((Rc != OK) && (Rc != OK_FC))
{
dbug(1, dprintf("[%06lx] %s,%d: RTP setting connect resp info failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
break;
}
if (plci_nl_busy(plci))
{
plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_2;
return;
}
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_3;
/* internal_req_buffer: first byte is the length, payload follows */
plci->NData[0].PLength = plci->internal_req_buffer[0];
plci->NData[0].P = plci->internal_req_buffer + 1;
plci->NL.X = plci->NData;
plci->NL.ReqCh = 0;
plci->NL.Req = plci->nl_req = (byte) N_UDATA;
plci->adapter->request(&plci->NL);
return;
case RTP_CONNECT_B3_RES_COMMAND_3:
return;
}
}
/*
 * hold_save_command - save the B-channel configuration and remove L2/L3
 * when a call is put on hold; finally deliver the Hold facility
 * indication to the application.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: connection instance being held
 * Rc:   return code of the previously issued asynchronous request
 */
static void hold_save_command(dword Id, PLCI *plci, byte Rc)
{
byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
word Info;
word internal_command;
dbug(1, dprintf("[%06lx] %s,%d: hold_save_command %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
Info = GOOD;
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
/* no network layer entity: nothing to save, just indicate */
if (!plci->NL.Id)
break;
plci->command = 0;
/* configure adjust-B: save configuration and remove L2/L3 */
plci->adjust_b_parms_msg = NULL;
plci->adjust_b_facilities = plci->B1_facilities;
plci->adjust_b_command = HOLD_SAVE_COMMAND_1;
plci->adjust_b_ncci = (word)(Id >> 16);
plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23;
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: HOLD save...",
UnMapId(Id), (char *)(FILE_), __LINE__));
/* fall through */
case HOLD_SAVE_COMMAND_1:
Info = adjust_b_process(Id, plci, Rc);
if (Info != GOOD)
{
dbug(1, dprintf("[%06lx] %s,%d: HOLD save failed",
UnMapId(Id), (char *)(FILE_), __LINE__));
break;
}
if (plci->internal_command)
return;
}
sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", 3, SS_Ind);
}
/*
 * retrieve_restore_command - counterpart of hold_save_command: re-assign
 * L2/L3, reconnect and restore the saved B-channel configuration when a
 * held call is retrieved, then deliver the Retrieve facility indication.
 *
 * Id:   CAPI Id (PLCI in the low word, NCCI in the high word)
 * plci: connection instance being retrieved
 * Rc:   return code of the previously issued asynchronous request
 */
static void retrieve_restore_command(dword Id, PLCI *plci, byte Rc)
{
byte SS_Ind[] = "\x05\x03\x00\x02\x00\x00"; /* Retrieve_Ind struct*/
word Info;
word internal_command;
dbug(1, dprintf("[%06lx] %s,%d: retrieve_restore_command %02x %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
Info = GOOD;
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (internal_command)
{
default:
plci->command = 0;
/* configure adjust-B: assign L2/L3, user connect, restore config */
plci->adjust_b_parms_msg = NULL;
plci->adjust_b_facilities = plci->B1_facilities;
plci->adjust_b_command = RETRIEVE_RESTORE_COMMAND_1;
plci->adjust_b_ncci = (word)(Id >> 16);
plci->adjust_b_mode = ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_USER_CONNECT | ADJUST_B_MODE_RESTORE;
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: RETRIEVE restore...",
UnMapId(Id), (char *)(FILE_), __LINE__));
/* fall through */
case RETRIEVE_RESTORE_COMMAND_1:
Info = adjust_b_process(Id, plci, Rc);
if (Info != GOOD)
{
dbug(1, dprintf("[%06lx] %s,%d: RETRIEVE restore failed",
UnMapId(Id), (char *)(FILE_), __LINE__));
break;
}
if (plci->internal_command)
return;
}
sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", 3, SS_Ind);
}
/*
 * Initialize the B1 configuration of a freshly allocated PLCI:
 * zero the resource/facility/B-channel bookkeeping and reset every
 * per-feature configuration block (mixer, echo canceller, DTMF,
 * advanced voice, adjust-B engine) to its idle state.
 */
static void init_b1_config(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: init_b1_config",
	                (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
	                (char *)(FILE_), __LINE__));
	/* reset the bookkeeping fields first */
	plci->li_bchannel_id = 0;
	plci->B1_facilities = 0;
	plci->B1_resource = 0;
	/* then bring all feature sub-configurations to their idle state */
	mixer_clear_config(plci);
	ec_clear_config(plci);
	dtmf_rec_clear_config(plci);
	dtmf_send_clear_config(plci);
	dtmf_parameter_clear_config(plci);
	adv_voice_clear_config(plci);
	adjust_b_clear(plci);
}
/*
 * Tear down the B1 configuration of a PLCI on release: reset every
 * per-feature configuration block, give back the line-interconnect
 * B-channel slot if this PLCI owns it, and clear the resource and
 * facility bookkeeping.
 */
static void clear_b1_config(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: clear_b1_config",
	                (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
	                (char *)(FILE_), __LINE__));
	adv_voice_clear_config(plci);
	adjust_b_clear(plci);
	ec_clear_config(plci);
	dtmf_rec_clear_config(plci);
	dtmf_send_clear_config(plci);
	dtmf_parameter_clear_config(plci);
	/* release the line-interconnect slot only if this PLCI owns it */
	if (plci->li_bchannel_id != 0)
	{
		word li_idx = plci->adapter->li_base + (plci->li_bchannel_id - 1);

		if (li_config_table[li_idx].plci == plci)
		{
			mixer_clear_config(plci);
			li_config_table[li_idx].plci = NULL;
			plci->li_bchannel_id = 0;
		}
	}
	plci->B1_resource = 0;
	plci->B1_facilities = 0;
}
/* -----------------------------------------------------------------
XON protocol local helpers
----------------------------------------------------------------- */
/*
 * Drop all per-channel flow-control state owned by this PLCI on its
 * adapter (channels 1..MAX_NL_CHANNEL).
 */
static void channel_flow_control_remove(PLCI *plci) {
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;
	word ch;

	for (ch = 1; ch <= MAX_NL_CHANNEL; ch++) {
		if (adapter->ch_flow_plci[ch] != plci->Id)
			continue;
		adapter->ch_flow_plci[ch] = 0;
		adapter->ch_flow_control[ch] = 0;
	}
}
/* Clear the XON-sent marker of channel ch, if it is set. */
static void channel_x_on(PLCI *plci, byte ch) {
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;

	if (adapter->ch_flow_control[ch] & N_XON_SENT)
		adapter->ch_flow_control[ch] &= ~N_XON_SENT;
}
/*
 * Mark channel ch as flow-controlled (XOFF) with the given reason flag,
 * record this PLCI as the owner and count the pending XOFF — unless the
 * channel is already under receive flow control.
 */
static void channel_x_off(PLCI *plci, byte ch, byte flag) {
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;

	if ((adapter->ch_flow_control[ch] & N_RX_FLOW_CONTROL_MASK) != 0)
		return;
	adapter->ch_flow_control[ch] |= (N_CH_XOFF | flag);
	adapter->ch_flow_plci[ch] = plci->Id;
	adapter->ch_flow_control_pending++;
}
/*
 * Convert an XOFF'ed channel into an XON request: set N_XON_REQ and
 * drop the XOFF and connect-indication markers.  No-op if the channel
 * is not currently XOFF'ed.
 */
static void channel_request_xon(PLCI *plci, byte ch) {
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;

	if (!(adapter->ch_flow_control[ch] & N_CH_XOFF))
		return;
	adapter->ch_flow_control[ch] |= N_XON_REQ;
	adapter->ch_flow_control[ch] &= ~(N_CH_XOFF | N_XON_CONNECT_IND);
}
/*
 * Request XON on every channel of this PLCI that is XOFF'ed and was
 * flagged with N_XON_CONNECT_IND, then try to transmit one X_ON frame
 * if anything was requested.
 */
static void channel_xmit_extended_xon(PLCI *plci) {
	DIVA_CAPI_ADAPTER *a;
	/* NOTE: 'a' is not initialized yet, but ARRAY_SIZE only applies
	 * sizeof, whose operand is not evaluated, so this is well defined. */
	int max_ch = ARRAY_SIZE(a->ch_flow_control);
	int ch, requested = 0;

	if (!plci || !plci->Id || (a = plci->adapter) == NULL)
		return;
	for (ch = 0; ch < max_ch; ch++) {
		if ((a->ch_flow_control[ch] & N_CH_XOFF)
		    && (a->ch_flow_control[ch] & N_XON_CONNECT_IND)
		    && (a->ch_flow_plci[ch] == plci->Id)) {
			channel_request_xon(plci, (byte)ch);
			requested = 1;
		}
	}
	if (requested)
		channel_xmit_xon(plci);
}
/*
Try to xmit next X_ON
*/
/*
 * Round-robin search for the next channel of this PLCI with a pending
 * XON request.  Returns the channel number (advancing the round-robin
 * cursor past it) or 0 if none is pending or the adapter does not
 * support XON/XOFF flow control.
 */
static int find_channel_with_pending_x_on(DIVA_CAPI_ADAPTER *a, PLCI *plci) {
	int max_ch = ARRAY_SIZE(a->ch_flow_control);
	int ch;

	if (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL))
		return (0);
	if (a->last_flow_control_ch >= max_ch)
		a->last_flow_control_ch = 1;
	/* first pass: from the cursor to the end of the table */
	for (ch = a->last_flow_control_ch; ch < max_ch; ch++) {
		if ((a->ch_flow_control[ch] & N_XON_REQ)
		    && (a->ch_flow_plci[ch] == plci->Id)) {
			a->last_flow_control_ch = ch + 1;
			return (ch);
		}
	}
	/* second pass: wrap around from channel 1 up to the cursor */
	for (ch = 1; ch < a->last_flow_control_ch; ch++) {
		if ((a->ch_flow_control[ch] & N_XON_REQ)
		    && (a->ch_flow_plci[ch] == plci->Id)) {
			a->last_flow_control_ch = ch + 1;
			return (ch);
		}
	}
	return (0);
}
/*
 * Transmit the next pending X_ON: pick a channel with an XON request,
 * flip its state from requested to sent, and issue an empty N_XON frame
 * on it.  Nothing is done while a request is outstanding, the NL entity
 * is gone, or its removal is pending.
 */
static void channel_xmit_xon(PLCI *plci) {
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;
	byte ch;

	if (plci->nl_req || !plci->NL.Id || plci->nl_remove_id)
		return;
	ch = (byte)find_channel_with_pending_x_on(adapter, plci);
	if (ch == 0)
		return;
	adapter->ch_flow_control[ch] &= ~N_XON_REQ;
	adapter->ch_flow_control[ch] |= N_XON_SENT;
	/* build and submit an empty N_XON request on this channel */
	plci->NL.Req = plci->nl_req = (byte)N_XON;
	plci->NL.ReqCh = ch;
	plci->NL.X = plci->NData;
	plci->NL.XNum = 1;
	plci->NData[0].P = &plci->RBuffer[0];
	plci->NData[0].PLength = 0;
	plci->adapter->request(&plci->NL);
}
/*
 * Decide whether channel ch may be re-enabled (XON): count the data
 * buffers in the application pool that belong to the channel's NCCI and
 * check that a free buffer slot exists.  Returns 1 when at most two
 * buffers are queued and a free slot is available, otherwise 0.
 */
static int channel_can_xon(PLCI *plci, byte ch) {
	APPL *appl = plci->appl;
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;
	word ncci_code;
	dword queued;
	word free_slot;
	word i;

	if (!appl)
		return (0);
	ncci_code = adapter->ch_ncci[ch] | (((word) adapter->Id) << 8);
	/* scan the pool: count buffers of this NCCI, find first free slot */
	queued = 0;
	free_slot = 0xffff;
	for (i = 0; i < appl->MaxBuffer; i++) {
		if (appl->DataNCCI[i] == ncci_code)
			queued++;
		if (!appl->DataNCCI[i] && free_slot == 0xffff)
			free_slot = i;
	}
	if ((queued > 2) || (free_slot == 0xffff))
		return (0);
	return (1);
}
/*------------------------------------------------------------------*/
/*
 * CPN_filter_ok - called-party-number filter hook.
 * Stub implementation: accepts every CPN unconditionally
 * (cpn, a and offset are deliberately unused).
 */
static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *a, word offset)
{
return 1;
}
/**********************************************************************************/
/* function groups the listening applications according to the CIP mask and the */
/* Info_Mask. Each group gets just one Connect_Ind. Some application manufacturer */
/* are not multi-instance capable, so they start e.g. 30 applications what causes */
/* big problems on application level (one call, 30 Connect_Ind, ect). The */
/* function must be enabled by setting "a->group_optimization_enabled" from the */
/* OS specific part (per adapter). */
/**********************************************************************************/
/*
 * group_optimization - collapse listening applications with identical
 * CIP/Info masks into groups so that each group receives only one
 * Connect_Ind for the incoming call represented by plci.
 *
 * a:    adapter the call arrives on (holds CIP_Mask/Info_Mask per appl)
 * plci: incoming-call PLCI whose group indication mask is edited
 *
 * Only runs when a->group_optimization_enabled is set by the OS specific
 * part.  Busy applications (owning a PLCI or with a pending incoming
 * call) are left ungrouped; group MAX_CIP_TYPES is an overflow bucket
 * that always gets the call.  For every real group, all members except
 * the first have their indication bit cleared.
 *
 * NOTE(review): info_mask_group[] actually stores CIP masks and
 * cip_mask_group[] stores Info masks (see the assignments below); the
 * naming is swapped but used consistently.
 */
static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci)
{
word i, j, k, busy, group_found;
dword info_mask_group[MAX_CIP_TYPES];
dword cip_mask_group[MAX_CIP_TYPES];
word appl_number_group_type[MAX_APPL];
PLCI *auxplci;
set_group_ind_mask(plci); /* all APPLs within this inc. call are allowed to dial in */
if (!a->group_optimization_enabled)
{
dbug(1, dprintf("No group optimization"));
return;
}
dbug(1, dprintf("Group optimization = 0x%x...", a->group_optimization_enabled));
for (i = 0; i < MAX_CIP_TYPES; i++)
{
info_mask_group[i] = 0;
cip_mask_group[i] = 0;
}
for (i = 0; i < MAX_APPL; i++)
{
appl_number_group_type[i] = 0;
}
for (i = 0; i < max_appl; i++) /* check if any multi instance capable application is present */
{ /* group_optimization set to 1 means not to optimize multi-instance capable applications (default) */
if (application[i].Id && (application[i].MaxNCCI) > 1 && (a->CIP_Mask[i]) && (a->group_optimization_enabled == 1))
{
dbug(1, dprintf("Multi-Instance capable, no optimization required"));
return; /* allow good application unfiltered access */
}
}
for (i = 0; i < max_appl; i++) /* Build CIP Groups */
{
if (application[i].Id && a->CIP_Mask[i])
{
/* an application is "busy" if it already owns a PLCI or has an
   incoming call pending on any PLCI of this adapter */
for (k = 0, busy = false; k < a->max_plci; k++)
{
if (a->plci[k].Id)
{
auxplci = &a->plci[k];
if (auxplci->appl == &application[i]) /* application has a busy PLCI */
{
busy = true;
dbug(1, dprintf("Appl 0x%x is busy", i + 1));
}
else if (test_c_ind_mask_bit(auxplci, i)) /* application has an incoming call pending */
{
busy = true;
dbug(1, dprintf("Appl 0x%x has inc. call pending", i + 1));
}
}
}
for (j = 0, group_found = 0; j <= (MAX_CIP_TYPES) && !busy && !group_found; j++) /* build groups with free applications only */
{
if (j == MAX_CIP_TYPES) /* all groups are in use but group still not found */
{ /* the MAX_CIP_TYPES group enables all calls because of field overflow */
appl_number_group_type[i] = MAX_CIP_TYPES;
group_found = true;
dbug(1, dprintf("Field overflow appl 0x%x", i + 1));
}
else if ((info_mask_group[j] == a->CIP_Mask[i]) && (cip_mask_group[j] == a->Info_Mask[i]))
{ /* is group already present ? */
appl_number_group_type[i] = j | 0x80; /* store the group number for each application */
group_found = true;
dbug(1, dprintf("Group 0x%x found with appl 0x%x, CIP=0x%lx", appl_number_group_type[i], i + 1, info_mask_group[j]));
}
else if (!info_mask_group[j])
{ /* establish a new group */
appl_number_group_type[i] = j | 0x80; /* store the group number for each application */
info_mask_group[j] = a->CIP_Mask[i]; /* store the new CIP mask for the new group */
cip_mask_group[j] = a->Info_Mask[i]; /* store the new Info_Mask for this new group */
group_found = true;
dbug(1, dprintf("New Group 0x%x established with appl 0x%x, CIP=0x%lx", appl_number_group_type[i], i + 1, info_mask_group[j]));
}
}
}
}
for (i = 0; i < max_appl; i++) /* Build group_optimization_mask_table */
{
if (appl_number_group_type[i]) /* application is free, has listens and is member of a group */
{
if (appl_number_group_type[i] == MAX_CIP_TYPES)
{
dbug(1, dprintf("OverflowGroup 0x%x, valid appl = 0x%x, call enabled", appl_number_group_type[i], i + 1));
}
else
{
dbug(1, dprintf("Group 0x%x, valid appl = 0x%x", appl_number_group_type[i], i + 1));
for (j = i + 1; j < max_appl; j++) /* search other group members and mark them as busy */
{
if (appl_number_group_type[i] == appl_number_group_type[j])
{
dbug(1, dprintf("Appl 0x%x is member of group 0x%x, no call", j + 1, appl_number_group_type[j]));
clear_group_ind_mask_bit(plci, j); /* disable call on other group members */
appl_number_group_type[j] = 0; /* remove disabled group member from group list */
}
}
}
}
else /* application should not get a call */
{
clear_group_ind_mask_bit(plci, i);
}
}
}
/* OS notifies the driver about a application Capi_Register */
/*
 * CapiRegister - OS notification that an application did CAPI_REGISTER.
 *
 * id: the Id of the application that just registered
 *
 * Returns true when other applications were already registered (nothing
 * to do).  Returns false when this is the first application; in that
 * case L1 of every adapter flagged with flag_dynamic_l1_down is
 * re-activated (taken out of tristate, Huntgroup support) by assigning a
 * signalling entity and issuing an L1-start SIG_CTRL request.
 */
word CapiRegister(word id)
{
	word i, j, appls_found;
	PLCI *plci;
	DIVA_CAPI_ADAPTER *a;

	/* count applications other than the one just registering */
	for (i = 0, appls_found = 0; i < max_appl; i++)
	{
		if (application[i].Id && (application[i].Id != id))
		{
			appls_found++; /* an application has been found */
		}
	}
	if (appls_found) return true;
	/* first application: bring L1 out of tristate on all adapters that
	 * were dynamically powered down.  (The original code re-tested
	 * !appls_found inside this loop; after the early return above that
	 * condition was always true, so the redundant test is removed.) */
	for (i = 0; i < max_adapter; i++) /* scan all adapters... */
	{
		a = &adapter[i];
		if (a->request && a->flag_dynamic_l1_down)
		{
			if ((j = get_plci(a))) /* activate L1 of all adapters */
			{
				plci = &a->plci[j - 1];
				plci->command = 0;
				add_p(plci, OAD, "\x01\xfd");
				add_p(plci, CAI, "\x01\x80");
				add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
				add_p(plci, SHIFT | 6, NULL);
				add_p(plci, SIN, "\x02\x00\x00");
				plci->internal_command = START_L1_SIG_ASSIGN_PEND;
				sig_req(plci, ASSIGN, DSIG_ID);
				add_p(plci, FTY, "\x02\xff\x07"); /* l1 start */
				sig_req(plci, SIG_CTRL, 0);
				send_req(plci);
			}
		}
	}
	return false;
}
/*------------------------------------------------------------------*/
/* Functions for virtual Switching e.g. Transfer by join, Conference */
/*
 * VSwitchReqInd - process virtual-switching escape IEs (transfer by
 * join, conference) received for this PLCI and forward the appropriate
 * VSWITCH_REQ to the related party's PLCI.
 *
 * plci:  PLCI the IEs were received on (must have an appl and a State;
 *        NCR_FACILITY signalling is ignored)
 * Id:    CAPI Id (passed through, not otherwise used here)
 * parms: array of MAX_MULTI_IE vswitch IEs; each processed or rejected
 *        IE has its length byte zeroed ("killed") so it is not handled
 *        again
 */
static void VSwitchReqInd(PLCI *plci, dword Id, byte **parms)
{
word i;
/* Format of vswitch_t:
0 byte length
1 byte VSWITCHIE
2 byte VSWITCH_REQ/VSWITCH_IND
3 byte reserved
4 word VSwitchcommand
6 word returnerror
8... Params
*/
if (!plci ||
!plci->appl ||
!plci->State ||
plci->Sig.Ind == NCR_FACILITY
)
return;
for (i = 0; i < MAX_MULTI_IE; i++)
{
if (!parms[i][0]) continue;
if (parms[i][0] < 7)
{
/* shorter than the fixed vswitch_t header: invalid */
parms[i][0] = 0; /* kill it */
continue;
}
dbug(1, dprintf("VSwitchReqInd(%d)", parms[i][4]));
switch (parms[i][4])
{
case VSJOIN:
/* join is only valid while one of the two calls is in ECT state */
if (!plci->relatedPTYPLCI ||
(plci->ptyState != S_ECT && plci->relatedPTYPLCI->ptyState != S_ECT))
{ /* Error */
break;
}
/* remember all necessary informations */
if (parms[i][0] != 11 || parms[i][8] != 3) /* Length Test */
{
break;
}
if (parms[i][2] == VSWITCH_IND && parms[i][9] == 1)
{ /* first indication after ECT-Request on Consultation Call */
plci->vswitchstate = parms[i][9];
parms[i][9] = 2; /* State */
/* now ask first Call to join */
}
else if (parms[i][2] == VSWITCH_REQ && parms[i][9] == 3)
{ /* Answer of VSWITCH_REQ from first Call */
plci->vswitchstate = parms[i][9];
/* tell consultation call to join
and the protocol capabilities of the first call */
}
else
{ /* Error */
break;
}
plci->vsprot = parms[i][10]; /* protocol */
plci->vsprotdialect = parms[i][11]; /* protocoldialect */
/* send join request to related PLCI */
parms[i][1] = VSWITCHIE;
parms[i][2] = VSWITCH_REQ;
plci->relatedPTYPLCI->command = 0;
plci->relatedPTYPLCI->internal_command = VSWITCH_REQ_PEND;
add_p(plci->relatedPTYPLCI, ESC, &parms[i][0]);
sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
send_req(plci->relatedPTYPLCI);
break;
case VSTRANSPORT:
default:
/* transparent transport: only forward once both sides completed
the join handshake (vswitchstate == 3 on both PLCIs) */
if (plci->relatedPTYPLCI &&
plci->vswitchstate == 3 &&
plci->relatedPTYPLCI->vswitchstate == 3)
{
add_p(plci->relatedPTYPLCI, ESC, &parms[i][0]);
sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
send_req(plci->relatedPTYPLCI);
}
break;
}
parms[i][0] = 0; /* kill it */
}
}
/*------------------------------------------------------------------*/
/*
 * diva_get_dma_descriptor - allocate an RX DMA descriptor from the XDI
 * layer via a synchronous IDI request.
 *
 * plci:      connection whose adapter the descriptor is requested from
 * dma_magic: out parameter, receives the descriptor magic on success
 *
 * Returns the descriptor number (>= 0) on success, -1 when the XDI
 * extension is not available or the allocation failed.  The request is
 * carried in an ENTITY overlaid by an IDI_SYNC_REQ (the stack-allocated
 * ENTITY is cast to the sync-request layout used by the XDI interface).
 */
static int diva_get_dma_descriptor(PLCI *plci, dword *dma_magic) {
ENTITY e;
IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e;
if (!(diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_RX_DMA)) {
return (-1);
}
/* build the synchronous descriptor-alloc request */
pReq->xdi_dma_descriptor_operation.Req = 0;
pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC;
pReq->xdi_dma_descriptor_operation.info.descriptor_number = -1;
pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
pReq->xdi_dma_descriptor_operation.info.descriptor_magic = 0;
e.user[0] = plci->adapter->Id - 1;
plci->adapter->request((ENTITY *)pReq);
/* operation == 0 signals completion; number and magic must be valid */
if (!pReq->xdi_dma_descriptor_operation.info.operation &&
(pReq->xdi_dma_descriptor_operation.info.descriptor_number >= 0) &&
pReq->xdi_dma_descriptor_operation.info.descriptor_magic) {
*dma_magic = pReq->xdi_dma_descriptor_operation.info.descriptor_magic;
dbug(3, dprintf("dma_alloc, a:%d (%d-%08x)",
plci->adapter->Id,
pReq->xdi_dma_descriptor_operation.info.descriptor_number,
*dma_magic));
return (pReq->xdi_dma_descriptor_operation.info.descriptor_number);
} else {
dbug(1, dprintf("dma_alloc failed"));
return (-1);
}
}
/*
 * Return DMA descriptor nr to the XDI layer via a synchronous IDI
 * request.  A negative nr means "nothing allocated" and is ignored.
 * The request travels in a stack ENTITY overlaid as IDI_SYNC_REQ, the
 * same convention used by diva_get_dma_descriptor().
 */
static void diva_free_dma_descriptor(PLCI *plci, int nr) {
	ENTITY e;
	IDI_SYNC_REQ *sync_req = (IDI_SYNC_REQ *)&e;

	if (nr < 0)
		return;
	/* build the synchronous descriptor-free request */
	sync_req->xdi_dma_descriptor_operation.Req = 0;
	sync_req->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
	sync_req->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE;
	sync_req->xdi_dma_descriptor_operation.info.descriptor_number = nr;
	sync_req->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
	sync_req->xdi_dma_descriptor_operation.info.descriptor_magic = 0;
	e.user[0] = plci->adapter->Id - 1;
	plci->adapter->request((ENTITY *)sync_req);
	/* operation == 0 signals successful completion */
	if (sync_req->xdi_dma_descriptor_operation.info.operation == 0)
		dbug(1, dprintf("dma_free(%d)", nr));
	else
		dbug(1, dprintf("dma_free failed (%d)", nr));
}
/*------------------------------------------------------------------*/
| gpl-2.0 |
WildfireDEV/android_kernel_htc_m7 | samples/hidraw/hid-example.c | 8171 | 3905 | /*
* Hidraw Userspace Example
*
* Copyright (c) 2010 Alan Ott <alan@signal11.us>
* Copyright (c) 2010 Signal 11 Software
*
* The code may be used by anyone for any purpose,
* and can serve as a starting point for developing
* applications using hidraw.
*/
/* Linux */
#include <linux/types.h>
#include <linux/input.h>
#include <linux/hidraw.h>
/*
* Ugly hack to work around failing compilation on systems that don't
* yet populate new version of hidraw.h to userspace.
*
* If you need this, please have your distro update the kernel headers.
*/
#ifndef HIDIOCSFEATURE
#define HIDIOCSFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len)
#define HIDIOCGFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len)
#endif
/* Unix */
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
/* C */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
const char *bus_str(int bus);
/*
 * Demonstrate the hidraw ioctl interface on /dev/hidraw0: query the
 * report descriptor, names and bus info, then exercise feature
 * set/get and a plain report write/read.
 *
 * Returns 0 on success, 1 if the device node cannot be opened.
 * Fix: the message printed after a successful HIDIOCSFEATURE ioctl
 * wrongly named HIDIOCGFEATURE.
 */
int main(int argc, char **argv)
{
	int fd;
	int i, res, desc_size = 0;
	char buf[256];
	struct hidraw_report_descriptor rpt_desc;
	struct hidraw_devinfo info;

	/* Open the Device with non-blocking reads. In real life,
	   don't use a hard coded path; use libudev instead. */
	fd = open("/dev/hidraw0", O_RDWR|O_NONBLOCK);

	if (fd < 0) {
		perror("Unable to open device");
		return 1;
	}

	memset(&rpt_desc, 0x0, sizeof(rpt_desc));
	memset(&info, 0x0, sizeof(info));
	memset(buf, 0x0, sizeof(buf));

	/* Get Report Descriptor Size */
	res = ioctl(fd, HIDIOCGRDESCSIZE, &desc_size);
	if (res < 0)
		perror("HIDIOCGRDESCSIZE");
	else
		printf("Report Descriptor Size: %d\n", desc_size);

	/* Get Report Descriptor: size must be filled in first. */
	rpt_desc.size = desc_size;
	res = ioctl(fd, HIDIOCGRDESC, &rpt_desc);
	if (res < 0) {
		perror("HIDIOCGRDESC");
	} else {
		printf("Report Descriptor:\n");
		for (i = 0; i < rpt_desc.size; i++)
			printf("%hhx ", rpt_desc.value[i]);
		puts("\n");
	}

	/* Get Raw Name */
	res = ioctl(fd, HIDIOCGRAWNAME(256), buf);
	if (res < 0)
		perror("HIDIOCGRAWNAME");
	else
		printf("Raw Name: %s\n", buf);

	/* Get Physical Location */
	res = ioctl(fd, HIDIOCGRAWPHYS(256), buf);
	if (res < 0)
		perror("HIDIOCGRAWPHYS");
	else
		printf("Raw Phys: %s\n", buf);

	/* Get Raw Info (bus type, vendor, product) */
	res = ioctl(fd, HIDIOCGRAWINFO, &info);
	if (res < 0) {
		perror("HIDIOCGRAWINFO");
	} else {
		printf("Raw Info:\n");
		printf("\tbustype: %d (%s)\n",
			info.bustype, bus_str(info.bustype));
		printf("\tvendor: 0x%04hx\n", info.vendor);
		printf("\tproduct: 0x%04hx\n", info.product);
	}

	/* Set Feature: buf[0] is the report number, rest is payload. */
	buf[0] = 0x9; /* Report Number */
	buf[1] = 0xff;
	buf[2] = 0xff;
	buf[3] = 0xff;
	res = ioctl(fd, HIDIOCSFEATURE(4), buf);
	if (res < 0)
		perror("HIDIOCSFEATURE");
	else
		/* Was "HIDIOCGFEATURE" — this path is the *set* ioctl. */
		printf("ioctl HIDIOCSFEATURE returned: %d\n", res);

	/* Get Feature for the same report number. */
	buf[0] = 0x9; /* Report Number */
	res = ioctl(fd, HIDIOCGFEATURE(256), buf);
	if (res < 0) {
		perror("HIDIOCGFEATURE");
	} else {
		printf("ioctl HIDIOCGFEATURE returned: %d\n", res);
		printf("Report data (not containing the report number):\n\t");
		for (i = 0; i < res; i++)
			printf("%hhx ", buf[i]);
		puts("\n");
	}

	/* Send a Report to the Device: first byte is the report number. */
	buf[0] = 0x1; /* Report Number */
	buf[1] = 0x77;
	res = write(fd, buf, 2);
	if (res < 0) {
		printf("Error: %d\n", errno);
		perror("write");
	} else {
		printf("write() wrote %d bytes\n", res);
	}

	/* Get a report from the device (non-blocking; may fail with EAGAIN). */
	res = read(fd, buf, 16);
	if (res < 0) {
		perror("read");
	} else {
		printf("read() read %d bytes:\n\t", res);
		for (i = 0; i < res; i++)
			printf("%hhx ", buf[i]);
		puts("\n");
	}
	close(fd);
	return 0;
}
/*
 * Map a HID bus type constant (BUS_* from <linux/input.h>) to a
 * human-readable name.  Unrecognized values map to "Other".
 */
const char *
bus_str(int bus)
{
	if (bus == BUS_USB)
		return "USB";
	if (bus == BUS_HIL)
		return "HIL";
	if (bus == BUS_BLUETOOTH)
		return "Bluetooth";
	if (bus == BUS_VIRTUAL)
		return "Virtual";
	return "Other";
}
| gpl-2.0 |
crewrktablets/android_kernel_odys_RK30_3.0.8 | fs/drop_caches.c | 8683 | 1543 | /*
* Implement the manual drop-all-pagecache function
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include "internal.h"
/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;
/*
 * Invalidate the clean page cache of every inode on @sb.
 *
 * Walks sb->s_inodes under inode_sb_list_lock; the lock is dropped
 * around invalidate_mapping_pages(), so a reference (__iget) pins the
 * current inode while we work on it.  The previous inode's reference
 * is only dropped (iput) after the lock has been released, because
 * iput() may sleep.  @unused exists to match the iterate_supers()
 * callback signature.
 */
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/* Skip inodes being torn down / not yet set up, and
		 * inodes with no cached pages to invalidate. */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (inode->i_mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		/* Drop the *previous* inode now that we hold no locks. */
		iput(toput_inode);
		toput_inode = inode;
		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(toput_inode);
}
/*
 * Shrink all registered slab caches until a reclaim pass frees
 * almost nothing (<= 10 objects).
 */
static void drop_slab(void)
{
	int freed;
	struct shrink_control shrink = {
		.gfp_mask = GFP_KERNEL,
	};

	do {
		freed = shrink_slab(&shrink, 1000, 1000);
	} while (freed > 10);
}
/*
 * sysctl handler for vm.drop_caches.  On write: bit 0 drops the page
 * cache of every superblock, bit 1 drops slab caches.  Reads just
 * report the stored value via proc_dointvec_minmax().
 */
int drop_caches_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int err = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (err)
		return err;
	if (!write)
		return 0;

	if (sysctl_drop_caches & 1)
		iterate_supers(drop_pagecache_sb, NULL);
	if (sysctl_drop_caches & 2)
		drop_slab();

	return 0;
}
| gpl-2.0 |
peat-psuwit/android_kernel_lge_w7ds | drivers/media/dvb/mantis/mantis_pcmcia.c | 9451 | 3573 | /*
Mantis PCI bridge driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "mantis_common.h"
#include "mantis_link.h" /* temporary due to physical layer stuff */
#include "mantis_reg.h"
/*
* If Slot state is already PLUG_IN event and we are called
* again, definitely it is jitter alone
*/
/*
 * Handle a CAM plug-in event.  Only acts when the slot was previously
 * empty (MODULE_XTRACTED) — repeated plug-in events are treated as
 * jitter (see comment above).  Resets the card, then flips the GPIF
 * IRQ config to watch for plug-OUT instead of plug-IN.
 */
void mantis_event_cam_plugin(struct mantis_ca *ca)
{
	struct mantis_pci *mantis = ca->ca_priv;

	u32 gpif_irqcfg;

	if (ca->slot_state == MODULE_XTRACTED) {
		dprintk(MANTIS_DEBUG, 1, "Event: CAM Plugged IN: Adapter(%d) Slot(0)", mantis->num);
		udelay(50);
		/* Card reset command word — hardware magic value. */
		mmwrite(0xda000000, MANTIS_CARD_RESET);
		/* Arm plug-out detection, disarm plug-in detection. */
		gpif_irqcfg = mmread(MANTIS_GPIF_IRQCFG);
		gpif_irqcfg |= MANTIS_MASK_PLUGOUT;
		gpif_irqcfg &= ~MANTIS_MASK_PLUGIN;
		mmwrite(gpif_irqcfg, MANTIS_GPIF_IRQCFG);
		udelay(500);
		ca->slot_state = MODULE_INSERTED;
	}
	udelay(100);
}
/*
* If Slot state is already UN_PLUG event and we are called
* again, definitely it is jitter alone
*/
/*
 * Handle a CAM unplug event — the mirror image of
 * mantis_event_cam_plugin(): only acts when a module was inserted,
 * resets the card and re-arms plug-IN detection.
 */
void mantis_event_cam_unplug(struct mantis_ca *ca)
{
	struct mantis_pci *mantis = ca->ca_priv;

	u32 gpif_irqcfg;

	if (ca->slot_state == MODULE_INSERTED) {
		dprintk(MANTIS_DEBUG, 1, "Event: CAM Unplugged: Adapter(%d) Slot(0)", mantis->num);
		udelay(50);
		/* Card reset command word — note: byte-swapped variant of
		 * the plug-in reset value; hardware magic. */
		mmwrite(0x00da0000, MANTIS_CARD_RESET);
		/* Arm plug-in detection, disarm plug-out detection. */
		gpif_irqcfg = mmread(MANTIS_GPIF_IRQCFG);
		gpif_irqcfg |= MANTIS_MASK_PLUGIN;
		gpif_irqcfg &= ~MANTIS_MASK_PLUGOUT;
		mmwrite(gpif_irqcfg, MANTIS_GPIF_IRQCFG);
		udelay(500);
		ca->slot_state = MODULE_XTRACTED;
	}
	udelay(100);
}
/*
 * Probe the PCMCIA slot state at start-up, arm the matching detection
 * interrupt and notify the DVB CA core of the initial state.
 * Always returns 0.
 */
int mantis_pcmcia_init(struct mantis_ca *ca)
{
	struct mantis_pci *mantis = ca->ca_priv;

	u32 gpif_stat, card_stat;

	/* Enable slot event interrupt delivery (IRQ0). */
	mmwrite(mmread(MANTIS_INT_MASK) | MANTIS_INT_IRQ0, MANTIS_INT_MASK);
	gpif_stat = mmread(MANTIS_GPIF_STATUS);
	card_stat = mmread(MANTIS_GPIF_IRQCFG);

	if (gpif_stat & MANTIS_GPIF_DETSTAT) {
		/* CAM already present: watch for plug-out next. */
		dprintk(MANTIS_DEBUG, 1, "CAM found on Adapter(%d) Slot(0)", mantis->num);
		mmwrite(card_stat | MANTIS_MASK_PLUGOUT, MANTIS_GPIF_IRQCFG);
		ca->slot_state = MODULE_INSERTED;
		dvb_ca_en50221_camchange_irq(&ca->en50221,
					     0,
					     DVB_CA_EN50221_CAMCHANGE_INSERTED);
	} else {
		/* Slot empty: watch for plug-in. */
		dprintk(MANTIS_DEBUG, 1, "Empty Slot on Adapter(%d) Slot(0)", mantis->num);
		mmwrite(card_stat | MANTIS_MASK_PLUGIN, MANTIS_GPIF_IRQCFG);
		ca->slot_state = MODULE_XTRACTED;
		dvb_ca_en50221_camchange_irq(&ca->en50221,
					     0,
					     DVB_CA_EN50221_CAMCHANGE_REMOVED);
	}

	return 0;
}
/*
 * Tear down slot event handling: clear the latched plug-in/plug-out
 * status bits and mask the slot interrupt (IRQ0).
 */
void mantis_pcmcia_exit(struct mantis_ca *ca)
{
	struct mantis_pci *mantis = ca->ca_priv;

	/*
	 * Fix: the original mask (~MANTIS_CARD_PLUGOUT | ~MANTIS_CARD_PLUGIN)
	 * equals ~(PLUGOUT & PLUGIN) by De Morgan, i.e. all-ones for two
	 * distinct single-bit flags, so it cleared neither bit.  The intent
	 * is clearly to clear both status bits.
	 */
	mmwrite(mmread(MANTIS_GPIF_STATUS) &
		~(MANTIS_CARD_PLUGOUT | MANTIS_CARD_PLUGIN), MANTIS_GPIF_STATUS);

	mmwrite(mmread(MANTIS_INT_MASK) & ~MANTIS_INT_IRQ0, MANTIS_INT_MASK);
}
| gpl-2.0 |
jtoppins/net-next | drivers/ssb/driver_mipscore.c | 1260 | 8899 | /*
* Sonics Silicon Backplane
* Broadcom MIPS core driver
*
* Copyright 2005, Broadcom Corporation
* Copyright 2006, 2007, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/ssb/ssb.h>
#include <linux/mtd/physmap.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/time.h>
#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
#endif
#include "ssb_private.h"
/* MTD partition parsers to try on the parallel flash, in order. */
static const char * const part_probes[] = { "bcm47xxpart", NULL };

static struct physmap_flash_data ssb_pflash_data = {
	.part_probe_types	= part_probes,
};

/* Window start/end filled in by ssb_mips_flash_detect(). */
static struct resource ssb_pflash_resource = {
	.name	= "ssb_pflash",
	.flags	= IORESOURCE_MEM,
};

/* Platform device binding the detected parallel flash to physmap. */
struct platform_device ssb_pflash_dev = {
	.name		= "physmap-flash",
	.dev		= {
		.platform_data	= &ssb_pflash_data,
	},
	.resource	= &ssb_pflash_resource,
	.num_resources	= 1,
};
/* Thin wrapper: read a 32-bit register of the MIPS core. */
static inline u32 mips_read32(struct ssb_mipscore *mcore,
			      u16 offset)
{
	return ssb_read32(mcore->dev, offset);
}

/* Thin wrapper: write a 32-bit register of the MIPS core. */
static inline void mips_write32(struct ssb_mipscore *mcore,
				u16 offset,
				u32 value)
{
	ssb_write32(mcore->dev, offset, value);
}
/*
 * Per-IRQ (1..4) bit masks and shifts into the SSB_IPSFLAG register.
 * Index 0 is a placeholder so the tables can be indexed directly by
 * irq number; IRQ 0 is routed via SSB_INTVEC instead.
 */
static const u32 ipsflag_irq_mask[] = {
	0,
	SSB_IPSFLAG_IRQ1,
	SSB_IPSFLAG_IRQ2,
	SSB_IPSFLAG_IRQ3,
	SSB_IPSFLAG_IRQ4,
};

static const u32 ipsflag_irq_shift[] = {
	0,
	SSB_IPSFLAG_IRQ1_SHIFT,
	SSB_IPSFLAG_IRQ2_SHIFT,
	SSB_IPSFLAG_IRQ3_SHIFT,
	SSB_IPSFLAG_IRQ4_SHIFT,
};
/*
 * Return the backplane flag number this core uses to signal its IRQ,
 * or 0x3f when the core exposes no TPS flag (IRQs unsupported).
 *
 * The register value is already in @tpsflag; re-reading SSB_TPSFLAG a
 * second time (as the original did) costs an extra MMIO access for no
 * benefit.
 */
static inline u32 ssb_irqflag(struct ssb_device *dev)
{
	u32 tpsflag = ssb_read32(dev, SSB_TPSFLAG);

	if (tpsflag)
		return tpsflag & SSB_TPSFLAG_BPFLAG;

	/* interrupt not supported by this core */
	return 0x3f;
}
/*
 * Find the device on @rdev's bus whose backplane IRQ flag equals
 * @irqflag, or NULL when no core uses that flag.
 */
static struct ssb_device *find_device(struct ssb_device *rdev, int irqflag)
{
	struct ssb_bus *bus = rdev->bus;
	int idx;

	for (idx = 0; idx < bus->nr_devices; idx++) {
		struct ssb_device *candidate = &(bus->devices[idx]);

		if (ssb_irqflag(candidate) == irqflag)
			return candidate;
	}

	return NULL;
}
/* Get the MIPS IRQ assignment for a specified device.
* If unassigned, 0 is returned.
* If disabled, 5 is returned.
* If not supported, 6 is returned.
*/
unsigned int ssb_mips_irq(struct ssb_device *dev)
{
	struct ssb_bus *bus = dev->bus;
	struct ssb_device *mdev = bus->mipscore.dev;
	u32 irqflag;
	u32 ipsflag;
	u32 tmp;
	unsigned int irq;

	irqflag = ssb_irqflag(dev);
	/* 0x3f means the core has no TPS flag — IRQ unsupported. */
	if (irqflag == 0x3f)
		return 6;
	ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG);
	/* Search dedicated IRQ lines 1..4 for this core's flag. */
	for (irq = 1; irq <= 4; irq++) {
		tmp = ((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]);
		if (tmp == irqflag)
			break;
	}
	/* irq == 5 here means "not found in 1..4": either the flag is
	 * enabled in INTVEC (shared IRQ 0) or the IRQ is disabled (5). */
	if (irq	== 5) {
		if ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))
			irq = 0;
	}

	return irq;
}
/*
 * Detach backplane IRQ line @irq in the MIPS core: IRQ 0 is cleared by
 * zeroing SSB_INTVEC, IRQs 1..4 by setting their mask in SSB_IPSFLAG.
 */
static void clear_irq(struct ssb_bus *bus, unsigned int irq)
{
	struct ssb_device *mips = bus->mipscore.dev;

	if (irq != 0) {
		ssb_write32(mips, SSB_IPSFLAG,
			    ssb_read32(mips, SSB_IPSFLAG) |
			    ipsflag_irq_mask[irq]);
		return;
	}
	ssb_write32(mips, SSB_INTVEC, 0);
}
/*
 * Route @dev's backplane flag to MIPS IRQ line @irq (0 = shared line).
 * If the requested dedicated line 1..4 is already used by another
 * core, that core is demoted to the shared IRQ 0 first (one level of
 * recursion).  dev->irq is stored as the Linux IRQ number (irq + 2).
 */
static void set_irq(struct ssb_device *dev, unsigned int irq)
{
	unsigned int oldirq = ssb_mips_irq(dev);
	struct ssb_bus *bus = dev->bus;
	struct ssb_device *mdev = bus->mipscore.dev;
	u32 irqflag = ssb_irqflag(dev);

	/* Callers must not pass a device without IRQ support. */
	BUG_ON(oldirq == 6);

	dev->irq = irq + 2;

	/* clear the old irq */
	if (oldirq == 0)
		ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC)));
	else if (oldirq != 5)
		clear_irq(bus, oldirq);

	/* assign the new one */
	if (irq == 0) {
		ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) | ssb_read32(mdev, SSB_INTVEC)));
	} else {
		u32 ipsflag = ssb_read32(mdev, SSB_IPSFLAG);
		if ((ipsflag & ipsflag_irq_mask[irq]) != ipsflag_irq_mask[irq]) {
			/* Line already carries some core's flag: evict
			 * that core to the shared IRQ 0. */
			u32 oldipsflag = (ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq];
			struct ssb_device *olddev = find_device(dev, oldipsflag);
			if (olddev)
				set_irq(olddev, 0);
		}
		irqflag <<= ipsflag_irq_shift[irq];
		irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
		ssb_write32(mdev, SSB_IPSFLAG, irqflag);
	}
	ssb_dbg("set_irq: core 0x%04x, irq %d => %d\n",
		dev->id.coreid, oldirq+2, irq+2);
}
/*
 * Debug helper: print all possible IRQ assignments for a core with a
 * "*" marking the active one.  irq values map to the irq_name table:
 * 0 => shared line "2(S)", 1..4 => lines 3..6, 5 => "D"(isabled),
 * 6 => "I" (IRQ not supported).
 */
static void print_irq(struct ssb_device *dev, unsigned int irq)
{
	static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
	ssb_dbg("core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n",
		dev->id.coreid,
		irq_name[0], irq == 0 ? "*" : " ",
		irq_name[1], irq == 1 ? "*" : " ",
		irq_name[2], irq == 2 ? "*" : " ",
		irq_name[3], irq == 3 ? "*" : " ",
		irq_name[4], irq == 4 ? "*" : " ",
		irq_name[5], irq == 5 ? "*" : " ",
		irq_name[6], irq == 6 ? "*" : " ");
}
/* Debug helper: print the IRQ assignment of every core on the bus. */
static void dump_irq(struct ssb_bus *bus)
{
	int idx;

	for (idx = 0; idx < bus->nr_devices; idx++) {
		struct ssb_device *core = &(bus->devices[idx]);

		print_irq(core, ssb_mips_irq(core));
	}
}
/*
 * Discover the UARTs attached to this bus.  The EXTIF core is
 * preferred; otherwise ChipCommon is used; with neither present there
 * are no serial ports.
 */
static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus = mcore->dev->bus;

	mcore->nr_serial_ports = 0;
	if (ssb_extif_available(&bus->extif))
		mcore->nr_serial_ports =
			ssb_extif_serial_init(&bus->extif, mcore->serial_ports);
	else if (ssb_chipco_available(&bus->chipco))
		mcore->nr_serial_ports =
			ssb_chipco_serial_init(&bus->chipco, mcore->serial_ports);
}
/*
 * Detect the system flash (serial or parallel) and, when found,
 * initialize nvram from it (BCM47xx) and fill in the physmap platform
 * data for parallel flash.
 */
static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus = mcore->dev->bus;
	struct ssb_sflash *sflash = &mcore->sflash;
	struct ssb_pflash *pflash = &mcore->pflash;

	/* When there is no chipcommon on the bus there is 4MB flash */
	if (!ssb_chipco_available(&bus->chipco)) {
		pflash->present = true;
		pflash->buswidth = 2;
		pflash->window = SSB_FLASH1;
		pflash->window_size = SSB_FLASH1_SZ;
		goto ssb_pflash;
	}

	/* There is ChipCommon, so use it to read info about flash */
	switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
	case SSB_CHIPCO_FLASHT_STSER:
	case SSB_CHIPCO_FLASHT_ATSER:
		pr_debug("Found serial flash\n");
		ssb_sflash_init(&bus->chipco);
		break;
	case SSB_CHIPCO_FLASHT_PARA:
		pr_debug("Found parallel flash\n");
		pflash->present = true;
		pflash->window = SSB_FLASH2;
		pflash->window_size = SSB_FLASH2_SZ;
		/* Bus width from the DS16 config bit: 8 vs 16 bit. */
		if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
		               & SSB_CHIPCO_CFG_DS16) == 0)
			pflash->buswidth = 1;
		else
			pflash->buswidth = 2;
		break;
	}

ssb_pflash:
	/* Serial flash wins over parallel when both were detected. */
	if (sflash->present) {
#ifdef CONFIG_BCM47XX
		bcm47xx_nvram_init_from_mem(sflash->window, sflash->size);
#endif
	} else if (pflash->present) {
#ifdef CONFIG_BCM47XX
		bcm47xx_nvram_init_from_mem(pflash->window, pflash->window_size);
#endif
		/* Hand the window to the physmap platform device. */
		ssb_pflash_data.width = pflash->buswidth;
		ssb_pflash_resource.start = pflash->window;
		ssb_pflash_resource.end = pflash->window + pflash->window_size;
	}
}
/*
 * Return the CPU clock rate in Hz, or 0 when no clock source (PMU,
 * EXTIF or ChipCommon) is available.
 */
u32 ssb_cpu_clock(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus = mcore->dev->bus;
	u32 pll_type, n, m, hz;

	/* A PMU, when present, is authoritative. */
	if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)
		return ssb_pmu_get_cpu_clock(&bus->chipco);

	if (ssb_extif_available(&bus->extif))
		ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m);
	else if (ssb_chipco_available(&bus->chipco))
		ssb_chipco_get_clockcpu(&bus->chipco, &pll_type, &n, &m);
	else
		return 0;

	/* PLL type 5 and chip 0x5365 run at a fixed 200 MHz. */
	if ((pll_type == SSB_PLLTYPE_5) || (bus->chip_id == 0x5365))
		hz = 200000000;
	else
		hz = ssb_calc_clock_rate(pll_type, n, m);

	/* PLL type 6 reports half the CPU rate. */
	if (pll_type == SSB_PLLTYPE_6)
		hz *= 2;

	return hz;
}
/*
 * Initialize the MIPS core: program bus timing from the measured
 * clock, hand out backplane IRQ lines to the cores that want one,
 * then probe serial ports and flash.  No-op when the bus has no MIPS
 * core.
 */
void ssb_mipscore_init(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus;
	struct ssb_device *dev;
	unsigned long hz, ns;
	unsigned int irq, i;

	if (!mcore->dev)
		return; /* We don't have a MIPS core */

	ssb_dbg("Initializing MIPS core...\n");

	bus = mcore->dev->bus;
	hz = ssb_clockspeed(bus);
	if (!hz)
		hz = 100000000;		/* fall back to 100 MHz */
	ns = 1000000000 / hz;		/* clock period in nanoseconds */

	if (ssb_extif_available(&bus->extif))
		ssb_extif_timing_init(&bus->extif, ns);
	else if (ssb_chipco_available(&bus->chipco))
		ssb_chipco_timing_init(&bus->chipco, ns);

	/* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */
	for (irq = 2, i = 0; i < bus->nr_devices; i++) {
		int mips_irq;
		dev = &(bus->devices[i]);
		mips_irq = ssb_mips_irq(dev);
		if (mips_irq > 4)
			dev->irq = 0;		/* disabled / unsupported */
		else
			dev->irq = mips_irq + 2;
		if (dev->irq > 5)
			continue;
		switch (dev->id.coreid) {
		case SSB_DEV_USB11_HOST:
			/* shouldn't need a separate irq line for non-4710, most of them have a proper
			 * external usb controller on the pci */
			if ((bus->chip_id == 0x4710) && (irq <= 4)) {
				set_irq(dev, irq++);
			}
			break;
		case SSB_DEV_PCI:
		case SSB_DEV_ETHERNET:
		case SSB_DEV_ETHERNET_GBIT:
		case SSB_DEV_80211:
		case SSB_DEV_USB20_HOST:
			/* These devices get their own IRQ line if available, the rest goes on IRQ0 */
			if (irq <= 4) {
				set_irq(dev, irq++);
				break;
			}
			/* fallthrough */
		case SSB_DEV_EXTIF:
			set_irq(dev, 0);	/* shared IRQ line */
			break;
		}
	}
	ssb_dbg("after irq reconfiguration\n");
	dump_irq(bus);

	ssb_mips_serial_init(mcore);
	ssb_mips_flash_detect(mcore);
}
| gpl-2.0 |
Windeal/Linux-3.12.36 | kernel/rtmutex-debug.c | 2284 | 4821 | /*
* RT-Mutexes: blocking mutual exclusion locks with PI support
*
* started by Ingo Molnar and Thomas Gleixner:
*
* Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
* This code is based on the rt.c implementation in the preempt-rt tree.
* Portions of said code are
*
* Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey
* Copyright (C) 2006 Esben Nielsen
* Copyright (C) 2006 Kihon Technologies Inc.,
* Steven Rostedt <rostedt@goodmis.org>
*
* See rt.c in preempt-rt for proper credits and further information
*/
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/plist.h>
#include <linux/fs.h>
#include <linux/debug_locks.h>
#include "rtmutex_common.h"
/* Print "<comm>:<pid> [ptr, prio]" for @p, or "<none>" for NULL. */
static void printk_task(struct task_struct *p)
{
	if (!p) {
		printk("<none>");
		return;
	}
	printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio);
}
/*
 * Print an rt_mutex's identity (name, or file:line when unnamed) and,
 * when @print_owner is set and the lock is held, its owner task.
 */
static void printk_lock(struct rt_mutex *lock, int print_owner)
{
	if (lock->name)
		printk(" [%p] {%s}\n",
			lock, lock->name);
	else
		printk(" [%p] {%s:%d}\n",
			lock, lock->file, lock->line);

	if (!print_owner || !rt_mutex_owner(lock))
		return;

	printk(".. ->owner: %p\n", lock->owner);
	printk(".. held by:  ");
	printk_task(rt_mutex_owner(lock));
	printk("\n");
}
/*
 * Sanity check at task teardown: a dying task must neither own
 * PI waiters nor be blocked on an rt_mutex.
 */
void rt_mutex_debug_task_free(struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters));
	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
/*
* We fill out the fields in the waiter to store the information about
* the deadlock. We print when we return. act_waiter can be NULL in
* case of a remove waiter operation.
*/
void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
			     struct rt_mutex *lock)
{
	struct task_struct *task;

	/* Nothing to record when lockdep is off, deadlock detection is
	 * active (it will report), or this is a remove-waiter call. */
	if (!debug_locks || detect || !act_waiter)
		return;

	task = rt_mutex_owner(act_waiter->lock);
	if (task && task != current) {
		/* Pin the owner's pid and the contended lock for the
		 * later report in debug_rt_mutex_print_deadlock(). */
		act_waiter->deadlock_task_pid = get_pid(task_pid(task));
		act_waiter->deadlock_lock = lock;
	}
}
/*
 * Emit the circular-deadlock report recorded by
 * debug_rt_mutex_deadlock(): both tasks, both locks, held locks and
 * stack dumps.  Disables further lock debugging (debug_locks_off) so
 * the report is printed at most once.
 */
void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
{
	struct task_struct *task;

	if (!waiter->deadlock_lock || !debug_locks)
		return;

	/* The pid may outlive the task; resolve it under RCU. */
	rcu_read_lock();
	task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
	if (!task) {
		rcu_read_unlock();
		return;
	}

	/* Returns 0 when debugging was already off — someone else
	 * reported first. */
	if (!debug_locks_off()) {
		rcu_read_unlock();
		return;
	}

	printk("\n============================================\n");
	printk(  "[ BUG: circular locking deadlock detected! ]\n");
	printk("%s\n", print_tainted());
	printk(  "--------------------------------------------\n");
	printk("%s/%d is deadlocking current task %s/%d\n\n",
	       task->comm, task_pid_nr(task),
	       current->comm, task_pid_nr(current));

	printk("\n1) %s/%d is trying to acquire this lock:\n",
	       current->comm, task_pid_nr(current));
	printk_lock(waiter->lock, 1);

	printk("\n2) %s/%d is blocked on this lock:\n",
		task->comm, task_pid_nr(task));
	printk_lock(waiter->deadlock_lock, 1);

	debug_show_held_locks(current);
	debug_show_held_locks(task);

	printk("\n%s/%d's [blocked] stackdump:\n\n",
		task->comm, task_pid_nr(task));
	show_stack(task, NULL);
	printk("\n%s/%d's [current] stackdump:\n\n",
		current->comm, task_pid_nr(current));
	dump_stack();
	debug_show_all_locks();
	rcu_read_unlock();

	printk("[ turning off deadlock detection."
	       "Please report this trace. ]\n\n");
}
/* Hook on lock acquisition — no debug checks needed here. */
void debug_rt_mutex_lock(struct rt_mutex *lock)
{
}

/* On unlock, only the current owner may release the lock. */
void debug_rt_mutex_unlock(struct rt_mutex *lock)
{
	DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}

/* Hook when a lock is taken on behalf of another task — no checks. */
void
debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner)
{
}

/* A proxy unlock requires the lock to actually have an owner. */
void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
	DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}
/*
 * Prepare a waiter for use: poison it with 0x11 so use of an
 * uninitialized field is recognizable, then initialize the plist
 * nodes and the deadlock bookkeeping.
 */
void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	memset(waiter, 0x11, sizeof(*waiter));
	plist_node_init(&waiter->list_entry, MAX_PRIO);
	plist_node_init(&waiter->pi_list_entry, MAX_PRIO);
	waiter->deadlock_task_pid = NULL;
}
/*
 * Tear down a waiter: drop the pinned deadlock pid, verify it is no
 * longer enqueued anywhere, and poison it with 0x22 so use-after-free
 * is recognizable.
 */
void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
	put_pid(waiter->deadlock_task_pid);
	DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry));
	DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
	memset(waiter, 0x22, sizeof(*waiter));
}
/* Record the lock's debug name and check it isn't still held. */
void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lock->name = name;
}
/* Lock/unlock accounting hooks — unused in this debug build. */
void
rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
{
}

void rt_mutex_deadlock_account_unlock(struct task_struct *task)
{
}
| gpl-2.0 |
TeamCarbonXtremeARMv7/android_kernel_samsung_golden | arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c | 2284 | 5116 | /*
*
* Copyright (C) 2010 Eric Bénard <eric@eukrea.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/i2c/tsc2007.h>
#include <linux/leds.h>
#include <mach/common.h>
#include <mach/hardware.h>
#include <mach/iomux-mx51.h>
#include <asm/mach/arch.h>
#include "devices-imx51.h"
#include "devices.h"
#define MBIMX51_TSC2007_GPIO IMX_GPIO_NR(3, 30)
#define MBIMX51_TSC2007_IRQ (MXC_INTERNAL_IRQS + MBIMX51_TSC2007_GPIO)
#define MBIMX51_LED0 IMX_GPIO_NR(3, 5)
#define MBIMX51_LED1 IMX_GPIO_NR(3, 6)
#define MBIMX51_LED2 IMX_GPIO_NR(3, 7)
#define MBIMX51_LED3 IMX_GPIO_NR(3, 8)
/* The four active-low user LEDs, each with a default trigger. */
static struct gpio_led mbimx51_leds[] = {
	{
		.name			= "led0",
		.default_trigger	= "heartbeat",
		.active_low		= 1,
		.gpio			= MBIMX51_LED0,
	},
	{
		.name			= "led1",
		.default_trigger	= "nand-disk",
		.active_low		= 1,
		.gpio			= MBIMX51_LED1,
	},
	{
		.name			= "led2",
		.default_trigger	= "mmc0",
		.active_low		= 1,
		.gpio			= MBIMX51_LED2,
	},
	{
		.name			= "led3",
		.default_trigger	= "default-on",
		.active_low		= 1,
		.gpio			= MBIMX51_LED3,
	},
};

static struct gpio_led_platform_data mbimx51_leds_info = {
	.leds		= mbimx51_leds,
	.num_leds	= ARRAY_SIZE(mbimx51_leds),
};

/* leds-gpio platform device driving the table above. */
static struct platform_device mbimx51_leds_gpio = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= {
		.platform_data	= &mbimx51_leds_info,
	},
};

/* Devices registered in eukrea_mbimx51_baseboard_init(). */
static struct platform_device *devices[] __initdata = {
	&mbimx51_leds_gpio,
};
/* Pad mux configuration for everything on the MBIMX51 baseboard. */
static iomux_v3_cfg_t mbimx51_pads[] = {
	/* UART2 */
	MX51_PAD_UART2_RXD__UART2_RXD,
	MX51_PAD_UART2_TXD__UART2_TXD,
	/* UART3 */
	MX51_PAD_UART3_RXD__UART3_RXD,
	MX51_PAD_UART3_TXD__UART3_TXD,
	MX51_PAD_KEY_COL4__UART3_RTS,
	MX51_PAD_KEY_COL5__UART3_CTS,
	/* TSC2007 IRQ */
	MX51_PAD_NANDF_D10__GPIO3_30,
	/* LEDS */
	MX51_PAD_DISPB2_SER_DIN__GPIO3_5,
	MX51_PAD_DISPB2_SER_DIO__GPIO3_6,
	MX51_PAD_DISPB2_SER_CLK__GPIO3_7,
	MX51_PAD_DISPB2_SER_RS__GPIO3_8,
	/* KPP */
	MX51_PAD_KEY_ROW0__KEY_ROW0,
	MX51_PAD_KEY_ROW1__KEY_ROW1,
	MX51_PAD_KEY_ROW2__KEY_ROW2,
	MX51_PAD_KEY_ROW3__KEY_ROW3,
	MX51_PAD_KEY_COL0__KEY_COL0,
	MX51_PAD_KEY_COL1__KEY_COL1,
	MX51_PAD_KEY_COL2__KEY_COL2,
	MX51_PAD_KEY_COL3__KEY_COL3,
	/* SD 1 */
	MX51_PAD_SD1_CMD__SD1_CMD,
	MX51_PAD_SD1_CLK__SD1_CLK,
	MX51_PAD_SD1_DATA0__SD1_DATA0,
	MX51_PAD_SD1_DATA1__SD1_DATA1,
	MX51_PAD_SD1_DATA2__SD1_DATA2,
	MX51_PAD_SD1_DATA3__SD1_DATA3,
	/* SD 2 */
	MX51_PAD_SD2_CMD__SD2_CMD,
	MX51_PAD_SD2_CLK__SD2_CLK,
	MX51_PAD_SD2_DATA0__SD2_DATA0,
	MX51_PAD_SD2_DATA1__SD2_DATA1,
	MX51_PAD_SD2_DATA2__SD2_DATA2,
	MX51_PAD_SD2_DATA3__SD2_DATA3,
};

/* UART3 has RTS/CTS wired; UART2 does not. */
static const struct imxuart_platform_data uart_pdata __initconst = {
	.flags = IMXUART_HAVE_RTSCTS,
};

/* 4x4 matrix keypad: digits plus navigation keys. */
static int mbimx51_keymap[] = {
	KEY(0, 0, KEY_1),
	KEY(0, 1, KEY_2),
	KEY(0, 2, KEY_3),
	KEY(0, 3, KEY_UP),

	KEY(1, 0, KEY_4),
	KEY(1, 1, KEY_5),
	KEY(1, 2, KEY_6),
	KEY(1, 3, KEY_LEFT),

	KEY(2, 0, KEY_7),
	KEY(2, 1, KEY_8),
	KEY(2, 2, KEY_9),
	KEY(2, 3, KEY_RIGHT),

	KEY(3, 0, KEY_0),
	KEY(3, 1, KEY_DOWN),
	KEY(3, 2, KEY_ESC),
	KEY(3, 3, KEY_ENTER),
};

static const struct matrix_keymap_data mbimx51_map_data __initconst = {
	.keymap		= mbimx51_keymap,
	.keymap_size	= ARRAY_SIZE(mbimx51_keymap),
};
/* Pen is down while the TSC2007 interrupt GPIO reads low. */
static int tsc2007_get_pendown_state(void)
{
	return gpio_get_value(MBIMX51_TSC2007_GPIO) == 0;
}
/* TSC2007 touchscreen controller configuration. */
struct tsc2007_platform_data tsc2007_data = {
	.model			= 2007,
	.x_plate_ohms		= 180,
	.get_pendown_state	= tsc2007_get_pendown_state,
};

/* I2C bus 1 devices: touchscreen + audio codec. */
static struct i2c_board_info mbimx51_i2c_devices[] = {
	{
		I2C_BOARD_INFO("tsc2007", 0x49),
		.irq		= MBIMX51_TSC2007_IRQ,
		.platform_data	= &tsc2007_data,
	}, {
		I2C_BOARD_INFO("tlv320aic23", 0x1a),
	},
};
/*
 * baseboard initialization.
 *
 * Configures pads, registers UARTs, LEDs, keypad, TSC2007 touchscreen
 * IRQ, I2C devices and both SD hosts of the MBIMX51 baseboard.
 */
void __init eukrea_mbimx51_baseboard_init(void)
{
	/* GPIOs and request labels of the four user LEDs. */
	static const unsigned int led_gpios[] = {
		MBIMX51_LED0, MBIMX51_LED1, MBIMX51_LED2, MBIMX51_LED3,
	};
	static const char * const led_labels[] = {
		"LED0", "LED1", "LED2", "LED3",
	};
	unsigned int i;

	mxc_iomux_v3_setup_multiple_pads(mbimx51_pads,
					ARRAY_SIZE(mbimx51_pads));

	imx51_add_imx_uart(1, NULL);
	imx51_add_imx_uart(2, &uart_pdata);

	/*
	 * Drive every LED inactive (active-low) before leds-gpio takes
	 * over.  Replaces four copy-pasted request/output/free blocks;
	 * gpio_request() errors were ignored before and still are.
	 */
	for (i = 0; i < ARRAY_SIZE(led_gpios); i++) {
		gpio_request(led_gpios[i], led_labels[i]);
		gpio_direction_output(led_gpios[i], 1);
		gpio_free(led_gpios[i]);
	}

	platform_add_devices(devices, ARRAY_SIZE(devices));

	imx51_add_imx_keypad(&mbimx51_map_data);

	/* TSC2007 pen-down interrupt: input GPIO, falling edge. */
	gpio_request(MBIMX51_TSC2007_GPIO, "tsc2007_irq");
	gpio_direction_input(MBIMX51_TSC2007_GPIO);
	irq_set_irq_type(MBIMX51_TSC2007_IRQ, IRQF_TRIGGER_FALLING);
	i2c_register_board_info(1, mbimx51_i2c_devices,
				ARRAY_SIZE(mbimx51_i2c_devices));

	imx51_add_sdhci_esdhc_imx(0, NULL);
	imx51_add_sdhci_esdhc_imx(1, NULL);
}
| gpl-2.0 |
KylinUI/android_kernel_samsung_smdk4412 | drivers/s390/scsi/zfcp_sysfs.c | 2540 | 17528 | /*
* zfcp device driver
*
* sysfs attributes.
*
* Copyright IBM Corporation 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include "zfcp_ext.h"
/*
 * Declare a struct device_attribute named dev_attr_<feat>_<name> with
 * the given mode and show/store handlers.
 */
#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\
							    _show, _store)

/*
 * Define a read-only sysfs attribute for a zfcp object embedded in a
 * struct device (port or unit): the show routine recovers the object
 * via container_of() and formats @_value with @_format.
 */
#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value)	       \
static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,	       \
						   struct device_attribute *at,\
						   char *buf)		       \
{									       \
	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev);    \
									       \
	return sprintf(buf, _format, _value);				       \
}									       \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,				       \
		     zfcp_sysfs_##_feat##_##_name##_show, NULL);

/*
 * Like ZFCP_DEFINE_ATTR, but for adapter attributes hanging off the
 * ccw device: takes/drops an adapter reference around the read and
 * returns -ENODEV when the adapter is gone.
 */
#define ZFCP_DEFINE_A_ATTR(_name, _format, _value)			     \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,	     \
						 struct device_attribute *at,\
						 char *buf)		     \
{									     \
	struct ccw_device *cdev = to_ccwdev(dev);			     \
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);	     \
	int i;								     \
									     \
	if (!adapter)							     \
		return -ENODEV;						     \
									     \
	i = sprintf(buf, _format, _value);				     \
	zfcp_ccw_adapter_put(adapter);					     \
	return i;							     \
}									     \
static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO,				     \
		     zfcp_sysfs_adapter_##_name##_show, NULL);
/* Read-only adapter attributes (status words, peer identity, versions). */
ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
		   (unsigned long long) adapter->peer_wwnn);
ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
		   (unsigned long long) adapter->peer_wwpn);
ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
					 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);

/* Read-only port attributes derived from the port status word. */
ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
		 atomic_read(&port->status));
ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
		 (atomic_read(&port->status) &
		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
		 (atomic_read(&port->status) &
		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);

/* Read-only unit attributes derived from the LUN's sdev status word. */
ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
		 zfcp_unit_sdev_status(unit));
ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
		 (zfcp_unit_sdev_status(unit) &
		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
		 (zfcp_unit_sdev_status(unit) &
		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
		 (zfcp_unit_sdev_status(unit) &
		  ZFCP_STATUS_LUN_SHARED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
		 (zfcp_unit_sdev_status(unit) &
		  ZFCP_STATUS_LUN_READONLY) != 0);
/* Show "1\n" when the port's ERP has failed, else "0\n". */
static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
	int failed = (atomic_read(&port->status) &
		      ZFCP_STATUS_COMMON_ERP_FAILED) ? 1 : 0;

	return sprintf(buf, "%d\n", failed);
}
/*
 * Writing "0" to the port's "failed" attribute restarts error
 * recovery: mark the port running, reopen it and wait for ERP to
 * finish.  Any other value is rejected with -EINVAL.
 */
static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
	unsigned long val;

	if (strict_strtoul(buf, 0, &val) || val != 0)
		return -EINVAL;

	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
	zfcp_erp_wait(port->adapter);

	return count;
}
static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
		     zfcp_sysfs_port_failed_show,
		     zfcp_sysfs_port_failed_store);
/*
 * Show "1\n" when the unit's LUN is in failed ERP state.  A unit
 * without an attached scsi_device (zfcp_unit_sdev() returns NULL) is
 * reported as failed; the sdev reference is dropped after the status
 * read.
 */
static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
	struct scsi_device *sdev;
	unsigned int status, failed = 1;

	sdev = zfcp_unit_sdev(unit);
	if (sdev) {
		status = atomic_read(&sdev_to_zfcp(sdev)->status);
		failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
		scsi_device_put(sdev);
	}

	return sprintf(buf, "%d\n", failed);
}
/*
 * Writing "0" to the unit's "failed" attribute restarts LUN recovery
 * when a scsi_device exists; otherwise it triggers a SCSI scan to
 * (re-)attach one.  Any other value is rejected with -EINVAL.
 */
static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
	unsigned long val;
	struct scsi_device *sdev;

	if (strict_strtoul(buf, 0, &val) || val != 0)
		return -EINVAL;

	sdev = zfcp_unit_sdev(unit);
	if (sdev) {
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "syufai2");
		zfcp_erp_wait(unit->port->adapter);
	} else
		/* No sdev attached yet: scan so one gets created. */
		zfcp_unit_scsi_scan(unit);

	return count;
}
static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
		     zfcp_sysfs_unit_failed_show,
		     zfcp_sysfs_unit_failed_store);
/*
 * Show "1\n" when the adapter's ERP has failed, else "0\n".
 * Takes an adapter reference for the duration of the read and
 * returns -ENODEV when the adapter is already gone.
 */
static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
	int len;

	if (!adapter)
		return -ENODEV;

	len = sprintf(buf, "%d\n",
		      (atomic_read(&adapter->status) &
		       ZFCP_STATUS_COMMON_ERP_FAILED) ? 1 : 0);
	zfcp_ccw_adapter_put(adapter);
	return len;
}
/*
 * Writing "0" to the adapter's "failed" attribute restarts adapter
 * recovery: mark it running, reopen and wait for ERP.  Any other
 * value yields -EINVAL; a vanished adapter yields -ENODEV.
 */
static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
	unsigned long val;
	int retval = 0;

	if (!adapter)
		return -ENODEV;

	if (strict_strtoul(buf, 0, &val) || val != 0) {
		retval = -EINVAL;
		goto out;
	}

	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
				"syafai2");
	zfcp_erp_wait(adapter);
out:
	/* Drop the reference on every path (goto cleanup pattern). */
	zfcp_ccw_adapter_put(adapter);
	return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
		     zfcp_sysfs_adapter_failed_show,
		     zfcp_sysfs_adapter_failed_store);
/*
 * Any write to "port_rescan" queues the adapter's port scan work and
 * waits for it to complete, so user space sees new ports when the
 * write returns.  The written value is ignored.
 */
static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);

	if (!adapter)
		return -ENODEV;

	/* sync the user-space- with the kernel-invocation of scan_work */
	queue_work(adapter->work_queue, &adapter->scan_work);
	flush_work(&adapter->scan_work);
	zfcp_ccw_adapter_put(adapter);

	return (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
		     zfcp_sysfs_port_rescan_store);
/* Serializes port removal against unit_add on that port. */
DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
/*
 * Store handler for the write-only "port_remove" attribute.
 * Expects a WWPN; removes the matching port unless it still has units.
 * Setting port->units to -1 under the mutex prevents any further
 * unit_add racing with the removal.
 */
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
	struct zfcp_port *port;
	u64 wwpn;
	int retval = -EINVAL;
	if (!adapter)
		return -ENODEV;
	if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn))
		goto out;
	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (!port)
		goto out;
	else
		retval = 0;
	mutex_lock(&zfcp_sysfs_port_units_mutex);
	if (atomic_read(&port->units) > 0) {
		/* still has configured units: refuse removal */
		retval = -EBUSY;
		mutex_unlock(&zfcp_sysfs_port_units_mutex);
		goto out;
	}
	/* port is about to be removed, so no more unit_add */
	atomic_set(&port->units, -1);
	mutex_unlock(&zfcp_sysfs_port_units_mutex);
	/* unlink from adapter before tearing the device down */
	write_lock_irq(&adapter->port_list_lock);
	list_del(&port->list);
	write_unlock_irq(&adapter->port_list_lock);
	put_device(&port->dev);	/* drop reference held by the lookup above */
	zfcp_erp_port_shutdown(port, 0, "syprs_1");
	zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
 out:
	zfcp_ccw_adapter_put(adapter);
	return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
		     zfcp_sysfs_port_remove_store);
/* sysfs attributes attached to the adapter's ccw device */
static struct attribute *zfcp_adapter_attrs[] = {
	&dev_attr_adapter_failed.attr,
	&dev_attr_adapter_in_recovery.attr,
	&dev_attr_adapter_port_remove.attr,
	&dev_attr_adapter_port_rescan.attr,
	&dev_attr_adapter_peer_wwnn.attr,
	&dev_attr_adapter_peer_wwpn.attr,
	&dev_attr_adapter_peer_d_id.attr,
	&dev_attr_adapter_card_version.attr,
	&dev_attr_adapter_lic_version.attr,
	&dev_attr_adapter_status.attr,
	&dev_attr_adapter_hardware_version.attr,
	NULL
};
struct attribute_group zfcp_sysfs_adapter_attrs = {
	.attrs = zfcp_adapter_attrs,
};
/*
 * Store handler for the per-port "unit_add" attribute: parses an
 * FCP LUN and registers a new unit on this port.
 */
static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
	unsigned long long lun;
	int err;
	if (strict_strtoull(buf, 0, &lun))
		return -EINVAL;
	err = zfcp_unit_add(port, (u64) lun);
	return err ? err : (ssize_t) count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
/*
 * Store handler for the per-port "unit_remove" attribute: parses an
 * FCP LUN and removes the matching unit from this port.
 */
static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
	unsigned long long lun;
	if (strict_strtoull(buf, 0, &lun) ||
	    zfcp_unit_remove(port, (u64) lun))
		return -EINVAL;
	return count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
/* sysfs attributes attached to each zfcp port device */
static struct attribute *zfcp_port_attrs[] = {
	&dev_attr_unit_add.attr,
	&dev_attr_unit_remove.attr,
	&dev_attr_port_failed.attr,
	&dev_attr_port_in_recovery.attr,
	&dev_attr_port_status.attr,
	&dev_attr_port_access_denied.attr,
	NULL
};
/**
 * zfcp_sysfs_port_attrs - sysfs attributes for all other ports
 */
struct attribute_group zfcp_sysfs_port_attrs = {
	.attrs = zfcp_port_attrs,
};
/* sysfs attributes attached to each zfcp unit device */
static struct attribute *zfcp_unit_attrs[] = {
	&dev_attr_unit_failed.attr,
	&dev_attr_unit_in_recovery.attr,
	&dev_attr_unit_status.attr,
	&dev_attr_unit_access_denied.attr,
	&dev_attr_unit_access_shared.attr,
	&dev_attr_unit_access_readonly.attr,
	NULL
};
struct attribute_group zfcp_sysfs_unit_attrs = {
	.attrs = zfcp_unit_attrs,
};
/*
 * Generates a read/write "_name_latency" sysfs attribute per scsi_device.
 * show: snapshots the fabric/channel latency counters under lat->lock,
 * scales the raw values by the adapter's timer tick length, converts to
 * microseconds (do_div by 1000) and prints
 * "fmin fmax fsum cmin cmax csum count".
 * store: any write resets the counters (min re-armed to 0xFFFFFFFF).
 */
#define ZFCP_DEFINE_LATENCY_ATTR(_name) \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
				       struct device_attribute *attr, \
				       char *buf) { \
	struct scsi_device *sdev = to_scsi_device(dev); \
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
	struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \
	unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
	\
	spin_lock_bh(&lat->lock); \
	fsum = lat->_name.fabric.sum * adapter->timer_ticks; \
	fmin = lat->_name.fabric.min * adapter->timer_ticks; \
	fmax = lat->_name.fabric.max * adapter->timer_ticks; \
	csum = lat->_name.channel.sum * adapter->timer_ticks; \
	cmin = lat->_name.channel.min * adapter->timer_ticks; \
	cmax = lat->_name.channel.max * adapter->timer_ticks; \
	cc  = lat->_name.counter; \
	spin_unlock_bh(&lat->lock); \
	\
	do_div(fsum, 1000); \
	do_div(fmin, 1000); \
	do_div(fmax, 1000); \
	do_div(csum, 1000); \
	do_div(cmin, 1000); \
	do_div(cmax, 1000); \
	\
	return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \
		       fmin, fmax, fsum, cmin, cmax, csum, cc); \
} \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
					struct device_attribute *attr, \
					const char *buf, size_t count) \
{ \
	struct scsi_device *sdev = to_scsi_device(dev); \
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
	struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
	unsigned long flags; \
	\
	spin_lock_irqsave(&lat->lock, flags); \
	lat->_name.fabric.sum = 0; \
	lat->_name.fabric.min = 0xFFFFFFFF; \
	lat->_name.fabric.max = 0; \
	lat->_name.channel.sum = 0; \
	lat->_name.channel.min = 0xFFFFFFFF; \
	lat->_name.channel.max = 0; \
	lat->_name.counter = 0; \
	spin_unlock_irqrestore(&lat->lock, flags); \
	\
	return (ssize_t) count; \
} \
static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO, \
		   zfcp_sysfs_unit_##_name##_latency_show, \
		   zfcp_sysfs_unit_##_name##_latency_store);
/* one latency attribute each for read, write and "other" commands */
ZFCP_DEFINE_LATENCY_ATTR(read);
ZFCP_DEFINE_LATENCY_ATTR(write);
ZFCP_DEFINE_LATENCY_ATTR(cmd);
/*
 * Generates a read-only per-scsi_device attribute printing a value
 * derived from the owning zfcp_port ("port" is in scope for _value).
 */
#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
					      struct device_attribute *attr,\
					      char *buf) \
{ \
	struct scsi_device *sdev = to_scsi_device(dev); \
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
	struct zfcp_port *port = zfcp_sdev->port; \
	\
	return sprintf(buf, _format, _value); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
/* bus id of the adapter (e.g. "0.0.1900") and WWPN of the remote port */
ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
		      dev_name(&port->adapter->ccw_device->dev));
ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
		      (unsigned long long) port->wwpn);
/* Show the 64-bit FCP LUN of a scsi_device in hex. */
static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
}
static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
/* per-scsi_device attributes hooked into the SCSI host template */
struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
	&dev_attr_fcp_lun,
	&dev_attr_wwpn,
	&dev_attr_hba_id,
	&dev_attr_read_latency,
	&dev_attr_write_latency,
	&dev_attr_cmd_latency,
	NULL
};
/*
 * Show CP/channel/adapter utilization, fetched synchronously from the
 * FCP channel via an exchange-port-data request.  Only available when
 * the adapter reports the measurement-data feature.
 */
static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct Scsi_Host *scsi_host = dev_to_shost(dev);
	struct fsf_qtcb_bottom_port *qtcb_port;
	struct zfcp_adapter *adapter;
	int retval;
	adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
	if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
		return -EOPNOTSUPP;
	/* temporary buffer for the channel's reply */
	qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
	if (!qtcb_port)
		return -ENOMEM;
	retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
	if (!retval)
		retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
				 qtcb_port->cb_util, qtcb_port->a_util);
	kfree(qtcb_port);
	return retval;
}
static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
/*
 * Fetch the FSF statistics block via a synchronous exchange-config-data
 * request.  Returns 0 and fills *stat_inf on success, negative errno or
 * FSF status otherwise.
 */
static int zfcp_sysfs_adapter_ex_config(struct device *dev,
					struct fsf_statistics_info *stat_inf)
{
	struct Scsi_Host *scsi_host = dev_to_shost(dev);
	struct fsf_qtcb_bottom_config *qtcb_config;
	struct zfcp_adapter *adapter;
	int retval;
	adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
	if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
		return -EOPNOTSUPP;
	qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
			      GFP_KERNEL);
	if (!qtcb_config)
		return -ENOMEM;
	retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
	if (!retval)
		*stat_inf = qtcb_config->stat_info;
	kfree(qtcb_config);
	return retval;
}
/*
 * Generates a read-only Scsi_Host attribute formatted from the FSF
 * statistics block ("stat_info" is in scope for the _arg expressions).
 */
#define ZFCP_SHOST_ATTR(_name, _format, _arg...) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
						 struct device_attribute *attr,\
						 char *buf) \
{ \
	struct fsf_statistics_info stat_info; \
	int retval; \
	\
	retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); \
	if (retval) \
		return retval; \
	\
	return sprintf(buf, _format, ## _arg); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
/* request counts, transferred megabytes and uptime of the adapter */
ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n",
		(unsigned long long) stat_info.input_req,
		(unsigned long long) stat_info.output_req,
		(unsigned long long) stat_info.control_req);
ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
		(unsigned long long) stat_info.input_mb,
		(unsigned long long) stat_info.output_mb);
ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
		(unsigned long long) stat_info.seconds_act);
/*
 * Show how often the request queue was full plus the accumulated
 * queue-utilization counter, sampled under the qdio stat lock.
 * NOTE(review): this uses class_to_shost() while the utilization
 * attribute above uses dev_to_shost() — both resolve to the same
 * Scsi_Host for shost attributes, but the inconsistency is worth
 * confirming against the SCSI midlayer of this kernel version.
 */
static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct Scsi_Host *scsi_host = class_to_shost(dev);
	struct zfcp_qdio *qdio =
		((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio;
	u64 util;
	spin_lock_bh(&qdio->stat_lock);
	util = qdio->req_q_util;
	spin_unlock_bh(&qdio->stat_lock);
	return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
		       (unsigned long long)util);
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
/* Scsi_Host attributes exported via the zfcp host template */
struct device_attribute *zfcp_sysfs_shost_attrs[] = {
	&dev_attr_utilization,
	&dev_attr_requests,
	&dev_attr_megabytes,
	&dev_attr_seconds_active,
	&dev_attr_queue_full,
	NULL
};
| gpl-2.0 |
bigzz/shamu_flar2 | net/nfc/hci/llc_nop.c | 2540 | 2474 | /*
* nop (passthrough) Link Layer Control
*
* Copyright (C) 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/types.h>
#include "llc.h"
/*
 * Private state of the pass-through LLC: the callbacks and buffer
 * geometry handed in at init time, stored verbatim since no framing
 * is added or removed.
 */
struct llc_nop {
	struct nfc_hci_dev *hdev;	/* owning HCI device */
	xmit_to_drv_t xmit_to_drv;	/* send a frame down to the driver */
	rcv_to_hci_t rcv_to_hci;	/* deliver a frame up to HCI */
	int tx_headroom;		/* driver tx headroom (unused here) */
	int tx_tailroom;		/* driver tx tailroom (unused here) */
	llc_failure_t llc_failure;	/* error callback (never triggered) */
};
/*
 * Allocate the pass-through LLC context and record the callbacks and
 * buffer geometry.  The nop LLC adds no framing, so it requires no
 * rx head/tailroom.  Returns the context or NULL on allocation failure.
 */
static void *llc_nop_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
			  rcv_to_hci_t rcv_to_hci, int tx_headroom,
			  int tx_tailroom, int *rx_headroom, int *rx_tailroom,
			  llc_failure_t llc_failure)
{
	struct llc_nop *nop;
	*rx_headroom = 0;
	*rx_tailroom = 0;
	nop = kzalloc(sizeof(*nop), GFP_KERNEL);
	if (!nop)
		return NULL;
	nop->hdev = hdev;
	nop->xmit_to_drv = xmit_to_drv;
	nop->rcv_to_hci = rcv_to_hci;
	nop->tx_headroom = tx_headroom;
	nop->tx_tailroom = tx_tailroom;
	nop->llc_failure = llc_failure;
	return nop;
}
/* Release the context allocated by llc_nop_init(). */
static void llc_nop_deinit(struct nfc_llc *llc)
{
	struct llc_nop *nop = nfc_llc_get_data(llc);
	kfree(nop);
}
/* Nothing to bring up for a pass-through link layer. */
static int llc_nop_start(struct nfc_llc *llc)
{
	return 0;
}
/* Nothing to tear down either. */
static int llc_nop_stop(struct nfc_llc *llc)
{
	return 0;
}
/* Inbound path: hand the skb straight up to HCI, unmodified. */
static void llc_nop_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
{
	struct llc_nop *nop = nfc_llc_get_data(llc);
	nop->rcv_to_hci(nop->hdev, skb);
}
/* Outbound path: forward the skb straight down to the driver. */
static int llc_nop_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
{
	struct llc_nop *nop = nfc_llc_get_data(llc);
	return nop->xmit_to_drv(nop->hdev, skb);
}
/* ops vector tying the nop implementation into the LLC core */
static struct nfc_llc_ops llc_nop_ops = {
	.init = llc_nop_init,
	.deinit = llc_nop_deinit,
	.start = llc_nop_start,
	.stop = llc_nop_stop,
	.rcv_from_drv = llc_nop_rcv_from_drv,
	.xmit_from_hci = llc_nop_xmit_from_hci,
};
/* Register the pass-through LLC under its well-known name. */
int nfc_llc_nop_register(void)
{
	return nfc_llc_register(LLC_NOP_NAME, &llc_nop_ops);
}
| gpl-2.0 |
cooldudezach/android_kernel_zte_msm8930 | drivers/staging/ozwpan/ozcdev.c | 3820 | 13427 | /* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include "ozconfig.h"
#include "ozprotocol.h"
#include "oztrace.h"
#include "ozappif.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozevent.h"
/*------------------------------------------------------------------------------
*/
/* size of the per-PD serial receive ring buffer (bytes) */
#define OZ_RD_BUF_SZ 256
/* State of the single ozwpan character device. */
struct oz_cdev {
	dev_t devnum;			/* allocated major/minor */
	struct cdev cdev;		/* char device registration */
	wait_queue_head_t rdq;		/* readers wait here for rx data */
	spinlock_t lock;		/* guards active_addr/active_pd */
	u8 active_addr[ETH_ALEN];	/* MAC of the selected PD */
	struct oz_pd *active_pd;	/* referenced PD, or 0 if none */
};
/* Per PD context for the serial service stored in the PD. */
struct oz_serial_ctx {
	atomic_t ref_count;	/* freed when it drops to zero */
	u8 tx_seq_num;		/* next tx element sequence number (1..255) */
	u8 rx_seq_num;		/* last accepted rx sequence number */
	u8 rd_buf[OZ_RD_BUF_SZ];	/* receive ring buffer */
	int rd_in;		/* producer index (written in rx path) */
	int rd_out;		/* consumer index (written by read()) */
};
/*------------------------------------------------------------------------------
 */
int g_taction;
/*------------------------------------------------------------------------------
 */
static struct oz_cdev g_cdev;
/*------------------------------------------------------------------------------
* Context: process and softirq
*/
/*
 * Take a reference on the PD's serial context, if any.  The app lock
 * makes the lookup and ref-count increment atomic with respect to
 * oz_cdev_stop() clearing the pointer.  Returns the context or 0.
 * Context: process and softirq
 */
static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
{
	struct oz_serial_ctx *ctx;
	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
	if (ctx)
		atomic_inc(&ctx->ref_count);
	spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	return ctx;
}
/*
 * Drop a reference taken by oz_cdev_claim_ctx(); frees the context when
 * the last reference goes away.
 * Context: softirq or process
 */
static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->ref_count)) {
		oz_trace("Dealloc serial context.\n");
		kfree(ctx);
	}
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * open(): stash the oz_cdev behind the file pointer for later fops.
 * Context: process
 */
int oz_cdev_open(struct inode *inode, struct file *filp)
{
	struct oz_cdev *dev;
	oz_trace("oz_cdev_open()\n");
	oz_trace("major = %d minor = %d\n", imajor(inode), iminor(inode));
	/* recover the containing oz_cdev from the embedded cdev */
	dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
	filp->private_data = dev;
	return 0;
}
/*
 * release(): nothing to clean up per file handle.
 * Context: process
 */
int oz_cdev_release(struct inode *inode, struct file *filp)
{
	oz_trace("oz_cdev_release()\n");
	return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * read(): copy up to count bytes out of the active PD's receive ring
 * buffer.  Non-blocking; returns 0 when the ring is empty, -1 if there
 * is no active PD, and 0 on copy_to_user failure.
 * The ring may wrap, hence the two-part copy below.
 * Context: process
 */
ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
		loff_t *fpos)
{
	int n;
	int ix;
	struct oz_pd *pd;
	struct oz_serial_ctx *ctx = 0;
	/* take a PD reference under the device lock */
	spin_lock_bh(&g_cdev.lock);
	pd = g_cdev.active_pd;
	if (pd)
		oz_pd_get(pd);
	spin_unlock_bh(&g_cdev.lock);
	if (pd == 0)
		return -1;
	ctx = oz_cdev_claim_ctx(pd);
	if (ctx == 0)
		goto out2;
	/* number of bytes available in the ring */
	n = ctx->rd_in - ctx->rd_out;
	if (n < 0)
		n += OZ_RD_BUF_SZ;
	if (count > n)
		count = n;
	ix = ctx->rd_out;
	/* first part: from rd_out up to the end of the buffer */
	n = OZ_RD_BUF_SZ - ix;
	if (n > count)
		n = count;
	if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
		count = 0;
		goto out1;
	}
	ix += n;
	if (ix == OZ_RD_BUF_SZ)
		ix = 0;
	/* second part: wrapped-around remainder at the buffer start */
	if (n < count) {
		if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) {
			count = 0;
			goto out1;
		}
		ix = count-n;
	}
	ctx->rd_out = ix;	/* consume the copied bytes */
out1:
	oz_cdev_release_ctx(ctx);
out2:
	oz_pd_put(pd);
	return count;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * write(): wrap the user data in a single OZ_ELT_APP_DATA element and
 * queue it on the active PD's element buffer.  Returns the number of
 * bytes accepted, 0 if no element/context was available or the copy
 * failed, and -1 when there is no active PD.
 * NOTE(review): count is not validated against the element buffer
 * capacity before being added to elt->length — confirm upper bound
 * against the element allocation size.
 * Context: process
 */
ssize_t oz_cdev_write(struct file *filp, const char __user *buf, size_t count,
		loff_t *fpos)
{
	struct oz_pd *pd;
	struct oz_elt_buf *eb;
	struct oz_elt_info *ei = 0;
	struct oz_elt *elt;
	struct oz_app_hdr *app_hdr;
	struct oz_serial_ctx *ctx;
	spin_lock_bh(&g_cdev.lock);
	pd = g_cdev.active_pd;
	if (pd)
		oz_pd_get(pd);
	spin_unlock_bh(&g_cdev.lock);
	if (pd == 0)
		return -1;
	eb = &pd->elt_buff;
	ei = oz_elt_info_alloc(eb);
	if (ei == 0) {
		count = 0;
		goto out;
	}
	/* build element header followed by app header and payload */
	elt = (struct oz_elt *)ei->data;
	app_hdr = (struct oz_app_hdr *)(elt+1);
	elt->length = sizeof(struct oz_app_hdr) + count;
	elt->type = OZ_ELT_APP_DATA;
	ei->app_id = OZ_APPID_SERIAL;
	ei->length = elt->length + sizeof(struct oz_elt);
	app_hdr->app_id = OZ_APPID_SERIAL;
	if (copy_from_user(app_hdr+1, buf, count))
		goto out;
	/* Fix: was app_lock[OZ_APPID_USB-1].  The serial context must be
	 * guarded by the serial app lock (see oz_cdev_claim_ctx()),
	 * otherwise this does not serialize against oz_cdev_start()/
	 * oz_cdev_stop() and the context could be freed under us.
	 */
	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
	if (ctx) {
		/* assign the next non-zero sequence number */
		app_hdr->elt_seq_num = ctx->tx_seq_num++;
		if (ctx->tx_seq_num == 0)
			ctx->tx_seq_num = 1;
		spin_lock(&eb->lock);
		if (oz_queue_elt_info(eb, 0, 0, ei) == 0)
			ei = 0;	/* ownership passed to the queue */
		spin_unlock(&eb->lock);
	}
	spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
out:
	if (ei) {
		/* element was not queued: free it and report 0 bytes */
		count = 0;
		spin_lock_bh(&eb->lock);
		oz_elt_info_free(eb, ei);
		spin_unlock_bh(&eb->lock);
	}
	oz_pd_put(pd);
	return count;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * Select the PD with the given MAC address as the active one, or clear
 * the active PD when the all-zero address is passed.  Returns 0 on
 * success, -1 if no PD with that address exists.
 * Context: process
 */
static int oz_set_active_pd(u8 *addr)
{
	int rc = 0;
	struct oz_pd *pd;
	struct oz_pd *old_pd;
	pd = oz_pd_find(addr);
	if (pd) {
		spin_lock_bh(&g_cdev.lock);
		memcpy(g_cdev.active_addr, addr, ETH_ALEN);
		old_pd = g_cdev.active_pd;
		g_cdev.active_pd = pd;
		spin_unlock_bh(&g_cdev.lock);
		if (old_pd)
			oz_pd_put(old_pd);	/* drop ref on replaced PD */
	} else {
		/* Fix: was memcmp(..., sizeof(addr)) — addr is u8 *, so
		 * sizeof(addr) is the pointer size (4 or 8), not the 6-byte
		 * MAC length.  Compare the full ETH_ALEN bytes.
		 */
		if (!memcmp(addr, "\0\0\0\0\0\0", ETH_ALEN)) {
			/* all-zero address clears the active PD */
			spin_lock_bh(&g_cdev.lock);
			pd = g_cdev.active_pd;
			g_cdev.active_pd = 0;
			memset(g_cdev.active_addr, 0,
				sizeof(g_cdev.active_addr));
			spin_unlock_bh(&g_cdev.lock);
			if (pd)
				oz_pd_put(pd);
		} else {
			rc = -1;
		}
	}
	return rc;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * ioctl dispatch for the ozwpan control device: PD enumeration and
 * selection, optional event-trace control, and network binding add/
 * remove.  Validates user pointers up front with access_ok().
 * Context: process
 */
long oz_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int rc = 0;
	if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(cmd) > OZ_IOCTL_MAX)
		return -ENOTTY;
	/* pre-validate the user buffer for the direction the cmd encodes */
	if (_IOC_DIR(cmd) & _IOC_READ)
		rc = !access_ok(VERIFY_WRITE, (void __user *)arg,
			_IOC_SIZE(cmd));
	else if (_IOC_DIR(cmd) & _IOC_WRITE)
		rc = !access_ok(VERIFY_READ, (void __user *)arg,
			_IOC_SIZE(cmd));
	if (rc)
		return -EFAULT;
	switch (cmd) {
	case OZ_IOCTL_GET_PD_LIST: {
			/* copy out the list of known PD addresses */
			struct oz_pd_list list;
			oz_trace("OZ_IOCTL_GET_PD_LIST\n");
			list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
			if (copy_to_user((void __user *)arg, &list,
				sizeof(list)))
				return -EFAULT;
		}
		break;
	case OZ_IOCTL_SET_ACTIVE_PD: {
			/* select (or clear) the PD the cdev talks to */
			u8 addr[ETH_ALEN];
			oz_trace("OZ_IOCTL_SET_ACTIVE_PD\n");
			if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
				return -EFAULT;
			rc = oz_set_active_pd(addr);
		}
		break;
	case OZ_IOCTL_GET_ACTIVE_PD: {
			/* report the currently selected PD address */
			u8 addr[ETH_ALEN];
			oz_trace("OZ_IOCTL_GET_ACTIVE_PD\n");
			spin_lock_bh(&g_cdev.lock);
			memcpy(addr, g_cdev.active_addr, ETH_ALEN);
			spin_unlock_bh(&g_cdev.lock);
			if (copy_to_user((void __user *)arg, addr, ETH_ALEN))
				return -EFAULT;
		}
		break;
#ifdef WANT_EVENT_TRACE
	case OZ_IOCTL_CLEAR_EVENTS:
		oz_events_clear();
		break;
	case OZ_IOCTL_GET_EVENTS:
		rc = oz_events_copy((void __user *)arg);
		break;
	case OZ_IOCTL_SET_EVENT_MASK:
		if (copy_from_user(&g_evt_mask, (void __user *)arg,
			sizeof(unsigned long))) {
			return -EFAULT;
		}
		break;
#endif /* WANT_EVENT_TRACE */
	case OZ_IOCTL_ADD_BINDING:
	case OZ_IOCTL_REMOVE_BINDING: {
			struct oz_binding_info b;
			if (copy_from_user(&b, (void __user *)arg,
				sizeof(struct oz_binding_info))) {
				return -EFAULT;
			}
			/* Make sure name is null terminated. */
			b.name[OZ_MAX_BINDING_LEN-1] = 0;
			if (cmd == OZ_IOCTL_ADD_BINDING)
				oz_binding_add(b.name);
			else
				oz_binding_remove(b.name);
		}
		break;
	}
	return rc;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * poll(): report POLLIN when the active PD's receive ring buffer holds
 * data; always registers on the read wait queue so readers are woken
 * by oz_cdev_rx().
 * Context: process
 */
unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
{
	unsigned int ret = 0;
	struct oz_cdev *dev = filp->private_data;
	oz_trace("Poll called wait = %p\n", wait);
	spin_lock_bh(&dev->lock);
	if (dev->active_pd) {
		struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);
		if (ctx) {
			/* data is readable when the ring is non-empty */
			if (ctx->rd_in != ctx->rd_out)
				ret |= POLLIN | POLLRDNORM;
			oz_cdev_release_ctx(ctx);
		}
	}
	spin_unlock_bh(&dev->lock);
	if (wait)
		poll_wait(filp, &dev->rdq, wait);
	return ret;
}
/*------------------------------------------------------------------------------
 */
/* file operations of the ozwpan character device */
const struct file_operations oz_fops = {
	.owner =	THIS_MODULE,
	.open =		oz_cdev_open,
	.release =	oz_cdev_release,
	.read =		oz_cdev_read,
	.write =	oz_cdev_write,
	.unlocked_ioctl = oz_cdev_ioctl,
	.poll =		oz_cdev_poll
};
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * Allocate a char device number and register the ozwpan cdev.
 * Returns 0 on success or a negative errno.
 * Fix: the return value of cdev_add() was stored but never checked —
 * on failure the function returned 0 and leaked the chrdev region.
 * Context: process
 */
int oz_cdev_register(void)
{
	int err;
	memset(&g_cdev, 0, sizeof(g_cdev));
	err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
	if (err < 0)
		return err;
	oz_trace("Alloc dev number %d:%d\n", MAJOR(g_cdev.devnum),
			MINOR(g_cdev.devnum));
	cdev_init(&g_cdev.cdev, &oz_fops);
	g_cdev.cdev.owner = THIS_MODULE;
	g_cdev.cdev.ops = &oz_fops;
	spin_lock_init(&g_cdev.lock);
	init_waitqueue_head(&g_cdev.rdq);
	err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1);
	if (err < 0) {
		/* undo the region allocation on failure */
		unregister_chrdev_region(g_cdev.devnum, 1);
		return err;
	}
	return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * Tear down the char device: remove the cdev first, then release the
 * reserved device number.  Always succeeds.
 * Context: process
 */
int oz_cdev_deregister(void)
{
	cdev_del(&g_cdev.cdev);
	unregister_chrdev_region(g_cdev.devnum, 1);
	return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * Enable the serial application service (logs a service event first).
 * Context: process
 */
int oz_cdev_init(void)
{
	oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_SERIAL, 0, 0);
	oz_app_enable(OZ_APPID_SERIAL, 1);
	return 0;
}
/*------------------------------------------------------------------------------
 * Context: process
 */
/* Disable the serial application service (mirror of oz_cdev_init()). */
void oz_cdev_term(void)
{
	oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_SERIAL, 0, 0);
	oz_app_enable(OZ_APPID_SERIAL, 0);
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
/*
 * Start (or resume) the serial service on a PD: allocate the serial
 * context, install it under the serial app lock, and if this PD's MAC
 * matches the user-selected address, make it the active PD.
 * Returns 0 or -ENOMEM.
 * Context: softirq-serialized
 */
int oz_cdev_start(struct oz_pd *pd, int resume)
{
	struct oz_serial_ctx *ctx;
	struct oz_serial_ctx *old_ctx = 0;
	oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_SERIAL, 0, resume);
	if (resume) {
		/* resume keeps the existing context */
		oz_trace("Serial service resumed.\n");
		return 0;
	}
	ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC);
	if (ctx == 0)
		return -ENOMEM;
	atomic_set(&ctx->ref_count, 1);
	ctx->tx_seq_num = 1;	/* sequence numbers start at 1 (0 = unused) */
	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	old_ctx = pd->app_ctx[OZ_APPID_SERIAL-1];
	if (old_ctx) {
		/* lost the race: another context is already installed */
		spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
		kfree(ctx);
	} else {
		pd->app_ctx[OZ_APPID_SERIAL-1] = ctx;
		spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	}
	spin_lock(&g_cdev.lock);
	/* adopt this PD if the user pre-selected its address */
	if ((g_cdev.active_pd == 0) &&
		(memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
		oz_pd_get(pd);
		g_cdev.active_pd = pd;
		oz_trace("Active PD arrived.\n");
	}
	spin_unlock(&g_cdev.lock);
	oz_trace("Serial service started.\n");
	return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
/*
 * Stop (or pause) the serial service on a PD: detach and release the
 * serial context and, if this PD was the active one, drop that too.
 */
void oz_cdev_stop(struct oz_pd *pd, int pause)
{
	struct oz_serial_ctx *ctx;
	oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_SERIAL, 0, pause);
	if (pause) {
		oz_trace("Serial service paused.\n");
		return;
	}
	/* detach the context under the app lock, then drop our reference */
	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
	pd->app_ctx[OZ_APPID_SERIAL-1] = 0;
	spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	if (ctx)
		oz_cdev_release_ctx(ctx);
	spin_lock(&g_cdev.lock);
	if (pd == g_cdev.active_pd)
		g_cdev.active_pd = 0;
	else
		pd = 0;	/* not active: nothing further to release */
	spin_unlock(&g_cdev.lock);
	if (pd) {
		oz_pd_put(pd);
		oz_trace("Active PD departed.\n");
	}
	oz_trace("Serial service stopped.\n");
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
/*
 * Receive path: copy an incoming serial element's payload into the
 * PD's ring buffer (dropping duplicates by sequence number and excess
 * bytes when the ring is full), then wake any readers.
 * Context: softirq-serialized
 */
void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
{
	struct oz_serial_ctx *ctx;
	struct oz_app_hdr *app_hdr;
	u8 *data;
	int len;
	int space;
	int copy_sz;
	int ix;
	ctx = oz_cdev_claim_ctx(pd);
	if (ctx == 0) {
		oz_trace("Cannot claim serial context.\n");
		return;
	}
	app_hdr = (struct oz_app_hdr *)(elt+1);
	/* If sequence number is non-zero then check it is not a duplicate.
	 */
	if (app_hdr->elt_seq_num != 0) {
		/* signed-window test: seq numbers within 128 behind the last
		 * accepted one are treated as retransmissions */
		if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) {
			/* Reject duplicate element. */
			oz_trace("Duplicate element:%02x %02x\n",
				app_hdr->elt_seq_num, ctx->rx_seq_num);
			goto out;
		}
	}
	ctx->rx_seq_num = app_hdr->elt_seq_num;
	len = elt->length - sizeof(struct oz_app_hdr);
	data = ((u8 *)(elt+1)) + sizeof(struct oz_app_hdr);
	if (len <= 0)
		goto out;
	/* free space in the ring (one slot kept empty to tell full/empty) */
	space = ctx->rd_out - ctx->rd_in - 1;
	if (space < 0)
		space += OZ_RD_BUF_SZ;
	if (len > space) {
		/* ring overflow: silently truncate the element's data */
		oz_trace("Not enough space:%d %d\n", len, space);
		len = space;
	}
	ix = ctx->rd_in;
	/* first part: up to the physical end of the ring */
	copy_sz = OZ_RD_BUF_SZ - ix;
	if (copy_sz > len)
		copy_sz = len;
	memcpy(&ctx->rd_buf[ix], data, copy_sz);
	len -= copy_sz;
	ix += copy_sz;
	if (ix == OZ_RD_BUF_SZ)
		ix = 0;
	/* second part: wrap-around remainder at the ring start */
	if (len) {
		memcpy(ctx->rd_buf, data+copy_sz, len);
		ix = len;
	}
	ctx->rd_in = ix;	/* publish the new producer index */
	wake_up(&g_cdev.rdq);	/* wake blocked poll()ers */
out:
	oz_cdev_release_ctx(ctx);
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
/* The serial service needs no periodic heartbeat work. */
void oz_cdev_heartbeat(struct oz_pd *pd)
{
}
| gpl-2.0 |
TV-LP51-Devices/android_kernel_mediatek_sprout | arch/arm/mach-omap2/voltagedomains44xx_data.c | 3820 | 3522 | /*
* OMAP3/OMAP4 Voltage Management Routines
*
* Author: Thara Gopinath <thara@ti.com>
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Rajendra Nayak <rnayak@ti.com>
* Lesly A M <x0080970@ti.com>
*
* Copyright (C) 2008 Nokia Corporation
* Kalle Jokiniemi
*
* Copyright (C) 2010 Texas Instruments, Inc.
* Thara Gopinath <thara@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
#include "common.h"
#include "soc.h"
#include "prm-regbits-44xx.h"
#include "prm44xx.h"
#include "prcm44xx.h"
#include "prminst44xx.h"
#include "voltage.h"
#include "omap_opp_data.h"
#include "vc.h"
#include "vp.h"
/* voltage FSM setup-register offsets for each scalable OMAP4 rail */
static const struct omap_vfsm_instance omap4_vdd_mpu_vfsm = {
	.voltsetup_reg = OMAP4_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET,
	.voltsetup_off_reg = OMAP4_PRM_VOLTSETUP_MPU_OFF_OFFSET,
};
static const struct omap_vfsm_instance omap4_vdd_iva_vfsm = {
	.voltsetup_reg = OMAP4_PRM_VOLTSETUP_IVA_RET_SLEEP_OFFSET,
	.voltsetup_off_reg = OMAP4_PRM_VOLTSETUP_IVA_OFF_OFFSET,
};
static const struct omap_vfsm_instance omap4_vdd_core_vfsm = {
	.voltsetup_reg = OMAP4_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET,
	.voltsetup_off_reg = OMAP4_PRM_VOLTSETUP_CORE_OFF_OFFSET,
};
/* MPU rail: scalable, accessed through the PRM VC/VP helpers */
static struct voltagedomain omap4_voltdm_mpu = {
	.name = "mpu",
	.scalable = true,
	.read = omap4_prm_vcvp_read,
	.write = omap4_prm_vcvp_write,
	.rmw = omap4_prm_vcvp_rmw,
	.vc = &omap4_vc_mpu,
	.vfsm = &omap4_vdd_mpu_vfsm,
	.vp = &omap4_vp_mpu,
};
/* IVA rail: same access helpers, IVA-specific VC/VP/VFSM data */
static struct voltagedomain omap4_voltdm_iva = {
	.name = "iva",
	.scalable = true,
	.read = omap4_prm_vcvp_read,
	.write = omap4_prm_vcvp_write,
	.rmw = omap4_prm_vcvp_rmw,
	.vc = &omap4_vc_iva,
	.vfsm = &omap4_vdd_iva_vfsm,
	.vp = &omap4_vp_iva,
};
/* CORE rail */
static struct voltagedomain omap4_voltdm_core = {
	.name = "core",
	.scalable = true,
	.read = omap4_prm_vcvp_read,
	.write = omap4_prm_vcvp_write,
	.rmw = omap4_prm_vcvp_rmw,
	.vc = &omap4_vc_core,
	.vfsm = &omap4_vdd_core_vfsm,
	.vp = &omap4_vp_core,
};
/* wakeup domain is fixed-voltage: no VC/VP hookup */
static struct voltagedomain omap4_voltdm_wkup = {
	.name = "wakeup",
};
/* NULL-terminated list registered with the voltage layer */
static struct voltagedomain *voltagedomains_omap4[] __initdata = {
	&omap4_voltdm_mpu,
	&omap4_voltdm_iva,
	&omap4_voltdm_core,
	&omap4_voltdm_wkup,
	NULL,
};
static const char *sys_clk_name __initdata = "sys_clkin_ck";
/*
 * Hook up chip-specific OPP tables and the VC/VP parameter sets for
 * the OMAP4 voltage domains, then register them with the voltage core.
 * Fixes: loop condition used a comma-expression assignment
 * (`voltdm = ...[i], voltdm`) — rewritten as an explicit, parenthesized
 * comparison; stray `;` after the function's closing brace removed.
 */
void __init omap44xx_voltagedomains_init(void)
{
	struct voltagedomain *voltdm;
	int i;
	/*
	 * XXX Will depend on the process, validation, and binning
	 * for the currently-running IC
	 */
#ifdef CONFIG_PM_OPP
	if (cpu_is_omap443x()) {
		omap4_voltdm_mpu.volt_data = omap443x_vdd_mpu_volt_data;
		omap4_voltdm_iva.volt_data = omap443x_vdd_iva_volt_data;
		omap4_voltdm_core.volt_data = omap443x_vdd_core_volt_data;
	} else if (cpu_is_omap446x()) {
		omap4_voltdm_mpu.volt_data = omap446x_vdd_mpu_volt_data;
		omap4_voltdm_iva.volt_data = omap446x_vdd_iva_volt_data;
		omap4_voltdm_core.volt_data = omap446x_vdd_core_volt_data;
	}
#endif
	omap4_voltdm_mpu.vp_param = &omap4_mpu_vp_data;
	omap4_voltdm_iva.vp_param = &omap4_iva_vp_data;
	omap4_voltdm_core.vp_param = &omap4_core_vp_data;
	omap4_voltdm_mpu.vc_param = &omap4_mpu_vc_data;
	omap4_voltdm_iva.vc_param = &omap4_iva_vc_data;
	omap4_voltdm_core.vc_param = &omap4_core_vc_data;
	/* every domain references the same system clock */
	for (i = 0; (voltdm = voltagedomains_omap4[i]) != NULL; i++)
		voltdm->sys_clk.name = sys_clk_name;
	voltdm_init(voltagedomains_omap4);
}
| gpl-2.0 |
dezelin/kvm | arch/m68k/platform/68328/ints.c | 4588 | 4291 | /*
* linux/arch/m68knommu/platform/68328/ints.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* Copyright 1996 Roman Zippel
* Copyright 1999 D. Jeff Dionne <jeff@rt-control.com>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/machdep.h>
#if defined(CONFIG_M68328)
#include <asm/MC68328.h>
#elif defined(CONFIG_M68EZ328)
#include <asm/MC68EZ328.h>
#elif defined(CONFIG_M68VZ328)
#include <asm/MC68VZ328.h>
#endif
/* assembler routines */
/* entry points implemented in the platform's entry.S */
asmlinkage void system_call(void);
asmlinkage void buserr(void);
asmlinkage void trap(void);
/* individual CPU trap vectors (vectors 3..15) */
asmlinkage void trap3(void);
asmlinkage void trap4(void);
asmlinkage void trap5(void);
asmlinkage void trap6(void);
asmlinkage void trap7(void);
asmlinkage void trap8(void);
asmlinkage void trap9(void);
asmlinkage void trap10(void);
asmlinkage void trap11(void);
asmlinkage void trap12(void);
asmlinkage void trap13(void);
asmlinkage void trap14(void);
asmlinkage void trap15(void);
/* user trap vectors (vectors 33..47) */
asmlinkage void trap33(void);
asmlinkage void trap34(void);
asmlinkage void trap35(void);
asmlinkage void trap36(void);
asmlinkage void trap37(void);
asmlinkage void trap38(void);
asmlinkage void trap39(void);
asmlinkage void trap40(void);
asmlinkage void trap41(void);
asmlinkage void trap42(void);
asmlinkage void trap43(void);
asmlinkage void trap44(void);
asmlinkage void trap45(void);
asmlinkage void trap46(void);
asmlinkage void trap47(void);
/* low-level interrupt dispatch stubs (autovectors 1..7) */
asmlinkage irqreturn_t bad_interrupt(int, void *);
asmlinkage irqreturn_t inthandler(void);
asmlinkage irqreturn_t inthandler1(void);
asmlinkage irqreturn_t inthandler2(void);
asmlinkage irqreturn_t inthandler3(void);
asmlinkage irqreturn_t inthandler4(void);
asmlinkage irqreturn_t inthandler5(void);
asmlinkage irqreturn_t inthandler6(void);
asmlinkage irqreturn_t inthandler7(void);
/* The 68k family did not have a good way to determine the source
* of interrupts until later in the family. The EC000 core does
* not provide the vector number on the stack, we vector everything
* into one vector and look in the blasted mask register...
* This code is designed to be fast, almost constant time, not clean!
*/
/*
 * Dispatch all pending interrupts recorded in the ISR register.
 * For each iteration the nested if-ladder narrows the 32-bit pending
 * mask down to the lowest nibble containing a set bit (a manual,
 * near-constant-time priority scan), then the short while loop finds
 * the exact lowest set bit within that nibble.  Interrupts are thus
 * serviced lowest-number-first until none remain pending.
 */
void process_int(int vec, struct pt_regs *fp)
{
	int irq;
	int mask;
	unsigned long pend = ISR;	/* snapshot of pending interrupts */
	while (pend) {
		if (pend & 0x0000ffff) {
			if (pend & 0x000000ff) {
				if (pend & 0x0000000f) {
					mask = 0x00000001;
					irq = 0;
				} else {
					mask = 0x00000010;
					irq = 4;
				}
			} else {
				if (pend & 0x00000f00) {
					mask = 0x00000100;
					irq = 8;
				} else {
					mask = 0x00001000;
					irq = 12;
				}
			}
		} else {
			if (pend & 0x00ff0000) {
				if (pend & 0x000f0000) {
					mask = 0x00010000;
					irq = 16;
				} else {
					mask = 0x00100000;
					irq = 20;
				}
			} else {
				if (pend & 0x0f000000) {
					mask = 0x01000000;
					irq = 24;
				} else {
					mask = 0x10000000;
					irq = 28;
				}
			}
		}
		/* walk up within the nibble to the exact pending bit */
		while (! (mask & pend)) {
			mask <<=1;
			irq++;
		}
		do_IRQ(irq, fp);
		pend &= ~mask;	/* clear the serviced bit in the snapshot */
	}
}
/* Enable an IRQ by clearing its bit in the interrupt mask register. */
static void intc_irq_unmask(struct irq_data *d)
{
	int bit = 1 << d->irq;
	IMR &= ~bit;
}
/* Disable an IRQ by setting its bit in the interrupt mask register. */
static void intc_irq_mask(struct irq_data *d)
{
	int bit = 1 << d->irq;
	IMR |= bit;
}
/* irq_chip for the on-chip DragonBall interrupt controller */
static struct irq_chip intc_irq_chip = {
	.name		= "M68K-INTC",
	.irq_mask	= intc_irq_mask,
	.irq_unmask	= intc_irq_unmask,
};
/*
 * This function should be called during kernel startup to initialize
 * the machine vector table.
 */
void __init trap_init(void)
{
	int i;
	/* set up the vectors */
	/* Vectors 72..255 all point at the catch-all bad_interrupt stub. */
	for (i = 72; i < 256; ++i)
		_ramvec[i] = (e_vector) bad_interrupt;
	_ramvec[32] = system_call;	/* TRAP #0: system call entry */
	/* Handlers for the individual interrupt vectors 65..71.
	 * NOTE(review): vector 64 (inthandler) is declared above but not
	 * installed here -- presumably set up elsewhere; confirm against
	 * the board's vector layout. */
	_ramvec[65] = (e_vector) inthandler1;
	_ramvec[66] = (e_vector) inthandler2;
	_ramvec[67] = (e_vector) inthandler3;
	_ramvec[68] = (e_vector) inthandler4;
	_ramvec[69] = (e_vector) inthandler5;
	_ramvec[70] = (e_vector) inthandler6;
	_ramvec[71] = (e_vector) inthandler7;
}
/*
 * Interrupt controller setup: point the DragonBall's interrupt vector
 * base at 64, mask every source, then register the irq_chip and a
 * level-triggered flow handler for each IRQ line.
 */
void __init init_IRQ(void)
{
	int i;
	IVR = 0x40; /* Set DragonBall IVR (interrupt base) to 64 */
	/* turn off all interrupts */
	IMR = ~0;
	for (i = 0; (i < NR_IRQS); i++) {
		irq_set_chip(i, &intc_irq_chip);
		irq_set_handler(i, handle_level_irq);
	}
}
| gpl-2.0 |
mujeebulhasan/kernel | drivers/gpu/drm/nouveau/nv04_display.c | 5356 | 6849 | /*
* Copyright 2009 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Ben Skeggs
*/
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "nouveau_drv.h"
#include "nouveau_fb.h"
#include "nouveau_hw.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
static void nv04_vblank_crtc0_isr(struct drm_device *);
static void nv04_vblank_crtc1_isr(struct drm_device *);
/*
 * Record which head (CRTC) owned the VGA register set before the driver
 * touched the hardware, so nv04_display_late_takedown() can hand it
 * back on unload.  On most chips this is a plain read of CR44; nv11
 * cannot read CR44 reliably, so ownership is inferred from which heads
 * are slaved and whether the slaved output drives a TV encoder.
 */
static void
nv04_display_store_initial_head_owner(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	if (dev_priv->chipset != 0x11) {
		dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
		return;
	}
	/* reading CR44 is broken on nv11, so we attempt to infer it */
	if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28)) /* heads tied, restore both */
		dev_priv->crtc_owner = 0x4;
	else {
		uint8_t slaved_on_A, slaved_on_B;
		bool tvA = false;
		bool tvB = false;
		/* Bit 7 of CRE_PIXEL_INDEX flags the head as slaved; the
		 * LCD_SELECT bit distinguishes flat panel from TV. */
		slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
									0x80;
		if (slaved_on_B)
			tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) &
					MASK(NV_CIO_CRE_LCD_LCD_SELECT));
		slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) &
									0x80;
		if (slaved_on_A)
			tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
					MASK(NV_CIO_CRE_LCD_LCD_SELECT));
		/* Preference order: a non-TV slaved head first, then any
		 * slaved head, finally default to head A.  The apparent
		 * duplication of the 0x0/0x3 branches encodes priority. */
		if (slaved_on_A && !tvA)
			dev_priv->crtc_owner = 0x0;
		else if (slaved_on_B && !tvB)
			dev_priv->crtc_owner = 0x3;
		else if (slaved_on_A)
			dev_priv->crtc_owner = 0x0;
		else if (slaved_on_B)
			dev_priv->crtc_owner = 0x3;
		else
			dev_priv->crtc_owner = 0x0;
	}
}
/*
 * Hardware setup that must happen before output probing: ungate the
 * display engine on pre-GeForce4 cards (so the I2C buses respond),
 * unlock the extended VGA CRTC registers, and on dual-head chips record
 * the original CRTC register ownership before claiming head 0.
 *
 * Returns 0 (cannot fail).
 */
int
nv04_display_early_init(struct drm_device *dev)
{
	/* Make the I2C buses accessible. */
	if (!nv_gf4_disp_arch(dev)) {
		uint32_t pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
		if (!(pmc_enable & 1))
			nv_wr32(dev, NV03_PMC_ENABLE, pmc_enable | 1);
	}
	/* Unlock the VGA CRTCs. */
	NVLockVgaCrtcs(dev, false);
	/* Make sure the CRTCs aren't in slaved mode. */
	if (nv_two_heads(dev)) {
		nv04_display_store_initial_head_owner(dev);
		NVSetOwner(dev, 0);
	}
	return 0;
}
/*
 * Counterpart of nv04_display_early_init(): give CRTC register
 * ownership back to whichever head held it before we loaded, then
 * re-lock the extended VGA CRTC registers.
 */
void
nv04_display_late_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	if (nv_two_heads(dev))
		NVSetOwner(dev, dev_priv->crtc_owner);
	NVLockVgaCrtcs(dev, true);
}
/*
 * Build the modesetting objects for a pre-NV50 card: create the
 * CRTC(s), then walk the VBIOS DCB table creating a connector and the
 * matching encoder for each output entry.  Connectors that end up with
 * no working encoder are removed again.  Finally the pre-load hardware
 * state is saved (restored later by init/destroy) and the two CRTC
 * vblank interrupt handlers are registered.
 *
 * Returns 0; individual output failures are skipped, not fatal.
 */
int
nv04_display_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct dcb_table *dcb = &dev_priv->vbios.dcb;
	struct drm_connector *connector, *ct;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	int i, ret;
	NV_DEBUG_KMS(dev, "\n");
	nouveau_hw_save_vga_fonts(dev, 1);
	nv04_crtc_create(dev, 0);
	if (nv_two_heads(dev))
		nv04_crtc_create(dev, 1);
	/* One encoder per DCB entry, attached to its connector. */
	for (i = 0; i < dcb->entries; i++) {
		struct dcb_entry *dcbent = &dcb->entry[i];
		connector = nouveau_connector_create(dev, dcbent->connector);
		if (IS_ERR(connector))
			continue;
		switch (dcbent->type) {
		case OUTPUT_ANALOG:
			ret = nv04_dac_create(connector, dcbent);
			break;
		case OUTPUT_LVDS:
		case OUTPUT_TMDS:
			ret = nv04_dfp_create(connector, dcbent);
			break;
		case OUTPUT_TV:
			if (dcbent->location == DCB_LOC_ON_CHIP)
				ret = nv17_tv_create(connector, dcbent);
			else
				ret = nv04_tv_create(connector, dcbent);
			break;
		default:
			NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
			continue;
		}
		/* Encoder creation failure just skips this output. */
		if (ret)
			continue;
	}
	/* Drop connectors that gained no encoder in the loop above. */
	list_for_each_entry_safe(connector, ct,
				 &dev->mode_config.connector_list, head) {
		if (!connector->encoder_ids[0]) {
			NV_WARN(dev, "%s has no encoders, removing\n",
				drm_get_connector_name(connector));
			connector->funcs->destroy(connector);
		}
	}
	/* Save previous state */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		crtc->funcs->save(crtc);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct drm_encoder_helper_funcs *func = encoder->helper_private;
		func->save(encoder);
	}
	nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
	nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
	return 0;
}
/*
 * Tear down the display: unhook the vblank interrupts, switch every
 * CRTC off via an empty mode-set, then restore the hardware state that
 * nv04_display_create() saved (encoders first, then CRTCs) and put the
 * VGA fonts back.
 */
void
nv04_display_destroy(struct drm_device *dev)
{
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	NV_DEBUG_KMS(dev, "\n");
	nouveau_irq_unregister(dev, 24);
	nouveau_irq_unregister(dev, 25);
	/* Turn every CRTC off. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct drm_mode_set modeset = {
			.crtc = crtc,	/* no mode/fb: disables the CRTC */
		};
		crtc->funcs->set_config(&modeset);
	}
	/* Restore state */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct drm_encoder_helper_funcs *func = encoder->helper_private;
		func->restore(encoder);
	}
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		crtc->funcs->restore(crtc);
	nouveau_hw_save_vga_fonts(dev, 0);
}
/*
 * Bring the display back to the last known-good register state before
 * any modeset runs (also used on resume).  Returns 0 (cannot fail).
 */
int
nv04_display_init(struct drm_device *dev)
{
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	/* meh.. modeset apparently doesn't setup all the regs and depends
	 * on pre-existing state, for now load the state of the card *before*
	 * nouveau was loaded, and then do a modeset.
	 *
	 * best thing to do probably is to make save/restore routines not
	 * save/restore "pre-load" state, but more general so we can save
	 * on suspend too.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct drm_encoder_helper_funcs *func = encoder->helper_private;
		func->restore(encoder);
	}
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		crtc->funcs->restore(crtc);
	return 0;
}
/*
 * Counterpart of nv04_display_init(); intentionally a no-op here --
 * all state handling happens in destroy/late_takedown.
 */
void
nv04_display_fini(struct drm_device *dev)
{
}
/*
 * Vblank interrupt handlers, one per head: acknowledge the pending
 * vblank bit in the CRTC's interrupt status register, then forward the
 * event to the DRM core.
 */
static void
nv04_vblank_crtc0_isr(struct drm_device *dev)
{
	nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
	drm_handle_vblank(dev, 0);
}
static void
nv04_vblank_crtc1_isr(struct drm_device *dev)
{
	nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
	drm_handle_vblank(dev, 1);
}
| gpl-2.0 |
AdrielVelazquez/Moto_XT1058 | arch/x86/kernel/tsc_sync.c | 6892 | 5642 | /*
* check TSC synchronization.
*
* Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
*
* We check whether all boot CPUs have their TSC's synchronized,
* print a warning if not and turn off the TSC clock-source.
*
* The warp-check is point-to-point between two CPUs, the CPU
* initiating the bootup is the 'source CPU', the freshly booting
* CPU is the 'target CPU'.
*
* Only two CPUs may participate - they can enter in any order.
* ( The serial nature of the boot logic and the CPU hotplug lock
* protects against more than 2 CPUs entering this code. )
*/
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>
/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static __cpuinitdata atomic_t start_count;
static __cpuinitdata atomic_t stop_count;
/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static __cpuinitdata cycles_t last_tsc;	/* most recent sample, either CPU */
static __cpuinitdata cycles_t max_warp;	/* largest backwards jump observed */
static __cpuinitdata int nr_warps;	/* how many backwards jumps were seen */
/*
 * TSC-warp measurement loop running on both CPUs:
 *
 * @timeout: how long to run, in milliseconds.
 *
 * Both CPUs hammer the shared last_tsc slot under sync_lock; if a CPU
 * ever reads its own TSC as *older* than the value the other CPU just
 * stored, the TSCs are not synchronized and the warp is recorded in
 * max_warp/nr_warps for the source CPU to report.
 */
static __cpuinit void check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end;
	int i;
	/* rdtsc_barrier() fences keep the TSC read from being reordered
	 * around surrounding loads/stores. */
	rdtsc_barrier();
	start = get_cycles();
	rdtsc_barrier();
	/*
	 * The measurement runs for 'timeout' msecs:
	 */
	end = start + (cycles_t) tsc_khz * timeout;
	now = start;
	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		rdtsc_barrier();
		now = get_cycles();
		rdtsc_barrier();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);
		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
		 * loops safety exit, so we dont lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			nr_warps++;
			arch_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start),
		"Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
		now-start, end-start);
}
/*
 * Pick the duration (in msec) of the TSC-warp measurement for @cpu.
 *
 * A freshly booting CPU with no core-siblings online yet gets the full
 * 20 msec check.  Once at least one sibling on the same socket has
 * already passed, a short 2 msec run suffices: the socket is already
 * characterised, and the short run still catches per-logical-CPU TSC
 * damage (e.g. a BIOS writing the TSC) or cores that were not reset
 * together.
 *
 * Ideally the check could be skipped entirely for later siblings, but
 * since the TSC is per-logical CPU and can be modified wrongly by the
 * BIOS, a brief re-check is kept instead.
 */
static inline unsigned int loop_timeout(int cpu)
{
	unsigned int siblings = cpumask_weight(cpu_core_mask(cpu));

	if (siblings > 1)
		return 2;
	return 20;
}
/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 *
 * @cpu: the logical CPU number of the target being brought up.
 *
 * Pairs with check_tsc_sync_target(); start_count/stop_count form a
 * simple two-phase rendezvous so both CPUs run check_tsc_warp()
 * simultaneously.  If any warp was observed, the TSC clocksource is
 * marked unstable for the whole system.
 */
void __cpuinit check_tsc_sync_source(int cpu)
{
	int cpus = 2;	/* exactly two participants, see file header */
	/*
	 * No need to check if we already know that the TSC is not
	 * synchronized:
	 */
	if (unsynchronized_tsc())
		return;
	if (tsc_clocksource_reliable) {
		/* Log the skip only once (last CPU) or on hotplug. */
		if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
			pr_info(
			"Skipped synchronization checks as TSC is reliable.\n");
		return;
	}
	/*
	 * Reset it - in case this is a second bootup:
	 */
	atomic_set(&stop_count, 0);
	/*
	 * Wait for the target to arrive:
	 */
	while (atomic_read(&start_count) != cpus-1)
		cpu_relax();
	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);
	check_tsc_warp(loop_timeout(cpu));
	while (atomic_read(&stop_count) != cpus-1)
		cpu_relax();
	if (nr_warps) {
		pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
			smp_processor_id(), cpu);
		pr_warning("Measured %Ld cycles TSC warp between CPUs, "
			   "turning off TSC clock.\n", max_warp);
		mark_tsc_unstable("check_tsc_sync_source failed");
	} else {
		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
			smp_processor_id(), cpu);
	}
	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);
	nr_warps = 0;
	max_warp = 0;
	last_tsc = 0;
	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);
}
/*
 * Freshly booted CPUs call into this:
 *
 * Target-side half of the rendezvous in check_tsc_sync_source(): both
 * sides skip the check under the same conditions so neither blocks
 * forever waiting for a partner that bailed out.
 */
void __cpuinit check_tsc_sync_target(void)
{
	int cpus = 2;	/* must match check_tsc_sync_source() */
	if (unsynchronized_tsc() || tsc_clocksource_reliable)
		return;
	/*
	 * Register this CPU's participation and wait for the
	 * source CPU to start the measurement:
	 */
	atomic_inc(&start_count);
	while (atomic_read(&start_count) != cpus)
		cpu_relax();
	check_tsc_warp(loop_timeout(smp_processor_id()));
	/*
	 * Ok, we are done:
	 */
	atomic_inc(&stop_count);
	/*
	 * Wait for the source CPU to print stuff:
	 */
	while (atomic_read(&stop_count) != cpus)
		cpu_relax();
}
| gpl-2.0 |
shengdie/simon_kernel_l01f_kk | drivers/mfd/htc-pasic3.c | 7916 | 5333 | /*
* Core driver for HTC PASIC3 LED/DS1WM chip.
*
* Copyright (C) 2006 Philipp Zabel <philipp.zabel@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
#include <linux/mfd/ds1wm.h>
#include <linux/mfd/htc-pasic3.h>
#include <linux/slab.h>
/* Per-device state: the MMIO mapping and the register stride exponent
 * (registers are spaced 1 << bus_shift bytes apart). */
struct pasic3_data {
	void __iomem *mapping;
	unsigned int bus_shift;
};
/* Secondary registers are reached indirectly: write the target register
 * number to REG_ADDR (ORing READ_MODE for reads), then move the value
 * through REG_DATA. */
#define REG_ADDR 5
#define REG_DATA 6
#define READ_MODE 0x80
/*
 * write to a secondary register on the PASIC3
 *
 * @dev: the PASIC3 platform device (holds the pasic3_data drvdata)
 * @reg: secondary register number
 * @val: byte to write
 */
void pasic3_write_register(struct device *dev, u32 reg, u8 val)
{
	struct pasic3_data *asic = dev_get_drvdata(dev);
	int bus_shift = asic->bus_shift;
	void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
	void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
	/* Clearing READ_MODE selects write direction. */
	__raw_writeb(~READ_MODE & reg, addr);
	__raw_writeb(val, data);
}
EXPORT_SYMBOL(pasic3_write_register); /* for leds-pasic3 */
/*
 * read from a secondary register on the PASIC3
 *
 * @dev: the PASIC3 platform device (holds the pasic3_data drvdata)
 * @reg: secondary register number
 *
 * Returns the byte read from the register.
 */
u8 pasic3_read_register(struct device *dev, u32 reg)
{
	struct pasic3_data *asic = dev_get_drvdata(dev);
	int bus_shift = asic->bus_shift;
	void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
	void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
	/* Setting READ_MODE selects read direction. */
	__raw_writeb(READ_MODE | reg, addr);
	return __raw_readb(data);
}
EXPORT_SYMBOL(pasic3_read_register); /* for leds-pasic3 */
/*
 * LEDs
 */
/* MFD sub-device cell for the LED block; platform data is filled in
 * from the board's pasic3_platform_data at probe time. */
static struct mfd_cell led_cell __initdata = {
	.name = "leds-pasic3",
};
/*
 * DS1WM
 */
/* Power up the DS1WM one-wire block: bit 7 of secondary register 0x28
 * is the (active-low) OWM_EN control. */
static int ds1wm_enable(struct platform_device *pdev)
{
	struct device *dev = pdev->dev.parent;
	int c;
	c = pasic3_read_register(dev, 0x28);
	pasic3_write_register(dev, 0x28, c & 0x7f);
	dev_dbg(dev, "DS1WM OWM_EN low (active) %02x\n", c & 0x7f);
	return 0;
}
/* Power down the DS1WM block by raising the active-low OWM_EN bit. */
static int ds1wm_disable(struct platform_device *pdev)
{
	struct device *dev = pdev->dev.parent;
	int c;
	c = pasic3_read_register(dev, 0x28);
	pasic3_write_register(dev, 0x28, c | 0x80);
	dev_dbg(dev, "DS1WM OWM_EN high (inactive) %02x\n", c | 0x80);
	return 0;
}
/* DS1WM platform data; clock_rate is copied in from the board's
 * pasic3_platform_data at probe time. */
static struct ds1wm_driver_data ds1wm_pdata = {
	.active_high = 0,
	.reset_recover_delay = 1,
};
/* Placeholder resources; the memory end address and the IRQ flags are
 * completed in pasic3_probe() once the parent's resources are known. */
static struct resource ds1wm_resources[] __initdata = {
	[0] = {
		.start = 0,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_IRQ,
	},
};
/* MFD sub-device cell for the DS1WM one-wire master. */
static struct mfd_cell ds1wm_cell __initdata = {
	.name = "ds1wm",
	.enable = ds1wm_enable,
	.disable = ds1wm_disable,
	.platform_data = &ds1wm_pdata,
	.pdata_size = sizeof(ds1wm_pdata),
	.num_resources = 2,
	.resources = ds1wm_resources,
};
/*
 * Probe the PASIC3: map its register window, work out the register
 * stride from the window size, then register the DS1WM and LED
 * sub-devices for which the board supplied platform data.
 *
 * Fix vs. original: the requested memory region was leaked on the
 * kzalloc()/ioremap() failure paths; use goto-based cleanup so every
 * error path releases what was acquired.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init pasic3_probe(struct platform_device *pdev)
{
	struct pasic3_platform_data *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct pasic3_data *asic;
	struct resource *r;
	int ret;
	int irq = 0;
	/* Forward the parent's IRQ (and its edge flags) to the DS1WM cell. */
	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (r) {
		ds1wm_resources[1].flags = IORESOURCE_IRQ | (r->flags &
			(IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE));
		irq = r->start;
	}
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENXIO;
	if (!request_mem_region(r->start, resource_size(r), "pasic3"))
		return -EBUSY;
	asic = kzalloc(sizeof(*asic), GFP_KERNEL);
	if (!asic) {
		ret = -ENOMEM;
		goto out_release;
	}
	platform_set_drvdata(pdev, asic);
	asic->mapping = ioremap(r->start, resource_size(r));
	if (!asic->mapping) {
		dev_err(dev, "couldn't ioremap PASIC3\n");
		ret = -ENOMEM;
		goto out_free;
	}
	/* calculate bus shift from mem resource */
	asic->bus_shift = (resource_size(r) - 5) >> 3;
	if (pdata && pdata->clock_rate) {
		ds1wm_pdata.clock_rate = pdata->clock_rate;
		/* the first 5 PASIC3 registers control the DS1WM */
		ds1wm_resources[0].end = (5 << asic->bus_shift) - 1;
		ret = mfd_add_devices(&pdev->dev, pdev->id,
				      &ds1wm_cell, 1, r, irq);
		if (ret < 0)
			dev_warn(dev, "failed to register DS1WM\n");
	}
	if (pdata && pdata->led_pdata) {
		led_cell.platform_data = pdata->led_pdata;
		led_cell.pdata_size = sizeof(struct pasic3_leds_machinfo);
		ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0);
		if (ret < 0)
			dev_warn(dev, "failed to register LED device\n");
	}
	return 0;
out_free:
	kfree(asic);
out_release:
	release_mem_region(r->start, resource_size(r));
	return ret;
}
/*
 * Undo pasic3_probe() in reverse order: remove the MFD sub-devices,
 * unmap the register window, release the memory region, and free the
 * per-device state.
 */
static int pasic3_remove(struct platform_device *pdev)
{
	struct pasic3_data *asic = platform_get_drvdata(pdev);
	struct resource *r;
	mfd_remove_devices(&pdev->dev);
	iounmap(asic->mapping);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));
	kfree(asic);
	return 0;
}
MODULE_ALIAS("platform:pasic3");
static struct platform_driver pasic3_driver = {
.driver = {
.name = "pasic3",
},
.remove = pasic3_remove,
};
/* Register via platform_driver_probe() because the probe routine is
 * __init and must not be callable after boot. */
static int __init pasic3_base_init(void)
{
	return platform_driver_probe(&pasic3_driver, pasic3_probe);
}
static void __exit pasic3_base_exit(void)
{
	platform_driver_unregister(&pasic3_driver);
}
module_init(pasic3_base_init);
module_exit(pasic3_base_exit);
MODULE_AUTHOR("Philipp Zabel <philipp.zabel@gmail.com>");
MODULE_DESCRIPTION("Core driver for HTC PASIC3");
MODULE_LICENSE("GPL");
| gpl-2.0 |
AshleyLai/testing1 | drivers/isdn/mISDN/dsp_hwec.c | 9708 | 3078 | /*
* dsp_hwec.c:
* builtin mISDN dsp pipeline element for enabling the hw echocanceller
*
* Copyright (C) 2007, Nadi Sarrar
*
* Nadi Sarrar <nadi@beronet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mISDNdsp.h>
#include <linux/mISDNif.h>
#include "core.h"
#include "dsp.h"
#include "dsp_hwec.h"
/* The single pipeline option this element understands. */
static struct mISDN_dsp_element_arg args[] = {
	{ "deftaps", "128", "Set the number of taps of cancellation." },
};
/* Pipeline element descriptor.  All callbacks are NULL: this element
 * does no per-frame processing itself, it only switches the hardware
 * echo canceller on/off via dsp_hwec_enable()/dsp_hwec_disable(). */
static struct mISDN_dsp_element dsp_hwec_p = {
	.name = "hwec",
	.new = NULL,
	.free = NULL,
	.process_tx = NULL,
	.process_rx = NULL,
	.num_args = ARRAY_SIZE(args),
	.args = args,
};
/* Public handle used by the dsp core and dsp_hwec_init/exit. */
struct mISDN_dsp_element *dsp_hwec = &dsp_hwec_p;
/*
 * Enable the hardware echo canceller on the channel behind @dsp.
 *
 * @dsp: the dsp instance whose peer channel gets the control request
 * @arg: optional comma-separated "name=value" option string; only
 *       "deftaps" (number of cancellation taps) is recognised,
 *       defaulting to 128.
 */
void dsp_hwec_enable(struct dsp *dsp, const char *arg)
{
	int deftaps = 128,
		len;
	struct mISDN_ctrl_req cq;
	if (!dsp) {
		printk(KERN_ERR "%s: failed to enable hwec: dsp is NULL\n",
		       __func__);
		return;
	}
	if (!arg)
		goto _do;
	len = strlen(arg);
	if (!len)
		goto _do;
	{
		/* NOTE(review): option string is copied to a stack VLA
		 * sized by the caller-supplied length; acceptable for the
		 * short, trusted pipeline config strings used here. */
		char _dup[len + 1];
		char *dup, *tok, *name, *val;
		int tmp;
		strcpy(_dup, arg);
		dup = _dup;
		/* Parse "name=value" pairs; strsep() consumes _dup. */
		while ((tok = strsep(&dup, ","))) {
			if (!strlen(tok))
				continue;
			name = strsep(&tok, "=");
			val = tok;
			if (!val)
				continue;
			if (!strcmp(name, "deftaps")) {
				if (sscanf(val, "%d", &tmp) == 1)
					deftaps = tmp;
			}
		}
	}
_do:
	printk(KERN_DEBUG "%s: enabling hwec with deftaps=%d\n",
	       __func__, deftaps);
	memset(&cq, 0, sizeof(cq));
	cq.op = MISDN_CTRL_HFC_ECHOCAN_ON;
	cq.p1 = deftaps;
	/* NOTE(review): a zero return from ->ctrl() is reported as failure
	 * here -- confirm against the peer channel's CONTROL_CHANNEL
	 * return convention. */
	if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
		printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
		       __func__);
		return;
	}
}
/*
 * Disable the hardware echo canceller on the channel behind @dsp.
 * Counterpart of dsp_hwec_enable(); takes no options.
 */
void dsp_hwec_disable(struct dsp *dsp)
{
	struct mISDN_ctrl_req cq;
	if (!dsp) {
		printk(KERN_ERR "%s: failed to disable hwec: dsp is NULL\n",
		       __func__);
		return;
	}
	printk(KERN_DEBUG "%s: disabling hwec\n", __func__);
	memset(&cq, 0, sizeof(cq));
	cq.op = MISDN_CTRL_HFC_ECHOCAN_OFF;
	/* NOTE(review): same ->ctrl() return-value convention caveat as in
	 * dsp_hwec_enable() -- confirm zero really means failure. */
	if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
		printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
		       __func__);
		return;
	}
}
/* Register the hwec element with the dsp pipeline core.
 * NOTE(review): the return value of mISDN_dsp_element_register() is
 * ignored; a registration failure is silently reported as success. */
int dsp_hwec_init(void)
{
	mISDN_dsp_element_register(dsp_hwec);
	return 0;
}
/* Unregister the hwec element on module teardown. */
void dsp_hwec_exit(void)
{
	mISDN_dsp_element_unregister(dsp_hwec);
}
| gpl-2.0 |
Sony-Kitakami/android_kernel_sony | tools/firewire/decode-fcp.c | 13036 | 5617 | #include <linux/firewire-constants.h>
#include <stdio.h>
#include <stdlib.h>
#include "list.h"
#include "nosy-dump.h"
#define CSR_FCP_COMMAND 0xfffff0000b00ull
#define CSR_FCP_RESPONSE 0xfffff0000d00ull
/* Printable names for the 4-bit AV/C ctype/response field: the low
 * values (0x0-0x7) are command types, the high values (0x8-0xf) are
 * response codes. */
static const char * const ctype_names[] = {
	[0x0] = "control",		[0x8] = "not implemented",
	[0x1] = "status",		[0x9] = "accepted",
	[0x2] = "specific inquiry",	[0xa] = "rejected",
	[0x3] = "notify",		[0xb] = "in transition",
	[0x4] = "general inquiry",	[0xc] = "stable",
	[0x5] = "(reserved 0x05)",	[0xd] = "changed",
	[0x6] = "(reserved 0x06)",	[0xe] = "(reserved 0x0e)",
	[0x7] = "(reserved 0x07)",	[0xf] = "interim",
};
/* Printable names for the 5-bit AV/C subunit_type field. */
static const char * const subunit_type_names[] = {
	[0x00] = "monitor",		[0x10] = "(reserved 0x10)",
	[0x01] = "audio",		[0x11] = "(reserved 0x11)",
	[0x02] = "printer",		[0x12] = "(reserved 0x12)",
	[0x03] = "disc",		[0x13] = "(reserved 0x13)",
	[0x04] = "tape recorder/player",[0x14] = "(reserved 0x14)",
	[0x05] = "tuner",		[0x15] = "(reserved 0x15)",
	[0x06] = "ca",			[0x16] = "(reserved 0x16)",
	[0x07] = "camera",		[0x17] = "(reserved 0x17)",
	[0x08] = "(reserved 0x08)",	[0x18] = "(reserved 0x18)",
	[0x09] = "panel",		[0x19] = "(reserved 0x19)",
	[0x0a] = "bulletin board",	[0x1a] = "(reserved 0x1a)",
	[0x0b] = "camera storage",	[0x1b] = "(reserved 0x1b)",
	[0x0c] = "(reserved 0x0c)",	[0x1c] = "vendor unique",
	[0x0d] = "(reserved 0x0d)",	[0x1d] = "all subunit types",
	[0x0e] = "(reserved 0x0e)",	[0x1e] = "subunit_type extended to next byte",
	[0x0f] = "(reserved 0x0f)",	[0x1f] = "unit",
};
/* One named value of an operand field (terminated by a zero entry). */
struct avc_enum {
	int value;
	const char *name;
};
/* Description of one operand field within an AV/C frame. */
struct avc_field {
	const char *name;	/* Short name for field. */
	int offset;		/* Location of field, specified in bits; */
				/* negative means from end of packet. */
	int width;		/* Width of field, 0 means use data_length. */
	struct avc_enum *names;	/* optional value names, NULL-terminated */
};
/* Decode table entry for one AV/C opcode: display name plus up to
 * eight operand fields (terminated by a NULL name). */
struct avc_opcode_info {
	const char *name;
	struct avc_field fields[8];
};
/* Operand values of the POWER (0xb2) command's state field.
 * Fix vs. original: declared static -- it is only referenced by the
 * file-local opcode_info table, and every other file-scope object in
 * this tool is static. */
static struct avc_enum power_field_names[] = {
	{ 0x70, "on" },
	{ 0x60, "off" },
	{ }
};
/* Decode table indexed by the 8-bit AV/C opcode; entries with a NULL
 * name are printed as "(unknown opcode 0x..)" by decode_avc(). */
static const struct avc_opcode_info opcode_info[256] = {
	/* TA Document 1999026 */
	/* AV/C Digital Interface Command Set General Specification 4.0 */
	[0xb2] = { "power", {
			{ "state", 0, 8, power_field_names }
		}
	},
	[0x30] = { "unit info", {
			{ "foo", 0, 8 },
			{ "unit_type", 8, 5 },
			{ "unit", 13, 3 },
			{ "company id", 16, 24 },
		}
	},
	[0x31] = { "subunit info" },
	[0x01] = { "reserve" },
	[0xb0] = { "version" },
	[0x00] = { "vendor dependent" },
	[0x02] = { "plug info" },
	[0x12] = { "channel usage" },
	[0x24] = { "connect" },
	[0x20] = { "connect av" },
	[0x22] = { "connections" },
	[0x11] = { "digital input" },
	[0x10] = { "digital output" },
	[0x25] = { "disconnect" },
	[0x21] = { "disconnect av" },
	[0x19] = { "input plug signal format" },
	[0x18] = { "output plug signal format" },
	[0x1f] = { "general bus setup" },
	/* TA Document 1999025 */
	/* AV/C Descriptor Mechanism Specification Version 1.0 */
	[0x0c] = { "create descriptor" },
	[0x08] = { "open descriptor" },
	[0x09] = { "read descriptor" },
	[0x0a] = { "write descriptor" },
	[0x05] = { "open info block" },
	[0x06] = { "read info block" },
	[0x07] = { "write info block" },
	[0x0b] = { "search descriptor" },
	[0x0d] = { "object number select" },
	/* TA Document 1999015 */
	/* AV/C Command Set for Rate Control of Isochronous Data Flow 1.0 */
	[0xb3] = { "rate", {
			{ "subfunction", 0, 8 },
			{ "result", 8, 8 },
			{ "plug_type", 16, 8 },
			{ "plug_id", 16, 8 },
		}
	},
	/* TA Document 1999008 */
	/* AV/C Audio Subunit Specification 1.0 */
	[0xb8] = { "function block" },
	/* TA Document 2001001 */
	/* AV/C Panel Subunit Specification 1.1 */
	[0x7d] = { "gui update" },
	[0x7e] = { "push gui data" },
	[0x7f] = { "user action" },
	[0x7c] = { "pass through" },
	/* */
	[0x26] = { "asynchronous connection" },
};
/* First quadlet of an AV/C frame, picked apart with bitfields.
 * NOTE(review): bitfield allocation order is implementation-defined;
 * this layout presumably matches how nosy-dump stores the quadlet on
 * the host it runs on -- confirm byte-order handling before porting. */
struct avc_frame {
	uint32_t operand0:8;
	uint32_t opcode:8;
	uint32_t subunit_id:3;
	uint32_t subunit_type:5;
	uint32_t ctype:4;
	uint32_t cts:4;
};
/*
 * Pretty-print one AV/C frame: resolve ctype, subunit type/id and
 * opcode to names via the tables above, then list the names of the
 * operand fields defined for that opcode.
 */
static void
decode_avc(struct link_transaction *t)
{
	struct avc_frame *frame =
		(struct avc_frame *) t->request->packet.write_block.data;
	const struct avc_opcode_info *info;
	const char *name;
	char buffer[32];	/* holds the "(unknown opcode ...)" text */
	int i;
	info = &opcode_info[frame->opcode];
	if (info->name == NULL) {
		snprintf(buffer, sizeof(buffer),
			 "(unknown opcode 0x%02x)", frame->opcode);
		name = buffer;
	} else {
		name = info->name;
	}
	printf("av/c %s, subunit_type=%s, subunit_id=%d, opcode=%s",
	       ctype_names[frame->ctype], subunit_type_names[frame->subunit_type],
	       frame->subunit_id, name);
	/* Unknown opcodes have an all-NULL fields[] array, so this loop
	 * simply does nothing for them. */
	for (i = 0; info->fields[i].name != NULL; i++)
		printf(", %s", info->fields[i].name);
	printf("\n");
}
/*
 * Decode a transaction if it is an FCP write: a block write request to
 * the FCP command or response CSR.  Dispatch on the 4-bit CTS (command
 * transaction set) field: AV/C frames are decoded fully, all other sets
 * are just identified by name.
 *
 * Fix vs. original: the "reserved" diagnostic mislabelled the field as
 * "ctx" although it prints frame->cts; the label now reads "cts" like
 * the other branches.
 *
 * Returns 1 if the transaction was an FCP frame (consumed), 0 otherwise.
 */
int
decode_fcp(struct link_transaction *t)
{
	struct avc_frame *frame =
		(struct avc_frame *) t->request->packet.write_block.data;
	unsigned long long offset =
		((unsigned long long) t->request->packet.common.offset_high << 32) |
		t->request->packet.common.offset_low;
	/* FCP frames are always block writes. */
	if (t->request->packet.common.tcode != TCODE_WRITE_BLOCK_REQUEST)
		return 0;
	if (offset == CSR_FCP_COMMAND || offset == CSR_FCP_RESPONSE) {
		switch (frame->cts) {
		case 0x00:
			decode_avc(t);
			break;
		case 0x01:
			printf("cal fcp frame (cts=0x01)\n");
			break;
		case 0x02:
			printf("ehs fcp frame (cts=0x02)\n");
			break;
		case 0x03:
			printf("havi fcp frame (cts=0x03)\n");
			break;
		case 0x0e:
			printf("vendor specific fcp frame (cts=0x0e)\n");
			break;
		case 0x0f:
			printf("extended cts\n");
			break;
		default:
			printf("reserved fcp frame (cts=0x%02x)\n", frame->cts);
			break;
		}
		return 1;
	}
	return 0;
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.