repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
BSydz/Triumph-Sharp-2.2.2-Custom-Kernel | drivers/video/n411.c | 14290 | 4883 | /*
* linux/drivers/video/n411.c -- Platform device for N411 EPD kit
*
* Copyright (C) 2008, Jaya Kumar
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
*
* This driver is written to be used with the Hecuba display controller
* board, and tested with the EInk 800x600 display in 1 bit mode.
* The interface between Hecuba and the host is TTL based GPIO. The
* GPIO requirements are 8 writable data lines and 6 lines for control.
* Only 4 of the controls are actually used here but 6 for future use.
* The driver requires the IO addresses for data and control GPIO at
* load time. It is also possible to use this display with a standard
* PC parallel port.
*
* General notes:
* - User must set dio_addr=0xIOADDR cio_addr=0xIOADDR c2io_addr=0xIOADDR
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/irq.h>
#include <video/hecubafb.h>
static unsigned long dio_addr;
static unsigned long cio_addr;
static unsigned long c2io_addr;
static unsigned long splashval;
static unsigned int nosplash;
static unsigned char ctl;
static void n411_set_ctl(struct hecubafb_par *par, unsigned char bit, unsigned
char state)
{
	/*
	 * Update the shadow copy of the control lines and push it to the
	 * control GPIO port.  Note the inverted sense: asserting a line
	 * (state != 0) clears its bit in the register image — the CD and
	 * DS control lines appear to be driven active-low.
	 */
	if (bit == HCB_CD_BIT)
		ctl = state ? (ctl & ~HCB_CD_BIT) : (ctl | HCB_CD_BIT);
	else if (bit == HCB_DS_BIT)
		ctl = state ? (ctl & ~HCB_DS_BIT) : (ctl | HCB_DS_BIT);

	outb(ctl, cio_addr);
}
/* Read the secondary control port (carries the ACK line, see wait_for_ack). */
static unsigned char n411_get_ctl(struct hecubafb_par *par)
{
	return inb(c2io_addr);
}

/* Drive one byte onto the 8 data GPIO lines. */
static void n411_set_data(struct hecubafb_par *par, unsigned char value)
{
	outb(value, dio_addr);
}
/*
 * Busy-wait until the controller's ACK line reaches the requested state:
 * clear == 0 waits for ACK to go high, clear != 0 waits for ACK to go low.
 * Polls roughly every microsecond for ~500 iterations; on expiry it only
 * logs an error — callers are not informed of the timeout.
 */
static void n411_wait_for_ack(struct hecubafb_par *par, int clear)
{
	int timeout;
	unsigned char tmp;

	timeout = 500;
	do {
		tmp = n411_get_ctl(par);
		if ((tmp & HCB_ACK_BIT) && (!clear))
			return;
		else if (!(tmp & HCB_ACK_BIT) && (clear))
			return;
		udelay(1);
	} while (timeout--);
	printk(KERN_ERR "timed out waiting for ack\n");
}
/*
 * Put the control lines into their idle/init state and sanity-check the
 * link by verifying ACK is not already asserted.
 *
 * Desired init state: WUP low, ACK high, DS high, RW high, CD low.
 * Returns 0 on success, -ENXIO if ACK is already low (no/confused device).
 */
static int n411_init_control(struct hecubafb_par *par)
{
	unsigned char tmp;

	/*
	 * Seed the shadow register: WUP low, DS high, RW high, CD low
	 * (remember set_ctl's inverted sense), then latch DS via set_ctl.
	 */
	ctl = HCB_WUP_BIT | HCB_RW_BIT | HCB_CD_BIT ;
	n411_set_ctl(par, HCB_DS_BIT, 1);

	/* check ACK is not lo */
	tmp = n411_get_ctl(par);
	if (tmp & HCB_ACK_BIT) {
		printk(KERN_ERR "Fail because ACK is already low\n");
		return -ENXIO;
	}

	return 0;
}
/*
 * Board init callback invoked by the hecubafb core: initialise the control
 * lines, kick the display controller's init sequence, and (unless the
 * nosplash module parameter is set) erase the panel to 'splashval'.
 * Returns 0 on success or the error from n411_init_control().
 */
static int n411_init_board(struct hecubafb_par *par)
{
	int retval;

	retval = n411_init_control(par);
	if (retval)
		return retval;

	par->send_command(par, APOLLO_INIT_DISPLAY);
	par->send_data(par, 0x81);

	/* have to wait while display resets */
	udelay(1000);

	/* if we were told to splash the screen, we just clear it */
	if (!nosplash) {
		par->send_command(par, APOLLO_ERASE_DISPLAY);
		par->send_data(par, splashval);
	}

	return 0;
}
static struct hecuba_board n411_board = {
.owner = THIS_MODULE,
.init = n411_init_board,
.set_ctl = n411_set_ctl,
.set_data = n411_set_data,
.wait_for_ack = n411_wait_for_ack,
};
static struct platform_device *n411_device;
/*
 * Module init: validate the user-supplied GPIO port addresses, load the
 * platform-independent hecubafb driver, and register the n411 platform
 * device carrying our board callbacks.
 *
 * Fix: the return value of platform_device_add_data() was ignored.  If the
 * data copy fails (-ENOMEM), the old code went on to register a device
 * without its board data and leaked/misregistered the platform device.
 * Both failure paths now drop the device reference before returning.
 */
static int __init n411_init(void)
{
	int ret;

	if (!dio_addr || !cio_addr || !c2io_addr) {
		printk(KERN_WARNING "no IO addresses supplied\n");
		return -EINVAL;
	}

	/* request our platform independent driver */
	request_module("hecubafb");

	n411_device = platform_device_alloc("hecubafb", -1);
	if (!n411_device)
		return -ENOMEM;

	ret = platform_device_add_data(n411_device, &n411_board,
				       sizeof(n411_board));
	if (ret)
		goto put_plat_device;

	/* this _add binds hecubafb to n411. hecubafb refcounts n411 */
	ret = platform_device_add(n411_device);
	if (ret)
		goto put_plat_device;

	return 0;

put_plat_device:
	platform_device_put(n411_device);
	return ret;
}
/* Module exit: unregister (and thereby release) the n411 platform device. */
static void __exit n411_exit(void)
{
	platform_device_unregister(n411_device);
}
module_init(n411_init);
module_exit(n411_exit);
module_param(nosplash, uint, 0);
MODULE_PARM_DESC(nosplash, "Disable doing the splash screen");
module_param(dio_addr, ulong, 0);
MODULE_PARM_DESC(dio_addr, "IO address for data, eg: 0x480");
module_param(cio_addr, ulong, 0);
MODULE_PARM_DESC(cio_addr, "IO address for control, eg: 0x400");
module_param(c2io_addr, ulong, 0);
MODULE_PARM_DESC(c2io_addr, "IO address for secondary control, eg: 0x408");
module_param(splashval, ulong, 0);
MODULE_PARM_DESC(splashval, "Splash pattern: 0x00 is black, 0x01 is white");
MODULE_DESCRIPTION("board driver for n411 hecuba/apollo epd kit");
MODULE_AUTHOR("Jaya Kumar");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mifl/android_kernel_pantech_p9090 | sound/oss/pas2_midi.c | 14802 | 5011 | /*
* sound/oss/pas2_midi.c
*
* The low level driver for the PAS Midi Interface.
*/
/*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*
* Bartlomiej Zolnierkiewicz : Added __init to pas_init_mixer()
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "pas2.h"
extern spinlock_t pas_lock;
static int midi_busy, input_opened;
static int my_dev;
int pas2_mididev=-1;
static unsigned char tmp_queue[256];
static volatile int qlen;
static volatile unsigned char qhead, qtail;
static void (*midi_input_intr) (int dev, unsigned char data);
/*
 * Open the PAS MIDI interface.
 *
 * Resets the hardware FIFOs, claims the MIDI interrupt (bit 0x10), enables
 * input and/or output depending on 'mode', acknowledges any stale interrupt
 * status, and resets the software output queue.  Only one opener at a time
 * is allowed (-EBUSY otherwise).  Register ordering below is hardware
 * mandated; keep it as-is.
 */
static int pas_midi_open(int dev, int mode,
	void (*input) (int dev, unsigned char data),
	void (*output) (int dev)
)
{
	int err;
	unsigned long flags;
	unsigned char ctrl;

	if (midi_busy)
		return -EBUSY;

	/*
	 * Reset input and output FIFO pointers
	 */
	pas_write(0x20 | 0x40,
		0x178b);

	spin_lock_irqsave(&pas_lock, flags);

	if ((err = pas_set_intr(0x10)) < 0)
	{
		spin_unlock_irqrestore(&pas_lock, flags);
		return err;
	}
	/*
	 * Enable input available and output FIFO empty interrupts
	 */

	ctrl = 0;
	input_opened = 0;
	midi_input_intr = input;

	if (mode == OPEN_READ || mode == OPEN_READWRITE)
	{
		ctrl |= 0x04;	/* Enable input */
		input_opened = 1;
	}
	if (mode == OPEN_WRITE || mode == OPEN_READWRITE)
	{
		ctrl |= 0x08 | 0x10;	/* Enable output */
	}
	pas_write(ctrl, 0x178b);

	/*
	 * Acknowledge any pending interrupts
	 */

	pas_write(0xff, 0x1B88);

	spin_unlock_irqrestore(&pas_lock, flags);

	midi_busy = 1;
	qlen = qhead = qtail = 0;
	return 0;
}
/*
 * Close the MIDI interface: reset the hardware FIFOs, release the MIDI
 * interrupt, and mark the device free for the next opener.
 */
static void pas_midi_close(int dev)
{
	/*
	 * Reset FIFO pointers, disable intrs
	 */

	pas_write(0x20 | 0x40, 0x178b);

	pas_remove_intr(0x10);
	midi_busy = 0;
}
/*
 * Try to push one byte into the hardware output FIFO.
 * Returns 1 if the byte was written, 0 if the FIFO is (almost) full and
 * the caller should retry later.
 */
static int dump_to_midi(unsigned char midi_byte)
{
	int fifo_space, x;

	fifo_space = ((x = pas_read(0x1B89)) >> 4) & 0x0f;

	/*
	 * The MIDI FIFO space register and its documentation are hard to
	 * understand: there seems to be no way to tell a full buffer from an
	 * empty one.  For this reason we never fill the buffer completely,
	 * so that a reading of 0 (or is it 15?) can be assumed to mean the
	 * buffer is empty.
	 */
	if (fifo_space < 2 && fifo_space != 0)	/* Full (almost) */
		return 0;	/* Ask upper layers to retry after some time */

	pas_write(midi_byte, 0x178A);

	return 1;
}
/*
 * Queue one byte for MIDI output.
 *
 * Drains the local software queue into the hardware FIFO, then either
 * writes the new byte directly (queue empty) or appends it to the queue.
 * Returns 1 on success, 0 if the local queue is full (upper layer retries).
 *
 * Fix: the old code dropped pas_lock between draining the queue and
 * touching qlen/qtail/the FIFO again, racing with pas_midi_interrupt()
 * which manipulates the same queue and FIFO under the lock.  The whole
 * drain/emit/enqueue sequence is now performed under one critical section,
 * matching what the interrupt handler does.
 */
static int pas_midi_out(int dev, unsigned char midi_byte)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&pas_lock, flags);

	/*
	 * Drain the local queue first
	 */
	while (qlen && dump_to_midi(tmp_queue[qhead]))
	{
		qlen--;
		qhead++;
	}

	/*
	 * Output the byte directly if the local queue is empty.
	 */
	if (!qlen && dump_to_midi(midi_byte))
		goto out;

	/*
	 * Otherwise put it on the local queue
	 */
	if (qlen >= 256)
	{
		ret = 0;	/* Local queue full */
		goto out;
	}

	tmp_queue[qtail] = midi_byte;
	qlen++;
	qtail++;		/* unsigned char: wraps at 256, matching tmp_queue */
out:
	spin_unlock_irqrestore(&pas_lock, flags);
	return ret;
}
/* Input is interrupt driven; nothing to do to start reading. */
static int pas_midi_start_read(int dev)
{
	return 0;
}

/* Nothing to do to stop reading either. */
static int pas_midi_end_read(int dev)
{
	return 0;
}

/* Output is kicked by the FIFO-empty interrupt; no manual kick needed. */
static void pas_midi_kick(int dev)
{
}

/* Report how many bytes are still pending in the local output queue. */
static int pas_buffer_status(int dev)
{
	return qlen;
}
#define MIDI_SYNTH_NAME "Pro Audio Spectrum Midi"
#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
#include "midi_synth.h"
static struct midi_operations pas_midi_operations =
{
.owner = THIS_MODULE,
.info = {"Pro Audio Spectrum", 0, 0, SNDCARD_PAS},
.converter = &std_midi_synth,
.in_info = {0},
.open = pas_midi_open,
.close = pas_midi_close,
.outputc = pas_midi_out,
.start_read = pas_midi_start_read,
.end_read = pas_midi_end_read,
.kick = pas_midi_kick,
.buffer_status = pas_buffer_status,
};
/*
 * Register the PAS MIDI device with the OSS midi layer and hook it up to
 * the standard midi synth converter.  On allocation failure the device is
 * simply not registered (pas2_mididev stays -1).
 */
void __init pas_midi_init(void)
{
	int dev = sound_alloc_mididev();

	if (dev == -1)
	{
		printk(KERN_WARNING "pas_midi_init: Too many midi devices detected\n");
		return;
	}
	std_midi_synth.midi_dev = my_dev = dev;
	midi_devs[dev] = &pas_midi_operations;
	pas2_mididev = dev;
	/* NOTE(review): this variant calls sequencer_init() here; upstream
	 * leaves that to the caller — confirm against the rest of the tree. */
	sequencer_init();
}
/*
 * MIDI interrupt handler (runs in IRQ context).
 *
 * 0x04: input data available — drain the input FIFO, delivering bytes to
 *       the registered input callback or flushing them if input is closed.
 *       A FIFO count of 0 is taken to mean 16 (full), see dump_to_midi's
 *       note on the ambiguous FIFO registers.
 * 0x08|0x10: output FIFO empty — refill it from the local queue under
 *       pas_lock (plain spin_lock: interrupts are already off here).
 * 0x40: output overrun — just log it.
 * Finally write the status back to acknowledge the interrupt.
 */
void pas_midi_interrupt(void)
{
	unsigned char stat;
	int i, incount;

	stat = pas_read(0x1B88);

	if (stat & 0x04)	/* Input data available */
	{
		incount = pas_read(0x1B89) & 0x0f;	/* Input FIFO size */
		if (!incount)
			incount = 16;

		for (i = 0; i < incount; i++)
			if (input_opened)
			{
				midi_input_intr(my_dev, pas_read(0x178A));
			} else
				pas_read(0x178A);	/* Flush */
	}
	if (stat & (0x08 | 0x10))
	{
		spin_lock(&pas_lock);/* called in irq context */

		while (qlen && dump_to_midi(tmp_queue[qhead]))
		{
			qlen--;
			qhead++;
		}

		spin_unlock(&pas_lock);
	}
	if (stat & 0x40)
	{
		printk(KERN_WARNING "MIDI output overrun %x,%x\n", pas_read(0x1B89), stat);
	}
	pas_write(stat, 0x1B88);	/* Acknowledge interrupts */
}
| gpl-2.0 |
jpoirier/linux | sound/oss/pas2_midi.c | 14802 | 5011 | /*
* sound/oss/pas2_midi.c
*
* The low level driver for the PAS Midi Interface.
*/
/*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*
* Bartlomiej Zolnierkiewicz : Added __init to pas_init_mixer()
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "pas2.h"
extern spinlock_t pas_lock;
static int midi_busy, input_opened;
static int my_dev;
int pas2_mididev=-1;
static unsigned char tmp_queue[256];
static volatile int qlen;
static volatile unsigned char qhead, qtail;
static void (*midi_input_intr) (int dev, unsigned char data);
/*
 * Open the PAS MIDI interface: reset the hardware FIFOs, claim the MIDI
 * interrupt (bit 0x10), enable input/output per 'mode', acknowledge stale
 * interrupt status, and reset the software output queue.  Single opener
 * only (-EBUSY otherwise); register ordering is hardware mandated.
 */
static int pas_midi_open(int dev, int mode,
	void (*input) (int dev, unsigned char data),
	void (*output) (int dev)
)
{
	int err;
	unsigned long flags;
	unsigned char ctrl;

	if (midi_busy)
		return -EBUSY;

	/*
	 * Reset input and output FIFO pointers
	 */
	pas_write(0x20 | 0x40,
		0x178b);

	spin_lock_irqsave(&pas_lock, flags);

	if ((err = pas_set_intr(0x10)) < 0)
	{
		spin_unlock_irqrestore(&pas_lock, flags);
		return err;
	}
	/*
	 * Enable input available and output FIFO empty interrupts
	 */

	ctrl = 0;
	input_opened = 0;
	midi_input_intr = input;

	if (mode == OPEN_READ || mode == OPEN_READWRITE)
	{
		ctrl |= 0x04;	/* Enable input */
		input_opened = 1;
	}
	if (mode == OPEN_WRITE || mode == OPEN_READWRITE)
	{
		ctrl |= 0x08 | 0x10;	/* Enable output */
	}
	pas_write(ctrl, 0x178b);

	/*
	 * Acknowledge any pending interrupts
	 */

	pas_write(0xff, 0x1B88);

	spin_unlock_irqrestore(&pas_lock, flags);

	midi_busy = 1;
	qlen = qhead = qtail = 0;
	return 0;
}
/* Close: reset the hardware FIFOs, release the MIDI IRQ, free the device. */
static void pas_midi_close(int dev)
{
	/*
	 * Reset FIFO pointers, disable intrs
	 */

	pas_write(0x20 | 0x40, 0x178b);

	pas_remove_intr(0x10);
	midi_busy = 0;
}
/*
 * Try to push one byte into the hardware output FIFO.
 * Returns 1 if written, 0 if the FIFO is (almost) full — caller retries.
 */
static int dump_to_midi(unsigned char midi_byte)
{
	int fifo_space, x;

	fifo_space = ((x = pas_read(0x1B89)) >> 4) & 0x0f;

	/*
	 * The MIDI FIFO space register and its documentation are hard to
	 * understand: there seems to be no way to tell a full buffer from an
	 * empty one.  We therefore never fill the buffer completely, so that
	 * a reading of 0 (or is it 15?) can be assumed to mean empty.
	 */
	if (fifo_space < 2 && fifo_space != 0)	/* Full (almost) */
		return 0;	/* Ask upper layers to retry after some time */

	pas_write(midi_byte, 0x178A);

	return 1;
}
/*
 * Queue one byte for MIDI output: drain the local queue into the hardware
 * FIFO, then emit the new byte directly (queue empty) or append it to the
 * queue.  Returns 1 on success, 0 when the local queue is full.
 *
 * Fix: the old code released pas_lock between draining the queue and
 * re-reading qlen / writing the FIFO / appending to the queue, racing with
 * pas_midi_interrupt() which updates the same state under the lock.  The
 * whole sequence now runs in a single critical section.
 */
static int pas_midi_out(int dev, unsigned char midi_byte)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&pas_lock, flags);

	/*
	 * Drain the local queue first
	 */
	while (qlen && dump_to_midi(tmp_queue[qhead]))
	{
		qlen--;
		qhead++;
	}

	/*
	 * Output the byte directly if the local queue is empty.
	 */
	if (!qlen && dump_to_midi(midi_byte))
		goto out;

	/*
	 * Otherwise put it on the local queue
	 */
	if (qlen >= 256)
	{
		ret = 0;	/* Local queue full */
		goto out;
	}

	tmp_queue[qtail] = midi_byte;
	qlen++;
	qtail++;		/* unsigned char: wraps at 256, matching tmp_queue */
out:
	spin_unlock_irqrestore(&pas_lock, flags);
	return ret;
}
/* Input is interrupt driven; nothing to do to start reading. */
static int pas_midi_start_read(int dev)
{
	return 0;
}

/* Nothing to do to stop reading either. */
static int pas_midi_end_read(int dev)
{
	return 0;
}

/* Output is kicked by the FIFO-empty interrupt; no manual kick needed. */
static void pas_midi_kick(int dev)
{
}

/* Report how many bytes are still pending in the local output queue. */
static int pas_buffer_status(int dev)
{
	return qlen;
}
#define MIDI_SYNTH_NAME "Pro Audio Spectrum Midi"
#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
#include "midi_synth.h"
static struct midi_operations pas_midi_operations =
{
.owner = THIS_MODULE,
.info = {"Pro Audio Spectrum", 0, 0, SNDCARD_PAS},
.converter = &std_midi_synth,
.in_info = {0},
.open = pas_midi_open,
.close = pas_midi_close,
.outputc = pas_midi_out,
.start_read = pas_midi_start_read,
.end_read = pas_midi_end_read,
.kick = pas_midi_kick,
.buffer_status = pas_buffer_status,
};
/*
 * Register the PAS MIDI device with the OSS midi layer and hook it to the
 * standard midi synth converter; on allocation failure the device is not
 * registered (pas2_mididev stays -1).
 */
void __init pas_midi_init(void)
{
	int dev = sound_alloc_mididev();

	if (dev == -1)
	{
		printk(KERN_WARNING "pas_midi_init: Too many midi devices detected\n");
		return;
	}
	std_midi_synth.midi_dev = my_dev = dev;
	midi_devs[dev] = &pas_midi_operations;
	pas2_mididev = dev;
	/* NOTE(review): sequencer_init() called here in this variant. */
	sequencer_init();
}
/*
 * MIDI interrupt handler (IRQ context).
 * 0x04: drain the input FIFO to the input callback (or flush if closed);
 *       a FIFO count of 0 is treated as 16 — see dump_to_midi's note.
 * 0x08|0x10: output FIFO empty — refill from the local queue under pas_lock.
 * 0x40: output overrun — log only.
 * The status byte is written back to acknowledge the interrupt.
 */
void pas_midi_interrupt(void)
{
	unsigned char stat;
	int i, incount;

	stat = pas_read(0x1B88);

	if (stat & 0x04)	/* Input data available */
	{
		incount = pas_read(0x1B89) & 0x0f;	/* Input FIFO size */
		if (!incount)
			incount = 16;

		for (i = 0; i < incount; i++)
			if (input_opened)
			{
				midi_input_intr(my_dev, pas_read(0x178A));
			} else
				pas_read(0x178A);	/* Flush */
	}
	if (stat & (0x08 | 0x10))
	{
		spin_lock(&pas_lock);/* called in irq context */

		while (qlen && dump_to_midi(tmp_queue[qhead]))
		{
			qlen--;
			qhead++;
		}

		spin_unlock(&pas_lock);
	}
	if (stat & 0x40)
	{
		printk(KERN_WARNING "MIDI output overrun %x,%x\n", pas_read(0x1B89), stat);
	}
	pas_write(stat, 0x1B88);	/* Acknowledge interrupts */
}
| gpl-2.0 |
klock-android/linux | arch/mips/oprofile/common.c | 211 | 3141 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004, 2005 Ralf Baechle
* Copyright (C) 2005 MIPS Technologies, Inc.
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/cpu-info.h>
#include <asm/cpu-type.h>
#include "op_impl.h"
extern struct op_mips_model op_model_mipsxx_ops __weak;
extern struct op_mips_model op_model_loongson2_ops __weak;
extern struct op_mips_model op_model_loongson3_ops __weak;
static struct op_mips_model *model;
static struct op_counter_config ctr[20];
/*
 * oprofile 'setup' callback: translate the user-configured counters into
 * hardware register values, then program them on every CPU.
 */
static int op_mips_setup(void)
{
	/* Pre-compute the values to stuff in the hardware registers. */
	model->reg_setup(ctr);

	/* Configure the registers on all cpus. */
	on_each_cpu(model->cpu_setup, NULL, 1);
	return 0;
}
/*
 * Create one oprofilefs directory per hardware counter ("0", "1", ...)
 * exposing the standard per-counter control files.  buf[4] is enough for
 * the largest index (num_counters <= 20, so at most "19").
 */
static int op_mips_create_files(struct dentry *root)
{
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		snprintf(buf, sizeof buf, "%d", i);
		dir = oprofilefs_mkdir(root, buf);

		oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
		oprofilefs_create_ulong(dir, "event", &ctr[i].event);
		oprofilefs_create_ulong(dir, "count", &ctr[i].count);
		oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
		oprofilefs_create_ulong(dir, "user", &ctr[i].user);
		oprofilefs_create_ulong(dir, "exl", &ctr[i].exl);
		/* Dummy. */
		oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
	}

	return 0;
}
/* Start performance counting on every CPU. */
static int op_mips_start(void)
{
	on_each_cpu(model->cpu_start, NULL, 1);

	return 0;
}

/* Stop performance counting on every CPU. */
static void op_mips_stop(void)
{
	/* Disable performance monitoring for all counters. */
	on_each_cpu(model->cpu_stop, NULL, 1);
}
/*
 * Probe the CPU type, pick the matching perf-counter model, initialise it
 * and fill in the oprofile callback table.  The backtrace hook is set even
 * for unsupported CPUs so timer-based profiling still gets stack traces.
 *
 * Cleanups: removed the stray ';' after the switch's closing brace (an
 * empty statement that trips -Wpedantic) and the dead, commented-out
 * ops->shutdown assignment.
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	struct op_mips_model *lmodel = NULL;
	int res;

	switch (current_cpu_type()) {
	case CPU_5KC:
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_20KC:
	case CPU_24K:
	case CPU_25KF:
	case CPU_34K:
	case CPU_1004K:
	case CPU_74K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_M5150:
	case CPU_LOONGSON1:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_XLR:
		lmodel = &op_model_mipsxx_ops;
		break;

	case CPU_LOONGSON2:
		lmodel = &op_model_loongson2_ops;
		break;

	case CPU_LOONGSON3:
		lmodel = &op_model_loongson3_ops;
		break;
	}

	/*
	 * Always set the backtrace. This allows unsupported CPU types to still
	 * use timer-based oprofile.
	 */
	ops->backtrace = op_mips_backtrace;

	if (!lmodel)
		return -ENODEV;

	res = lmodel->init();
	if (res)
		return res;

	model = lmodel;

	ops->create_files = op_mips_create_files;
	ops->setup = op_mips_setup;
	ops->start = op_mips_start;
	ops->stop = op_mips_stop;
	ops->cpu_type = lmodel->cpu_type;

	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
	       lmodel->cpu_type);

	return 0;
}
/* Tear down the active counter model, if one was initialised. */
void oprofile_arch_exit(void)
{
	if (model)
		model->exit();
}
| gpl-2.0 |
codeaurora-unoffical/linux-msm | drivers/lightnvm/pblk-write.c | 211 | 17117 | // SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
* Matias Bjorling <matias@cnexlabs.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* pblk-write.c - pblk's write path from write buffer to media
*/
#include "pblk.h"
#include "pblk-trace.h"
/*
 * Complete one write request: end all user bios attached to its ring-buffer
 * entries (clearing any flush-point flags), free padding pages, advance the
 * ring buffer's sync pointer, and release the request.  Returns the new
 * sync position so the caller can complete queued-out-of-order requests.
 */
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;

			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}
/* Complete a request that was parked on the completion list (in order). */
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					struct nvm_rq *rqd,
					struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

/*
 * Writes must be completed in ring-buffer order.  If this request is the
 * next expected one (sentry == sync position) complete it — and then any
 * parked requests that have become contiguous; otherwise park it on
 * pblk->compl_list until its turn comes.
 */
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
	pblk_up_rq(pblk, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		/* Drain any parked requests that are now next in line. */
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}
/* Map remaining sectors in chunk, starting from ppa */
/*
 * After a write error: walk the chunk from 'ppa' to its end, marking every
 * sector as mapped and invalid so the line's accounting stays consistent,
 * and flag the line for write-error GC.  Only the first rqd_ppas sectors
 * (the ones the failed request actually covered) adjust nr_valid_lbas.
 */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
			       int rqd_ppas)
{
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
	__le64 *lba_list;
	u64 paddr;
	int done = 0;
	int n = 0;

	line = pblk_ppa_to_line(pblk, *ppa);
	lba_list = emeta_to_lbas(pblk, line->emeta->buf);

	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (n < rqd_ppas && lba_list[paddr] != addr_empty)
			line->nr_valid_lbas--;

		lba_list[paddr] = addr_empty;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);

		n++;
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}
/*
 * Prepare nr_entries ring-buffer entries (starting at 'sentry') for
 * resubmission after a failed write: invalidate lbas that have since been
 * overwritten, mark each entry submittable again, and drop the per-line
 * references taken for the original mapping.
 */
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		if (w_ctx->lba != ADDR_EMPTY) {
			ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
			if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
				w_ctx->lba = ADDR_EMPTY;
		}

		/* Mark up the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);

		/* Decrease the reference count to the line as we will
		 * re-map these entries
		 */
		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		atomic_dec(&line->sec_to_update);
		kref_put(&line->ref, pblk_line_put);
	}
	spin_unlock(&pblk->trans_lock);
}
/*
 * Record a failed request's entry range on the resubmit list so the write
 * thread retries it before taking new data from the ring buffer.
 *
 * NOTE(review): if the kzalloc fails, the function returns silently and the
 * entries are never resubmitted — confirm whether the caller has any other
 * recovery for that (it appears not).
 */
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}
/*
 * Workqueue handler for a failed write: log the error, invalidate the rest
 * of the chunk, queue the entries for resubmission, release the request's
 * resources, and kick the write thread to retry.
 */
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	pblk_log_write_err(pblk, rqd);

	pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
	pblk_write_kick(pblk);
}

/*
 * Called from the write completion path on error: defer recovery to the
 * close workqueue (completion runs in a context where the recovery work —
 * locks, memory allocation — cannot be done directly).
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pblk_err(pblk, "could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}
/*
 * Completion callback for user-data writes: divert failures into the
 * recovery path, otherwise trace the chunk state and finish the request
 * in ring-buffer order.
 */
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	}

	if (trace_pblk_chunk_state_enabled())
		pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}
/*
 * Completion callback for emeta (line metadata) writes: release the chunk
 * semaphore, record any write error on the line, and — once all emeta
 * sectors for the line have synced — schedule the line-close work.
 */
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int sync;

	pblk_up_chunk(pblk, ppa_list[0]);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}
/*
 * Fill in the common fields of a write request (opcode, sector count,
 * completion hook) and allocate its metadata buffer.
 */
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->is_seq = 1;
	rqd->private = pblk;
	rqd->end_io = end_io;

	return pblk_alloc_rqd_meta(pblk, rqd);
}

/*
 * Set up a user-data write: allocate the LUN bitmap, initialise the
 * request, and map its sectors to physical addresses.  If a line erase is
 * pending, the erase-aware mapping variant is used and *erase_ppa is
 * filled with a block to erase.
 */
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, 0);
	else
		ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return ret;
}
/*
 * Decide how many sectors to put in the next write, given what is
 * available in the ring buffer and whether a flush is pending.  The debug
 * build cross-checks the result against the inputs.
 */
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}
/*
 * Submit one chunk-sized slice of a line's emeta (end-of-line metadata) to
 * the device.  Allocates pages on the meta line, advances emeta->mem, and
 * removes the line from the emeta list once all its metadata is in flight.
 * Returns NVM_IO_OK or a negative error after rolling back the allocation.
 */
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct ppa_addr *ppa_list;
	struct pblk_g_ctx *m_ctx;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_rqd;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	spin_lock(&l_mg->close_lock);
	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0])
		list_del(&meta_line->list);
	spin_unlock(&l_mg->close_lock);

	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd, data);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_chunk(pblk, ppa_list[0]);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/*
	 * NOTE(review): this adds the node to *itself* rather than back onto
	 * l_mg->emeta_list (and does so even when the list_del above did not
	 * run).  Looks like it should be conditional and target emeta_list —
	 * verify against the list_add()/list_del() semantics in <linux/list.h>
	 * before changing, since the error path is hard to exercise.
	 */
	list_add(&meta_line->list, &meta_line->list);
	spin_unlock(&l_mg->close_lock);

fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}
/*
 * Decide whether a metadata write can be issued now without colliding with
 * the given data write on the same LUNs.  Returns true when the optimal
 * metadata position is already busy (LUN taken by the data I/O or a bad
 * block), i.e. when scheduling the metadata I/O would serialize anyway.
 */
static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result on metadata and data I/Os colliding. In
	 * this case, modify the distance to not be optimal, but move the
	 * optimal in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}

/*
 * Pick the next line whose emeta still needs writing, if issuing it now
 * would not conflict with the data write.  Returns NULL when there is
 * nothing to write or the schedule is unfavourable.
 */
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}
/*
 * Issue one round of I/O: map and submit the data write, asynchronously
 * erase the next line's block if the mapper requested one (re-arming the
 * erase bookkeeping on failure so it is retried), and piggy-back a
 * metadata write for a previous line when the schedule allows.
 */
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pblk_err(pblk, "could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd, NULL);
	if (err) {
		pblk_err(pblk, "data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			/* Erase failed to launch: put the block back so the
			 * mapper hands it out again later. */
			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pblk_err(pblk, "metadata I/O submission failed: %d",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}

/* Free the pages that were added to pad the write bio, if any. */
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}
/*
 * One iteration of the write thread: pick sectors to write — failed
 * requests queued for resubmission take priority over new data in the ring
 * buffer — build the bio and request, and submit.  *secs_left is set to 1
 * when work was submitted (caller loops again) and 0 when there is nothing
 * to do.  Returns 0, or -EINTR when submission failed.
 */
static int pblk_submit_write(struct pblk *pblk, int *secs_left)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush, packed_meta_pgs;
	unsigned long pos;
	unsigned int resubmit;

	*secs_left = 0;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;

		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache,
		 * flushes (bios without data) will be cleared on
		 * the cache threads
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 0;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
			return 0;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pblk_err(pblk, "bad buffer sync calculation\n");
			return 0;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
	bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pblk_err(pblk, "corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	*secs_left = 1;

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return -EINTR;
}
/*
 * Main loop of the pblk writer kthread.  Keeps submitting writes while
 * there is work; once a submission fails (write_failure != 0) it stops
 * submitting and only sleeps until the thread is stopped.
 *
 * set_current_state() is called before io_schedule() so a wakeup that
 * races with going to sleep is not lost.
 */
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;
	int secs_left;
	int write_failure = 0;

	while (!kthread_should_stop()) {
		if (!write_failure) {
			write_failure = pblk_submit_write(pblk, &secs_left);

			/* Submitted something: try again immediately */
			if (secs_left)
				continue;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}
| gpl-2.0 |
x13thangelx/droid2we-kernel | net/rds/ib_sysctl.c | 467 | 5013 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include "ib.h"
static struct ctl_table_header *rds_ib_sysctl_hdr;
unsigned long rds_ib_sysctl_max_send_wr = RDS_IB_DEFAULT_SEND_WR;
unsigned long rds_ib_sysctl_max_recv_wr = RDS_IB_DEFAULT_RECV_WR;
unsigned long rds_ib_sysctl_max_recv_allocation = (128 * 1024 * 1024) / RDS_FRAG_SIZE;
static unsigned long rds_ib_sysctl_max_wr_min = 1;
/* hardware will fail CQ creation long before this */
static unsigned long rds_ib_sysctl_max_wr_max = (u32)~0;
unsigned long rds_ib_sysctl_max_unsig_wrs = 16;
static unsigned long rds_ib_sysctl_max_unsig_wr_min = 1;
static unsigned long rds_ib_sysctl_max_unsig_wr_max = 64;
unsigned long rds_ib_sysctl_max_unsig_bytes = (16 << 20);
static unsigned long rds_ib_sysctl_max_unsig_bytes_min = 1;
static unsigned long rds_ib_sysctl_max_unsig_bytes_max = ~0UL;
/*
* This sysctl does nothing.
*
* Backwards compatibility with RDS 3.0 wire protocol
* disables initial FC credit exchange.
* If it's ever possible to drop 3.0 support,
* setting this to 1 and moving init/refill of send/recv
* rings from ib_cm_connect_complete() back into ib_setup_qp()
* will cause credits to be added before protocol negotiation.
*/
unsigned int rds_ib_sysctl_flow_control = 0;
/*
 * Sysctl knobs exposed under net/rds/ib/.  All entries use
 * CTL_UNNUMBERED (name-based lookup only); the table is terminated by
 * the zero entry.  min/max bounds are enforced via extra1/extra2 by
 * proc_doulongvec_minmax.
 */
ctl_table rds_ib_sysctl_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "max_send_wr",
		.data		= &rds_ib_sysctl_max_send_wr,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= &proc_doulongvec_minmax,
		.extra1		= &rds_ib_sysctl_max_wr_min,
		.extra2		= &rds_ib_sysctl_max_wr_max,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "max_recv_wr",
		.data		= &rds_ib_sysctl_max_recv_wr,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= &proc_doulongvec_minmax,
		.extra1		= &rds_ib_sysctl_max_wr_min,
		.extra2		= &rds_ib_sysctl_max_wr_max,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "max_unsignaled_wr",
		.data		= &rds_ib_sysctl_max_unsig_wrs,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= &proc_doulongvec_minmax,
		.extra1		= &rds_ib_sysctl_max_unsig_wr_min,
		.extra2		= &rds_ib_sysctl_max_unsig_wr_max,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "max_unsignaled_bytes",
		.data		= &rds_ib_sysctl_max_unsig_bytes,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= &proc_doulongvec_minmax,
		.extra1		= &rds_ib_sysctl_max_unsig_bytes_min,
		.extra2		= &rds_ib_sysctl_max_unsig_bytes_max,
	},
	{
		/* No bounds: any unsigned long is accepted */
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "max_recv_allocation",
		.data		= &rds_ib_sysctl_max_recv_allocation,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= &proc_doulongvec_minmax,
	},
	{
		/* See comment above rds_ib_sysctl_flow_control: no-op knob */
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "flow_control",
		.data		= &rds_ib_sysctl_flow_control,
		.maxlen		= sizeof(rds_ib_sysctl_flow_control),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0}
};
/* Directory path for the table above: /proc/sys/net/rds/ib/ */
static struct ctl_path rds_ib_sysctl_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "rds", .ctl_name = CTL_UNNUMBERED, },
	{ .procname = "ib", .ctl_name = CTL_UNNUMBERED, },
	{ }
};
/* Tear down the sysctl registration; safe if init never succeeded. */
void rds_ib_sysctl_exit(void)
{
	if (rds_ib_sysctl_hdr)
		unregister_sysctl_table(rds_ib_sysctl_hdr);
}
/*
 * Register the net/rds/ib sysctl table.
 * Returns 0 on success, -ENOMEM if registration fails.
 */
int __init rds_ib_sysctl_init(void)
{
	rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table);
	if (rds_ib_sysctl_hdr == NULL)
		return -ENOMEM;
	return 0;
}
| gpl-2.0 |
balticembedded/be-kernel | arch/arm/mach-s5pc100/mach-smdkc100.c | 723 | 4999 | /* linux/arch/arm/mach-s5pc100/mach-smdkc100.c
*
* Copyright 2009 Samsung Electronics Co.
* Author: Byungho Min <bhmin@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <mach/map.h>
#include <mach/regs-fb.h>
#include <video/platform_lcd.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/s5pc100.h>
#include <plat/fb.h>
#include <plat/iic.h>
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define S5PC100_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
S3C2410_UCON_TXIRQMODE | \
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI | \
S3C2443_UCON_RXERR_IRQEN)
#define S5PC100_ULCON_DEFAULT S3C2410_LCON_CS8
#define S5PC100_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
S3C2440_UFCON_RXTRIG8 | \
S3C2440_UFCON_TXTRIG16)
/* All four on-SoC UART ports, each with the default UCON/ULCON/UFCON
 * settings defined above (IRQ-driven 8N1 with FIFOs enabled).
 */
static struct s3c2410_uartcfg smdkc100_uartcfgs[] __initdata = {
	[0] = {
		.hwport	     = 0,
		.flags	     = 0,
		.ucon	     = S5PC100_UCON_DEFAULT,
		.ulcon	     = S5PC100_ULCON_DEFAULT,
		.ufcon	     = S5PC100_UFCON_DEFAULT,
	},
	[1] = {
		.hwport	     = 1,
		.flags	     = 0,
		.ucon	     = S5PC100_UCON_DEFAULT,
		.ulcon	     = S5PC100_ULCON_DEFAULT,
		.ufcon	     = S5PC100_UFCON_DEFAULT,
	},
	[2] = {
		.hwport	     = 2,
		.flags	     = 0,
		.ucon	     = S5PC100_UCON_DEFAULT,
		.ulcon	     = S5PC100_ULCON_DEFAULT,
		.ufcon	     = S5PC100_UFCON_DEFAULT,
	},
	[3] = {
		.hwport	     = 3,
		.flags	     = 0,
		.ucon	     = S5PC100_UCON_DEFAULT,
		.ulcon	     = S5PC100_ULCON_DEFAULT,
		.ufcon	     = S5PC100_UFCON_DEFAULT,
	},
};
/* I2C0 */
static struct i2c_board_info i2c_devs0[] __initdata = {
};
/* I2C1 */
static struct i2c_board_info i2c_devs1[] __initdata = {
};
/* LCD power controller */
/*
 * Drive the panel backlight GPIO and, on power-up, pulse the module
 * reset line.  The mdelay() sequence is the panel's required reset
 * timing - do not reorder or shorten it.
 */
static void smdkc100_lcd_power_set(struct plat_lcd_data *pd,
				   unsigned int power)
{
	/* backlight */
	gpio_direction_output(S5PC100_GPD(0), power);

	if (power) {
		/* module reset */
		gpio_direction_output(S5PC100_GPH0(6), 1);
		mdelay(100);
		gpio_direction_output(S5PC100_GPH0(6), 0);
		mdelay(10);
		gpio_direction_output(S5PC100_GPH0(6), 1);
		mdelay(10);
	}
}
/* platform-lcd glue: routes power requests to smdkc100_lcd_power_set() */
static struct plat_lcd_data smdkc100_lcd_power_data = {
	.set_power	= smdkc100_lcd_power_set,
};

/* Child of the framebuffer device so LCD power follows the fb lifetime */
static struct platform_device smdkc100_lcd_powerdev = {
	.name			= "platform-lcd",
	.dev.parent		= &s3c_device_fb.dev,
	.dev.platform_data	= &smdkc100_lcd_power_data,
};
/* Frame Buffer */
/* 800x480 panel timings on window 0; pixclock derived from the full
 * frame size (active + porches + sync) at an 80 Hz refresh.
 */
static struct s3c_fb_pd_win smdkc100_fb_win0 = {
	/* this is to ensure we use win0 */
	.win_mode	= {
		.pixclock = 1000000000000ULL / ((8+13+3+800)*(7+5+1+480)*80),
		.left_margin	= 8,
		.right_margin	= 13,
		.upper_margin	= 7,
		.lower_margin	= 5,
		.hsync_len	= 3,
		.vsync_len	= 1,
		.xres		= 800,
		.yres		= 480,
	},
	.max_bpp	= 32,
	.default_bpp	= 16,
};

/* Framebuffer platform data: RGB parallel output, both syncs inverted */
static struct s3c_fb_platdata smdkc100_lcd_pdata __initdata = {
	.win[0]		= &smdkc100_fb_win0,
	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
	.setup_gpio	= s5pc100_fb_gpio_setup_24bpp,
};
/* Platform devices registered for this board at init time */
static struct platform_device *smdkc100_devices[] __initdata = {
	&s3c_device_i2c0,
	&s3c_device_i2c1,
	&s3c_device_fb,
	&s3c_device_hsmmc0,
	&s3c_device_hsmmc1,
	&s3c_device_hsmmc2,
	&smdkc100_lcd_powerdev,
	&s5pc100_device_iis0,
	&s5pc100_device_ac97,
};
/* Early IO mapping: static maps, clocks (12 MHz xtal) and UART setup */
static void __init smdkc100_map_io(void)
{
	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
	s3c24xx_init_clocks(12000000);
	s3c24xx_init_uarts(smdkc100_uartcfgs, ARRAY_SIZE(smdkc100_uartcfgs));
}
/*
 * Board init: hook up I2C, framebuffer and LCD GPIOs, then register all
 * platform devices.
 */
static void __init smdkc100_machine_init(void)
{
	/* I2C */
	s3c_i2c0_set_platdata(NULL);
	s3c_i2c1_set_platdata(NULL);
	i2c_register_board_info(0, i2c_devs0, ARRAY_SIZE(i2c_devs0));
	i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1));

	s3c_fb_set_platdata(&smdkc100_lcd_pdata);

	/* LCD init */
	/* NOTE(review): gpio_request() return values are not checked here;
	 * presumably these pins are always free at board init - confirm.
	 */
	gpio_request(S5PC100_GPD(0), "GPD");
	gpio_request(S5PC100_GPH0(6), "GPH0");
	smdkc100_lcd_power_set(&smdkc100_lcd_power_data, 0);

	platform_add_devices(smdkc100_devices, ARRAY_SIZE(smdkc100_devices));
}
/* Machine descriptor tying the board callbacks above to the SMDKC100
 * machine type; boot_params points at the standard ATAGs offset.
 */
MACHINE_START(SMDKC100, "SMDKC100")
	/* Maintainer: Byungho Min <bhmin@samsung.com> */
	.phys_io	= S3C_PA_UART & 0xfff00000,
	.io_pg_offst	= (((u32)S3C_VA_UART) >> 18) & 0xfffc,
	.boot_params	= S5P_PA_SDRAM + 0x100,
	.init_irq	= s5pc100_init_irq,
	.map_io		= smdkc100_map_io,
	.init_machine	= smdkc100_machine_init,
	.timer		= &s3c24xx_timer,
MACHINE_END
| gpl-2.0 |
akw28888/kernel_zte_msm8x25q | drivers/net/virtio_net.c | 979 | 32405 | /* A network driver using virtio.
*
* Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128
#define VIRTNET_SEND_COMMAND_SG_MAX 2
#define VIRTNET_DRIVER_VERSION "1.0.0"
/* Per-cpu traffic counters, read consistently via the u64_stats seqlock */
struct virtnet_stats {
	struct u64_stats_sync syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Driver-private state for one virtio-net device */
struct virtnet_info {
	struct virtio_device *vdev;
	/* receive, send and (optional) control virtqueues */
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* fragments + linear part + virtio header */
	struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
	struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};
/* Virtio header kept alongside an skb, plus the sg count used on tx */
struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separated sg buffer because of a
	 * QEMU bug, and data sg buffer shares same page with this header sg.
	 * This padding makes next sg 16 byte aligned after virtio_net_hdr.
	 */
	char padding[6];
};

/* The per-skb virtio header is stashed in the skb control buffer (cb) */
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}
/*
 * Return a chain of pages (linked through page->private) to the driver's
 * reuse pool.  The whole most-recently-used chain goes to the front of
 * vi->pages so it is handed out again first.
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
	struct page *tail = page;

	/* Walk to the last page of the chain being returned. */
	while (tail->private)
		tail = (struct page *)tail->private;

	/* Splice the existing pool behind it; new chain becomes the head. */
	tail->private = (unsigned long)vi->pages;
	vi->pages = page;
}
/*
 * Get one page, preferring the driver's reuse pool over the page
 * allocator.  A pooled page is unlinked and its chain pointer cleared
 * before being handed out.
 */
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *page = vi->pages;

	if (!page)
		return alloc_page(gfp_mask);

	/* Pop the pool head; ->private chains pooled pages together. */
	vi->pages = (struct page *)page->private;
	page->private = 0;
	return page;
}
/* Tx virtqueue callback: mask further interrupts and wake the tx queue */
static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);
}

/*
 * Append up to one page (or *len, whichever is smaller) of @page at
 * @offset as a new skb fragment, updating the skb accounting and
 * decrementing *len by the amount consumed.
 */
static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int size = min((unsigned)PAGE_SIZE - offset, *len);
	int i = skb_shinfo(skb)->nr_frags;

	__skb_fill_page_desc(skb, i, page, offset, size);

	skb->data_len += size;
	skb->len += size;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
	*len -= size;
}
/* Called from bottom half context */
/*
 * Build an skb from a received page chain of @len bytes.  The virtio
 * header and up to GOOD_COPY_LEN of data are copied into the skb's
 * linear area; the remainder is attached as page fragments.  Unused
 * trailing pages are returned to the pool.  Returns NULL on allocation
 * failure or if the device delivered more data than an skb can hold.
 */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct page *page, unsigned int len)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		/* non-mergeable: data starts after the padded header */
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	/* Copy what fits into the linear area */
	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		if (net_ratelimit())
			pr_debug("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}

	/* Attach the rest of the page chain as fragments */
	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(vi, page);

	return skb;
}
/*
 * Mergeable rx: pull the remaining num_buffers - 1 pages for this packet
 * off the receive queue and attach them to @skb as fragments.
 * Returns 0 on success, -EINVAL if frags run out or a buffer is missing.
 */
static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	int num_buf, i, len;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(vi->rvq, &len);
		if (!page) {
			/* Device promised more buffers than it delivered */
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		/* One fewer posted rx buffer outstanding */
		--vi->num;
	}
	return 0;
}
/*
 * Process one completed receive buffer: reconstruct the skb (small,
 * big-packet or mergeable layout), update rx stats, apply the virtio
 * header's checksum and GSO metadata, and hand the skb to the stack.
 * Malformed packets are counted and dropped.
 */
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	/* Runt frame: recycle the buffer according to its layout */
	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		/* Small mode: buf is already an skb, just trim the header */
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(vi, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(vi, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(vi, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}
/*
 * Post one small (MAX_PACKET_LEN) skb receive buffer: sg[0] is the
 * virtio header in the skb's cb, the rest is the skb data itself.
 * Returns 0/positive on success, negative errno on failure.
 */
static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);

	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}
/*
 * Post one big-packet receive buffer: a chain of MAX_SKB_FRAGS + 1
 * pages.  sg[0] covers only the virtio header (kept in a separate sg
 * entry to work around a QEMU bug - see struct padded_vnet_hdr), sg[1]
 * the rest of the first page, sg[2..] one full page each.  The pages
 * are chained through ->private so completion can walk them.
 */
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(vi, gfp);
		if (!first) {
			/* Give back everything allocated so far */
			if (list)
				give_pages(vi, list);
			return -ENOMEM;
		}
		sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(vi, gfp);
	if (!first) {
		give_pages(vi, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* vi->rx_sg[0], vi->rx_sg[1] share the same page */
	/* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
	sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));

	/* vi->rx_sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
				first, gfp);
	if (err < 0)
		give_pages(vi, first);

	return err;
}
/*
 * Post one mergeable receive buffer: a single whole page; the host
 * splits packets across as many of these as needed.
 */
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(vi, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(vi, page);

	return err;
}
/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	int err;
	bool oom;

	/* Keep posting buffers until the ring is full or allocation fails */
	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, gfp);
		else
			err = add_recvbuf_small(vi, gfp);

		oom = err == -ENOMEM;
		if (err < 0)
			break;
		++vi->num;
	} while (err > 0);
	/* Track high-water mark of posted buffers for the refill heuristic */
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	virtqueue_kick(vi->rvq);
	return !oom;
}
/* Rx virtqueue callback: kick NAPI, masking further rx interrupts */
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI, Suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

static void virtnet_napi_enable(struct virtnet_info *vi)
{
	napi_enable(&vi->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(vi->rvq);
		local_bh_disable();
		__napi_schedule(&vi->napi);
		local_bh_enable();
	}
}

/*
 * Deferred rx refill after an OOM: pause NAPI, retry the fill, and
 * reschedule ourselves (with backoff) if the ring is still empty.
 */
static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	still_empty = !try_fill_recv(vi, GFP_KERNEL);
	virtnet_napi_enable(vi);

	/* In theory, this can happen: if we don't get any buffers in
	 * we will *never* try to fill again. */
	if (still_empty)
		queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
}
/*
 * NAPI poll handler: drain up to @budget completed rx buffers, refill
 * the ring when it drops below half of its high-water mark, and
 * re-enable rx interrupts once the queue is exhausted.  If more work
 * raced in while re-enabling, loop back instead of completing.
 */
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
		receive_buf(vi->dev, buf, len);
		--vi->num;
		received++;
	}

	if (vi->num < vi->max / 2) {
		/* OOM in atomic context: defer the refill to the workqueue */
		if (!try_fill_recv(vi, GFP_ATOMIC))
			queue_delayed_work(system_nrt_wq, &vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}
/*
 * Reclaim completed tx skbs from the send virtqueue, updating tx stats.
 * Returns the total number of sg entries freed (used by the caller to
 * estimate remaining queue capacity).
 */
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len, tot_sgs = 0;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);

		tot_sgs += skb_vnet_hdr(skb)->num_sg;
		dev_kfree_skb_any(skb);
	}
	return tot_sgs;
}
/*
 * Fill the virtio header (checksum offload + GSO metadata) for @skb and
 * add it to the send virtqueue.  Returns virtqueue_add_buf()'s result:
 * remaining capacity on success, negative errno on failure.
 */
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);

	/* +1 accounts for the header sg entry prepended above */
	hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
	return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
				 0, skb, GFP_ATOMIC);
}
/*
 * ndo_start_xmit: reclaim finished tx buffers, queue @skb, kick the
 * host, and stop the tx queue when the ring is nearly full.  Always
 * returns NETDEV_TX_OK - on failure the skb is dropped and counted.
 */
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int capacity;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* Try to transmit */
	capacity = xmit_skb(vi, skb);

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		if (likely(capacity == -ENOMEM)) {
			if (net_ratelimit())
				dev_warn(&dev->dev,
					 "TX queue failure: out of memory\n");
		} else {
			dev->stats.tx_fifo_errors++;
			if (net_ratelimit())
				dev_warn(&dev->dev,
					 "Unexpected TX queue failure: %d\n",
					 capacity);
		}
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(vi->svq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (capacity < 2+MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
			/* More just got used, free them then recheck. */
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2+MAX_SKB_FRAGS) {
				netif_start_queue(dev);
				virtqueue_disable_cb(vi->svq);
			}
		}
	}

	return NETDEV_TX_OK;
}
/*
 * ndo_set_mac_address: update the netdev's MAC and, if the device
 * advertises VIRTIO_NET_F_MAC, write it into device config space too.
 */
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}
/*
 * ndo_get_stats64: sum the per-cpu tx/rx counters (consistently, via
 * the u64_stats seqcount retry loop) and add the netdev error counts.
 */
static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		/* Retry until we read a consistent snapshot of this cpu */
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tpackets = stats->tx_packets;
			tbytes   = stats->tx_bytes;
			rpackets = stats->rx_packets;
			rbytes   = stats->rx_bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the rx path without interrupts by scheduling NAPI */
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

/* ndo_open: fill the rx ring (deferring to the workqueue on OOM) and
 * enable NAPI.  Never fails.
 */
static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Make sure we have some buffers: if oom use wq. */
	if (!try_fill_recv(vi, GFP_KERNEL))
		queue_delayed_work(system_nrt_wq, &vi->refill, 0);

	virtnet_napi_enable(vi);
	return 0;
}
/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formated.
 *
 * The sg layout is: [0] command header, [1..out-1] caller's out data,
 * [out..] caller's in data, [last] the one-byte status reply.  Busy-
 * waits for the reply; the kick traps into the hypervisor, so the
 * request is expected to be handled immediately.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);

	virtqueue_kick(vi->cvq);

	/*
	 * Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}
/* ndo_stop: stop the deferred refill first, then disable NAPI */
static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);
	napi_disable(&vi->napi);

	return 0;
}
/*
 * ndo_set_rx_mode: push the promiscuous/allmulti flags and the unicast +
 * multicast MAC filter lists to the host over the control virtqueue.
 * Failures are only logged; there is no way to report them to the stack
 * from this hook.
 */
static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}
	mac_data = buf;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		/* Fix: message previously read "MAC fitler table" */
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}
/* Ask the host to start accepting frames tagged with VLAN id @vid. */
static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist vid_sg;
	bool ok;

	sg_init_one(&vid_sg, &vid, sizeof(vid));
	ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &vid_sg, 1, 0);
	if (!ok)
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}
/* Ask the host to stop accepting frames tagged with VLAN id @vid. */
static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist vid_sg;
	bool ok;

	sg_init_one(&vid_sg, &vid, sizeof(vid));
	ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &vid_sg, 1, 0);
	if (!ok)
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}
/*
 * ethtool -g: report virtqueue ring sizes.  The rings are fixed at device
 * setup, so "pending" always equals the maximum.
 */
static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
	ring->tx_pending = ring->tx_max_pending;
}
/* ethtool -i: fill in driver name, version and bus identification. */
static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vi->vdev),
		sizeof(info->bus_info));
}
/* ethtool callbacks supported by this driver. */
static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
};
/* Minimum IPv4 MTU and the largest value a 16-bit length field allows. */
#define MIN_MTU 68
#define MAX_MTU 65535

/* ndo_change_mtu: accept any MTU in [MIN_MTU, MAX_MTU]. */
static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu >= MIN_MTU && new_mtu <= MAX_MTU) {
		dev->mtu = new_mtu;
		return 0;
	}
	return -EINVAL;
}
/* Network-stack entry points for the virtio-net device. */
static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop   	     = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu	     = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};
/*
 * Re-read the link status from device config space and propagate any
 * change to the network stack (carrier + queue state).  Silently returns
 * if the device does not advertise VIRTIO_NET_F_STATUS.
 */
static void virtnet_update_status(struct virtnet_info *vi)
{
	u16 status;

	if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
			      offsetof(struct virtio_net_config, status),
			      &status) < 0)
		return;

	/* Ignore unknown (future) status bits */
	status &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == status)
		return;
	vi->status = status;

	if (status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
}
/* Config-change interrupt: the only mutable config field is link status. */
static void virtnet_config_changed(struct virtio_device *vdev)
{
	virtnet_update_status(vdev->priv);
}
static int init_vqs(struct virtnet_info *vi)
{
struct virtqueue *vqs[3];
vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
const char *names[] = { "input", "output", "control" };
int nvqs, err;
/* We expect two virtqueues, receive then send,
* and optionally control. */
nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
if (err)
return err;
vi->rvq = vqs[0];
vi->svq = vqs[1];
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
vi->cvq = vqs[2];
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
}
return 0;
}
/*
 * virtnet_probe - bring up one virtio-net device.
 *
 * Allocates the netdev, translates negotiated virtio feature bits into
 * netdev feature flags, sets the MAC, creates the virtqueues, registers
 * the interface and pre-fills the receive ring.  On failure, unwinds in
 * reverse order via the goto labels at the bottom.  Returns 0 or a
 * negative errno.
 */
static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len) < 0)
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
	sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	err = init_vqs(vi);
	if (err)
		goto free_stats;

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi, GFP_KERNEL);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		virtnet_update_status(vi);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

	/* Error unwind: each label undoes one successful step above. */
unregister:
	unregister_netdev(dev);
free_vqs:
	vdev->config->del_vqs(vdev);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
while (1) {
buf = virtqueue_detach_unused_buf(vi->svq);
if (!buf)
break;
dev_kfree_skb(buf);
}
while (1) {
buf = virtqueue_detach_unused_buf(vi->rvq);
if (!buf)
break;
if (vi->mergeable_rx_bufs || vi->big_packets)
give_pages(vi, buf);
else
dev_kfree_skb(buf);
--vi->num;
}
BUG_ON(vi->num != 0);
}
/*
 * Tear down the virtqueues: reset the device (stops all DMA), reclaim
 * buffers still queued, delete the vqs, then release the page cache.
 * Shared by remove and PM freeze paths.
 */
static void remove_vq_common(struct virtnet_info *vi)
{
	/* Reset first so the device stops touching the rings. */
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	vi->vdev->config->del_vqs(vi->vdev);

	/* Drain the free-page pool built up by give_pages(). */
	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);
}
/* Device removal: unregister from the stack, then free all resources. */
static void __devexit virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}
#ifdef CONFIG_PM
/*
 * PM freeze: quiesce all virtqueue callbacks, detach the netdev, stop
 * NAPI/refill work, and dismantle the vqs.  virtnet_restore() rebuilds
 * everything from scratch.
 */
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	/* Prevent further vq interrupts before tearing things down. */
	virtqueue_disable_cb(vi->rvq);
	virtqueue_disable_cb(vi->svq);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
		virtqueue_disable_cb(vi->cvq);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev))
		napi_disable(&vi->napi);

	remove_vq_common(vi);

	return 0;
}
/*
 * PM restore: recreate the virtqueues torn down by virtnet_freeze(),
 * re-enable NAPI if the interface was running, reattach the netdev and
 * refill the receive ring (deferring to the refill worker if allocation
 * falls short).
 */
static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev))
		virtnet_napi_enable(vi);

	netif_device_attach(vi->dev);

	if (!try_fill_recv(vi, GFP_KERNEL))
		queue_delayed_work(system_nrt_wq, &vi->refill, 0);

	return 0;
}
#endif
/* Match any virtio device with the network device ID. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

/* Virtio feature bits this driver is able to negotiate. */
static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};
/* Virtio bus glue: probe/remove, config-change and PM callbacks. */
static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	__devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};
/* Module entry point: register with the virtio bus. */
static int __init init(void)
{
	return register_virtio_driver(&virtio_net_driver);
}

/* Module exit: unregister; the bus core calls remove for each device. */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
slade87/ALE21-Kernel | drivers/mtd/ubi/cdev.c | 979 | 25247 | /*
* Copyright (c) International Business Machines Corp., 2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
* This file includes implementation of UBI character device operations.
*
* There are two kinds of character devices in UBI: UBI character devices and
* UBI volume character devices. UBI character devices allow users to
* manipulate whole volumes: create, remove, and re-size them. Volume character
* devices provide volume I/O capabilities.
*
* Major and minor numbers are assigned dynamically to both UBI and volume
* character devices.
*
* Well, there is the third kind of character devices - the UBI control
* character device, which allows to manipulate by UBI devices - create and
* delete them. In other words, it is used for attaching and detaching MTD
* devices.
*/
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/math64.h>
#include <mtd/ubi-user.h>
#include "ubi.h"
/**
 * get_exclusive - get exclusive access to an UBI volume.
 * @desc: volume descriptor
 *
 * Switch the open mode of @desc to "exclusive".  Returns the previous mode
 * value (a positive integer) in case of success and a negative error code in
 * case of failure (another descriptor already has the volume open).
 */
static int get_exclusive(struct ubi_volume_desc *desc)
{
	struct ubi_volume *vol = desc->vol;
	int open_count, ret;

	spin_lock(&vol->ubi->volumes_lock);
	open_count = vol->readers + vol->writers + vol->exclusive;
	ubi_assert(open_count > 0);
	if (open_count == 1) {
		/* We are the only opener - take over exclusively */
		vol->readers = vol->writers = 0;
		vol->exclusive = 1;
		ret = desc->mode;
		desc->mode = UBI_EXCLUSIVE;
	} else {
		ubi_err("%d users for volume %d", open_count, vol->vol_id);
		ret = -EBUSY;
	}
	spin_unlock(&vol->ubi->volumes_lock);

	return ret;
}
/**
 * revoke_exclusive - revoke exclusive mode.
 * @desc: volume descriptor
 * @mode: new mode to switch to
 *
 * Drop the exclusive claim taken by get_exclusive() and account @desc as a
 * reader, writer, or (again) exclusive opener depending on @mode.
 */
static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
{
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	ubi_assert(vol->readers == 0 && vol->writers == 0);
	ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
	vol->exclusive = 0;
	switch (mode) {
	case UBI_READONLY:
		vol->readers = 1;
		break;
	case UBI_READWRITE:
		vol->writers = 1;
		break;
	default:
		vol->exclusive = 1;
		break;
	}
	spin_unlock(&vol->ubi->volumes_lock);

	desc->mode = mode;
}
/*
 * Open a volume character device.  The UBI device is found from the char
 * device major, the volume from the minor (minor 0 is the UBI device
 * itself, hence the "- 1").  Write opens get read-write mode.
 */
static int vol_cdev_open(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc;
	int mode;
	int vol_id = iminor(inode) - 1;
	int ubi_num = ubi_major2num(imajor(inode));

	if (ubi_num < 0)
		return ubi_num;

	mode = (file->f_mode & FMODE_WRITE) ? UBI_READWRITE : UBI_READONLY;

	dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode);

	desc = ubi_open_volume(ubi_num, vol_id, mode);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	file->private_data = desc;
	return 0;
}
/*
 * Release a volume character device.  If an update or atomic LEB change
 * was started but never completed, the operation is cancelled here and
 * its staging buffer freed; an interrupted update leaves the volume
 * damaged (the update marker stays set).
 */
static int vol_cdev_release(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;

	dbg_gen("release device %d, volume %d, mode %d",
		vol->ubi->ubi_num, vol->vol_id, desc->mode);

	if (vol->updating) {
		ubi_warn("update of volume %d not finished, volume is damaged",
			 vol->vol_id);
		ubi_assert(!vol->changing_leb);
		vol->updating = 0;
		vfree(vol->upd_buf);
	} else if (vol->changing_leb) {
		dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
			vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
			vol->vol_id);
		vol->changing_leb = 0;
		vfree(vol->upd_buf);
	}

	ubi_close_volume(desc);
	return 0;
}
/*
 * Seek within a volume.  The seekable range is [0, vol->used_bytes];
 * seeking is forbidden while a volume update is in progress.
 */
static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	loff_t base, new_offset;

	if (vol->updating) {
		/* Update is in progress, seeking is prohibited */
		ubi_err("updating");
		return -EBUSY;
	}

	switch (origin) {
	case 0: /* SEEK_SET */
		base = 0;
		break;
	case 1: /* SEEK_CUR */
		base = file->f_pos;
		break;
	case 2: /* SEEK_END */
		base = vol->used_bytes;
		break;
	default:
		return -EINVAL;
	}

	new_offset = base + offset;
	if (new_offset < 0 || new_offset > vol->used_bytes) {
		ubi_err("bad seek %lld", new_offset);
		return -EINVAL;
	}

	dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld",
		vol->vol_id, offset, origin, new_offset);

	file->f_pos = new_offset;
	return new_offset;
}
/*
 * fsync() on a volume char device: flush the whole UBI device's pending
 * work.  @start/@end/@datasync are ignored - UBI syncs everything.
 * i_mutex is taken to match the locking contract of the fsync fop.
 */
static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_device *ubi = desc->vol->ubi;
	struct inode *inode = file_inode(file);
	int err;

	mutex_lock(&inode->i_mutex);
	err = ubi_sync(ubi->ubi_num);
	mutex_unlock(&inode->i_mutex);
	return err;
}
/*
 * Read volume data.  The request is served LEB by LEB through a bounce
 * buffer of at most one usable LEB, since ubi_eba_read_leb() cannot write
 * to user memory directly.  Reads are clamped to vol->used_bytes and
 * refused while an update is running or the update marker is set.
 * Returns the number of bytes read, 0 at EOF, or a negative error code.
 */
static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
			     loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int err, lnum, off, len, tbuf_size;
	size_t count_save = count;
	void *tbuf;

	dbg_gen("read %zd bytes from offset %lld of volume %d",
		count, *offp, vol->vol_id);

	if (vol->updating) {
		ubi_err("updating");
		return -EBUSY;
	}
	if (vol->upd_marker) {
		ubi_err("damaged volume, update marker is set");
		return -EBADF;
	}
	if (*offp == vol->used_bytes || count == 0)
		return 0;

	if (vol->corrupted)
		dbg_gen("read from corrupted volume %d", vol->vol_id);

	/* Clamp the request to the valid data range. */
	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	/* Bounce buffer: one LEB max, rounded up to the min I/O unit. */
	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;

	/* Translate the byte offset into (LEB number, offset in LEB). */
	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
	do {
		cond_resched();

		/* Do not read past the end of the current LEB. */
		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;

		err = copy_to_user(buf, tbuf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	} while (count);

	vfree(tbuf);
	/* Report bytes transferred before the first error, if any. */
	return err ? err : count_save - count;
}
/*
 * This function allows to directly write to dynamic UBI volumes, without
 * issuing the volume update operation.
 *
 * Only permitted when the volume has the "direct_writes" property set and
 * is a dynamic volume.  Both the offset and the length must be multiples
 * of the flash's minimum I/O unit, and the write may not extend past
 * vol->used_bytes.  Data is staged through a bounce buffer of at most one
 * usable LEB.  Returns bytes written or a negative error code.
 */
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int lnum, off, len, tbuf_size, err = 0;
	size_t count_save = count;
	char *tbuf;

	if (!vol->direct_writes)
		return -EPERM;

	dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
		count, *offp, vol->vol_id);

	if (vol->vol_type == UBI_STATIC_VOLUME)
		return -EROFS;

	/* Translate the byte offset into (LEB number, offset in LEB). */
	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
	if (off & (ubi->min_io_size - 1)) {
		ubi_err("unaligned position");
		return -EINVAL;
	}

	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	/* We can write only in fractions of the minimum I/O unit */
	if (count & (ubi->min_io_size - 1)) {
		ubi_err("unaligned write length");
		return -EINVAL;
	}

	/* Bounce buffer: one LEB max, rounded up to the min I/O unit. */
	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;

	while (count) {
		cond_resched();

		/* Do not write past the end of the current LEB. */
		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = copy_from_user(tbuf, buf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;
		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	}

	vfree(tbuf);
	/* Report bytes transferred before the first error, if any. */
	return err ? err : count_save - count;
}
/*
 * Write entry point for the volume char device.  Three modes:
 *  - no update/LEB-change in progress: fall through to direct writes;
 *  - volume update running (UBI_IOCVOLUP): feed data to the update engine;
 *  - atomic LEB change running (UBI_IOCEBCH): feed data to that engine.
 * When the engine reports completion (positive return), exclusive mode
 * taken by the starting ioctl is revoked and, for updates, the volume is
 * re-checked for corruption.
 */
static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *offp)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;

	if (!vol->updating && !vol->changing_leb)
		return vol_cdev_direct_write(file, buf, count, offp);

	if (vol->updating)
		err = ubi_more_update_data(ubi, vol, buf, count);
	else
		err = ubi_more_leb_change_data(ubi, vol, buf, count);

	if (err < 0) {
		ubi_err("cannot accept more %zd bytes of data, error %d",
			count, err);
		return err;
	}

	if (err) {
		/*
		 * The operation is finished, @err contains number of actually
		 * written bytes.
		 */
		count = err;

		if (vol->changing_leb) {
			revoke_exclusive(desc, UBI_READWRITE);
			return count;
		}

		/* Update finished - verify the volume contents. */
		err = ubi_check_volume(ubi, vol->vol_id);
		if (err < 0)
			return err;

		if (err) {
			ubi_warn("volume %d on UBI device %d is corrupted",
				 vol->vol_id, ubi->ubi_num);
			vol->corrupted = 1;
		}
		vol->checked = 1;
		ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
		revoke_exclusive(desc, UBI_READWRITE);
	}

	return count;
}
static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int err = 0;
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
void __user *argp = (void __user *)arg;
switch (cmd) {
/* Volume update command */
case UBI_IOCVOLUP:
{
int64_t bytes, rsvd_bytes;
if (!capable(CAP_SYS_RESOURCE)) {
err = -EPERM;
break;
}
err = copy_from_user(&bytes, argp, sizeof(int64_t));
if (err) {
err = -EFAULT;
break;
}
if (desc->mode == UBI_READONLY) {
err = -EROFS;
break;
}
rsvd_bytes = (long long)vol->reserved_pebs *
ubi->leb_size-vol->data_pad;
if (bytes < 0 || bytes > rsvd_bytes) {
err = -EINVAL;
break;
}
err = get_exclusive(desc);
if (err < 0)
break;
err = ubi_start_update(ubi, vol, bytes);
if (bytes == 0)
revoke_exclusive(desc, UBI_READWRITE);
break;
}
/* Atomic logical eraseblock change command */
case UBI_IOCEBCH:
{
struct ubi_leb_change_req req;
err = copy_from_user(&req, argp,
sizeof(struct ubi_leb_change_req));
if (err) {
err = -EFAULT;
break;
}
if (desc->mode == UBI_READONLY ||
vol->vol_type == UBI_STATIC_VOLUME) {
err = -EROFS;
break;
}
/* Validate the request */
err = -EINVAL;
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
req.bytes < 0 || req.lnum >= vol->usable_leb_size)
break;
err = get_exclusive(desc);
if (err < 0)
break;
err = ubi_start_leb_change(ubi, vol, &req);
if (req.bytes == 0)
revoke_exclusive(desc, UBI_READWRITE);
break;
}
/* Logical eraseblock erasure command */
case UBI_IOCEBER:
{
int32_t lnum;
err = get_user(lnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
if (desc->mode == UBI_READONLY ||
vol->vol_type == UBI_STATIC_VOLUME) {
err = -EROFS;
break;
}
if (lnum < 0 || lnum >= vol->reserved_pebs) {
err = -EINVAL;
break;
}
dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
break;
err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
break;
}
/* Logical eraseblock map command */
case UBI_IOCEBMAP:
{
struct ubi_map_req req;
err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
if (err) {
err = -EFAULT;
break;
}
err = ubi_leb_map(desc, req.lnum);
break;
}
/* Logical eraseblock un-map command */
case UBI_IOCEBUNMAP:
{
int32_t lnum;
err = get_user(lnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
err = ubi_leb_unmap(desc, lnum);
break;
}
/* Check if logical eraseblock is mapped command */
case UBI_IOCEBISMAP:
{
int32_t lnum;
err = get_user(lnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
err = ubi_is_mapped(desc, lnum);
break;
}
/* Set volume property command */
case UBI_IOCSETVOLPROP:
{
struct ubi_set_vol_prop_req req;
err = copy_from_user(&req, argp,
sizeof(struct ubi_set_vol_prop_req));
if (err) {
err = -EFAULT;
break;
}
switch (req.property) {
case UBI_VOL_PROP_DIRECT_WRITE:
mutex_lock(&ubi->device_mutex);
desc->vol->direct_writes = !!req.value;
mutex_unlock(&ubi->device_mutex);
break;
default:
err = -EINVAL;
break;
}
break;
}
default:
err = -ENOTTY;
break;
}
return err;
}
/**
 * verify_mkvol_req - verify volume creation request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL
 * (or %-ENAMETOOLONG for an over-long name) if not.
 */
static int verify_mkvol_req(const struct ubi_device *ubi,
			    const struct ubi_mkvol_req *req)
{
	int n, err = -EINVAL;

	if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
	    req->name_len < 0)
		goto bad;

	/* vol_id must be in range or the "pick one for me" sentinel */
	if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
	    req->vol_id != UBI_VOL_NUM_AUTO)
		goto bad;

	if (req->alignment == 0)
		goto bad;

	if (req->bytes == 0)
		goto bad;

	if (req->vol_type != UBI_DYNAMIC_VOLUME &&
	    req->vol_type != UBI_STATIC_VOLUME)
		goto bad;

	if (req->alignment > ubi->leb_size)
		goto bad;

	/* Alignment must be 1 or a multiple of the minimum I/O unit */
	n = req->alignment & (ubi->min_io_size - 1);
	if (req->alignment != 1 && n)
		goto bad;

	if (!req->name[0] || !req->name_len)
		goto bad;

	if (req->name_len > UBI_VOL_NAME_MAX) {
		err = -ENAMETOOLONG;
		goto bad;
	}

	/* The declared length must match the actual string length */
	n = strnlen(req->name, req->name_len + 1);
	if (n != req->name_len)
		goto bad;

	return 0;

bad:
	ubi_err("bad volume creation request");
	ubi_dump_mkvol_req(req);
	return err;
}
/**
 * verify_rsvol_req - verify volume re-size request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL if not.
 */
static int verify_rsvol_req(const struct ubi_device *ubi,
			    const struct ubi_rsvol_req *req)
{
	int bad = req->bytes <= 0 ||
		  req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots;

	return bad ? -EINVAL : 0;
}
/**
* rename_volumes - rename UBI volumes.
* @ubi: UBI device description object
* @req: volumes re-name request
*
* This is a helper function for the volume re-name IOCTL which validates the
* the request, opens the volume and calls corresponding volumes management
* function. Returns zero in case of success and a negative error code in case
* of failure.
*/
static int rename_volumes(struct ubi_device *ubi,
struct ubi_rnvol_req *req)
{
int i, n, err;
struct list_head rename_list;
struct ubi_rename_entry *re, *re1;
if (req->count < 0 || req->count > UBI_MAX_RNVOL)
return -EINVAL;
if (req->count == 0)
return 0;
/* Validate volume IDs and names in the request */
for (i = 0; i < req->count; i++) {
if (req->ents[i].vol_id < 0 ||
req->ents[i].vol_id >= ubi->vtbl_slots)
return -EINVAL;
if (req->ents[i].name_len < 0)
return -EINVAL;
if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
return -ENAMETOOLONG;
req->ents[i].name[req->ents[i].name_len] = '\0';
n = strlen(req->ents[i].name);
if (n != req->ents[i].name_len)
err = -EINVAL;
}
/* Make sure volume IDs and names are unique */
for (i = 0; i < req->count - 1; i++) {
for (n = i + 1; n < req->count; n++) {
if (req->ents[i].vol_id == req->ents[n].vol_id) {
ubi_err("duplicated volume id %d",
req->ents[i].vol_id);
return -EINVAL;
}
if (!strcmp(req->ents[i].name, req->ents[n].name)) {
ubi_err("duplicated volume name \"%s\"",
req->ents[i].name);
return -EINVAL;
}
}
}
/* Create the re-name list */
INIT_LIST_HEAD(&rename_list);
for (i = 0; i < req->count; i++) {
int vol_id = req->ents[i].vol_id;
int name_len = req->ents[i].name_len;
const char *name = req->ents[i].name;
re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
if (!re) {
err = -ENOMEM;
goto out_free;
}
re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
if (IS_ERR(re->desc)) {
err = PTR_ERR(re->desc);
ubi_err("cannot open volume %d, error %d", vol_id, err);
kfree(re);
goto out_free;
}
/* Skip this re-naming if the name does not really change */
if (re->desc->vol->name_len == name_len &&
!memcmp(re->desc->vol->name, name, name_len)) {
ubi_close_volume(re->desc);
kfree(re);
continue;
}
re->new_name_len = name_len;
memcpy(re->new_name, name, name_len);
list_add_tail(&re->list, &rename_list);
dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
vol_id, re->desc->vol->name, name);
}
if (list_empty(&rename_list))
return 0;
/* Find out the volumes which have to be removed */
list_for_each_entry(re, &rename_list, list) {
struct ubi_volume_desc *desc;
int no_remove_needed = 0;
/*
* Volume @re->vol_id is going to be re-named to
* @re->new_name, while its current name is @name. If a volume
* with name @re->new_name currently exists, it has to be
* removed, unless it is also re-named in the request (@req).
*/
list_for_each_entry(re1, &rename_list, list) {
if (re->new_name_len == re1->desc->vol->name_len &&
!memcmp(re->new_name, re1->desc->vol->name,
re1->desc->vol->name_len)) {
no_remove_needed = 1;
break;
}
}
if (no_remove_needed)
continue;
/*
* It seems we need to remove volume with name @re->new_name,
* if it exists.
*/
desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
UBI_EXCLUSIVE);
if (IS_ERR(desc)) {
err = PTR_ERR(desc);
if (err == -ENODEV)
/* Re-naming into a non-existing volume name */
continue;
/* The volume exists but busy, or an error occurred */
ubi_err("cannot open volume \"%s\", error %d",
re->new_name, err);
goto out_free;
}
re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
if (!re1) {
err = -ENOMEM;
ubi_close_volume(desc);
goto out_free;
}
re1->remove = 1;
re1->desc = desc;
list_add(&re1->list, &rename_list);
dbg_gen("will remove volume %d, name \"%s\"",
re1->desc->vol->vol_id, re1->desc->vol->name);
}
mutex_lock(&ubi->device_mutex);
err = ubi_rename_volumes(ubi, &rename_list);
mutex_unlock(&ubi->device_mutex);
out_free:
list_for_each_entry_safe(re, re1, &rename_list, list) {
ubi_close_volume(re->desc);
list_del(&re->list);
kfree(re);
}
return err;
}
/*
 * ioctl handler for the per-UBI-device character device: create, remove,
 * re-size and re-name volumes.  All commands require CAP_SYS_RESOURCE.
 * The UBI device reference taken by ubi_get_by_major() is dropped on
 * every exit path.
 */
static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_device *ubi;
	struct ubi_volume_desc *desc;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	ubi = ubi_get_by_major(imajor(file->f_mapping->host));
	if (!ubi)
		return -ENODEV;

	switch (cmd) {
	/* Create volume command */
	case UBI_IOCMKVOL:
	{
		struct ubi_mkvol_req req;

		dbg_gen("create volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_mkvol_req(ubi, &req);
		if (err)
			break;

		mutex_lock(&ubi->device_mutex);
		err = ubi_create_volume(ubi, &req);
		mutex_unlock(&ubi->device_mutex);
		if (err)
			break;

		/* Report the (possibly auto-assigned) volume ID back. */
		err = put_user(req.vol_id, (__user int32_t *)argp);
		if (err)
			err = -EFAULT;

		break;
	}

	/* Remove volume command */
	case UBI_IOCRMVOL:
	{
		int vol_id;

		dbg_gen("remove volume");
		err = get_user(vol_id, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		/* Exclusive open guarantees nobody else is using it. */
		desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		mutex_lock(&ubi->device_mutex);
		err = ubi_remove_volume(desc, 0);
		mutex_unlock(&ubi->device_mutex);

		/*
		 * The volume is deleted (unless an error occurred), and the
		 * 'struct ubi_volume' object will be freed when
		 * 'ubi_close_volume()' will call 'put_device()'.
		 */
		ubi_close_volume(desc);
		break;
	}

	/* Re-size volume command */
	case UBI_IOCRSVOL:
	{
		int pebs;
		struct ubi_rsvol_req req;

		dbg_gen("re-size volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_rsvol_req(ubi, &req);
		if (err)
			break;

		desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		/* Round the byte count up to whole LEBs. */
		pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
			       desc->vol->usable_leb_size);

		mutex_lock(&ubi->device_mutex);
		err = ubi_resize_volume(desc, pebs);
		mutex_unlock(&ubi->device_mutex);
		ubi_close_volume(desc);
		break;
	}

	/* Re-name volumes command */
	case UBI_IOCRNVOL:
	{
		struct ubi_rnvol_req *req;

		dbg_gen("re-name volumes");
		/* struct ubi_rnvol_req is large - heap-allocate it. */
		req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			break;
		};

		err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
		if (err) {
			err = -EFAULT;
			kfree(req);
			break;
		}

		err = rename_volumes(ubi, req);
		kfree(req);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	ubi_put_device(ubi);
	return err;
}
/*
 * ioctl handler for the UBI control device (/dev/ubi_ctrl): attach an MTD
 * device as a new UBI device, or detach one.  Requires CAP_SYS_RESOURCE.
 */
static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	switch (cmd) {
	/* Attach an MTD device command */
	case UBI_IOCATT:
	{
		struct ubi_attach_req req;
		struct mtd_info *mtd;

		dbg_gen("attach MTD device");
		err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (req.mtd_num < 0 ||
		    (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
			err = -EINVAL;
			break;
		}

		mtd = get_mtd_device(NULL, req.mtd_num);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			break;
		}

		/*
		 * Note, further request verification is done by
		 * 'ubi_attach_mtd_dev()'.
		 */
		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
					 req.max_beb_per1024);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0)
			/* Attach failed - drop the MTD reference we took. */
			put_mtd_device(mtd);
		else
			/* @err contains UBI device number */
			err = put_user(err, (__user int32_t *)argp);

		break;
	}

	/* Detach an MTD device command */
	case UBI_IOCDET:
	{
		int ubi_num;

		dbg_gen("detach MTD device");
		err = get_user(ubi_num, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_detach_mtd_dev(ubi_num, 0);
		mutex_unlock(&ubi_devices_mutex);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	return err;
}
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl wrappers: UBI ioctl payloads have the same layout
 * on 32- and 64-bit ABIs, so the only translation needed is converting
 * the compat user pointer before delegating to the native handler.
 */
static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return vol_cdev_ioctl(file, cmd, translated_arg);
}

static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ubi_cdev_ioctl(file, cmd, translated_arg);
}

static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ctrl_cdev_ioctl(file, cmd, translated_arg);
}
#else
/* No compat layer - the VFS treats a NULL .compat_ioctl as unsupported. */
#define vol_cdev_compat_ioctl  NULL
#define ubi_cdev_compat_ioctl  NULL
#define ctrl_cdev_compat_ioctl NULL
#endif
/* Operations for per-volume character devices (/dev/ubiX_Y) */
const struct file_operations ubi_vol_cdev_operations = {
	.owner		= THIS_MODULE,
	.open		= vol_cdev_open,
	.release	= vol_cdev_release,
	.read		= vol_cdev_read,
	.write		= vol_cdev_write,
	.llseek		= vol_cdev_llseek,
	.fsync		= vol_cdev_fsync,
	.unlocked_ioctl	= vol_cdev_ioctl,
	.compat_ioctl	= vol_cdev_compat_ioctl,
};

/* Operations for per-device character devices (/dev/ubiX) - ioctl only */
const struct file_operations ubi_cdev_operations = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= ubi_cdev_ioctl,
	.compat_ioctl	= ubi_cdev_compat_ioctl,
	.llseek		= no_llseek,
};

/* Operations for the UBI control character device (/dev/ubi_ctrl) */
const struct file_operations ubi_ctrl_cdev_operations = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= ctrl_cdev_ioctl,
	.compat_ioctl	= ctrl_cdev_compat_ioctl,
	.llseek		= no_llseek,
};
| gpl-2.0 |
Borkata/adam-nv-3.1 | arch/arm/mach-mv78xx0/common.c | 1235 | 10325 | /*
* arch/arm/mach-mv78xx0/common.c
*
* Core functions for Marvell MV78xx0 SoCs
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/mbus.h>
#include <linux/ata_platform.h>
#include <linux/ethtool.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <mach/mv78xx0.h>
#include <mach/bridge-regs.h>
#include <plat/cache-feroceon-l2.h>
#include <plat/orion_nand.h>
#include <plat/time.h>
#include <plat/common.h>
#include "common.h"
static int get_tclk(void);
/*****************************************************************************
* Common bits
****************************************************************************/
/*
 * Return the index (0 or 1) of the CPU core we are running on, read
 * from bit 14 of the per-core Extra Features register (cp15 c15).
 */
int mv78xx0_core_index(void)
{
	u32 extra_features;

	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (extra_features));

	return (extra_features & 0x00004000) ? 1 : 0;
}
static int get_hclk(void)
{
int hclk;
/*
* HCLK tick rate is configured by DEV_D[7:5] pins.
*/
switch ((readl(SAMPLE_AT_RESET_LOW) >> 5) & 7) {
case 0:
hclk = 166666667;
break;
case 1:
hclk = 200000000;
break;
case 2:
hclk = 266666667;
break;
case 3:
hclk = 333333333;
break;
case 4:
hclk = 400000000;
break;
default:
panic("unknown HCLK PLL setting: %.8x\n",
readl(SAMPLE_AT_RESET_LOW));
}
return hclk;
}
/*
 * Derive the CPU clock (*pclk) and L2 clock (*l2clk) in Hz from the
 * HCLK rate and the per-core ratio bits sampled at reset.
 */
static void get_pclk_l2clk(int hclk, int core_index, int *pclk, int *l2clk)
{
	u32 sar = readl(SAMPLE_AT_RESET_LOW);
	u32 cfg;

	/*
	 * Core #0 PCLK/L2CLK is configured by bits [13:8], core #1
	 * PCLK/L2CLK by bits [19:14].
	 */
	cfg = (core_index == 0) ? (sar >> 8) & 0x3f : (sar >> 14) & 0x3f;

	/*
	 * The low four config bits select the PCLK:HCLK ratio in half
	 * steps (1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6).
	 */
	*pclk = ((u64)hclk * (2 + (cfg & 0xf))) >> 1;

	/*
	 * The top two config bits select the PCLK:L2CLK ratio (1, 2, 3).
	 */
	*l2clk = *pclk / (((cfg >> 4) & 3) + 1);
}
/*
 * Return the TCLK tick rate in Hz, selected by the DEV_A[2:0] strap
 * pins sampled at reset.  Panics on an unknown PLL setting.
 */
static int get_tclk(void)
{
	switch ((readl(SAMPLE_AT_RESET_HIGH) >> 6) & 7) {
	case 1:
		return 166666667;
	case 3:
		return 200000000;
	default:
		panic("unknown TCLK PLL setting: %.8x\n",
		      readl(SAMPLE_AT_RESET_HIGH));
	}
}
/*****************************************************************************
* I/O Address Mapping
****************************************************************************/
/*
 * Static I/O mappings.  Entry 0's .pfn is a placeholder; it is filled
 * in at runtime by mv78xx0_map_io() because the per-core register base
 * depends on which core we boot on.
 */
static struct map_desc mv78xx0_io_desc[] __initdata = {
	{
		.virtual	= MV78XX0_CORE_REGS_VIRT_BASE,
		.pfn		= 0,	/* patched by mv78xx0_map_io() */
		.length		= MV78XX0_CORE_REGS_SIZE,
		.type		= MT_DEVICE,
	}, {
		.virtual	= MV78XX0_PCIE_IO_VIRT_BASE(0),
		.pfn		= __phys_to_pfn(MV78XX0_PCIE_IO_PHYS_BASE(0)),
		.length		= MV78XX0_PCIE_IO_SIZE * 8,
		.type		= MT_DEVICE,
	}, {
		.virtual	= MV78XX0_REGS_VIRT_BASE,
		.pfn		= __phys_to_pfn(MV78XX0_REGS_PHYS_BASE),
		.length		= MV78XX0_REGS_SIZE,
		.type		= MT_DEVICE,
	},
};
/*
 * Set up the static I/O mappings, pointing the per-core register
 * window at whichever core we are currently executing on.
 */
void __init mv78xx0_map_io(void)
{
	unsigned long core_regs_phys;

	core_regs_phys = mv78xx0_core_index() ?
		MV78XX0_CORE1_REGS_PHYS_BASE : MV78XX0_CORE0_REGS_PHYS_BASE;

	mv78xx0_io_desc[0].pfn = __phys_to_pfn(core_regs_phys);
	iotable_init(mv78xx0_io_desc, ARRAY_SIZE(mv78xx0_io_desc));
}
/*****************************************************************************
 * EHCI
 ****************************************************************************/
/* Register the first EHCI USB host controller. */
void __init mv78xx0_ehci0_init(void)
{
	orion_ehci_init(&mv78xx0_mbus_dram_info,
			USB0_PHYS_BASE, IRQ_MV78XX0_USB_0);
}

/*****************************************************************************
 * EHCI1
 ****************************************************************************/
/* Register the second EHCI USB host controller. */
void __init mv78xx0_ehci1_init(void)
{
	orion_ehci_1_init(&mv78xx0_mbus_dram_info,
			  USB1_PHYS_BASE, IRQ_MV78XX0_USB_1);
}

/*****************************************************************************
 * EHCI2
 ****************************************************************************/
/* Register the third EHCI USB host controller. */
void __init mv78xx0_ehci2_init(void)
{
	orion_ehci_2_init(&mv78xx0_mbus_dram_info,
			  USB2_PHYS_BASE, IRQ_MV78XX0_USB_2);
}
/*****************************************************************************
 * GE00
 ****************************************************************************/
/* Register gigabit Ethernet port 0 with board-supplied platform data. */
void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
	orion_ge00_init(eth_data, &mv78xx0_mbus_dram_info,
			GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
			IRQ_MV78XX0_GE_ERR, get_tclk());
}

/*****************************************************************************
 * GE01
 ****************************************************************************/
/* Register gigabit Ethernet port 1 (no separate error IRQ). */
void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
	orion_ge01_init(eth_data, &mv78xx0_mbus_dram_info,
			GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
			NO_IRQ, get_tclk());
}
/*****************************************************************************
* GE10
****************************************************************************/
void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
{
u32 dev, rev;
/*
* On the Z0, ge10 and ge11 are internally connected back
* to back, and not brought out.
*/
mv78xx0_pcie_id(&dev, &rev);
if (dev == MV78X00_Z0_DEV_ID) {
eth_data->phy_addr = MV643XX_ETH_PHY_NONE;
eth_data->speed = SPEED_1000;
eth_data->duplex = DUPLEX_FULL;
}
orion_ge10_init(eth_data, &mv78xx0_mbus_dram_info,
GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM,
NO_IRQ, get_tclk());
}
/*****************************************************************************
* GE11
****************************************************************************/
void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
{
u32 dev, rev;
/*
* On the Z0, ge10 and ge11 are internally connected back
* to back, and not brought out.
*/
mv78xx0_pcie_id(&dev, &rev);
if (dev == MV78X00_Z0_DEV_ID) {
eth_data->phy_addr = MV643XX_ETH_PHY_NONE;
eth_data->speed = SPEED_1000;
eth_data->duplex = DUPLEX_FULL;
}
orion_ge11_init(eth_data, &mv78xx0_mbus_dram_info,
GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM,
NO_IRQ, get_tclk());
}
/*****************************************************************************
 * I2C
 ****************************************************************************/
/* Register both I2C controllers (8 is the FDT tuning value). */
void __init mv78xx0_i2c_init(void)
{
	orion_i2c_init(I2C_0_PHYS_BASE, IRQ_MV78XX0_I2C_0, 8);
	orion_i2c_1_init(I2C_1_PHYS_BASE, IRQ_MV78XX0_I2C_1, 8);
}

/*****************************************************************************
 * SATA
 ****************************************************************************/
/* Register the SATA controller with board-supplied platform data. */
void __init mv78xx0_sata_init(struct mv_sata_platform_data *sata_data)
{
	orion_sata_init(sata_data, &mv78xx0_mbus_dram_info,
			SATA_PHYS_BASE, IRQ_MV78XX0_SATA);
}
/*****************************************************************************
 * UART0
 ****************************************************************************/
/* Register UART0; the UART baud clock runs at TCLK. */
void __init mv78xx0_uart0_init(void)
{
	orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
			 IRQ_MV78XX0_UART_0, get_tclk());
}

/*****************************************************************************
 * UART1
 ****************************************************************************/
/* Register UART1. */
void __init mv78xx0_uart1_init(void)
{
	orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
			 IRQ_MV78XX0_UART_1, get_tclk());
}

/*****************************************************************************
 * UART2
 ****************************************************************************/
/* Register UART2. */
void __init mv78xx0_uart2_init(void)
{
	orion_uart2_init(UART2_VIRT_BASE, UART2_PHYS_BASE,
			 IRQ_MV78XX0_UART_2, get_tclk());
}

/*****************************************************************************
 * UART3
 ****************************************************************************/
/* Register UART3. */
void __init mv78xx0_uart3_init(void)
{
	orion_uart3_init(UART3_VIRT_BASE, UART3_PHYS_BASE,
			 IRQ_MV78XX0_UART_3, get_tclk());
}
/*****************************************************************************
 * Time handling
 ****************************************************************************/
/* Early init: tell the Orion time code where the timer registers live. */
void __init mv78xx0_init_early(void)
{
	orion_time_set_base(TIMER_VIRT_BASE);
}

/* Start the Orion system timer, clocked from TCLK. */
static void mv78xx0_timer_init(void)
{
	orion_time_init(BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR,
			IRQ_MV78XX0_TIMER_1, get_tclk());
}

/* System timer hook used by the machine descriptions. */
struct sys_timer mv78xx0_timer = {
	.init = mv78xx0_timer_init,
};
/*****************************************************************************
* General
****************************************************************************/
/*
 * Return a human-readable SoC name string based on the PCIe device and
 * revision IDs.
 */
static char * __init mv78xx0_id(void)
{
	u32 dev, rev;

	mv78xx0_pcie_id(&dev, &rev);

	if (dev == MV78X00_Z0_DEV_ID)
		return (rev == MV78X00_REV_Z0) ? "MV78X00-Z0"
					       : "MV78X00-Rev-Unsupported";

	if (dev == MV78100_DEV_ID) {
		switch (rev) {
		case MV78100_REV_A0:
			return "MV78100-A0";
		case MV78100_REV_A1:
			return "MV78100-A1";
		default:
			return "MV78100-Rev-Unsupported";
		}
	}

	if (dev == MV78200_DEV_ID)
		return (rev == MV78100_REV_A0) ? "MV78200-A0"
					       : "MV78200-Rev-Unsupported";

	return "Device-Unknown";
}
/* Return non-zero when the L2 cache is configured for write-through. */
static int __init is_l2_writethrough(void)
{
	return (readl(CPU_CONTROL) & L2_WRITETHROUGH) != 0;
}
/*
 * Common SoC init: report the chip ID and clock rates, program the CPU
 * MBus windows and, when configured, bring up the Feroceon L2 cache.
 */
void __init mv78xx0_init(void)
{
	int core;
	int hclk, pclk, l2clk, tclk;

	core = mv78xx0_core_index();
	hclk = get_hclk();
	get_pclk_l2clk(hclk, core, &pclk, &l2clk);
	tclk = get_tclk();

	/* clock rates are rounded to the nearest MHz for display */
	printk(KERN_INFO "%s ", mv78xx0_id());
	printk("core #%d, ", core);
	printk("PCLK = %dMHz, ", (pclk + 499999) / 1000000);
	printk("L2 = %dMHz, ", (l2clk + 499999) / 1000000);
	printk("HCLK = %dMHz, ", (hclk + 499999) / 1000000);
	printk("TCLK = %dMHz\n", (tclk + 499999) / 1000000);

	mv78xx0_setup_cpu_mbus();

#ifdef CONFIG_CACHE_FEROCEON_L2
	feroceon_l2_init(is_l2_writethrough());
#endif
}
| gpl-2.0 |
Ezekeel/GLaDOS-nexus-prime | arch/arm/plat-s3c24xx/devs.c | 1747 | 11793 | /* linux/arch/arm/plat-s3c24xx/devs.c
*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* Base S3C24XX platform device definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/fb.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <asm/irq.h>
#include <plat/regs-serial.h>
#include <plat/udc.h>
#include <plat/mci.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/regs-spi.h>
#include <plat/ts.h>
/* Serial port registrations */

/* UART0: 16KiB register window plus the RX..ERR interrupt span. */
static struct resource s3c2410_uart0_resource[] = {
	[0] = {
		.start = S3C2410_PA_UART0,
		.end   = S3C2410_PA_UART0 + 0x3fff,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_S3CUART_RX0,
		.end   = IRQ_S3CUART_ERR0,
		.flags = IORESOURCE_IRQ,
	}
};

/* UART1 */
static struct resource s3c2410_uart1_resource[] = {
	[0] = {
		.start = S3C2410_PA_UART1,
		.end   = S3C2410_PA_UART1 + 0x3fff,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_S3CUART_RX1,
		.end   = IRQ_S3CUART_ERR1,
		.flags = IORESOURCE_IRQ,
	}
};

/* UART2 */
static struct resource s3c2410_uart2_resource[] = {
	[0] = {
		.start = S3C2410_PA_UART2,
		.end   = S3C2410_PA_UART2 + 0x3fff,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_S3CUART_RX2,
		.end   = IRQ_S3CUART_ERR2,
		.flags = IORESOURCE_IRQ,
	}
};

/* UART3: only present on S3C2443-class parts (S3C2443_PA_UART3). */
static struct resource s3c2410_uart3_resource[] = {
	[0] = {
		.start = S3C2443_PA_UART3,
		.end   = S3C2443_PA_UART3 + 0x3fff,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_S3CUART_RX3,
		.end   = IRQ_S3CUART_ERR3,
		.flags = IORESOURCE_IRQ,
	},
};

/* Resource table consumed by the per-CPU serial device setup code. */
struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
	[0] = {
		.resources	= s3c2410_uart0_resource,
		.nr_resources	= ARRAY_SIZE(s3c2410_uart0_resource),
	},
	[1] = {
		.resources	= s3c2410_uart1_resource,
		.nr_resources	= ARRAY_SIZE(s3c2410_uart1_resource),
	},
	[2] = {
		.resources	= s3c2410_uart2_resource,
		.nr_resources	= ARRAY_SIZE(s3c2410_uart2_resource),
	},
	[3] = {
		.resources	= s3c2410_uart3_resource,
		.nr_resources	= ARRAY_SIZE(s3c2410_uart3_resource),
	},
};
/* LCD Controller */
static struct resource s3c_lcd_resource[] = {
	[0] = {
		.start = S3C24XX_PA_LCD,
		.end   = S3C24XX_PA_LCD + S3C24XX_SZ_LCD - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_LCD,
		.end   = IRQ_LCD,
		.flags = IORESOURCE_IRQ,
	}
};

/* 32-bit DMA mask; the LCD controller DMAs the framebuffer itself. */
static u64 s3c_device_lcd_dmamask = 0xffffffffUL;

struct platform_device s3c_device_lcd = {
	.name		  = "s3c2410-lcd",
	.id		  = -1,
	.num_resources	  = ARRAY_SIZE(s3c_lcd_resource),
	.resource	  = s3c_lcd_resource,
	.dev              = {
		.dma_mask		= &s3c_device_lcd_dmamask,
		.coherent_dma_mask	= 0xffffffffUL
	}
};
EXPORT_SYMBOL(s3c_device_lcd);
/*
 * Attach framebuffer platform data to the LCD device, duplicating both
 * the mach_info and its display array so the caller's copies need not
 * outlive init.
 */
void __init s3c24xx_fb_set_platdata(struct s3c2410fb_mach_info *pd)
{
	struct s3c2410fb_mach_info *npd;

	npd = kmemdup(pd, sizeof(*npd), GFP_KERNEL);
	if (!npd) {
		printk(KERN_ERR "no memory for LCD platform data\n");
		return;
	}

	s3c_device_lcd.dev.platform_data = npd;
	npd->displays = kmemdup(pd->displays,
				sizeof(struct s3c2410fb_display) * npd->num_displays,
				GFP_KERNEL);
	if (!npd->displays)
		printk(KERN_ERR "no memory for LCD display data\n");
}
/* Touchscreen */
/* The touchscreen shares the ADC register block and uses the TC IRQ. */
static struct resource s3c_ts_resource[] = {
	[0] = {
		.start = S3C24XX_PA_ADC,
		.end   = S3C24XX_PA_ADC + S3C24XX_SZ_ADC - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_TC,
		.end   = IRQ_TC,
		.flags = IORESOURCE_IRQ,
	},
};

struct platform_device s3c_device_ts = {
	.name		  = "s3c2410-ts",
	.id		  = -1,
	.dev.parent	  = &s3c_device_adc.dev,
	.num_resources	  = ARRAY_SIZE(s3c_ts_resource),
	.resource	  = s3c_ts_resource,
};
EXPORT_SYMBOL(s3c_device_ts);

/* Static copy of board touchscreen data, filled by the setter below. */
static struct s3c2410_ts_mach_info s3c2410ts_info;
/*
 * Copy the board's touchscreen configuration into the static holder
 * and point the touchscreen device's platform data at it.
 */
void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *hard_s3c2410ts_info)
{
	s3c2410ts_info = *hard_s3c2410ts_info;
	s3c_device_ts.dev.platform_data = &s3c2410ts_info;
}
/* USB Device (Gadget) */
static struct resource s3c_usbgadget_resource[] = {
	[0] = {
		.start = S3C24XX_PA_USBDEV,
		.end   = S3C24XX_PA_USBDEV + S3C24XX_SZ_USBDEV - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_USBD,
		.end   = IRQ_USBD,
		.flags = IORESOURCE_IRQ,
	}
};

struct platform_device s3c_device_usbgadget = {
	.name		  = "s3c2410-usbgadget",
	.id		  = -1,
	.num_resources	  = ARRAY_SIZE(s3c_usbgadget_resource),
	.resource	  = s3c_usbgadget_resource,
};
EXPORT_SYMBOL(s3c_device_usbgadget);
/*
 * Attach UDC platform data to the gadget device.  The data is copied
 * with kmemdup() - consistent with s3c24xx_fb_set_platdata() and
 * s3c24xx_mci_set_platdata() in this file, replacing the open-coded
 * kmalloc()+memcpy() pair.  On allocation failure the platform data
 * is left unset and an error is logged.
 */
void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *pd)
{
	struct s3c2410_udc_mach_info *npd;

	npd = kmemdup(pd, sizeof(*npd), GFP_KERNEL);
	if (npd)
		s3c_device_usbgadget.dev.platform_data = npd;
	else
		printk(KERN_ERR "no memory for udc platform data\n");
}
/* USB High Speed 2.0 Device (Gadget) */
/* Note: shares IRQ_USBD with the full-speed gadget block above. */
static struct resource s3c_hsudc_resource[] = {
	[0] = {
		.start = S3C2416_PA_HSUDC,
		.end   = S3C2416_PA_HSUDC + S3C2416_SZ_HSUDC - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_USBD,
		.end   = IRQ_USBD,
		.flags = IORESOURCE_IRQ,
	}
};

static u64 s3c_hsudc_dmamask = DMA_BIT_MASK(32);

struct platform_device s3c_device_usb_hsudc = {
	.name		= "s3c-hsudc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(s3c_hsudc_resource),
	.resource	= s3c_hsudc_resource,
	.dev		= {
		.dma_mask		= &s3c_hsudc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
/*
 * Attach high-speed UDC platform data to the hsudc device.  Uses
 * kmemdup() instead of kmalloc()+memcpy(), matching the other
 * *_set_platdata() helpers in this file.  On allocation failure the
 * platform data is left unset and an error is logged.
 */
void __init s3c24xx_hsudc_set_platdata(struct s3c24xx_hsudc_platdata *pd)
{
	struct s3c24xx_hsudc_platdata *npd;

	npd = kmemdup(pd, sizeof(*npd), GFP_KERNEL);
	if (npd)
		s3c_device_usb_hsudc.dev.platform_data = npd;
	else
		printk(KERN_ERR "no memory for udc platform data\n");
}
/* IIS */
static struct resource s3c_iis_resource[] = {
	[0] = {
		.start = S3C24XX_PA_IIS,
		.end   = S3C24XX_PA_IIS + S3C24XX_SZ_IIS - 1,
		.flags = IORESOURCE_MEM,
	}
};

static u64 s3c_device_iis_dmamask = 0xffffffffUL;

struct platform_device s3c_device_iis = {
	.name		  = "s3c24xx-iis",
	.id		  = -1,
	.num_resources	  = ARRAY_SIZE(s3c_iis_resource),
	.resource	  = s3c_iis_resource,
	.dev              = {
		.dma_mask		= &s3c_device_iis_dmamask,
		.coherent_dma_mask	= 0xffffffffUL
	}
};
EXPORT_SYMBOL(s3c_device_iis);

/* RTC */
/* 256-byte register window; separate alarm (IRQ_RTC) and tick IRQs. */
static struct resource s3c_rtc_resource[] = {
	[0] = {
		.start = S3C24XX_PA_RTC,
		.end   = S3C24XX_PA_RTC + 0xff,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_RTC,
		.end   = IRQ_RTC,
		.flags = IORESOURCE_IRQ,
	},
	[2] = {
		.start = IRQ_TICK,
		.end   = IRQ_TICK,
		.flags = IORESOURCE_IRQ
	}
};

struct platform_device s3c_device_rtc = {
	.name		  = "s3c2410-rtc",
	.id		  = -1,
	.num_resources	  = ARRAY_SIZE(s3c_rtc_resource),
	.resource	  = s3c_rtc_resource,
};
EXPORT_SYMBOL(s3c_device_rtc);
/* ADC */
static struct resource s3c_adc_resource[] = {
	[0] = {
		.start = S3C24XX_PA_ADC,
		.end   = S3C24XX_PA_ADC + S3C24XX_SZ_ADC - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_TC,
		.end   = IRQ_TC,
		.flags = IORESOURCE_IRQ,
	},
	[2] = {
		.start = IRQ_ADC,
		.end   = IRQ_ADC,
		.flags = IORESOURCE_IRQ,
	}
};

struct platform_device s3c_device_adc = {
	.name		  = "s3c24xx-adc",
	.id		  = -1,
	.num_resources	  = ARRAY_SIZE(s3c_adc_resource),
	.resource	  = s3c_adc_resource,
};

/* SDI */
static struct resource s3c_sdi_resource[] = {
	[0] = {
		.start = S3C24XX_PA_SDI,
		.end   = S3C24XX_PA_SDI + S3C24XX_SZ_SDI - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_SDI,
		.end   = IRQ_SDI,
		.flags = IORESOURCE_IRQ,
	}
};

struct platform_device s3c_device_sdi = {
	.name		  = "s3c2410-sdi",
	.id		  = -1,
	.num_resources	  = ARRAY_SIZE(s3c_sdi_resource),
	.resource	  = s3c_sdi_resource,
};
EXPORT_SYMBOL(s3c_device_sdi);
/*
 * Attach MCI (SD/MMC) platform data to the SDI device, duplicating the
 * board data with kmemdup().  On allocation failure platform_data is
 * deliberately set to NULL and an error is logged.
 *
 * Fix: the printk format was missing its trailing newline.
 */
void __init s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata)
{
	struct s3c24xx_mci_pdata *npd;

	npd = kmemdup(pdata, sizeof(struct s3c24xx_mci_pdata), GFP_KERNEL);
	if (!npd)
		printk(KERN_ERR "%s: no memory to copy pdata\n", __func__);

	s3c_device_sdi.dev.platform_data = npd;
}
/* SPI (0) */
static struct resource s3c_spi0_resource[] = {
	[0] = {
		.start = S3C24XX_PA_SPI,
		.end   = S3C24XX_PA_SPI + 0x1f,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_SPI0,
		.end   = IRQ_SPI0,
		.flags = IORESOURCE_IRQ,
	}
};

static u64 s3c_device_spi0_dmamask = 0xffffffffUL;

struct platform_device s3c_device_spi0 = {
	.name		  = "s3c2410-spi",
	.id		  = 0,
	.num_resources	  = ARRAY_SIZE(s3c_spi0_resource),
	.resource	  = s3c_spi0_resource,
	.dev              = {
		.dma_mask		= &s3c_device_spi0_dmamask,
		.coherent_dma_mask	= 0xffffffffUL
	}
};
EXPORT_SYMBOL(s3c_device_spi0);

/* SPI (1) */
/* SPI1's register block sits at offset S3C2410_SPI1 within the SPI area. */
static struct resource s3c_spi1_resource[] = {
	[0] = {
		.start = S3C24XX_PA_SPI + S3C2410_SPI1,
		.end   = S3C24XX_PA_SPI + S3C2410_SPI1 + 0x1f,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_SPI1,
		.end   = IRQ_SPI1,
		.flags = IORESOURCE_IRQ,
	}
};

static u64 s3c_device_spi1_dmamask = 0xffffffffUL;

struct platform_device s3c_device_spi1 = {
	.name		  = "s3c2410-spi",
	.id		  = 1,
	.num_resources	  = ARRAY_SIZE(s3c_spi1_resource),
	.resource	  = s3c_spi1_resource,
	.dev              = {
		.dma_mask		= &s3c_device_spi1_dmamask,
		.coherent_dma_mask	= 0xffffffffUL
	}
};
EXPORT_SYMBOL(s3c_device_spi1);
#ifdef CONFIG_CPU_S3C2440
/* Camif Controller */
static struct resource s3c_camif_resource[] = {
[0] = {
.start = S3C2440_PA_CAMIF,
.end = S3C2440_PA_CAMIF + S3C2440_SZ_CAMIF - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_CAM,
.end = IRQ_CAM,
.flags = IORESOURCE_IRQ,
}
};
static u64 s3c_device_camif_dmamask = 0xffffffffUL;
struct platform_device s3c_device_camif = {
.name = "s3c2440-camif",
.id = -1,
.num_resources = ARRAY_SIZE(s3c_camif_resource),
.resource = s3c_camif_resource,
.dev = {
.dma_mask = &s3c_device_camif_dmamask,
.coherent_dma_mask = 0xffffffffUL
}
};
EXPORT_SYMBOL(s3c_device_camif);
/* AC97 */
static struct resource s3c_ac97_resource[] = {
[0] = {
.start = S3C2440_PA_AC97,
.end = S3C2440_PA_AC97 + S3C2440_SZ_AC97 -1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_S3C244x_AC97,
.end = IRQ_S3C244x_AC97,
.flags = IORESOURCE_IRQ,
},
[2] = {
.name = "PCM out",
.start = DMACH_PCM_OUT,
.end = DMACH_PCM_OUT,
.flags = IORESOURCE_DMA,
},
[3] = {
.name = "PCM in",
.start = DMACH_PCM_IN,
.end = DMACH_PCM_IN,
.flags = IORESOURCE_DMA,
},
[4] = {
.name = "Mic in",
.start = DMACH_MIC_IN,
.end = DMACH_MIC_IN,
.flags = IORESOURCE_DMA,
},
};
static u64 s3c_device_audio_dmamask = 0xffffffffUL;
struct platform_device s3c_device_ac97 = {
.name = "samsung-ac97",
.id = -1,
.num_resources = ARRAY_SIZE(s3c_ac97_resource),
.resource = s3c_ac97_resource,
.dev = {
.dma_mask = &s3c_device_audio_dmamask,
.coherent_dma_mask = 0xffffffffUL
}
};
EXPORT_SYMBOL(s3c_device_ac97);
/* ASoC I2S */
struct platform_device s3c2412_device_iis = {
.name = "s3c2412-iis",
.id = -1,
.dev = {
.dma_mask = &s3c_device_audio_dmamask,
.coherent_dma_mask = 0xffffffffUL
}
};
EXPORT_SYMBOL(s3c2412_device_iis);
#endif // CONFIG_CPU_S32440
| gpl-2.0 |
vcgato29/linux | drivers/net/wireless/ath/ath6kl/htc_pipe.c | 1747 | 44288 | /*
* Copyright (c) 2007-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "debug.h"
#include "hif-ops.h"
#define HTC_PACKET_CONTAINER_ALLOCATION 32
#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
static int ath6kl_htc_pipe_tx(struct htc_target *handle,
struct htc_packet *packet);
static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
/* htc pipe tx path */
/* Undo the HTC frame header that was pushed onto the netbuf for TX. */
static inline void restore_tx_packet(struct htc_packet *packet)
{
	if (!(packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF))
		return;

	skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
	packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
}
/*
 * Deliver send-complete indications for every packet on
 * @queue_to_indicate, either in one shot via the endpoint's multi
 * callback or one-by-one via the legacy tx_complete callback.
 */
static void do_send_completion(struct htc_endpoint *ep,
			       struct list_head *queue_to_indicate)
{
	struct htc_packet *packet;

	/* nothing to indicate */
	if (list_empty(queue_to_indicate))
		return;

	if (ep->ep_cb.tx_comp_multi != NULL) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
			   __func__, ep->eid,
			   get_queue_depth(queue_to_indicate));

		/* hand the whole queue over; the callback owns it now */
		ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);

		/* reset the (now foreign-owned) list head to be safe */
		INIT_LIST_HEAD(queue_to_indicate);
		return;
	}

	/* legacy EpTxComplete: indicate packets one at a time */
	while (!list_empty(queue_to_indicate)) {
		packet = list_first_entry(queue_to_indicate,
					  struct htc_packet, list);
		list_del(&packet->list);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: calling ep %d send complete callback on packet 0x%p\n",
			   __func__, ep->eid, packet);

		ep->ep_cb.tx_complete(ep->target, packet);
	}
}
/* Complete a single TX packet by wrapping it in a one-entry list. */
static void send_packet_completion(struct htc_target *target,
				   struct htc_packet *packet)
{
	struct list_head container;
	struct htc_endpoint *ep = &target->endpoint[packet->endpoint];

	/* strip the HTC header fixup before handing the skb back */
	restore_tx_packet(packet);

	INIT_LIST_HEAD(&container);
	list_add_tail(&packet->list, &container);

	do_send_completion(ep, &container);
}
/*
 * Move packets from the endpoint TX queue to @queue under credit-based
 * flow control.  Each packet costs one target credit per tgt_cred_sz
 * bytes of (payload + HTC header), rounded up; endpoint 0 is exempt.
 * Stops when the queue is empty or credits run out.  Dequeued packets
 * are stamped with their send flags, credit cost and sequence number.
 */
static void get_htc_packet_credit_based(struct htc_target *target,
					struct htc_endpoint *ep,
					struct list_head *queue)
{
	int credits_required;
	int remainder;
	u8 send_flags;
	struct htc_packet *packet;
	unsigned int transfer_len;

	/* NOTE : the TX lock is held when this function is called */

	/* loop until we can grab as many packets out of the queue as we can */
	while (true) {
		send_flags = 0;
		if (list_empty(&ep->txq))
			break;

		/* get packet at head, but don't remove it */
		packet = list_first_entry(&ep->txq, struct htc_packet, list);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: got head packet:0x%p , queue depth: %d\n",
			   __func__, packet, get_queue_depth(&ep->txq));

		transfer_len = packet->act_len + HTC_HDR_LENGTH;

		if (transfer_len <= target->tgt_cred_sz) {
			credits_required = 1;
		} else {
			/* figure out how many credits this message requires */
			credits_required = transfer_len / target->tgt_cred_sz;
			remainder = transfer_len % target->tgt_cred_sz;

			/* round up for any partial credit's worth of bytes */
			if (remainder)
				credits_required++;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
			   __func__, credits_required, ep->cred_dist.credits);

		if (ep->eid == ENDPOINT_0) {
			/*
			 * endpoint 0 is special, it always has a credit and
			 * does not require credit based flow control
			 */
			credits_required = 0;

		} else {
			/* out of credits: leave the packet on the queue */
			if (ep->cred_dist.credits < credits_required)
				break;

			ep->cred_dist.credits -= credits_required;
			ep->ep_st.cred_cosumd += credits_required;

			/* check if we need credits back from the target */
			if (ep->cred_dist.credits <
					ep->cred_dist.cred_per_msg) {
				/* tell the target we need credits ASAP! */
				send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;

				ep->ep_st.cred_low_indicate += 1;
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: host needs credits\n",
					   __func__);
			}
		}

		/* now we can fully dequeue */
		packet = list_first_entry(&ep->txq, struct htc_packet, list);

		list_del(&packet->list);
		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = credits_required;
		/* save send flags */
		packet->info.tx.flags = send_flags;
		packet->info.tx.seqno = ep->seqno;
		ep->seqno++;
		/* queue this packet into the caller's queue */
		list_add_tail(&packet->list, queue);
	}
}
/*
 * Move up to @resources packets from the endpoint TX queue to @queue,
 * stamping each with a sequence number and zeroed flags/credit cost.
 * Used on endpoints without credit-based flow control.
 */
static void get_htc_packet(struct htc_target *target,
			   struct htc_endpoint *ep,
			   struct list_head *queue, int resources)
{
	struct htc_packet *packet;

	/* NOTE : the TX lock is held when this function is called */

	for (; resources; resources--) {
		if (list_empty(&ep->txq))
			break;

		packet = list_first_entry(&ep->txq, struct htc_packet, list);
		list_del(&packet->list);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: got packet:0x%p , new queue depth: %d\n",
			   __func__, packet, get_queue_depth(&ep->txq));

		packet->info.tx.seqno = ep->seqno++;
		packet->info.tx.flags = 0;
		packet->info.tx.cred_used = 0;

		list_add_tail(&packet->list, queue);
	}
}
/*
 * Push each packet on @pkt_queue down to the HIF pipe layer: prepend
 * the HTC frame header, record the packet in the endpoint's TX lookup
 * queue (to match against send completions), then hand the skb to
 * ath6kl_hif_pipe_send().  On a send failure the packet is unlinked,
 * its credits reclaimed, it is put back on the caller's queue, and
 * all remaining packets are completed with an error status.
 * Returns 0 on success or the first failing status.
 */
static int htc_issue_packets(struct htc_target *target,
			     struct htc_endpoint *ep,
			     struct list_head *pkt_queue)
{
	int status = 0;
	u16 payload_len;
	struct sk_buff *skb;
	struct htc_frame_hdr *htc_hdr;
	struct htc_packet *packet;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "%s: queue: 0x%p, pkts %d\n", __func__,
		   pkt_queue, get_queue_depth(pkt_queue));

	while (!list_empty(pkt_queue)) {
		packet = list_first_entry(pkt_queue, struct htc_packet, list);
		list_del(&packet->list);

		skb = packet->skb;
		if (!skb) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			break;
		}

		payload_len = packet->act_len;

		/* setup HTC frame header */
		htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
							    sizeof(*htc_hdr));
		if (!htc_hdr) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			break;
		}

		/* mark so restore_tx_packet() can strip the header again */
		packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;

		/* Endianess? */
		put_unaligned((u16) payload_len, &htc_hdr->payld_len);
		htc_hdr->flags = packet->info.tx.flags;
		htc_hdr->eid = (u8) packet->endpoint;
		htc_hdr->ctrl[0] = 0;
		htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;

		spin_lock_bh(&target->tx_lock);

		/* store in look up queue to match completions */
		list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
		ep->ep_st.tx_issued += 1;
		spin_unlock_bh(&target->tx_lock);

		status = ath6kl_hif_pipe_send(target->dev->ar,
					      ep->pipe.pipeid_ul, NULL, skb);

		if (status != 0) {
			if (status != -ENOMEM) {
				/* TODO: if more than 1 endpoint maps to the
				 * same PipeID, it is possible to run out of
				 * resources in the HIF layer.
				 * Don't emit the error
				 */
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: failed status:%d\n",
					   __func__, status);
			}
			spin_lock_bh(&target->tx_lock);
			/* remove from the lookup queue again */
			list_del(&packet->list);

			/* reclaim credits */
			ep->cred_dist.credits += packet->info.tx.cred_used;
			spin_unlock_bh(&target->tx_lock);

			/* put it back into the callers queue */
			list_add(&packet->list, pkt_queue);
			break;
		}
	}

	if (status != 0) {
		/* fail everything still left on the caller's queue */
		while (!list_empty(pkt_queue)) {
			if (status != -ENOMEM) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: failed pkt:0x%p status:%d\n",
					   __func__, packet, status);
			}

			packet = list_first_entry(pkt_queue,
						  struct htc_packet, list);
			list_del(&packet->list);
			packet->status = status;
			send_packet_completion(target, packet);
		}
	}

	return status;
}
/*
 * Queue packets from @txq onto endpoint @ep and drain the endpoint's
 * TX queue to the HIF layer while transmit resources are available.
 *
 * If queueing @txq would exceed ep->max_txq_depth, each overflowing
 * packet is offered to the endpoint's tx_full callback, which may keep
 * or drop it.  Only one thread at a time runs the drain loop (guarded
 * by ep->tx_proc_cnt under tx_lock); concurrent callers just enqueue
 * their packets and return.
 *
 * Returns HTC_SEND_QUEUE_OK when the packets were accepted, or
 * HTC_SEND_QUEUE_DROP when the caller must clean up whatever remains
 * on @txq.
 */
static enum htc_send_queue_result htc_try_send(struct htc_target *target,
					       struct htc_endpoint *ep,
					       struct list_head *txq)
{
	struct list_head send_queue;	/* temp queue to hold packets */
	struct htc_packet *packet, *tmp_pkt;
	struct ath6kl *ar = target->dev->ar;
	enum htc_send_full_action action;
	int tx_resources, overflow, txqueue_depth, i, good_pkts;
	u8 pipeid;

	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
		   __func__, txq,
		   (txq == NULL) ? 0 : get_queue_depth(txq));

	/* init the local send queue */
	INIT_LIST_HEAD(&send_queue);

	/*
	 * txq equals to NULL means
	 * caller didn't provide a queue, just wants us to
	 * check queues and send
	 */
	if (txq != NULL) {
		if (list_empty(txq)) {
			/* empty queue */
			return HTC_SEND_QUEUE_DROP;
		}

		spin_lock_bh(&target->tx_lock);
		txqueue_depth = get_queue_depth(&ep->txq);
		spin_unlock_bh(&target->tx_lock);

		if (txqueue_depth >= ep->max_txq_depth) {
			/* we've already overflowed */
			overflow = get_queue_depth(txq);
		} else {
			/* get how much we will overflow by */
			overflow = txqueue_depth;
			overflow += get_queue_depth(txq);
			/* get how much we will overflow the TX queue by */
			overflow -= ep->max_txq_depth;
		}

		/* if overflow is negative or zero, we are okay */
		if (overflow > 0) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
				   __func__, ep->eid, overflow, txqueue_depth,
				   ep->max_txq_depth);
		}
		if ((overflow <= 0) ||
		    (ep->ep_cb.tx_full == NULL)) {
			/*
			 * all packets will fit or caller did not provide send
			 * full indication handler -- just move all of them
			 * to the local send_queue object
			 */
			list_splice_tail_init(txq, &send_queue);
		} else {
			good_pkts = get_queue_depth(txq) - overflow;
			if (good_pkts < 0) {
				WARN_ON_ONCE(1);
				return HTC_SEND_QUEUE_DROP;
			}

			/* we have overflowed, and a callback is provided */
			/* dequeue all non-overflow packets to the sendqueue */
			for (i = 0; i < good_pkts; i++) {
				/* pop off caller's queue */
				packet = list_first_entry(txq,
							  struct htc_packet,
							  list);
				/* move to local queue */
				list_move_tail(&packet->list, &send_queue);
			}

			/*
			 * the caller's queue has all the packets that won't fit
			 * walk through the caller's queue and indicate each to
			 * the send full handler
			 */
			list_for_each_entry_safe(packet, tmp_pkt,
						 txq, list) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: Indicat overflowed TX pkts: %p\n",
					   __func__, packet);
				action = ep->ep_cb.tx_full(ep->target, packet);
				if (action == HTC_SEND_FULL_DROP) {
					/* callback wants the packet dropped */
					ep->ep_st.tx_dropped += 1;

					/* leave this one in the caller's queue
					 * for cleanup */
				} else {
					/* callback wants to keep this packet,
					 * move from caller's queue to the send
					 * queue */
					list_move_tail(&packet->list,
						       &send_queue);
				}
			}

			if (list_empty(&send_queue)) {
				/* no packets made it in, caller will cleanup */
				return HTC_SEND_QUEUE_DROP;
			}
		}
	}

	/* without credit flow control, poll the HIF for room instead */
	if (!ep->pipe.tx_credit_flow_enabled) {
		tx_resources =
		    ath6kl_hif_pipe_get_free_queue_number(ar,
							  ep->pipe.pipeid_ul);
	} else {
		tx_resources = 0;
	}

	spin_lock_bh(&target->tx_lock);
	if (!list_empty(&send_queue)) {
		/* transfer packets to tail */
		list_splice_tail_init(&send_queue, &ep->txq);
		if (!list_empty(&send_queue)) {
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return HTC_SEND_QUEUE_DROP;
		}
		INIT_LIST_HEAD(&send_queue);
	}

	/* increment tx processing count on entry */
	ep->tx_proc_cnt++;
	if (ep->tx_proc_cnt > 1) {
		/*
		 * Another thread or task is draining the TX queues on this
		 * endpoint that thread will reset the tx processing count
		 * when the queue is drained.
		 */
		ep->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		return HTC_SEND_QUEUE_OK;
	}

	/***** beyond this point only 1 thread may enter ******/

	/*
	 * Now drain the endpoint TX queue for transmission as long as we have
	 * enough transmit resources.
	 */
	while (true) {
		if (get_queue_depth(&ep->txq) == 0)
			break;

		if (ep->pipe.tx_credit_flow_enabled) {
			/*
			 * Credit based mechanism provides flow control
			 * based on target transmit resource availability,
			 * we assume that the HIF layer will always have
			 * bus resources greater than target transmit
			 * resources.
			 */
			get_htc_packet_credit_based(target, ep, &send_queue);
		} else {
			/*
			 * Get all packets for this endpoint that we can
			 * for this pass.
			 */
			get_htc_packet(target, ep, &send_queue, tx_resources);
		}

		if (get_queue_depth(&send_queue) == 0) {
			/*
			 * Didn't get packets due to out of resources or TX
			 * queue was drained.
			 */
			break;
		}

		/* drop the lock while issuing; HIF calls may sleep/re-enter */
		spin_unlock_bh(&target->tx_lock);

		/* send what we can */
		htc_issue_packets(target, ep, &send_queue);

		if (!ep->pipe.tx_credit_flow_enabled) {
			pipeid = ep->pipe.pipeid_ul;
			tx_resources =
			    ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
		}

		spin_lock_bh(&target->tx_lock);
	}

	/* done with this endpoint, we can clear the count */
	ep->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);

	return HTC_SEND_QUEUE_OK;
}
/* htc control packet manipulation */
/* Release a TX control packet: free its skb, then the container itself. */
static void destroy_htc_txctrl_packet(struct htc_packet *packet)
{
	dev_kfree_skb(packet->skb);
	kfree(packet);
}
/*
 * Allocate a TX control packet: a zeroed htc_packet container plus an
 * skb of HTC_CONTROL_BUFFER_SIZE bytes.  Returns NULL if either
 * allocation fails (nothing is leaked on the partial-failure path).
 */
static struct htc_packet *build_htc_txctrl_packet(void)
{
	struct htc_packet *pkt;
	struct sk_buff *skb;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
	if (!skb) {
		kfree(pkt);
		return NULL;
	}

	pkt->skb = skb;
	return pkt;
}
/* Free a TX control packet; @target is unused but kept for symmetry
 * with htc_alloc_txctrl_packet(). */
static void htc_free_txctrl_packet(struct htc_target *target,
				   struct htc_packet *packet)
{
	destroy_htc_txctrl_packet(packet);
}
/* Allocate a TX control packet; @target is unused but kept so the
 * alloc/free pair share a signature shape. */
static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
{
	return build_htc_txctrl_packet();
}
/* TX-complete callback for the pseudo control endpoint: the control
 * packet is driver-owned, so just free it. */
static void htc_txctrl_complete(struct htc_target *target,
				struct htc_packet *packet)
{
	htc_free_txctrl_packet(target, packet);
}
#define MAX_MESSAGE_SIZE 1536
/*
 * Partition the target's TX credits among the WMI services.
 *
 * Credits are measured in units of the target credit size; a
 * 'credit_per_maxmsg' chunk is enough for one MAX_MESSAGE_SIZE frame.
 * In the normal (non USB-audio) path: VI and VO each get a quarter of
 * the remaining total, control and BK get one max-message chunk each,
 * and whatever is left goes to best effort.  The walk is strictly
 * sequential over target->pipe.txcredit_alloc[].
 *
 * Returns 0 on success, -ENOMEM if the credits run out before every
 * service is funded.
 */
static int htc_setup_target_buffer_assignments(struct htc_target *target)
{
	int status, credits, credit_per_maxmsg, i;
	struct htc_pipe_txcredit_alloc *entry;
	unsigned int hif_usbaudioclass = 0;

	/* round up so one chunk always covers a full max-size message */
	credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
	if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
		credit_per_maxmsg++;

	/* TODO, this should be configured by the caller! */

	credits = target->tgt_creds;
	entry = &target->pipe.txcredit_alloc[0];

	status = -ENOMEM;

	/* FIXME: hif_usbaudioclass is always zero */
	if (hif_usbaudioclass) {
		/* NOTE(review): dead branch while hif_usbaudioclass == 0 */
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: For USB Audio Class- Total:%d\n",
			   __func__, credits);
		entry++;
		entry++;
		/* Setup VO Service To have Max Credits */
		entry->service_id = WMI_DATA_VO_SVC;
		entry->credit_alloc = (credits - 6);
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	} else {
		entry++;
		entry->service_id = WMI_DATA_VI_SVC;
		entry->credit_alloc = credits / 4;
		/* every funded service gets at least one credit */
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_DATA_VO_SVC;
		entry->credit_alloc = credits / 4;
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_DATA_BK_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	}

	if (status == 0) {
		/* log the final per-service allocation table */
		for (i = 0; i < ENDPOINT_MAX; i++) {
			if (target->pipe.txcredit_alloc[i].service_id != 0) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
					   i,
					   target->pipe.txcredit_alloc[i].
					   service_id,
					   target->pipe.txcredit_alloc[i].
					   credit_alloc);
			}
		}
	}

	return status;
}
/* process credit reports and call distribution function */
/*
 * Apply a credit report from the target: add the reported credits to
 * each listed endpoint and kick the TX path for any endpoint that now
 * has both credits and queued packets.  tx_lock is held across the
 * bookkeeping but dropped around htc_try_send(), which takes it
 * itself.
 */
static void htc_process_credit_report(struct htc_target *target,
				      struct htc_credit_report *rpt,
				      int num_entries,
				      enum htc_endpoint_id from_ep)
{
	int total_credits = 0, i;
	struct htc_endpoint *ep;

	/* lock out TX while we update credits */
	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < num_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			/* malformed entry: abandon the rest of the report */
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		ep = &target->endpoint[rpt->eid];
		ep->cred_dist.credits += rpt->credits;

		if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
			spin_unlock_bh(&target->tx_lock);
			htc_try_send(target, ep, NULL);
			spin_lock_bh(&target->tx_lock);
		}

		total_credits += rpt->credits;
	}
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "Report indicated %d credits to distribute\n",
		   total_credits);

	spin_unlock_bh(&target->tx_lock);
}
/* flush endpoint TX queue */
/*
 * Drain every packet queued on @ep's TX queue, completing each with a
 * zero status.  The whole flush runs under tx_lock.
 * NOTE(review): @tag is currently not consulted — all packets are
 * flushed regardless of tag.
 */
static void htc_flush_tx_endpoint(struct htc_target *target,
				  struct htc_endpoint *ep, u16 tag)
{
	struct htc_packet *pkt;

	spin_lock_bh(&target->tx_lock);
	while (get_queue_depth(&ep->txq) != 0) {
		pkt = list_first_entry(&ep->txq, struct htc_packet, list);
		list_del(&pkt->list);
		pkt->status = 0;
		send_packet_completion(target, pkt);
	}
	spin_unlock_bh(&target->tx_lock);
}
/*
* In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC,
* since upper layers expects struct htc_packet containers we use the completed
* skb and lookup it's corresponding HTC packet buffer from a lookup list.
* This is extra overhead that can be fixed by re-aligning HIF interfaces with
* HTC.
*/
/*
 * Map a completed skb back to its htc_packet by scanning the
 * endpoint's TX lookup queue; on a hit the packet is unlinked and
 * returned, otherwise NULL.  Lower layers complete in order, so the
 * match is normally the head entry and the scan is cheap.
 */
static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
					       struct htc_endpoint *ep,
					       struct sk_buff *skb)
{
	struct htc_packet *entry, *next, *match = NULL;

	spin_lock_bh(&target->tx_lock);

	/* walk from the front of the lookup queue */
	list_for_each_entry_safe(entry, next, &ep->pipe.tx_lookup_queue,
				 list) {
		if (entry->skb == skb) {
			list_del(&entry->list);
			match = entry;
			break;
		}
	}

	spin_unlock_bh(&target->tx_lock);

	return match;
}
/*
 * HIF completion handler for a transmitted skb: map the skb back to
 * its HTC packet via the endpoint lookup queue, complete it upward,
 * and (when credit flow is disabled for the endpoint) retry sending
 * anything still queued.  Always returns 0.
 */
static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{
	struct htc_target *target = ar->htc_target;
	struct htc_frame_hdr *htc_hdr;
	struct htc_endpoint *ep;
	struct htc_packet *packet;
	u8 ep_id, *netdata;
	u32 netlen;

	netdata = skb->data;
	netlen = skb->len;

	/* the endpoint id still sits in the frame's HTC header */
	htc_hdr = (struct htc_frame_hdr *) netdata;

	ep_id = htc_hdr->eid;
	ep = &target->endpoint[ep_id];

	packet = htc_lookup_tx_packet(target, ep, skb);
	if (packet == NULL) {
		/* may have already been flushed and freed */
		ath6kl_err("HTC TX lookup failed!\n");
	} else {
		/* will be giving this buffer back to upper layers */
		packet->status = 0;
		send_packet_completion(target, packet);
	}
	/* the skb belongs to the completed packet from here on */
	skb = NULL;

	if (!ep->pipe.tx_credit_flow_enabled) {
		/*
		 * note: when using TX credit flow, the re-checking of queues
		 * happens when credits flow back from the target. in the
		 * non-TX credit case, we recheck after the packet completes
		 */
		htc_try_send(target, ep, NULL);
	}

	return 0;
}
/*
 * Send a queue of packets, all destined for the endpoint of the first
 * entry.  Packets that htc_try_send() leaves behind are completed with
 * -ENOMEM so the owner gets them back.  Returns 0, or -EINVAL for an
 * empty queue or an out-of-range endpoint id.
 */
static int htc_send_packets_multiple(struct htc_target *target,
				     struct list_head *pkt_queue)
{
	struct htc_packet *first, *pkt, *next;
	struct htc_endpoint *ep;

	if (list_empty(pkt_queue))
		return -EINVAL;

	/* the first packet determines the destination endpoint */
	first = list_first_entry(pkt_queue, struct htc_packet, list);
	if (first->endpoint >= ENDPOINT_MAX) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	ep = &target->endpoint[first->endpoint];

	htc_try_send(target, ep, pkt_queue);

	/* whatever is still on the caller's queue could not be queued */
	if (!list_empty(pkt_queue)) {
		list_for_each_entry_safe(pkt, next, pkt_queue, list) {
			pkt->status = -ENOMEM;
		}

		do_send_completion(ep, pkt_queue);
	}

	return 0;
}
/* htc pipe rx path */
/*
 * Pop one htc_packet container from the target's free pool.  The pool
 * is a singly linked stack that intrusively reuses packet->list.next
 * as the "next free" pointer, so pooled containers must never sit on a
 * real list.  Protected by rx_lock; returns NULL when empty.
 */
static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
{
	struct htc_packet *packet;

	spin_lock_bh(&target->rx_lock);

	if (target->pipe.htc_packet_pool == NULL) {
		spin_unlock_bh(&target->rx_lock);
		return NULL;
	}

	packet = target->pipe.htc_packet_pool;
	/* advance the pool head; list.next doubles as the free link */
	target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;

	spin_unlock_bh(&target->rx_lock);

	packet->list.next = NULL;
	return packet;
}
/*
 * Push a container back onto the free pool (LIFO), using
 * packet->list.next as the link field exactly as
 * alloc_htc_packet_container() expects.  Protected by rx_lock.
 */
static void free_htc_packet_container(struct htc_target *target,
				      struct htc_packet *packet)
{
	struct list_head *lh;

	spin_lock_bh(&target->rx_lock);

	if (target->pipe.htc_packet_pool == NULL) {
		/* first entry: pool head with a NULL link */
		target->pipe.htc_packet_pool = packet;
		packet->list.next = NULL;
	} else {
		/* link current head behind the new entry */
		lh = (struct list_head *) target->pipe.htc_packet_pool;
		packet->list.next = lh;
		target->pipe.htc_packet_pool = packet;
	}

	spin_unlock_bh(&target->rx_lock);
}
/*
 * Parse the trailer records appended to a received HTC frame.
 * @buffer points at the first record header, @len is the total trailer
 * length.  Only credit reports (HTC_RECORD_CREDITS) are acted on;
 * unknown record ids are logged and skipped.  Returns 0 on success or
 * -EINVAL for a truncated/oversized record.
 */
static int htc_process_trailer(struct htc_target *target, u8 *buffer,
			       int len, enum htc_endpoint_id from_ep)
{
	struct htc_credit_report *report;
	struct htc_record_hdr *record;
	u8 *record_buf, *orig_buf;
	int orig_len, status;

	orig_buf = buffer;
	orig_len = len;
	status = 0;

	while (len > 0) {
		if (len < sizeof(struct htc_record_hdr)) {
			/* not even a full record header left */
			status = -EINVAL;
			break;
		}

		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buffer;
		len -= sizeof(struct htc_record_hdr);
		buffer += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			/* no room left in buffer for record */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "invalid length: %d (id:%d) buffer has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -EINVAL;
			break;
		}

		/* start of record follows the header */
		record_buf = buffer;

		switch (record->rec_id) {
		case HTC_RECORD_CREDITS:
			if (record->len < sizeof(struct htc_credit_report)) {
				WARN_ON_ONCE(1);
				return -EINVAL;
			}

			/* a record may carry several report entries */
			report = (struct htc_credit_report *) record_buf;
			htc_process_credit_report(target, report,
						  record->len / sizeof(*report),
						  from_ep);
			break;
		default:
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "unhandled record: id:%d length:%d\n",
				   record->rec_id, record->len);
			break;
		}

		if (status != 0)
			break;

		/* advance buffer past this record for next time around */
		buffer += record->len;
		len -= record->len;
	}

	return status;
}
/*
 * Hand every packet on @queue_to_indicate to the endpoint's legacy
 * EpRecv callback, unlinking each entry first; the callback takes
 * ownership of the packet.  An empty queue is a no-op.
 *
 * Cleanup: the original had a separate list_empty() guard before the
 * loop and a trailing `return;` — both redundant, since the while
 * condition already handles the empty case.
 */
static void do_recv_completion(struct htc_endpoint *ep,
			       struct list_head *queue_to_indicate)
{
	struct htc_packet *packet;

	/* using legacy EpRecv */
	while (!list_empty(queue_to_indicate)) {
		packet = list_first_entry(queue_to_indicate,
					  struct htc_packet, list);
		list_del(&packet->list);
		ep->ep_cb.rx(ep->target, packet);
	}
}
static void recv_packet_completion(struct htc_target *target,
struct htc_endpoint *ep,
struct htc_packet *packet)
{
struct list_head container;
INIT_LIST_HEAD(&container);
list_add_tail(&packet->list, &container);
/* do completion */
do_recv_completion(ep, &container);
}
/*
 * HIF RX completion: validate the HTC frame header, process any
 * appended trailer (credit reports), stash ENDPOINT_0 control
 * responses in the ctrl_response buffer for the polling waiter, and
 * hand data frames to the endpoint's RX callback wrapped in a packet
 * container.  The skb is consumed on every path (freed here unless
 * ownership was handed off, in which case the local pointer is NULLed
 * first).  Returns 0 or a negative errno.
 */
static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
				       u8 pipeid)
{
	struct htc_target *target = ar->htc_target;
	u8 *netdata, *trailer, hdr_info;
	struct htc_frame_hdr *htc_hdr;
	u32 netlen, trailerlen = 0;
	struct htc_packet *packet;
	struct htc_endpoint *ep;
	u16 payload_len;
	int status = 0;

	/*
	 * ar->htc_target can be NULL due to a race condition that can occur
	 * during driver initialization(we do 'ath6kl_hif_power_on' before
	 * initializing 'ar->htc_target' via 'ath6kl_htc_create').
	 * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as
	 * usb_complete_t/callback function for 'usb_fill_bulk_urb'.
	 * Thus the possibility of ar->htc_target being NULL
	 * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
	 */
	if (WARN_ON_ONCE(!target)) {
		ath6kl_err("Target not yet initialized\n");
		status = -EINVAL;
		goto free_skb;
	}

	netdata = skb->data;
	netlen = skb->len;

	htc_hdr = (struct htc_frame_hdr *) netdata;

	if (htc_hdr->eid >= ENDPOINT_MAX) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "HTC Rx: invalid EndpointID=%d\n",
			   htc_hdr->eid);
		status = -EINVAL;
		goto free_skb;
	}
	ep = &target->endpoint[htc_hdr->eid];

	/* header fields are little-endian and may be unaligned */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	if (netlen < (payload_len + HTC_HDR_LENGTH)) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "HTC Rx: insufficient length, got:%d expected =%u\n",
			   netlen, payload_len + HTC_HDR_LENGTH);
		status = -EINVAL;
		goto free_skb;
	}

	/* get flags to check for trailer */
	hdr_info = htc_hdr->flags;
	if (hdr_info & HTC_FLG_RX_TRAILER) {
		/* extract the trailer length */
		hdr_info = htc_hdr->ctrl[0];
		if ((hdr_info < sizeof(struct htc_record_hdr)) ||
		    (hdr_info > payload_len)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "invalid header: payloadlen should be %d, CB[0]: %d\n",
				   payload_len, hdr_info);
			status = -EINVAL;
			goto free_skb;
		}

		trailerlen = hdr_info;
		/* process trailer after hdr/apps payload */
		trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
			payload_len - hdr_info;
		status = htc_process_trailer(target, trailer, hdr_info,
					     htc_hdr->eid);
		if (status != 0)
			goto free_skb;
	}

	if (((int) payload_len - (int) trailerlen) <= 0) {
		/* zero length packet with trailer, just drop these */
		goto free_skb;
	}

	if (htc_hdr->eid == ENDPOINT_0) {
		/* handle HTC control message */
		if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
			/*
			 * fatal: target should not send unsolicited
			 * messageson the endpoint 0
			 */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "HTC ignores Rx Ctrl after setup complete\n");
			status = -EINVAL;
			goto free_skb;
		}

		/* remove HTC header */
		skb_pull(skb, HTC_HDR_LENGTH);

		netdata = skb->data;
		netlen = skb->len;

		/* copy the response where htc_wait_recv_ctrl_message polls */
		spin_lock_bh(&target->rx_lock);

		target->pipe.ctrl_response_valid = true;
		target->pipe.ctrl_response_len = min_t(int, netlen,
						       HTC_MAX_CTRL_MSG_LEN);
		memcpy(target->pipe.ctrl_response_buf, netdata,
		       target->pipe.ctrl_response_len);

		spin_unlock_bh(&target->rx_lock);

		dev_kfree_skb(skb);
		skb = NULL;

		goto free_skb;
	}

	/*
	 * TODO: the message based HIF architecture allocates net bufs
	 * for recv packets since it bridges that HIF to upper layers,
	 * which expects HTC packets, we form the packets here
	 */
	packet = alloc_htc_packet_container(target);
	if (packet == NULL) {
		status = -ENOMEM;
		goto free_skb;
	}

	packet->status = 0;
	packet->endpoint = htc_hdr->eid;
	packet->pkt_cntxt = skb;

	/* TODO: for backwards compatibility */
	packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
	packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;

	/*
	 * TODO: this is a hack because the driver layer will set the
	 * actual len of the skb again which will just double the len
	 */
	skb_trim(skb, 0);

	recv_packet_completion(target, ep, packet);

	/* recover the packet container */
	free_htc_packet_container(target, packet);
	/* the RX callback now owns the skb (via pkt_cntxt) */
	skb = NULL;

free_skb:
	dev_kfree_skb(skb);

	return status;
}
/*
 * Cancel every buffered RX packet on @ep, completing each with
 * -ECANCELED.  rx_lock is dropped around each completion since the RX
 * callback may re-enter HTC; the entry is unlinked while the lock is
 * still held, so the restart-from-head loop stays safe.
 */
static void htc_flush_rx_queue(struct htc_target *target,
			       struct htc_endpoint *ep)
{
	struct list_head container;
	struct htc_packet *packet;

	spin_lock_bh(&target->rx_lock);

	while (1) {
		if (list_empty(&ep->rx_bufq))
			break;

		packet = list_first_entry(&ep->rx_bufq,
					  struct htc_packet, list);
		list_del(&packet->list);

		spin_unlock_bh(&target->rx_lock);
		packet->status = -ECANCELED;
		packet->act_len = 0;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "Flushing RX packet:0x%p, length:%d, ep:%d\n",
			   packet, packet->buf_len,
			   packet->endpoint);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);

		/* give the packet back */
		do_recv_completion(ep, &container);

		spin_lock_bh(&target->rx_lock);
	}

	spin_unlock_bh(&target->rx_lock);
}
/* polling routine to wait for a control packet to be received */
static int htc_wait_recv_ctrl_message(struct htc_target *target)
{
int count = HTC_TARGET_RESPONSE_POLL_COUNT;
while (count > 0) {
spin_lock_bh(&target->rx_lock);
if (target->pipe.ctrl_response_valid) {
target->pipe.ctrl_response_valid = false;
spin_unlock_bh(&target->rx_lock);
break;
}
spin_unlock_bh(&target->rx_lock);
count--;
msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
}
if (count <= 0) {
ath6kl_warn("htc pipe control receive timeout!\n");
return -ETIMEDOUT;
}
return 0;
}
/* RX-complete callback for the pseudo control endpoint: free the skb
 * only for a canceled ENDPOINT_0 packet that still carries one. */
static void htc_rxctrl_complete(struct htc_target *context,
				struct htc_packet *packet)
{
	if (packet->endpoint != ENDPOINT_0)
		return;
	if (packet->status != -ECANCELED)
		return;
	if (packet->skb != NULL)
		dev_kfree_skb(packet->skb);
}
/* htc pipe initialization */
static void reset_endpoint_states(struct htc_target *target)
{
struct htc_endpoint *ep;
int i;
for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
ep = &target->endpoint[i];
ep->svc_id = 0;
ep->len_max = 0;
ep->max_txq_depth = 0;
ep->eid = i;
INIT_LIST_HEAD(&ep->txq);
INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
INIT_LIST_HEAD(&ep->rx_bufq);
ep->target = target;
ep->pipe.tx_credit_flow_enabled = true;
}
}
/* start HTC, this is called after all services are connected */
static int htc_config_target_hif_pipe(struct htc_target *target)
{
return 0;
}
/* htc service functions */
static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
{
u8 allocation = 0;
int i;
for (i = 0; i < ENDPOINT_MAX; i++) {
if (target->pipe.txcredit_alloc[i].service_id == service_id)
allocation =
target->pipe.txcredit_alloc[i].credit_alloc;
}
if (allocation == 0) {
ath6kl_dbg(ATH6KL_DBG_HTC,
"HTC Service TX : 0x%2.2X : allocation is zero!\n",
service_id);
}
return allocation;
}
/*
 * Connect an HTC service.  The pseudo control service
 * (HTC_CTRL_RSVD_SVC) is assigned ENDPOINT_0 locally; every other
 * service exchanges a connect-service message with the target and
 * configures the endpoint the target assigns: credit allocation,
 * maximum message size, caller callbacks and the UL/DL HIF pipe
 * mapping.  Returns 0 on success or a negative errno; any control
 * packet still owned on exit is freed at the free_packet label.
 */
static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
					struct htc_service_connect_req *conn_req,
					struct htc_service_connect_resp *conn_resp)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
	bool disable_credit_flowctrl = false;
	unsigned int max_msg_size = 0;
	struct htc_endpoint *ep;
	int length, status = 0;
	struct sk_buff *skb;
	u8 tx_alloc;
	u16 flags;

	if (conn_req->svc_id == 0) {
		WARN_ON_ONCE(1);
		status = -EINVAL;
		goto free_packet;
	}

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_epid = ENDPOINT_0;
		max_msg_size = HTC_MAX_CTRL_MSG_LEN;
		tx_alloc = 0;

	} else {
		tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
		if (tx_alloc == 0) {
			status = -ENOMEM;
			goto free_packet;
		}

		/* allocate a packet to send to the target */
		packet = htc_alloc_txctrl_packet(target);

		if (packet == NULL) {
			WARN_ON_ONCE(1);
			status = -ENOMEM;
			goto free_packet;
		}

		skb = packet->skb;
		length = sizeof(struct htc_conn_service_msg);

		/* assemble connect service message */
		conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
								   length);
		if (conn_msg == NULL) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		memset(conn_msg, 0,
		       sizeof(struct htc_conn_service_msg));

		/* message fields go over the wire little-endian */
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
					~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);

		/* tell target desired recv alloc for this ep */
		flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
		conn_msg->conn_flags |= cpu_to_le16(flags);

		if (conn_req->conn_flags &
		    HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
			disable_credit_flowctrl = true;
		}

		set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
				 length,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		status = ath6kl_htc_pipe_tx(target, packet);

		/* we don't own it anymore */
		packet = NULL;
		if (status != 0)
			goto free_packet;

		/* wait for response */
		status = htc_wait_recv_ctrl_message(target);
		if (status != 0)
			goto free_packet;

		/* we controlled the buffer creation so it has to be
		 * properly aligned
		 */
		resp_msg = (struct htc_conn_service_resp *)
		    target->pipe.ctrl_response_buf;

		if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
		    (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
			/* this message is not valid */
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "%s: service 0x%X conn resp: status: %d ep: %d\n",
			   __func__, resp_msg->svc_id, resp_msg->status,
			   resp_msg->eid);

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "Target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -EINVAL;
			goto free_packet;
		}

		assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
		max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
	}

	/* the rest are parameter checks so set the error status */
	status = -EINVAL;

	if (assigned_epid >= ENDPOINT_MAX) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	if (max_msg_size == 0) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	ep = &target->endpoint[assigned_epid];
	ep->eid = assigned_epid;
	if (ep->svc_id != 0) {
		/* endpoint already in use! */
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_epid;
	conn_resp->len_max = max_msg_size;

	/* setup the endpoint */
	ep->svc_id = conn_req->svc_id;	/* this marks ep in use */
	ep->max_txq_depth = conn_req->max_txq_depth;
	ep->len_max = max_msg_size;
	ep->cred_dist.credits = tx_alloc;
	ep->cred_dist.cred_sz = target->tgt_cred_sz;
	/* credits per message, rounded up to cover a full max message */
	ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
	if (max_msg_size % target->tgt_cred_sz)
		ep->cred_dist.cred_per_msg++;

	/* copy all the callbacks */
	ep->ep_cb = conn_req->ep_cb;

	/* initialize tx_drop_packet_threshold */
	ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;

	status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
					     &ep->pipe.pipeid_ul,
					     &ep->pipe.pipeid_dl);
	if (status != 0)
		goto free_packet;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
		   ep->svc_id, ep->pipe.pipeid_ul,
		   ep->pipe.pipeid_dl, ep->eid);

	if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
		ep->pipe.tx_credit_flow_enabled = false;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "SVC: 0x%4.4X ep:%d TX flow control off\n",
			   ep->svc_id, assigned_epid);
	}

free_packet:
	if (packet != NULL)
		htc_free_txctrl_packet(target, packet);
	return status;
}
/* htc export functions */
/*
 * Allocate and initialize an HTC target instance for the pipe-based
 * HIF: set up the locks, reset all endpoint state, pre-populate the
 * packet-container pool and record the default control pipe ids for
 * ENDPOINT_0.  Returns the new target, or NULL on allocation failure
 * (partial state is torn down via ath6kl_htc_pipe_cleanup()).
 */
static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
{
	int status = 0;
	struct htc_endpoint *ep = NULL;
	struct htc_target *target = NULL;
	struct htc_packet *packet;
	int i;

	target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
	if (target == NULL) {
		ath6kl_err("htc create unable to allocate memory\n");
		status = -ENOMEM;
		goto fail_htc_create;
	}

	spin_lock_init(&target->htc_lock);
	spin_lock_init(&target->rx_lock);
	spin_lock_init(&target->tx_lock);

	reset_endpoint_states(target);

	/* seed the container pool; an individual allocation failure is
	 * tolerated and merely shrinks the pool */
	for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
		packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);

		if (packet != NULL)
			free_htc_packet_container(target, packet);
	}

	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
	if (!target->dev) {
		ath6kl_err("unable to allocate memory\n");
		status = -ENOMEM;
		goto fail_htc_create;
	}
	target->dev->ar = ar;
	target->dev->htc_cnxt = target;

	/* Get HIF default pipe for HTC message exchange */
	ep = &target->endpoint[ENDPOINT_0];

	ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
				    &ep->pipe.pipeid_dl);

	return target;

fail_htc_create:
	if (status != 0) {
		if (target != NULL)
			ath6kl_htc_pipe_cleanup(target);

		target = NULL;
	}
	return target;
}
/* cleanup the HTC instance */
/* Tear down an HTC target: drain and free every pooled packet
 * container, then release the device wrapper and the target itself. */
static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
{
	struct htc_packet *pkt;

	for (;;) {
		pkt = alloc_htc_packet_container(target);
		if (pkt == NULL)
			break;
		kfree(pkt);
	}

	kfree(target->dev);

	/* kfree our instance */
	kfree(target);
}
/*
 * Start HTC after all services are connected: send the extended
 * setup-complete message to the target on ENDPOINT_0 and mark setup as
 * complete.  Returns 0 or a negative errno from the send path.
 */
static int ath6kl_htc_pipe_start(struct htc_target *target)
{
	struct htc_packet *packet;
	struct htc_setup_comp_ext_msg *setup;
	struct sk_buff *skb;

	htc_config_target_hif_pipe(target);

	/* allocate a control buffer to carry the message */
	packet = htc_alloc_txctrl_packet(target);
	if (packet == NULL) {
		WARN_ON_ONCE(1);
		return -ENOMEM;
	}
	skb = packet->skb;

	/* build the setup-complete (extended) message in the skb */
	setup = (struct htc_setup_comp_ext_msg *) skb_put(skb, sizeof(*setup));
	memset(setup, 0, sizeof(*setup));
	setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

	ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");

	set_htc_pkt_info(packet, NULL, (u8 *) setup, sizeof(*setup),
			 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;

	return ath6kl_htc_pipe_tx(target, packet);
}
/* Stop HTC: flush RX and TX on every endpoint, reset all endpoint
 * state, and clear the setup-complete flag. */
static void ath6kl_htc_pipe_stop(struct htc_target *target)
{
	int idx;

	for (idx = 0; idx < ENDPOINT_MAX; idx++) {
		struct htc_endpoint *endpoint = &target->endpoint[idx];

		htc_flush_rx_queue(target, endpoint);
		htc_flush_tx_endpoint(target, endpoint,
				      HTC_TX_PACKET_TAG_ALL);
	}

	reset_endpoint_states(target);
	target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
}
/* Report how many receive buffers are queued on @endpoint; the count
 * is taken under rx_lock, which guards rx_bufq. */
static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
					 enum htc_endpoint_id endpoint)
{
	int depth;

	spin_lock_bh(&target->rx_lock);
	depth = get_queue_depth(&target->endpoint[endpoint].rx_bufq);
	spin_unlock_bh(&target->rx_lock);

	return depth;
}
static int ath6kl_htc_pipe_tx(struct htc_target *target,
struct htc_packet *packet)
{
struct list_head queue;
ath6kl_dbg(ATH6KL_DBG_HTC,
"%s: endPointId: %d, buffer: 0x%p, length: %d\n",
__func__, packet->endpoint, packet->buf,
packet->act_len);
INIT_LIST_HEAD(&queue);
list_add_tail(&packet->list, &queue);
return htc_send_packets_multiple(target, &queue);
}
/*
 * Wait for the target's HTC READY message, validate it, record the
 * target's credit count and credit size, distribute credits among the
 * services, then connect the pseudo control service on ENDPOINT_0.
 * Returns 0 on success or a negative errno (-ECOMM for a malformed or
 * empty ready message).
 */
static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
{
	struct htc_ready_ext_msg *ready_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status = 0;

	status = htc_wait_recv_ctrl_message(target);
	if (status != 0)
		return status;

	if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
		ath6kl_warn("invalid htc pipe ready msg len: %d\n",
			    target->pipe.ctrl_response_len);
		return -ECOMM;
	}

	ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;

	if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
		ath6kl_warn("invalid htc pipe ready msg: 0x%x\n",
			    ready_msg->ver2_0_info.msg_id);
		return -ECOMM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "Target Ready! : transmit resources : %d size:%d\n",
		   ready_msg->ver2_0_info.cred_cnt,
		   ready_msg->ver2_0_info.cred_sz);

	target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);

	if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
		return -ECOMM;

	htc_setup_target_buffer_assignments(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.tx_complete = htc_txctrl_complete;
	connect.ep_cb.rx = htc_rxctrl_complete;
	connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect fake service */
	status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);

	return status;
}
/* Flush the TX queue of @endpoint; a zero service id means the
 * endpoint was never connected, which is a caller bug. */
static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
				       enum htc_endpoint_id endpoint, u16 tag)
{
	struct htc_endpoint *ep = &target->endpoint[endpoint];

	if (ep->svc_id == 0) {
		WARN_ON_ONCE(1);
		/* not in use.. */
		return;
	}

	htc_flush_tx_endpoint(target, ep, tag);
}
/*
 * Queue caller-supplied receive buffers on the endpoint of the first
 * packet (all packets in @pkt_queue are taken to target that same
 * endpoint).  Returns 0, or -EINVAL for an empty queue or an
 * out-of-range endpoint id.
 */
static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
					      struct list_head *pkt_queue)
{
	struct htc_packet *packet, *tmp_pkt, *first;
	struct htc_endpoint *ep;
	int status = 0;

	if (list_empty(pkt_queue))
		return -EINVAL;

	first = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first->endpoint >= ENDPOINT_MAX) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
		   __func__, first->endpoint, get_queue_depth(pkt_queue),
		   first->buf_len);

	ep = &target->endpoint[first->endpoint];

	spin_lock_bh(&target->rx_lock);

	/* store receive packets */
	list_splice_tail_init(pkt_queue, &ep->rx_bufq);

	spin_unlock_bh(&target->rx_lock);

	/* NOTE(review): status is never set non-zero above, so this
	 * cancellation path is currently unreachable dead code */
	if (status != 0) {
		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
		}

		do_recv_completion(ep, pkt_queue);
	}

	return status;
}
/* Endpoint activity notification hook for the HTC ops table.
 * TODO: not implemented for the pipe transport yet. */
static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id ep,
					     bool active)
{
}
/*
 * Free the skb behind every RX buffer still queued on any endpoint.
 * Each entry is unlinked under rx_lock; the lock is then dropped for
 * the skb free and the safe-iteration continues from the saved next
 * pointer.  NOTE(review): this relies on no concurrent mutation of
 * rx_bufq while the lock is dropped mid-iteration — confirm callers
 * only invoke this after RX traffic has stopped.
 */
static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];

		spin_lock_bh(&target->rx_lock);

		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);

			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx flush pkt 0x%p len %d ep %d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			/* pkt_cntxt carries the skb set up at rx time */
			dev_kfree_skb(packet->pkt_cntxt);
			spin_lock_bh(&target->rx_lock);
		}

		spin_unlock_bh(&target->rx_lock);
	}
}
/* Credit setup hook for the HTC ops table: a no-op in the pipe
 * implementation; always succeeds. */
static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
					struct ath6kl_htc_credit_info *info)
{
	return 0;
}
/* HTC operations vtable for the pipe transport; installed on the
 * ath6kl instance by ath6kl_htc_pipe_attach(). */
static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
	.create = ath6kl_htc_pipe_create,
	.wait_target = ath6kl_htc_pipe_wait_target,
	.start = ath6kl_htc_pipe_start,
	.conn_service = ath6kl_htc_pipe_conn_service,
	.tx = ath6kl_htc_pipe_tx,
	.stop = ath6kl_htc_pipe_stop,
	.cleanup = ath6kl_htc_pipe_cleanup,
	.flush_txep = ath6kl_htc_pipe_flush_txep,
	.flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
	.activity_changed = ath6kl_htc_pipe_activity_changed,
	.get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
	.add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
	.credit_setup = ath6kl_htc_pipe_credit_setup,
	.tx_complete = ath6kl_htc_pipe_tx_complete,
	.rx_complete = ath6kl_htc_pipe_rx_complete,
};
/* Install the pipe-based HTC operations on this ath6kl instance. */
void ath6kl_htc_pipe_attach(struct ath6kl *ar)
{
	ar->htc_ops = &ath6kl_htc_pipe_ops;
}
| gpl-2.0 |
phenyl-sphinx/linux | arch/arm/mach-davinci/da830.c | 1747 | 39977 | /*
* TI DA830/OMAP L137 chip specific setup
*
* Author: Mark A. Greer <mgreer@mvista.com>
*
* 2009 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_data/gpio-davinci.h>
#include <asm/mach/map.h>
#include <mach/psc.h>
#include <mach/irqs.h>
#include <mach/cputype.h>
#include <mach/common.h>
#include <mach/time.h>
#include <mach/da8xx.h>
#include "clock.h"
#include "mux.h"
/* Offsets of the 8 compare registers (CMP12_0..CMP12_7) on the da830 */
#define DA830_CMP12_0 0x60
#define DA830_CMP12_1 0x64
#define DA830_CMP12_2 0x68
#define DA830_CMP12_3 0x6c
#define DA830_CMP12_4 0x70
#define DA830_CMP12_5 0x74
#define DA830_CMP12_6 0x78
#define DA830_CMP12_7 0x7c
/* Rate of the fixed input reference clock, in Hz (see ref_clk below) */
#define DA830_REF_FREQ 24000000
/*
 * PLL0 controller description: register base address and the fact that
 * this PLL has both a pre-divider and a post-divider.
 */
static struct pll_data pll0_data = {
	.num = 1,
	.phys_base = DA8XX_PLL0_BASE,
	.flags = PLL_HAS_PREDIV | PLL_HAS_POSTDIV,
};
/* Fixed-rate input reference clock that feeds PLL0 */
static struct clk ref_clk = {
	.name = "ref_clk",
	.rate = DA830_REF_FREQ,
};
static struct clk pll0_clk = {
	.name = "pll0",
	.parent = &ref_clk,
	.pll_data = &pll0_data,
	.flags = CLK_PLL,
};
/* PRE_PLL: sourced from the PLL input side rather than its output */
static struct clk pll0_aux_clk = {
	.name = "pll0_aux_clk",
	.parent = &pll0_clk,
	.flags = CLK_PLL | PRE_PLL,
};
/* PLL0 SYSCLK outputs; .div_reg selects the PLLDIVn divider register */
static struct clk pll0_sysclk2 = {
	.name = "pll0_sysclk2",
	.parent = &pll0_clk,
	.flags = CLK_PLL,
	.div_reg = PLLDIV2,
};
static struct clk pll0_sysclk3 = {
	.name = "pll0_sysclk3",
	.parent = &pll0_clk,
	.flags = CLK_PLL,
	.div_reg = PLLDIV3,
};
static struct clk pll0_sysclk4 = {
	.name = "pll0_sysclk4",
	.parent = &pll0_clk,
	.flags = CLK_PLL,
	.div_reg = PLLDIV4,
};
static struct clk pll0_sysclk5 = {
	.name = "pll0_sysclk5",
	.parent = &pll0_clk,
	.flags = CLK_PLL,
	.div_reg = PLLDIV5,
};
static struct clk pll0_sysclk6 = {
	.name = "pll0_sysclk6",
	.parent = &pll0_clk,
	.flags = CLK_PLL,
	.div_reg = PLLDIV6,
};
static struct clk pll0_sysclk7 = {
	.name = "pll0_sysclk7",
	.parent = &pll0_clk,
	.flags = CLK_PLL,
	.div_reg = PLLDIV7,
};
/*
 * Module clocks.  Where present, .lpsc is the local PSC module number
 * and .gpsc selects which PSC instance (0 is the default) gates the
 * module; ALWAYS_ENABLED clocks are never gated by clock framework.
 */
static struct clk i2c0_clk = {
	.name = "i2c0",
	.parent = &pll0_aux_clk,
};
static struct clk timerp64_0_clk = {
	.name = "timer0",
	.parent = &pll0_aux_clk,
};
static struct clk timerp64_1_clk = {
	.name = "timer1",
	.parent = &pll0_aux_clk,
};
static struct clk arm_rom_clk = {
	.name = "arm_rom",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC0_ARM_RAM_ROM,
	.flags = ALWAYS_ENABLED,
};
static struct clk scr0_ss_clk = {
	.name = "scr0_ss",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC0_SCR0_SS,
	.flags = ALWAYS_ENABLED,
};
static struct clk scr1_ss_clk = {
	.name = "scr1_ss",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC0_SCR1_SS,
	.flags = ALWAYS_ENABLED,
};
static struct clk scr2_ss_clk = {
	.name = "scr2_ss",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC0_SCR2_SS,
	.flags = ALWAYS_ENABLED,
};
static struct clk dmax_clk = {
	.name = "dmax",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC0_PRUSS,
	.flags = ALWAYS_ENABLED,
};
static struct clk tpcc_clk = {
	.name = "tpcc",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC0_TPCC,
	.flags = ALWAYS_ENABLED | CLK_PSC,
};
static struct clk tptc0_clk = {
	.name = "tptc0",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC0_TPTC0,
	.flags = ALWAYS_ENABLED,
};
static struct clk tptc1_clk = {
	.name = "tptc1",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC0_TPTC1,
	.flags = ALWAYS_ENABLED,
};
static struct clk mmcsd_clk = {
	.name = "mmcsd",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC0_MMC_SD,
};
static struct clk uart0_clk = {
	.name = "uart0",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC0_UART0,
};
static struct clk uart1_clk = {
	.name = "uart1",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_UART1,
	.gpsc = 1,
};
static struct clk uart2_clk = {
	.name = "uart2",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_UART2,
	.gpsc = 1,
};
static struct clk spi0_clk = {
	.name = "spi0",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC0_SPI0,
};
static struct clk spi1_clk = {
	.name = "spi1",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_SPI1,
	.gpsc = 1,
};
/* The three eCAP modules share one LPSC (DA8XX_LPSC1_ECAP) */
static struct clk ecap0_clk = {
	.name = "ecap0",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_ECAP,
	.gpsc = 1,
};
static struct clk ecap1_clk = {
	.name = "ecap1",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_ECAP,
	.gpsc = 1,
};
static struct clk ecap2_clk = {
	.name = "ecap2",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_ECAP,
	.gpsc = 1,
};
/* The three ePWM modules share one LPSC (DA8XX_LPSC1_PWM) */
static struct clk pwm0_clk = {
	.name = "pwm0",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_PWM,
	.gpsc = 1,
};
static struct clk pwm1_clk = {
	.name = "pwm1",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_PWM,
	.gpsc = 1,
};
static struct clk pwm2_clk = {
	.name = "pwm2",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_PWM,
	.gpsc = 1,
};
/* Both eQEP modules share one LPSC (DA830_LPSC1_EQEP) */
static struct clk eqep0_clk = {
	.name = "eqep0",
	.parent = &pll0_sysclk2,
	.lpsc = DA830_LPSC1_EQEP,
	.gpsc = 1,
};
static struct clk eqep1_clk = {
	.name = "eqep1",
	.parent = &pll0_sysclk2,
	.lpsc = DA830_LPSC1_EQEP,
	.gpsc = 1,
};
static struct clk lcdc_clk = {
	.name = "lcdc",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_LCDC,
	.gpsc = 1,
};
static struct clk mcasp0_clk = {
	.name = "mcasp0",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_McASP0,
	.gpsc = 1,
};
static struct clk mcasp1_clk = {
	.name = "mcasp1",
	.parent = &pll0_sysclk2,
	.lpsc = DA830_LPSC1_McASP1,
	.gpsc = 1,
};
static struct clk mcasp2_clk = {
	.name = "mcasp2",
	.parent = &pll0_sysclk2,
	.lpsc = DA830_LPSC1_McASP2,
	.gpsc = 1,
};
static struct clk usb20_clk = {
	.name = "usb20",
	.parent = &pll0_sysclk2,
	.lpsc = DA8XX_LPSC1_USB20,
	.gpsc = 1,
};
/* Modules clocked from the other PLL0 SYSCLK dividers */
static struct clk aemif_clk = {
	.name = "aemif",
	.parent = &pll0_sysclk3,
	.lpsc = DA8XX_LPSC0_EMIF25,
	.flags = ALWAYS_ENABLED,
};
static struct clk aintc_clk = {
	.name = "aintc",
	.parent = &pll0_sysclk4,
	.lpsc = DA8XX_LPSC0_AINTC,
	.flags = ALWAYS_ENABLED,
};
static struct clk secu_mgr_clk = {
	.name = "secu_mgr",
	.parent = &pll0_sysclk4,
	.lpsc = DA8XX_LPSC0_SECU_MGR,
	.flags = ALWAYS_ENABLED,
};
static struct clk emac_clk = {
	.name = "emac",
	.parent = &pll0_sysclk4,
	.lpsc = DA8XX_LPSC1_CPGMAC,
	.gpsc = 1,
};
static struct clk gpio_clk = {
	.name = "gpio",
	.parent = &pll0_sysclk4,
	.lpsc = DA8XX_LPSC1_GPIO,
	.gpsc = 1,
};
static struct clk i2c1_clk = {
	.name = "i2c1",
	.parent = &pll0_sysclk4,
	.lpsc = DA8XX_LPSC1_I2C,
	.gpsc = 1,
};
static struct clk usb11_clk = {
	.name = "usb11",
	.parent = &pll0_sysclk4,
	.lpsc = DA8XX_LPSC1_USB11,
	.gpsc = 1,
};
static struct clk emif3_clk = {
	.name = "emif3",
	.parent = &pll0_sysclk5,
	.lpsc = DA8XX_LPSC1_EMIF3C,
	.gpsc = 1,
	.flags = ALWAYS_ENABLED,
};
static struct clk arm_clk = {
	.name = "arm",
	.parent = &pll0_sysclk6,
	.lpsc = DA8XX_LPSC0_ARM,
	.flags = ALWAYS_ENABLED,
};
static struct clk rmii_clk = {
	.name = "rmii",
	.parent = &pll0_sysclk7,
};
/*
 * clkdev lookup table mapping (device name, connection id) pairs to the
 * clocks above.  Entries with a NULL device name are looked up by the
 * connection id alone; the table is terminated by the all-NULL entry.
 */
static struct clk_lookup da830_clks[] = {
	CLK(NULL, "ref", &ref_clk),
	CLK(NULL, "pll0", &pll0_clk),
	CLK(NULL, "pll0_aux", &pll0_aux_clk),
	CLK(NULL, "pll0_sysclk2", &pll0_sysclk2),
	CLK(NULL, "pll0_sysclk3", &pll0_sysclk3),
	CLK(NULL, "pll0_sysclk4", &pll0_sysclk4),
	CLK(NULL, "pll0_sysclk5", &pll0_sysclk5),
	CLK(NULL, "pll0_sysclk6", &pll0_sysclk6),
	CLK(NULL, "pll0_sysclk7", &pll0_sysclk7),
	CLK("i2c_davinci.1", NULL, &i2c0_clk),
	CLK(NULL, "timer0", &timerp64_0_clk),
	CLK("davinci-wdt", NULL, &timerp64_1_clk),
	CLK(NULL, "arm_rom", &arm_rom_clk),
	CLK(NULL, "scr0_ss", &scr0_ss_clk),
	CLK(NULL, "scr1_ss", &scr1_ss_clk),
	CLK(NULL, "scr2_ss", &scr2_ss_clk),
	CLK(NULL, "dmax", &dmax_clk),
	CLK(NULL, "tpcc", &tpcc_clk),
	CLK(NULL, "tptc0", &tptc0_clk),
	CLK(NULL, "tptc1", &tptc1_clk),
	CLK("da830-mmc.0", NULL, &mmcsd_clk),
	CLK("serial8250.0", NULL, &uart0_clk),
	CLK("serial8250.1", NULL, &uart1_clk),
	CLK("serial8250.2", NULL, &uart2_clk),
	CLK("spi_davinci.0", NULL, &spi0_clk),
	CLK("spi_davinci.1", NULL, &spi1_clk),
	CLK(NULL, "ecap0", &ecap0_clk),
	CLK(NULL, "ecap1", &ecap1_clk),
	CLK(NULL, "ecap2", &ecap2_clk),
	CLK(NULL, "pwm0", &pwm0_clk),
	CLK(NULL, "pwm1", &pwm1_clk),
	CLK(NULL, "pwm2", &pwm2_clk),
	CLK("eqep.0", NULL, &eqep0_clk),
	CLK("eqep.1", NULL, &eqep1_clk),
	CLK("da8xx_lcdc.0", "fck", &lcdc_clk),
	CLK("davinci-mcasp.0", NULL, &mcasp0_clk),
	CLK("davinci-mcasp.1", NULL, &mcasp1_clk),
	CLK("davinci-mcasp.2", NULL, &mcasp2_clk),
	CLK(NULL, "usb20", &usb20_clk),
	CLK(NULL, "aemif", &aemif_clk),
	CLK(NULL, "aintc", &aintc_clk),
	CLK(NULL, "secu_mgr", &secu_mgr_clk),
	/* EMAC and MDIO share the same module clock */
	CLK("davinci_emac.1", NULL, &emac_clk),
	CLK("davinci_mdio.0", "fck", &emac_clk),
	CLK(NULL, "gpio", &gpio_clk),
	CLK("i2c_davinci.2", NULL, &i2c1_clk),
	CLK(NULL, "usb11", &usb11_clk),
	CLK(NULL, "emif3", &emif3_clk),
	CLK(NULL, "arm", &arm_clk),
	CLK(NULL, "rmii", &rmii_clk),
	CLK(NULL, NULL, NULL),
};
/*
 * Device specific mux setup.
 *
 * Each entry is:
 *	soc, description, mux reg, mode offset (bits), mode mask,
 *	mux mode, dbg
 * Entries below are grouped by PINMUX register number (3rd argument);
 * several pads appear more than once, once per selectable function.
 */
static const struct mux_config da830_pins[] = {
#ifdef CONFIG_DAVINCI_MUX
	/* PINMUX0 */
	MUX_CFG(DA830, GPIO7_14, 0, 0, 0xf, 1, false)
	MUX_CFG(DA830, RTCK, 0, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_15, 0, 4, 0xf, 1, false)
	MUX_CFG(DA830, EMU_0, 0, 4, 0xf, 8, false)
	MUX_CFG(DA830, EMB_SDCKE, 0, 8, 0xf, 1, false)
	MUX_CFG(DA830, EMB_CLK_GLUE, 0, 12, 0xf, 1, false)
	MUX_CFG(DA830, EMB_CLK, 0, 12, 0xf, 2, false)
	MUX_CFG(DA830, NEMB_CS_0, 0, 16, 0xf, 1, false)
	MUX_CFG(DA830, NEMB_CAS, 0, 20, 0xf, 1, false)
	MUX_CFG(DA830, NEMB_RAS, 0, 24, 0xf, 1, false)
	MUX_CFG(DA830, NEMB_WE, 0, 28, 0xf, 1, false)
	/* PINMUX1 */
	MUX_CFG(DA830, EMB_BA_1, 1, 0, 0xf, 1, false)
	MUX_CFG(DA830, EMB_BA_0, 1, 4, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_0, 1, 8, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_1, 1, 12, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_2, 1, 16, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_3, 1, 20, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_4, 1, 24, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_5, 1, 28, 0xf, 1, false)
	MUX_CFG(DA830, GPIO7_0, 1, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_1, 1, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_2, 1, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_3, 1, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_4, 1, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_5, 1, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_6, 1, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_7, 1, 28, 0xf, 8, false)
	/* PINMUX2 */
	MUX_CFG(DA830, EMB_A_6, 2, 0, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_7, 2, 4, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_8, 2, 8, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_9, 2, 12, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_10, 2, 16, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_11, 2, 20, 0xf, 1, false)
	MUX_CFG(DA830, EMB_A_12, 2, 24, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_31, 2, 28, 0xf, 1, false)
	MUX_CFG(DA830, GPIO7_8, 2, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_9, 2, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_10, 2, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_11, 2, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_12, 2, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO7_13, 2, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_13, 2, 24, 0xf, 8, false)
	/* PINMUX3 */
	MUX_CFG(DA830, EMB_D_30, 3, 0, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_29, 3, 4, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_28, 3, 8, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_27, 3, 12, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_26, 3, 16, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_25, 3, 20, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_24, 3, 24, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_23, 3, 28, 0xf, 1, false)
	/* PINMUX4 */
	MUX_CFG(DA830, EMB_D_22, 4, 0, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_21, 4, 4, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_20, 4, 8, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_19, 4, 12, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_18, 4, 16, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_17, 4, 20, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_16, 4, 24, 0xf, 1, false)
	MUX_CFG(DA830, NEMB_WE_DQM_3, 4, 28, 0xf, 1, false)
	/* PINMUX5 */
	MUX_CFG(DA830, NEMB_WE_DQM_2, 5, 0, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_0, 5, 4, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_1, 5, 8, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_2, 5, 12, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_3, 5, 16, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_4, 5, 20, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_5, 5, 24, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_6, 5, 28, 0xf, 1, false)
	MUX_CFG(DA830, GPIO6_0, 5, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_1, 5, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_2, 5, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_3, 5, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_4, 5, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_5, 5, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_6, 5, 28, 0xf, 8, false)
	/* PINMUX6 */
	MUX_CFG(DA830, EMB_D_7, 6, 0, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_8, 6, 4, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_9, 6, 8, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_10, 6, 12, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_11, 6, 16, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_12, 6, 20, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_13, 6, 24, 0xf, 1, false)
	MUX_CFG(DA830, EMB_D_14, 6, 28, 0xf, 1, false)
	MUX_CFG(DA830, GPIO6_7, 6, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_8, 6, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_9, 6, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_10, 6, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_11, 6, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_12, 6, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_13, 6, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO6_14, 6, 28, 0xf, 8, false)
	/* PINMUX7 */
	MUX_CFG(DA830, EMB_D_15, 7, 0, 0xf, 1, false)
	MUX_CFG(DA830, NEMB_WE_DQM_1, 7, 4, 0xf, 1, false)
	MUX_CFG(DA830, NEMB_WE_DQM_0, 7, 8, 0xf, 1, false)
	MUX_CFG(DA830, SPI0_SOMI_0, 7, 12, 0xf, 1, false)
	MUX_CFG(DA830, SPI0_SIMO_0, 7, 16, 0xf, 1, false)
	MUX_CFG(DA830, SPI0_CLK, 7, 20, 0xf, 1, false)
	MUX_CFG(DA830, NSPI0_ENA, 7, 24, 0xf, 1, false)
	MUX_CFG(DA830, NSPI0_SCS_0, 7, 28, 0xf, 1, false)
	MUX_CFG(DA830, EQEP0I, 7, 12, 0xf, 2, false)
	MUX_CFG(DA830, EQEP0S, 7, 16, 0xf, 2, false)
	MUX_CFG(DA830, EQEP1I, 7, 20, 0xf, 2, false)
	MUX_CFG(DA830, NUART0_CTS, 7, 24, 0xf, 2, false)
	MUX_CFG(DA830, NUART0_RTS, 7, 28, 0xf, 2, false)
	MUX_CFG(DA830, EQEP0A, 7, 24, 0xf, 4, false)
	MUX_CFG(DA830, EQEP0B, 7, 28, 0xf, 4, false)
	MUX_CFG(DA830, GPIO6_15, 7, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_14, 7, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_15, 7, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_0, 7, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_1, 7, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_2, 7, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_3, 7, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_4, 7, 28, 0xf, 8, false)
	/* PINMUX8 */
	MUX_CFG(DA830, SPI1_SOMI_0, 8, 0, 0xf, 1, false)
	MUX_CFG(DA830, SPI1_SIMO_0, 8, 4, 0xf, 1, false)
	MUX_CFG(DA830, SPI1_CLK, 8, 8, 0xf, 1, false)
	MUX_CFG(DA830, UART0_RXD, 8, 12, 0xf, 1, false)
	MUX_CFG(DA830, UART0_TXD, 8, 16, 0xf, 1, false)
	MUX_CFG(DA830, AXR1_10, 8, 20, 0xf, 1, false)
	MUX_CFG(DA830, AXR1_11, 8, 24, 0xf, 1, false)
	MUX_CFG(DA830, NSPI1_ENA, 8, 28, 0xf, 1, false)
	MUX_CFG(DA830, I2C1_SCL, 8, 0, 0xf, 2, false)
	MUX_CFG(DA830, I2C1_SDA, 8, 4, 0xf, 2, false)
	MUX_CFG(DA830, EQEP1S, 8, 8, 0xf, 2, false)
	MUX_CFG(DA830, I2C0_SDA, 8, 12, 0xf, 2, false)
	MUX_CFG(DA830, I2C0_SCL, 8, 16, 0xf, 2, false)
	MUX_CFG(DA830, UART2_RXD, 8, 28, 0xf, 2, false)
	MUX_CFG(DA830, TM64P0_IN12, 8, 12, 0xf, 4, false)
	MUX_CFG(DA830, TM64P0_OUT12, 8, 16, 0xf, 4, false)
	MUX_CFG(DA830, GPIO5_5, 8, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_6, 8, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_7, 8, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_8, 8, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_9, 8, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_10, 8, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_11, 8, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO5_12, 8, 28, 0xf, 8, false)
	/* PINMUX9 */
	MUX_CFG(DA830, NSPI1_SCS_0, 9, 0, 0xf, 1, false)
	MUX_CFG(DA830, USB0_DRVVBUS, 9, 4, 0xf, 1, false)
	MUX_CFG(DA830, AHCLKX0, 9, 8, 0xf, 1, false)
	MUX_CFG(DA830, ACLKX0, 9, 12, 0xf, 1, false)
	MUX_CFG(DA830, AFSX0, 9, 16, 0xf, 1, false)
	MUX_CFG(DA830, AHCLKR0, 9, 20, 0xf, 1, false)
	MUX_CFG(DA830, ACLKR0, 9, 24, 0xf, 1, false)
	MUX_CFG(DA830, AFSR0, 9, 28, 0xf, 1, false)
	MUX_CFG(DA830, UART2_TXD, 9, 0, 0xf, 2, false)
	MUX_CFG(DA830, AHCLKX2, 9, 8, 0xf, 2, false)
	MUX_CFG(DA830, ECAP0_APWM0, 9, 12, 0xf, 2, false)
	MUX_CFG(DA830, RMII_MHZ_50_CLK, 9, 20, 0xf, 2, false)
	MUX_CFG(DA830, ECAP1_APWM1, 9, 24, 0xf, 2, false)
	MUX_CFG(DA830, USB_REFCLKIN, 9, 8, 0xf, 4, false)
	MUX_CFG(DA830, GPIO5_13, 9, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_15, 9, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_11, 9, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_12, 9, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_13, 9, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_14, 9, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_15, 9, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_12, 9, 28, 0xf, 8, false)
	/* PINMUX10 */
	MUX_CFG(DA830, AMUTE0, 10, 0, 0xf, 1, false)
	MUX_CFG(DA830, AXR0_0, 10, 4, 0xf, 1, false)
	MUX_CFG(DA830, AXR0_1, 10, 8, 0xf, 1, false)
	MUX_CFG(DA830, AXR0_2, 10, 12, 0xf, 1, false)
	MUX_CFG(DA830, AXR0_3, 10, 16, 0xf, 1, false)
	MUX_CFG(DA830, AXR0_4, 10, 20, 0xf, 1, false)
	MUX_CFG(DA830, AXR0_5, 10, 24, 0xf, 1, false)
	MUX_CFG(DA830, AXR0_6, 10, 28, 0xf, 1, false)
	MUX_CFG(DA830, RMII_TXD_0, 10, 4, 0xf, 2, false)
	MUX_CFG(DA830, RMII_TXD_1, 10, 8, 0xf, 2, false)
	MUX_CFG(DA830, RMII_TXEN, 10, 12, 0xf, 2, false)
	MUX_CFG(DA830, RMII_CRS_DV, 10, 16, 0xf, 2, false)
	MUX_CFG(DA830, RMII_RXD_0, 10, 20, 0xf, 2, false)
	MUX_CFG(DA830, RMII_RXD_1, 10, 24, 0xf, 2, false)
	MUX_CFG(DA830, RMII_RXER, 10, 28, 0xf, 2, false)
	MUX_CFG(DA830, AFSR2, 10, 4, 0xf, 4, false)
	MUX_CFG(DA830, ACLKX2, 10, 8, 0xf, 4, false)
	MUX_CFG(DA830, AXR2_3, 10, 12, 0xf, 4, false)
	MUX_CFG(DA830, AXR2_2, 10, 16, 0xf, 4, false)
	MUX_CFG(DA830, AXR2_1, 10, 20, 0xf, 4, false)
	MUX_CFG(DA830, AFSX2, 10, 24, 0xf, 4, false)
	MUX_CFG(DA830, ACLKR2, 10, 28, 0xf, 4, false)
	MUX_CFG(DA830, NRESETOUT, 10, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_0, 10, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_1, 10, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_2, 10, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_3, 10, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_4, 10, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_5, 10, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_6, 10, 28, 0xf, 8, false)
	/* PINMUX11 */
	MUX_CFG(DA830, AXR0_7, 11, 0, 0xf, 1, false)
	MUX_CFG(DA830, AXR0_8, 11, 4, 0xf, 1, false)
	MUX_CFG(DA830, UART1_RXD, 11, 8, 0xf, 1, false)
	MUX_CFG(DA830, UART1_TXD, 11, 12, 0xf, 1, false)
	MUX_CFG(DA830, AXR0_11, 11, 16, 0xf, 1, false)
	MUX_CFG(DA830, AHCLKX1, 11, 20, 0xf, 1, false)
	MUX_CFG(DA830, ACLKX1, 11, 24, 0xf, 1, false)
	MUX_CFG(DA830, AFSX1, 11, 28, 0xf, 1, false)
	MUX_CFG(DA830, MDIO_CLK, 11, 0, 0xf, 2, false)
	MUX_CFG(DA830, MDIO_D, 11, 4, 0xf, 2, false)
	MUX_CFG(DA830, AXR0_9, 11, 8, 0xf, 2, false)
	MUX_CFG(DA830, AXR0_10, 11, 12, 0xf, 2, false)
	MUX_CFG(DA830, EPWM0B, 11, 20, 0xf, 2, false)
	MUX_CFG(DA830, EPWM0A, 11, 24, 0xf, 2, false)
	MUX_CFG(DA830, EPWMSYNCI, 11, 28, 0xf, 2, false)
	MUX_CFG(DA830, AXR2_0, 11, 16, 0xf, 4, false)
	MUX_CFG(DA830, EPWMSYNC0, 11, 28, 0xf, 4, false)
	MUX_CFG(DA830, GPIO3_7, 11, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_8, 11, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_9, 11, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_10, 11, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_11, 11, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_14, 11, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO3_15, 11, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_10, 11, 28, 0xf, 8, false)
	/* PINMUX12 */
	MUX_CFG(DA830, AHCLKR1, 12, 0, 0xf, 1, false)
	MUX_CFG(DA830, ACLKR1, 12, 4, 0xf, 1, false)
	MUX_CFG(DA830, AFSR1, 12, 8, 0xf, 1, false)
	MUX_CFG(DA830, AMUTE1, 12, 12, 0xf, 1, false)
	MUX_CFG(DA830, AXR1_0, 12, 16, 0xf, 1, false)
	MUX_CFG(DA830, AXR1_1, 12, 20, 0xf, 1, false)
	MUX_CFG(DA830, AXR1_2, 12, 24, 0xf, 1, false)
	MUX_CFG(DA830, AXR1_3, 12, 28, 0xf, 1, false)
	MUX_CFG(DA830, ECAP2_APWM2, 12, 4, 0xf, 2, false)
	MUX_CFG(DA830, EHRPWMGLUETZ, 12, 12, 0xf, 2, false)
	MUX_CFG(DA830, EQEP1A, 12, 28, 0xf, 2, false)
	MUX_CFG(DA830, GPIO4_11, 12, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_12, 12, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_13, 12, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_14, 12, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_0, 12, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_1, 12, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_2, 12, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_3, 12, 28, 0xf, 8, false)
	/* PINMUX13 */
	MUX_CFG(DA830, AXR1_4, 13, 0, 0xf, 1, false)
	MUX_CFG(DA830, AXR1_5, 13, 4, 0xf, 1, false)
	MUX_CFG(DA830, AXR1_6, 13, 8, 0xf, 1, false)
	MUX_CFG(DA830, AXR1_7, 13, 12, 0xf, 1, false)
	MUX_CFG(DA830, AXR1_8, 13, 16, 0xf, 1, false)
	MUX_CFG(DA830, AXR1_9, 13, 20, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_0, 13, 24, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_1, 13, 28, 0xf, 1, false)
	MUX_CFG(DA830, EQEP1B, 13, 0, 0xf, 2, false)
	MUX_CFG(DA830, EPWM2B, 13, 4, 0xf, 2, false)
	MUX_CFG(DA830, EPWM2A, 13, 8, 0xf, 2, false)
	MUX_CFG(DA830, EPWM1B, 13, 12, 0xf, 2, false)
	MUX_CFG(DA830, EPWM1A, 13, 16, 0xf, 2, false)
	MUX_CFG(DA830, MMCSD_DAT_0, 13, 24, 0xf, 2, false)
	MUX_CFG(DA830, MMCSD_DAT_1, 13, 28, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HD_0, 13, 24, 0xf, 4, false)
	MUX_CFG(DA830, UHPI_HD_1, 13, 28, 0xf, 4, false)
	MUX_CFG(DA830, GPIO4_4, 13, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_5, 13, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_6, 13, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_7, 13, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_8, 13, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO4_9, 13, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_0, 13, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_1, 13, 28, 0xf, 8, false)
	/* PINMUX14 */
	MUX_CFG(DA830, EMA_D_2, 14, 0, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_3, 14, 4, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_4, 14, 8, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_5, 14, 12, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_6, 14, 16, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_7, 14, 20, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_8, 14, 24, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_9, 14, 28, 0xf, 1, false)
	MUX_CFG(DA830, MMCSD_DAT_2, 14, 0, 0xf, 2, false)
	MUX_CFG(DA830, MMCSD_DAT_3, 14, 4, 0xf, 2, false)
	MUX_CFG(DA830, MMCSD_DAT_4, 14, 8, 0xf, 2, false)
	MUX_CFG(DA830, MMCSD_DAT_5, 14, 12, 0xf, 2, false)
	MUX_CFG(DA830, MMCSD_DAT_6, 14, 16, 0xf, 2, false)
	MUX_CFG(DA830, MMCSD_DAT_7, 14, 20, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HD_8, 14, 24, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HD_9, 14, 28, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HD_2, 14, 0, 0xf, 4, false)
	MUX_CFG(DA830, UHPI_HD_3, 14, 4, 0xf, 4, false)
	MUX_CFG(DA830, UHPI_HD_4, 14, 8, 0xf, 4, false)
	MUX_CFG(DA830, UHPI_HD_5, 14, 12, 0xf, 4, false)
	MUX_CFG(DA830, UHPI_HD_6, 14, 16, 0xf, 4, false)
	MUX_CFG(DA830, UHPI_HD_7, 14, 20, 0xf, 4, false)
	MUX_CFG(DA830, LCD_D_8, 14, 24, 0xf, 4, false)
	MUX_CFG(DA830, LCD_D_9, 14, 28, 0xf, 4, false)
	MUX_CFG(DA830, GPIO0_2, 14, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_3, 14, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_4, 14, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_5, 14, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_6, 14, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_7, 14, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_8, 14, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_9, 14, 28, 0xf, 8, false)
	/* PINMUX15 */
	MUX_CFG(DA830, EMA_D_10, 15, 0, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_11, 15, 4, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_12, 15, 8, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_13, 15, 12, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_14, 15, 16, 0xf, 1, false)
	MUX_CFG(DA830, EMA_D_15, 15, 20, 0xf, 1, false)
	MUX_CFG(DA830, EMA_A_0, 15, 24, 0xf, 1, false)
	MUX_CFG(DA830, EMA_A_1, 15, 28, 0xf, 1, false)
	MUX_CFG(DA830, UHPI_HD_10, 15, 0, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HD_11, 15, 4, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HD_12, 15, 8, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HD_13, 15, 12, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HD_14, 15, 16, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HD_15, 15, 20, 0xf, 2, false)
	MUX_CFG(DA830, LCD_D_7, 15, 24, 0xf, 2, false)
	MUX_CFG(DA830, MMCSD_CLK, 15, 28, 0xf, 2, false)
	MUX_CFG(DA830, LCD_D_10, 15, 0, 0xf, 4, false)
	MUX_CFG(DA830, LCD_D_11, 15, 4, 0xf, 4, false)
	MUX_CFG(DA830, LCD_D_12, 15, 8, 0xf, 4, false)
	MUX_CFG(DA830, LCD_D_13, 15, 12, 0xf, 4, false)
	MUX_CFG(DA830, LCD_D_14, 15, 16, 0xf, 4, false)
	MUX_CFG(DA830, LCD_D_15, 15, 20, 0xf, 4, false)
	MUX_CFG(DA830, UHPI_HCNTL0, 15, 28, 0xf, 4, false)
	MUX_CFG(DA830, GPIO0_10, 15, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_11, 15, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_12, 15, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_13, 15, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_14, 15, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO0_15, 15, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_0, 15, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_1, 15, 28, 0xf, 8, false)
	/* PINMUX16 */
	MUX_CFG(DA830, EMA_A_2, 16, 0, 0xf, 1, false)
	MUX_CFG(DA830, EMA_A_3, 16, 4, 0xf, 1, false)
	MUX_CFG(DA830, EMA_A_4, 16, 8, 0xf, 1, false)
	MUX_CFG(DA830, EMA_A_5, 16, 12, 0xf, 1, false)
	MUX_CFG(DA830, EMA_A_6, 16, 16, 0xf, 1, false)
	MUX_CFG(DA830, EMA_A_7, 16, 20, 0xf, 1, false)
	MUX_CFG(DA830, EMA_A_8, 16, 24, 0xf, 1, false)
	MUX_CFG(DA830, EMA_A_9, 16, 28, 0xf, 1, false)
	MUX_CFG(DA830, MMCSD_CMD, 16, 0, 0xf, 2, false)
	MUX_CFG(DA830, LCD_D_6, 16, 4, 0xf, 2, false)
	MUX_CFG(DA830, LCD_D_3, 16, 8, 0xf, 2, false)
	MUX_CFG(DA830, LCD_D_2, 16, 12, 0xf, 2, false)
	MUX_CFG(DA830, LCD_D_1, 16, 16, 0xf, 2, false)
	MUX_CFG(DA830, LCD_D_0, 16, 20, 0xf, 2, false)
	MUX_CFG(DA830, LCD_PCLK, 16, 24, 0xf, 2, false)
	MUX_CFG(DA830, LCD_HSYNC, 16, 28, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HCNTL1, 16, 0, 0xf, 4, false)
	MUX_CFG(DA830, GPIO1_2, 16, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_3, 16, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_4, 16, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_5, 16, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_6, 16, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_7, 16, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_8, 16, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_9, 16, 28, 0xf, 8, false)
	/* PINMUX17 */
	MUX_CFG(DA830, EMA_A_10, 17, 0, 0xf, 1, false)
	MUX_CFG(DA830, EMA_A_11, 17, 4, 0xf, 1, false)
	MUX_CFG(DA830, EMA_A_12, 17, 8, 0xf, 1, false)
	MUX_CFG(DA830, EMA_BA_1, 17, 12, 0xf, 1, false)
	MUX_CFG(DA830, EMA_BA_0, 17, 16, 0xf, 1, false)
	MUX_CFG(DA830, EMA_CLK, 17, 20, 0xf, 1, false)
	MUX_CFG(DA830, EMA_SDCKE, 17, 24, 0xf, 1, false)
	MUX_CFG(DA830, NEMA_CAS, 17, 28, 0xf, 1, false)
	MUX_CFG(DA830, LCD_VSYNC, 17, 0, 0xf, 2, false)
	MUX_CFG(DA830, NLCD_AC_ENB_CS, 17, 4, 0xf, 2, false)
	MUX_CFG(DA830, LCD_MCLK, 17, 8, 0xf, 2, false)
	MUX_CFG(DA830, LCD_D_5, 17, 12, 0xf, 2, false)
	MUX_CFG(DA830, LCD_D_4, 17, 16, 0xf, 2, false)
	MUX_CFG(DA830, OBSCLK, 17, 20, 0xf, 2, false)
	MUX_CFG(DA830, NEMA_CS_4, 17, 28, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HHWIL, 17, 12, 0xf, 4, false)
	MUX_CFG(DA830, AHCLKR2, 17, 20, 0xf, 4, false)
	MUX_CFG(DA830, GPIO1_10, 17, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_11, 17, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_12, 17, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_13, 17, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_14, 17, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO1_15, 17, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_0, 17, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_1, 17, 28, 0xf, 8, false)
	/* PINMUX18 */
	MUX_CFG(DA830, NEMA_RAS, 18, 0, 0xf, 1, false)
	MUX_CFG(DA830, NEMA_WE, 18, 4, 0xf, 1, false)
	MUX_CFG(DA830, NEMA_CS_0, 18, 8, 0xf, 1, false)
	MUX_CFG(DA830, NEMA_CS_2, 18, 12, 0xf, 1, false)
	MUX_CFG(DA830, NEMA_CS_3, 18, 16, 0xf, 1, false)
	MUX_CFG(DA830, NEMA_OE, 18, 20, 0xf, 1, false)
	MUX_CFG(DA830, NEMA_WE_DQM_1, 18, 24, 0xf, 1, false)
	MUX_CFG(DA830, NEMA_WE_DQM_0, 18, 28, 0xf, 1, false)
	MUX_CFG(DA830, NEMA_CS_5, 18, 0, 0xf, 2, false)
	MUX_CFG(DA830, UHPI_HRNW, 18, 4, 0xf, 2, false)
	MUX_CFG(DA830, NUHPI_HAS, 18, 8, 0xf, 2, false)
	MUX_CFG(DA830, NUHPI_HCS, 18, 12, 0xf, 2, false)
	MUX_CFG(DA830, NUHPI_HDS1, 18, 20, 0xf, 2, false)
	MUX_CFG(DA830, NUHPI_HDS2, 18, 24, 0xf, 2, false)
	MUX_CFG(DA830, NUHPI_HINT, 18, 28, 0xf, 2, false)
	MUX_CFG(DA830, AXR0_12, 18, 4, 0xf, 4, false)
	MUX_CFG(DA830, AMUTE2, 18, 16, 0xf, 4, false)
	MUX_CFG(DA830, AXR0_13, 18, 20, 0xf, 4, false)
	MUX_CFG(DA830, AXR0_14, 18, 24, 0xf, 4, false)
	MUX_CFG(DA830, AXR0_15, 18, 28, 0xf, 4, false)
	MUX_CFG(DA830, GPIO2_2, 18, 0, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_3, 18, 4, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_4, 18, 8, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_5, 18, 12, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_6, 18, 16, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_7, 18, 20, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_8, 18, 24, 0xf, 8, false)
	MUX_CFG(DA830, GPIO2_9, 18, 28, 0xf, 8, false)
	/* PINMUX19 */
	MUX_CFG(DA830, EMA_WAIT_0, 19, 0, 0xf, 1, false)
	MUX_CFG(DA830, NUHPI_HRDY, 19, 0, 0xf, 2, false)
	MUX_CFG(DA830, GPIO2_10, 19, 0, 0xf, 8, false)
#endif
};
/*
 * Per-peripheral pin groups, indexes into da830_pins[] above.
 * Each list is terminated by -1.
 */
const short da830_emif25_pins[] __initconst = {
	DA830_EMA_D_0, DA830_EMA_D_1, DA830_EMA_D_2, DA830_EMA_D_3,
	DA830_EMA_D_4, DA830_EMA_D_5, DA830_EMA_D_6, DA830_EMA_D_7,
	DA830_EMA_D_8, DA830_EMA_D_9, DA830_EMA_D_10, DA830_EMA_D_11,
	DA830_EMA_D_12, DA830_EMA_D_13, DA830_EMA_D_14, DA830_EMA_D_15,
	DA830_EMA_A_0, DA830_EMA_A_1, DA830_EMA_A_2, DA830_EMA_A_3,
	DA830_EMA_A_4, DA830_EMA_A_5, DA830_EMA_A_6, DA830_EMA_A_7,
	DA830_EMA_A_8, DA830_EMA_A_9, DA830_EMA_A_10, DA830_EMA_A_11,
	DA830_EMA_A_12, DA830_EMA_BA_0, DA830_EMA_BA_1, DA830_EMA_CLK,
	DA830_EMA_SDCKE, DA830_NEMA_CS_4, DA830_NEMA_CS_5, DA830_NEMA_WE,
	DA830_NEMA_CS_0, DA830_NEMA_CS_2, DA830_NEMA_CS_3, DA830_NEMA_OE,
	DA830_NEMA_WE_DQM_1, DA830_NEMA_WE_DQM_0, DA830_EMA_WAIT_0,
	-1
};
const short da830_spi0_pins[] __initconst = {
	DA830_SPI0_SOMI_0, DA830_SPI0_SIMO_0, DA830_SPI0_CLK, DA830_NSPI0_ENA,
	DA830_NSPI0_SCS_0,
	-1
};
const short da830_spi1_pins[] __initconst = {
	DA830_SPI1_SOMI_0, DA830_SPI1_SIMO_0, DA830_SPI1_CLK, DA830_NSPI1_ENA,
	DA830_NSPI1_SCS_0,
	-1
};
const short da830_mmc_sd_pins[] __initconst = {
	DA830_MMCSD_DAT_0, DA830_MMCSD_DAT_1, DA830_MMCSD_DAT_2,
	DA830_MMCSD_DAT_3, DA830_MMCSD_DAT_4, DA830_MMCSD_DAT_5,
	DA830_MMCSD_DAT_6, DA830_MMCSD_DAT_7, DA830_MMCSD_CLK,
	DA830_MMCSD_CMD,
	-1
};
const short da830_uart0_pins[] __initconst = {
	DA830_NUART0_CTS, DA830_NUART0_RTS, DA830_UART0_RXD, DA830_UART0_TXD,
	-1
};
const short da830_uart1_pins[] __initconst = {
	DA830_UART1_RXD, DA830_UART1_TXD,
	-1
};
const short da830_uart2_pins[] __initconst = {
	DA830_UART2_RXD, DA830_UART2_TXD,
	-1
};
const short da830_usb20_pins[] __initconst = {
	DA830_USB0_DRVVBUS, DA830_USB_REFCLKIN,
	-1
};
const short da830_usb11_pins[] __initconst = {
	DA830_USB_REFCLKIN,
	-1
};
const short da830_uhpi_pins[] __initconst = {
	DA830_UHPI_HD_0, DA830_UHPI_HD_1, DA830_UHPI_HD_2, DA830_UHPI_HD_3,
	DA830_UHPI_HD_4, DA830_UHPI_HD_5, DA830_UHPI_HD_6, DA830_UHPI_HD_7,
	DA830_UHPI_HD_8, DA830_UHPI_HD_9, DA830_UHPI_HD_10, DA830_UHPI_HD_11,
	DA830_UHPI_HD_12, DA830_UHPI_HD_13, DA830_UHPI_HD_14, DA830_UHPI_HD_15,
	DA830_UHPI_HCNTL0, DA830_UHPI_HCNTL1, DA830_UHPI_HHWIL, DA830_UHPI_HRNW,
	DA830_NUHPI_HAS, DA830_NUHPI_HCS, DA830_NUHPI_HDS1, DA830_NUHPI_HDS2,
	DA830_NUHPI_HINT, DA830_NUHPI_HRDY,
	-1
};
const short da830_cpgmac_pins[] __initconst = {
	DA830_RMII_TXD_0, DA830_RMII_TXD_1, DA830_RMII_TXEN, DA830_RMII_CRS_DV,
	DA830_RMII_RXD_0, DA830_RMII_RXD_1, DA830_RMII_RXER, DA830_MDIO_CLK,
	DA830_MDIO_D,
	-1
};
const short da830_emif3c_pins[] __initconst = {
	DA830_EMB_SDCKE, DA830_EMB_CLK_GLUE, DA830_EMB_CLK, DA830_NEMB_CS_0,
	DA830_NEMB_CAS, DA830_NEMB_RAS, DA830_NEMB_WE, DA830_EMB_BA_1,
	DA830_EMB_BA_0, DA830_EMB_A_0, DA830_EMB_A_1, DA830_EMB_A_2,
	DA830_EMB_A_3, DA830_EMB_A_4, DA830_EMB_A_5, DA830_EMB_A_6,
	DA830_EMB_A_7, DA830_EMB_A_8, DA830_EMB_A_9, DA830_EMB_A_10,
	DA830_EMB_A_11, DA830_EMB_A_12, DA830_NEMB_WE_DQM_3,
	DA830_NEMB_WE_DQM_2, DA830_EMB_D_0, DA830_EMB_D_1, DA830_EMB_D_2,
	DA830_EMB_D_3, DA830_EMB_D_4, DA830_EMB_D_5, DA830_EMB_D_6,
	DA830_EMB_D_7, DA830_EMB_D_8, DA830_EMB_D_9, DA830_EMB_D_10,
	DA830_EMB_D_11, DA830_EMB_D_12, DA830_EMB_D_13, DA830_EMB_D_14,
	DA830_EMB_D_15, DA830_EMB_D_16, DA830_EMB_D_17, DA830_EMB_D_18,
	DA830_EMB_D_19, DA830_EMB_D_20, DA830_EMB_D_21, DA830_EMB_D_22,
	DA830_EMB_D_23, DA830_EMB_D_24, DA830_EMB_D_25, DA830_EMB_D_26,
	DA830_EMB_D_27, DA830_EMB_D_28, DA830_EMB_D_29, DA830_EMB_D_30,
	DA830_EMB_D_31, DA830_NEMB_WE_DQM_1, DA830_NEMB_WE_DQM_0,
	-1
};
const short da830_mcasp0_pins[] __initconst = {
	DA830_AHCLKX0, DA830_ACLKX0, DA830_AFSX0,
	DA830_AHCLKR0, DA830_ACLKR0, DA830_AFSR0, DA830_AMUTE0,
	DA830_AXR0_0, DA830_AXR0_1, DA830_AXR0_2, DA830_AXR0_3,
	DA830_AXR0_4, DA830_AXR0_5, DA830_AXR0_6, DA830_AXR0_7,
	DA830_AXR0_8, DA830_AXR0_9, DA830_AXR0_10, DA830_AXR0_11,
	DA830_AXR0_12, DA830_AXR0_13, DA830_AXR0_14, DA830_AXR0_15,
	-1
};
const short da830_mcasp1_pins[] __initconst = {
	DA830_AHCLKX1, DA830_ACLKX1, DA830_AFSX1,
	DA830_AHCLKR1, DA830_ACLKR1, DA830_AFSR1, DA830_AMUTE1,
	DA830_AXR1_0, DA830_AXR1_1, DA830_AXR1_2, DA830_AXR1_3,
	DA830_AXR1_4, DA830_AXR1_5, DA830_AXR1_6, DA830_AXR1_7,
	DA830_AXR1_8, DA830_AXR1_9, DA830_AXR1_10, DA830_AXR1_11,
	-1
};
const short da830_mcasp2_pins[] __initconst = {
	DA830_AHCLKX2, DA830_ACLKX2, DA830_AFSX2,
	DA830_AHCLKR2, DA830_ACLKR2, DA830_AFSR2, DA830_AMUTE2,
	DA830_AXR2_0, DA830_AXR2_1, DA830_AXR2_2, DA830_AXR2_3,
	-1
};
const short da830_i2c0_pins[] __initconst = {
	DA830_I2C0_SDA, DA830_I2C0_SCL,
	-1
};
const short da830_i2c1_pins[] __initconst = {
	DA830_I2C1_SCL, DA830_I2C1_SDA,
	-1
};
const short da830_lcdcntl_pins[] __initconst = {
	DA830_LCD_D_0, DA830_LCD_D_1, DA830_LCD_D_2, DA830_LCD_D_3,
	DA830_LCD_D_4, DA830_LCD_D_5, DA830_LCD_D_6, DA830_LCD_D_7,
	DA830_LCD_D_8, DA830_LCD_D_9, DA830_LCD_D_10, DA830_LCD_D_11,
	DA830_LCD_D_12, DA830_LCD_D_13, DA830_LCD_D_14, DA830_LCD_D_15,
	DA830_LCD_PCLK, DA830_LCD_HSYNC, DA830_LCD_VSYNC, DA830_NLCD_AC_ENB_CS,
	DA830_LCD_MCLK,
	-1
};
const short da830_pwm_pins[] __initconst = {
	DA830_ECAP0_APWM0, DA830_ECAP1_APWM1, DA830_EPWM0B, DA830_EPWM0A,
	DA830_EPWMSYNCI, DA830_EPWMSYNC0, DA830_ECAP2_APWM2, DA830_EHRPWMGLUETZ,
	DA830_EPWM2B, DA830_EPWM2A, DA830_EPWM1B, DA830_EPWM1A,
	-1
};
const short da830_ecap0_pins[] __initconst = {
	DA830_ECAP0_APWM0,
	-1
};
const short da830_ecap1_pins[] __initconst = {
	DA830_ECAP1_APWM1,
	-1
};
const short da830_ecap2_pins[] __initconst = {
	DA830_ECAP2_APWM2,
	-1
};
const short da830_eqep0_pins[] __initconst = {
	DA830_EQEP0I, DA830_EQEP0S, DA830_EQEP0A, DA830_EQEP0B,
	-1
};
const short da830_eqep1_pins[] __initconst = {
	DA830_EQEP1I, DA830_EQEP1S, DA830_EQEP1A, DA830_EQEP1B,
	-1
};
/* FIQ are pri 0-1; otherwise 2-7, with 7 lowest priority */
/* Every CP_INTC source defaults to the lowest usable priority (7). */
static u8 da830_default_priorities[DA830_N_CP_INTC_IRQ] = {
[IRQ_DA8XX_COMMTX] = 7,
[IRQ_DA8XX_COMMRX] = 7,
[IRQ_DA8XX_NINT] = 7,
[IRQ_DA8XX_EVTOUT0] = 7,
[IRQ_DA8XX_EVTOUT1] = 7,
[IRQ_DA8XX_EVTOUT2] = 7,
[IRQ_DA8XX_EVTOUT3] = 7,
[IRQ_DA8XX_EVTOUT4] = 7,
[IRQ_DA8XX_EVTOUT5] = 7,
[IRQ_DA8XX_EVTOUT6] = 7,
[IRQ_DA8XX_EVTOUT7] = 7,
[IRQ_DA8XX_CCINT0] = 7,
[IRQ_DA8XX_CCERRINT] = 7,
[IRQ_DA8XX_TCERRINT0] = 7,
[IRQ_DA8XX_AEMIFINT] = 7,
[IRQ_DA8XX_I2CINT0] = 7,
[IRQ_DA8XX_MMCSDINT0] = 7,
[IRQ_DA8XX_MMCSDINT1] = 7,
[IRQ_DA8XX_ALLINT0] = 7,
[IRQ_DA8XX_RTC] = 7,
[IRQ_DA8XX_SPINT0] = 7,
[IRQ_DA8XX_TINT12_0] = 7,
[IRQ_DA8XX_TINT34_0] = 7,
[IRQ_DA8XX_TINT12_1] = 7,
[IRQ_DA8XX_TINT34_1] = 7,
[IRQ_DA8XX_UARTINT0] = 7,
[IRQ_DA8XX_KEYMGRINT] = 7,
[IRQ_DA830_MPUERR] = 7,
[IRQ_DA8XX_CHIPINT0] = 7,
[IRQ_DA8XX_CHIPINT1] = 7,
[IRQ_DA8XX_CHIPINT2] = 7,
[IRQ_DA8XX_CHIPINT3] = 7,
[IRQ_DA8XX_TCERRINT1] = 7,
[IRQ_DA8XX_C0_RX_THRESH_PULSE] = 7,
[IRQ_DA8XX_C0_RX_PULSE] = 7,
[IRQ_DA8XX_C0_TX_PULSE] = 7,
[IRQ_DA8XX_C0_MISC_PULSE] = 7,
[IRQ_DA8XX_C1_RX_THRESH_PULSE] = 7,
[IRQ_DA8XX_C1_RX_PULSE] = 7,
[IRQ_DA8XX_C1_TX_PULSE] = 7,
[IRQ_DA8XX_C1_MISC_PULSE] = 7,
[IRQ_DA8XX_MEMERR] = 7,
[IRQ_DA8XX_GPIO0] = 7,
[IRQ_DA8XX_GPIO1] = 7,
[IRQ_DA8XX_GPIO2] = 7,
[IRQ_DA8XX_GPIO3] = 7,
[IRQ_DA8XX_GPIO4] = 7,
[IRQ_DA8XX_GPIO5] = 7,
[IRQ_DA8XX_GPIO6] = 7,
[IRQ_DA8XX_GPIO7] = 7,
[IRQ_DA8XX_GPIO8] = 7,
[IRQ_DA8XX_I2CINT1] = 7,
[IRQ_DA8XX_LCDINT] = 7,
[IRQ_DA8XX_UARTINT1] = 7,
[IRQ_DA8XX_MCASPINT] = 7,
[IRQ_DA8XX_ALLINT1] = 7,
[IRQ_DA8XX_SPINT1] = 7,
[IRQ_DA8XX_UHPI_INT1] = 7,
[IRQ_DA8XX_USB_INT] = 7,
[IRQ_DA8XX_IRQN] = 7,
[IRQ_DA8XX_RWAKEUP] = 7,
[IRQ_DA8XX_UARTINT2] = 7,
[IRQ_DA8XX_DFTSSINT] = 7,
[IRQ_DA8XX_EHRPWM0] = 7,
[IRQ_DA8XX_EHRPWM0TZ] = 7,
[IRQ_DA8XX_EHRPWM1] = 7,
[IRQ_DA8XX_EHRPWM1TZ] = 7,
[IRQ_DA830_EHRPWM2] = 7,
[IRQ_DA830_EHRPWM2TZ] = 7,
[IRQ_DA8XX_ECAP0] = 7,
[IRQ_DA8XX_ECAP1] = 7,
[IRQ_DA8XX_ECAP2] = 7,
[IRQ_DA830_EQEP0] = 7,
[IRQ_DA830_EQEP1] = 7,
[IRQ_DA830_T12CMPINT0_0] = 7,
[IRQ_DA830_T12CMPINT1_0] = 7,
[IRQ_DA830_T12CMPINT2_0] = 7,
[IRQ_DA830_T12CMPINT3_0] = 7,
[IRQ_DA830_T12CMPINT4_0] = 7,
[IRQ_DA830_T12CMPINT5_0] = 7,
[IRQ_DA830_T12CMPINT6_0] = 7,
[IRQ_DA830_T12CMPINT7_0] = 7,
[IRQ_DA830_T12CMPINT0_1] = 7,
[IRQ_DA830_T12CMPINT1_1] = 7,
[IRQ_DA830_T12CMPINT2_1] = 7,
[IRQ_DA830_T12CMPINT3_1] = 7,
[IRQ_DA830_T12CMPINT4_1] = 7,
[IRQ_DA830_T12CMPINT5_1] = 7,
[IRQ_DA830_T12CMPINT6_1] = 7,
[IRQ_DA830_T12CMPINT7_1] = 7,
[IRQ_DA8XX_ARMCLKSTOPREQ] = 7,
};
/*
 * Static I/O mappings set up at boot: the common DaVinci IO window and
 * the CP_INTC interrupt-controller register window, both device memory.
 */
static struct map_desc da830_io_desc[] = {
{
.virtual = IO_VIRT,
.pfn = __phys_to_pfn(IO_PHYS),
.length = IO_SIZE,
.type = MT_DEVICE
},
{
.virtual = DA8XX_CP_INTC_VIRT,
.pfn = __phys_to_pfn(DA8XX_CP_INTC_BASE),
.length = DA8XX_CP_INTC_SIZE,
.type = MT_DEVICE
},
};
/* Base addresses of the two power/sleep controllers (PSC0, PSC1). */
static u32 da830_psc_bases[] = { DA8XX_PSC0_BASE, DA8XX_PSC1_BASE };
/* Contents of JTAG ID register used to identify exact cpu type */
/* JTAG ID matches for the supported DA830/OMAP-L137 silicon revisions. */
static struct davinci_id da830_ids[] = {
{
.variant = 0x0,
.part_no = 0xb7df,
.manufacturer = 0x017, /* 0x02f >> 1 */
.cpu_id = DAVINCI_CPU_ID_DA830,
.name = "da830/omap-l137 rev1.0",
},
{
.variant = 0x8,
.part_no = 0xb7df,
.manufacturer = 0x017,
.cpu_id = DAVINCI_CPU_ID_DA830,
.name = "da830/omap-l137 rev1.1",
},
{
.variant = 0x9,
.part_no = 0xb7df,
.manufacturer = 0x017,
.cpu_id = DAVINCI_CPU_ID_DA830,
.name = "da830/omap-l137 rev2.0",
},
};
/* The DA830 exposes 128 GPIO lines. */
static struct davinci_gpio_platform_data da830_gpio_platform_data = {
.ngpio = 128,
};
/*
 * Register the DA830 GPIO controller with the shared da8xx helper.
 * Returns whatever da8xx_register_gpio() returns (platform-device
 * registration status).
 */
int __init da830_register_gpio(void)
{
return da8xx_register_gpio(&da830_gpio_platform_data);
}
/*
 * The two 64-bit timers, each split into independent 32-bit halves
 * (bottom/top) with their own IRQs plus a compare-register block.
 */
static struct davinci_timer_instance da830_timer_instance[2] = {
{
.base = DA8XX_TIMER64P0_BASE,
.bottom_irq = IRQ_DA8XX_TINT12_0,
.top_irq = IRQ_DA8XX_TINT34_0,
.cmp_off = DA830_CMP12_0,
.cmp_irq = IRQ_DA830_T12CMPINT0_0,
},
{
.base = DA8XX_TIMER64P1_BASE,
.bottom_irq = IRQ_DA8XX_TINT12_1,
.top_irq = IRQ_DA8XX_TINT34_1,
.cmp_off = DA830_CMP12_0,
.cmp_irq = IRQ_DA830_T12CMPINT0_1,
},
};
/*
* T0_BOT: Timer 0, bottom : Used for clock_event & clocksource
* T0_TOP: Timer 0, top : Used by DSP
* T1_BOT, T1_TOP: Timer 1, bottom & top: Used for watchdog timer
*/
/* Timer 0 bottom half serves as both clockevent and clocksource. */
static struct davinci_timer_info da830_timer_info = {
.timers = da830_timer_instance,
.clockevent_id = T0_BOT,
.clocksource_id = T0_BOT,
};
/*
 * Aggregate SoC description consumed by davinci_common_init(): I/O maps,
 * JTAG-ID table, clocks, PSCs, pinmux, CP_INTC setup, timers and EMAC.
 */
static struct davinci_soc_info davinci_soc_info_da830 = {
.io_desc = da830_io_desc,
.io_desc_num = ARRAY_SIZE(da830_io_desc),
.jtag_id_reg = DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG,
.ids = da830_ids,
.ids_num = ARRAY_SIZE(da830_ids),
.cpu_clks = da830_clks,
.psc_bases = da830_psc_bases,
.psc_bases_num = ARRAY_SIZE(da830_psc_bases),
.pinmux_base = DA8XX_SYSCFG0_BASE + 0x120, /* PINMUX0 register offset */
.pinmux_pins = da830_pins,
.pinmux_pins_num = ARRAY_SIZE(da830_pins),
.intc_base = DA8XX_CP_INTC_BASE,
.intc_type = DAVINCI_INTC_TYPE_CP_INTC,
.intc_irq_prios = da830_default_priorities,
.intc_irq_num = DA830_N_CP_INTC_IRQ,
.timer_info = &da830_timer_info,
.emac_pdata = &da8xx_emac_pdata,
};
/*
 * Early SoC init: run the common DaVinci bring-up for this SoC and map
 * the SYSCFG0 block (4K) so later code can poke pinmux/CFGCHIP registers.
 * A failed ioremap() only WARNs; da8xx_syscfg0_base is left NULL.
 */
void __init da830_init(void)
{
davinci_common_init(&davinci_soc_info_da830);
da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module");
}
| gpl-2.0 |
skritchz/android_kernel_motorola_surnia | arch/x86/kernel/cpu/perf_event_amd_uncore.c | 2003 | 12797 | /*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Jacob Shin <jacob.shin@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#define NUM_COUNTERS_NB 4
#define NUM_COUNTERS_L2 4
#define MAX_COUNTERS NUM_COUNTERS_NB
#define RDPMC_BASE_NB 6
#define RDPMC_BASE_L2 10
#define COUNTER_SHIFT 16
/*
 * Per-shared-domain uncore PMU state.  One instance is shared by every
 * CPU in the same NB/L2 domain; per-cpu pointers all reference it.
 */
struct amd_uncore {
int id;			/* domain id (NB node id or L2 apicid base) */
int refcnt;		/* number of online CPUs sharing this instance */
int cpu;		/* CPU that owns the counters / handles events */
int num_counters;	/* NUM_COUNTERS_NB or NUM_COUNTERS_L2 */
int rdpmc_base;		/* rdpmc index base for these counters */
u32 msr_base;		/* first control MSR (ctl/ctr pairs follow) */
cpumask_t *active_mask;	/* mask of owning CPUs, exported via sysfs */
struct pmu *pmu;	/* back-pointer to amd_nb_pmu or amd_l2_pmu */
struct perf_event *events[MAX_COUNTERS];	/* counter -> event map */
struct amd_uncore *free_when_cpu_online;	/* deferred kfree target */
};
/* Per-cpu pointers to the shared uncore instances (NULL if unsupported). */
static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_l2;
/* The two uncore PMUs and the cpumasks of CPUs currently owning them. */
static struct pmu amd_nb_pmu;
static struct pmu amd_l2_pmu;
static cpumask_t amd_nb_active_mask;
static cpumask_t amd_l2_active_mask;
static bool is_nb_event(struct perf_event *event)
{
return event->pmu->type == amd_nb_pmu.type;
}
static bool is_l2_event(struct perf_event *event)
{
return event->pmu->type == amd_l2_pmu.type;
}
/*
 * Map @event to the shared uncore instance of its target CPU, or NULL
 * when the matching PMU class was never initialized.
 */
static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (amd_uncore_nb && is_nb_event(event))
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);

	if (amd_uncore_l2 && is_l2_event(event))
		return *per_cpu_ptr(amd_uncore_l2, event->cpu);

	return NULL;
}
/*
 * Accumulate the hardware counter delta into event->count.
 * The shift-by-COUNTER_SHIFT dance sign-extends the delta of the
 * sub-64-bit hardware counter so wraparound is handled correctly.
 */
static void amd_uncore_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 prev, new;
s64 delta;
/*
* since we do not enable counter overflow interrupts,
* we do not have to worry about prev_count changing on us
*/
prev = local64_read(&hwc->prev_count);
rdpmcl(hwc->event_base_rdpmc, new);
local64_set(&hwc->prev_count, new);
/* drop the top COUNTER_SHIFT bits, then arithmetic-shift to sign-extend */
delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
delta >>= COUNTER_SHIFT;
local64_add(delta, &event->count);
}
/*
 * Start counting: optionally reload the counter MSR from prev_count
 * (PERF_EF_RELOAD), then write the event config with the ENABLE bit set.
 */
static void amd_uncore_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (flags & PERF_EF_RELOAD)
wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
hwc->state = 0;
wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
perf_event_update_userpage(event);
}
/*
 * Stop counting: write the config back without the ENABLE bit that
 * amd_uncore_start() OR'ed in, then (on PERF_EF_UPDATE) fold the final
 * counter value into event->count.
 */
static void amd_uncore_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
wrmsrl(hwc->config_base, hwc->config);
hwc->state |= PERF_HES_STOPPED;
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
amd_uncore_read(event);
hwc->state |= PERF_HES_UPTODATE;
}
}
/*
 * Bind @event to a hardware counter of its uncore domain.
 * Reuses an existing assignment if present, otherwise claims the first
 * free slot with cmpxchg() (the slots are shared between CPUs of the
 * domain, hence the atomic claim).  Returns -EBUSY when all counters
 * are taken.  On success programs the ctl/ctr MSR addresses and rdpmc
 * index, and starts the event if PERF_EF_START is set.
 */
static int amd_uncore_add(struct perf_event *event, int flags)
{
int i;
struct amd_uncore *uncore = event_to_amd_uncore(event);
struct hw_perf_event *hwc = &event->hw;
/* are we already assigned? */
if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
goto out;
for (i = 0; i < uncore->num_counters; i++) {
if (uncore->events[i] == event) {
hwc->idx = i;
goto out;
}
}
/* if not, take the first available counter */
hwc->idx = -1;
for (i = 0; i < uncore->num_counters; i++) {
if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
hwc->idx = i;
break;
}
}
out:
if (hwc->idx == -1)
return -EBUSY;
/* ctl/ctr MSRs are interleaved pairs starting at msr_base */
hwc->config_base = uncore->msr_base + (2 * hwc->idx);
hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (flags & PERF_EF_START)
amd_uncore_start(event, PERF_EF_RELOAD);
return 0;
}
/*
 * Unbind @event: stop it (updating the final count) and atomically
 * release whichever counter slot it occupied.
 */
static void amd_uncore_del(struct perf_event *event, int flags)
{
int i;
struct amd_uncore *uncore = event_to_amd_uncore(event);
struct hw_perf_event *hwc = &event->hw;
amd_uncore_stop(event, PERF_EF_UPDATE);
for (i = 0; i < uncore->num_counters; i++) {
if (cmpxchg(&uncore->events[i], event, NULL) == event)
break;
}
hwc->idx = -1;
}
/*
 * Validate and prepare a new uncore event.
 * Rejects sampling, per-task events and any exclusion bits (the shared
 * counters cannot attribute counts to a core or privilege level), then
 * redirects event->cpu to the domain's designated owner CPU.
 * Returns -ENOENT for foreign PMU types, -EINVAL for unsupported
 * attributes, -ENODEV if no uncore instance exists for the CPU.
 */
static int amd_uncore_event_init(struct perf_event *event)
{
struct amd_uncore *uncore;
struct hw_perf_event *hwc = &event->hw;
if (event->attr.type != event->pmu->type)
return -ENOENT;
/*
* NB and L2 counters (MSRs) are shared across all cores that share the
* same NB / L2 cache. Interrupts can be directed to a single target
* core, however, event counts generated by processes running on other
* cores cannot be masked out. So we do not support sampling and
* per-thread events.
*/
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
/* NB and L2 counters do not have usr/os/guest/host bits */
if (event->attr.exclude_user || event->attr.exclude_kernel ||
event->attr.exclude_host || event->attr.exclude_guest)
return -EINVAL;
/* and we do not enable counter overflow interrupts */
/* NOTE: the NB event mask is applied to L2 events as well */
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1;
if (event->cpu < 0)
return -EINVAL;
uncore = event_to_amd_uncore(event);
if (!uncore)
return -ENODEV;
/*
* since request can come in to any of the shared cores, we will remap
* to a single common cpu.
*/
event->cpu = uncore->cpu;
return 0;
}
/*
 * sysfs "cpumask" show callback: print the list of CPUs that own the
 * counters for the PMU bound to @dev, newline-terminated.
 */
static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int n;
cpumask_t *active_mask;
struct pmu *pmu = dev_get_drvdata(dev);
if (pmu->type == amd_nb_pmu.type)
active_mask = &amd_nb_active_mask;
else if (pmu->type == amd_l2_pmu.type)
active_mask = &amd_l2_active_mask;
else
return 0;
/* reserve two bytes for the trailing "\n\0" */
n = cpulist_scnprintf(buf, PAGE_SIZE - 2, active_mask);
buf[n++] = '\n';
buf[n] = '\0';
return n;
}
/* sysfs plumbing: a read-only "cpumask" attribute plus the standard
 * "format" group describing the event/umask config-field layout. */
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);
static struct attribute *amd_uncore_attrs[] = {
&dev_attr_cpumask.attr,
NULL,
};
static struct attribute_group amd_uncore_attr_group = {
.attrs = amd_uncore_attrs,
};
PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");
static struct attribute *amd_uncore_format_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
NULL,
};
static struct attribute_group amd_uncore_format_group = {
.name = "format",
.attrs = amd_uncore_format_attr,
};
static const struct attribute_group *amd_uncore_attr_groups[] = {
&amd_uncore_attr_group,
&amd_uncore_format_group,
NULL,
};
/* The two uncore PMUs share every callback; only name differs. */
static struct pmu amd_nb_pmu = {
.attr_groups = amd_uncore_attr_groups,
.name = "amd_nb",
.event_init = amd_uncore_event_init,
.add = amd_uncore_add,
.del = amd_uncore_del,
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
};
static struct pmu amd_l2_pmu = {
.attr_groups = amd_uncore_attr_groups,
.name = "amd_l2",
.event_init = amd_uncore_event_init,
.add = amd_uncore_add,
.del = amd_uncore_del,
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
};
/*
 * Allocate a zeroed amd_uncore on @cpu's NUMA node.
 * NOTE(review): kzalloc_node() can return NULL; callers currently
 * dereference the result without checking -- see amd_uncore_cpu_up_prepare.
 */
static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu)
{
return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
cpu_to_node(cpu));
}
/*
 * CPU_UP_PREPARE: allocate a provisional uncore instance per supported
 * class for the incoming CPU.  amd_uncore_cpu_starting() later merges
 * it with an existing sibling (or keeps it as the domain instance).
 * NOTE(review): the amd_uncore_alloc() result is dereferenced without a
 * NULL check -- an OOM here would oops; confirm against later upstream
 * error handling.
 */
static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
{
struct amd_uncore *uncore;
if (amd_uncore_nb) {
uncore = amd_uncore_alloc(cpu);
uncore->cpu = cpu;
uncore->num_counters = NUM_COUNTERS_NB;
uncore->rdpmc_base = RDPMC_BASE_NB;
uncore->msr_base = MSR_F15H_NB_PERF_CTL;
uncore->active_mask = &amd_nb_active_mask;
uncore->pmu = &amd_nb_pmu;
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
}
if (amd_uncore_l2) {
uncore = amd_uncore_alloc(cpu);
uncore->cpu = cpu;
uncore->num_counters = NUM_COUNTERS_L2;
uncore->rdpmc_base = RDPMC_BASE_L2;
uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
uncore->active_mask = &amd_l2_active_mask;
uncore->pmu = &amd_l2_pmu;
*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
}
}
/*
 * Find an online CPU whose uncore instance has the same domain id as
 * @this.  If one exists, adopt the sibling's instance (scheduling the
 * provisional @this for kfree once the CPU is fully online) and return
 * it; otherwise @this stays the domain instance.  Either way the
 * returned instance gains a reference.
 */
static struct amd_uncore *
__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
struct amd_uncore * __percpu *uncores)
{
unsigned int cpu;
struct amd_uncore *that;
for_each_online_cpu(cpu) {
that = *per_cpu_ptr(uncores, cpu);
if (!that)
continue;
if (this == that)
continue;
if (this->id == that->id) {
/* defer freeing until CPU_ONLINE, we are in atomic context */
that->free_when_cpu_online = this;
this = that;
break;
}
}
this->refcnt++;
return this;
}
/*
 * CPU_STARTING (runs on the new CPU): determine the CPU's NB and L2
 * domain ids from CPUID, then either merge with an existing sibling
 * instance or promote the provisional one allocated in up_prepare.
 */
static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
{
unsigned int eax, ebx, ecx, edx;
struct amd_uncore *uncore;
if (amd_uncore_nb) {
uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
/* NB id = CPUID 8000_001Eh ECX[7:0] (node id) */
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
uncore->id = ecx & 0xff;
uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
}
if (amd_uncore_l2) {
unsigned int apicid = cpu_data(cpu).apicid;
unsigned int nshared;
uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
/* L2 id = apicid rounded down to the cache-sharing group size */
cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
nshared = ((eax >> 14) & 0xfff) + 1;
uncore->id = apicid - (apicid % nshared);
uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
}
}
/*
 * CPU_ONLINE helper: free the provisional instance deferred by
 * amd_uncore_find_online_sibling() (kfree(NULL) is a no-op) and, if
 * this CPU owns the domain's counters, publish it in the active mask.
 */
static void __cpuinit uncore_online(unsigned int cpu,
struct amd_uncore * __percpu *uncores)
{
struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
kfree(uncore->free_when_cpu_online);
uncore->free_when_cpu_online = NULL;
if (cpu == uncore->cpu)
cpumask_set_cpu(cpu, uncore->active_mask);
}
/* CPU_ONLINE: run the online helper for each supported uncore class. */
static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_online(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_online(cpu, amd_uncore_l2);
}
/*
 * CPU_DOWN_PREPARE helper: if the departing CPU is the owner of its
 * shared uncore instance, hand ownership (and the active perf contexts)
 * over to another online CPU in the same domain, i.e. one whose per-cpu
 * slot points at the very same instance.
 */
static void __cpuinit uncore_down_prepare(unsigned int cpu,
struct amd_uncore * __percpu *uncores)
{
unsigned int i;
struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
if (this->cpu != cpu)
return;
/* this cpu is going down, migrate to a shared sibling if possible */
for_each_online_cpu(i) {
struct amd_uncore *that = *per_cpu_ptr(uncores, i);
if (cpu == i)
continue;
if (this == that) {
perf_pmu_migrate_context(this->pmu, cpu, i);
cpumask_clear_cpu(cpu, that->active_mask);
cpumask_set_cpu(i, that->active_mask);
that->cpu = i;
break;
}
}
}
/* CPU_DOWN_PREPARE: migrate ownership away for each uncore class. */
static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_down_prepare(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_down_prepare(cpu, amd_uncore_l2);
}
/*
 * CPU_DEAD / CPU_UP_CANCELED helper: drop the dead CPU's reference on
 * its shared uncore instance, freeing it when the last sharer is gone,
 * and clear the CPU's per-cpu slot in @uncores.
 *
 * Fix: the slot must be cleared in the @uncores array that was passed
 * in.  The previous code always cleared the amd_uncore_nb slot, so the
 * L2 invocation left *per_cpu_ptr(amd_uncore_l2, cpu) pointing at
 * potentially freed memory (use-after-free on the next hotplug of this
 * CPU) while clearing the NB slot twice.
 */
static void __cpuinit uncore_dead(unsigned int cpu,
				  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}
/* CPU_DEAD / CPU_UP_CANCELED: tear down each uncore class for @cpu. */
static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_dead(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_dead(cpu, amd_uncore_l2);
}
/*
 * CPU hotplug notifier: dispatch each phase to the matching handler.
 * CPU_UP_CANCELED shares the CPU_DEAD teardown path since up_prepare
 * has already allocated state by then.  Always returns NOTIFY_OK.
 */
static int __cpuinit
amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
amd_uncore_cpu_up_prepare(cpu);
break;
case CPU_STARTING:
amd_uncore_cpu_starting(cpu);
break;
case CPU_ONLINE:
amd_uncore_cpu_online(cpu);
break;
case CPU_DOWN_PREPARE:
amd_uncore_cpu_down_prepare(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
amd_uncore_cpu_dead(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
/* Registered one priority step above the core perf hotplug notifier. */
static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = {
.notifier_call = amd_uncore_cpu_notifier,
.priority = CPU_PRI_PERF + 1,
};
/*
 * Run the STARTING+ONLINE phases on a CPU that was already up before
 * the hotplug notifier was registered (invoked via
 * smp_call_function_single() so it executes on that CPU).
 */
static void __init init_cpu_already_online(void *dummy)
{
unsigned int cpu = smp_processor_id();
amd_uncore_cpu_starting(cpu);
amd_uncore_cpu_online(cpu);
}
/*
 * Module init: on AMD CPUs with the topology-extensions CPUID leaf,
 * register the NB and/or L2I uncore PMUs (whichever perfctr feature
 * bits are present), initialize CPUs that are already online, then
 * hook CPU hotplug.  Returns -ENODEV when neither PMU is available.
 * NOTE(review): alloc_percpu() and perf_pmu_register() return values
 * are not checked -- a failure here would be dereferenced later;
 * confirm against later upstream error handling.
 */
static int __init amd_uncore_init(void)
{
unsigned int cpu;
int ret = -ENODEV;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
if (!cpu_has_topoext)
return -ENODEV;
if (cpu_has_perfctr_nb) {
amd_uncore_nb = alloc_percpu(struct amd_uncore *);
perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
printk(KERN_INFO "perf: AMD NB counters detected\n");
ret = 0;
}
if (cpu_has_perfctr_l2) {
amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
printk(KERN_INFO "perf: AMD L2I counters detected\n");
ret = 0;
}
if (ret)
return -ENODEV;
get_online_cpus();
/* init cpus already online before registering for hotplug notifier */
for_each_online_cpu(cpu) {
amd_uncore_cpu_up_prepare(cpu);
smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
}
register_cpu_notifier(&amd_uncore_cpu_notifier_block);
put_online_cpus();
return 0;
}
device_initcall(amd_uncore_init);
| gpl-2.0 |
MrHyde03/android_kernel_samsung_konawifixx | sound/soc/codecs/wm8737.c | 3027 | 19595 | /*
* wm8737.c -- WM8737 ALSA SoC Audio driver
*
* Copyright 2010 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "wm8737.h"
#define WM8737_NUM_SUPPLIES 4
/* Names of the four supply rails requested via the regulator API. */
static const char *wm8737_supply_names[WM8737_NUM_SUPPLIES] = {
"DCVDD",
"DBVDD",
"AVDD",
"MVDD",
};
/* codec private data */
struct wm8737_priv {
enum snd_soc_control_type control_type;	/* I2C or SPI register I/O */
struct regulator_bulk_data supplies[WM8737_NUM_SUPPLIES];
unsigned int mclk;	/* MCLK rate in Hz, set via set_sysclk() */
};
/* Power-on register defaults, used to seed the register cache. */
static const u16 wm8737_reg[WM8737_REGISTER_COUNT] = {
0x00C3, /* R0 - Left PGA volume */
0x00C3, /* R1 - Right PGA volume */
0x0007, /* R2 - AUDIO path L */
0x0007, /* R3 - AUDIO path R */
0x0000, /* R4 - 3D Enhance */
0x0000, /* R5 - ADC Control */
0x0000, /* R6 - Power Management */
0x000A, /* R7 - Audio Format */
0x0000, /* R8 - Clocking */
0x000F, /* R9 - MIC Preamp Control */
0x0003, /* R10 - Misc Bias Control */
0x0000, /* R11 - Noise Gate */
0x007C, /* R12 - ALC1 */
0x0000, /* R13 - ALC2 */
0x0032, /* R14 - ALC3 */
};
/* Writing any value to the reset register restores chip defaults. */
static int wm8737_reset(struct snd_soc_codec *codec)
{
return snd_soc_write(codec, WM8737_RESET, 0);
}
/* dB scales and enum texts backing the mixer controls below. */
static const unsigned int micboost_tlv[] = {
TLV_DB_RANGE_HEAD(4),
0, 0, TLV_DB_SCALE_ITEM(1300, 0, 0),
1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
2, 2, TLV_DB_SCALE_ITEM(2800, 0, 0),
3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0),
};
static const DECLARE_TLV_DB_SCALE(pga_tlv, -9750, 50, 1);
static const DECLARE_TLV_DB_SCALE(adc_tlv, -600, 600, 0);
static const DECLARE_TLV_DB_SCALE(ng_tlv, -7800, 600, 0);
static const DECLARE_TLV_DB_SCALE(alc_max_tlv, -1200, 600, 0);
static const DECLARE_TLV_DB_SCALE(alc_target_tlv, -1800, 100, 0);
static const char *micbias_enum_text[] = {
"25%",
"50%",
"75%",
"100%",
};
static const struct soc_enum micbias_enum =
SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 0, 4, micbias_enum_text);
static const char *low_cutoff_text[] = {
"Low", "High"
};
/* 3D enhance low cut-off select: register bit 6 */
static const struct soc_enum low_3d =
SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 6, 2, low_cutoff_text);
static const char *high_cutoff_text[] = {
"High", "Low"
};
/* 3D enhance high cut-off select: register bit 5 */
static const struct soc_enum high_3d =
SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 5, 2, high_cutoff_text);
static const char *alc_fn_text[] = {
"Disabled", "Right", "Left", "Stereo"
};
static const struct soc_enum alc_fn =
SOC_ENUM_SINGLE(WM8737_ALC1, 7, 4, alc_fn_text);
static const char *alc_hold_text[] = {
"0", "2.67ms", "5.33ms", "10.66ms", "21.32ms", "42.64ms", "85.28ms",
"170.56ms", "341.12ms", "682.24ms", "1.364s", "2.728s", "5.458s",
"10.916s", "21.832s", "43.691s"
};
static const struct soc_enum alc_hold =
SOC_ENUM_SINGLE(WM8737_ALC2, 0, 16, alc_hold_text);
static const char *alc_atk_text[] = {
"8.4ms", "16.8ms", "33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms",
"1.075s", "2.15s", "4.3s", "8.6s"
};
static const struct soc_enum alc_atk =
SOC_ENUM_SINGLE(WM8737_ALC3, 0, 11, alc_atk_text);
static const char *alc_dcy_text[] = {
"33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms", "1.075s", "2.15s",
"4.3s", "8.6s", "17.2s", "34.41s"
};
static const struct soc_enum alc_dcy =
SOC_ENUM_SINGLE(WM8737_ALC3, 4, 11, alc_dcy_text);
static const struct snd_kcontrol_new wm8737_snd_controls[] = {
SOC_DOUBLE_R_TLV("Mic Boost Volume", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
6, 3, 0, micboost_tlv),
SOC_DOUBLE_R("Mic Boost Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
4, 1, 0),
SOC_DOUBLE("Mic ZC Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
3, 1, 0),
SOC_DOUBLE_R_TLV("Capture Volume", WM8737_LEFT_PGA_VOLUME,
WM8737_RIGHT_PGA_VOLUME, 0, 255, 0, pga_tlv),
SOC_DOUBLE("Capture ZC Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
2, 1, 0),
SOC_DOUBLE("INPUT1 DC Bias Switch", WM8737_MISC_BIAS_CONTROL, 0, 1, 1, 0),
SOC_ENUM("Mic PGA Bias", micbias_enum),
SOC_SINGLE("ADC Low Power Switch", WM8737_ADC_CONTROL, 2, 1, 0),
SOC_SINGLE("High Pass Filter Switch", WM8737_ADC_CONTROL, 0, 1, 1),
SOC_DOUBLE("Polarity Invert Switch", WM8737_ADC_CONTROL, 5, 6, 1, 0),
SOC_SINGLE("3D Switch", WM8737_3D_ENHANCE, 0, 1, 0),
SOC_SINGLE("3D Depth", WM8737_3D_ENHANCE, 1, 15, 0),
SOC_ENUM("3D Low Cut-off", low_3d),
SOC_ENUM("3D High Cut-off", low_3d),
SOC_SINGLE_TLV("3D ADC Volume", WM8737_3D_ENHANCE, 7, 1, 1, adc_tlv),
SOC_SINGLE("Noise Gate Switch", WM8737_NOISE_GATE, 0, 1, 0),
SOC_SINGLE_TLV("Noise Gate Threshold Volume", WM8737_NOISE_GATE, 2, 7, 0,
ng_tlv),
SOC_ENUM("ALC", alc_fn),
SOC_SINGLE_TLV("ALC Max Gain Volume", WM8737_ALC1, 4, 7, 0, alc_max_tlv),
SOC_SINGLE_TLV("ALC Target Volume", WM8737_ALC1, 0, 15, 0, alc_target_tlv),
SOC_ENUM("ALC Hold Time", alc_hold),
SOC_SINGLE("ALC ZC Switch", WM8737_ALC2, 4, 1, 0),
SOC_ENUM("ALC Attack Time", alc_atk),
SOC_ENUM("ALC Decay Time", alc_dcy),
};
/* Input-select and preamp-bypass muxes feeding the DAPM graph. */
static const char *linsel_text[] = {
"LINPUT1", "LINPUT2", "LINPUT3", "LINPUT1 DC",
};
static const struct soc_enum linsel_enum =
SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_L, 7, 4, linsel_text);
static const struct snd_kcontrol_new linsel_mux =
SOC_DAPM_ENUM("LINSEL", linsel_enum);
static const char *rinsel_text[] = {
"RINPUT1", "RINPUT2", "RINPUT3", "RINPUT1 DC",
};
static const struct soc_enum rinsel_enum =
SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_R, 7, 4, rinsel_text);
static const struct snd_kcontrol_new rinsel_mux =
SOC_DAPM_ENUM("RINSEL", rinsel_enum);
static const char *bypass_text[] = {
"Direct", "Preamp"
};
static const struct soc_enum lbypass_enum =
SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 2, 2, bypass_text);
static const struct snd_kcontrol_new lbypass_mux =
SOC_DAPM_ENUM("Left Bypass", lbypass_enum);
static const struct soc_enum rbypass_enum =
SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 3, 2, bypass_text);
/* NOTE(review): labelled "Left Bypass" although it wraps rbypass_enum --
 * looks like a copy-paste of lbypass_mux; confirm against upstream before
 * renaming, since the string is a user-visible control name. */
static const struct snd_kcontrol_new rbypass_mux =
SOC_DAPM_ENUM("Left Bypass", rbypass_enum);
/* DAPM widgets: inputs -> input muxes -> preamp muxes -> PGAs -> ADCs -> AIF. */
static const struct snd_soc_dapm_widget wm8737_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("LINPUT1"),
SND_SOC_DAPM_INPUT("LINPUT2"),
SND_SOC_DAPM_INPUT("LINPUT3"),
SND_SOC_DAPM_INPUT("RINPUT1"),
SND_SOC_DAPM_INPUT("RINPUT2"),
SND_SOC_DAPM_INPUT("RINPUT3"),
SND_SOC_DAPM_INPUT("LACIN"),
SND_SOC_DAPM_INPUT("RACIN"),
SND_SOC_DAPM_MUX("LINSEL", SND_SOC_NOPM, 0, 0, &linsel_mux),
SND_SOC_DAPM_MUX("RINSEL", SND_SOC_NOPM, 0, 0, &rinsel_mux),
SND_SOC_DAPM_MUX("Left Preamp Mux", SND_SOC_NOPM, 0, 0, &lbypass_mux),
SND_SOC_DAPM_MUX("Right Preamp Mux", SND_SOC_NOPM, 0, 0, &rbypass_mux),
SND_SOC_DAPM_PGA("PGAL", WM8737_POWER_MANAGEMENT, 5, 0, NULL, 0),
SND_SOC_DAPM_PGA("PGAR", WM8737_POWER_MANAGEMENT, 4, 0, NULL, 0),
/* NOTE(review): ADCL/ADCR are declared with the DAC widget macro even
 * though they are ADCs; SND_SOC_DAPM_ADC appears intended -- confirm
 * (widget type affects DAPM power-up ordering). */
SND_SOC_DAPM_DAC("ADCL", NULL, WM8737_POWER_MANAGEMENT, 3, 0),
SND_SOC_DAPM_DAC("ADCR", NULL, WM8737_POWER_MANAGEMENT, 2, 0),
SND_SOC_DAPM_AIF_OUT("AIF", "Capture", 0, WM8737_POWER_MANAGEMENT, 6, 0),
};
/* Audio routes wiring the widgets above into the capture path. */
static const struct snd_soc_dapm_route intercon[] = {
{ "LINSEL", "LINPUT1", "LINPUT1" },
{ "LINSEL", "LINPUT2", "LINPUT2" },
{ "LINSEL", "LINPUT3", "LINPUT3" },
{ "LINSEL", "LINPUT1 DC", "LINPUT1" },
{ "RINSEL", "RINPUT1", "RINPUT1" },
{ "RINSEL", "RINPUT2", "RINPUT2" },
{ "RINSEL", "RINPUT3", "RINPUT3" },
{ "RINSEL", "RINPUT1 DC", "RINPUT1" },
{ "Left Preamp Mux", "Preamp", "LINSEL" },
{ "Left Preamp Mux", "Direct", "LACIN" },
{ "Right Preamp Mux", "Preamp", "RINSEL" },
{ "Right Preamp Mux", "Direct", "RACIN" },
{ "PGAL", NULL, "Left Preamp Mux" },
{ "PGAR", NULL, "Right Preamp Mux" },
{ "ADCL", NULL, "PGAL" },
{ "ADCR", NULL, "PGAR" },
{ "AIF", NULL, "ADCL" },
{ "AIF", NULL, "ADCR" },
};
/* Register the DAPM widgets and routes with the codec's DAPM context. */
static int wm8737_add_widgets(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = &codec->dapm;
snd_soc_dapm_new_controls(dapm, wm8737_dapm_widgets,
ARRAY_SIZE(wm8737_dapm_widgets));
snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
return 0;
}
/* codec mclk clock divider coefficients */
static const struct {
u32 mclk;
u32 rate;
u8 usb;
u8 sr;
} coeff_div[] = {
{ 12288000, 8000, 0, 0x4 },
{ 12288000, 12000, 0, 0x8 },
{ 12288000, 16000, 0, 0xa },
{ 12288000, 24000, 0, 0x1c },
{ 12288000, 32000, 0, 0xc },
{ 12288000, 48000, 0, 0 },
{ 12288000, 96000, 0, 0xe },
{ 11289600, 8000, 0, 0x14 },
{ 11289600, 11025, 0, 0x18 },
{ 11289600, 22050, 0, 0x1a },
{ 11289600, 44100, 0, 0x10 },
{ 11289600, 88200, 0, 0x1e },
{ 18432000, 8000, 0, 0x5 },
{ 18432000, 12000, 0, 0x9 },
{ 18432000, 16000, 0, 0xb },
{ 18432000, 24000, 0, 0x1b },
{ 18432000, 32000, 0, 0xd },
{ 18432000, 48000, 0, 0x1 },
{ 18432000, 96000, 0, 0x1f },
{ 16934400, 8000, 0, 0x15 },
{ 16934400, 11025, 0, 0x19 },
{ 16934400, 22050, 0, 0x1b },
{ 16934400, 44100, 0, 0x11 },
{ 16934400, 88200, 0, 0x1f },
{ 12000000, 8000, 1, 0x4 },
{ 12000000, 11025, 1, 0x19 },
{ 12000000, 12000, 1, 0x8 },
{ 12000000, 16000, 1, 0xa },
{ 12000000, 22050, 1, 0x1b },
{ 12000000, 24000, 1, 0x1c },
{ 12000000, 32000, 1, 0xc },
{ 12000000, 44100, 1, 0x11 },
{ 12000000, 48000, 1, 0x0 },
{ 12000000, 88200, 1, 0x1f },
{ 12000000, 96000, 1, 0xe },
};
/*
 * hw_params callback: look up the clock coefficients for the requested
 * sample rate against the configured MCLK (also accepting MCLK/2 via
 * the CLKDIV2 divider), then program word length and clocking registers.
 * Returns -EINVAL if the rate cannot be derived from MCLK or the sample
 * format is unsupported.
 */
static int wm8737_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_codec *codec = rtd->codec;
struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
int i;
u16 clocking = 0;
u16 af = 0;
for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
if (coeff_div[i].rate != params_rate(params))
continue;
if (coeff_div[i].mclk == wm8737->mclk)
break;
/* table entry expects twice our MCLK: enable the /2 divider */
if (coeff_div[i].mclk == wm8737->mclk * 2) {
clocking |= WM8737_CLKDIV2;
break;
}
}
if (i == ARRAY_SIZE(coeff_div)) {
dev_err(codec->dev, "%dHz MCLK can't support %dHz\n",
wm8737->mclk, params_rate(params));
return -EINVAL;
}
clocking |= coeff_div[i].usb | (coeff_div[i].sr << WM8737_SR_SHIFT);
/* select the word-length bits of the audio format register */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
break;
case SNDRV_PCM_FORMAT_S20_3LE:
af |= 0x8;
break;
case SNDRV_PCM_FORMAT_S24_LE:
af |= 0x10;
break;
case SNDRV_PCM_FORMAT_S32_LE:
af |= 0x18;
break;
default:
return -EINVAL;
}
snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT, WM8737_WL_MASK, af);
snd_soc_update_bits(codec, WM8737_CLOCKING,
WM8737_USB_MODE | WM8737_CLKDIV2 | WM8737_SR_MASK,
clocking);
return 0;
}
static int wm8737_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
int i;
for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
if (freq == coeff_div[i].mclk ||
freq == coeff_div[i].mclk * 2) {
wm8737->mclk = freq;
return 0;
}
}
dev_err(codec->dev, "MCLK rate %dHz not supported\n", freq);
return -EINVAL;
}
/*
 * set_fmt callback: translate the DAI format flags (master/slave,
 * interface format, clock inversion) into the audio format register.
 * Returns -EINVAL for unsupported combinations.
 */
static int wm8737_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
u16 af = 0;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
af |= WM8737_MS;	/* codec is clock/frame master */
break;
case SND_SOC_DAIFMT_CBS_CFS:
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
af |= 0x2;
break;
case SND_SOC_DAIFMT_RIGHT_J:
break;
case SND_SOC_DAIFMT_LEFT_J:
af |= 0x1;
break;
case SND_SOC_DAIFMT_DSP_A:
af |= 0x3;
break;
case SND_SOC_DAIFMT_DSP_B:
af |= 0x13;	/* DSP mode with LRP set */
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_NB_IF:
af |= WM8737_LRP;
break;
default:
return -EINVAL;
}
snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT,
WM8737_FORMAT_MASK | WM8737_LRP | WM8737_MS, af);
return 0;
}
/*
 * Bias management: on OFF->STANDBY, power the supplies, resync the
 * register cache, ramp VMID quickly (2*2.5k divider) with a 500ms wait,
 * then relax to the low-power 2*300k divider; PREPARE uses 2*75k.
 * OFF disables VMID/VREF and the regulators.
 */
static int wm8737_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
int ret;
switch (level) {
case SND_SOC_BIAS_ON:
break;
case SND_SOC_BIAS_PREPARE:
/* VMID at 2*75k */
snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
WM8737_VMIDSEL_MASK, 0);
break;
case SND_SOC_BIAS_STANDBY:
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies),
wm8737->supplies);
if (ret != 0) {
dev_err(codec->dev,
"Failed to enable supplies: %d\n",
ret);
return ret;
}
snd_soc_cache_sync(codec);
/* Fast VMID ramp at 2*2.5k */
snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
WM8737_VMIDSEL_MASK, 0x4);
/* Bring VMID up */
snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
WM8737_VMID_MASK |
WM8737_VREF_MASK,
WM8737_VMID_MASK |
WM8737_VREF_MASK);
/* let VMID settle before relaxing the divider */
msleep(500);
}
/* VMID at 2*300k */
snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
WM8737_VMIDSEL_MASK, 2);
break;
case SND_SOC_BIAS_OFF:
snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
WM8737_VMID_MASK | WM8737_VREF_MASK, 0);
regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies),
wm8737->supplies);
break;
}
codec->dapm.bias_level = level;
return 0;
}
/* Capture-only DAI: 8k-96kHz, 16/20/24/32-bit, stereo only. */
#define WM8737_RATES SNDRV_PCM_RATE_8000_96000
#define WM8737_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
static struct snd_soc_dai_ops wm8737_dai_ops = {
.hw_params = wm8737_hw_params,
.set_sysclk = wm8737_set_dai_sysclk,
.set_fmt = wm8737_set_dai_fmt,
};
static struct snd_soc_dai_driver wm8737_dai = {
.name = "wm8737",
.capture = {
.stream_name = "Capture",
.channels_min = 2, /* Mono modes not yet supported */
.channels_max = 2,
.rates = WM8737_RATES,
.formats = WM8737_FORMATS,
},
.ops = &wm8737_dai_ops,
};
#ifdef CONFIG_PM
/* System suspend: drop to BIAS_OFF (powers down VMID and supplies). */
static int wm8737_suspend(struct snd_soc_codec *codec, pm_message_t state)
{
wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
/* System resume: restore standby bias (re-enables supplies, syncs cache). */
static int wm8737_resume(struct snd_soc_codec *codec)
{
wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
#else
#define wm8737_suspend NULL
#define wm8737_resume NULL
#endif
/*
 * Codec probe: set up register I/O (7-bit address, 9-bit data), request
 * and enable the supply regulators, reset the chip, latch the volume
 * update bits, bring bias to standby, then register controls and the
 * DAPM graph.  Uses goto-based unwind on failure.
 */
static int wm8737_probe(struct snd_soc_codec *codec)
{
struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
int ret, i;
ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8737->control_type);
if (ret != 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
}
for (i = 0; i < ARRAY_SIZE(wm8737->supplies); i++)
wm8737->supplies[i].supply = wm8737_supply_names[i];
ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8737->supplies),
wm8737->supplies);
if (ret != 0) {
dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
return ret;
}
ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies),
wm8737->supplies);
if (ret != 0) {
dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
goto err_get;
}
ret = wm8737_reset(codec);
if (ret < 0) {
dev_err(codec->dev, "Failed to issue reset\n");
goto err_enable;
}
/* set LVU/RVU so PGA volume writes take effect on both channels */
snd_soc_update_bits(codec, WM8737_LEFT_PGA_VOLUME, WM8737_LVU,
WM8737_LVU);
snd_soc_update_bits(codec, WM8737_RIGHT_PGA_VOLUME, WM8737_RVU,
WM8737_RVU);
wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Bias level configuration will have done an extra enable */
regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
snd_soc_add_controls(codec, wm8737_snd_controls,
ARRAY_SIZE(wm8737_snd_controls));
wm8737_add_widgets(codec);
return 0;
err_enable:
regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
err_get:
regulator_bulk_free(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
return ret;
}
/* Codec removal: drop to fully-off bias, then release the regulators. */
static int wm8737_remove(struct snd_soc_codec *codec)
{
	struct wm8737_priv *priv = snd_soc_codec_get_drvdata(codec);

	wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
	regulator_bulk_free(ARRAY_SIZE(priv->supplies), priv->supplies);

	return 0;
}
/* ASoC codec driver descriptor; register cache excludes the reset register. */
static struct snd_soc_codec_driver soc_codec_dev_wm8737 = {
	.probe		= wm8737_probe,
	.remove		= wm8737_remove,
	.suspend	= wm8737_suspend,
	.resume		= wm8737_resume,
	.set_bias_level = wm8737_set_bias_level,

	.reg_cache_size = WM8737_REGISTER_COUNT - 1, /* Skip reset */
	.reg_word_size	= sizeof(u16),
	.reg_cache_default = wm8737_reg,
};
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/*
 * I2C probe: allocate the driver state, record the control bus type and
 * register the codec with ASoC.  The allocation is freed on registration
 * failure; otherwise ownership passes to the remove path.
 */
static __devinit int wm8737_i2c_probe(struct i2c_client *i2c,
				      const struct i2c_device_id *id)
{
	struct wm8737_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	i2c_set_clientdata(i2c, priv);
	priv->control_type = SND_SOC_I2C;

	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8737,
				     &wm8737_dai, 1);
	if (ret < 0)
		kfree(priv);

	return ret;
}
/* I2C remove: unregister the codec and free the private state. */
static __devexit int wm8737_i2c_remove(struct i2c_client *client)
{
	struct wm8737_priv *priv = i2c_get_clientdata(client);

	snd_soc_unregister_codec(&client->dev);
	kfree(priv);

	return 0;
}
static const struct i2c_device_id wm8737_i2c_id[] = {
{ "wm8737", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8737_i2c_id);
static struct i2c_driver wm8737_i2c_driver = {
.driver = {
.name = "wm8737",
.owner = THIS_MODULE,
},
.probe = wm8737_i2c_probe,
.remove = __devexit_p(wm8737_i2c_remove),
.id_table = wm8737_i2c_id,
};
#endif
#if defined(CONFIG_SPI_MASTER)
/*
 * SPI probe: allocate the driver state, record the control bus type and
 * register the codec with ASoC.  The allocation is freed on registration
 * failure; otherwise ownership passes to the remove path.
 */
static int __devinit wm8737_spi_probe(struct spi_device *spi)
{
	struct wm8737_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->control_type = SND_SOC_SPI;
	spi_set_drvdata(spi, priv);

	ret = snd_soc_register_codec(&spi->dev, &soc_codec_dev_wm8737,
				     &wm8737_dai, 1);
	if (ret < 0)
		kfree(priv);

	return ret;
}
/* SPI remove: unregister the codec and free the private state. */
static int __devexit wm8737_spi_remove(struct spi_device *spi)
{
	struct wm8737_priv *priv = spi_get_drvdata(spi);

	snd_soc_unregister_codec(&spi->dev);
	kfree(priv);

	return 0;
}
static struct spi_driver wm8737_spi_driver = {
.driver = {
.name = "wm8737",
.owner = THIS_MODULE,
},
.probe = wm8737_spi_probe,
.remove = __devexit_p(wm8737_spi_remove),
};
#endif /* CONFIG_SPI_MASTER */
/*
 * Module init: register the I2C and/or SPI control drivers.  A failure of
 * either registration is logged but deliberately not fatal (matching the
 * original behaviour), so that the other bus type can still be used.
 *
 * Fix: "ret" was previously declared uninitialized; with neither
 * CONFIG_I2C nor CONFIG_SPI_MASTER enabled it was never assigned,
 * producing an uninitialized-variable warning.  Initialize it to 0.
 */
static int __init wm8737_modinit(void)
{
	int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	ret = i2c_add_driver(&wm8737_i2c_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register WM8737 I2C driver: %d\n",
		       ret);
	}
#endif
#if defined(CONFIG_SPI_MASTER)
	ret = spi_register_driver(&wm8737_spi_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register WM8737 SPI driver: %d\n",
		       ret);
	}
#endif
	return 0;
}
module_init(wm8737_modinit);
/* Module exit: unregister bus drivers in reverse order of registration. */
static void __exit wm8737_exit(void)
{
#if defined(CONFIG_SPI_MASTER)
	spi_unregister_driver(&wm8737_spi_driver);
#endif
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	i2c_del_driver(&wm8737_i2c_driver);
#endif
}
module_exit(wm8737_exit);
MODULE_DESCRIPTION("ASoC WM8737 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
sfumato77/rk3x_kernel_3.0.36 | drivers/media/dvb/dvb-usb/af9005-remote.c | 3027 | 4316 | /* DVB USB compliant Linux driver for the Afatech 9005
* USB1.1 DVB-T receiver.
*
* Standard remote decode function
*
* Copyright (C) 2007 Luca Olivetti (luca@ventoso.org)
*
* Thanks to Afatech who kindly provided information.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
 * see Documentation/dvb/README.dvb-usb for more information
*/
#include "af9005.h"
/* debug */
static int dvb_usb_af9005_remote_debug;
module_param_named(debug, dvb_usb_af9005_remote_debug, int, 0644);
MODULE_PARM_DESC(debug,
"enable (1) or disable (0) debug messages."
DVB_USB_DEBUG_STATUS);
#define deb_decode(args...) dprintk(dvb_usb_af9005_remote_debug,0x01,args)
struct rc_map_table rc_map_af9005_table[] = {
{0x01b7, KEY_POWER},
{0x01a7, KEY_VOLUMEUP},
{0x0187, KEY_CHANNELUP},
{0x017f, KEY_MUTE},
{0x01bf, KEY_VOLUMEDOWN},
{0x013f, KEY_CHANNELDOWN},
{0x01df, KEY_1},
{0x015f, KEY_2},
{0x019f, KEY_3},
{0x011f, KEY_4},
{0x01ef, KEY_5},
{0x016f, KEY_6},
{0x01af, KEY_7},
{0x0127, KEY_8},
{0x0107, KEY_9},
{0x01cf, KEY_ZOOM},
{0x014f, KEY_0},
{0x018f, KEY_GOTO}, /* marked jump on the remote */
{0x00bd, KEY_POWER},
{0x007d, KEY_VOLUMEUP},
{0x00fd, KEY_CHANNELUP},
{0x009d, KEY_MUTE},
{0x005d, KEY_VOLUMEDOWN},
{0x00dd, KEY_CHANNELDOWN},
{0x00ad, KEY_1},
{0x006d, KEY_2},
{0x00ed, KEY_3},
{0x008d, KEY_4},
{0x004d, KEY_5},
{0x00cd, KEY_6},
{0x00b5, KEY_7},
{0x0075, KEY_8},
{0x00f5, KEY_9},
{0x0095, KEY_ZOOM},
{0x0055, KEY_0},
{0x00d5, KEY_GOTO}, /* marked jump on the remote */
};
int rc_map_af9005_table_size = ARRAY_SIZE(rc_map_af9005_table);
static int repeatable_keys[] = {
KEY_VOLUMEUP,
KEY_VOLUMEDOWN,
KEY_CHANNELUP,
KEY_CHANNELDOWN
};
/*
 * Decode raw mark/space pulse timings captured by the AF9005 into an
 * input-layer keycode (NEC-style 32-bit protocol).
 *
 * @d:     owning DVB USB device (provides last_event for repeat handling)
 * @data:  raw buffer of big-endian 16-bit mark/space durations
 * @len:   number of bytes available in @data
 * @event: out: decoded keycode when a press or repeat is recognised
 * @state: out: REMOTE_KEY_PRESSED or REMOTE_KEY_REPEAT
 *
 * Always returns 0; malformed or unrecognised input is silently ignored.
 */
int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len, u32 * event,
		     int *state)
{
	u16 mark, space;
	u32 result;
	u8 cust, dat, invdat;
	int i;

	if (len >= 6) {
		/* the first mark/space pair distinguishes a repeat burst
		 * (very long mark) from a full data frame */
		mark = (u16) (data[0] << 8) + data[1];
		space = (u16) (data[2] << 8) + data[3];
		if (space * 3 < mark) {
			/* repeat code: only honour it for keys that make
			 * sense to auto-repeat (volume/channel) */
			for (i = 0; i < ARRAY_SIZE(repeatable_keys); i++) {
				if (d->last_event == repeatable_keys[i]) {
					*state = REMOTE_KEY_REPEAT;
					*event = d->last_event;
					deb_decode("repeat key, event %x\n",
						   *event);
					return 0;
				}
			}
			deb_decode("repeated key ignored (non repeatable)\n");
			return 0;
		} else if (len >= 33 * 4) {	/*32 bits + start code */
			/* rebuild the 32-bit word one mark/space pair at a
			 * time: a mark longer than half the space is a 1 */
			result = 0;
			for (i = 4; i < 4 + 32 * 4; i += 4) {
				result <<= 1;
				mark = (u16) (data[i] << 8) + data[i + 1];
				mark >>= 1;
				space = (u16) (data[i + 2] << 8) + data[i + 3];
				space >>= 1;
				if (mark * 2 > space)
					result += 1;
			}
			deb_decode("key pressed, raw value %x\n", result);
			/* frames from this remote always start with 0xfe */
			if ((result & 0xff000000) != 0xfe000000) {
				deb_decode
				    ("doesn't start with 0xfe, ignored\n");
				return 0;
			}
			cust = (result >> 16) & 0xff;
			dat = (result >> 8) & 0xff;
			invdat = (~result) & 0xff;
			/* NEC check: data byte must equal inverted low byte */
			if (dat != invdat) {
				deb_decode("code != inverted code\n");
				return 0;
			}
			/* look the custom/data pair up in the keymap */
			for (i = 0; i < rc_map_af9005_table_size; i++) {
				if (rc5_custom(&rc_map_af9005_table[i]) == cust
				    && rc5_data(&rc_map_af9005_table[i]) == dat) {
					*event = rc_map_af9005_table[i].keycode;
					*state = REMOTE_KEY_PRESSED;
					deb_decode
					    ("key pressed, event %x\n", *event);
					return 0;
				}
			}
			deb_decode("not found in table\n");
		}
	}
	return 0;
}
EXPORT_SYMBOL(rc_map_af9005_table);
EXPORT_SYMBOL(rc_map_af9005_table_size);
EXPORT_SYMBOL(af9005_rc_decode);
MODULE_AUTHOR("Luca Olivetti <luca@ventoso.org>");
MODULE_DESCRIPTION
("Standard remote control decoder for Afatech 9005 DVB-T USB1.1 stick");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
| gpl-2.0 |
0xD34D/-0xD34D--Kindle-Fire-Kernel | arch/mips/alchemy/devboards/pb1500/platform.c | 3795 | 1638 | /*
* Pb1500 board platform device registration
*
* Copyright (C) 2009 Manuel Lauss
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/bcsr.h>
#include "../platform.h"
/*
 * Register the Pb1500 on-board devices: the single PCMCIA socket and the
 * boot NOR flash.  Always returns 0.
 */
static int __init pb1500_dev_init(void)
{
	int swapped;

	/* PCMCIA. single socket, identical to Pb1500 */
	/* NOTE(review): the comment above self-references this board;
	 * presumably copied from a sibling board file - verify which
	 * board it was meant to name. */
	db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR,
				    PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
				    PCMCIA_MEM_PHYS_ADDR,
				    PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
				    PCMCIA_IO_PHYS_ADDR,
				    PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
				    AU1500_GPIO11_INT,	/* card */
				    AU1500_GPIO9_INT,	/* insert */
				    /*AU1500_GPIO10_INT*/0, /* stschg */
				    0,			/* eject */
				    0);			/* id */

	/* boot flash bank ordering depends on the SWAPBOOT switch */
	swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT;
	db1x_register_norflash(64 * 1024 * 1024, 4, swapped);

	return 0;
}
| gpl-2.0 |
omnirom/android_kernel_asus_me301t | arch/mips/alchemy/common/gpiolib-au1000.c | 3795 | 4060 | /*
* Copyright (C) 2007-2009, OpenWrt.org, Florian Fainelli <florian@openwrt.org>
* GPIOLIB support for Au1000, Au1500, Au1100, Au1550 and Au12x0.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Notes :
* au1000 SoC have only one GPIO block : GPIO1
* Au1100, Au15x0, Au12x0 have a second one : GPIO2
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio.h>
/* Read the current level of a GPIO2 pin; offset is chip-relative. */
static int gpio2_get(struct gpio_chip *chip, unsigned offset)
{
	return alchemy_gpio2_get_value(offset + ALCHEMY_GPIO2_BASE);
}
/* Drive a GPIO2 pin to the given level; offset is chip-relative. */
static void gpio2_set(struct gpio_chip *chip, unsigned offset, int value)
{
	alchemy_gpio2_set_value(offset + ALCHEMY_GPIO2_BASE, value);
}
/* Configure a GPIO2 pin as an input; offset is chip-relative. */
static int gpio2_direction_input(struct gpio_chip *chip, unsigned offset)
{
	return alchemy_gpio2_direction_input(offset + ALCHEMY_GPIO2_BASE);
}
/* Configure a GPIO2 pin as an output at the given initial level. */
static int gpio2_direction_output(struct gpio_chip *chip, unsigned offset,
				  int value)
{
	return alchemy_gpio2_direction_output(offset + ALCHEMY_GPIO2_BASE,
					      value);
}
/* Map a GPIO2 pin to its interrupt number. */
static int gpio2_to_irq(struct gpio_chip *chip, unsigned offset)
{
	return alchemy_gpio2_to_irq(offset + ALCHEMY_GPIO2_BASE);
}
/* Read the current level of a GPIO1 pin; offset is chip-relative. */
static int gpio1_get(struct gpio_chip *chip, unsigned offset)
{
	return alchemy_gpio1_get_value(offset + ALCHEMY_GPIO1_BASE);
}
/* Drive a GPIO1 pin to the given level; offset is chip-relative. */
static void gpio1_set(struct gpio_chip *chip,
		      unsigned offset, int value)
{
	alchemy_gpio1_set_value(offset + ALCHEMY_GPIO1_BASE, value);
}
/* Configure a GPIO1 pin as an input; offset is chip-relative. */
static int gpio1_direction_input(struct gpio_chip *chip, unsigned offset)
{
	return alchemy_gpio1_direction_input(offset + ALCHEMY_GPIO1_BASE);
}
/* Configure a GPIO1 pin as an output at the given initial level. */
static int gpio1_direction_output(struct gpio_chip *chip,
				  unsigned offset, int value)
{
	return alchemy_gpio1_direction_output(offset + ALCHEMY_GPIO1_BASE,
					      value);
}
/* Map a GPIO1 pin to its interrupt number. */
static int gpio1_to_irq(struct gpio_chip *chip, unsigned offset)
{
	return alchemy_gpio1_to_irq(offset + ALCHEMY_GPIO1_BASE);
}
/* gpiolib chip descriptors: [0] = GPIO1 (all SoCs), [1] = GPIO2
 * (not present on the Au1000, see alchemy_gpiolib_init). */
struct gpio_chip alchemy_gpio_chip[] = {
	[0] = {
		.label			= "alchemy-gpio1",
		.direction_input	= gpio1_direction_input,
		.direction_output	= gpio1_direction_output,
		.get			= gpio1_get,
		.set			= gpio1_set,
		.to_irq			= gpio1_to_irq,
		.base			= ALCHEMY_GPIO1_BASE,
		.ngpio			= ALCHEMY_GPIO1_NUM,
	},
	[1] = {
		.label			= "alchemy-gpio2",
		.direction_input	= gpio2_direction_input,
		.direction_output	= gpio2_direction_output,
		.get			= gpio2_get,
		.set			= gpio2_set,
		.to_irq			= gpio2_to_irq,
		.base			= ALCHEMY_GPIO2_BASE,
		.ngpio			= ALCHEMY_GPIO2_NUM,
	},
};
/*
 * Register the GPIO banks with gpiolib.  GPIO2 only exists on Au1100,
 * Au15x0 and Au12x0, so it is skipped on the Au1000.
 *
 * Fix: the return values of gpiochip_add() were silently discarded;
 * propagate them so a registration failure is visible to the initcall
 * machinery instead of being masked by an unconditional 0.
 */
static int __init alchemy_gpiolib_init(void)
{
	int ret;

	ret = gpiochip_add(&alchemy_gpio_chip[0]);
	if (ret)
		return ret;

	if (alchemy_get_cputype() != ALCHEMY_CPU_AU1000)
		ret = gpiochip_add(&alchemy_gpio_chip[1]);

	return ret;
}
arch_initcall(alchemy_gpiolib_init);
| gpl-2.0 |
dongsupark/linux | drivers/media/rc/keymaps/rc-medion-x10-digitainer.c | 4819 | 3728 | /*
* Medion X10 RF remote keytable (Digitainer variant)
*
* Copyright (C) 2012 Anssi Hannula <anssi.hannula@iki.fi>
*
* This keymap is for a variant that has a distinctive scrollwheel instead of
* up/down buttons (tested with P/N 40009936 / 20018268), reportedly
* originally shipped with Medion Digitainer but now sold separately simply as
* an "X10" remote.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/module.h>
#include <media/rc-map.h>
static struct rc_map_table medion_x10_digitainer[] = {
{ 0x02, KEY_POWER },
{ 0x2c, KEY_TV },
{ 0x2d, KEY_VIDEO },
{ 0x04, KEY_DVD }, /* CD/DVD */
{ 0x16, KEY_TEXT }, /* "teletext" icon, i.e. a screen with lines */
{ 0x06, KEY_AUDIO },
{ 0x2e, KEY_RADIO },
{ 0x31, KEY_EPG }, /* a screen with an open book */
{ 0x05, KEY_IMAGES }, /* Photo */
{ 0x2f, KEY_INFO },
{ 0x78, KEY_UP }, /* scrollwheel up 1 notch */
/* 0x79..0x7f: 2-8 notches, driver repeats 0x78 entry */
{ 0x70, KEY_DOWN }, /* scrollwheel down 1 notch */
/* 0x71..0x77: 2-8 notches, driver repeats 0x70 entry */
{ 0x19, KEY_MENU },
{ 0x1d, KEY_LEFT },
{ 0x1e, KEY_OK }, /* scrollwheel press */
{ 0x1f, KEY_RIGHT },
{ 0x20, KEY_BACK },
{ 0x09, KEY_VOLUMEUP },
{ 0x08, KEY_VOLUMEDOWN },
{ 0x00, KEY_MUTE },
{ 0x1b, KEY_SELECT }, /* also has "U" rotated 90 degrees CCW */
{ 0x0b, KEY_CHANNELUP },
{ 0x0c, KEY_CHANNELDOWN },
{ 0x1c, KEY_LAST },
{ 0x32, KEY_RED }, /* also Audio */
{ 0x33, KEY_GREEN }, /* also Subtitle */
{ 0x34, KEY_YELLOW }, /* also Angle */
{ 0x35, KEY_BLUE }, /* also Title */
{ 0x28, KEY_STOP },
{ 0x29, KEY_PAUSE },
{ 0x25, KEY_PLAY },
{ 0x21, KEY_PREVIOUS },
{ 0x18, KEY_CAMERA },
{ 0x23, KEY_NEXT },
{ 0x24, KEY_REWIND },
{ 0x27, KEY_RECORD },
{ 0x26, KEY_FORWARD },
{ 0x0d, KEY_1 },
{ 0x0e, KEY_2 },
{ 0x0f, KEY_3 },
{ 0x10, KEY_4 },
{ 0x11, KEY_5 },
{ 0x12, KEY_6 },
{ 0x13, KEY_7 },
{ 0x14, KEY_8 },
{ 0x15, KEY_9 },
{ 0x17, KEY_0 },
/* these do not actually exist on this remote, but these scancodes
* exist on all other Medion X10 remotes and adding them here allows
* such remotes to be adequately usable with this keymap in case
* this keymap is wrongly used with them (which is quite possible as
* there are lots of different Medion X10 remotes): */
{ 0x1a, KEY_UP },
{ 0x22, KEY_DOWN },
};
static struct rc_map_list medion_x10_digitainer_map = {
.map = {
.scan = medion_x10_digitainer,
.size = ARRAY_SIZE(medion_x10_digitainer),
.rc_type = RC_TYPE_OTHER,
.name = RC_MAP_MEDION_X10_DIGITAINER,
}
};
/* Register the keymap with the RC core at module load. */
static int __init init_rc_map_medion_x10_digitainer(void)
{
	return rc_map_register(&medion_x10_digitainer_map);
}
/* Unregister the keymap at module unload. */
static void __exit exit_rc_map_medion_x10_digitainer(void)
{
	rc_map_unregister(&medion_x10_digitainer_map);
}
module_init(init_rc_map_medion_x10_digitainer)
module_exit(exit_rc_map_medion_x10_digitainer)
MODULE_DESCRIPTION("Medion X10 RF remote keytable (Digitainer variant)");
MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
snak3ater/kernel_msm | sound/soc/txx9/txx9aclc-ac97.c | 5075 | 6315 | /*
* TXx9 ACLC AC97 driver
*
* Copyright (C) 2009 Atsushi Nemoto
*
* Based on RBTX49xx patch from CELF patch archive.
* (C) Copyright TOSHIBA CORPORATION 2004-2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include "txx9aclc.h"
#define AC97_DIR \
(SND_SOC_DAIDIR_PLAYBACK | SND_SOC_DAIDIR_CAPTURE)
#define AC97_RATES \
SNDRV_PCM_RATE_8000_48000
#ifdef __BIG_ENDIAN
#define AC97_FMTS SNDRV_PCM_FMTBIT_S16_BE
#else
#define AC97_FMTS SNDRV_PCM_FMTBIT_S16_LE
#endif
static DECLARE_WAIT_QUEUE_HEAD(ac97_waitq);
/* REVISIT: How to find txx9aclc_drvdata from snd_ac97? */
static struct txx9aclc_plat_drvdata *txx9aclc_drvdata;
/* Non-zero when the AC97 controller reports a register access complete. */
static int txx9aclc_regready(struct txx9aclc_plat_drvdata *drvdata)
{
	return __raw_readl(drvdata->base + ACINTSTS) & ACINT_REGACCRDY;
}
/* AC97 controller reads codec register */
/* AC97 controller reads codec register */
/*
 * Issue a register read over the AC-link and sleep (woken by the IRQ
 * handler) until the controller reports completion or a 1s timeout.
 * Returns the register value, or 0xffff if the codec is not ready, the
 * access timed out, or the readback address did not match.
 */
static unsigned short txx9aclc_ac97_read(struct snd_ac97 *ac97,
					 unsigned short reg)
{
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
	void __iomem *base = drvdata->base;
	u32 dat;

	if (!(__raw_readl(base + ACINTSTS) & ACINT_CODECRDY(ac97->num)))
		return 0xffff;
	/* encode the codec number into bit 7+ of the register address */
	reg |= ac97->num << 7;
	dat = (reg << ACREGACC_REG_SHIFT) | ACREGACC_READ;
	__raw_writel(dat, base + ACREGACC);
	__raw_writel(ACINT_REGACCRDY, base + ACINTEN);
	if (!wait_event_timeout(ac97_waitq, txx9aclc_regready(txx9aclc_drvdata), HZ)) {
		__raw_writel(ACINT_REGACCRDY, base + ACINTDIS);
		printk(KERN_ERR "ac97 read timeout (reg %#x)\n", reg);
		dat = 0xffff;
		goto done;
	}
	dat = __raw_readl(base + ACREGACC);
	/* NOTE(review): for ac97->num != 0 "reg" exceeds 0xff after the
	 * OR above, so this (dat >> shift) & 0xff comparison can never
	 * match a secondary codec - verify intended behaviour. */
	if (((dat >> ACREGACC_REG_SHIFT) & 0xff) != reg) {
		printk(KERN_ERR "reg mismatch %x with %x\n",
		       dat, reg);
		dat = 0xffff;
		goto done;
	}
	dat = (dat >> ACREGACC_DAT_SHIFT) & 0xffff;
done:
	__raw_writel(ACINT_REGACCRDY, base + ACINTDIS);
	return dat;
}
/* AC97 controller writes to codec register */
/* AC97 controller writes to codec register */
/*
 * Issue a register write over the AC-link and sleep (woken by the IRQ
 * handler) until completion or a 1s timeout.  A timeout is logged but
 * not reported to the caller (the AC97 bus write API returns void).
 */
static void txx9aclc_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
				unsigned short val)
{
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
	void __iomem *base = drvdata->base;

	/* pack codec number, register address and data into one word */
	__raw_writel(((reg | (ac97->num << 7)) << ACREGACC_REG_SHIFT) |
		     (val << ACREGACC_DAT_SHIFT),
		     base + ACREGACC);
	__raw_writel(ACINT_REGACCRDY, base + ACINTEN);
	if (!wait_event_timeout(ac97_waitq, txx9aclc_regready(txx9aclc_drvdata), HZ)) {
		printk(KERN_ERR
		       "ac97 write timeout (reg %#x)\n", reg);
	}
	__raw_writel(ACINT_REGACCRDY, base + ACINTDIS);
}
/*
 * Cold-reset the AC-link: drop and re-raise the link enable, then wait
 * (up to 1s) for the codec-ready and register-access-ready status bits.
 * Failure to come ready is logged but not fatal.
 */
static void txx9aclc_ac97_cold_reset(struct snd_ac97 *ac97)
{
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
	void __iomem *base = drvdata->base;
	u32 ready = ACINT_CODECRDY(ac97->num) | ACINT_REGACCRDY;

	__raw_writel(ACCTL_ENLINK, base + ACCTLDIS);
	mmiowb();
	udelay(1);
	__raw_writel(ACCTL_ENLINK, base + ACCTLEN);
	/* wait for primary codec ready status */
	__raw_writel(ready, base + ACINTEN);
	if (!wait_event_timeout(ac97_waitq,
				(__raw_readl(base + ACINTSTS) & ready) == ready,
				HZ)) {
		dev_err(&ac97->dev, "primary codec is not ready "
			"(status %#x)\n",
			__raw_readl(base + ACINTSTS));
	}
	__raw_writel(ACINT_REGACCRDY, base + ACINTSTS);
	__raw_writel(ready, base + ACINTDIS);
}
/* AC97 controller operations */
/* AC97 controller operations */
/* Exported bus ops consumed by the generic ASoC AC97 layer. */
struct snd_ac97_bus_ops soc_ac97_ops = {
	.read	= txx9aclc_ac97_read,
	.write	= txx9aclc_ac97_write,
	.reset	= txx9aclc_ac97_cold_reset,
};
EXPORT_SYMBOL_GPL(soc_ac97_ops);
/* IRQ handler: mask whatever interrupts fired and wake any waiter
 * sleeping in the read/write/reset paths. */
static irqreturn_t txx9aclc_ac97_irq(int irq, void *dev_id)
{
	struct txx9aclc_plat_drvdata *drvdata = dev_id;
	void __iomem *base = drvdata->base;

	__raw_writel(__raw_readl(base + ACINTMSTS), base + ACINTDIS);
	wake_up(&ac97_waitq);
	return IRQ_HANDLED;
}
/* DAI probe: stash the platform drvdata in the module-global handle
 * (the bus ops cannot recover it from the snd_ac97 argument). */
static int txx9aclc_ac97_probe(struct snd_soc_dai *dai)
{
	txx9aclc_drvdata = snd_soc_dai_get_drvdata(dai);
	return 0;
}
/* DAI remove: shut down the AC-link and clear the module-global handle. */
static int txx9aclc_ac97_remove(struct snd_soc_dai *dai)
{
	struct txx9aclc_plat_drvdata *dd = snd_soc_dai_get_drvdata(dai);

	/* disable AC-link */
	__raw_writel(ACCTL_ENLINK, dd->base + ACCTLDIS);
	txx9aclc_drvdata = NULL;

	return 0;
}
/* AC97 DAI: stereo playback and capture, 8k-48k, native-endian S16. */
static struct snd_soc_dai_driver txx9aclc_ac97_dai = {
	.ac97_control		= 1,
	.probe			= txx9aclc_ac97_probe,
	.remove			= txx9aclc_ac97_remove,
	.playback = {
		.rates		= AC97_RATES,
		.formats	= AC97_FMTS,
		.channels_min	= 2,
		.channels_max	= 2,
	},
	.capture = {
		.rates		= AC97_RATES,
		.formats	= AC97_FMTS,
		.channels_min	= 2,
		.channels_max	= 2,
	},
};
/*
 * Platform probe: claim the MMIO region and IRQ (all via devm_* helpers,
 * so no explicit error-path cleanup is needed) and register the DAI.
 * Returns 0 on success or a negative errno.
 */
static int __devinit txx9aclc_ac97_dev_probe(struct platform_device *pdev)
{
	struct txx9aclc_plat_drvdata *drvdata;
	struct resource *r;
	int err;
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -EBUSY;

	if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
				     dev_name(&pdev->dev)))
		return -EBUSY;

	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;
	platform_set_drvdata(pdev, drvdata);
	drvdata->physbase = r->start;
	/* on 64-bit-capable TXx9, direct-mapped addresses live at 0xf_0000_0000 */
	if (sizeof(drvdata->physbase) > sizeof(r->start) &&
	    r->start >= TXX9_DIRECTMAP_BASE &&
	    r->start < TXX9_DIRECTMAP_BASE + 0x400000)
		drvdata->physbase |= 0xf00000000ull;
	drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!drvdata->base)
		return -EBUSY;
	err = devm_request_irq(&pdev->dev, irq, txx9aclc_ac97_irq,
			       0, dev_name(&pdev->dev), drvdata);
	if (err < 0)
		return err;

	return snd_soc_register_dai(&pdev->dev, &txx9aclc_ac97_dai);
}
/* Platform remove: unregister the DAI; devm_* resources free themselves. */
static int __devexit txx9aclc_ac97_dev_remove(struct platform_device *pdev)
{
	snd_soc_unregister_dai(&pdev->dev);
	return 0;
}
static struct platform_driver txx9aclc_ac97_driver = {
.probe = txx9aclc_ac97_dev_probe,
.remove = __devexit_p(txx9aclc_ac97_dev_remove),
.driver = {
.name = "txx9aclc-ac97",
.owner = THIS_MODULE,
},
};
module_platform_driver(txx9aclc_ac97_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("TXx9 ACLC AC97 driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:txx9aclc-ac97");
| gpl-2.0 |
Clumsy-Kernel-Development/M8_Kernel | drivers/gpio/gpio-janz-ttl.c | 5075 | 5428 | /*
* Janz MODULbus VMOD-TTL GPIO Driver
*
* Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/mfd/janz.h>
#define DRV_NAME "janz-ttl"
#define PORTA_DIRECTION 0x23
#define PORTB_DIRECTION 0x2B
#define PORTC_DIRECTION 0x06
#define PORTA_IOCTL 0x24
#define PORTB_IOCTL 0x2C
#define PORTC_IOCTL 0x07
#define MASTER_INT_CTL 0x00
#define MASTER_CONF_CTL 0x01
#define CONF_PAE (1 << 2)
#define CONF_PBE (1 << 7)
#define CONF_PCE (1 << 4)
/* MMIO register layout of the VMOD-TTL module (big-endian 16-bit). */
struct ttl_control_regs {
	__be16 portc;
	__be16 portb;
	__be16 porta;
	__be16 control;
};
/* Per-device state: gpiolib chip, MMIO mapping, and shadow copies of the
 * three output ports (the hardware registers are write-only, so reads
 * are served from the shadows).  "lock" guards shadow updates. */
struct ttl_module {
	struct gpio_chip gpio;

	/* base address of registers */
	struct ttl_control_regs __iomem *regs;

	u8 portc_shadow;
	u8 portb_shadow;
	u8 porta_shadow;

	spinlock_t lock;
};
/* Return the cached (shadow) level of the given GPIO line. */
static int ttl_get_value(struct gpio_chip *gpio, unsigned offset)
{
	struct ttl_module *mod = dev_get_drvdata(gpio->dev);
	u8 *shadow;
	int val;

	/* map global offset onto port A (0-7), B (8-15) or C (16-19) */
	if (offset >= 16) {
		shadow = &mod->portc_shadow;
		offset -= 16;
	} else if (offset >= 8) {
		shadow = &mod->portb_shadow;
		offset -= 8;
	} else {
		shadow = &mod->porta_shadow;
	}

	spin_lock(&mod->lock);
	val = *shadow & (1 << offset);
	spin_unlock(&mod->lock);

	return val;
}
/* Drive a GPIO line: update the shadow copy under the lock, then write
 * the whole port register (the hardware is write-only). */
static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
{
	struct ttl_module *mod = dev_get_drvdata(gpio->dev);
	void __iomem *port;
	u8 *shadow;

	/* map global offset onto port A (0-7), B (8-15) or C (16-19) */
	if (offset >= 16) {
		port = &mod->regs->portc;
		shadow = &mod->portc_shadow;
		offset -= 16;
	} else if (offset >= 8) {
		port = &mod->regs->portb;
		shadow = &mod->portb_shadow;
		offset -= 8;
	} else {
		port = &mod->regs->porta;
		shadow = &mod->porta_shadow;
	}

	spin_lock(&mod->lock);
	if (value)
		*shadow |= 1 << offset;
	else
		*shadow &= ~(1 << offset);

	iowrite16be(*shadow, port);
	spin_unlock(&mod->lock);
}
/* Write an indirect register: the address then the value go through the
 * control register, back-to-back. */
static void __devinit ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
{
	iowrite16be(reg, &mod->regs->control);
	iowrite16be(val, &mod->regs->control);
}
/* Bring the module to a known state: reset, open-drain I/O, all ports
 * configured as outputs driving zero, then enable the ports. */
static void __devinit ttl_setup_device(struct ttl_module *mod)
{
	/* reset the device to a known state */
	iowrite16be(0x0000, &mod->regs->control);
	iowrite16be(0x0001, &mod->regs->control);
	iowrite16be(0x0000, &mod->regs->control);

	/* put all ports in open-drain mode */
	ttl_write_reg(mod, PORTA_IOCTL, 0x00ff);
	ttl_write_reg(mod, PORTB_IOCTL, 0x00ff);
	ttl_write_reg(mod, PORTC_IOCTL, 0x000f);

	/* set all ports as outputs */
	ttl_write_reg(mod, PORTA_DIRECTION, 0x0000);
	ttl_write_reg(mod, PORTB_DIRECTION, 0x0000);
	ttl_write_reg(mod, PORTC_DIRECTION, 0x0000);

	/* set all ports to drive zeroes */
	iowrite16be(0x0000, &mod->regs->porta);
	iowrite16be(0x0000, &mod->regs->portb);
	iowrite16be(0x0000, &mod->regs->portc);

	/* enable all ports */
	ttl_write_reg(mod, MASTER_CONF_CTL, CONF_PAE | CONF_PBE | CONF_PCE);
}
/*
 * Platform probe: allocate device state, map the MODULbus register window,
 * initialise the hardware, and register the 20-line GPIO chip with a
 * dynamically allocated base.  Failures unwind through the goto chain.
 */
static int __devinit ttl_probe(struct platform_device *pdev)
{
	struct janz_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct ttl_module *mod;
	struct gpio_chip *gpio;
	struct resource *res;
	int ret;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(dev, "no platform data\n");
		ret = -ENXIO;
		goto out_return;
	}

	mod = kzalloc(sizeof(*mod), GFP_KERNEL);
	if (!mod) {
		dev_err(dev, "unable to allocate private data\n");
		ret = -ENOMEM;
		goto out_return;
	}

	platform_set_drvdata(pdev, mod);
	spin_lock_init(&mod->lock);

	/* get access to the MODULbus registers for this module */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "MODULbus registers not found\n");
		ret = -ENODEV;
		goto out_free_mod;
	}

	mod->regs = ioremap(res->start, resource_size(res));
	if (!mod->regs) {
		dev_err(dev, "MODULbus registers not ioremap\n");
		ret = -ENOMEM;
		goto out_free_mod;
	}

	ttl_setup_device(mod);

	/* Initialize the GPIO data structures */
	gpio = &mod->gpio;
	gpio->dev = &pdev->dev;
	gpio->label = pdev->name;
	gpio->get = ttl_get_value;
	gpio->set = ttl_set_value;
	gpio->owner = THIS_MODULE;

	/* request dynamic allocation */
	gpio->base = -1;
	gpio->ngpio = 20;

	ret = gpiochip_add(gpio);
	if (ret) {
		dev_err(dev, "unable to add GPIO chip\n");
		goto out_iounmap_regs;
	}

	return 0;

out_iounmap_regs:
	iounmap(mod->regs);
out_free_mod:
	kfree(mod);
out_return:
	return ret;
}
/* Platform remove: unregister the GPIO chip, unmap the registers and
 * free device state.  A gpiochip_remove() failure (lines still in use)
 * aborts removal and leaves everything allocated. */
static int __devexit ttl_remove(struct platform_device *pdev)
{
	struct ttl_module *mod = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	int ret;

	ret = gpiochip_remove(&mod->gpio);
	if (ret) {
		dev_err(dev, "unable to remove GPIO chip\n");
		return ret;
	}

	iounmap(mod->regs);
	kfree(mod);
	return 0;
}
static struct platform_driver ttl_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = ttl_probe,
.remove = __devexit_p(ttl_remove),
};
module_platform_driver(ttl_driver);
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:janz-ttl");
| gpl-2.0 |
jxxhwy/A850S_JB_KERNEL | sound/soc/blackfin/bf5xx-ac97.c | 5075 | 9664 | /*
* bf5xx-ac97.c -- AC97 support for the ADI blackfin chip.
*
* Author: Roy Huang
* Created: 11th. June 2007
* Copyright: Analog Device Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <asm/irq.h>
#include <asm/portmux.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include "bf5xx-sport.h"
#include "bf5xx-ac97.h"
/* Anomaly notes:
* 05000250 - AD1980 is running in TDM mode and RFS/TFS are generated by SPORT
* contrtoller. But, RFSDIV and TFSDIV are always set to 16*16-1,
* while the max AC97 data size is 13*16. The DIV is always larger
* than data size. AD73311 and ad2602 are not running in TDM mode.
* AD1836 and AD73322 depend on external RFS/TFS only. So, this
* anomaly does not affect blackfin sound drivers.
*/
static struct sport_device *ac97_sport_handle;
/*
 * Pack interleaved 16-bit PCM samples into AC97 slot frames, tagging
 * each occupied slot.  "count" is the number of frames to build;
 * "chan_mask" selects which channel slots are filled from "src".
 */
void bf5xx_pcm_to_ac97(struct ac97_frame *dst, const __u16 *src,
		       size_t count, unsigned int chan_mask)
{
	while (count--) {
		dst->ac97_tag = TAG_VALID;
		/* NOTE(review): FL fills ac97_pcm_r and FR fills
		 * ac97_pcm_l, and below SP_SR fills ac97_sl / SP_SL
		 * fills ac97_sr - presumably a deliberate channel-order
		 * mapping for this board; verify against the AC97 slot
		 * assignment before "fixing". */
		if (chan_mask & SP_FL) {
			dst->ac97_pcm_r = *src++;
			dst->ac97_tag |= TAG_PCM_RIGHT;
		}
		if (chan_mask & SP_FR) {
			dst->ac97_pcm_l = *src++;
			dst->ac97_tag |= TAG_PCM_LEFT;
		}
#if defined(CONFIG_SND_BF5XX_MULTICHAN_SUPPORT)
		if (chan_mask & SP_SR) {
			dst->ac97_sl = *src++;
			dst->ac97_tag |= TAG_PCM_SL;
		}
		if (chan_mask & SP_SL) {
			dst->ac97_sr = *src++;
			dst->ac97_tag |= TAG_PCM_SR;
		}
		if (chan_mask & SP_LFE) {
			dst->ac97_lfe = *src++;
			dst->ac97_tag |= TAG_PCM_LFE;
		}
		if (chan_mask & SP_FC) {
			dst->ac97_center = *src++;
			dst->ac97_tag |= TAG_PCM_CENTER;
		}
#endif
		dst++;
	}
}
EXPORT_SYMBOL(bf5xx_pcm_to_ac97);
/*
 * Unpack AC97 frames into interleaved L/R 16-bit PCM samples.
 * "count" is the number of frames (stereo pairs) to convert.
 */
void bf5xx_ac97_to_pcm(const struct ac97_frame *src, __u16 *dst,
		       size_t count)
{
	size_t i;

	for (i = 0; i < count; i++) {
		dst[2 * i] = src[i].ac97_pcm_l;
		dst[2 * i + 1] = src[i].ac97_pcm_r;
	}
}
/* Compute (and cache in the sport state) the index of the TX fragment
 * currently being transmitted by DMA. */
static unsigned int sport_tx_curr_frag(struct sport_device *sport)
{
	return sport->tx_curr_frag = sport_curr_offset_tx(sport) /
			sport->tx_fragsize;
}
/*
 * Queue a codec register command into the next TX fragment (the one DMA
 * has not reached yet), so it is sent in-band with the audio stream.
 * cmd_count[] tracks how many command slots of each fragment are used.
 */
static void enqueue_cmd(struct snd_ac97 *ac97, __u16 addr, __u16 data)
{
	struct sport_device *sport = ac97_sport_handle;
	int *cmd_count = sport->private_data;
	int nextfrag = sport_tx_curr_frag(sport);
	struct ac97_frame *nextwrite;

	sport_incfrag(sport, &nextfrag, 1);

	nextwrite = (struct ac97_frame *)(sport->tx_buf +
			nextfrag * sport->tx_fragsize);
	pr_debug("sport->tx_buf:%p, nextfrag:0x%x nextwrite:%p, cmd_count:%d\n",
		sport->tx_buf, nextfrag, nextwrite, cmd_count[nextfrag]);
	nextwrite[cmd_count[nextfrag]].ac97_tag |= TAG_CMD;
	nextwrite[cmd_count[nextfrag]].ac97_addr = addr;
	nextwrite[cmd_count[nextfrag]].ac97_data = data;
	++cmd_count[nextfrag];
	pr_debug("ac97_sport: Inserting %02x/%04x into fragment %d\n",
			addr >> 8, data, nextfrag);
}
/*
 * bf5xx_ac97_read - synchronously read an AC97 codec register.
 * @ac97: codec instance
 * @reg: register index
 *
 * Sends a read command frame and returns the data word from the frame
 * received one slot-time later (in_frame[1]).
 *
 * NOTE(review): returning -EFAULT from an unsigned short function
 * truncates it to a large positive value; callers only ever see a bogus
 * register value, not an errno. Kept as-is since the return type is part
 * of the snd_ac97_bus_ops contract.
 */
static unsigned short bf5xx_ac97_read(struct snd_ac97 *ac97,
unsigned short reg)
{
struct sport_device *sport_handle = ac97_sport_handle;
struct ac97_frame out_frame[2], in_frame[2];
pr_debug("%s enter 0x%x\n", __func__, reg);
/* When dma descriptor is enabled, the register should not be read */
if (sport_handle->tx_run || sport_handle->rx_run) {
pr_err("Could you send a mail to cliff.cai@analog.com "
"to report this?\n");
return -EFAULT;
}
memset(&out_frame, 0, 2 * sizeof(struct ac97_frame));
memset(&in_frame, 0, 2 * sizeof(struct ac97_frame));
/* Bit 15 of the address slot marks the access as a read. */
out_frame[0].ac97_tag = TAG_VALID | TAG_CMD;
out_frame[0].ac97_addr = ((reg << 8) | 0x8000);
sport_send_and_recv(sport_handle, (unsigned char *)&out_frame,
(unsigned char *)&in_frame,
2 * sizeof(struct ac97_frame));
return in_frame[1].ac97_data;
}
void bf5xx_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
unsigned short val)
{
struct sport_device *sport_handle = ac97_sport_handle;
pr_debug("%s enter 0x%x:0x%04x\n", __func__, reg, val);
if (sport_handle->tx_run) {
enqueue_cmd(ac97, (reg << 8), val); /* write */
enqueue_cmd(ac97, (reg << 8) | 0x8000, 0); /* read back */
} else {
struct ac97_frame frame;
memset(&frame, 0, sizeof(struct ac97_frame));
frame.ac97_tag = TAG_VALID | TAG_CMD;
frame.ac97_addr = (reg << 8);
frame.ac97_data = val;
sport_send_and_recv(sport_handle, (unsigned char *)&frame, \
NULL, sizeof(struct ac97_frame));
}
}
/*
 * bf5xx_ac97_warm_reset - pulse the AC97 SYNC line to warm-reset the codec.
 *
 * The SYNC pin is normally owned by the SPORT peripheral, so it is
 * temporarily released, driven high for >1us as a GPIO, then handed back.
 *
 * Fix: the gpio_request() return value was ignored; on failure the old
 * code went on to drive a GPIO it did not own. Now we log and skip the
 * pulse, but still re-acquire the peripheral pins so audio keeps working.
 */
static void bf5xx_ac97_warm_reset(struct snd_ac97 *ac97)
{
	struct sport_device *sport_handle = ac97_sport_handle;
	u16 gpio = P_IDENT(sport_handle->pin_req[3]);

	pr_debug("%s enter\n", __func__);

	peripheral_free_list(sport_handle->pin_req);

	if (gpio_request(gpio, "bf5xx-ac97")) {
		pr_err("Failed to request GPIO_%d for warm reset\n", gpio);
		goto reclaim;
	}
	gpio_direction_output(gpio, 1);
	udelay(2);
	gpio_set_value(gpio, 0);
	udelay(1);
	gpio_free(gpio);

reclaim:
	/* Give the pins back to the SPORT peripheral. */
	peripheral_request_list(sport_handle->pin_req, "soc-audio");
}
/*
 * bf5xx_ac97_cold_reset - assert the codec's hardware reset line.
 *
 * Drives the board-specific reset GPIO low for 1 ms, then high, and
 * waits another 1 ms for the bit clock to come back. Only available
 * when the board config provides a reset GPIO; otherwise just logs.
 */
static void bf5xx_ac97_cold_reset(struct snd_ac97 *ac97)
{
#ifdef CONFIG_SND_BF5XX_HAVE_COLD_RESET
pr_debug("%s enter\n", __func__);
/* It is specified for bf548-ezkit */
gpio_set_value(CONFIG_SND_BF5XX_RESET_GPIO_NUM, 0);
/* Keep reset pin low for 1 ms */
mdelay(1);
gpio_set_value(CONFIG_SND_BF5XX_RESET_GPIO_NUM, 1);
/* Wait for bit clock recover */
mdelay(1);
#else
pr_info("%s: Not implemented\n", __func__);
#endif
}
/* AC97 bus operations exported to the generic ASoC AC97 layer. */
struct snd_ac97_bus_ops soc_ac97_ops = {
.read = bf5xx_ac97_read,
.write = bf5xx_ac97_write,
.warm_reset = bf5xx_ac97_warm_reset,
.reset = bf5xx_ac97_cold_reset,
};
EXPORT_SYMBOL_GPL(soc_ac97_ops);
#ifdef CONFIG_PM
/*
 * bf5xx_ac97_suspend - stop active SPORT transfers before system sleep.
 *
 * Only the directions that are actually running (per dai state) are
 * stopped; an inactive DAI is left untouched. Always returns 0.
 */
static int bf5xx_ac97_suspend(struct snd_soc_dai *dai)
{
struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
pr_debug("%s : sport %d\n", __func__, dai->id);
if (!dai->active)
return 0;
if (dai->capture_active)
sport_rx_stop(sport);
if (dai->playback_active)
sport_tx_stop(sport);
return 0;
}
/*
 * bf5xx_ac97_resume - reprogram the SPORT after system sleep.
 *
 * Restores the same TDM configuration that probe set up (16 slots of
 * 16 bits, frame-sync divider 16*16-1); see asoc_bfin_ac97_probe().
 * Returns 0 on success or -EBUSY if the SPORT cannot be reconfigured.
 */
static int bf5xx_ac97_resume(struct snd_soc_dai *dai)
{
int ret;
struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
pr_debug("%s : sport %d\n", __func__, dai->id);
if (!dai->active)
return 0;
#if defined(CONFIG_SND_BF5XX_MULTICHAN_SUPPORT)
ret = sport_set_multichannel(sport, 16, 0x3FF, 1);
#else
ret = sport_set_multichannel(sport, 16, 0x1F, 1);
#endif
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
}
ret = sport_config_rx(sport, IRFS, 0xF, 0, (16*16-1));
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
}
ret = sport_config_tx(sport, ITFS, 0xF, 0, (16*16-1));
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
}
return 0;
}
#else
#define bf5xx_ac97_suspend NULL
#define bf5xx_ac97_resume NULL
#endif
/*
 * CPU DAI description: 48 kHz, S16_LE AC97 link. Playback supports up
 * to 6 channels when multichannel support is compiled in; capture is
 * always stereo.
 */
static struct snd_soc_dai_driver bfin_ac97_dai = {
.ac97_control = 1,
.suspend = bf5xx_ac97_suspend,
.resume = bf5xx_ac97_resume,
.playback = {
.stream_name = "AC97 Playback",
.channels_min = 2,
#if defined(CONFIG_SND_BF5XX_MULTICHAN_SUPPORT)
.channels_max = 6,
#else
.channels_max = 2,
#endif
.rates = SNDRV_PCM_RATE_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE, },
.capture = {
.stream_name = "AC97 Capture",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE, },
};
/*
 * asoc_bfin_ac97_probe - set up the SPORT in AC97/TDM mode and register
 * the CPU DAI.
 *
 * Returns 0 on success or a negative errno; the goto ladder unwinds
 * whatever was acquired before the failure.
 *
 * Fix: on gpio_request() failure the old code returned -1 (-EPERM),
 * which misrepresents the error; propagate gpio_request()'s own errno.
 */
static int __devinit asoc_bfin_ac97_probe(struct platform_device *pdev)
{
	struct sport_device *sport_handle;
	int ret;

#ifdef CONFIG_SND_BF5XX_HAVE_COLD_RESET
	/* Request PB3 as reset pin and park it high (reset inactive). */
	ret = gpio_request(CONFIG_SND_BF5XX_RESET_GPIO_NUM,
			"SND_AD198x RESET");
	if (ret) {
		pr_err("Failed to request GPIO_%d for reset\n",
				CONFIG_SND_BF5XX_RESET_GPIO_NUM);
		goto gpio_err;
	}
	gpio_direction_output(CONFIG_SND_BF5XX_RESET_GPIO_NUM, 1);
#endif

	sport_handle = sport_init(pdev, 2, sizeof(struct ac97_frame),
			PAGE_SIZE);
	if (!sport_handle) {
		ret = -ENODEV;
		goto sport_err;
	}

	/* SPORT works in TDM mode to simulate AC97 transfers */
#if defined(CONFIG_SND_BF5XX_MULTICHAN_SUPPORT)
	ret = sport_set_multichannel(sport_handle, 16, 0x3FF, 1);
#else
	ret = sport_set_multichannel(sport_handle, 16, 0x1F, 1);
#endif
	if (ret) {
		pr_err("SPORT is busy!\n");
		ret = -EBUSY;
		goto sport_config_err;
	}

	/* 16 slots of 16 bits per frame; frame-sync divider 16*16-1. */
	ret = sport_config_rx(sport_handle, IRFS, 0xF, 0, (16*16-1));
	if (ret) {
		pr_err("SPORT is busy!\n");
		ret = -EBUSY;
		goto sport_config_err;
	}
	ret = sport_config_tx(sport_handle, ITFS, 0xF, 0, (16*16-1));
	if (ret) {
		pr_err("SPORT is busy!\n");
		ret = -EBUSY;
		goto sport_config_err;
	}

	ret = snd_soc_register_dai(&pdev->dev, &bfin_ac97_dai);
	if (ret) {
		pr_err("Failed to register DAI: %d\n", ret);
		goto sport_config_err;
	}

	/* Publish the handle for the register-access helpers above. */
	ac97_sport_handle = sport_handle;
	return 0;

sport_config_err:
	sport_done(sport_handle);
sport_err:
#ifdef CONFIG_SND_BF5XX_HAVE_COLD_RESET
	gpio_free(CONFIG_SND_BF5XX_RESET_GPIO_NUM);
gpio_err:
#endif
	return ret;
}
/*
 * asoc_bfin_ac97_remove - unregister the DAI and tear down the SPORT.
 *
 * NOTE(review): relies on platform drvdata holding the sport handle;
 * probe() does not set it explicitly, so presumably sport_init() does -
 * verify against bf5xx-sport.c.
 */
static int __devexit asoc_bfin_ac97_remove(struct platform_device *pdev)
{
struct sport_device *sport_handle = platform_get_drvdata(pdev);
snd_soc_unregister_dai(&pdev->dev);
sport_done(sport_handle);
#ifdef CONFIG_SND_BF5XX_HAVE_COLD_RESET
gpio_free(CONFIG_SND_BF5XX_RESET_GPIO_NUM);
#endif
return 0;
}
/* Platform glue: binds against the "bfin-ac97" platform device. */
static struct platform_driver asoc_bfin_ac97_driver = {
.driver = {
.name = "bfin-ac97",
.owner = THIS_MODULE,
},
.probe = asoc_bfin_ac97_probe,
.remove = __devexit_p(asoc_bfin_ac97_remove),
};
module_platform_driver(asoc_bfin_ac97_driver);
MODULE_AUTHOR("Roy Huang");
MODULE_DESCRIPTION("AC97 driver for ADI Blackfin");
MODULE_LICENSE("GPL");
| gpl-2.0 |
omnirom/android_kernel_xiaomi_aries | sound/soc/txx9/txx9aclc-ac97.c | 5075 | 6315 | /*
* TXx9 ACLC AC97 driver
*
* Copyright (C) 2009 Atsushi Nemoto
*
* Based on RBTX49xx patch from CELF patch archive.
* (C) Copyright TOSHIBA CORPORATION 2004-2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include "txx9aclc.h"
#define AC97_DIR \
(SND_SOC_DAIDIR_PLAYBACK | SND_SOC_DAIDIR_CAPTURE)
#define AC97_RATES \
SNDRV_PCM_RATE_8000_48000
#ifdef __BIG_ENDIAN
#define AC97_FMTS SNDRV_PCM_FMTBIT_S16_BE
#else
#define AC97_FMTS SNDRV_PCM_FMTBIT_S16_LE
#endif
static DECLARE_WAIT_QUEUE_HEAD(ac97_waitq);
/* REVISIT: How to find txx9aclc_drvdata from snd_ac97? */
static struct txx9aclc_plat_drvdata *txx9aclc_drvdata;
static int txx9aclc_regready(struct txx9aclc_plat_drvdata *drvdata)
{
return __raw_readl(drvdata->base + ACINTSTS) & ACINT_REGACCRDY;
}
/* AC97 controller reads codec register */
/* AC97 controller reads codec register */
/*
 * Issues a read through ACREGACC, then sleeps (up to 1s) until the IRQ
 * handler wakes ac97_waitq on ACINT_REGACCRDY. Returns 0xffff if the
 * codec is not ready, on timeout, or if the reply is for the wrong
 * register.
 */
static unsigned short txx9aclc_ac97_read(struct snd_ac97 *ac97,
unsigned short reg)
{
struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
void __iomem *base = drvdata->base;
u32 dat;
if (!(__raw_readl(base + ACINTSTS) & ACINT_CODECRDY(ac97->num)))
return 0xffff;
/* Encode the codec number into bit 7 of the register index. */
reg |= ac97->num << 7;
dat = (reg << ACREGACC_REG_SHIFT) | ACREGACC_READ;
__raw_writel(dat, base + ACREGACC);
__raw_writel(ACINT_REGACCRDY, base + ACINTEN);
if (!wait_event_timeout(ac97_waitq, txx9aclc_regready(txx9aclc_drvdata), HZ)) {
__raw_writel(ACINT_REGACCRDY, base + ACINTDIS);
printk(KERN_ERR "ac97 read timeout (reg %#x)\n", reg);
dat = 0xffff;
goto done;
}
dat = __raw_readl(base + ACREGACC);
/* Reply must echo the register we asked for. */
if (((dat >> ACREGACC_REG_SHIFT) & 0xff) != reg) {
printk(KERN_ERR "reg mismatch %x with %x\n",
dat, reg);
dat = 0xffff;
goto done;
}
dat = (dat >> ACREGACC_DAT_SHIFT) & 0xffff;
done:
__raw_writel(ACINT_REGACCRDY, base + ACINTDIS);
return dat;
}
/* AC97 controller writes to codec register */
/* AC97 controller writes to codec register */
/*
 * Queues the write through ACREGACC and waits (up to 1s) for the
 * REGACCRDY interrupt. A timeout is only logged; the write may or may
 * not have reached the codec in that case.
 */
static void txx9aclc_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
unsigned short val)
{
struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
void __iomem *base = drvdata->base;
__raw_writel(((reg | (ac97->num << 7)) << ACREGACC_REG_SHIFT) |
(val << ACREGACC_DAT_SHIFT),
base + ACREGACC);
__raw_writel(ACINT_REGACCRDY, base + ACINTEN);
if (!wait_event_timeout(ac97_waitq, txx9aclc_regready(txx9aclc_drvdata), HZ)) {
printk(KERN_ERR
"ac97 write timeout (reg %#x)\n", reg);
}
__raw_writel(ACINT_REGACCRDY, base + ACINTDIS);
}
/*
 * txx9aclc_ac97_cold_reset - cycle the AC-link and wait for the primary
 * codec to report ready.
 *
 * Disables ACCTL_ENLINK briefly, re-enables it, then waits (up to 1s)
 * for both the codec-ready and register-access-ready status bits.
 * Failure is only logged; there is no error return in the bus-ops
 * reset contract.
 */
static void txx9aclc_ac97_cold_reset(struct snd_ac97 *ac97)
{
struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
void __iomem *base = drvdata->base;
u32 ready = ACINT_CODECRDY(ac97->num) | ACINT_REGACCRDY;
__raw_writel(ACCTL_ENLINK, base + ACCTLDIS);
mmiowb();
udelay(1);
__raw_writel(ACCTL_ENLINK, base + ACCTLEN);
/* wait for primary codec ready status */
__raw_writel(ready, base + ACINTEN);
if (!wait_event_timeout(ac97_waitq,
(__raw_readl(base + ACINTSTS) & ready) == ready,
HZ)) {
dev_err(&ac97->dev, "primary codec is not ready "
"(status %#x)\n",
__raw_readl(base + ACINTSTS));
}
/* Ack and mask the helper interrupts again. */
__raw_writel(ACINT_REGACCRDY, base + ACINTSTS);
__raw_writel(ready, base + ACINTDIS);
}
/* AC97 controller operations */
/* AC97 controller operations */
/* Bus operations handed to the generic ASoC AC97 layer (no warm reset). */
struct snd_ac97_bus_ops soc_ac97_ops = {
.read = txx9aclc_ac97_read,
.write = txx9aclc_ac97_write,
.reset = txx9aclc_ac97_cold_reset,
};
EXPORT_SYMBOL_GPL(soc_ac97_ops);
static irqreturn_t txx9aclc_ac97_irq(int irq, void *dev_id)
{
struct txx9aclc_plat_drvdata *drvdata = dev_id;
void __iomem *base = drvdata->base;
__raw_writel(__raw_readl(base + ACINTMSTS), base + ACINTDIS);
wake_up(&ac97_waitq);
return IRQ_HANDLED;
}
/* DAI probe: publish drvdata for the file-scope helpers (see REVISIT). */
static int txx9aclc_ac97_probe(struct snd_soc_dai *dai)
{
txx9aclc_drvdata = snd_soc_dai_get_drvdata(dai);
return 0;
}
/* DAI remove: shut the AC-link down and clear the global handle. */
static int txx9aclc_ac97_remove(struct snd_soc_dai *dai)
{
struct txx9aclc_plat_drvdata *drvdata = snd_soc_dai_get_drvdata(dai);
/* disable AC-link */
__raw_writel(ACCTL_ENLINK, drvdata->base + ACCTLDIS);
txx9aclc_drvdata = NULL;
return 0;
}
/* Stereo AC97 CPU DAI, 8-48 kHz, 16-bit native endian (see AC97_FMTS). */
static struct snd_soc_dai_driver txx9aclc_ac97_dai = {
.ac97_control = 1,
.probe = txx9aclc_ac97_probe,
.remove = txx9aclc_ac97_remove,
.playback = {
.rates = AC97_RATES,
.formats = AC97_FMTS,
.channels_min = 2,
.channels_max = 2,
},
.capture = {
.rates = AC97_RATES,
.formats = AC97_FMTS,
.channels_min = 2,
.channels_max = 2,
},
};
/*
 * txx9aclc_ac97_dev_probe - map the controller, hook its IRQ, and
 * register the DAI.
 *
 * All resources are devm-managed, so the error paths need no explicit
 * unwinding. Returns 0 on success or a negative errno.
 */
static int __devinit txx9aclc_ac97_dev_probe(struct platform_device *pdev)
{
struct txx9aclc_plat_drvdata *drvdata;
struct resource *r;
int err;
int irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
return -EBUSY;
if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
dev_name(&pdev->dev)))
return -EBUSY;
drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
platform_set_drvdata(pdev, drvdata);
drvdata->physbase = r->start;
/* On 64-bit physbase, remap the direct-map window into high space. */
if (sizeof(drvdata->physbase) > sizeof(r->start) &&
r->start >= TXX9_DIRECTMAP_BASE &&
r->start < TXX9_DIRECTMAP_BASE + 0x400000)
drvdata->physbase |= 0xf00000000ull;
drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!drvdata->base)
return -EBUSY;
err = devm_request_irq(&pdev->dev, irq, txx9aclc_ac97_irq,
0, dev_name(&pdev->dev), drvdata);
if (err < 0)
return err;
return snd_soc_register_dai(&pdev->dev, &txx9aclc_ac97_dai);
}
/* Device remove: only the DAI needs explicit teardown (rest is devm). */
static int __devexit txx9aclc_ac97_dev_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
return 0;
}
/* Platform glue: binds against the "txx9aclc-ac97" platform device. */
static struct platform_driver txx9aclc_ac97_driver = {
.probe = txx9aclc_ac97_dev_probe,
.remove = __devexit_p(txx9aclc_ac97_dev_remove),
.driver = {
.name = "txx9aclc-ac97",
.owner = THIS_MODULE,
},
};
module_platform_driver(txx9aclc_ac97_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("TXx9 ACLC AC97 driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:txx9aclc-ac97");
| gpl-2.0 |
bcm216xx/android_kernel_rhea | arch/powerpc/platforms/pasemi/iommu.c | 7635 | 7117 | /*
* Copyright (C) 2005-2008, PA Semi, Inc
*
* Maintained by: Olof Johansson <olof@lixom.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef DEBUG
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>
#define IOBMAP_PAGE_SHIFT 12
#define IOBMAP_PAGE_SIZE (1 << IOBMAP_PAGE_SHIFT)
#define IOBMAP_PAGE_MASK (IOBMAP_PAGE_SIZE - 1)
#define IOB_BASE 0xe0000000
#define IOB_SIZE 0x3000
/* Configuration registers */
#define IOBCAP_REG 0x40
#define IOBCOM_REG 0x100
/* Enable IOB address translation */
#define IOBCOM_ATEN 0x00000100
/* Address decode configuration register */
#define IOB_AD_REG 0x14c
/* IOBCOM_AD_REG fields */
#define IOB_AD_VGPRT 0x00000e00
#define IOB_AD_VGAEN 0x00000100
/* Direct mapping settings */
#define IOB_AD_MPSEL_MASK 0x00000030
#define IOB_AD_MPSEL_B38 0x00000000
#define IOB_AD_MPSEL_B40 0x00000010
#define IOB_AD_MPSEL_B42 0x00000020
/* Translation window size / enable */
#define IOB_AD_TRNG_MASK 0x00000003
#define IOB_AD_TRNG_256M 0x00000000
#define IOB_AD_TRNG_2G 0x00000001
#define IOB_AD_TRNG_128G 0x00000003
#define IOB_TABLEBASE_REG 0x154
/* Base of the 64 4-byte L1 registers */
#define IOB_XLT_L1_REGBASE 0x2b00
/* Register to invalidate TLB entries */
#define IOB_AT_INVAL_TLB_REG 0x2d00
/* The top two bits of the level 1 entry contains valid and type flags */
#define IOBMAP_L1E_V 0x40000000
#define IOBMAP_L1E_V_B 0x80000000
/* For big page entries, the bottom two bits contains flags */
#define IOBMAP_L1E_BIG_CACHED 0x00000002
#define IOBMAP_L1E_BIG_PRIORITY 0x00000001
/* For regular level 2 entries, top 2 bits contain valid and cache flags */
#define IOBMAP_L2E_V 0x80000000
#define IOBMAP_L2E_V_CACHED 0xc0000000
static void __iomem *iob;
static u32 iob_l1_emptyval;
static u32 iob_l2_emptyval;
static u32 *iob_l2_base;
static struct iommu_table iommu_table_iobmap;
static int iommu_table_iobmap_inited;
/*
 * iobmap_build - install IOMMU translations for a range of pages.
 * @tbl: the iobmap iommu table
 * @index: first table entry to fill
 * @npages: number of IOBMAP_PAGE_SIZE pages to map
 * @uaddr: kernel virtual address of the first page
 * @direction, @attrs: unused by this hardware
 *
 * Writes one valid L2 entry per page and invalidates the matching IOB
 * TLB line each time. Always returns 0.
 */
static int iobmap_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
u32 *ip;
u32 rpn;
unsigned long bus_addr;
pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr);
bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;
ip = ((u32 *)tbl->it_base) + index;
while (npages--) {
rpn = virt_to_abs(uaddr) >> IOBMAP_PAGE_SHIFT;
*(ip++) = IOBMAP_L2E_V | rpn;
/* invalidate tlb, can be optimized more */
out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);
uaddr += IOBMAP_PAGE_SIZE;
bus_addr += IOBMAP_PAGE_SIZE;
}
return 0;
}
/*
 * iobmap_free - tear down translations installed by iobmap_build().
 *
 * Rewrites each entry with iob_l2_emptyval (which points at the shared
 * dummy page, not an invalid entry) and invalidates the TLB per page.
 */
static void iobmap_free(struct iommu_table *tbl, long index,
long npages)
{
u32 *ip;
unsigned long bus_addr;
pr_debug("iobmap: free at: %lx, %lx\n", index, npages);
bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;
ip = ((u32 *)tbl->it_base) + index;
while (npages--) {
*(ip++) = iob_l2_emptyval;
/* invalidate tlb, can be optimized more */
out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);
bus_addr += IOBMAP_PAGE_SIZE;
}
}
/*
 * iommu_table_iobmap_setup - describe the 2GB translation window to the
 * generic powerpc IOMMU code, backed by the preallocated L2 table.
 */
static void iommu_table_iobmap_setup(void)
{
pr_debug(" -> %s\n", __func__);
iommu_table_iobmap.it_busno = 0;
iommu_table_iobmap.it_offset = 0;
/* it_size is in number of entries */
iommu_table_iobmap.it_size = 0x80000000 >> IOBMAP_PAGE_SHIFT;
/* Initialize the common IOMMU code */
iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
iommu_table_iobmap.it_index = 0;
/* XXXOJN tune this to avoid IOB cache invals.
 * Should probably be 8 (64 bytes)
 */
iommu_table_iobmap.it_blocksize = 4;
iommu_init_table(&iommu_table_iobmap, 0);
pr_debug(" <- %s\n", __func__);
}
/* Lazily initialize the shared iommu table on first bus setup. */
static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
{
pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);
if (!iommu_table_iobmap_inited) {
iommu_table_iobmap_inited = 1;
iommu_table_iobmap_setup();
}
}
/*
 * Point each PCI device at the iobmap table, except the on-chip DMA
 * engine (vendor 0x1959 / device 0xa007) on bare metal, which bypasses
 * the IOMMU entirely.
 */
static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
{
pr_debug("pci_dma_dev_setup, dev %p (%s)\n", dev, pci_name(dev));
#if !defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
/* For non-LPAR environment, don't translate anything for the DMA
 * engine. The exception to this is if the user has enabled
 * CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE at build time.
 */
if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
!firmware_has_feature(FW_FEATURE_LPAR)) {
dev->dev.archdata.dma_ops = &dma_direct_ops;
return;
}
#endif
set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
}
/*
 * iob_init - program the IOB address-translation hardware.
 * @dn: unused
 *
 * Allocates the dummy page backing empty L2 entries, maps the IOB
 * registers, wires the 64 L1 slots to the preallocated L2 table (see
 * alloc_iobmap_l2()), selects the 2GB window and enables translation.
 * Panics on allocation/map failure; returns 0 otherwise.
 */
int __init iob_init(struct device_node *dn)
{
unsigned long tmp;
u32 regword;
int i;
pr_debug(" -> %s\n", __func__);
/* Allocate a spare page to map all invalid IOTLB pages. */
tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
if (!tmp)
panic("IOBMAP: Cannot allocate spare page!");
/* Empty l1 is marked invalid */
iob_l1_emptyval = 0;
/* Empty l2 is mapped to dummy page */
iob_l2_emptyval = IOBMAP_L2E_V | (tmp >> IOBMAP_PAGE_SHIFT);
iob = ioremap(IOB_BASE, IOB_SIZE);
if (!iob)
panic("IOBMAP: Cannot map registers!");
/* setup direct mapping of the L1 entries */
for (i = 0; i < 64; i++) {
/* Each L1 covers 32MB, i.e. 8K entries = 32K of ram */
regword = IOBMAP_L1E_V | (__pa(iob_l2_base + i*0x2000) >> 12);
out_le32(iob+IOB_XLT_L1_REGBASE+i*4, regword);
}
/* set 2GB translation window, based at 0 */
regword = in_le32(iob+IOB_AD_REG);
regword &= ~IOB_AD_TRNG_MASK;
regword |= IOB_AD_TRNG_2G;
out_le32(iob+IOB_AD_REG, regword);
/* Enable translation */
regword = in_le32(iob+IOBCOM_REG);
regword |= IOBCOM_ATEN;
out_le32(iob+IOBCOM_REG, regword);
pr_debug(" <- %s\n", __func__);
return 0;
}
/* These are called very early. */
/* These are called very early. */
/*
 * Install the pasemi IOMMU hooks unless disabled at build time or via
 * the "linux,iommu-off" property in the device tree.
 */
void __init iommu_init_early_pasemi(void)
{
int iommu_off;
#ifndef CONFIG_PPC_PASEMI_IOMMU
iommu_off = 1;
#else
iommu_off = of_chosen &&
of_get_property(of_chosen, "linux,iommu-off", NULL);
#endif
if (iommu_off)
return;
iob_init(NULL);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pasemi;
ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pasemi;
ppc_md.tce_build = iobmap_build;
ppc_md.tce_free = iobmap_free;
set_pci_dma_ops(&dma_iommu_ops);
}
/*
 * alloc_iobmap_l2 - early-boot allocation of the L2 translation table
 * (2MB, naturally aligned, below 2GB) consumed later by iob_init().
 * No-op when the pasemi IOMMU is compiled out.
 */
void __init alloc_iobmap_l2(void)
{
#ifndef CONFIG_PPC_PASEMI_IOMMU
return;
#endif
/* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
iob_l2_base = (u32 *)abs_to_virt(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
}
| gpl-2.0 |
zzpianoman/android_kernel_samsung_tuna | drivers/macintosh/via-pmu-event.c | 15571 | 2121 | /*
* via-pmu event device for reporting some events that come through the PMU
*
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include <linux/input.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include "via-pmu-event.h"
static struct input_dev *pmu_input_dev;
/*
 * via_pmu_event_init - register an input device exposing the PMU's
 * power button (KEY_POWER) and lid switch (SW_LID).
 *
 * Returns 0 on success, -ENODEV on non-KeyLargo PMUs, -ENOMEM or the
 * input-core error otherwise; the allocated device is freed on failure.
 */
static int __init via_pmu_event_init(void)
{
int err;
/* do other models report button/lid status? */
if (pmu_get_model() != PMU_KEYLARGO_BASED)
return -ENODEV;
pmu_input_dev = input_allocate_device();
if (!pmu_input_dev)
return -ENOMEM;
pmu_input_dev->name = "PMU";
pmu_input_dev->id.bustype = BUS_HOST;
pmu_input_dev->id.vendor = 0x0001;
pmu_input_dev->id.product = 0x0001;
pmu_input_dev->id.version = 0x0100;
set_bit(EV_KEY, pmu_input_dev->evbit);
set_bit(EV_SW, pmu_input_dev->evbit);
set_bit(KEY_POWER, pmu_input_dev->keybit);
set_bit(SW_LID, pmu_input_dev->swbit);
err = input_register_device(pmu_input_dev);
if (err)
input_free_device(pmu_input_dev);
return err;
}
/*
 * via_pmu_event - forward a PMU event to the input layer.
 * @key: PMU_EVT_* event code
 * @down: nonzero for press/closed, zero for release/open
 *
 * Unknown event codes are ignored; known ones are reported and synced.
 * Safe to call before via_pmu_event_init() has run (then a no-op).
 */
void via_pmu_event(int key, int down)
{
	if (unlikely(!pmu_input_dev))
		return;

	if (key == PMU_EVT_POWER)
		input_report_key(pmu_input_dev, KEY_POWER, down);
	else if (key == PMU_EVT_LID)
		input_report_switch(pmu_input_dev, SW_LID, down);
	else
		return;	/* no such key handled */

	input_sync(pmu_input_dev);
}
late_initcall(via_pmu_event_init);
| gpl-2.0 |
suncycheng/linux | drivers/acpi/acpica/rsmisc.c | 212 | 20919 | /*******************************************************************************
*
* Module Name: rsmisc - Miscellaneous resource descriptors
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsmisc")
#define INIT_RESOURCE_TYPE(i) i->resource_offset
#define INIT_RESOURCE_LENGTH(i) i->aml_offset
#define INIT_TABLE_LENGTH(i) i->value
#define COMPARE_OPCODE(i) i->resource_offset
#define COMPARE_TARGET(i) i->aml_offset
#define COMPARE_VALUE(i) i->value
/*******************************************************************************
*
* FUNCTION: acpi_rs_convert_aml_to_resource
*
* PARAMETERS: resource - Pointer to the resource descriptor
* aml - Where the AML descriptor is returned
* info - Pointer to appropriate conversion table
*
* RETURN: Status
*
* DESCRIPTION: Convert an external AML resource descriptor to the corresponding
* internal resource descriptor
*
******************************************************************************/
acpi_status
acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
union aml_resource *aml,
struct acpi_rsconvert_info *info)
{
acpi_rs_length aml_resource_length;
void *source;
void *destination;
char *target;
u8 count;
u8 flags_mode = FALSE;
u16 item_count = 0;
u16 temp16 = 0;
ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
if (!info) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (((acpi_size)resource) & 0x3) {
/* Each internal resource struct is expected to be 32-bit aligned */
ACPI_WARNING((AE_INFO,
"Misaligned resource pointer (get): %p Type 0x%2.2X Length %u",
resource, resource->type, resource->length));
}
/* Extract the resource Length field (does not include header length) */
aml_resource_length = acpi_ut_get_resource_length(aml);
/*
* First table entry must be ACPI_RSC_INITxxx and must contain the
* table length (# of table entries)
*/
count = INIT_TABLE_LENGTH(info);
while (count) {
/*
* Source is the external AML byte stream buffer,
* destination is the internal resource descriptor
*/
source = ACPI_ADD_PTR(void, aml, info->aml_offset);
destination =
ACPI_ADD_PTR(void, resource, info->resource_offset);
switch (info->opcode) {
case ACPI_RSC_INITGET:
/*
* Get the resource type and the initial (minimum) length
*/
memset(resource, 0, INIT_RESOURCE_LENGTH(info));
resource->type = INIT_RESOURCE_TYPE(info);
resource->length = INIT_RESOURCE_LENGTH(info);
break;
case ACPI_RSC_INITSET:
break;
case ACPI_RSC_FLAGINIT:
flags_mode = TRUE;
break;
case ACPI_RSC_1BITFLAG:
/*
* Mask and shift the flag bit
*/
ACPI_SET8(destination,
((ACPI_GET8(source) >> info->value) & 0x01));
break;
case ACPI_RSC_2BITFLAG:
/*
* Mask and shift the flag bits
*/
ACPI_SET8(destination,
((ACPI_GET8(source) >> info->value) & 0x03));
break;
case ACPI_RSC_3BITFLAG:
/*
* Mask and shift the flag bits
*/
ACPI_SET8(destination,
((ACPI_GET8(source) >> info->value) & 0x07));
break;
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
ACPI_SET8(destination, item_count);
resource->length = resource->length +
(info->value * (item_count - 1));
break;
case ACPI_RSC_COUNT16:
item_count = aml_resource_length;
ACPI_SET16(destination, item_count);
resource->length = resource->length +
(info->value * (item_count - 1));
break;
case ACPI_RSC_COUNT_GPIO_PIN:
target = ACPI_ADD_PTR(void, aml, info->value);
item_count = ACPI_GET16(target) - ACPI_GET16(source);
resource->length = resource->length + item_count;
item_count = item_count / 2;
ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_COUNT_GPIO_VEN:
item_count = ACPI_GET8(source);
ACPI_SET8(destination, item_count);
resource->length =
resource->length + (info->value * item_count);
break;
case ACPI_RSC_COUNT_GPIO_RES:
/*
* Vendor data is optional (length/offset may both be zero)
* Examine vendor data length field first
*/
target = ACPI_ADD_PTR(void, aml, (info->value + 2));
if (ACPI_GET16(target)) {
/* Use vendor offset to get resource source length */
target = ACPI_ADD_PTR(void, aml, info->value);
item_count =
ACPI_GET16(target) - ACPI_GET16(source);
} else {
/* No vendor data to worry about */
item_count = aml->large_header.resource_length +
sizeof(struct aml_resource_large_header) -
ACPI_GET16(source);
}
resource->length = resource->length + item_count;
ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_COUNT_SERIAL_VEN:
item_count = ACPI_GET16(source) - info->value;
resource->length = resource->length + item_count;
ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_COUNT_SERIAL_RES:
item_count = (aml_resource_length +
sizeof(struct aml_resource_large_header))
- ACPI_GET16(source) - info->value;
resource->length = resource->length + item_count;
ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_LENGTH:
resource->length = resource->length + info->value;
break;
case ACPI_RSC_MOVE8:
case ACPI_RSC_MOVE16:
case ACPI_RSC_MOVE32:
case ACPI_RSC_MOVE64:
/*
* Raw data move. Use the Info value field unless item_count has
* been previously initialized via a COUNT opcode
*/
if (info->value) {
item_count = info->value;
}
acpi_rs_move_data(destination, source, item_count,
info->opcode);
break;
case ACPI_RSC_MOVE_GPIO_PIN:
/* Generate and set the PIN data pointer */
target = (char *)ACPI_ADD_PTR(void, resource,
(resource->length -
item_count * 2));
*(u16 **)destination = ACPI_CAST_PTR(u16, target);
/* Copy the PIN data */
source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
acpi_rs_move_data(target, source, item_count,
info->opcode);
break;
case ACPI_RSC_MOVE_GPIO_RES:
/* Generate and set the resource_source string pointer */
target = (char *)ACPI_ADD_PTR(void, resource,
(resource->length -
item_count));
*(u8 **)destination = ACPI_CAST_PTR(u8, target);
/* Copy the resource_source string */
source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
acpi_rs_move_data(target, source, item_count,
info->opcode);
break;
case ACPI_RSC_MOVE_SERIAL_VEN:
/* Generate and set the Vendor Data pointer */
target = (char *)ACPI_ADD_PTR(void, resource,
(resource->length -
item_count));
*(u8 **)destination = ACPI_CAST_PTR(u8, target);
/* Copy the Vendor Data */
source = ACPI_ADD_PTR(void, aml, info->value);
acpi_rs_move_data(target, source, item_count,
info->opcode);
break;
case ACPI_RSC_MOVE_SERIAL_RES:
/* Generate and set the resource_source string pointer */
target = (char *)ACPI_ADD_PTR(void, resource,
(resource->length -
item_count));
*(u8 **)destination = ACPI_CAST_PTR(u8, target);
/* Copy the resource_source string */
source =
ACPI_ADD_PTR(void, aml,
(ACPI_GET16(source) + info->value));
acpi_rs_move_data(target, source, item_count,
info->opcode);
break;
case ACPI_RSC_SET8:
memset(destination, info->aml_offset, info->value);
break;
case ACPI_RSC_DATA8:
target = ACPI_ADD_PTR(char, resource, info->value);
memcpy(destination, source, ACPI_GET16(target));
break;
case ACPI_RSC_ADDRESS:
/*
* Common handler for address descriptor flags
*/
if (!acpi_rs_get_address_common(resource, aml)) {
return_ACPI_STATUS
(AE_AML_INVALID_RESOURCE_TYPE);
}
break;
case ACPI_RSC_SOURCE:
/*
* Optional resource_source (Index and String)
*/
resource->length +=
acpi_rs_get_resource_source(aml_resource_length,
info->value,
destination, aml, NULL);
break;
case ACPI_RSC_SOURCEX:
/*
* Optional resource_source (Index and String). This is the more
* complicated case used by the Interrupt() macro
*/
target = ACPI_ADD_PTR(char, resource,
info->aml_offset +
(item_count * 4));
resource->length +=
acpi_rs_get_resource_source(aml_resource_length,
(acpi_rs_length)
(((item_count -
1) * sizeof(u32)) +
info->value),
destination, aml,
target);
break;
case ACPI_RSC_BITMASK:
/*
* 8-bit encoded bitmask (DMA macro)
*/
item_count =
acpi_rs_decode_bitmask(ACPI_GET8(source),
destination);
if (item_count) {
resource->length += (item_count - 1);
}
target = ACPI_ADD_PTR(char, resource, info->value);
ACPI_SET8(target, item_count);
break;
case ACPI_RSC_BITMASK16:
/*
* 16-bit encoded bitmask (IRQ macro)
*/
ACPI_MOVE_16_TO_16(&temp16, source);
item_count =
acpi_rs_decode_bitmask(temp16, destination);
if (item_count) {
resource->length += (item_count - 1);
}
target = ACPI_ADD_PTR(char, resource, info->value);
ACPI_SET8(target, item_count);
break;
case ACPI_RSC_EXIT_NE:
/*
* control - Exit conversion if not equal
*/
switch (info->resource_offset) {
case ACPI_RSC_COMPARE_AML_LENGTH:
if (aml_resource_length != info->value) {
goto exit;
}
break;
case ACPI_RSC_COMPARE_VALUE:
if (ACPI_GET8(source) != info->value) {
goto exit;
}
break;
default:
ACPI_ERROR((AE_INFO,
"Invalid conversion sub-opcode"));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
break;
default:
ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
count--;
info++;
}
exit:
if (!flags_mode) {
/* Round the resource struct length up to the next boundary (32 or 64) */
resource->length = (u32)
ACPI_ROUND_UP_TO_NATIVE_WORD(resource->length);
}
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_convert_resource_to_aml
*
* PARAMETERS: resource - Pointer to the resource descriptor
* aml - Where the AML descriptor is returned
* info - Pointer to appropriate conversion table
*
* RETURN: Status
*
* DESCRIPTION: Convert an internal resource descriptor to the corresponding
* external AML resource descriptor.
*
******************************************************************************/
acpi_status
acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
				union aml_resource *aml,
				struct acpi_rsconvert_info *info)
{
	void *source = NULL;	/* Field within the internal resource */
	void *destination;	/* Field within the output AML stream */
	char *target;
	acpi_rsdesc_size aml_length = 0;	/* Running AML descriptor length */
	u8 count;
	u16 temp16 = 0;
	u16 item_count = 0;	/* Element count carried between opcodes */

	ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);

	/* A conversion table is mandatory */

	if (!info) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * First table entry must be ACPI_RSC_INITxxx and must contain the
	 * table length (# of table entries)
	 */
	count = INIT_TABLE_LENGTH(info);

	/* Interpret the conversion table, one entry per loop iteration */

	while (count) {
		/*
		 * Source is the internal resource descriptor,
		 * destination is the external AML byte stream buffer
		 */
		source = ACPI_ADD_PTR(void, resource, info->resource_offset);
		destination = ACPI_ADD_PTR(void, aml, info->aml_offset);

		switch (info->opcode) {
		case ACPI_RSC_INITSET:

			/* Zero the output descriptor and emit its type/length header */

			memset(aml, 0, INIT_RESOURCE_LENGTH(info));
			aml_length = INIT_RESOURCE_LENGTH(info);
			acpi_rs_set_resource_header(INIT_RESOURCE_TYPE(info),
						    aml_length, aml);
			break;

		case ACPI_RSC_INITGET:

			/* Nothing to do on the resource-to-AML direction */

			break;

		case ACPI_RSC_FLAGINIT:
			/*
			 * Clear the flag byte
			 */
			ACPI_SET8(destination, 0);
			break;

		case ACPI_RSC_1BITFLAG:
			/*
			 * Mask and shift the flag bit
			 */
			ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
				     ((ACPI_GET8(source) & 0x01) << info->
				      value));
			break;

		case ACPI_RSC_2BITFLAG:
			/*
			 * Mask and shift the flag bits
			 */
			ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
				     ((ACPI_GET8(source) & 0x03) << info->
				      value));
			break;

		case ACPI_RSC_3BITFLAG:
			/*
			 * Mask and shift the flag bits
			 */
			ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
				     ((ACPI_GET8(source) & 0x07) << info->
				      value));
			break;

		case ACPI_RSC_COUNT:

			/* Emit an 8-bit element count and grow the AML length */

			item_count = ACPI_GET8(source);
			ACPI_SET8(destination, item_count);

			aml_length = (u16)
			    (aml_length + (info->value * (item_count - 1)));
			break;

		case ACPI_RSC_COUNT16:

			item_count = ACPI_GET16(source);
			aml_length = (u16) (aml_length + item_count);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_COUNT_GPIO_PIN:

			/* Store the pin-table offset, then skip past the pin table
			 * (2 bytes per pin) */

			item_count = ACPI_GET16(source);
			ACPI_SET16(destination, aml_length);

			aml_length = (u16)(aml_length + item_count * 2);
			target = ACPI_ADD_PTR(void, aml, info->value);
			ACPI_SET16(target, aml_length);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_COUNT_GPIO_VEN:

			item_count = ACPI_GET16(source);
			ACPI_SET16(destination, item_count);

			aml_length =
			    (u16)(aml_length + (info->value * item_count));
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_COUNT_GPIO_RES:

			/* Set resource source string length */

			item_count = ACPI_GET16(source);
			ACPI_SET16(destination, aml_length);

			/* Compute offset for the Vendor Data */

			aml_length = (u16)(aml_length + item_count);
			target = ACPI_ADD_PTR(void, aml, info->value);

			/* Set vendor offset only if there is vendor data */

			if (resource->data.gpio.vendor_length) {
				ACPI_SET16(target, aml_length);
			}

			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_COUNT_SERIAL_VEN:

			item_count = ACPI_GET16(source);
			ACPI_SET16(destination, item_count + info->value);
			aml_length = (u16)(aml_length + item_count);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_COUNT_SERIAL_RES:

			item_count = ACPI_GET16(source);
			aml_length = (u16)(aml_length + item_count);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_LENGTH:

			acpi_rs_set_resource_length(info->value, aml);
			break;

		case ACPI_RSC_MOVE8:
		case ACPI_RSC_MOVE16:
		case ACPI_RSC_MOVE32:
		case ACPI_RSC_MOVE64:

			/* A zero table value means "reuse item_count from a
			 * preceding COUNT opcode" */

			if (info->value) {
				item_count = info->value;
			}
			acpi_rs_move_data(destination, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_GPIO_PIN:

			/* Destination offset was stored earlier by COUNT_GPIO_PIN */

			destination = (char *)ACPI_ADD_PTR(void, aml,
							   ACPI_GET16
							   (destination));
			source = *(u16 **)source;
			acpi_rs_move_data(destination, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_GPIO_RES:

			/* Used for both resource_source string and vendor_data */

			destination = (char *)ACPI_ADD_PTR(void, aml,
							   ACPI_GET16
							   (destination));
			source = *(u8 **)source;
			acpi_rs_move_data(destination, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_SERIAL_VEN:

			destination = (char *)ACPI_ADD_PTR(void, aml,
							   (aml_length -
							    item_count));
			source = *(u8 **)source;
			acpi_rs_move_data(destination, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_SERIAL_RES:

			destination = (char *)ACPI_ADD_PTR(void, aml,
							   (aml_length -
							    item_count));
			source = *(u8 **)source;
			acpi_rs_move_data(destination, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_ADDRESS:

			/* Set the Resource Type, General Flags, and Type-Specific Flags */

			acpi_rs_set_address_common(aml, resource);
			break;

		case ACPI_RSC_SOURCEX:
			/*
			 * Optional resource_source (Index and String)
			 */
			aml_length =
			    acpi_rs_set_resource_source(aml,
							(acpi_rs_length)
							aml_length, source);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_SOURCE:
			/*
			 * Optional resource_source (Index and String). This is the more
			 * complicated case used by the Interrupt() macro
			 */
			aml_length =
			    acpi_rs_set_resource_source(aml, info->value,
							source);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_BITMASK:
			/*
			 * 8-bit encoded bitmask (DMA macro)
			 */
			ACPI_SET8(destination,
				  acpi_rs_encode_bitmask(source,
							 *ACPI_ADD_PTR(u8,
								       resource,
								       info->
								       value)));
			break;

		case ACPI_RSC_BITMASK16:
			/*
			 * 16-bit encoded bitmask (IRQ macro)
			 */
			temp16 =
			    acpi_rs_encode_bitmask(source,
						   *ACPI_ADD_PTR(u8, resource,
								 info->value));

			ACPI_MOVE_16_TO_16(destination, &temp16);
			break;

		case ACPI_RSC_EXIT_LE:
			/*
			 * control - Exit conversion if less than or equal
			 */
			if (item_count <= info->value) {
				goto exit;
			}
			break;

		case ACPI_RSC_EXIT_NE:
			/*
			 * control - Exit conversion if not equal
			 */
			switch (COMPARE_OPCODE(info)) {
			case ACPI_RSC_COMPARE_VALUE:

				if (*ACPI_ADD_PTR(u8, resource,
						  COMPARE_TARGET(info)) !=
				    COMPARE_VALUE(info)) {
					goto exit;
				}
				break;

			default:

				ACPI_ERROR((AE_INFO,
					    "Invalid conversion sub-opcode"));
				return_ACPI_STATUS(AE_BAD_PARAMETER);
			}
			break;

		case ACPI_RSC_EXIT_EQ:
			/*
			 * control - Exit conversion if equal
			 */
			if (*ACPI_ADD_PTR(u8, resource,
					  COMPARE_TARGET(info)) ==
			    COMPARE_VALUE(info)) {
				goto exit;
			}
			break;

		default:

			ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		count--;
		info++;
	}

exit:
	return_ACPI_STATUS(AE_OK);
}
#if 0
/* Previous resource validations -- compiled out, kept for reference only */

if (aml->ext_address64.revision_ID != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) {
	return_ACPI_STATUS(AE_SUPPORT);
}

if (resource->data.start_dpf.performance_robustness >= 3) {
	return_ACPI_STATUS(AE_AML_BAD_RESOURCE_VALUE);
}

if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
	/*
	 * Only [active_high, edge_sensitive] or [active_low, level_sensitive]
	 * polarity/trigger interrupts are allowed (ACPI spec, section
	 * "IRQ Format"), so 0x00 and 0x09 are illegal.
	 */
	ACPI_ERROR((AE_INFO,
		    "Invalid interrupt polarity/trigger in resource list, 0x%X",
		    aml->irq.flags));
	return_ACPI_STATUS(AE_BAD_DATA);
}

resource->data.extended_irq.interrupt_count = temp8;
if (temp8 < 1) {

	/* Must have at least one IRQ */

	return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
}

if (resource->data.dma.transfer == 0x03) {
	ACPI_ERROR((AE_INFO, "Invalid DMA.Transfer preference (3)"));
	return_ACPI_STATUS(AE_BAD_DATA);
}
#endif
| gpl-2.0 |
nicolaerosia/linux-bn-omap4 | drivers/scsi/device_handler/scsi_dh_rdac.c | 212 | 21606 | /*
* LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler
*
* Copyright (C) 2005 Mike Christie. All rights reserved.
* Copyright (C) Chandra Seetharaman, IBM Corp. 2007
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_TIMEOUT	(60 * HZ)
#define RDAC_RETRIES	3

/* Parameter-list header for the 6-byte MODE SELECT */
struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

/* Parameter-list header for the 10-byte MODE SELECT */
struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

/* Fields common to the legacy and expanded redundant-controller pages */
struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};

/* Legacy (MODE SELECT 6) redundant-controller page; handles up to 32 LUNs */
struct rdac_pg_legacy {
	struct rdac_mode_6_hdr	hdr;
	u8			page_code;
	u8			page_len;
	struct rdac_mode_common	common;
#define MODE6_MAX_LUN	32
	u8			lun_table[MODE6_MAX_LUN];
	u8			reserved2[32];
	u8			reserved3;
	u8			reserved4;
};

/* Expanded (MODE SELECT 10) redundant-controller subpage; up to 256 LUNs */
struct rdac_pg_expanded {
	struct rdac_mode_10_hdr	hdr;
	u8			page_code;
	u8			subpage_code;
	u8			page_len[2];
	struct rdac_mode_common	common;
	u8			lun_table[256];
	u8			reserved3;
	u8			reserved4;
};
/* INQUIRY VPD page 0xC9 (page id "vace"): ownership/path info,
 * consumed by check_ownership() */
struct c9_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC9 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;
	u8	path_prio;
	u8	reserved2[38];
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2
#define ARRAY_LABEL_LEN	31

/* INQUIRY VPD page 0xC4 (page id "subs"): subsystem identity and
 * controller slot, consumed by initialize_controller() */
struct c4_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC4 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	revision[4];
	u8	slot_id[SLOT_ID_LEN];
	u8	reserved[2];
};

#define UNIQUE_ID_LEN 16
/* INQUIRY VPD page 0xC8 (page id "edid"): LUN number and array
 * identification, consumed by get_lun_info() */
struct c8_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC8 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "edid" */
	u8	reserved2[3];
	u8	vol_uniq_id_len;
	u8	vol_uniq_id[16];
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[UNIQUE_ID_LEN];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};

/* One object per (array_id, slot index, host) tuple, shared by all of
 * its LUNs and refcounted via kref */
struct rdac_controller {
	u8			array_id[UNIQUE_ID_LEN];
	int			use_ms10;	/* 1: ms10, 0: ms6, -1: not yet probed */
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	union			{
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
	u8			index;
	u8			array_name[ARRAY_LABEL_LEN];
	struct Scsi_Host	*host;
	spinlock_t		ms_lock;	/* protects ms_queued/ms_sdev/ms_head */
	int			ms_queued;
	struct work_struct	ms_work;
	struct scsi_device	*ms_sdev;
	struct list_head	ms_head;
	struct list_head	dh_list;
};

/* INQUIRY VPD page 0xC2 (page id "swr4"): software/feature info,
 * consumed by set_mode_select() */
struct c2_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC2 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "swr4" */
	u8	sw_version[3];
	u8	sw_date[3];
	u8	features_enabled;
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
};

/* Per-scsi_device handler state, stored in sdev->handler_data */
struct rdac_dh_data {
	struct list_head	node;
	struct rdac_controller	*ctlr;
	struct scsi_device	*sdev;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;

#define RDAC_MODE		0
#define RDAC_MODE_AVT		1
#define RDAC_MODE_IOSHIP	2
	unsigned char		mode;

#define RDAC_STATE_ACTIVE	0
#define RDAC_STATE_PASSIVE	1
	unsigned char		state;

#define RDAC_LUN_UNOWNED	0
#define RDAC_LUN_OWNED		1
	char			lun_state;

#define RDAC_PREFERRED		0
#define RDAC_NON_PREFERRED	1
	char			preferred;

	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
	union			{
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

/* Printable names indexed by rdac_dh_data.mode and .lun_state */
static const char *mode[] = {
	"RDAC",
	"AVT",
	"IOSHIP",
};

static const char *lun_state[] =
{
	"unowned",
	"owned",
};

/* One queued ownership-transfer waiter, completed by send_mode_select() */
struct rdac_queue_data {
	struct list_head	entry;
	struct rdac_dh_data	*h;
	activate_complete	callback_fn;
	void			*callback_data;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *kmpath_rdacd;
static void send_mode_select(struct work_struct *work);
/*
 * Module parameter to enable rdac debug logging.
 * 2 bits for each type of logging, only two types defined for now.
 * Can be enhanced if required at a later point.
 */
static int rdac_logging = 1;
module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
		"Default is 1 - failover logging enabled, "
		"set it to 0xF to enable all the logs");

/* Bit positions of the per-type level fields inside rdac_logging */
#define RDAC_LOG_FAILOVER	0
#define RDAC_LOG_SENSE		2

/* Width in bits of each level field */
#define RDAC_LOG_BITS		2

#define RDAC_LOG_LEVEL(SHIFT)  \
	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))

/*
 * Log helper.  Note there is deliberately no semicolon after
 * "while (0)": the caller supplies it, which keeps RDAC_LOG() safe
 * inside unbraced if/else bodies (CERT PRE11-C).
 */
#define RDAC_LOG(SHIFT, sdev, f, arg...) \
do { \
	if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0)
/*
 * Allocate and set up a block-layer packet-command request for talking
 * to the RDAC controller through @sdev.
 * @buffer/@buflen: data buffer mapped into the request (may be 0-length).
 * @rw: READ or WRITE, selects data direction.
 * Returns the request, or NULL on any failure.
 */
static struct request *get_rdac_req(struct scsi_device *sdev,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;

	rq = blk_get_request(q, rw, GFP_NOIO);

	if (IS_ERR(rq)) {
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_get_request failed.\n");
		return NULL;
	}
	blk_rq_set_block_pc(rq);

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
		blk_put_request(rq);
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_rq_map_kern failed.\n");
		return NULL;
	}

	/* Fail fast so multipath can try another path instead of blocking */
	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->retries = RDAC_RETRIES;
	rq->timeout = RDAC_TIMEOUT;

	return rq;
}
/*
 * Build the MODE SELECT request that transfers ownership of every LUN
 * queued on @list to the controller reached via @sdev.  Uses the
 * legacy 6-byte or expanded 10-byte page depending on ctlr->use_ms10.
 * Returns the prepared request, or NULL on allocation failure.
 */
static struct request *rdac_failover_get(struct scsi_device *sdev,
			struct rdac_dh_data *h, struct list_head *list)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;
	struct rdac_queue_data *qdata;
	u8 *lun_table;

	if (h->ctlr->use_ms10) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		/* 0x40 selects the subpage form of the redundant-controller page */
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		lun_table = rdac_pg->lun_table;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		lun_table = rdac_pg->lun_table;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	/* Mark each queued LUN for transfer; 0x81 comes from the LSI
	 * mode page format (not documented here) */
	list_for_each_entry(qdata, list, entry) {
		lun_table[qdata->h->lun] = 0x81;
	}

	/* get request for block layer packet command */
	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq)
		return NULL;

	/* Prepare the command. */
	if (h->ctlr->use_ms10) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	return rq;
}
/*
 * kref release callback: unlink the controller from ctlr_list and free
 * it.  Callers drop references under list_lock (see rdac_bus_attach/
 * rdac_bus_detach), so the list manipulation here is serialized.
 */
static void release_controller(struct kref *kref)
{
	struct rdac_controller *ctlr =
		container_of(kref, struct rdac_controller, kref);

	list_del(&ctlr->node);
	kfree(ctlr);
}
/*
 * Find the rdac_controller matching (array_id, index, host), taking a
 * reference, or create and register a new one.  Returns NULL on
 * allocation failure.  Called under list_lock, hence GFP_ATOMIC.
 */
static struct rdac_controller *get_controller(int index, char *array_name,
			u8 *array_id, struct scsi_device *sdev)
{
	struct rdac_controller *ctlr, *tmp;

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
			  (tmp->index == index) &&
			  (tmp->host == sdev->host)) {
			kref_get(&tmp->kref);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		return NULL;

	/* initialize fields of controller */
	memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
	ctlr->index = index;
	ctlr->host = sdev->host;
	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
	kref_init(&ctlr->kref);
	ctlr->use_ms10 = -1;	/* unknown until set_mode_select() probes it */
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_lock_init(&ctlr->ms_lock);
	INIT_WORK(&ctlr->ms_work, send_mode_select);
	INIT_LIST_HEAD(&ctlr->ms_head);
	list_add(&ctlr->node, &ctlr_list);
	INIT_LIST_HEAD(&ctlr->dh_list);

	return ctlr;
}
/*
 * Issue an EVPD INQUIRY for @page_code on @sdev; the response is
 * written into h->inq.  Returns SCSI_DH_OK or a SCSI_DH_* error code.
 */
static int submit_inquiry(struct scsi_device *sdev, int page_code,
		unsigned int len, struct rdac_dh_data *h)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;
	int err = SCSI_DH_RES_TEMP_UNAVAIL;

	rq = get_rdac_req(sdev, &h->inq, len, READ);
	if (!rq)
		goto done;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;		/* EVPD bit: request a vital product data page */
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	err = blk_execute_rq(q, NULL, rq, 1);
	if (err == -EIO)
		err = SCSI_DH_IO;

	blk_put_request(rq);
done:
	return err;
}
/*
 * Read VPD page 0xC8 ("edid") to learn this device's LUN number and
 * the array's user label and unique id.
 * @array_name: out, ARRAY_LABEL_LEN bytes, NUL-terminated here.
 * @array_id:   out, UNIQUE_ID_LEN bytes, zero-padded.
 * Returns SCSI_DH_OK on success, SCSI_DH_NOSYS if the page does not
 * validate, or the submit_inquiry() error.
 */
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
			char *array_name, u8 *array_id)
{
	int err, i;
	struct c8_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c8;
		if (inqp->page_code != 0xc8)
			return SCSI_DH_NOSYS;
		if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
			return SCSI_DH_NOSYS;
		h->lun = inqp->lun[7]; /* Uses only the last byte */

		/* Label appears to be stored as two-byte characters; keep
		 * only every second byte -- NOTE(review): confirm encoding */
		for(i=0; i<ARRAY_LABEL_LEN-1; ++i)
			*(array_name+i) = inqp->array_user_label[(2*i)+1];

		*(array_name+ARRAY_LABEL_LEN-1) = '\0';
		memset(array_id, 0, UNIQUE_ID_LEN);
		memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
	}
	return err;
}
/*
 * Read VPD page 0xC9 ("vace") and refresh the cached operating mode
 * (RDAC/AVT/IOSHIP), ownership state and preferred-path flag, then
 * propagate the resulting access_state to every sdev attached to the
 * same controller.
 */
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err, access_state;
	struct rdac_dh_data *tmp;
	struct c9_inquiry *inqp;

	h->state = RDAC_STATE_ACTIVE;
	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c9;
		/* detect the operating mode from bits of avte_cvp */
		if ((inqp->avte_cvp >> 5) & 0x1)
			h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
		else if (inqp->avte_cvp >> 7)
			h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
		else
			h->mode = RDAC_MODE; /* LUN in RDAC mode */

		/* Update ownership; only plain RDAC mode makes an unowned
		 * path fully passive (standby) */
		if (inqp->avte_cvp & 0x1) {
			h->lun_state = RDAC_LUN_OWNED;
			access_state = SCSI_ACCESS_STATE_OPTIMAL;
		} else {
			h->lun_state = RDAC_LUN_UNOWNED;
			if (h->mode == RDAC_MODE) {
				h->state = RDAC_STATE_PASSIVE;
				access_state = SCSI_ACCESS_STATE_STANDBY;
			} else
				access_state = SCSI_ACCESS_STATE_ACTIVE;
		}

		/* Update path priority */
		if (inqp->path_prio & 0x1) {
			h->preferred = RDAC_PREFERRED;
			access_state |= SCSI_ACCESS_STATE_PREFERRED;
		} else
			h->preferred = RDAC_NON_PREFERRED;

		/* Mirror the new access state onto all sibling devices */
		rcu_read_lock();
		list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) {
			/* h->sdev should always be valid */
			BUG_ON(!tmp->sdev);
			tmp->sdev->access_state = access_state;
		}
		rcu_read_unlock();
	}

	return err;
}
/*
 * Determine which controller slot (0 or 1) services this device from
 * VPD page 0xC4, then bind h to the matching rdac_controller (creating
 * one if necessary) and link h onto the controller's dh_list.
 */
static int initialize_controller(struct scsi_device *sdev,
		struct rdac_dh_data *h, char *array_name, u8 *array_id)
{
	int err, index;
	struct c4_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c4;
		/* get the controller index: 0x31 is ASCII '1' */
		if (inqp->slot_id[1] == 0x31)
			index = 0;
		else
			index = 1;

		spin_lock(&list_lock);
		h->ctlr = get_controller(index, array_name, array_id, sdev);
		if (!h->ctlr)
			err = SCSI_DH_RES_TEMP_UNAVAIL;
		else {
			list_add_rcu(&h->node, &h->ctlr->dh_list);
			h->sdev = sdev;
		}
		spin_unlock(&list_lock);
	}
	return err;
}
/*
 * Probe VPD page 0xC2 ("swr4") and record on the controller whether
 * failovers must use the 10-byte MODE SELECT (arrays exposing more
 * LUNs than the legacy 6-byte page can describe) or the 6-byte one.
 */
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	struct c2_inquiry *inqp;
	int err;

	err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
	if (err != SCSI_DH_OK)
		return err;

	inqp = &h->inq.c2;
	/*
	 * If more than MODE6_MAX_LUN luns are supported, use
	 * mode select 10
	 */
	h->ctlr->use_ms10 = (inqp->max_lun_supported >= MODE6_MAX_LUN) ? 1 : 0;

	return err;
}
/*
 * Translate the sense data of a failed MODE SELECT into a SCSI_DH_*
 * decision: retry, immediate retry, or hard I/O error (the default
 * when the sense is unrecognized or cannot be normalized).
 */
static int mode_select_handle_sense(struct scsi_device *sdev,
					unsigned char *sensebuf)
{
	struct scsi_sense_hdr sense_hdr;
	int err = SCSI_DH_IO, ret;
	struct rdac_dh_data *h = sdev->handler_data;

	ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
	if (!ret)
		goto done;

	switch (sense_hdr.sense_key) {
	case NO_SENSE:
	case ABORTED_COMMAND:
	case UNIT_ATTENTION:
		err = SCSI_DH_RETRY;
		break;
	case NOT_READY:
		if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
			/* LUN Not Ready and is in the Process of Becoming
			 * Ready
			 */
			err = SCSI_DH_RETRY;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
			/*
			 * Command Lock contention
			 */
			err = SCSI_DH_IMM_RETRY;
		break;
	default:
		break;
	}

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"MODE_SELECT returned with sense %02x/%02x/%02x",
		(char *) h->ctlr->array_name, h->ctlr->index,
		sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);

done:
	return err;
}
/*
 * Workqueue handler: issue the ownership-transfer MODE SELECT covering
 * every waiter queued on the controller, retrying up to
 * RDAC_RETRY_COUNT times on retryable sense, then invoke each waiter's
 * completion callback with the final result.
 */
static void send_mode_select(struct work_struct *work)
{
	struct rdac_controller *ctlr =
		container_of(work, struct rdac_controller, ms_work);
	struct request *rq;
	struct scsi_device *sdev = ctlr->ms_sdev;
	struct rdac_dh_data *h = sdev->handler_data;
	struct request_queue *q = sdev->request_queue;
	int err, retry_cnt = RDAC_RETRY_COUNT;
	struct rdac_queue_data *tmp, *qdata;
	LIST_HEAD(list);

	/* Steal the queued waiters so new requests can schedule a fresh
	 * work item while we run */
	spin_lock(&ctlr->ms_lock);
	list_splice_init(&ctlr->ms_head, &list);
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_unlock(&ctlr->ms_lock);

retry:
	err = SCSI_DH_RES_TEMP_UNAVAIL;
	rq = rdac_failover_get(sdev, h, &list);
	if (!rq)
		goto done;

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"%s MODE_SELECT command",
		(char *) h->ctlr->array_name, h->ctlr->index,
		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");

	err = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (err != SCSI_DH_OK) {
		/* Retries on RETRY are bounded; IMM_RETRY (command lock
		 * contention) retries without decrementing the budget */
		err = mode_select_handle_sense(sdev, h->sense);
		if (err == SCSI_DH_RETRY && retry_cnt--)
			goto retry;
		if (err == SCSI_DH_IMM_RETRY)
			goto retry;
	}
	if (err == SCSI_DH_OK) {
		h->state = RDAC_STATE_ACTIVE;
		RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
			"MODE_SELECT completed",
			(char *) h->ctlr->array_name, h->ctlr->index);
	}

done:
	/* Complete every waiter, successfully or not */
	list_for_each_entry_safe(qdata, tmp, &list, entry) {
		list_del(&qdata->entry);
		if (err == SCSI_DH_OK)
			qdata->h->state = RDAC_STATE_ACTIVE;
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	return;
}
/*
 * Queue an ownership transfer for @sdev's LUN.  The first waiter kicks
 * the controller's work item; later waiters just join ms_head and are
 * completed by the same MODE SELECT.  @fn is invoked with the result
 * from send_mode_select().
 */
static int queue_mode_select(struct scsi_device *sdev,
				activate_complete fn, void *data)
{
	struct rdac_queue_data *qdata;
	struct rdac_controller *ctlr;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata)
		return SCSI_DH_RETRY;

	qdata->h = sdev->handler_data;
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	ctlr = qdata->h->ctlr;
	spin_lock(&ctlr->ms_lock);
	list_add_tail(&qdata->entry, &ctlr->ms_head);
	if (!ctlr->ms_queued) {
		ctlr->ms_queued = 1;
		ctlr->ms_sdev = sdev;
		queue_work(kmpath_rdacd, &ctlr->ms_work);
	}
	spin_unlock(&ctlr->ms_lock);

	return SCSI_DH_OK;
}
/*
 * scsi_dh activate entry point: re-read ownership and, when the LUN is
 * unowned (and, in IOSHIP mode, this is the preferred path), queue a
 * MODE SELECT to take it over.  @fn is invoked with the outcome either
 * directly here or later from the failover workqueue.
 */
static int rdac_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct rdac_dh_data *h = sdev->handler_data;
	int err = SCSI_DH_OK;
	int act = 0;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto done;

	switch (h->mode) {
	case RDAC_MODE:
		if (h->lun_state == RDAC_LUN_UNOWNED)
			act = 1;
		break;
	case RDAC_MODE_IOSHIP:
		if ((h->lun_state == RDAC_LUN_UNOWNED) &&
		    (h->preferred == RDAC_PREFERRED))
			act = 1;
		break;
	default:
		break;
	}

	if (act) {
		err = queue_mode_select(sdev, fn, data);
		if (err == SCSI_DH_OK)
			return 0;	/* callback fires from the workqueue */
	}
done:
	if (fn)
		fn(data, err);
	return 0;
}
/*
 * scsi_dh prep entry point: allow I/O only while the path is active;
 * on a passive path, kill the request quietly so multipath retries it
 * on another path without log noise.
 */
static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct rdac_dh_data *h = sdev->handler_data;

	if (h->state == RDAC_STATE_ACTIVE)
		return BLKPREP_OK;

	req->cmd_flags |= REQ_QUIET;
	return BLKPREP_KILL;
}
/*
 * scsi_dh check_sense entry point: classify sense data from failed I/O
 * and tell the midlayer to retry (ADD_TO_MLQUEUE), stop handling the
 * command on this path (SUCCESS, after marking it passive where
 * appropriate), or fall through to default handling.
 */
static int rdac_check_sense(struct scsi_device *sdev,
			    struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = sdev->handler_data;

	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
		"I/O returned with sense %02x/%02x/%02x",
		(char *) h->ctlr->array_name, h->ctlr->index,
		sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
			/* LUN Not Ready - Logical Unit Not Ready and is in
			 * the process of becoming ready
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
			/* LUN Not Ready - Storage firmware incompatible
			 * Manual code synchonisation required.
			 *
			 * Nothing we can do here. Try to bypass the path.
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
			/* LUN Not Ready - Quiescense in progress
			 *
			 * Just retry and wait.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
			/* LUN Not Ready - Quiescense in progress
			 * or has been achieved
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
			/* Invalid Request - Current Logical Unit Ownership.
			 * Controller is not the current owner of the LUN,
			 * Fail the path, so that the other path be used.
			 */
			h->state = RDAC_STATE_PASSIVE;
			return SUCCESS;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
			/*
			 * Quiescence in progress , just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}
	/* success just means we do not care what scsi-ml does */
	return SCSI_RETURN_NOT_HANDLED;
}
/*
 * scsi_dh attach entry point: allocate per-device state, discover the
 * LUN and array identity (VPD 0xC8), bind to the owning controller
 * (VPD 0xC4), read initial ownership (VPD 0xC9) and choose the mode
 * select flavor (VPD 0xC2).  All discovery failures are reported to
 * the core as -EINVAL; only allocation failure yields -ENOMEM.
 */
static int rdac_bus_attach(struct scsi_device *sdev)
{
	struct rdac_dh_data *h;
	int err;
	char array_name[ARRAY_LABEL_LEN];
	char array_id[UNIQUE_ID_LEN];

	h = kzalloc(sizeof(*h) , GFP_KERNEL);
	if (!h)
		return -ENOMEM;
	h->lun = UNINITIALIZED_LUN;
	h->state = RDAC_STATE_ACTIVE;

	err = get_lun_info(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = initialize_controller(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	err = set_mode_select(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	sdev_printk(KERN_NOTICE, sdev,
		    "%s: LUN %d (%s) (%s)\n",
		    RDAC_NAME, h->lun, mode[(int)h->mode],
		    lun_state[(int)h->lun_state]);

	sdev->handler_data = h;
	return 0;

clean_ctlr:
	/* Drop the controller reference taken in initialize_controller() */
	spin_lock(&list_lock);
	kref_put(&h->ctlr->kref, release_controller);
	spin_unlock(&list_lock);

failed:
	kfree(h);
	return -EINVAL;
}
/*
 * scsi_dh detach entry point: wait out any queued MODE SELECT on this
 * controller, unlink from its dh_list, drop the controller reference
 * and free the per-device state.
 */
static void rdac_bus_detach( struct scsi_device *sdev )
{
	struct rdac_dh_data *h = sdev->handler_data;

	if (h->ctlr && h->ctlr->ms_queued)
		flush_workqueue(kmpath_rdacd);

	spin_lock(&list_lock);
	if (h->ctlr) {
		list_del_rcu(&h->node);
		h->sdev = NULL;
		kref_put(&h->ctlr->kref, release_controller);
	}
	spin_unlock(&list_lock);
	sdev->handler_data = NULL;
	kfree(h);
}
/* scsi_dh entry points for the RDAC handler */
static struct scsi_device_handler rdac_dh = {
	.name = RDAC_NAME,
	.module = THIS_MODULE,
	.prep_fn = rdac_prep_fn,
	.check_sense = rdac_check_sense,
	.attach = rdac_bus_attach,
	.detach = rdac_bus_detach,
	.activate = rdac_activate,
};
/*
 * Module init: register the device handler, then create the
 * single-threaded workqueue that serializes MODE SELECT failovers.
 * The workqueue is created second so registration failure needs no
 * cleanup; workqueue failure unregisters the handler again.
 */
static int __init rdac_init(void)
{
	int r;

	r = scsi_register_device_handler(&rdac_dh);
	if (r != 0) {
		printk(KERN_ERR "Failed to register scsi device handler.\n");
		goto done;
	}

	/*
	 * Create workqueue to handle mode selects for rdac
	 */
	kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
	if (!kmpath_rdacd) {
		scsi_unregister_device_handler(&rdac_dh);
		printk(KERN_ERR "kmpath_rdacd creation failed.\n");

		r = -EINVAL;
	}
done:
	return r;
}
/* Module exit: tear down the failover workqueue, then unregister. */
static void __exit rdac_exit(void)
{
	destroy_workqueue(kmpath_rdacd);
	scsi_unregister_device_handler(&rdac_dh);
}
module_init(rdac_init);
module_exit(rdac_exit);
MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");
| gpl-2.0 |
pbeeler/Linux-stable | fs/fuse/cuse.c | 468 | 15958 | /*
* CUSE: Character device in Userspace
*
* Copyright (C) 2008-2009 SUSE Linux Products GmbH
* Copyright (C) 2008-2009 Tejun Heo <tj@kernel.org>
*
* This file is released under the GPLv2.
*
* CUSE enables character devices to be implemented from userland much
* like FUSE allows filesystems. On initialization /dev/cuse is
* created. By opening the file and replying to the CUSE_INIT request
* userland CUSE server can create a character device. After that the
* operation is very similar to FUSE.
*
* A CUSE instance involves the following objects.
*
* cuse_conn : contains fuse_conn and serves as bonding structure
* channel : file handle connected to the userland CUSE server
* cdev : the implemented character device
* dev : generic device for cdev
*
* Note that 'channel' is what 'dev' is in FUSE. As CUSE deals with
* devices, it's called 'channel' to reduce confusion.
*
* channel determines when the character device dies. When channel is
* closed, everything begins to destruct. The cuse_conn is taken off
* the lookup table preventing further access from cdev, cdev and
* generic device are removed and the base reference of cuse_conn is
* put.
*
* On each open, the matching cuse_conn is looked up and if found an
* additional reference is taken which is released when the file is
* closed.
*/
#include <linux/fuse.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/aio.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/magic.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/module.h>
#include "fuse_i.h"
#define CUSE_CONNTBL_LEN	64

/* One CUSE server instance: a FUSE connection bonded to its chardev */
struct cuse_conn {
	struct list_head	list;	/* linked on cuse_conntbl */
	struct fuse_conn	fc;	/* fuse connection */
	struct cdev		*cdev;	/* associated character device */
	struct device		*dev;	/* device representing @cdev */

	/* init parameters, set once during initialization */
	bool			unrestricted_ioctl;
};

static DEFINE_MUTEX(cuse_lock);		/* protects registration */
static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN];
static struct class *cuse_class;
/* Map an embedded fuse_conn back to its owning cuse_conn */
static struct cuse_conn *fc_to_cc(struct fuse_conn *fc)
{
	return container_of(fc, struct cuse_conn, fc);
}
/* Hash a device number into its cuse_conntbl bucket */
static struct list_head *cuse_conntbl_head(dev_t devt)
{
	unsigned int bucket = (MAJOR(devt) + MINOR(devt)) % CUSE_CONNTBL_LEN;

	return &cuse_conntbl[bucket];
}
/**************************************************************************
* CUSE frontend operations
*
* These are file operations for the character device.
*
* On open, CUSE opens a file from the FUSE mnt and stores it to
* private_data of the open file. All other ops call FUSE ops on the
* FUSE file.
*/
/*
 * Character-device read: forward to FUSE direct I/O.  The position is
 * pinned to 0 and *ppos is ignored.
 */
static ssize_t cuse_read(struct file *file, char __user *buf, size_t count,
			 loff_t *ppos)
{
	loff_t pos = 0;
	struct iovec iov = { .iov_base = buf, .iov_len = count };
	struct fuse_io_priv io = { .async = 0, .file = file };
	struct iov_iter ii;
	iov_iter_init(&ii, READ, &iov, 1, count);

	return fuse_direct_io(&io, &ii, &pos, FUSE_DIO_CUSE);
}
/*
 * Character-device write: forward to FUSE direct I/O.  The position is
 * pinned to 0 and *ppos is ignored.
 */
static ssize_t cuse_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	loff_t pos = 0;
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
	struct fuse_io_priv io = { .async = 0, .file = file };
	struct iov_iter ii;
	iov_iter_init(&ii, WRITE, &iov, 1, count);

	/*
	 * No locking or generic_write_checks(), the server is
	 * responsible for locking and sanity checks.
	 */
	return fuse_direct_io(&io, &ii, &pos,
			      FUSE_DIO_WRITE | FUSE_DIO_CUSE);
}
/*
 * Character-device open: look up the live cuse_conn for this devt in
 * the connection table, take a connection reference and open the FUSE
 * file.  The reference is dropped on failure here, or in
 * cuse_release() otherwise.
 */
static int cuse_open(struct inode *inode, struct file *file)
{
	dev_t devt = inode->i_cdev->dev;
	struct cuse_conn *cc = NULL, *pos;
	int rc;

	/* look up and get the connection */
	mutex_lock(&cuse_lock);
	list_for_each_entry(pos, cuse_conntbl_head(devt), list)
		if (pos->dev->devt == devt) {
			fuse_conn_get(&pos->fc);
			cc = pos;
			break;
		}
	mutex_unlock(&cuse_lock);

	/* dead? */
	if (!cc)
		return -ENODEV;

	/*
	 * Generic permission check is already done against the chrdev
	 * file, proceed to open.
	 */
	rc = fuse_do_open(&cc->fc, 0, file, 0);
	if (rc)
		fuse_conn_put(&cc->fc);

	return rc;
}
/*
 * Character-device release: close the server-side open, then drop the
 * connection reference taken in cuse_open().  The connection pointer
 * is saved first because the fuse_file is consumed by the release.
 */
static int cuse_release(struct inode *inode, struct file *file)
{
	struct fuse_file *ffile = file->private_data;
	struct fuse_conn *fconn = ffile->fc;

	fuse_sync_release(ffile, file->f_flags);
	fuse_conn_put(fconn);

	return 0;
}
/*
 * ioctl on the character device: forward to the FUSE ioctl path,
 * enabling unrestricted mode when the server requested it at init.
 */
static long cuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct fuse_file *ff = file->private_data;
	struct cuse_conn *cc = fc_to_cc(ff->fc);
	unsigned int flags =
		cc->unrestricted_ioctl ? FUSE_IOCTL_UNRESTRICTED : 0;

	return fuse_do_ioctl(file, cmd, arg, flags);
}
/* compat (32-bit) ioctl on a CUSE device node. */
static long cuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	struct fuse_file *ff = file->private_data;
	struct cuse_conn *cc = fc_to_cc(ff->fc);
	/* always flag compat; add unrestricted mode if the server asked */
	unsigned int flags = FUSE_IOCTL_COMPAT |
		(cc->unrestricted_ioctl ? FUSE_IOCTL_UNRESTRICTED : 0);

	return fuse_do_ioctl(file, cmd, arg, flags);
}
/* File operations installed on every CUSE-created character device node. */
static const struct file_operations cuse_frontend_fops = {
	.owner		= THIS_MODULE,
	.read		= cuse_read,
	.write		= cuse_write,
	.open		= cuse_open,
	.release	= cuse_release,
	.unlocked_ioctl	= cuse_file_ioctl,
	.compat_ioctl	= cuse_file_compat_ioctl,
	.poll		= fuse_file_poll,	/* poll is handled by FUSE core */
	.llseek		= noop_llseek,		/* char device: seeking is a no-op */
};
/**************************************************************************
* CUSE channel initialization and destruction
*/
/* Device info parsed out of the CUSE_INIT reply's info string. */
struct cuse_devinfo {
	const char *name;	/* DEVNAME value; points into the init reply page */
};
/**
* cuse_parse_one - parse one key=value pair
* @pp: i/o parameter for the current position
* @end: points to one past the end of the packed string
* @keyp: out parameter for key
* @valp: out parameter for value
*
* *@pp points to packed strings - "key0=val0\0key1=val1\0" which ends
* at @end - 1. This function parses one pair and set *@keyp to the
* start of the key and *@valp to the start of the value. Note that
* the original string is modified such that the key string is
* terminated with '\0'. *@pp is updated to point to the next string.
*
* RETURNS:
* 1 on successful parse, 0 on EOF, -errno on failure.
*/
static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp)
{
	char *p = *pp;
	char *key, *val;

	/* skip NUL padding left over from previously consumed pairs */
	while (p < end && *p == '\0')
		p++;
	if (p == end)
		return 0;	/* EOF: all pairs consumed */

	/* the packed string area must end with a terminating NUL */
	if (end[-1] != '\0') {
		printk(KERN_ERR "CUSE: info not properly terminated\n");
		return -EINVAL;
	}

	key = val = p;
	p += strlen(p);		/* p now points at this pair's NUL */

	if (valp) {
		/* split "key=val" in place; strsep NUL-terminates the key */
		strsep(&val, "=");
		if (!val)
			val = key + strlen(key);	/* no '=': empty value */
		key = strstrip(key);
		val = strstrip(val);
	} else
		key = strstrip(key);

	if (!strlen(key)) {
		printk(KERN_ERR "CUSE: zero length info key specified\n");
		return -EINVAL;
	}

	*pp = p;	/* advance the caller's cursor past this pair */
	*keyp = key;
	if (valp)
		*valp = val;

	return 1;
}
/**
* cuse_parse_dev_info - parse device info
* @p: device info string
* @len: length of device info string
* @devinfo: out parameter for parsed device info
*
* Parse @p to extract device info and store it into @devinfo. String
* pointed to by @p is modified by parsing and @devinfo points into
* them, so @p shouldn't be freed while @devinfo is in use.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo)
{
	char *end = p + len;
	char *uninitialized_var(key), *uninitialized_var(val);
	int rc;

	/* walk the packed "key=val\0" pairs until EOF or error */
	for (;;) {
		rc = cuse_parse_one(&p, end, &key, &val);
		if (rc < 0)
			return rc;		/* malformed info area */
		if (rc == 0)
			break;			/* all pairs consumed */
		if (!strcmp(key, "DEVNAME"))
			devinfo->name = val;
		else
			printk(KERN_WARNING "CUSE: unknown device info \"%s\"\n",
			       key);
	}

	/* DEVNAME is mandatory and must be non-empty */
	if (!devinfo->name || !strlen(devinfo->name)) {
		printk(KERN_ERR "CUSE: DEVNAME unspecified\n");
		return -EINVAL;
	}

	return 0;
}
/* ->release callback for the CUSE struct device; it was kzalloc'ed in
 * cuse_process_init_reply(), so a plain kfree suffices. */
static void cuse_gendev_release(struct device *dev)
{
	kfree(dev);
}
/**
* cuse_process_init_reply - finish initializing CUSE channel
*
* This function creates the character device and sets up all the
* required data structures for it. Please read the comment at the
* top of this file for high level overview.
*/
static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
	struct cuse_conn *cc = fc_to_cc(fc), *pos;
	struct cuse_init_out *arg = req->out.args[0].value;
	struct page *page = req->pages[0];	/* holds the device info string */
	struct cuse_devinfo devinfo = { };
	struct device *dev;
	struct cdev *cdev;
	dev_t devt;
	int rc, i;

	/* CUSE needs FUSE protocol >= 7.11; bail out on error or mismatch */
	if (req->out.h.error ||
	    arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) {
		goto err;
	}

	fc->minor = arg->minor;
	/* enforce sane lower bounds on the negotiated transfer sizes */
	fc->max_read = max_t(unsigned, arg->max_read, 4096);
	fc->max_write = max_t(unsigned, arg->max_write, 4096);

	/* parse init reply */
	cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL;

	rc = cuse_parse_devinfo(page_address(page), req->out.args[1].size,
				&devinfo);
	if (rc)
		goto err;

	/* determine and reserve devt */
	devt = MKDEV(arg->dev_major, arg->dev_minor);
	if (!MAJOR(devt))
		/* server didn't request a major: let the kernel pick one */
		rc = alloc_chrdev_region(&devt, MINOR(devt), 1, devinfo.name);
	else
		rc = register_chrdev_region(devt, 1, devinfo.name);
	if (rc) {
		printk(KERN_ERR "CUSE: failed to register chrdev region\n");
		goto err;
	}

	/* devt determined, create device */
	rc = -ENOMEM;
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		goto err_region;

	device_initialize(dev);
	/* hold back the ADD uevent until the cdev is fully wired up */
	dev_set_uevent_suppress(dev, 1);
	dev->class = cuse_class;
	dev->devt = devt;
	dev->release = cuse_gendev_release;
	dev_set_drvdata(dev, cc);
	dev_set_name(dev, "%s", devinfo.name);

	mutex_lock(&cuse_lock);

	/* make sure the device-name is unique */
	for (i = 0; i < CUSE_CONNTBL_LEN; ++i) {
		list_for_each_entry(pos, &cuse_conntbl[i], list)
			if (!strcmp(dev_name(pos->dev), dev_name(dev)))
				goto err_unlock;
	}

	rc = device_add(dev);
	if (rc)
		goto err_unlock;

	/* register cdev */
	rc = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev)
		goto err_unlock;

	cdev->owner = THIS_MODULE;
	cdev->ops = &cuse_frontend_fops;

	rc = cdev_add(cdev, devt, 1);
	if (rc)
		goto err_cdev;

	cc->dev = dev;
	cc->cdev = cdev;

	/* make the device available */
	list_add(&cc->list, cuse_conntbl_head(devt));
	mutex_unlock(&cuse_lock);

	/* announce device availability */
	dev_set_uevent_suppress(dev, 0);
	kobject_uevent(&dev->kobj, KOBJ_ADD);
out:
	/* arg and page were allocated by cuse_send_init(); we own them now */
	kfree(arg);
	__free_page(page);
	return;

err_cdev:
	cdev_del(cdev);
err_unlock:
	mutex_unlock(&cuse_lock);
	put_device(dev);	/* frees dev via cuse_gendev_release */
err_region:
	unregister_chrdev_region(devt, 1);
err:
	/* initialization failed: tear down the whole connection */
	fuse_conn_kill(fc);
	goto out;
}
/*
 * Send the CUSE_INIT request in the background.  The reply is handled
 * asynchronously by cuse_process_init_reply(), which takes ownership of
 * both @outarg and @page once the request has been queued.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int cuse_send_init(struct cuse_conn *cc)
{
	int rc;
	struct fuse_req *req;
	struct page *page;
	struct fuse_conn *fc = &cc->fc;
	struct cuse_init_in *arg;
	void *outarg;

	/* the variable-length device info must fit into a single page */
	BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);

	req = fuse_get_req_for_background(fc, 1);
	if (IS_ERR(req)) {
		rc = PTR_ERR(req);
		goto err;
	}

	rc = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto err_put_req;

	outarg = kzalloc(sizeof(struct cuse_init_out), GFP_KERNEL);
	if (!outarg)
		goto err_free_page;

	arg = &req->misc.cuse_init_in;
	arg->major = FUSE_KERNEL_VERSION;
	arg->minor = FUSE_KERNEL_MINOR_VERSION;
	arg->flags |= CUSE_UNRESTRICTED_IOCTL;
	req->in.h.opcode = CUSE_INIT;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct cuse_init_in);
	req->in.args[0].value = arg;
	req->out.numargs = 2;
	req->out.args[0].size = sizeof(struct cuse_init_out);
	req->out.args[0].value = outarg;
	/* out arg 1 is the device info string, received into @page */
	req->out.args[1].size = CUSE_INIT_INFO_MAX;
	req->out.argvar = 1;
	req->out.argpages = 1;
	req->pages[0] = page;
	req->page_descs[0].length = req->out.args[1].size;
	req->num_pages = 1;
	req->end = cuse_process_init_reply;	/* completion callback */
	fuse_request_send_background(fc, req);

	return 0;

err_free_page:
	__free_page(page);
err_put_req:
	fuse_put_request(fc, req);
err:
	return rc;
}
/* ->release for the embedded fuse_conn: frees the containing cuse_conn
 * after an RCU grace period (readers may still hold RCU references). */
static void cuse_fc_release(struct fuse_conn *fc)
{
	struct cuse_conn *cc = fc_to_cc(fc);
	kfree_rcu(cc, fc.rcu);
}
/**
* cuse_channel_open - open method for /dev/cuse
* @inode: inode for /dev/cuse
* @file: file struct being opened
*
* Userland CUSE server can create a CUSE device by opening /dev/cuse
* and replying to the initialization request kernel sends. This
* function is responsible for handling CUSE device initialization.
* Because the fd opened by this function is used during
* initialization, this function only creates cuse_conn and sends
* init. The rest is delegated to a kthread.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int cuse_channel_open(struct inode *inode, struct file *file)
{
	struct cuse_conn *cc;
	int rc;

	/* set up cuse_conn */
	cc = kzalloc(sizeof(*cc), GFP_KERNEL);
	if (!cc)
		return -ENOMEM;

	fuse_conn_init(&cc->fc);

	INIT_LIST_HEAD(&cc->list);
	cc->fc.release = cuse_fc_release;

	/* mark the connection live before queueing the init request */
	cc->fc.connected = 1;
	cc->fc.initialized = 1;
	rc = cuse_send_init(cc);
	if (rc) {
		/* drops the base reference; cc is freed via cuse_fc_release */
		fuse_conn_put(&cc->fc);
		return rc;
	}
	file->private_data = &cc->fc;	/* channel owns base reference to cc */

	return 0;
}
/**
* cuse_channel_release - release method for /dev/cuse
* @inode: inode for /dev/cuse
* @file: file struct being closed
*
* Disconnect the channel, deregister CUSE device and initiate
* destruction by putting the default reference.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int cuse_channel_release(struct inode *inode, struct file *file)
{
	struct cuse_conn *cc = fc_to_cc(file->private_data);
	int rc;

	/* remove from the conntbl, no more access from this point on */
	mutex_lock(&cuse_lock);
	list_del_init(&cc->list);
	mutex_unlock(&cuse_lock);

	/* remove device; dev/cdev are only set if CUSE_INIT succeeded */
	if (cc->dev)
		device_unregister(cc->dev);
	if (cc->cdev) {
		unregister_chrdev_region(cc->cdev->dev, 1);
		cdev_del(cc->cdev);
	}

	rc = fuse_dev_release(inode, file);	/* puts the base reference */

	return rc;
}
static struct file_operations cuse_channel_fops; /* initialized during init */
/**************************************************************************
 * Misc stuff and module initialization
*
* CUSE exports the same set of attributes to sysfs as fusectl.
*/
/* sysfs "waiting" attribute: reports the connection's num_waiting counter. */
static ssize_t cuse_class_waiting_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct cuse_conn *cc = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&cc->fc.num_waiting));
}
static DEVICE_ATTR(waiting, 0400, cuse_class_waiting_show, NULL);
/* sysfs "abort" attribute: any write aborts the FUSE connection. */
static ssize_t cuse_class_abort_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct cuse_conn *cc = dev_get_drvdata(dev);

	fuse_abort_conn(&cc->fc);
	return count;	/* the written data itself is ignored */
}
static DEVICE_ATTR(abort, 0200, NULL, cuse_class_abort_store);
static struct attribute *cuse_class_dev_attrs[] = {
&dev_attr_waiting.attr,
&dev_attr_abort.attr,
NULL,
};
ATTRIBUTE_GROUPS(cuse_class_dev);
static struct miscdevice cuse_miscdev = {
.minor = CUSE_MINOR,
.name = "cuse",
.fops = &cuse_channel_fops,
};
MODULE_ALIAS_MISCDEV(CUSE_MINOR);
MODULE_ALIAS("devname:cuse");
/* Module init: set up connection table, channel fops, class and /dev/cuse. */
static int __init cuse_init(void)
{
	int rc, i;

	/* every connection-table bucket starts out empty */
	for (i = 0; i < CUSE_CONNTBL_LEN; i++)
		INIT_LIST_HEAD(&cuse_conntbl[i]);

	/* inherit and extend fuse_dev_operations */
	cuse_channel_fops = fuse_dev_operations;
	cuse_channel_fops.owner = THIS_MODULE;
	cuse_channel_fops.open = cuse_channel_open;
	cuse_channel_fops.release = cuse_channel_release;

	cuse_class = class_create(THIS_MODULE, "cuse");
	if (IS_ERR(cuse_class))
		return PTR_ERR(cuse_class);

	cuse_class->dev_groups = cuse_class_dev_groups;

	rc = misc_register(&cuse_miscdev);
	if (rc)
		goto err_class;

	return 0;

err_class:
	class_destroy(cuse_class);
	return rc;
}
/* Module unload: unregister /dev/cuse, then drop the device class. */
static void __exit cuse_exit(void)
{
	misc_deregister(&cuse_miscdev);
	class_destroy(cuse_class);
}
module_init(cuse_init);
module_exit(cuse_exit);
MODULE_AUTHOR("Tejun Heo <tj@kernel.org>");
MODULE_DESCRIPTION("Character device in Userspace");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jrfastab/rocker-net-next | arch/tile/kernel/single_step.c | 468 | 22119 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* A code-rewriter that enables instruction single-stepping.
*/
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/prctl.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <arch/abi.h>
#include <arch/spr_def.h>
#include <arch/opcode.h>
#ifndef __tilegx__ /* Hardware support for single step unavailable. */
#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)
enum mem_op {
MEMOP_NONE,
MEMOP_LOAD,
MEMOP_STORE,
MEMOP_LOAD_POSTINCR,
MEMOP_STORE_POSTINCR
};
/* Return @n with its X1 branch offset replaced by @offset. */
static inline tilepro_bundle_bits set_BrOff_X1(tilepro_bundle_bits n,
					       s32 offset)
{
	/* create_BrOff_X1(-1) yields a mask covering the offset field */
	tilepro_bundle_bits mask = create_BrOff_X1(-1);

	return (n & ~mask) | create_BrOff_X1(offset);
}
/* Return @n with its X1 slot rewritten as "move dest, src". */
static inline tilepro_bundle_bits move_X1(tilepro_bundle_bits n, int dest,
					  int src)
{
	/* "move" is encoded as "or dest, src, zero" on tilepro */
	tilepro_bundle_bits op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src);

	return (n & ~TILE_X1_MASK) | op;
}
/* Return @n with its X1 slot replaced by a no-op ("move zero, zero"). */
static inline tilepro_bundle_bits nop_X1(tilepro_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}
/* Return @n with its X1 slot rewritten as "addi dest, src, imm". */
static inline tilepro_bundle_bits addi_X1(
	tilepro_bundle_bits n, int dest, int src, int imm)
{
	tilepro_bundle_bits op = create_SrcA_X1(src) |
		create_Dest_X1(dest) |
		create_Imm8_X1(imm) |
		create_S_X1(0) |
		create_Opcode_X1(IMM_0_OPCODE_X1) |
		create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1);

	return (n & ~TILE_X1_MASK) | op;
}
/*
 * Emulate an unaligned load/store in the kernel, then return a rewritten
 * bundle with the memory operation neutralized (nop / prefetch / addi for
 * the post-increment side effect).  Returns the bundle unchanged if the
 * access is aligned or the registers can't be handled, and 0 if a signal
 * was generated instead (SIGBUS policy or a faulting address).
 */
static tilepro_bundle_bits rewrite_load_store_unaligned(
	struct single_step_state *state,
	tilepro_bundle_bits bundle,
	struct pt_regs *regs,
	enum mem_op mem_op,
	int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;
	int align_ctl;

	/* per-task prctl() policy overrides the global unaligned_fixup */
	align_ctl = unaligned_fixup;
	switch (task_thread_info(current)->align_ctl) {
	case PR_UNALIGN_NOPRINT:
		align_ctl = 1;
		break;
	case PR_UNALIGN_SIGBUS:
		align_ctl = 0;
		break;
	}

	/* Get address and value registers */
	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

	/*
	 * Return SIGBUS with the unaligned address, if requested.
	 * Note that we return SIGBUS even for completely invalid addresses
	 * as long as they are in fact unaligned; this matches what the
	 * tilepro hardware would be doing, if it could provide us with the
	 * actual bad address in an SPR, which it doesn't.
	 */
	if (align_ctl == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("unaligned trap", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tilepro_bundle_bits) 0;
	}

	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			/* defer the register write-back until we return
			 * from the single-stepped bundle */
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		unsigned short val_16;
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		switch (size) {
		case 2:
			val_16 = val;
			err = copy_to_user(addr, &val_16, sizeof(val_16));
			break;
		case 4:
			err = copy_to_user(addr, &val, sizeof(val));
			break;
		default:
			BUG();
		}
	}

	if (err) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("bad address for unaligned fixup", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tilepro_bundle_bits) 0;
	}

	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n",
			current->pid, current->comm, regs->pc,
			mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
			/* one-time banner explaining the fixup sysctls */
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}
/*
* Called after execve() has started the new image. This allows us
 * to reset the info state.  Note that the mmap'ed memory, if there
* was any, has already been unmapped by the exec.
*/
void single_step_execve(void)
{
	struct thread_info *ti = current_thread_info();
	kfree(ti->step_state);
	/* force single_step_once() to re-allocate state for the new image */
	ti->step_state = NULL;
}
/*
* single_step_once() - entry point when single stepping has been triggered.
* @regs: The machine register state
*
* When we arrive at this routine via a trampoline, the single step
* engine copies the executing bundle to the single step buffer.
* If the instruction is a condition branch, then the target is
* reset to one past the next instruction. If the instruction
* sets the lr, then that is noted. If the instruction is a jump
* or call, then the new target pc is preserved and the current
* bundle instruction set to null.
*
* The necessary post-single-step rewriting information is stored in
* single_step_state-> We use data segment values because the
* stack will be rewound when we run the rewritten single-stepped
* instruction.
*/
void single_step_once(struct pt_regs *regs)
{
	/* template bundles defined in the asm() block below */
	extern tilepro_bundle_bits __single_step_ill_insn;
	extern tilepro_bundle_bits __single_step_j_insn;
	extern tilepro_bundle_bits __single_step_addli_insn;
	extern tilepro_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tilepro_bundle_bits __user *buffer, *pc;
	tilepro_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;	/* happy compiler */
	int align_ctl;

	/* per-task prctl() policy overrides the global unaligned_fixup */
	align_ctl = unaligned_fixup;
	switch (task_thread_info(current)->align_ctl) {
	case PR_UNALIGN_NOPRINT:
		align_ctl = 1;
		break;
	case PR_UNALIGN_SIGBUS:
		align_ctl = 0;
		break;
	}

	/* instruction templates copied into the single-step buffer */
	asm(
" .pushsection .rodata.single_step\n"
" .align 8\n"
" .globl __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
" ill\n"
" .globl __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
" { nop; addli r0, zero, 0 }\n"
" .globl __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
" { nop; auli r0, r0, 0 }\n"
" .globl __single_step_j_insn\n"
"__single_step_j_insn:\n"
" j .\n"
" .popsection\n"
	);

	/*
	 * Enable interrupts here to allow touching userspace and the like.
	 * The callers expect this: do_trap() already has interrupts
	 * enabled, and do_work_pending() handles functions that enable
	 * interrupts internally.
	 */
	local_irq_enable();

	if (state == NULL) {
		/* allocate a page of writable, executable memory */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			pr_err("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		buffer = (void __user *) vm_mmap(NULL, 0, 64,
						 PROT_EXEC | PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);

		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;

		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tilepro_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			s32 offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction.  The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken.  The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;	/* lr must be fixed up later */
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				/* lnk writes its own dest, not lr */
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			/* temp_reg holds the link value during the step */
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we generated a signal.
	 */
	if (mem_op != MEMOP_NONE && align_ctl >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			/* addli reg, zero, lo16(value) */
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			/* auli reg, reg, ha16(value) */
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILEPRO_BUNDLE_SIZE_IN_BYTES) -
			(unsigned long)buffer) >>
			TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}
#else
static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
/*
* Called directly on the occasion of an interrupt.
*
* If the process doesn't have single step set, then we use this as an
* opportunity to turn single step off.
*
* It has been mentioned that we could conditionally turn off single stepping
* on each entry into the kernel and rely on single_step_once to turn it
* on for the processes that matter (as we already do), but this
* implementation is somewhat more efficient in that we muck with registers
* once on a bum interrupt rather than on every entry into the kernel.
*
* If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
* so we have to run through this process again before we can say that an
* instruction has executed.
*
* swint will set CANCELED, but it's a legitimate instruction. Fortunately
* it changes the PC. If it hasn't changed, then we know that the interrupt
* wasn't generated by swint and we'll need to run this process again before
* we can say an instruction has executed.
*
* If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
* on with our lives.
*/
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		/* stale single-step interrupt; disable stepping entirely */
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
		/* an instruction really executed: inhibit further stepping
		 * and report the trap (see block comment above) */
		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
		send_sigtrap(current, regs);
	}
}
/*
* Called from need_singlestep. Set up the control registers and the enable
* register, then return back.
*/
void single_step_once(struct pt_regs *regs)
{
	unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	/* remember the PC so the handler can tell if anything executed */
	*ss_pc = regs->pc;
	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	/* arm hardware single-step for user (PL0) execution only */
	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}
/* tilegx uses hardware single-step, so there is no per-thread buffer
 * or state to reset on exec. */
void single_step_execve(void)
{
	/* Nothing */
}
#endif /* !__tilegx__ */
| gpl-2.0 |
Perferom/android_kernel_zte_msm7x27 | sound/core/seq/seq.c | 1748 | 3996 | /*
* ALSA sequencer main module
* Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/seq_kernel.h>
#include "seq_clientmgr.h"
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_lock.h"
#include "seq_timer.h"
#include "seq_system.h"
#include "seq_info.h"
#include <sound/seq_device.h>
#if defined(CONFIG_SND_SEQ_DUMMY_MODULE)
int seq_client_load[15] = {[0] = SNDRV_SEQ_CLIENT_DUMMY, [1 ... 14] = -1};
#else
int seq_client_load[15] = {[0 ... 14] = -1};
#endif
int seq_default_timer_class = SNDRV_TIMER_CLASS_GLOBAL;
int seq_default_timer_sclass = SNDRV_TIMER_SCLASS_NONE;
int seq_default_timer_card = -1;
int seq_default_timer_device =
#ifdef CONFIG_SND_SEQ_HRTIMER_DEFAULT
SNDRV_TIMER_GLOBAL_HRTIMER
#elif defined(CONFIG_SND_SEQ_RTCTIMER_DEFAULT)
SNDRV_TIMER_GLOBAL_RTC
#else
SNDRV_TIMER_GLOBAL_SYSTEM
#endif
;
int seq_default_timer_subdevice = 0;
int seq_default_timer_resolution = 0; /* Hz */
MODULE_AUTHOR("Frank van de Pol <fvdpol@coil.demon.nl>, Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer.");
MODULE_LICENSE("GPL");
module_param_array(seq_client_load, int, NULL, 0444);
MODULE_PARM_DESC(seq_client_load, "The numbers of global (system) clients to load through kmod.");
module_param(seq_default_timer_class, int, 0644);
MODULE_PARM_DESC(seq_default_timer_class, "The default timer class.");
module_param(seq_default_timer_sclass, int, 0644);
MODULE_PARM_DESC(seq_default_timer_sclass, "The default timer slave class.");
module_param(seq_default_timer_card, int, 0644);
MODULE_PARM_DESC(seq_default_timer_card, "The default timer card number.");
module_param(seq_default_timer_device, int, 0644);
MODULE_PARM_DESC(seq_default_timer_device, "The default timer device number.");
module_param(seq_default_timer_subdevice, int, 0644);
MODULE_PARM_DESC(seq_default_timer_subdevice, "The default timer subdevice number.");
module_param(seq_default_timer_resolution, int, 0644);
MODULE_PARM_DESC(seq_default_timer_resolution, "The default timer resolution in Hz.");
/*
 * INIT PART
 */

/*
 * Initialize the ALSA sequencer core: client manager, event memory pool,
 * event queues, the sequencer device, the proc interface and the internal
 * system client.
 *
 * The original code jumped to a single "error" label on any failure and
 * returned without tearing down the components already initialized,
 * leaking them on a partially failed module load.  Unwind in reverse
 * order instead.  (client_init_data has no visible teardown counterpart
 * here -- presumably it allocates nothing that outlives failure; verify
 * against seq_clientmgr.c.)
 *
 * Returns 0 on success or a negative error code.
 */
static int __init alsa_seq_init(void)
{
	int err;

	snd_seq_autoload_lock();

	if ((err = client_init_data()) < 0)
		goto out_unlock;

	/* init memory, room for selected events */
	if ((err = snd_sequencer_memory_init()) < 0)
		goto out_unlock;

	/* init event queues */
	if ((err = snd_seq_queues_init()) < 0)
		goto err_memory;

	/* register sequencer device */
	if ((err = snd_sequencer_device_init()) < 0)
		goto err_queues;

	/* register proc interface */
	if ((err = snd_seq_info_init()) < 0)
		goto err_device;

	/* register our internal client */
	if ((err = snd_seq_system_client_init()) < 0)
		goto err_info;

	goto out_unlock;

err_info:
	snd_seq_info_done();
err_device:
	snd_sequencer_device_done();
err_queues:
	snd_seq_queues_delete();
err_memory:
	snd_sequencer_memory_done();
out_unlock:
	snd_seq_autoload_unlock();
	return err;
}
/*
 * Tear down the sequencer core, strictly in the reverse order of
 * alsa_seq_init() so no component is removed while a later-initialized
 * one could still reference it.
 */
static void __exit alsa_seq_exit(void)
{
	/* unregister our internal client */
	snd_seq_system_client_done();
	/* unregister proc interface */
	snd_seq_info_done();
	/* delete timing queues */
	snd_seq_queues_delete();
	/* unregister sequencer device */
	snd_sequencer_device_done();
	/* release event memory */
	snd_sequencer_memory_done();
}
module_init(alsa_seq_init)
module_exit(alsa_seq_exit)
| gpl-2.0 |
bio4554/ker.nl | drivers/net/wireless/mwifiex/uap_event.c | 2260 | 8283 | /*
* Marvell Wireless LAN device driver: AP event handling
*
* Copyright (C) 2012, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "decl.h"
#include "main.h"
#include "11n.h"
/*
 * Look up the station entry whose MAC address matches @mac in the
 * associated-STA list.
 * Must be called with the RA list spinlock already held.
 *
 * Returns the matching entry, or NULL when @mac is NULL or no entry
 * with that address exists.
 */
struct mwifiex_sta_node *
mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
{
	struct mwifiex_sta_node *sta;

	if (!mac)
		return NULL;

	list_for_each_entry(sta, &priv->sta_list, list)
		if (!memcmp(sta->mac_addr, mac, ETH_ALEN))
			return sta;

	return NULL;
}
/*
 * This function will add a sta_node entry to associated station list
 * table with the given mac address.
 * If entry exist already, existing entry is returned.
 * If received mac address is NULL, NULL is returned.
 *
 * NULL is also returned when allocating a new entry fails, so callers
 * must check the result.  Takes and releases the sta_list spinlock
 * internally; do not call with that lock held.
 */
static struct mwifiex_sta_node *
mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
{
	struct mwifiex_sta_node *node;
	unsigned long flags;

	if (!mac)
		return NULL;

	spin_lock_irqsave(&priv->sta_list_spinlock, flags);

	/* Reuse the existing entry for this MAC if there is one. */
	node = mwifiex_get_sta_entry(priv, mac);
	if (node)
		goto done;

	/* GFP_ATOMIC: we are allocating under a spinlock. */
	node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
	if (!node)
		goto done;

	memcpy(node->mac_addr, mac, ETH_ALEN);
	list_add_tail(&node->list, &priv->sta_list);

done:
	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
	return node;
}
/*
 * Search the association request IEs for an HT capability IE and set the
 * station's HT parameters accordingly: 11n enabled/disabled and the
 * maximum A-MSDU size derived from the advertised capability bits.
 */
static void
mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
		       int ies_len, struct mwifiex_sta_node *node)
{
	const struct ieee80211_ht_cap *ht_cap;

	if (!ies)
		return;

	ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
	if (!ht_cap) {
		/* No HT capability IE: the peer is not an 11n station. */
		node->is_11n_enabled = 0;
		return;
	}

	node->is_11n_enabled = 1;
	node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
			  IEEE80211_HT_CAP_MAX_AMSDU ?
			  MWIFIEX_TX_DATA_BUF_SIZE_8K :
			  MWIFIEX_TX_DATA_BUF_SIZE_4K;
}
/*
 * Delete the station entry matching @mac from the associated-STA list
 * and free it.  A no-op if no entry matches (or @mac is NULL, which
 * mwifiex_get_sta_entry handles).
 *
 * Bug fix: the previous code, once a match for @mac was found, iterated
 * the whole list with list_for_each_entry_safe and deleted/freed EVERY
 * station entry, not just the matching one.  Only the looked-up node
 * must be removed here; mwifiex_del_all_sta_list() exists for flushing
 * the whole list.
 */
static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
{
	struct mwifiex_sta_node *node;
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_list_spinlock, flags);

	node = mwifiex_get_sta_entry(priv, mac);
	if (node) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
	return;
}
/*
 * This function will delete all stations from associated station list.
 * Each entry is unlinked and freed under the sta_list spinlock, then the
 * list head is re-initialized to an empty list.
 */
static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
{
	struct mwifiex_sta_node *node, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_list_spinlock, flags);

	/* _safe variant: we free the node we are iterating over. */
	list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	INIT_LIST_HEAD(&priv->sta_list);
	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
	return;
}
/*
 * This function handles AP interface specific events generated by firmware.
 *
 * Event specific routines are called by this function based
 * upon the generated event cause.
 *
 *
 * Events supported for AP -
 * - EVENT_UAP_STA_ASSOC
 * - EVENT_UAP_STA_DEAUTH
 * - EVENT_UAP_BSS_ACTIVE
 * - EVENT_UAP_BSS_START
 * - EVENT_UAP_BSS_IDLE
 * - EVENT_UAP_MIC_COUNTERMEASURES:
 *
 * Returns 0 on success, -1 when a station entry could not be created
 * on association.
 */
int mwifiex_process_uap_event(struct mwifiex_private *priv)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	int len, i;
	u32 eventcause = adapter->event_cause;
	struct station_info sinfo;
	struct mwifiex_assoc_event *event;
	struct mwifiex_sta_node *node;
	u8 *deauth_mac;
	struct host_cmd_ds_11n_batimeout *ba_timeout;
	u16 ctrl;

	switch (eventcause) {
	case EVENT_UAP_STA_ASSOC:
		/* A station associated: report it to cfg80211, create a
		 * station entry, and set up per-TID aggregation state. */
		memset(&sinfo, 0, sizeof(sinfo));
		event = (struct mwifiex_assoc_event *)
			(adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
		if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
			/* len = offset of the IEs within event->data,
			 * or -1 if the frame type carries no IEs. */
			len = -1;
			if (ieee80211_is_assoc_req(event->frame_control))
				len = 0;
			else if (ieee80211_is_reassoc_req(event->frame_control))
				/* There will be ETH_ALEN bytes of
				 * current_ap_addr before the re-assoc ies.
				 */
				len = ETH_ALEN;

			if (len != -1) {
				sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
				sinfo.assoc_req_ies = &event->data[len];
				/* Reuse len as the byte offset of the IEs
				 * from frame_control to size the IE blob. */
				len = (u8 *)sinfo.assoc_req_ies -
				      (u8 *)&event->frame_control;
				sinfo.assoc_req_ies_len =
					le16_to_cpu(event->len) - (u16)len;
			}
		}
		cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
				 GFP_KERNEL);

		node = mwifiex_add_sta_entry(priv, event->sta_addr);
		if (!node) {
			dev_warn(adapter->dev,
				 "could not create station entry!\n");
			return -1;
		}

		if (!priv->ap_11n_enabled)
			break;

		mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies,
				       sinfo.assoc_req_ies_len, node);

		/* Allow per-TID AMPDU only for 11n-capable stations. */
		for (i = 0; i < MAX_NUM_TID; i++) {
			if (node->is_11n_enabled)
				node->ampdu_sta[i] =
					      priv->aggr_prio_tbl[i].ampdu_user;
			else
				node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
		}

		memset(node->rx_seq, 0xff, sizeof(node->rx_seq));
		break;
	case EVENT_UAP_STA_DEAUTH:
		/* Station left: notify cfg80211, tear down BA streams and
		 * drop the station entry. */
		deauth_mac = adapter->event_body +
			     MWIFIEX_UAP_EVENT_EXTRA_HEADER;
		cfg80211_del_sta(priv->netdev, deauth_mac, GFP_KERNEL);

		if (priv->ap_11n_enabled) {
			mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac);
			mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac);
		}
		mwifiex_del_sta_entry(priv, deauth_mac);
		break;
	case EVENT_UAP_BSS_IDLE:
		/* BSS went idle: stop traffic and flush all stations. */
		priv->media_connected = false;
		if (netif_carrier_ok(priv->netdev))
			netif_carrier_off(priv->netdev);
		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
		mwifiex_clean_txrx(priv);
		mwifiex_del_all_sta_list(priv);
		break;
	case EVENT_UAP_BSS_ACTIVE:
		/* BSS active again: resume traffic. */
		priv->media_connected = true;
		if (!netif_carrier_ok(priv->netdev))
			netif_carrier_on(priv->netdev);
		mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
		break;
	case EVENT_UAP_BSS_START:
		/* Firmware reports the BSS MAC address after the start
		 * marker bytes in the event body. */
		dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
		memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
		       ETH_ALEN);
		break;
	case EVENT_UAP_MIC_COUNTERMEASURES:
		/* For future development */
		dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
		break;
	case EVENT_AMSDU_AGGR_CTRL:
		/* Firmware-advertised A-MSDU size; clamp tx_buf_size. */
		ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
		dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);

		if (priv->media_connected) {
			adapter->tx_buf_size =
				min_t(u16, adapter->curr_tx_buf_size, ctrl);
			dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
				adapter->tx_buf_size);
		}
		break;
	case EVENT_ADDBA:
		dev_dbg(adapter->dev, "event: ADDBA Request\n");
		if (priv->media_connected)
			mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
					       HostCmd_ACT_GEN_SET, 0,
					       adapter->event_body);
		break;
	case EVENT_DELBA:
		dev_dbg(adapter->dev, "event: DELBA Request\n");
		if (priv->media_connected)
			mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
		break;
	case EVENT_BA_STREAM_TIEMOUT:
		dev_dbg(adapter->dev, "event: BA Stream timeout\n");
		if (priv->media_connected) {
			ba_timeout = (void *)adapter->event_body;
			mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
		}
		break;
	default:
		dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
			eventcause);
		break;
	}

	return 0;
}
| gpl-2.0 |
scanno/android_kernel_asus_me301t | drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 2516 | 31728 | /*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
static void radeon_overscan_setup(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
}
/*
 * Program the legacy RMX (scaler) block: compute the horizontal/vertical
 * stretch ratios and the FP-shadowed CRTC timing registers for @mode,
 * according to the CRTC's rmx_type (full/aspect scaling, centering, or
 * off) relative to the panel's native mode.
 */
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
				       struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	int xres = mode->hdisplay;
	int yres = mode->vdisplay;
	bool hscale = true, vscale = true;
	int hsync_wid;
	int vsync_wid;
	int hsync_start;
	int blank_width;
	u32 scale, inc, crtc_more_cntl;
	u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
	u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
	u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
	struct drm_display_mode *native_mode = &radeon_crtc->native_mode;

	/* Preserve only the reserved/auto-ratio bits; everything else is
	 * recomputed below. */
	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
		(RADEON_VERT_STRETCH_RESERVED |
		 RADEON_VERT_AUTO_RATIO_INC);
	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
		(RADEON_HORZ_FP_LOOP_STRETCH |
		 RADEON_HORZ_AUTO_RATIO_INC);

	crtc_more_cntl = 0;
	if ((rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		/* This is to workaround the asic bug for RMX, some versions
		   of BIOS dosen't have this register initialized correctly. */
		crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
	}

	/* Horizontal timing in units of 8 pixels, packed total|display. */
	fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));

	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
	if (!hsync_wid)
		hsync_wid = 1;
	hsync_start = mode->crtc_hsync_start - 8;

	fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
			      | ((hsync_wid & 0x3f) << 16)
			      | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
				 ? RADEON_CRTC_H_SYNC_POL
				 : 0));

	fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
				| ((mode->crtc_vdisplay - 1) << 16));

	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
	if (!vsync_wid)
		vsync_wid = 1;

	fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
			      | ((vsync_wid & 0x1f) << 16)
			      | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
				 ? RADEON_CRTC_V_SYNC_POL
				 : 0));

	fp_horz_vert_active = 0;

	/* Decide whether scaling is needed per axis: no native mode means
	 * no scaling; requested == native means that axis needs none. */
	if (native_mode->hdisplay == 0 ||
	    native_mode->vdisplay == 0) {
		hscale = false;
		vscale = false;
	} else {
		if (xres > native_mode->hdisplay)
			xres = native_mode->hdisplay;
		if (yres > native_mode->vdisplay)
			yres = native_mode->vdisplay;

		if (xres == native_mode->hdisplay)
			hscale = false;
		if (yres == native_mode->vdisplay)
			vscale = false;
	}

	switch (radeon_crtc->rmx_type) {
	case RMX_FULL:
	case RMX_ASPECT:
		/* Stretch each axis to the native size; the ratio is a
		 * fixed-point fraction of RADEON_*_STRETCH_RATIO_MAX. */
		if (!hscale)
			fp_horz_stretch |= ((xres/8-1) << 16);
		else {
			inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
			scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
				/ native_mode->hdisplay + 1;
			fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
					RADEON_HORZ_STRETCH_BLEND |
					RADEON_HORZ_STRETCH_ENABLE |
					((native_mode->hdisplay/8-1) << 16));
		}

		if (!vscale)
			fp_vert_stretch |= ((yres-1) << 12);
		else {
			inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
			scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
				/ native_mode->vdisplay + 1;
			fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
					RADEON_VERT_STRETCH_ENABLE |
					RADEON_VERT_STRETCH_BLEND |
					((native_mode->vdisplay-1) << 12));
		}
		break;
	case RMX_CENTER:
		/* No scaling: center the image and rebuild the sync/blank
		 * timing relative to the blanking period. */
		fp_horz_stretch |= ((xres/8-1) << 16);
		fp_vert_stretch |= ((yres-1) << 12);

		crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
				RADEON_CRTC_AUTO_VERT_CENTER_EN);

		blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
		if (blank_width > 110)
			blank_width = 110;

		fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));

		hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
		if (!hsync_wid)
			hsync_wid = 1;

		fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
				| ((hsync_wid & 0x3f) << 16)
				| ((mode->flags & DRM_MODE_FLAG_NHSYNC)
					? RADEON_CRTC_H_SYNC_POL
					: 0));

		fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
				| ((mode->crtc_vdisplay - 1) << 16));

		vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
		if (!vsync_wid)
			vsync_wid = 1;

		fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
					| ((vsync_wid & 0x1f) << 16)
					| ((mode->flags & DRM_MODE_FLAG_NVSYNC)
						? RADEON_CRTC_V_SYNC_POL
						: 0)));

		fp_horz_vert_active = (((native_mode->vdisplay) & 0xfff) |
				(((native_mode->hdisplay / 8) & 0x1ff) << 16));
		break;
	case RMX_OFF:
	default:
		/* Scaler off: program the requested size directly. */
		fp_horz_stretch |= ((xres/8-1) << 16);
		fp_vert_stretch |= ((yres-1) << 12);
		break;
	}

	WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch);
	WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch);
	WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl);
	WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active);
	WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid);
	WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid);
	WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
	WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
}
/* Placeholder: legacy common-register restore is intentionally empty. */
void radeon_restore_common_regs(struct drm_device *dev)
{
	/* don't need this yet */
}
/*
 * Poll until the primary PLL's pending atomic-update read completes,
 * giving up after a bounded number of register reads.
 */
static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	int i;

	/* FIXME: Certain revisions of R300 can't recover here.  Not sure of
	   the cause yet, but this workaround will mask the problem for now.
	   Other chips usually will pass at the very first test, so the
	   workaround shouldn't have any effect on them. */
	for (i = 0; i < 10000; i++) {
		if (!(RREG32_PLL(RADEON_PPLL_REF_DIV) &
		      RADEON_PPLL_ATOMIC_UPDATE_R))
			break;
	}
}
/*
 * Trigger an atomic write update on the primary PLL.  First spins until
 * any pending atomic-update read has drained (NOTE(review): this wait is
 * unbounded, unlike the bounded wait helper above), then sets the
 * atomic-update-write bit in PPLL_REF_DIV.
 */
static void radeon_pll_write_update(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	while (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);

	WREG32_PLL_P(RADEON_PPLL_REF_DIV,
			RADEON_PPLL_ATOMIC_UPDATE_W,
			~(RADEON_PPLL_ATOMIC_UPDATE_W));
}
/*
 * Poll until the secondary PLL's pending atomic-update read completes,
 * giving up after a bounded number of register reads.
 */
static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	int i;

	/* FIXME: Certain revisions of R300 can't recover here.  Not sure of
	   the cause yet, but this workaround will mask the problem for now.
	   Other chips usually will pass at the very first test, so the
	   workaround shouldn't have any effect on them. */
	for (i = 0; i < 10000; i++) {
		if (!(RREG32_PLL(RADEON_P2PLL_REF_DIV) &
		      RADEON_P2PLL_ATOMIC_UPDATE_R))
			break;
	}
}
/*
 * Trigger an atomic write update on the secondary PLL.  Spins (unbounded)
 * until any pending atomic-update read has drained, then sets the
 * atomic-update-write bit in P2PLL_REF_DIV.
 */
static void radeon_pll2_write_update(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	while (RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);

	WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
			RADEON_P2PLL_ATOMIC_UPDATE_W,
			~(RADEON_P2PLL_ATOMIC_UPDATE_W));
}
/*
 * Derive the PLL gain value from the VCO frequency implied by the
 * reference frequency and the reference/feedback dividers.
 *
 * This is horribly crude: the VCO frequency range is divided into three
 * bands, each with a fixed gain:
 *   [300 MHz .. max)  -> 7
 *   [180 .. 300) MHz  -> 4
 *   [0 .. 180) MHz    -> 1
 * A zero reference divider yields gain 1 (avoids dividing by zero).
 */
static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
				       uint16_t fb_div)
{
	unsigned int vco;

	if (ref_div == 0)
		return 1;

	vco = ((unsigned)ref_freq * fb_div) / ref_div;

	if (vco >= 30000)
		return 7;
	if (vco >= 18000)
		return 4;
	return 1;
}
/*
 * DPMS handler for legacy CRTCs: enable or blank the CRTC by toggling
 * the enable/display/sync-disable bits appropriate for the CRTC id, and
 * recompute power-management clocks around the state change.
 */
void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t mask;

	/* CRTC2 keeps all its disable bits in one register; CRTC1 splits
	 * them across GEN_CNTL and EXT_CNTL. */
	if (radeon_crtc->crtc_id)
		mask = (RADEON_CRTC2_DISP_DIS |
			RADEON_CRTC2_VSYNC_DIS |
			RADEON_CRTC2_HSYNC_DIS |
			RADEON_CRTC2_DISP_REQ_EN_B);
	else
		mask = (RADEON_CRTC_DISPLAY_DIS |
			RADEON_CRTC_VSYNC_DIS |
			RADEON_CRTC_HSYNC_DIS);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		radeon_crtc->enabled = true;
		/* adjust pm to dpms changes BEFORE enabling crtcs */
		radeon_pm_compute_clocks(rdev);
		if (radeon_crtc->crtc_id)
			WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
		else {
			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
									 RADEON_CRTC_DISP_REQ_EN_B));
			WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
		}
		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
		/* Palette is lost while the CRTC is off; reload it. */
		radeon_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
		if (radeon_crtc->crtc_id)
			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
		else {
			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
										    RADEON_CRTC_DISP_REQ_EN_B));
			WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
		}
		radeon_crtc->enabled = false;
		/* adjust pm to dpms changes AFTER disabling crtcs */
		radeon_pm_compute_clocks(rdev);
		break;
	}
}
/*
 * Non-atomic scanout-base update: delegate to radeon_crtc_do_set_base
 * with atomic=0, scanning out from the CRTC's currently bound fb.
 */
int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
			 struct drm_framebuffer *old_fb)
{
	return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
/*
 * Atomic (e.g. kgdb/panic path) scanout-base update: delegate to
 * radeon_crtc_do_set_base with atomic=1 using the supplied fb.
 */
int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				int x, int y, enum mode_set_atomic state)
{
	return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
}
/*
 * Program the CRTC scanout base for (x, y) within the target framebuffer:
 * pin the BO in VRAM, compute the (possibly tiled) byte offset, pitch and
 * pixel format, write the CRTC offset/pitch registers, and unpin the
 * previously scanned-out BO.
 *
 * Returns 0 on success or a negative error code.
 *
 * Bug fix: this function returns an int error code (0 = success), but the
 * two bits-per-pixel switch statements returned 'false' (== 0) for
 * unsupported depths, silently reporting success without programming
 * anything.  Both now return -EINVAL.
 */
int radeon_crtc_do_set_base(struct drm_crtc *crtc,
			    struct drm_framebuffer *fb,
			    int x, int y, int atomic)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_framebuffer *radeon_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct radeon_bo *rbo;
	uint64_t base;
	uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
	uint32_t crtc_pitch, pitch_pixels;
	uint32_t tiling_flags;
	int format;
	uint32_t gen_cntl_reg, gen_cntl_val;
	int r;

	DRM_DEBUG_KMS("\n");
	/* no fb bound */
	if (!atomic && !crtc->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	/* In the atomic path scan out of the passed-in fb; otherwise use
	 * the fb currently bound to the CRTC. */
	if (atomic) {
		radeon_fb = to_radeon_framebuffer(fb);
		target_fb = fb;
	}
	else {
		radeon_fb = to_radeon_framebuffer(crtc->fb);
		target_fb = crtc->fb;
	}

	/* Map depth to the hardware pixel-format field. */
	switch (target_fb->bits_per_pixel) {
	case 8:
		format = 2;
		break;
	case 15:      /*  555 */
		format = 3;
		break;
	case 16:      /*  565 */
		format = 4;
		break;
	case 24:      /*  RGB */
		format = 5;
		break;
	case 32:      /* xRGB */
		format = 6;
		break;
	default:
		DRM_ERROR("Unsupported screen depth %d\n",
			  target_fb->bits_per_pixel);
		return -EINVAL;
	}

	/* Pin framebuffer & get tiling informations */
	obj = radeon_fb->obj;
	rbo = gem_to_radeon_bo(obj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
	if (unlikely(r != 0)) {
		radeon_bo_unreserve(rbo);
		return -EINVAL;
	}
	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
	radeon_bo_unreserve(rbo);
	if (tiling_flags & RADEON_TILING_MICRO)
		DRM_ERROR("trying to scanout microtiled buffer\n");

	/* if scanout was in GTT this really wouldn't work */
	/* crtc offset is from display base addr not FB location */
	radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;

	base -= radeon_crtc->legacy_display_base_addr;

	crtc_offset_cntl = 0;

	pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
	crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
		       ((target_fb->bits_per_pixel * 8) - 1)) /
		      (target_fb->bits_per_pixel * 8));
	crtc_pitch |= crtc_pitch << 16;

	crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
	if (tiling_flags & RADEON_TILING_MACRO) {
		if (ASIC_IS_R300(rdev))
			crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
					     R300_CRTC_MICRO_TILE_BUFFER_DIS |
					     R300_CRTC_MACRO_TILE_EN);
		else
			crtc_offset_cntl |= RADEON_CRTC_TILE_EN;
	} else {
		if (ASIC_IS_R300(rdev))
			crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN |
					      R300_CRTC_MICRO_TILE_BUFFER_DIS |
					      R300_CRTC_MACRO_TILE_EN);
		else
			crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
	}

	/* Compute the scanout byte offset for (x, y); macro-tiled layouts
	 * need the tile address math, linear layouts a simple stride. */
	if (tiling_flags & RADEON_TILING_MACRO) {
		if (ASIC_IS_R300(rdev)) {
			crtc_tile_x0_y0 = x | (y << 16);
			base &= ~0x7ff;
		} else {
			int byteshift = target_fb->bits_per_pixel >> 4;
			int tile_addr = (((y >> 3) * pitch_pixels +  x) >> (8 - byteshift)) << 11;
			base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
			crtc_offset_cntl |= (y % 16);
		}
	} else {
		int offset = y * pitch_pixels + x;
		switch (target_fb->bits_per_pixel) {
		case 8:
			offset *= 1;
			break;
		case 15:
		case 16:
			offset *= 2;
			break;
		case 24:
			offset *= 3;
			break;
		case 32:
			offset *= 4;
			break;
		default:
			/* Unreachable: depth validated above.  Still fail
			 * with a proper error code rather than false. */
			return -EINVAL;
		}
		base += offset;
	}

	base &= ~7;

	if (radeon_crtc->crtc_id == 1)
		gen_cntl_reg = RADEON_CRTC2_GEN_CNTL;
	else
		gen_cntl_reg = RADEON_CRTC_GEN_CNTL;

	gen_cntl_val = RREG32(gen_cntl_reg);
	gen_cntl_val &= ~(0xf << 8);
	gen_cntl_val |= (format << 8);
	gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
	WREG32(gen_cntl_reg, gen_cntl_val);

	crtc_offset = (u32)base;

	WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr);

	if (ASIC_IS_R300(rdev)) {
		if (radeon_crtc->crtc_id)
			WREG32(R300_CRTC2_TILE_X0_Y0, crtc_tile_x0_y0);
		else
			WREG32(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0);
	}
	WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, crtc_offset_cntl);
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
	WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);

	/* Unpin the framebuffer we just stopped scanning out of. */
	if (!atomic && fb && fb != crtc->fb) {
		radeon_fb = to_radeon_framebuffer(fb);
		rbo = gem_to_radeon_bo(radeon_fb->obj);
		r = radeon_bo_reserve(rbo, false);
		if (unlikely(r != 0))
			return r;
		radeon_bo_unpin(rbo);
		radeon_bo_unreserve(rbo);
	}

	/* Bytes per pixel may have changed */
	radeon_bandwidth_update(rdev);

	return 0;
}
/*
 * Program the legacy CRTC timing registers (h/v totals, display sizes,
 * sync start/width, polarity and the general control registers) for
 * @mode.  TV-connected CRTCs get their timing values adjusted by the
 * TV encoder code before being written.
 *
 * Returns true on success, false for an unsupported framebuffer depth.
 */
static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_encoder *encoder;
	int format;
	int hsync_start;
	int hsync_wid;
	int vsync_wid;
	uint32_t crtc_h_total_disp;
	uint32_t crtc_h_sync_strt_wid;
	uint32_t crtc_v_total_disp;
	uint32_t crtc_v_sync_strt_wid;
	bool is_tv = false;

	DRM_DEBUG_KMS("\n");
	/* Detect whether this CRTC drives a TV encoder. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
			if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
				is_tv = true;
				DRM_INFO("crtc %d is connected to a TV\n", radeon_crtc->crtc_id);
				break;
			}
		}
	}

	/* Map depth to the hardware pixel-format field. */
	switch (crtc->fb->bits_per_pixel) {
	case 8:
		format = 2;
		break;
	case 15:      /*  555 */
		format = 3;
		break;
	case 16:      /*  565 */
		format = 4;
		break;
	case 24:      /*  RGB */
		format = 5;
		break;
	case 32:      /* xRGB */
		format = 6;
		break;
	default:
		return false;
	}

	/* Horizontal timing is programmed in units of 8 pixels. */
	crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
			     | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));

	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
	if (!hsync_wid)
		hsync_wid = 1;
	hsync_start = mode->crtc_hsync_start - 8;

	crtc_h_sync_strt_wid = ((hsync_start & 0x1fff)
				| ((hsync_wid & 0x3f) << 16)
				| ((mode->flags & DRM_MODE_FLAG_NHSYNC)
				   ? RADEON_CRTC_H_SYNC_POL
				   : 0));

	/* This works for double scan mode. */
	crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
			     | ((mode->crtc_vdisplay - 1) << 16));

	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
	if (!vsync_wid)
		vsync_wid = 1;

	crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
				| ((vsync_wid & 0x1f) << 16)
				| ((mode->flags & DRM_MODE_FLAG_NVSYNC)
				   ? RADEON_CRTC_V_SYNC_POL
				   : 0));

	if (radeon_crtc->crtc_id) {
		uint32_t crtc2_gen_cntl;
		uint32_t disp2_merge_cntl;

		/* if TV DAC is enabled for another crtc and keep it enabled */
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
		crtc2_gen_cntl |= ((format << 8)
				   | RADEON_CRTC2_VSYNC_DIS
				   | RADEON_CRTC2_HSYNC_DIS
				   | RADEON_CRTC2_DISP_DIS
				   | RADEON_CRTC2_DISP_REQ_EN_B
				   | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
				      ? RADEON_CRTC2_DBL_SCAN_EN
				      : 0)
				   | ((mode->flags & DRM_MODE_FLAG_CSYNC)
				      ? RADEON_CRTC2_CSYNC_EN
				      : 0)
				   | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
				      ? RADEON_CRTC2_INTERLACE_EN
				      : 0));

		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
			crtc2_gen_cntl |= RADEON_CRTC2_EN;

		disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
		disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;

		WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);

		WREG32(RADEON_FP_H2_SYNC_STRT_WID, crtc_h_sync_strt_wid);
		WREG32(RADEON_FP_V2_SYNC_STRT_WID, crtc_v_sync_strt_wid);
	} else {
		uint32_t crtc_gen_cntl;
		uint32_t crtc_ext_cntl;
		uint32_t disp_merge_cntl;

		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
		crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
				  | (format << 8)
				  | RADEON_CRTC_DISP_REQ_EN_B
				  | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
				     ? RADEON_CRTC_DBL_SCAN_EN
				     : 0)
				  | ((mode->flags & DRM_MODE_FLAG_CSYNC)
				     ? RADEON_CRTC_CSYNC_EN
				     : 0)
				  | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
				     ? RADEON_CRTC_INTERLACE_EN
				     : 0));

		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
			crtc_gen_cntl |= RADEON_CRTC_EN;

		crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
		crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
				  RADEON_CRTC_VSYNC_DIS |
				  RADEON_CRTC_HSYNC_DIS |
				  RADEON_CRTC_DISPLAY_DIS);

		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;

		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
		WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
	}

	/* TV output needs adjusted CRTC timing values. */
	if (is_tv)
		radeon_legacy_tv_adjust_crtc_reg(encoder, &crtc_h_total_disp,
						 &crtc_h_sync_strt_wid, &crtc_v_total_disp,
						 &crtc_v_sync_strt_wid);

	WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp);
	WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid);
	WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp);
	WREG32(RADEON_CRTC_V_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_v_sync_strt_wid);

	return true;
}
/*
 * radeon_set_pll - program the pixel PLL for a legacy (pre-AVIVO) CRTC.
 * @crtc: CRTC whose pixel clock is being set
 * @mode: display mode supplying the target pixel clock (mode->clock, kHz)
 *
 * Computes (or takes from the BIOS, for some LVDS panels) the reference,
 * feedback and post dividers, then writes them to either the P2PLL (CRTC2)
 * or PPLL (CRTC1) register block using the atomic-update handshake.
 * Register write ordering below follows the hardware programming sequence
 * and must not be rearranged.
 */
static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_encoder *encoder;
uint32_t feedback_div = 0;
uint32_t frac_fb_div = 0;
uint32_t reference_div = 0;
uint32_t post_divider = 0;
uint32_t freq = 0;
uint8_t pll_gain;
bool use_bios_divs = false;
/* PLL registers */
uint32_t pll_ref_div = 0;
uint32_t pll_fb_post_div = 0;
uint32_t htotal_cntl = 0;
bool is_tv = false;
struct radeon_pll *pll;
/* Map of supported post-divider values to their register encodings. */
struct {
int divider;
int bitvalue;
} *post_div, post_divs[] = {
/* From RAGE 128 VR/RAGE 128 GL Register
* Reference Manual (Technical Reference
* Manual P/N RRG-G04100-C Rev. 0.04), page
* 3-17 (PLL_DIV_[3:0]).
*/
{ 1, 0 }, /* VCLK_SRC */
{ 2, 1 }, /* VCLK_SRC/2 */
{ 4, 2 }, /* VCLK_SRC/4 */
{ 8, 3 }, /* VCLK_SRC/8 */
{ 3, 4 }, /* VCLK_SRC/3 */
{ 16, 5 }, /* VCLK_SRC/16 */
{ 6, 6 }, /* VCLK_SRC/6 */
{ 12, 7 }, /* VCLK_SRC/12 */
{ 0, 0 }
};
/* Each CRTC has its own pixel PLL: CRTC1 -> p1pll, CRTC2 -> p2pll. */
if (radeon_crtc->crtc_id)
pll = &rdev->clock.p2pll;
else
pll = &rdev->clock.p1pll;
pll->flags = RADEON_PLL_LEGACY;
if (mode->clock > 200000) /* range limits??? */
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
/* Inspect the encoder driving this CRTC: TV-out needs divider
* post-adjustment, non-DAC outputs dislike odd post dividers, and some
* non-ATOM LVDS panels ship fixed divider values in the BIOS. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
is_tv = true;
break;
}
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
if (!rdev->is_atom_bios) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
if (lvds) {
if (lvds->use_bios_dividers) {
pll_ref_div = lvds->panel_ref_divider;
pll_fb_post_div = (lvds->panel_fb_divider |
(lvds->panel_post_divider << 16));
htotal_cntl = 0;
use_bios_divs = true;
}
}
}
pll->flags |= RADEON_PLL_USE_REF_DIV;
}
}
}
DRM_DEBUG_KMS("\n");
/* No BIOS-supplied dividers: compute them for the requested clock and
* translate the post divider into its register encoding. */
if (!use_bios_divs) {
radeon_compute_pll_legacy(pll, mode->clock,
&freq, &feedback_div, &frac_fb_div,
&reference_div, &post_divider);
for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
if (post_div->divider == post_divider)
break;
}
/* Fall back to divide-by-1 if the computed value has no encoding. */
if (!post_div->divider)
post_div = &post_divs[0];
DRM_DEBUG_KMS("dc=%u, fd=%d, rd=%d, pd=%d\n",
(unsigned)freq,
feedback_div,
reference_div,
post_divider);
pll_ref_div = reference_div;
#if defined(__powerpc__) && (0) /* TODO */
/* apparently programming this otherwise causes a hang??? */
if (info->MacModel == RADEON_MAC_IBOOK)
pll_fb_post_div = 0x000600ad;
else
#endif
pll_fb_post_div = (feedback_div | (post_div->bitvalue << 16));
htotal_cntl = mode->htotal & 0x7;
}
pll_gain = radeon_compute_pll_gain(pll->reference_freq,
pll_ref_div & 0x3ff,
pll_fb_post_div & 0x7ff);
if (radeon_crtc->crtc_id) {
/* CRTC2: program the P2PLL. Switch the pixel clock source to the
* CPU clock while the PLL is reset and reprogrammed. */
uint32_t pixclks_cntl = ((RREG32_PLL(RADEON_PIXCLKS_CNTL) &
~(RADEON_PIX2CLK_SRC_SEL_MASK)) |
RADEON_PIX2CLK_SRC_SEL_P2PLLCLK);
if (is_tv) {
radeon_legacy_tv_adjust_pll2(encoder, &htotal_cntl,
&pll_ref_div, &pll_fb_post_div,
&pixclks_cntl);
}
WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
RADEON_PIX2CLK_SRC_SEL_CPUCLK,
~(RADEON_PIX2CLK_SRC_SEL_MASK));
WREG32_PLL_P(RADEON_P2PLL_CNTL,
RADEON_P2PLL_RESET
| RADEON_P2PLL_ATOMIC_UPDATE_EN
| ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT),
~(RADEON_P2PLL_RESET
| RADEON_P2PLL_ATOMIC_UPDATE_EN
| RADEON_P2PLL_PVG_MASK));
WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
pll_ref_div,
~RADEON_P2PLL_REF_DIV_MASK);
/* Feedback and post dividers share one register; update each field
* under its own mask. */
WREG32_PLL_P(RADEON_P2PLL_DIV_0,
pll_fb_post_div,
~RADEON_P2PLL_FB0_DIV_MASK);
WREG32_PLL_P(RADEON_P2PLL_DIV_0,
pll_fb_post_div,
~RADEON_P2PLL_POST0_DIV_MASK);
radeon_pll2_write_update(dev);
radeon_pll2_wait_for_read_update_complete(dev);
WREG32_PLL(RADEON_HTOTAL2_CNTL, htotal_cntl);
/* Release reset/atomic-update; the PLL starts locking now. */
WREG32_PLL_P(RADEON_P2PLL_CNTL,
0,
~(RADEON_P2PLL_RESET
| RADEON_P2PLL_SLEEP
| RADEON_P2PLL_ATOMIC_UPDATE_EN));
DRM_DEBUG_KMS("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
(unsigned)pll_ref_div,
(unsigned)pll_fb_post_div,
(unsigned)htotal_cntl,
RREG32_PLL(RADEON_P2PLL_CNTL));
DRM_DEBUG_KMS("Wrote2: rd=%u, fd=%u, pd=%u\n",
(unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
(unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
(unsigned)((pll_fb_post_div &
RADEON_P2PLL_POST0_DIV_MASK) >> 16));
mdelay(50); /* Let the clock to lock */
/* Switch the pixel clock back to the freshly-programmed P2PLL. */
WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
RADEON_PIX2CLK_SRC_SEL_P2PLLCLK,
~(RADEON_PIX2CLK_SRC_SEL_MASK));
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
} else {
/* CRTC1: program the PPLL (same sequence, different registers). */
uint32_t pixclks_cntl;
if (is_tv) {
pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
radeon_legacy_tv_adjust_pll1(encoder, &htotal_cntl, &pll_ref_div,
&pll_fb_post_div, &pixclks_cntl);
}
if (rdev->flags & RADEON_IS_MOBILITY) {
/* A temporal workaround for the occasional blanking on certain laptop panels.
This appears to related to the PLL divider registers (fail to lock?).
It occurs even when all dividers are the same with their old settings.
In this case we really don't need to fiddle with PLL registers.
By doing this we can avoid the blanking problem with some panels.
*/
if ((pll_ref_div == (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_REF_DIV_MASK)) &&
(pll_fb_post_div == (RREG32_PLL(RADEON_PPLL_DIV_3) &
(RADEON_PPLL_POST3_DIV_MASK | RADEON_PPLL_FB3_DIV_MASK)))) {
WREG32_P(RADEON_CLOCK_CNTL_INDEX,
RADEON_PLL_DIV_SEL,
~(RADEON_PLL_DIV_SEL));
r100_pll_errata_after_index(rdev);
return;
}
}
WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
RADEON_VCLK_SRC_SEL_CPUCLK,
~(RADEON_VCLK_SRC_SEL_MASK));
WREG32_PLL_P(RADEON_PPLL_CNTL,
RADEON_PPLL_RESET
| RADEON_PPLL_ATOMIC_UPDATE_EN
| RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
| ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT),
~(RADEON_PPLL_RESET
| RADEON_PPLL_ATOMIC_UPDATE_EN
| RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
| RADEON_PPLL_PVG_MASK));
WREG32_P(RADEON_CLOCK_CNTL_INDEX,
RADEON_PLL_DIV_SEL,
~(RADEON_PLL_DIV_SEL));
r100_pll_errata_after_index(rdev);
if (ASIC_IS_R300(rdev) ||
(rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480)) {
if (pll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) {
/* When restoring console mode, use saved PPLL_REF_DIV
* setting.
*/
WREG32_PLL_P(RADEON_PPLL_REF_DIV,
pll_ref_div,
0);
} else {
/* R300 uses ref_div_acc field as real ref divider */
WREG32_PLL_P(RADEON_PPLL_REF_DIV,
(pll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
~R300_PPLL_REF_DIV_ACC_MASK);
}
} else
WREG32_PLL_P(RADEON_PPLL_REF_DIV,
pll_ref_div,
~RADEON_PPLL_REF_DIV_MASK);
WREG32_PLL_P(RADEON_PPLL_DIV_3,
pll_fb_post_div,
~RADEON_PPLL_FB3_DIV_MASK);
WREG32_PLL_P(RADEON_PPLL_DIV_3,
pll_fb_post_div,
~RADEON_PPLL_POST3_DIV_MASK);
radeon_pll_write_update(dev);
radeon_pll_wait_for_read_update_complete(dev);
WREG32_PLL(RADEON_HTOTAL_CNTL, htotal_cntl);
WREG32_PLL_P(RADEON_PPLL_CNTL,
0,
~(RADEON_PPLL_RESET
| RADEON_PPLL_SLEEP
| RADEON_PPLL_ATOMIC_UPDATE_EN
| RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));
DRM_DEBUG_KMS("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
pll_ref_div,
pll_fb_post_div,
(unsigned)htotal_cntl,
RREG32_PLL(RADEON_PPLL_CNTL));
DRM_DEBUG_KMS("Wrote: rd=%d, fd=%d, pd=%d\n",
pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
(pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);
mdelay(50); /* Let the clock to lock */
WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
RADEON_VCLK_SRC_SEL_PPLLCLK,
~(RADEON_VCLK_SRC_SEL_MASK));
if (is_tv)
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
}
}
/*
 * radeon_crtc_mode_fixup - DRM helper hook run before a mode set.
 *
 * Recomputes power-management clocks for the upcoming mode change and then
 * delegates mode validation/adjustment to the scaling fixup helper.
 * Returns false to reject the mode if scaling fixup fails.
 */
static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	struct radeon_device *rdev = crtc->dev->dev_private;

	/* adjust pm to upcoming mode change */
	radeon_pm_compute_clocks(rdev);

	return radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode);
}
/*
 * radeon_crtc_mode_set - program a full mode on a legacy CRTC.
 *
 * Sets the scanout base, CRTC timing, pixel PLL and overscan in sequence.
 * RMX (panel scaling) hardware exists only on the first CRTC, so scaling
 * requested on CRTC2 can only be reported, not honoured.
 */
static int radeon_crtc_mode_set(struct drm_crtc *crtc,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode,
				int x, int y, struct drm_framebuffer *old_fb)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	/* TODO TV */
	radeon_crtc_set_base(crtc, x, y, old_fb);
	radeon_set_crtc_timing(crtc, adjusted_mode);
	radeon_set_pll(crtc, adjusted_mode);
	radeon_overscan_setup(crtc, adjusted_mode);

	if (radeon_crtc->crtc_id == 0) {
		radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
	} else if (radeon_crtc->rmx_type != RMX_OFF) {
		/* FIXME: only first crtc has rmx what should we
		 * do ?
		 */
		DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
	}

	return 0;
}
/*
 * radeon_crtc_prepare - DRM helper hook called before reprogramming a CRTC.
 *
 * Powers down every CRTC on the device, not just the one being changed.
 */
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
	struct drm_crtc *other;

	/*
	 * The hardware wedges sometimes if you reconfigure one CRTC
	 * whilst another is running (see fdo bug #24611).
	 */
	list_for_each_entry(other, &crtc->dev->mode_config.crtc_list, head)
		radeon_crtc_dpms(other, DRM_MODE_DPMS_OFF);
}
/*
 * radeon_crtc_commit - DRM helper hook called after a mode set completes.
 *
 * Counterpart of radeon_crtc_prepare(): turns every enabled CRTC on the
 * device back on.
 */
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_crtc *other;

	/*
	 * Reenable the CRTCs that should be running.
	 */
	list_for_each_entry(other, &crtc->dev->mode_config.crtc_list, head) {
		if (!other->enabled)
			continue;
		radeon_crtc_dpms(other, DRM_MODE_DPMS_ON);
	}
}
/* CRTC helper vtable for legacy (pre-AVIVO) Radeon hardware; registered by
 * radeon_legacy_init_crtc(). */
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.dpms = radeon_crtc_dpms,
.mode_fixup = radeon_crtc_mode_fixup,
.mode_set = radeon_crtc_mode_set,
.mode_set_base = radeon_crtc_set_base,
.mode_set_base_atomic = radeon_crtc_set_base_atomic,
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
.load_lut = radeon_crtc_load_lut,
};
/*
 * radeon_legacy_init_crtc - attach the legacy helper vtable to a CRTC.
 *
 * CRTC2's register block sits at a fixed offset from CRTC1's, so record
 * that offset for the second CRTC before registering the helpers.
 */
void radeon_legacy_init_crtc(struct drm_device *dev,
			     struct radeon_crtc *radeon_crtc)
{
	if (radeon_crtc->crtc_id == 1)
		radeon_crtc->crtc_offset =
			RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;

	drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
}
| gpl-2.0 |
HelllGuest/sprout_kernel | drivers/ata/pata_mpiix.c | 2772 | 7056 | /*
* pata_mpiix.c - Intel MPIIX PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
* Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* The MPIIX is different enough to the PIIX4 and friends that we give it
* a separate driver. The old ide/pci code handles this by just not tuning
* MPIIX at all.
*
* The MPIIX also differs in another important way from the majority of PIIX
* devices. The chip is a bridge (pardon the pun) between the old world of
* ISA IDE and PCI IDE. Although the ATA timings are PCI configured the actual
* IDE controller is not decoded in PCI space and the chip does not claim to
* be IDE class PCI. This requires slightly non-standard probe logic compared
* with PCI IDE and also that we do not disable the device when our driver is
* unloaded (as it has many other functions).
*
* The driver consciously keeps this logic internally to avoid pushing quirky
* PATA history into the clean libata layer.
*
* Thinkpad specific note: If you boot an MPIIX using a thinkpad with a PCMCIA
* hard disk present this driver will not detect it. This is not a bug. In this
* configuration the secondary port of the MPIIX is disabled and the addresses
* are decoded by the PCMCIA bridge and therefore are for a generic IDE driver
* to operate.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_mpiix"
#define DRV_VERSION "0.7.7"
/* MPIIX PCI configuration space layout: IDETIM is a 16-bit timing/control
 * register; the low bits below are per-drive control flags, the high bits
 * report channel enable and primary/secondary selection. */
enum {
IDETIM = 0x6C, /* IDE control register */
IORDY = (1 << 1),
PPE = (1 << 2),
FTIM = (1 << 0),
ENABLED = (1 << 15),
SECONDARY = (1 << 14)
};
/*
 * mpiix_pre_reset - probe begins, verify the MPIIX IDE function is enabled.
 * @link: ATA link being reset
 * @deadline: jiffies deadline for the reset
 *
 * Checks the enable bit in PCI config space before handing off to the
 * generic SFF prereset. Returns -ENOENT if the channel is disabled.
 */
static int mpiix_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 };
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);

	if (!pci_test_config_bits(pdev, &mpiix_enable_bits))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);
}
/**
* mpiix_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. The MPIIX allows us to program the
* IORDY sample point (2-5 clocks), recovery (1-4 clocks) and whether
* prefetching or IORDY are used.
*
* This would get very ugly because we can only program timing for one
* device at a time, the other gets PIO0. Fortunately libata calls
* our qc_issue command before a command is issued so we can flip the
* timings back and forth to reduce the pain.
*/
static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
int control = 0;
int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u16 idetim;
/* Per-PIO-mode ISP (sample) and RTC (recovery) clock values, indexed
 * by PIO mode 0-4. */
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
pci_read_config_word(pdev, IDETIM, &idetim);
/* Mask the IORDY/TIME/PPE for this device */
if (adev->class == ATA_DEV_ATA)
control |= PPE; /* Enable prefetch/posting for disk */
if (ata_pio_need_iordy(adev))
control |= IORDY;
if (pio > 1)
control |= FTIM; /* This drive is on the fast timing bank */
/* Mask out timing and clear both TIME bank selects */
idetim &= 0xCCEE;
/* Each drive's control nibble is shifted by 4 * devno. */
idetim &= ~(0x07 << (4 * adev->devno));
idetim |= control << (4 * adev->devno);
idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
pci_write_config_word(pdev, IDETIM, idetim);
/* We use ap->private_data as a pointer to the device currently
loaded for timing */
ap->private_data = adev;
}
/**
* mpiix_qc_issue - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Our logic also clears TIME0/TIME1 for the other device so
* that, even if we get this wrong, cycles to the other device will
* be made PIO0.
*/
/*
 * mpiix_qc_issue - wrap command issue so the right drive's timings are loaded.
 * @qc: command about to be issued
 *
 * The MPIIX can only hold timings for one drive per channel, so reload them
 * whenever a different device than the one currently programmed is targeted.
 */
static unsigned int mpiix_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_device *adev = qc->dev;
	struct ata_port *ap = qc->ap;

	/* If modes have been configured and the channel data is not loaded
	   then load it. We have to check if pio_mode is set as the core code
	   does not set adev->pio_mode to XFER_PIO_0 while probing as would be
	   logical */
	if (adev->pio_mode && ap->private_data != adev)
		mpiix_set_piomode(ap, adev);

	return ata_sff_qc_issue(qc);
}
/* Standard PIO-only SCSI host template. */
static struct scsi_host_template mpiix_sht = {
ATA_PIO_SHT(DRV_NAME),
};
/* Port operations: generic SFF ops plus our timing reload on qc_issue,
 * 40-wire cable assumption and the enable-bit prereset check. */
static struct ata_port_operations mpiix_port_ops = {
.inherits = &ata_sff_port_ops,
.qc_issue = mpiix_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = mpiix_set_piomode,
.prereset = mpiix_pre_reset,
.sff_data_xfer = ata_sff_data_xfer32,
};
/*
 * mpiix_init_one - PCI probe for the MPIIX bridge.
 * @dev: PCI device being probed
 * @id: matching entry from the ID table
 *
 * The MPIIX decodes legacy ISA IDE addresses rather than PCI BARs, so the
 * port resources are set up by hand here instead of via the usual libata
 * PCI helpers. Returns 0 on success or a negative errno.
 */
static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
/* Single threaded by the PCI probe logic */
struct ata_host *host;
struct ata_port *ap;
void __iomem *cmd_addr, *ctl_addr;
u16 idetim;
int cmd, ctl, irq;
ata_print_version_once(&dev->dev, DRV_VERSION);
host = ata_host_alloc(&dev->dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
/* MPIIX has many functions which can be turned on or off according
to other devices present. Make sure IDE is enabled before we try
and use it */
pci_read_config_word(dev, IDETIM, &idetim);
if (!(idetim & ENABLED))
return -ENODEV;
/* See if it's primary or secondary channel... */
if (!(idetim & SECONDARY)) {
cmd = 0x1F0;
ctl = 0x3F6;
irq = 14;
} else {
cmd = 0x170;
ctl = 0x376;
irq = 15;
}
/* Map the legacy ISA port ranges; devm_* keeps cleanup automatic. */
cmd_addr = devm_ioport_map(&dev->dev, cmd, 8);
ctl_addr = devm_ioport_map(&dev->dev, ctl, 1);
if (!cmd_addr || !ctl_addr)
return -ENOMEM;
ata_port_desc(ap, "cmd 0x%x ctl 0x%x", cmd, ctl);
/* We do our own plumbing to avoid leaking special cases for whacko
ancient hardware into the core code. There are two issues to
worry about. #1 The chip is a bridge so if in legacy mode and
without BARs set fools the setup. #2 If you pci_disable_device
the MPIIX your box goes castors up */
ap->ops = &mpiix_port_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->ioaddr.cmd_addr = cmd_addr;
ap->ioaddr.ctl_addr = ctl_addr;
ap->ioaddr.altstatus_addr = ctl_addr;
/* Let libata fill in the port details */
ata_sff_std_ports(&ap->ioaddr);
/* activate host */
return ata_host_activate(host, irq, ata_sff_interrupt, IRQF_SHARED,
&mpiix_sht);
}
/* The only device this driver binds to: the Intel 82371MX (MPIIX). */
static const struct pci_device_id mpiix[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), },
{ },
};
/* PCI driver glue; note there is no device disable on remove (see the
 * comments in mpiix_init_one about the chip's other functions). */
static struct pci_driver mpiix_pci_driver = {
.name = DRV_NAME,
.id_table = mpiix,
.probe = mpiix_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(mpiix_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Intel MPIIX");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mpiix);
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
HtcLegacy/android_kernel_htc_protou | drivers/net/ethernet/broadcom/b44.c | 4820 | 59208 | /* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
*
* Copyright (C) 2002 David S. Miller (davem@redhat.com)
* Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
* Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
* Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
* Copyright (C) 2006 Broadcom Corporation.
* Copyright (C) 2007 Michael Buesch <m@bues.ch>
*
* Distribute under GPL.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include "b44.h"
#define DRV_MODULE_NAME "b44"
#define DRV_MODULE_VERSION "2.0"
#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
#define B44_DEF_MSG_ENABLE \
(NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK | \
NETIF_MSG_TIMER | \
NETIF_MSG_IFDOWN | \
NETIF_MSG_IFUP | \
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
* and dev->tx_timeout() should be called to fix the problem
*/
#define B44_TX_TIMEOUT (5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU 60
#define B44_MAX_MTU 1500
#define B44_RX_RING_SIZE 512
#define B44_DEF_RX_RING_PENDING 200
#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE 512
#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
B44_TX_RING_SIZE)
#define TX_RING_GAP(BP) \
(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP) \
(((BP)->tx_cons <= (BP)->tx_prod) ? \
(BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
(BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
#define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE 0x400
#define B44_PATTERN_SIZE 0x80
#define B44_PMASK_BASE 0x600
#define B44_PMASK_SIZE 0x10
#define B44_MAX_PATTERNS 16
#define B44_ETHIPV6UDP_HLEN 62
#define B44_ETHIPV4UDP_HLEN 42
MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Debug message bitmap; -1 selects B44_DEF_MSG_ENABLE. */
static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
#ifdef CONFIG_B44_PCI
/* PCI IDs handled when the chip is attached via PCI (through SSB). */
static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
static struct pci_driver b44_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */
/* The real driver binds to the SSB Ethernet core. */
static const struct ssb_device_id b44_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
/* Forward declarations and reset-type selectors used across the file. */
static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
#define B44_FULL_RESET 1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET 3
#define B44_CHIP_RESET_FULL 4
#define B44_CHIP_RESET_PARTIAL 5
static void b44_init_hw(struct b44 *, int);
static int dma_desc_sync_size;
static int instance;
/* ethtool statistics names, generated from the register declaration list. */
static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...) # x,
B44_STAT_REG_DECLARE
#undef _B44
};
/*
 * b44_sync_dma_desc_for_device - hand one DMA descriptor back to the device.
 *
 * Syncs dma_desc_sync_size bytes at dma_base + offset for device access;
 * used when the ring lives in streaming DMA memory (RING_HACK mode).
 */
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_addr_t desc = dma_base + offset;

	dma_sync_single_for_device(sdev->dma_dev, desc,
				   dma_desc_sync_size, dir);
}
/*
 * b44_sync_dma_desc_for_cpu - reclaim one DMA descriptor for CPU access.
 *
 * Counterpart of b44_sync_dma_desc_for_device(); syncs the descriptor at
 * dma_base + offset back to the CPU before it is read or modified.
 */
static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_addr_t desc = dma_base + offset;

	dma_sync_single_for_cpu(sdev->dma_dev, desc,
				dma_desc_sync_size, dir);
}
/* Read a 32-bit chip register through the SSB bus glue. */
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
return ssb_read32(bp->sdev, reg);
}
/* Write a 32-bit chip register through the SSB bus glue. */
static inline void bw32(const struct b44 *bp,
unsigned long reg, unsigned long val)
{
ssb_write32(bp->sdev, reg, val);
}
/*
 * b44_wait_bit - poll a register bit until it reaches the wanted state.
 * @bp: device
 * @reg: register offset to poll
 * @bit: bit mask to test
 * @timeout: maximum number of poll attempts (10us apart)
 * @clear: nonzero to wait for the bit to clear, zero to wait for it to set
 *
 * Returns 0 on success, -ENODEV (with a rate-limited log message) on timeout.
 */
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);
		bool done = clear ? !(val & bit) : !!(val & bit);

		if (done)
			break;
		udelay(10);
	}

	if (i == timeout) {
		if (net_ratelimit())
			netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
				   bit, reg, clear ? "clear" : "set");
		return -ENODEV;
	}

	return 0;
}
/*
 * __b44_cam_read - fetch a 6-byte MAC address from CAM slot @index.
 *
 * Triggers a CAM read, waits for it to finish, then unpacks the address:
 * bytes 0-1 come from the high data register, bytes 2-5 from the low one.
 */
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 lo, hi;

	bw32(bp, B44_CAM_CTRL,
	     CAM_CTRL_READ | (index << CAM_CTRL_INDEX_SHIFT));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	lo = br32(bp, B44_CAM_DATA_LO);
	hi = br32(bp, B44_CAM_DATA_HI);

	data[2] = (lo >> 24) & 0xFF;
	data[3] = (lo >> 16) & 0xFF;
	data[4] = (lo >> 8) & 0xFF;
	data[5] = (lo >> 0) & 0xFF;
	data[0] = (hi >> 8) & 0xFF;
	data[1] = (hi >> 0) & 0xFF;
}
/*
 * __b44_cam_write - store a 6-byte MAC address into CAM slot @index.
 *
 * Packs bytes 2-5 into the low data register and bytes 0-1 (plus the valid
 * flag) into the high one, then issues the write and waits for completion.
 */
static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 lo, hi;

	lo = ((u32) data[2] << 24) |
	     ((u32) data[3] << 16) |
	     ((u32) data[4] << 8) |
	     ((u32) data[5] << 0);
	bw32(bp, B44_CAM_DATA_LO, lo);

	hi = CAM_DATA_HI_VALID |
	     ((u32) data[0] << 8) |
	     ((u32) data[1] << 0);
	bw32(bp, B44_CAM_DATA_HI, hi);

	bw32(bp, B44_CAM_CTRL,
	     CAM_CTRL_WRITE | (index << CAM_CTRL_INDEX_SHIFT));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
/* Mask all interrupt sources without flushing. */
static inline void __b44_disable_ints(struct b44 *bp)
{
bw32(bp, B44_IMASK, 0);
}
/* Mask all interrupt sources and flush the posted write with a readback. */
static void b44_disable_ints(struct b44 *bp)
{
__b44_disable_ints(bp);
/* Flush posted writes. */
br32(bp, B44_IMASK);
}
/* Restore the interrupt mask the driver last configured (bp->imask). */
static void b44_enable_ints(struct b44 *bp)
{
bw32(bp, B44_IMASK, bp->imask);
}
/*
 * __b44_readphy - read one MII register from an arbitrary PHY address.
 * Clears the MII interrupt status, issues the MDIO read frame and waits
 * for completion. *val receives the 16-bit data field; returns 0 or the
 * b44_wait_bit() error. Note *val is written even on timeout.
 */
static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
int err;
bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
(MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
(phy_addr << MDIO_DATA_PMD_SHIFT) |
(reg << MDIO_DATA_RA_SHIFT) |
(MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
return err;
}
/*
 * __b44_writephy - write one MII register on an arbitrary PHY address.
 * Same handshake as __b44_readphy() but with the data carried in the
 * MDIO frame. Returns 0 or the b44_wait_bit() error.
 */
static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
(MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
(phy_addr << MDIO_DATA_PMD_SHIFT) |
(reg << MDIO_DATA_RA_SHIFT) |
(MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
(val & MDIO_DATA_DATA)));
return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
/* Read a register of the board's own PHY; a no-op success on PHY-less
 * boards (*val is left untouched in that case). */
static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
return 0;
return __b44_readphy(bp, bp->phy_addr, reg, val);
}
/* Write a register of the board's own PHY; no-op success on PHY-less boards. */
static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
return 0;
return __b44_writephy(bp, bp->phy_addr, reg, val);
}
/* miilib interface */
/*
 * b44_mii_read - miilib read hook.
 *
 * miilib has no error channel, so a failed MDIO transaction is reported
 * as all-ones (the value an absent PHY would return).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	if (__b44_readphy(bp, phy_id, location, &val))
		return 0xffffffff;

	return val;
}

/*
 * b44_mii_write - miilib write hook; errors are silently dropped since
 * miilib's write interface returns void.
 */
static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
	__b44_writephy(netdev_priv(dev), phy_id, location, val);
}
/*
 * b44_phy_reset - reset the attached PHY and verify it completed.
 *
 * Sets BMCR_RESET, waits 100us and checks that the bit self-cleared.
 * Returns 0 on success (including PHY-less boards), a negative errno on
 * MDIO failure, or -ENODEV if the reset bit stayed set.
 */
static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	udelay(100);

	err = b44_readphy(bp, MII_BMCR, &val);
	if (err)
		return err;

	/* BMCR_RESET self-clears when the PHY finishes resetting. */
	if (val & BMCR_RESET) {
		netdev_err(bp->dev, "PHY Reset would not complete\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * __b44_set_flow_ctrl - program RX/TX pause according to @pause_flags.
 *
 * Records the new pause flags in bp->flags, then enables or disables the
 * RX flow-control and MAC pause features to match.
 */
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 rxcfg, flow;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	rxcfg = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		rxcfg |= RXCONFIG_FLOW;
	else
		rxcfg &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, rxcfg);

	flow = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		flow |= MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER);
	else
		flow &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, flow);
}
/*
 * b44_set_flow_ctrl - derive pause settings from autonegotiation results.
 * @local: our MII_ADVERTISE value
 * @remote: link partner's MII_LPA value
 */
static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM) &&
	    (remote & LPA_PAUSE_ASYM) &&
	    !(remote & LPA_PAUSE_CAP))
		pause_enab |= B44_FLAG_RX_PAUSE;

	__b44_set_flow_ctrl(bp, pause_enab);
}
#ifdef CONFIG_BCM47XX
#include <asm/mach-bcm47xx/nvram.h>
/*
 * b44_wap54g10_workaround - clear a stuck BMCR isolate bit on WAP54G v1.0.
 * Only acts when the nvram "boardnum" identifies that board (value 2);
 * any MDIO failure is reported but otherwise ignored.
 */
static void b44_wap54g10_workaround(struct b44 *bp)
{
char buf[20];
u32 val;
int err;
/*
* workaround for bad hardware design in Linksys WAP54G v1.0
* see https://dev.openwrt.org/ticket/146
* check and reset bit "isolate"
*/
if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
return;
if (simple_strtoul(buf, NULL, 0) == 2) {
err = __b44_readphy(bp, 0, MII_BMCR, &val);
if (err)
goto error;
if (!(val & BMCR_ISOLATE))
return;
val &= ~BMCR_ISOLATE;
err = __b44_writephy(bp, 0, MII_BMCR, val);
if (err)
goto error;
}
return;
error:
pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
/* Non-BCM47xx builds: the workaround is not needed. */
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif
/*
 * b44_setup_phy - configure the PHY LEDs and link parameters.
 *
 * Applies the WAP54G workaround, sets the LED control registers, then
 * either starts autonegotiation with the advertisement built from the
 * B44_FLAG_ADV_* flags, or forces speed/duplex when B44_FLAG_FORCE_LINK
 * is set (in which case flow control is disabled, see comment below).
 * Returns 0 or the first MDIO error encountered.
 */
static int b44_setup_phy(struct b44 *bp)
{
u32 val;
int err;
b44_wap54g10_workaround(bp);
if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
return 0;
if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
goto out;
if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
val & MII_ALEDCTRL_ALLMSK)) != 0)
goto out;
if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
goto out;
if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
val | MII_TLEDCTRL_ENABLE)) != 0)
goto out;
if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
/* Autonegotiate: advertise whatever the flag bits allow. */
u32 adv = ADVERTISE_CSMA;
if (bp->flags & B44_FLAG_ADV_10HALF)
adv |= ADVERTISE_10HALF;
if (bp->flags & B44_FLAG_ADV_10FULL)
adv |= ADVERTISE_10FULL;
if (bp->flags & B44_FLAG_ADV_100HALF)
adv |= ADVERTISE_100HALF;
if (bp->flags & B44_FLAG_ADV_100FULL)
adv |= ADVERTISE_100FULL;
if (bp->flags & B44_FLAG_PAUSE_AUTO)
adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
goto out;
if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
BMCR_ANRESTART))) != 0)
goto out;
} else {
/* Forced mode: program BMCR directly from the flag bits. */
u32 bmcr;
if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
goto out;
bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
if (bp->flags & B44_FLAG_100_BASE_T)
bmcr |= BMCR_SPEED100;
if (bp->flags & B44_FLAG_FULL_DUPLEX)
bmcr |= BMCR_FULLDPLX;
if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
goto out;
/* Since we will not be negotiating there is no safe way
* to determine if the link partner supports flow control
* or not. So just disable it completely in this case.
*/
b44_set_flow_ctrl(bp, 0, 0);
}
out:
return err;
}
/*
 * b44_stats_update - accumulate the hardware MIB counters into bp->hw_stats.
 *
 * The TX and RX counter registers are consecutive 32-bit words and
 * bp->hw_stats mirrors their layout starting at tx_good_octets, so a
 * single walking pointer covers both register ranges.
 *
 * Fix: the original code had a dead "reg += 8*4UL;" between the two
 * loops (labelled "Pad"); reg is unconditionally reinitialised by the
 * second for-statement, so the store had no effect and has been removed.
 */
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}
/*
 * b44_link_report - log the current link state.
 *
 * Reports link-down, or link-up with the speed/duplex and pause settings
 * recorded in bp->flags.
 */
static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		netdev_info(bp->dev, "Link is down\n");
		return;
	}

	netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
		    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
	netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
		    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
}
/*
 * b44_check_phy - poll the PHY and synchronise carrier/duplex/pause state.
 *
 * PHY-less boards are treated as a permanent 100/full link. Otherwise the
 * BMSR/AUXCTRL registers are read to update speed and duplex flags, raise
 * or drop the carrier, reprogram TX duplex, and (on link-up with autoneg)
 * recompute flow control from the negotiated advertisements. Called from
 * the periodic timer with bp->lock held.
 */
static void b44_check_phy(struct b44 *bp)
{
u32 bmsr, aux;
if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
bp->flags |= B44_FLAG_100_BASE_T;
bp->flags |= B44_FLAG_FULL_DUPLEX;
if (!netif_carrier_ok(bp->dev)) {
u32 val = br32(bp, B44_TX_CTRL);
val |= TX_CTRL_DUPLEX;
bw32(bp, B44_TX_CTRL, val);
netif_carrier_on(bp->dev);
b44_link_report(bp);
}
return;
}
/* bmsr == 0xffff indicates a failed/absent PHY read; skip in that case. */
if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
!b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
(bmsr != 0xffff)) {
if (aux & MII_AUXCTRL_SPEED)
bp->flags |= B44_FLAG_100_BASE_T;
else
bp->flags &= ~B44_FLAG_100_BASE_T;
if (aux & MII_AUXCTRL_DUPLEX)
bp->flags |= B44_FLAG_FULL_DUPLEX;
else
bp->flags &= ~B44_FLAG_FULL_DUPLEX;
if (!netif_carrier_ok(bp->dev) &&
(bmsr & BMSR_LSTATUS)) {
u32 val = br32(bp, B44_TX_CTRL);
u32 local_adv, remote_adv;
if (bp->flags & B44_FLAG_FULL_DUPLEX)
val |= TX_CTRL_DUPLEX;
else
val &= ~TX_CTRL_DUPLEX;
bw32(bp, B44_TX_CTRL, val);
if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
!b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
!b44_readphy(bp, MII_LPA, &remote_adv))
b44_set_flow_ctrl(bp, local_adv, remote_adv);
/* Link now up */
netif_carrier_on(bp->dev);
b44_link_report(bp);
} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
/* Link now down */
netif_carrier_off(bp->dev);
b44_link_report(bp);
}
if (bmsr & BMSR_RFAULT)
netdev_warn(bp->dev, "Remote fault detected in PHY\n");
if (bmsr & BMSR_JCD)
netdev_warn(bp->dev, "Jabber detected in PHY\n");
}
}
/*
 * b44_timer - periodic (1 Hz) housekeeping: link check and stats refresh.
 * @__opaque: the struct b44 * cast to unsigned long by the timer API.
 * Reschedules itself on a rounded-jiffies boundary.
 */
static void b44_timer(unsigned long __opaque)
{
struct b44 *bp = (struct b44 *) __opaque;
spin_lock_irq(&bp->lock);
b44_check_phy(bp);
b44_stats_update(bp);
spin_unlock_irq(&bp->lock);
mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}
/*
 * b44_tx - reclaim completed TX descriptors.
 *
 * Reads the hardware consumer index from DMATX_STAT and frees every skb
 * between the driver's consumer and that point, unmapping its DMA buffer.
 * Wakes the queue once enough descriptors are free again.
 */
static void b44_tx(struct b44 *bp)
{
u32 cur, cons;
/* Convert the byte-offset consumer pointer into a descriptor index. */
cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
cur /= sizeof(struct dma_desc);
/* XXX needs updating when NETIF_F_SG is supported */
for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
struct ring_info *rp = &bp->tx_buffers[cons];
struct sk_buff *skb = rp->skb;
BUG_ON(skb == NULL);
dma_unmap_single(bp->sdev->dma_dev,
rp->mapping,
skb->len,
DMA_TO_DEVICE);
rp->skb = NULL;
dev_kfree_skb_irq(skb);
}
bp->tx_cons = cons;
if (netif_queue_stopped(bp->dev) &&
TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
netif_wake_queue(bp->dev);
bw32(bp, B44_GPTIMER, 0);
}
/* Works like this. This chip writes a 'struct rx_header" 30 bytes
* before the DMA address you give it. So we allocate 30 more bytes
* for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
* point the chip at 30 bytes past where the rx_header will go.
*/
/*
 * b44_alloc_rx_skb - allocate and map a fresh RX buffer for ring slot
 * @dest_idx_unmasked (masked to the ring size internally).
 * @src_idx: slot whose ring_info should be invalidated after the move,
 * or a negative value for initial ring fill.
 *
 * Works around the chip's 1GB DMA limitation: if the first mapping lands
 * above 1GB it is retried from GFP_DMA memory, and copybreak is forced on
 * from then on. Returns the buffer size on success or -ENOMEM.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
struct dma_desc *dp;
struct ring_info *src_map, *map;
struct rx_header *rh;
struct sk_buff *skb;
dma_addr_t mapping;
int dest_idx;
u32 ctrl;
src_map = NULL;
if (src_idx >= 0)
src_map = &bp->rx_buffers[src_idx];
dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
map = &bp->rx_buffers[dest_idx];
skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
if (skb == NULL)
return -ENOMEM;
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
RX_PKT_BUF_SZ,
DMA_FROM_DEVICE);
/* Hardware bug work-around, the chip is unable to do PCI DMA
to/from anything above 1GB :-( */
if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
/* Sigh... */
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
dma_unmap_single(bp->sdev->dma_dev, mapping,
RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
/* Retry from the GFP_DMA zone, which should be reachable. */
skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
if (skb == NULL)
return -ENOMEM;
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
RX_PKT_BUF_SZ,
DMA_FROM_DEVICE);
if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
return -ENOMEM;
}
bp->force_copybreak = 1;
}
/* The chip writes its rx_header at the start of the buffer; clear it. */
rh = (struct rx_header *) skb->data;
rh->len = 0;
rh->flags = 0;
map->skb = skb;
map->mapping = mapping;
if (src_map != NULL)
src_map->skb = NULL;
ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
if (dest_idx == (B44_RX_RING_SIZE - 1))
ctrl |= DESC_CTRL_EOT;
dp = &bp->rx_ring[dest_idx];
dp->ctrl = cpu_to_le32(ctrl);
dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
if (bp->flags & B44_FLAG_RX_RING_HACK)
b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
dest_idx * sizeof(*dp),
DMA_BIDIRECTIONAL);
return RX_PKT_BUF_SZ;
}
/*
 * b44_recycle_rx - move an untouched RX buffer from @src_idx to the ring
 * slot given by @dest_idx_unmasked, instead of allocating a new one.
 *
 * Used for dropped/copied packets: the skb, DMA mapping and descriptor
 * address are transferred, the rx_header is cleared for reuse, and the
 * EOT bit is fixed up for the destination slot.
 */
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
struct dma_desc *src_desc, *dest_desc;
struct ring_info *src_map, *dest_map;
struct rx_header *rh;
int dest_idx;
__le32 ctrl;
dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
dest_desc = &bp->rx_ring[dest_idx];
dest_map = &bp->rx_buffers[dest_idx];
src_desc = &bp->rx_ring[src_idx];
src_map = &bp->rx_buffers[src_idx];
dest_map->skb = src_map->skb;
/* Reset the header so the chip can report the next packet's length. */
rh = (struct rx_header *) src_map->skb->data;
rh->len = 0;
rh->flags = 0;
dest_map->mapping = src_map->mapping;
if (bp->flags & B44_FLAG_RX_RING_HACK)
b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
src_idx * sizeof(*src_desc),
DMA_BIDIRECTIONAL);
/* Carry the control word over, adjusting end-of-table for the slot. */
ctrl = src_desc->ctrl;
if (dest_idx == (B44_RX_RING_SIZE - 1))
ctrl |= cpu_to_le32(DESC_CTRL_EOT);
else
ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
dest_desc->ctrl = ctrl;
dest_desc->addr = src_desc->addr;
src_map->skb = NULL;
if (bp->flags & B44_FLAG_RX_RING_HACK)
b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
dest_idx * sizeof(*dest_desc),
DMA_BIDIRECTIONAL);
dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
RX_PKT_BUF_SZ,
DMA_FROM_DEVICE);
}
/* Receive up to 'budget' packets from the RX ring and pass them up the
 * stack.  Returns the number of packets delivered.  Called from NAPI
 * poll context (b44_poll).
 */
static int b44_rx(struct b44 *bp, int budget)
{
int received;
u32 cons, prod;
received = 0;
/* Hardware producer index comes from the DMA status register,
 * expressed in bytes; convert to a descriptor index. */
prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
prod /= sizeof(struct dma_desc);
cons = bp->rx_cons;
while (cons != prod && budget > 0) {
struct ring_info *rp = &bp->rx_buffers[cons];
struct sk_buff *skb = rp->skb;
dma_addr_t map = rp->mapping;
struct rx_header *rh;
u16 len;
dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
RX_PKT_BUF_SZ,
DMA_FROM_DEVICE);
/* The chip prepends an rx_header with length and error flags. */
rh = (struct rx_header *) skb->data;
len = le16_to_cpu(rh->len);
if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
(rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
drop_it:
b44_recycle_rx(bp, cons, bp->rx_prod);
drop_it_no_recycle:
bp->dev->stats.rx_dropped++;
goto next_pkt;
}
if (len == 0) {
/* The descriptor completed but the header length may not
 * have landed yet; give the chip a few microseconds. */
int i = 0;
do {
udelay(2);
barrier();
len = le16_to_cpu(rh->len);
} while (len == 0 && i++ < 5);
if (len == 0)
goto drop_it;
}
/* Omit CRC. */
len -= 4;
if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
/* Large packet: hand the buffer up and allocate a fresh one
 * into the producer slot. */
int skb_size;
skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
if (skb_size < 0)
goto drop_it;
dma_unmap_single(bp->sdev->dma_dev, map,
skb_size, DMA_FROM_DEVICE);
/* Leave out rx_header */
skb_put(skb, len + RX_PKT_OFFSET);
skb_pull(skb, RX_PKT_OFFSET);
} else {
/* Small packet (or forced copy): recycle the DMA buffer and
 * deliver a private copy instead. */
struct sk_buff *copy_skb;
b44_recycle_rx(bp, cons, bp->rx_prod);
copy_skb = netdev_alloc_skb(bp->dev, len + 2);
if (copy_skb == NULL)
goto drop_it_no_recycle;
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
/* DMA sync done above, copy just the actual packet */
skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
copy_skb->data, len);
skb = copy_skb;
}
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, bp->dev);
netif_receive_skb(skb);
received++;
budget--;
next_pkt:
bp->rx_prod = (bp->rx_prod + 1) &
(B44_RX_RING_SIZE - 1);
cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
}
bp->rx_cons = cons;
/* Tell the chip how far we have consumed. */
bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
return received;
}
/* NAPI poll handler: reap TX completions, recover from RX FIFO
 * overflows, receive up to 'budget' packets and, when done, re-arm
 * interrupts.  Returns the amount of RX work performed.
 */
static int b44_poll(struct napi_struct *napi, int budget)
{
struct b44 *bp = container_of(napi, struct b44, napi);
int work_done;
unsigned long flags;
spin_lock_irqsave(&bp->lock, flags);
if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
/* spin_lock(&bp->tx_lock); */
b44_tx(bp);
/* spin_unlock(&bp->tx_lock); */
}
if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
bp->istat &= ~ISTAT_RFO;
b44_disable_ints(bp);
ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
netif_wake_queue(bp->dev);
}
spin_unlock_irqrestore(&bp->lock, flags);
work_done = 0;
/* RX runs unlocked; b44_rx() only touches RX state. */
if (bp->istat & ISTAT_RX)
work_done += b44_rx(bp, budget);
if (bp->istat & ISTAT_ERRORS) {
spin_lock_irqsave(&bp->lock, flags);
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
netif_wake_queue(bp->dev);
spin_unlock_irqrestore(&bp->lock, flags);
/* The reset discarded any pending work; report none so NAPI
 * completes and interrupts are re-enabled below. */
work_done = 0;
}
if (work_done < budget) {
napi_complete(napi);
b44_enable_ints(bp);
}
return work_done;
}
/* Hardware interrupt handler (may be shared).  Latches the pending
 * status into bp->istat, disables further device interrupts and kicks
 * NAPI; the real work happens in b44_poll().
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct b44 *bp = netdev_priv(dev);
u32 istat, imask;
int handled = 0;
spin_lock(&bp->lock);
istat = br32(bp, B44_ISTAT);
imask = br32(bp, B44_IMASK);
/* The interrupt mask register controls which interrupt bits
 * will actually raise an interrupt to the CPU when set by hw/firmware,
 * but doesn't mask off the bits.
 */
istat &= imask;
if (istat) {
handled = 1;
if (unlikely(!netif_running(dev))) {
netdev_info(dev, "late interrupt\n");
goto irq_ack;
}
if (napi_schedule_prep(&bp->napi)) {
/* NOTE: These writes are posted by the readback of
 * the ISTAT register below.
 */
bp->istat = istat;
__b44_disable_ints(bp);
__napi_schedule(&bp->napi);
}
irq_ack:
/* Acknowledge the handled bits; the read flushes the posted write. */
bw32(bp, B44_ISTAT, istat);
br32(bp, B44_ISTAT);
}
spin_unlock(&bp->lock);
return IRQ_RETVAL(handled);
}
/* ndo_tx_timeout: the stack detected a stuck transmit queue.  Fully
 * halt and re-initialize the chip and rings, then restart the queue.
 */
static void b44_tx_timeout(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
netdev_err(dev, "transmit timed out, resetting\n");
spin_lock_irq(&bp->lock);
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
spin_unlock_irq(&bp->lock);
/* Re-arm interrupts and the queue only after the lock is dropped. */
b44_enable_ints(bp);
netif_wake_queue(dev);
}
/* ndo_start_xmit: map the skb for DMA (bouncing through a GFP_DMA copy
 * if the bus address lands above the chip's 1GB limit), fill the next
 * TX descriptor and kick the DMA engine.  Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY when the packet could not be queued (caller requeues).
 */
static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
int rc = NETDEV_TX_OK;
dma_addr_t mapping;
u32 len, entry, ctrl;
unsigned long flags;
len = skb->len;
spin_lock_irqsave(&bp->lock, flags);
/* This is a hard error, log it. */
if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
netif_stop_queue(dev);
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
goto err_out;
}
mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
struct sk_buff *bounce_skb;
/* Chip can't handle DMA to/from >1GB, use bounce buffer */
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
dma_unmap_single(bp->sdev->dma_dev, mapping, len,
DMA_TO_DEVICE);
bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb)
goto err_out;
mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
len, DMA_TO_DEVICE);
if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
dma_unmap_single(bp->sdev->dma_dev, mapping,
len, DMA_TO_DEVICE);
dev_kfree_skb_any(bounce_skb);
goto err_out;
}
/* Bounce copy succeeded: from here on transmit the copy and
 * free the original skb. */
skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
dev_kfree_skb_any(skb);
skb = bounce_skb;
}
entry = bp->tx_prod;
bp->tx_buffers[entry].skb = skb;
bp->tx_buffers[entry].mapping = mapping;
ctrl = (len & DESC_CTRL_LEN);
ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
if (entry == (B44_TX_RING_SIZE - 1))
ctrl |= DESC_CTRL_EOT;
bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
if (bp->flags & B44_FLAG_TX_RING_HACK)
b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
entry * sizeof(bp->tx_ring[0]),
DMA_TO_DEVICE);
entry = NEXT_TX(entry);
bp->tx_prod = entry;
/* Descriptor must be visible before the doorbell write. */
wmb();
bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
/* Chip-errata workarounds: double doorbell / readback flush. */
if (bp->flags & B44_FLAG_BUGGY_TXPTR)
bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
if (bp->flags & B44_FLAG_REORDER_BUG)
br32(bp, B44_DMATX_PTR);
if (TX_BUFFS_AVAIL(bp) < 1)
netif_stop_queue(dev);
out_unlock:
spin_unlock_irqrestore(&bp->lock, flags);
return rc;
err_out:
rc = NETDEV_TX_BUSY;
goto out_unlock;
}
/* ndo_change_mtu: validate the new MTU and, if the interface is up,
 * restart the chip so the new RX/TX maximum-length registers take
 * effect (b44_init_hw programs them from dev->mtu).
 */
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
struct b44 *bp = netdev_priv(dev);
if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
return -EINVAL;
if (!netif_running(dev)) {
/* We'll just catch it later when the
 * device is up'd.
 */
dev->mtu = new_mtu;
return 0;
}
spin_lock_irq(&bp->lock);
b44_halt(bp);
dev->mtu = new_mtu;
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
spin_unlock_irq(&bp->lock);
b44_enable_ints(bp);
return 0;
}
/* Free up pending packets in all rx/tx rings.
*
* The chip has been shut down and the driver detached from
* the networking, so no interrupts or new tx packets will
* end up in the driver. bp->lock is not held and we are not
* in an interrupt context and thus may sleep.
*/
/* Unmap and free every SKB still attached to the RX and TX rings.
 * Caller guarantees the chip is quiesced (see comment above); bp->lock
 * is not held and sleeping is allowed.
 */
static void b44_free_rings(struct b44 *bp)
{
    int idx;

    /* RX side: every buffer was mapped for RX_PKT_BUF_SZ bytes. */
    for (idx = 0; idx < B44_RX_RING_SIZE; idx++) {
        struct ring_info *info = &bp->rx_buffers[idx];

        if (info->skb) {
            dma_unmap_single(bp->sdev->dma_dev, info->mapping,
                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
            dev_kfree_skb_any(info->skb);
            info->skb = NULL;
        }
    }

    /* TX side: mapping length is the skb's own length.
     * XXX needs changes once NETIF_F_SG is set... */
    for (idx = 0; idx < B44_TX_RING_SIZE; idx++) {
        struct ring_info *info = &bp->tx_buffers[idx];

        if (info->skb) {
            dma_unmap_single(bp->sdev->dma_dev, info->mapping,
                             info->skb->len, DMA_TO_DEVICE);
            dev_kfree_skb_any(info->skb);
            info->skb = NULL;
        }
    }
}
/* Initialize tx/rx rings for packet processing.
*
* The chip has been shut down and the driver detached from
* the networking, so no interrupts or new tx packets will
* end up in the driver.
*/
static void b44_init_rings(struct b44 *bp)
{
int i;
/* Drop any leftover buffers, then zero both descriptor tables. */
b44_free_rings(bp);
memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
/* Ring-hack mode means the tables are streaming mappings, so the
 * cleared contents must be pushed to the device explicitly. */
if (bp->flags & B44_FLAG_RX_RING_HACK)
dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
if (bp->flags & B44_FLAG_TX_RING_HACK)
dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
DMA_TABLE_BYTES, DMA_TO_DEVICE);
/* Pre-fill the RX ring; stop early (best effort) if allocation fails. */
for (i = 0; i < bp->rx_pending; i++) {
if (b44_alloc_rx_skb(bp, -1, i) < 0)
break;
}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
/* Release the ring_info arrays and the RX/TX descriptor tables,
 * undoing whichever allocation strategy b44_alloc_consistent() used
 * (coherent allocation, or kmalloc + streaming map when the
 * *_RING_HACK flag is set).
 */
static void b44_free_consistent(struct b44 *bp)
{
kfree(bp->rx_buffers);
bp->rx_buffers = NULL;
kfree(bp->tx_buffers);
bp->tx_buffers = NULL;
if (bp->rx_ring) {
if (bp->flags & B44_FLAG_RX_RING_HACK) {
dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
kfree(bp->rx_ring);
} else
dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
bp->rx_ring, bp->rx_ring_dma);
bp->rx_ring = NULL;
bp->flags &= ~B44_FLAG_RX_RING_HACK;
}
if (bp->tx_ring) {
if (bp->flags & B44_FLAG_TX_RING_HACK) {
dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
DMA_TABLE_BYTES, DMA_TO_DEVICE);
kfree(bp->tx_ring);
} else
dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
bp->tx_ring, bp->tx_ring_dma);
bp->tx_ring = NULL;
bp->flags &= ~B44_FLAG_TX_RING_HACK;
}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down. Can sleep.
 */
/* Allocate the ring_info bookkeeping arrays and the RX/TX descriptor
 * tables.  If the coherent allocator fails (or would place the table
 * out of the chip's 30-bit DMA range), fall back to kzalloc plus a
 * streaming mapping and mark the ring with the *_RING_HACK flag so the
 * rest of the driver knows to sync the table manually.
 * Returns 0 or -ENOMEM (all partial allocations are cleaned up).
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
int size;
size = B44_RX_RING_SIZE * sizeof(struct ring_info);
bp->rx_buffers = kzalloc(size, gfp);
if (!bp->rx_buffers)
goto out_err;
size = B44_TX_RING_SIZE * sizeof(struct ring_info);
bp->tx_buffers = kzalloc(size, gfp);
if (!bp->tx_buffers)
goto out_err;
size = DMA_TABLE_BYTES;
bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
&bp->rx_ring_dma, gfp);
if (!bp->rx_ring) {
/* Allocation may have failed due to pci_alloc_consistent
insisting on use of GFP_DMA, which is more restrictive
than necessary... */
struct dma_desc *rx_ring;
dma_addr_t rx_ring_dma;
rx_ring = kzalloc(size, gfp);
if (!rx_ring)
goto out_err;
rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
DMA_TABLE_BYTES,
DMA_BIDIRECTIONAL);
/* Streaming fallback must still satisfy the 1GB DMA limit. */
if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
rx_ring_dma + size > DMA_BIT_MASK(30)) {
kfree(rx_ring);
goto out_err;
}
bp->rx_ring = rx_ring;
bp->rx_ring_dma = rx_ring_dma;
bp->flags |= B44_FLAG_RX_RING_HACK;
}
bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
&bp->tx_ring_dma, gfp);
if (!bp->tx_ring) {
/* Allocation may have failed due to ssb_dma_alloc_consistent
insisting on use of GFP_DMA, which is more restrictive
than necessary... */
struct dma_desc *tx_ring;
dma_addr_t tx_ring_dma;
tx_ring = kzalloc(size, gfp);
if (!tx_ring)
goto out_err;
tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
DMA_TABLE_BYTES,
DMA_TO_DEVICE);
if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
tx_ring_dma + size > DMA_BIT_MASK(30)) {
kfree(tx_ring);
goto out_err;
}
bp->tx_ring = tx_ring;
bp->tx_ring_dma = tx_ring_dma;
bp->flags |= B44_FLAG_TX_RING_HACK;
}
return 0;
out_err:
/* b44_free_consistent() handles the partially-allocated state. */
b44_free_consistent(bp);
return -ENOMEM;
}
/* bp->lock is held. */
/* Zero the chip's MIB counters: enable clear-on-read mode and read
 * every TX and RX statistics register once.  bp->lock is held.
 */
static void b44_clear_stats(struct b44 *bp)
{
    unsigned long off;

    bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
    for (off = B44_TX_GOOD_O; off <= B44_TX_PAUSE; off += 4UL)
        br32(bp, off);
    for (off = B44_RX_GOOD_O; off <= B44_RX_NPAUSE; off += 4UL)
        br32(bp, off);
}
/* bp->lock is held. */
/* Reset the ethernet core.  If the core was already enabled, first
 * quiesce the MAC and both DMA engines.  For a PARTIAL reset the PHY
 * is left alone (we are likely about to power down); otherwise the
 * MDIO clock is programmed per bus type and the internal/external PHY
 * selection is latched into bp->flags.
 */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
struct ssb_device *sdev = bp->sdev;
bool was_enabled;
was_enabled = ssb_device_is_enabled(bp->sdev);
ssb_device_enable(bp->sdev, 0);
ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
if (was_enabled) {
/* Orderly shutdown: disable MAC, wait for it, stop TX DMA,
 * wait for RX DMA to idle, stop RX DMA. */
bw32(bp, B44_RCV_LAZY, 0);
bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
bw32(bp, B44_DMATX_CTRL, 0);
bp->tx_prod = bp->tx_cons = 0;
if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
100, 0);
}
bw32(bp, B44_DMARX_CTRL, 0);
bp->rx_prod = bp->rx_cons = 0;
}
b44_clear_stats(bp);
/*
 * Don't enable PHY if we are doing a partial reset
 * we are probably going to power down
 */
if (reset_kind == B44_CHIP_RESET_PARTIAL)
return;
switch (sdev->bus->bustype) {
case SSB_BUSTYPE_SSB:
/* Derive the MDIO clock divisor from the backplane clock. */
bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
(DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
B44_MDC_RATIO)
& MDIO_CTRL_MAXF_MASK)));
break;
case SSB_BUSTYPE_PCI:
bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
(0x0d & MDIO_CTRL_MAXF_MASK)));
break;
case SSB_BUSTYPE_PCMCIA:
case SSB_BUSTYPE_SDIO:
WARN_ON(1); /* A device with this bus does not exist. */
break;
}
br32(bp, B44_MDIO_CTRL);
if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
/* External PHY: select the external port. */
bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
br32(bp, B44_ENET_CTRL);
bp->flags &= ~B44_FLAG_INTERNAL_PHY;
} else {
u32 val = br32(bp, B44_DEVCTRL);
if (val & DEVCTRL_EPR) {
/* Take the internal PHY out of reset and let it settle. */
bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
br32(bp, B44_DEVCTRL);
udelay(100);
}
bp->flags |= B44_FLAG_INTERNAL_PHY;
}
}
/* bp->lock is held. */
/* Quiesce the device: mask interrupts, reset and power down the PHY,
 * then partially reset the chip (MAC/PHY stay disabled).
 */
static void b44_halt(struct b44 *bp)
{
b44_disable_ints(bp);
/* reset PHY */
b44_phy_reset(bp);
/* power down PHY */
netdev_info(bp->dev, "powering down PHY\n");
bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
/* now reset the chip, but without enabling the MAC&PHY
 * part of it. This has to be done _after_ we shut down the PHY */
b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
/* bp->lock is held. */
/* Program the unicast address into CAM slot 0 and enable the CAM.
 * In promiscuous mode the CAM is simply left disabled.
 */
static void __b44_set_mac_addr(struct b44 *bp)
{
    u32 cam_ctrl;

    bw32(bp, B44_CAM_CTRL, 0);
    if (bp->dev->flags & IFF_PROMISC)
        return;

    __b44_cam_write(bp, bp->dev->dev_addr, 0);
    cam_ctrl = br32(bp, B44_CAM_CTRL);
    bw32(bp, B44_CAM_CTRL, cam_ctrl | CAM_CTRL_ENABLE);
}
/* ndo_set_mac_address: copy the new address into the netdevice and,
 * unless the chip reports no CAM, program it into the hardware.
 * Refused while the interface is running.
 */
static int b44_set_mac_addr(struct net_device *dev, void *p)
{
    struct b44 *bp = netdev_priv(dev);
    struct sockaddr *sa = p;
    u32 rxcfg;

    if (netif_running(dev))
        return -EBUSY;
    if (!is_valid_ether_addr(sa->sa_data))
        return -EINVAL;

    memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

    spin_lock_irq(&bp->lock);
    rxcfg = br32(bp, B44_RXCONFIG);
    if (!(rxcfg & RXCONFIG_CAM_ABSENT))
        __b44_set_mac_addr(bp);
    spin_unlock_irq(&bp->lock);

    return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing. Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
u32 val;
b44_chip_reset(bp, B44_CHIP_RESET_FULL);
if (reset_kind == B44_FULL_RESET) {
b44_phy_reset(bp);
b44_setup_phy(bp);
}
/* Enable CRC32, set proper LED modes and power on PHY */
bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
/* This sets the MAC address too. */
__b44_set_rx_mode(bp->dev);
/* MTU + eth header + possible VLAN tag + struct rx_header */
bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
if (reset_kind == B44_PARTIAL_RESET) {
/* Partial reset (WOL path): only the RX engine is needed. */
bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
(RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
} else {
/* Full bring-up: point both DMA engines at their rings and
 * publish the initial RX producer index. */
bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
(RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
bw32(bp, B44_DMARX_PTR, bp->rx_pending);
bp->rx_prod = bp->rx_pending;
bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
}
val = br32(bp, B44_ENET_CTRL);
bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
/* ndo_open: allocate rings, bring up the hardware, request the IRQ and
 * start the periodic link timer.  Returns 0 or a negative errno (all
 * hardware/ring state is torn down again on failure).
 */
static int b44_open(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
int err;
err = b44_alloc_consistent(bp, GFP_KERNEL);
if (err)
goto out;
napi_enable(&bp->napi);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
b44_check_phy(bp);
err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
if (unlikely(err < 0)) {
/* Unwind everything done above. */
napi_disable(&bp->napi);
b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
b44_free_rings(bp);
b44_free_consistent(bp);
goto out;
}
/* Periodic link-state timer, first fire in one second. */
init_timer(&bp->timer);
bp->timer.expires = jiffies + HZ;
bp->timer.data = (unsigned long) bp;
bp->timer.function = b44_timer;
add_timer(&bp->timer);
b44_enable_ints(bp);
netif_start_queue(dev);
out:
return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
/* Invoke the normal handler manually with the line masked. */
disable_irq(dev->irq);
b44_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
/* Copy 'bytes' bytes of a wakeup pattern/mask into the chip's filter
 * table, one 32-bit word at a time, starting at table_offset.
 */
static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
    u32 *words = (u32 *) pp;
    u32 off;

    for (off = 0; off < bytes; off += sizeof(u32)) {
        bw32(bp, B44_FILT_ADDR, table_offset + off);
        bw32(bp, B44_FILT_DATA, words[off / sizeof(u32)]);
    }
}
/* Build a magic-packet wakeup pattern at 'offset' in ppattern: six
 * 0xff sync bytes followed by the MAC address repeated until the
 * pattern buffer is full, setting the corresponding bits in pmask.
 * Returns the index of the last byte written (pattern length - 1, as
 * the B44_WKUP_LEN register expects).
 */
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
    const int magicsync = 6;
    int len = offset;
    int i, k;

    /* Sync prefix: six 0xff bytes, all marked valid in the mask. */
    memset(ppattern + offset, 0xff, magicsync);
    for (i = 0; i < magicsync; i++)
        set_bit(len++, (unsigned long *) pmask);

    /* MAC address repeated up to B44_MAX_PATTERNS times, truncated
     * when the pattern buffer runs out. */
    for (i = 0; i < B44_MAX_PATTERNS; i++) {
        int nbytes = B44_PATTERN_SIZE - len;

        if (nbytes > ETH_ALEN)
            nbytes = ETH_ALEN;
        if (nbytes <= 0)
            break;
        for (k = 0; k < nbytes; k++) {
            ppattern[offset + magicsync + (i * ETH_ALEN) + k] =
                macaddr[k];
            set_bit(len++, (unsigned long *) pmask);
        }
    }
    return len - 1;
}
/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
/* Three patterns are installed, differing only in where the magic
 * sequence starts: after an IPv4/UDP header, a bare Ethernet header,
 * and an IPv6/UDP header.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
u32 val;
int plen0, plen1, plen2;
u8 *pwol_pattern;
u8 pwol_mask[B44_PMASK_SIZE];
pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
if (!pwol_pattern) {
pr_err("Memory not available for WOL\n");
return;
}
/* Ipv4 magic packet pattern - pattern 0.*/
memset(pwol_mask, 0, B44_PMASK_SIZE);
plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
B44_ETHIPV4UDP_HLEN);
bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
/* Raw ethernet II magic packet pattern - pattern 1 */
memset(pwol_pattern, 0, B44_PATTERN_SIZE);
memset(pwol_mask, 0, B44_PMASK_SIZE);
plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
ETH_HLEN);
bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
B44_PATTERN_BASE + B44_PATTERN_SIZE);
bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
B44_PMASK_BASE + B44_PMASK_SIZE);
/* Ipv6 magic packet pattern - pattern 2 */
memset(pwol_pattern, 0, B44_PATTERN_SIZE);
memset(pwol_mask, 0, B44_PMASK_SIZE);
plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
B44_ETHIPV6UDP_HLEN);
bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
kfree(pwol_pattern);
/* set these pattern's lengths: one less than each real length */
val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
bw32(bp, B44_WKUP_LEN, val);
/* enable wakeup pattern matching */
val = br32(bp, B44_DEVCTRL);
bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}
#ifdef CONFIG_B44_PCI
/* PCI-specific WOL plumbing: assert SSB power-save enable and set the
 * PME-enable bit in the host PCI power-management register.  A no-op
 * on plain SSB buses (and when CONFIG_B44_PCI is off, below).
 */
static void b44_setup_wol_pci(struct b44 *bp)
{
u16 val;
if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */
/* Arm the chip for wake-on-LAN.  B0-and-later cores have native magic
 * packet matching (program the MAC into ADDR_LO/HI and enable MPM);
 * older cores fall back to the pseudo-magic pattern filter.
 */
static void b44_setup_wol(struct b44 *bp)
{
u32 val;
bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
if (bp->flags & B44_FLAG_B0_ANDLATER) {
bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
/* ADDR_LO holds the last four MAC bytes, ADDR_HI the first two. */
val = bp->dev->dev_addr[2] << 24 |
bp->dev->dev_addr[3] << 16 |
bp->dev->dev_addr[4] << 8 |
bp->dev->dev_addr[5];
bw32(bp, B44_ADDR_LO, val);
val = bp->dev->dev_addr[0] << 8 |
bp->dev->dev_addr[1];
bw32(bp, B44_ADDR_HI, val);
val = br32(bp, B44_DEVCTRL);
bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
} else {
b44_setup_pseudo_magicp(bp);
}
b44_setup_wol_pci(bp);
}
/* ndo_stop: stop the queue, NAPI and link timer, halt the hardware,
 * release rings and the IRQ.  If WOL is enabled, re-arm the chip in
 * its low-power wake configuration before freeing the ring memory.
 */
static int b44_close(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
netif_stop_queue(dev);
napi_disable(&bp->napi);
del_timer_sync(&bp->timer);
spin_lock_irq(&bp->lock);
b44_halt(bp);
b44_free_rings(bp);
netif_carrier_off(dev);
spin_unlock_irq(&bp->lock);
free_irq(dev->irq, dev);
if (bp->flags & B44_FLAG_WOL_ENABLE) {
b44_init_hw(bp, B44_PARTIAL_RESET);
b44_setup_wol(bp);
}
b44_free_consistent(bp);
return 0;
}
/* ndo_get_stats: translate the driver's cached hardware MIB counters
 * (bp->hw_stats, refreshed elsewhere by b44_stats_update) into the
 * generic net_device_stats layout.
 */
static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
struct net_device_stats *nstat = &dev->stats;
struct b44_hw_stats *hwstat = &bp->hw_stats;
/* Convert HW stats into netdevice stats. */
nstat->rx_packets = hwstat->rx_pkts;
nstat->tx_packets = hwstat->tx_pkts;
nstat->rx_bytes = hwstat->rx_octets;
nstat->tx_bytes = hwstat->tx_octets;
nstat->tx_errors = (hwstat->tx_jabber_pkts +
hwstat->tx_oversize_pkts +
hwstat->tx_underruns +
hwstat->tx_excessive_cols +
hwstat->tx_late_cols);
/* NOTE(review): multicast is taken from the TX counter here —
 * looks odd for an RX-oriented field; confirm against chip docs. */
nstat->multicast = hwstat->tx_multicast_pkts;
nstat->collisions = hwstat->tx_total_cols;
nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
hwstat->rx_undersize);
nstat->rx_over_errors = hwstat->rx_missed_pkts;
nstat->rx_frame_errors = hwstat->rx_align_errs;
nstat->rx_crc_errors = hwstat->rx_crc_errs;
nstat->rx_errors = (hwstat->rx_jabber_pkts +
hwstat->rx_oversize_pkts +
hwstat->rx_missed_pkts +
hwstat->rx_crc_align_errs +
hwstat->rx_undersize +
hwstat->rx_crc_errs +
hwstat->rx_align_errs +
hwstat->rx_symbol_errs);
nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
/* Carrier lost counter seems to be broken for some devices */
nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
return nstat;
}
/* Load up to B44_MCAST_TABLE_SIZE multicast addresses into CAM slots
 * starting at index 1 (slot 0 holds the unicast address).  Returns the
 * index of the first unused CAM slot.
 */
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
    struct netdev_hw_addr *ha;
    int limit = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
    int count = 0;

    netdev_for_each_mc_addr(ha, dev) {
        if (count == limit)
            break;
        __b44_cam_write(bp, ha->addr, count + 1);
        count++;
    }
    return count + 1;
}
/* Program the receive filter.  Promiscuous mode (or a chip with no
 * CAM) just sets RXCONFIG_PROMISC; otherwise the unicast address and
 * multicast list go into the CAM, unused slots are zeroed, and the CAM
 * is enabled.  Caller holds bp->lock.
 */
static void __b44_set_rx_mode(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
u32 val;
val = br32(bp, B44_RXCONFIG);
val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
val |= RXCONFIG_PROMISC;
bw32(bp, B44_RXCONFIG, val);
} else {
unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
int i = 1;
__b44_set_mac_addr(bp);
/* Too many multicast entries (or ALLMULTI): skip the CAM list
 * and accept all multicast instead. */
if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
val |= RXCONFIG_ALLMULTI;
else
i = __b44_load_mcast(bp, dev);
/* Clear every remaining CAM slot. */
for (; i < 64; i++)
__b44_cam_write(bp, zero, i);
bw32(bp, B44_RXCONFIG, val);
val = br32(bp, B44_CAM_CTRL);
bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}
}
/* ndo_set_rx_mode: locked wrapper around __b44_set_rx_mode(). */
static void b44_set_rx_mode(struct net_device *dev)
{
    struct b44 *priv = netdev_priv(dev);

    spin_lock_irq(&priv->lock);
    __b44_set_rx_mode(dev);
    spin_unlock_irq(&priv->lock);
}
/* ethtool get_msglevel: report the driver's message-enable mask. */
static u32 b44_get_msglevel(struct net_device *dev)
{
    struct b44 *priv = netdev_priv(dev);

    return priv->msg_enable;
}
/* ethtool set_msglevel: store the new message-enable mask. */
static void b44_set_msglevel(struct net_device *dev, u32 value)
{
    struct b44 *priv = netdev_priv(dev);

    priv->msg_enable = value;
}
/* ethtool get_drvinfo: driver name/version plus a bus identifier that
 * depends on whether the core sits on PCI or a plain SSB backplane.
 */
static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
struct b44 *bp = netdev_priv(dev);
struct ssb_bus *bus = bp->sdev->bus;
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
break;
case SSB_BUSTYPE_SSB:
strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
break;
case SSB_BUSTYPE_PCMCIA:
case SSB_BUSTYPE_SDIO:
WARN_ON(1); /* A device with this bus does not exist. */
break;
}
}
/* ethtool nway_reset: restart PHY autonegotiation.  Returns -EINVAL if
 * autoneg is not currently enabled in BMCR.
 */
static int b44_nway_reset(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
u32 bmcr;
int r;
spin_lock_irq(&bp->lock);
/* BMCR is read twice — presumably the first read flushes stale or
 * latched PHY state; TODO confirm against the PHY datasheet. */
b44_readphy(bp, MII_BMCR, &bmcr);
b44_readphy(bp, MII_BMCR, &bmcr);
r = -EINVAL;
if (bmcr & BMCR_ANENABLE) {
b44_writephy(bp, MII_BMCR,
bmcr | BMCR_ANRESTART);
r = 0;
}
spin_unlock_irq(&bp->lock);
return r;
}
/* ethtool get_settings: report supported/advertised modes and the
 * current speed/duplex/autoneg state, all derived from bp->flags.
 * Speed/duplex are reported as unknown while the interface is down.
 */
static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
cmd->supported = (SUPPORTED_Autoneg);
cmd->supported |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_MII);
cmd->advertising = 0;
if (bp->flags & B44_FLAG_ADV_10HALF)
cmd->advertising |= ADVERTISED_10baseT_Half;
if (bp->flags & B44_FLAG_ADV_10FULL)
cmd->advertising |= ADVERTISED_10baseT_Full;
if (bp->flags & B44_FLAG_ADV_100HALF)
cmd->advertising |= ADVERTISED_100baseT_Half;
if (bp->flags & B44_FLAG_ADV_100FULL)
cmd->advertising |= ADVERTISED_100baseT_Full;
cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
SPEED_100 : SPEED_10));
cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
DUPLEX_FULL : DUPLEX_HALF;
cmd->port = 0;
cmd->phy_address = bp->phy_addr;
cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
AUTONEG_DISABLE : AUTONEG_ENABLE;
if (cmd->autoneg == AUTONEG_ENABLE)
cmd->advertising |= ADVERTISED_Autoneg;
/* Interface down: speed/duplex are meaningless. */
if (!netif_running(dev)){
ethtool_cmd_speed_set(cmd, 0);
cmd->duplex = 0xff;
}
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
return 0;
}
/* ethtool set_settings: validate the requested mode (gigabit is never
 * supported), translate it into bp->flags, and reprogram the PHY if
 * the interface is running.
 */
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
u32 speed = ethtool_cmd_speed(cmd);
/* We do not support gigabit. */
if (cmd->autoneg == AUTONEG_ENABLE) {
if (cmd->advertising &
(ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full))
return -EINVAL;
} else if ((speed != SPEED_100 &&
speed != SPEED_10) ||
(cmd->duplex != DUPLEX_HALF &&
cmd->duplex != DUPLEX_FULL)) {
return -EINVAL;
}
spin_lock_irq(&bp->lock);
if (cmd->autoneg == AUTONEG_ENABLE) {
bp->flags &= ~(B44_FLAG_FORCE_LINK |
B44_FLAG_100_BASE_T |
B44_FLAG_FULL_DUPLEX |
B44_FLAG_ADV_10HALF |
B44_FLAG_ADV_10FULL |
B44_FLAG_ADV_100HALF |
B44_FLAG_ADV_100FULL);
if (cmd->advertising == 0) {
/* Empty advertising mask means "advertise everything". */
bp->flags |= (B44_FLAG_ADV_10HALF |
B44_FLAG_ADV_10FULL |
B44_FLAG_ADV_100HALF |
B44_FLAG_ADV_100FULL);
} else {
if (cmd->advertising & ADVERTISED_10baseT_Half)
bp->flags |= B44_FLAG_ADV_10HALF;
if (cmd->advertising & ADVERTISED_10baseT_Full)
bp->flags |= B44_FLAG_ADV_10FULL;
if (cmd->advertising & ADVERTISED_100baseT_Half)
bp->flags |= B44_FLAG_ADV_100HALF;
if (cmd->advertising & ADVERTISED_100baseT_Full)
bp->flags |= B44_FLAG_ADV_100FULL;
}
} else {
/* Forced link: encode speed and duplex directly in the flags. */
bp->flags |= B44_FLAG_FORCE_LINK;
bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
if (speed == SPEED_100)
bp->flags |= B44_FLAG_100_BASE_T;
if (cmd->duplex == DUPLEX_FULL)
bp->flags |= B44_FLAG_FULL_DUPLEX;
}
if (netif_running(dev))
b44_setup_phy(bp);
spin_unlock_irq(&bp->lock);
return 0;
}
/* ethtool get_ringparam: report the RX ring limit and current fill
 * target.  TX is not reported (see XXX below).
 */
static void b44_get_ringparam(struct net_device *dev,
                              struct ethtool_ringparam *ering)
{
    struct b44 *priv = netdev_priv(dev);

    ering->rx_max_pending = B44_RX_RING_SIZE - 1;
    ering->rx_pending = priv->rx_pending;
    /* XXX ethtool lacks a tx_max_pending, oops... */
}
/* ethtool set_ringparam: validate the requested sizes, then restart
 * the chip with the new rx/tx pending counts (a full halt/reinit is
 * required because the rings are re-filled from scratch).
 */
static int b44_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
struct b44 *bp = netdev_priv(dev);
if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
(ering->rx_mini_pending != 0) ||
(ering->rx_jumbo_pending != 0) ||
(ering->tx_pending > B44_TX_RING_SIZE - 1))
return -EINVAL;
spin_lock_irq(&bp->lock);
bp->rx_pending = ering->rx_pending;
bp->tx_pending = ering->tx_pending;
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
netif_wake_queue(bp->dev);
spin_unlock_irq(&bp->lock);
b44_enable_ints(bp);
return 0;
}
/* ethtool get_pauseparam: report flow-control state from bp->flags. */
static void b44_get_pauseparam(struct net_device *dev,
                               struct ethtool_pauseparam *epause)
{
    struct b44 *priv = netdev_priv(dev);

    epause->autoneg  = !!(priv->flags & B44_FLAG_PAUSE_AUTO);
    epause->rx_pause = !!(priv->flags & B44_FLAG_RX_PAUSE);
    epause->tx_pause = !!(priv->flags & B44_FLAG_TX_PAUSE);
}
/* ethtool set_pauseparam: record the requested flow-control settings
 * in bp->flags.  Auto-negotiated pause requires a full chip restart;
 * manual pause only needs the flow-control registers updated.
 */
static int b44_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct b44 *bp = netdev_priv(dev);
spin_lock_irq(&bp->lock);
if (epause->autoneg)
bp->flags |= B44_FLAG_PAUSE_AUTO;
else
bp->flags &= ~B44_FLAG_PAUSE_AUTO;
if (epause->rx_pause)
bp->flags |= B44_FLAG_RX_PAUSE;
else
bp->flags &= ~B44_FLAG_RX_PAUSE;
if (epause->tx_pause)
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
} else {
__b44_set_flow_ctrl(bp, bp->flags);
}
spin_unlock_irq(&bp->lock);
b44_enable_ints(bp);
return 0;
}
/* ethtool get_strings: copy out the statistics-name table for
 * ETH_SS_STATS; other string sets are ignored.
 */
static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
    if (stringset == ETH_SS_STATS)
        memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
}
/* ethtool get_sset_count: only the statistics string set exists. */
static int b44_get_sset_count(struct net_device *dev, int sset)
{
    if (sset == ETH_SS_STATS)
        return ARRAY_SIZE(b44_gstrings);
    return -EOPNOTSUPP;
}
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy
 * them out in b44_gstrings order.
 */
static void b44_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct b44 *bp = netdev_priv(dev);
/* Walks b44_hw_stats as a flat u32 array starting at its first
 * member — relies on the struct layout matching b44_gstrings. */
u32 *val = &bp->hw_stats.tx_good_octets;
u32 i;
spin_lock_irq(&bp->lock);
b44_stats_update(bp);
for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
*data++ = *val++;
spin_unlock_irq(&bp->lock);
}
/* ethtool get_wol: only magic-packet wake is supported; report whether
 * it is currently enabled.
 */
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
    struct b44 *priv = netdev_priv(dev);

    wol->supported = WAKE_MAGIC;
    wol->wolopts = (priv->flags & B44_FLAG_WOL_ENABLE) ? WAKE_MAGIC : 0;
    memset(&wol->sopass, 0, sizeof(wol->sopass));
}
/* ethtool set_wol: latch the magic-packet wake request into bp->flags;
 * the hardware is actually armed at close time (b44_setup_wol).
 */
static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
    struct b44 *priv = netdev_priv(dev);

    spin_lock_irq(&priv->lock);
    if (!(wol->wolopts & WAKE_MAGIC))
        priv->flags &= ~B44_FLAG_WOL_ENABLE;
    else
        priv->flags |= B44_FLAG_WOL_ENABLE;
    spin_unlock_irq(&priv->lock);

    return 0;
}
/* ethtool entry points for the b44 driver. */
static const struct ethtool_ops b44_ethtool_ops = {
.get_drvinfo = b44_get_drvinfo,
.get_settings = b44_get_settings,
.set_settings = b44_set_settings,
.nway_reset = b44_nway_reset,
.get_link = ethtool_op_get_link,
.get_wol = b44_get_wol,
.set_wol = b44_set_wol,
.get_ringparam = b44_get_ringparam,
.set_ringparam = b44_set_ringparam,
.get_pauseparam = b44_get_pauseparam,
.set_pauseparam = b44_set_pauseparam,
.get_msglevel = b44_get_msglevel,
.set_msglevel = b44_set_msglevel,
.get_strings = b44_get_strings,
.get_sset_count = b44_get_sset_count,
.get_ethtool_stats = b44_get_ethtool_stats,
};
/* ndo_do_ioctl: forward MII ioctls to the generic MII layer under the
 * device lock.  Returns -EINVAL while the interface is down.
 */
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct b44 *priv = netdev_priv(dev);
    int err = -EINVAL;

    if (netif_running(dev)) {
        spin_lock_irq(&priv->lock);
        err = generic_mii_ioctl(&priv->mii_if, if_mii(ifr), cmd, NULL);
        spin_unlock_irq(&priv->lock);
    }
    return err;
}
/* Read per-device invariants from the SSB SPROM: MAC address, PHY
 * address and the DMA translation offset.  The second ethernet core on
 * an SSB backplane uses the et1 SPROM fields.  Returns 0, or -EINVAL
 * if the SPROM MAC address is invalid.
 */
static int __devinit b44_get_invariants(struct b44 *bp)
{
struct ssb_device *sdev = bp->sdev;
int err = 0;
u8 *addr;
bp->dma_offset = ssb_dma_translation(sdev);
if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
instance > 1) {
addr = sdev->bus->sprom.et1mac;
bp->phy_addr = sdev->bus->sprom.et1phyaddr;
} else {
addr = sdev->bus->sprom.et0mac;
bp->phy_addr = sdev->bus->sprom.et0phyaddr;
}
/* Some ROMs have buggy PHY addresses with the high
 * bits set (sign extension?). Truncate them to a
 * valid PHY address. */
bp->phy_addr &= 0x1F;
memcpy(bp->dev->dev_addr, addr, 6);
if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
pr_err("Invalid MAC address found in EEPROM\n");
return -EINVAL;
}
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
bp->imask = IMASK_DEF;
/* XXX - really required?
bp->flags |= B44_FLAG_BUGGY_TXPTR;
*/
/* Core revision 7 and later support the native magic-packet WOL. */
if (bp->sdev->id.revision >= 7)
bp->flags |= B44_FLAG_B0_ANDLATER;
return err;
}
/* Network device entry points for the b44 driver. */
static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats		= b44_get_stats,
	.ndo_set_rx_mode	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};
/* SSB probe routine: allocate and register the net_device for one b44
 * Ethernet core.  Returns 0 on success or a negative errno; on any
 * failure path all acquired resources are released.
 */
static int __devinit b44_init_one(struct ssb_device *sdev,
				  const struct ssb_device_id *ent)
{
	struct net_device *dev;
	struct b44 *bp;
	int err;

	/* Count cores so b44_get_invariants() can pick et0/et1 SPROM data. */
	instance++;

	pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->irq = sdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}

	/* BUGFIX: the original code jumped to err_out_powerdown with err
	 * still 0 (left over from the successful ssb_bus_powerup above),
	 * so a DMA-mask failure made probe report success even though the
	 * net_device had already been freed.  Propagate the real error. */
	err = dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30));
	if (!err)
		err = dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30));
	if (err) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system\n");
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	netif_carrier_off(dev);

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	if (b44_phy_reset(bp) < 0)
		bp->phy_addr = B44_PHY_ADDR_NO_PHY;

	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);

	return 0;

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	free_netdev(dev);

out:
	return err;
}
/* SSB remove routine: tear down in the reverse order of probe.
 * The netdev must be unregistered before the core is disabled and the
 * struct freed, since the stack may still call into the driver until
 * unregister_netdev() returns.
 */
static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}
/* SSB suspend hook: halt the MAC, detach the netdev and release the
 * rings/IRQ.  If Wake-on-LAN is enabled the chip is partially
 * re-initialized and armed for magic-packet wake before power-down.
 */
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* Stop the periodic link timer before taking the lock. */
	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}
/* SSB resume hook: power up the bus, rebuild the rings, re-initialize
 * the hardware and re-request the (shared) interrupt.  Returns 0 on
 * success or a negative errno, leaving the device halted on failure.
 */
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		/* Undo the hardware init done above. */
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	/* Restart the periodic link-check timer immediately. */
	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}
/* SSB bus driver glue for the b44 Ethernet core. */
static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
/* Register the PCI host glue when the kernel is configured with PCI
 * support for b44; otherwise this is a no-op that reports success.
 */
static inline int __init b44_pci_init(void)
{
#ifdef CONFIG_B44_PCI
	return ssb_pcihost_register(&b44_pci_driver);
#else
	return 0;
#endif
}
/* Unregister the PCI host glue; no-op when CONFIG_B44_PCI is unset. */
static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}
/* Module init: compute descriptor sync size and register the PCI glue
 * and SSB driver.  On SSB registration failure the PCI glue is rolled
 * back so the module loads/unloads cleanly.
 */
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}
/* Module exit: unregister in the reverse order of b44_init(). */
static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);
| gpl-2.0 |
S3neos/android_kernel_samsung_s3ve3g | net/irda/ircomm/ircomm_tty_attach.c | 5588 | 27880 | /*********************************************************************
*
* Filename: ircomm_tty_attach.c
* Version:
* Description: Code for attaching the serial driver to IrCOMM
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Jun 5 17:42:00 1999
* Modified at: Tue Jan 4 14:20:49 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
* Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*
********************************************************************/
#include <linux/init.h>
#include <linux/sched.h>
#include <net/irda/irda.h>
#include <net/irda/irlmp.h>
#include <net/irda/iriap.h>
#include <net/irda/irttp.h>
#include <net/irda/irias_object.h>
#include <net/irda/parameters.h>
#include <net/irda/ircomm_core.h>
#include <net/irda/ircomm_param.h>
#include <net/irda/ircomm_event.h>
#include <net/irda/ircomm_tty.h>
#include <net/irda/ircomm_tty_attach.h>
static void ircomm_tty_ias_register(struct ircomm_tty_cb *self);
static void ircomm_tty_discovery_indication(discinfo_t *discovery,
DISCOVERY_MODE mode,
void *priv);
static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
struct ias_value *value, void *priv);
static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
int timeout);
static void ircomm_tty_watchdog_timer_expired(void *data);
static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
/* Printable names for IRCOMM_TTY_STATE values, indexed by state number;
 * the final entry is an out-of-range marker used in debug output. */
const char *const ircomm_tty_state[] = {
	"IRCOMM_TTY_IDLE",
	"IRCOMM_TTY_SEARCH",
	"IRCOMM_TTY_QUERY_PARAMETERS",
	"IRCOMM_TTY_QUERY_LSAP_SEL",
	"IRCOMM_TTY_SETUP",
	"IRCOMM_TTY_READY",
	"*** ERROR *** ",
};
#ifdef CONFIG_IRDA_DEBUG
/* Printable names for IRCOMM_TTY_EVENT values (debug builds only),
 * indexed by event number. */
static const char *const ircomm_tty_event[] = {
	"IRCOMM_TTY_ATTACH_CABLE",
	"IRCOMM_TTY_DETACH_CABLE",
	"IRCOMM_TTY_DATA_REQUEST",
	"IRCOMM_TTY_DATA_INDICATION",
	"IRCOMM_TTY_DISCOVERY_REQUEST",
	"IRCOMM_TTY_DISCOVERY_INDICATION",
	"IRCOMM_TTY_CONNECT_CONFIRM",
	"IRCOMM_TTY_CONNECT_INDICATION",
	"IRCOMM_TTY_DISCONNECT_REQUEST",
	"IRCOMM_TTY_DISCONNECT_INDICATION",
	"IRCOMM_TTY_WD_TIMER_EXPIRED",
	"IRCOMM_TTY_GOT_PARAMETERS",
	"IRCOMM_TTY_GOT_LSAPSEL",
	"*** ERROR ****",
};
#endif /* CONFIG_IRDA_DEBUG */
/* State-machine dispatch table, indexed by self->state; entries must
 * stay in the same order as the IRCOMM_TTY_STATE enumeration. */
static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
		      struct sk_buff *skb, struct ircomm_tty_info *info) =
{
	ircomm_tty_state_idle,
	ircomm_tty_state_search,
	ircomm_tty_state_query_parameters,
	ircomm_tty_state_query_lsap_sel,
	ircomm_tty_state_setup,
	ircomm_tty_state_ready,
};
/*
 * Function ircomm_tty_attach_cable (driver)
 *
 *    Try to attach cable (IrCOMM link). This function will only return
 *    when the link has been connected, or if an error condition occurs.
 *    If success, the return value is the resulting service type.
 */
int ircomm_tty_attach_cable(struct ircomm_tty_cb *self)
{
	IRDA_DEBUG(0, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);

	/* Check if somebody has already connected to us */
	if (ircomm_is_connected(self->ircomm)) {
		IRDA_DEBUG(0, "%s(), already connected!\n", __func__ );
		return 0;
	}

	/* Make sure nobody tries to write before the link is up.
	 * NOTE(review): self->tty is dereferenced without a NULL check,
	 * unlike ircomm_tty_link_established() below — presumably the
	 * caller guarantees a tty is attached; confirm at the call sites. */
	self->tty->hw_stopped = 1;

	/* Publish our IAS entries so peers can find us, then kick the
	 * state machine into discovery. */
	ircomm_tty_ias_register(self);

	ircomm_tty_do_event(self, IRCOMM_TTY_ATTACH_CABLE, NULL, NULL);

	return 0;
}
/*
 * Function ircomm_detach_cable (driver)
 *
 *    Detach cable, or cable has been detached by peer
 *
 */
void ircomm_tty_detach_cable(struct ircomm_tty_cb *self)
{
	IRDA_DEBUG(0, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);

	del_timer(&self->watchdog_timer);

	/* Remove discovery handler */
	if (self->ckey) {
		irlmp_unregister_client(self->ckey);
		self->ckey = NULL;
	}
	/* Remove IrCOMM hint bits */
	if (self->skey) {
		irlmp_unregister_service(self->skey);
		self->skey = NULL;
	}

	/* Abort any IAS query still in flight. */
	if (self->iriap) {
		iriap_close(self->iriap);
		self->iriap = NULL;
	}

	/* Remove LM-IAS object */
	if (self->obj) {
		irias_delete_object(self->obj);
		self->obj = NULL;
	}

	ircomm_tty_do_event(self, IRCOMM_TTY_DETACH_CABLE, NULL, NULL);

	/* Reset some values */
	self->daddr = self->saddr = 0;
	self->dlsap_sel = self->slsap_sel = 0;

	memset(&self->settings, 0, sizeof(struct ircomm_params));
}
/*
 * Function ircomm_tty_ias_register (self)
 *
 *    Register with LM-IAS depending on which service type we are
 *
 */
static void ircomm_tty_ias_register(struct ircomm_tty_cb *self)
{
	__u8 oct_seq[6];
	__u16 hints;

	IRDA_DEBUG(0, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);

	/* Compute hint bits based on service */
	hints = irlmp_service_to_hint(S_COMM);
	if (self->service_type & IRCOMM_3_WIRE_RAW)
		hints |= irlmp_service_to_hint(S_PRINTER);

	/* Advertise IrCOMM hint bit in discovery */
	if (!self->skey)
		self->skey = irlmp_register_service(hints);
	/* Set up a discovery handler */
	if (!self->ckey)
		self->ckey = irlmp_register_client(hints,
						   ircomm_tty_discovery_indication,
						   NULL, (void *) self);

	/* If already done, no need to do it again */
	if (self->obj)
		return;

	if (self->service_type & IRCOMM_3_WIRE_RAW) {
		/* Register IrLPT with LM-IAS */
		self->obj = irias_new_object("IrLPT", IAS_IRLPT_ID);
		irias_add_integer_attrib(self->obj, "IrDA:IrLMP:LsapSel",
					 self->slsap_sel, IAS_KERNEL_ATTR);
	} else {
		/* Register IrCOMM with LM-IAS */
		self->obj = irias_new_object("IrDA:IrCOMM", IAS_IRCOMM_ID);
		irias_add_integer_attrib(self->obj, "IrDA:TinyTP:LsapSel",
					 self->slsap_sel, IAS_KERNEL_ATTR);

		/* Code the parameters into the buffer: two (PI, PL, PV)
		 * triples of one byte each, 6 bytes total. */
		irda_param_pack(oct_seq, "bbbbbb",
				IRCOMM_SERVICE_TYPE, 1, self->service_type,
				IRCOMM_PORT_TYPE,    1, IRCOMM_SERIAL);

		/* Register parameters with LM-IAS */
		irias_add_octseq_attrib(self->obj, "Parameters", oct_seq, 6,
					IAS_KERNEL_ATTR);
	}
	irias_insert_object(self->obj);
}
/*
 * Function ircomm_tty_ias_unregister (self)
 *
 *    Remove our IAS object and client hook while connected.
 *
 */
static void ircomm_tty_ias_unregister(struct ircomm_tty_cb *self)
{
	/* Remove LM-IAS object now so it is not reused.
	 * IrCOMM deals very poorly with multiple incoming connections.
	 * It should looks a lot more like IrNET, and "dup" a server TSAP
	 * to the application TSAP (based on various rules).
	 * This is a cheap workaround allowing multiple clients to
	 * connect to us. It will not always work.
	 * Each IrCOMM socket has an IAS entry. Incoming connection will
	 * pick the first one found. So, when we are fully connected,
	 * we remove our IAS entries so that the next IAS entry is used.
	 * We do that for *both* client and server, because a server
	 * can also create client instances.
	 * Jean II */
	if (self->obj) {
		irias_delete_object(self->obj);
		self->obj = NULL;
	}

#if 0
	/* Remove discovery handler.
	 * While we are connected, we no longer need to receive
	 * discovery events. This would be the case if there is
	 * multiple IrLAP interfaces. Jean II */
	if (self->ckey) {
		irlmp_unregister_client(self->ckey);
		self->ckey = NULL;
	}
#endif
}
/*
 * Function ircomm_send_initial_parameters (self)
 *
 *    Send initial parameters to the remote IrCOMM device. These parameters
 *    must be sent before any data.
 */
int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
{
	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);

	/* Raw 3-wire (IrLPT) service has no parameter channel. */
	if (self->service_type & IRCOMM_3_WIRE_RAW)
		return 0;

	/*
	 * Set default values, but only if the application for some reason
	 * haven't set them already
	 */
	IRDA_DEBUG(2, "%s(), data-rate = %d\n", __func__ ,
		   self->settings.data_rate);
	if (!self->settings.data_rate)
		self->settings.data_rate = 9600;
	IRDA_DEBUG(2, "%s(), data-format = %d\n", __func__ ,
		   self->settings.data_format);
	if (!self->settings.data_format)
		self->settings.data_format = IRCOMM_WSIZE_8;  /* 8N1 */

	IRDA_DEBUG(2, "%s(), flow-control = %d\n", __func__ ,
		   self->settings.flow_control);
	/*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/

	/* Do not set delta values for the initial parameters */
	self->settings.dte = IRCOMM_DTR | IRCOMM_RTS;

	/* Only send service type parameter when we are the client */
	if (self->client)
		ircomm_param_request(self, IRCOMM_SERVICE_TYPE, FALSE);
	ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE);
	ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE);

	/* For a 3 wire service, we just flush the last parameter and return */
	if (self->settings.service_type == IRCOMM_3_WIRE) {
		ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE);
		return 0;
	}

	/* Only 9-wire service types continue here */
	ircomm_param_request(self, IRCOMM_FLOW_CONTROL, FALSE);
#if 0
	ircomm_param_request(self, IRCOMM_XON_XOFF, FALSE);
	ircomm_param_request(self, IRCOMM_ENQ_ACK, FALSE);
#endif
	/* Notify peer that we are ready to receive data */
	ircomm_param_request(self, IRCOMM_DTE, TRUE);

	return 0;
}
/*
 * Function ircomm_tty_discovery_indication (discovery)
 *
 *    Remote device is discovered, try query the remote IAS to see which
 *    device it is, and which services it has.
 *
 */
static void ircomm_tty_discovery_indication(discinfo_t *discovery,
					    DISCOVERY_MODE mode,
					    void *priv)
{
	struct ircomm_tty_cb *self;
	struct ircomm_tty_info info;

	IRDA_DEBUG(2, "%s()\n", __func__ );

	/* Important note :
	 * We need to drop all passive discoveries.
	 * The LSAP management of IrComm is deficient and doesn't deal
	 * with the case of two instance connecting to each other
	 * simultaneously (it will deadlock in LMP).
	 * The proper fix would be to use the same technique as in IrNET,
	 * to have one server socket and separate instances for the
	 * connecting/connected socket.
	 * The workaround is to drop passive discovery, which drastically
	 * reduce the probability of this happening.
	 * Jean II */
	if(mode == DISCOVERY_PASSIVE)
		return;

	/* Hand the peer's addresses to the state machine. */
	info.daddr = discovery->daddr;
	info.saddr = discovery->saddr;

	self = priv;
	ircomm_tty_do_event(self, IRCOMM_TTY_DISCOVERY_INDICATION,
			    NULL, &info);
}
/*
 * Function ircomm_tty_disconnect_indication (instance, sap, reason, skb)
 *
 *    Link disconnected
 *
 */
void ircomm_tty_disconnect_indication(void *instance, void *sap,
				      LM_REASON reason,
				      struct sk_buff *skb)
{
	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;

	IRDA_DEBUG(2, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);

	/* Nothing to tear down if the tty side is already gone. */
	if (!self->tty)
		return;

	/* This will stop control data transfers */
	self->flow = FLOW_STOP;

	/* Stop data transfers */
	self->tty->hw_stopped = 1;

	ircomm_tty_do_event(self, IRCOMM_TTY_DISCONNECT_INDICATION, NULL,
			    NULL);
}
/*
 * Function ircomm_tty_getvalue_confirm (result, obj_id, value, priv)
 *
 *    Got result from the IAS query we make
 *
 */
static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
					struct ias_value *value,
					void *priv)
{
	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv;

	IRDA_DEBUG(2, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);

	/* We probably don't need to make any more queries */
	iriap_close(self->iriap);
	self->iriap = NULL;

	/* Check if request succeeded */
	if (result != IAS_SUCCESS) {
		IRDA_DEBUG(4, "%s(), got NULL value!\n", __func__ );
		return;
	}

	/* The value type tells us which query this answers: an octet
	 * sequence is the "Parameters" attribute, an integer is the
	 * peer's LSAP selector. */
	switch (value->type) {
	case IAS_OCT_SEQ:
		IRDA_DEBUG(2, "%s(), got octet sequence\n", __func__ );

		irda_param_extract_all(self, value->t.oct_seq, value->len,
				       &ircomm_param_info);

		ircomm_tty_do_event(self, IRCOMM_TTY_GOT_PARAMETERS, NULL,
				    NULL);
		break;
	case IAS_INTEGER:
		/* Got LSAP selector */
		IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __func__ ,
			   value->t.integer);

		if (value->t.integer == -1) {
			IRDA_DEBUG(0, "%s(), invalid value!\n", __func__ );
		} else
			self->dlsap_sel = value->t.integer;

		ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL);
		break;
	case IAS_MISSING:
		IRDA_DEBUG(0, "%s(), got IAS_MISSING\n", __func__ );
		break;
	default:
		IRDA_DEBUG(0, "%s(), got unknown type!\n", __func__ );
		break;
	}
	irias_delete_value(value);
}
/*
 * Function ircomm_tty_connect_confirm (instance, sap, qos, max_sdu_size, skb)
 *
 *    Connection confirmed
 *
 */
void ircomm_tty_connect_confirm(void *instance, void *sap,
				struct qos_info *qos,
				__u32 max_data_size,
				__u8 max_header_size,
				struct sk_buff *skb)
{
	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;

	IRDA_DEBUG(2, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);

	/* Our connect request succeeded, so we are the client side. */
	self->client = TRUE;
	self->max_data_size = max_data_size;
	self->max_header_size = max_header_size;
	self->flow = FLOW_START;

	ircomm_tty_do_event(self, IRCOMM_TTY_CONNECT_CONFIRM, NULL, NULL);

	/* No need to kfree_skb - see ircomm_ttp_connect_confirm() */
}
/*
 * Function ircomm_tty_connect_indication (instance, sap, qos, max_sdu_size,
 *                                         skb)
 *
 *    we are discovered and being requested to connect by remote device !
 *
 */
void ircomm_tty_connect_indication(void *instance, void *sap,
				   struct qos_info *qos,
				   __u32 max_data_size,
				   __u8 max_header_size,
				   struct sk_buff *skb)
{
	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
	int clen;

	IRDA_DEBUG(2, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);

	/* The peer connected to us, so we are the server side. */
	self->client = FALSE;
	self->max_data_size = max_data_size;
	self->max_header_size = max_header_size;
	self->flow = FLOW_START;

	/* First byte of the payload is the control-parameter length.
	 * NOTE(review): skb->data[0] is read without checking skb->len
	 * first — presumably the TTP layer guarantees a non-empty frame
	 * here; confirm against ircomm_ttp_connect_indication(). */
	clen = skb->data[0];
	if (clen)
		irda_param_extract_all(self, skb->data+1,
				       IRDA_MIN(skb->len, clen),
				       &ircomm_param_info);

	ircomm_tty_do_event(self, IRCOMM_TTY_CONNECT_INDICATION, NULL, NULL);

	/* No need to kfree_skb - see ircomm_ttp_connect_indication() */
}
/*
 * Function ircomm_tty_link_established (self)
 *
 *    Called when the IrCOMM link is established
 *
 */
void ircomm_tty_link_established(struct ircomm_tty_cb *self)
{
	IRDA_DEBUG(2, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);

	if (!self->tty)
		return;

	del_timer(&self->watchdog_timer);

	/*
	 * IrCOMM link is now up, and if we are not using hardware
	 * flow-control, then declare the hardware as running. Otherwise we
	 * will have to wait for the peer device (DCE) to raise the CTS
	 * line.
	 */
	if ((self->flags & ASYNC_CTS_FLOW) && ((self->settings.dce & IRCOMM_CTS) == 0)) {
		IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __func__ );
		return;
	} else {
		IRDA_DEBUG(1, "%s(), starting hardware!\n", __func__ );

		self->tty->hw_stopped = 0;

		/* Wake up processes blocked on open */
		wake_up_interruptible(&self->open_wait);
	}

	/* Defer the rest of the wakeup work to process context. */
	schedule_work(&self->tqueue);
}
/*
 * Function ircomm_tty_start_watchdog_timer (self, timeout)
 *
 *    Start the watchdog timer. This timer is used to make sure that any
 *    connection attempt is successful, and if not, we will retry after
 *    the timeout
 */
static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
					    int timeout)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);

	irda_start_timer(&self->watchdog_timer, timeout, (void *) self,
			 ircomm_tty_watchdog_timer_expired);
}
/*
 * Function ircomm_tty_watchdog_timer_expired (data)
 *
 *    Called when the connect procedure has taken too much time; feeds
 *    a WD_TIMER_EXPIRED event into the state machine so it can retry
 *    or give up.
 *
 */
static void ircomm_tty_watchdog_timer_expired(void *data)
{
	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data;

	IRDA_DEBUG(2, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);

	ircomm_tty_do_event(self, IRCOMM_TTY_WD_TIMER_EXPIRED, NULL, NULL);
}
/*
 * Function ircomm_tty_do_event (self, event, skb)
 *
 *    Process event: dispatch to the handler for the current state via
 *    the state[] table.  Returns the handler's result (0 or -errno).
 *
 */
int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
			struct sk_buff *skb, struct ircomm_tty_info *info)
{
	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);

	IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
		   ircomm_tty_state[self->state], ircomm_tty_event[event]);

	return (*state[self->state])(self, event, skb, info);
}
/*
 * Function ircomm_tty_next_state (self, state)
 *
 *    Switch state
 *
 */
static inline void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_STATE state)
{
	/*
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);

	IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __func__ ,
		   ircomm_tty_state[self->state], self->service_type);
	*/
	self->state = state;
}
/*
 * Function ircomm_tty_state_idle (self, event, skb, info)
 *
 *    Just hanging around.  Handles cable attach (start discovery),
 *    passive discovery hits (start an IAS parameters query) and
 *    incoming connections.
 *
 */
static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
				 IRCOMM_TTY_EVENT event,
				 struct sk_buff *skb,
				 struct ircomm_tty_info *info)
{
	int ret = 0;

	IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
		   ircomm_tty_state[self->state], ircomm_tty_event[event]);
	switch (event) {
	case IRCOMM_TTY_ATTACH_CABLE:
		/* Try to discover any remote devices */
		ircomm_tty_start_watchdog_timer(self, 3*HZ);
		ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);

		irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
		break;
	case IRCOMM_TTY_DISCOVERY_INDICATION:
		self->daddr = info->daddr;
		self->saddr = info->saddr;

		if (self->iriap) {
			IRDA_WARNING("%s(), busy with a previous query\n",
				     __func__);
			return -EBUSY;
		}

		self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
					 ircomm_tty_getvalue_confirm);

		/* Ask the peer for its IrCOMM parameter set. */
		iriap_getvaluebyclass_request(self->iriap,
					      self->saddr, self->daddr,
					      "IrDA:IrCOMM", "Parameters");

		ircomm_tty_start_watchdog_timer(self, 3*HZ);
		ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS);
		break;
	case IRCOMM_TTY_CONNECT_INDICATION:
		del_timer(&self->watchdog_timer);

		/* Accept connection */
		ircomm_connect_response(self->ircomm, NULL);
		ircomm_tty_next_state(self, IRCOMM_TTY_READY);
		break;
	case IRCOMM_TTY_WD_TIMER_EXPIRED:
		/* Just stay idle */
		break;
	case IRCOMM_TTY_DETACH_CABLE:
		ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
		break;
	default:
		IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
			   ircomm_tty_event[event]);
		ret = -EINVAL;
	}
	return ret;
}
/*
 * Function ircomm_tty_state_search (self, event, skb, info)
 *
 *    Trying to discover an IrCOMM device.  On a discovery hit we query
 *    the peer IAS — for IrLPT (3-wire raw) directly for its LSAP
 *    selector, otherwise for the IrCOMM parameter set first.
 *
 */
static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
				   IRCOMM_TTY_EVENT event,
				   struct sk_buff *skb,
				   struct ircomm_tty_info *info)
{
	int ret = 0;

	IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
		   ircomm_tty_state[self->state], ircomm_tty_event[event]);

	switch (event) {
	case IRCOMM_TTY_DISCOVERY_INDICATION:
		self->daddr = info->daddr;
		self->saddr = info->saddr;

		if (self->iriap) {
			IRDA_WARNING("%s(), busy with a previous query\n",
				     __func__);
			return -EBUSY;
		}

		self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
					 ircomm_tty_getvalue_confirm);

		if (self->service_type == IRCOMM_3_WIRE_RAW) {
			iriap_getvaluebyclass_request(self->iriap, self->saddr,
						      self->daddr, "IrLPT",
						      "IrDA:IrLMP:LsapSel");
			ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_LSAP_SEL);
		} else {
			iriap_getvaluebyclass_request(self->iriap, self->saddr,
						      self->daddr,
						      "IrDA:IrCOMM",
						      "Parameters");
			ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS);
		}
		ircomm_tty_start_watchdog_timer(self, 3*HZ);
		break;
	case IRCOMM_TTY_CONNECT_INDICATION:
		del_timer(&self->watchdog_timer);
		ircomm_tty_ias_unregister(self);

		/* Accept connection */
		ircomm_connect_response(self->ircomm, NULL);
		ircomm_tty_next_state(self, IRCOMM_TTY_READY);
		break;
	case IRCOMM_TTY_WD_TIMER_EXPIRED:
#if 1
		/* Give up */
#else
		/* Try to discover any remote devices */
		ircomm_tty_start_watchdog_timer(self, 3*HZ);
		irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
#endif
		break;
	case IRCOMM_TTY_DETACH_CABLE:
		ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
		break;
	default:
		IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
			   ircomm_tty_event[event]);
		ret = -EINVAL;
	}
	return ret;
}
/*
 * Function ircomm_tty_state_query (self, event, skb, info)
 *
 *    Querying the remote LM-IAS for IrCOMM parameters.  Once the
 *    parameters arrive we follow up with a query for the peer's TinyTP
 *    LSAP selector; on timeout we fall back to search.
 *
 */
static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
					     IRCOMM_TTY_EVENT event,
					     struct sk_buff *skb,
					     struct ircomm_tty_info *info)
{
	int ret = 0;

	IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
		   ircomm_tty_state[self->state], ircomm_tty_event[event]);

	switch (event) {
	case IRCOMM_TTY_GOT_PARAMETERS:
		if (self->iriap) {
			IRDA_WARNING("%s(), busy with a previous query\n",
				     __func__);
			return -EBUSY;
		}

		self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
					 ircomm_tty_getvalue_confirm);

		iriap_getvaluebyclass_request(self->iriap, self->saddr,
					      self->daddr, "IrDA:IrCOMM",
					      "IrDA:TinyTP:LsapSel");

		ircomm_tty_start_watchdog_timer(self, 3*HZ);
		ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_LSAP_SEL);
		break;
	case IRCOMM_TTY_WD_TIMER_EXPIRED:
		/* Go back to search mode */
		ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
		ircomm_tty_start_watchdog_timer(self, 3*HZ);
		break;
	case IRCOMM_TTY_CONNECT_INDICATION:
		del_timer(&self->watchdog_timer);
		ircomm_tty_ias_unregister(self);

		/* Accept connection */
		ircomm_connect_response(self->ircomm, NULL);
		ircomm_tty_next_state(self, IRCOMM_TTY_READY);
		break;
	case IRCOMM_TTY_DETACH_CABLE:
		ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
		break;
	default:
		IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
			   ircomm_tty_event[event]);
		ret = -EINVAL;
	}
	return ret;
}
/*
 * Function ircomm_tty_state_query_lsap_sel (self, event, skb, info)
 *
 *    Query remote LM-IAS for the LSAP selector which we can connect to.
 *    Once it arrives we issue the actual connect request and move to
 *    SETUP; on timeout we fall back to search.
 *
 */
static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
					   IRCOMM_TTY_EVENT event,
					   struct sk_buff *skb,
					   struct ircomm_tty_info *info)
{
	int ret = 0;

	IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
		   ircomm_tty_state[self->state], ircomm_tty_event[event]);

	switch (event) {
	case IRCOMM_TTY_GOT_LSAPSEL:
		/* Connect to remote device */
		ret = ircomm_connect_request(self->ircomm, self->dlsap_sel,
					     self->saddr, self->daddr,
					     NULL, self->service_type);
		ircomm_tty_start_watchdog_timer(self, 3*HZ);
		ircomm_tty_next_state(self, IRCOMM_TTY_SETUP);
		break;
	case IRCOMM_TTY_WD_TIMER_EXPIRED:
		/* Go back to search mode */
		ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
		ircomm_tty_start_watchdog_timer(self, 3*HZ);
		break;
	case IRCOMM_TTY_CONNECT_INDICATION:
		del_timer(&self->watchdog_timer);
		ircomm_tty_ias_unregister(self);

		/* Accept connection */
		ircomm_connect_response(self->ircomm, NULL);
		ircomm_tty_next_state(self, IRCOMM_TTY_READY);
		break;
	case IRCOMM_TTY_DETACH_CABLE:
		ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
		break;
	default:
		IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
			   ircomm_tty_event[event]);
		ret = -EINVAL;
	}
	return ret;
}
/*
 * Function ircomm_tty_state_setup (self, event, skb, info)
 *
 *    Trying to connect.  A confirm completes the client-side connect;
 *    a crossing connect indication is accepted as server instead.
 *
 */
static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
				  IRCOMM_TTY_EVENT event,
				  struct sk_buff *skb,
				  struct ircomm_tty_info *info)
{
	int ret = 0;

	IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
		   ircomm_tty_state[self->state], ircomm_tty_event[event]);

	switch (event) {
	case IRCOMM_TTY_CONNECT_CONFIRM:
		del_timer(&self->watchdog_timer);
		ircomm_tty_ias_unregister(self);

		/*
		 * Send initial parameters. This will also send out queued
		 * parameters waiting for the connection to come up
		 */
		ircomm_tty_send_initial_parameters(self);
		ircomm_tty_link_established(self);
		ircomm_tty_next_state(self, IRCOMM_TTY_READY);
		break;
	case IRCOMM_TTY_CONNECT_INDICATION:
		del_timer(&self->watchdog_timer);
		ircomm_tty_ias_unregister(self);

		/* Accept connection */
		ircomm_connect_response(self->ircomm, NULL);
		ircomm_tty_next_state(self, IRCOMM_TTY_READY);
		break;
	case IRCOMM_TTY_WD_TIMER_EXPIRED:
		/* Go back to search mode */
		ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
		ircomm_tty_start_watchdog_timer(self, 3*HZ);
		break;
	case IRCOMM_TTY_DETACH_CABLE:
		/* ircomm_disconnect_request(self->ircomm, NULL); */
		ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
		break;
	default:
		IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
			   ircomm_tty_event[event]);
		ret = -EINVAL;
	}
	return ret;
}
/*
 * Function ircomm_tty_state_ready (self, event, skb, info)
 *
 *    IrCOMM is now connected.  Data requests are forwarded to the core;
 *    on disconnect we re-register with IAS and go back to searching.
 *
 */
static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
				  IRCOMM_TTY_EVENT event,
				  struct sk_buff *skb,
				  struct ircomm_tty_info *info)
{
	int ret = 0;

	switch (event) {
	case IRCOMM_TTY_DATA_REQUEST:
		ret = ircomm_data_request(self->ircomm, skb);
		break;
	case IRCOMM_TTY_DETACH_CABLE:
		ircomm_disconnect_request(self->ircomm, NULL);
		ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
		break;
	case IRCOMM_TTY_DISCONNECT_INDICATION:
		/* Make ourselves discoverable again and retry. */
		ircomm_tty_ias_register(self);
		ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
		ircomm_tty_start_watchdog_timer(self, 3*HZ);

		if (self->flags & ASYNC_CHECK_CD) {
			/* Drop carrier */
			self->settings.dce = IRCOMM_DELTA_CD;
			ircomm_tty_check_modem_status(self);
		} else {
			IRDA_DEBUG(0, "%s(), hanging up!\n", __func__ );
			if (self->tty)
				tty_hangup(self->tty);
		}
		break;
	default:
		IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
			   ircomm_tty_event[event]);
		ret = -EINVAL;
	}
	return ret;
}
| gpl-2.0 |
CyanogenMod/android_kernel_lge_ls970 | arch/sparc/kernel/iommu.c | 6868 | 21361 | /* iommu.c: Generic sparc64 IOMMU support.
*
* Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
#include <asm/iommu.h>
#include "iommu_common.h"
#define STC_CTXMATCH_ADDR(STC, CTX) \
((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
(*((STC)->strbuf_flushflag) != 0UL)
#define iommu_read(__reg) \
({ u64 __ret; \
__asm__ __volatile__("ldxa [%1] %2, %0" \
: "=r" (__ret) \
: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
: "memory"); \
__ret; \
})
#define iommu_write(__reg, __val) \
__asm__ __volatile__("stxa %0, [%1] %2" \
: /* no outputs */ \
: "r" (__val), "r" (__reg), \
"i" (ASI_PHYS_BYPASS_EC_E))
/* Invalidate the entire IOTLB.  Must be invoked under the IOMMU lock.
 * Chips with a flush/invalidate-all register use a single write; older
 * ones require clearing each of the 16 IOTLB tag entries individually.
 */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		/* Single-shot flush-all register. */
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		/* Clear all 16 IOTLB tags; they are spaced 8 bytes apart. */
		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}
#define IOPTE_CONSISTENT(CTX) \
(IOPTE_VALID | IOPTE_CACHE | \
(((CTX) << 47) & IOPTE_CONTEXT))
#define IOPTE_STREAMING(CTX) \
(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
/* Existing mappings are never marked invalid, instead they
* are pointed to a dummy page.
*/
#define IOPTE_IS_DUMMY(iommu, iopte) \
((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
/* Redirect an IOPTE at the shared dummy page, preserving every bit
 * outside the page-frame field (existing mappings are never marked
 * invalid, only repointed).
 */
static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long new_val;

	new_val = (iopte_val(*iopte) & ~IOPTE_PAGE) | iommu->dummy_page_pa;
	iopte_val(*iopte) = new_val;
}
/* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle'
* facility it must all be done in one pass while under the iommu lock.
*
* On sun4u platforms, we only flush the IOMMU once every time we've passed
* over the entire page table doing allocations. Therefore we only ever advance
* the hint and cannot backtrack it.
*/
/* Allocate @npages contiguous entries from the IOMMU arena.  Must be
 * called with the IOMMU lock held.  Returns the first entry index, or
 * DMA_ERROR_CODE on failure.  @handle (optional) carries the search
 * start hint across repeated calls for scatter-gather mappings.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:

	/* Respect the device's segment-boundary constraint (default 4GB). */
	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning. */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Only ever advance the hint; see the comment above about flushing. */
	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
/* Return @npages entries starting at DVMA address @dma_addr to the
 * arena bitmap.  Caller must hold the IOMMU lock.
 */
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	unsigned long first_entry;

	first_entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	bitmap_clear(iommu->arena.map, first_entry, npages);
}
/**
 * iommu_table_init - set up software state, dummy page and TSB for an IOMMU
 * @iommu: IOMMU instance to initialise
 * @tsbsize: size in bytes of the IOMMU page table (TSB)
 * @dma_offset: base DVMA address covered by the table
 * @dma_addr_mask: DMA addressing capability mask for this IOMMU
 * @numa_node: node to allocate the arena map, dummy page and TSB from
 *
 * Returns 0 on success, -ENOMEM on allocation failure (all prior
 * allocations are rolled back via the goto-cleanup path).
 */
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;	/* ctx 0 means "no context" */
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map: one bit per TSB
	 * entry, rounded up to an 8-byte multiple.  kzalloc_node()
	 * replaces the original kmalloc_node()+memset() pair.
	 */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	/* Only sun4u hardware needs explicit IOTLB flushing. */
	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	/* Start with every entry pointing at the dummy page. */
	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}
/* Allocate @npages arena entries and return a pointer to the first
 * corresponding IOPTE, or NULL if the arena is exhausted.  Caller must
 * hold the IOMMU lock.
 */
static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long first = iommu_range_alloc(dev, iommu, npages, NULL);

	return (first == DMA_ERROR_CODE) ? NULL : iommu->page_table + first;
}
/* Allocate a streaming-cache context number.  Search starts at the
 * lowest known-free slot and wraps around once (from bit 1, since
 * context 0 is reserved to mean "no context").  Returns 0 when the
 * bitmap is exhausted.
 */
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		/* Wrapped: rescan from bit 1 up to the original start. */
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}
/* Release a context number allocated by iommu_alloc_ctx().  Context 0
 * ("no context") is never really allocated, so it is ignored here.
 */
static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (unlikely(!ctx))
		return;

	__clear_bit(ctx, iommu->ctx_bitmap);
	if (iommu->ctx_lowest_free > ctx)
		iommu->ctx_lowest_free = ctx;
}
/* Allocate and map a physically contiguous consistent DMA buffer.
 * Returns its zeroed CPU virtual address and writes the DVMA address
 * to *dma_addrp, or returns NULL on failure.
 */
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   struct dma_attrs *attrs)
{
	unsigned long flags, order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)	/* refuse requests of 2^10 pages or more */
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	/* The DVMA address is derived from the IOPTE's table position. */
	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		/* Consistent (non-streaming) mapping, device-writable. */
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}
/* Free a buffer from dma_4u_alloc_coherent(): release its arena range
 * and return the pages.  Note the IOPTEs are not rewritten here, only
 * the arena range is released.
 */
static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iommu_range_free(iommu, dvma, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)		/* mirrors the size limit on the alloc side */
		free_pages((unsigned long)cpu, order);
}
/* Map (part of) one page for streaming DMA.  Returns the DVMA handle
 * (including the sub-page offset) or DMA_ERROR_CODE on failure.
 */
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	/* Number of IO pages spanned by [oaddr, oaddr + sz). */
	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	/* Keep the intra-page offset in the returned handle. */
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	/* Streaming mappings go through the streaming cache when enabled. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
/* Push dirty data for @npages pages starting at @vaddr out of the
 * streaming buffer.  A whole-context flush is used when both the
 * strbuf and the IOMMU support contexts; a per-page flush is the
 * fallback (also taken if the context flush does not drain).  Ends
 * with a flush-flag handshake unless the mapping was DMA_TO_DEVICE
 * (the device could not have dirtied the streaming cache then).
 */
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		/* Re-issue the flush while match bits remain pending. */
		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			/* Context flush did not drain; fall back to
			 * flushing each page individually.
			 */
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	/* Poll up to ~100ms for the hardware to raise the flush flag. */
	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
/* Tear down a streaming mapping made by dma_4u_map_page(): flush the
 * streaming buffer if needed, repoint the IOPTEs at the dummy page,
 * and release the arena range and context.
 */
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Map a scatterlist for streaming DMA, merging adjacent DVMA ranges
 * into fewer segments where the device's max segment size and segment
 * boundary allow.  Returns the number of mapped segments, or 0 on
 * failure (all partial mappings are backed out).
 */
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	/* One context is shared by all mappings of this SG. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	/* Terminate the list if merging shrank it. */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	/* Back out every mapping made so far (up to and including outs). */
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
/* If contexts are being used, they are the same in all of the mappings
* we make for a particular SG.
*/
/* Read back the context number stored in the first SG entry's IOPTE
 * (all mappings of one SG share the same context; see comment above).
 * Returns 0 when the IOMMU does not use contexts.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		/* Context lives in bits 47+ of the IOPTE. */
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}
/* Unmap a scatterlist mapped by dma_4u_map_sg(): per segment, flush the
 * streaming buffer if enabled, return the arena range, and repoint the
 * IOPTEs at the dummy page.  Stops at the first zero-length segment
 * (the list terminator written by the map path).
 */
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Make device-written data visible to the CPU for one mapping by
 * flushing the streaming buffer.  No-op when the streaming buffer is
 * disabled (mappings are then consistent).
 */
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Scatterlist variant of dma_4u_sync_single_for_cpu(): flushes the
 * streaming buffer over the DVMA span from the first segment's start
 * to the end of the last non-empty segment.
 */
static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	/* Find the last segment with a non-zero dma_length. */
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* DMA operations for sun4u IOMMUs.  Only the CPU-direction sync hooks
 * are provided; no sync_*_for_device callbacks are installed here.
 */
static struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

/* Global ops pointer; defaults to sun4u, may be repointed elsewhere. */
struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);

/* Report whether a device's DMA mask is usable with this IOMMU.
 * Masks of 2^32 and above are rejected outright; masks covering the
 * IOMMU's own address mask are accepted; PCI devices get a chance at
 * 64-bit bypass via pci64_dma_supported().
 */
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);
| gpl-2.0 |
girishverma/linux_cst_bsp | drivers/media/rc/keymaps/rc-encore-enltv.c | 7636 | 2927 | /* encore-enltv.h - Keytable for encore_enltv Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Encore ENLTV-FM - black plastic, white front cover with white glowing buttons
Juan Pablo Sormani <sorman@gmail.com> */
/* Scancode-to-keycode table for the Encore ENLTV-FM remote.
 * Each entry maps a raw IR scancode to a Linux input KEY_* code.
 */
static struct rc_map_table encore_enltv[] = {

	/* Power button does nothing, neither in Windows app,
	   although it sends data (used for BIOS wakeup?) */
	{ 0x0d, KEY_MUTE },

	{ 0x1e, KEY_TV },
	{ 0x00, KEY_VIDEO },
	{ 0x01, KEY_AUDIO },		/* music */
	{ 0x02, KEY_CAMERA },		/* picture */

	/* Numeric keypad: note the scancodes are not contiguous. */
	{ 0x1f, KEY_1 },
	{ 0x03, KEY_2 },
	{ 0x04, KEY_3 },
	{ 0x05, KEY_4 },
	{ 0x1c, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x1d, KEY_9 },
	{ 0x0a, KEY_0 },

	{ 0x09, KEY_LIST },		/* -/-- */
	{ 0x0b, KEY_LAST },		/* recall */

	{ 0x14, KEY_HOME },		/* win start menu */
	{ 0x15, KEY_EXIT },		/* exit */
	{ 0x16, KEY_CHANNELUP },	/* UP */
	{ 0x12, KEY_CHANNELDOWN },	/* DOWN */
	{ 0x0c, KEY_VOLUMEUP },		/* RIGHT */
	{ 0x17, KEY_VOLUMEDOWN },	/* LEFT */

	{ 0x18, KEY_ENTER },		/* OK */

	{ 0x0e, KEY_ESC },
	{ 0x13, KEY_CYCLEWINDOWS },	/* desktop */
	{ 0x11, KEY_TAB },
	{ 0x19, KEY_SWITCHVIDEOMODE },	/* switch */

	{ 0x1a, KEY_MENU },
	{ 0x1b, KEY_ZOOM },		/* fullscreen */
	{ 0x44, KEY_TIME },		/* time shift */
	{ 0x40, KEY_MODE },		/* source */

	{ 0x5a, KEY_RECORD },
	{ 0x42, KEY_PLAY },		/* play/pause */
	{ 0x45, KEY_STOP },
	{ 0x43, KEY_CAMERA },		/* camera icon */

	{ 0x48, KEY_REWIND },
	{ 0x4a, KEY_FASTFORWARD },
	{ 0x49, KEY_PREVIOUS },
	{ 0x4b, KEY_NEXT },

	{ 0x4c, KEY_FAVORITES },	/* tv wall */
	{ 0x4d, KEY_SOUND },		/* DVD sound */
	{ 0x4e, KEY_LANGUAGE },		/* DVD lang */
	{ 0x4f, KEY_TEXT },		/* DVD text */

	{ 0x50, KEY_SLEEP },		/* shutdown */
	{ 0x51, KEY_MODE },		/* stereo > main */
	{ 0x52, KEY_SELECT },		/* stereo > sap */
	{ 0x53, KEY_TEXT },		/* teletext */

	{ 0x59, KEY_RED },		/* AP1 */
	{ 0x41, KEY_GREEN },		/* AP2 */
	{ 0x47, KEY_YELLOW },		/* AP3 */
	{ 0x57, KEY_BLUE },		/* AP4 */
};
/* Registration wrapper describing the table to the RC core. */
static struct rc_map_list encore_enltv_map = {
	.map = {
		.scan    = encore_enltv,
		.size    = ARRAY_SIZE(encore_enltv),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_ENCORE_ENLTV,
	}
};
/* Module init: hand the keymap to the RC core. */
static int __init init_rc_map_encore_enltv(void)
{
	int rc;

	rc = rc_map_register(&encore_enltv_map);
	return rc;
}
/* Module exit: withdraw the keymap from the RC core. */
static void __exit exit_rc_map_encore_enltv(void)
{
	rc_map_unregister(&encore_enltv_map);
}
module_init(init_rc_map_encore_enltv)
module_exit(exit_rc_map_encore_enltv)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
eoghan2t9/android_kernel_oppo_n1_test | drivers/media/rc/keymaps/rc-apac-viewcomp.c | 7636 | 2143 | /* apac-viewcomp.h - Keytable for apac_viewcomp Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Attila Kondoros <attila.kondoros@chello.hu> */
/* Scancode-to-keycode table for the APAC Viewcomp remote.
 * Each entry maps a raw IR scancode to a Linux input KEY_* code.
 */
static struct rc_map_table apac_viewcomp[] = {

	/* Numeric keypad. */
	{ 0x01, KEY_1 },
	{ 0x02, KEY_2 },
	{ 0x03, KEY_3 },
	{ 0x04, KEY_4 },
	{ 0x05, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x09, KEY_9 },
	{ 0x00, KEY_0 },
	{ 0x17, KEY_LAST },		/* +100 */
	{ 0x0a, KEY_LIST },		/* recall */

	{ 0x1c, KEY_TUNER },		/* TV/FM */
	{ 0x15, KEY_SEARCH },		/* scan */
	{ 0x12, KEY_POWER },		/* power */

	/* Note: up/down scancodes are swapped vs their KEY_ names. */
	{ 0x1f, KEY_VOLUMEDOWN },	/* vol up */
	{ 0x1b, KEY_VOLUMEUP },		/* vol down */
	{ 0x1e, KEY_CHANNELDOWN },	/* chn up */
	{ 0x1a, KEY_CHANNELUP },	/* chn down */

	{ 0x11, KEY_VIDEO },		/* video */
	{ 0x0f, KEY_ZOOM },		/* full screen */
	{ 0x13, KEY_MUTE },		/* mute/unmute */
	{ 0x10, KEY_TEXT },		/* min */

	{ 0x0d, KEY_STOP },		/* freeze */
	{ 0x0e, KEY_RECORD },		/* record */
	{ 0x1d, KEY_PLAYPAUSE },	/* stop */
	{ 0x19, KEY_PLAY },		/* play */

	{ 0x16, KEY_GOTO },		/* osd */
	{ 0x14, KEY_REFRESH },		/* default */
	{ 0x0c, KEY_KPPLUS },		/* fine tune >>>> */
	{ 0x18, KEY_KPMINUS },		/* fine tune <<<< */
};
/* Registration wrapper describing the table to the RC core. */
static struct rc_map_list apac_viewcomp_map = {
	.map = {
		.scan    = apac_viewcomp,
		.size    = ARRAY_SIZE(apac_viewcomp),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_APAC_VIEWCOMP,
	}
};
/* Module init: hand the keymap to the RC core. */
static int __init init_rc_map_apac_viewcomp(void)
{
	int rc;

	rc = rc_map_register(&apac_viewcomp_map);
	return rc;
}
/* Module exit: withdraw the keymap from the RC core. */
static void __exit exit_rc_map_apac_viewcomp(void)
{
	rc_map_unregister(&apac_viewcomp_map);
}
module_init(init_rc_map_apac_viewcomp)
module_exit(exit_rc_map_apac_viewcomp)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
attn1/android_kernel_pantech_p9070 | init/do_mounts_md.c | 8148 | 8101 | #include <linux/delay.h>
#include <linux/raid/md_u.h>
#include <linux/raid/md_p.h>
#include "do_mounts.h"
/*
* When md (and any require personalities) are compiled into the kernel
* (not a module), arrays can be assembles are boot time using with AUTODETECT
* where specially marked partitions are registered with md_autodetect_dev(),
* and with MD_BOOT where devices to be collected are given on the boot line
* with md=.....
* The code for that is here.
*/
#ifdef CONFIG_MD_AUTODETECT
static int __initdata raid_noautodetect;	/* set via raid=noautodetect */
#else
static int __initdata raid_noautodetect=1;	/* no autodetect support built in */
#endif
static int __initdata raid_autopart;		/* set via raid=partitionable / raid=part */

/* One slot per "md=" boot argument (max 256 arrays). */
static struct {
	int minor;		/* md device minor number */
	int partitioned;	/* md=d... (partitioned) vs plain md=... */
	int level;		/* RAID level, or LEVEL_NONE for superblock arrays */
	int chunk;		/* chunk size in bytes (non-persistent arrays only) */
	char *device_names;	/* comma-separated component device list */
} md_setup_args[256] __initdata;

static int md_setup_ents __initdata;	/* number of slots in use */
/*
* Parse the command-line parameters given our kernel, but do not
* actually try to invoke the MD device now; that is handled by
* md_setup_drive after the low-level disk drivers have initialised.
*
* 27/11/1999: Fixed to work correctly with the 2.3 kernel (which
* assigns the task of parsing integer arguments to the
* invoked program now). Added ability to initialise all
* the MD devices (by specifying multiple "md=" lines)
* instead of just one. -- KTK
* 18May2000: Added support for persistent-superblock arrays:
* md=n,0,factor,fault,device-list uses RAID0 for device n
* md=n,-1,factor,fault,device-list uses LINEAR for device n
* md=n,device-list reads a RAID superblock from the devices
* elements in device-list are read by name_to_kdev_t so can be
* a hex number or something like /dev/hda1 /dev/sdb
* 2001-06-03: Dave Cinege <dcinege@psychosis.com>
* Shifted name_to_kdev_t() and related operations to md_set_drive()
* for later execution. Rewrote section to make devfs compatible.
*/
/* Parse one "md=" boot option into md_setup_args[]; the arrays are
 * assembled later by md_setup_drive() once disk drivers are up.
 * Returns 1 when the option was consumed, 0 on parse failure.
 */
static int __init md_setup(char *str)
{
	int minor, level, factor, fault, partitioned = 0;
	char *pername = "";
	char *str1;
	int ent;

	if (*str == 'd') {
		/* "md=d..." selects a partitioned array. */
		partitioned = 1;
		str++;
	}
	if (get_option(&str, &minor) != 2) {	/* MD Number */
		printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
		return 0;
	}
	str1 = str;
	/* Reuse the slot if this md device was specified before. */
	for (ent=0 ; ent< md_setup_ents ; ent++)
		if (md_setup_args[ent].minor == minor &&
		    md_setup_args[ent].partitioned == partitioned) {
			printk(KERN_WARNING "md: md=%s%d, Specified more than once. "
			       "Replacing previous definition.\n", partitioned?"d":"", minor);
			break;
		}
	if (ent >= ARRAY_SIZE(md_setup_args)) {
		printk(KERN_WARNING "md: md=%s%d - too many md initialisations\n", partitioned?"d":"", minor);
		return 0;
	}
	if (ent >= md_setup_ents)
		md_setup_ents++;
	switch (get_option(&str, &level)) {	/* RAID level */
	case 2: /* could be 0 or -1.. */
		if (level == 0 || level == LEVEL_LINEAR) {
			if (get_option(&str, &factor) != 2 ||	/* Chunk Size */
			    get_option(&str, &fault) != 2) {
				printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
				return 0;
			}
			md_setup_args[ent].level = level;
			md_setup_args[ent].chunk = 1 << (factor+12);
			if (level == LEVEL_LINEAR)
				pername = "linear";
			else
				pername = "raid0";
			break;
		}
		/* FALL THROUGH */
	case 1: /* the first device is numeric */
		str = str1;
		/* FALL THROUGH */
	case 0:
		/* No level given: configuration comes from the superblocks. */
		md_setup_args[ent].level = LEVEL_NONE;
		pername="super-block";
	}

	printk(KERN_INFO "md: Will configure md%d (%s) from %s, below.\n",
		minor, pername, str);
	md_setup_args[ent].device_names = str;
	md_setup_args[ent].partitioned = partitioned;
	md_setup_args[ent].minor = minor;

	return 1;
}
/* Assemble the arrays described by md_setup_args[] using the md ioctl
 * interface on /dev/md* device nodes created here.  Called from
 * md_run_setup() after the low-level disk drivers have initialised.
 */
static void __init md_setup_drive(void)
{
	int minor, i, ent, partitioned;
	dev_t dev;
	dev_t devices[MD_SB_DISKS+1];

	for (ent = 0; ent < md_setup_ents ; ent++) {
		int fd;
		int err = 0;
		char *devname;
		mdu_disk_info_t dinfo;
		char name[16];

		minor = md_setup_args[ent].minor;
		partitioned = md_setup_args[ent].partitioned;
		devname = md_setup_args[ent].device_names;

		sprintf(name, "/dev/md%s%d", partitioned?"_d":"", minor);
		if (partitioned)
			dev = MKDEV(mdp_major, minor << MdpMinorShift);
		else
			dev = MKDEV(MD_MAJOR, minor);
		create_dev(name, dev);
		/* Resolve each comma-separated component name to a dev_t. */
		for (i = 0; i < MD_SB_DISKS && devname != NULL; i++) {
			char *p;
			char comp_name[64];
			u32 rdev;

			p = strchr(devname, ',');
			if (p)
				*p++ = 0;

			dev = name_to_dev_t(devname);
			if (strncmp(devname, "/dev/", 5) == 0)
				devname += 5;
			snprintf(comp_name, 63, "/dev/%s", devname);
			rdev = bstat(comp_name);
			if (rdev)
				dev = new_decode_dev(rdev);
			if (!dev) {
				printk(KERN_WARNING "md: Unknown device name: %s\n", devname);
				break;
			}

			devices[i] = dev;

			devname = p;
		}
		devices[i] = 0;		/* zero-terminate the dev_t list */

		if (!i)
			continue;

		printk(KERN_INFO "md: Loading md%s%d: %s\n",
			partitioned ? "_d" : "", minor,
			md_setup_args[ent].device_names);

		fd = sys_open(name, 0, 0);
		if (fd < 0) {
			printk(KERN_ERR "md: open failed - cannot start "
					"array %s\n", name);
			continue;
		}
		if (sys_ioctl(fd, SET_ARRAY_INFO, 0) == -EBUSY) {
			printk(KERN_WARNING
			       "md: Ignoring md=%d, already autodetected. (Use raid=noautodetect)\n",
			       minor);
			sys_close(fd);
			continue;
		}

		if (md_setup_args[ent].level != LEVEL_NONE) {
			/* non-persistent */
			mdu_array_info_t ainfo;
			ainfo.level = md_setup_args[ent].level;
			ainfo.size = 0;
			ainfo.nr_disks =0;
			ainfo.raid_disks =0;
			while (devices[ainfo.raid_disks])
				ainfo.raid_disks++;
			ainfo.md_minor =minor;
			ainfo.not_persistent = 1;

			ainfo.state = (1 << MD_SB_CLEAN);
			ainfo.layout = 0;
			ainfo.chunk_size = md_setup_args[ent].chunk;
			err = sys_ioctl(fd, SET_ARRAY_INFO, (long)&ainfo);
			for (i = 0; !err && i <= MD_SB_DISKS; i++) {
				dev = devices[i];
				if (!dev)
					break;
				dinfo.number = i;
				dinfo.raid_disk = i;
				dinfo.state = (1<<MD_DISK_ACTIVE)|(1<<MD_DISK_SYNC);
				dinfo.major = MAJOR(dev);
				dinfo.minor = MINOR(dev);
				err = sys_ioctl(fd, ADD_NEW_DISK, (long)&dinfo);
			}
		} else {
			/* persistent */
			for (i = 0; i <= MD_SB_DISKS; i++) {
				dev = devices[i];
				if (!dev)
					break;
				dinfo.major = MAJOR(dev);
				dinfo.minor = MINOR(dev);
				sys_ioctl(fd, ADD_NEW_DISK, (long)&dinfo);
			}
		}
		if (!err)
			err = sys_ioctl(fd, RUN_ARRAY, 0);
		if (err)
			printk(KERN_WARNING "md: starting md%d failed\n", minor);
		else {
			/* reread the partition table.
			 * I (neilb) and not sure why this is needed, but I cannot
			 * boot a kernel with devfs compiled in from partitioned md
			 * array without it
			 */
			sys_close(fd);
			fd = sys_open(name, 0, 0);
			sys_ioctl(fd, BLKRRPART, 0);
		}
		sys_close(fd);
	}
}
/*
 * Parse the "raid=" kernel command line option.
 *
 * Recognised comma-separated words:
 *   noautodetect         - disable autodetection of RAID arrays
 *   autodetect           - (re-)enable autodetection
 *   partitionable / part - create partitionable arrays
 *
 * Always returns 1 so the option is consumed here.
 */
static int __init raid_setup(char *str)
{
	int len, pos;

	len = strlen(str) + 1;
	pos = 0;

	while (pos < len) {
		char *comma = strchr(str+pos, ',');
		int wlen;

		if (comma)
			wlen = (comma-str)-pos;
		else
			wlen = (len-1)-pos;

		/*
		 * Fix: compare the current word (str+pos), not the start of
		 * the whole string, so every word after the first comma is
		 * actually honoured.
		 */
		if (!strncmp(str+pos, "noautodetect", wlen))
			raid_noautodetect = 1;
		if (!strncmp(str+pos, "autodetect", wlen))
			raid_noautodetect = 0;
		if (strncmp(str+pos, "partitionable", wlen) == 0)
			raid_autopart = 1;
		if (strncmp(str+pos, "part", wlen) == 0)
			raid_autopart = 1;
		pos += wlen+1;
	}
	return 1;
}
__setup("raid=", raid_setup);
__setup("md=", md_setup);
/* Kick off RAID autodetection via the RAID_AUTORUN ioctl on /dev/md0. */
static void __init autodetect_raid(void)
{
	int fd;

	/*
	 * Since we don't want to detect and use half a raid array, we need
	 * to wait for the known devices to complete their probing.
	 */
	printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n");
	printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n");

	wait_for_device_probe();

	fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
	if (fd < 0)
		return;
	sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
	sys_close(fd);
}
/* Late-boot entry point: create /dev/md0, autodetect (unless disabled)
 * and assemble any arrays given on the command line. */
void __init md_run_setup(void)
{
	create_dev("/dev/md0", MKDEV(MD_MAJOR, 0));

	if (!raid_noautodetect)
		autodetect_raid();
	else
		printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=autodetect will force)\n");
	md_setup_drive();
}
| gpl-2.0 |
smartboyhw/ubuntu-saucy-rt | fs/fscache/main.c | 10708 | 5321 | /* General filesystem local caching manager
*
* Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"
MODULE_DESCRIPTION("FS Cache Manager");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
unsigned fscache_defer_lookup = 1;
module_param_named(defer_lookup, fscache_defer_lookup, uint,
S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(fscache_defer_lookup,
"Defer cookie lookup to background thread");
unsigned fscache_defer_create = 1;
module_param_named(defer_create, fscache_defer_create, uint,
S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(fscache_defer_create,
"Defer cookie creation to background thread");
unsigned fscache_debug;
module_param_named(debug, fscache_debug, uint,
S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(fscache_debug,
"FS-Cache debugging mask");
struct kobject *fscache_root;
struct workqueue_struct *fscache_object_wq;
struct workqueue_struct *fscache_op_wq;
DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
/* these values serve as lower bounds, will be adjusted in fscache_init() */
static unsigned fscache_object_max_active = 4;
static unsigned fscache_op_max_active = 2;
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *fscache_sysctl_header;
/*
 * sysctl handler for the workqueue concurrency knobs: after the normal
 * integer read/write, push an updated value into the workqueue whose
 * address is smuggled in via ->extra1.
 */
static int fscache_max_active_sysctl(struct ctl_table *table, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	struct workqueue_struct **wq = table->extra1;
	unsigned int *max_active = table->data;
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	workqueue_set_max_active(*wq, *max_active);
	return 0;
}
/*
 * /proc/sys/fscache/ tunables: cap the concurrency of the object and
 * operation workqueues.  ->extra1 carries the matching workqueue
 * pointer for fscache_max_active_sysctl() to update on write.
 */
ctl_table fscache_sysctls[] = {
	{
		.procname = "object_max_active",
		.data = &fscache_object_max_active,
		.maxlen = sizeof(unsigned),
		.mode = 0644,
		.proc_handler = fscache_max_active_sysctl,
		.extra1 = &fscache_object_wq,
	},
	{
		.procname = "operation_max_active",
		.data = &fscache_op_max_active,
		.maxlen = sizeof(unsigned),
		.mode = 0644,
		.proc_handler = fscache_max_active_sysctl,
		.extra1 = &fscache_op_wq,
	},
	{}	/* terminator */
};
/* Parent directory entry: /proc/sys/fscache/ */
ctl_table fscache_sysctls_root[] = {
	{
		.procname = "fscache",
		.mode = 0555,	/* read-only directory */
		.child = fscache_sysctls,
	},
	{}	/* terminator */
};
#endif
/*
 * Initialise the fs caching module: create the object and operation
 * workqueues, the per-CPU congestion waitqueues, the procfs/sysctl
 * interfaces, the cookie slab and the sysfs root.  On failure, tear
 * down in reverse order via the error label ladder.
 */
static int __init fscache_init(void)
{
	unsigned int nr_cpus = num_possible_cpus();
	unsigned int cpu;
	int ret;

	/* scale object concurrency with CPU count (upper-bounded) */
	fscache_object_max_active =
		clamp_val(nr_cpus,
			  fscache_object_max_active, WQ_UNBOUND_MAX_ACTIVE);

	ret = -ENOMEM;
	fscache_object_wq = alloc_workqueue("fscache_object", WQ_UNBOUND,
					    fscache_object_max_active);
	if (!fscache_object_wq)
		goto error_object_wq;

	/* operations get half the object concurrency */
	fscache_op_max_active =
		clamp_val(fscache_object_max_active / 2,
			  fscache_op_max_active, WQ_UNBOUND_MAX_ACTIVE);

	ret = -ENOMEM;
	fscache_op_wq = alloc_workqueue("fscache_operation", WQ_UNBOUND,
					fscache_op_max_active);
	if (!fscache_op_wq)
		goto error_op_wq;

	for_each_possible_cpu(cpu)
		init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));

	ret = fscache_proc_init();
	if (ret < 0)
		goto error_proc;

#ifdef CONFIG_SYSCTL
	ret = -ENOMEM;
	fscache_sysctl_header = register_sysctl_table(fscache_sysctls_root);
	if (!fscache_sysctl_header)
		goto error_sysctl;
#endif

	fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
					       sizeof(struct fscache_cookie),
					       0,
					       0,
					       fscache_cookie_init_once);
	if (!fscache_cookie_jar) {
		printk(KERN_NOTICE
		       "FS-Cache: Failed to allocate a cookie jar\n");
		ret = -ENOMEM;
		goto error_cookie_jar;
	}

	/* ret is still -ENOMEM here, which is what a failure of
	 * kobject_create_and_add() ends up returning below */
	fscache_root = kobject_create_and_add("fscache", kernel_kobj);
	if (!fscache_root)
		goto error_kobj;

	printk(KERN_NOTICE "FS-Cache: Loaded\n");
	return 0;

error_kobj:
	kmem_cache_destroy(fscache_cookie_jar);
error_cookie_jar:
#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(fscache_sysctl_header);
error_sysctl:
#endif
	fscache_proc_cleanup();
error_proc:
	destroy_workqueue(fscache_op_wq);
error_op_wq:
	destroy_workqueue(fscache_object_wq);
error_object_wq:
	return ret;
}
fs_initcall(fscache_init);
/*
 * Clean up on module removal: strictly the reverse order of
 * fscache_init().
 */
static void __exit fscache_exit(void)
{
	_enter("");

	kobject_put(fscache_root);
	kmem_cache_destroy(fscache_cookie_jar);
#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(fscache_sysctl_header);
#endif
	fscache_proc_cleanup();
	destroy_workqueue(fscache_op_wq);
	destroy_workqueue(fscache_object_wq);
	printk(KERN_NOTICE "FS-Cache: Unloaded\n");
}
/*
 * wait_on_bit() sleep function for uninterruptible waiting: sleep once;
 * returning 0 tells the wait_on_bit() core to carry on waiting.
 */
int fscache_wait_bit(void *flags)
{
	schedule();
	return 0;
}
EXPORT_SYMBOL(fscache_wait_bit);
/*
 * wait_on_bit() sleep function for interruptible waiting: a non-zero
 * return (pending signal) aborts the wait.
 */
int fscache_wait_bit_interruptible(void *flags)
{
	schedule();
	return signal_pending(current);
}
EXPORT_SYMBOL(fscache_wait_bit_interruptible);
| gpl-2.0 |
drowningchild/lgog_old | arch/powerpc/boot/simple_alloc.c | 14548 | 3617 | /*
* Implement primitive realloc(3) functionality.
*
* Author: Mark A. Greer <mgreer@mvista.com>
*
* 2006 (c) MontaVista, Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <stddef.h>
#include "types.h"
#include "page.h"
#include "string.h"
#include "ops.h"
#define ENTRY_BEEN_USED 0x01
#define ENTRY_IN_USE 0x02
static struct alloc_info {
unsigned long flags;
unsigned long base;
unsigned long size;
} *alloc_tbl;
static unsigned long tbl_entries;
static unsigned long alloc_min;
static unsigned long next_base;
static unsigned long space_left;
/*
 * First time an entry is used, its base and size are set.
 * An entry can be freed and re-malloc'd but its base & size don't change.
 * Should be smart enough for needs of bootwrapper.
 */
static void *simple_malloc(unsigned long size)
{
	struct alloc_info *entry;
	unsigned long idx;

	if (size == 0)
		return NULL;

	size = _ALIGN_UP(size, alloc_min);

	for (idx = 0, entry = alloc_tbl; idx < tbl_entries; idx++, entry++) {
		if (!(entry->flags & ENTRY_BEEN_USED)) {
			/* fresh entry: carve new space off the heap */
			if (size > space_left)
				return NULL;	/* heap exhausted */
			entry->base = next_base;
			entry->size = size;
			entry->flags = ENTRY_BEEN_USED | ENTRY_IN_USE;
			next_base += size;
			space_left -= size;
			return (void *)entry->base;
		}
		/* recycle a freed entry, keeping its base & size */
		if (!(entry->flags & ENTRY_IN_USE) && size <= entry->size) {
			entry->flags |= ENTRY_IN_USE;
			return (void *)entry->base;
		}
	}
	return NULL;	/* table full */
}
/* Map a pointer returned by simple_malloc() back to its table entry,
 * or NULL if it is unknown or already freed. */
static struct alloc_info *simple_find_entry(void *ptr)
{
	struct alloc_info *entry;
	unsigned long idx;

	for (idx = 0, entry = alloc_tbl; idx < tbl_entries; idx++, entry++) {
		/* entries are handed out in order; the first never-used
		 * slot marks the end of the live portion of the table */
		if (!(entry->flags & ENTRY_BEEN_USED))
			return NULL;
		if ((entry->flags & ENTRY_IN_USE) &&
		    entry->base == (unsigned long)ptr)
			return entry;
	}
	return NULL;
}
/* Release a block: just clear its in-use flag (base/size stay put). */
static void simple_free(void *ptr)
{
	struct alloc_info *entry = simple_find_entry(ptr);

	if (entry)
		entry->flags &= ~ENTRY_IN_USE;
}
/*
 * Change size of area pointed to by 'ptr' to 'size'.
 * If 'ptr' is NULL, then its a malloc().  If 'size' is 0, then its a free().
 * 'ptr' must be NULL or a pointer to a non-freed area previously returned by
 * simple_realloc() or simple_malloc().
 */
static void *simple_realloc(void *ptr, unsigned long size)
{
	struct alloc_info *p;
	void *new;

	if (size == 0) {
		simple_free(ptr);
		return NULL;
	}

	if (ptr == NULL)
		return simple_malloc(size);

	p = simple_find_entry(ptr);
	if (p == NULL) /* ptr not from simple_malloc/simple_realloc */
		return NULL;

	if (size <= p->size) /* fits in current block */
		return ptr;

	new = simple_malloc(size);
	/*
	 * Fix: simple_malloc() can fail (heap/table exhausted); don't
	 * memcpy() through NULL.  On failure the original block is left
	 * intact, matching realloc(3) semantics.
	 */
	if (new == NULL)
		return NULL;
	memcpy(new, ptr, p->size);
	simple_free(ptr);
	return new;
}
/*
* Returns addr of first byte after heap so caller can see if it took
* too much space. If so, change args & try again.
*/
void *simple_alloc_init(char *base, unsigned long heap_size,
unsigned long granularity, unsigned long max_allocs)
{
unsigned long heap_base, tbl_size;
heap_size = _ALIGN_UP(heap_size, granularity);
alloc_min = granularity;
tbl_entries = max_allocs;
tbl_size = tbl_entries * sizeof(struct alloc_info);
alloc_tbl = (struct alloc_info *)_ALIGN_UP((unsigned long)base, 8);
memset(alloc_tbl, 0, tbl_size);
heap_base = _ALIGN_UP((unsigned long)alloc_tbl + tbl_size, alloc_min);
next_base = heap_base;
space_left = heap_size;
platform_ops.malloc = simple_malloc;
platform_ops.free = simple_free;
platform_ops.realloc = simple_realloc;
return (void *)(heap_base + heap_size);
}
| gpl-2.0 |
MahoSata/linux-4.3-sata | drivers/acpi/sysfs.c | 213 | 23199 | /*
* sysfs.c - ACPI sysfs interface to userspace.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/acpi.h>
#include "internal.h"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("sysfs");
#ifdef CONFIG_ACPI_DEBUG
/*
* ACPI debug sysfs I/F, including:
* /sys/modules/acpi/parameters/debug_layer
* /sys/modules/acpi/parameters/debug_level
* /sys/modules/acpi/parameters/trace_method_name
* /sys/modules/acpi/parameters/trace_state
* /sys/modules/acpi/parameters/trace_debug_layer
* /sys/modules/acpi/parameters/trace_debug_level
*/
struct acpi_dlayer {
const char *name;
unsigned long value;
};
struct acpi_dlevel {
const char *name;
unsigned long value;
};
#define ACPI_DEBUG_INIT(v) { .name = #v, .value = v }
static const struct acpi_dlayer acpi_debug_layers[] = {
ACPI_DEBUG_INIT(ACPI_UTILITIES),
ACPI_DEBUG_INIT(ACPI_HARDWARE),
ACPI_DEBUG_INIT(ACPI_EVENTS),
ACPI_DEBUG_INIT(ACPI_TABLES),
ACPI_DEBUG_INIT(ACPI_NAMESPACE),
ACPI_DEBUG_INIT(ACPI_PARSER),
ACPI_DEBUG_INIT(ACPI_DISPATCHER),
ACPI_DEBUG_INIT(ACPI_EXECUTER),
ACPI_DEBUG_INIT(ACPI_RESOURCES),
ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
ACPI_DEBUG_INIT(ACPI_COMPILER),
ACPI_DEBUG_INIT(ACPI_TOOLS),
ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
};
static const struct acpi_dlevel acpi_debug_levels[] = {
ACPI_DEBUG_INIT(ACPI_LV_INIT),
ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
ACPI_DEBUG_INIT(ACPI_LV_INFO),
ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
ACPI_DEBUG_INIT(ACPI_LV_PARSE),
ACPI_DEBUG_INIT(ACPI_LV_LOAD),
ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
ACPI_DEBUG_INIT(ACPI_LV_EXEC),
ACPI_DEBUG_INIT(ACPI_LV_NAMES),
ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
ACPI_DEBUG_INIT(ACPI_LV_TABLES),
ACPI_DEBUG_INIT(ACPI_LV_VALUES),
ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
ACPI_DEBUG_INIT(ACPI_LV_THREADS),
ACPI_DEBUG_INIT(ACPI_LV_IO),
ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};
/*
 * Format the table of known ACPI debug layers for
 * /sys/module/acpi/parameters/debug_layer, flagging with '*' those
 * currently enabled in acpi_dbg_layer.
 */
static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_layers[i].name,
				  acpi_debug_layers[i].value,
				  (acpi_dbg_layer & acpi_debug_layers[i].value)
				  ? '*' : ' ');
	}
	/* ACPI_ALL_DRIVERS summary: '*' all set, ' ' none set, '-' partial */
	result +=
	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
		    ACPI_ALL_DRIVERS,
		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
		    == 0 ? ' ' : '-');
	result +=
	    sprintf(buffer + result,
		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
		    acpi_dbg_layer);
	return result;
}
/*
 * Same idea for the per-message debug levels in acpi_dbg_level
 * (/sys/module/acpi/parameters/debug_level).
 */
static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_levels[i].name,
				  acpi_debug_levels[i].value,
				  (acpi_dbg_level & acpi_debug_levels[i].value)
				  ? '*' : ' ');
	}
	result +=
	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
		    acpi_dbg_level);
	return result;
}
static const struct kernel_param_ops param_ops_debug_layer = {
.set = param_set_uint,
.get = param_get_debug_layer,
};
static const struct kernel_param_ops param_ops_debug_level = {
.set = param_set_uint,
.get = param_get_debug_level,
};
module_param_cb(debug_layer, ¶m_ops_debug_layer, &acpi_dbg_layer, 0644);
module_param_cb(debug_level, ¶m_ops_debug_level, &acpi_dbg_level, 0644);
static char trace_method_name[1024];
/*
 * Set /sys/module/acpi/parameters/trace_method_name.  Accepts an
 * absolute namespace path (leading '\') or a relative name, which is
 * stored with '\' prepended.  Returns -ENOSPC if the name would not
 * fit the 1024-byte static buffer.
 */
int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
{
	u32 saved_flags = 0;
	bool is_abs_path = true;

	if (*val != '\\')
		is_abs_path = false;

	/* one byte is reserved for the prepended '\' in the relative case */
	if ((is_abs_path && strlen(val) > 1023) ||
	    (!is_abs_path && strlen(val) > 1022)) {
		pr_err("%s: string parameter too long\n", kp->name);
		return -ENOSPC;
	}

	/*
	 * It's not safe to update acpi_gbl_trace_method_name without
	 * having the tracer stopped, so we save the original tracer
	 * state and disable it.
	 */
	saved_flags = acpi_gbl_trace_flags;
	(void)acpi_debug_trace(NULL,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       0);

	/* This is a hack.  We can't kmalloc in early boot. */
	if (is_abs_path)
		strcpy(trace_method_name, val);
	else {
		trace_method_name[0] = '\\';
		strcpy(trace_method_name+1, val);
	}

	/* Restore the original tracer state */
	(void)acpi_debug_trace(trace_method_name,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       saved_flags);

	return 0;
}
/* Report the ACPICA method name currently selected for tracing. */
static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
{
	return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name);
}
static const struct kernel_param_ops param_ops_trace_method = {
.set = param_set_trace_method_name,
.get = param_get_trace_method_name,
};
static const struct kernel_param_ops param_ops_trace_attrib = {
.set = param_set_uint,
.get = param_get_uint,
};
module_param_cb(trace_method_name, ¶m_ops_trace_method, &trace_method_name, 0644);
module_param_cb(trace_debug_layer, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
module_param_cb(trace_debug_level, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
/*
 * Parse a trace_state command ("enable", "disable", "method[-once]",
 * "opcode[-once]") and hand the resulting flags to acpi_debug_trace().
 */
static int param_set_trace_state(const char *val, struct kernel_param *kp)
{
	acpi_status status;
	const char *method = trace_method_name;
	u32 flags = 0;

/* So "xxx-once" comparison should go prior than "xxx" comparison */
#define acpi_compare_param(val, key) \
	strncmp((val), (key), sizeof(key) - 1)
	if (!acpi_compare_param(val, "enable")) {
		method = NULL;	/* trace everything, no method filter */
		flags = ACPI_TRACE_ENABLED;
	} else if (!acpi_compare_param(val, "disable"))
		method = NULL;	/* flags stays 0: tracing off */
	else if (!acpi_compare_param(val, "method-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
	else if (!acpi_compare_param(val, "method"))
		flags = ACPI_TRACE_ENABLED;
	else if (!acpi_compare_param(val, "opcode-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
	else if (!acpi_compare_param(val, "opcode"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
	else
		return -EINVAL;

	status = acpi_debug_trace(method,
				  acpi_gbl_trace_dbg_level,
				  acpi_gbl_trace_dbg_layer,
				  flags);
	if (ACPI_FAILURE(status))
		return -EBUSY;

	return 0;
}
/* Report the current tracer state as one of the words accepted by
 * param_set_trace_state(). */
static int param_get_trace_state(char *buffer, struct kernel_param *kp)
{
	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
		return sprintf(buffer, "disable");
	if (!acpi_gbl_trace_method_name)
		return sprintf(buffer, "enable");
	if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
		return sprintf(buffer, "method-once");
	return sprintf(buffer, "method");
}
module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */
/* /sys/modules/acpi/parameters/aml_debug_output */
module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
byte, 0644);
MODULE_PARM_DESC(aml_debug_output,
"To enable/disable the ACPI Debug Object output.");
/* /sys/module/acpi/parameters/acpica_version: ACPICA core version, hex */
static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%x", ACPI_CA_VERSION);
}
module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
/*
* ACPI table sysfs I/F:
* /sys/firmware/acpi/tables/
* /sys/firmware/acpi/tables/dynamic/
*/
static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;
struct acpi_table_attr {
struct bin_attribute attr;
char name[8];
int instance;
struct list_head node;
};
/*
 * sysfs ->read() for a raw ACPI table: look the table up by signature
 * and instance, then copy the requested window of it to the caller.
 * An attribute named "NULL" stands for a table with an empty signature.
 */
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t offset, size_t count)
{
	struct acpi_table_attr *table_attr =
	    container_of(bin_attr, struct acpi_table_attr, attr);
	struct acpi_table_header *table_header = NULL;
	acpi_status status;
	char name[ACPI_NAME_SIZE];

	if (strncmp(table_attr->name, "NULL", 4))
		memcpy(name, table_attr->name, ACPI_NAME_SIZE);
	else
		memcpy(name, "\0\0\0\0", 4);	/* empty signature */

	status = acpi_get_table(name, table_attr->instance, &table_header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	return memory_read_from_buffer(buf, count, &offset,
				       table_header, table_header->length);
}
/*
 * Fill in an acpi_table_attr for the given table header: derive the
 * sysfs file name from the signature (duplicate signatures get a
 * numeric instance suffix, e.g. "SSDT2") and set up the bin_attribute.
 */
static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
				 struct acpi_table_header *table_header)
{
	struct acpi_table_header *header = NULL;
	struct acpi_table_attr *attr = NULL;

	sysfs_attr_init(&table_attr->attr.attr);

	/* tables with an empty signature are exposed as "NULL" */
	if (table_header->signature[0] != '\0')
		memcpy(table_attr->name, table_header->signature,
		       ACPI_NAME_SIZE);
	else
		memcpy(table_attr->name, "NULL", 4);

	/* instance = 1 + highest instance already listed for this name */
	list_for_each_entry(attr, &acpi_table_attr_list, node) {
		if (!memcmp(table_attr->name, attr->name, ACPI_NAME_SIZE))
			if (table_attr->instance < attr->instance)
				table_attr->instance = attr->instance;
	}
	table_attr->instance++;

	/* append the instance digit only when the signature is ambiguous
	 * (another instance exists or a second table is already known) */
	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
					 !acpi_get_table
					 (table_header->signature, 2, &header)))
		sprintf(table_attr->name + ACPI_NAME_SIZE, "%d",
			table_attr->instance);

	table_attr->attr.size = table_header->length;
	table_attr->attr.read = acpi_table_show;
	table_attr->attr.attr.name = table_attr->name;
	table_attr->attr.attr.mode = 0400;	/* root read-only */

	return;
}
/*
 * ACPICA table event callback: when a table is dynamically loaded,
 * publish it under /sys/firmware/acpi/tables/dynamic/.  Unload events
 * are deliberately ignored because the table stays on the global list.
 */
static acpi_status
acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_attr *table_attr;

	switch (event) {
	case ACPI_TABLE_EVENT_LOAD:
		table_attr =
		    kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
		if (!table_attr)
			return AE_NO_MEMORY;

		acpi_table_attr_init(table_attr, table);
		if (sysfs_create_bin_file(dynamic_tables_kobj,
					  &table_attr->attr)) {
			kfree(table_attr);
			return AE_ERROR;
		} else
			list_add_tail(&table_attr->node,
				      &acpi_table_attr_list);
		break;
	case ACPI_TABLE_EVENT_UNLOAD:
		/*
		 * we do not need to do anything right now
		 * because the table is not deleted from the
		 * global table list when unloading it.
		 */
		break;
	default:
		return AE_BAD_PARAMETER;
	}
	return AE_OK;
}
/*
 * Create /sys/firmware/acpi/tables/ (and tables/dynamic/) and populate
 * it with one binary file per table currently known to ACPICA, then
 * install a handler so dynamically loaded tables show up as well.
 */
static int acpi_tables_sysfs_init(void)
{
	struct acpi_table_attr *table_attr;
	struct acpi_table_header *table_header = NULL;
	int table_index;
	acpi_status status;
	int ret;

	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
	if (!tables_kobj)
		goto err;

	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
	if (!dynamic_tables_kobj)
		goto err_dynamic_tables;

	for (table_index = 0;; table_index++) {
		status = acpi_get_table_by_index(table_index, &table_header);
		if (status == AE_BAD_PARAMETER)
			break;		/* walked past the last table */
		if (ACPI_FAILURE(status))
			continue;	/* skip unreadable entries */

		/* fix: dropped the redundant "table_attr = NULL;" that
		 * immediately preceded this allocation */
		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
		if (!table_attr)
			/* NOTE(review): leaks both kobjects - TODO clean up */
			return -ENOMEM;

		acpi_table_attr_init(table_attr, table_header);
		ret = sysfs_create_bin_file(tables_kobj, &table_attr->attr);
		if (ret) {
			kfree(table_attr);
			/* NOTE(review): leaks both kobjects - TODO clean up */
			return ret;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
	}

	kobject_uevent(tables_kobj, KOBJ_ADD);
	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
	status = acpi_install_table_handler(acpi_sysfs_table_handler, NULL);

	return ACPI_FAILURE(status) ? -EINVAL : 0;

err_dynamic_tables:
	kobject_put(tables_kobj);
err:
	return -ENOMEM;
}
/*
* Detailed ACPI IRQ counters:
* /sys/firmware/acpi/interrupts/
*/
u32 acpi_irq_handled;
u32 acpi_irq_not_handled;
#define COUNT_GPE 0
#define COUNT_SCI 1 /* acpi_irq_handled */
#define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */
#define COUNT_ERROR 3 /* other */
#define NUM_COUNTERS_EXTRA 4
struct event_counter {
u32 count;
u32 flags;
};
static struct event_counter *all_counters;
static u32 num_gpes;
static u32 num_counters;
static struct attribute **all_attrs;
static u32 acpi_gpe_count;
static struct attribute_group interrupt_stats_attr_group = {
.name = "interrupts",
};
static struct kobj_attribute *counter_attrs;
static void delete_gpe_attr_array(void)
{
struct event_counter *tmp = all_counters;
all_counters = NULL;
kfree(tmp);
if (counter_attrs) {
int i;
for (i = 0; i < num_gpes; i++)
kfree(counter_attrs[i].attr.name);
kfree(counter_attrs);
}
kfree(all_attrs);
return;
}
/* Account one GPE firing; out-of-range GPE numbers go to "error". */
static void gpe_count(u32 gpe_number)
{
	acpi_gpe_count++;

	if (!all_counters)
		return;		/* stats not (yet) initialised */

	if (gpe_number >= num_gpes)
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;
	else
		all_counters[gpe_number].count++;
}
/* Account one fixed-event firing; unknown events go to "error". */
static void fixed_event_count(u32 event_number)
{
	if (!all_counters)
		return;		/* stats not (yet) initialised */

	if (event_number >= ACPI_NUM_FIXED_EVENTS)
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;
	else
		all_counters[num_gpes + event_number].count++;
}
/* Global ACPICA event callback: dispatch to the matching counter. */
static void acpi_global_event_handler(u32 event_type, acpi_handle device,
				      u32 event_number, void *context)
{
	switch (event_type) {
	case ACPI_EVENT_TYPE_GPE:
		gpe_count(event_number);
		break;
	case ACPI_EVENT_TYPE_FIXED:
		fixed_event_count(event_number);
		break;
	}
}
/*
 * Fetch the hardware status of counter 'index'.  GPE slots resolve
 * their owning device handle first; fixed events are addressed by
 * (index - num_gpes).  Summary counters past the fixed events have no
 * hardware status and return 0 with *status left untouched - callers
 * must filter those indices out first.
 */
static int get_status(u32 index, acpi_event_status *status,
		      acpi_handle *handle)
{
	int result = 0;

	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		goto end;

	if (index < num_gpes) {
		result = acpi_get_gpe_device(index, handle);
		if (result) {
			ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
					"Invalid GPE 0x%x", index));
			goto end;
		}
		result = acpi_get_gpe_status(*handle, index, status);
	} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
		result = acpi_get_event_status(index - num_gpes, status);

end:
	return result;
}
/*
 * Show one counter file: refresh the lazily-maintained summary slots,
 * print the count and - for real GPEs and fixed events - append the
 * current hardware status word.
 */
static ssize_t counter_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	int index = attr - counter_attrs;	/* slot = attribute offset */
	int size;
	acpi_handle handle;
	acpi_event_status status;
	int result = 0;

	/* the sci/sci_not/gpe_all summaries are only updated on read */
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
	    acpi_irq_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
	    acpi_irq_not_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
	    acpi_gpe_count;
	size = sprintf(buf, "%8u", all_counters[index].count);

	/* "gpe_all" or "sci" */
	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		goto end;

	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
		size += sprintf(buf + size, " invalid");
	else if (status & ACPI_EVENT_FLAG_ENABLED)
		size += sprintf(buf + size, " enabled");
	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
		size += sprintf(buf + size, " wake_enabled");
	else
		size += sprintf(buf + size, " disabled");
end:
	size += sprintf(buf + size, "\n");
	return result ? result : size;
}
/*
 * counter_set() sets the specified counter.
 * setting the total "sci" file to any value clears all counters.
 * enable/disable/clear a gpe/fixed event in user space.
 */
static ssize_t counter_set(struct kobject *kobj,
			   struct kobj_attribute *attr, const char *buf,
			   size_t size)
{
	int index = attr - counter_attrs;	/* slot = attribute offset */
	acpi_event_status status;
	acpi_handle handle;
	int result = 0;
	unsigned long tmp;

	/* any write to "sci" resets every counter */
	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
		int i;
		for (i = 0; i < num_counters; ++i)
			all_counters[i].count = 0;
		acpi_gpe_count = 0;
		acpi_irq_handled = 0;
		acpi_irq_not_handled = 0;
		goto end;
	}

	/* show the event status for both GPEs and Fixed Events */
	/* NOTE(review): for the non-sci summary counters get_status()
	 * returns 0 without writing *status, so the HAS_HANDLER test
	 * below reads an uninitialized value - TODO confirm/fix */
	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
		printk(KERN_WARNING PREFIX
		       "Can not change Invalid GPE/Fixed Event status\n");
		return -EINVAL;
	}

	if (index < num_gpes) {
		/* GPE slot: "disable"/"enable"/"clear" act on the GPE
		 * itself, a plain number overwrites the counter value */
		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_disable_gpe(handle, index);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_enable_gpe(handle, index);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_SET))
			result = acpi_clear_gpe(handle, index);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
		/* same commands, applied to a fixed event */
		int event = index - num_gpes;
		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_disable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_enable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_SET))
			result = acpi_clear_event(event);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else
		/* NOTE(review): unlike the kstrtoul() paths above, bad
		 * input here silently becomes 0 - consider unifying */
		all_counters[index].count = strtoul(buf, NULL, 0);

	/* NOTE(review): result may hold an acpi_status or a negative
	 * errno here; any non-success value is collapsed to -EINVAL */
	if (ACPI_FAILURE(result))
		result = -EINVAL;
end:
	return result ? result : size;
}
/*
 * Build /sys/firmware/acpi/interrupts/: one counter file per GPE, per
 * fixed event, plus the gpe_all/sci/sci_not/error summary slots.  Any
 * allocation failure tears the whole thing down again.
 */
void acpi_irq_stats_init(void)
{
	acpi_status status;
	int i;

	if (all_counters)
		return;		/* already initialised */

	num_gpes = acpi_current_gpe_count;
	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;

	/* + 1 for the NULL terminator the attribute group requires */
	all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
			    GFP_KERNEL);
	if (all_attrs == NULL)
		return;

	all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
			       GFP_KERNEL);
	if (all_counters == NULL)
		goto fail;

	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
	if (ACPI_FAILURE(status))
		goto fail;

	counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
				GFP_KERNEL);
	if (counter_attrs == NULL)
		goto fail;

	for (i = 0; i < num_counters; ++i) {
		char buffer[12];
		char *name;

		/* derive the sysfs file name for slot i */
		if (i < num_gpes)
			sprintf(buffer, "gpe%02X", i);
		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
			sprintf(buffer, "ff_pmtimer");
		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
			sprintf(buffer, "ff_gbl_lock");
		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
			sprintf(buffer, "ff_pwr_btn");
		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
			sprintf(buffer, "ff_slp_btn");
		else if (i == num_gpes + ACPI_EVENT_RTC)
			sprintf(buffer, "ff_rt_clk");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
			sprintf(buffer, "gpe_all");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
			sprintf(buffer, "sci");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
			sprintf(buffer, "sci_not");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
			sprintf(buffer, "error");
		else
			sprintf(buffer, "bug%02X", i);	/* should not happen */

		name = kstrdup(buffer, GFP_KERNEL);
		if (name == NULL)
			goto fail;

		sysfs_attr_init(&counter_attrs[i].attr);
		counter_attrs[i].attr.name = name;
		counter_attrs[i].attr.mode = 0644;
		counter_attrs[i].show = counter_show;
		counter_attrs[i].store = counter_set;

		all_attrs[i] = &counter_attrs[i].attr;
	}

	interrupt_stats_attr_group.attrs = all_attrs;
	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
		return;		/* success */

	/* NOTE(review): failure paths do not uninstall the global event
	 * handler once it has been installed - TODO confirm acceptable */
fail:
	delete_gpe_attr_array();
	return;
}
/* Remove the interrupts/ group and free all counter bookkeeping. */
static void __exit interrupt_stats_exit(void)
{
	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
	delete_gpe_attr_array();
}
/* /sys/firmware/acpi/pm_profile: the FADT preferred PM profile value. */
static ssize_t
acpi_show_profile(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}
static const struct device_attribute pm_profile_attr =
__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
/* Show whether this hotplug profile is enabled (0/1). */
static ssize_t hotplug_enabled_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);

	return sprintf(buf, "%d\n", hotplug->enabled);
}
/* Store the enabled flag for a hotplug profile; accepts only 0 or 1. */
static ssize_t hotplug_enabled_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t size)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
	unsigned int val;

	if (kstrtouint(buf, 10, &val))
		return -EINVAL;
	if (val > 1)
		return -EINVAL;

	acpi_scan_hotplug_enabled(hotplug, val);
	return size;
}
static struct kobj_attribute hotplug_enabled_attr =
__ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
hotplug_enabled_store);
static struct attribute *hotplug_profile_attrs[] = {
&hotplug_enabled_attr.attr,
NULL
};
static struct kobj_type acpi_hotplug_profile_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_attrs = hotplug_profile_attrs,
};
/*
 * Publish a hotplug profile under /sys/firmware/acpi/hotplug/<name>.
 * Failures are only logged; the profile simply stays invisible.
 */
void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
				    const char *name)
{
	int error;

	if (!hotplug_kobj) {
		pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
		return;
	}

	error = kobject_init_and_add(&hotplug->kobj,
		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
	if (error) {
		pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
		return;
	}

	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
}
/* Show the global "force hot remove" flag as 0/1. */
static ssize_t force_remove_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", !!acpi_force_hot_remove);
}
/* Store the "force hot remove" flag, flipped under the hotplug lock. */
static ssize_t force_remove_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t size)
{
	bool enable;
	int err;

	err = strtobool(buf, &enable);
	if (err < 0)
		return err;

	lock_device_hotplug();
	acpi_force_hot_remove = enable;
	unlock_device_hotplug();
	return size;
}
static const struct kobj_attribute force_remove_attr =
__ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
force_remove_store);
/*
 * Set up the ACPI sysfs hierarchy: the tables/ tree, the hotplug/
 * directory with its force_remove knob, and the pm_profile attribute.
 */
int __init acpi_sysfs_init(void)
{
	int result;

	result = acpi_tables_sysfs_init();
	if (result)
		return result;

	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
	/*
	 * Fix: kobject_create_and_add() can return NULL on allocation
	 * failure; sysfs_create_file() must not be handed a NULL kobject.
	 */
	if (!hotplug_kobj)
		return -ENOMEM;

	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
	if (result)
		return result;

	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
	return result;
}
| gpl-2.0 |
kibuuka/dv80_2.6_emo | arch/arm/mach-msm/qdsp5v2/adsp_driver.c | 725 | 15405 | /*
* Copyright (C) 2008 Google, Inc.
* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
* Author: Iliyan Malchev <ibm@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/msm_adsp.h>
#include <linux/android_pmem.h>
#include "adsp.h"
#include <mach/debug_mm.h>
#include <linux/slab.h>
/* Userspace descriptor for ADSP_IOCTL_REGISTER_PMEM: a pmem file
 * descriptor plus the client's virtual address for that buffer. */
struct adsp_pmem_info {
	int fd;		/* pmem file descriptor from userspace */
	void *vaddr;	/* client virtual address of the mapping */
};
/* One registered pmem buffer, tracked per module so addresses in DSP
 * commands/events can be translated between the three address spaces. */
struct adsp_pmem_region {
	struct hlist_node list;	/* links into module->pmem_regions */
	void *vaddr;		/* userspace virtual base */
	unsigned long paddr;	/* physical base */
	unsigned long kvaddr;	/* kernel virtual base */
	unsigned long len;	/* region length in bytes */
	struct file *file;	/* pinned pmem file, put on unregister */
};
/* Per-minor state for one /dev/adsp character device. */
struct adsp_device {
	struct msm_adsp_module *module;	/* bound ADSP module, NULL when closed */
	spinlock_t event_queue_lock;	/* protects event_queue */
	wait_queue_head_t event_wait;	/* readers blocked in adsp_get_event() */
	struct list_head event_queue;	/* events queued by adsp_event() */
	int abort;			/* set to kick readers out with -ENODEV */
	const char *name;
	struct device *device;
	struct cdev cdev;
};
static struct adsp_device *inode_to_device(struct inode *inode);
/* Range helpers used to validate registered pmem regions.  All operate
 * on objects that have ->vaddr and ->len members. */

/* True iff [v, v + l) lies entirely inside region r. */
#define __CONTAINS(r, v, l) ({ \
	typeof(r) __r = r; \
	typeof(v) __v = v; \
	typeof(v) __e = __v + l; \
	int res = __v >= __r->vaddr && \
		__e <= __r->vaddr + __r->len; \
	res; \
})

/* True iff region r2 lies entirely inside region r1. */
#define CONTAINS(r1, r2) ({ \
	typeof(r2) __r2 = r2; \
	__CONTAINS(r1, __r2->vaddr, __r2->len); \
})

/* True iff address v falls inside region r. */
#define IN_RANGE(r, v) ({ \
	typeof(r) __r = r; \
	typeof(v) __vv = v; \
	int res = ((__vv >= __r->vaddr) && \
		(__vv < (__r->vaddr + __r->len))); \
	res; \
})

/* True iff either endpoint of r2 falls inside r1 (partial overlap). */
#define OVERLAPS(r1, r2) ({ \
	typeof(r1) __r1 = r1; \
	typeof(r2) __r2 = r2; \
	typeof(__r2->vaddr) __v = __r2->vaddr; \
	typeof(__v) __e = __v + __r2->len - 1; \
	int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \
	res; \
})
/*
 * Check that a proposed region [vaddr, vaddr + len) neither nests with
 * nor overlaps any pmem region already registered on this module.
 * Caller must hold module->pmem_regions_lock.
 * Returns 0 when disjoint from all registered regions, -EINVAL on clash.
 */
static int adsp_pmem_check(struct msm_adsp_module *module,
void *vaddr, unsigned long len)
{
	struct adsp_pmem_region *region_elt;
	struct hlist_node *node;
	/* stack-local pseudo-region used as the comparison operand */
	struct adsp_pmem_region t = { .vaddr = vaddr, .len = len };

	hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) {
		if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
		    OVERLAPS(region_elt, &t)) {
			MM_ERR("module %s:"
				" region (vaddr %p len %ld)"
				" clashes with registered region"
				" (vaddr %p paddr %p len %ld)\n",
				module->name,
				vaddr, len,
				region_elt->vaddr,
				(void *)region_elt->paddr,
				region_elt->len);
			return -EINVAL;
		}
	}
	return 0;
}
/*
 * ADSP_IOCTL_REGISTER_PMEM backend: pin the pmem file behind info->fd,
 * verify the client range does not clash with existing registrations,
 * and record the region on module->pmem_regions.
 * Returns 0 on success, -ENOMEM/-EINVAL on failure; on the error paths
 * the pmem file reference and the region allocation are released again.
 */
static int adsp_pmem_add(struct msm_adsp_module *module,
struct adsp_pmem_info *info)
{
	unsigned long paddr, kvaddr, len;
	struct file *file;
	struct adsp_pmem_region *region;
	int rc = -EINVAL;

	mutex_lock(&module->pmem_regions_lock);
	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region) {
		rc = -ENOMEM;
		goto end;
	}
	INIT_HLIST_NODE(&region->list);
	/* resolves fd -> (paddr, kvaddr, len) and takes a ref on the file */
	if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) {
		kfree(region);
		goto end;
	}
	rc = adsp_pmem_check(module, info->vaddr, len);
	if (rc < 0) {
		put_pmem_file(file);
		kfree(region);
		goto end;
	}
	region->vaddr = info->vaddr;
	region->paddr = paddr;
	region->kvaddr = kvaddr;
	region->len = len;
	region->file = file;
	hlist_add_head(&region->list, &module->pmem_regions);
end:
	mutex_unlock(&module->pmem_regions_lock);
	return rc;
}
/*
 * Find the registered region that fully contains [*addr, *addr + len).
 * On success *region points at the first match and 0 is returned; -1 if
 * nothing matches.  If several regions match, the first is still used
 * but all candidates are logged for debugging.
 * Caller must hold module->pmem_regions_lock.
 */
static int adsp_pmem_lookup_vaddr(struct msm_adsp_module *module, void **addr,
unsigned long len, struct adsp_pmem_region **region)
{
	struct hlist_node *node;
	void *vaddr = *addr;
	struct adsp_pmem_region *region_elt;
	int match_count = 0;

	*region = NULL;

	/* returns physical address or zero */
	hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) {
		if (vaddr >= region_elt->vaddr &&
		    vaddr < region_elt->vaddr + region_elt->len &&
		    vaddr + len <= region_elt->vaddr + region_elt->len) {
			/* offset since we could pass vaddr inside a registered
			 * pmem buffer
			 */
			match_count++;
			if (!*region)
				*region = region_elt;
		}
	}

	if (match_count > 1) {
		MM_ERR("module %s: "
			"multiple hits for vaddr %p, len %ld\n",
			module->name, vaddr, len);
		hlist_for_each_entry(region_elt, node,
				&module->pmem_regions, list) {
			if (vaddr >= region_elt->vaddr &&
			    vaddr < region_elt->vaddr + region_elt->len &&
			    vaddr + len <= region_elt->vaddr + region_elt->len)
				MM_ERR("%p, %ld --> %p\n",
					region_elt->vaddr,
					region_elt->len,
					(void *)region_elt->paddr);
		}
	}

	return *region ? 0 : -1;
}
/*
 * Patch *addr in place: replace a client virtual address with its
 * physical address, and return the matching kernel virtual address
 * through *kvaddr.  Returns 0 on success, the lookup failure otherwise.
 */
int adsp_pmem_fixup_kvaddr(struct msm_adsp_module *module, void **addr,
unsigned long *kvaddr, unsigned long len)
{
	struct adsp_pmem_region *region;
	void *vaddr = *addr;
	unsigned long *paddr = (unsigned long *)addr;	/* aliases *addr */
	int ret;

	ret = adsp_pmem_lookup_vaddr(module, addr, len, &region);
	if (ret) {
		MM_ERR("not patching %s (paddr & kvaddr),"
			" lookup (%p, %ld) failed\n",
			module->name, vaddr, len);
		return ret;
	}
	/* same offset into the region for each address space */
	*paddr = region->paddr + (vaddr - region->vaddr);
	*kvaddr = region->kvaddr + (vaddr - region->vaddr);
	return 0;
}
/*
 * Patch *addr in place: replace a client virtual address with the
 * corresponding physical address.  Returns 0 on success, or the
 * (negative) lookup failure.
 */
int adsp_pmem_fixup(struct msm_adsp_module *module, void **addr,
unsigned long len)
{
	struct adsp_pmem_region *region;
	void *vaddr = *addr;
	int rc = adsp_pmem_lookup_vaddr(module, addr, len, &region);

	if (rc) {
		MM_ERR("not patching %s, lookup (%p, %ld) failed\n",
			module->name, vaddr, len);
		return rc;
	}

	/* rewrite the pointer slot with the translated physical address */
	*(unsigned long *)addr = region->paddr + (vaddr - region->vaddr);
	return 0;
}
/*
 * Run the module-specific command verifier, if one is registered.
 * Modules without a verifier are accepted after logging a notice.
 */
static int adsp_verify_cmd(struct msm_adsp_module *module,
unsigned int queue_id, void *cmd_data,
size_t cmd_size)
{
	/* call the per module verifier */
	if (!module->verify_cmd) {
		MM_INFO("no packet verifying function "
			"for task %s\n", module->name);
		return 0;
	}

	return module->verify_cmd(module, queue_id, cmd_data, cmd_size);
}
/*
 * ADSP_IOCTL_WRITE_COMMAND backend: copy the command descriptor and its
 * payload from userspace, verify it, and hand it to msm_adsp_write().
 * Payloads larger than 256 bytes get a temporary heap buffer.
 * Returns 0 on success or -EFAULT/-ENOMEM/-EINVAL.
 */
static long adsp_write_cmd(struct adsp_device *adev, void __user *arg)
{
	struct adsp_command_t cmd;
	unsigned char buf[256];
	void *cmd_data;
	long rc;

	if (copy_from_user(&cmd, (void __user *)arg, sizeof(cmd)))
		return -EFAULT;

	if (cmd.len > 256) {
		cmd_data = kmalloc(cmd.len, GFP_USER);
		if (!cmd_data)
			return -ENOMEM;
	} else {
		cmd_data = buf;
	}

	if (copy_from_user(cmd_data, (void __user *)(cmd.data), cmd.len)) {
		/*
		 * Bug fix: the original jumped to the unlock label here,
		 * releasing pmem_regions_lock before it had been taken.
		 */
		rc = -EFAULT;
		goto free_buf;
	}

	mutex_lock(&adev->module->pmem_regions_lock);
	if (adsp_verify_cmd(adev->module, cmd.queue, cmd_data, cmd.len)) {
		MM_ERR("module %s: verify failed.\n", adev->module->name);
		rc = -EINVAL;
		goto unlock;
	}
	rc = msm_adsp_write(adev->module, cmd.queue, cmd_data, cmd.len);
unlock:
	mutex_unlock(&adev->module->pmem_regions_lock);
free_buf:
	if (cmd.len > 256)
		kfree(cmd_data);
	return rc;
}
/*
 * Wait condition for adsp_get_event(): true when the event queue is
 * non-empty or the device has been aborted.
 */
static int adsp_events_pending(struct adsp_device *adev)
{
	unsigned long flags;
	int pending;

	/* peek at the queue under its lock */
	spin_lock_irqsave(&adev->event_queue_lock, flags);
	pending = !list_empty(&adev->event_queue);
	spin_unlock_irqrestore(&adev->event_queue_lock, flags);

	return pending || adev->abort;
}
/*
 * Reverse lookup: find the registered region containing the physical
 * address stored in *addr.  Returns 0 and sets *region, or -1 if no
 * region matches.
 */
static int adsp_pmem_lookup_paddr(struct msm_adsp_module *module, void **addr,
struct adsp_pmem_region **region)
{
	struct hlist_node *node;
	unsigned long paddr = (unsigned long)(*addr);
	struct adsp_pmem_region *region_elt;

	hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) {
		if (paddr >= region_elt->paddr &&
		    paddr < region_elt->paddr + region_elt->len) {
			*region = region_elt;
			return 0;
		}
	}
	return -1;
}
/*
 * Patch *addr in place: replace a physical address (as delivered in DSP
 * events) with the matching client virtual address.
 * Returns 0 on success, the lookup failure otherwise.
 */
int adsp_pmem_paddr_fixup(struct msm_adsp_module *module, void **addr)
{
	struct adsp_pmem_region *region;
	unsigned long paddr = (unsigned long)(*addr);
	unsigned long *vaddr = (unsigned long *)addr;	/* aliases *addr */
	int ret;

	ret = adsp_pmem_lookup_paddr(module, addr, &region);
	if (ret) {
		MM_ERR("not patching %s, paddr %p lookup failed\n",
			module->name, vaddr);
		return ret;
	}
	*vaddr = (unsigned long)region->vaddr + (paddr - region->paddr);
	return 0;
}
/*
 * Give the module-specific hook a chance to rewrite addresses inside an
 * incoming event; modules without a hook accept the event unchanged.
 */
static int adsp_patch_event(struct msm_adsp_module *module,
struct adsp_event *event)
{
	/* call the per-module msg verifier */
	if (!module->patch_event)
		return 0;
	return module->patch_event(module, event);
}
/*
 * ADSP_IOCTL_GET_EVENT backend: wait (bounded by evt.timeout_ms when
 * positive, otherwise indefinitely) for an event, pop it off the queue
 * and copy it into the caller's adsp_event_t.
 * Returns 0 on success; -EFAULT, -ETIMEDOUT, a -ERESTARTSYS from the
 * interruptible wait, -ENODEV on abort, -EAGAIN when woken with an
 * empty queue, or -ETOOSMALL when the user buffer cannot hold the data.
 */
static long adsp_get_event(struct adsp_device *adev, void __user *arg)
{
	unsigned long flags;
	struct adsp_event *data = NULL;
	struct adsp_event_t evt;
	int timeout;
	long rc = 0;

	if (copy_from_user(&evt, arg, sizeof(struct adsp_event_t)))
		return -EFAULT;

	timeout = (int)evt.timeout_ms;
	if (timeout > 0) {
		rc = wait_event_interruptible_timeout(
			adev->event_wait, adsp_events_pending(adev),
			msecs_to_jiffies(timeout));
		if (rc == 0)
			return -ETIMEDOUT;
	} else {
		/* non-positive timeout means wait forever */
		rc = wait_event_interruptible(
			adev->event_wait, adsp_events_pending(adev));
	}
	if (rc < 0)
		return rc;

	if (adev->abort)
		return -ENODEV;

	/* pop the oldest queued event, if one survived the race */
	spin_lock_irqsave(&adev->event_queue_lock, flags);
	if (!list_empty(&adev->event_queue)) {
		data = list_first_entry(&adev->event_queue,
					struct adsp_event, list);
		list_del(&data->list);
	}
	spin_unlock_irqrestore(&adev->event_queue_lock, flags);

	if (!data)
		return -EAGAIN;

	/* DSP messages are type 0; they may contain physical addresses */
	if (data->type == 0)
		adsp_patch_event(adev->module, data);

	/* map adsp_event --> adsp_event_t */
	if (evt.len < data->size) {
		rc = -ETOOSMALL;
		goto end;
	}
	/* 16-bit and 32-bit payloads live in different union members */
	if (data->msg_id != EVENT_MSG_ID) {
		if (copy_to_user((void *)(evt.data), data->data.msg16,
					data->size)) {
			rc = -EFAULT;
			goto end;
		}
	} else {
		if (copy_to_user((void *)(evt.data), data->data.msg32,
					data->size)) {
			rc = -EFAULT;
			goto end;
		}
	}

	evt.type = data->type; /* 0 --> from aDSP, 1 --> from ARM9 */
	evt.msg_id = data->msg_id;
	evt.flags = data->is16;
	evt.len = data->size;
	if (copy_to_user(arg, &evt, sizeof(evt)))
		rc = -EFAULT;
end:
	kfree(data);
	return rc;
}
/*
 * Drop every pmem region registered on a module, releasing the pinned
 * pmem files.  Used on device release and ADSP_IOCTL_UNREGISTER_PMEM.
 * Always returns 0.
 */
static int adsp_pmem_del(struct msm_adsp_module *module)
{
	struct hlist_node *node, *tmp;
	struct adsp_pmem_region *region;

	mutex_lock(&module->pmem_regions_lock);
	hlist_for_each_safe(node, tmp, &module->pmem_regions) {
		region = hlist_entry(node, struct adsp_pmem_region, list);
		hlist_del(node);
		put_pmem_file(region->file);
		kfree(region);
	}
	mutex_unlock(&module->pmem_regions_lock);
	BUG_ON(!hlist_empty(&module->pmem_regions));
	return 0;
}
/*
 * ioctl dispatcher for /dev/adsp devices.
 * NOTE(review): ADSP_IOCTL_DISABLE_ACK and ADSP_IOCTL_ABORT_EVENT_READ
 * break out of the switch and so return -EINVAL even after performing
 * their action; this historical quirk is preserved in case userspace
 * depends on it.
 */
static long adsp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct adsp_device *adev = filp->private_data;

	switch (cmd) {
	case ADSP_IOCTL_ENABLE:
		return msm_adsp_enable(adev->module);
	case ADSP_IOCTL_DISABLE:
		return msm_adsp_disable(adev->module);
	case ADSP_IOCTL_DISABLE_EVENT_RSP:
		return msm_adsp_disable_event_rsp(adev->module);
	case ADSP_IOCTL_DISABLE_ACK:
		MM_ERR("ADSP_IOCTL_DISABLE_ACK is not implemented\n");
		break;
	case ADSP_IOCTL_WRITE_COMMAND:
		return adsp_write_cmd(adev, (void __user *) arg);
	case ADSP_IOCTL_GET_EVENT:
		return adsp_get_event(adev, (void __user *) arg);
	case ADSP_IOCTL_SET_CLKRATE: {
		unsigned long clk_rate;
		if (copy_from_user(&clk_rate, (void *) arg, sizeof(clk_rate)))
			return -EFAULT;
		return adsp_set_clkrate(adev->module, clk_rate);
	}
	case ADSP_IOCTL_REGISTER_PMEM: {
		struct adsp_pmem_info info;
		if (copy_from_user(&info, (void *) arg, sizeof(info)))
			return -EFAULT;
		return adsp_pmem_add(adev->module, &info);
	}
	case ADSP_IOCTL_ABORT_EVENT_READ:
		/* kick any reader blocked in adsp_get_event() */
		adev->abort = 1;
		wake_up(&adev->event_wait);
		break;
	case ADSP_IOCTL_UNREGISTER_PMEM:
		return adsp_pmem_del(adev->module);
	default:
		break;
	}
	return -EINVAL;
}
/*
 * Last close on the device: detach the module pointer first so a racing
 * open() does not see a half-torn-down device, then drop all registered
 * pmem regions and release the module reference.
 */
static int adsp_release(struct inode *inode, struct file *filp)
{
	struct adsp_device *adev = filp->private_data;
	struct msm_adsp_module *module = adev->module;
	int rc = 0;

	MM_INFO("release '%s'\n", adev->name);

	/* clear module before putting it to avoid race with open() */
	adev->module = NULL;

	rc = adsp_pmem_del(module);

	msm_adsp_put(module);
	return rc;
}
static void adsp_event(void *driver_data, unsigned id, size_t len,
void (*getevent)(void *ptr, size_t len))
{
struct adsp_device *adev = driver_data;
struct adsp_event *event;
unsigned long flags;
if (len > ADSP_EVENT_MAX_SIZE) {
MM_ERR("event too large (%d bytes)\n", len);
return;
}
event = kmalloc(sizeof(*event), GFP_ATOMIC);
if (!event) {
MM_ERR("cannot allocate buffer\n");
return;
}
if (id != EVENT_MSG_ID) {
event->type = 0;
event->is16 = 0;
event->msg_id = id;
event->size = len;
getevent(event->data.msg16, len);
} else {
event->type = 1;
event->is16 = 1;
event->msg_id = id;
event->size = len;
getevent(event->data.msg32, len);
}
spin_lock_irqsave(&adev->event_queue_lock, flags);
list_add_tail(&event->list, &adev->event_queue);
spin_unlock_irqrestore(&adev->event_queue_lock, flags);
wake_up(&adev->event_wait);
}
/* Callback table handed to msm_adsp_get(); events land in adsp_event(). */
static struct msm_adsp_ops adsp_ops = {
	.event = adsp_event,
};
/*
 * Open a /dev/adsp device: bind the ADSP module that matches the minor
 * and set up its pmem bookkeeping.
 * NOTE(review): pmem_regions and pmem_regions_lock are (re)initialized
 * on every open — only safe if each module has a single opener at a
 * time; confirm against userspace usage.
 */
static int adsp_open(struct inode *inode, struct file *filp)
{
	struct adsp_device *adev;
	int rc;

	rc = nonseekable_open(inode, filp);
	if (rc < 0)
		return rc;

	adev = inode_to_device(inode);
	if (!adev)
		return -ENODEV;

	MM_INFO("open '%s'\n", adev->name);

	rc = msm_adsp_get(adev->name, &adev->module, &adsp_ops, adev);
	if (rc)
		return rc;

	MM_INFO("opened module '%s' adev %p\n", adev->name, adev);
	filp->private_data = adev;
	adev->abort = 0;
	INIT_HLIST_HEAD(&adev->module->pmem_regions);
	mutex_init(&adev->module->pmem_regions_lock);

	return 0;
}
static unsigned adsp_device_count;
static struct adsp_device *adsp_devices;
/*
 * Map a character-device inode to its adsp_device.  Minor numbers index
 * directly into the adsp_devices array; entries whose device node was
 * never created (adev->device == NULL) are not handed out.
 */
static struct adsp_device *inode_to_device(struct inode *inode)
{
	unsigned minor = MINOR(inode->i_rdev);

	if (minor >= adsp_device_count)
		return NULL;
	if (!adsp_devices[minor].device)
		return NULL;
	return &adsp_devices[minor];
}
static dev_t adsp_devno;
static struct class *adsp_class;
/* File operations for the adsp character devices (nonseekable). */
static const struct file_operations adsp_fops = {
	.owner = THIS_MODULE,
	.open = adsp_open,
	.unlocked_ioctl = adsp_ioctl,
	.release = adsp_release,
};
/*
 * Create the device node and cdev for one ADSP module.  On cdev_add()
 * failure the device is destroyed again and adev->device stays NULL so
 * inode_to_device() never hands the entry out.  Errors are silent.
 */
static void adsp_create(struct adsp_device *adev, const char *name,
struct device *parent, dev_t devt)
{
	struct device *dev;
	int rc;

	dev = device_create(adsp_class, parent, devt, "%s", name);
	if (IS_ERR(dev))
		return;

	init_waitqueue_head(&adev->event_wait);
	INIT_LIST_HEAD(&adev->event_queue);
	spin_lock_init(&adev->event_queue_lock);

	cdev_init(&adev->cdev, &adsp_fops);
	adev->cdev.owner = THIS_MODULE;

	rc = cdev_add(&adev->cdev, devt, 1);
	if (rc < 0) {
		device_destroy(adsp_class, devt);
	} else {
		/* publishing adev->device makes the minor visible */
		adev->device = dev;
		adev->name = name;
	}
}
/*
 * Create the "adsp" class, a char-dev region of n minors and one
 * adsp_device per module.  Setup errors unwind what was done so far;
 * failures inside individual adsp_create() calls just leave that minor
 * without a node.
 * NOTE(review): the error path pairs class_create() with
 * class_unregister(); class_destroy() is the usual counterpart —
 * confirm this is intentional in this tree.
 */
void msm_adsp_publish_cdevs(struct msm_adsp_module *modules, unsigned n)
{
	int rc;

	adsp_devices = kzalloc(sizeof(struct adsp_device) * n, GFP_KERNEL);
	if (!adsp_devices)
		return;

	adsp_class = class_create(THIS_MODULE, "adsp");
	if (IS_ERR(adsp_class))
		goto fail_create_class;

	rc = alloc_chrdev_region(&adsp_devno, 0, n, "adsp");
	if (rc < 0)
		goto fail_alloc_region;

	adsp_device_count = n;
	for (n = 0; n < adsp_device_count; n++) {
		adsp_create(adsp_devices + n,
			modules[n].name, &modules[n].pdev.dev,
			MKDEV(MAJOR(adsp_devno), n));
	}

	return;

fail_alloc_region:
	class_unregister(adsp_class);
fail_create_class:
	kfree(adsp_devices);
}
| gpl-2.0 |
penhoi/linux-3.14.56 | arch/mips/netlogic/xlp/usb-init-xlp2.c | 981 | 8199 | /*
* Copyright (c) 2003-2013 Broadcom Corporation
* All Rights Reserved
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the Broadcom
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <asm/netlogic/common.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
/* XLP2xx/9xx USB3 controller register offsets and bit fields.
 * Bit meanings follow the Broadcom/Netlogic register layout; values are
 * taken verbatim from the vendor code. */

/* USB3 control register 0 */
#define XLPII_USB3_CTL_0 0xc0
#define XLPII_VAUXRST BIT(0)
#define XLPII_VCCRST BIT(1)
#define XLPII_NUM2PORT 9
#define XLPII_NUM3PORT 13
#define XLPII_RTUNEREQ BIT(20)
#define XLPII_MS_CSYSREQ BIT(21)
#define XLPII_XS_CSYSREQ BIT(22)
#define XLPII_RETENABLEN BIT(23)
#define XLPII_TX2RX BIT(24)
#define XLPII_XHCIREV BIT(25)
#define XLPII_ECCDIS BIT(26)
/* interrupt status / mask registers */
#define XLPII_USB3_INT_REG 0xc2
#define XLPII_USB3_INT_MASK 0xc3
/* PHY test-mode register */
#define XLPII_USB_PHY_TEST 0xc6
#define XLPII_PRESET BIT(0)
#define XLPII_ATERESET BIT(1)
#define XLPII_LOOPEN BIT(2)
#define XLPII_TESTPDHSP BIT(3)
#define XLPII_TESTPDSSP BIT(4)
#define XLPII_TESTBURNIN BIT(5)
/* PHY LOS/level register: values below are field shift positions */
#define XLPII_USB_PHY_LOS_LV 0xc9
#define XLPII_LOSLEV 0
#define XLPII_LOSBIAS 5
#define XLPII_SQRXTX 8
#define XLPII_TXBOOST 11
#define XLPII_RSLKSEL 16
#define XLPII_FSEL 20
/* reference clock register */
#define XLPII_USB_RFCLK_REG 0xcc
#define XLPII_VVLD 30

/* thin wrappers over the generic register accessors */
#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)

/* PCI config base of a USB instance; layout differs on XLP9xx */
#define nlm_xlpii_get_usb_pcibase(node, inst) \
	nlm_pcicfg_base(cpu_is_xlp9xx() ? \
		XLP9XX_IO_USB_OFFSET(node, inst) : \
		XLP2XX_IO_USB_OFFSET(node, inst))
#define nlm_xlpii_get_usb_regbase(node, inst) \
	(nlm_xlpii_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
/*
 * Extra-ack handler for XLP2xx XHCI interrupts: map the IRQ number to
 * its port's register base (single node on 2xx) and clear all pending
 * interrupt bits by writing 1s to the status register.
 */
static void xlp2xx_usb_ack(struct irq_data *data)
{
	u64 port_addr;

	switch (data->irq) {
	case PIC_2XX_XHCI_0_IRQ:
		port_addr = nlm_xlpii_get_usb_regbase(0, 1);
		break;
	case PIC_2XX_XHCI_1_IRQ:
		port_addr = nlm_xlpii_get_usb_regbase(0, 2);
		break;
	case PIC_2XX_XHCI_2_IRQ:
		port_addr = nlm_xlpii_get_usb_regbase(0, 3);
		break;
	default:
		pr_err("No matching USB irq!\n");
		return;
	}
	/* write-1-to-clear the whole interrupt status register */
	nlm_write_usb_reg(port_addr, XLPII_USB3_INT_REG, 0xffffffff);
}
/*
 * Extra-ack handler for XLP9xx XHCI interrupts.  Global IRQ numbers are
 * node * NLM_IRQS_PER_NODE + local-irq, so split them back apart before
 * locating the port registers, then write-1-to-clear the status bits.
 */
static void xlp9xx_usb_ack(struct irq_data *data)
{
	u64 port_addr;
	int node, irq;

	/* Find the node and irq on the node */
	irq = data->irq % NLM_IRQS_PER_NODE;
	node = data->irq / NLM_IRQS_PER_NODE;

	switch (irq) {
	case PIC_9XX_XHCI_0_IRQ:
		port_addr = nlm_xlpii_get_usb_regbase(node, 1);
		break;
	case PIC_9XX_XHCI_1_IRQ:
		port_addr = nlm_xlpii_get_usb_regbase(node, 2);
		break;
	default:
		pr_err("No matching USB irq %d node %d!\n", irq, node);
		return;
	}
	nlm_write_usb_reg(port_addr, XLPII_USB3_INT_REG, 0xffffffff);
}
/*
 * Bring one XLP2xx/9xx USB3 port out of reset: program the PHY clock
 * and reset registers, set up the controller control word, enable and
 * clear interrupts, then poke three XHCI core registers through the
 * port's PCI memory BAR.
 *
 * NOTE(review): the PHY-reset step masks val with the test bits without
 * a ~, i.e. it clears every other bit including XLPII_PRESET; this
 * looks intentional (deassert PHY reset while preserving test modes)
 * but should be confirmed against the Broadcom documentation.
 * NOTE(review): an ioremap() failure returns silently, leaving the
 * controller only partially initialized.
 */
static void nlm_xlpii_usb_hw_reset(int node, int port)
{
	u64 port_addr, xhci_base, pci_base;
	void __iomem *corebase;
	u32 val;

	port_addr = nlm_xlpii_get_usb_regbase(node, port);

	/* Set frequency */
	val = nlm_read_usb_reg(port_addr, XLPII_USB_PHY_LOS_LV);
	val &= ~(0x3f << XLPII_FSEL);
	val |= (0x27 << XLPII_FSEL);
	nlm_write_usb_reg(port_addr, XLPII_USB_PHY_LOS_LV, val);

	val = nlm_read_usb_reg(port_addr, XLPII_USB_RFCLK_REG);
	val |= (1 << XLPII_VVLD);
	nlm_write_usb_reg(port_addr, XLPII_USB_RFCLK_REG, val);

	/* PHY reset */
	val = nlm_read_usb_reg(port_addr, XLPII_USB_PHY_TEST);
	val &= (XLPII_ATERESET | XLPII_LOOPEN | XLPII_TESTPDHSP
		| XLPII_TESTPDSSP | XLPII_TESTBURNIN);
	nlm_write_usb_reg(port_addr, XLPII_USB_PHY_TEST, val);

	/* Setup control register */
	val = XLPII_VAUXRST | XLPII_VCCRST | (1 << XLPII_NUM2PORT)
		| (1 << XLPII_NUM3PORT) | XLPII_MS_CSYSREQ | XLPII_XS_CSYSREQ
		| XLPII_RETENABLEN | XLPII_XHCIREV;
	nlm_write_usb_reg(port_addr, XLPII_USB3_CTL_0, val);

	/* Enable interrupts */
	nlm_write_usb_reg(port_addr, XLPII_USB3_INT_MASK, 0x00000001);

	/* Clear all interrupts */
	nlm_write_usb_reg(port_addr, XLPII_USB3_INT_REG, 0xffffffff);

	udelay(2000);

	/* XHCI configuration at PCI mem */
	pci_base = nlm_xlpii_get_usb_pcibase(node, port);
	/* BAR0 (config offset 0x4 in this word-addressed space), masked
	 * down to the memory base */
	xhci_base = nlm_read_usb_reg(pci_base, 0x4) & ~0xf;
	corebase = ioremap(xhci_base, 0x10000);
	if (!corebase)
		return;

	writel(0x240002, corebase + 0xc2c0);
	/* GCTL 0xc110 */
	val = readl(corebase + 0xc110);
	val &= ~(0x3 << 12);
	val |= (1 << 12);
	writel(val, corebase + 0xc110);
	udelay(100);

	/* PHYCFG 0xc200 */
	val = readl(corebase + 0xc200);
	val &= ~(1 << 6);
	writel(val, corebase + 0xc200);
	udelay(100);

	/* PIPECTL 0xc2c0 */
	val = readl(corebase + 0xc2c0);
	val &= ~(1 << 17);
	writel(val, corebase + 0xc2c0);

	iounmap(corebase);
}
/*
 * arch_initcall: reset every USB3 port and install the interrupt-ack
 * hooks.  XLP2xx is single-node with three ports; XLP9xx iterates over
 * all present nodes with two ports each.  Returns 0 always (including
 * on non-XLPII CPUs, where it is a no-op).
 */
static int __init nlm_platform_xlpii_usb_init(void)
{
	int node;

	if (!cpu_is_xlpii())
		return 0;

	if (!cpu_is_xlp9xx()) {
		/* XLP 2XX single node */
		pr_info("Initializing 2XX USB Interface\n");
		nlm_xlpii_usb_hw_reset(0, 1);
		nlm_xlpii_usb_hw_reset(0, 2);
		nlm_xlpii_usb_hw_reset(0, 3);
		nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_0_IRQ, xlp2xx_usb_ack);
		nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_1_IRQ, xlp2xx_usb_ack);
		nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_2_IRQ, xlp2xx_usb_ack);
		return 0;
	}

	/* XLP 9XX, multi-node */
	pr_info("Initializing 9XX USB Interface\n");
	for (node = 0; node < NLM_NR_NODES; node++) {
		if (!nlm_node_present(node))
			continue;
		nlm_xlpii_usb_hw_reset(node, 1);
		nlm_xlpii_usb_hw_reset(node, 2);
		nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_0_IRQ, xlp9xx_usb_ack);
		nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_1_IRQ, xlp9xx_usb_ack);
	}
	return 0;
}
arch_initcall(nlm_platform_xlpii_usb_init);
static u64 xlp_usb_dmamask = ~(u32)0;
/* Fixup the IRQ for USB devices which is exist on XLP9XX SOC PCIE bus */
/*
 * PCI final fixup for XLP9xx on-chip XHCI functions: set a 32-bit DMA
 * mask and route the IRQ to the node-local XHCI interrupt based on the
 * device/function number.
 */
static void nlm_xlp9xx_usb_fixup_final(struct pci_dev *dev)
{
	int node;

	node = xlp_socdev_to_node(dev);
	dev->dev.dma_mask = &xlp_usb_dmamask;
	dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	switch (dev->devfn) {
	case 0x21:
		dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_0_IRQ);
		break;
	case 0x22:
		dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_1_IRQ);
		break;
	}
}
/* Fixup the IRQ for USB devices which is exist on XLP2XX SOC PCIE bus */
/*
 * PCI final fixup for XLP2xx on-chip XHCI functions: set a 32-bit DMA
 * mask and pick the fixed single-node XHCI IRQ by device/function.
 */
static void nlm_xlp2xx_usb_fixup_final(struct pci_dev *dev)
{
	dev->dev.dma_mask = &xlp_usb_dmamask;
	dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	switch (dev->devfn) {
	case 0x21:
		dev->irq = PIC_2XX_XHCI_0_IRQ;
		break;
	case 0x22:
		dev->irq = PIC_2XX_XHCI_1_IRQ;
		break;
	case 0x23:
		dev->irq = PIC_2XX_XHCI_2_IRQ;
		break;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_XHCI,
nlm_xlp9xx_usb_fixup_final);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_XHCI,
nlm_xlp2xx_usb_fixup_final);
| gpl-2.0 |
dchadic/linux-cmps107 | drivers/scsi/scsi_trace.c | 981 | 6760 | /*
* Copyright (C) 2010 FUJITSU LIMITED
* Copyright (C) 2010 Tomohiro Kusumi <kusumi.tomohiro@jp.fujitsu.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/trace_seq.h>
#include <trace/events/scsi.h>
#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
static const char *
scsi_trace_misc(struct trace_seq *, unsigned char *, int);
/*
 * Decode a READ(6)/WRITE(6) CDB into "lba=... txlen=..." in the trace
 * buffer and return a pointer to the formatted, NUL-terminated string.
 */
static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
	const char *msg = trace_seq_buffer_ptr(p);
	sector_t lba, txlen;

	/* 21-bit LBA: low 5 bits of byte 1, then bytes 2 and 3 */
	lba = ((cdb[1] & 0x1F) << 16) | (cdb[2] << 8) | cdb[3];
	/* single-byte transfer length */
	txlen = cdb[4];

	trace_seq_printf(p, "lba=%llu txlen=%llu",
			(unsigned long long)lba, (unsigned long long)txlen);
	trace_seq_putc(p, 0);

	return msg;
}
static const char *
scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
sector_t lba = 0, txlen = 0;
lba |= (cdb[2] << 24);
lba |= (cdb[3] << 16);
lba |= (cdb[4] << 8);
lba |= cdb[5];
txlen |= (cdb[7] << 8);
txlen |= cdb[8];
trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
(unsigned long long)lba, (unsigned long long)txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME)
trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
trace_seq_putc(p, 0);
return ret;
}
static const char *
scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
sector_t lba = 0, txlen = 0;
lba |= (cdb[2] << 24);
lba |= (cdb[3] << 16);
lba |= (cdb[4] << 8);
lba |= cdb[5];
txlen |= (cdb[6] << 24);
txlen |= (cdb[7] << 16);
txlen |= (cdb[8] << 8);
txlen |= cdb[9];
trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
(unsigned long long)lba, (unsigned long long)txlen,
cdb[1] >> 5);
trace_seq_putc(p, 0);
return ret;
}
/*
 * Decode a READ(16)/VERIFY(16)/WRITE(16)/WRITE SAME(16) CDB into
 * "lba=... txlen=... protect=..." (plus "unmap=" for WRITE SAME) and
 * return a pointer to the formatted, NUL-terminated string.
 */
static const char *
scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
{
	const char *ret = trace_seq_buffer_ptr(p);
	sector_t lba = 0, txlen = 0;

	lba |= ((u64)cdb[2] << 56);
	lba |= ((u64)cdb[3] << 48);
	lba |= ((u64)cdb[4] << 40);
	lba |= ((u64)cdb[5] << 32);
	/*
	 * Bug fix: the << 24 terms also need the u64 cast — a byte
	 * >= 0x80 shifted in (signed) int sign-extends when OR-ed into
	 * the 64-bit accumulator, corrupting the bits set above.
	 */
	lba |= ((u64)cdb[6] << 24);
	lba |= (cdb[7] << 16);
	lba |= (cdb[8] << 8);
	lba |= cdb[9];
	txlen |= ((u64)cdb[10] << 24);
	txlen |= (cdb[11] << 16);
	txlen |= (cdb[12] << 8);
	txlen |= cdb[13];

	trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
			(unsigned long long)lba, (unsigned long long)txlen,
			cdb[1] >> 5);

	if (cdb[0] == WRITE_SAME_16)
		trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);

	trace_seq_putc(p, 0);

	return ret;
}
static const char *
scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
sector_t lba = 0, txlen = 0;
u32 ei_lbrt = 0;
switch (SERVICE_ACTION32(cdb)) {
case READ_32:
cmd = "READ";
break;
case VERIFY_32:
cmd = "VERIFY";
break;
case WRITE_32:
cmd = "WRITE";
break;
case WRITE_SAME_32:
cmd = "WRITE_SAME";
break;
default:
trace_seq_puts(p, "UNKNOWN");
goto out;
}
lba |= ((u64)cdb[12] << 56);
lba |= ((u64)cdb[13] << 48);
lba |= ((u64)cdb[14] << 40);
lba |= ((u64)cdb[15] << 32);
lba |= (cdb[16] << 24);
lba |= (cdb[17] << 16);
lba |= (cdb[18] << 8);
lba |= cdb[19];
ei_lbrt |= (cdb[20] << 24);
ei_lbrt |= (cdb[21] << 16);
ei_lbrt |= (cdb[22] << 8);
ei_lbrt |= cdb[23];
txlen |= (cdb[28] << 24);
txlen |= (cdb[29] << 16);
txlen |= (cdb[30] << 8);
txlen |= cdb[31];
trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
cmd, (unsigned long long)lba,
(unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
out:
trace_seq_putc(p, 0);
return ret;
}
/*
 * Decode an UNMAP CDB: report how many block descriptors fit in the
 * parameter list (8-byte header, 16 bytes per descriptor).
 */
static const char *
scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
{
	const char *msg = trace_seq_buffer_ptr(p);
	unsigned int param_len;

	/* bytes 7-8 hold the big-endian parameter-list length */
	param_len = (cdb[7] << 8) | cdb[8];

	trace_seq_printf(p, "regions=%u", (param_len - 8) / 16);
	trace_seq_putc(p, 0);

	return msg;
}
static const char *
scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
sector_t lba = 0;
u32 alloc_len = 0;
switch (SERVICE_ACTION16(cdb)) {
case SAI_READ_CAPACITY_16:
cmd = "READ_CAPACITY_16";
break;
case SAI_GET_LBA_STATUS:
cmd = "GET_LBA_STATUS";
break;
default:
trace_seq_puts(p, "UNKNOWN");
goto out;
}
lba |= ((u64)cdb[2] << 56);
lba |= ((u64)cdb[3] << 48);
lba |= ((u64)cdb[4] << 40);
lba |= ((u64)cdb[5] << 32);
lba |= (cdb[6] << 24);
lba |= (cdb[7] << 16);
lba |= (cdb[8] << 8);
lba |= cdb[9];
alloc_len |= (cdb[10] << 24);
alloc_len |= (cdb[11] << 16);
alloc_len |= (cdb[12] << 8);
alloc_len |= cdb[13];
trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
(unsigned long long)lba, alloc_len);
out:
trace_seq_putc(p, 0);
return ret;
}
/*
 * Route a VARIABLE_LENGTH_CMD CDB by its 16-bit service action: the
 * read/write family goes to the rw32 decoder, everything else falls
 * back to the generic placeholder.
 */
static const char *
scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
{
	int sa = SERVICE_ACTION32(cdb);

	if (sa == READ_32 || sa == VERIFY_32 ||
	    sa == WRITE_32 || sa == WRITE_SAME_32)
		return scsi_trace_rw32(p, cdb, len);

	return scsi_trace_misc(p, cdb, len);
}
/*
 * Fallback decoder for opcodes without a dedicated formatter: emit a
 * lone "-" placeholder and return the NUL-terminated string.
 */
static const char *
scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
{
	const char *msg = trace_seq_buffer_ptr(p);

	trace_seq_puts(p, "-");
	trace_seq_putc(p, 0);

	return msg;
}
/*
 * Entry point for the scsi trace events: dispatch a CDB to the decoder
 * matching its opcode and return the formatted string.  Unrecognized
 * opcodes get the "-" placeholder from scsi_trace_misc().
 */
const char *
scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
{
	switch (cdb[0]) {
	case READ_6:
	case WRITE_6:
		return scsi_trace_rw6(p, cdb, len);
	case READ_10:
	case VERIFY:
	case WRITE_10:
	case WRITE_SAME:
		return scsi_trace_rw10(p, cdb, len);
	case READ_12:
	case VERIFY_12:
	case WRITE_12:
		return scsi_trace_rw12(p, cdb, len);
	case READ_16:
	case VERIFY_16:
	case WRITE_16:
	case WRITE_SAME_16:
		return scsi_trace_rw16(p, cdb, len);
	case UNMAP:
		return scsi_trace_unmap(p, cdb, len);
	case SERVICE_ACTION_IN_16:
		return scsi_trace_service_action_in(p, cdb, len);
	case VARIABLE_LENGTH_CMD:
		return scsi_trace_varlen(p, cdb, len);
	default:
		return scsi_trace_misc(p, cdb, len);
	}
}
| gpl-2.0 |
sakindia123/android_kernel_htc_pico | drivers/input/joystick/xpad.c | 981 | 30589 | /*
* X-Box gamepad driver
*
* Copyright (c) 2002 Marko Friedemann <mfr@bmx-chemnitz.de>
* 2004 Oliver Schwartz <Oliver.Schwartz@gmx.de>,
* Steven Toth <steve@toth.demon.co.uk>,
* Franz Lehner <franz@caos.at>,
* Ivan Hawkes <blackhawk@ivanhawkes.com>
* 2005 Dominic Cerquetti <binary1230@yahoo.com>
* 2006 Adam Buchbinder <adam.buchbinder@gmail.com>
* 2007 Jan Kratochvil <honza@jikos.cz>
* 2010 Christoph Fritz <chf.fritz@googlemail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* This driver is based on:
* - information from http://euc.jp/periphs/xbox-controller.ja.html
* - the iForce driver drivers/char/joystick/iforce.c
* - the skeleton-driver drivers/usb/usb-skeleton.c
* - Xbox 360 information http://www.free60.org/wiki/Gamepad
*
* Thanks to:
* - ITO Takayuki for providing essential xpad information on his website
* - Vojtech Pavlik - iforce driver / input subsystem
* - Greg Kroah-Hartman - usb-skeleton driver
* - XBOX Linux project - extra USB id's
*
* TODO:
* - fine tune axes (especially trigger axes)
* - fix "analog" buttons (reported as digital now)
* - get rumble working
* - need USB IDs for other dance pads
*
* History:
*
* 2002-06-27 - 0.0.1 : first version, just said "XBOX HID controller"
*
* 2002-07-02 - 0.0.2 : basic working version
* - all axes and 9 of the 10 buttons work (german InterAct device)
* - the black button does not work
*
* 2002-07-14 - 0.0.3 : rework by Vojtech Pavlik
* - indentation fixes
* - usb + input init sequence fixes
*
* 2002-07-16 - 0.0.4 : minor changes, merge with Vojtech's v0.0.3
* - verified the lack of HID and report descriptors
* - verified that ALL buttons WORK
* - fixed d-pad to axes mapping
*
* 2002-07-17 - 0.0.5 : simplified d-pad handling
*
* 2004-10-02 - 0.0.6 : DDR pad support
* - borrowed from the XBOX linux kernel
* - USB id's for commonly used dance pads are present
* - dance pads will map D-PAD to buttons, not axes
* - pass the module paramater 'dpad_to_buttons' to force
* the D-PAD to map to buttons if your pad is not detected
*
* Later changes can be tracked in SCM.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/module.h>
#include <linux/usb/input.h>
/* Module identity strings, used by the MODULE_* macros at the bottom */
#define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>"
#define DRIVER_DESC "X-Box pad driver"

/* Fixed size of every input/output packet exchanged with the pad */
#define XPAD_PKT_LEN 32

/* xbox d-pads should map to buttons, as is required for DDR pads
   but we map them to axes when possible to simplify things */
#define MAP_DPAD_TO_BUTTONS		(1 << 0)
#define MAP_TRIGGERS_TO_BUTTONS	(1 << 1)
#define MAP_STICKS_TO_NULL		(1 << 2)
/* dance pads: everything digital — d-pad and triggers as buttons, no sticks */
#define DANCEPAD_MAP_CONFIG	(MAP_DPAD_TO_BUTTONS |			\
				MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL)

/* controller families handled by this driver */
#define XTYPE_XBOX        0	/* original Xbox controller */
#define XTYPE_XBOX360     1	/* wired Xbox 360 controller */
#define XTYPE_XBOX360W    2	/* wireless Xbox 360 controller */
#define XTYPE_UNKNOWN     3	/* not listed in xpad_device[]; type probed at runtime */

/* module parameters overriding the mapping for pads of unknown type */
static int dpad_to_buttons;
module_param(dpad_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads");

static int triggers_to_buttons;
module_param(triggers_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(triggers_to_buttons, "Map triggers to buttons rather than axes for unknown pads");

static int sticks_to_null;
module_param(sticks_to_null, bool, S_IRUGO);
MODULE_PARM_DESC(sticks_to_null, "Do not map sticks at all for unknown pads");
/*
 * Table of known pads.  xpad_probe() walks this list and takes the FIRST
 * entry matching the USB vendor/product ids; unmatched devices fall
 * through to the terminating "Generic X-Box pad" / XTYPE_UNKNOWN entry.
 */
static const struct xpad_device {
	u16 idVendor;		/* USB vendor id */
	u16 idProduct;		/* USB product id */
	char *name;		/* input device name reported to userspace */
	u8 mapping;		/* MAP_* flags for this model */
	u8 xtype;		/* XTYPE_* controller family */
} xpad_device[] = {
	{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
	{ 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
	{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
	{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
	{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
	{ 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
	{ 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
	{ 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
	{ 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
	{ 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX },
	{ 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
	{ 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX },
	{ 0x0738, 0x4522, "Mad Catz LumiCON", 0, XTYPE_XBOX },
	{ 0x0738, 0x4526, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
	{ 0x0738, 0x4536, "Mad Catz MicroCON", 0, XTYPE_XBOX },
	{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
	{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
	{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
	{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
	{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
	{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
	{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
	{ 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
	{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
	{ 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
	{ 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
	{ 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
	{ 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
	{ 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
	{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
	/* NOTE(review): same vid/pid as the entry above; the first-match loop
	   in xpad_probe() makes this entry unreachable — verify the intended
	   product id (cf. the 0x0201 Pelican entry below). */
	{ 0x0e6f, 0x0006, "Pelican 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
	{ 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
	{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
	{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
	{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
	{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
	{ 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
	{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
	{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
	{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
	{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
	{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
	{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
	{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }	/* terminator / fallback */
};
/* buttons shared with xbox and xbox360; all tables below are -1 terminated */
static const signed short xpad_common_btn[] = {
	BTN_A, BTN_B, BTN_X, BTN_Y,			/* "analog" buttons */
	BTN_START, BTN_SELECT, BTN_THUMBL, BTN_THUMBR,	/* start/back/sticks */
	-1						/* terminating entry */
};

/* original xbox controllers only: black (C) and white (Z) buttons */
static const signed short xpad_btn[] = {
	BTN_C, BTN_Z,		/* "analog" buttons */
	-1			/* terminating entry */
};

/* used when dpad is mapped to buttons (MAP_DPAD_TO_BUTTONS) */
static const signed short xpad_btn_pad[] = {
	BTN_TRIGGER_HAPPY1, BTN_TRIGGER_HAPPY2,		/* d-pad left, right */
	BTN_TRIGGER_HAPPY3, BTN_TRIGGER_HAPPY4,		/* d-pad up, down */
	-1						/* terminating entry */
};

/* used when triggers are mapped to buttons (MAP_TRIGGERS_TO_BUTTONS) */
static const signed short xpad_btn_triggers[] = {
	BTN_TL2, BTN_TR2,	/* triggers left/right */
	-1
};

static const signed short xpad360_btn[] = {  /* buttons for x360 controller */
	BTN_TL, BTN_TR,		/* Button LB/RB */
	BTN_MODE,		/* The big X button */
	-1
};

/* stick axes, registered unless MAP_STICKS_TO_NULL is set */
static const signed short xpad_abs[] = {
	ABS_X, ABS_Y,		/* left stick */
	ABS_RX, ABS_RY,		/* right stick */
	-1			/* terminating entry */
};

/* used when dpad is mapped to axes */
static const signed short xpad_abs_pad[] = {
	ABS_HAT0X, ABS_HAT0Y,	/* d-pad axes */
	-1			/* terminating entry */
};

/* used when triggers are mapped to axes */
static const signed short xpad_abs_triggers[] = {
	ABS_Z, ABS_RZ,		/* triggers left/right */
	-1
};
/* Xbox 360 has a vendor-specific class, so we cannot match it with only
 * USB_INTERFACE_INFO (also specifically refused by USB subsystem), so we
 * match against vendor id as well. Wired Xbox 360 devices have protocol 1,
 * wireless controllers have protocol 129. */
#define XPAD_XBOX360_VENDOR_PROTOCOL(vend,pr) \
	.match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \
	.idVendor = (vend), \
	.bInterfaceClass = USB_CLASS_VENDOR_SPEC, \
	.bInterfaceSubClass = 93, \
	.bInterfaceProtocol = (pr)
/* expands to two table entries per vendor: wired (1) and wireless (129) */
#define XPAD_XBOX360_VENDOR(vend) \
	{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \
	{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) }
/*
 * USB match table.  Original Xbox pads advertise an unapproved
 * vendor-defined interface class ('X', 'B'); Xbox 360 pads must be
 * matched per vendor (see XPAD_XBOX360_VENDOR above).
 *
 * The table is read-only: usb_driver.id_table is a const pointer, so
 * declare it const (matches kernel convention and moves it to .rodata).
 */
static const struct usb_device_id xpad_table [] = {
	{ USB_INTERFACE_INFO('X', 'B', 0) },	/* X-Box USB-IF not approved class */
	XPAD_XBOX360_VENDOR(0x045e),		/* Microsoft X-Box 360 controllers */
	XPAD_XBOX360_VENDOR(0x046d),		/* Logitech X-Box 360 style controllers */
	XPAD_XBOX360_VENDOR(0x0738),		/* Mad Catz X-Box 360 controllers */
	XPAD_XBOX360_VENDOR(0x0e6f),		/* 0x0e6f X-Box 360 controllers */
	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
	XPAD_XBOX360_VENDOR(0x146b),		/* BigBen Interactive Controllers */
	XPAD_XBOX360_VENDOR(0x1bad),		/* Rock Band Drums */
	XPAD_XBOX360_VENDOR(0x0f0d),		/* Hori Controllers */
	{ }					/* terminating entry */
};

MODULE_DEVICE_TABLE (usb, xpad_table);
/* per-controller driver state */
struct usb_xpad {
	struct input_dev *dev;		/* input device interface */
	struct usb_device *udev;	/* usb device */

	int pad_present;		/* 360W only: controller attached to receiver */

	struct urb *irq_in;		/* urb for interrupt in report */
	unsigned char *idata;		/* input data (DMA-coherent, XPAD_PKT_LEN) */
	dma_addr_t idata_dma;

	struct urb *bulk_out;		/* 360W only: LED command sent on attach */
	unsigned char *bdata;		/* buffer for bulk_out */

#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
	struct urb *irq_out;		/* urb for interrupt out report */
	unsigned char *odata;		/* output data (shared by FF and LED code) */
	dma_addr_t odata_dma;
	struct mutex odata_mutex;	/* serializes writers of odata */
#endif

#if defined(CONFIG_JOYSTICK_XPAD_LEDS)
	struct xpad_led *led;		/* LED class device, NULL if not registered */
#endif

	char phys[64];			/* physical device path */

	int mapping;			/* map d-pad to buttons or to axes (MAP_*) */
	int xtype;			/* type of xbox device (XTYPE_*) */
};
/*
* xpad_process_packet
*
* Completes a request by converting the data into events for the
* input subsystem.
*
* The used report descriptor was taken from ITO Takayukis website:
* http://euc.jp/periphs/xbox-controller.ja.html
*/
static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
{
	struct input_dev *dev = xpad->dev;

	if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
		/* left stick: 16-bit little-endian values at offsets 12/14;
		 * Y is bit-inverted (~) so pushing the stick up reports
		 * positive values */
		input_report_abs(dev, ABS_X,
				 (__s16) le16_to_cpup((__le16 *)(data + 12)));
		input_report_abs(dev, ABS_Y,
				 ~(__s16) le16_to_cpup((__le16 *)(data + 14)));

		/* right stick */
		input_report_abs(dev, ABS_RX,
				 (__s16) le16_to_cpup((__le16 *)(data + 16)));
		input_report_abs(dev, ABS_RY,
				 ~(__s16) le16_to_cpup((__le16 *)(data + 18)));
	}

	/* triggers left/right: 8-bit pressure values at offsets 10/11 */
	if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
		input_report_key(dev, BTN_TL2, data[10]);
		input_report_key(dev, BTN_TR2, data[11]);
	} else {
		input_report_abs(dev, ABS_Z, data[10]);
		input_report_abs(dev, ABS_RZ, data[11]);
	}

	/* digital pad: bits 0-3 of byte 2 are up/down/left/right */
	if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
		/* dpad as buttons (left, right, up, down) */
		input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & 0x04);
		input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & 0x08);
		input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & 0x01);
		input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & 0x02);
	} else {
		/* dpad as hat axes: right minus left, down minus up */
		input_report_abs(dev, ABS_HAT0X,
				 !!(data[2] & 0x08) - !!(data[2] & 0x04));
		input_report_abs(dev, ABS_HAT0Y,
				 !!(data[2] & 0x02) - !!(data[2] & 0x01));
	}

	/* start/back buttons and stick press left/right */
	input_report_key(dev, BTN_START, data[2] & 0x10);
	input_report_key(dev, BTN_SELECT, data[2] & 0x20);
	input_report_key(dev, BTN_THUMBL, data[2] & 0x40);
	input_report_key(dev, BTN_THUMBR, data[2] & 0x80);

	/* "analog" buttons A, B, X, Y: bytes 4-7 (reported as digital) */
	input_report_key(dev, BTN_A, data[4]);
	input_report_key(dev, BTN_B, data[5]);
	input_report_key(dev, BTN_X, data[6]);
	input_report_key(dev, BTN_Y, data[7]);

	/* "analog" buttons black, white: bytes 8/9 */
	input_report_key(dev, BTN_C, data[8]);
	input_report_key(dev, BTN_Z, data[9]);

	input_sync(dev);
}
/*
* xpad360_process_packet
*
* Completes a request by converting the data into events for the
* input subsystem. It is version for xbox 360 controller
*
* The used report descriptor was taken from:
* http://www.free60.org/wiki/Gamepad
*/
static void xpad360_process_packet(struct usb_xpad *xpad,
				   u16 cmd, unsigned char *data)
{
	struct input_dev *dev = xpad->dev;

	/* digital pad: bits 0-3 of byte 2 */
	if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
		/* dpad as buttons (left, right, up, down) */
		input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & 0x04);
		input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & 0x08);
		input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & 0x01);
		input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & 0x02);
	} else {
		/* dpad as hat axes: right minus left, down minus up */
		input_report_abs(dev, ABS_HAT0X,
				 !!(data[2] & 0x08) - !!(data[2] & 0x04));
		input_report_abs(dev, ABS_HAT0Y,
				 !!(data[2] & 0x02) - !!(data[2] & 0x01));
	}

	/* start/back buttons */
	input_report_key(dev, BTN_START, data[2] & 0x10);
	input_report_key(dev, BTN_SELECT, data[2] & 0x20);

	/* stick press left/right */
	input_report_key(dev, BTN_THUMBL, data[2] & 0x40);
	input_report_key(dev, BTN_THUMBR, data[2] & 0x80);

	/* buttons A,B,X,Y,TL,TR and MODE: bitmask in byte 3 */
	input_report_key(dev, BTN_A, data[3] & 0x10);
	input_report_key(dev, BTN_B, data[3] & 0x20);
	input_report_key(dev, BTN_X, data[3] & 0x40);
	input_report_key(dev, BTN_Y, data[3] & 0x80);
	input_report_key(dev, BTN_TL, data[3] & 0x01);
	input_report_key(dev, BTN_TR, data[3] & 0x02);
	input_report_key(dev, BTN_MODE, data[3] & 0x04);

	if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
		/* left stick: 16-bit LE at offsets 6/8; Y inverted (~) so up
		 * is positive */
		input_report_abs(dev, ABS_X,
				 (__s16) le16_to_cpup((__le16 *)(data + 6)));
		input_report_abs(dev, ABS_Y,
				 ~(__s16) le16_to_cpup((__le16 *)(data + 8)));

		/* right stick */
		input_report_abs(dev, ABS_RX,
				 (__s16) le16_to_cpup((__le16 *)(data + 10)));
		input_report_abs(dev, ABS_RY,
				 ~(__s16) le16_to_cpup((__le16 *)(data + 12)));
	}

	/* triggers left/right: 8-bit values at offsets 4/5 */
	if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
		input_report_key(dev, BTN_TL2, data[4]);
		input_report_key(dev, BTN_TR2, data[5]);
	} else {
		input_report_abs(dev, ABS_Z, data[4]);
		input_report_abs(dev, ABS_RZ, data[5]);
	}

	input_sync(dev);
}
/*
* xpad360w_process_packet
*
* Completes a request by converting the data into events for the
* input subsystem. It is version for xbox 360 wireless controller.
*
* Byte.Bit
* 00.1 - Status change: The controller or headset has connected/disconnected
* Bits 01.7 and 01.6 are valid
* 01.7 - Controller present
* 01.6 - Headset present
* 01.1 - Pad state (Bytes 4+) valid
*
*/
static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
{
	/* Presence change (byte 0 bit 3): byte 1 bit 7 = controller present */
	if (data[0] & 0x08) {
		if (data[1] & 0x80) {
			xpad->pad_present = 1;
			/* pad just appeared: send the LED command prepared
			 * in xpad_probe() via the bulk-out urb */
			usb_submit_urb(xpad->bulk_out, GFP_ATOMIC);
		} else
			xpad->pad_present = 0;
	}

	/* Valid pad data only if byte 1 bit 0 is set */
	if (!(data[1] & 0x1))
		return;

	/* the wireless frame wraps an ordinary 360 report at offset 4 */
	xpad360_process_packet(xpad, cmd, &data[4]);
}
/*
 * Completion handler for the interrupt-in urb: dispatches the received
 * report to the parser for this controller type, then resubmits the urb
 * so reports keep flowing (unless the urb was shut down).
 */
static void xpad_irq_in(struct urb *urb)
{
	struct usb_xpad *xpad = urb->context;
	int retval, status;

	status = urb->status;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up; do NOT resubmit */
		dbg("%s - urb shutting down with status: %d",
			__func__, status);
		return;
	default:
		/* transient error: log and resubmit below */
		dbg("%s - nonzero urb status received: %d",
			__func__, status);
		goto exit;
	}

	switch (xpad->xtype) {
	case XTYPE_XBOX360:
		xpad360_process_packet(xpad, 0, xpad->idata);
		break;
	case XTYPE_XBOX360W:
		xpad360w_process_packet(xpad, 0, xpad->idata);
		break;
	default:
		xpad_process_packet(xpad, 0, xpad->idata);
	}

exit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		err ("%s - usb_submit_urb failed with result %d",
		     __func__, retval);
}
/*
 * Completion handler for the bulk-out urb (360W LED command).  The urb
 * is submitted on demand, so nothing is resubmitted here; we only log
 * abnormal completions.
 */
static void xpad_bulk_out(struct urb *urb)
{
	int status = urb->status;

	if (status == 0)
		/* success */
		return;

	if (status == -ECONNRESET || status == -ENOENT || status == -ESHUTDOWN)
		/* this urb is terminated, clean up */
		dbg("%s - urb shutting down with status: %d", __func__, urb->status);
	else
		dbg("%s - nonzero urb status received: %d", __func__, urb->status);
}
#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
/*
 * Completion handler for the interrupt-out urb (rumble/LED commands).
 * On success or shutdown nothing more is done (out urbs are submitted
 * on demand); on any other error the urb is resubmitted once.
 */
static void xpad_irq_out(struct urb *urb)
{
	int retval, status;

	status = urb->status;

	switch (status) {
	case 0:
		/* success */
		return;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dbg("%s - urb shutting down with status: %d", __func__, status);
		return;
	default:
		dbg("%s - nonzero urb status received: %d", __func__, status);
		goto exit;
	}

exit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		err("%s - usb_submit_urb failed with result %d",
		    __func__, retval);
}
/*
 * Allocate the output path (DMA buffer + interrupt-out urb) used for
 * rumble and LED commands.  Only wired pads (XBOX / XBOX360) have an
 * output endpoint here; for other types this is a no-op returning 0.
 *
 * Returns 0 on success or -ENOMEM, unwinding partial allocations via
 * the usual goto chain.
 */
static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
{
	struct usb_endpoint_descriptor *ep_irq_out;
	int error;

	if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX)
		return 0;

	xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN,
					 GFP_KERNEL, &xpad->odata_dma);
	if (!xpad->odata) {
		error = -ENOMEM;
		goto fail1;
	}

	mutex_init(&xpad->odata_mutex);

	xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
	if (!xpad->irq_out) {
		error = -ENOMEM;
		goto fail2;
	}

	/* NOTE(review): endpoint[1] is assumed to be the interrupt-out
	   endpoint for these interfaces — not validated here */
	ep_irq_out = &intf->cur_altsetting->endpoint[1].desc;
	usb_fill_int_urb(xpad->irq_out, xpad->udev,
			 usb_sndintpipe(xpad->udev, ep_irq_out->bEndpointAddress),
			 xpad->odata, XPAD_PKT_LEN,
			 xpad_irq_out, xpad, ep_irq_out->bInterval);
	xpad->irq_out->transfer_dma = xpad->odata_dma;
	xpad->irq_out->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	return 0;

 fail2:	usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
 fail1:	return error;
}
/* Cancel any in-flight output urb; no-op for pads without an output path. */
static void xpad_stop_output(struct usb_xpad *xpad)
{
	if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX)
		return;

	usb_kill_urb(xpad->irq_out);
}
/* Release the output urb and DMA buffer allocated by xpad_init_output(). */
static void xpad_deinit_output(struct usb_xpad *xpad)
{
	if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX)
		return;

	usb_free_urb(xpad->irq_out);
	usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
			  xpad->odata, xpad->odata_dma);
}
#else
/* output path compiled out (no FF and no LEDs): provide no-op stubs */
static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) { return 0; }
static void xpad_deinit_output(struct usb_xpad *xpad) {}
static void xpad_stop_output(struct usb_xpad *xpad) {}
#endif
#ifdef CONFIG_JOYSTICK_XPAD_FF
static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
{
struct usb_xpad *xpad = input_get_drvdata(dev);
if (effect->type == FF_RUMBLE) {
__u16 strong = effect->u.rumble.strong_magnitude;
__u16 weak = effect->u.rumble.weak_magnitude;
switch (xpad->xtype) {
case XTYPE_XBOX:
xpad->odata[0] = 0x00;
xpad->odata[1] = 0x06;
xpad->odata[2] = 0x00;
xpad->odata[3] = strong / 256; /* left actuator */
xpad->odata[4] = 0x00;
xpad->odata[5] = weak / 256; /* right actuator */
xpad->irq_out->transfer_buffer_length = 6;
return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
case XTYPE_XBOX360:
xpad->odata[0] = 0x00;
xpad->odata[1] = 0x08;
xpad->odata[2] = 0x00;
xpad->odata[3] = strong / 256; /* left actuator? */
xpad->odata[4] = weak / 256; /* right actuator? */
xpad->odata[5] = 0x00;
xpad->odata[6] = 0x00;
xpad->odata[7] = 0x00;
xpad->irq_out->transfer_buffer_length = 8;
return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
default:
dbg("%s - rumble command sent to unsupported xpad type: %d",
__func__, xpad->xtype);
return -1;
}
}
return 0;
}
/* Register memless force-feedback support on pads that can rumble. */
static int xpad_init_ff(struct usb_xpad *xpad)
{
	if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX) {
		input_set_capability(xpad->dev, EV_FF, FF_RUMBLE);
		return input_ff_create_memless(xpad->dev, NULL, xpad_play_effect);
	}

	return 0;
}
#else
/* force feedback compiled out: no-op stub */
static int xpad_init_ff(struct usb_xpad *xpad) { return 0; }
#endif
#if defined(CONFIG_JOYSTICK_XPAD_LEDS)
#include <linux/leds.h>
/* LED class device wrapper, one per registered pad */
struct xpad_led {
	char name[16];			/* "xpadN", backing store for led_cdev.name */
	struct led_classdev led_cdev;	/* registered LED class device */
	struct usb_xpad *xpad;		/* owning pad */
};
/*
 * Send an LED command (0..13) to a 360 pad via the interrupt-out urb.
 * Out-of-range commands are silently ignored.  The odata mutex keeps
 * concurrent writers from clobbering the shared output buffer.
 */
static void xpad_send_led_command(struct usb_xpad *xpad, int command)
{
	if (command < 0 || command >= 14)
		return;

	mutex_lock(&xpad->odata_mutex);

	xpad->odata[0] = 0x01;
	xpad->odata[1] = 0x03;
	xpad->odata[2] = command;
	xpad->irq_out->transfer_buffer_length = 3;
	usb_submit_urb(xpad->irq_out, GFP_KERNEL);

	mutex_unlock(&xpad->odata_mutex);
}
/* led_classdev brightness callback: the brightness value is reused as
 * the LED command number (see xpad_send_led_command) */
static void xpad_led_set(struct led_classdev *led_cdev,
			 enum led_brightness value)
{
	struct xpad_led *xpad_led = container_of(led_cdev,
						 struct xpad_led, led_cdev);

	xpad_send_led_command(xpad_led->xpad, value);
}
/*
 * Register an "xpadN" LED class device for a wired 360 pad and light the
 * ring segment corresponding to the controller number.  No-op (0) for
 * other pad types.  Returns 0 or a negative errno; on failure xpad->led
 * is left NULL so xpad_led_disconnect() is safe to call.
 */
static int xpad_led_probe(struct usb_xpad *xpad)
{
	/* global counter handing out unique "xpadN" names */
	static atomic_t led_seq = ATOMIC_INIT(0);
	long led_no;
	struct xpad_led *led;
	struct led_classdev *led_cdev;
	int error;

	if (xpad->xtype != XTYPE_XBOX360)
		return 0;

	xpad->led = led = kzalloc(sizeof(struct xpad_led), GFP_KERNEL);
	if (!led)
		return -ENOMEM;

	led_no = (long)atomic_inc_return(&led_seq) - 1;

	snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
	led->xpad = xpad;

	led_cdev = &led->led_cdev;
	led_cdev->name = led->name;
	led_cdev->brightness_set = xpad_led_set;

	error = led_classdev_register(&xpad->udev->dev, led_cdev);
	if (error) {
		kfree(led);
		xpad->led = NULL;
		return error;
	}

	/*
	 * Light up the segment corresponding to controller number
	 * (commands 2..5 select quadrants 1..4)
	 */
	xpad_send_led_command(xpad, (led_no % 4) + 2);

	return 0;
}
/* Unregister and free the pad's LED class device, if one was created. */
static void xpad_led_disconnect(struct usb_xpad *xpad)
{
	struct xpad_led *led = xpad->led;

	if (!led)
		return;

	led_classdev_unregister(&led->led_cdev);
	kfree(led);
}
#else
/* LED support compiled out: no-op stubs */
static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
static void xpad_led_disconnect(struct usb_xpad *xpad) { }
#endif
/*
 * input_dev open callback: start the interrupt-in urb.  Wireless pads
 * already had their urb submitted in probe, so nothing to do for them.
 * Returns 0 on success, -EIO if the urb cannot be submitted.
 */
static int xpad_open(struct input_dev *dev)
{
	struct usb_xpad *xpad = input_get_drvdata(dev);

	/* URB was submitted in probe */
	if (xpad->xtype == XTYPE_XBOX360W)
		return 0;

	xpad->irq_in->dev = xpad->udev;
	return usb_submit_urb(xpad->irq_in, GFP_KERNEL) ? -EIO : 0;
}
/*
 * input_dev close callback: stop the input urb (except on wireless pads,
 * whose urb must keep running to receive presence messages) and cancel
 * any pending output.
 */
static void xpad_close(struct input_dev *dev)
{
	struct usb_xpad *xpad = input_get_drvdata(dev);

	if (xpad->xtype != XTYPE_XBOX360W)
		usb_kill_urb(xpad->irq_in);

	xpad_stop_output(xpad);
}
/*
 * Register one absolute axis on the input device with the parameter
 * ranges appropriate for its role (stick, trigger or d-pad hat).
 */
static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
{
	set_bit(abs, input_dev->absbit);

	if (abs == ABS_X || abs == ABS_Y || abs == ABS_RX || abs == ABS_RY)
		/* the two analog sticks: full signed 16-bit range */
		input_set_abs_params(input_dev, abs, -32768, 32767, 16, 128);
	else if (abs == ABS_Z || abs == ABS_RZ)
		/* the triggers (if mapped to axes): 8-bit pressure */
		input_set_abs_params(input_dev, abs, 0, 255, 0, 0);
	else if (abs == ABS_HAT0X || abs == ABS_HAT0Y)
		/* the d-pad (only if mapped to axes): -1/0/+1 */
		input_set_abs_params(input_dev, abs, -1, 1, 0, 0);
}
/*
 * Probe callback: identify the pad, allocate driver state and the input
 * urb, register the input device, and (for wireless pads) prepare the
 * LED bulk message and start the input urb immediately so presence
 * messages are received before the device is opened.
 *
 * Returns 0 on success or a negative errno, unwinding everything that
 * was set up via the fail1..fail9 goto chain.
 */
static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_xpad *xpad;
	struct input_dev *input_dev;
	struct usb_endpoint_descriptor *ep_irq_in;
	int i, error;

	/* find the device in xpad_device[]; unknown ids stop at the
	   terminating "Generic X-Box pad" entry (XTYPE_UNKNOWN) */
	for (i = 0; xpad_device[i].idVendor; i++) {
		if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
		    (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
			break;
	}

	xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!xpad || !input_dev) {
		error = -ENOMEM;
		goto fail1;
	}

	/* DMA-coherent buffer for the interrupt-in endpoint */
	xpad->idata = usb_alloc_coherent(udev, XPAD_PKT_LEN,
					 GFP_KERNEL, &xpad->idata_dma);
	if (!xpad->idata) {
		error = -ENOMEM;
		goto fail1;
	}

	xpad->irq_in = usb_alloc_urb(0, GFP_KERNEL);
	if (!xpad->irq_in) {
		error = -ENOMEM;
		goto fail2;
	}

	xpad->udev = udev;
	xpad->mapping = xpad_device[i].mapping;
	xpad->xtype = xpad_device[i].xtype;

	/* unlisted device: deduce the type from the interface descriptor
	   (360 pads use a vendor-specific class; wireless ones protocol
	   129) and apply the user's mapping overrides */
	if (xpad->xtype == XTYPE_UNKNOWN) {
		if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
			if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
				xpad->xtype = XTYPE_XBOX360W;
			else
				xpad->xtype = XTYPE_XBOX360;
		} else
			xpad->xtype = XTYPE_XBOX;

		if (dpad_to_buttons)
			xpad->mapping |= MAP_DPAD_TO_BUTTONS;
		if (triggers_to_buttons)
			xpad->mapping |= MAP_TRIGGERS_TO_BUTTONS;
		if (sticks_to_null)
			xpad->mapping |= MAP_STICKS_TO_NULL;
	}

	xpad->dev = input_dev;
	usb_make_path(udev, xpad->phys, sizeof(xpad->phys));
	strlcat(xpad->phys, "/input0", sizeof(xpad->phys));

	input_dev->name = xpad_device[i].name;
	input_dev->phys = xpad->phys;
	usb_to_input_id(udev, &input_dev->id);
	input_dev->dev.parent = &intf->dev;

	input_set_drvdata(input_dev, xpad);

	input_dev->open = xpad_open;
	input_dev->close = xpad_close;

	input_dev->evbit[0] = BIT_MASK(EV_KEY);

	if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
		input_dev->evbit[0] |= BIT_MASK(EV_ABS);
		/* set up axes */
		for (i = 0; xpad_abs[i] >= 0; i++)
			xpad_set_up_abs(input_dev, xpad_abs[i]);
	}

	/* set up standard buttons */
	for (i = 0; xpad_common_btn[i] >= 0; i++)
		__set_bit(xpad_common_btn[i], input_dev->keybit);

	/* set up model-specific ones */
	if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W) {
		for (i = 0; xpad360_btn[i] >= 0; i++)
			__set_bit(xpad360_btn[i], input_dev->keybit);
	} else {
		for (i = 0; xpad_btn[i] >= 0; i++)
			__set_bit(xpad_btn[i], input_dev->keybit);
	}

	/* d-pad: either four buttons or a hat-axis pair */
	if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
		for (i = 0; xpad_btn_pad[i] >= 0; i++)
			__set_bit(xpad_btn_pad[i], input_dev->keybit);
	} else {
		for (i = 0; xpad_abs_pad[i] >= 0; i++)
			xpad_set_up_abs(input_dev, xpad_abs_pad[i]);
	}

	/* triggers: either two buttons or two axes */
	if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
		for (i = 0; xpad_btn_triggers[i] >= 0; i++)
			__set_bit(xpad_btn_triggers[i], input_dev->keybit);
	} else {
		for (i = 0; xpad_abs_triggers[i] >= 0; i++)
			xpad_set_up_abs(input_dev, xpad_abs_triggers[i]);
	}

	error = xpad_init_output(intf, xpad);
	if (error)
		goto fail3;

	error = xpad_init_ff(xpad);
	if (error)
		goto fail4;

	error = xpad_led_probe(xpad);
	if (error)
		goto fail5;

	ep_irq_in = &intf->cur_altsetting->endpoint[0].desc;
	usb_fill_int_urb(xpad->irq_in, udev,
			 usb_rcvintpipe(udev, ep_irq_in->bEndpointAddress),
			 xpad->idata, XPAD_PKT_LEN, xpad_irq_in,
			 xpad, ep_irq_in->bInterval);
	xpad->irq_in->transfer_dma = xpad->idata_dma;
	xpad->irq_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	error = input_register_device(xpad->dev);
	if (error)
		goto fail6;

	usb_set_intfdata(intf, xpad);

	if (xpad->xtype == XTYPE_XBOX360W) {
		/*
		 * Setup the message to set the LEDs on the
		 * controller when it shows up
		 */
		xpad->bulk_out = usb_alloc_urb(0, GFP_KERNEL);
		if (!xpad->bulk_out) {
			error = -ENOMEM;
			goto fail7;
		}

		xpad->bdata = kzalloc(XPAD_PKT_LEN, GFP_KERNEL);
		if (!xpad->bdata) {
			error = -ENOMEM;
			goto fail8;
		}

		/* LED command; byte 3 encodes the wireless slot, selected
		   by the interface number (0/2/4/6) */
		xpad->bdata[2] = 0x08;
		switch (intf->cur_altsetting->desc.bInterfaceNumber) {
		case 0:
			xpad->bdata[3] = 0x42;
			break;
		case 2:
			xpad->bdata[3] = 0x43;
			break;
		case 4:
			xpad->bdata[3] = 0x44;
			break;
		case 6:
			xpad->bdata[3] = 0x45;
		}

		ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
		usb_fill_bulk_urb(xpad->bulk_out, udev,
				  usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress),
				  xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad);

		/*
		 * Submit the int URB immediately rather than waiting for open
		 * because we get status messages from the device whether
		 * or not any controllers are attached. In fact, it's
		 * exactly the message that a controller has arrived that
		 * we're waiting for.
		 */
		xpad->irq_in->dev = xpad->udev;
		error = usb_submit_urb(xpad->irq_in, GFP_KERNEL);
		if (error)
			goto fail9;
	}

	return 0;

 fail9:	kfree(xpad->bdata);
 fail8:	usb_free_urb(xpad->bulk_out);
 fail7:	input_unregister_device(input_dev);
	input_dev = NULL;	/* owned by input core now; don't free below */
 fail6:	xpad_led_disconnect(xpad);
 fail5:	if (input_dev)
		input_ff_destroy(input_dev);
 fail4:	xpad_deinit_output(xpad);
 fail3:	usb_free_urb(xpad->irq_in);
 fail2:	usb_free_coherent(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
 fail1:	input_free_device(input_dev);
	kfree(xpad);
	return error;

}
/*
 * Disconnect callback: tear down everything set up in xpad_probe(),
 * including the always-running input urb of wireless pads and their
 * bulk-out LED urb.
 */
static void xpad_disconnect(struct usb_interface *intf)
{
	struct usb_xpad *xpad = usb_get_intfdata (intf);

	xpad_led_disconnect(xpad);
	input_unregister_device(xpad->dev);
	xpad_deinit_output(xpad);

	if (xpad->xtype == XTYPE_XBOX360W) {
		usb_kill_urb(xpad->bulk_out);
		usb_free_urb(xpad->bulk_out);
		/* wireless pads keep irq_in running outside open/close */
		usb_kill_urb(xpad->irq_in);
	}

	usb_free_urb(xpad->irq_in);
	usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
			  xpad->idata, xpad->idata_dma);

	kfree(xpad->bdata);
	kfree(xpad);

	usb_set_intfdata(intf, NULL);
}
/* USB driver glue; not const because the USB core mutates usb_driver */
static struct usb_driver xpad_driver = {
	.name		= "xpad",
	.probe		= xpad_probe,
	.disconnect	= xpad_disconnect,
	.id_table	= xpad_table,
};
/* Module entry: register the USB driver with the core. */
static int __init usb_xpad_init(void)
{
	return usb_register(&xpad_driver);
}

/* Module exit: unregister the USB driver. */
static void __exit usb_xpad_exit(void)
{
	usb_deregister(&xpad_driver);
}

module_init(usb_xpad_init);
module_exit(usb_xpad_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
pscholl/jennic-usb-zigbee-linux-driver | drivers/input/keyboard/atakbd.c | 2005 | 6402 | /*
* atakbd.c
*
* Copyright (c) 2005 Michael Schmitz
*
* Based on amikbd.c, which is
*
* Copyright (c) 2000-2001 Vojtech Pavlik
*
* Based on the work of:
* Hamish Macdonald
*/
/*
* Atari keyboard driver for Linux/m68k
*
* The low level init and interrupt stuff is handled in arch/m68k/atari/atakeyb.c
* (the keyboard ACIA also handles the mouse and joystick data, and the keyboard
* interrupt is shared with the MIDI ACIA so MIDI data also get handled there).
* This driver only deals with handing key events off to the input layer.
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <asm/atariints.h>
#include <asm/atarihw.h>
#include <asm/atarikb.h>
#include <asm/irq.h>
MODULE_AUTHOR("Michael Schmitz <schmitz@biophys.uni-duesseldorf.de>");
MODULE_DESCRIPTION("Atari keyboard driver");
MODULE_LICENSE("GPL");
/*
0x47: KP_7 71
0x48: KP_8 72
0x49: KP_9 73
0x62: KP_/ 98
0x4b: KP_4 75
0x4c: KP_5 76
0x4d: KP_6 77
0x37: KP_* 55
0x4f: KP_1 79
0x50: KP_2 80
0x51: KP_3 81
0x4a: KP_- 74
0x52: KP_0 82
0x53: KP_. 83
0x4e: KP_+ 78
0x67: Up 103
0x6c: Down 108
0x69: Left 105
0x6a: Right 106
*/
/*
 * Atari scancode -> Linux keycode translation table (American layout).
 *
 * NOTE(review): several entries are explicit FIXMEs and the high
 * scancodes (0x5a..0x71) are largely KEY_KPASTERISK placeholders —
 * verify against a real Atari scancode reference before relying on
 * them.  Entries not listed are zero-initialized (KEY_RESERVED).
 */
static unsigned char atakbd_keycode[0x72] = {	/* American layout */
	[0]	 = KEY_GRAVE,
	[1]	 = KEY_ESC,
	[2]	 = KEY_1,
	[3]	 = KEY_2,
	[4]	 = KEY_3,
	[5]	 = KEY_4,
	[6]	 = KEY_5,
	[7]	 = KEY_6,
	[8]	 = KEY_7,
	[9]	 = KEY_8,
	[10]	 = KEY_9,
	[11]	 = KEY_0,
	[12]	 = KEY_MINUS,
	[13]	 = KEY_EQUAL,
	[14]	 = KEY_BACKSPACE,
	[15]	 = KEY_TAB,
	[16]	 = KEY_Q,
	[17]	 = KEY_W,
	[18]	 = KEY_E,
	[19]	 = KEY_R,
	[20]	 = KEY_T,
	[21]	 = KEY_Y,
	[22]	 = KEY_U,
	[23]	 = KEY_I,
	[24]	 = KEY_O,
	[25]	 = KEY_P,
	[26]	 = KEY_LEFTBRACE,
	[27]	 = KEY_RIGHTBRACE,
	[28]	 = KEY_ENTER,
	[29]	 = KEY_LEFTCTRL,
	[30]	 = KEY_A,
	[31]	 = KEY_S,
	[32]	 = KEY_D,
	[33]	 = KEY_F,
	[34]	 = KEY_G,
	[35]	 = KEY_H,
	[36]	 = KEY_J,
	[37]	 = KEY_K,
	[38]	 = KEY_L,
	[39]	 = KEY_SEMICOLON,
	[40]	 = KEY_APOSTROPHE,
	[41]	 = KEY_BACKSLASH,	/* FIXME, '#' */
	[42]	 = KEY_LEFTSHIFT,
	[43]	 = KEY_GRAVE,		/* FIXME: '~' */
	[44]	 = KEY_Z,
	[45]	 = KEY_X,
	[46]	 = KEY_C,
	[47]	 = KEY_V,
	[48]	 = KEY_B,
	[49]	 = KEY_N,
	[50]	 = KEY_M,
	[51]	 = KEY_COMMA,
	[52]	 = KEY_DOT,
	[53]	 = KEY_SLASH,
	[54]	 = KEY_RIGHTSHIFT,
	[55]	 = KEY_KPASTERISK,
	[56]	 = KEY_LEFTALT,
	[57]	 = KEY_SPACE,
	[58]	 = KEY_CAPSLOCK,
	[59]	 = KEY_F1,
	[60]	 = KEY_F2,
	[61]	 = KEY_F3,
	[62]	 = KEY_F4,
	[63]	 = KEY_F5,
	[64]	 = KEY_F6,
	[65]	 = KEY_F7,
	[66]	 = KEY_F8,
	[67]	 = KEY_F9,
	[68]	 = KEY_F10,
	[69]	 = KEY_ESC,
	[70]	 = KEY_DELETE,
	[71]	 = KEY_KP7,
	[72]	 = KEY_KP8,
	[73]	 = KEY_KP9,
	[74]	 = KEY_KPMINUS,
	[75]	 = KEY_KP4,
	[76]	 = KEY_KP5,
	[77]	 = KEY_KP6,
	[78]	 = KEY_KPPLUS,
	[79]	 = KEY_KP1,
	[80]	 = KEY_KP2,
	[81]	 = KEY_KP3,
	[82]	 = KEY_KP0,
	[83]	 = KEY_KPDOT,
	[90]	 = KEY_KPLEFTPAREN,
	[91]	 = KEY_KPRIGHTPAREN,
	[92]	 = KEY_KPASTERISK,	/* FIXME */
	[93]	 = KEY_KPASTERISK,
	[94]	 = KEY_KPPLUS,
	[95]	 = KEY_HELP,
	[96]	 = KEY_BACKSLASH,	/* FIXME: '<' */
	[97]	 = KEY_KPASTERISK,	/* FIXME */
	[98]	 = KEY_KPSLASH,
	[99]	 = KEY_KPLEFTPAREN,
	[100]	 = KEY_KPRIGHTPAREN,
	[101]	 = KEY_KPSLASH,
	[102]	 = KEY_KPASTERISK,
	[103]	 = KEY_UP,
	[104]	 = KEY_KPASTERISK,	/* FIXME */
	[105]	 = KEY_LEFT,
	[106]	 = KEY_RIGHT,
	[107]	 = KEY_KPASTERISK,	/* FIXME */
	[108]	 = KEY_DOWN,
	[109]	 = KEY_KPASTERISK,	/* FIXME */
	[110]	 = KEY_KPASTERISK,	/* FIXME */
	[111]	 = KEY_KPASTERISK,	/* FIXME */
	[112]	 = KEY_KPASTERISK,	/* FIXME */
	[113]	 = KEY_KPASTERISK	/* FIXME */
};
static struct input_dev *atakbd_dev;
/*
 * Key event hook called from the low-level Atari keyboard code via
 * atari_input_keyboard_interrupt_hook.
 *
 * @scancode: raw Atari scancode (break bit already stripped by the
 *            low-level code, which passes press/release separately)
 * @down:     non-zero for key press, zero for release
 *
 * Translates the scancode through atakbd_keycode[] and forwards the
 * event to the input layer.  Scancodes >= 0x72 belong to mouse or
 * joystick packets and are not handled here.
 *
 * Fix: the old code special-cased CapsLock as a toggle key, emitting a
 * synthetic press+release on every event.  That behaviour was inherited
 * from amikbd (its own comment said "on Amiga"); the Atari keyboard
 * sends ordinary make and break codes for CapsLock, so the toggle
 * emulation fired twice per keystroke.  Report it like any other key.
 * Also, the range comment claimed 0xf2 while the code checks 0x72.
 */
static void atakbd_interrupt(unsigned char scancode, char down)
{

	if (scancode < 0x72) {		/* scancodes < 0x72 are keys */

		// report raw events here?

		scancode = atakbd_keycode[scancode];

		input_report_key(atakbd_dev, scancode, down);
		input_sync(atakbd_dev);
	} else				/* scancodes >= 0x72 are mouse data, most likely */
		printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
}
/*
 * Module init: verify we are on Atari hardware with the ST MFP present,
 * initialize the low-level keyboard core, register the input device and
 * install the keyboard hook.  Returns 0 or a negative errno.
 */
static int __init atakbd_init(void)
{
	int i, error;

	if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP))
		return -ENODEV;

	// need to init core driver if not already done so
	if (atari_keyb_init())
		return -ENODEV;

	atakbd_dev = input_allocate_device();
	if (!atakbd_dev)
		return -ENOMEM;

	atakbd_dev->name = "Atari Keyboard";
	atakbd_dev->phys = "atakbd/input0";
	atakbd_dev->id.bustype = BUS_HOST;
	atakbd_dev->id.vendor = 0x0001;
	atakbd_dev->id.product = 0x0001;
	atakbd_dev->id.version = 0x0100;

	atakbd_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
	atakbd_dev->keycode = atakbd_keycode;
	atakbd_dev->keycodesize = sizeof(unsigned char);
	atakbd_dev->keycodemax = ARRAY_SIZE(atakbd_keycode);

	/* advertise every mapped keycode; gaps in the table are 0
	   (KEY_RESERVED), for which setting the bit is harmless */
	for (i = 1; i < 0x72; i++) {
		set_bit(atakbd_keycode[i], atakbd_dev->keybit);
	}

	/* error check */
	error = input_register_device(atakbd_dev);
	if (error) {
		input_free_device(atakbd_dev);
		return error;
	}

	atari_input_keyboard_interrupt_hook = atakbd_interrupt;

	return 0;
}
/* Module exit: detach the hook first so no events arrive while the
 * input device is being unregistered. */
static void __exit atakbd_exit(void)
{
	atari_input_keyboard_interrupt_hook = NULL;
	input_unregister_device(atakbd_dev);
}

module_init(atakbd_init);
module_exit(atakbd_exit);
| gpl-2.0 |
holyangel/M9 | arch/x86/crypto/aes_glue.c | 2261 | 1697 | /*
* Glue Code for the asm optimized version of the AES Cipher Algorithm
*
*/
#include <linux/module.h>
#include <crypto/aes.h>
#include <asm/crypto/aes.h>
asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
{
aes_enc_blk(ctx, dst, src);
}
EXPORT_SYMBOL_GPL(crypto_aes_encrypt_x86);
/*
 * Exported helper: decrypt one 16-byte block from @src into @dst using
 * the expanded key schedule in @ctx, via the asm implementation.
 */
void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
{
aes_dec_blk(ctx, dst, src);
}
EXPORT_SYMBOL_GPL(crypto_aes_decrypt_x86);
/* cia_encrypt hook: pull the key context out of the tfm and encrypt one block. */
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	aes_enc_blk(ctx, dst, src);
}
/* cia_decrypt hook: pull the key context out of the tfm and decrypt one block. */
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	aes_dec_blk(ctx, dst, src);
}
/*
 * Single-block AES cipher descriptor backed by the asm implementation.
 * Priority 200 lets it win over the generic C "aes" implementation.
 */
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
/* Key expansion is shared with the generic AES code. */
.cia_setkey = crypto_aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}
}
};
/* Register the asm-backed AES cipher with the crypto core on module load. */
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
/* Unregister the cipher on module unload. */
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");
MODULE_ALIAS("aes-asm");
| gpl-2.0 |
MpApQ/kernel_huawei | drivers/net/ehea/ehea_ethtool.c | 2773 | 7164 | /*
* linux/drivers/net/ehea/ehea_ethtool.c
*
* eHEA ethernet device driver for IBM eServer System p
*
* (C) Copyright IBM Corp. 2006
*
* Authors:
* Christoph Raisch <raisch@de.ibm.com>
* Jan-Bernd Themann <themann@de.ibm.com>
* Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ehea.h"
#include "ehea_phyp.h"
/*
 * ethtool .get_settings: refresh the port attributes from the hypervisor
 * and report link speed, duplex, and supported/advertised mode masks.
 * Returns 0 on success or the error from ehea_sense_port_attr().
 */
static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct ehea_port *port = netdev_priv(dev);
u32 speed;
int ret;
/* Re-read port_speed / full_duplex / autoneg from firmware. */
ret = ehea_sense_port_attr(port);
if (ret)
return ret;
if (netif_carrier_ok(dev)) {
switch (port->port_speed) {
case EHEA_SPEED_10M:
speed = SPEED_10;
break;
case EHEA_SPEED_100M:
speed = SPEED_100;
break;
case EHEA_SPEED_1G:
speed = SPEED_1000;
break;
case EHEA_SPEED_10G:
speed = SPEED_10000;
break;
default:
/* NOTE(review): -1 stored into a u32 yields 0xffffffff,
 * i.e. the same "unknown" sentinel as the ~0 used in the
 * carrier-off branch below — presumably intentional. */
speed = -1;
break; /* BUG */
}
cmd->duplex = port->full_duplex == 1 ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
/* Link down: speed and duplex are unknown. */
speed = ~0;
cmd->duplex = -1;
}
ethtool_cmd_speed_set(cmd, speed);
/* 10G ports are fibre; everything slower is twisted-pair with autoneg. */
if (cmd->speed == SPEED_10000) {
cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
cmd->port = PORT_FIBRE;
} else {
cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
| SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
| SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
| SUPPORTED_TP);
cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
| ADVERTISED_TP);
cmd->port = PORT_TP;
}
cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
/*
 * ethtool .set_settings: translate the requested speed/duplex (or
 * autonegotiation) into a hypervisor speed code and apply it.
 * Returns 0 on success, -EINVAL for unsupported speed/duplex
 * combinations, or the error from ehea_set_portspeed().
 */
static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ehea_port *port = netdev_priv(dev);
	u32 speed_code;
	int err;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		speed_code = EHEA_SPEED_AUTONEG;
	} else {
		int full = (cmd->duplex == DUPLEX_FULL);

		switch (cmd->speed) {
		case SPEED_10:
			speed_code = full ? H_SPEED_10M_F : H_SPEED_10M_H;
			break;
		case SPEED_100:
			speed_code = full ? H_SPEED_100M_F : H_SPEED_100M_H;
			break;
		case SPEED_1000:
			/* Gigabit and above only support full duplex. */
			if (!full)
				return -EINVAL;
			speed_code = H_SPEED_1G_F;
			break;
		case SPEED_10000:
			if (!full)
				return -EINVAL;
			speed_code = H_SPEED_10G_F;
			break;
		default:
			return -EINVAL;
		}
	}

	err = ehea_set_portspeed(port, speed_code);
	if (!err)
		netdev_info(dev,
			    "Port speed successfully set: %dMbps %s Duplex\n",
			    port->port_speed,
			    port->full_duplex == 1 ? "Full" : "Half");
	return err;
}
/* ethtool .nway_reset: restart autonegotiation on the port. */
static int ehea_nway_reset(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int err = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);

	if (!err)
		netdev_info(port->netdev,
			    "Port speed successfully set: %dMbps %s Duplex\n",
			    port->port_speed,
			    port->full_duplex == 1 ? "Full" : "Half");
	return err;
}
/* ethtool .get_drvinfo: report driver name and version strings. */
static void ehea_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
/* ethtool .get_msglevel: return the current netif message-enable mask. */
static u32 ehea_get_msglevel(struct net_device *dev)
{
	return ((struct ehea_port *) netdev_priv(dev))->msg_enable;
}
/* ethtool .set_msglevel: store the new netif message-enable mask. */
static void ehea_set_msglevel(struct net_device *dev, u32 value)
{
	struct ehea_port *ehea_port = netdev_priv(dev);

	ehea_port->msg_enable = value;
}
/*
 * Statistic names exposed through ethtool -S, in exactly the order that
 * ehea_get_ethtool_stats() fills the data array.
 */
static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"sig_comp_iv"},
{"swqe_refill_th"},
{"port resets"},
{"Receive errors"},
{"TCP cksum errors"},
{"IP cksum errors"},
{"Frame cksum errors"},
{"num SQ stopped"},
{"SQ stopped"},
{"PR0 free_swqes"},
{"PR1 free_swqes"},
{"PR2 free_swqes"},
{"PR3 free_swqes"},
{"PR4 free_swqes"},
{"PR5 free_swqes"},
{"PR6 free_swqes"},
{"PR7 free_swqes"},
{"LRO aggregated"},
{"LRO flushed"},
{"LRO no_desc"},
};
/* ethtool .get_strings: copy out the statistic names for ETH_SS_STATS. */
static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset != ETH_SS_STATS)
		return;

	memcpy(data, &ehea_ethtool_stats_keys,
	       sizeof(ehea_ethtool_stats_keys));
}
/* ethtool .get_sset_count: only the statistics string set is supported. */
static int ehea_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(ehea_ethtool_stats_keys);

	return -EOPNOTSUPP;
}
/*
 * ethtool .get_ethtool_stats: fill @data in the order defined by
 * ehea_ethtool_stats_keys, aggregating counters over all port resources.
 */
static void ehea_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
/* NOTE(review): tmp is a 32-bit int accumulating per-queue counters that
 * may be wider — confirm the sums cannot overflow 32 bits. */
int i, k, tmp;
struct ehea_port *port = netdev_priv(dev);
/* Zero the whole output array before filling it. */
for (i = 0; i < ehea_get_sset_count(dev, ETH_SS_STATS); i++)
data[i] = 0;
i = 0;
data[i++] = port->sig_comp_iv;
data[i++] = port->port_res[0].swqe_refill_th;
data[i++] = port->resets;
/* Sum the per-port-resource error counters. */
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp += port->port_res[k].p_stats.poll_receive_errors;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp += port->port_res[k].p_stats.err_tcp_cksum;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp += port->port_res[k].p_stats.err_ip_cksum;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp += port->port_res[k].p_stats.err_frame_crc;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp += port->port_res[k].p_stats.queue_stopped;
data[i++] = tmp;
/* queue_stopped here is a flag, so OR-ing across queues is intended. */
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp |= port->port_res[k].queue_stopped;
data[i++] = tmp;
/* One free-swqe gauge per possible port resource ("PR0".."PR7"). */
for (k = 0; k < 8; k++)
data[i++] = atomic_read(&port->port_res[k].swqe_avail);
/* NOTE(review): the three LRO counters below are combined with |= rather
 * than += — verify OR-ing counters across queues is really intended. */
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp |= port->port_res[k].lro_mgr.stats.aggregated;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp |= port->port_res[k].lro_mgr.stats.flushed;
data[i++] = tmp;
for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
tmp |= port->port_res[k].lro_mgr.stats.no_desc;
data[i++] = tmp;
}
/* ethtool entry points for eHEA ports. */
const struct ethtool_ops ehea_ethtool_ops = {
.get_settings = ehea_get_settings,
.get_drvinfo = ehea_get_drvinfo,
.get_msglevel = ehea_get_msglevel,
.set_msglevel = ehea_set_msglevel,
.get_link = ethtool_op_get_link,
.get_strings = ehea_get_strings,
.get_sset_count = ehea_get_sset_count,
.get_ethtool_stats = ehea_get_ethtool_stats,
.set_settings = ehea_set_settings,
.nway_reset = ehea_nway_reset, /* Restart autonegotiation */
};
/* Attach the eHEA ethtool operations to a newly created netdev. */
void ehea_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
}
| gpl-2.0 |
hiikezoe/android_kernel_samsung_scl21 | sound/soc/codecs/wm8995.c | 2773 | 56027 | /*
* wm8995.c -- WM8995 ALSA SoC Audio driver
*
* Copyright 2010 Wolfson Microelectronics plc
*
* Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
*
* Based on wm8994.c and wm_hubs.c by Mark Brown
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "wm8995.h"
/* Regulator supply names requested in bulk at probe time. */
#define WM8995_NUM_SUPPLIES 8
static const char *wm8995_supply_names[WM8995_NUM_SUPPLIES] = {
"DCVDD",
"DBVDD1",
"DBVDD2",
"DBVDD3",
"AVDD1",
"AVDD2",
"CPVDD",
"MICVDD"
};
/*
 * Register cache defaults, indexed by register address; any register not
 * listed defaults to zero.  Presumably these mirror the chip's reset
 * values (see the WM8995 datasheet) — the table seeds the soft cache.
 */
static const u16 wm8995_reg_defs[WM8995_MAX_REGISTER + 1] = {
[0] = 0x8995, [5] = 0x0100, [16] = 0x000b, [17] = 0x000b,
[24] = 0x02c0, [25] = 0x02c0, [26] = 0x02c0, [27] = 0x02c0,
[28] = 0x000f, [32] = 0x0005, [33] = 0x0005, [40] = 0x0003,
[41] = 0x0013, [48] = 0x0004, [56] = 0x09f8, [64] = 0x1f25,
[69] = 0x0004, [82] = 0xaaaa, [84] = 0x2a2a, [146] = 0x0060,
[256] = 0x0002, [257] = 0x8004, [520] = 0x0010, [528] = 0x0083,
[529] = 0x0083, [548] = 0x0c80, [580] = 0x0c80, [768] = 0x4050,
[769] = 0x4000, [771] = 0x0040, [772] = 0x0040, [773] = 0x0040,
[774] = 0x0004, [775] = 0x0100, [784] = 0x4050, [785] = 0x4000,
[787] = 0x0040, [788] = 0x0040, [789] = 0x0040, [1024] = 0x00c0,
[1025] = 0x00c0, [1026] = 0x00c0, [1027] = 0x00c0, [1028] = 0x00c0,
[1029] = 0x00c0, [1030] = 0x00c0, [1031] = 0x00c0, [1056] = 0x0200,
[1057] = 0x0010, [1058] = 0x0200, [1059] = 0x0010, [1088] = 0x0098,
[1089] = 0x0845, [1104] = 0x0098, [1105] = 0x0845, [1152] = 0x6318,
[1153] = 0x6300, [1154] = 0x0fca, [1155] = 0x0400, [1156] = 0x00d8,
[1157] = 0x1eb5, [1158] = 0xf145, [1159] = 0x0b75, [1160] = 0x01c5,
[1161] = 0x1c58, [1162] = 0xf373, [1163] = 0x0a54, [1164] = 0x0558,
[1165] = 0x168e, [1166] = 0xf829, [1167] = 0x07ad, [1168] = 0x1103,
[1169] = 0x0564, [1170] = 0x0559, [1171] = 0x4000, [1184] = 0x6318,
[1185] = 0x6300, [1186] = 0x0fca, [1187] = 0x0400, [1188] = 0x00d8,
[1189] = 0x1eb5, [1190] = 0xf145, [1191] = 0x0b75, [1192] = 0x01c5,
[1193] = 0x1c58, [1194] = 0xf373, [1195] = 0x0a54, [1196] = 0x0558,
[1197] = 0x168e, [1198] = 0xf829, [1199] = 0x07ad, [1200] = 0x1103,
[1201] = 0x0564, [1202] = 0x0559, [1203] = 0x4000, [1280] = 0x00c0,
[1281] = 0x00c0, [1282] = 0x00c0, [1283] = 0x00c0, [1312] = 0x0200,
[1313] = 0x0010, [1344] = 0x0098, [1345] = 0x0845, [1408] = 0x6318,
[1409] = 0x6300, [1410] = 0x0fca, [1411] = 0x0400, [1412] = 0x00d8,
[1413] = 0x1eb5, [1414] = 0xf145, [1415] = 0x0b75, [1416] = 0x01c5,
[1417] = 0x1c58, [1418] = 0xf373, [1419] = 0x0a54, [1420] = 0x0558,
[1421] = 0x168e, [1422] = 0xf829, [1423] = 0x07ad, [1424] = 0x1103,
[1425] = 0x0564, [1426] = 0x0559, [1427] = 0x4000, [1568] = 0x0002,
[1792] = 0xa100, [1793] = 0xa101, [1794] = 0xa101, [1795] = 0xa101,
[1796] = 0xa101, [1797] = 0xa101, [1798] = 0xa101, [1799] = 0xa101,
[1800] = 0xa101, [1801] = 0xa101, [1802] = 0xa101, [1803] = 0xa101,
[1804] = 0xa101, [1805] = 0xa101, [1825] = 0x0055, [1848] = 0x3fff,
[1849] = 0x1fff, [2049] = 0x0001, [2050] = 0x0069, [2056] = 0x0002,
[2057] = 0x0003, [2058] = 0x0069, [12288] = 0x0001, [12289] = 0x0001,
[12291] = 0x0006, [12292] = 0x0040, [12293] = 0x0001, [12294] = 0x000f,
[12295] = 0x0006, [12296] = 0x0001, [12297] = 0x0003, [12298] = 0x0104,
[12300] = 0x0060, [12301] = 0x0011, [12302] = 0x0401, [12304] = 0x0050,
[12305] = 0x0003, [12306] = 0x0100, [12308] = 0x0051, [12309] = 0x0003,
[12310] = 0x0104, [12311] = 0x000a, [12312] = 0x0060, [12313] = 0x003b,
[12314] = 0x0502, [12315] = 0x0100, [12316] = 0x2fff, [12320] = 0x2fff,
[12324] = 0x2fff, [12328] = 0x2fff, [12332] = 0x2fff, [12336] = 0x2fff,
[12340] = 0x2fff, [12344] = 0x2fff, [12348] = 0x2fff, [12352] = 0x0001,
[12353] = 0x0001, [12355] = 0x0006, [12356] = 0x0040, [12357] = 0x0001,
[12358] = 0x000f, [12359] = 0x0006, [12360] = 0x0001, [12361] = 0x0003,
[12362] = 0x0104, [12364] = 0x0060, [12365] = 0x0011, [12366] = 0x0401,
[12368] = 0x0050, [12369] = 0x0003, [12370] = 0x0100, [12372] = 0x0060,
[12373] = 0x003b, [12374] = 0x0502, [12375] = 0x0100, [12376] = 0x2fff,
[12380] = 0x2fff, [12384] = 0x2fff, [12388] = 0x2fff, [12392] = 0x2fff,
[12396] = 0x2fff, [12400] = 0x2fff, [12404] = 0x2fff, [12408] = 0x2fff,
[12412] = 0x2fff, [12416] = 0x0001, [12417] = 0x0001, [12419] = 0x0006,
[12420] = 0x0040, [12421] = 0x0001, [12422] = 0x000f, [12423] = 0x0006,
[12424] = 0x0001, [12425] = 0x0003, [12426] = 0x0106, [12428] = 0x0061,
[12429] = 0x0011, [12430] = 0x0401, [12432] = 0x0050, [12433] = 0x0003,
[12434] = 0x0102, [12436] = 0x0051, [12437] = 0x0003, [12438] = 0x0106,
[12439] = 0x000a, [12440] = 0x0061, [12441] = 0x003b, [12442] = 0x0502,
[12443] = 0x0100, [12444] = 0x2fff, [12448] = 0x2fff, [12452] = 0x2fff,
[12456] = 0x2fff, [12460] = 0x2fff, [12464] = 0x2fff, [12468] = 0x2fff,
[12472] = 0x2fff, [12476] = 0x2fff, [12480] = 0x0001, [12481] = 0x0001,
[12483] = 0x0006, [12484] = 0x0040, [12485] = 0x0001, [12486] = 0x000f,
[12487] = 0x0006, [12488] = 0x0001, [12489] = 0x0003, [12490] = 0x0106,
[12492] = 0x0061, [12493] = 0x0011, [12494] = 0x0401, [12496] = 0x0050,
[12497] = 0x0003, [12498] = 0x0102, [12500] = 0x0061, [12501] = 0x003b,
[12502] = 0x0502, [12503] = 0x0100, [12504] = 0x2fff, [12508] = 0x2fff,
[12512] = 0x2fff, [12516] = 0x2fff, [12520] = 0x2fff, [12524] = 0x2fff,
[12528] = 0x2fff, [12532] = 0x2fff, [12536] = 0x2fff, [12540] = 0x2fff,
[12544] = 0x0060, [12546] = 0x0601, [12548] = 0x0050, [12550] = 0x0100,
[12552] = 0x0001, [12554] = 0x0104, [12555] = 0x0100, [12556] = 0x2fff,
[12560] = 0x2fff, [12564] = 0x2fff, [12568] = 0x2fff, [12572] = 0x2fff,
[12576] = 0x2fff, [12580] = 0x2fff, [12584] = 0x2fff, [12588] = 0x2fff,
[12592] = 0x2fff, [12596] = 0x2fff, [12600] = 0x2fff, [12604] = 0x2fff,
[12608] = 0x0061, [12610] = 0x0601, [12612] = 0x0050, [12614] = 0x0102,
[12616] = 0x0001, [12618] = 0x0106, [12619] = 0x0100, [12620] = 0x2fff,
[12624] = 0x2fff, [12628] = 0x2fff, [12632] = 0x2fff, [12636] = 0x2fff,
[12640] = 0x2fff, [12644] = 0x2fff, [12648] = 0x2fff, [12652] = 0x2fff,
[12656] = 0x2fff, [12660] = 0x2fff, [12664] = 0x2fff, [12668] = 0x2fff,
[12672] = 0x0060, [12674] = 0x0601, [12676] = 0x0061, [12678] = 0x0601,
[12680] = 0x0050, [12682] = 0x0300, [12684] = 0x0001, [12686] = 0x0304,
[12688] = 0x0040, [12690] = 0x000f, [12692] = 0x0001, [12695] = 0x0100
};
/* One FLL configuration: reference source, input rate, and output rate. */
struct fll_config {
int src;
int in;
int out;
};
/* Driver-private state, stored as the codec's drvdata. */
struct wm8995_priv {
enum snd_soc_control_type control_type;
/* Per-AIF clocking state: selected SYSCLK source, MCLK rate, and the
 * resulting AIF clock rate; indices 0/1 correspond to AIF1/AIF2. */
int sysclk[2];
int mclk[2];
int aifclk[2];
/* Current FLL settings plus a saved copy for suspend/resume. */
struct fll_config fll[2], fll_suspend[2];
struct regulator_bulk_data supplies[WM8995_NUM_SUPPLIES];
struct notifier_block disable_nb[WM8995_NUM_SUPPLIES];
struct snd_soc_codec *codec;
};
/*
* We can't use the same notifier block for more than one supply and
* there's no way I can see to get from a callback to the caller
* except container_of().
*/
/*
 * Generates one regulator notifier callback per supply index @n.  When a
 * supply is disabled, the register cache is marked dirty so it is synced
 * back to the chip on the next power-up.  Comments cannot be placed
 * inside the macro body because of the line continuations.
 */
#define WM8995_REGULATOR_EVENT(n) \
static int wm8995_regulator_event_##n(struct notifier_block *nb, \
unsigned long event, void *data) \
{ \
struct wm8995_priv *wm8995 = container_of(nb, struct wm8995_priv, \
disable_nb[n]); \
if (event & REGULATOR_EVENT_DISABLE) { \
wm8995->codec->cache_sync = 1; \
} \
return 0; \
}
/* One callback per entry in wm8995_supply_names. */
WM8995_REGULATOR_EVENT(0)
WM8995_REGULATOR_EVENT(1)
WM8995_REGULATOR_EVENT(2)
WM8995_REGULATOR_EVENT(3)
WM8995_REGULATOR_EVENT(4)
WM8995_REGULATOR_EVENT(5)
WM8995_REGULATOR_EVENT(6)
WM8995_REGULATOR_EVENT(7)
/* Volume scales (min dB*100, step dB*100, mute flag). */
static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
static const DECLARE_TLV_DB_SCALE(in1lr_pga_tlv, -1650, 150, 0);
static const DECLARE_TLV_DB_SCALE(in1l_boost_tlv, 0, 600, 0);
static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 150, 0);
/* IN1L/IN1R input mode selection (differential vs single-ended pin). */
static const char *in1l_text[] = {
"Differential", "Single-ended IN1LN", "Single-ended IN1LP"
};
static const SOC_ENUM_SINGLE_DECL(in1l_enum, WM8995_LEFT_LINE_INPUT_CONTROL,
2, in1l_text);
static const char *in1r_text[] = {
"Differential", "Single-ended IN1RN", "Single-ended IN1RP"
};
static const SOC_ENUM_SINGLE_DECL(in1r_enum, WM8995_LEFT_LINE_INPUT_CONTROL,
0, in1r_text);
/* Digital microphone data-pin routing for the two DMIC interfaces. */
static const char *dmic_src_text[] = {
"DMICDAT1", "DMICDAT2", "DMICDAT3"
};
static const SOC_ENUM_SINGLE_DECL(dmic_src1_enum, WM8995_POWER_MANAGEMENT_5,
8, dmic_src_text);
static const SOC_ENUM_SINGLE_DECL(dmic_src2_enum, WM8995_POWER_MANAGEMENT_5,
6, dmic_src_text);
/* ALSA mixer controls: DAC/ADC digital volumes, input PGA gain/boost,
 * input mode and DMIC routing selectors, and sidetone levels. */
static const struct snd_kcontrol_new wm8995_snd_controls[] = {
SOC_DOUBLE_R_TLV("DAC1 Volume", WM8995_DAC1_LEFT_VOLUME,
WM8995_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
SOC_DOUBLE_R("DAC1 Switch", WM8995_DAC1_LEFT_VOLUME,
WM8995_DAC1_RIGHT_VOLUME, 9, 1, 1),
SOC_DOUBLE_R_TLV("DAC2 Volume", WM8995_DAC2_LEFT_VOLUME,
WM8995_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
SOC_DOUBLE_R("DAC2 Switch", WM8995_DAC2_LEFT_VOLUME,
WM8995_DAC2_RIGHT_VOLUME, 9, 1, 1),
SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8995_AIF1_DAC1_LEFT_VOLUME,
WM8995_AIF1_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8995_AIF1_DAC2_LEFT_VOLUME,
WM8995_AIF1_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF2DAC Volume", WM8995_AIF2_DAC_LEFT_VOLUME,
WM8995_AIF2_DAC_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
SOC_DOUBLE_R_TLV("IN1LR Volume", WM8995_LEFT_LINE_INPUT_1_VOLUME,
WM8995_RIGHT_LINE_INPUT_1_VOLUME, 0, 31, 0, in1lr_pga_tlv),
SOC_SINGLE_TLV("IN1L Boost", WM8995_LEFT_LINE_INPUT_CONTROL,
4, 3, 0, in1l_boost_tlv),
SOC_ENUM("IN1L Mode", in1l_enum),
SOC_ENUM("IN1R Mode", in1r_enum),
SOC_ENUM("DMIC1 SRC", dmic_src1_enum),
SOC_ENUM("DMIC2 SRC", dmic_src2_enum),
SOC_DOUBLE_TLV("DAC1 Sidetone Volume", WM8995_DAC1_MIXER_VOLUMES, 0, 5,
24, 0, sidetone_tlv),
SOC_DOUBLE_TLV("DAC2 Sidetone Volume", WM8995_DAC2_MIXER_VOLUMES, 0, 5,
24, 0, sidetone_tlv),
SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8995_AIF1_ADC1_LEFT_VOLUME,
WM8995_AIF1_ADC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8995_AIF1_ADC2_LEFT_VOLUME,
WM8995_AIF1_ADC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8995_AIF2_ADC_LEFT_VOLUME,
WM8995_AIF2_ADC_RIGHT_VOLUME, 0, 96, 0, digital_tlv)
};
/*
 * Keep the Class W (dynamic charge-pump power) setup in sync with the
 * DAC1 mixer routing.  Dynamic power can only be used when both DAC1
 * mixers take a single, identical input; in that case the charge pump is
 * pointed at the matching AIF source, otherwise dynamic power is turned
 * off.
 */
static void wm8995_update_class_w(struct snd_soc_codec *codec)
{
	int left_route, right_route;
	int source = 0; /* GCC flow analysis can't track enable */
	int enable = 1;

	/* We also need the same setting for L/R and only one path */
	left_route = snd_soc_read(codec, WM8995_DAC1_LEFT_MIXER_ROUTING);
	switch (left_route) {
	case WM8995_AIF2DACL_TO_DAC1L:
		dev_dbg(codec->dev, "Class W source AIF2DAC\n");
		source = 2 << WM8995_CP_DYN_SRC_SEL_SHIFT;
		break;
	case WM8995_AIF1DAC2L_TO_DAC1L:
		dev_dbg(codec->dev, "Class W source AIF1DAC2\n");
		source = 1 << WM8995_CP_DYN_SRC_SEL_SHIFT;
		break;
	case WM8995_AIF1DAC1L_TO_DAC1L:
		dev_dbg(codec->dev, "Class W source AIF1DAC1\n");
		source = 0 << WM8995_CP_DYN_SRC_SEL_SHIFT;
		break;
	default:
		dev_dbg(codec->dev, "DAC mixer setting: %x\n", left_route);
		enable = 0;
		break;
	}

	right_route = snd_soc_read(codec, WM8995_DAC1_RIGHT_MIXER_ROUTING);
	if (right_route != left_route) {
		dev_dbg(codec->dev, "Left and right DAC mixers different\n");
		enable = 0;
	}

	if (enable) {
		dev_dbg(codec->dev, "Class W enabled\n");
		snd_soc_update_bits(codec, WM8995_CLASS_W_1,
				    WM8995_CP_DYN_PWR_MASK |
				    WM8995_CP_DYN_SRC_SEL_MASK,
				    source | WM8995_CP_DYN_PWR);
	} else {
		dev_dbg(codec->dev, "Class W disabled\n");
		snd_soc_update_bits(codec, WM8995_CLASS_W_1,
				    WM8995_CP_DYN_PWR_MASK, 0);
	}
}
/*
 * DAPM route check: report whether @source is the AIF clock currently
 * driving CLK_SYS, as selected by the SYSCLK_SRC bit.
 */
static int check_clk_sys(struct snd_soc_dapm_widget *source,
			 struct snd_soc_dapm_widget *sink)
{
	unsigned int clocking;
	const char *active;

	clocking = snd_soc_read(source->codec, WM8995_CLOCKING_1);
	active = (clocking & WM8995_SYSCLK_SRC) ? "AIF2CLK" : "AIF1CLK";
	return !strcmp(source->name, active);
}
/*
 * Put handler for the Class W mixer switches: apply the normal DAPM
 * volsw update, then re-evaluate the Class W configuration since the
 * DAC1 routing may have changed.
 */
static int wm8995_put_class_w(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_codec *codec = wlist->widgets[0]->codec;
	int ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);

	wm8995_update_class_w(codec);
	return ret;
}
/*
 * DAPM event handler for the headphone supply widget.
 *
 * On PRE_PMU the headphone output drivers are powered and the second
 * (delay) output stage is enabled; on PRE_PMD the charge pump is shut
 * down.  Always returns 0.
 *
 * Fix: the original fetched the driver data into a local `wm8995` that
 * was never used — the dead local and its lookup are removed.
 */
static int hp_supply_event(struct snd_soc_dapm_widget *w,
			   struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/* Enable the headphone amp */
		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
				    WM8995_HPOUT1L_ENA_MASK |
				    WM8995_HPOUT1R_ENA_MASK,
				    WM8995_HPOUT1L_ENA |
				    WM8995_HPOUT1R_ENA);
		/* Enable the second stage */
		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
				    WM8995_HPOUT1L_DLY_MASK |
				    WM8995_HPOUT1R_DLY_MASK,
				    WM8995_HPOUT1L_DLY |
				    WM8995_HPOUT1R_DLY);
		break;
	case SND_SOC_DAPM_PRE_PMD:
		/* Shut the charge pump down before the supply goes away. */
		snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1,
				    WM8995_CP_ENA_MASK, 0);
		break;
	}
	return 0;
}
/*
 * Issue a DC servo command by writing @val to @reg, then poll the
 * readback register (10ms intervals, up to 10 tries) until all bits in
 * @mask are set; log an error on timeout.
 */
static void dc_servo_cmd(struct snd_soc_codec *codec,
			 unsigned int reg, unsigned int val, unsigned int mask)
{
	int tries;

	dev_dbg(codec->dev, "%s: reg = %#x, val = %#x, mask = %#x\n",
		__func__, reg, val, mask);

	snd_soc_write(codec, reg, val);
	for (tries = 0; tries < 10; tries++) {
		msleep(10);
		val = snd_soc_read(codec, WM8995_DC_SERVO_READBACK_0);
		if ((val & mask) == mask)
			return;
	}

	dev_err(codec->dev, "Timed out waiting for DC Servo\n");
}
/*
 * DAPM event handler for the headphone PGA.  The power-up and power-down
 * register writes follow a strict hardware sequence (charge pump, output
 * drivers, delay stage, DC servo calibration, output/short-removal bits)
 * and must not be reordered.  Always returns 0.
 */
static int hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec;
unsigned int reg;
codec = w->codec;
reg = snd_soc_read(codec, WM8995_ANALOGUE_HP_1);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
/* Charge pump first, then let it settle before the drivers. */
snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1,
WM8995_CP_ENA_MASK, WM8995_CP_ENA);
msleep(5);
snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
WM8995_HPOUT1L_ENA_MASK |
WM8995_HPOUT1R_ENA_MASK,
WM8995_HPOUT1L_ENA | WM8995_HPOUT1R_ENA);
udelay(20);
/* Enable the delay (second) output stage. */
reg |= WM8995_HPOUT1L_DLY | WM8995_HPOUT1R_DLY;
snd_soc_write(codec, WM8995_ANALOGUE_HP_1, reg);
/* Run DC servo start-up calibration on both channels. */
snd_soc_write(codec, WM8995_DC_SERVO_1, WM8995_DCS_ENA_CHAN_0 |
WM8995_DCS_ENA_CHAN_1);
dc_servo_cmd(codec, WM8995_DC_SERVO_2,
WM8995_DCS_TRIG_STARTUP_0 |
WM8995_DCS_TRIG_STARTUP_1,
WM8995_DCS_TRIG_DAC_WR_0 |
WM8995_DCS_TRIG_DAC_WR_1);
/* Finally connect the outputs and remove the shorts. */
reg |= WM8995_HPOUT1R_OUTP | WM8995_HPOUT1R_RMV_SHORT |
WM8995_HPOUT1L_OUTP | WM8995_HPOUT1L_RMV_SHORT;
snd_soc_write(codec, WM8995_ANALOGUE_HP_1, reg);
break;
case SND_SOC_DAPM_PRE_PMD:
/* Tear down in reverse: shorts/outputs, delay stage, servo, drivers. */
snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
WM8995_HPOUT1L_OUTP_MASK |
WM8995_HPOUT1R_OUTP_MASK |
WM8995_HPOUT1L_RMV_SHORT_MASK |
WM8995_HPOUT1R_RMV_SHORT_MASK, 0);
snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
WM8995_HPOUT1L_DLY_MASK |
WM8995_HPOUT1R_DLY_MASK, 0);
snd_soc_write(codec, WM8995_DC_SERVO_1, 0);
snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
WM8995_HPOUT1L_ENA_MASK |
WM8995_HPOUT1R_ENA_MASK,
0);
break;
}
return 0;
}
/*
 * Program the clock source (and optional /2 divider) for one audio
 * interface.  @aif selects AIF1 (0) or AIF2 (1); the AIF2 register block
 * sits 4 registers above AIF1's.  Records the resulting rate in
 * wm8995->aifclk[aif].  Returns 0 or -EINVAL for an unknown source.
 */
static int configure_aif_clock(struct snd_soc_codec *codec, int aif)
{
	struct wm8995_priv *wm8995 = snd_soc_codec_get_drvdata(codec);
	int offset = aif ? 4 : 0;
	int src_bits = 0;
	int rate;

	switch (wm8995->sysclk[aif]) {
	case WM8995_SYSCLK_MCLK1:
		rate = wm8995->mclk[0];
		break;
	case WM8995_SYSCLK_MCLK2:
		src_bits |= 0x8;
		rate = wm8995->mclk[1];
		break;
	case WM8995_SYSCLK_FLL1:
		src_bits |= 0x10;
		rate = wm8995->fll[0].out;
		break;
	case WM8995_SYSCLK_FLL2:
		src_bits |= 0x18;
		rate = wm8995->fll[1].out;
		break;
	default:
		return -EINVAL;
	}

	/* Clocks of 13.5MHz or more are halved before feeding the AIF. */
	if (rate >= 13500000) {
		rate /= 2;
		src_bits |= WM8995_AIF1CLK_DIV;
		dev_dbg(codec->dev, "Dividing AIF%d clock to %dHz\n",
			aif + 1, rate);
	}

	wm8995->aifclk[aif] = rate;
	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1 + offset,
			    WM8995_AIF1CLK_SRC_MASK | WM8995_AIF1CLK_DIV_MASK,
			    src_bits);
	return 0;
}
/*
 * Configure both AIF clocks, then point CLK_SYS at whichever AIF clock
 * is running faster.  Safe to redo here because clocking changes can
 * only be made outside of DAPM.  Always returns 0.
 */
static int configure_clock(struct snd_soc_codec *codec)
{
	struct wm8995_priv *wm8995 = snd_soc_codec_get_drvdata(codec);
	int cur_src, want_src;

	/* Bring up the AIF clocks first */
	configure_aif_clock(codec, 0);
	configure_aif_clock(codec, 1);

	/* If they're equal it doesn't matter which is used */
	if (wm8995->aifclk[0] == wm8995->aifclk[1])
		return 0;

	want_src = (wm8995->aifclk[0] < wm8995->aifclk[1]) ?
		   WM8995_SYSCLK_SRC : 0;
	cur_src = snd_soc_read(codec, WM8995_CLOCKING_1) & WM8995_SYSCLK_SRC;

	/* If there's no change then we're done. */
	if (cur_src == want_src)
		return 0;

	snd_soc_update_bits(codec, WM8995_CLOCKING_1,
			    WM8995_SYSCLK_SRC_MASK, want_src);
	snd_soc_dapm_sync(&codec->dapm);
	return 0;
}
/*
 * DAPM event for the CLK_SYS supply widget: re-run clock configuration
 * both before power-up and after power-down.
 */
static int clk_sys_event(struct snd_soc_dapm_widget *w,
			 struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;

	if (event == SND_SOC_DAPM_PRE_PMU)
		return configure_clock(codec);

	if (event == SND_SOC_DAPM_POST_PMD)
		configure_clock(codec);

	return 0;
}
/* Sidetone source selection: ADC/DMIC1 path or DMIC2 path, per channel. */
static const char *sidetone_text[] = {
"ADC/DMIC1", "DMIC2",
};
static const struct soc_enum sidetone1_enum =
SOC_ENUM_SINGLE(WM8995_SIDETONE, 0, 2, sidetone_text);
static const struct snd_kcontrol_new sidetone1_mux =
SOC_DAPM_ENUM("Left Sidetone Mux", sidetone1_enum);
static const struct soc_enum sidetone2_enum =
SOC_ENUM_SINGLE(WM8995_SIDETONE, 1, 2, sidetone_text);
static const struct snd_kcontrol_new sidetone2_mux =
SOC_DAPM_ENUM("Right Sidetone Mux", sidetone2_enum);
/* AIF1 capture mixer switches: each AIF1 ADC channel can mix the local
 * ADC/DMIC path and the AIF2 return path. */
static const struct snd_kcontrol_new aif1adc1l_mix[] = {
SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING,
1, 1, 0),
SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING,
0, 1, 0),
};
static const struct snd_kcontrol_new aif1adc1r_mix[] = {
SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING,
1, 1, 0),
SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING,
0, 1, 0),
};
static const struct snd_kcontrol_new aif1adc2l_mix[] = {
SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING,
1, 1, 0),
SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING,
0, 1, 0),
};
static const struct snd_kcontrol_new aif1adc2r_mix[] = {
SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING,
1, 1, 0),
SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING,
0, 1, 0),
};
/* DAC1 playback mixer switches.  These use the Class W switch wrapper so
 * that changing the routing triggers wm8995_put_class_w(). */
static const struct snd_kcontrol_new dac1l_mix[] = {
WM8995_CLASS_W_SWITCH("Right Sidetone Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
5, 1, 0),
WM8995_CLASS_W_SWITCH("Left Sidetone Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
4, 1, 0),
WM8995_CLASS_W_SWITCH("AIF2 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
2, 1, 0),
WM8995_CLASS_W_SWITCH("AIF1.2 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
1, 1, 0),
WM8995_CLASS_W_SWITCH("AIF1.1 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
0, 1, 0),
};
static const struct snd_kcontrol_new dac1r_mix[] = {
WM8995_CLASS_W_SWITCH("Right Sidetone Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
5, 1, 0),
WM8995_CLASS_W_SWITCH("Left Sidetone Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
4, 1, 0),
WM8995_CLASS_W_SWITCH("AIF2 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
2, 1, 0),
WM8995_CLASS_W_SWITCH("AIF1.2 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
1, 1, 0),
WM8995_CLASS_W_SWITCH("AIF1.1 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
0, 1, 0),
};
/* DAC2 (AIF2) playback mixer switches — plain DAPM switches, no Class W
 * involvement. */
static const struct snd_kcontrol_new aif2dac2l_mix[] = {
SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
5, 1, 0),
SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
4, 1, 0),
SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
2, 1, 0),
SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
1, 1, 0),
SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
0, 1, 0),
};
static const struct snd_kcontrol_new aif2dac2r_mix[] = {
SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
5, 1, 0),
SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
4, 1, 0),
SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
2, 1, 0),
SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
1, 1, 0),
SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
0, 1, 0),
};
/* IN1L/IN1R input PGA enable switches. */
static const struct snd_kcontrol_new in1l_pga =
SOC_DAPM_SINGLE("IN1L Switch", WM8995_POWER_MANAGEMENT_2, 5, 1, 0);
static const struct snd_kcontrol_new in1r_pga =
SOC_DAPM_SINGLE("IN1R Switch", WM8995_POWER_MANAGEMENT_2, 4, 1, 0);
/* Virtual mux choosing analogue ADC vs digital microphone capture. */
static const char *adc_mux_text[] = {
"ADC",
"DMIC",
};
static const struct soc_enum adc_enum =
SOC_ENUM_SINGLE(0, 0, 2, adc_mux_text);
static const struct snd_kcontrol_new adcl_mux =
SOC_DAPM_ENUM_VIRT("ADCL Mux", adc_enum);
static const struct snd_kcontrol_new adcr_mux =
SOC_DAPM_ENUM_VIRT("ADCR Mux", adc_enum);
/* PDM speaker source selection: each speaker output can be fed from any
 * of the four DAC channels. */
static const char *spk_src_text[] = {
"DAC1L", "DAC1R", "DAC2L", "DAC2R"
};
static const SOC_ENUM_SINGLE_DECL(spk1l_src_enum, WM8995_LEFT_PDM_SPEAKER_1,
0, spk_src_text);
static const SOC_ENUM_SINGLE_DECL(spk1r_src_enum, WM8995_RIGHT_PDM_SPEAKER_1,
0, spk_src_text);
static const SOC_ENUM_SINGLE_DECL(spk2l_src_enum, WM8995_LEFT_PDM_SPEAKER_2,
0, spk_src_text);
static const SOC_ENUM_SINGLE_DECL(spk2r_src_enum, WM8995_RIGHT_PDM_SPEAKER_2,
0, spk_src_text);
static const struct snd_kcontrol_new spk1l_mux =
SOC_DAPM_ENUM("SPK1L SRC", spk1l_src_enum);
static const struct snd_kcontrol_new spk1r_mux =
SOC_DAPM_ENUM("SPK1R SRC", spk1r_src_enum);
static const struct snd_kcontrol_new spk2l_mux =
SOC_DAPM_ENUM("SPK2L SRC", spk2l_src_enum);
static const struct snd_kcontrol_new spk2r_mux =
SOC_DAPM_ENUM("SPK2R SRC", spk2r_src_enum);
/*
 * DAPM widget graph: inputs and input PGAs, mic biases, clock supplies,
 * the AIF1/AIF2 capture and playback streams with their mixers, DACs,
 * sidetone muxes, the headphone path (PGA + supply with their power
 * sequencing events), and the PDM speaker drivers/outputs.
 */
static const struct snd_soc_dapm_widget wm8995_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("DMIC1DAT"),
SND_SOC_DAPM_INPUT("DMIC2DAT"),
SND_SOC_DAPM_INPUT("IN1L"),
SND_SOC_DAPM_INPUT("IN1R"),
SND_SOC_DAPM_MIXER("IN1L PGA", SND_SOC_NOPM, 0, 0,
&in1l_pga, 1),
SND_SOC_DAPM_MIXER("IN1R PGA", SND_SOC_NOPM, 0, 0,
&in1r_pga, 1),
SND_SOC_DAPM_MICBIAS("MICBIAS1", WM8995_POWER_MANAGEMENT_1, 8, 0),
SND_SOC_DAPM_MICBIAS("MICBIAS2", WM8995_POWER_MANAGEMENT_1, 9, 0),
SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8995_AIF1_CLOCKING_1, 0, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8995_AIF2_CLOCKING_1, 0, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8995_CLOCKING_1, 3, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8995_CLOCKING_1, 2, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("SYSDSPCLK", WM8995_CLOCKING_1, 1, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture", 0,
WM8995_POWER_MANAGEMENT_3, 9, 0),
SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture", 0,
WM8995_POWER_MANAGEMENT_3, 8, 0),
SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0,
SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture",
0, WM8995_POWER_MANAGEMENT_3, 11, 0),
SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture",
0, WM8995_POWER_MANAGEMENT_3, 10, 0),
SND_SOC_DAPM_VIRT_MUX("ADCL Mux", SND_SOC_NOPM, 1, 0,
&adcl_mux),
SND_SOC_DAPM_VIRT_MUX("ADCR Mux", SND_SOC_NOPM, 0, 0,
&adcr_mux),
SND_SOC_DAPM_ADC("DMIC2L", NULL, WM8995_POWER_MANAGEMENT_3, 5, 0),
SND_SOC_DAPM_ADC("DMIC2R", NULL, WM8995_POWER_MANAGEMENT_3, 4, 0),
SND_SOC_DAPM_ADC("DMIC1L", NULL, WM8995_POWER_MANAGEMENT_3, 3, 0),
SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8995_POWER_MANAGEMENT_3, 2, 0),
SND_SOC_DAPM_ADC("ADCL", NULL, WM8995_POWER_MANAGEMENT_3, 1, 0),
SND_SOC_DAPM_ADC("ADCR", NULL, WM8995_POWER_MANAGEMENT_3, 0, 0),
SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
aif1adc1l_mix, ARRAY_SIZE(aif1adc1l_mix)),
SND_SOC_DAPM_MIXER("AIF1ADC1R Mixer", SND_SOC_NOPM, 0, 0,
aif1adc1r_mix, ARRAY_SIZE(aif1adc1r_mix)),
SND_SOC_DAPM_MIXER("AIF1ADC2L Mixer", SND_SOC_NOPM, 0, 0,
aif1adc2l_mix, ARRAY_SIZE(aif1adc2l_mix)),
SND_SOC_DAPM_MIXER("AIF1ADC2R Mixer", SND_SOC_NOPM, 0, 0,
aif1adc2r_mix, ARRAY_SIZE(aif1adc2r_mix)),
SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0, WM8995_POWER_MANAGEMENT_4,
9, 0),
SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0, WM8995_POWER_MANAGEMENT_4,
8, 0),
SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM,
0, 0),
SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0, WM8995_POWER_MANAGEMENT_4,
11, 0),
SND_SOC_DAPM_AIF_IN("AIF1DAC2R", NULL, 0, WM8995_POWER_MANAGEMENT_4,
10, 0),
SND_SOC_DAPM_MIXER("AIF2DAC2L Mixer", SND_SOC_NOPM, 0, 0,
aif2dac2l_mix, ARRAY_SIZE(aif2dac2l_mix)),
SND_SOC_DAPM_MIXER("AIF2DAC2R Mixer", SND_SOC_NOPM, 0, 0,
aif2dac2r_mix, ARRAY_SIZE(aif2dac2r_mix)),
SND_SOC_DAPM_DAC("DAC2L", NULL, WM8995_POWER_MANAGEMENT_4, 3, 0),
SND_SOC_DAPM_DAC("DAC2R", NULL, WM8995_POWER_MANAGEMENT_4, 2, 0),
SND_SOC_DAPM_DAC("DAC1L", NULL, WM8995_POWER_MANAGEMENT_4, 1, 0),
SND_SOC_DAPM_DAC("DAC1R", NULL, WM8995_POWER_MANAGEMENT_4, 0, 0),
SND_SOC_DAPM_MIXER("DAC1L Mixer", SND_SOC_NOPM, 0, 0, dac1l_mix,
ARRAY_SIZE(dac1l_mix)),
SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0, dac1r_mix,
ARRAY_SIZE(dac1r_mix)),
SND_SOC_DAPM_MUX("Left Sidetone", SND_SOC_NOPM, 0, 0, &sidetone1_mux),
SND_SOC_DAPM_MUX("Right Sidetone", SND_SOC_NOPM, 0, 0, &sidetone2_mux),
SND_SOC_DAPM_PGA_E("Headphone PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
hp_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_SUPPLY("Headphone Supply", SND_SOC_NOPM, 0, 0,
hp_supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_MUX("SPK1L Driver", WM8995_LEFT_PDM_SPEAKER_1,
4, 0, &spk1l_mux),
SND_SOC_DAPM_MUX("SPK1R Driver", WM8995_RIGHT_PDM_SPEAKER_1,
4, 0, &spk1r_mux),
SND_SOC_DAPM_MUX("SPK2L Driver", WM8995_LEFT_PDM_SPEAKER_2,
4, 0, &spk2l_mux),
SND_SOC_DAPM_MUX("SPK2R Driver", WM8995_RIGHT_PDM_SPEAKER_2,
4, 0, &spk2r_mux),
SND_SOC_DAPM_SUPPLY("LDO2", WM8995_POWER_MANAGEMENT_2, 1, 0, NULL, 0),
SND_SOC_DAPM_OUTPUT("HP1L"),
SND_SOC_DAPM_OUTPUT("HP1R"),
SND_SOC_DAPM_OUTPUT("SPK1L"),
SND_SOC_DAPM_OUTPUT("SPK1R"),
SND_SOC_DAPM_OUTPUT("SPK2L"),
SND_SOC_DAPM_OUTPUT("SPK2R")
};
/*
 * DAPM routing table: clock supply dependencies, analogue input path,
 * AIF1 capture/playback mixing, sidetone, and the headphone/speaker
 * output stages.
 */
static const struct snd_soc_dapm_route wm8995_intercon[] = {
	/* CLK_SYS is valid whenever either AIF clock runs (checked). */
	{ "CLK_SYS", NULL, "AIF1CLK", check_clk_sys },
	{ "CLK_SYS", NULL, "AIF2CLK", check_clk_sys },
	{ "DSP1CLK", NULL, "CLK_SYS" },
	{ "DSP2CLK", NULL, "CLK_SYS" },
	{ "SYSDSPCLK", NULL, "CLK_SYS" },
	/* Clock requirements for the AIF1 ADC paths. */
	{ "AIF1ADC1L", NULL, "AIF1CLK" },
	{ "AIF1ADC1L", NULL, "DSP1CLK" },
	{ "AIF1ADC1R", NULL, "AIF1CLK" },
	{ "AIF1ADC1R", NULL, "DSP1CLK" },
	{ "AIF1ADC1R", NULL, "SYSDSPCLK" },
	{ "AIF1ADC2L", NULL, "AIF1CLK" },
	{ "AIF1ADC2L", NULL, "DSP1CLK" },
	{ "AIF1ADC2R", NULL, "AIF1CLK" },
	{ "AIF1ADC2R", NULL, "DSP1CLK" },
	{ "AIF1ADC2R", NULL, "SYSDSPCLK" },
	/* Digital microphone inputs. */
	{ "DMIC1L", NULL, "DMIC1DAT" },
	{ "DMIC1L", NULL, "CLK_SYS" },
	{ "DMIC1R", NULL, "DMIC1DAT" },
	{ "DMIC1R", NULL, "CLK_SYS" },
	{ "DMIC2L", NULL, "DMIC2DAT" },
	{ "DMIC2L", NULL, "CLK_SYS" },
	{ "DMIC2R", NULL, "DMIC2DAT" },
	{ "DMIC2R", NULL, "CLK_SYS" },
	/* Analogue ADC path (line inputs through the IN1 PGAs). */
	{ "ADCL", NULL, "AIF1CLK" },
	{ "ADCL", NULL, "DSP1CLK" },
	{ "ADCL", NULL, "SYSDSPCLK" },
	{ "ADCR", NULL, "AIF1CLK" },
	{ "ADCR", NULL, "DSP1CLK" },
	{ "ADCR", NULL, "SYSDSPCLK" },
	{ "IN1L PGA", "IN1L Switch", "IN1L" },
	{ "IN1R PGA", "IN1R Switch", "IN1R" },
	{ "IN1L PGA", NULL, "LDO2" },
	{ "IN1R PGA", NULL, "LDO2" },
	{ "ADCL", NULL, "IN1L PGA" },
	{ "ADCR", NULL, "IN1R PGA" },
	{ "ADCL Mux", "ADC", "ADCL" },
	{ "ADCL Mux", "DMIC", "DMIC1L" },
	{ "ADCR Mux", "ADC", "ADCR" },
	{ "ADCR Mux", "DMIC", "DMIC1R" },
	/* AIF1 outputs */
	{ "AIF1ADC1L", NULL, "AIF1ADC1L Mixer" },
	{ "AIF1ADC1L Mixer", "ADC/DMIC Switch", "ADCL Mux" },
	{ "AIF1ADC1R", NULL, "AIF1ADC1R Mixer" },
	{ "AIF1ADC1R Mixer", "ADC/DMIC Switch", "ADCR Mux" },
	{ "AIF1ADC2L", NULL, "AIF1ADC2L Mixer" },
	{ "AIF1ADC2L Mixer", "DMIC Switch", "DMIC2L" },
	{ "AIF1ADC2R", NULL, "AIF1ADC2R Mixer" },
	{ "AIF1ADC2R Mixer", "DMIC Switch", "DMIC2R" },
	/* Sidetone */
	{ "Left Sidetone", "ADC/DMIC1", "AIF1ADC1L" },
	{ "Left Sidetone", "DMIC2", "AIF1ADC2L" },
	{ "Right Sidetone", "ADC/DMIC1", "AIF1ADC1R" },
	{ "Right Sidetone", "DMIC2", "AIF1ADC2R" },
	/* Clock requirements for the AIF1 DAC paths. */
	{ "AIF1DAC1L", NULL, "AIF1CLK" },
	{ "AIF1DAC1L", NULL, "DSP1CLK" },
	{ "AIF1DAC1R", NULL, "AIF1CLK" },
	{ "AIF1DAC1R", NULL, "DSP1CLK" },
	{ "AIF1DAC1R", NULL, "SYSDSPCLK" },
	{ "AIF1DAC2L", NULL, "AIF1CLK" },
	{ "AIF1DAC2L", NULL, "DSP1CLK" },
	{ "AIF1DAC2R", NULL, "AIF1CLK" },
	{ "AIF1DAC2R", NULL, "DSP1CLK" },
	{ "AIF1DAC2R", NULL, "SYSDSPCLK" },
	{ "DAC1L", NULL, "AIF1CLK" },
	{ "DAC1L", NULL, "DSP1CLK" },
	{ "DAC1L", NULL, "SYSDSPCLK" },
	{ "DAC1R", NULL, "AIF1CLK" },
	{ "DAC1R", NULL, "DSP1CLK" },
	{ "DAC1R", NULL, "SYSDSPCLK" },
	{ "AIF1DAC1L", NULL, "AIF1DACDAT" },
	{ "AIF1DAC1R", NULL, "AIF1DACDAT" },
	{ "AIF1DAC2L", NULL, "AIF1DACDAT" },
	{ "AIF1DAC2R", NULL, "AIF1DACDAT" },
	/* DAC1 inputs */
	{ "DAC1L", NULL, "DAC1L Mixer" },
	{ "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
	{ "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
	{ "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" },
	{ "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" },
	{ "DAC1R", NULL, "DAC1R Mixer" },
	{ "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
	{ "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
	{ "DAC1R Mixer", "Left Sidetone Switch", "Left Sidetone" },
	{ "DAC1R Mixer", "Right Sidetone Switch", "Right Sidetone" },
	/* DAC2/AIF2 outputs */
	{ "DAC2L", NULL, "AIF2DAC2L Mixer" },
	{ "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
	{ "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
	{ "DAC2R", NULL, "AIF2DAC2R Mixer" },
	{ "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
	{ "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
	/* Output stages */
	{ "Headphone PGA", NULL, "DAC1L" },
	{ "Headphone PGA", NULL, "DAC1R" },
	{ "Headphone PGA", NULL, "DAC2L" },
	{ "Headphone PGA", NULL, "DAC2R" },
	{ "Headphone PGA", NULL, "Headphone Supply" },
	{ "Headphone PGA", NULL, "CLK_SYS" },
	{ "Headphone PGA", NULL, "LDO2" },
	{ "HP1L", NULL, "Headphone PGA" },
	{ "HP1R", NULL, "Headphone PGA" },
	/* Each PDM speaker driver can take any of the four DAC channels. */
	{ "SPK1L Driver", "DAC1L", "DAC1L" },
	{ "SPK1L Driver", "DAC1R", "DAC1R" },
	{ "SPK1L Driver", "DAC2L", "DAC2L" },
	{ "SPK1L Driver", "DAC2R", "DAC2R" },
	{ "SPK1L Driver", NULL, "CLK_SYS" },
	{ "SPK1R Driver", "DAC1L", "DAC1L" },
	{ "SPK1R Driver", "DAC1R", "DAC1R" },
	{ "SPK1R Driver", "DAC2L", "DAC2L" },
	{ "SPK1R Driver", "DAC2R", "DAC2R" },
	{ "SPK1R Driver", NULL, "CLK_SYS" },
	{ "SPK2L Driver", "DAC1L", "DAC1L" },
	{ "SPK2L Driver", "DAC1R", "DAC1R" },
	{ "SPK2L Driver", "DAC2L", "DAC2L" },
	{ "SPK2L Driver", "DAC2R", "DAC2R" },
	{ "SPK2L Driver", NULL, "CLK_SYS" },
	{ "SPK2R Driver", "DAC1L", "DAC1L" },
	{ "SPK2R Driver", "DAC1R", "DAC1R" },
	{ "SPK2R Driver", "DAC2L", "DAC2L" },
	{ "SPK2R Driver", "DAC2R", "DAC2R" },
	{ "SPK2R Driver", NULL, "CLK_SYS" },
	{ "SPK1L", NULL, "SPK1L Driver" },
	{ "SPK1R", NULL, "SPK1R Driver" },
	{ "SPK2L", NULL, "SPK2L Driver" },
	{ "SPK2R", NULL, "SPK2R Driver" }
};
/*
 * Report whether a register must always be read from hardware rather
 * than the register cache.  Returns non-zero for volatile registers.
 */
static int wm8995_volatile(struct snd_soc_codec *codec, unsigned int reg)
{
	/*
	 * Out of bounds registers are generally considered volatile to
	 * support register banks that are partially owned by something
	 * else, for e.g. a DSP.
	 */
	if (reg > WM8995_MAX_CACHED_REGISTER)
		return 1;

	switch (reg) {
	case WM8995_SOFTWARE_RESET:
	case WM8995_DC_SERVO_READBACK_0:
	case WM8995_INTERRUPT_STATUS_1:
	case WM8995_INTERRUPT_STATUS_2:
	case WM8995_INTERRUPT_STATUS_1_MASK:
	case WM8995_INTERRUPT_STATUS_2_MASK:
	case WM8995_INTERRUPT_CONTROL:
	case WM8995_ACCESSORY_DETECT_MODE1:
	case WM8995_ACCESSORY_DETECT_MODE2:
	case WM8995_HEADPHONE_DETECT1:
	case WM8995_HEADPHONE_DETECT2:
		return 1;
	default:
		return 0;
	}
}
/*
 * Digital (soft) mute for the AIF1/AIF2 DAC paths.  Non-zero 'mute'
 * asserts the mute bit; only DAI ids 0 and 1 are supported.
 */
static int wm8995_aif_mute(struct snd_soc_dai *dai, int mute)
{
	struct snd_soc_codec *codec = dai->codec;
	int reg;

	/* Pick the DAC filter register for the interface being muted. */
	if (dai->id == 0)
		reg = WM8995_AIF1_DAC1_FILTERS_1;
	else if (dai->id == 1)
		reg = WM8995_AIF2_DAC_FILTERS_1;
	else
		return -EINVAL;

	snd_soc_update_bits(codec, reg, WM8995_AIF1DAC1_MUTE_MASK,
			    !!mute << WM8995_AIF1DAC1_MUTE_SHIFT);
	return 0;
}
/*
 * Configure the DAI hardware audio format (master/slave role, frame
 * format and clock inversion) from an SND_SOC_DAIFMT_* bitmask.
 *
 * NOTE(review): only the AIF1 registers are written here, even when
 * called on the AIF2 DAI - confirm whether AIF2 format configuration
 * is intentionally unsupported.
 *
 * Returns 0 on success, -EINVAL for unsupported combinations.
 */
static int wm8995_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct snd_soc_codec *codec;
	int master;
	int aif;

	codec = dai->codec;
	master = 0;
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		master = WM8995_AIF1_MSTR;
		break;
	default:
		dev_err(dai->dev, "Unknown master/slave configuration\n");
		return -EINVAL;
	}

	aif = 0;
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_B:
		aif |= WM8995_AIF1_LRCLK_INV;
		/* fall through - DSP_B is DSP_A plus LRCLK inversion */
	case SND_SOC_DAIFMT_DSP_A:
		aif |= (0x3 << WM8995_AIF1_FMT_SHIFT);
		break;
	case SND_SOC_DAIFMT_I2S:
		aif |= (0x2 << WM8995_AIF1_FMT_SHIFT);
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		aif |= (0x1 << WM8995_AIF1_FMT_SHIFT);
		break;
	default:
		dev_err(dai->dev, "Unknown dai format\n");
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_A:
	case SND_SOC_DAIFMT_DSP_B:
		/* frame inversion not valid for DSP modes */
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_IB_NF:
			aif |= WM8995_AIF1_BCLK_INV;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SND_SOC_DAIFMT_I2S:
	case SND_SOC_DAIFMT_RIGHT_J:
	case SND_SOC_DAIFMT_LEFT_J:
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_IB_IF:
			aif |= WM8995_AIF1_BCLK_INV | WM8995_AIF1_LRCLK_INV;
			break;
		case SND_SOC_DAIFMT_IB_NF:
			aif |= WM8995_AIF1_BCLK_INV;
			break;
		case SND_SOC_DAIFMT_NB_IF:
			aif |= WM8995_AIF1_LRCLK_INV;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	snd_soc_update_bits(codec, WM8995_AIF1_CONTROL_1,
			    WM8995_AIF1_BCLK_INV_MASK |
			    WM8995_AIF1_LRCLK_INV_MASK |
			    WM8995_AIF1_FMT_MASK, aif);
	snd_soc_update_bits(codec, WM8995_AIF1_MASTER_SLAVE,
			    WM8995_AIF1_MSTR_MASK, master);
	return 0;
}
/* Sample rates supported by the SR field; index == register value. */
static const int srs[] = {
	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100,
	48000, 88200, 96000
};

/* AIFnCLK/fs ratios selectable in the rate register (index 0 reserved). */
static const int fs_ratios[] = {
	-1 /* reserved */,
	128, 192, 256, 384, 512, 768, 1024, 1408, 1536
};

/* BCLK dividers multiplied by 10 (so 15 means /1.5); sorted ascending. */
static const int bclk_divs[] = {
	10, 15, 20, 30, 40, 55, 60, 80, 110, 120, 160, 220, 240, 320, 440, 480
};
/*
 * hw_params: program word length, sample rate, AIFCLK/fs ratio, BCLK
 * divider and LRCLK rate for the AIF carrying this stream.
 *
 * Relies on wm8995->aifclk[] having been set up beforehand (via
 * set_sysclk/set_fll and configure_clock).
 */
static int wm8995_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec;
	struct wm8995_priv *wm8995;
	int aif1_reg;
	int bclk_reg;
	int lrclk_reg;
	int rate_reg;
	int bclk_rate;
	int aif1;
	int lrclk, bclk;
	int i, rate_val, best, best_val, cur_val;

	codec = dai->codec;
	wm8995 = snd_soc_codec_get_drvdata(codec);

	/* Select the register set for the AIF this DAI drives. */
	switch (dai->id) {
	case 0:
		aif1_reg = WM8995_AIF1_CONTROL_1;
		bclk_reg = WM8995_AIF1_BCLK;
		rate_reg = WM8995_AIF1_RATE;
		/* Playback and capture have separate LRCLK rate registers. */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK /* ||
			wm8995->lrclk_shared[0] */) {
			lrclk_reg = WM8995_AIF1DAC_LRCLK;
		} else {
			lrclk_reg = WM8995_AIF1ADC_LRCLK;
			dev_dbg(codec->dev, "AIF1 using split LRCLK\n");
		}
		break;
	case 1:
		aif1_reg = WM8995_AIF2_CONTROL_1;
		bclk_reg = WM8995_AIF2_BCLK;
		rate_reg = WM8995_AIF2_RATE;
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK /* ||
			wm8995->lrclk_shared[1] */) {
			lrclk_reg = WM8995_AIF2DAC_LRCLK;
		} else {
			lrclk_reg = WM8995_AIF2ADC_LRCLK;
			dev_dbg(codec->dev, "AIF2 using split LRCLK\n");
		}
		break;
	default:
		return -EINVAL;
	}

	bclk_rate = snd_soc_params_to_bclk(params);
	if (bclk_rate < 0)
		return bclk_rate;

	/* Word length field from the PCM sample format. */
	aif1 = 0;
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		aif1 |= (0x1 << WM8995_AIF1_WL_SHIFT);
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		aif1 |= (0x2 << WM8995_AIF1_WL_SHIFT);
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		aif1 |= (0x3 << WM8995_AIF1_WL_SHIFT);
		break;
	default:
		dev_err(dai->dev, "Unsupported word length %u\n",
			params_format(params));
		return -EINVAL;
	}

	/* try to find a suitable sample rate */
	for (i = 0; i < ARRAY_SIZE(srs); ++i)
		if (srs[i] == params_rate(params))
			break;
	if (i == ARRAY_SIZE(srs)) {
		dev_err(dai->dev, "Sample rate %d is not supported\n",
			params_rate(params));
		return -EINVAL;
	}
	rate_val = i << WM8995_AIF1_SR_SHIFT;

	dev_dbg(dai->dev, "Sample rate is %dHz\n", srs[i]);
	dev_dbg(dai->dev, "AIF%dCLK is %dHz, target BCLK %dHz\n",
		dai->id + 1, wm8995->aifclk[dai->id], bclk_rate);

	/* AIFCLK/fs ratio; look for a close match in either direction */
	best = 1;
	best_val = abs((fs_ratios[1] * params_rate(params))
		       - wm8995->aifclk[dai->id]);
	for (i = 2; i < ARRAY_SIZE(fs_ratios); i++) {
		cur_val = abs((fs_ratios[i] * params_rate(params))
			      - wm8995->aifclk[dai->id]);
		if (cur_val >= best_val)
			continue;
		best = i;
		best_val = cur_val;
	}
	rate_val |= best;

	dev_dbg(dai->dev, "Selected AIF%dCLK/fs = %d\n",
		dai->id + 1, fs_ratios[best]);

	/*
	 * We may not get quite the right frequency if using
	 * approximate clocks so look for the closest match that is
	 * higher than the target (we need to ensure that there enough
	 * BCLKs to clock out the samples).
	 */
	best = 0;
	bclk = 0;
	for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) {
		/* bclk_divs[] is scaled by 10, hence the *10 here. */
		cur_val = (wm8995->aifclk[dai->id] * 10 / bclk_divs[i]) - bclk_rate;
		if (cur_val < 0) /* BCLK table is sorted */
			break;
		best = i;
	}
	bclk |= best << WM8995_AIF1_BCLK_DIV_SHIFT;

	bclk_rate = wm8995->aifclk[dai->id] * 10 / bclk_divs[best];
	dev_dbg(dai->dev, "Using BCLK_DIV %d for actual BCLK %dHz\n",
		bclk_divs[best], bclk_rate);

	/* LRCLK rate field is expressed as BCLKs per LRCLK period. */
	lrclk = bclk_rate / params_rate(params);
	dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
		lrclk, bclk_rate / lrclk);

	snd_soc_update_bits(codec, aif1_reg,
			    WM8995_AIF1_WL_MASK, aif1);
	snd_soc_update_bits(codec, bclk_reg,
			    WM8995_AIF1_BCLK_DIV_MASK, bclk);
	snd_soc_update_bits(codec, lrclk_reg,
			    WM8995_AIF1DAC_RATE_MASK, lrclk);
	snd_soc_update_bits(codec, rate_reg,
			    WM8995_AIF1_SR_MASK |
			    WM8995_AIF1CLK_RATE_MASK, rate_val);
	return 0;
}
/*
 * Tristate (or drive) the pins of the given audio interface.  Each AIF
 * has its tristate control in a different register.
 */
static int wm8995_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	int reg, mask;

	switch (codec_dai->id) {
	case 0:
		reg = WM8995_AIF1_MASTER_SLAVE;
		mask = WM8995_AIF1_TRI;
		break;
	case 1:
		reg = WM8995_AIF2_MASTER_SLAVE;
		mask = WM8995_AIF2_TRI;
		break;
	case 2:
		reg = WM8995_POWER_MANAGEMENT_5;
		mask = WM8995_AIF3_TRI;
		break;
	default:
		return -EINVAL;
	}

	return snd_soc_update_bits(codec, reg, mask, tristate ? mask : 0);
}
/* The size in bits of the FLL divide multiplied by 10
 * to allow rounding later */
#define FIXED_FLL_SIZE ((1 << 16) * 10)

/* Computed FLL register field values (see wm8995_get_fll_config). */
struct fll_div {
	u16 outdiv;		/* FVCO output divider */
	u16 n;			/* integer part of the feedback divide */
	u16 k;			/* fractional part (16-bit) */
	u16 clk_ref_div;	/* reference clock pre-divider (power of 2) */
	u16 fll_fratio;		/* reference multiplier selector */
};
/*
 * Derive FLL divider settings for a given input/output frequency pair.
 * Fills *fll on success; returns 0 or -EINVAL when the frequencies are
 * outside the achievable range.
 */
static int wm8995_get_fll_config(struct fll_div *fll,
				 int freq_in, int freq_out)
{
	u64 Kpart;
	unsigned int K, Ndiv, Nmod;

	pr_debug("FLL input=%dHz, output=%dHz\n", freq_in, freq_out);

	/* Scale the input frequency down to <= 13.5MHz */
	fll->clk_ref_div = 0;
	while (freq_in > 13500000) {
		fll->clk_ref_div++;
		freq_in /= 2;
		/* Only /1, /2, /4, /8 pre-dividers exist. */
		if (fll->clk_ref_div > 3)
			return -EINVAL;
	}

	pr_debug("CLK_REF_DIV=%d, Fref=%dHz\n", fll->clk_ref_div, freq_in);

	/* Scale the output to give 90MHz<=Fvco<=100MHz */
	fll->outdiv = 3;
	while (freq_out * (fll->outdiv + 1) < 90000000) {
		fll->outdiv++;
		if (fll->outdiv > 63)
			return -EINVAL;
	}
	freq_out *= fll->outdiv + 1;

	pr_debug("OUTDIV=%d, Fvco=%dHz\n", fll->outdiv, freq_out);

	/* Pick FRATIO so the multiplied reference lands in a usable band. */
	if (freq_in > 1000000) {
		fll->fll_fratio = 0;
	} else if (freq_in > 256000) {
		fll->fll_fratio = 1;
		freq_in *= 2;
	} else if (freq_in > 128000) {
		fll->fll_fratio = 2;
		freq_in *= 4;
	} else if (freq_in > 64000) {
		fll->fll_fratio = 3;
		freq_in *= 8;
	} else {
		fll->fll_fratio = 4;
		freq_in *= 16;
	}

	pr_debug("FLL_FRATIO=%d, Fref=%dHz\n", fll->fll_fratio, freq_in);

	/* Now, calculate N.K */
	Ndiv = freq_out / freq_in;
	fll->n = Ndiv;
	Nmod = freq_out % freq_in;
	pr_debug("Nmod=%d\n", Nmod);

	/* Calculate fractional part - scale up so we can round. */
	Kpart = FIXED_FLL_SIZE * (long long)Nmod;
	do_div(Kpart, freq_in);
	K = Kpart & 0xFFFFFFFF;
	/* Round to nearest in the scaled-by-10 domain. */
	if ((K % 10) >= 5)
		K += 5;

	/* Move down to proper range now rounding is done */
	fll->k = K / 10;

	pr_debug("N=%x K=%x\n", fll->n, fll->k);
	return 0;
}
/*
 * Configure or stop one of the two FLLs.  The AIF clocks are gated for
 * the duration of the reprogramming and restored afterwards, and
 * configure_clock() is re-run so dependent clocking picks up the change.
 *
 * freq_out == 0 stops the FLL (src 0 is then allowed as "no source").
 */
static int wm8995_set_fll(struct snd_soc_dai *dai, int id,
			  int src, unsigned int freq_in,
			  unsigned int freq_out)
{
	struct snd_soc_codec *codec;
	struct wm8995_priv *wm8995;
	int reg_offset, ret;
	struct fll_div fll;
	u16 reg, aif1, aif2;

	codec = dai->codec;
	wm8995 = snd_soc_codec_get_drvdata(codec);

	/* Remember which AIF clocks were running so we can restore them. */
	aif1 = snd_soc_read(codec, WM8995_AIF1_CLOCKING_1)
	       & WM8995_AIF1CLK_ENA;
	aif2 = snd_soc_read(codec, WM8995_AIF2_CLOCKING_1)
	       & WM8995_AIF2CLK_ENA;

	/* FLL2's registers sit 0x20 above FLL1's. */
	switch (id) {
	case WM8995_FLL1:
		reg_offset = 0;
		id = 0;
		break;
	case WM8995_FLL2:
		reg_offset = 0x20;
		id = 1;
		break;
	default:
		return -EINVAL;
	}

	switch (src) {
	case 0:
		/* Allow no source specification when stopping */
		if (freq_out)
			return -EINVAL;
		break;
	case WM8995_FLL_SRC_MCLK1:
	case WM8995_FLL_SRC_MCLK2:
	case WM8995_FLL_SRC_LRCLK:
	case WM8995_FLL_SRC_BCLK:
		break;
	default:
		return -EINVAL;
	}

	/* Are we changing anything? */
	if (wm8995->fll[id].src == src &&
	    wm8995->fll[id].in == freq_in && wm8995->fll[id].out == freq_out)
		return 0;

	/* If we're stopping the FLL redo the old config - no
	 * registers will actually be written but we avoid GCC flow
	 * analysis bugs spewing warnings.
	 */
	if (freq_out)
		ret = wm8995_get_fll_config(&fll, freq_in, freq_out);
	else
		ret = wm8995_get_fll_config(&fll, wm8995->fll[id].in,
					    wm8995->fll[id].out);
	if (ret < 0)
		return ret;

	/* Gate the AIF clocks while we reclock */
	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1,
			    WM8995_AIF1CLK_ENA_MASK, 0);
	snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1,
			    WM8995_AIF2CLK_ENA_MASK, 0);

	/* We always need to disable the FLL while reconfiguring */
	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset,
			    WM8995_FLL1_ENA_MASK, 0);

	reg = (fll.outdiv << WM8995_FLL1_OUTDIV_SHIFT) |
	      (fll.fll_fratio << WM8995_FLL1_FRATIO_SHIFT);
	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_2 + reg_offset,
			    WM8995_FLL1_OUTDIV_MASK |
			    WM8995_FLL1_FRATIO_MASK, reg);

	snd_soc_write(codec, WM8995_FLL1_CONTROL_3 + reg_offset, fll.k);

	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_4 + reg_offset,
			    WM8995_FLL1_N_MASK,
			    fll.n << WM8995_FLL1_N_SHIFT);

	/* Source field is encoded as (src - 1) in the register. */
	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_5 + reg_offset,
			    WM8995_FLL1_REFCLK_DIV_MASK |
			    WM8995_FLL1_REFCLK_SRC_MASK,
			    (fll.clk_ref_div << WM8995_FLL1_REFCLK_DIV_SHIFT) |
			    (src - 1));

	if (freq_out)
		snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset,
				    WM8995_FLL1_ENA_MASK, WM8995_FLL1_ENA);

	wm8995->fll[id].in = freq_in;
	wm8995->fll[id].out = freq_out;
	wm8995->fll[id].src = src;

	/* Enable any gated AIF clocks */
	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1,
			    WM8995_AIF1CLK_ENA_MASK, aif1);
	snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1,
			    WM8995_AIF2CLK_ENA_MASK, aif2);

	configure_clock(codec);
	return 0;
}
/*
 * Record the system clock source (and, for MCLKs, the frequency) for
 * the given AIF, then re-run configure_clock() to apply it.
 *
 * Bug fix: the WM8995_SYSCLK_MCLK2 case previously stored
 * WM8995_SYSCLK_MCLK1 in sysclk[] (copy-paste error), so selecting
 * MCLK2 actually configured the interface to clock from MCLK1.
 */
static int wm8995_set_dai_sysclk(struct snd_soc_dai *dai,
				 int clk_id, unsigned int freq, int dir)
{
	struct snd_soc_codec *codec;
	struct wm8995_priv *wm8995;

	codec = dai->codec;
	wm8995 = snd_soc_codec_get_drvdata(codec);

	switch (dai->id) {
	case 0:
	case 1:
		break;
	default:
		/* AIF3 shares clocking with AIF1/2 */
		return -EINVAL;
	}

	switch (clk_id) {
	case WM8995_SYSCLK_MCLK1:
		wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK1;
		wm8995->mclk[0] = freq;
		dev_dbg(dai->dev, "AIF%d using MCLK1 at %uHz\n",
			dai->id + 1, freq);
		break;
	case WM8995_SYSCLK_MCLK2:
		wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK2;
		wm8995->mclk[1] = freq;
		dev_dbg(dai->dev, "AIF%d using MCLK2 at %uHz\n",
			dai->id + 1, freq);
		break;
	case WM8995_SYSCLK_FLL1:
		wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL1;
		dev_dbg(dai->dev, "AIF%d using FLL1\n", dai->id + 1);
		break;
	case WM8995_SYSCLK_FLL2:
		wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL2;
		dev_dbg(dai->dev, "AIF%d using FLL2\n", dai->id + 1);
		break;
	case WM8995_SYSCLK_OPCLK:
	default:
		dev_err(dai->dev, "Unknown clock source %d\n", clk_id);
		return -EINVAL;
	}

	configure_clock(codec);
	return 0;
}
/*
 * DAPM bias level transitions.  Coming up from OFF enables the supply
 * regulators, resyncs the register cache (lost while unpowered) and
 * turns the bandgap on; going to OFF reverses that.
 */
static int wm8995_set_bias_level(struct snd_soc_codec *codec,
				 enum snd_soc_bias_level level)
{
	struct wm8995_priv *wm8995;
	int ret;

	wm8995 = snd_soc_codec_get_drvdata(codec);
	switch (level) {
	case SND_SOC_BIAS_ON:
	case SND_SOC_BIAS_PREPARE:
		break;
	case SND_SOC_BIAS_STANDBY:
		/* Only do the power-up work when actually leaving OFF. */
		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
			ret = regulator_bulk_enable(ARRAY_SIZE(wm8995->supplies),
						    wm8995->supplies);
			if (ret)
				return ret;
			ret = snd_soc_cache_sync(codec);
			if (ret) {
				dev_err(codec->dev,
					"Failed to sync cache: %d\n", ret);
				return ret;
			}
			snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
					    WM8995_BG_ENA_MASK, WM8995_BG_ENA);
		}
		break;
	case SND_SOC_BIAS_OFF:
		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
				    WM8995_BG_ENA_MASK, 0);
		regulator_bulk_disable(ARRAY_SIZE(wm8995->supplies),
				       wm8995->supplies);
		break;
	}
	codec->dapm.bias_level = level;
	return 0;
}
#ifdef CONFIG_PM
/* System suspend: drop the codec to full power-off. */
static int wm8995_suspend(struct snd_soc_codec *codec, pm_message_t state)
{
	wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF);
	return 0;
}

/* System resume: back to STANDBY (re-enables supplies, syncs cache). */
static int wm8995_resume(struct snd_soc_codec *codec)
{
	wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
	return 0;
}
#else
#define wm8995_suspend NULL
#define wm8995_resume NULL
#endif
/*
 * Codec remove: power the device fully down.
 *
 * Cleanup: the original computed 'wm8995' (drvdata) and 'i2c' (via
 * container_of) and never used either; both locals are removed.
 *
 * NOTE(review): the regulators obtained in probe() are not freed here,
 * nor are the regulator notifiers unregistered - confirm whether that
 * is handled elsewhere or leaks on unbind.
 */
static int wm8995_remove(struct snd_soc_codec *codec)
{
	wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF);
	return 0;
}
/*
 * Codec probe: set up register I/O, request and enable the supply
 * regulators, verify the device ID, reset the chip, latch volume
 * updates and register controls/widgets/routes.
 *
 * Fixes:
 *  - On a device-ID mismatch 'ret' still held the positive ID value
 *    read from the chip, so probe returned a bogus positive code;
 *    now returns -EINVAL.
 *  - The AIF2 ADC volume latch wrote WM8995_AIF1ADC2_VU against
 *    WM8995_AIF2ADC_VU_MASK; use the matching WM8995_AIF2ADC_VU.
 */
static int wm8995_probe(struct snd_soc_codec *codec)
{
	struct wm8995_priv *wm8995;
	int i;
	int ret;

	codec->dapm.idle_bias_off = 1;
	wm8995 = snd_soc_codec_get_drvdata(codec);
	wm8995->codec = codec;

	ret = snd_soc_codec_set_cache_io(codec, 16, 16, wm8995->control_type);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to set cache i/o: %d\n", ret);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(wm8995->supplies); i++)
		wm8995->supplies[i].supply = wm8995_supply_names[i];

	ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8995->supplies),
				 wm8995->supplies);
	if (ret) {
		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
		return ret;
	}

	wm8995->disable_nb[0].notifier_call = wm8995_regulator_event_0;
	wm8995->disable_nb[1].notifier_call = wm8995_regulator_event_1;
	wm8995->disable_nb[2].notifier_call = wm8995_regulator_event_2;
	wm8995->disable_nb[3].notifier_call = wm8995_regulator_event_3;
	wm8995->disable_nb[4].notifier_call = wm8995_regulator_event_4;
	wm8995->disable_nb[5].notifier_call = wm8995_regulator_event_5;
	wm8995->disable_nb[6].notifier_call = wm8995_regulator_event_6;
	wm8995->disable_nb[7].notifier_call = wm8995_regulator_event_7;

	/* This should really be moved into the regulator core */
	for (i = 0; i < ARRAY_SIZE(wm8995->supplies); i++) {
		ret = regulator_register_notifier(wm8995->supplies[i].consumer,
						  &wm8995->disable_nb[i]);
		if (ret) {
			dev_err(codec->dev,
				"Failed to register regulator notifier: %d\n",
				ret);
		}
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(wm8995->supplies),
				    wm8995->supplies);
	if (ret) {
		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
		goto err_reg_get;
	}

	/* The software-reset register reads back as the device ID. */
	ret = snd_soc_read(codec, WM8995_SOFTWARE_RESET);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to read device ID: %d\n", ret);
		goto err_reg_enable;
	}

	if (ret != 0x8995) {
		dev_err(codec->dev, "Invalid device ID: %#x\n", ret);
		ret = -EINVAL;
		goto err_reg_enable;
	}

	/* Writing the reset register resets the chip. */
	ret = snd_soc_write(codec, WM8995_SOFTWARE_RESET, 0);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
		goto err_reg_enable;
	}

	wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	/* Latch volume updates (right only; we always do left then right). */
	snd_soc_update_bits(codec, WM8995_AIF1_DAC1_RIGHT_VOLUME,
			    WM8995_AIF1DAC1_VU_MASK, WM8995_AIF1DAC1_VU);
	snd_soc_update_bits(codec, WM8995_AIF1_DAC2_RIGHT_VOLUME,
			    WM8995_AIF1DAC2_VU_MASK, WM8995_AIF1DAC2_VU);
	snd_soc_update_bits(codec, WM8995_AIF2_DAC_RIGHT_VOLUME,
			    WM8995_AIF2DAC_VU_MASK, WM8995_AIF2DAC_VU);
	snd_soc_update_bits(codec, WM8995_AIF1_ADC1_RIGHT_VOLUME,
			    WM8995_AIF1ADC1_VU_MASK, WM8995_AIF1ADC1_VU);
	snd_soc_update_bits(codec, WM8995_AIF1_ADC2_RIGHT_VOLUME,
			    WM8995_AIF1ADC2_VU_MASK, WM8995_AIF1ADC2_VU);
	snd_soc_update_bits(codec, WM8995_AIF2_ADC_RIGHT_VOLUME,
			    WM8995_AIF2ADC_VU_MASK, WM8995_AIF2ADC_VU);
	snd_soc_update_bits(codec, WM8995_DAC1_RIGHT_VOLUME,
			    WM8995_DAC1_VU_MASK, WM8995_DAC1_VU);
	snd_soc_update_bits(codec, WM8995_DAC2_RIGHT_VOLUME,
			    WM8995_DAC2_VU_MASK, WM8995_DAC2_VU);
	snd_soc_update_bits(codec, WM8995_RIGHT_LINE_INPUT_1_VOLUME,
			    WM8995_IN1_VU_MASK, WM8995_IN1_VU);

	wm8995_update_class_w(codec);

	snd_soc_add_controls(codec, wm8995_snd_controls,
			     ARRAY_SIZE(wm8995_snd_controls));
	snd_soc_dapm_new_controls(&codec->dapm, wm8995_dapm_widgets,
				  ARRAY_SIZE(wm8995_dapm_widgets));
	snd_soc_dapm_add_routes(&codec->dapm, wm8995_intercon,
				ARRAY_SIZE(wm8995_intercon));

	return 0;

err_reg_enable:
	regulator_bulk_disable(ARRAY_SIZE(wm8995->supplies), wm8995->supplies);
err_reg_get:
	regulator_bulk_free(ARRAY_SIZE(wm8995->supplies), wm8995->supplies);
	return ret;
}
/* PCM sample formats accepted on all three audio interfaces. */
#define WM8995_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
/* DAI callbacks for AIF1. */
static struct snd_soc_dai_ops wm8995_aif1_dai_ops = {
	.set_sysclk = wm8995_set_dai_sysclk,
	.set_fmt = wm8995_set_dai_fmt,
	.hw_params = wm8995_hw_params,
	.digital_mute = wm8995_aif_mute,
	.set_pll = wm8995_set_fll,
	.set_tristate = wm8995_set_tristate,
};

/* DAI callbacks for AIF2 (same handlers; they switch on dai->id). */
static struct snd_soc_dai_ops wm8995_aif2_dai_ops = {
	.set_sysclk = wm8995_set_dai_sysclk,
	.set_fmt = wm8995_set_dai_fmt,
	.hw_params = wm8995_hw_params,
	.digital_mute = wm8995_aif_mute,
	.set_pll = wm8995_set_fll,
	.set_tristate = wm8995_set_tristate,
};

/* AIF3 shares clocking with AIF1/2 so only tristate is controllable. */
static struct snd_soc_dai_ops wm8995_aif3_dai_ops = {
	.set_tristate = wm8995_set_tristate,
};
/*
 * The three digital audio interfaces: stereo playback up to 96kHz,
 * stereo capture up to 48kHz on each.
 */
static struct snd_soc_dai_driver wm8995_dai[] = {
	{
		.name = "wm8995-aif1",
		.playback = {
			.stream_name = "AIF1 Playback",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_96000,
			.formats = WM8995_FORMATS
		},
		.capture = {
			.stream_name = "AIF1 Capture",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = WM8995_FORMATS
		},
		.ops = &wm8995_aif1_dai_ops
	},
	{
		.name = "wm8995-aif2",
		.playback = {
			.stream_name = "AIF2 Playback",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_96000,
			.formats = WM8995_FORMATS
		},
		.capture = {
			.stream_name = "AIF2 Capture",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = WM8995_FORMATS
		},
		.ops = &wm8995_aif2_dai_ops
	},
	{
		.name = "wm8995-aif3",
		.playback = {
			.stream_name = "AIF3 Playback",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_96000,
			.formats = WM8995_FORMATS
		},
		.capture = {
			.stream_name = "AIF3 Capture",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = WM8995_FORMATS
		},
		.ops = &wm8995_aif3_dai_ops
	}
};
/* ASoC codec driver description, including the register cache setup. */
static struct snd_soc_codec_driver soc_codec_dev_wm8995 = {
	.probe = wm8995_probe,
	.remove = wm8995_remove,
	.suspend = wm8995_suspend,
	.resume = wm8995_resume,
	.set_bias_level = wm8995_set_bias_level,
	.reg_cache_size = ARRAY_SIZE(wm8995_reg_defs),
	.reg_word_size = sizeof(u16),
	.reg_cache_default = wm8995_reg_defs,
	.volatile_register = wm8995_volatile,
	.compress_type = SND_SOC_RBTREE_COMPRESSION
};
#if defined(CONFIG_SPI_MASTER)
/* SPI bus glue: allocate private data and register the codec. */
static int __devinit wm8995_spi_probe(struct spi_device *spi)
{
	struct wm8995_priv *wm8995;
	int ret;

	wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL);
	if (!wm8995)
		return -ENOMEM;

	wm8995->control_type = SND_SOC_SPI;
	spi_set_drvdata(spi, wm8995);

	ret = snd_soc_register_codec(&spi->dev,
				     &soc_codec_dev_wm8995, wm8995_dai,
				     ARRAY_SIZE(wm8995_dai));
	/* On registration failure nothing else owns wm8995; free it here. */
	if (ret < 0)
		kfree(wm8995);
	return ret;
}

static int __devexit wm8995_spi_remove(struct spi_device *spi)
{
	snd_soc_unregister_codec(&spi->dev);
	kfree(spi_get_drvdata(spi));
	return 0;
}

static struct spi_driver wm8995_spi_driver = {
	.driver = {
		.name = "wm8995",
		.owner = THIS_MODULE,
	},
	.probe = wm8995_spi_probe,
	.remove = __devexit_p(wm8995_spi_remove)
};
#endif
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/* I2C bus glue: allocate private data and register the codec. */
static __devinit int wm8995_i2c_probe(struct i2c_client *i2c,
				      const struct i2c_device_id *id)
{
	struct wm8995_priv *wm8995;
	int ret;

	wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL);
	if (!wm8995)
		return -ENOMEM;

	wm8995->control_type = SND_SOC_I2C;
	i2c_set_clientdata(i2c, wm8995);

	ret = snd_soc_register_codec(&i2c->dev,
				     &soc_codec_dev_wm8995, wm8995_dai,
				     ARRAY_SIZE(wm8995_dai));
	/* On registration failure nothing else owns wm8995; free it here. */
	if (ret < 0)
		kfree(wm8995);
	return ret;
}

static __devexit int wm8995_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	kfree(i2c_get_clientdata(client));
	return 0;
}

static const struct i2c_device_id wm8995_i2c_id[] = {
	{"wm8995", 0},
	{}
};

MODULE_DEVICE_TABLE(i2c, wm8995_i2c_id);

static struct i2c_driver wm8995_i2c_driver = {
	.driver = {
		.name = "wm8995",
		.owner = THIS_MODULE,
	},
	.probe = wm8995_i2c_probe,
	.remove = __devexit_p(wm8995_i2c_remove),
	.id_table = wm8995_i2c_id
};
#endif
/*
 * Module init: register whichever bus drivers are configured.
 *
 * NOTE(review): when both buses are enabled, the SPI registration
 * result overwrites 'ret', so an I2C failure can be silently masked -
 * confirm this is the intended (best-effort) behaviour.
 */
static int __init wm8995_modinit(void)
{
	int ret = 0;

#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	ret = i2c_add_driver(&wm8995_i2c_driver);
	if (ret) {
		printk(KERN_ERR "Failed to register wm8995 I2C driver: %d\n",
		       ret);
	}
#endif
#if defined(CONFIG_SPI_MASTER)
	ret = spi_register_driver(&wm8995_spi_driver);
	if (ret) {
		printk(KERN_ERR "Failed to register wm8995 SPI driver: %d\n",
		       ret);
	}
#endif
	return ret;
}

module_init(wm8995_modinit);
/* Module exit: unregister the bus drivers registered at init. */
static void __exit wm8995_exit(void)
{
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	i2c_del_driver(&wm8995_i2c_driver);
#endif
#if defined(CONFIG_SPI_MASTER)
	spi_unregister_driver(&wm8995_spi_driver);
#endif
}

module_exit(wm8995_exit);

MODULE_DESCRIPTION("ASoC WM8995 driver");
MODULE_AUTHOR("Dimitris Papastamos <dp@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
/*
* linux/drivers/video/vt8500lcdfb.c
*
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*
* Based on skeletonfb.c and pxafb.c
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <mach/vt8500fb.h>
#include "vt8500lcdfb.h"
#include "wmt_ge_rops.h"
/* Recover the driver-private vt8500lcd_info from its embedded fb_info. */
#define to_vt8500lcd_info(__info) container_of(__info, \
						struct vt8500lcd_info, fb)
/*
 * Apply the current fb var settings to the hardware: derive the pixel
 * format bitfields and line length, then program the timing and control
 * registers.  Returns 0 on success.
 */
static int vt8500lcd_set_par(struct fb_info *info)
{
	struct vt8500lcd_info *fbi = to_vt8500lcd_info(info);
	int reg_bpp = 5; /* 16bpp */
	int i;
	unsigned long control0;

	if (!fbi)
		return -EINVAL;

	if (info->var.bits_per_pixel <= 8) {
		/* palettized */
		info->var.red.offset = 0;
		info->var.red.length = info->var.bits_per_pixel;
		info->var.red.msb_right = 0;
		info->var.green.offset = 0;
		info->var.green.length = info->var.bits_per_pixel;
		info->var.green.msb_right = 0;
		info->var.blue.offset = 0;
		info->var.blue.length = info->var.bits_per_pixel;
		info->var.blue.msb_right = 0;
		info->var.transp.offset = 0;
		info->var.transp.length = 0;
		info->var.transp.msb_right = 0;
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
		info->fix.line_length = info->var.xres_virtual /
					(8/info->var.bits_per_pixel);
	} else {
		/* non-palettized */
		info->var.transp.offset = 0;
		info->var.transp.length = 0;
		info->var.transp.msb_right = 0;

		if (info->var.bits_per_pixel == 16) {
			/* RGB565 */
			info->var.red.offset = 11;
			info->var.red.length = 5;
			info->var.red.msb_right = 0;
			info->var.green.offset = 5;
			info->var.green.length = 6;
			info->var.green.msb_right = 0;
			info->var.blue.offset = 0;
			info->var.blue.length = 5;
			info->var.blue.msb_right = 0;
		} else {
			/* Equal depths per channel */
			info->var.red.offset = info->var.bits_per_pixel
						* 2 / 3;
			info->var.red.length = info->var.bits_per_pixel / 3;
			info->var.red.msb_right = 0;
			info->var.green.offset = info->var.bits_per_pixel / 3;
			info->var.green.length = info->var.bits_per_pixel / 3;
			info->var.green.msb_right = 0;
			info->var.blue.offset = 0;
			info->var.blue.length = info->var.bits_per_pixel / 3;
			info->var.blue.msb_right = 0;
		}

		info->fix.visual = FB_VISUAL_TRUECOLOR;
		/* >16bpp pixels occupy 4 bytes, otherwise 2. */
		info->fix.line_length = info->var.bits_per_pixel > 16 ?
					info->var.xres_virtual << 2 :
					info->var.xres_virtual << 1;
	}

	/* Map bits_per_pixel to the hardware bpp field via bpp_values[].
	 * NOTE(review): 'continue' means the last match wins - presumably
	 * entries are unique; confirm against bpp_values[]. */
	for (i = 0; i < 8; i++) {
		if (bpp_values[i] == info->var.bits_per_pixel) {
			reg_bpp = i;
			continue;
		}
	}

	/* Disable the controller and wait for it to go idle before
	 * reprogramming the timing registers. */
	control0 = readl(fbi->regbase) & ~0xf;
	writel(0, fbi->regbase);
	while (readl(fbi->regbase + 0x38) & 0x10)
		/* wait */;

	/* Horizontal timing: sync len, back/front porch, active width. */
	writel((((info->var.hsync_len - 1) & 0x3f) << 26)
		| ((info->var.left_margin & 0xff) << 18)
		| (((info->var.xres - 1) & 0x3ff) << 8)
		| (info->var.right_margin & 0xff), fbi->regbase + 0x4);
	/* Vertical timing: sync len, back/front porch, active height. */
	writel((((info->var.vsync_len - 1) & 0x3f) << 26)
		| ((info->var.upper_margin & 0xff) << 18)
		| (((info->var.yres - 1) & 0x3ff) << 8)
		| (info->var.lower_margin & 0xff), fbi->regbase + 0x8);
	/* High bits of the resolution for panels larger than 1024. */
	writel((((info->var.yres - 1) & 0x400) << 2)
		| ((info->var.xres - 1) & 0x400), fbi->regbase + 0x10);

	writel(0x80000000, fbi->regbase + 0x20);

	/* Re-enable with the new bpp and the preserved control bits. */
	writel(control0 | (reg_bpp << 1) | 0x100, fbi->regbase);

	return 0;
}
/*
 * Reduce a 16-bit colour channel value to bf->length bits and position
 * it at bf->offset within the pixel word.
 */
static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf)
{
	return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset;
}
/*
 * Set one palette/pseudo-palette entry.  For truecolor modes the first
 * 16 entries go to the software pseudo-palette; for pseudocolor modes
 * the entry is written to the hardware palette as RGB565.
 * Returns 0 on success, non-zero otherwise (fbdev convention).
 */
static int vt8500lcd_setcolreg(unsigned regno, unsigned red, unsigned green,
			       unsigned blue, unsigned transp,
			       struct fb_info *info) {
	struct vt8500lcd_info *fbi = to_vt8500lcd_info(info);
	int ret = 1;
	unsigned int val;

	if (regno >= 256)
		return -EINVAL;

	if (info->var.grayscale)
		/* Standard luminance weighting, fixed point scaled by 2^16. */
		red = green = blue =
			(19595 * red + 38470 * green + 7471 * blue) >> 16;

	switch (fbi->fb.fix.visual) {
	case FB_VISUAL_TRUECOLOR:
		if (regno < 16) {
			u32 *pal = fbi->fb.pseudo_palette;

			val = chan_to_field(red, &fbi->fb.var.red);
			val |= chan_to_field(green, &fbi->fb.var.green);
			val |= chan_to_field(blue, &fbi->fb.var.blue);

			pal[regno] = val;
			ret = 0;
		}
		break;

	case FB_VISUAL_STATIC_PSEUDOCOLOR:
	case FB_VISUAL_PSEUDOCOLOR:
		/* Pack the 16-bit channels into an RGB565 palette entry.
		 * NOTE(review): 'ret' stays 1 on this path - confirm callers
		 * tolerate the non-zero return for pseudocolor. */
		writew((red & 0xf800)
		      | ((green >> 5) & 0x7e0)
		      | ((blue >> 11) & 0x1f),
		       fbi->palette_cpu + sizeof(u16) * regno);
		break;
	}

	return ret;
}
/*
 * ioctl handler; only FBIO_WAITFORVSYNC is implemented.  It unmasks the
 * end-of-frame interrupt, sleeps until the IRQ handler (or timeout)
 * wakes us, then masks it again.
 */
static int vt8500lcd_ioctl(struct fb_info *info, unsigned int cmd,
			 unsigned long arg)
{
	int ret = 0;
	struct vt8500lcd_info *fbi = to_vt8500lcd_info(info);

	if (cmd == FBIO_WAITFORVSYNC) {
		/* Unmask End of Frame interrupt */
		writel(0xffffffff ^ (1 << 3), fbi->regbase + 0x3c);
		ret = wait_event_interruptible_timeout(fbi->wait,
			readl(fbi->regbase + 0x38) & (1 << 3), HZ / 10);
		/* Mask back to reduce unwanted interrupt traffic */
		writel(0xffffffff, fbi->regbase + 0x3c);
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -ETIMEDOUT;
	}

	return ret;
}
/*
 * Pan the display by programming the controller's start-address
 * register (0x20) with the byte offset of the visible window.
 */
static int vt8500lcd_pan_display(struct fb_var_screeninfo *var,
				struct fb_info *info)
{
	struct vt8500lcd_info *fbi = to_vt8500lcd_info(info);
	unsigned bytes_pp = info->fix.line_length / info->var.xres_virtual;
	unsigned start = var->yoffset * info->fix.line_length
		       + var->xoffset * bytes_pp;

	writel((1 << 31)
	       | (((var->xres_virtual - var->xres) * bytes_pp / 4) << 20)
	       | (start >> 2), fbi->regbase + 0x20);

	return 0;
}
/*
* vt8500lcd_blank():
* Blank the display by setting all palette values to zero. Note,
* True Color modes do not really use the palette, so this will not
* blank the display in all modes.
*/
static int vt8500lcd_blank(int blank, struct fb_info *info)
{
	int i;
	switch (blank) {
	case FB_BLANK_POWERDOWN:
	case FB_BLANK_VSYNC_SUSPEND:
	case FB_BLANK_HSYNC_SUSPEND:
	case FB_BLANK_NORMAL:
		/* zero the whole hardware palette to black the screen */
		if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
		    info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
			for (i = 0; i < 256; i++)
				vt8500lcd_setcolreg(i, 0, 0, 0, 0, info);
		/* fall through - NOTE(review): the blank path then restores
		 * the cmap below, which looks intentional but odd; confirm */
	case FB_BLANK_UNBLANK:
		if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
		    info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
			fb_set_cmap(&info->cmap, info);
	}
	return 0;
}
/* Framebuffer operations; drawing ops are delegated to the WMT GE engine. */
static struct fb_ops vt8500lcd_ops = {
	.owner = THIS_MODULE,
	.fb_set_par = vt8500lcd_set_par,
	.fb_setcolreg = vt8500lcd_setcolreg,
	.fb_fillrect = wmt_ge_fillrect,
	.fb_copyarea = wmt_ge_copyarea,
	.fb_imageblit = sys_imageblit,
	.fb_sync = wmt_ge_sync,
	.fb_ioctl = vt8500lcd_ioctl,
	.fb_pan_display = vt8500lcd_pan_display,
	.fb_blank = vt8500lcd_blank,
};
static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id)
{
struct vt8500lcd_info *fbi = dev_id;
if (readl(fbi->regbase + 0x38) & (1 << 3))
wake_up_interruptible(&fbi->wait);
writel(0xffffffff, fbi->regbase + 0x38);
return IRQ_HANDLED;
}
/*
 * Probe: map the controller registers, fill in the fb_info structure,
 * apply the mode from platform data and register the framebuffer.
 * Resources are released in reverse order via the cascading error
 * labels at the bottom.
 */
static int __devinit vt8500lcd_probe(struct platform_device *pdev)
{
	struct vt8500lcd_info *fbi;
	struct resource *res;
	struct vt8500fb_platform_data *pdata = pdev->dev.platform_data;
	void *addr;
	int irq, ret;
	ret = -ENOMEM;
	fbi = NULL;
	/* one allocation covers the info struct plus a 16-entry pseudo palette */
	fbi = kzalloc(sizeof(struct vt8500lcd_info) + sizeof(u32) * 16,
			GFP_KERNEL);
	if (!fbi) {
		dev_err(&pdev->dev, "Failed to initialize framebuffer device\n");
		ret = -ENOMEM;
		goto failed;
	}
	strcpy(fbi->fb.fix.id, "VT8500 LCD");
	fbi->fb.fix.type = FB_TYPE_PACKED_PIXELS;
	fbi->fb.fix.xpanstep = 0;
	fbi->fb.fix.ypanstep = 1;
	fbi->fb.fix.ywrapstep = 0;
	fbi->fb.fix.accel = FB_ACCEL_NONE;
	fbi->fb.var.nonstd = 0;
	fbi->fb.var.activate = FB_ACTIVATE_NOW;
	fbi->fb.var.height = -1;
	fbi->fb.var.width = -1;
	fbi->fb.var.vmode = FB_VMODE_NONINTERLACED;
	fbi->fb.fbops = &vt8500lcd_ops;
	fbi->fb.flags = FBINFO_DEFAULT
		| FBINFO_HWACCEL_COPYAREA
		| FBINFO_HWACCEL_FILLRECT
		| FBINFO_HWACCEL_YPAN
		| FBINFO_VIRTFB
		| FBINFO_PARTIAL_PAN_OK;
	fbi->fb.node = -1;
	/* the pseudo palette lives right behind the info struct */
	addr = fbi;
	addr = addr + sizeof(struct vt8500lcd_info);
	fbi->fb.pseudo_palette = addr;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "no I/O memory resource defined\n");
		ret = -ENODEV;
		goto failed_fbi;
	}
	res = request_mem_region(res->start, resource_size(res), "vt8500lcd");
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to request I/O memory\n");
		ret = -EBUSY;
		goto failed_fbi;
	}
	fbi->regbase = ioremap(res->start, resource_size(res));
	if (fbi->regbase == NULL) {
		dev_err(&pdev->dev, "failed to map I/O memory\n");
		ret = -EBUSY;
		goto failed_free_res;
	}
	/* video memory is pre-allocated by platform code */
	fbi->fb.fix.smem_start = pdata->video_mem_phys;
	fbi->fb.fix.smem_len = pdata->video_mem_len;
	fbi->fb.screen_base = pdata->video_mem_virt;
	/* 256 x 16-bit hardware palette entries, fetched by the controller */
	fbi->palette_size = PAGE_ALIGN(512);
	fbi->palette_cpu = dma_alloc_coherent(&pdev->dev,
					      fbi->palette_size,
					      &fbi->palette_phys,
					      GFP_KERNEL);
	if (fbi->palette_cpu == NULL) {
		dev_err(&pdev->dev, "Failed to allocate palette buffer\n");
		ret = -ENOMEM;
		goto failed_free_io;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ defined\n");
		ret = -ENODEV;
		goto failed_free_palette;
	}
	/* NOTE(review): IRQF_DISABLED is deprecated upstream - confirm this
	 * kernel still honours it before changing */
	ret = request_irq(irq, vt8500lcd_handle_irq, IRQF_DISABLED, "LCD", fbi);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed: %d\n", ret);
		ret = -EBUSY;
		goto failed_free_palette;
	}
	init_waitqueue_head(&fbi->wait);
	if (fb_alloc_cmap(&fbi->fb.cmap, 256, 0) < 0) {
		dev_err(&pdev->dev, "Failed to allocate color map\n");
		ret = -ENOMEM;
		goto failed_free_irq;
	}
	fb_videomode_to_var(&fbi->fb.var, &pdata->mode);
	fbi->fb.var.bits_per_pixel = pdata->bpp;
	fbi->fb.var.xres_virtual = pdata->xres_virtual;
	fbi->fb.var.yres_virtual = pdata->yres_virtual;
	ret = vt8500lcd_set_par(&fbi->fb);
	if (ret) {
		dev_err(&pdev->dev, "Failed to set parameters\n");
		goto failed_free_cmap;
	}
	/* program framebuffer base (reg 0x1c) and palette base with its
	 * enable bit (reg 0x18) */
	writel(fbi->fb.fix.smem_start >> 22, fbi->regbase + 0x1c);
	writel((fbi->palette_phys & 0xfffffe00) | 1, fbi->regbase + 0x18);
	platform_set_drvdata(pdev, fbi);
	ret = register_framebuffer(&fbi->fb);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Failed to register framebuffer device: %d\n", ret);
		goto failed_free_cmap;
	}
	/*
	 * Ok, now enable the LCD controller
	 */
	writel(readl(fbi->regbase) | 1, fbi->regbase);
	return 0;
failed_free_cmap:
	if (fbi->fb.cmap.len)
		fb_dealloc_cmap(&fbi->fb.cmap);
failed_free_irq:
	free_irq(irq, fbi);
failed_free_palette:
	dma_free_coherent(&pdev->dev, fbi->palette_size,
			  fbi->palette_cpu, fbi->palette_phys);
failed_free_io:
	iounmap(fbi->regbase);
failed_free_res:
	release_mem_region(res->start, resource_size(res));
failed_fbi:
	platform_set_drvdata(pdev, NULL);
	kfree(fbi);
failed:
	return ret;
}
/*
 * Teardown: unregister the framebuffer, disable the controller and
 * release every resource acquired in probe, in reverse order.
 */
static int __devexit vt8500lcd_remove(struct platform_device *pdev)
{
	struct vt8500lcd_info *fbi = platform_get_drvdata(pdev);
	struct resource *res;
	int irq;
	unregister_framebuffer(&fbi->fb);
	writel(0, fbi->regbase);	/* disable the LCD controller */
	if (fbi->fb.cmap.len)
		fb_dealloc_cmap(&fbi->fb.cmap);
	irq = platform_get_irq(pdev, 0);
	free_irq(irq, fbi);
	dma_free_coherent(&pdev->dev, fbi->palette_size,
			  fbi->palette_cpu, fbi->palette_phys);
	iounmap(fbi->regbase);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	kfree(fbi);
	return 0;
}
/* Platform glue: matched against the "vt8500-lcd" platform device. */
static struct platform_driver vt8500lcd_driver = {
	.probe = vt8500lcd_probe,
	.remove = __devexit_p(vt8500lcd_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "vt8500-lcd",
	},
};
/* Module entry point: register the platform driver. */
static int __init vt8500lcd_init(void)
{
	return platform_driver_register(&vt8500lcd_driver);
}
/* Module exit point: unregister the platform driver. */
static void __exit vt8500lcd_exit(void)
{
	platform_driver_unregister(&vt8500lcd_driver);
}
module_init(vt8500lcd_init);
module_exit(vt8500lcd_exit);
MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
MODULE_DESCRIPTION("LCD controller driver for VIA VT8500");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Tasssadar/kernel_nexus | drivers/isdn/hysdn/hysdn_procconf.c | 3285 | 13929 | /* $Id: hysdn_procconf.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
*
* Linux driver for HYSDN cards, /proc/net filesystem dir and conf functions.
*
* written by Werner Cornelius (werner@titro.de) for Hypercope GmbH
*
* Copyright 1999 by Werner Cornelius (werner@titro.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/cred.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include "hysdn_defs.h"
static DEFINE_MUTEX(hysdn_conf_mutex);
#define INFO_OUT_LEN 80 /* length of info line including lf */
/********************************************************/
/* defines and data structure for conf write operations */
/********************************************************/
#define CONF_STATE_DETECT 0 /* waiting for detect */
#define CONF_STATE_CONF 1 /* writing config data */
#define CONF_STATE_POF 2 /* writing pof data */
#define CONF_LINE_LEN 255 /* 255 chars max */
struct conf_writedata {
hysdn_card *card; /* card the device is connected to */
int buf_size; /* actual number of bytes in the buffer */
int needed_size; /* needed size when reading pof */
int state; /* actual interface states from above constants */
unsigned char conf_line[CONF_LINE_LEN]; /* buffered conf line */
unsigned short channel; /* active channel number */
unsigned char *pof_buffer; /* buffer when writing pof */
};
/***********************************************************************/
/* process_line parses one config line and transfers it to the card if */
/* necessary.                                                          */
/* "-c<n>" selects the active channel; lines starting with '*' are     */
/* forwarded to the card on that channel.                              */
/* if the return value is negative an error occurred.                  */
/***********************************************************************/
static int
process_line(struct conf_writedata *cnf)
{
	unsigned char *cp = cnf->conf_line;
	int i;

	if (cnf->card->debug_flags & LOG_CNF_LINE)
		hysdn_addlog(cnf->card, "conf line: %s", cp);

	if (*cp == '-') {	/* option */
		cp++;	/* point to option char */

		if (*cp++ != 'c')
			return (0);	/* option unknown or used */
		i = 0;	/* start value for channel */
		while ((*cp <= '9') && (*cp >= '0')) {
			i = i * 10 + *cp++ - '0';	/* get decimal number */
			if (i > 65535)
				break;	/* already invalid; stop before the
					 * accumulator can overflow int (UB) */
		}
		if (i > 65535) {
			if (cnf->card->debug_flags & LOG_CNF_MISC)
				hysdn_addlog(cnf->card, "conf channel invalid %d", i);
			return (-ERR_INV_CHAN);	/* invalid channel */
		}
		cnf->channel = i & 0xFFFF;	/* set new channel number */
		return (0);	/* success */
	}			/* option */
	if (*cp == '*') {	/* line to send */
		if (cnf->card->debug_flags & LOG_CNF_DATA)
			hysdn_addlog(cnf->card, "conf chan=%d %s", cnf->channel, cp);
		return (hysdn_tx_cfgline(cnf->card, cnf->conf_line + 1,
					 cnf->channel));	/* send the line without * */
	}			/* line to send */
	return (0);
}				/* process_line */
/***********************************/
/* conf file operations and tables */
/***********************************/
/****************************************************/
/* write conf file -> boot or send cfg line to card */
/****************************************************/
/*
 * hysdn_conf_write() - /proc conf file write handler.
 *
 * The first byte written selects the mode: 0x1A starts a POF (firmware)
 * download, anything else is treated as ASCII configuration lines.
 * POF data is buffered until 'needed_size' bytes are collected, then
 * flushed to the card; config data is collected into conf_line until a
 * delimiter (any control char other than TAB) is seen, then handed to
 * process_line().  Returns bytes consumed or a negative error code.
 */
static ssize_t
hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t * off)
{
	struct conf_writedata *cnf;
	int i;
	unsigned char ch, *cp;
	if (!count)
		return (0);	/* nothing to handle */
	if (!(cnf = file->private_data))
		return (-EFAULT);	/* should never happen */
	if (cnf->state == CONF_STATE_DETECT) {	/* auto detect cnf or pof data */
		if (copy_from_user(&ch, buf, 1))	/* get first char for detect */
			return (-EFAULT);
		if (ch == 0x1A) {
			/* we detected a pof file */
			if ((cnf->needed_size = pof_write_open(cnf->card, &cnf->pof_buffer)) <= 0)
				return (cnf->needed_size);	/* an error occurred -> exit */
			cnf->buf_size = 0;	/* buffer is empty */
			cnf->state = CONF_STATE_POF;	/* new state */
		} else {
			/* conf data has been detected */
			cnf->buf_size = 0;	/* buffer is empty */
			cnf->state = CONF_STATE_CONF;	/* requested conf data write */
			if (cnf->card->state != CARD_STATE_RUN)
				return (-ERR_NOT_BOOTED);
			cnf->conf_line[CONF_LINE_LEN - 1] = 0;	/* limit string length */
			cnf->channel = 4098;	/* default channel for output */
		}
	}			/* state was auto detect */
	if (cnf->state == CONF_STATE_POF) {	/* pof write active */
		i = cnf->needed_size - cnf->buf_size;	/* bytes still missing for write */
		if (i <= 0)
			return (-EINVAL);	/* size error handling pof */
		if (i < count)
			count = i;	/* limit requested number of bytes */
		if (copy_from_user(cnf->pof_buffer + cnf->buf_size, buf, count))
			return (-EFAULT);	/* error while copying */
		cnf->buf_size += count;
		if (cnf->needed_size == cnf->buf_size) {
			/* a complete record arrived; flush it to the card and
			 * learn the size of the next expected record */
			cnf->needed_size = pof_write_buffer(cnf->card, cnf->buf_size);	/* write data */
			if (cnf->needed_size <= 0) {
				cnf->card->state = CARD_STATE_BOOTERR;	/* show boot error */
				return (cnf->needed_size);	/* an error occurred */
			}
			cnf->buf_size = 0;	/* buffer is empty again */
		}
	}
	/* pof write active */
	else {			/* conf write active */
		if (cnf->card->state != CARD_STATE_RUN) {
			if (cnf->card->debug_flags & LOG_CNF_MISC)
				hysdn_addlog(cnf->card, "cnf write denied -> not booted");
			return (-ERR_NOT_BOOTED);
		}
		i = (CONF_LINE_LEN - 1) - cnf->buf_size;	/* bytes available in buffer */
		if (i > 0) {
			/* copy remaining bytes into buffer */
			if (count > i)
				count = i;	/* limit transfer */
			if (copy_from_user(cnf->conf_line + cnf->buf_size, buf, count))
				return (-EFAULT);	/* error while copying */
			i = count;	/* number of chars in buffer */
			cp = cnf->conf_line + cnf->buf_size;
			while (i) {
				/* search for end of line */
				if ((*cp < ' ') && (*cp != 9))
					break;	/* end of line found */
				cp++;
				i--;
			}	/* search for end of line */
			if (i) {
				/* delimiter found */
				*cp++ = 0;	/* string termination */
				count -= (i - 1);	/* subtract remaining bytes from count */
				/* also consume any further control chars that
				 * directly follow the delimiter (e.g. CR LF) */
				while ((i) && (*cp < ' ') && (*cp != 9)) {
					i--;	/* discard next char */
					count++;	/* mark as read */
					cp++;	/* next char */
				}
				cnf->buf_size = 0;	/* buffer is empty after transfer */
				if ((i = process_line(cnf)) < 0)	/* handle the line */
					count = i;	/* return the error */
			}
			/* delimiter found */
			else {
				cnf->buf_size += count;	/* add chars to string */
				if (cnf->buf_size >= CONF_LINE_LEN - 1) {
					if (cnf->card->debug_flags & LOG_CNF_MISC)
						hysdn_addlog(cnf->card, "cnf line too long %d chars pos %d", cnf->buf_size, count);
					return (-ERR_CONF_LONG);
				}
			}	/* not delimited */
		}
		/* copy remaining bytes into buffer */
		else {
			if (cnf->card->debug_flags & LOG_CNF_MISC)
				hysdn_addlog(cnf->card, "cnf line too long");
			return (-ERR_CONF_LONG);
		}
	}			/* conf write active */
	return (count);
}				/* hysdn_conf_write */
/*******************************************/
/* read conf file -> output card info data */
/*******************************************/
/*
 * Read handler: hand out the card-info string that hysdn_conf_open()
 * generated into file->private_data.
 */
static ssize_t
hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
	char *info = file->private_data;

	if (!(file->f_mode & FMODE_READ))
		return -EPERM;	/* no permission to read */

	if (!info)
		return -EFAULT;	/* should never happen */

	return simple_read_from_buffer(buf, count, off, info, strlen(info));
}				/* hysdn_conf_read */
/******************/
/* open conf file */
/******************/
static int
hysdn_conf_open(struct inode *ino, struct file *filep)
{
hysdn_card *card;
struct proc_dir_entry *pd;
struct conf_writedata *cnf;
char *cp, *tmp;
/* now search the addressed card */
mutex_lock(&hysdn_conf_mutex);
card = card_root;
while (card) {
pd = card->procconf;
if (pd == PDE(ino))
break;
card = card->next; /* search next entry */
}
if (!card) {
mutex_unlock(&hysdn_conf_mutex);
return (-ENODEV); /* device is unknown/invalid */
}
if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x",
filep->f_cred->fsuid, filep->f_cred->fsgid,
filep->f_mode);
if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
/* write only access -> write boot file or conf line */
if (!(cnf = kmalloc(sizeof(struct conf_writedata), GFP_KERNEL))) {
mutex_unlock(&hysdn_conf_mutex);
return (-EFAULT);
}
cnf->card = card;
cnf->buf_size = 0; /* nothing buffered */
cnf->state = CONF_STATE_DETECT; /* start auto detect */
filep->private_data = cnf;
} else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
/* read access -> output card info data */
if (!(tmp = kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) {
mutex_unlock(&hysdn_conf_mutex);
return (-EFAULT); /* out of memory */
}
filep->private_data = tmp; /* start of string */
/* first output a headline */
sprintf(tmp, "id bus slot type irq iobase dp-mem b-chans fax-chans state device");
cp = tmp; /* start of string */
while (*cp)
cp++;
while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
*cp++ = ' ';
*cp++ = '\n';
/* and now the data */
sprintf(cp, "%d %3d %4d %4d %3d 0x%04x 0x%08lx %7d %9d %3d %s",
card->myid,
card->bus,
PCI_SLOT(card->devfn),
card->brdtype,
card->irq,
card->iobase,
card->membase,
card->bchans,
card->faxchans,
card->state,
hysdn_net_getname(card));
while (*cp)
cp++;
while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
*cp++ = ' ';
*cp++ = '\n';
*cp = 0; /* end of string */
} else { /* simultaneous read/write access forbidden ! */
mutex_unlock(&hysdn_conf_mutex);
return (-EPERM); /* no permission this time */
}
mutex_unlock(&hysdn_conf_mutex);
return nonseekable_open(ino, filep);
} /* hysdn_conf_open */
/***************************/
/* close a config file. */
/***************************/
/*
 * hysdn_conf_close() - release a conf file handle.
 *
 * For write handles an unfinished POF download is finalised with
 * pof_write_close() (its status becomes the return value) and the
 * write-state buffer is freed.  For read handles only the rendered
 * info string is freed.
 */
static int
hysdn_conf_close(struct inode *ino, struct file *filep)
{
	hysdn_card *card;
	struct conf_writedata *cnf;
	int retval = 0;
	struct proc_dir_entry *pd;
	mutex_lock(&hysdn_conf_mutex);
	/* search the addressed card */
	card = card_root;
	while (card) {
		pd = card->procconf;
		if (pd == PDE(ino))
			break;
		card = card->next;	/* search next entry */
	}
	if (!card) {
		mutex_unlock(&hysdn_conf_mutex);
		return (-ENODEV);	/* device is unknown/invalid */
	}
	if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
		hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x",
			     filep->f_cred->fsuid, filep->f_cred->fsgid,
			     filep->f_mode);
	if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
		/* write only access -> write boot file or conf line */
		if (filep->private_data) {
			cnf = filep->private_data;
			if (cnf->state == CONF_STATE_POF)
				retval = pof_write_close(cnf->card);	/* close the pof write */
			kfree(filep->private_data);	/* free allocated memory for buffer */
		}		/* handle write private data */
	} else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
		/* read access -> output card info data */
		kfree(filep->private_data);	/* release memory */
	}
	mutex_unlock(&hysdn_conf_mutex);
	return (retval);
}				/* hysdn_conf_close */
/******************************************************/
/* table for conf filesystem functions defined above. */
/******************************************************/
/* File operations for the per-card conf entry; opened via
 * nonseekable_open(), hence no_llseek. */
static const struct file_operations conf_fops =
{
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = hysdn_conf_read,
	.write = hysdn_conf_write,
	.open = hysdn_conf_open,
	.release = hysdn_conf_close,
};
/*****************************/
/* hysdn subdir in /proc/net */
/*****************************/
struct proc_dir_entry *hysdn_proc_entry = NULL;
/*******************************************************************************/
/* hysdn_procconf_init is called when the module is loaded and after the cards */
/* have been detected. The needed proc dir and card config files are created. */
/* The log init is called at last. */
/*******************************************************************************/
/*
 * hysdn_procconf_init() - create the hysdn /proc/net subdir and one
 * conf entry (plus log entry) per detected card.  Called on module
 * load after card detection.  Returns 0 on success, -1 when the
 * subdir could not be created.
 */
int
hysdn_procconf_init(void)
{
	hysdn_card *card;
	unsigned char conf_name[20];

	hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, init_net.proc_net);
	if (!hysdn_proc_entry) {
		printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n");
		return (-1);
	}
	card = card_root;	/* point to first card */
	while (card) {
		/* bounded print: sprintf could overflow conf_name */
		snprintf(conf_name, sizeof(conf_name), "%s%d",
			 PROC_CONF_BASENAME, card->myid);
		if ((card->procconf = (void *) proc_create(conf_name,
							   S_IFREG | S_IRUGO | S_IWUSR,
							   hysdn_proc_entry,
							   &conf_fops)) != NULL) {
			hysdn_proclog_init(card);	/* init the log file entry */
		}
		card = card->next;	/* next entry */
	}
	printk(KERN_NOTICE "HYSDN: procfs initialised\n");
	return (0);
}				/* hysdn_procconf_init */
/*************************************************************************************/
/* hysdn_procconf_release is called when the module is unloaded and before the cards */
/* resources are released. The module counter is assumed to be 0 ! */
/*************************************************************************************/
/*
 * hysdn_procconf_release() - remove all per-card conf and log entries
 * and the hysdn /proc/net subdir.  Called on module unload before the
 * card resources are released; the module use count is assumed to be 0.
 */
void
hysdn_procconf_release(void)
{
	hysdn_card *card;
	unsigned char conf_name[20];

	card = card_root;	/* start with first card */
	while (card) {
		/* bounded print: must match the name built in init */
		snprintf(conf_name, sizeof(conf_name), "%s%d",
			 PROC_CONF_BASENAME, card->myid);
		if (card->procconf)
			remove_proc_entry(conf_name, hysdn_proc_entry);

		hysdn_proclog_release(card);	/* release the log file entry */

		card = card->next;	/* point to next card */
	}
	remove_proc_entry(PROC_SUBDIR_NAME, init_net.proc_net);
}
| gpl-2.0 |
evilp/android_kernel_hp_phobos | sound/sparc/cs4231.c | 5077 | 57591 | /*
* Driver for CS4231 sound chips found on Sparcs.
* Copyright (C) 2002, 2008 David S. Miller <davem@davemloft.net>
*
* Based entirely upon drivers/sbus/audio/cs4231.c which is:
* Copyright (C) 1996, 1997, 1998 Derrick J Brashear (shadow@andrew.cmu.edu)
* and also sound/isa/cs423x/cs4231_lib.c which is:
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/timer.h>
#include <sound/initval.h>
#include <sound/pcm_params.h>
#ifdef CONFIG_SBUS
#define SBUS_SUPPORT
#endif
#if defined(CONFIG_PCI) && defined(CONFIG_SPARC64)
#define EBUS_SUPPORT
#include <linux/pci.h>
#include <asm/ebus_dma.h>
#endif
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
/* Enable this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Sun CS4231 soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for Sun CS4231 soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable Sun CS4231 soundcard.");
MODULE_AUTHOR("Jaroslav Kysela, Derrick J. Brashear and David S. Miller");
MODULE_DESCRIPTION("Sun CS4231");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Sun,CS4231}}");
#ifdef SBUS_SUPPORT
struct sbus_dma_info {
spinlock_t lock; /* DMA access lock */
int dir;
void __iomem *regs;
};
#endif
struct snd_cs4231;
struct cs4231_dma_control {
void (*prepare)(struct cs4231_dma_control *dma_cont,
int dir);
void (*enable)(struct cs4231_dma_control *dma_cont, int on);
int (*request)(struct cs4231_dma_control *dma_cont,
dma_addr_t bus_addr, size_t len);
unsigned int (*address)(struct cs4231_dma_control *dma_cont);
#ifdef EBUS_SUPPORT
struct ebus_dma_info ebus_info;
#endif
#ifdef SBUS_SUPPORT
struct sbus_dma_info sbus_info;
#endif
};
struct snd_cs4231 {
spinlock_t lock; /* registers access lock */
void __iomem *port;
struct cs4231_dma_control p_dma;
struct cs4231_dma_control c_dma;
u32 flags;
#define CS4231_FLAG_EBUS 0x00000001
#define CS4231_FLAG_PLAYBACK 0x00000002
#define CS4231_FLAG_CAPTURE 0x00000004
struct snd_card *card;
struct snd_pcm *pcm;
struct snd_pcm_substream *playback_substream;
unsigned int p_periods_sent;
struct snd_pcm_substream *capture_substream;
unsigned int c_periods_sent;
struct snd_timer *timer;
unsigned short mode;
#define CS4231_MODE_NONE 0x0000
#define CS4231_MODE_PLAY 0x0001
#define CS4231_MODE_RECORD 0x0002
#define CS4231_MODE_TIMER 0x0004
#define CS4231_MODE_OPEN (CS4231_MODE_PLAY | CS4231_MODE_RECORD | \
CS4231_MODE_TIMER)
unsigned char image[32]; /* registers image */
int mce_bit;
int calibrate_mute;
struct mutex mce_mutex; /* mutex for mce register */
struct mutex open_mutex; /* mutex for ALSA open/close */
struct platform_device *op;
unsigned int irq[2];
unsigned int regs_size;
struct snd_cs4231 *next;
};
/* Eventually we can use sound/isa/cs423x/cs4231_lib.c directly, but for
* now.... -DaveM
*/
/* IO ports */
#include <sound/cs4231-regs.h>
/* XXX offsets are different than PC ISA chips... */
#define CS4231U(chip, x) ((chip)->port + ((c_d_c_CS4231##x) << 2))
/* SBUS DMA register defines. */
#define APCCSR 0x10UL /* APC DMA CSR */
#define APCCVA 0x20UL /* APC Capture DMA Address */
#define APCCC 0x24UL /* APC Capture Count */
#define APCCNVA 0x28UL /* APC Capture DMA Next Address */
#define APCCNC 0x2cUL /* APC Capture Next Count */
#define APCPVA 0x30UL /* APC Play DMA Address */
#define APCPC 0x34UL /* APC Play Count */
#define APCPNVA 0x38UL /* APC Play DMA Next Address */
#define APCPNC 0x3cUL /* APC Play Next Count */
/* Defines for SBUS DMA-routines */
#define APCVA 0x0UL /* APC DMA Address */
#define APCC 0x4UL /* APC Count */
#define APCNVA 0x8UL /* APC DMA Next Address */
#define APCNC 0xcUL /* APC Next Count */
#define APC_PLAY 0x30UL /* Play registers start at 0x30 */
#define APC_RECORD 0x20UL /* Record registers start at 0x20 */
/* APCCSR bits */
#define APC_INT_PENDING 0x800000 /* Interrupt Pending */
#define APC_PLAY_INT 0x400000 /* Playback interrupt */
#define APC_CAPT_INT 0x200000 /* Capture interrupt */
#define APC_GENL_INT 0x100000 /* General interrupt */
#define APC_XINT_ENA 0x80000 /* General ext int. enable */
#define APC_XINT_PLAY 0x40000 /* Playback ext intr */
#define APC_XINT_CAPT 0x20000 /* Capture ext intr */
#define APC_XINT_GENL 0x10000 /* Error ext intr */
#define APC_XINT_EMPT 0x8000 /* Pipe empty interrupt (0 write to pva) */
#define APC_XINT_PEMP 0x4000 /* Play pipe empty (pva and pnva not set) */
#define APC_XINT_PNVA 0x2000 /* Playback NVA dirty */
#define APC_XINT_PENA 0x1000 /* play pipe empty Int enable */
#define APC_XINT_COVF 0x800 /* Cap data dropped on floor */
#define APC_XINT_CNVA 0x400 /* Capture NVA dirty */
#define APC_XINT_CEMP 0x200 /* Capture pipe empty (cva and cnva not set) */
#define APC_XINT_CENA 0x100 /* Cap. pipe empty int enable */
#define APC_PPAUSE 0x80 /* Pause the play DMA */
#define APC_CPAUSE 0x40 /* Pause the capture DMA */
#define APC_CDC_RESET 0x20 /* CODEC RESET */
#define APC_PDMA_READY 0x08 /* Play DMA Go */
#define APC_CDMA_READY 0x04 /* Capture DMA Go */
#define APC_CHIP_RESET 0x01 /* Reset the chip */
/* EBUS DMA register offsets */
#define EBDMA_CSR 0x00UL /* Control/Status */
#define EBDMA_ADDR 0x04UL /* DMA Address */
#define EBDMA_COUNT 0x08UL /* DMA Count */
/*
* Some variables
*/
/* Clock source (XTAL select) + divider code per supported sample rate,
 * index-matched to rates[] below. */
static unsigned char freq_bits[14] = {
	/* 5510 */	0x00 | CS4231_XTAL2,
	/* 6620 */	0x0E | CS4231_XTAL2,
	/* 8000 */	0x00 | CS4231_XTAL1,
	/* 9600 */	0x0E | CS4231_XTAL1,
	/* 11025 */	0x02 | CS4231_XTAL2,
	/* 16000 */	0x02 | CS4231_XTAL1,
	/* 18900 */	0x04 | CS4231_XTAL2,
	/* 22050 */	0x06 | CS4231_XTAL2,
	/* 27042 */	0x04 | CS4231_XTAL1,
	/* 32000 */	0x06 | CS4231_XTAL1,
	/* 33075 */	0x0C | CS4231_XTAL2,
	/* 37800 */	0x08 | CS4231_XTAL2,
	/* 44100 */	0x0A | CS4231_XTAL2,
	/* 48000 */	0x0C | CS4231_XTAL1
};

/* The 14 discrete sample rates (Hz) the codec supports. */
static unsigned int rates[14] = {
	5510, 6620, 8000, 9600, 11025, 16000, 18900, 22050,
	27042, 32000, 33075, 37800, 44100, 48000
};

/* Constraint list handed to ALSA so only the rates above are negotiable. */
static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
	.count = ARRAY_SIZE(rates),
	.list = rates,
};
/* Restrict a runtime's rate parameter to the chip's discrete rate list. */
static int snd_cs4231_xrate(struct snd_pcm_runtime *runtime)
{
	return snd_pcm_hw_constraint_list(runtime, 0,
					  SNDRV_PCM_HW_PARAM_RATE,
					  &hw_constraints_rates);
}
static unsigned char snd_cs4231_original_image[32] =
{
0x00, /* 00/00 - lic */
0x00, /* 01/01 - ric */
0x9f, /* 02/02 - la1ic */
0x9f, /* 03/03 - ra1ic */
0x9f, /* 04/04 - la2ic */
0x9f, /* 05/05 - ra2ic */
0xbf, /* 06/06 - loc */
0xbf, /* 07/07 - roc */
0x20, /* 08/08 - pdfr */
CS4231_AUTOCALIB, /* 09/09 - ic */
0x00, /* 0a/10 - pc */
0x00, /* 0b/11 - ti */
CS4231_MODE2, /* 0c/12 - mi */
0x00, /* 0d/13 - lbc */
0x00, /* 0e/14 - pbru */
0x00, /* 0f/15 - pbrl */
0x80, /* 10/16 - afei */
0x01, /* 11/17 - afeii */
0x9f, /* 12/18 - llic */
0x9f, /* 13/19 - rlic */
0x00, /* 14/20 - tlb */
0x00, /* 15/21 - thb */
0x00, /* 16/22 - la3mic/reserved */
0x00, /* 17/23 - ra3mic/reserved */
0x00, /* 18/24 - afs */
0x00, /* 19/25 - lamoc/version */
0x00, /* 1a/26 - mioc */
0x00, /* 1b/27 - ramoc/reserved */
0x20, /* 1c/28 - cdfr */
0x00, /* 1d/29 - res4 */
0x00, /* 1e/30 - cbru */
0x00, /* 1f/31 - cbrl */
};
/*
 * Bus-agnostic register read: EBUS chips use PCI-style readb(),
 * everything else goes through sbus_readb().
 */
static u8 __cs4231_readb(struct snd_cs4231 *cp, void __iomem *reg_addr)
{
	return (cp->flags & CS4231_FLAG_EBUS) ? readb(reg_addr)
					      : sbus_readb(reg_addr);
}
/*
 * Bus-agnostic register write: EBUS chips use PCI-style writeb(),
 * everything else goes through sbus_writeb().
 */
static void __cs4231_writeb(struct snd_cs4231 *cp, u8 val,
			    void __iomem *reg_addr)
{
	if (cp->flags & CS4231_FLAG_EBUS)
		writeb(val, reg_addr);
	else
		sbus_writeb(val, reg_addr);
}
/*
* Basic I/O functions
*/
/*
 * Poll REGSEL until the chip's INIT (busy/auto-calibration) bit clears,
 * giving up after 250 tries of 100 us each (~25 ms).
 */
static void snd_cs4231_ready(struct snd_cs4231 *chip)
{
	int attempts = 250;

	while (attempts-- > 0) {
		if (!(__cs4231_readb(chip, CS4231U(chip, REGSEL)) &
		      CS4231_INIT))
			return;
		udelay(100);
	}
}
/*
 * Select indirect register 'reg' via REGSEL (preserving the MCE bit)
 * and write 'value' to the data port.  Waits for any pending
 * auto-calibration first.
 */
static void snd_cs4231_dout(struct snd_cs4231 *chip, unsigned char reg,
			    unsigned char value)
{
	snd_cs4231_ready(chip);
#ifdef CONFIG_SND_DEBUG
	if (__cs4231_readb(chip, CS4231U(chip, REGSEL)) & CS4231_INIT)
		snd_printdd("out: auto calibration time out - reg = 0x%x, "
			    "value = 0x%x\n",
			    reg, value);
#endif
	__cs4231_writeb(chip, chip->mce_bit | reg, CS4231U(chip, REGSEL));
	wmb();	/* index select must reach the chip before the data write */
	__cs4231_writeb(chip, value, CS4231U(chip, REG));
	mb();
}
/*
 * Read-modify-write register 'reg' through the shadow image: keep the
 * bits in 'mask', OR in 'value'.  While calibrate_mute is set only the
 * shadow is updated; the hardware write is skipped.
 */
static inline void snd_cs4231_outm(struct snd_cs4231 *chip, unsigned char reg,
				   unsigned char mask, unsigned char value)
{
	unsigned char tmp = (chip->image[reg] & mask) | value;
	chip->image[reg] = tmp;
	if (!chip->calibrate_mute)
		snd_cs4231_dout(chip, reg, tmp);
}
/*
 * Write 'value' to register 'reg' unconditionally and keep the shadow
 * image in sync.
 */
static void snd_cs4231_out(struct snd_cs4231 *chip, unsigned char reg,
			   unsigned char value)
{
	snd_cs4231_dout(chip, reg, value);
	chip->image[reg] = value;
	mb();
}
/*
 * Read indirect register 'reg': wait for pending auto-calibration,
 * select the register (preserving the MCE bit), then read the data port.
 */
static unsigned char snd_cs4231_in(struct snd_cs4231 *chip, unsigned char reg)
{
	snd_cs4231_ready(chip);
#ifdef CONFIG_SND_DEBUG
	if (__cs4231_readb(chip, CS4231U(chip, REGSEL)) & CS4231_INIT)
		snd_printdd("in: auto calibration time out - reg = 0x%x\n",
			    reg);
#endif
	__cs4231_writeb(chip, chip->mce_bit | reg, CS4231U(chip, REGSEL));
	mb();	/* index select must complete before the data read */
	return __cs4231_readb(chip, CS4231U(chip, REG));
}
/*
* CS4231 detection / MCE routines
*/
/*
 * Longer busy wait used around mode changes: a few dummy reads to
 * settle the chip, then poll INIT for up to ~500 ms.
 */
static void snd_cs4231_busy_wait(struct snd_cs4231 *chip)
{
	int i;

	/* looks like this sequence is proper for CS4231A chip (GUS MAX) */
	for (i = 0; i < 5; i++)
		__cs4231_readb(chip, CS4231U(chip, REGSEL));

	/* end of cleanup sequence */
	for (i = 0; i < 500; i++) {
		if (!(__cs4231_readb(chip, CS4231U(chip, REGSEL)) &
		      CS4231_INIT))
			break;
		msleep(1);
	}
}
}
/*
 * Raise the Mode Change Enable (MCE) bit in REGSEL so configuration
 * registers may be altered.  Logs debug messages if the codec is still
 * busy calibrating.
 */
static void snd_cs4231_mce_up(struct snd_cs4231 *chip)
{
	unsigned long flags;
	int timeout;
	spin_lock_irqsave(&chip->lock, flags);
	snd_cs4231_ready(chip);
#ifdef CONFIG_SND_DEBUG
	if (__cs4231_readb(chip, CS4231U(chip, REGSEL)) & CS4231_INIT)
		snd_printdd("mce_up - auto calibration time out (0)\n");
#endif
	chip->mce_bit |= CS4231_MCE;
	/* note: 'timeout' here actually holds the current REGSEL contents */
	timeout = __cs4231_readb(chip, CS4231U(chip, REGSEL));
	if (timeout == 0x80)
		snd_printdd("mce_up [%p]: serious init problem - "
			    "codec still busy\n",
			    chip->port);
	if (!(timeout & CS4231_MCE))
		__cs4231_writeb(chip, chip->mce_bit | (timeout & 0x1f),
				CS4231U(chip, REGSEL));
	spin_unlock_irqrestore(&chip->lock, flags);
}
/*
 * Clear the MCE bit and, if it was actually set, wait for the
 * resulting auto-calibration cycle to finish (ACI low), polling for up
 * to 250 ms with the lock dropped around each 1 ms sleep.
 */
static void snd_cs4231_mce_down(struct snd_cs4231 *chip)
{
	unsigned long flags, timeout;
	int reg;
	snd_cs4231_busy_wait(chip);
	spin_lock_irqsave(&chip->lock, flags);
#ifdef CONFIG_SND_DEBUG
	if (__cs4231_readb(chip, CS4231U(chip, REGSEL)) & CS4231_INIT)
		snd_printdd("mce_down [%p] - auto calibration time out (0)\n",
			    CS4231U(chip, REGSEL));
#endif
	chip->mce_bit &= ~CS4231_MCE;
	reg = __cs4231_readb(chip, CS4231U(chip, REGSEL));
	__cs4231_writeb(chip, chip->mce_bit | (reg & 0x1f),
			CS4231U(chip, REGSEL));
	if (reg == 0x80)
		snd_printdd("mce_down [%p]: serious init problem "
			    "- codec still busy\n", chip->port);
	if ((reg & CS4231_MCE) == 0) {
		/* MCE was already clear - no calibration cycle will start */
		spin_unlock_irqrestore(&chip->lock, flags);
		return;
	}
	/*
	 * Wait for auto-calibration (AC) process to finish, i.e. ACI to go low.
	 */
	timeout = jiffies + msecs_to_jiffies(250);
	do {
		spin_unlock_irqrestore(&chip->lock, flags);
		msleep(1);
		spin_lock_irqsave(&chip->lock, flags);
		reg = snd_cs4231_in(chip, CS4231_TEST_INIT);
		reg &= CS4231_CALIB_IN_PROGRESS;
	} while (reg && time_before(jiffies, timeout));
	spin_unlock_irqrestore(&chip->lock, flags);
	if (reg)
		snd_printk(KERN_ERR
			   "mce_down - auto calibration time out (2)\n");
}
/*
 * Queue period-sized DMA transfers until the DMA controller's request()
 * hook refuses more.  *periods_sent tracks the next period of the ring
 * buffer to submit and wraps at runtime->periods.
 */
static void snd_cs4231_advance_dma(struct cs4231_dma_control *dma_cont,
				   struct snd_pcm_substream *substream,
				   unsigned int *periods_sent)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	while (1) {
		unsigned int period_size = snd_pcm_lib_period_bytes(substream);
		unsigned int offset = period_size * (*periods_sent);
		/* transfer lengths must fit in 24 bits */
		BUG_ON(period_size >= (1 << 24));
		/* request() returning non-zero means its queue is full */
		if (dma_cont->request(dma_cont,
				      runtime->dma_addr + offset, period_size))
			return;
		(*periods_sent) = ((*periods_sent) + 1) % runtime->periods;
	}
}
/*
 * Start or stop the playback and/or capture DMA engines selected by
 * the 'what' mask.  On start, the engine is prepared, enabled and
 * pre-loaded with pending periods; on stop it is simply disabled.
 */
static void cs4231_dma_trigger(struct snd_pcm_substream *substream,
			       unsigned int what, int on)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);

	if (what & CS4231_PLAYBACK_ENABLE) {
		struct cs4231_dma_control *pdma = &chip->p_dma;

		if (!on) {
			pdma->enable(pdma, 0);
		} else {
			pdma->prepare(pdma, 0);
			pdma->enable(pdma, 1);
			snd_cs4231_advance_dma(pdma,
					       chip->playback_substream,
					       &chip->p_periods_sent);
		}
	}
	if (what & CS4231_RECORD_ENABLE) {
		struct cs4231_dma_control *cdma = &chip->c_dma;

		if (!on) {
			cdma->enable(cdma, 0);
		} else {
			cdma->prepare(cdma, 1);
			cdma->enable(cdma, 1);
			snd_cs4231_advance_dma(cdma,
					       chip->capture_substream,
					       &chip->c_periods_sent);
		}
	}
}
/*
 * PCM trigger callback (shared by playback and capture).
 *
 * Collects every substream linked to this one into a direction mask,
 * starts/stops the matching DMA engine(s), and mirrors the change in
 * the cached CS4231_IFACE_CTRL image before writing it to the codec.
 * Only START and STOP are supported; anything else returns -EINVAL.
 */
static int snd_cs4231_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
	int result = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_STOP:
	{
		unsigned int what = 0;
		struct snd_pcm_substream *s;
		unsigned long flags;

		/* Handle linked playback+capture pairs in one trigger. */
		snd_pcm_group_for_each_entry(s, substream) {
			if (s == chip->playback_substream) {
				what |= CS4231_PLAYBACK_ENABLE;
				snd_pcm_trigger_done(s, substream);
			} else if (s == chip->capture_substream) {
				what |= CS4231_RECORD_ENABLE;
				snd_pcm_trigger_done(s, substream);
			}
		}
		spin_lock_irqsave(&chip->lock, flags);
		if (cmd == SNDRV_PCM_TRIGGER_START) {
			cs4231_dma_trigger(substream, what, 1);
			chip->image[CS4231_IFACE_CTRL] |= what;
		} else {
			cs4231_dma_trigger(substream, what, 0);
			chip->image[CS4231_IFACE_CTRL] &= ~what;
		}
		snd_cs4231_out(chip, CS4231_IFACE_CTRL,
			       chip->image[CS4231_IFACE_CTRL]);
		spin_unlock_irqrestore(&chip->lock, flags);
		break;
	}
	default:
		result = -EINVAL;
		break;
	}
	return result;
}
/*
* CODEC I/O
*/
/*
 * Map a sample rate to the chip's frequency-select bits.  Scans the
 * 14-entry rate table; an unknown rate falls back to the last entry.
 */
static unsigned char snd_cs4231_get_rate(unsigned int rate)
{
	int i;

	for (i = 0; i < 14; i++)
		if (rates[i] == rate)
			break;
	return freq_bits[i < 14 ? i : 13];
}
/*
 * Translate an ALSA sample format plus channel count into the chip's
 * format bits.  Formats without an explicit mapping fall back to
 * 8-bit linear; stereo sets the CS4231_STEREO flag.
 */
static unsigned char snd_cs4231_get_format(struct snd_cs4231 *chip, int format,
					   int channels)
{
	unsigned char bits = CS4231_LINEAR_8;	/* default */

	if (format == SNDRV_PCM_FORMAT_MU_LAW)
		bits = CS4231_ULAW_8;
	else if (format == SNDRV_PCM_FORMAT_A_LAW)
		bits = CS4231_ALAW_8;
	else if (format == SNDRV_PCM_FORMAT_S16_LE)
		bits = CS4231_LINEAR_16;
	else if (format == SNDRV_PCM_FORMAT_S16_BE)
		bits = CS4231_LINEAR_16_BIG;
	else if (format == SNDRV_PCM_FORMAT_IMA_ADPCM)
		bits = CS4231_ADPCM_16;

	return (channels > 1) ? (bits | CS4231_STEREO) : bits;
}
/*
 * Mute (mute != 0) or restore all analog paths around auto-calibration.
 * Uses direct register writes (snd_cs4231_dout) and does NOT update the
 * cached image, so unmuting simply re-writes the saved image values.
 * The current state is tracked in chip->calibrate_mute; redundant calls
 * are no-ops.
 */
static void snd_cs4231_calibrate_mute(struct snd_cs4231 *chip, int mute)
{
	unsigned long flags;

	mute = mute ? 1 : 0;
	spin_lock_irqsave(&chip->lock, flags);
	if (chip->calibrate_mute == mute) {
		spin_unlock_irqrestore(&chip->lock, flags);
		return;
	}
	/* On unmute, restore the input/loopback registers first. */
	if (!mute) {
		snd_cs4231_dout(chip, CS4231_LEFT_INPUT,
				chip->image[CS4231_LEFT_INPUT]);
		snd_cs4231_dout(chip, CS4231_RIGHT_INPUT,
				chip->image[CS4231_RIGHT_INPUT]);
		snd_cs4231_dout(chip, CS4231_LOOPBACK,
				chip->image[CS4231_LOOPBACK]);
	}
	/* 0x80 sets the per-register mute bit; 0xc0 for mono control. */
	snd_cs4231_dout(chip, CS4231_AUX1_LEFT_INPUT,
			mute ? 0x80 : chip->image[CS4231_AUX1_LEFT_INPUT]);
	snd_cs4231_dout(chip, CS4231_AUX1_RIGHT_INPUT,
			mute ? 0x80 : chip->image[CS4231_AUX1_RIGHT_INPUT]);
	snd_cs4231_dout(chip, CS4231_AUX2_LEFT_INPUT,
			mute ? 0x80 : chip->image[CS4231_AUX2_LEFT_INPUT]);
	snd_cs4231_dout(chip, CS4231_AUX2_RIGHT_INPUT,
			mute ? 0x80 : chip->image[CS4231_AUX2_RIGHT_INPUT]);
	snd_cs4231_dout(chip, CS4231_LEFT_OUTPUT,
			mute ? 0x80 : chip->image[CS4231_LEFT_OUTPUT]);
	snd_cs4231_dout(chip, CS4231_RIGHT_OUTPUT,
			mute ? 0x80 : chip->image[CS4231_RIGHT_OUTPUT]);
	snd_cs4231_dout(chip, CS4231_LEFT_LINE_IN,
			mute ? 0x80 : chip->image[CS4231_LEFT_LINE_IN]);
	snd_cs4231_dout(chip, CS4231_RIGHT_LINE_IN,
			mute ? 0x80 : chip->image[CS4231_RIGHT_LINE_IN]);
	snd_cs4231_dout(chip, CS4231_MONO_CTRL,
			mute ? 0xc0 : chip->image[CS4231_MONO_CTRL]);
	chip->calibrate_mute = mute;
	spin_unlock_irqrestore(&chip->lock, flags);
}
/*
 * Program the playback data format register under an MCE cycle:
 * mute, raise MCE, write the format, drop MCE (waits for the
 * resulting auto-calibration), unmute.  If capture is running, the
 * low nibble (clock bits shared with capture) is preserved.
 * Serialized against the capture path by mce_mutex.
 */
static void snd_cs4231_playback_format(struct snd_cs4231 *chip,
				       struct snd_pcm_hw_params *params,
				       unsigned char pdfr)
{
	unsigned long flags;

	mutex_lock(&chip->mce_mutex);
	snd_cs4231_calibrate_mute(chip, 1);
	snd_cs4231_mce_up(chip);
	spin_lock_irqsave(&chip->lock, flags);
	snd_cs4231_out(chip, CS4231_PLAYBK_FORMAT,
		       (chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE) ?
		       (pdfr & 0xf0) | (chip->image[CS4231_REC_FORMAT] & 0x0f) :
		       pdfr);
	spin_unlock_irqrestore(&chip->lock, flags);
	snd_cs4231_mce_down(chip);
	snd_cs4231_calibrate_mute(chip, 0);
	mutex_unlock(&chip->mce_mutex);
}
/*
 * Program the capture data format register under an MCE cycle.
 * When playback is idle, the clock bits (low nibble of the playback
 * format register) are updated first in a separate MCE cycle, since
 * capture shares the sample clock with playback.
 * Serialized against the playback path by mce_mutex.
 */
static void snd_cs4231_capture_format(struct snd_cs4231 *chip,
				      struct snd_pcm_hw_params *params,
				      unsigned char cdfr)
{
	unsigned long flags;

	mutex_lock(&chip->mce_mutex);
	snd_cs4231_calibrate_mute(chip, 1);
	snd_cs4231_mce_up(chip);
	spin_lock_irqsave(&chip->lock, flags);
	if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE)) {
		snd_cs4231_out(chip, CS4231_PLAYBK_FORMAT,
			       ((chip->image[CS4231_PLAYBK_FORMAT]) & 0xf0) |
			       (cdfr & 0x0f));
		spin_unlock_irqrestore(&chip->lock, flags);
		snd_cs4231_mce_down(chip);
		snd_cs4231_mce_up(chip);
		spin_lock_irqsave(&chip->lock, flags);
	}
	snd_cs4231_out(chip, CS4231_REC_FORMAT, cdfr);
	spin_unlock_irqrestore(&chip->lock, flags);
	snd_cs4231_mce_down(chip);
	snd_cs4231_calibrate_mute(chip, 0);
	mutex_unlock(&chip->mce_mutex);
}
/*
* Timer interface
*/
/*
 * Timer tick length in nanoseconds.  Bit 0 of the playback format
 * register selects which crystal clocks the timer: 9969 ns vs 9920 ns.
 */
static unsigned long snd_cs4231_timer_resolution(struct snd_timer *timer)
{
	struct snd_cs4231 *chip = snd_timer_chip(timer);

	if (chip->image[CS4231_PLAYBK_FORMAT] & 1)
		return 9969;
	return 9920;
}
/*
 * Start the on-chip 16-bit timer.  The count registers are only
 * reprogrammed when the timer is currently disabled or the requested
 * tick count differs from what is already latched.
 */
static int snd_cs4231_timer_start(struct snd_timer *timer)
{
	unsigned long flags;
	unsigned int ticks;
	struct snd_cs4231 *chip = snd_timer_chip(timer);

	spin_lock_irqsave(&chip->lock, flags);
	ticks = timer->sticks;
	if (!(chip->image[CS4231_ALT_FEATURE_1] & CS4231_TIMER_ENABLE) ||
	    chip->image[CS4231_TIMER_HIGH] != (unsigned char)(ticks >> 8) ||
	    chip->image[CS4231_TIMER_LOW] != (unsigned char)ticks) {
		chip->image[CS4231_TIMER_HIGH] = (unsigned char)(ticks >> 8);
		snd_cs4231_out(chip, CS4231_TIMER_HIGH,
			       chip->image[CS4231_TIMER_HIGH]);
		chip->image[CS4231_TIMER_LOW] = (unsigned char)ticks;
		snd_cs4231_out(chip, CS4231_TIMER_LOW,
			       chip->image[CS4231_TIMER_LOW]);
		snd_cs4231_out(chip, CS4231_ALT_FEATURE_1,
			       chip->image[CS4231_ALT_FEATURE_1] |
			       CS4231_TIMER_ENABLE);
	}
	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
/* Stop the on-chip timer by clearing its enable bit in the cached
 * alternate-feature register and writing that back to the codec. */
static int snd_cs4231_timer_stop(struct snd_timer *timer)
{
	struct snd_cs4231 *chip = snd_timer_chip(timer);
	unsigned long flags;
	unsigned char afe1;

	spin_lock_irqsave(&chip->lock, flags);
	afe1 = chip->image[CS4231_ALT_FEATURE_1] & ~CS4231_TIMER_ENABLE;
	chip->image[CS4231_ALT_FEATURE_1] = afe1;
	snd_cs4231_out(chip, CS4231_ALT_FEATURE_1, afe1);
	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
/*
 * Bring the codec into a known state: each register group is written
 * inside its own MCE up/down cycle so the chip latches the new mode
 * and re-runs auto-calibration where required.  The order of these
 * cycles follows the chip's documented init sequence - do not reorder.
 */
static void __devinit snd_cs4231_init(struct snd_cs4231 *chip)
{
	unsigned long flags;

	snd_cs4231_mce_down(chip);

#ifdef SNDRV_DEBUG_MCE
	snd_printdd("init: (1)\n");
#endif
	/* Cycle 1: disable playback/capture and select auto-calibrate. */
	snd_cs4231_mce_up(chip);
	spin_lock_irqsave(&chip->lock, flags);
	chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_PLAYBACK_ENABLE |
					    CS4231_PLAYBACK_PIO |
					    CS4231_RECORD_ENABLE |
					    CS4231_RECORD_PIO |
					    CS4231_CALIB_MODE);
	chip->image[CS4231_IFACE_CTRL] |= CS4231_AUTOCALIB;
	snd_cs4231_out(chip, CS4231_IFACE_CTRL, chip->image[CS4231_IFACE_CTRL]);
	spin_unlock_irqrestore(&chip->lock, flags);
	snd_cs4231_mce_down(chip);

#ifdef SNDRV_DEBUG_MCE
	snd_printdd("init: (2)\n");
#endif
	/* Cycle 2: alternate feature register 1. */
	snd_cs4231_mce_up(chip);
	spin_lock_irqsave(&chip->lock, flags);
	snd_cs4231_out(chip, CS4231_ALT_FEATURE_1,
		       chip->image[CS4231_ALT_FEATURE_1]);
	spin_unlock_irqrestore(&chip->lock, flags);
	snd_cs4231_mce_down(chip);

#ifdef SNDRV_DEBUG_MCE
	snd_printdd("init: (3) - afei = 0x%x\n",
		    chip->image[CS4231_ALT_FEATURE_1]);
#endif
	/* Alternate feature register 2 needs no MCE cycle. */
	spin_lock_irqsave(&chip->lock, flags);
	snd_cs4231_out(chip, CS4231_ALT_FEATURE_2,
		       chip->image[CS4231_ALT_FEATURE_2]);
	spin_unlock_irqrestore(&chip->lock, flags);

	/* Cycle 3: playback format. */
	snd_cs4231_mce_up(chip);
	spin_lock_irqsave(&chip->lock, flags);
	snd_cs4231_out(chip, CS4231_PLAYBK_FORMAT,
		       chip->image[CS4231_PLAYBK_FORMAT]);
	spin_unlock_irqrestore(&chip->lock, flags);
	snd_cs4231_mce_down(chip);

#ifdef SNDRV_DEBUG_MCE
	snd_printdd("init: (4)\n");
#endif
	/* Cycle 4: record format. */
	snd_cs4231_mce_up(chip);
	spin_lock_irqsave(&chip->lock, flags);
	snd_cs4231_out(chip, CS4231_REC_FORMAT, chip->image[CS4231_REC_FORMAT]);
	spin_unlock_irqrestore(&chip->lock, flags);
	snd_cs4231_mce_down(chip);

#ifdef SNDRV_DEBUG_MCE
	snd_printdd("init: (5)\n");
#endif
}
/*
 * Claim the shared codec for 'mode' (play/record/timer).  Returns
 * -EAGAIN if that mode is already taken.  The first opener also
 * enables and acknowledges the codec interrupts; later openers just
 * add their mode bit.  Protected by open_mutex.
 */
static int snd_cs4231_open(struct snd_cs4231 *chip, unsigned int mode)
{
	unsigned long flags;

	mutex_lock(&chip->open_mutex);
	if ((chip->mode & mode)) {
		mutex_unlock(&chip->open_mutex);
		return -EAGAIN;
	}
	if (chip->mode & CS4231_MODE_OPEN) {
		chip->mode |= mode;
		mutex_unlock(&chip->open_mutex);
		return 0;
	}
	/* ok. now enable and ack CODEC IRQ */
	spin_lock_irqsave(&chip->lock, flags);
	snd_cs4231_out(chip, CS4231_IRQ_STATUS, CS4231_PLAYBACK_IRQ |
		       CS4231_RECORD_IRQ |
		       CS4231_TIMER_IRQ);
	snd_cs4231_out(chip, CS4231_IRQ_STATUS, 0);
	__cs4231_writeb(chip, 0, CS4231U(chip, STATUS));	/* clear IRQ */
	__cs4231_writeb(chip, 0, CS4231U(chip, STATUS));	/* clear IRQ */
	/* NOTE(review): the enable/ack sequence is performed twice in the
	 * original code - presumably deliberate hardware quirk handling;
	 * confirm against the CS4231 errata before changing. */
	snd_cs4231_out(chip, CS4231_IRQ_STATUS, CS4231_PLAYBACK_IRQ |
		       CS4231_RECORD_IRQ |
		       CS4231_TIMER_IRQ);
	snd_cs4231_out(chip, CS4231_IRQ_STATUS, 0);
	spin_unlock_irqrestore(&chip->lock, flags);

	chip->mode = mode;
	mutex_unlock(&chip->open_mutex);
	return 0;
}
/*
 * Release 'mode' on the shared codec.  When the last mode goes away,
 * the codec IRQs are disabled, any running playback/record is shut
 * down under an MCE cycle, and pending IRQs are acknowledged.
 * Protected by open_mutex.
 */
static void snd_cs4231_close(struct snd_cs4231 *chip, unsigned int mode)
{
	unsigned long flags;

	mutex_lock(&chip->open_mutex);
	chip->mode &= ~mode;
	if (chip->mode & CS4231_MODE_OPEN) {
		/* Other users remain - nothing more to do. */
		mutex_unlock(&chip->open_mutex);
		return;
	}
	snd_cs4231_calibrate_mute(chip, 1);

	/* disable IRQ */
	spin_lock_irqsave(&chip->lock, flags);
	snd_cs4231_out(chip, CS4231_IRQ_STATUS, 0);
	__cs4231_writeb(chip, 0, CS4231U(chip, STATUS));	/* clear IRQ */
	__cs4231_writeb(chip, 0, CS4231U(chip, STATUS));	/* clear IRQ */

	/* now disable record & playback */
	if (chip->image[CS4231_IFACE_CTRL] &
	    (CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO |
	     CS4231_RECORD_ENABLE | CS4231_RECORD_PIO)) {
		/* mce_up/down may sleep - drop the spinlock around them. */
		spin_unlock_irqrestore(&chip->lock, flags);
		snd_cs4231_mce_up(chip);
		spin_lock_irqsave(&chip->lock, flags);
		chip->image[CS4231_IFACE_CTRL] &=
			~(CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO |
			  CS4231_RECORD_ENABLE | CS4231_RECORD_PIO);
		snd_cs4231_out(chip, CS4231_IFACE_CTRL,
			       chip->image[CS4231_IFACE_CTRL]);
		spin_unlock_irqrestore(&chip->lock, flags);
		snd_cs4231_mce_down(chip);
		spin_lock_irqsave(&chip->lock, flags);
	}

	/* clear IRQ again */
	snd_cs4231_out(chip, CS4231_IRQ_STATUS, 0);
	__cs4231_writeb(chip, 0, CS4231U(chip, STATUS));	/* clear IRQ */
	__cs4231_writeb(chip, 0, CS4231U(chip, STATUS));	/* clear IRQ */
	spin_unlock_irqrestore(&chip->lock, flags);

	snd_cs4231_calibrate_mute(chip, 0);

	chip->mode = 0;
	mutex_unlock(&chip->open_mutex);
}
/*
* timer open/close
*/
/*
 * Timer open callback: claim the shared codec in timer mode.
 *
 * Fix: propagate the return value of snd_cs4231_open() (e.g. -EAGAIN
 * when timer mode is already taken) instead of discarding it and
 * reporting success for a device that was never opened.
 */
static int snd_cs4231_timer_open(struct snd_timer *timer)
{
	struct snd_cs4231 *chip = snd_timer_chip(timer);

	return snd_cs4231_open(chip, CS4231_MODE_TIMER);
}
/* Timer close callback: drop the timer-mode claim on the codec. */
static int snd_cs4231_timer_close(struct snd_timer *timer)
{
	snd_cs4231_close(snd_timer_chip(timer), CS4231_MODE_TIMER);
	return 0;
}
/* Hardware description handed to the ALSA timer core.
 * SNDRV_TIMER_HW_AUTO: the chip reloads its counter automatically,
 * so the IRQ handler never needs to re-arm it. */
static struct snd_timer_hardware snd_cs4231_timer_table = {
	.flags		= SNDRV_TIMER_HW_AUTO,
	.resolution	= 9945,		/* nominal tick length, ns */
	.ticks		= 65535,	/* 16-bit down-counter */
	.open		= snd_cs4231_timer_open,
	.close		= snd_cs4231_timer_close,
	.c_resolution	= snd_cs4231_timer_resolution,
	.start		= snd_cs4231_timer_start,
	.stop		= snd_cs4231_timer_stop,
};
/*
* ok.. exported functions..
*/
/*
 * hw_params callback for playback: allocate the DMA buffer, then
 * program the codec's playback data format register with the combined
 * format/channel and rate bits.
 */
static int snd_cs4231_playback_hw_params(struct snd_pcm_substream *substream,
					 struct snd_pcm_hw_params *hw_params)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
	unsigned char pdfr;
	int rc;

	rc = snd_pcm_lib_malloc_pages(substream,
				      params_buffer_bytes(hw_params));
	if (rc < 0)
		return rc;

	pdfr = snd_cs4231_get_format(chip, params_format(hw_params),
				     params_channels(hw_params));
	pdfr |= snd_cs4231_get_rate(params_rate(hw_params));
	snd_cs4231_playback_format(chip, hw_params, pdfr);
	return 0;
}
/*
 * Prepare callback for playback: clear the playback enable/PIO bits
 * in the cached interface-control image and reset the count of
 * periods already queued to the DMA engine.
 */
static int snd_cs4231_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *rt = substream->runtime;
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);

	chip->image[CS4231_IFACE_CTRL] &=
		~(CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO);

	/* The chip counts samples in a 16-bit register. */
	BUG_ON(rt->period_size > 0xffff + 1);

	chip->p_periods_sent = 0;

	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
/*
 * hw_params callback for capture: allocate the DMA buffer, then
 * program the codec's record data format register with the combined
 * format/channel and rate bits.
 */
static int snd_cs4231_capture_hw_params(struct snd_pcm_substream *substream,
					struct snd_pcm_hw_params *hw_params)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
	unsigned char cdfr;
	int rc;

	rc = snd_pcm_lib_malloc_pages(substream,
				      params_buffer_bytes(hw_params));
	if (rc < 0)
		return rc;

	cdfr = snd_cs4231_get_format(chip, params_format(hw_params),
				     params_channels(hw_params));
	cdfr |= snd_cs4231_get_rate(params_rate(hw_params));
	snd_cs4231_capture_format(chip, hw_params, cdfr);
	return 0;
}
/*
 * Prepare callback for capture: clear the record enable/PIO bits in
 * the cached interface-control image and reset the queued-period
 * counter for the capture DMA engine.
 */
static int snd_cs4231_capture_prepare(struct snd_pcm_substream *substream)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);

	chip->image[CS4231_IFACE_CTRL] &=
		~(CS4231_RECORD_ENABLE | CS4231_RECORD_PIO);
	chip->c_periods_sent = 0;

	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
/*
 * Check the test/init register for capture clipping and bump the
 * runtime overrange counter when either channel clipped above 0 dB.
 */
static void snd_cs4231_overrange(struct snd_cs4231 *chip)
{
	unsigned long flags;
	unsigned char ti;

	spin_lock_irqsave(&chip->lock, flags);
	ti = snd_cs4231_in(chip, CS4231_TEST_INIT);
	spin_unlock_irqrestore(&chip->lock, flags);

	/* detect overrange only above 0dB; may be user selectable? */
	if (ti & (0x08 | 0x02))
		chip->capture_substream->runtime->overrange++;
}
/* Playback IRQ work: report the elapsed period and refill the DMA
 * engine.  Interrupts arriving after playback was disabled are
 * ignored. */
static void snd_cs4231_play_callback(struct snd_cs4231 *chip)
{
	if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE))
		return;
	snd_pcm_period_elapsed(chip->playback_substream);
	snd_cs4231_advance_dma(&chip->p_dma, chip->playback_substream,
			       &chip->p_periods_sent);
}
/* Capture IRQ work: report the elapsed period and refill the DMA
 * engine.  Interrupts arriving after capture was disabled are
 * ignored. */
static void snd_cs4231_capture_callback(struct snd_cs4231 *chip)
{
	if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE))
		return;
	snd_pcm_period_elapsed(chip->capture_substream);
	snd_cs4231_advance_dma(&chip->c_dma, chip->capture_substream,
			       &chip->c_periods_sent);
}
/*
 * Current playback position: the DMA engine's byte address relative
 * to the start of the DMA buffer, converted to frames.  Reports 0
 * while playback is not enabled.
 */
static snd_pcm_uframes_t snd_cs4231_playback_pointer(
			struct snd_pcm_substream *substream)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
	struct cs4231_dma_control *dma = &chip->p_dma;
	size_t pos = 0;

	if (chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE) {
		pos = dma->address(dma);
		if (pos != 0)
			pos -= substream->runtime->dma_addr;
	}
	return bytes_to_frames(substream->runtime, pos);
}
/*
 * Current capture position: the DMA engine's byte address relative
 * to the start of the DMA buffer, converted to frames.  Reports 0
 * while capture is not enabled.
 */
static snd_pcm_uframes_t snd_cs4231_capture_pointer(
			struct snd_pcm_substream *substream)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
	struct cs4231_dma_control *dma = &chip->c_dma;
	size_t pos = 0;

	if (chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE) {
		pos = dma->address(dma);
		if (pos != 0)
			pos -= substream->runtime->dma_addr;
	}
	return bytes_to_frames(substream->runtime, pos);
}
/*
 * Probe the codec: wait (up to 50 x 2 ms) for the chip to leave its
 * INIT state, switch it to MODE2, and verify the chip ID (0x0a).
 * On success, seed the register image with sane defaults, download
 * all 32 registers, and run one MCE cycle to calibrate.
 * Returns 0 on success, -ENODEV if no CS4231 answers.
 */
static int __devinit snd_cs4231_probe(struct snd_cs4231 *chip)
{
	unsigned long flags;
	int i;
	int id = 0;
	int vers = 0;
	unsigned char *ptr;

	for (i = 0; i < 50; i++) {
		mb();
		if (__cs4231_readb(chip, CS4231U(chip, REGSEL)) & CS4231_INIT)
			msleep(2);	/* still initializing - wait */
		else {
			spin_lock_irqsave(&chip->lock, flags);
			snd_cs4231_out(chip, CS4231_MISC_INFO, CS4231_MODE2);
			id = snd_cs4231_in(chip, CS4231_MISC_INFO) & 0x0f;
			vers = snd_cs4231_in(chip, CS4231_VERSION);
			spin_unlock_irqrestore(&chip->lock, flags);
			if (id == 0x0a)
				break;	/* this is valid value */
		}
	}
	snd_printdd("cs4231: port = %p, id = 0x%x\n", chip->port, id);
	if (id != 0x0a)
		return -ENODEV;	/* no valid device found */

	spin_lock_irqsave(&chip->lock, flags);
	/* clear any pendings IRQ */
	__cs4231_readb(chip, CS4231U(chip, STATUS));
	__cs4231_writeb(chip, 0, CS4231U(chip, STATUS));
	mb();
	spin_unlock_irqrestore(&chip->lock, flags);

	/* Default register image: MODE2, dual DMA, timer features. */
	chip->image[CS4231_MISC_INFO] = CS4231_MODE2;
	chip->image[CS4231_IFACE_CTRL] =
		chip->image[CS4231_IFACE_CTRL] & ~CS4231_SINGLE_DMA;
	chip->image[CS4231_ALT_FEATURE_1] = 0x80;
	chip->image[CS4231_ALT_FEATURE_2] = 0x01;
	if (vers & 0x20)
		chip->image[CS4231_ALT_FEATURE_2] |= 0x02;

	ptr = (unsigned char *) &chip->image;

	snd_cs4231_mce_down(chip);

	spin_lock_irqsave(&chip->lock, flags);
	for (i = 0; i < 32; i++)	/* ok.. fill all CS4231 registers */
		snd_cs4231_out(chip, i, *ptr++);
	spin_unlock_irqrestore(&chip->lock, flags);

	snd_cs4231_mce_up(chip);
	snd_cs4231_mce_down(chip);

	mdelay(2);

	return 0;	/* all things are ok.. */
}
/* Playback capabilities advertised to the ALSA PCM core. */
static struct snd_pcm_hardware snd_cs4231_playback = {
	.info			= SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_INTERLEAVED |
				  SNDRV_PCM_INFO_MMAP_VALID |
				  SNDRV_PCM_INFO_SYNC_START,
	.formats		= SNDRV_PCM_FMTBIT_MU_LAW |
				  SNDRV_PCM_FMTBIT_A_LAW |
				  SNDRV_PCM_FMTBIT_IMA_ADPCM |
				  SNDRV_PCM_FMTBIT_U8 |
				  SNDRV_PCM_FMTBIT_S16_LE |
				  SNDRV_PCM_FMTBIT_S16_BE,
	/* RATE_KNOT: the chip supports a fixed list of rates, not a
	 * continuous range. */
	.rates			= SNDRV_PCM_RATE_KNOT |
				  SNDRV_PCM_RATE_8000_48000,
	.rate_min		= 5510,
	.rate_max		= 48000,
	.channels_min		= 1,
	.channels_max		= 2,
	.buffer_bytes_max	= 32 * 1024,
	.period_bytes_min	= 64,
	.period_bytes_max	= 32 * 1024,
	.periods_min		= 1,
	.periods_max		= 1024,
};
/* Capture capabilities advertised to the ALSA PCM core (identical to
 * the playback set - the codec is symmetric). */
static struct snd_pcm_hardware snd_cs4231_capture = {
	.info			= SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_INTERLEAVED |
				  SNDRV_PCM_INFO_MMAP_VALID |
				  SNDRV_PCM_INFO_SYNC_START,
	.formats		= SNDRV_PCM_FMTBIT_MU_LAW |
				  SNDRV_PCM_FMTBIT_A_LAW |
				  SNDRV_PCM_FMTBIT_IMA_ADPCM |
				  SNDRV_PCM_FMTBIT_U8 |
				  SNDRV_PCM_FMTBIT_S16_LE |
				  SNDRV_PCM_FMTBIT_S16_BE,
	.rates			= SNDRV_PCM_RATE_KNOT |
				  SNDRV_PCM_RATE_8000_48000,
	.rate_min		= 5510,
	.rate_max		= 48000,
	.channels_min		= 1,
	.channels_max		= 2,
	.buffer_bytes_max	= 32 * 1024,
	.period_bytes_min	= 64,
	.period_bytes_max	= 32 * 1024,
	.periods_min		= 1,
	.periods_max		= 1024,
};
/*
 * PCM open callback for playback: publish the hardware caps, claim
 * the codec in play mode and remember the substream for the IRQ path.
 */
static int snd_cs4231_playback_open(struct snd_pcm_substream *substream)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	runtime->hw = snd_cs4231_playback;

	err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
	if (err < 0) {
		/* NOTE(review): freeing runtime->dma_area here looks
		 * suspicious - at open time no pages have been allocated
		 * yet (hw_params does that later); confirm against the
		 * ALSA core before relying on it. */
		snd_free_pages(runtime->dma_area, runtime->dma_bytes);
		return err;
	}
	chip->playback_substream = substream;
	chip->p_periods_sent = 0;
	snd_pcm_set_sync(substream);
	snd_cs4231_xrate(runtime);

	return 0;
}
/*
 * PCM open callback for capture: publish the hardware caps, claim
 * the codec in record mode and remember the substream for the IRQ path.
 */
static int snd_cs4231_capture_open(struct snd_pcm_substream *substream)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	runtime->hw = snd_cs4231_capture;

	err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
	if (err < 0) {
		/* NOTE(review): see the matching comment in
		 * snd_cs4231_playback_open() - freeing pages on open
		 * failure looks suspicious; confirm before relying on it. */
		snd_free_pages(runtime->dma_area, runtime->dma_bytes);
		return err;
	}
	chip->capture_substream = substream;
	chip->c_periods_sent = 0;
	snd_pcm_set_sync(substream);
	snd_cs4231_xrate(runtime);

	return 0;
}
/* PCM close callback for playback: release the play-mode claim and
 * forget the substream. */
static int snd_cs4231_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_cs4231 *cs = snd_pcm_substream_chip(substream);

	snd_cs4231_close(cs, CS4231_MODE_PLAY);
	cs->playback_substream = NULL;
	return 0;
}
/* PCM close callback for capture: release the record-mode claim and
 * forget the substream. */
static int snd_cs4231_capture_close(struct snd_pcm_substream *substream)
{
	struct snd_cs4231 *cs = snd_pcm_substream_chip(substream);

	snd_cs4231_close(cs, CS4231_MODE_RECORD);
	cs->capture_substream = NULL;
	return 0;
}
/* XXX We can do some power-management, in particular on EBUS using
* XXX the audio AUXIO register...
*/
/* ALSA PCM operation table for the playback stream. */
static struct snd_pcm_ops snd_cs4231_playback_ops = {
	.open		= snd_cs4231_playback_open,
	.close		= snd_cs4231_playback_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= snd_cs4231_playback_hw_params,
	.hw_free	= snd_pcm_lib_free_pages,
	.prepare	= snd_cs4231_playback_prepare,
	.trigger	= snd_cs4231_trigger,
	.pointer	= snd_cs4231_playback_pointer,
};
/* ALSA PCM operation table for the capture stream. */
static struct snd_pcm_ops snd_cs4231_capture_ops = {
	.open		= snd_cs4231_capture_open,
	.close		= snd_cs4231_capture_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= snd_cs4231_capture_hw_params,
	.hw_free	= snd_pcm_lib_free_pages,
	.prepare	= snd_cs4231_capture_prepare,
	.trigger	= snd_cs4231_trigger,
	.pointer	= snd_cs4231_capture_pointer,
};
/*
 * Create the PCM device (one playback and one capture substream),
 * wire up the operation tables and pre-allocate DMA buffer pages.
 */
static int __devinit snd_cs4231_pcm(struct snd_card *card)
{
	struct snd_cs4231 *chip = card->private_data;
	struct snd_pcm *pcm;
	int rc;

	rc = snd_pcm_new(card, "CS4231", 0, 1, 1, &pcm);
	if (rc < 0)
		return rc;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
			&snd_cs4231_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
			&snd_cs4231_capture_ops);

	/* global setup */
	strcpy(pcm->name, "CS4231");
	pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
	pcm->private_data = chip;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      &chip->op->dev,
					      64 * 1024, 128 * 1024);

	chip->pcm = pcm;
	return 0;
}
/*
 * Register the on-chip timer with the ALSA timer core.
 */
static int __devinit snd_cs4231_timer(struct snd_card *card)
{
	struct snd_cs4231 *chip = card->private_data;
	struct snd_timer *timer;
	struct snd_timer_id tid = {
		.dev_class	= SNDRV_TIMER_CLASS_CARD,
		.dev_sclass	= SNDRV_TIMER_SCLASS_NONE,
		.card		= card->number,
		.device		= 0,
		.subdevice	= 0,
	};
	int rc;

	rc = snd_timer_new(card, "CS4231", &tid, &timer);
	if (rc < 0)
		return rc;
	strcpy(timer->name, "CS4231");
	timer->private_data = chip;
	timer->hw = snd_cs4231_timer_table;
	chip->timer = timer;
	return 0;
}
/*
* MIXER part
*/
/*
 * Info callback for the capture-source enum: two channels, four
 * possible sources.  The requested item index is clamped to the
 * valid range before the name is copied out.
 */
static int snd_cs4231_info_mux(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_info *uinfo)
{
	static char *texts[4] = {
		"Line", "CD", "Mic", "Mix"
	};
	unsigned int item;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 2;
	uinfo->value.enumerated.items = 4;

	item = uinfo->value.enumerated.item;
	if (item > 3)
		item = 3;
	uinfo->value.enumerated.item = item;
	strcpy(uinfo->value.enumerated.name, texts[item]);
	return 0;
}
/*
 * Get callback for the capture-source enum.  The source index lives
 * in the top two bits (CS4231_MIXS_ALL) of each input register.
 */
static int snd_cs4231_get_mux(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	unsigned char left, right;

	spin_lock_irqsave(&chip->lock, flags);
	left = chip->image[CS4231_LEFT_INPUT];
	right = chip->image[CS4231_RIGHT_INPUT];
	spin_unlock_irqrestore(&chip->lock, flags);

	ucontrol->value.enumerated.item[0] = (left & CS4231_MIXS_ALL) >> 6;
	ucontrol->value.enumerated.item[1] = (right & CS4231_MIXS_ALL) >> 6;
	return 0;
}
/*
 * Put callback for the capture-source enum.  Validates both channel
 * indices, merges them into the top bits of the input registers and
 * reports whether anything changed.
 */
static int snd_cs4231_put_mux(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	unsigned short lval, rval;
	int changed;

	if (ucontrol->value.enumerated.item[0] > 3 ||
	    ucontrol->value.enumerated.item[1] > 3)
		return -EINVAL;
	lval = ucontrol->value.enumerated.item[0] << 6;
	rval = ucontrol->value.enumerated.item[1] << 6;

	spin_lock_irqsave(&chip->lock, flags);
	lval |= chip->image[CS4231_LEFT_INPUT] & ~CS4231_MIXS_ALL;
	rval |= chip->image[CS4231_RIGHT_INPUT] & ~CS4231_MIXS_ALL;
	changed = lval != chip->image[CS4231_LEFT_INPUT] ||
		  rval != chip->image[CS4231_RIGHT_INPUT];
	snd_cs4231_out(chip, CS4231_LEFT_INPUT, lval);
	snd_cs4231_out(chip, CS4231_RIGHT_INPUT, rval);
	spin_unlock_irqrestore(&chip->lock, flags);

	return changed;
}
/*
 * Info callback for single-channel controls.  A one-bit mask means
 * an on/off switch; anything wider is an integer volume.
 */
static int snd_cs4231_info_single(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	int mask = (kcontrol->private_value >> 16) & 0xff;

	if (mask == 1)
		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	else
		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = mask;
	return 0;
}
/*
 * Get callback for single-channel controls.  Unpacks reg/shift/mask/
 * invert from private_value and reads the cached register image.
 */
static int snd_cs4231_get_single(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int reg = kcontrol->private_value & 0xff;
	int shift = (kcontrol->private_value >> 8) & 0xff;
	int mask = (kcontrol->private_value >> 16) & 0xff;
	int invert = (kcontrol->private_value >> 24) & 0xff;
	long val;

	spin_lock_irqsave(&chip->lock, flags);
	val = (chip->image[reg] >> shift) & mask;
	spin_unlock_irqrestore(&chip->lock, flags);

	if (invert)
		val = mask - val;
	ucontrol->value.integer.value[0] = val;
	return 0;
}
/*
 * Put callback for single-channel controls.  Merges the new value
 * into the register image, writes it to the codec, and reports
 * whether the value actually changed.
 */
static int snd_cs4231_put_single(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int reg = kcontrol->private_value & 0xff;
	int shift = (kcontrol->private_value >> 8) & 0xff;
	int mask = (kcontrol->private_value >> 16) & 0xff;
	int invert = (kcontrol->private_value >> 24) & 0xff;
	unsigned short bits;
	int changed;

	bits = ucontrol->value.integer.value[0] & mask;
	if (invert)
		bits = mask - bits;
	bits <<= shift;

	spin_lock_irqsave(&chip->lock, flags);
	bits |= chip->image[reg] & ~(mask << shift);
	changed = bits != chip->image[reg];
	snd_cs4231_out(chip, reg, bits);
	spin_unlock_irqrestore(&chip->lock, flags);

	return changed;
}
/*
 * Info callback for stereo controls.  A one-bit mask means a stereo
 * switch; anything wider is an integer volume pair.
 */
static int snd_cs4231_info_double(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	int mask = (kcontrol->private_value >> 24) & 0xff;

	if (mask == 1)
		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	else
		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = mask;
	return 0;
}
/*
 * Get callback for stereo controls.  Unpacks the two register
 * addresses, shifts, mask and invert flag from private_value and
 * reads both channels from the cached register image.
 */
static int snd_cs4231_get_double(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int left_reg = kcontrol->private_value & 0xff;
	int right_reg = (kcontrol->private_value >> 8) & 0xff;
	int shift_left = (kcontrol->private_value >> 16) & 0x07;
	int shift_right = (kcontrol->private_value >> 19) & 0x07;
	int mask = (kcontrol->private_value >> 24) & 0xff;
	int invert = (kcontrol->private_value >> 22) & 1;
	long lv, rv;

	spin_lock_irqsave(&chip->lock, flags);
	lv = (chip->image[left_reg] >> shift_left) & mask;
	rv = (chip->image[right_reg] >> shift_right) & mask;
	spin_unlock_irqrestore(&chip->lock, flags);

	if (invert) {
		lv = mask - lv;
		rv = mask - rv;
	}
	ucontrol->value.integer.value[0] = lv;
	ucontrol->value.integer.value[1] = rv;
	return 0;
}
/*
 * Put callback for stereo controls.  Merges the new left/right
 * values into their register images, writes both registers to the
 * codec, and reports whether either channel changed.
 */
static int snd_cs4231_put_double(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int left_reg = kcontrol->private_value & 0xff;
	int right_reg = (kcontrol->private_value >> 8) & 0xff;
	int shift_left = (kcontrol->private_value >> 16) & 0x07;
	int shift_right = (kcontrol->private_value >> 19) & 0x07;
	int mask = (kcontrol->private_value >> 24) & 0xff;
	int invert = (kcontrol->private_value >> 22) & 1;
	unsigned short lv, rv;
	int changed;

	lv = ucontrol->value.integer.value[0] & mask;
	rv = ucontrol->value.integer.value[1] & mask;
	if (invert) {
		lv = mask - lv;
		rv = mask - rv;
	}
	lv <<= shift_left;
	rv <<= shift_right;

	spin_lock_irqsave(&chip->lock, flags);
	lv |= chip->image[left_reg] & ~(mask << shift_left);
	rv |= chip->image[right_reg] & ~(mask << shift_right);
	changed = (lv != chip->image[left_reg]) ||
		  (rv != chip->image[right_reg]);
	snd_cs4231_out(chip, left_reg, lv);
	snd_cs4231_out(chip, right_reg, rv);
	spin_unlock_irqrestore(&chip->lock, flags);

	return changed;
}
/*
 * CS4231_SINGLE - build a snd_kcontrol_new for a mono control.
 * private_value packs: reg (bits 0-7), shift (8-15), mask (16-23),
 * invert flag (24-31).
 */
#define CS4231_SINGLE(xname, xindex, reg, shift, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), .index = (xindex), \
  .info = snd_cs4231_info_single, \
  .get = snd_cs4231_get_single, .put = snd_cs4231_put_single, \
  .private_value = (reg) | ((shift) << 8) | ((mask) << 16) | ((invert) << 24) }

/*
 * CS4231_DOUBLE - build a snd_kcontrol_new for a stereo control.
 * private_value packs: left_reg (bits 0-7), right_reg (8-15),
 * shift_left (16-18), shift_right (19-21), invert (22), mask (24-31).
 */
#define CS4231_DOUBLE(xname, xindex, left_reg, right_reg, shift_left, \
		      shift_right, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), .index = (xindex), \
  .info = snd_cs4231_info_double, \
  .get = snd_cs4231_get_double, .put = snd_cs4231_put_double, \
  .private_value = (left_reg) | ((right_reg) << 8) | ((shift_left) << 16) | \
		   ((shift_right) << 19) | ((mask) << 24) | ((invert) << 22) }
/* Mixer control table registered by snd_cs4231_mixer(). */
static struct snd_kcontrol_new snd_cs4231_controls[] __devinitdata = {
CS4231_DOUBLE("PCM Playback Switch", 0, CS4231_LEFT_OUTPUT,
		CS4231_RIGHT_OUTPUT, 7, 7, 1, 1),
CS4231_DOUBLE("PCM Playback Volume", 0, CS4231_LEFT_OUTPUT,
		CS4231_RIGHT_OUTPUT, 0, 0, 63, 1),
CS4231_DOUBLE("Line Playback Switch", 0, CS4231_LEFT_LINE_IN,
		CS4231_RIGHT_LINE_IN, 7, 7, 1, 1),
CS4231_DOUBLE("Line Playback Volume", 0, CS4231_LEFT_LINE_IN,
		CS4231_RIGHT_LINE_IN, 0, 0, 31, 1),
CS4231_DOUBLE("Aux Playback Switch", 0, CS4231_AUX1_LEFT_INPUT,
		CS4231_AUX1_RIGHT_INPUT, 7, 7, 1, 1),
CS4231_DOUBLE("Aux Playback Volume", 0, CS4231_AUX1_LEFT_INPUT,
		CS4231_AUX1_RIGHT_INPUT, 0, 0, 31, 1),
CS4231_DOUBLE("Aux Playback Switch", 1, CS4231_AUX2_LEFT_INPUT,
		CS4231_AUX2_RIGHT_INPUT, 7, 7, 1, 1),
CS4231_DOUBLE("Aux Playback Volume", 1, CS4231_AUX2_LEFT_INPUT,
		CS4231_AUX2_RIGHT_INPUT, 0, 0, 31, 1),
CS4231_SINGLE("Mono Playback Switch", 0, CS4231_MONO_CTRL, 7, 1, 1),
CS4231_SINGLE("Mono Playback Volume", 0, CS4231_MONO_CTRL, 0, 15, 1),
CS4231_SINGLE("Mono Output Playback Switch", 0, CS4231_MONO_CTRL, 6, 1, 1),
CS4231_SINGLE("Mono Output Playback Bypass", 0, CS4231_MONO_CTRL, 5, 1, 0),
CS4231_DOUBLE("Capture Volume", 0, CS4231_LEFT_INPUT, CS4231_RIGHT_INPUT, 0, 0,
		15, 0),
{
	.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
	.name	= "Capture Source",
	.info	= snd_cs4231_info_mux,
	.get	= snd_cs4231_get_mux,
	.put	= snd_cs4231_put_mux,
},
CS4231_DOUBLE("Mic Boost", 0, CS4231_LEFT_INPUT, CS4231_RIGHT_INPUT, 5, 5,
		1, 0),
CS4231_SINGLE("Loopback Capture Switch", 0, CS4231_LOOPBACK, 0, 1, 0),
CS4231_SINGLE("Loopback Capture Volume", 0, CS4231_LOOPBACK, 2, 63, 1),
/* SPARC specific uses of XCTL{0,1} general purpose outputs.  */
CS4231_SINGLE("Line Out Switch", 0, CS4231_PIN_CTRL, 6, 1, 1),
CS4231_SINGLE("Headphone Out Switch", 0, CS4231_PIN_CTRL, 7, 1, 1)
};
/*
 * Register every mixer control from snd_cs4231_controls[] with the
 * card.  Fails fast on the first snd_ctl_add() error.
 */
static int __devinit snd_cs4231_mixer(struct snd_card *card)
{
	struct snd_cs4231 *chip = card->private_data;
	int rc, i;

	if (snd_BUG_ON(!chip || !chip->pcm))
		return -EINVAL;

	strcpy(card->mixername, chip->pcm->name);

	for (i = 0; i < ARRAY_SIZE(snd_cs4231_controls); i++) {
		rc = snd_ctl_add(card,
				 snd_ctl_new1(&snd_cs4231_controls[i], chip));
		if (rc < 0)
			return rc;
	}
	return 0;
}
/* Index of the next card slot to probe (shared by the attach paths). */
static int dev;

/*
 * Common first stage of device attach: allocate the snd_card with a
 * struct snd_cs4231 as private data and fill in the identification
 * strings.  Returns 0 with *rcard set on success, -ENODEV when all
 * card slots are used, -ENOENT when this slot is disabled via the
 * "enable" module parameter.
 */
static int __devinit cs4231_attach_begin(struct snd_card **rcard)
{
	struct snd_card *card;
	struct snd_cs4231 *chip;
	int err;

	*rcard = NULL;

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}

	err = snd_card_create(index[dev], id[dev], THIS_MODULE,
			      sizeof(struct snd_cs4231), &card);
	if (err < 0)
		return err;

	strcpy(card->driver, "CS4231");
	strcpy(card->shortname, "Sun CS4231");

	chip = card->private_data;
	chip->card = card;

	*rcard = card;
	return 0;
}
/*
 * Common last stage of device attach: create the PCM, mixer and
 * timer devices, register the card and advance the slot counter.
 * On any failure the whole card is freed and the error returned.
 */
static int __devinit cs4231_attach_finish(struct snd_card *card)
{
	struct snd_cs4231 *chip = card->private_data;
	int err;

	err = snd_cs4231_pcm(card);
	if (err < 0)
		goto out_err;

	err = snd_cs4231_mixer(card);
	if (err < 0)
		goto out_err;

	err = snd_cs4231_timer(card);
	if (err < 0)
		goto out_err;

	err = snd_card_register(card);
	if (err < 0)
		goto out_err;

	dev_set_drvdata(&chip->op->dev, chip);

	dev++;	/* next attach uses the following slot */
	return 0;

out_err:
	snd_card_free(card);	/* also tears down pcm/mixer/timer */
	return err;
}
#ifdef SBUS_SUPPORT
/*
 * SBUS interrupt handler: dispatch APC DMA completions to the
 * playback/capture callbacks, forward timer ticks to the ALSA timer
 * core, count capture overranges, and acknowledge both the APC and
 * the codec interrupt sources.
 */
static irqreturn_t snd_cs4231_sbus_interrupt(int irq, void *dev_id)
{
	unsigned long flags;
	unsigned char status;
	u32 csr;
	struct snd_cs4231 *chip = dev_id;

	/* This IRQ was not raised by the cs4231 - not ours. */
	if (!(__cs4231_readb(chip, CS4231U(chip, STATUS)) & CS4231_GLOBALIRQ))
		return IRQ_NONE;

	/* ACK the APC interrupt. */
	csr = sbus_readl(chip->port + APCCSR);
	sbus_writel(csr, chip->port + APCCSR);

	if ((csr & APC_PDMA_READY) &&
	    (csr & APC_PLAY_INT) &&
	    (csr & APC_XINT_PNVA) &&
	    !(csr & APC_XINT_EMPT))
		snd_cs4231_play_callback(chip);

	if ((csr & APC_CDMA_READY) &&
	    (csr & APC_CAPT_INT) &&
	    (csr & APC_XINT_CNVA) &&
	    !(csr & APC_XINT_EMPT))
		snd_cs4231_capture_callback(chip);

	status = snd_cs4231_in(chip, CS4231_IRQ_STATUS);

	if (status & CS4231_TIMER_IRQ) {
		if (chip->timer)
			snd_timer_interrupt(chip->timer, chip->timer->sticks);
	}

	if ((status & CS4231_RECORD_IRQ) && (csr & APC_CDMA_READY))
		snd_cs4231_overrange(chip);

	/* ACK the CS4231 interrupt. */
	spin_lock_irqsave(&chip->lock, flags);
	/* NOTE(review): the mask "~CS4231_ALL_IRQS | ~status" always
	 * equals ~(CS4231_ALL_IRQS & status) by De Morgan - verify this
	 * is the intended ack mask and not a typo for "& ~status". */
	snd_cs4231_outm(chip, CS4231_IRQ_STATUS, ~CS4231_ALL_IRQS | ~status, 0);
	spin_unlock_irqrestore(&chip->lock, flags);

	return IRQ_HANDLED;
}
/*
* SBUS DMA routines
*/
/*
 * sbus_dma_request - queue the next DMA buffer on the APC engine.
 * Returns 0 on success, -EINVAL if @len is too large or the engine is
 * not ready for this direction, -EBUSY if the next-address slot is
 * still occupied.
 */
static int sbus_dma_request(struct cs4231_dma_control *dma_cont,
                            dma_addr_t bus_addr, size_t len)
{
        struct sbus_dma_info *info = &dma_cont->sbus_info;
        unsigned long flags;
        u32 csr, ready_bit, nva_bit;
        int ret;

        /* The APC next-count register is only 24 bits wide. */
        if (len >= (1 << 24))
                return -EINVAL;

        ready_bit = (info->dir == APC_PLAY) ? APC_PDMA_READY : APC_CDMA_READY;
        nva_bit = (info->dir == APC_PLAY) ? APC_XINT_PNVA : APC_XINT_CNVA;

        spin_lock_irqsave(&info->lock, flags);
        csr = sbus_readl(info->regs + APCCSR);
        if (!(csr & ready_bit)) {
                ret = -EINVAL;
        } else if (!(csr & nva_bit)) {
                ret = -EBUSY;
        } else {
                /* Program next virtual address and byte count. */
                sbus_writel(bus_addr, info->regs + info->dir + APCNVA);
                sbus_writel(len, info->regs + info->dir + APCNC);
                ret = 0;
        }
        spin_unlock_irqrestore(&info->lock, flags);
        return ret;
}
/*
 * sbus_dma_prepare - unmask the APC interrupt sources for one direction.
 * The direction comes from the embedded sbus_dma_info; @d is unused here.
 */
static void sbus_dma_prepare(struct cs4231_dma_control *dma_cont, int d)
{
        struct sbus_dma_info *info = &dma_cont->sbus_info;
        unsigned long flags;
        u32 enable_bits;

        if (info->dir == APC_RECORD)
                enable_bits = APC_GENL_INT | APC_CAPT_INT | APC_XINT_ENA |
                              APC_XINT_CAPT | APC_XINT_CEMP | APC_XINT_GENL;
        else
                enable_bits = APC_GENL_INT | APC_PLAY_INT | APC_XINT_ENA |
                              APC_XINT_PLAY | APC_XINT_PEMP | APC_XINT_GENL |
                              APC_XINT_PENA;

        spin_lock_irqsave(&info->lock, flags);
        sbus_writel(sbus_readl(info->regs + APCCSR) | enable_bits,
                    info->regs + APCCSR);
        spin_unlock_irqrestore(&info->lock, flags);
}
/*
 * sbus_dma_enable - pause/unpause the APC DMA engine for one direction.
 * When disabling, the next-address/next-count (and, for play, the current
 * address/count) registers are zeroed first, with a settle delay, before
 * the pause bit is set and the ready bit cleared.
 */
static void sbus_dma_enable(struct cs4231_dma_control *dma_cont, int on)
{
unsigned long flags;
u32 csr, shift;
struct sbus_dma_info *base = &dma_cont->sbus_info;
spin_lock_irqsave(&base->lock, flags);
if (!on) {
sbus_writel(0, base->regs + base->dir + APCNC);
sbus_writel(0, base->regs + base->dir + APCNVA);
if (base->dir == APC_PLAY) {
sbus_writel(0, base->regs + base->dir + APCC);
sbus_writel(0, base->regs + base->dir + APCVA);
}
/* Let the engine drain before touching the pause bit. */
udelay(1200);
}
csr = sbus_readl(base->regs + APCCSR);
/* NOTE(review): assumes the play-side PAUSE/READY bits sit exactly one
 * position above their capture counterparts — verify against the APC
 * register definitions. */
shift = 0;
if (base->dir == APC_PLAY)
shift = 1;
if (on)
csr &= ~(APC_CPAUSE << shift);
else
csr |= (APC_CPAUSE << shift);
sbus_writel(csr, base->regs + APCCSR);
if (on)
csr |= (APC_CDMA_READY << shift);
else
csr &= ~(APC_CDMA_READY << shift);
sbus_writel(csr, base->regs + APCCSR);
spin_unlock_irqrestore(&base->lock, flags);
}
/* Current DMA address for this direction, read straight from the APC. */
static unsigned int sbus_dma_addr(struct cs4231_dma_control *dma_cont)
{
        struct sbus_dma_info *info = &dma_cont->sbus_info;

        return sbus_readl(info->regs + info->dir + APCVA);
}
/*
* Init and exit routines
*/
/*
 * snd_cs4231_sbus_free - release IRQ and register mapping of an SBUS chip.
 * Safe to call on a partially-initialized chip: each resource is only
 * released if it was actually acquired.
 */
static int snd_cs4231_sbus_free(struct snd_cs4231 *chip)
{
struct platform_device *op = chip->op;
if (chip->irq[0])
free_irq(chip->irq[0], chip);
if (chip->port)
of_iounmap(&op->resource[0], chip->port, chip->regs_size);
return 0;
}
/* snd_device dev_free hook: forward to the SBUS resource teardown. */
static int snd_cs4231_sbus_dev_free(struct snd_device *device)
{
        struct snd_cs4231 *chip = device->device_data;

        return snd_cs4231_sbus_free(chip);
}
/* Low-level device ops: frees SBUS resources when the card is destroyed. */
static struct snd_device_ops snd_cs4231_sbus_dev_ops = {
.dev_free = snd_cs4231_sbus_dev_free,
};
/*
 * snd_cs4231_sbus_create - set up an SBUS-attached CS4231 codec.
 *
 * Maps the chip registers, wires the APC DMA callbacks, grabs the (shared)
 * SBUS interrupt, probes and initializes the codec, and finally attaches
 * it to @card as a low-level ALSA device so snd_cs4231_sbus_free() runs
 * when the card is destroyed.
 *
 * Returns 0 on success or a negative errno; partially acquired resources
 * are released on every failure path.
 */
static int __devinit snd_cs4231_sbus_create(struct snd_card *card,
                                            struct platform_device *op,
                                            int dev)
{
        struct snd_cs4231 *chip = card->private_data;
        int err;

        spin_lock_init(&chip->lock);
        spin_lock_init(&chip->c_dma.sbus_info.lock);
        spin_lock_init(&chip->p_dma.sbus_info.lock);
        mutex_init(&chip->mce_mutex);
        mutex_init(&chip->open_mutex);
        chip->op = op;
        chip->regs_size = resource_size(&op->resource[0]);
        memcpy(&chip->image, &snd_cs4231_original_image,
               sizeof(snd_cs4231_original_image));

        chip->port = of_ioremap(&op->resource[0], 0,
                                chip->regs_size, "cs4231");
        if (!chip->port) {
                snd_printdd("cs4231-%d: Unable to map chip registers.\n", dev);
                return -EIO;
        }

        /* Both DMA directions live in the same APC register block. */
        chip->c_dma.sbus_info.regs = chip->port;
        chip->p_dma.sbus_info.regs = chip->port;
        chip->c_dma.sbus_info.dir = APC_RECORD;
        chip->p_dma.sbus_info.dir = APC_PLAY;

        chip->p_dma.prepare = sbus_dma_prepare;
        chip->p_dma.enable = sbus_dma_enable;
        chip->p_dma.request = sbus_dma_request;
        chip->p_dma.address = sbus_dma_addr;

        chip->c_dma.prepare = sbus_dma_prepare;
        chip->c_dma.enable = sbus_dma_enable;
        chip->c_dma.request = sbus_dma_request;
        chip->c_dma.address = sbus_dma_addr;

        if (request_irq(op->archdata.irqs[0], snd_cs4231_sbus_interrupt,
                        IRQF_SHARED, "cs4231", chip)) {
                snd_printdd("cs4231-%d: Unable to grab SBUS IRQ %d\n",
                            dev, op->archdata.irqs[0]);
                snd_cs4231_sbus_free(chip);
                return -EBUSY;
        }
        chip->irq[0] = op->archdata.irqs[0];

        if (snd_cs4231_probe(chip) < 0) {
                snd_cs4231_sbus_free(chip);
                return -ENODEV;
        }
        snd_cs4231_init(chip);

        /* Kernel style: keep the assignment out of the condition. */
        err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
                             chip, &snd_cs4231_sbus_dev_ops);
        if (err < 0) {
                snd_cs4231_sbus_free(chip);
                return err;
        }

        return 0;
}
/*
 * cs4231_sbus_probe - platform probe for SBUS-attached codecs.
 * Creates the card shell, fills in the human-readable long name, builds
 * the SBUS chip and finishes registration.  Returns 0 or negative errno.
 */
static int __devinit cs4231_sbus_probe(struct platform_device *op)
{
        struct resource *rp = &op->resource[0];
        struct snd_card *card;
        int err;

        err = cs4231_attach_begin(&card);
        if (err)
                return err;

        /* snprintf: longname is a fixed-size array; sprintf could overflow. */
        snprintf(card->longname, sizeof(card->longname),
                 "%s at 0x%02lx:0x%016Lx, irq %d",
                 card->shortname,
                 rp->flags & 0xffL,
                 (unsigned long long)rp->start,
                 op->archdata.irqs[0]);

        err = snd_cs4231_sbus_create(card, op, dev);
        if (err < 0) {
                snd_card_free(card);
                return err;
        }

        return cs4231_attach_finish(card);
}
#endif
#ifdef EBUS_SUPPORT
/* EBUS DMA completion hook: forward play events to the common handler. */
static void snd_cs4231_ebus_play_callback(struct ebus_dma_info *p, int event,
void *cookie)
{
struct snd_cs4231 *chip = cookie;
snd_cs4231_play_callback(chip);
}
/* EBUS DMA completion hook: forward capture events to the common handler. */
static void snd_cs4231_ebus_capture_callback(struct ebus_dma_info *p,
int event, void *cookie)
{
struct snd_cs4231 *chip = cookie;
snd_cs4231_capture_callback(chip);
}
/*
* EBUS DMA wrappers
*/
/* Thin adapters from the cs4231_dma_control ops to the generic EBUS DMA
 * helpers; they only unwrap the embedded ebus_dma_info. */
static int _ebus_dma_request(struct cs4231_dma_control *dma_cont,
dma_addr_t bus_addr, size_t len)
{
return ebus_dma_request(&dma_cont->ebus_info, bus_addr, len);
}
static void _ebus_dma_enable(struct cs4231_dma_control *dma_cont, int on)
{
ebus_dma_enable(&dma_cont->ebus_info, on);
}
static void _ebus_dma_prepare(struct cs4231_dma_control *dma_cont, int dir)
{
ebus_dma_prepare(&dma_cont->ebus_info, dir);
}
static unsigned int _ebus_dma_addr(struct cs4231_dma_control *dma_cont)
{
return ebus_dma_addr(&dma_cont->ebus_info);
}
/*
* Init and exit routines
*/
/*
 * snd_cs4231_ebus_free - release DMA engines and register mappings of an
 * EBUS chip.  Safe on partial init: each region is unmapped only if its
 * mapping succeeded (all three windows are 0x10 bytes).
 */
static int snd_cs4231_ebus_free(struct snd_cs4231 *chip)
{
struct platform_device *op = chip->op;
if (chip->c_dma.ebus_info.regs) {
ebus_dma_unregister(&chip->c_dma.ebus_info);
of_iounmap(&op->resource[2], chip->c_dma.ebus_info.regs, 0x10);
}
if (chip->p_dma.ebus_info.regs) {
ebus_dma_unregister(&chip->p_dma.ebus_info);
of_iounmap(&op->resource[1], chip->p_dma.ebus_info.regs, 0x10);
}
if (chip->port)
of_iounmap(&op->resource[0], chip->port, 0x10);
return 0;
}
/* snd_device dev_free hook: forward to the EBUS resource teardown. */
static int snd_cs4231_ebus_dev_free(struct snd_device *device)
{
        struct snd_cs4231 *chip = device->device_data;

        return snd_cs4231_ebus_free(chip);
}
/* Low-level device ops: frees EBUS resources when the card is destroyed. */
static struct snd_device_ops snd_cs4231_ebus_dev_ops = {
.dev_free = snd_cs4231_ebus_dev_free,
};
/*
 * snd_cs4231_ebus_create - set up an EBUS-attached CS4231 codec.
 *
 * Unlike the SBUS variant, play and capture use two separate EBUS DMA
 * engines (resources 1 and 2, IRQs 0 and 1).  Maps all three register
 * windows, registers and enables both DMA engines, probes/initializes
 * the codec, and attaches it to @card as a low-level ALSA device.
 *
 * Returns 0 on success or a negative errno; snd_cs4231_ebus_free()
 * handles partial teardown on every failure path.
 */
static int __devinit snd_cs4231_ebus_create(struct snd_card *card,
                                            struct platform_device *op,
                                            int dev)
{
        struct snd_cs4231 *chip = card->private_data;
        int err;

        spin_lock_init(&chip->lock);
        spin_lock_init(&chip->c_dma.ebus_info.lock);
        spin_lock_init(&chip->p_dma.ebus_info.lock);
        mutex_init(&chip->mce_mutex);
        mutex_init(&chip->open_mutex);
        chip->flags |= CS4231_FLAG_EBUS;
        chip->op = op;
        memcpy(&chip->image, &snd_cs4231_original_image,
               sizeof(snd_cs4231_original_image));

        strcpy(chip->c_dma.ebus_info.name, "cs4231(capture)");
        chip->c_dma.ebus_info.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;
        chip->c_dma.ebus_info.callback = snd_cs4231_ebus_capture_callback;
        chip->c_dma.ebus_info.client_cookie = chip;
        chip->c_dma.ebus_info.irq = op->archdata.irqs[0];

        strcpy(chip->p_dma.ebus_info.name, "cs4231(play)");
        chip->p_dma.ebus_info.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;
        chip->p_dma.ebus_info.callback = snd_cs4231_ebus_play_callback;
        chip->p_dma.ebus_info.client_cookie = chip;
        chip->p_dma.ebus_info.irq = op->archdata.irqs[1];

        chip->p_dma.prepare = _ebus_dma_prepare;
        chip->p_dma.enable = _ebus_dma_enable;
        chip->p_dma.request = _ebus_dma_request;
        chip->p_dma.address = _ebus_dma_addr;

        chip->c_dma.prepare = _ebus_dma_prepare;
        chip->c_dma.enable = _ebus_dma_enable;
        chip->c_dma.request = _ebus_dma_request;
        chip->c_dma.address = _ebus_dma_addr;

        chip->port = of_ioremap(&op->resource[0], 0, 0x10, "cs4231");
        chip->p_dma.ebus_info.regs =
                of_ioremap(&op->resource[1], 0, 0x10, "cs4231_pdma");
        chip->c_dma.ebus_info.regs =
                of_ioremap(&op->resource[2], 0, 0x10, "cs4231_cdma");
        if (!chip->port || !chip->p_dma.ebus_info.regs ||
            !chip->c_dma.ebus_info.regs) {
                snd_cs4231_ebus_free(chip);
                snd_printdd("cs4231-%d: Unable to map chip registers.\n", dev);
                return -EIO;
        }

        if (ebus_dma_register(&chip->c_dma.ebus_info)) {
                snd_cs4231_ebus_free(chip);
                snd_printdd("cs4231-%d: Unable to register EBUS capture DMA\n",
                            dev);
                return -EBUSY;
        }
        if (ebus_dma_irq_enable(&chip->c_dma.ebus_info, 1)) {
                snd_cs4231_ebus_free(chip);
                snd_printdd("cs4231-%d: Unable to enable EBUS capture IRQ\n",
                            dev);
                return -EBUSY;
        }
        if (ebus_dma_register(&chip->p_dma.ebus_info)) {
                snd_cs4231_ebus_free(chip);
                snd_printdd("cs4231-%d: Unable to register EBUS play DMA\n",
                            dev);
                return -EBUSY;
        }
        if (ebus_dma_irq_enable(&chip->p_dma.ebus_info, 1)) {
                snd_cs4231_ebus_free(chip);
                snd_printdd("cs4231-%d: Unable to enable EBUS play IRQ\n", dev);
                return -EBUSY;
        }

        if (snd_cs4231_probe(chip) < 0) {
                snd_cs4231_ebus_free(chip);
                return -ENODEV;
        }
        snd_cs4231_init(chip);

        /* Kernel style: keep the assignment out of the condition. */
        err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
                             chip, &snd_cs4231_ebus_dev_ops);
        if (err < 0) {
                snd_cs4231_ebus_free(chip);
                return err;
        }

        return 0;
}
/*
 * cs4231_ebus_probe - platform probe for EBUS-attached codecs.
 * Creates the card shell, fills in the long name, builds the EBUS chip
 * and finishes registration.  Returns 0 or negative errno.
 */
static int __devinit cs4231_ebus_probe(struct platform_device *op)
{
        struct snd_card *card;
        int err;

        err = cs4231_attach_begin(&card);
        if (err)
                return err;

        /*
         * snprintf: longname is a fixed-size array.  The cast matches the
         * %llx specifier — resource_size_t is not 64-bit on all configs —
         * mirroring the SBUS probe path.
         */
        snprintf(card->longname, sizeof(card->longname),
                 "%s at 0x%llx, irq %d",
                 card->shortname,
                 (unsigned long long)op->resource[0].start,
                 op->archdata.irqs[0]);

        err = snd_cs4231_ebus_create(card, op, dev);
        if (err < 0) {
                snd_card_free(card);
                return err;
        }

        return cs4231_attach_finish(card);
}
#endif
/*
 * cs4231_probe - top-level platform probe.  Dispatches to the EBUS or
 * SBUS path based on the parent bus node's name in the device tree.
 * Returns -ENODEV for buses this driver was not built to support.
 */
static int __devinit cs4231_probe(struct platform_device *op)
{
#ifdef EBUS_SUPPORT
if (!strcmp(op->dev.of_node->parent->name, "ebus"))
return cs4231_ebus_probe(op);
#endif
#ifdef SBUS_SUPPORT
if (!strcmp(op->dev.of_node->parent->name, "sbus") ||
!strcmp(op->dev.of_node->parent->name, "sbi"))
return cs4231_sbus_probe(op);
#endif
return -ENODEV;
}
/*
 * cs4231_remove - platform remove hook; frees the whole card.
 * The chip pointer was stashed in drvdata by cs4231_attach_finish().
 */
static int __devexit cs4231_remove(struct platform_device *op)
{
struct snd_cs4231 *chip = dev_get_drvdata(&op->dev);
snd_card_free(chip->card);
return 0;
}
/* Device-tree match: either a bare "SUNW,CS4231" node, or an "audio" node
 * carrying the SUNW,CS4231 compatible string. */
static const struct of_device_id cs4231_match[] = {
{
.name = "SUNW,CS4231",
},
{
.name = "audio",
.compatible = "SUNW,CS4231",
},
{},
};
MODULE_DEVICE_TABLE(of, cs4231_match);
/* Platform driver glue; module_platform_driver() generates init/exit. */
static struct platform_driver cs4231_driver = {
.driver = {
.name = "audio",
.owner = THIS_MODULE,
.of_match_table = cs4231_match,
},
.probe = cs4231_probe,
.remove = __devexit_p(cs4231_remove),
};
module_platform_driver(cs4231_driver);
| gpl-2.0 |
Hadramos/android_sony_xperiaz_kernel_sources | drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c | 7637 | 2183 | /* winfast-usbii-deluxe.h - Keytable for winfast_usbii_deluxe Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Leadtek Winfast TV USB II Deluxe remote
Magnus Alm <magnus.alm@gmail.com>
*/
/* Scancode -> keycode table for the Leadtek WinFast TV USB II Deluxe
 * remote; values are raw hardware scancodes and must not be changed. */
static struct rc_map_table winfast_usbii_deluxe[] = {
{ 0x62, KEY_0},
{ 0x75, KEY_1},
{ 0x76, KEY_2},
{ 0x77, KEY_3},
{ 0x79, KEY_4},
{ 0x7a, KEY_5},
{ 0x7b, KEY_6},
{ 0x7d, KEY_7},
{ 0x7e, KEY_8},
{ 0x7f, KEY_9},
{ 0x38, KEY_CAMERA}, /* SNAPSHOT */
{ 0x37, KEY_RECORD}, /* RECORD */
{ 0x35, KEY_TIME}, /* TIMESHIFT */
{ 0x74, KEY_VOLUMEUP}, /* VOLUMEUP */
{ 0x78, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
{ 0x64, KEY_MUTE}, /* MUTE */
{ 0x21, KEY_CHANNEL}, /* SURF */
{ 0x7c, KEY_CHANNELUP}, /* CHANNELUP */
{ 0x60, KEY_CHANNELDOWN}, /* CHANNELDOWN */
{ 0x61, KEY_LAST}, /* LAST CHANNEL (RECALL) */
{ 0x72, KEY_VIDEO}, /* INPUT MODES (TV/FM) */
{ 0x70, KEY_POWER2}, /* TV ON/OFF */
{ 0x39, KEY_CYCLEWINDOWS}, /* MINIMIZE (BOSS) */
{ 0x3a, KEY_NEW}, /* PIP */
{ 0x73, KEY_ZOOM}, /* FULLSCREEN */
{ 0x66, KEY_INFO}, /* OSD (DISPLAY) */
{ 0x31, KEY_DOT}, /* '.' */
{ 0x63, KEY_ENTER}, /* ENTER */
};
/* Map descriptor registered with the rc-core keymap list. */
static struct rc_map_list winfast_usbii_deluxe_map = {
.map = {
.scan = winfast_usbii_deluxe,
.size = ARRAY_SIZE(winfast_usbii_deluxe),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_WINFAST_USBII_DELUXE,
}
};
/* Register the keymap on module load. */
static int __init init_rc_map_winfast_usbii_deluxe(void)
{
return rc_map_register(&winfast_usbii_deluxe_map);
}
/* Drop the keymap on module unload. */
static void __exit exit_rc_map_winfast_usbii_deluxe(void)
{
rc_map_unregister(&winfast_usbii_deluxe_map);
}
module_init(init_rc_map_winfast_usbii_deluxe)
module_exit(exit_rc_map_winfast_usbii_deluxe)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
Hani-K/H-Vitamin_trltedt | drivers/media/rc/keymaps/rc-pinnacle-grey.c | 7637 | 1995 | /* pinnacle-grey.h - Keytable for pinnacle_grey Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table pinnacle_grey[] = {
{ 0x3a, KEY_0 },
{ 0x31, KEY_1 },
{ 0x32, KEY_2 },
{ 0x33, KEY_3 },
{ 0x34, KEY_4 },
{ 0x35, KEY_5 },
{ 0x36, KEY_6 },
{ 0x37, KEY_7 },
{ 0x38, KEY_8 },
{ 0x39, KEY_9 },
{ 0x2f, KEY_POWER },
{ 0x2e, KEY_P },
{ 0x1f, KEY_L },
{ 0x2b, KEY_I },
{ 0x2d, KEY_SCREEN },
{ 0x1e, KEY_ZOOM },
{ 0x1b, KEY_VOLUMEUP },
{ 0x0f, KEY_VOLUMEDOWN },
{ 0x17, KEY_CHANNELUP },
{ 0x1c, KEY_CHANNELDOWN },
{ 0x25, KEY_INFO },
{ 0x3c, KEY_MUTE },
{ 0x3d, KEY_LEFT },
{ 0x3b, KEY_RIGHT },
{ 0x3f, KEY_UP },
{ 0x3e, KEY_DOWN },
{ 0x1a, KEY_ENTER },
{ 0x1d, KEY_MENU },
{ 0x19, KEY_AGAIN },
{ 0x16, KEY_PREVIOUSSONG },
{ 0x13, KEY_NEXTSONG },
{ 0x15, KEY_PAUSE },
{ 0x0e, KEY_REWIND },
{ 0x0d, KEY_PLAY },
{ 0x0b, KEY_STOP },
{ 0x07, KEY_FORWARD },
{ 0x27, KEY_RECORD },
{ 0x26, KEY_TUNER },
{ 0x29, KEY_TEXT },
{ 0x2a, KEY_MEDIA },
{ 0x18, KEY_EPG },
};
/* Map descriptor registered with the rc-core keymap list. */
static struct rc_map_list pinnacle_grey_map = {
.map = {
.scan = pinnacle_grey,
.size = ARRAY_SIZE(pinnacle_grey),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_PINNACLE_GREY,
}
};
/* Register the keymap on module load. */
static int __init init_rc_map_pinnacle_grey(void)
{
return rc_map_register(&pinnacle_grey_map);
}
/* Drop the keymap on module unload. */
static void __exit exit_rc_map_pinnacle_grey(void)
{
rc_map_unregister(&pinnacle_grey_map);
}
module_init(init_rc_map_pinnacle_grey)
module_exit(exit_rc_map_pinnacle_grey)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
XileForce/Vindicator-S6-Test | drivers/media/rc/keymaps/rc-pixelview-mk12.c | 7637 | 2164 | /* rc-pixelview-mk12.h - Keytable for pixelview Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/*
* Keytable for MK-F12 IR remote provided together with Pixelview
* Ultra Pro Remote Controller. Uses NEC extended format.
*/
static struct rc_map_table pixelview_mk12[] = {
{ 0x866b03, KEY_TUNER }, /* Timeshift */
{ 0x866b1e, KEY_POWER2 }, /* power */
{ 0x866b01, KEY_1 },
{ 0x866b0b, KEY_2 },
{ 0x866b1b, KEY_3 },
{ 0x866b05, KEY_4 },
{ 0x866b09, KEY_5 },
{ 0x866b15, KEY_6 },
{ 0x866b06, KEY_7 },
{ 0x866b0a, KEY_8 },
{ 0x866b12, KEY_9 },
{ 0x866b02, KEY_0 },
{ 0x866b13, KEY_AGAIN }, /* loop */
{ 0x866b10, KEY_DIGITS }, /* +100 */
{ 0x866b00, KEY_VIDEO }, /* source */
{ 0x866b18, KEY_MUTE }, /* mute */
{ 0x866b19, KEY_CAMERA }, /* snapshot */
{ 0x866b1a, KEY_SEARCH }, /* scan */
{ 0x866b16, KEY_CHANNELUP }, /* chn + */
{ 0x866b14, KEY_CHANNELDOWN }, /* chn - */
{ 0x866b1f, KEY_VOLUMEUP }, /* vol + */
{ 0x866b17, KEY_VOLUMEDOWN }, /* vol - */
{ 0x866b1c, KEY_ZOOM }, /* zoom */
{ 0x866b04, KEY_REWIND },
{ 0x866b0e, KEY_RECORD },
{ 0x866b0c, KEY_FORWARD },
{ 0x866b1d, KEY_STOP },
{ 0x866b08, KEY_PLAY },
{ 0x866b0f, KEY_PAUSE },
{ 0x866b0d, KEY_TV },
{ 0x866b07, KEY_RADIO }, /* FM */
};
/* Map descriptor registered with the rc-core keymap list. */
static struct rc_map_list pixelview_map = {
.map = {
.scan = pixelview_mk12,
.size = ARRAY_SIZE(pixelview_mk12),
.rc_type = RC_TYPE_NEC,
.name = RC_MAP_PIXELVIEW_MK12,
}
};
/* Register the keymap on module load. */
static int __init init_rc_map_pixelview(void)
{
return rc_map_register(&pixelview_map);
}
/* Drop the keymap on module unload. */
static void __exit exit_rc_map_pixelview(void)
{
rc_map_unregister(&pixelview_map);
}
module_init(init_rc_map_pixelview)
module_exit(exit_rc_map_pixelview)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
weritos666/ARCHOS_50_Platinum | drivers/media/rc/keymaps/rc-videomate-tv-pvr.c | 7637 | 1957 | /* videomate-tv-pvr.h - Keytable for videomate_tv_pvr Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table videomate_tv_pvr[] = {
{ 0x14, KEY_MUTE },
{ 0x24, KEY_ZOOM },
{ 0x01, KEY_DVD },
{ 0x23, KEY_RADIO },
{ 0x00, KEY_TV },
{ 0x0a, KEY_REWIND },
{ 0x08, KEY_PLAYPAUSE },
{ 0x0f, KEY_FORWARD },
{ 0x02, KEY_PREVIOUS },
{ 0x07, KEY_STOP },
{ 0x06, KEY_NEXT },
{ 0x0c, KEY_UP },
{ 0x0e, KEY_DOWN },
{ 0x0b, KEY_LEFT },
{ 0x0d, KEY_RIGHT },
{ 0x11, KEY_OK },
{ 0x03, KEY_MENU },
{ 0x09, KEY_SETUP },
{ 0x05, KEY_VIDEO },
{ 0x22, KEY_CHANNEL },
{ 0x12, KEY_VOLUMEUP },
{ 0x15, KEY_VOLUMEDOWN },
{ 0x10, KEY_CHANNELUP },
{ 0x13, KEY_CHANNELDOWN },
{ 0x04, KEY_RECORD },
{ 0x16, KEY_1 },
{ 0x17, KEY_2 },
{ 0x18, KEY_3 },
{ 0x19, KEY_4 },
{ 0x1a, KEY_5 },
{ 0x1b, KEY_6 },
{ 0x1c, KEY_7 },
{ 0x1d, KEY_8 },
{ 0x1e, KEY_9 },
{ 0x1f, KEY_0 },
{ 0x20, KEY_LANGUAGE },
{ 0x21, KEY_SLEEP },
};
/* Map descriptor registered with the rc-core keymap list. */
static struct rc_map_list videomate_tv_pvr_map = {
.map = {
.scan = videomate_tv_pvr,
.size = ARRAY_SIZE(videomate_tv_pvr),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_VIDEOMATE_TV_PVR,
}
};
/* Register the keymap on module load. */
static int __init init_rc_map_videomate_tv_pvr(void)
{
return rc_map_register(&videomate_tv_pvr_map);
}
/* Drop the keymap on module unload. */
static void __exit exit_rc_map_videomate_tv_pvr(void)
{
rc_map_unregister(&videomate_tv_pvr_map);
}
module_init(init_rc_map_videomate_tv_pvr)
module_exit(exit_rc_map_videomate_tv_pvr)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
RenderBroken/dory_render_kernel | drivers/media/rc/keymaps/rc-norwood.c | 7637 | 2638 | /* norwood.h - Keytable for norwood Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Norwood Micro (non-Pro) TV Tuner
By Peter Naulls <peter@chocky.org>
Key comments are the functions given in the manual */
static struct rc_map_table norwood[] = {
/* Keys 0 to 9 */
{ 0x20, KEY_0 },
{ 0x21, KEY_1 },
{ 0x22, KEY_2 },
{ 0x23, KEY_3 },
{ 0x24, KEY_4 },
{ 0x25, KEY_5 },
{ 0x26, KEY_6 },
{ 0x27, KEY_7 },
{ 0x28, KEY_8 },
{ 0x29, KEY_9 },
{ 0x78, KEY_VIDEO }, /* Video Source */
{ 0x2c, KEY_EXIT }, /* Open/Close software */
{ 0x2a, KEY_SELECT }, /* 2 Digit Select */
{ 0x69, KEY_AGAIN }, /* Recall */
{ 0x32, KEY_BRIGHTNESSUP }, /* Brightness increase */
{ 0x33, KEY_BRIGHTNESSDOWN }, /* Brightness decrease */
{ 0x6b, KEY_KPPLUS }, /* (not named >>>>>) */
{ 0x6c, KEY_KPMINUS }, /* (not named <<<<<) */
{ 0x2d, KEY_MUTE }, /* Mute */
{ 0x30, KEY_VOLUMEUP }, /* Volume up */
{ 0x31, KEY_VOLUMEDOWN }, /* Volume down */
{ 0x60, KEY_CHANNELUP }, /* Channel up */
{ 0x61, KEY_CHANNELDOWN }, /* Channel down */
{ 0x3f, KEY_RECORD }, /* Record */
{ 0x37, KEY_PLAY }, /* Play */
{ 0x36, KEY_PAUSE }, /* Pause */
{ 0x2b, KEY_STOP }, /* Stop */
{ 0x67, KEY_FASTFORWARD }, /* Forward */
{ 0x66, KEY_REWIND }, /* Rewind */
{ 0x3e, KEY_SEARCH }, /* Auto Scan */
{ 0x2e, KEY_CAMERA }, /* Capture Video */
{ 0x6d, KEY_MENU }, /* Show/Hide Control */
{ 0x2f, KEY_ZOOM }, /* Full Screen */
{ 0x34, KEY_RADIO }, /* FM */
{ 0x65, KEY_POWER }, /* Computer power */
};
/* Map descriptor registered with the rc-core keymap list. */
static struct rc_map_list norwood_map = {
.map = {
.scan = norwood,
.size = ARRAY_SIZE(norwood),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_NORWOOD,
}
};
/* Register the keymap on module load. */
static int __init init_rc_map_norwood(void)
{
return rc_map_register(&norwood_map);
}
/* Drop the keymap on module unload. */
static void __exit exit_rc_map_norwood(void)
{
rc_map_unregister(&norwood_map);
}
module_init(init_rc_map_norwood)
module_exit(exit_rc_map_norwood)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
F4uzan/skernel_u0 | drivers/media/rc/keymaps/rc-behold-columbus.c | 7637 | 2967 | /* behold-columbus.h - Keytable for behold_columbus Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Beholder Intl. Ltd. 2008
* Dmitry Belimov d.belimov@google.com
* Keytable is used by BeholdTV Columbus
* The "ascii-art picture" below (in comments, first row
* is the keycode in hex, and subsequent row(s) shows
* the button labels (several variants when appropriate)
helps to decide which keycodes to assign to the buttons.
*/
static struct rc_map_table behold_columbus[] = {
/* 0x13 0x11 0x1C 0x12 *
* Mute Source TV/FM Power *
* */
{ 0x13, KEY_MUTE },
{ 0x11, KEY_VIDEO },
{ 0x1C, KEY_TUNER }, /* KEY_TV/KEY_RADIO */
{ 0x12, KEY_POWER },
/* 0x01 0x02 0x03 0x0D *
* 1 2 3 Stereo *
* *
* 0x04 0x05 0x06 0x19 *
* 4 5 6 Snapshot *
* *
* 0x07 0x08 0x09 0x10 *
* 7 8 9 Zoom *
* */
{ 0x01, KEY_1 },
{ 0x02, KEY_2 },
{ 0x03, KEY_3 },
{ 0x0D, KEY_SETUP }, /* Setup key */
{ 0x04, KEY_4 },
{ 0x05, KEY_5 },
{ 0x06, KEY_6 },
{ 0x19, KEY_CAMERA }, /* Snapshot key */
{ 0x07, KEY_7 },
{ 0x08, KEY_8 },
{ 0x09, KEY_9 },
{ 0x10, KEY_ZOOM },
/* 0x0A 0x00 0x0B 0x0C *
* RECALL 0 ChannelUp VolumeUp *
* */
{ 0x0A, KEY_AGAIN },
{ 0x00, KEY_0 },
{ 0x0B, KEY_CHANNELUP },
{ 0x0C, KEY_VOLUMEUP },
/* 0x1B 0x1D 0x15 0x18 *
* Timeshift Record ChannelDown VolumeDown *
* */
{ 0x1B, KEY_TIME },
{ 0x1D, KEY_RECORD },
{ 0x15, KEY_CHANNELDOWN },
{ 0x18, KEY_VOLUMEDOWN },
/* 0x0E 0x1E 0x0F 0x1A *
* Stop Pause Previouse Next *
* */
{ 0x0E, KEY_STOP },
{ 0x1E, KEY_PAUSE },
{ 0x0F, KEY_PREVIOUS },
{ 0x1A, KEY_NEXT },
};
/* Map descriptor registered with the rc-core keymap list. */
static struct rc_map_list behold_columbus_map = {
.map = {
.scan = behold_columbus,
.size = ARRAY_SIZE(behold_columbus),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_BEHOLD_COLUMBUS,
}
};
/* Register the keymap on module load. */
static int __init init_rc_map_behold_columbus(void)
{
return rc_map_register(&behold_columbus_map);
}
/* Drop the keymap on module unload. */
static void __exit exit_rc_map_behold_columbus(void)
{
rc_map_unregister(&behold_columbus_map);
}
module_init(init_rc_map_behold_columbus)
module_exit(exit_rc_map_behold_columbus)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
syhost/android_kernel_zte_n918st | drivers/media/rc/keymaps/rc-proteus-2309.c | 7637 | 1879 | /* proteus-2309.h - Keytable for proteus_2309 Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Michal Majchrowicz <mmajchrowicz@gmail.com> */
static struct rc_map_table proteus_2309[] = {
/* numeric */
{ 0x00, KEY_0 },
{ 0x01, KEY_1 },
{ 0x02, KEY_2 },
{ 0x03, KEY_3 },
{ 0x04, KEY_4 },
{ 0x05, KEY_5 },
{ 0x06, KEY_6 },
{ 0x07, KEY_7 },
{ 0x08, KEY_8 },
{ 0x09, KEY_9 },
{ 0x5c, KEY_POWER }, /* power */
{ 0x20, KEY_ZOOM }, /* full screen */
{ 0x0f, KEY_BACKSPACE }, /* recall */
{ 0x1b, KEY_ENTER }, /* mute */
{ 0x41, KEY_RECORD }, /* record */
{ 0x43, KEY_STOP }, /* stop */
{ 0x16, KEY_S },
{ 0x1a, KEY_POWER2 }, /* off */
{ 0x2e, KEY_RED },
{ 0x1f, KEY_CHANNELDOWN }, /* channel - */
{ 0x1c, KEY_CHANNELUP }, /* channel + */
{ 0x10, KEY_VOLUMEDOWN }, /* volume - */
{ 0x1e, KEY_VOLUMEUP }, /* volume + */
{ 0x14, KEY_F1 },
};
/* Map descriptor registered with the rc-core keymap list. */
static struct rc_map_list proteus_2309_map = {
.map = {
.scan = proteus_2309,
.size = ARRAY_SIZE(proteus_2309),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_PROTEUS_2309,
}
};
/* Register the keymap on module load. */
static int __init init_rc_map_proteus_2309(void)
{
return rc_map_register(&proteus_2309_map);
}
/* Drop the keymap on module unload. */
static void __exit exit_rc_map_proteus_2309(void)
{
rc_map_unregister(&proteus_2309_map);
}
module_init(init_rc_map_proteus_2309)
module_exit(exit_rc_map_proteus_2309)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
TripNRaVeR/caf_kernel_msm_htc_m7 | drivers/media/rc/keymaps/rc-cinergy-1400.c | 7637 | 1917 | /* cinergy-1400.h - Keytable for cinergy_1400 Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Cinergy 1400 DVB-T */
static struct rc_map_table cinergy_1400[] = {
{ 0x01, KEY_POWER },
{ 0x02, KEY_1 },
{ 0x03, KEY_2 },
{ 0x04, KEY_3 },
{ 0x05, KEY_4 },
{ 0x06, KEY_5 },
{ 0x07, KEY_6 },
{ 0x08, KEY_7 },
{ 0x09, KEY_8 },
{ 0x0a, KEY_9 },
{ 0x0c, KEY_0 },
{ 0x0b, KEY_VIDEO },
{ 0x0d, KEY_REFRESH },
{ 0x0e, KEY_SELECT },
{ 0x0f, KEY_EPG },
{ 0x10, KEY_UP },
{ 0x11, KEY_LEFT },
{ 0x12, KEY_OK },
{ 0x13, KEY_RIGHT },
{ 0x14, KEY_DOWN },
{ 0x15, KEY_TEXT },
{ 0x16, KEY_INFO },
{ 0x17, KEY_RED },
{ 0x18, KEY_GREEN },
{ 0x19, KEY_YELLOW },
{ 0x1a, KEY_BLUE },
{ 0x1b, KEY_CHANNELUP },
{ 0x1c, KEY_VOLUMEUP },
{ 0x1d, KEY_MUTE },
{ 0x1e, KEY_VOLUMEDOWN },
{ 0x1f, KEY_CHANNELDOWN },
{ 0x40, KEY_PAUSE },
{ 0x4c, KEY_PLAY },
{ 0x58, KEY_RECORD },
{ 0x54, KEY_PREVIOUS },
{ 0x48, KEY_STOP },
{ 0x5c, KEY_NEXT },
};
/* Map descriptor registered with the rc-core keymap list. */
static struct rc_map_list cinergy_1400_map = {
.map = {
.scan = cinergy_1400,
.size = ARRAY_SIZE(cinergy_1400),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_CINERGY_1400,
}
};
/* Register the keymap on module load. */
static int __init init_rc_map_cinergy_1400(void)
{
return rc_map_register(&cinergy_1400_map);
}
/* Drop the keymap on module unload. */
static void __exit exit_rc_map_cinergy_1400(void)
{
rc_map_unregister(&cinergy_1400_map);
}
module_init(init_rc_map_cinergy_1400)
module_exit(exit_rc_map_cinergy_1400)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
XXMrHyde/android_kernel_moto_shamu | drivers/media/rc/keymaps/rc-avermedia-dvbt.c | 7637 | 2504 | /* avermedia-dvbt.h - Keytable for avermedia_dvbt Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Matt Jesson <dvb@jesson.eclipse.co.uk */
static struct rc_map_table avermedia_dvbt[] = {
{ 0x28, KEY_0 }, /* '0' / 'enter' */
{ 0x22, KEY_1 }, /* '1' */
{ 0x12, KEY_2 }, /* '2' / 'up arrow' */
{ 0x32, KEY_3 }, /* '3' */
{ 0x24, KEY_4 }, /* '4' / 'left arrow' */
{ 0x14, KEY_5 }, /* '5' */
{ 0x34, KEY_6 }, /* '6' / 'right arrow' */
{ 0x26, KEY_7 }, /* '7' */
{ 0x16, KEY_8 }, /* '8' / 'down arrow' */
{ 0x36, KEY_9 }, /* '9' */
{ 0x20, KEY_VIDEO }, /* 'source' */
{ 0x10, KEY_TEXT }, /* 'teletext' */
{ 0x00, KEY_POWER }, /* 'power' */
{ 0x04, KEY_AUDIO }, /* 'audio' */
{ 0x06, KEY_ZOOM }, /* 'full screen' */
{ 0x18, KEY_SWITCHVIDEOMODE }, /* 'display' */
{ 0x38, KEY_SEARCH }, /* 'loop' */
{ 0x08, KEY_INFO }, /* 'preview' */
{ 0x2a, KEY_REWIND }, /* 'backward <<' */
{ 0x1a, KEY_FASTFORWARD }, /* 'forward >>' */
{ 0x3a, KEY_RECORD }, /* 'capture' */
{ 0x0a, KEY_MUTE }, /* 'mute' */
{ 0x2c, KEY_RECORD }, /* 'record' */
{ 0x1c, KEY_PAUSE }, /* 'pause' */
{ 0x3c, KEY_STOP }, /* 'stop' */
{ 0x0c, KEY_PLAY }, /* 'play' */
{ 0x2e, KEY_RED }, /* 'red' */
{ 0x01, KEY_BLUE }, /* 'blue' / 'cancel' */
{ 0x0e, KEY_YELLOW }, /* 'yellow' / 'ok' */
{ 0x21, KEY_GREEN }, /* 'green' */
{ 0x11, KEY_CHANNELDOWN }, /* 'channel -' */
{ 0x31, KEY_CHANNELUP }, /* 'channel +' */
{ 0x1e, KEY_VOLUMEDOWN }, /* 'volume -' */
{ 0x3e, KEY_VOLUMEUP }, /* 'volume +' */
};
/* Map descriptor registered with the rc-core keymap list. */
static struct rc_map_list avermedia_dvbt_map = {
.map = {
.scan = avermedia_dvbt,
.size = ARRAY_SIZE(avermedia_dvbt),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_AVERMEDIA_DVBT,
}
};
/* Register the keymap on module load. */
static int __init init_rc_map_avermedia_dvbt(void)
{
return rc_map_register(&avermedia_dvbt_map);
}
/* Drop the keymap on module unload. */
static void __exit exit_rc_map_avermedia_dvbt(void)
{
rc_map_unregister(&avermedia_dvbt_map);
}
module_init(init_rc_map_avermedia_dvbt)
module_exit(exit_rc_map_avermedia_dvbt)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
SemonCat/texj-kernel-samsung-s6-g9250 | drivers/media/rc/keymaps/rc-avermedia-a16d.c | 7637 | 1828 | /* avermedia-a16d.h - Keytable for avermedia_a16d Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table avermedia_a16d[] = {
{ 0x20, KEY_LIST},
{ 0x00, KEY_POWER},
{ 0x28, KEY_1},
{ 0x18, KEY_2},
{ 0x38, KEY_3},
{ 0x24, KEY_4},
{ 0x14, KEY_5},
{ 0x34, KEY_6},
{ 0x2c, KEY_7},
{ 0x1c, KEY_8},
{ 0x3c, KEY_9},
{ 0x12, KEY_SUBTITLE},
{ 0x22, KEY_0},
{ 0x32, KEY_REWIND},
{ 0x3a, KEY_SHUFFLE},
{ 0x02, KEY_PRINT},
{ 0x11, KEY_CHANNELDOWN},
{ 0x31, KEY_CHANNELUP},
{ 0x0c, KEY_ZOOM},
{ 0x1e, KEY_VOLUMEDOWN},
{ 0x3e, KEY_VOLUMEUP},
{ 0x0a, KEY_MUTE},
{ 0x04, KEY_AUDIO},
{ 0x26, KEY_RECORD},
{ 0x06, KEY_PLAY},
{ 0x36, KEY_STOP},
{ 0x16, KEY_PAUSE},
{ 0x2e, KEY_REWIND},
{ 0x0e, KEY_FASTFORWARD},
{ 0x30, KEY_TEXT},
{ 0x21, KEY_GREEN},
{ 0x01, KEY_BLUE},
{ 0x08, KEY_EPG},
{ 0x2a, KEY_MENU},
};
/* Map descriptor registered with the rc-core keymap list. */
static struct rc_map_list avermedia_a16d_map = {
.map = {
.scan = avermedia_a16d,
.size = ARRAY_SIZE(avermedia_a16d),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_AVERMEDIA_A16D,
}
};
/* Register the keymap on module load. */
static int __init init_rc_map_avermedia_a16d(void)
{
return rc_map_register(&avermedia_a16d_map);
}
/* Drop the keymap on module unload. */
static void __exit exit_rc_map_avermedia_a16d(void)
{
rc_map_unregister(&avermedia_a16d_map);
}
module_init(init_rc_map_avermedia_a16d)
module_exit(exit_rc_map_avermedia_a16d)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
Kenepo/roots_kk_lge_msm8974 | drivers/media/rc/keymaps/rc-pctv-sedna.c | 7637 | 2136 | /* pctv-sedna.h - Keytable for pctv_sedna Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Mapping for the 28 key remote control as seen at
http://www.sednacomputer.com/photo/cardbus-tv.jpg
Pavel Mihaylov <bin@bash.info>
Also for the remote bundled with Kozumi KTV-01C card */
/*
 * Scancode -> keycode table for the 28-key Sedna/PCTV remote
 * (plus extra codes for the remote bundled with the Kozumi KTV-01C).
 */
static struct rc_map_table pctv_sedna[] = {
	{ 0x00, KEY_0 },
	{ 0x01, KEY_1 },
	{ 0x02, KEY_2 },
	{ 0x03, KEY_3 },
	{ 0x04, KEY_4 },
	{ 0x05, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x09, KEY_9 },
	{ 0x0a, KEY_AGAIN },	/* Recall */
	{ 0x0b, KEY_CHANNELUP },
	{ 0x0c, KEY_VOLUMEUP },
	{ 0x0d, KEY_MODE },	/* Stereo */
	{ 0x0e, KEY_STOP },
	{ 0x0f, KEY_PREVIOUSSONG },
	{ 0x10, KEY_ZOOM },
	{ 0x11, KEY_VIDEO },	/* Source */
	{ 0x12, KEY_POWER },
	{ 0x13, KEY_MUTE },
	{ 0x15, KEY_CHANNELDOWN },
	{ 0x18, KEY_VOLUMEDOWN },
	{ 0x19, KEY_CAMERA },	/* Snapshot */
	{ 0x1a, KEY_NEXTSONG },
	{ 0x1b, KEY_TIME },	/* Time Shift */
	{ 0x1c, KEY_RADIO },	/* FM Radio */
	{ 0x1d, KEY_RECORD },
	{ 0x1e, KEY_PAUSE },
	/* additional codes for Kozumi's remote */
	{ 0x14, KEY_INFO },	/* OSD */
	{ 0x16, KEY_OK },	/* OK */
	{ 0x17, KEY_DIGITS },	/* Plus */
	{ 0x1f, KEY_PLAY },	/* Play */
};

/* rc-core registration record; looked up by name RC_MAP_PCTV_SEDNA. */
static struct rc_map_list pctv_sedna_map = {
	.map = {
		.scan    = pctv_sedna,
		.size    = ARRAY_SIZE(pctv_sedna),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_PCTV_SEDNA,
	}
};

/* Register the keymap at module load. */
static int __init init_rc_map_pctv_sedna(void)
{
	return rc_map_register(&pctv_sedna_map);
}

/* Unregister the keymap on module removal. */
static void __exit exit_rc_map_pctv_sedna(void)
{
	rc_map_unregister(&pctv_sedna_map);
}

module_init(init_rc_map_pctv_sedna)
module_exit(exit_rc_map_pctv_sedna)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
alexax66/LP-Kernel-a3ltexx | drivers/media/rc/keymaps/rc-avermedia-m135a.c | 7637 | 3749 | /* avermedia-m135a.c - Keytable for Avermedia M135A Remote Controllers
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
* Copyright (c) 2010 by Herton Ronaldo Krzesinski <herton@mandriva.com.br>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/*
* Avermedia M135A with RM-JX and RM-K6 remote controls
*
* On Avermedia M135A with IR model RM-JX, the same codes exist on both
* Positivo (BR) and original IR, initial version and remote control codes
* added by Mauro Carvalho Chehab <mchehab@infradead.org>
*
* Positivo also ships Avermedia M135A with model RM-K6, extra control
* codes added by Herton Ronaldo Krzesinski <herton@mandriva.com.br>
*/
/*
 * NEC scancode -> keycode table for the AVerMedia M135A.  Covers the RM-JX
 * remote (address 0x02/0x03) and the Positivo-shipped RM-K6 (address 0x04).
 */
static struct rc_map_table avermedia_m135a[] = {
	/* RM-JX */
	{ 0x0200, KEY_POWER2 },
	{ 0x022e, KEY_DOT },		/* '.' */
	{ 0x0201, KEY_MODE },		/* TV/FM or SOURCE */
	{ 0x0205, KEY_1 },
	{ 0x0206, KEY_2 },
	{ 0x0207, KEY_3 },
	{ 0x0209, KEY_4 },
	{ 0x020a, KEY_5 },
	{ 0x020b, KEY_6 },
	{ 0x020d, KEY_7 },
	{ 0x020e, KEY_8 },
	{ 0x020f, KEY_9 },
	{ 0x0211, KEY_0 },
	{ 0x0213, KEY_RIGHT },		/* -> or L */
	{ 0x0212, KEY_LEFT },		/* <- or R */
	{ 0x0217, KEY_SLEEP },		/* Capturar Imagem or Snapshot */
	{ 0x0210, KEY_SHUFFLE },	/* Amostra or 16 chan prev */
	{ 0x0303, KEY_CHANNELUP },
	{ 0x0302, KEY_CHANNELDOWN },
	{ 0x021f, KEY_VOLUMEUP },
	{ 0x021e, KEY_VOLUMEDOWN },
	{ 0x020c, KEY_ENTER },		/* Full Screen */
	{ 0x0214, KEY_MUTE },
	{ 0x0208, KEY_AUDIO },
	{ 0x0203, KEY_TEXT },		/* Teletext */
	{ 0x0204, KEY_EPG },
	{ 0x022b, KEY_TV2 },		/* TV2 or PIP */
	{ 0x021d, KEY_RED },
	{ 0x021c, KEY_YELLOW },
	{ 0x0301, KEY_GREEN },
	{ 0x0300, KEY_BLUE },
	{ 0x021a, KEY_PLAYPAUSE },
	{ 0x0219, KEY_RECORD },
	{ 0x0218, KEY_PLAY },
	{ 0x021b, KEY_STOP },
	/* RM-K6 */
	{ 0x0401, KEY_POWER2 },
	{ 0x0406, KEY_MUTE },
	{ 0x0408, KEY_MODE },		/* TV/FM */
	{ 0x0409, KEY_1 },
	{ 0x040a, KEY_2 },
	{ 0x040b, KEY_3 },
	{ 0x040c, KEY_4 },
	{ 0x040d, KEY_5 },
	{ 0x040e, KEY_6 },
	{ 0x040f, KEY_7 },
	{ 0x0410, KEY_8 },
	{ 0x0411, KEY_9 },
	{ 0x044c, KEY_DOT },		/* '.' */
	{ 0x0412, KEY_0 },
	{ 0x0407, KEY_REFRESH },	/* Refresh/Reload */
	{ 0x0413, KEY_AUDIO },
	{ 0x0440, KEY_SCREEN },		/* Full Screen toggle */
	{ 0x0441, KEY_HOME },
	{ 0x0442, KEY_BACK },
	{ 0x0447, KEY_UP },
	{ 0x0448, KEY_DOWN },
	{ 0x0449, KEY_LEFT },
	{ 0x044a, KEY_RIGHT },
	{ 0x044b, KEY_OK },
	{ 0x0404, KEY_VOLUMEUP },
	{ 0x0405, KEY_VOLUMEDOWN },
	{ 0x0402, KEY_CHANNELUP },
	{ 0x0403, KEY_CHANNELDOWN },
	{ 0x0443, KEY_RED },
	{ 0x0444, KEY_GREEN },
	{ 0x0445, KEY_YELLOW },
	{ 0x0446, KEY_BLUE },
	{ 0x0414, KEY_TEXT },
	{ 0x0415, KEY_EPG },
	{ 0x041a, KEY_TV2 },		/* PIP */
	{ 0x041b, KEY_CAMERA },		/* Snapshot */
	{ 0x0417, KEY_RECORD },
	{ 0x0416, KEY_PLAYPAUSE },
	{ 0x0418, KEY_STOP },
	{ 0x0419, KEY_PAUSE },
	{ 0x041f, KEY_PREVIOUS },
	{ 0x041c, KEY_REWIND },
	{ 0x041d, KEY_FORWARD },
	{ 0x041e, KEY_NEXT },
};

/* rc-core registration record; NEC protocol, name RC_MAP_AVERMEDIA_M135A. */
static struct rc_map_list avermedia_m135a_map = {
	.map = {
		.scan    = avermedia_m135a,
		.size    = ARRAY_SIZE(avermedia_m135a),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_AVERMEDIA_M135A,
	}
};

/* Register the keymap at module load. */
static int __init init_rc_map_avermedia_m135a(void)
{
	return rc_map_register(&avermedia_m135a_map);
}

/* Unregister the keymap on module removal. */
static void __exit exit_rc_map_avermedia_m135a(void)
{
	rc_map_unregister(&avermedia_m135a_map);
}

module_init(init_rc_map_avermedia_m135a)
module_exit(exit_rc_map_avermedia_m135a)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
SlimRoms/kernel_motorola_msm8226 | sound/oss/trix.c | 8405 | 11131 | /*
* sound/oss/trix.c
*
* Low level driver for the MediaTrix AudioTrix Pro
* (MT-0002-PC Control Chip)
*
*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*
* Changes
* Alan Cox Modularisation, cleanup.
* Christoph Hellwig Adapted to module_init/module_exit
* Arnaldo C. de Melo Got rid of attach_uart401
*/
#include <linux/init.h>
#include <linux/module.h>
#include "sound_config.h"
#include "sb.h"
#include "sound_firmware.h"
#include "ad1848.h"
#include "mpu401.h"
#include "trix_boot.h"
/* Non-zero when the MPU-401 sub-device probed successfully (see init_trix). */
static int mpu;
/* Module parameter: enable the joystick port during WSS init. */
static bool joystick;

/* Read one byte from an MT-0002-PC ASIC register via the index/data pair. */
static unsigned char trix_read(int addr)
{
	outb(((unsigned char) addr), 0x390);	/* MT-0002-PC ASIC address */
	return inb(0x391);	/* MT-0002-PC ASIC data */
}

/* Write one byte to an MT-0002-PC ASIC register via the index/data pair. */
static void trix_write(int addr, int data)
{
	outb(((unsigned char) addr), 0x390);	/* MT-0002-PC ASIC address */
	outb(((unsigned char) data), 0x391);	/* MT-0002-PC ASIC data */
}
/*
 * Upload the trix_boot firmware image into the card's data RAM through the
 * RAM upload/download register at @base, then reset the card.  No-op when
 * no firmware was loaded (trix_boot_len == 0).  The outb() sequence below
 * is order-sensitive hardware protocol.
 */
static void download_boot(int base)
{
	int i = 0, n = trix_boot_len;

	if (trix_boot_len == 0)
		return;

	trix_write(0xf8, 0x00);	/* ??????? */
	outb((0x01), base + 6);	/* Clear the internal data pointer */
	outb((0x00), base + 6);	/* Restart */
	/*
	 * Write the boot code to the RAM upload/download register.
	 * Each write increments the internal data pointer.
	 */
	outb((0x01), base + 6);	/* Clear the internal data pointer */
	outb((0x1A), 0x390);	/* Select RAM download/upload port */
	for (i = 0; i < n; i++)
		outb((trix_boot[i]), 0x391);
	for (i = n; i < 10016; i++)	/* Clear up to first 16 bytes of data RAM */
		outb((0x00), 0x391);
	outb((0x00), base + 6);	/* Reset */
	outb((0x50), 0x390);	/* ?????? */
}
/*
 * Verify the MT-0002-PC ASIC is present and program it to decode the WSS
 * codec at hw_config->io_base.  Only 0x530/0x604/0xE80/0xF40 are decodable.
 * Returns 1 on success, 0 on failure.
 */
static int trix_set_wss_port(struct address_info *hw_config)
{
	unsigned char addr_bits;

	if (trix_read(0x15) != 0x71)	/* No ASIC signature */
	{
		MDB(printk(KERN_ERR "No AudioTrix ASIC signature found\n"));
		return 0;
	}

	/*
	 * Reset some registers.
	 */
	trix_write(0x13, 0);
	trix_write(0x14, 0);

	/*
	 * Configure the ASIC to place the codec to the proper I/O location
	 */
	switch (hw_config->io_base)
	{
		case 0x530:
			addr_bits = 0;
			break;
		case 0x604:
			addr_bits = 1;
			break;
		case 0xE80:
			addr_bits = 2;
			break;
		case 0xF40:
			addr_bits = 3;
			break;
		default:
			return 0;
	}
	/* Keep the low two bits, replace the address-select bits. */
	trix_write(0x19, (trix_read(0x19) & 0x03) | addr_bits);
	return 1;
}
/*
* Probe and attach routines for the Windows Sound System mode of
* AudioTrix Pro
*/
/*
 * Probe and attach the Windows Sound System (AD1848) part of the AudioTrix
 * Pro.  Validates IRQ/DMA choices, claims the I/O regions, programs the
 * ASIC and the IRQ/DMA config register, then hands the codec to ad1848_init.
 * Returns 1 on success, 0 on failure (with regions released).
 */
static int __init init_trix_wss(struct address_info *hw_config)
{
	/* DMA channel number -> config-register encoding. */
	static unsigned char dma_bits[4] = {
		1, 2, 0, 3
	};
	struct resource *ports;
	int config_port = hw_config->io_base + 0;
	int dma1 = hw_config->dma, dma2 = hw_config->dma2;
	int old_num_mixers = num_mixers;	/* to detect a new mixer below */
	u8 config, bits;
	int ret;

	/* Only IRQ 7/9/10/11 can be encoded in the config register. */
	switch(hw_config->irq) {
	case 7:
		bits = 8;
		break;
	case 9:
		bits = 0x10;
		break;
	case 10:
		bits = 0x18;
		break;
	case 11:
		bits = 0x20;
		break;
	default:
		printk(KERN_ERR "AudioTrix: Bad WSS IRQ %d\n", hw_config->irq);
		return 0;
	}

	/* Playback DMA: channels 0, 1 and 3 only. */
	switch (dma1) {
	case 0:
	case 1:
	case 3:
		break;
	default:
		printk(KERN_ERR "AudioTrix: Bad WSS DMA %d\n", dma1);
		return 0;
	}

	/* Capture DMA: -1 means single-DMA (shared with playback). */
	switch (dma2) {
	case -1:
	case 0:
	case 1:
	case 3:
		break;
	default:
		printk(KERN_ERR "AudioTrix: Bad capture DMA %d\n", dma2);
		return 0;
	}

	/*
	 * Check if the IO port returns valid signature. The original MS Sound
	 * system returns 0x04 while some cards (AudioTrix Pro for example)
	 * return 0x00.
	 */
	ports = request_region(hw_config->io_base + 4, 4, "ad1848");
	if (!ports) {
		printk(KERN_ERR "AudioTrix: MSS I/O port conflict (%x)\n", hw_config->io_base);
		return 0;
	}
	if (!request_region(hw_config->io_base, 4, "MSS config")) {
		printk(KERN_ERR "AudioTrix: MSS I/O port conflict (%x)\n", hw_config->io_base);
		release_region(hw_config->io_base + 4, 4);
		return 0;
	}

	if (!trix_set_wss_port(hw_config))
		goto fail;

	config = inb(hw_config->io_base + 3);
	if ((config & 0x3f) != 0x00)
	{
		MDB(printk(KERN_ERR "No MSS signature detected on port 0x%x\n", hw_config->io_base));
		goto fail;
	}

	/*
	 * Check that DMA0 is not in use with a 8 bit board.
	 */
	if (dma1 == 0 && config & 0x80)
	{
		printk(KERN_ERR "AudioTrix: Can't use DMA0 with a 8 bit card slot\n");
		goto fail;
	}
	if (hw_config->irq > 9 && config & 0x80)
	{
		printk(KERN_ERR "AudioTrix: Can't use IRQ%d with a 8 bit card slot\n", hw_config->irq);
		goto fail;
	}

	ret = ad1848_detect(ports, NULL, hw_config->osp);
	if (!ret)
		goto fail;

	if (joystick==1)
		trix_write(0x15, 0x80);	/* enable joystick port */

	/*
	 * Set the IRQ and DMA addresses.
	 */

	outb((bits | 0x40), config_port);	/* 0x40 = config mode */

	if (dma2 == -1 || dma2 == dma1)
	{
		/* Single-DMA mode: encode the shared channel directly. */
		bits |= dma_bits[dma1];
		dma2 = dma1;
	}
	else
	{
		/* Dual-DMA mode: program the channels via ASIC regs 0x13/0x14. */
		unsigned char tmp;

		tmp = trix_read(0x13) & ~30;
		trix_write(0x13, tmp | 0x80 | (dma1 << 4));

		tmp = trix_read(0x14) & ~30;
		trix_write(0x14, tmp | 0x80 | (dma2 << 4));
	}

	outb((bits), config_port);	/* Write IRQ+DMA setup */

	hw_config->slots[0] = ad1848_init("AudioTrix Pro", ports,
					  hw_config->irq,
					  dma1,
					  dma2,
					  0,
					  hw_config->osp,
					  THIS_MODULE);

	if (num_mixers > old_num_mixers)	/* Mixer got installed */
	{
		AD1848_REROUTE(SOUND_MIXER_LINE1, SOUND_MIXER_LINE);	/* Line in */
		AD1848_REROUTE(SOUND_MIXER_LINE2, SOUND_MIXER_CD);
		AD1848_REROUTE(SOUND_MIXER_LINE3, SOUND_MIXER_SYNTH);	/* OPL4 */
		AD1848_REROUTE(SOUND_MIXER_SPEAKER, SOUND_MIXER_ALTPCM);	/* SB */
	}
	return 1;

fail:
	release_region(hw_config->io_base, 4);
	release_region(hw_config->io_base + 4, 4);
	return 0;
}
/*
 * Probe and attach the SoundBlaster-compatible part of the AudioTrix Pro.
 * Requires firmware (trix_boot) to already be loaded; configures the ASIC
 * SB register (0x1b), downloads the boot code, then runs the standard SB
 * DSP detect/init.  Returns 1 on success, 0 on failure.
 */
static int __init probe_trix_sb(struct address_info *hw_config)
{
	int tmp;
	unsigned char conf;
	extern int sb_be_quiet;
	int old_quiet;
	/* IRQ number -> 2-bit config encoding; -1 marks unsupported IRQs. */
	static signed char irq_translate[] = {
		-1, -1, -1, 0, 1, 2, -1, 3
	};

	if (trix_boot_len == 0)
		return 0;	/* No boot code -> no fun */

	/* Only ports of the form 0x2X0 (X = 0..7) can be decoded. */
	if ((hw_config->io_base & 0xffffff8f) != 0x200)
		return 0;

	tmp = hw_config->irq;
	if (tmp > 7)
		return 0;
	if (irq_translate[tmp] == -1)
		return 0;

	tmp = hw_config->dma;
	if (tmp != 1 && tmp != 3)
		return 0;

	if (!request_region(hw_config->io_base, 16, "soundblaster")) {
		printk(KERN_ERR "AudioTrix: SB I/O port conflict (%x)\n", hw_config->io_base);
		return 0;
	}

	conf = 0x84;		/* DMA and IRQ enable */
	conf |= hw_config->io_base & 0x70;	/* I/O address bits */
	conf |= irq_translate[hw_config->irq];
	if (hw_config->dma == 3)
		conf |= 0x08;
	trix_write(0x1b, conf);

	download_boot(hw_config->io_base);

	hw_config->name = "AudioTrix SB";
	if (!sb_dsp_detect(hw_config, 0, 0, NULL)) {
		release_region(hw_config->io_base, 16);
		return 0;
	}

	hw_config->driver_use_1 = SB_NO_MIDI | SB_NO_MIXER | SB_NO_RECORDING;

	/* Prevent false alarms */
	old_quiet = sb_be_quiet;
	sb_be_quiet = 1;

	sb_dsp_init(hw_config, THIS_MODULE);

	sb_be_quiet = old_quiet;
	return 1;
}
/*
 * Probe and attach the MPU-401 MIDI part of the AudioTrix Pro.  Programs
 * the ASIC register 0x19 with the MPU I/O-port and IRQ selection, then
 * hands off to the generic uart401 probe.  Returns its result (non-zero
 * on success, 0 on failure).
 */
static int __init probe_trix_mpu(struct address_info *hw_config)
{
	unsigned char conf;
	/* IRQ number -> 3-bit config encoding; -1 marks unsupported IRQs. */
	static int irq_bits[] = {
		-1, -1, -1, 1, 2, 3, -1, 4, -1, 5
	};

	if (hw_config->irq > 9)
	{
		printk(KERN_ERR "AudioTrix: Bad MPU IRQ %d\n", hw_config->irq);
		return 0;
	}
	if (irq_bits[hw_config->irq] == -1)
	{
		printk(KERN_ERR "AudioTrix: Bad MPU IRQ %d\n", hw_config->irq);
		return 0;
	}
	switch (hw_config->io_base)
	{
		case 0x330:
			conf = 0x00;
			break;
		case 0x370:
			conf = 0x04;
			break;
		case 0x3b0:
			conf = 0x08;
			break;
		case 0x3f0:
			conf = 0x0c;
			break;
		default:
			return 0;	/* Invalid port */
	}

	conf |= irq_bits[hw_config->irq] << 4;
	/* Keep bits 7,1,0 of reg 0x19; set the MPU port/IRQ selection. */
	trix_write(0x19, (trix_read(0x19) & 0x83) | conf);

	hw_config->name = "AudioTrix Pro";
	return probe_uart401(hw_config, THIS_MODULE);
}
/* Tear down the WSS part: release regions and unload the AD1848 codec. */
static void __exit unload_trix_wss(struct address_info *hw_config)
{
	int dma2 = hw_config->dma2;

	if (dma2 == -1)
		dma2 = hw_config->dma;	/* single-DMA mode used the same channel */

	release_region(0x390, 2);
	release_region(hw_config->io_base, 4);

	ad1848_unload(hw_config->io_base + 4,
		      hw_config->irq,
		      hw_config->dma,
		      dma2,
		      0);
	sound_unload_audiodev(hw_config->slots[0]);
}

/* Tear down the MPU-401 part. */
static inline void __exit unload_trix_mpu(struct address_info *hw_config)
{
	unload_uart401(hw_config);
}

/* Tear down the SoundBlaster part. */
static inline void __exit unload_trix_sb(struct address_info *hw_config)
{
	sb_dsp_unload(hw_config, mpu);
}
/* Resolved hardware configs for the three sub-devices. */
static struct address_info cfg;		/* WSS (AD1848) */
static struct address_info cfg2;	/* SoundBlaster */
static struct address_info cfg_mpu;	/* MPU-401 */

static int sb;		/* non-zero when the SB part attached */
static int fw_load;	/* firmware was loaded by us and must be vfree'd */

/* Module/boot parameters; -1 means "not configured". */
static int __initdata io	= -1;
static int __initdata irq	= -1;
static int __initdata dma	= -1;
static int __initdata dma2	= -1;	/* Set this for modules that need it */
static int __initdata sb_io	= -1;
static int __initdata sb_dma	= -1;
static int __initdata sb_irq	= -1;
static int __initdata mpu_io	= -1;
static int __initdata mpu_irq	= -1;

module_param(io, int, 0);
module_param(irq, int, 0);
module_param(dma, int, 0);
module_param(dma2, int, 0);
module_param(sb_io, int, 0);
module_param(sb_dma, int, 0);
module_param(sb_irq, int, 0);
module_param(mpu_io, int, 0);
module_param(mpu_irq, int, 0);
module_param(joystick, bool, 0);
/*
 * Module entry point: validate the parameters, optionally load the boot
 * firmware from /etc/sound/trxpro.bin, claim the ASIC config ports at
 * 0x390-0x391, then attach WSS (mandatory), SB and MPU (optional) in that
 * order so the firmware is in place before the SB/MPU probes run.
 */
static int __init init_trix(void)
{
	printk(KERN_INFO "MediaTrix audio driver Copyright (C) by Hannu Savolainen 1993-1996\n");

	cfg.io_base = io;
	cfg.irq = irq;
	cfg.dma = dma;
	cfg.dma2 = dma2;

	cfg2.io_base = sb_io;
	cfg2.irq = sb_irq;
	cfg2.dma = sb_dma;

	cfg_mpu.io_base = mpu_io;
	cfg_mpu.irq = mpu_irq;

	if (cfg.io_base == -1 || cfg.dma == -1 || cfg.irq == -1) {
		printk(KERN_INFO "I/O, IRQ, DMA and type are mandatory\n");
		return -EINVAL;
	}
	if (cfg2.io_base != -1 && (cfg2.irq == -1 || cfg2.dma == -1)) {
		printk(KERN_INFO "CONFIG_SB_IRQ and CONFIG_SB_DMA must be specified if SB_IO is set.\n");
		return -EINVAL;
	}
	if (cfg_mpu.io_base != -1 && cfg_mpu.irq == -1) {
		printk(KERN_INFO "CONFIG_MPU_IRQ must be specified if MPU_IO is set.\n");
		return -EINVAL;
	}

	if (!trix_boot)
	{
		/* No built-in firmware: try to load it from the filesystem. */
		fw_load = 1;
		trix_boot_len = mod_firmware_load("/etc/sound/trxpro.bin",
						  (char **) &trix_boot);
	}

	if (!request_region(0x390, 2, "AudioTrix")) {
		printk(KERN_ERR "AudioTrix: Config port I/O conflict\n");
		return -ENODEV;
	}

	if (!init_trix_wss(&cfg)) {
		release_region(0x390, 2);
		return -ENODEV;
	}

	/*
	 * We must attach in the right order to get the firmware
	 * loaded up in time.
	 */
	if (cfg2.io_base != -1) {
		sb = probe_trix_sb(&cfg2);
	}

	if (cfg_mpu.io_base != -1)
		mpu = probe_trix_mpu(&cfg_mpu);

	return 0;
}
/* Module exit: free firmware and detach sub-devices in reverse order. */
static void __exit cleanup_trix(void)
{
	if (fw_load && trix_boot)
		vfree(trix_boot);
	if (sb)
		unload_trix_sb(&cfg2);
	if (mpu)
		unload_trix_mpu(&cfg_mpu);
	unload_trix_wss(&cfg);
}

module_init(init_trix);
module_exit(cleanup_trix);
#ifndef MODULE
/*
 * Boot-time option parser for
 * "trix=io,irq,dma,dma2,sb_io,sb_irq,sb_dma,mpu_io,mpu_irq".
 *
 * Nine values are documented, so ints[] needs ten slots: get_options()
 * stores the parsed count in ints[0] and the values in ints[1..9].  The
 * previous code declared only ints[9] and assigned sb_dma from ints[6]
 * (the sb_irq value), shifting mpu_io/mpu_irq one slot early and silently
 * dropping the ninth value.  The array is zero-initialized so unspecified
 * trailing options read as 0 instead of stack garbage.
 */
static int __init setup_trix (char *str)
{
	/* io, irq, dma, dma2, sb_io, sb_irq, sb_dma, mpu_io, mpu_irq */
	int ints[10] = { 0 };

	str = get_options(str, ARRAY_SIZE(ints), ints);

	io	= ints[1];
	irq	= ints[2];
	dma	= ints[3];
	dma2	= ints[4];
	sb_io	= ints[5];
	sb_irq	= ints[6];
	sb_dma	= ints[7];
	mpu_io	= ints[8];
	mpu_irq	= ints[9];

	return 1;
}
__setup("trix=", setup_trix);
#endif
MODULE_LICENSE("GPL");
| gpl-2.0 |
LuckJC/cubie-linux | arch/sh/mm/gup.c | 10453 | 6772 | /*
* Lockless get_user_pages_fast for SuperH
*
* Copyright (C) 2009 - 2010 Paul Mundt
*
* Cloned from the x86 and PowerPC versions, by:
*
* Copyright (C) 2008 Nick Piggin
* Copyright (C) 2008 Novell Inc.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>
/*
 * Load a pte without any locks.  On !X2TLB a pte is a single word, so a
 * plain once-load suffices; on X2TLB the 64-bit pte must be read as two
 * halves with a recheck loop (see the long comment below).  Only safe
 * with interrupts disabled, as done by the GUP fast path.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X2TLB
	return ACCESS_ONCE(*ptep);
#else
	/*
	 * With get_user_pages_fast, we walk down the pagetables without
	 * taking any locks.  For this we would like to load the pointers
	 * atomically, but that is not possible with 64-bit PTEs.  What
	 * we do have is the guarantee that a pte will only either go
	 * from not present to present, or present to not present or both
	 * -- it will not switch to a completely different present page
	 * without a TLB flush in between; something that we are blocking
	 * by holding interrupts off.
	 *
	 * Setting ptes from not present to present goes:
	 * ptep->pte_high = h;
	 * smp_wmb();
	 * ptep->pte_low = l;
	 *
	 * And present to not present goes:
	 * ptep->pte_low = 0;
	 * smp_wmb();
	 * ptep->pte_high = 0;
	 *
	 * We must ensure here that the load of pte_low sees l iff pte_high
	 * sees h.  We load pte_high *after* loading pte_low, which ensures we
	 * don't see an older value of pte_high.  *Then* we recheck pte_low,
	 * which ensures that we haven't picked up a changed pte high.  We might
	 * have got rubbish values from pte_low and pte_high, but we are
	 * guaranteed that pte_low will not have the present bit set *unless*
	 * it is 'l'.  And get_user_pages_fast only operates on present ptes, so
	 * we're safe.
	 *
	 * gup_get_pte should not be used or copied outside gup.c without being
	 * very careful -- it does not atomically load the pte or anything that
	 * is likely to be useful for you.
	 */
	pte_t pte;

retry:
	pte.pte_low = ptep->pte_low;
	smp_rmb();
	pte.pte_high = ptep->pte_high;
	smp_rmb();
	if (unlikely(pte.pte_low != ptep->pte_low))
		goto retry;

	return pte;
#endif
}
/*
* The performance critical leaf functions are made noinline otherwise gcc
* inlines everything into a single function which results in too much
* register pressure.
*/
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
/*
 * Walk the ptes under @pmd for [addr, end), pinning each present,
 * accessible page into pages[*nr].  Returns 1 when the whole range was
 * handled, 0 to make the caller fall back to the slow path.  Runs with
 * interrupts off (see callers).
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	u64 mask, result;
	pte_t *ptep;

	/* Build the pte bits a usable mapping must have for this config. */
#ifdef CONFIG_X2TLB
	result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
	if (write)
		result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
#elif defined(CONFIG_SUPERH64)
	result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
	if (write)
		result |= _PAGE_WRITE;
#else
	result = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		result |= _PAGE_RW;
#endif

	/* Also reject special mappings (_PAGE_SPECIAL must be clear). */
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		if ((pte_val(pte) & mask) != result) {
			pte_unmap(ptep);
			return 0;
		}
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		get_page(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;
}
/*
 * Walk the pmds under @pud for [addr, end), descending into gup_pte_range
 * for each present pmd.  Returns 0 (fall back to slow path) on any hole.
 */
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

/*
 * Walk the puds under @pgd for [addr, end), descending into gup_pmd_range
 * for each present pud.  Returns 0 (fall back to slow path) on any hole.
 */
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
/*
* Like get_user_pages_fast() except its IRQ-safe in that it won't fall
* back to the regular GUP.
*/
/*
 * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
 * back to the regular GUP.  Returns the number of pages pinned, which may
 * be fewer than requested (it stops at the first hole).
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	unsigned long flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return 0;

	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables and pages from being freed.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
/**
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
* Attempt to pin user pages in memory without taking mm->mmap_sem.
* If not successful, it will fall back to taking the lock and
* calling get_user_pages().
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno.
*/
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;

	end = start + len;
	if (end < start)
		goto slow_irqon;	/* address range wrapped */

	/* IRQs off blocks TLB flush IPIs, keeping the pagetables alive. */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	/* Slow path: pin whatever remains with the locking GUP. */
	{
		int ret;

slow:
		local_irq_enable();
slow_irqon:
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			/* pages already pinned on the fast path count too */
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}
| gpl-2.0 |
AresHou/android_kernel_lge_geehrc | drivers/ide/ide-scan-pci.c | 12245 | 2725 | /*
* support for probing IDE PCI devices in the PCI bus order
*
* Copyright (c) 1998-2000 Andre Hedrick <andre@linux-ide.org>
* Copyright (c) 1995-1998 Mark Lord
*
* May be copied or modified under the terms of the GNU General Public License
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ide.h>
/*
* Module interfaces
*/
static int pre_init = 1;		/* Before first ordered IDE scan */
static LIST_HEAD(ide_pci_drivers);	/* drivers deferred until the scan */

/*
 * __ide_pci_register_driver - attach IDE driver
 * @driver: pci driver
 * @module: owner module of the driver
 *
 * Registers a driver with the IDE layer. The IDE layer arranges that
 * boot time setup is done in the expected device order and then
 * hands the controllers off to the core PCI code to do the rest of
 * the work.
 *
 * Before the ordered scan has run the driver is only queued on
 * ide_pci_drivers; afterwards it goes straight to the PCI core.
 *
 * Returns are the same as for pci_register_driver
 */
int __ide_pci_register_driver(struct pci_driver *driver, struct module *module,
			      const char *mod_name)
{
	if (!pre_init)
		return __pci_register_driver(driver, module, mod_name);
	driver->driver.owner = module;
	list_add_tail(&driver->node, &ide_pci_drivers);
	return 0;
}
EXPORT_SYMBOL_GPL(__ide_pci_register_driver);
/**
* ide_scan_pcidev - find an IDE driver for a device
* @dev: PCI device to check
*
* Look for an IDE driver to handle the device we are considering.
* This is only used during boot up to get the ordering correct. After
* boot up the pci layer takes over the job.
*/
/**
 *	ide_scan_pcidev		-	find an IDE driver for a device
 *	@dev: PCI device to check
 *
 *	Walk the queued IDE PCI drivers and hand @dev to the first one whose
 *	ID table matches and whose probe succeeds.  Only used during boot to
 *	get the ordering correct; after boot the PCI layer takes over.
 *
 *	Returns 1 when a driver claimed the device, 0 otherwise.
 */
static int __init ide_scan_pcidev(struct pci_dev *dev)
{
	struct pci_driver *drv;

	list_for_each_entry(drv, &ide_pci_drivers, node) {
		const struct pci_device_id *match;

		if (!drv->id_table)
			continue;

		match = pci_match_id(drv->id_table, dev);
		if (match == NULL)
			continue;

		if (drv->probe(dev, match) < 0)
			continue;

		/* Claimed: record the driver and hold a device reference. */
		dev->driver = drv;
		pci_dev_get(dev);
		return 1;
	}

	return 0;
}
/**
* ide_scan_pcibus - perform the initial IDE driver scan
*
* Perform the initial bus rather than driver ordered scan of the
* PCI drivers. After this all IDE pci handling becomes standard
* module ordering not traditionally ordered.
*/
/**
 *	ide_scan_pcibus		-	perform the initial IDE driver scan
 *
 *	Perform the initial bus rather than driver ordered scan of the
 *	PCI drivers. After this all IDE pci handling becomes standard
 *	module ordering not traditionally ordered.
 */
static int __init ide_scan_pcibus(void)
{
	struct pci_dev *dev = NULL;
	struct pci_driver *d;
	struct list_head *l, *n;

	pre_init = 0;	/* later registrations go straight to the PCI core */
	for_each_pci_dev(dev)
		ide_scan_pcidev(dev);

	/*
	 * Hand the drivers over to the PCI layer now we
	 * are post init.
	 */
	list_for_each_safe(l, n, &ide_pci_drivers) {
		list_del(l);
		d = list_entry(l, struct pci_driver, node);
		if (__pci_register_driver(d, d->driver.owner,
					  d->driver.mod_name))
			printk(KERN_ERR "%s: failed to register %s driver\n",
					__func__, d->driver.mod_name);
	}

	return 0;
}
module_init(ide_scan_pcibus);
| gpl-2.0 |
theme/linux | drivers/gpio/gpio-f7188x.c | 214 | 10318 | /*
* GPIO driver for Fintek Super-I/O F71882 and F71889
*
* Copyright (C) 2010-2013 LaCie
*
* Author: Simon Guinot <simon.guinot@sequanux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
#define DRVNAME "gpio-f7188x"

/*
 * Super-I/O registers
 */
#define SIO_LDSEL		0x07	/* Logical device select */
#define SIO_DEVID		0x20	/* Device ID (2 bytes) */
#define SIO_DEVREV		0x22	/* Device revision */
#define SIO_MANID		0x23	/* Fintek ID (2 bytes) */

#define SIO_LD_GPIO		0x06	/* GPIO logical device */
#define SIO_UNLOCK_KEY		0x87	/* Key to enable Super-I/O */
#define SIO_LOCK_KEY		0xAA	/* Key to disable Super-I/O */

#define SIO_FINTEK_ID		0x1934	/* Manufacturer ID */
#define SIO_F71882_ID		0x0541	/* F71882 chipset ID */
#define SIO_F71889_ID		0x0909	/* F71889 chipset ID */

/* Supported chip variants; index into f7188x_names[]. */
enum chips { f71882fg, f71889f };

static const char * const f7188x_names[] = {
	"f71882fg",
	"f71889f",
};

/* Identity of the Super-I/O found at probe time. */
struct f7188x_sio {
	int addr;		/* Super-I/O index/data base port */
	enum chips type;
};

/* One bank of GPIO lines, exposed as one gpio_chip. */
struct f7188x_gpio_bank {
	struct gpio_chip chip;
	unsigned int regbase;	/* base of this bank's register block */
	struct f7188x_gpio_data *data;
};

/* Per-device state: the Super-I/O handle and its banks. */
struct f7188x_gpio_data {
	struct f7188x_sio *sio;
	int nr_bank;
	struct f7188x_gpio_bank *bank;
};
/*
* Super-I/O functions.
*/
/* Read one byte from Super-I/O register @reg via the index/data pair. */
static inline int superio_inb(int base, int reg)
{
	outb(reg, base);
	return inb(base + 1);
}

/* Read a 16-bit big-endian register pair (@reg = high byte, @reg+1 = low). */
static int superio_inw(int base, int reg)
{
	int val;

	outb(reg++, base);
	val = inb(base + 1) << 8;
	outb(reg, base);
	val |= inb(base + 1);

	return val;
}

/* Write one byte to Super-I/O register @reg via the index/data pair. */
static inline void superio_outb(int base, int reg, int val)
{
	outb(reg, base);
	outb(val, base + 1);
}

/*
 * Claim the index/data port pair and unlock the Super-I/O configuration
 * space.  Returns 0 on success, -EBUSY when the ports are already in use.
 * Must be balanced with superio_exit().
 */
static inline int superio_enter(int base)
{
	/* Don't step on other drivers' I/O space by accident. */
	if (!request_muxed_region(base, 2, DRVNAME)) {
		pr_err(DRVNAME "I/O address 0x%04x already in use\n", base);
		return -EBUSY;
	}

	/* According to the datasheet the key must be send twice. */
	outb(SIO_UNLOCK_KEY, base);
	outb(SIO_UNLOCK_KEY, base);

	return 0;
}

/* Select logical device @ld for subsequent register accesses. */
static inline void superio_select(int base, int ld)
{
	outb(SIO_LDSEL, base);
	outb(ld, base + 1);
}

/* Lock the configuration space again and release the port region. */
static inline void superio_exit(int base)
{
	outb(SIO_LOCK_KEY, base);
	release_region(base, 2);
}
/*
* GPIO chip.
*/
/* gpio_chip callbacks, defined below. */
static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_direction_out(struct gpio_chip *chip,
				     unsigned offset, int value);
static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);

/* Initializer for one bank: GPIO number base, line count, register base. */
#define F7188X_GPIO_BANK(_base, _ngpio, _regbase)			\
	{								\
		.chip = {						\
			.label            = DRVNAME,			\
			.owner            = THIS_MODULE,		\
			.direction_input  = f7188x_gpio_direction_in,	\
			.get              = f7188x_gpio_get,		\
			.direction_output = f7188x_gpio_direction_out,	\
			.set              = f7188x_gpio_set,		\
			.base             = _base,			\
			.ngpio            = _ngpio,			\
			.can_sleep        = true,			\
		},							\
		.regbase = _regbase,					\
	}

/* Per-bank register offsets. */
#define gpio_dir(base) (base + 0)
#define gpio_data_out(base) (base + 1)
#define gpio_data_in(base) (base + 2)
/* Output mode register (0:open drain 1:push-pull). */
#define gpio_out_mode(base) (base + 3)

static struct f7188x_gpio_bank f71882_gpio_bank[] = {
	F7188X_GPIO_BANK(0 , 8, 0xF0),
	F7188X_GPIO_BANK(10, 8, 0xE0),
	F7188X_GPIO_BANK(20, 8, 0xD0),
	F7188X_GPIO_BANK(30, 4, 0xC0),
	F7188X_GPIO_BANK(40, 4, 0xB0),
};

static struct f7188x_gpio_bank f71889_gpio_bank[] = {
	F7188X_GPIO_BANK(0 , 7, 0xF0),
	F7188X_GPIO_BANK(10, 7, 0xE0),
	F7188X_GPIO_BANK(20, 8, 0xD0),
	F7188X_GPIO_BANK(30, 8, 0xC0),
	F7188X_GPIO_BANK(40, 8, 0xB0),
	F7188X_GPIO_BANK(50, 5, 0xA0),
	F7188X_GPIO_BANK(60, 8, 0x90),
	F7188X_GPIO_BANK(70, 8, 0x80),
};
/* Configure line @offset of the bank as an input (clear its dir bit). */
static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
	int err;
	struct f7188x_gpio_bank *bank =
		container_of(chip, struct f7188x_gpio_bank, chip);
	struct f7188x_sio *sio = bank->data->sio;
	u8 dir;

	err = superio_enter(sio->addr);
	if (err)
		return err;
	superio_select(sio->addr, SIO_LD_GPIO);

	dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
	dir &= ~(1 << offset);
	superio_outb(sio->addr, gpio_dir(bank->regbase), dir);

	superio_exit(sio->addr);

	return 0;
}
/*
 * Read the level of line @offset: from the output data register when the
 * line is configured as an output, from the input register otherwise.
 */
static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	int err;
	struct f7188x_gpio_bank *bank =
		container_of(chip, struct f7188x_gpio_bank, chip);
	struct f7188x_sio *sio = bank->data->sio;
	u8 dir, data;

	err = superio_enter(sio->addr);
	if (err)
		return err;
	superio_select(sio->addr, SIO_LD_GPIO);

	dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
	dir = !!(dir & (1 << offset));
	if (dir)
		data = superio_inb(sio->addr, gpio_data_out(bank->regbase));
	else
		data = superio_inb(sio->addr, gpio_data_in(bank->regbase));

	superio_exit(sio->addr);

	return !!(data & 1 << offset);
}
/*
 * Configure line @offset as an output driving @value.  The data register
 * is written before the direction bit so the line starts at the requested
 * level.
 */
static int f7188x_gpio_direction_out(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	int err;
	struct f7188x_gpio_bank *bank =
		container_of(chip, struct f7188x_gpio_bank, chip);
	struct f7188x_sio *sio = bank->data->sio;
	u8 dir, data_out;

	err = superio_enter(sio->addr);
	if (err)
		return err;
	superio_select(sio->addr, SIO_LD_GPIO);

	data_out = superio_inb(sio->addr, gpio_data_out(bank->regbase));
	if (value)
		data_out |= (1 << offset);
	else
		data_out &= ~(1 << offset);
	superio_outb(sio->addr, gpio_data_out(bank->regbase), data_out);

	dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
	dir |= (1 << offset);
	superio_outb(sio->addr, gpio_dir(bank->regbase), dir);

	superio_exit(sio->addr);

	return 0;
}
/*
 * Drive output line @offset to @value.  The gpio_chip .set callback is
 * void, so a superio_enter() failure can only be silently dropped.
 */
static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	int err;
	struct f7188x_gpio_bank *bank =
		container_of(chip, struct f7188x_gpio_bank, chip);
	struct f7188x_sio *sio = bank->data->sio;
	u8 data_out;

	err = superio_enter(sio->addr);
	if (err)
		return;
	superio_select(sio->addr, SIO_LD_GPIO);

	data_out = superio_inb(sio->addr, gpio_data_out(bank->regbase));
	if (value)
		data_out |= (1 << offset);
	else
		data_out &= ~(1 << offset);
	superio_outb(sio->addr, gpio_data_out(bank->regbase), data_out);

	superio_exit(sio->addr);
}
/*
* Platform device and driver.
*/
/*
 * Probe callback: bind the GPIO banks of the detected Super-I/O chip.
 *
 * Selects the bank table matching the chip type found earlier by
 * f7188x_find() (passed in via platform data) and registers one
 * gpio_chip per bank.  On a mid-loop registration failure, the banks
 * already registered are torn down in reverse order.
 */
static int f7188x_gpio_probe(struct platform_device *pdev)
{
int err;
int i;
struct f7188x_sio *sio = pdev->dev.platform_data;
struct f7188x_gpio_data *data;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
/* Pick the bank layout for the detected chip variant. */
switch (sio->type) {
case f71882fg:
data->nr_bank = ARRAY_SIZE(f71882_gpio_bank);
data->bank = f71882_gpio_bank;
break;
case f71889f:
data->nr_bank = ARRAY_SIZE(f71889_gpio_bank);
data->bank = f71889_gpio_bank;
break;
default:
return -ENODEV;
}
data->sio = sio;
platform_set_drvdata(pdev, data);
/* For each GPIO bank, register a GPIO chip. */
for (i = 0; i < data->nr_bank; i++) {
struct f7188x_gpio_bank *bank = &data->bank[i];
bank->chip.dev = &pdev->dev;
bank->data = data;
err = gpiochip_add(&bank->chip);
if (err) {
dev_err(&pdev->dev,
"Failed to register gpiochip %d: %d\n",
i, err);
goto err_gpiochip;
}
}
return 0;
err_gpiochip:
/* Unwind only the banks that were successfully added (0 .. i-1). */
for (i = i - 1; i >= 0; i--) {
struct f7188x_gpio_bank *bank = &data->bank[i];
gpiochip_remove(&bank->chip);
}
return err;
}
/* Remove callback: unregister every GPIO bank added by probe. */
static int f7188x_gpio_remove(struct platform_device *pdev)
{
	struct f7188x_gpio_data *data = platform_get_drvdata(pdev);
	int n;

	for (n = 0; n < data->nr_bank; n++)
		gpiochip_remove(&data->bank[n].chip);

	return 0;
}
/*
 * Detect a supported Fintek Super-I/O chip at config port @addr.
 *
 * Checks the manufacturer ID, then matches the device ID against the
 * known chip list, filling in @sio->type and @sio->addr on success.
 * Returns 0 when a supported chip is found, -ENODEV when not, or the
 * error from superio_enter().  The config space is always exited
 * before returning.
 */
static int __init f7188x_find(int addr, struct f7188x_sio *sio)
{
int err;
u16 devid;
err = superio_enter(addr);
if (err)
return err;
err = -ENODEV;
devid = superio_inw(addr, SIO_MANID);
if (devid != SIO_FINTEK_ID) {
pr_debug(DRVNAME ": Not a Fintek device at 0x%08x\n", addr);
goto err;
}
devid = superio_inw(addr, SIO_DEVID);
switch (devid) {
case SIO_F71882_ID:
sio->type = f71882fg;
break;
case SIO_F71889_ID:
sio->type = f71889f;
break;
default:
pr_info(DRVNAME ": Unsupported Fintek device 0x%04x\n", devid);
goto err;
}
sio->addr = addr;
err = 0;
pr_info(DRVNAME ": Found %s at %#x, revision %d\n",
f7188x_names[sio->type],
(unsigned int) addr,
(int) superio_inb(addr, SIO_DEVREV));
err:
superio_exit(addr);
return err;
}
static struct platform_device *f7188x_gpio_pdev;

/*
 * Allocate and register the platform device that carries the Super-I/O
 * description found by f7188x_find().  @sio is copied into the device's
 * platform data.  On failure the half-constructed device is released
 * with platform_device_put() and a negative errno is returned.
 */
static int __init
f7188x_gpio_device_add(const struct f7188x_sio *sio)
{
	int err;

	f7188x_gpio_pdev = platform_device_alloc(DRVNAME, -1);
	if (!f7188x_gpio_pdev)
		return -ENOMEM;

	err = platform_device_add_data(f7188x_gpio_pdev,
				       sio, sizeof(*sio));
	if (err) {
		/* ": " separator matches every other log line in this driver;
		 * without it DRVNAME runs straight into the message text. */
		pr_err(DRVNAME ": Platform data allocation failed\n");
		goto err;
	}

	err = platform_device_add(f7188x_gpio_pdev);
	if (err) {
		pr_err(DRVNAME ": Device addition failed\n");
		goto err;
	}

	return 0;

err:
	platform_device_put(f7188x_gpio_pdev);
	return err;
}
/*
* Try to match a supported Fintech device by reading the (hard-wired)
* configuration I/O ports. If available, then register both the platform
* device and driver to support the GPIOs.
*/
/* Platform driver glue: one probe/remove pair, named after DRVNAME so it
 * binds to the platform device registered in f7188x_gpio_device_add(). */
static struct platform_driver f7188x_gpio_driver = {
.driver = {
.name = DRVNAME,
},
.probe = f7188x_gpio_probe,
.remove = f7188x_gpio_remove,
};
/*
 * Module entry: probe the two standard Super-I/O config ports (0x2e,
 * 0x4e).  f7188x_find() returns 0 on success, so the device is only
 * considered absent when both probes fail.  On success, register the
 * driver and then the matching platform device; the driver is
 * unregistered again if device creation fails.
 */
static int __init f7188x_gpio_init(void)
{
int err;
struct f7188x_sio sio;
if (f7188x_find(0x2e, &sio) &&
f7188x_find(0x4e, &sio))
return -ENODEV;
err = platform_driver_register(&f7188x_gpio_driver);
if (!err) {
err = f7188x_gpio_device_add(&sio);
if (err)
platform_driver_unregister(&f7188x_gpio_driver);
}
return err;
}
subsys_initcall(f7188x_gpio_init);
/* Module exit: tear down in reverse order of init — device first, then
 * the driver it was bound to. */
static void __exit f7188x_gpio_exit(void)
{
platform_device_unregister(f7188x_gpio_pdev);
platform_driver_unregister(&f7188x_gpio_driver);
}
module_exit(f7188x_gpio_exit);
MODULE_DESCRIPTION("GPIO driver for Super-I/O chips F71882FG and F71889F");
MODULE_AUTHOR("Simon Guinot <simon.guinot@sequanux.org>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
y-sensei/phyLinux | drivers/net/wireless/ipw2x00/libipw_module.c | 1238 | 8707 | /*******************************************************************************
Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
Portions of this file are based on the WEP enablement code provided by the
Host AP project hostap-drivers v0.1.3
Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
<j@w1.fi>
Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA 02111-1307, USA.
The full GNU General Public License is included in this distribution in the
file called LICENSE.
Contact Information:
Intel Linux Wireless <ilw@linux.intel.com>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <net/net_namespace.h>
#include <net/arp.h>
#include "libipw.h"
#define DRV_DESCRIPTION "802.11 data/management/control stack"
#define DRV_NAME "libipw"
#define DRV_PROCNAME "ieee80211"
#define DRV_VERSION LIBIPW_VERSION
#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
/* Empty ops: libipw registers a wiphy only so cfg80211 tracks the device. */
static struct cfg80211_ops libipw_config_ops = { };
/* Self-referential pointer used as a unique identity cookie for wiphys
 * created by this module (assigned to wiphy->privid in alloc_libipw). */
static void *libipw_wiphy_privid = &libipw_wiphy_privid;
/*
 * Allocate the MAX_NETWORK_COUNT zeroed network descriptors used to
 * cache scanned BSS information.  On allocation failure, every
 * descriptor allocated so far is freed and -ENOMEM is returned.
 */
static int libipw_networks_allocate(struct libipw_device *ieee)
{
	int idx;

	for (idx = 0; idx < MAX_NETWORK_COUNT; idx++) {
		ieee->networks[idx] = kzalloc(sizeof(struct libipw_network),
					      GFP_KERNEL);
		if (ieee->networks[idx])
			continue;

		LIBIPW_ERROR("Out of memory allocating beacons\n");
		while (--idx >= 0)
			kfree(ieee->networks[idx]);
		return -ENOMEM;
	}

	return 0;
}
/* Free every network descriptor allocated by libipw_networks_allocate(). */
static inline void libipw_networks_free(struct libipw_device *ieee)
{
	int idx = 0;

	while (idx < MAX_NETWORK_COUNT)
		kfree(ieee->networks[idx++]);
}
/*
 * Artificially age every cached network by @age_secs seconds by moving
 * each entry's last_scanned timestamp into the past.  The network list
 * is walked under ieee->lock with interrupts disabled, since it is also
 * touched from interrupt context elsewhere in the stack.
 */
void libipw_networks_age(struct libipw_device *ieee,
unsigned long age_secs)
{
struct libipw_network *network = NULL;
unsigned long flags;
unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC);
spin_lock_irqsave(&ieee->lock, flags);
list_for_each_entry(network, &ieee->network_list, list) {
network->last_scanned -= age_jiffies;
}
spin_unlock_irqrestore(&ieee->lock, flags);
}
EXPORT_SYMBOL(libipw_networks_age);
/* Reset both network lists and park every preallocated descriptor on the
 * free list, ready to be claimed as scan results arrive. */
static void libipw_networks_initialize(struct libipw_device *ieee)
{
int i;
INIT_LIST_HEAD(&ieee->network_free_list);
INIT_LIST_HEAD(&ieee->network_list);
for (i = 0; i < MAX_NETWORK_COUNT; i++)
list_add_tail(&ieee->networks[i]->list,
&ieee->network_free_list);
}
/*
 * ndo_change_mtu handler: accept any MTU between 68 (the historical
 * minimum IP datagram size an interface must support) and the 802.11
 * data payload limit, rejecting everything else with -EINVAL.
 */
int libipw_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > LIBIPW_DATA_LEN)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(libipw_change_mtu);
/*
 * Allocate a net_device with an embedded libipw_device plus
 * @sizeof_priv bytes of driver-private data.  Unless @monitor is set,
 * a cfg80211 wiphy is also created and partially filled in (the rest —
 * MAC address, channels, parent device — is left for the driver's
 * ndo_init).  Returns the net_device on success or NULL on failure,
 * with all partially acquired resources released.
 */
struct net_device *alloc_libipw(int sizeof_priv, int monitor)
{
struct libipw_device *ieee;
struct net_device *dev;
int err;
LIBIPW_DEBUG_INFO("Initializing...\n");
dev = alloc_etherdev(sizeof(struct libipw_device) + sizeof_priv);
if (!dev)
goto failed;
ieee = netdev_priv(dev);
ieee->dev = dev;
if (!monitor) {
ieee->wdev.wiphy = wiphy_new(&libipw_config_ops, 0);
if (!ieee->wdev.wiphy) {
LIBIPW_ERROR("Unable to allocate wiphy.\n");
goto failed_free_netdev;
}
ieee->dev->ieee80211_ptr = &ieee->wdev;
ieee->wdev.iftype = NL80211_IFTYPE_STATION;
/* Fill-out wiphy structure bits we know... Not enough info
here to call set_wiphy_dev or set MAC address or channel info
-- have to do that in ->ndo_init... */
ieee->wdev.wiphy->privid = libipw_wiphy_privid;
ieee->wdev.wiphy->max_scan_ssids = 1;
ieee->wdev.wiphy->max_scan_ie_len = 0;
ieee->wdev.wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
| BIT(NL80211_IFTYPE_ADHOC);
}
err = libipw_networks_allocate(ieee);
if (err) {
LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err);
goto failed_free_wiphy;
}
libipw_networks_initialize(ieee);
/* Default fragmentation threshold is maximum payload size */
ieee->fts = DEFAULT_FTS;
ieee->rts = DEFAULT_FTS;
ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
ieee->open_wep = 1;
/* Default to enabling full open WEP with host based encrypt/decrypt */
ieee->host_encrypt = 1;
ieee->host_decrypt = 1;
ieee->host_mc_decrypt = 1;
/* Host fragmentation in Open mode. Default is enabled.
* Note: host fragmentation is always enabled if host encryption
* is enabled. For cards can do hardware encryption, they must do
* hardware fragmentation as well. So we don't need a variable
* like host_enc_frag. */
ieee->host_open_frag = 1;
ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
spin_lock_init(&ieee->lock);
lib80211_crypt_info_init(&ieee->crypt_info, dev->name, &ieee->lock);
ieee->wpa_enabled = 0;
ieee->drop_unencrypted = 0;
ieee->privacy_invoked = 0;
return dev;
/* Error unwinding: release in reverse order of acquisition. */
failed_free_wiphy:
if (!monitor)
wiphy_free(ieee->wdev.wiphy);
failed_free_netdev:
free_netdev(dev);
failed:
return NULL;
}
EXPORT_SYMBOL(alloc_libipw);
/*
 * Counterpart of alloc_libipw(): release crypt state, the cached network
 * descriptors, the wiphy (unless @monitor, matching the allocation
 * path), and finally the net_device itself.
 */
void free_libipw(struct net_device *dev, int monitor)
{
struct libipw_device *ieee = netdev_priv(dev);
lib80211_crypt_info_free(&ieee->crypt_info);
libipw_networks_free(ieee);
/* free cfg80211 resources */
if (!monitor)
wiphy_free(ieee->wdev.wiphy);
free_netdev(dev);
}
EXPORT_SYMBOL(free_libipw);
#ifdef CONFIG_LIBIPW_DEBUG
/* Boot-time debug mask (module parameter), copied into
 * libipw_debug_level at init and adjustable at runtime via procfs. */
static int debug = 0;
u32 libipw_debug_level = 0;
EXPORT_SYMBOL_GPL(libipw_debug_level);
/* /proc/net/ieee80211 directory, created in libipw_init(). */
static struct proc_dir_entry *libipw_proc = NULL;
/* seq_file show: print the current debug mask in 0x%08X form. */
static int debug_level_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "0x%08X\n", libipw_debug_level);
return 0;
}
/* Open handler: single_open() since the whole output fits one show call. */
static int debug_level_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, debug_level_proc_show, NULL);
}
/*
 * Parse a value written to the debug_level proc file and store it in
 * libipw_debug_level.  "%li" accepts decimal, octal, and hex input;
 * anything longer than "0x00000000\n" is truncated to its first
 * 10 bytes.  Unparseable input is logged and ignored.
 */
static ssize_t debug_level_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	char buf[] = "0x00000000\n";
	size_t len = min(sizeof(buf) - 1, count);
	unsigned long val;

	if (copy_from_user(buf, buffer, len))
		return -EFAULT;	/* report the fault rather than faking success */
	buf[len] = 0;
	if (sscanf(buf, "%li", &val) != 1)
		printk(KERN_INFO DRV_NAME
		       ": %s is not in hex or decimal form.\n", buf);
	else
		libipw_debug_level = val;

	return strnlen(buf, len);
}
/* File operations for the debug_level proc entry: seq_file read path
 * plus the custom write handler above. */
static const struct file_operations debug_level_proc_fops = {
.owner = THIS_MODULE,
.open = debug_level_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = debug_level_proc_write,
};
/*
 * Module init: when debugging is compiled in, create the
 * /proc/net/ieee80211 directory and its debug_level entry (rolling the
 * directory back if the entry cannot be created), then announce the
 * stack.  Returns 0 on success or -EIO on proc setup failure.
 */
static int __init libipw_init(void)
{
#ifdef CONFIG_LIBIPW_DEBUG
struct proc_dir_entry *e;
libipw_debug_level = debug;
libipw_proc = proc_mkdir(DRV_PROCNAME, init_net.proc_net);
if (libipw_proc == NULL) {
LIBIPW_ERROR("Unable to create " DRV_PROCNAME
" proc directory\n");
return -EIO;
}
e = proc_create("debug_level", S_IRUGO | S_IWUSR, libipw_proc,
&debug_level_proc_fops);
if (!e) {
remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
libipw_proc = NULL;
return -EIO;
}
#endif /* CONFIG_LIBIPW_DEBUG */
printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
return 0;
}
/* Module exit: remove the proc entries created in libipw_init(), if any. */
static void __exit libipw_exit(void)
{
#ifdef CONFIG_LIBIPW_DEBUG
if (libipw_proc) {
remove_proc_entry("debug_level", libipw_proc);
remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
libipw_proc = NULL;
}
#endif /* CONFIG_LIBIPW_DEBUG */
}
#ifdef CONFIG_LIBIPW_DEBUG
#include <linux/moduleparam.h>
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
#endif /* CONFIG_LIBIPW_DEBUG */
module_exit(libipw_exit);
module_init(libipw_init);
| gpl-2.0 |
andreamerello/linux-stm32 | arch/sh/kernel/cpu/sh2a/setup-sh7206.c | 1750 | 8462 | /*
* SH7206 Setup
*
* Copyright (C) 2006 Yoshinori Sato
* Copyright (C) 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
/* Symbolic names for every SH7206 interrupt source; used as indices by
 * the INTC vector, group, priority, and mask tables below. */
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
ADC_ADI0, ADC_ADI1,
DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
MTU3_ABCD, MTU4_ABCD, MTU5, POE2_12, MTU3S_ABCD, MTU4S_ABCD, MTU5S,
IIC3,
CMT0, CMT1, BSC, WDT,
MTU2_TCI3V, MTU2_TCI4V, MTU2S_TCI3V, MTU2S_TCI4V,
POE2_OEI3,
SCIF0, SCIF1, SCIF2, SCIF3,
/* interrupt groups */
PINT,
};
/*
 * Vector table: maps each interrupt source to its hardware vector
 * number(s).  Sources listed more than once (DMACn, MTUn, SCIFn, IIC3)
 * own several consecutive vectors that all funnel into one logical IRQ.
 */
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
INTC_IRQ(ADC_ADI0, 92), INTC_IRQ(ADC_ADI1, 96),
INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
INTC_IRQ(CMT0, 140), INTC_IRQ(CMT1, 144),
INTC_IRQ(BSC, 148), INTC_IRQ(WDT, 152),
INTC_IRQ(MTU0_ABCD, 156), INTC_IRQ(MTU0_ABCD, 157),
INTC_IRQ(MTU0_ABCD, 158), INTC_IRQ(MTU0_ABCD, 159),
INTC_IRQ(MTU0_VEF, 160), INTC_IRQ(MTU0_VEF, 161),
INTC_IRQ(MTU0_VEF, 162),
INTC_IRQ(MTU1_AB, 164), INTC_IRQ(MTU1_AB, 165),
INTC_IRQ(MTU1_VU, 168), INTC_IRQ(MTU1_VU, 169),
INTC_IRQ(MTU2_AB, 172), INTC_IRQ(MTU2_AB, 173),
INTC_IRQ(MTU2_VU, 176), INTC_IRQ(MTU2_VU, 177),
INTC_IRQ(MTU3_ABCD, 180), INTC_IRQ(MTU3_ABCD, 181),
INTC_IRQ(MTU3_ABCD, 182), INTC_IRQ(MTU3_ABCD, 183),
INTC_IRQ(MTU2_TCI3V, 184),
INTC_IRQ(MTU4_ABCD, 188), INTC_IRQ(MTU4_ABCD, 189),
INTC_IRQ(MTU4_ABCD, 190), INTC_IRQ(MTU4_ABCD, 191),
INTC_IRQ(MTU2_TCI4V, 192),
INTC_IRQ(MTU5, 196), INTC_IRQ(MTU5, 197),
INTC_IRQ(MTU5, 198),
INTC_IRQ(POE2_12, 200), INTC_IRQ(POE2_12, 201),
INTC_IRQ(MTU3S_ABCD, 204), INTC_IRQ(MTU3S_ABCD, 205),
INTC_IRQ(MTU3S_ABCD, 206), INTC_IRQ(MTU3S_ABCD, 207),
INTC_IRQ(MTU2S_TCI3V, 208),
INTC_IRQ(MTU4S_ABCD, 212), INTC_IRQ(MTU4S_ABCD, 213),
INTC_IRQ(MTU4S_ABCD, 214), INTC_IRQ(MTU4S_ABCD, 215),
INTC_IRQ(MTU2S_TCI4V, 216),
INTC_IRQ(MTU5S, 220), INTC_IRQ(MTU5S, 221),
INTC_IRQ(MTU5S, 222),
INTC_IRQ(POE2_OEI3, 224),
INTC_IRQ(IIC3, 228), INTC_IRQ(IIC3, 229),
INTC_IRQ(IIC3, 230), INTC_IRQ(IIC3, 231),
INTC_IRQ(IIC3, 232),
INTC_IRQ(SCIF0, 240), INTC_IRQ(SCIF0, 241),
INTC_IRQ(SCIF0, 242), INTC_IRQ(SCIF0, 243),
INTC_IRQ(SCIF1, 244), INTC_IRQ(SCIF1, 245),
INTC_IRQ(SCIF1, 246), INTC_IRQ(SCIF1, 247),
INTC_IRQ(SCIF2, 248), INTC_IRQ(SCIF2, 249),
INTC_IRQ(SCIF2, 250), INTC_IRQ(SCIF2, 251),
INTC_IRQ(SCIF3, 252), INTC_IRQ(SCIF3, 253),
INTC_IRQ(SCIF3, 254), INTC_IRQ(SCIF3, 255),
};
/* The eight PINT pins are controlled as one group. */
static struct intc_group groups[] __initdata = {
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
PINT4, PINT5, PINT6, PINT7),
};
/* Interrupt priority registers: each 16-bit register holds four 4-bit
 * priority fields, listed MSB-first. */
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, ADC_ADI0, ADC_ADI1 } },
{ 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
{ 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
{ 0xfffe0c04, 0, 16, 4, /* IPR08 */ { CMT0, CMT1, BSC, WDT } },
{ 0xfffe0c06, 0, 16, 4, /* IPR09 */ { MTU0_ABCD, MTU0_VEF,
MTU1_AB, MTU1_VU } },
{ 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU2_AB, MTU2_VU,
MTU3_ABCD, MTU2_TCI3V } },
{ 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU4_ABCD, MTU2_TCI4V,
MTU5, POE2_12 } },
{ 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { MTU3S_ABCD, MTU2S_TCI3V,
MTU4S_ABCD, MTU2S_TCI4V } },
{ 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU5S, POE2_OEI3, IIC3, 0 } },
{ 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF0, SCIF1, SCIF2, SCIF3 } },
};
/* PINT enable register: one mask bit per pin interrupt. */
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfffe0808, 0, 16, /* PINTER */
{ 0, 0, 0, 0, 0, 0, 0, 0,
PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
};
/* Bundle the tables above into a controller description for
 * register_intc_controller(). */
static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
mask_registers, prio_registers, NULL);
/*
 * Four identical SCIF serial ports.  Each has a 0x100-byte register
 * window and a base interrupt vector (240/244/248/252); the per-port
 * blocks below differ only in base address, vector, and device id.
 */
static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xfffe8000, 0x100),
DEFINE_RES_IRQ(240),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xfffe8800, 0x100),
DEFINE_RES_IRQ(244),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xfffe9000, 0x100),
DEFINE_RES_IRQ(248),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xfffe9800, 0x100),
DEFINE_RES_IRQ(252),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
/* 16-bit CMT compare-match timer: both channels enabled (mask bits 0-1),
 * one IRQ per channel. */
static struct sh_timer_config cmt_platform_data = {
.channels_mask = 3,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0xfffec000, 0x10),
DEFINE_RES_IRQ(140),
DEFINE_RES_IRQ(144),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-16",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
/* MTU2 multi-function timer: TGIxA vectors for channels 0-2. */
static struct resource mtu2_resources[] = {
DEFINE_RES_MEM(0xfffe4000, 0x400),
DEFINE_RES_IRQ_NAMED(156, "tgi0a"),
DEFINE_RES_IRQ_NAMED(164, "tgi1a"),
DEFINE_RES_IRQ_NAMED(180, "tgi2a"),
};
static struct platform_device mtu2_device = {
.name = "sh-mtu2s",
.id = -1,
.resource = mtu2_resources,
.num_resources = ARRAY_SIZE(mtu2_resources),
};
/* All on-chip peripherals registered during normal boot. */
static struct platform_device *sh7206_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&cmt_device,
&mtu2_device,
};
/* arch_initcall: hand the peripheral list to the platform bus. */
static int __init sh7206_devices_setup(void)
{
return platform_add_devices(sh7206_devices,
ARRAY_SIZE(sh7206_devices));
}
arch_initcall(sh7206_devices_setup);
/* Register the SH7206 interrupt controller described by intc_desc. */
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
/* Devices also made available before the platform bus is up (early
 * serial console and clocksource); same list as sh7206_devices. */
static struct platform_device *sh7206_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&cmt_device,
&mtu2_device,
};
/* Standby control registers; clearing a module's bit here releases it
 * from module standby so its clock runs (NOTE(review): bit meanings
 * taken from the existing enable comments below — confirm against the
 * SH7206 hardware manual). */
#define STBCR3 0xfffe0408
#define STBCR4 0xfffe040c
/* Called very early in boot: ungate the timer clocks, then register the
 * early platform devices. */
void __init plat_early_device_setup(void)
{
/* enable CMT clock */
__raw_writeb(__raw_readb(STBCR4) & ~0x04, STBCR4);
/* enable MTU2 clock */
__raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
early_platform_add_devices(sh7206_early_devices,
ARRAY_SIZE(sh7206_early_devices));
}
| gpl-2.0 |
Rafael-Cirilo/linux | drivers/net/irda/ksdazzle-sir.c | 2006 | 23404 | /*****************************************************************************
*
* Filename: ksdazzle.c
* Version: 0.1.2
* Description: Irda KingSun Dazzle USB Dongle
* Status: Experimental
* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
*
* Based on stir4200, mcs7780, kingsun-sir drivers.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*****************************************************************************/
/*
* Following is my most current (2007-07-26) understanding of how the Kingsun
* 07D0:4100 dongle (sometimes known as the MA-660) is supposed to work. This
* information was deduced by examining the USB traffic captured with USBSnoopy
* from the WinXP driver. Feel free to update here as more of the dongle is
* known.
*
* General: This dongle exposes one interface with two interrupt endpoints, one
* IN and one OUT. In this regard, it is similar to what the Kingsun/Donshine
* dongle (07c0:4200) exposes. Traffic is raw and needs to be wrapped and
* unwrapped manually as in stir4200, kingsun-sir, and ks959-sir.
*
* Transmission: To transmit an IrDA frame, it is necessary to wrap it, then
* split it into multiple segments of up to 7 bytes each, and transmit each in
* sequence. It seems that sending a single big block (like kingsun-sir does)
* won't work with this dongle. Each segment needs to be prefixed with a value
* equal to (unsigned char)0xF8 + <number of bytes in segment>, inside a payload
* of exactly 8 bytes. For example, a segment of 1 byte gets prefixed by 0xF9,
* and one of 7 bytes gets prefixed by 0xFF. The bytes at the end of the
* payload, not considered by the prefix, are ignored (set to 0 by this
* implementation).
*
* Reception: To receive data, the driver must poll the dongle regularly (like
* kingsun-sir.c) with interrupt URBs. If data is available, it will be returned
* in payloads from 0 to 8 bytes long. When concatenated, these payloads form
* a raw IrDA stream that needs to be unwrapped as in stir4200 and kingsun-sir
*
* Speed change: To change the speed of the dongle, the driver prepares a
* control URB with the following as a setup packet:
* bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
* bRequest 0x09
* wValue 0x0200
* wIndex 0x0001
* wLength 0x0008 (length of the payload)
* The payload is a 8-byte record, apparently identical to the one used in
* drivers/usb/serial/cypress_m8.c to change speed:
* __u32 baudSpeed;
* unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits
* unsigned int : 1;
* unsigned int stopBits : 1;
* unsigned int parityEnable : 1;
* unsigned int parityType : 1;
* unsigned int : 1;
* unsigned int reset : 1;
* unsigned char reserved[3]; // set to 0
*
* For now only SIR speeds have been observed with this dongle. Therefore,
* nothing is known on what changes (if any) must be done to frame wrapping /
* unwrapping for higher than SIR speeds. This driver assumes no change is
* necessary and announces support for all the way to 115200 bps.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/crc32.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/crc.h>
#define KSDAZZLE_VENDOR_ID 0x07d0
#define KSDAZZLE_PRODUCT_ID 0x4100
/* These are the currently known USB ids */
static struct usb_device_id dongles[] = {
/* KingSun Co,Ltd IrDA/USB Bridge */
{USB_DEVICE(KSDAZZLE_VENDOR_ID, KSDAZZLE_PRODUCT_ID)},
{}
};
MODULE_DEVICE_TABLE(usb, dongles);
#define KINGSUN_MTT 0x07
#define KINGSUN_REQ_RECV 0x01
#define KINGSUN_REQ_SEND 0x09
#define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */
#define KINGSUN_RCV_MAX 2048 /* Max transfer we can receive */
/* 8-byte payload of the speed-change control request (see the "Speed
 * change" notes in the header comment). */
struct ksdazzle_speedparams {
__le32 baudrate; /* baud rate, little endian */
__u8 flags;
__u8 reserved[3];
} __packed;
/* Bit values for ksdazzle_speedparams.flags. */
#define KS_DATA_5_BITS 0x00
#define KS_DATA_6_BITS 0x01
#define KS_DATA_7_BITS 0x02
#define KS_DATA_8_BITS 0x03
#define KS_STOP_BITS_1 0x00
#define KS_STOP_BITS_2 0x08
#define KS_PAR_DISABLE 0x00
#define KS_PAR_EVEN 0x10
#define KS_PAR_ODD 0x30
#define KS_RESET 0x80
#define KINGSUN_EP_IN 0
#define KINGSUN_EP_OUT 1
/* Per-dongle state. */
struct ksdazzle_cb {
struct usb_device *usbdev; /* init: probe_irda */
struct net_device *netdev; /* network layer */
struct irlap_cb *irlap; /* The link layer we are binded to */
struct qos_info qos;
struct urb *tx_urb;
__u8 *tx_buf_clear; /* wrapped IrDA frame awaiting fragmentation */
unsigned int tx_buf_clear_used; /* bytes still to transmit */
unsigned int tx_buf_clear_sent; /* bytes in the fragment in flight */
__u8 tx_payload[8]; /* interrupt payload: prefix + up to 7 data bytes */
struct urb *rx_urb;
__u8 *rx_buf;
iobuff_t rx_unwrap_buff;
struct usb_ctrlrequest *speed_setuprequest;
struct urb *speed_urb;
struct ksdazzle_speedparams speedparams;
unsigned int new_speed; /* speed to switch to after current tx drains */
__u8 ep_in;
__u8 ep_out;
spinlock_t lock;
int receiving; /* nonzero while unwrap state machine is inside a frame */
};
/* Completion callback for the speed-change control URB: nothing to do
 * on success; log asynchronous failures. */
static void ksdazzle_speed_irq(struct urb *urb)
{
/* unlink, shutdown, unplug, other nasties */
if (urb->status != 0)
dev_err(&urb->dev->dev,
"ksdazzle_speed_irq: urb asynchronously failed - %d\n",
urb->status);
}
/*
 * Submit the control request that switches the dongle's baud rate.
 * @speed must be one of the entries in supported_speeds; otherwise
 * -EOPNOTSUPP is returned.  The request completes asynchronously in
 * ksdazzle_speed_irq().  Returns the usb_submit_urb() result, or
 * -ENOMEM if the speed URB/setup packet were never allocated.
 */
static int ksdazzle_change_speed(struct ksdazzle_cb *kingsun, unsigned speed)
{
static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400,
57600, 115200, 576000, 1152000, 4000000, 0
};
int err;
unsigned int i;
if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL)
return -ENOMEM;
/* Check that requested speed is among the supported ones */
for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ;
if (supported_speeds[i] == 0)
return -EOPNOTSUPP;
memset(&(kingsun->speedparams), 0, sizeof(struct ksdazzle_speedparams));
kingsun->speedparams.baudrate = cpu_to_le32(speed);
kingsun->speedparams.flags = KS_DATA_8_BITS;
/* speed_setuprequest pre-filled in ksdazzle_probe */
usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev,
usb_sndctrlpipe(kingsun->usbdev, 0),
(unsigned char *)kingsun->speed_setuprequest,
&(kingsun->speedparams),
sizeof(struct ksdazzle_speedparams),
ksdazzle_speed_irq, kingsun);
kingsun->speed_urb->status = 0;
err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC);
return err;
}
/* Submit one fragment of an IrDA frame to the dongle */
static void ksdazzle_send_irq(struct urb *urb);
/*
 * Take up to 7 bytes from the head of tx_buf_clear, build the 8-byte
 * interrupt payload (prefix 0xF8 + length, then the data; trailing
 * bytes zeroed), and submit it.  On success, tx_buf_clear_sent records
 * how much the completion handler should consume from the buffer.
 */
static int ksdazzle_submit_tx_fragment(struct ksdazzle_cb *kingsun)
{
unsigned int wraplen;
int ret;
/* We can send at most 7 bytes of payload at a time */
wraplen = 7;
if (wraplen > kingsun->tx_buf_clear_used)
wraplen = kingsun->tx_buf_clear_used;
/* Prepare payload prefix with used length */
memset(kingsun->tx_payload, 0, 8);
kingsun->tx_payload[0] = (unsigned char)0xf8 + wraplen;
memcpy(kingsun->tx_payload + 1, kingsun->tx_buf_clear, wraplen);
usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev,
usb_sndintpipe(kingsun->usbdev, kingsun->ep_out),
kingsun->tx_payload, 8, ksdazzle_send_irq, kingsun, 1);
kingsun->tx_urb->status = 0;
ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC);
/* Remember how much data was sent, in order to update at callback */
kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0;
return ret;
}
/*
 * TX completion callback: slide the consumed fragment out of
 * tx_buf_clear and either submit the next fragment or, once the frame
 * has fully drained, apply any pending speed change and wake the
 * network queue.
 */
static void ksdazzle_send_irq(struct urb *urb)
{
struct ksdazzle_cb *kingsun = urb->context;
struct net_device *netdev = kingsun->netdev;
int ret = 0;
/* in process of stopping, just drop data */
if (!netif_running(kingsun->netdev)) {
dev_err(&kingsun->usbdev->dev,
"ksdazzle_send_irq: Network not running!\n");
return;
}
/* unlink, shutdown, unplug, other nasties */
if (urb->status != 0) {
dev_err(&kingsun->usbdev->dev,
"ksdazzle_send_irq: urb asynchronously failed - %d\n",
urb->status);
return;
}
if (kingsun->tx_buf_clear_used > 0) {
/* Update data remaining to be sent */
if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) {
memmove(kingsun->tx_buf_clear,
kingsun->tx_buf_clear +
kingsun->tx_buf_clear_sent,
kingsun->tx_buf_clear_used -
kingsun->tx_buf_clear_sent);
}
kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent;
kingsun->tx_buf_clear_sent = 0;
if (kingsun->tx_buf_clear_used > 0) {
/* There is more data to be sent */
if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) {
dev_err(&kingsun->usbdev->dev,
"ksdazzle_send_irq: failed tx_urb submit: %d\n",
ret);
switch (ret) {
case -ENODEV:
case -EPIPE:
break;
default:
netdev->stats.tx_errors++;
netif_start_queue(netdev);
}
}
} else {
/* All data sent, send next speed && wake network queue */
/* NOTE: new_speed is unsigned, so "!= -1" compares against the
 * wrapped sentinel value — intentional but worth knowing. */
if (kingsun->new_speed != -1 &&
cpu_to_le32(kingsun->new_speed) !=
kingsun->speedparams.baudrate)
ksdazzle_change_speed(kingsun,
kingsun->new_speed);
netif_wake_queue(netdev);
}
}
}
/*
 * Called from net/core when new frame is available.
 */
/*
 * ndo_start_xmit: stop the queue, async-wrap the skb into tx_buf_clear,
 * and kick off the first 8-byte fragment.  The queue is woken again by
 * ksdazzle_send_irq() once the whole frame has been pushed out.  Always
 * consumes the skb and returns NETDEV_TX_OK.
 */
static netdev_tx_t ksdazzle_hard_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ksdazzle_cb *kingsun;
unsigned int wraplen;
int ret = 0;
netif_stop_queue(netdev);
/* the IRDA wrapping routines don't deal with non linear skb */
SKB_LINEAR_ASSERT(skb);
kingsun = netdev_priv(netdev);
spin_lock(&kingsun->lock);
kingsun->new_speed = irda_get_next_speed(skb);
/* Append data to the end of whatever data remains to be transmitted */
wraplen =
async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE);
kingsun->tx_buf_clear_used = wraplen;
if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) {
dev_err(&kingsun->usbdev->dev,
"ksdazzle_hard_xmit: failed tx_urb submit: %d\n", ret);
switch (ret) {
case -ENODEV:
case -EPIPE:
break;
default:
netdev->stats.tx_errors++;
netif_start_queue(netdev);
}
} else {
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += skb->len;
}
dev_kfree_skb(skb);
spin_unlock(&kingsun->lock);
return NETDEV_TX_OK;
}
/*
 * RX completion callback: feed each received byte through the IrDA
 * async unwrap state machine (which delivers completed frames to the
 * stack), then resubmit the same URB to keep polling the dongle.
 */
static void ksdazzle_rcv_irq(struct urb *urb)
{
struct ksdazzle_cb *kingsun = urb->context;
struct net_device *netdev = kingsun->netdev;
/* in process of stopping, just drop data */
if (!netif_running(netdev)) {
kingsun->receiving = 0;
return;
}
/* unlink, shutdown, unplug, other nasties */
if (urb->status != 0) {
dev_err(&kingsun->usbdev->dev,
"ksdazzle_rcv_irq: urb asynchronously failed - %d\n",
urb->status);
kingsun->receiving = 0;
return;
}
if (urb->actual_length > 0) {
__u8 *bytes = urb->transfer_buffer;
unsigned int i;
for (i = 0; i < urb->actual_length; i++) {
async_unwrap_char(netdev, &netdev->stats,
&kingsun->rx_unwrap_buff, bytes[i]);
}
/* Track whether we are mid-frame so other paths know a
 * reception is in progress. */
kingsun->receiving =
(kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
}
/* This urb has already been filled in ksdazzle_net_open. It is assumed that
urb keeps the pointer to the payload buffer.
*/
urb->status = 0;
usb_submit_urb(urb, GFP_ATOMIC);
}
/*
* Function ksdazzle_net_open (dev)
*
* Network device is taken up. Usually this is done by "ifconfig irda0 up"
*/
static int ksdazzle_net_open(struct net_device *netdev)
{
	struct ksdazzle_cb *kingsun = netdev_priv(netdev);
	int err = -ENOMEM;
	char hwname[16];

	/* At this point, urbs are NULL, and skb is NULL (see ksdazzle_probe) */
	kingsun->receiving = 0;

	/* Initialize for SIR to copy data directly into skb. */
	kingsun->rx_unwrap_buff.in_frame = FALSE;
	kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
	kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU;
	kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
	if (!kingsun->rx_unwrap_buff.skb)
		goto free_mem;

	skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
	kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;

	kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->rx_urb)
		goto free_mem;

	kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->tx_urb)
		goto free_mem;

	kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->speed_urb)
		goto free_mem;

	/* Initialize speed for dongle */
	kingsun->new_speed = 9600;
	err = ksdazzle_change_speed(kingsun, 9600);
	if (err < 0)
		goto free_mem;

	/*
	 * Now that everything should be initialized properly,
	 * Open new IrLAP layer instance to take care of us...
	 */
	sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
	kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
	if (!kingsun->irlap) {
		err = -ENOMEM;
		dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
		goto free_mem;
	}

	/* Start reception: interrupt urb on ep_in with 1 ms polling
	 * interval, completing in ksdazzle_rcv_irq which resubmits it. */
	usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
			 usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
			 kingsun->rx_buf, KINGSUN_RCV_MAX, ksdazzle_rcv_irq,
			 kingsun, 1);
	kingsun->rx_urb->status = 0;
	err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
	if (err) {
		dev_err(&kingsun->usbdev->dev, "first urb-submit failed: %d\n", err);
		goto close_irlap;
	}

	netif_start_queue(netdev);

	/* Situation at this point:
	   - all work buffers allocated
	   - urbs allocated and ready to fill
	   - max rx packet known (in max_rx)
	   - unwrap state machine initialized, in state outside of any frame
	   - receive request in progress
	   - IrLAP layer started, about to hand over packets to send
	 */
	return 0;

 close_irlap:
	irlap_close(kingsun->irlap);
 free_mem:
	/* usb_free_urb() and kfree_skb() cope with partial setups; every
	 * pointer is reset to NULL so a later open starts clean. */
	usb_free_urb(kingsun->speed_urb);
	kingsun->speed_urb = NULL;
	usb_free_urb(kingsun->tx_urb);
	kingsun->tx_urb = NULL;
	usb_free_urb(kingsun->rx_urb);
	kingsun->rx_urb = NULL;
	if (kingsun->rx_unwrap_buff.skb) {
		kfree_skb(kingsun->rx_unwrap_buff.skb);
		kingsun->rx_unwrap_buff.skb = NULL;
		kingsun->rx_unwrap_buff.head = NULL;
	}
	return err;
}
/*
* Function ksdazzle_net_close (dev)
*
* Network device is taken down. Usually this is done by
* "ifconfig irda0 down"
*/
static int ksdazzle_net_close(struct net_device *netdev)
{
	struct ksdazzle_cb *kingsun = netdev_priv(netdev);

	/* Stop transmit processing */
	netif_stop_queue(netdev);

	/* Mop up receive && transmit urb's */
	usb_kill_urb(kingsun->tx_urb);
	usb_free_urb(kingsun->tx_urb);
	kingsun->tx_urb = NULL;

	usb_kill_urb(kingsun->speed_urb);
	usb_free_urb(kingsun->speed_urb);
	kingsun->speed_urb = NULL;

	usb_kill_urb(kingsun->rx_urb);
	usb_free_urb(kingsun->rx_urb);
	kingsun->rx_urb = NULL;

	/* Release the receive-side unwrap skb and reset the state machine
	 * so a subsequent open starts from a clean slate. */
	kfree_skb(kingsun->rx_unwrap_buff.skb);
	kingsun->rx_unwrap_buff.skb = NULL;
	kingsun->rx_unwrap_buff.head = NULL;
	kingsun->rx_unwrap_buff.in_frame = FALSE;
	kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
	kingsun->receiving = 0;

	/* Stop and remove instance of IrLAP */
	irlap_close(kingsun->irlap);
	kingsun->irlap = NULL;

	return 0;
}
/*
* IOCTLs : Extra out-of-band network commands...
*/
/*
 * Handle the IrDA-specific out-of-band ioctls: set speed, mark media
 * busy, and query the (approximate) receiving flag.
 */
static int ksdazzle_net_ioctl(struct net_device *netdev, struct ifreq *rq,
			      int cmd)
{
	struct if_irda_req *wrq = (struct if_irda_req *)rq;
	struct ksdazzle_cb *kingsun = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSBANDWIDTH:	/* Set bandwidth */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* Only touch the hardware if it is still present */
		if (netif_device_present(kingsun->netdev))
			return ksdazzle_change_speed(kingsun,
						     wrq->ifr_baudrate);
		return 0;

	case SIOCSMEDIABUSY:	/* Set media busy */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* Only inform the IrDA stack if it is still running */
		if (netif_running(kingsun->netdev))
			irda_device_set_media_busy(kingsun->netdev, TRUE);
		return 0;

	case SIOCGRECEIVING:
		/* Only approximately true */
		wrq->ifr_receiving = kingsun->receiving;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
/* Network device operations. Statistics come straight from netdev->stats
 * (updated in the xmit/rcv paths), so no ndo_get_stats hook is needed. */
static const struct net_device_ops ksdazzle_ops = {
	.ndo_start_xmit = ksdazzle_hard_xmit,
	.ndo_open = ksdazzle_net_open,
	.ndo_stop = ksdazzle_net_close,
	.ndo_do_ioctl = ksdazzle_net_ioctl,
};
/*
* This routine is called by the USB subsystem for each new device
* in the system. We need to check if the device is ours, and in
* this case start handling it.
*/
static int ksdazzle_probe(struct usb_interface *intf,
			  const struct usb_device_id *id)
{
	struct usb_host_interface *interface;
	struct usb_endpoint_descriptor *endpoint;
	struct usb_device *dev = interface_to_usbdev(intf);
	struct ksdazzle_cb *kingsun = NULL;
	struct net_device *net = NULL;
	int ret = -ENOMEM;
	int pipe, maxp_in, maxp_out;
	__u8 ep_in;
	__u8 ep_out;

	/* Check that there really are two interrupt endpoints. Check based on the
	   one in drivers/usb/input/usbmouse.c
	 */
	interface = intf->cur_altsetting;
	if (interface->desc.bNumEndpoints != 2) {
		dev_err(&intf->dev, "ksdazzle: expected 2 endpoints, found %d\n",
			interface->desc.bNumEndpoints);
		return -ENODEV;
	}

	endpoint = &interface->endpoint[KINGSUN_EP_IN].desc;
	if (!usb_endpoint_is_int_in(endpoint)) {
		dev_err(&intf->dev,
			"ksdazzle: endpoint 0 is not interrupt IN\n");
		return -ENODEV;
	}

	ep_in = endpoint->bEndpointAddress;
	pipe = usb_rcvintpipe(dev, ep_in);
	maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
	if (maxp_in > 255 || maxp_in <= 1) {
		dev_err(&intf->dev,
			"ksdazzle: endpoint 0 has max packet size %d not in range [2..255]\n",
			maxp_in);
		return -ENODEV;
	}

	endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc;
	if (!usb_endpoint_is_int_out(endpoint)) {
		dev_err(&intf->dev,
			"ksdazzle: endpoint 1 is not interrupt OUT\n");
		return -ENODEV;
	}
	ep_out = endpoint->bEndpointAddress;
	pipe = usb_sndintpipe(dev, ep_out);
	/* NOTE(review): maxp_out is read but never range-checked or used
	 * below — confirm whether it is needed at all. */
	maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe));

	/* Allocate network device container. */
	net = alloc_irdadev(sizeof(*kingsun));
	if (!net)
		goto err_out1;

	SET_NETDEV_DEV(net, &intf->dev);
	kingsun = netdev_priv(net);
	kingsun->netdev = net;
	kingsun->usbdev = dev;
	kingsun->ep_in = ep_in;
	kingsun->ep_out = ep_out;
	/* urbs and work buffers stay NULL until ksdazzle_net_open */
	kingsun->irlap = NULL;
	kingsun->tx_urb = NULL;
	kingsun->tx_buf_clear = NULL;
	kingsun->tx_buf_clear_used = 0;
	kingsun->tx_buf_clear_sent = 0;

	kingsun->rx_urb = NULL;
	kingsun->rx_buf = NULL;
	kingsun->rx_unwrap_buff.in_frame = FALSE;
	kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
	kingsun->rx_unwrap_buff.skb = NULL;
	kingsun->receiving = 0;
	spin_lock_init(&kingsun->lock);

	kingsun->speed_setuprequest = NULL;
	kingsun->speed_urb = NULL;
	kingsun->speedparams.baudrate = 0;

	/* Allocate input buffer */
	kingsun->rx_buf = kmalloc(KINGSUN_RCV_MAX, GFP_KERNEL);
	if (!kingsun->rx_buf)
		goto free_mem;

	/* Allocate output buffer */
	kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL);
	if (!kingsun->tx_buf_clear)
		goto free_mem;

	/* Allocate and initialize speed setup packet */
	kingsun->speed_setuprequest =
	    kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
	if (!kingsun->speed_setuprequest)
		goto free_mem;
	kingsun->speed_setuprequest->bRequestType =
	    USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
	kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND;
	kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200);
	kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001);
	kingsun->speed_setuprequest->wLength =
	    cpu_to_le16(sizeof(struct ksdazzle_speedparams));

	printk(KERN_INFO "KingSun/Dazzle IRDA/USB found at address %d, "
	       "Vendor: %x, Product: %x\n",
	       dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
	       le16_to_cpu(dev->descriptor.idProduct));

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&kingsun->qos);

	/* Baud rates known to be supported. Please uncomment if devices (other
	   than a SonyEriccson K300 phone) can be shown to support higher speeds
	   with this dongle.
	 */
	kingsun->qos.baud_rate.bits =
	    IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
	kingsun->qos.min_turn_time.bits &= KINGSUN_MTT;
	irda_qos_bits_to_value(&kingsun->qos);

	/* Override the network functions we need to use */
	net->netdev_ops = &ksdazzle_ops;

	ret = register_netdev(net);
	if (ret != 0)
		goto free_mem;

	dev_info(&net->dev, "IrDA: Registered KingSun/Dazzle device %s\n",
		 net->name);

	usb_set_intfdata(intf, kingsun);

	/* Situation at this point:
	   - all work buffers allocated
	   - setup requests pre-filled
	   - urbs not allocated, set to NULL
	   - max rx packet known (is KINGSUN_FIFO_SIZE)
	   - unwrap state machine (partially) initialized, but skb == NULL
	 */
	return 0;

 free_mem:
	/* kfree(NULL) is a no-op, so partial allocations unwind safely */
	kfree(kingsun->speed_setuprequest);
	kfree(kingsun->tx_buf_clear);
	kfree(kingsun->rx_buf);
	free_netdev(net);
 err_out1:
	return ret;
}
/*
* The current device is removed, the USB layer tell us to shut it down...
*/
static void ksdazzle_disconnect(struct usb_interface *intf)
{
	struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);

	if (!kingsun)
		return;

	/* Unregister first so the net core stops calling into us */
	unregister_netdev(kingsun->netdev);

	/* Mop up receive && transmit urb's. They are normally already
	 * freed by ksdazzle_net_close, but usb_kill_urb/usb_free_urb are
	 * NULL-safe, so this also covers a never-opened device. */
	usb_kill_urb(kingsun->speed_urb);
	usb_free_urb(kingsun->speed_urb);
	kingsun->speed_urb = NULL;

	usb_kill_urb(kingsun->tx_urb);
	usb_free_urb(kingsun->tx_urb);
	kingsun->tx_urb = NULL;

	usb_kill_urb(kingsun->rx_urb);
	usb_free_urb(kingsun->rx_urb);
	kingsun->rx_urb = NULL;

	kfree(kingsun->speed_setuprequest);
	kfree(kingsun->tx_buf_clear);
	kfree(kingsun->rx_buf);
	free_netdev(kingsun->netdev);

	usb_set_intfdata(intf, NULL);
}
#ifdef CONFIG_PM
/* USB suspend, so power off the transmitter/receiver */
/* USB suspend, so power off the transmitter/receiver */
static int ksdazzle_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);

	netif_device_detach(kingsun->netdev);

	/* Cancel any urb still in flight; all three are NULL when the
	 * interface was never opened, hence the checks. */
	if (kingsun->speed_urb)
		usb_kill_urb(kingsun->speed_urb);
	if (kingsun->tx_urb)
		usb_kill_urb(kingsun->tx_urb);
	if (kingsun->rx_urb)
		usb_kill_urb(kingsun->rx_urb);

	return 0;
}
/* Coming out of suspend, so reset hardware */
/* Coming out of suspend, so reset hardware */
static int ksdazzle_resume(struct usb_interface *intf)
{
	struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);

	/* The rx urb keeps the parameters filled in ksdazzle_net_open;
	 * if the device was opened, simply restart reception. */
	if (kingsun->rx_urb)
		usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);

	netif_device_attach(kingsun->netdev);

	return 0;
}
#endif
/*
* USB device callbacks
*/
/* USB driver glue. The "dongles" id table is defined earlier in the file
 * (not visible here) — presumably the KingSun/Dazzle VID/PID list. */
static struct usb_driver irda_driver = {
	.name = "ksdazzle-sir",
	.probe = ksdazzle_probe,
	.disconnect = ksdazzle_disconnect,
	.id_table = dongles,
#ifdef CONFIG_PM
	.suspend = ksdazzle_suspend,
	.resume = ksdazzle_resume,
#endif
};
/* Registers with the USB core and generates module init/exit. */
module_usb_driver(irda_driver);

MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun Dazzle");
MODULE_LICENSE("GPL");
| gpl-2.0 |
pershoot/android_kernel_samsung_p4 | drivers/media/dvb/dvb-usb/ce6230.c | 2774 | 8222 | /*
* DVB USB Linux driver for Intel CE6230 DVB-T USB2.0 receiver
*
* Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include "ce6230.h"
#include "zl10353.h"
#include "mxl5005s.h"
/* debug */
static int dvb_usb_ce6230_debug;
module_param_named(debug, dvb_usb_ce6230_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);

/* adapter-number bookkeeping for the dvb-usb core */
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* forward declaration: the initializer appears after the I2C code below */
static struct zl10353_config ce6230_zl10353_config;
/*
 * Execute one vendor control transfer described by *req. On reads the
 * returned payload is copied back into req->data. Returns 0 on success
 * or a negative errno.
 */
static int ce6230_rw_udev(struct usb_device *udev, struct req_t *req)
{
	int ret;
	unsigned int pipe;
	u8 request;
	u8 requesttype;
	u16 value;
	u16 index;
	u8 *buf;

	request = req->cmd;
	value = req->value;
	index = req->index;

	/* transfer direction is implied by the command */
	switch (req->cmd) {
	case I2C_READ:
	case DEMOD_READ:
	case REG_READ:
		requesttype = (USB_TYPE_VENDOR | USB_DIR_IN);
		break;
	case I2C_WRITE:
	case DEMOD_WRITE:
	case REG_WRITE:
		requesttype = (USB_TYPE_VENDOR | USB_DIR_OUT);
		break;
	default:
		err("unknown command:%02x", req->cmd);
		ret = -EPERM;
		goto error;
	}

	/* Bounce buffer for usb_control_msg() — presumably because
	 * req->data may not be suitable for DMA (e.g. stack memory);
	 * confirm against the callers. */
	buf = kmalloc(req->data_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto error;
	}

	if (requesttype == (USB_TYPE_VENDOR | USB_DIR_OUT)) {
		/* write */
		memcpy(buf, req->data, req->data_len);
		pipe = usb_sndctrlpipe(udev, 0);
	} else {
		/* read */
		pipe = usb_rcvctrlpipe(udev, 0);
	}

	msleep(1); /* avoid I2C errors */

	ret = usb_control_msg(udev, pipe, request, requesttype, value, index,
			      buf, req->data_len, CE6230_USB_TIMEOUT);

	ce6230_debug_dump(request, requesttype, value, index, buf,
			  req->data_len, deb_xfer);

	if (ret < 0)
		deb_info("%s: usb_control_msg failed:%d\n", __func__, ret);
	else
		ret = 0;

	/* read request, copy returned data to return buf */
	if (!ret && requesttype == (USB_TYPE_VENDOR | USB_DIR_IN))
		memcpy(req->data, buf, req->data_len);

	kfree(buf);
error:
	return ret;
}
/* Thin wrapper: route a control request to the device's usb_device. */
static int ce6230_ctrl_msg(struct dvb_usb_device *d, struct req_t *req)
{
	struct usb_device *udev = d->udev;

	return ce6230_rw_udev(udev, req);
}
/* I2C */
static int ce6230_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int i = 0;
struct req_t req;
int ret = 0;
memset(&req, 0, sizeof(req));
if (num > 2)
return -EINVAL;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
while (i < num) {
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
if (msg[i].addr ==
ce6230_zl10353_config.demod_address) {
req.cmd = DEMOD_READ;
req.value = msg[i].addr >> 1;
req.index = msg[i].buf[0];
req.data_len = msg[i+1].len;
req.data = &msg[i+1].buf[0];
ret = ce6230_ctrl_msg(d, &req);
} else {
err("i2c read not implemented");
ret = -EPERM;
}
i += 2;
} else {
if (msg[i].addr ==
ce6230_zl10353_config.demod_address) {
req.cmd = DEMOD_WRITE;
req.value = msg[i].addr >> 1;
req.index = msg[i].buf[0];
req.data_len = msg[i].len-1;
req.data = &msg[i].buf[1];
ret = ce6230_ctrl_msg(d, &req);
} else {
req.cmd = I2C_WRITE;
req.value = 0x2000 + (msg[i].addr >> 1);
req.index = 0x0000;
req.data_len = msg[i].len;
req.data = &msg[i].buf[0];
ret = ce6230_ctrl_msg(d, &req);
}
i += 1;
}
if (ret)
break;
}
mutex_unlock(&d->i2c_mutex);
return ret ? ret : i;
}
/* Advertise plain I2C capability only. */
static u32 ce6230_i2c_func(struct i2c_adapter *adapter)
{
	u32 caps = I2C_FUNC_I2C;

	return caps;
}
/* I2C algorithm hooks handed to the dvb-usb core. */
static struct i2c_algorithm ce6230_i2c_algo = {
	.master_xfer = ce6230_i2c_xfer,
	.functionality = ce6230_i2c_func,
};
/* Callbacks for DVB USB */
/* Demodulator configuration. no_tuner/parallel_ts: external tuner with
 * parallel TS output; .if2 = 45700 corresponds to the mxl5005s
 * IF_FREQ_4570000HZ setting below. */
static struct zl10353_config ce6230_zl10353_config = {
	.demod_address = 0x1e,
	.adc_clock = 450000,
	.if2 = 45700,
	.no_tuner = 1,
	.parallel_ts = 1,
	.clock_ctl_1 = 0x34,
	.pll_0 = 0x0e,
};
/* Attach the zl10353 demodulator as this adapter's frontend. */
static int ce6230_zl10353_frontend_attach(struct dvb_usb_adapter *adap)
{
	deb_info("%s:\n", __func__);

	adap->fe = dvb_attach(zl10353_attach, &ce6230_zl10353_config,
			      &adap->dev->i2c_adap);

	return adap->fe ? 0 : -ENODEV;
}
/* Tuner configuration: mxl5005s, 4.57 MHz IF (matching the zl10353 if2
 * value above), 16 MHz crystal. */
static struct mxl5005s_config ce6230_mxl5003s_config = {
	.i2c_address = 0xc6,
	.if_freq = IF_FREQ_4570000HZ,
	.xtal_freq = CRYSTAL_FREQ_16000000HZ,
	.agc_mode = MXL_SINGLE_AGC,
	.tracking_filter = MXL_TF_DEFAULT,
	.rssi_enable = MXL_RSSI_ENABLE,
	.cap_select = MXL_CAP_SEL_ENABLE,
	.div_out = MXL_DIV_OUT_4,
	.clock_out = MXL_CLOCK_OUT_DISABLE,
	.output_load = MXL5005S_IF_OUTPUT_LOAD_200_OHM,
	.top = MXL5005S_TOP_25P2,
	.mod_mode = MXL_DIGITAL_MODE,
	.if_mode = MXL_ZERO_IF,
	.AgcMasterByte = 0x00,
};
/* Attach the mxl5005s tuner to the previously attached frontend. */
static int ce6230_mxl5003s_tuner_attach(struct dvb_usb_adapter *adap)
{
	struct dvb_frontend *fe;

	deb_info("%s:\n", __func__);

	fe = dvb_attach(mxl5005s_attach, adap->fe, &adap->dev->i2c_adap,
			&ce6230_mxl5003s_config);

	return fe ? 0 : -ENODEV;
}
/*
 * Power control via alternate setting on interface 1:
 * AlternateSetting 0 = idle, AlternateSetting 1 = streaming.
 */
static int ce6230_power_ctrl(struct dvb_usb_device *d, int onoff)
{
	int ret = usb_set_interface(d->udev, 1, onoff);

	deb_info("%s: onoff:%d\n", __func__, onoff);

	if (ret)
		err("usb_set_interface failed with error:%d", ret);

	return ret;
}
/* DVB USB Driver stuff */
static struct dvb_usb_device_properties ce6230_properties;
/* Probe: only initialize on the streaming interface (number 1). */
static int ce6230_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct dvb_usb_device *d = NULL;
	int ret;

	deb_info("%s: interface:%d\n", __func__,
		 intf->cur_altsetting->desc.bInterfaceNumber);

	if (intf->cur_altsetting->desc.bInterfaceNumber != 1)
		return 0;

	ret = dvb_usb_device_init(intf, &ce6230_properties, THIS_MODULE,
				  &d, adapter_nr);
	if (ret)
		err("init failed with error:%d\n", ret);

	return ret;
}
/* Supported devices; the matching human-readable names are listed in
 * ce6230_properties.devices. */
static struct usb_device_id ce6230_table[] = {
	{ USB_DEVICE(USB_VID_INTEL, USB_PID_INTEL_CE9500) },
	{ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A310) },
	{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ce6230_table);
/* dvb-usb device properties: one adapter, bulk TS stream on EP 0x82,
 * 6 urbs of 16*512 bytes each. */
static struct dvb_usb_device_properties ce6230_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.no_reconnect = 1,

	.size_of_priv = 0,

	.num_adapters = 1,
	.adapter = {
		{
			.frontend_attach = ce6230_zl10353_frontend_attach,
			.tuner_attach = ce6230_mxl5003s_tuner_attach,
			.stream = {
				.type = USB_BULK,
				.count = 6,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = (16*512),
					}
				}
			},
		}
	},

	.power_ctrl = ce6230_power_ctrl,

	.i2c_algo = &ce6230_i2c_algo,

	.num_device_descs = 2,
	.devices = {
		{
			.name = "Intel CE9500 reference design",
			.cold_ids = {NULL},
			.warm_ids = {&ce6230_table[0], NULL},
		},
		{
			.name = "AVerMedia A310 USB 2.0 DVB-T tuner",
			.cold_ids = {NULL},
			.warm_ids = {&ce6230_table[1], NULL},
		},
	}
};
/* USB driver glue; disconnect is handled generically by the dvb-usb core. */
static struct usb_driver ce6230_driver = {
	.name = "dvb_usb_ce6230",
	.probe = ce6230_probe,
	.disconnect = dvb_usb_device_exit,
	.id_table = ce6230_table,
};
/* module stuff */
/* Module entry point: register with the USB subsystem. */
static int __init ce6230_module_init(void)
{
	int rc;

	deb_info("%s:\n", __func__);

	rc = usb_register(&ce6230_driver);
	if (rc)
		err("usb_register failed with error:%d", rc);

	return rc;
}
/* Module exit point. */
static void __exit ce6230_module_exit(void)
{
	deb_info("%s:\n", __func__);

	/* deregister this driver from the USB subsystem */
	usb_deregister(&ce6230_driver);
}
/* Module registration and metadata. */
module_init(ce6230_module_init);
module_exit(ce6230_module_exit);

MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver for Intel CE6230 DVB-T USB2.0");
MODULE_LICENSE("GPL");
| gpl-2.0 |
leftrepo/Owl-Kernel-for-Xperia-Sola | drivers/net/arm/w90p910_ether.c | 3030 | 26650 | /*
* Copyright (c) 2008-2009 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation;version 2 of the License.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gfp.h>
#define DRV_MODULE_NAME "w90p910-emc"
#define DRV_MODULE_VERSION "0.1"
/* Ethernet MAC Registers */
#define REG_CAMCMR 0x00
#define REG_CAMEN 0x04
#define REG_CAMM_BASE 0x08
#define REG_CAML_BASE 0x0c
#define REG_TXDLSA 0x88
#define REG_RXDLSA 0x8C
#define REG_MCMDR 0x90
#define REG_MIID 0x94
#define REG_MIIDA 0x98
#define REG_FFTCR 0x9C
#define REG_TSDR 0xa0
#define REG_RSDR 0xa4
#define REG_DMARFC 0xa8
#define REG_MIEN 0xac
#define REG_MISTA 0xb0
#define REG_CTXDSA 0xcc
#define REG_CTXBSA 0xd0
#define REG_CRXDSA 0xd4
#define REG_CRXBSA 0xd8
/* mac controller bit */
#define MCMDR_RXON 0x01
#define MCMDR_ACP (0x01 << 3)
#define MCMDR_SPCRC (0x01 << 5)
#define MCMDR_TXON (0x01 << 8)
#define MCMDR_FDUP (0x01 << 18)
#define MCMDR_ENMDC (0x01 << 19)
#define MCMDR_OPMOD (0x01 << 20)
#define SWR (0x01 << 24)
/* cam command register */
#define CAMCMR_AUP 0x01
#define CAMCMR_AMP (0x01 << 1)
#define CAMCMR_ABP (0x01 << 2)
#define CAMCMR_CCAM (0x01 << 3)
#define CAMCMR_ECMP (0x01 << 4)
#define CAM0EN 0x01
/* mac mii controller bit */
#define MDCCR (0x0a << 20)
#define PHYAD (0x01 << 8)
#define PHYWR (0x01 << 16)
#define PHYBUSY (0x01 << 17)
#define PHYPRESP (0x01 << 18)
#define CAM_ENTRY_SIZE 0x08
/* rx and tx status */
#define TXDS_TXCP (0x01 << 19)
#define RXDS_CRCE (0x01 << 17)
#define RXDS_PTLE (0x01 << 19)
#define RXDS_RXGD (0x01 << 20)
#define RXDS_ALIE (0x01 << 21)
#define RXDS_RP (0x01 << 22)
/* mac interrupt status*/
#define MISTA_EXDEF (0x01 << 19)
#define MISTA_TXBERR (0x01 << 24)
#define MISTA_TDU (0x01 << 23)
#define MISTA_RDU (0x01 << 10)
#define MISTA_RXBERR (0x01 << 11)
#define ENSTART 0x01
#define ENRXINTR 0x01
#define ENRXGD (0x01 << 4)
#define ENRXBERR (0x01 << 11)
#define ENTXINTR (0x01 << 16)
#define ENTXCP (0x01 << 18)
#define ENTXABT (0x01 << 21)
#define ENTXBERR (0x01 << 24)
#define ENMDC (0x01 << 19)
#define PHYBUSY (0x01 << 17)
#define MDCCR_VAL 0xa00000
/* rx and tx owner bit */
#define RX_OWEN_DMA (0x01 << 31)
#define RX_OWEN_CPU (~(0x03 << 30))
#define TX_OWEN_DMA (0x01 << 31)
#define TX_OWEN_CPU (~(0x01 << 31))
/* tx frame desc controller bit */
#define MACTXINTEN 0x04
#define CRCMODE 0x02
#define PADDINGMODE 0x01
/* fftcr controller bit */
#define TXTHD (0x03 << 8)
#define BLENGTH (0x01 << 20)
/* global setting for driver */
#define RX_DESC_SIZE 50
#define TX_DESC_SIZE 10
#define MAX_RBUFF_SZ 0x600
#define MAX_TBUFF_SZ 0x600
#define TX_TIMEOUT (HZ/2)
#define DELAY 1000
#define CAM0 0x0
static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg);
/* Receive buffer descriptor, laid out as the EMC DMA engine expects. */
struct w90p910_rxbd {
	unsigned int sl;	/* status and received length */
	unsigned int buffer;	/* physical address of the data buffer */
	unsigned int reserved;
	unsigned int next;	/* physical address of the next descriptor */
};

/* Transmit buffer descriptor. */
struct w90p910_txbd {
	unsigned int mode;	/* ownership and tx control flags */
	unsigned int buffer;	/* physical address of the data buffer */
	unsigned int sl;	/* frame length (in), completion status (out) */
	unsigned int next;	/* physical address of the next descriptor */
};

/* Rx descriptor ring plus its packet buffers, allocated as a single
 * DMA-coherent block (see w90p910_init_desc). */
struct recv_pdesc {
	struct w90p910_rxbd desclist[RX_DESC_SIZE];
	char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ];
};

/* Tx descriptor ring plus its packet buffers, one DMA-coherent block. */
struct tran_pdesc {
	struct w90p910_txbd desclist[TX_DESC_SIZE];
	char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ];
};

/* Per-device driver state, stored in netdev_priv(). */
struct w90p910_ether {
	struct recv_pdesc *rdesc;	/* CPU view of the rx ring */
	struct tran_pdesc *tdesc;	/* CPU view of the tx ring */
	dma_addr_t rdesc_phys;		/* DMA address of the rx ring */
	dma_addr_t tdesc_phys;		/* DMA address of the tx ring */
	struct net_device_stats stats;
	struct platform_device *pdev;
	struct resource *res;
	struct sk_buff *skb;
	struct clk *clk;
	struct clk *rmiiclk;
	struct mii_if_info mii;
	struct timer_list check_timer;	/* 1 s periodic link poll */
	void __iomem *reg;		/* mapped MAC register base */
	int rxirq;
	int txirq;
	unsigned int cur_tx;		/* next tx descriptor to fill */
	unsigned int cur_rx;		/* next rx descriptor to reap */
	unsigned int finish_tx;		/* next tx descriptor to reclaim */
	unsigned int rx_packets;
	unsigned int rx_bytes;
	unsigned int start_tx_ptr;	/* DMA address of first tx descriptor */
	unsigned int start_rx_ptr;	/* DMA address of first rx descriptor */
	unsigned int linkflag;		/* 1 once link-up has been reported */
};
/*
 * Program the MCMDR speed/duplex bits: OPMOD selects 100 Mbit operation,
 * FDUP selects full duplex. The two bits are independent, so each is set
 * or cleared on its own.
 */
static void update_linkspeed_register(struct net_device *dev,
				      unsigned int speed, unsigned int duplex)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int mcmdr;

	mcmdr = __raw_readl(ether->reg + REG_MCMDR);

	if (speed == SPEED_100)
		mcmdr |= MCMDR_OPMOD;
	else
		mcmdr &= ~MCMDR_OPMOD;

	if (duplex == DUPLEX_FULL)
		mcmdr |= MCMDR_FDUP;
	else
		mcmdr &= ~MCMDR_FDUP;

	__raw_writel(mcmdr, ether->reg + REG_MCMDR);
}
/*
 * Poll the PHY and, on a link transition to up, program the MAC with the
 * negotiated (or forced) speed/duplex. linkflag suppresses repeated
 * reprogramming while the link stays up.
 */
static void update_linkspeed(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	unsigned int bmsr, bmcr, lpa, speed, duplex;

	pdev = ether->pdev;

	if (!mii_link_ok(&ether->mii)) {
		ether->linkflag = 0x0;
		netif_carrier_off(dev);
		dev_warn(&pdev->dev, "%s: Link down.\n", dev->name);
		return;
	}

	/* Link already reported up: nothing to update */
	if (ether->linkflag == 1)
		return;

	bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR);
	bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR);

	if (bmcr & BMCR_ANENABLE) {
		/* Autonegotiation: wait for completion, then derive
		 * speed/duplex from the link partner ability word. */
		if (!(bmsr & BMSR_ANEGCOMPLETE))
			return;

		lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA);

		if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF))
			speed = SPEED_100;
		else
			speed = SPEED_10;

		if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL))
			duplex = DUPLEX_FULL;
		else
			duplex = DUPLEX_HALF;

	} else {
		/* Forced mode: take speed/duplex directly from BMCR */
		speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
		duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
	}

	update_linkspeed_register(dev, speed, duplex);

	dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed,
		 (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
	ether->linkflag = 0x01;

	netif_carrier_on(dev);
}
static void w90p910_check_link(unsigned long dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct w90p910_ether *ether = netdev_priv(dev);
update_linkspeed(dev);
mod_timer(ðer->check_timer, jiffies + msecs_to_jiffies(1000));
}
/*
 * Load a 6-byte MAC address into CAM entry x: the first 4 bytes go to
 * the CAMM register, the last 2 into the top half of CAML.
 */
static void w90p910_write_cam(struct net_device *dev,
			      unsigned int x, unsigned char *pval)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int msw, lsw;

	msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3];

	lsw = (pval[4] << 24) | (pval[5] << 16);

	__raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE);
	__raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE);
}
/*
 * Allocate the tx and rx descriptor rings (each with its packet buffers
 * in the same DMA-coherent block) and link the descriptors into circular
 * lists. Returns 0 or -ENOMEM.
 */
static int w90p910_init_desc(struct net_device *dev)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *tdesc;
	struct w90p910_rxbd *rdesc;
	struct platform_device *pdev;
	unsigned int i;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	ether->tdesc = (struct tran_pdesc *)
		dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
				   &ether->tdesc_phys, GFP_KERNEL);
	if (!ether->tdesc) {
		dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
		return -ENOMEM;
	}

	ether->rdesc = (struct recv_pdesc *)
		dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
				   &ether->rdesc_phys, GFP_KERNEL);
	if (!ether->rdesc) {
		dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
		dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
				  ether->tdesc, ether->tdesc_phys);
		return -ENOMEM;
	}

	/* Link tx descriptors into a ring; each buffer field points at the
	 * matching tran_buf[] slot, expressed as a DMA address. */
	for (i = 0; i < TX_DESC_SIZE; i++) {
		unsigned int offset;

		tdesc = &(ether->tdesc->desclist[i]);

		if (i == TX_DESC_SIZE - 1) /* last wraps back to first */
			offset = offsetof(struct tran_pdesc, desclist[0]);
		else
			offset = offsetof(struct tran_pdesc, desclist[i + 1]);

		tdesc->next = ether->tdesc_phys + offset;
		tdesc->buffer = ether->tdesc_phys +
			offsetof(struct tran_pdesc, tran_buf[i]);
		tdesc->sl = 0;
		tdesc->mode = 0;
	}

	ether->start_tx_ptr = ether->tdesc_phys;

	/* Same for the rx ring; rx descriptors start owned by the DMA. */
	for (i = 0; i < RX_DESC_SIZE; i++) {
		unsigned int offset;

		rdesc = &(ether->rdesc->desclist[i]);

		if (i == RX_DESC_SIZE - 1) /* last wraps back to first */
			offset = offsetof(struct recv_pdesc, desclist[0]);
		else
			offset = offsetof(struct recv_pdesc, desclist[i + 1]);

		rdesc->next = ether->rdesc_phys + offset;
		rdesc->sl = RX_OWEN_DMA;
		rdesc->buffer = ether->rdesc_phys +
			offsetof(struct recv_pdesc, recv_buf[i]);
	}

	ether->start_rx_ptr = ether->rdesc_phys;

	return 0;
}
/* Program the FFTCR register with the tx threshold and burst length bits. */
static void w90p910_set_fifo_threshold(struct net_device *dev)
{
	struct w90p910_ether *priv = netdev_priv(dev);

	__raw_writel(TXTHD | BLENGTH, priv->reg + REG_FFTCR);
}
/* Request a MAC software reset by setting the SWR bit in MCMDR. */
static void w90p910_return_default_idle(struct net_device *dev)
{
	struct w90p910_ether *priv = netdev_priv(dev);
	unsigned int mcmdr;

	mcmdr = __raw_readl(priv->reg + REG_MCMDR);
	__raw_writel(mcmdr | SWR, priv->reg + REG_MCMDR);
}
/* Kick the receive DMA engine (RSDR). */
static void w90p910_trigger_rx(struct net_device *dev)
{
	struct w90p910_ether *priv = netdev_priv(dev);

	__raw_writel(ENSTART, priv->reg + REG_RSDR);
}
/* Kick the transmit DMA engine (TSDR). */
static void w90p910_trigger_tx(struct net_device *dev)
{
	struct w90p910_ether *priv = netdev_priv(dev);

	__raw_writel(ENSTART, priv->reg + REG_TSDR);
}
/* Unmask the tx/rx completion and bus-error interrupt sources in MIEN. */
static void w90p910_enable_mac_interrupt(struct net_device *dev)
{
	struct w90p910_ether *priv = netdev_priv(dev);
	unsigned int mien;

	mien = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP |
	       ENTXBERR | ENRXBERR | ENTXABT;

	__raw_writel(mien, priv->reg + REG_MIEN);
}
/* Read MISTA into *val and write the value back, acknowledging (clearing)
 * exactly the interrupt bits that were read. */
static void w90p910_get_and_clear_int(struct net_device *dev,
				      unsigned int *val)
{
	struct w90p910_ether *priv = netdev_priv(dev);
	unsigned int status = __raw_readl(priv->reg + REG_MISTA);

	__raw_writel(status, priv->reg + REG_MISTA);
	*val = status;
}
/* OR the global MAC command bits (SPCRC, ENMDC, ACP) into MCMDR. */
static void w90p910_set_global_maccmd(struct net_device *dev)
{
	struct w90p910_ether *priv = netdev_priv(dev);
	unsigned int mcmdr;

	mcmdr = __raw_readl(priv->reg + REG_MCMDR);
	mcmdr |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC;
	__raw_writel(mcmdr, priv->reg + REG_MCMDR);
}
/* Write dev_addr into CAM entry 0 and enable that entry. */
static void w90p910_enable_cam(struct net_device *dev)
{
	struct w90p910_ether *priv = netdev_priv(dev);
	unsigned int camen;

	w90p910_write_cam(dev, CAM0, dev->dev_addr);

	camen = __raw_readl(priv->reg + REG_CAMEN);
	__raw_writel(camen | CAM0EN, priv->reg + REG_CAMEN);
}
/* Program CAMCMR with the ECMP/ABP/AMP comparison-mode bits. */
static void w90p910_enable_cam_command(struct net_device *dev)
{
	struct w90p910_ether *priv = netdev_priv(dev);

	__raw_writel(CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP,
		     priv->reg + REG_CAMCMR);
}
/* Switch the transmitter on or off via the MCMDR_TXON bit. */
static void w90p910_enable_tx(struct net_device *dev, unsigned int enable)
{
	struct w90p910_ether *priv = netdev_priv(dev);
	unsigned int mcmdr = __raw_readl(priv->reg + REG_MCMDR);

	if (enable)
		mcmdr |= MCMDR_TXON;
	else
		mcmdr &= ~MCMDR_TXON;

	__raw_writel(mcmdr, priv->reg + REG_MCMDR);
}
/* Switch the receiver on or off via the MCMDR_RXON bit. */
static void w90p910_enable_rx(struct net_device *dev, unsigned int enable)
{
	struct w90p910_ether *priv = netdev_priv(dev);
	unsigned int mcmdr = __raw_readl(priv->reg + REG_MCMDR);

	if (enable)
		mcmdr |= MCMDR_RXON;
	else
		mcmdr &= ~MCMDR_RXON;

	__raw_writel(mcmdr, priv->reg + REG_MCMDR);
}
/* Point the MAC at the start of the rx and tx descriptor rings. */
static void w90p910_set_curdest(struct net_device *dev)
{
	struct w90p910_ether *priv = netdev_priv(dev);

	__raw_writel(priv->start_rx_ptr, priv->reg + REG_RXDLSA);
	__raw_writel(priv->start_tx_ptr, priv->reg + REG_TXDLSA);
}
/*
 * Full MAC recovery path: stop tx/rx, software-reset, rebuild the
 * descriptor rings, reprogram CAM/interrupts, and restart both engines.
 */
static void w90p910_reset_mac(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	w90p910_enable_tx(dev, 0);
	w90p910_enable_rx(dev, 0);
	w90p910_set_fifo_threshold(dev);
	w90p910_return_default_idle(dev);

	if (!netif_queue_stopped(dev))
		netif_stop_queue(dev);

	/* NOTE(review): w90p910_init_desc() allocates fresh DMA rings and
	 * its return value is ignored here; the previously allocated rings
	 * are not visibly freed first — confirm whether repeated resets
	 * leak coherent memory and whether an allocation failure is
	 * survivable at this point. */
	w90p910_init_desc(dev);

	dev->trans_start = jiffies; /* prevent tx timeout */
	ether->cur_tx = 0x0;
	ether->finish_tx = 0x0;
	ether->cur_rx = 0x0;

	w90p910_set_curdest(dev);
	w90p910_enable_cam(dev);
	w90p910_enable_cam_command(dev);
	w90p910_enable_mac_interrupt(dev);
	w90p910_enable_tx(dev, 1);
	w90p910_enable_rx(dev, 1);
	w90p910_trigger_tx(dev);
	w90p910_trigger_rx(dev);

	dev->trans_start = jiffies; /* prevent tx timeout */

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
}
/*
 * Write one PHY register over MDIO: load the data into MIID, start the
 * transaction via MIIDA, then busy-wait (bounded by DELAY iterations)
 * for the PHYBUSY bit to clear.
 */
static void w90p910_mdio_write(struct net_device *dev,
			       int phy_id, int reg, int data)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	unsigned int val, i;

	pdev = ether->pdev;

	__raw_writel(data, ether->reg + REG_MIID);

	val = (phy_id << 0x08) | reg;
	val |= PHYBUSY | PHYWR | MDCCR_VAL;
	__raw_writel(val, ether->reg + REG_MIIDA);

	for (i = 0; i < DELAY; i++) {
		if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
			break;
	}

	/* The write is fire-and-forget: a timeout is only logged. */
	if (i == DELAY)
		dev_warn(&pdev->dev, "mdio write timed out\n");
}
/*
 * Read one PHY register over MDIO. Returns the 16-bit register value, or
 * 0xffff (all-ones, as an absent PHY would read) when the transaction
 * times out.
 */
static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	unsigned int val, i, data;

	pdev = ether->pdev;

	val = (phy_id << 0x08) | reg;
	val |= PHYBUSY | MDCCR_VAL;
	__raw_writel(val, ether->reg + REG_MIIDA);

	/* Busy-wait (bounded by DELAY iterations) for PHYBUSY to clear */
	for (i = 0; i < DELAY; i++) {
		if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
			break;
	}

	if (i == DELAY) {
		dev_warn(&pdev->dev, "mdio read timed out\n");
		data = 0xffff;
	} else {
		data = __raw_readl(ether->reg + REG_MIID);
	}

	return data;
}
/* ndo_set_mac_address: validate the address, store it, and reprogram
 * CAM entry 0 so the hardware filter matches the new address. */
static int w90p910_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
	w90p910_write_cam(dev, CAM0, dev->dev_addr);

	return 0;
}
static int w90p910_ether_close(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;

	pdev = ether->pdev;

	/* NOTE(review): the DMA rings are freed before the queue is
	 * stopped and the irqs are released — confirm the MAC is already
	 * idle here, otherwise the engine could still be walking freed
	 * descriptors. */
	dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc),
			  ether->rdesc, ether->rdesc_phys);
	dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
			  ether->tdesc, ether->tdesc_phys);

	netif_stop_queue(dev);

	del_timer_sync(&ether->check_timer);
	clk_disable(ether->rmiiclk);
	clk_disable(ether->clk);

	free_irq(ether->txirq, dev);
	free_irq(ether->rxirq, dev);

	return 0;
}
static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
{
struct w90p910_ether *ether;
ether = netdev_priv(dev);
return ðer->stats;
}
/*
 * Copy @length bytes of @data into the bounce buffer of the current TX
 * descriptor, hand the descriptor to the DMA engine and kick the
 * transmitter.  Frames longer than 1514 bytes are truncated.  Stops the
 * queue when the next descriptor is still owned by the DMA engine
 * (ring full).  Always returns 0.
 */
static int w90p910_send_frame(struct net_device *dev,
			      unsigned char *data, int length)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *txbd;
	struct platform_device *pdev;
	unsigned char *buffer;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	txbd = &ether->tdesc->desclist[ether->cur_tx];
	buffer = ether->tdesc->tran_buf[ether->cur_tx];

	if (length > 1514) {
		dev_err(&pdev->dev, "send data %d bytes, check it\n", length);
		length = 1514;	/* clamp to max untagged ethernet frame */
	}

	txbd->sl = length & 0xFFFF;

	memcpy(buffer, data, length);

	/* Hand ownership to the DMA engine and start transmission. */
	txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN;

	w90p910_enable_tx(dev, 1);

	w90p910_trigger_tx(dev);

	if (++ether->cur_tx >= TX_DESC_SIZE)
		ether->cur_tx = 0;

	/* Ring full if the next descriptor is still DMA-owned. */
	txbd = &ether->tdesc->desclist[ether->cur_tx];

	if (txbd->mode & TX_OWEN_DMA)
		netif_stop_queue(dev);

	return 0;
}
static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct w90p910_ether *ether = netdev_priv(dev);
if (!(w90p910_send_frame(dev, skb->data, skb->len))) {
ether->skb = skb;
dev_kfree_skb_irq(skb);
return 0;
}
return -EAGAIN;
}
/*
 * TX completion interrupt: reclaim descriptors from finish_tx up to
 * the controller's current TX descriptor (REG_CTXDSA), update the
 * per-frame statistics, then act on the summary status bits.
 */
static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *txbd;
	struct platform_device *pdev;
	struct net_device *dev;
	unsigned int cur_entry, entry, status;

	dev = dev_id;
	ether = netdev_priv(dev);
	pdev = ether->pdev;

	w90p910_get_and_clear_int(dev, &status);

	cur_entry = __raw_readl(ether->reg + REG_CTXDSA);

	/* Bus address of the oldest not-yet-reclaimed descriptor. */
	entry = ether->tdesc_phys +
		offsetof(struct tran_pdesc, desclist[ether->finish_tx]);

	while (entry != cur_entry) {
		txbd = &ether->tdesc->desclist[ether->finish_tx];

		if (++ether->finish_tx >= TX_DESC_SIZE)
			ether->finish_tx = 0;

		if (txbd->sl & TXDS_TXCP) {
			ether->stats.tx_packets++;
			ether->stats.tx_bytes += txbd->sl & 0xFFFF;
		} else {
			ether->stats.tx_errors++;
		}

		/* Return the descriptor to the CPU side. */
		txbd->sl = 0x0;
		txbd->mode = 0x0;

		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		entry = ether->tdesc_phys +
			offsetof(struct tran_pdesc, desclist[ether->finish_tx]);
	}

	if (status & MISTA_EXDEF) {
		dev_err(&pdev->dev, "emc defer exceed interrupt\n");
	} else if (status & MISTA_TXBERR) {
		dev_err(&pdev->dev, "emc bus error interrupt\n");
		w90p910_reset_mac(dev);
	} else if (status & MISTA_TDU) {
		/* TX descriptor unavailable: ring drained, restart queue. */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}

	return IRQ_HANDLED;
}
/*
 * Drain the RX descriptor ring: for every descriptor the controller
 * has finished with (up to the controller's current RX descriptor,
 * REG_CRXDSA), either deliver the frame to the stack or account the
 * error, then give the descriptor back to the DMA engine.
 *
 * Fix: corrected the "aligment" typo in the alignment-error message.
 */
static void netdev_rx(struct net_device *dev)
{
	struct w90p910_ether *ether;
	struct w90p910_rxbd *rxbd;
	struct platform_device *pdev;
	struct sk_buff *skb;
	unsigned char *data;
	unsigned int length, status, val, entry;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	rxbd = &ether->rdesc->desclist[ether->cur_rx];

	do {
		/* Stop once we reach the descriptor the HW is working on. */
		val = __raw_readl(ether->reg + REG_CRXDSA);

		entry = ether->rdesc_phys +
			offsetof(struct recv_pdesc, desclist[ether->cur_rx]);

		if (val == entry)
			break;

		status = rxbd->sl;
		length = status & 0xFFFF;

		if (status & RXDS_RXGD) {
			data = ether->rdesc->recv_buf[ether->cur_rx];
			skb = dev_alloc_skb(length + 2);
			if (!skb) {
				dev_err(&pdev->dev, "get skb buffer error\n");
				ether->stats.rx_dropped++;
				return;
			}

			skb_reserve(skb, 2);	/* align IP header */
			skb_put(skb, length);
			skb_copy_to_linear_data(skb, data, length);
			skb->protocol = eth_type_trans(skb, dev);
			ether->stats.rx_packets++;
			ether->stats.rx_bytes += length;
			netif_rx(skb);
		} else {
			ether->stats.rx_errors++;

			if (status & RXDS_RP) {
				dev_err(&pdev->dev, "rx runt err\n");
				ether->stats.rx_length_errors++;
			} else if (status & RXDS_CRCE) {
				dev_err(&pdev->dev, "rx crc err\n");
				ether->stats.rx_crc_errors++;
			} else if (status & RXDS_ALIE) {
				dev_err(&pdev->dev, "rx alignment err\n");
				ether->stats.rx_frame_errors++;
			} else if (status & RXDS_PTLE) {
				dev_err(&pdev->dev, "rx longer err\n");
				ether->stats.rx_over_errors++;
			}
		}

		/* Recycle the descriptor: hand it back to the DMA engine. */
		rxbd->sl = RX_OWEN_DMA;
		rxbd->reserved = 0x0;

		if (++ether->cur_rx >= RX_DESC_SIZE)
			ether->cur_rx = 0;

		rxbd = &ether->rdesc->desclist[ether->cur_rx];

	} while (1);
}
/*
 * RX interrupt: on descriptor underrun (MISTA_RDU) drain the ring and
 * re-arm DMA; on a bus error reset the MAC; in all cases process any
 * pending frames.
 */
static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct w90p910_ether *priv = netdev_priv(dev);
	struct platform_device *pdev = priv->pdev;
	unsigned int status;

	w90p910_get_and_clear_int(dev, &status);

	if (status & MISTA_RDU) {
		/* Ring ran dry: drain, then restart the receive DMA. */
		netdev_rx(dev);
		w90p910_trigger_rx(dev);
		return IRQ_HANDLED;
	}

	if (status & MISTA_RXBERR) {
		dev_err(&pdev->dev, "emc rx bus error\n");
		w90p910_reset_mac(dev);
	}

	netdev_rx(dev);
	return IRQ_HANDLED;
}
/*
 * ndo_open handler: program the MAC (reset, FIFO thresholds, descriptor
 * base addresses, CAM, interrupts), enable the clocks, request both
 * IRQs, start the link-check timer and the TX queue, then kick RX DMA.
 *
 * NOTE(review): the MAC registers are written *before* clk_enable() is
 * called on the EMC clocks — verify the clocks are already running at
 * this point (e.g. left on by the bootloader), otherwise these register
 * accesses may be lost.
 */
static int w90p910_ether_open(struct net_device *dev)
{
	struct w90p910_ether *ether;
	struct platform_device *pdev;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	w90p910_reset_mac(dev);
	w90p910_set_fifo_threshold(dev);
	w90p910_set_curdest(dev);
	w90p910_enable_cam(dev);
	w90p910_enable_cam_command(dev);
	w90p910_enable_mac_interrupt(dev);
	w90p910_set_global_maccmd(dev);
	w90p910_enable_rx(dev, 1);

	clk_enable(ether->rmiiclk);
	clk_enable(ether->clk);

	ether->rx_packets = 0x0;
	ether->rx_bytes = 0x0;

	if (request_irq(ether->txirq, w90p910_tx_interrupt,
			0x0, pdev->name, dev)) {
		dev_err(&pdev->dev, "register irq tx failed\n");
		return -EAGAIN;
	}

	if (request_irq(ether->rxirq, w90p910_rx_interrupt,
			0x0, pdev->name, dev)) {
		dev_err(&pdev->dev, "register irq rx failed\n");
		free_irq(ether->txirq, dev);
		return -EAGAIN;
	}

	/* Poll the PHY link state once a second. */
	mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
	netif_start_queue(dev);
	w90p910_trigger_rx(dev);

	dev_info(&pdev->dev, "%s is OPENED\n", dev->name);

	return 0;
}
/*
 * ndo_set_multicast_list handler: derive the CAM command-register RX
 * filter mode from the interface flags and program it.
 */
static void w90p910_ether_set_multicast_list(struct net_device *dev)
{
	struct w90p910_ether *priv = netdev_priv(dev);
	unsigned int mode;

	if (dev->flags & IFF_PROMISC) {
		/* Accept everything, including unicast to other hosts. */
		mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
	} else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
		mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
	} else {
		mode = CAMCMR_ECMP | CAMCMR_ABP;
	}

	__raw_writel(mode, priv->reg + REG_CAMCMR);
}
static int w90p910_ether_ioctl(struct net_device *dev,
struct ifreq *ifr, int cmd)
{
struct w90p910_ether *ether = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(ifr);
return generic_mii_ioctl(ðer->mii, data, cmd, NULL);
}
/*
 * ethtool get_drvinfo handler.
 *
 * Fix: use bounded strlcpy() instead of strcpy() — the destination
 * fields of struct ethtool_drvinfo are fixed-size arrays.
 */
static void w90p910_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct w90p910_ether *ether = netdev_priv(dev);
return mii_ethtool_gset(ðer->mii, cmd);
}
static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct w90p910_ether *ether = netdev_priv(dev);
return mii_ethtool_sset(ðer->mii, cmd);
}
static int w90p910_nway_reset(struct net_device *dev)
{
struct w90p910_ether *ether = netdev_priv(dev);
return mii_nway_restart(ðer->mii);
}
static u32 w90p910_get_link(struct net_device *dev)
{
struct w90p910_ether *ether = netdev_priv(dev);
return mii_link_ok(ðer->mii);
}
/* ethtool support: every operation is delegated to the generic MII layer. */
static const struct ethtool_ops w90p910_ether_ethtool_ops = {
	.get_settings	= w90p910_get_settings,
	.set_settings	= w90p910_set_settings,
	.get_drvinfo	= w90p910_get_drvinfo,
	.nway_reset	= w90p910_nway_reset,
	.get_link	= w90p910_get_link,
};
/* net_device operations for the EMC interface. */
static const struct net_device_ops w90p910_ether_netdev_ops = {
	.ndo_open		= w90p910_ether_open,
	.ndo_stop		= w90p910_ether_close,
	.ndo_start_xmit		= w90p910_ether_start_xmit,
	.ndo_get_stats		= w90p910_ether_stats,
	.ndo_set_multicast_list	= w90p910_ether_set_multicast_list,
	.ndo_set_mac_address	= w90p910_set_mac_address,
	.ndo_do_ioctl		= w90p910_ether_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
/*
 * Install the fixed, board-assigned station address into dev->dev_addr.
 *
 * Fixes: use unsigned char for the address bytes — is_valid_ether_addr()
 * takes const u8 * and plain char may be signed — and pass the array
 * (not its address) to memcpy for clarity.
 */
static void __init get_mac_address(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	static const unsigned char addr[6] = {
		0x00, 0x02, 0xac, 0x55, 0x88, 0xa8
	};

	pdev = ether->pdev;

	if (is_valid_ether_addr(addr))
		memcpy(dev->dev_addr, addr, 0x06);
	else
		dev_err(&pdev->dev, "invalid mac address\n");
}
/*
 * One-time net_device initialisation: install the ops tables, set the
 * queue/watchdog parameters and MAC address, reset the driver's ring
 * indices, and wire up the generic MII helper plus the periodic
 * link-check timer.  Always returns 0.
 */
static int w90p910_ether_setup(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	ether_setup(dev);
	dev->netdev_ops = &w90p910_ether_netdev_ops;
	dev->ethtool_ops = &w90p910_ether_ethtool_ops;

	dev->tx_queue_len = 16;
	dev->dma = 0x0;
	dev->watchdog_timeo = TX_TIMEOUT;

	get_mac_address(dev);

	ether->cur_tx = 0x0;
	ether->cur_rx = 0x0;
	ether->finish_tx = 0x0;
	ether->linkflag = 0x0;

	/* Generic MII state: PHY at address 1, 5-bit id/register masks. */
	ether->mii.phy_id = 0x01;
	ether->mii.phy_id_mask = 0x1f;
	ether->mii.reg_num_mask = 0x1f;
	ether->mii.dev = dev;
	ether->mii.mdio_read = w90p910_mdio_read;
	ether->mii.mdio_write = w90p910_mdio_write;

	setup_timer(&ether->check_timer, w90p910_check_link,
		    (unsigned long)dev);

	return 0;
}
/*
 * Platform probe: allocate the net_device, map the EMC register window,
 * look up the TX/RX interrupt numbers and the peripheral clocks, then
 * register the netdev.
 *
 * Fixes: the error path used to call free_irq() on IRQs that are never
 * requested here (they are requested in ndo_open), which triggers a
 * "Trying to free already-free IRQ" warning; and the register failure
 * message had a typo ("Regiter").
 */
static int __devinit w90p910_ether_probe(struct platform_device *pdev)
{
	struct w90p910_ether *ether;
	struct net_device *dev;
	int error;

	dev = alloc_etherdev(sizeof(struct w90p910_ether));
	if (!dev)
		return -ENOMEM;

	ether = netdev_priv(dev);

	ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (ether->res == NULL) {
		dev_err(&pdev->dev, "failed to get I/O memory\n");
		error = -ENXIO;
		goto failed_free;
	}

	if (!request_mem_region(ether->res->start,
				resource_size(ether->res), pdev->name)) {
		dev_err(&pdev->dev, "failed to request I/O memory\n");
		error = -EBUSY;
		goto failed_free;
	}

	ether->reg = ioremap(ether->res->start, resource_size(ether->res));
	if (ether->reg == NULL) {
		dev_err(&pdev->dev, "failed to remap I/O memory\n");
		error = -ENXIO;
		goto failed_free_mem;
	}

	/* The IRQs are only looked up here; ndo_open requests them. */
	ether->txirq = platform_get_irq(pdev, 0);
	if (ether->txirq < 0) {
		dev_err(&pdev->dev, "failed to get ether tx irq\n");
		error = -ENXIO;
		goto failed_free_io;
	}

	ether->rxirq = platform_get_irq(pdev, 1);
	if (ether->rxirq < 0) {
		dev_err(&pdev->dev, "failed to get ether rx irq\n");
		error = -ENXIO;
		goto failed_free_io;
	}

	platform_set_drvdata(pdev, dev);

	ether->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(ether->clk)) {
		dev_err(&pdev->dev, "failed to get ether clock\n");
		error = PTR_ERR(ether->clk);
		goto failed_clear_drvdata;
	}

	ether->rmiiclk = clk_get(&pdev->dev, "RMII");
	if (IS_ERR(ether->rmiiclk)) {
		dev_err(&pdev->dev, "failed to get ether clock\n");
		error = PTR_ERR(ether->rmiiclk);
		goto failed_put_clk;
	}

	ether->pdev = pdev;

	w90p910_ether_setup(dev);

	error = register_netdev(dev);
	if (error != 0) {
		dev_err(&pdev->dev, "Register EMC w90p910 FAILED\n");
		error = -ENODEV;
		goto failed_put_rmiiclk;
	}

	return 0;

failed_put_rmiiclk:
	clk_put(ether->rmiiclk);
failed_put_clk:
	clk_put(ether->clk);
failed_clear_drvdata:
	platform_set_drvdata(pdev, NULL);
failed_free_io:
	iounmap(ether->reg);
failed_free_mem:
	release_mem_region(ether->res->start, resource_size(ether->res));
failed_free:
	free_netdev(dev);
	return error;
}
/*
 * Platform remove: undo everything probe set up.
 *
 * NOTE(review): ndo_stop already frees both IRQs; if the interface was
 * ever opened and closed, the free_irq() calls below run a second time
 * on IRQs that are no longer requested — confirm against the upstream
 * driver whether these calls belong here.
 */
static int __devexit w90p910_ether_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct w90p910_ether *ether = netdev_priv(dev);

	unregister_netdev(dev);

	clk_put(ether->rmiiclk);
	clk_put(ether->clk);

	iounmap(ether->reg);
	release_mem_region(ether->res->start, resource_size(ether->res));

	free_irq(ether->txirq, dev);
	free_irq(ether->rxirq, dev);

	del_timer_sync(&ether->check_timer);
	platform_set_drvdata(pdev, NULL);

	free_netdev(dev);
	return 0;
}
/* Platform glue: binds to the "nuc900-emc" platform device. */
static struct platform_driver w90p910_ether_driver = {
	.probe		= w90p910_ether_probe,
	.remove		= __devexit_p(w90p910_ether_remove),
	.driver		= {
		.name	= "nuc900-emc",
		.owner	= THIS_MODULE,
	},
};
/* Module entry point: register the platform driver. */
static int __init w90p910_ether_init(void)
{
	int rc;

	rc = platform_driver_register(&w90p910_ether_driver);
	return rc;
}
/* Module exit point: unregister the platform driver. */
static void __exit w90p910_ether_exit(void)
{
	platform_driver_unregister(&w90p910_ether_driver);
	return;
}
module_init(w90p910_ether_init);
module_exit(w90p910_ether_exit);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 MAC driver!");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-emc");
| gpl-2.0 |
ajayramaswamy/linux-imx-gk802 | drivers/isdn/capi/capidrv.c | 3286 | 63631 | /* $Id: capidrv.c,v 1.1.2.2 2004/01/12 23:17:24 keil Exp $
*
* ISDN4Linux Driver, using capi20 interface (kernelcapi)
*
* Copyright 1997 by Carsten Paeth <calle@calle.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/isdn.h>
#include <linux/isdnif.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/capi.h>
#include <linux/kernelcapi.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capicmd.h>
#include "capidrv.h"
static int debugmode = 0;
MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
module_param(debugmode, uint, S_IRUGO|S_IWUSR);
/* -------- type definitions ----------------------------------------- */
struct capidrv_contr {
struct capidrv_contr *next;
struct module *owner;
u32 contrnr;
char name[20];
/*
* for isdn4linux
*/
isdn_if interface;
int myid;
/*
* LISTEN state
*/
int state;
u32 cipmask;
u32 cipmask2;
struct timer_list listentimer;
/*
* ID of capi message sent
*/
u16 msgid;
/*
* B-Channels
*/
int nbchan;
struct capidrv_bchan {
struct capidrv_contr *contr;
u8 msn[ISDN_MSNLEN];
int l2;
int l3;
u8 num[ISDN_MSNLEN];
u8 mynum[ISDN_MSNLEN];
int si1;
int si2;
int incoming;
int disconnecting;
struct capidrv_plci {
struct capidrv_plci *next;
u32 plci;
u32 ncci; /* ncci for CONNECT_ACTIVE_IND */
u16 msgid; /* to identfy CONNECT_CONF */
int chan;
int state;
int leasedline;
struct capidrv_ncci {
struct capidrv_ncci *next;
struct capidrv_plci *plcip;
u32 ncci;
u16 msgid; /* to identfy CONNECT_B3_CONF */
int chan;
int state;
int oldstate;
/* */
u16 datahandle;
struct ncci_datahandle_queue {
struct ncci_datahandle_queue *next;
u16 datahandle;
int len;
} *ackqueue;
} *ncci_list;
} *plcip;
struct capidrv_ncci *nccip;
} *bchans;
struct capidrv_plci *plci_list;
/* for q931 data */
u8 q931_buf[4096];
u8 *q931_read;
u8 *q931_write;
u8 *q931_end;
};
struct capidrv_data {
struct capi20_appl ap;
int ncontr;
struct capidrv_contr *contr_list;
};
typedef struct capidrv_plci capidrv_plci;
typedef struct capidrv_ncci capidrv_ncci;
typedef struct capidrv_contr capidrv_contr;
typedef struct capidrv_data capidrv_data;
typedef struct capidrv_bchan capidrv_bchan;
/* -------- data definitions ----------------------------------------- */
static capidrv_data global;
static DEFINE_SPINLOCK(global_lock);
static void handle_dtrace_data(capidrv_contr *card,
int send, int level2, u8 *data, u16 len);
/* -------- convert functions ---------------------------------------- */
/*
 * Map an isdn4linux layer-2/layer-3 protocol pair to the CAPI B1
 * (physical layer / framing) protocol value.  V.110 selections here
 * are paired with a B1 configuration from b1config() below.
 */
static inline u32 b1prot(int l2, int l3)
{
	switch (l2) {
	case ISDN_PROTO_L2_X75I:
	case ISDN_PROTO_L2_X75UI:
	case ISDN_PROTO_L2_X75BUI:
		return 0;
	case ISDN_PROTO_L2_HDLC:
	default:
		return 0;
	case ISDN_PROTO_L2_TRANS:
		return 1;
	case ISDN_PROTO_L2_V11096:
	case ISDN_PROTO_L2_V11019:
	case ISDN_PROTO_L2_V11038:
		return 2;
	case ISDN_PROTO_L2_FAX:
		return 4;
	case ISDN_PROTO_L2_MODEM:
		return 8;
	}
}
/*
 * Map an isdn4linux layer-2/layer-3 protocol pair to the CAPI B2
 * (data link layer) protocol value.  X.75 variants (and unknown l2)
 * select 0; most others run transparent (1); fax selects 4.
 */
static inline u32 b2prot(int l2, int l3)
{
	switch (l2) {
	case ISDN_PROTO_L2_X75I:
	case ISDN_PROTO_L2_X75UI:
	case ISDN_PROTO_L2_X75BUI:
	default:
		return 0;
	case ISDN_PROTO_L2_HDLC:
	case ISDN_PROTO_L2_TRANS:
	case ISDN_PROTO_L2_V11096:
	case ISDN_PROTO_L2_V11019:
	case ISDN_PROTO_L2_V11038:
	case ISDN_PROTO_L2_MODEM:
		return 1;
	case ISDN_PROTO_L2_FAX:
		return 4;
	}
}
/*
 * Map an isdn4linux layer-2/layer-3 protocol pair to the CAPI B3
 * (network layer) protocol value: 0 for everything except fax (4).
 */
static inline u32 b3prot(int l2, int l3)
{
	switch (l2) {
	case ISDN_PROTO_L2_X75I:
	case ISDN_PROTO_L2_X75UI:
	case ISDN_PROTO_L2_X75BUI:
	case ISDN_PROTO_L2_HDLC:
	case ISDN_PROTO_L2_TRANS:
	case ISDN_PROTO_L2_V11096:
	case ISDN_PROTO_L2_V11019:
	case ISDN_PROTO_L2_V11038:
	case ISDN_PROTO_L2_MODEM:
	default:
		return 0;
	case ISDN_PROTO_L2_FAX:
		return 4;
	}
}
/*
 * Build a CAPI "B1 Configuration" structure for async V.110 at the
 * given bit @rate: 8 data bits, no parity, 1 stop bit.
 *
 * Returns a pointer to a static buffer — not reentrant; each call
 * overwrites the previous result.
 */
static _cstruct b1config_async_v110(u16 rate)
{
	/* CAPI-Spec "B1 Configuration" */
	static unsigned char buf[9];

	buf[0] = 8;	/* len */
	/* maximum bitrate, little-endian */
	buf[1] = rate & 0xff; buf[2] = (rate >> 8) & 0xff;
	buf[3] = 8; buf[4] = 0;	/* 8 bits per character */
	buf[5] = 0; buf[6] = 0;	/* parity none */
	buf[7] = 0; buf[8] = 0;	/* 1 stop bit */
	return buf;
}
/*
 * Return the CAPI B1 configuration for an isdn4linux protocol pair:
 * an async V.110 descriptor for the V.110 rates, NULL otherwise.
 */
static _cstruct b1config(int l2, int l3)
{
	if (l2 == ISDN_PROTO_L2_V11096)
		return b1config_async_v110(9600);
	if (l2 == ISDN_PROTO_L2_V11019)
		return b1config_async_v110(19200);
	if (l2 == ISDN_PROTO_L2_V11038)
		return b1config_async_v110(38400);
	/* X.75 / HDLC / transparent / anything else: no B1 config. */
	return NULL;
}
/*
 * Translate an isdn4linux service-indicator pair (si1, si2) into a
 * CAPI CIP value via a lookup table.  Out-of-range indices are
 * clamped to 0 ("no CIP").
 */
static inline u16 si2cip(u8 si1, u8 si2)
{
	static const u8 cip[17][5] =
	{
	/*  0   1   2   3   4  */
	   {0,  0,  0,  0,  0},		/*0 */
	   {16, 16, 4,  26, 16},	/*1 */
	   {17, 17, 17, 4,  4},		/*2 */
	   {2,  2,  2,  2,  2},		/*3 */
	   {18, 18, 18, 18, 18},	/*4 */
	   {2,  2,  2,  2,  2},		/*5 */
	   {0,  0,  0,  0,  0},		/*6 */
	   {2,  2,  2,  2,  2},		/*7 */
	   {2,  2,  2,  2,  2},		/*8 */
	   {21, 21, 21, 21, 21},	/*9 */
	   {19, 19, 19, 19, 19},	/*10 */
	   {0,  0,  0,  0,  0},		/*11 */
	   {0,  0,  0,  0,  0},		/*12 */
	   {0,  0,  0,  0,  0},		/*13 */
	   {0,  0,  0,  0,  0},		/*14 */
	   {22, 22, 22, 22, 22},	/*15 */
	   {27, 27, 27, 28, 27}		/*16 */
	};
	if (si1 > 16)
		si1 = 0;
	if (si2 > 4)
		si2 = 0;
	return (u16) cip[si1][si2];
}
/*
 * Reverse mapping of si2cip(): CAPI CIP value -> isdn4linux service
 * indicator si1.  CIP values above 31 are treated as 0.
 */
static inline u8 cip2si1(u16 cipval)
{
	static const u8 si[32] =
	{7, 1, 7, 7, 1, 1, 7, 7,	/*0-7 */
	 7, 1, 0, 0, 0, 0, 0, 0,	/*8-15 */
	 1, 2, 4, 10, 9, 9, 15, 7,	/*16-23 */
	 7, 7, 1, 16, 16, 0, 0, 0};	/*24-31 */

	if (cipval > 31)
		cipval = 0;	/* .... */
	return si[cipval];
}
/*
 * Reverse mapping of si2cip(): CAPI CIP value -> isdn4linux service
 * indicator si2.  CIP values above 31 are treated as 0.
 */
static inline u8 cip2si2(u16 cipval)
{
	static const u8 si[32] =
	{0, 0, 0, 0, 2, 3, 0, 0,	/*0-7 */
	 0, 3, 0, 0, 0, 0, 0, 0,	/*8-15 */
	 1, 2, 0, 0, 9, 0, 0, 0,	/*16-23 */
	 0, 0, 3, 2, 3, 0, 0, 0};	/*24-31 */

	if (cipval > 31)
		cipval = 0;	/* .... */
	return si[cipval];
}
/* -------- controller management ------------------------------------- */
/*
 * Look up a controller by its isdn4linux driver id.  The list walk is
 * protected by global_lock; returns NULL if no controller matches.
 */
static inline capidrv_contr *findcontrbydriverid(int driverid)
{
	unsigned long flags;
	capidrv_contr *p;

	spin_lock_irqsave(&global_lock, flags);
	for (p = global.contr_list; p; p = p->next)
		if (p->myid == driverid)
			break;
	spin_unlock_irqrestore(&global_lock, flags);
	return p;
}
/*
 * Look up a controller by CAPI controller number.  Returns NULL if no
 * controller matches.
 *
 * Fix: dropped the initializer that read global.contr_list before
 * taking global_lock — it was an unsynchronized read, immediately
 * overwritten inside the lock, and inconsistent with
 * findcontrbydriverid().
 */
static capidrv_contr *findcontrbynumber(u32 contr)
{
	unsigned long flags;
	capidrv_contr *p;

	spin_lock_irqsave(&global_lock, flags);
	for (p = global.contr_list; p; p = p->next)
		if (p->contrnr == contr)
			break;
	spin_unlock_irqrestore(&global_lock, flags);
	return p;
}
/* -------- plci management ------------------------------------------ */
/*
 * Allocate a PLCI tracking structure for B channel @chan, link it at
 * the head of the card's plci_list and attach it to the channel.
 * Returns NULL on allocation failure (GFP_ATOMIC context).
 */
static capidrv_plci *new_plci(capidrv_contr *card, int chan)
{
	capidrv_plci *plcip;

	plcip = kzalloc(sizeof(capidrv_plci), GFP_ATOMIC);

	if (plcip == NULL)
		return NULL;

	plcip->state = ST_PLCI_NONE;
	plcip->plci = 0;
	plcip->msgid = 0;
	plcip->chan = chan;
	plcip->next = card->plci_list;
	card->plci_list = plcip;
	card->bchans[chan].plcip = plcip;

	return plcip;
}
/* Find the tracking structure for CAPI PLCI value @plci, or NULL. */
static capidrv_plci *find_plci_by_plci(capidrv_contr *card, u32 plci)
{
	capidrv_plci *walk;

	for (walk = card->plci_list; walk != NULL; walk = walk->next) {
		if (walk->plci == plci)
			return walk;
	}
	return NULL;
}
/* Find the PLCI whose pending CONNECT_CONF has message id @msgid. */
static capidrv_plci *find_plci_by_msgid(capidrv_contr *card, u16 msgid)
{
	capidrv_plci *walk;

	for (walk = card->plci_list; walk != NULL; walk = walk->next) {
		if (walk->msgid == msgid)
			return walk;
	}
	return NULL;
}
/*
 * Find the PLCI owning @ncci.  The PLCI is encoded in the low 16 bits
 * of an NCCI, hence the mask.  Returns NULL if not found.
 */
static capidrv_plci *find_plci_by_ncci(capidrv_contr *card, u32 ncci)
{
	capidrv_plci *p;
	for (p = card->plci_list; p; p = p->next)
		if (p->plci == (ncci & 0xffff))
			return p;
	return NULL;
}
/*
 * Unlink @plcip from the card's plci_list, detach it from its B
 * channel (also clearing the channel's disconnecting/incoming flags)
 * and free it.  Logs an error if the PLCI is not on the list.
 */
static void free_plci(capidrv_contr *card, capidrv_plci *plcip)
{
	capidrv_plci **pp;

	for (pp = &card->plci_list; *pp; pp = &(*pp)->next) {
		if (*pp == plcip) {
			*pp = (*pp)->next;
			card->bchans[plcip->chan].plcip = NULL;
			card->bchans[plcip->chan].disconnecting = 0;
			card->bchans[plcip->chan].incoming = 0;
			kfree(plcip);
			return;
		}
	}
	printk(KERN_ERR "capidrv-%d: free_plci %p (0x%x) not found, Huh?\n",
	       card->contrnr, plcip, plcip->plci);
}
/* -------- ncci management ------------------------------------------ */
/*
 * Allocate an NCCI tracking structure for @ncci under @plcip, link it
 * at the head of the PLCI's ncci_list and attach it to the B channel.
 * Returns NULL on allocation failure (GFP_ATOMIC context).
 */
static inline capidrv_ncci *new_ncci(capidrv_contr *card,
				     capidrv_plci *plcip,
				     u32 ncci)
{
	capidrv_ncci *nccip;

	nccip = kzalloc(sizeof(capidrv_ncci), GFP_ATOMIC);

	if (nccip == NULL)
		return NULL;

	nccip->ncci = ncci;
	nccip->state = ST_NCCI_NONE;
	nccip->plcip = plcip;
	nccip->chan = plcip->chan;
	nccip->datahandle = 0;

	nccip->next = plcip->ncci_list;
	plcip->ncci_list = nccip;

	card->bchans[plcip->chan].nccip = nccip;

	return nccip;
}
/*
 * Find the tracking structure for @ncci: first locate the owning PLCI
 * (low 16 bits of the NCCI), then search its ncci_list.
 */
static inline capidrv_ncci *find_ncci(capidrv_contr *card, u32 ncci)
{
	capidrv_ncci *np;
	capidrv_plci *plcip = find_plci_by_ncci(card, ncci);

	if (plcip == NULL)
		return NULL;

	for (np = plcip->ncci_list; np != NULL; np = np->next) {
		if (np->ncci == ncci)
			return np;
	}
	return NULL;
}
/*
 * Find the NCCI under the PLCI owning @ncci whose pending
 * CONNECT_B3_CONF has message id @msgid.  Returns NULL if not found.
 */
static inline capidrv_ncci *find_ncci_by_msgid(capidrv_contr *card,
					       u32 ncci, u16 msgid)
{
	capidrv_plci *plcip;
	capidrv_ncci *p;

	if ((plcip = find_plci_by_ncci(card, ncci)) == NULL)
		return NULL;

	for (p = plcip->ncci_list; p; p = p->next)
		if (p->msgid == msgid)
			return p;
	return NULL;
}
/*
 * Unlink @nccip from its PLCI's ncci_list, detach it from the B
 * channel and free it.  Silently tolerates an NCCI that is not on
 * the list (the channel pointer is cleared regardless).
 */
static void free_ncci(capidrv_contr *card, struct capidrv_ncci *nccip)
{
	struct capidrv_ncci **pp;

	for (pp = &(nccip->plcip->ncci_list); *pp; pp = &(*pp)->next) {
		if (*pp == nccip) {
			*pp = (*pp)->next;
			break;
		}
	}
	card->bchans[nccip->chan].nccip = NULL;
	kfree(nccip);
}
/*
 * Append a (datahandle, len) record to the NCCI's ack queue so a later
 * DATA_B3_CONF can be matched back to the submitted length.
 * Returns 0 on success, -1 when no memory is available.
 *
 * Cleanup: dropped the unnecessary cast of kmalloc()'s return value
 * and switched to sizeof(*n) so the allocation tracks the type.
 */
static int capidrv_add_ack(struct capidrv_ncci *nccip,
			   u16 datahandle, int len)
{
	struct ncci_datahandle_queue *n, **pp;

	n = kmalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		printk(KERN_ERR "capidrv: kmalloc ncci_datahandle failed\n");
		return -1;
	}
	n->next = NULL;
	n->datahandle = datahandle;
	n->len = len;

	/* Walk to the tail so acknowledgements stay in FIFO order. */
	for (pp = &nccip->ackqueue; *pp; pp = &(*pp)->next)
		;
	*pp = n;
	return 0;
}
/*
 * Remove the ack-queue record matching @datahandle and return the
 * length that was recorded with it, or -1 if no record matches.
 */
static int capidrv_del_ack(struct capidrv_ncci *nccip, u16 datahandle)
{
	struct ncci_datahandle_queue **link, *entry;
	int len;

	for (link = &nccip->ackqueue; *link; link = &(*link)->next) {
		entry = *link;
		if (entry->datahandle != datahandle)
			continue;
		len = entry->len;
		*link = entry->next;	/* unlink */
		kfree(entry);
		return len;
	}
	return -1;
}
/* -------- convert and send capi message ---------------------------- */
/*
 * Assemble @cmsg into wire format, copy it into a freshly allocated
 * skb and hand it to the CAPI layer.  The skb is freed here if
 * capi20_put_message() refuses it; allocation failure is only logged.
 */
static void send_message(capidrv_contr *card, _cmsg *cmsg)
{
	struct sk_buff *skb;
	size_t len;

	capi_cmsg2message(cmsg, cmsg->buf);
	len = CAPIMSG_LEN(cmsg->buf);
	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "capidrv::send_message: can't allocate mem\n");
		return;
	}
	memcpy(skb_put(skb, len), cmsg->buf, len);
	if (capi20_put_message(&global.ap, skb) != CAPI_NOERROR)
		kfree_skb(skb);
}
/* -------- state machine -------------------------------------------- */
struct listenstatechange {
int actstate;
int nextstate;
int event;
};
static struct listenstatechange listentable[] =
{
{ST_LISTEN_NONE, ST_LISTEN_WAIT_CONF, EV_LISTEN_REQ},
{ST_LISTEN_ACTIVE, ST_LISTEN_ACTIVE_WAIT_CONF, EV_LISTEN_REQ},
{ST_LISTEN_WAIT_CONF, ST_LISTEN_NONE, EV_LISTEN_CONF_ERROR},
{ST_LISTEN_ACTIVE_WAIT_CONF, ST_LISTEN_ACTIVE, EV_LISTEN_CONF_ERROR},
{ST_LISTEN_WAIT_CONF, ST_LISTEN_NONE, EV_LISTEN_CONF_EMPTY},
{ST_LISTEN_ACTIVE_WAIT_CONF, ST_LISTEN_NONE, EV_LISTEN_CONF_EMPTY},
{ST_LISTEN_WAIT_CONF, ST_LISTEN_ACTIVE, EV_LISTEN_CONF_OK},
{ST_LISTEN_ACTIVE_WAIT_CONF, ST_LISTEN_ACTIVE, EV_LISTEN_CONF_OK},
{},
};
/*
 * Advance the controller's LISTEN state machine: look up the
 * (current state, event) pair in listentable and apply the transition.
 * An unmatched pair only produces an error message.
 */
static void listen_change_state(capidrv_contr *card, int event)
{
	struct listenstatechange *t;

	for (t = listentable; t->event; t++) {
		if (t->actstate != card->state || t->event != event)
			continue;
		if (debugmode)
			printk(KERN_DEBUG "capidrv-%d: listen_change_state %d -> %d\n",
			       card->contrnr, card->state, t->nextstate);
		card->state = t->nextstate;
		return;
	}
	printk(KERN_ERR "capidrv-%d: listen_change_state state=%d event=%d ????\n",
	       card->contrnr, card->state, event);
}
/* ------------------------------------------------------------------ */
/*
 * PLCI state-machine action for returning to P-0: detach the B channel
 * from the controller, report a D-channel hangup (ISDN_STAT_DHUP) to
 * isdn4linux and release the PLCI.
 */
static void p0(capidrv_contr *card, capidrv_plci *plci)
{
	isdn_ctrl cmd;

	card->bchans[plci->chan].contr = NULL;
	cmd.command = ISDN_STAT_DHUP;
	cmd.driver = card->myid;
	cmd.arg = plci->chan;
	card->interface.statcallb(&cmd);
	free_plci(card, plci);
}
/* ------------------------------------------------------------------ */
struct plcistatechange {
int actstate;
int nextstate;
int event;
void (*changefunc) (capidrv_contr * card, capidrv_plci * plci);
};
static struct plcistatechange plcitable[] =
{
/* P-0 */
{ST_PLCI_NONE, ST_PLCI_OUTGOING, EV_PLCI_CONNECT_REQ, NULL},
{ST_PLCI_NONE, ST_PLCI_ALLOCATED, EV_PLCI_FACILITY_IND_UP, NULL},
{ST_PLCI_NONE, ST_PLCI_INCOMING, EV_PLCI_CONNECT_IND, NULL},
{ST_PLCI_NONE, ST_PLCI_RESUMEING, EV_PLCI_RESUME_REQ, NULL},
/* P-0.1 */
{ST_PLCI_OUTGOING, ST_PLCI_NONE, EV_PLCI_CONNECT_CONF_ERROR, p0},
{ST_PLCI_OUTGOING, ST_PLCI_ALLOCATED, EV_PLCI_CONNECT_CONF_OK, NULL},
/* P-1 */
{ST_PLCI_ALLOCATED, ST_PLCI_ACTIVE, EV_PLCI_CONNECT_ACTIVE_IND, NULL},
{ST_PLCI_ALLOCATED, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
{ST_PLCI_ALLOCATED, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
{ST_PLCI_ALLOCATED, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
/* P-ACT */
{ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
{ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
{ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
{ST_PLCI_ACTIVE, ST_PLCI_HELD, EV_PLCI_HOLD_IND, NULL},
{ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTING, EV_PLCI_SUSPEND_IND, NULL},
/* P-2 */
{ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_CONNECT_REJECT, NULL},
{ST_PLCI_INCOMING, ST_PLCI_FACILITY_IND, EV_PLCI_FACILITY_IND_UP, NULL},
{ST_PLCI_INCOMING, ST_PLCI_ACCEPTING, EV_PLCI_CONNECT_RESP, NULL},
{ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
{ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
{ST_PLCI_INCOMING, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
{ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_CD_IND, NULL},
/* P-3 */
{ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTING, EV_PLCI_CONNECT_REJECT, NULL},
{ST_PLCI_FACILITY_IND, ST_PLCI_ACCEPTING, EV_PLCI_CONNECT_ACTIVE_IND, NULL},
{ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
{ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
{ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
/* P-4 */
{ST_PLCI_ACCEPTING, ST_PLCI_ACTIVE, EV_PLCI_CONNECT_ACTIVE_IND, NULL},
{ST_PLCI_ACCEPTING, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
{ST_PLCI_ACCEPTING, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
{ST_PLCI_ACCEPTING, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
/* P-5 */
{ST_PLCI_DISCONNECTING, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
/* P-6 */
{ST_PLCI_DISCONNECTED, ST_PLCI_NONE, EV_PLCI_DISCONNECT_RESP, p0},
/* P-0.Res */
{ST_PLCI_RESUMEING, ST_PLCI_NONE, EV_PLCI_RESUME_CONF_ERROR, p0},
{ST_PLCI_RESUMEING, ST_PLCI_RESUME, EV_PLCI_RESUME_CONF_OK, NULL},
/* P-RES */
{ST_PLCI_RESUME, ST_PLCI_ACTIVE, EV_PLCI_RESUME_IND, NULL},
/* P-HELD */
{ST_PLCI_HELD, ST_PLCI_ACTIVE, EV_PLCI_RETRIEVE_IND, NULL},
{},
};
/*
 * Advance a PLCI's state machine: look up (current state, event) in
 * plcitable, apply the transition and run its optional action
 * callback.  An unmatched pair only produces an error message.
 */
static void plci_change_state(capidrv_contr *card, capidrv_plci *plci, int event)
{
	struct plcistatechange *p = plcitable;

	while (p->event) {
		if (plci->state == p->actstate && p->event == event) {
			if (debugmode)
				printk(KERN_DEBUG "capidrv-%d: plci_change_state:0x%x %d -> %d\n",
				       card->contrnr, plci->plci, plci->state, p->nextstate);
			plci->state = p->nextstate;
			if (p->changefunc)
				p->changefunc(card, plci);
			return;
		}
		p++;
	}
	printk(KERN_ERR "capidrv-%d: plci_change_state:0x%x state=%d event=%d ????\n",
	       card->contrnr, plci->plci, plci->state, event);
}
/* ------------------------------------------------------------------ */
static _cmsg cmsg;
/*
 * NCCI state-machine action for returning to N-0: send DISCONNECT_REQ
 * for the owning PLCI (driving the PLCI state machine accordingly),
 * report a B-channel hangup (ISDN_STAT_BHUP) to isdn4linux and release
 * the NCCI.  Uses the file-global _cmsg scratch buffer.
 */
static void n0(capidrv_contr *card, capidrv_ncci *ncci)
{
	isdn_ctrl cmd;

	capi_fill_DISCONNECT_REQ(&cmsg,
				 global.ap.applid,
				 card->msgid++,
				 ncci->plcip->plci,
				 NULL,	/* BChannelinformation */
				 NULL,	/* Keypadfacility */
				 NULL,	/* Useruserdata */   /* $$$$ */
				 NULL	/* Facilitydataarray */
	);
	plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ);
	send_message(card, &cmsg);

	cmd.command = ISDN_STAT_BHUP;
	cmd.driver = card->myid;
	cmd.arg = ncci->chan;
	card->interface.statcallb(&cmd);
	free_ncci(card, ncci);
}
/* ------------------------------------------------------------------ */
struct nccistatechange {
int actstate;
int nextstate;
int event;
void (*changefunc) (capidrv_contr * card, capidrv_ncci * ncci);
};
static struct nccistatechange nccitable[] =
{
/* N-0 */
{ST_NCCI_NONE, ST_NCCI_OUTGOING, EV_NCCI_CONNECT_B3_REQ, NULL},
{ST_NCCI_NONE, ST_NCCI_INCOMING, EV_NCCI_CONNECT_B3_IND, NULL},
/* N-0.1 */
{ST_NCCI_OUTGOING, ST_NCCI_ALLOCATED, EV_NCCI_CONNECT_B3_CONF_OK, NULL},
{ST_NCCI_OUTGOING, ST_NCCI_NONE, EV_NCCI_CONNECT_B3_CONF_ERROR, n0},
/* N-1 */
{ST_NCCI_INCOMING, ST_NCCI_DISCONNECTING, EV_NCCI_CONNECT_B3_REJECT, NULL},
{ST_NCCI_INCOMING, ST_NCCI_ALLOCATED, EV_NCCI_CONNECT_B3_RESP, NULL},
{ST_NCCI_INCOMING, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
{ST_NCCI_INCOMING, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
/* N-2 */
{ST_NCCI_ALLOCATED, ST_NCCI_ACTIVE, EV_NCCI_CONNECT_B3_ACTIVE_IND, NULL},
{ST_NCCI_ALLOCATED, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
{ST_NCCI_ALLOCATED, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
/* N-ACT */
{ST_NCCI_ACTIVE, ST_NCCI_ACTIVE, EV_NCCI_RESET_B3_IND, NULL},
{ST_NCCI_ACTIVE, ST_NCCI_RESETING, EV_NCCI_RESET_B3_REQ, NULL},
{ST_NCCI_ACTIVE, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
{ST_NCCI_ACTIVE, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
/* N-3 */
{ST_NCCI_RESETING, ST_NCCI_ACTIVE, EV_NCCI_RESET_B3_IND, NULL},
{ST_NCCI_RESETING, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
{ST_NCCI_RESETING, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
/* N-4 */
{ST_NCCI_DISCONNECTING, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
{ST_NCCI_DISCONNECTING, ST_NCCI_PREVIOUS, EV_NCCI_DISCONNECT_B3_CONF_ERROR,NULL},
/* N-5 */
{ST_NCCI_DISCONNECTED, ST_NCCI_NONE, EV_NCCI_DISCONNECT_B3_RESP, n0},
{},
};
/*
 * Advance an NCCI's state machine: look up (current state, event) in
 * nccitable and apply the transition.  The special target
 * ST_NCCI_PREVIOUS rolls the NCCI back to its previous state (used for
 * DISCONNECT_B3_CONF errors); oldstate always records where we came
 * from.  Runs the optional action callback on a match; an unmatched
 * pair only produces an error message.
 */
static void ncci_change_state(capidrv_contr *card, capidrv_ncci *ncci, int event)
{
	struct nccistatechange *p = nccitable;

	while (p->event) {
		if (ncci->state == p->actstate && p->event == event) {
			if (debugmode)
				printk(KERN_DEBUG "capidrv-%d: ncci_change_state:0x%x %d -> %d\n",
				       card->contrnr, ncci->ncci, ncci->state, p->nextstate);
			if (p->nextstate == ST_NCCI_PREVIOUS) {
				ncci->state = ncci->oldstate;
				ncci->oldstate = p->actstate;
			} else {
				ncci->oldstate = p->actstate;
				ncci->state = p->nextstate;
			}
			if (p->changefunc)
				p->changefunc(card, ncci);
			return;
		}
		p++;
	}
	printk(KERN_ERR "capidrv-%d: ncci_change_state:0x%x state=%d event=%d ????\n",
	       card->contrnr, ncci->ncci, ncci->state, event);
}
/* ------------------------------------------------------------------- */
/*
 * Find a free B channel (one with no PLCI attached), clear its
 * disconnecting flag and return its index, or -1 if all are busy.
 */
static inline int new_bchan(capidrv_contr *card)
{
	int idx;

	for (idx = 0; idx < card->nbchan; idx++) {
		if (card->bchans[idx].plcip != NULL)
			continue;
		card->bchans[idx].disconnecting = 0;
		return idx;
	}
	return -1;
}
/* ------------------------------------------------------------------- */
/*
 * Dispatch a controller-addressed CAPI message: LISTEN_CONF drives the
 * LISTEN state machine, MANUFACTURER_IND/CONF carry vendor-specific
 * D-/B-channel trace data (manufacturer ID 0x214D5641 — presumably a
 * vendor magic, TODO confirm), everything else is logged and ignored.
 */
static void handle_controller(_cmsg *cmsg)
{
	capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);

	if (!card) {
		printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
		       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
		       cmsg->adr.adrController & 0x7f);
		return;
	}
	switch (CAPICMD(cmsg->Command, cmsg->Subcommand)) {

	case CAPI_LISTEN_CONF:	/* Controller */
		if (debugmode)
			printk(KERN_DEBUG "capidrv-%d: listenconf Info=0x%4x (%s) cipmask=0x%x\n",
			       card->contrnr, cmsg->Info, capi_info2str(cmsg->Info), card->cipmask);
		if (cmsg->Info) {
			listen_change_state(card, EV_LISTEN_CONF_ERROR);
		} else if (card->cipmask == 0) {
			listen_change_state(card, EV_LISTEN_CONF_EMPTY);
		} else {
			listen_change_state(card, EV_LISTEN_CONF_OK);
		}
		break;

	case CAPI_MANUFACTURER_IND:	/* Controller */
		if (cmsg->ManuID == 0x214D5641
		    && cmsg->Class == 0
		    && cmsg->Function == 1) {
			u8 *data = cmsg->ManuData + 3;
			u16 len = cmsg->ManuData[0];
			u16 layer;
			int direction;
			/* Length 255 escapes to a 16-bit length field. */
			if (len == 255) {
				len = (cmsg->ManuData[1] | (cmsg->ManuData[2] << 8));
				data += 2;
			}
			len -= 2;
			/* Last two bytes before the payload encode the layer. */
			layer = ((*(data - 1)) << 8) | *(data - 2);
			if (layer & 0x300)
				direction = (layer & 0x200) ? 0 : 1;
			else
				direction = (layer & 0x800) ? 0 : 1;
			if (layer & 0x0C00) {
				if ((layer & 0xff) == 0x80) {
					handle_dtrace_data(card, direction, 1, data, len);
					break;
				}
			} else if ((layer & 0xff) < 0x80) {
				handle_dtrace_data(card, direction, 0, data, len);
				break;
			}
			printk(KERN_INFO "capidrv-%d: %s from controller 0x%x layer 0x%x, ignored\n",
			       card->contrnr,
			       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
			       cmsg->adr.adrController, layer);
			break;
		}
		goto ignored;
	case CAPI_MANUFACTURER_CONF:	/* Controller */
		if (cmsg->ManuID == 0x214D5641) {
			char *s = NULL;
			switch (cmsg->Class) {
			case 0: break;
			case 1: s = "unknown class"; break;
			case 2: s = "unknown function"; break;
			default: s = "unknown error"; break;
			}
			if (s)
				printk(KERN_INFO "capidrv-%d: %s from controller 0x%x function %d: %s\n",
				       card->contrnr,
				       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
				       cmsg->adr.adrController,
				       cmsg->Function, s);
			break;
		}
		goto ignored;
	case CAPI_FACILITY_IND:	/* Controller/plci/ncci */
		goto ignored;
	case CAPI_FACILITY_CONF:	/* Controller/plci/ncci */
		goto ignored;
	case CAPI_INFO_IND:	/* Controller/plci */
		goto ignored;
	case CAPI_INFO_CONF:	/* Controller/plci */
		goto ignored;

	default:
		printk(KERN_ERR "capidrv-%d: got %s from controller 0x%x ???",
		       card->contrnr,
		       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
		       cmsg->adr.adrController);
	}
	return;

      ignored:
	printk(KERN_INFO "capidrv-%d: %s from controller 0x%x ignored\n",
	       card->contrnr,
	       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
	       cmsg->adr.adrController);
}
/*
 * Handle CAPI_CONNECT_IND: an incoming call.
 *
 * Allocates a free B channel and a PLCI tracking structure, then asks
 * the isdn4linux layer (statcallb with ISDN_STAT_ICALL) whether anyone
 * wants the call.  Depending on the answer the call is alerted,
 * ignored or rejected with a CONNECT_RESP.
 */
static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
{
	capidrv_plci *plcip;
	capidrv_bchan *bchan;
	isdn_ctrl cmd;
	int chan;

	if ((chan = new_bchan(card)) == -1) {
		printk(KERN_ERR "capidrv-%d: incoming call on not existing bchan ?\n", card->contrnr);
		return;
	}
	bchan = &card->bchans[chan];
	if ((plcip = new_plci(card, chan)) == NULL) {
		printk(KERN_ERR "capidrv-%d: incoming call: no memory, sorry.\n", card->contrnr);
		return;
	}
	bchan->incoming = 1;
	plcip->plci = cmsg->adr.adrPLCI;
	plci_change_state(card, plcip, EV_PLCI_CONNECT_IND);

	cmd.command = ISDN_STAT_ICALL;
	cmd.driver = card->myid;
	cmd.arg = chan;
	memset(&cmd.parm.setup, 0, sizeof(cmd.parm.setup));
	/* NOTE(review): the copy lengths come straight from the CAPI
	 * message (first byte of the element is its length) and are not
	 * checked against sizeof(parm.setup.phone/eazmsn) nor against
	 * zero-length elements -- confirm the CAPI layer guarantees
	 * sane element lengths here. */
	strncpy(cmd.parm.setup.phone,
		cmsg->CallingPartyNumber + 3,
		cmsg->CallingPartyNumber[0] - 2);
	strncpy(cmd.parm.setup.eazmsn,
		cmsg->CalledPartyNumber + 2,
		cmsg->CalledPartyNumber[0] - 1);
	cmd.parm.setup.si1 = cip2si1(cmsg->CIPValue);
	cmd.parm.setup.si2 = cip2si2(cmsg->CIPValue);
	cmd.parm.setup.plan = cmsg->CallingPartyNumber[1];
	cmd.parm.setup.screen = cmsg->CallingPartyNumber[2];

	printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s\n",
	       card->contrnr,
	       cmd.parm.setup.phone,
	       cmd.parm.setup.si1,
	       cmd.parm.setup.si2,
	       cmd.parm.setup.eazmsn);

	/* voice calls (si1 == 1) get si2 forced to 0 for vbox */
	if (cmd.parm.setup.si1 == 1 && cmd.parm.setup.si2 != 0) {
		printk(KERN_INFO "capidrv-%d: patching si2=%d to 0 for VBOX\n",
		       card->contrnr,
		       cmd.parm.setup.si2);
		cmd.parm.setup.si2 = 0;
	}

	switch (card->interface.statcallb(&cmd)) {
	case 0:
	case 3:
		/* No device matching this call.
		 * and isdn_common.c has send a HANGUP command
		 * which is ignored in state ST_PLCI_INCOMING,
		 * so we send RESP to ignore the call
		 */
		capi_cmsg_answer(cmsg);
		cmsg->Reject = 1;	/* ignore */
		plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
		send_message(card, cmsg);
		printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n",
		       card->contrnr,
		       cmd.parm.setup.phone,
		       cmd.parm.setup.si1,
		       cmd.parm.setup.si2,
		       cmd.parm.setup.eazmsn);
		break;

	case 1:
		/* At least one device matching this call (RING on ttyI)
		 * HL-driver may send ALERTING on the D-channel in this
		 * case.
		 * really means: RING on ttyI or a net interface
		 * accepted this call already.
		 *
		 * If the call was accepted, state has already changed,
		 * and CONNECT_RESP already sent.
		 */
		if (plcip->state == ST_PLCI_INCOMING) {
			printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s tty alerting\n",
			       card->contrnr,
			       cmd.parm.setup.phone,
			       cmd.parm.setup.si1,
			       cmd.parm.setup.si2,
			       cmd.parm.setup.eazmsn);
			capi_fill_ALERT_REQ(cmsg,
					    global.ap.applid,
					    card->msgid++,
					    plcip->plci,	/* adr */
					    NULL,/* BChannelinformation */
					    NULL,/* Keypadfacility */
					    NULL,/* Useruserdata */
					    NULL /* Facilitydataarray */
			    );
			plcip->msgid = cmsg->Messagenumber;
			send_message(card, cmsg);
		} else {
			printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s on netdev\n",
			       card->contrnr,
			       cmd.parm.setup.phone,
			       cmd.parm.setup.si1,
			       cmd.parm.setup.si2,
			       cmd.parm.setup.eazmsn);
		}
		break;

	case 2:		/* Call will be rejected. */
		capi_cmsg_answer(cmsg);
		cmsg->Reject = 2;	/* reject call, normal call clearing */
		plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
		send_message(card, cmsg);
		break;

	default:
		/* An error happened. (Invalid parameters for example.) */
		capi_cmsg_answer(cmsg);
		cmsg->Reject = 8;	/* reject call,
					   destination out of order */
		plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
		send_message(card, cmsg);
		break;
	}
	return;
}
/*
 * Handle a CAPI message addressed to a PLCI (physical link / call).
 *
 * Dispatches on (Command, Subcommand): drives the PLCI state machine
 * for connect/disconnect events, hands CONNECT_IND to
 * handle_incoming_call(), and for outgoing calls fires the
 * CONNECT_B3_REQ / ISDN_STAT_DCONN sequence once the connection goes
 * active.  Unknown messages are logged; messages for unknown PLCIs are
 * reported via the notfound label.
 */
static void handle_plci(_cmsg * cmsg)
{
	capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
	capidrv_plci *plcip;
	isdn_ctrl cmd;
	_cdebbuf *cdb;

	if (!card) {
		printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
		       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
		       cmsg->adr.adrController & 0x7f);
		return;
	}
	switch (CAPICMD(cmsg->Command, cmsg->Subcommand)) {

	case CAPI_DISCONNECT_IND:	/* plci */
		if (cmsg->Reason) {
			printk(KERN_INFO "capidrv-%d: %s reason 0x%x (%s) for plci 0x%x\n",
			       card->contrnr,
			       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
			       cmsg->Reason, capi_info2str(cmsg->Reason), cmsg->adr.adrPLCI);
		}
		if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI))) {
			/* unknown plci: still answer the IND, then report */
			capi_cmsg_answer(cmsg);
			send_message(card, cmsg);
			goto notfound;
		}
		card->bchans[plcip->chan].disconnecting = 1;
		plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND);
		capi_cmsg_answer(cmsg);
		plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP);
		send_message(card, cmsg);
		break;

	case CAPI_DISCONNECT_CONF:	/* plci */
		if (cmsg->Info) {
			printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for plci 0x%x\n",
			       card->contrnr,
			       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
			       cmsg->Info, capi_info2str(cmsg->Info),
			       cmsg->adr.adrPLCI);
		}
		if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI)))
			goto notfound;
		card->bchans[plcip->chan].disconnecting = 1;
		break;

	case CAPI_ALERT_CONF:	/* plci */
		if (cmsg->Info) {
			printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for plci 0x%x\n",
			       card->contrnr,
			       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
			       cmsg->Info, capi_info2str(cmsg->Info),
			       cmsg->adr.adrPLCI);
		}
		break;

	case CAPI_CONNECT_IND:	/* plci */
		handle_incoming_call(card, cmsg);
		break;

	case CAPI_CONNECT_CONF:	/* plci */
		if (cmsg->Info) {
			printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for plci 0x%x\n",
			       card->contrnr,
			       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
			       cmsg->Info, capi_info2str(cmsg->Info),
			       cmsg->adr.adrPLCI);
		}
		/* the CONF is matched by message number, not by plci:
		 * the plci is only assigned here */
		if (!(plcip = find_plci_by_msgid(card, cmsg->Messagenumber)))
			goto notfound;
		plcip->plci = cmsg->adr.adrPLCI;
		if (cmsg->Info) {
			plci_change_state(card, plcip, EV_PLCI_CONNECT_CONF_ERROR);
		} else {
			plci_change_state(card, plcip, EV_PLCI_CONNECT_CONF_OK);
		}
		break;

	case CAPI_CONNECT_ACTIVE_IND:	/* plci */
		if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI)))
			goto notfound;
		if (card->bchans[plcip->chan].incoming) {
			capi_cmsg_answer(cmsg);
			plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND);
			send_message(card, cmsg);
		} else {
			/* outgoing call went active: answer, then open the
			 * logical (B3) connection and tell the isdn layer */
			capidrv_ncci *nccip;
			capi_cmsg_answer(cmsg);
			send_message(card, cmsg);
			nccip = new_ncci(card, plcip, cmsg->adr.adrPLCI);
			if (!nccip) {
				printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr);
				break;	/* $$$$ */
			}
			capi_fill_CONNECT_B3_REQ(cmsg,
						 global.ap.applid,
						 card->msgid++,
						 plcip->plci,	/* adr */
						 NULL /* NCPI */
			    );
			nccip->msgid = cmsg->Messagenumber;
			plci_change_state(card, plcip,
					  EV_PLCI_CONNECT_ACTIVE_IND);
			ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ);
			send_message(card, cmsg);
			cmd.command = ISDN_STAT_DCONN;
			cmd.driver = card->myid;
			cmd.arg = plcip->chan;
			card->interface.statcallb(&cmd);
		}
		break;

	case CAPI_INFO_IND:	/* Controller/plci */
		if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI)))
			goto notfound;
		/* InfoNumber 0x4000 with a 4-byte element carries the
		 * charge units (32 bit little endian) */
		if (cmsg->InfoNumber == 0x4000) {
			if (cmsg->InfoElement[0] == 4) {
				cmd.command = ISDN_STAT_CINF;
				cmd.driver = card->myid;
				cmd.arg = plcip->chan;
				sprintf(cmd.parm.num, "%lu",
					(unsigned long)
					((u32) cmsg->InfoElement[1]
					 | ((u32) (cmsg->InfoElement[2]) << 8)
					 | ((u32) (cmsg->InfoElement[3]) << 16)
					 | ((u32) (cmsg->InfoElement[4]) << 24)));
				card->interface.statcallb(&cmd);
				break;
			}
		}
		cdb = capi_cmsg2str(cmsg);
		if (cdb) {
			printk(KERN_WARNING "capidrv-%d: %s\n",
			       card->contrnr, cdb->buf);
			cdebbuf_free(cdb);
		} else
			printk(KERN_WARNING "capidrv-%d: CAPI_INFO_IND InfoNumber %x not handled\n",
			       card->contrnr, cmsg->InfoNumber);
		break;

	case CAPI_CONNECT_ACTIVE_CONF:	/* plci */
		goto ignored;
	case CAPI_SELECT_B_PROTOCOL_CONF:	/* plci */
		goto ignored;
	case CAPI_FACILITY_IND:	/* Controller/plci/ncci */
		goto ignored;
	case CAPI_FACILITY_CONF:	/* Controller/plci/ncci */
		goto ignored;
	case CAPI_INFO_CONF:	/* Controller/plci */
		goto ignored;

	default:
		printk(KERN_ERR "capidrv-%d: got %s for plci 0x%x ???",
		       card->contrnr,
		       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
		       cmsg->adr.adrPLCI);
	}
	return;

      ignored:
	printk(KERN_INFO "capidrv-%d: %s for plci 0x%x ignored\n",
	       card->contrnr,
	       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
	       cmsg->adr.adrPLCI);
	return;

      notfound:
	printk(KERN_ERR "capidrv-%d: %s: plci 0x%x not found\n",
	       card->contrnr,
	       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
	       cmsg->adr.adrPLCI);
	return;
}
/*
 * Handle a CAPI message addressed to an NCCI (logical/B3 connection).
 *
 * Drives the NCCI state machine, reports B channel up/down and data
 * acknowledgements (ISDN_STAT_BCONN / ISDN_STAT_BSENT) to the isdn
 * layer.  Incoming CONNECT_B3_INDs are accepted when the owning PLCI
 * is known, otherwise rejected with Reject=2.
 */
static void handle_ncci(_cmsg * cmsg)
{
	capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
	capidrv_plci *plcip;
	capidrv_ncci *nccip;
	isdn_ctrl cmd;
	int len;

	if (!card) {
		printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
		       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
		       cmsg->adr.adrController & 0x7f);
		return;
	}
	switch (CAPICMD(cmsg->Command, cmsg->Subcommand)) {

	case CAPI_CONNECT_B3_ACTIVE_IND:	/* ncci */
		if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
			goto notfound;
		capi_cmsg_answer(cmsg);
		ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND);
		send_message(card, cmsg);
		cmd.command = ISDN_STAT_BCONN;
		cmd.driver = card->myid;
		cmd.arg = nccip->chan;
		card->interface.statcallb(&cmd);
		printk(KERN_INFO "capidrv-%d: chan %d up with ncci 0x%x\n",
		       card->contrnr, nccip->chan, nccip->ncci);
		break;

	case CAPI_CONNECT_B3_ACTIVE_CONF:	/* ncci */
		goto ignored;

	case CAPI_CONNECT_B3_IND:	/* ncci */
		plcip = find_plci_by_ncci(card, cmsg->adr.adrNCCI);
		if (plcip) {
			nccip = new_ncci(card, plcip, cmsg->adr.adrNCCI);
			if (nccip) {
				ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_IND);
				capi_fill_CONNECT_B3_RESP(cmsg,
							  global.ap.applid,
							  card->msgid++,
							  nccip->ncci,	/* adr */
							  0,	/* Reject */
							  NULL /* NCPI */
				    );
				ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP);
				send_message(card, cmsg);
				/* accepted: skip the reject RESP below */
				break;
			}
			printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr);
		} else {
			printk(KERN_ERR "capidrv-%d: %s: plci for ncci 0x%x not found\n",
			       card->contrnr,
			       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
			       cmsg->adr.adrNCCI);
		}
		/* reached on unknown plci or ncci allocation failure:
		 * reject the logical connection */
		capi_fill_CONNECT_B3_RESP(cmsg,
					  global.ap.applid,
					  card->msgid++,
					  cmsg->adr.adrNCCI,
					  2,	/* Reject */
					  NULL /* NCPI */
		    );
		send_message(card, cmsg);
		break;

	case CAPI_CONNECT_B3_CONF:	/* ncci */
		/* matched by message number; the real ncci is assigned here */
		if (!(nccip = find_ncci_by_msgid(card,
						 cmsg->adr.adrNCCI,
						 cmsg->Messagenumber)))
			goto notfound;
		nccip->ncci = cmsg->adr.adrNCCI;
		if (cmsg->Info) {
			printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for ncci 0x%x\n",
			       card->contrnr,
			       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
			       cmsg->Info, capi_info2str(cmsg->Info),
			       cmsg->adr.adrNCCI);
		}
		if (cmsg->Info)
			ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_CONF_ERROR);
		else
			ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_CONF_OK);
		break;

	case CAPI_CONNECT_B3_T90_ACTIVE_IND:	/* ncci */
		capi_cmsg_answer(cmsg);
		send_message(card, cmsg);
		break;

	case CAPI_DATA_B3_IND:	/* ncci */
		/* handled in handle_data() */
		goto ignored;

	case CAPI_DATA_B3_CONF:	/* ncci */
		if (cmsg->Info) {
			printk(KERN_WARNING "CAPI_DATA_B3_CONF: Info %x - %s\n",
			       cmsg->Info, capi_info2str(cmsg->Info));
		}
		if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
			goto notfound;
		/* match the ack against the outstanding datahandle;
		 * len < 0 means no BSENT should be reported */
		len = capidrv_del_ack(nccip, cmsg->DataHandle);
		if (len < 0)
			break;
		cmd.command = ISDN_STAT_BSENT;
		cmd.driver = card->myid;
		cmd.arg = nccip->chan;
		cmd.parm.length = len;
		card->interface.statcallb(&cmd);
		break;

	case CAPI_DISCONNECT_B3_IND:	/* ncci */
		if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
			goto notfound;
		card->bchans[nccip->chan].disconnecting = 1;
		ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND);
		capi_cmsg_answer(cmsg);
		ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP);
		send_message(card, cmsg);
		break;

	case CAPI_DISCONNECT_B3_CONF:	/* ncci */
		if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
			goto notfound;
		/* only an error CONF changes the state machine */
		if (cmsg->Info) {
			printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for ncci 0x%x\n",
			       card->contrnr,
			       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
			       cmsg->Info, capi_info2str(cmsg->Info),
			       cmsg->adr.adrNCCI);
			ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_CONF_ERROR);
		}
		break;

	case CAPI_RESET_B3_IND:	/* ncci */
		if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
			goto notfound;
		ncci_change_state(card, nccip, EV_NCCI_RESET_B3_IND);
		capi_cmsg_answer(cmsg);
		send_message(card, cmsg);
		break;

	case CAPI_RESET_B3_CONF:	/* ncci */
		goto ignored;	/* $$$$ */

	case CAPI_FACILITY_IND:	/* Controller/plci/ncci */
		goto ignored;
	case CAPI_FACILITY_CONF:	/* Controller/plci/ncci */
		goto ignored;

	default:
		printk(KERN_ERR "capidrv-%d: got %s for ncci 0x%x ???",
		       card->contrnr,
		       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
		       cmsg->adr.adrNCCI);
	}
	return;

      ignored:
	printk(KERN_INFO "capidrv-%d: %s for ncci 0x%x ignored\n",
	       card->contrnr,
	       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
	       cmsg->adr.adrNCCI);
	return;

      notfound:
	printk(KERN_ERR "capidrv-%d: %s: ncci 0x%x not found\n",
	       card->contrnr,
	       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
	       cmsg->adr.adrNCCI);
}
/*
 * Handle CAPI_DATA_B3_IND: deliver received B channel payload.
 *
 * Strips the CAPI header from the skb and passes it to the isdn layer
 * (which takes ownership of the skb), then answers the IND.  On any
 * lookup failure the skb is freed here instead.
 */
static void handle_data(_cmsg * cmsg, struct sk_buff *skb)
{
	capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
	capidrv_ncci *nccip;

	if (!card) {
		printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
		       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
		       cmsg->adr.adrController & 0x7f);
		kfree_skb(skb);
		return;
	}
	if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI))) {
		printk(KERN_ERR "capidrv-%d: %s: ncci 0x%x not found\n",
		       card->contrnr,
		       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
		       cmsg->adr.adrNCCI);
		kfree_skb(skb);
		return;
	}
	/* drop the CAPI message header; the rest is user payload */
	(void) skb_pull(skb, CAPIMSG_LEN(skb->data));
	card->interface.rcvcallb_skb(card->myid, nccip->chan, skb);
	capi_cmsg_answer(cmsg);
	send_message(card, cmsg);
}
/* Single static cmsg used to decode every incoming CAPI message.
 * NOTE(review): this assumes capidrv_recv_message() is never entered
 * concurrently -- confirm the kernelcapi layer serializes delivery. */
static _cmsg s_cmsg;

/*
 * Receive callback registered with the CAPI layer.
 *
 * Decodes the raw message into s_cmsg and dispatches by address level:
 * DATA_B3_IND keeps its skb (ownership passes to handle_data()); for
 * all other messages the decoded s_cmsg borrows the skb data, so the
 * skb is freed only after dispatch completes.
 */
static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
{
	capi_message2cmsg(&s_cmsg, skb->data);
	if (debugmode > 3) {
		_cdebbuf *cdb = capi_cmsg2str(&s_cmsg);

		if (cdb) {
			printk(KERN_DEBUG "%s: applid=%d %s\n", __func__,
			       ap->applid, cdb->buf);
			cdebbuf_free(cdb);
		} else
			printk(KERN_DEBUG "%s: applid=%d %s not traced\n",
			       __func__, ap->applid,
			       capi_cmd2str(s_cmsg.Command, s_cmsg.Subcommand));
	}
	if (s_cmsg.Command == CAPI_DATA_B3
	    && s_cmsg.Subcommand == CAPI_IND) {
		handle_data(&s_cmsg, skb);
		return;
	}
	/* no PLCI/NCCI part -> controller; no NCCI part -> plci */
	if ((s_cmsg.adr.adrController & 0xffffff00) == 0)
		handle_controller(&s_cmsg);
	else if ((s_cmsg.adr.adrPLCI & 0xffff0000) == 0)
		handle_plci(&s_cmsg);
	else
		handle_ncci(&s_cmsg);
	/*
	 * data of skb used in s_cmsg,
	 * free data when s_cmsg is not used again
	 * thanks to Lars Heete <hel@admin.de>
	 */
	kfree_skb(skb);
}
/* ------------------------------------------------------------------- */
/* Append one byte to the q931 status ring buffer, wrapping the write
 * pointer at the end of the buffer.  Readers drain it via
 * if_readstat(). */
#define PUTBYTE_TO_STATUS(card, byte) \
	do { \
		*(card)->q931_write++ = (byte); \
		if ((card)->q931_write > (card)->q931_end) \
			(card)->q931_write = (card)->q931_buf; \
	} while (0)

/*
 * Format one D channel trace record into the q931 status buffer:
 * a "D2>"/"D2<"/"D3>"/"D3<" prefix plus ':' followed by a hex dump of
 * the payload, then notify the isdn layer (ISDN_STAT_STAVAIL) how many
 * bytes became available.
 */
static void handle_dtrace_data(capidrv_contr *card,
			       int send, int level2, u8 *data, u16 len)
{
	u8 *p, *end;
	isdn_ctrl cmd;

	if (!len) {
		printk(KERN_DEBUG "capidrv-%d: avmb1_q931_data: len == %d\n",
		       card->contrnr, len);
		return;
	}
	if (level2) {
		PUTBYTE_TO_STATUS(card, 'D');
		PUTBYTE_TO_STATUS(card, '2');
		PUTBYTE_TO_STATUS(card, send ? '>' : '<');
		PUTBYTE_TO_STATUS(card, ':');
	} else {
		PUTBYTE_TO_STATUS(card, 'D');
		PUTBYTE_TO_STATUS(card, '3');
		PUTBYTE_TO_STATUS(card, send ? '>' : '<');
		PUTBYTE_TO_STATUS(card, ':');
	}
	for (p = data, end = data + len; p < end; p++) {
		PUTBYTE_TO_STATUS(card, ' ');
		PUTBYTE_TO_STATUS(card, hex_asc_hi(*p));
		PUTBYTE_TO_STATUS(card, hex_asc_lo(*p));
	}
	PUTBYTE_TO_STATUS(card, '\n');

	cmd.command = ISDN_STAT_STAVAIL;
	cmd.driver = card->myid;
	/* 4 prefix chars + 3 chars per data byte + trailing '\n' */
	cmd.arg = len * 3 + 5;
	card->interface.statcallb(&cmd);
}
/* ------------------------------------------------------------------- */
/* shared cmsg buffer for all requests built by the driver itself */
static _cmsg cmdcmsg;

/*
 * ISDN_CMD_IOCTL handler.  The only supported sub-command (arg == 1)
 * sets the module-wide debug level from the user-supplied value;
 * anything else is logged and rejected with -EINVAL.
 */
static int capidrv_ioctl(isdn_ctrl * c, capidrv_contr * card)
{
	if (c->arg == 1) {
		debugmode = (int)(*((unsigned int *)c->parm.num));
		printk(KERN_DEBUG "capidrv-%d: debugmode=%d\n",
		       card->contrnr, debugmode);
		return 0;
	}
	printk(KERN_DEBUG "capidrv-%d: capidrv_ioctl(%ld) called ??\n",
	       card->contrnr, c->arg);
	return -EINVAL;
}
/*
 * Handle leased lines (CAPI-Bundling)
 */
/* Layout of the CAPI B channel information element built by
 * FVteln2capi20(): channel allocation mode, DTE/DCE operation and a
 * per-channel mask (31 bytes: D channel + channels 1..30). */
struct internal_bchannelinfo {
	unsigned short channelalloc;
	unsigned short operation;
	unsigned char cmask[31];
};
/*
 * Parse a leased-line phone "number" of the form
 *   FV: [p|P|a|A] <chan>[-<chan>][,<chan>...]
 * into a channel bitmask (bits 1..30) and an active (DTE) /
 * passive (DCE) flag.
 *
 * Returns 0 on success, 1 if the string has no "FV:" prefix, or a
 * negative code describing the first syntax error.
 */
static int decodeFVteln(char *teln, unsigned long *bmaskp, int *activep)
{
	unsigned long mask = 0;
	int active = 1;
	char *p;
	int i;

	if (strncmp(teln, "FV:", 3) != 0)
		return 1;
	p = teln + 3;
	while (*p == ' ')
		p++;
	if (!*p)
		return -2;
	/* optional mode letter: p/P = passive, a/A = active */
	if (*p == 'p' || *p == 'P') {
		active = 0;
		p++;
	}
	if (*p == 'a' || *p == 'A') {
		active = 1;
		p++;
	}
	while (*p) {
		char *endp;
		int first, last;

		first = simple_strtoul(p, &endp, 10);
		if (endp == p)
			return -3;
		p = endp;
		if (first <= 0 || first > 30)
			return -4;
		if (*p == 0 || *p == ',' || *p == ' ') {
			/* single channel */
			mask |= (1 << first);
			if (*p)
				p++;
			continue;
		}
		if (*p != '-')
			return -5;
		p++;
		last = simple_strtoul(p, &endp, 10);
		if (endp == p)
			return -3;
		p = endp;
		if (last <= 0 || last > 30)
			return -4;
		if (*p != 0 && *p != ',' && *p != ' ')
			return -6;
		/* range, either direction */
		if (first > last)
			for (i = last; i <= first; i++)
				mask |= (1 << i);
		else
			for (i = first; i <= last; i++)
				mask |= (1 << i);
		if (*p)
			p++;
	}
	if (activep)
		*activep = active;
	if (bmaskp)
		*bmaskp = mask;
	return 0;
}
/*
 * Build the CAPI 2.0 AdditionalInfo (B channel information) structure
 * from a leased-line specification.  Returns the non-zero result of
 * decodeFVteln() on a parse failure, 0 on success.
 */
static int FVteln2capi20(char *teln, u8 AdditionalInfo[1+2+2+31])
{
	unsigned long bmask;
	int active;
	int i, rc;

	rc = decodeFVteln(teln, &bmask, &active);
	if (rc)
		return rc;
	/* Length */
	AdditionalInfo[0] = 2 + 2 + 31;
	/* Channel: 3 => use channel allocation */
	AdditionalInfo[1] = 3;
	AdditionalInfo[2] = 0;
	/* Operation: 0 => DTE mode, 1 => DCE mode */
	AdditionalInfo[3] = active ? 0 : 1;
	AdditionalInfo[4] = 0;
	/* Channel mask array: one byte per channel, D channel unused */
	AdditionalInfo[5] = 0;	/* no D-Channel */
	for (i = 1; i <= 30; i++)
		AdditionalInfo[5 + i] = (bmask & (1 << i)) ? 0xff : 0;
	return 0;
}
/*
 * Main isdn4linux command dispatcher for one controller.
 *
 * Translates isdn_ctrl commands (dial, accept, hangup, layer setup,
 * EAZ handling) into CAPI requests built in the shared cmdcmsg buffer.
 * Returns 0 on success or a negative errno style code.
 */
static int capidrv_command(isdn_ctrl * c, capidrv_contr * card)
{
	isdn_ctrl cmd;
	struct capidrv_bchan *bchan;
	struct capidrv_plci *plcip;
	u8 AdditionalInfo[1+2+2+31];
	int rc, isleasedline = 0;

	if (c->command == ISDN_CMD_IOCTL)
		return capidrv_ioctl(c, card);

	switch (c->command) {
	case ISDN_CMD_DIAL:{
			u8 calling[ISDN_MSNLEN + 3];
			u8 called[ISDN_MSNLEN + 2];

			if (debugmode)
				printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_DIAL(ch=%ld,\"%s,%d,%d,%s\")\n",
				       card->contrnr,
				       c->arg,
				       c->parm.setup.phone,
				       c->parm.setup.si1,
				       c->parm.setup.si2,
				       c->parm.setup.eazmsn);

			bchan = &card->bchans[c->arg % card->nbchan];

			if (bchan->plcip) {
				printk(KERN_ERR "capidrv-%d: dail ch=%ld,\"%s,%d,%d,%s\" in use (plci=0x%x)\n",
				       card->contrnr,
				       c->arg,
				       c->parm.setup.phone,
				       c->parm.setup.si1,
				       c->parm.setup.si2,
				       c->parm.setup.eazmsn,
				       bchan->plcip->plci);
				return 0;
			}
			bchan->si1 = c->parm.setup.si1;
			bchan->si2 = c->parm.setup.si2;
			/* NOTE(review): strncpy does not NUL-terminate
			 * when the source fills the buffer -- confirm
			 * num/mynum are sized for the longest setup
			 * strings plus terminator. */
			strncpy(bchan->num, c->parm.setup.phone, sizeof(bchan->num));
			strncpy(bchan->mynum, c->parm.setup.eazmsn, sizeof(bchan->mynum));
			/* a number starting with "FV:" selects a leased
			 * line; rc < 0 is a malformed FV spec */
			rc = FVteln2capi20(bchan->num, AdditionalInfo);
			isleasedline = (rc == 0);
			if (rc < 0)
				printk(KERN_ERR "capidrv-%d: WARNING: invalid leased linedefinition \"%s\"\n", card->contrnr, bchan->num);

			if (isleasedline) {
				calling[0] = 0;
				called[0] = 0;
				if (debugmode)
					printk(KERN_DEBUG "capidrv-%d: connecting leased line\n", card->contrnr);
			} else {
				/* CAPI number elements: length byte,
				 * type/plan octets, then digits */
				calling[0] = strlen(bchan->mynum) + 2;
				calling[1] = 0;
				calling[2] = 0x80;
				strncpy(calling + 3, bchan->mynum, ISDN_MSNLEN);
				called[0] = strlen(bchan->num) + 1;
				called[1] = 0x80;
				strncpy(called + 2, bchan->num, ISDN_MSNLEN);
			}

			capi_fill_CONNECT_REQ(&cmdcmsg,
					      global.ap.applid,
					      card->msgid++,
					      card->contrnr,	/* adr */
					      si2cip(bchan->si1, bchan->si2),	/* cipvalue */
					      called,	/* CalledPartyNumber */
					      calling,	/* CallingPartyNumber */
					      NULL,	/* CalledPartySubaddress */
					      NULL,	/* CallingPartySubaddress */
					      b1prot(bchan->l2, bchan->l3),	/* B1protocol */
					      b2prot(bchan->l2, bchan->l3),	/* B2protocol */
					      b3prot(bchan->l2, bchan->l3),	/* B3protocol */
					      b1config(bchan->l2, bchan->l3),	/* B1configuration */
					      NULL,	/* B2configuration */
					      NULL,	/* B3configuration */
					      NULL,	/* BC */
					      NULL,	/* LLC */
					      NULL,	/* HLC */
					      /* BChannelinformation */
					      isleasedline ? AdditionalInfo : NULL,
					      NULL,	/* Keypadfacility */
					      NULL,	/* Useruserdata */
					      NULL	/* Facilitydataarray */
			    );
			if ((plcip = new_plci(card, (c->arg % card->nbchan))) == NULL) {
				cmd.command = ISDN_STAT_DHUP;
				cmd.driver = card->myid;
				cmd.arg = (c->arg % card->nbchan);
				card->interface.statcallb(&cmd);
				return -1;
			}
			plcip->msgid = cmdcmsg.Messagenumber;
			plcip->leasedline = isleasedline;
			plci_change_state(card, plcip, EV_PLCI_CONNECT_REQ);
			send_message(card, &cmdcmsg);
			return 0;
		}

	case ISDN_CMD_ACCEPTD:
		bchan = &card->bchans[c->arg % card->nbchan];
		if (debugmode)
			printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_ACCEPTD(ch=%ld) l2=%d l3=%d\n",
			       card->contrnr,
			       c->arg, bchan->l2, bchan->l3);

		/* NOTE(review): bchan->plcip is dereferenced without a
		 * NULL check -- assumes a CONNECT_IND set it up;
		 * confirm the isdn layer cannot issue ACCEPTD on a
		 * free channel. */
		capi_fill_CONNECT_RESP(&cmdcmsg,
				       global.ap.applid,
				       card->msgid++,
				       bchan->plcip->plci,	/* adr */
				       0,	/* Reject */
				       b1prot(bchan->l2, bchan->l3),	/* B1protocol */
				       b2prot(bchan->l2, bchan->l3),	/* B2protocol */
				       b3prot(bchan->l2, bchan->l3),	/* B3protocol */
				       b1config(bchan->l2, bchan->l3),	/* B1configuration */
				       NULL,	/* B2configuration */
				       NULL,	/* B3configuration */
				       NULL,	/* ConnectedNumber */
				       NULL,	/* ConnectedSubaddress */
				       NULL,	/* LLC */
				       NULL,	/* BChannelinformation */
				       NULL,	/* Keypadfacility */
				       NULL,	/* Useruserdata */
				       NULL	/* Facilitydataarray */
		    );
		capi_cmsg2message(&cmdcmsg, cmdcmsg.buf);
		plci_change_state(card, bchan->plcip, EV_PLCI_CONNECT_RESP);
		send_message(card, &cmdcmsg);
		return 0;

	case ISDN_CMD_ACCEPTB:
		if (debugmode)
			printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_ACCEPTB(ch=%ld)\n",
			       card->contrnr,
			       c->arg);
		return -ENOSYS;

	case ISDN_CMD_HANGUP:
		if (debugmode)
			printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_HANGUP(ch=%ld)\n",
			       card->contrnr,
			       c->arg);
		bchan = &card->bchans[c->arg % card->nbchan];

		if (bchan->disconnecting) {
			if (debugmode)
				printk(KERN_DEBUG "capidrv-%d: chan %ld already disconnecting ...\n",
				       card->contrnr,
				       c->arg);
			return 0;
		}
		/* tear down the B3 connection first if one exists,
		 * otherwise the physical connection */
		if (bchan->nccip) {
			bchan->disconnecting = 1;
			capi_fill_DISCONNECT_B3_REQ(&cmdcmsg,
						    global.ap.applid,
						    card->msgid++,
						    bchan->nccip->ncci,
						    NULL	/* NCPI */
			    );
			ncci_change_state(card, bchan->nccip, EV_NCCI_DISCONNECT_B3_REQ);
			send_message(card, &cmdcmsg);
			return 0;
		} else if (bchan->plcip) {
			if (bchan->plcip->state == ST_PLCI_INCOMING) {
				/*
				 * just ignore, we a called from
				 * isdn_status_callback(),
				 * which will return 0 or 2, this is handled
				 * by the CONNECT_IND handler
				 */
				bchan->disconnecting = 1;
				return 0;
			} else if (bchan->plcip->plci) {
				bchan->disconnecting = 1;
				capi_fill_DISCONNECT_REQ(&cmdcmsg,
							 global.ap.applid,
							 card->msgid++,
							 bchan->plcip->plci,
							 NULL,	/* BChannelinformation */
							 NULL,	/* Keypadfacility */
							 NULL,	/* Useruserdata */
							 NULL	/* Facilitydataarray */
				    );
				plci_change_state(card, bchan->plcip, EV_PLCI_DISCONNECT_REQ);
				send_message(card, &cmdcmsg);
				return 0;
			} else {
				printk(KERN_ERR "capidrv-%d: chan %ld disconnect request while waiting for CONNECT_CONF\n",
				       card->contrnr,
				       c->arg);
				return -EINVAL;
			}
		}
		printk(KERN_ERR "capidrv-%d: chan %ld disconnect request on free channel\n",
		       card->contrnr,
		       c->arg);
		return -EINVAL;
/* ready */

	case ISDN_CMD_SETL2:
		/* arg encodes channel in low byte, layer 2 proto above */
		if (debugmode)
			printk(KERN_DEBUG "capidrv-%d: set L2 on chan %ld to %ld\n",
			       card->contrnr,
			       (c->arg & 0xff), (c->arg >> 8));
		bchan = &card->bchans[(c->arg & 0xff) % card->nbchan];
		bchan->l2 = (c->arg >> 8);
		return 0;

	case ISDN_CMD_SETL3:
		if (debugmode)
			printk(KERN_DEBUG "capidrv-%d: set L3 on chan %ld to %ld\n",
			       card->contrnr,
			       (c->arg & 0xff), (c->arg >> 8));
		bchan = &card->bchans[(c->arg & 0xff) % card->nbchan];
		bchan->l3 = (c->arg >> 8);
		return 0;

	case ISDN_CMD_SETEAZ:
		if (debugmode)
			printk(KERN_DEBUG "capidrv-%d: set EAZ \"%s\" on chan %ld\n",
			       card->contrnr,
			       c->parm.num, c->arg);
		bchan = &card->bchans[c->arg % card->nbchan];
		strncpy(bchan->msn, c->parm.num, ISDN_MSNLEN);
		return 0;

	case ISDN_CMD_CLREAZ:
		if (debugmode)
			printk(KERN_DEBUG "capidrv-%d: clearing EAZ on chan %ld\n",
			       card->contrnr, c->arg);
		bchan = &card->bchans[c->arg % card->nbchan];
		bchan->msn[0] = 0;
		return 0;

	default:
		printk(KERN_ERR "capidrv-%d: ISDN_CMD_%d, Huh?\n",
		       card->contrnr, c->command);
		return -EINVAL;
	}
	return 0;
}
/*
 * isdn_if command entry point: resolve the driver id to our controller
 * structure and dispatch to capidrv_command().
 */
static int if_command(isdn_ctrl * c)
{
	capidrv_contr *card = findcontrbydriverid(c->driver);

	if (!card) {
		printk(KERN_ERR
		       "capidrv: if_command %d called with invalid driverId %d!\n",
		       c->command, c->driver);
		return -ENODEV;
	}
	return capidrv_command(c, card);
}
/* shared cmsg used to build DATA_B3_REQ headers in if_sendbuf() */
static _cmsg sendcmsg;

/*
 * isdn_if transmit entry point: wrap the skb payload in a DATA_B3_REQ
 * and hand it to the CAPI layer.
 *
 * Returns the payload length on success, 0 to flow-control the caller
 * (bad state, no memory, or CAPI send queue full) and -1 on a hard
 * CAPI error.  On success the skb is consumed; on failure the caller
 * keeps it.
 */
static int if_sendbuf(int id, int channel, int doack, struct sk_buff *skb)
{
	capidrv_contr *card = findcontrbydriverid(id);
	capidrv_bchan *bchan;
	capidrv_ncci *nccip;
	int len = skb->len;
	int msglen;
	u16 errcode;
	u16 datahandle;
	u32 data;

	if (!card) {
		printk(KERN_ERR "capidrv: if_sendbuf called with invalid driverId %d!\n",
		       id);
		return 0;
	}
	if (debugmode > 4)
		printk(KERN_DEBUG "capidrv-%d: sendbuf len=%d skb=%p doack=%d\n",
		       card->contrnr, len, skb, doack);
	bchan = &card->bchans[channel % card->nbchan];
	nccip = bchan->nccip;
	if (!nccip || nccip->state != ST_NCCI_ACTIVE) {
		printk(KERN_ERR "capidrv-%d: if_sendbuf: %s:%d: chan not up!\n",
		       card->contrnr, card->name, channel);
		return 0;
	}
	datahandle = nccip->datahandle;

	/*
	 * Here we copy pointer skb->data into the 32-bit 'Data' field.
	 * The 'Data' field is not used in practice in linux kernel
	 * (neither in 32 or 64 bit), but should have some value,
	 * since a CAPI message trace will display it.
	 *
	 * The correct value in the 32 bit case is the address of the
	 * data, in 64 bit it makes no sense, we use 0 there.
	 */
#ifdef CONFIG_64BIT
	data = 0;
#else
	data = (unsigned long) skb->data;
#endif

	capi_fill_DATA_B3_REQ(&sendcmsg, global.ap.applid, card->msgid++,
			      nccip->ncci,	/* adr */
			      data,	/* Data */
			      skb->len,	/* DataLength */
			      datahandle,	/* DataHandle */
			      0	/* Flags */
	    );

	/* register the datahandle so DATA_B3_CONF can report BSENT;
	 * doack == 0 stores -1 which suppresses the BSENT callback */
	if (capidrv_add_ack(nccip, datahandle, doack ? (int)skb->len : -1) < 0)
		return 0;

	capi_cmsg2message(&sendcmsg, sendcmsg.buf);
	msglen = CAPIMSG_LEN(sendcmsg.buf);
	if (skb_headroom(skb) < msglen) {
		/* not enough headroom for the CAPI header: reallocate */
		struct sk_buff *nskb = skb_realloc_headroom(skb, msglen);
		if (!nskb) {
			printk(KERN_ERR "capidrv-%d: if_sendbuf: no memory\n",
			       card->contrnr);
			(void)capidrv_del_ack(nccip, datahandle);
			return 0;
		}
		printk(KERN_DEBUG "capidrv-%d: only %d bytes headroom, need %d\n",
		       card->contrnr, skb_headroom(skb), msglen);
		memcpy(skb_push(nskb, msglen), sendcmsg.buf, msglen);
		errcode = capi20_put_message(&global.ap, nskb);
		if (errcode == CAPI_NOERROR) {
			/* original skb no longer needed */
			dev_kfree_skb(skb);
			nccip->datahandle++;
			return len;
		}
		if (debugmode > 3)
			printk(KERN_DEBUG "capidrv-%d: sendbuf putmsg ret(%x) - %s\n",
			       card->contrnr, errcode, capi_info2str(errcode));
		(void)capidrv_del_ack(nccip, datahandle);
		dev_kfree_skb(nskb);
		return errcode == CAPI_SENDQUEUEFULL ? 0 : -1;
	} else {
		memcpy(skb_push(skb, msglen), sendcmsg.buf, msglen);
		errcode = capi20_put_message(&global.ap, skb);
		if (errcode == CAPI_NOERROR) {
			nccip->datahandle++;
			return len;
		}
		if (debugmode > 3)
			printk(KERN_DEBUG "capidrv-%d: sendbuf putmsg ret(%x) - %s\n",
			       card->contrnr, errcode, capi_info2str(errcode));
		/* restore the skb to its original state for the caller */
		skb_pull(skb, msglen);
		(void)capidrv_del_ack(nccip, datahandle);
		return errcode == CAPI_SENDQUEUEFULL ? 0 : -1;
	}
}
/*
 * isdn_if status-read entry point: copy len bytes from the q931 trace
 * ring buffer to user space, wrapping the read pointer at the buffer
 * end.  Returns the byte count or a negative errno.
 */
static int if_readstat(u8 __user *buf, int len, int id, int channel)
{
	capidrv_contr *card = findcontrbydriverid(id);
	u8 __user *dst = buf;
	int n;

	if (!card) {
		printk(KERN_ERR "capidrv: if_readstat called with invalid driverId %d!\n",
		       id);
		return -ENODEV;
	}
	for (n = 0; n < len; n++) {
		if (put_user(*card->q931_read++, dst++))
			return -EFAULT;
		/* wrap the ring buffer read pointer */
		if (card->q931_read > card->q931_end)
			card->q931_read = card->q931_buf;
	}
	return n;
}
/*
 * Enable D channel tracing on an AVM controller.
 *
 * Verifies the controller manufacturer is AVM, decodes the firmware
 * version and sends the manufacturer specific MANUFACTURER_REQ
 * (ManuID 0x214D5641, Class 0, Function 1) that switches on D2 tracing
 * (firmware >= 3.6) or D3 tracing.  The trace records come back as
 * MANUFACTURER_IND messages, decoded in handle_controller().
 */
static void enable_dchannel_trace(capidrv_contr *card)
{
	u8 manufacturer[CAPI_MANUFACTURER_LEN];
	capi_version version;
	u16 contr = card->contrnr;
	u16 errcode;
	u16 avmversion[3];

	errcode = capi20_get_manufacturer(contr, manufacturer);
	if (errcode != CAPI_NOERROR) {
		printk(KERN_ERR "%s: can't get manufacturer (0x%x)\n",
		       card->name, errcode);
		return;
	}
	if (strstr(manufacturer, "AVM") == NULL) {
		printk(KERN_ERR "%s: not from AVM, no d-channel trace possible (%s)\n",
		       card->name, manufacturer);
		return;
	}
	errcode = capi20_get_version(contr, &version);
	if (errcode != CAPI_NOERROR) {
		printk(KERN_ERR "%s: can't get version (0x%x)\n",
		       card->name, errcode);
		return;
	}
	/* decode AVM's major/minor digits from the manufacturer version */
	avmversion[0] = (version.majormanuversion >> 4) & 0x0f;
	avmversion[1] = (version.majormanuversion << 4) & 0xf0;
	avmversion[1] |= (version.minormanuversion >> 4) & 0x0f;
	/* plain assignment: avmversion[2] is uninitialized here, so the
	 * previous "|=" OR-ed into an indeterminate value */
	avmversion[2] = version.minormanuversion & 0x0f;

	if (avmversion[0] > 3 || (avmversion[0] == 3 && avmversion[1] > 5)) {
		printk(KERN_INFO "%s: D2 trace enabled\n", card->name);
		capi_fill_MANUFACTURER_REQ(&cmdcmsg, global.ap.applid,
					   card->msgid++,
					   contr,
					   0x214D5641,	/* ManuID */
					   0,	/* Class */
					   1,	/* Function */
					   (_cstruct)"\004\200\014\000\000");
	} else {
		printk(KERN_INFO "%s: D3 trace enabled\n", card->name);
		capi_fill_MANUFACTURER_REQ(&cmdcmsg, global.ap.applid,
					   card->msgid++,
					   contr,
					   0x214D5641,	/* ManuID */
					   0,	/* Class */
					   1,	/* Function */
					   (_cstruct)"\004\002\003\000\000");
	}
	send_message(card, &cmdcmsg);
}
/*
 * Issue a LISTEN_REQ for this controller with the current CIP masks
 * and advance the listen state machine; the matching LISTEN_CONF is
 * handled in handle_controller().
 */
static void send_listen(capidrv_contr *card)
{
	capi_fill_LISTEN_REQ(&cmdcmsg, global.ap.applid,
			     card->msgid++,
			     card->contrnr,	/* controller */
			     1 << 6,	/* Infomask */
			     card->cipmask,
			     card->cipmask2,
			     NULL, NULL);
	listen_change_state(card, EV_LISTEN_REQ);
	send_message(card, &cmdcmsg);
}
/*
 * Periodic listen refresh (timer callback, data is the controller
 * pointer): warn if the listen state machine appears stuck, re-issue
 * the LISTEN_REQ and re-arm the timer for another minute.
 */
static void listentimerfunc(unsigned long x)
{
	capidrv_contr *card = (capidrv_contr *)x;
	int stuck = card->state != ST_LISTEN_NONE &&
		    card->state != ST_LISTEN_ACTIVE;

	if (stuck)
		printk(KERN_ERR "%s: controller dead ??\n", card->name);
	send_listen(card);
	mod_timer(&card->listentimer, jiffies + 60 * HZ);
}
/*
 * Register a new CAPI controller with the isdn4linux layer.
 *
 * Allocates the per-controller and per-B-channel structures, registers
 * the isdn_if interface, links the controller into the global list,
 * starts listening for calls and enables D channel tracing.
 * Returns 0 on success, -1 on failure (all resources, including the
 * module reference taken by try_module_get(), are released).
 */
static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
{
	capidrv_contr *card;
	unsigned long flags;
	isdn_ctrl cmd;
	char id[20];
	int i;

	sprintf(id, "capidrv-%d", contr);
	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_WARNING "capidrv: (%s) Could not reserve module\n", id);
		return -1;
	}
	if (!(card = kzalloc(sizeof(capidrv_contr), GFP_ATOMIC))) {
		printk(KERN_WARNING
		       "capidrv: (%s) Could not allocate contr-struct.\n", id);
		/* drop the reference taken above; the original code
		 * leaked it on this path */
		module_put(THIS_MODULE);
		return -1;
	}
	card->owner = THIS_MODULE;
	init_timer(&card->listentimer);
	strcpy(card->name, id);
	card->contrnr = contr;
	card->nbchan = profp->nbchannel;
	card->bchans = kmalloc(sizeof(capidrv_bchan) * card->nbchan, GFP_ATOMIC);
	if (!card->bchans) {
		printk(KERN_WARNING
		       "capidrv: (%s) Could not allocate bchan-structs.\n", id);
		module_put(card->owner);
		kfree(card);
		return -1;
	}
	card->interface.channels = profp->nbchannel;
	card->interface.maxbufsize = 2048;
	card->interface.command = if_command;
	card->interface.writebuf_skb = if_sendbuf;
	card->interface.writecmd = NULL;
	card->interface.readstat = if_readstat;
	card->interface.features = ISDN_FEATURE_L2_HDLC |
	    ISDN_FEATURE_L2_TRANS |
	    ISDN_FEATURE_L3_TRANS |
	    ISDN_FEATURE_P_UNKNOWN |
	    ISDN_FEATURE_L2_X75I |
	    ISDN_FEATURE_L2_X75UI |
	    ISDN_FEATURE_L2_X75BUI;
	/* V.110 and modem support depend on controller capabilities */
	if (profp->support1 & (1 << 2))
		card->interface.features |= ISDN_FEATURE_L2_V11096 |
		    ISDN_FEATURE_L2_V11019 |
		    ISDN_FEATURE_L2_V11038;
	if (profp->support1 & (1 << 8))
		card->interface.features |= ISDN_FEATURE_L2_MODEM;
	card->interface.hl_hdrlen = 22;	/* len of DATA_B3_REQ */
	strncpy(card->interface.id, id, sizeof(card->interface.id) - 1);

	/* q931 trace ring buffer starts out empty */
	card->q931_read = card->q931_buf;
	card->q931_write = card->q931_buf;
	card->q931_end = card->q931_buf + sizeof(card->q931_buf) - 1;

	if (!register_isdn(&card->interface)) {
		printk(KERN_ERR "capidrv: Unable to register contr %s\n", id);
		kfree(card->bchans);
		module_put(card->owner);
		kfree(card);
		return -1;
	}
	/* register_isdn() stored our driver id in interface.channels */
	card->myid = card->interface.channels;
	memset(card->bchans, 0, sizeof(capidrv_bchan) * card->nbchan);
	for (i = 0; i < card->nbchan; i++) {
		card->bchans[i].contr = card;
	}

	spin_lock_irqsave(&global_lock, flags);
	card->next = global.contr_list;
	global.contr_list = card;
	global.ncontr++;
	spin_unlock_irqrestore(&global_lock, flags);

	cmd.command = ISDN_STAT_RUN;
	cmd.driver = card->myid;
	card->interface.statcallb(&cmd);

	card->cipmask = 0x1FFF03FF;	/* any */
	card->cipmask2 = 0;

	/* start listening and refresh the listen once a minute */
	card->listentimer.data = (unsigned long)card;
	card->listentimer.function = listentimerfunc;
	send_listen(card);
	mod_timer(&card->listentimer, jiffies + 60 * HZ);

	printk(KERN_INFO "%s: now up (%d B channels)\n",
	       card->name, card->nbchan);

	enable_dchannel_trace(card);
	return 0;
}
/*
 * Tear down the capidrv state for CAPI controller @contr: stop the
 * isdn4linux interface, release all B-channel/PLCI/NCCI state and
 * unlink the controller from the global list.
 * Returns 0 on success, -1 if no such controller is registered.
 */
static int capidrv_delcontr(u16 contr)
{
	capidrv_contr **pp, *card;
	unsigned long flags;
	isdn_ctrl cmd;

	/* Look the controller up under the global lock. */
	spin_lock_irqsave(&global_lock, flags);
	for (card = global.contr_list; card; card = card->next) {
		if (card->contrnr == contr)
			break;
	}
	if (!card) {
		spin_unlock_irqrestore(&global_lock, flags);
		printk(KERN_ERR "capidrv: delcontr: no contr %u\n", contr);
		return -1;
	}
	/* FIXME: maybe a race condition the card should be removed
	 * here from global list /kkeil
	 */
	spin_unlock_irqrestore(&global_lock, flags);
	del_timer(&card->listentimer);
	if (debugmode)
		printk(KERN_DEBUG "capidrv-%d: id=%d unloading\n",
		       card->contrnr, card->myid);
	/* Tell isdn4linux the driver is going down ... */
	cmd.command = ISDN_STAT_STOP;
	cmd.driver = card->myid;
	card->interface.statcallb(&cmd);
	/* ... then disable each B channel, freeing any NCCI/PLCI
	 * still attached to it, highest channel first. */
	while (card->nbchan) {
		cmd.command = ISDN_STAT_DISCH;
		cmd.driver = card->myid;
		cmd.arg = card->nbchan-1;
		cmd.parm.num[0] = 0;
		if (debugmode)
			printk(KERN_DEBUG "capidrv-%d: id=%d disable chan=%ld\n",
			       card->contrnr, card->myid, cmd.arg);
		card->interface.statcallb(&cmd);
		if (card->bchans[card->nbchan-1].nccip)
			free_ncci(card, card->bchans[card->nbchan-1].nccip);
		if (card->bchans[card->nbchan-1].plcip)
			free_plci(card, card->bchans[card->nbchan-1].plcip);
		if (card->plci_list)
			printk(KERN_ERR "capidrv: bug in free_plci()\n");
		card->nbchan--;
	}
	kfree(card->bchans);
	card->bchans = NULL;
	if (debugmode)
		printk(KERN_DEBUG "capidrv-%d: id=%d isdn unload\n",
		       card->contrnr, card->myid);
	cmd.command = ISDN_STAT_UNLOAD;
	cmd.driver = card->myid;
	card->interface.statcallb(&cmd);
	if (debugmode)
		printk(KERN_DEBUG "capidrv-%d: id=%d remove contr from list\n",
		       card->contrnr, card->myid);
	/* Finally unlink the controller from the global list. */
	spin_lock_irqsave(&global_lock, flags);
	for (pp = &global.contr_list; *pp; pp = &(*pp)->next) {
		if (*pp == card) {
			*pp = (*pp)->next;
			card->next = NULL;
			global.ncontr--;
			break;
		}
	}
	spin_unlock_irqrestore(&global_lock, flags);
	module_put(card->owner);
	printk(KERN_INFO "%s: now down.\n", card->name);
	kfree(card);
	return 0;
}
/*
 * CAPI controller notifier: bring the isdn4linux view of a controller
 * up or down when the underlying CAPI controller changes state.
 * @v carries the controller number.
 */
static int
lower_callback(struct notifier_block *nb, unsigned long val, void *v)
{
	capi_profile profile;
	u32 contr = (long)v;

	switch (val) {
	case CAPICTR_UP:
		/* contr is u32: %u, not %hu (which truncates to 16 bit) */
		printk(KERN_INFO "capidrv: controller %u up\n", contr);
		if (capi20_get_profile(contr, &profile) == CAPI_NOERROR)
			(void) capidrv_addcontr(contr, &profile);
		break;
	case CAPICTR_DOWN:
		printk(KERN_INFO "capidrv: controller %u down\n", contr);
		(void) capidrv_delcontr(contr);
		break;
	}
	return NOTIFY_OK;
}
/*
* /proc/capi/capidrv:
* nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
/* Emit the four global packet counters for /proc/capi/capidrv. */
static int capidrv_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%lu %lu %lu %lu\n",
		   global.ap.nrecvctlpkt,
		   global.ap.nrecvdatapkt,
		   global.ap.nsentctlpkt,
		   global.ap.nsentdatapkt);
	return 0;
}
/* seq_file open hook; single_open() drives capidrv_proc_show(). */
static int capidrv_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, capidrv_proc_show, NULL);
}

/* File operations for /proc/capi/capidrv (read-only seq_file). */
static const struct file_operations capidrv_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= capidrv_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Create /proc/capi/capidrv at module load. */
static void __init proc_init(void)
{
	proc_create("capi/capidrv", 0, NULL, &capidrv_proc_fops);
}

/* Remove /proc/capi/capidrv at module unload. */
static void __exit proc_exit(void)
{
	remove_proc_entry("capi/capidrv", NULL);
}
/* Notifier block hooked into the CAPI controller up/down chain. */
static struct notifier_block capictr_nb = {
	.notifier_call = lower_callback,
};
/*
 * Module init: register as a CAPI application, hook the controller
 * notifier, attach every controller already present and create the
 * proc entry.  Returns 0 on success or -EIO on CAPI failures.
 */
static int __init capidrv_init(void)
{
	capi_profile profile;
	u32 nr, i;
	u16 err;

	global.ap.rparam.level3cnt = -2;	/* number of bchannels twice */
	global.ap.rparam.datablkcnt = 16;
	global.ap.rparam.datablklen = 2048;

	global.ap.recv_message = capidrv_recv_message;
	err = capi20_register(&global.ap);
	if (err)
		return -EIO;

	register_capictr_notifier(&capictr_nb);

	/* Profile 0 reports how many controllers are installed. */
	err = capi20_get_profile(0, &profile);
	if (err != CAPI_NOERROR) {
		unregister_capictr_notifier(&capictr_nb);
		capi20_release(&global.ap);
		return -EIO;
	}

	/* Attach each controller that answers a profile request. */
	for (i = 1, nr = profile.ncontroller; i <= nr; i++) {
		if (capi20_get_profile(i, &profile) != CAPI_NOERROR)
			continue;
		(void) capidrv_addcontr(i, &profile);
	}
	proc_init();
	return 0;
}
/*
 * Module exit: stop receiving controller notifications before
 * releasing the CAPI application, then drop the proc entry.
 */
static void __exit capidrv_exit(void)
{
	unregister_capictr_notifier(&capictr_nb);
	capi20_release(&global.ap);
	proc_exit();
}
module_init(capidrv_init);
module_exit(capidrv_exit);
| gpl-2.0 |
Adesh15/onyx_kernel | sound/soc/atmel/atmel_ssc_dai.c | 4566 | 22345 | /*
* atmel_ssc_dai.c -- ALSA SoC ATMEL SSC Audio Layer Platform driver
*
* Copyright (C) 2005 SAN People
* Copyright (C) 2008 Atmel
*
* Author: Sedji Gaouaou <sedji.gaouaou@atmel.com>
* ATMEL CORP.
*
* Based on at91-ssc.c by
* Frank Mandarino <fmandarino@endrelia.com>
* Based on pxa2xx Platform drivers by
* Liam Girdwood <lrg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel-ssc.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <mach/hardware.h>
#include "atmel-pcm.h"
#include "atmel_ssc_dai.h"
#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9G20)
#define NUM_SSC_DEVICES 1
#else
#define NUM_SSC_DEVICES 3
#endif
/*
* SSC PDC registers required by the PCM DMA engine.
*/
/* PDC register offsets for the transmit channel. */
static struct atmel_pdc_regs pdc_tx_reg = {
	.xpr		= ATMEL_PDC_TPR,
	.xcr		= ATMEL_PDC_TCR,
	.xnpr		= ATMEL_PDC_TNPR,
	.xncr		= ATMEL_PDC_TNCR,
};

/* PDC register offsets for the receive channel. */
static struct atmel_pdc_regs pdc_rx_reg = {
	.xpr		= ATMEL_PDC_RPR,
	.xcr		= ATMEL_PDC_RCR,
	.xnpr		= ATMEL_PDC_RNPR,
	.xncr		= ATMEL_PDC_RNCR,
};
/*
* SSC & PDC status bits for transmit and receive.
*/
/* SSC/PDC enable, disable and end-of-buffer bits for transmit. */
static struct atmel_ssc_mask ssc_tx_mask = {
	.ssc_enable	= SSC_BIT(CR_TXEN),
	.ssc_disable	= SSC_BIT(CR_TXDIS),
	.ssc_endx	= SSC_BIT(SR_ENDTX),
	.ssc_endbuf	= SSC_BIT(SR_TXBUFE),
	.pdc_enable	= ATMEL_PDC_TXTEN,
	.pdc_disable	= ATMEL_PDC_TXTDIS,
};

/* SSC/PDC enable, disable and end-of-buffer bits for receive. */
static struct atmel_ssc_mask ssc_rx_mask = {
	.ssc_enable	= SSC_BIT(CR_RXEN),
	.ssc_disable	= SSC_BIT(CR_RXDIS),
	.ssc_endx	= SSC_BIT(SR_ENDRX),
	.ssc_endbuf	= SSC_BIT(SR_RXBUFF),
	.pdc_enable	= ATMEL_PDC_RXTEN,
	.pdc_disable	= ATMEL_PDC_RXTDIS,
};
/*
* DMA parameters.
*/
/* Per-SSC, per-direction ([0]=playback, [1]=capture) DMA parameters. */
static struct atmel_pcm_dma_params ssc_dma_params[NUM_SSC_DEVICES][2] = {
	{{
	.name		= "SSC0 PCM out",
	.pdc		= &pdc_tx_reg,
	.mask		= &ssc_tx_mask,
	},
	{
	.name		= "SSC0 PCM in",
	.pdc		= &pdc_rx_reg,
	.mask		= &ssc_rx_mask,
	} },
#if NUM_SSC_DEVICES == 3
	{{
	.name		= "SSC1 PCM out",
	.pdc		= &pdc_tx_reg,
	.mask		= &ssc_tx_mask,
	},
	{
	.name		= "SSC1 PCM in",
	.pdc		= &pdc_rx_reg,
	.mask		= &ssc_rx_mask,
	} },
	{{
	.name		= "SSC2 PCM out",
	.pdc		= &pdc_tx_reg,
	.mask		= &ssc_tx_mask,
	},
	{
	.name		= "SSC2 PCM in",
	.pdc		= &pdc_rx_reg,
	.mask		= &ssc_rx_mask,
	} },
#endif
};
/* Per-SSC driver state; lock guards dir_mask and the divider fields. */
static struct atmel_ssc_info ssc_info[NUM_SSC_DEVICES] = {
	{
	.name		= "ssc0",
	.lock		= __SPIN_LOCK_UNLOCKED(ssc_info[0].lock),
	.dir_mask	= SSC_DIR_MASK_UNUSED,
	.initialized	= 0,
	},
#if NUM_SSC_DEVICES == 3
	{
	.name		= "ssc1",
	.lock		= __SPIN_LOCK_UNLOCKED(ssc_info[1].lock),
	.dir_mask	= SSC_DIR_MASK_UNUSED,
	.initialized	= 0,
	},
	{
	.name		= "ssc2",
	.lock		= __SPIN_LOCK_UNLOCKED(ssc_info[2].lock),
	.dir_mask	= SSC_DIR_MASK_UNUSED,
	.initialized	= 0,
	},
#endif
};
/*
* SSC interrupt handler. Passes PDC interrupts to the DMA
* interrupt handler in the PCM driver.
*/
/*
 * SSC interrupt handler.  Passes PDC interrupts to the DMA
 * interrupt handler in the PCM driver.
 */
static irqreturn_t atmel_ssc_interrupt(int irq, void *dev_id)
{
	struct atmel_ssc_info *ssc_p = dev_id;
	struct atmel_pcm_dma_params *dma_params;
	u32 ssc_sr;
	u32 ssc_substream_mask;
	int i;

	/* Only status bits whose interrupt is currently enabled. */
	ssc_sr = (unsigned long)ssc_readl(ssc_p->ssc->regs, SR)
			& (unsigned long)ssc_readl(ssc_p->ssc->regs, IMR);

	/*
	 * Loop through the substreams attached to this SSC.  If
	 * a DMA-related interrupt occurred on that substream, call
	 * the DMA interrupt handler function, if one has been
	 * registered in the dma_params structure by the PCM driver.
	 */
	for (i = 0; i < ARRAY_SIZE(ssc_p->dma_params); i++) {
		dma_params = ssc_p->dma_params[i];

		if ((dma_params != NULL) &&
			(dma_params->dma_intr_handler != NULL)) {
			ssc_substream_mask = (dma_params->mask->ssc_endx |
					dma_params->mask->ssc_endbuf);
			if (ssc_sr & ssc_substream_mask) {
				dma_params->dma_intr_handler(ssc_sr,
						dma_params->
						substream);
			}
		}
	}

	return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*\
* DAI functions
\*-------------------------------------------------------------------------*/
/*
* Startup. Only that one substream allowed in each direction.
*/
/*
 * Startup.  Only one substream is allowed in each direction;
 * returns -EBUSY if the direction is already claimed.
 */
static int atmel_ssc_startup(struct snd_pcm_substream *substream,
			     struct snd_soc_dai *dai)
{
	struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
	int dir_mask;

	/* SR is a bit mask, so print it in hex (%x, not %u) */
	pr_debug("atmel_ssc_startup: SSC_SR=0x%x\n",
		ssc_readl(ssc_p->ssc->regs, SR));

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		dir_mask = SSC_DIR_MASK_PLAYBACK;
	else
		dir_mask = SSC_DIR_MASK_CAPTURE;

	spin_lock_irq(&ssc_p->lock);
	if (ssc_p->dir_mask & dir_mask) {
		spin_unlock_irq(&ssc_p->lock);
		return -EBUSY;
	}
	ssc_p->dir_mask |= dir_mask;
	spin_unlock_irq(&ssc_p->lock);

	return 0;
}
/*
* Shutdown. Clear DMA parameters and shutdown the SSC if there
* are no other substreams open.
*/
/*
 * Shutdown.  Clear DMA parameters and shut the SSC down if there
 * are no other substreams open: stop the clock, free the IRQ,
 * soft-reset the block and forget the clock dividers.
 */
static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai)
{
	struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
	struct atmel_pcm_dma_params *dma_params;
	int dir, dir_mask;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		dir = 0;
	else
		dir = 1;

	dma_params = ssc_p->dma_params[dir];

	if (dma_params != NULL) {
		/* Disable this direction before dropping its DMA state. */
		ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
		pr_debug("atmel_ssc_shutdown: %s disabled SSC_SR=0x%08x\n",
			(dir ? "receive" : "transmit"),
			ssc_readl(ssc_p->ssc->regs, SR));

		dma_params->ssc = NULL;
		dma_params->substream = NULL;
		ssc_p->dma_params[dir] = NULL;
	}

	dir_mask = 1 << dir;

	spin_lock_irq(&ssc_p->lock);
	ssc_p->dir_mask &= ~dir_mask;
	if (!ssc_p->dir_mask) {
		if (ssc_p->initialized) {
			/* Shutdown the SSC clock. */
			/* driver tag typo fixed: was "atmel_ssc_dau" */
			pr_debug("atmel_ssc_dai: Stopping clock\n");
			clk_disable(ssc_p->ssc->clk);

			free_irq(ssc_p->ssc->irq, ssc_p);
			ssc_p->initialized = 0;
		}

		/* Reset the SSC */
		ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
		/* Clear the SSC dividers */
		ssc_p->cmr_div = ssc_p->tcmr_period = ssc_p->rcmr_period = 0;
	}
	spin_unlock_irq(&ssc_p->lock);
}
/*
* Record the DAI format for use in hw_params().
*/
/*
 * Record the DAI format for use in hw_params(); no hardware is
 * touched here.
 */
static int atmel_ssc_set_dai_fmt(struct snd_soc_dai *cpu_dai,
		unsigned int fmt)
{
	struct atmel_ssc_info *ssc_p = &ssc_info[cpu_dai->id];

	ssc_p->daifmt = fmt;
	return 0;
}
/*
* Record SSC clock dividers for use in hw_params().
*/
/*
 * Record SSC clock divider settings; they are programmed into the
 * hardware later, in hw_params().
 */
static int atmel_ssc_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
	int div_id, int div)
{
	struct atmel_ssc_info *ssc_p = &ssc_info[cpu_dai->id];

	switch (div_id) {
	case ATMEL_SSC_CMR_DIV:
		/*
		 * The same master clock divider feeds both transmit and
		 * receive: accept the first value set, refuse a
		 * conflicting one.
		 */
		if (ssc_p->cmr_div != 0 && div != ssc_p->cmr_div)
			return -EBUSY;
		ssc_p->cmr_div = div;
		break;

	case ATMEL_SSC_TCMR_PERIOD:
		ssc_p->tcmr_period = div;
		break;

	case ATMEL_SSC_RCMR_PERIOD:
		ssc_p->rcmr_period = div;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
* Configure the SSC.
*/
/*
 * Configure the SSC: derive sample size and PDC transfer width from
 * the requested PCM format, compute the clock/frame mode and format
 * registers for the recorded DAI format, initialize the SSC on first
 * use (clock, reset, IRQ) and finally program all registers.
 */
static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params,
	struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
	int id = dai->id;
	struct atmel_ssc_info *ssc_p = &ssc_info[id];
	struct atmel_pcm_dma_params *dma_params;
	int dir, channels, bits;
	u32 tfmr, rfmr, tcmr, rcmr;
	int start_event;
	int ret;

	/*
	 * Currently, there is only one set of dma params for
	 * each direction.  If more are added, this code will
	 * have to be changed to select the proper set.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		dir = 0;
	else
		dir = 1;

	dma_params = &ssc_dma_params[id][dir];
	dma_params->ssc = ssc_p->ssc;
	dma_params->substream = substream;

	ssc_p->dma_params[dir] = dma_params;

	/*
	 * The snd_soc_pcm_stream->dma_data field is only used to communicate
	 * the appropriate DMA parameters to the pcm driver hw_params()
	 * function.  It should not be used for other purposes
	 * as it is common to all substreams.
	 */
	snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_params);

	channels = params_channels(params);

	/*
	 * Determine sample size in bits and the PDC increment.
	 */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S8:
		bits = 8;
		dma_params->pdc_xfer_size = 1;
		break;
	case SNDRV_PCM_FORMAT_S16_LE:
		bits = 16;
		dma_params->pdc_xfer_size = 2;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		bits = 24;
		dma_params->pdc_xfer_size = 4;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		bits = 32;
		dma_params->pdc_xfer_size = 4;
		break;
	default:
		/* newline added so the message is properly terminated */
		printk(KERN_WARNING "atmel_ssc_dai: unsupported PCM format\n");
		return -EINVAL;
	}

	/*
	 * The SSC only supports up to 16-bit samples in I2S format, due
	 * to the size of the Frame Mode Register FSLEN field.
	 */
	if ((ssc_p->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S
		&& bits > 16) {
		printk(KERN_WARNING
				"atmel_ssc_dai: sample size %d "
				"is too large for I2S\n", bits);
		return -EINVAL;
	}

	/*
	 * Compute SSC register settings.
	 */
	switch (ssc_p->daifmt
		& (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_MASTER_MASK)) {

	case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS:
		/*
		 * I2S format, SSC provides BCLK and LRC clocks.
		 *
		 * The SSC transmit and receive clocks are generated
		 * from the MCK divider, and the BCLK signal
		 * is output on the SSC TK line.
		 */
		rcmr =	  SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
			| SSC_BF(RCMR_STTDLY, START_DELAY)
			| SSC_BF(RCMR_START, SSC_START_FALLING_RF)
			| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
			| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
			| SSC_BF(RCMR_CKS, SSC_CKS_DIV);

		rfmr =	  SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
			| SSC_BF(RFMR_FSOS, SSC_FSOS_NEGATIVE)
			| SSC_BF(RFMR_FSLEN, (bits - 1))
			| SSC_BF(RFMR_DATNB, (channels - 1))
			| SSC_BIT(RFMR_MSBF)
			| SSC_BF(RFMR_LOOP, 0)
			| SSC_BF(RFMR_DATLEN, (bits - 1));

		tcmr =	  SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
			| SSC_BF(TCMR_STTDLY, START_DELAY)
			| SSC_BF(TCMR_START, SSC_START_FALLING_RF)
			| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
			| SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS)
			| SSC_BF(TCMR_CKS, SSC_CKS_DIV);

		tfmr =	  SSC_BF(TFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
			| SSC_BF(TFMR_FSDEN, 0)
			| SSC_BF(TFMR_FSOS, SSC_FSOS_NEGATIVE)
			| SSC_BF(TFMR_FSLEN, (bits - 1))
			| SSC_BF(TFMR_DATNB, (channels - 1))
			| SSC_BIT(TFMR_MSBF)
			| SSC_BF(TFMR_DATDEF, 0)
			| SSC_BF(TFMR_DATLEN, (bits - 1));
		break;

	case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM:
		/*
		 * I2S format, CODEC supplies BCLK and LRC clocks.
		 *
		 * The SSC transmit clock is obtained from the BCLK signal on
		 * on the TK line, and the SSC receive clock is
		 * generated from the transmit clock.
		 *
		 *  For single channel data, one sample is transferred
		 * on the falling edge of the LRC clock.
		 * For two channel data, one sample is
		 * transferred on both edges of the LRC clock.
		 */
		start_event = ((channels == 1)
				? SSC_START_FALLING_RF
				: SSC_START_EDGE_RF);

		rcmr =	  SSC_BF(RCMR_PERIOD, 0)
			| SSC_BF(RCMR_STTDLY, START_DELAY)
			| SSC_BF(RCMR_START, start_event)
			| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
			| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
			| SSC_BF(RCMR_CKS, SSC_CKS_CLOCK);

		rfmr =	  SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
			| SSC_BF(RFMR_FSOS, SSC_FSOS_NONE)
			| SSC_BF(RFMR_FSLEN, 0)
			| SSC_BF(RFMR_DATNB, 0)
			| SSC_BIT(RFMR_MSBF)
			| SSC_BF(RFMR_LOOP, 0)
			| SSC_BF(RFMR_DATLEN, (bits - 1));

		tcmr =	  SSC_BF(TCMR_PERIOD, 0)
			| SSC_BF(TCMR_STTDLY, START_DELAY)
			| SSC_BF(TCMR_START, start_event)
			| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
			| SSC_BF(TCMR_CKO, SSC_CKO_NONE)
			| SSC_BF(TCMR_CKS, SSC_CKS_PIN);

		tfmr =	  SSC_BF(TFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
			| SSC_BF(TFMR_FSDEN, 0)
			| SSC_BF(TFMR_FSOS, SSC_FSOS_NONE)
			| SSC_BF(TFMR_FSLEN, 0)
			| SSC_BF(TFMR_DATNB, 0)
			| SSC_BIT(TFMR_MSBF)
			| SSC_BF(TFMR_DATDEF, 0)
			| SSC_BF(TFMR_DATLEN, (bits - 1));
		break;

	case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBS_CFS:
		/*
		 * DSP/PCM Mode A format, SSC provides BCLK and LRC clocks.
		 *
		 * The SSC transmit and receive clocks are generated from the
		 * MCK divider, and the BCLK signal is output
		 * on the SSC TK line.
		 */
		rcmr =	  SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
			| SSC_BF(RCMR_STTDLY, 1)
			| SSC_BF(RCMR_START, SSC_START_RISING_RF)
			| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
			| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
			| SSC_BF(RCMR_CKS, SSC_CKS_DIV);

		rfmr =	  SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
			| SSC_BF(RFMR_FSOS, SSC_FSOS_POSITIVE)
			| SSC_BF(RFMR_FSLEN, 0)
			| SSC_BF(RFMR_DATNB, (channels - 1))
			| SSC_BIT(RFMR_MSBF)
			| SSC_BF(RFMR_LOOP, 0)
			| SSC_BF(RFMR_DATLEN, (bits - 1));

		tcmr =	  SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
			| SSC_BF(TCMR_STTDLY, 1)
			| SSC_BF(TCMR_START, SSC_START_RISING_RF)
			| SSC_BF(TCMR_CKI, SSC_CKI_RISING)
			| SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS)
			| SSC_BF(TCMR_CKS, SSC_CKS_DIV);

		tfmr =	  SSC_BF(TFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
			| SSC_BF(TFMR_FSDEN, 0)
			| SSC_BF(TFMR_FSOS, SSC_FSOS_POSITIVE)
			| SSC_BF(TFMR_FSLEN, 0)
			| SSC_BF(TFMR_DATNB, (channels - 1))
			| SSC_BIT(TFMR_MSBF)
			| SSC_BF(TFMR_DATDEF, 0)
			| SSC_BF(TFMR_DATLEN, (bits - 1));
		break;

	case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM:
	default:
		printk(KERN_WARNING "atmel_ssc_dai: unsupported DAI format 0x%x\n",
			ssc_p->daifmt);
		return -EINVAL;
	}
	pr_debug("atmel_ssc_hw_params: "
			"RCMR=%08x RFMR=%08x TCMR=%08x TFMR=%08x\n",
			rcmr, rfmr, tcmr, tfmr);

	if (!ssc_p->initialized) {

		/* Enable PMC peripheral clock for this SSC */
		pr_debug("atmel_ssc_dai: Starting clock\n");
		clk_enable(ssc_p->ssc->clk);

		/* Reset the SSC and its PDC registers */
		ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));

		ssc_writel(ssc_p->ssc->regs, PDC_RPR, 0);
		ssc_writel(ssc_p->ssc->regs, PDC_RCR, 0);
		ssc_writel(ssc_p->ssc->regs, PDC_RNPR, 0);
		ssc_writel(ssc_p->ssc->regs, PDC_RNCR, 0);

		ssc_writel(ssc_p->ssc->regs, PDC_TPR, 0);
		ssc_writel(ssc_p->ssc->regs, PDC_TCR, 0);
		ssc_writel(ssc_p->ssc->regs, PDC_TNPR, 0);
		ssc_writel(ssc_p->ssc->regs, PDC_TNCR, 0);

		ret = request_irq(ssc_p->ssc->irq, atmel_ssc_interrupt, 0,
				ssc_p->name, ssc_p);
		if (ret < 0) {
			printk(KERN_WARNING
					"atmel_ssc_dai: request_irq failure\n");
			/* typo fixed: was "Atmel_ssc_dai: Stoping clock" */
			pr_debug("atmel_ssc_dai: Stopping clock\n");
			clk_disable(ssc_p->ssc->clk);
			return ret;
		}

		ssc_p->initialized = 1;
	}

	/* set SSC clock mode register */
	ssc_writel(ssc_p->ssc->regs, CMR, ssc_p->cmr_div);

	/* set receive clock mode and format */
	ssc_writel(ssc_p->ssc->regs, RCMR, rcmr);
	ssc_writel(ssc_p->ssc->regs, RFMR, rfmr);

	/* set transmit clock mode and format */
	ssc_writel(ssc_p->ssc->regs, TCMR, tcmr);
	ssc_writel(ssc_p->ssc->regs, TFMR, tfmr);

	pr_debug("atmel_ssc_dai,hw_params: SSC initialized\n");
	return 0;
}
/*
 * Prepare: enable the SSC transmitter or receiver for the substream's
 * direction just before the stream starts.
 */
static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
			     struct snd_soc_dai *dai)
{
	struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
	struct atmel_pcm_dma_params *dma_params;
	int dir;

	dir = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 0 : 1;
	dma_params = ssc_p->dma_params[dir];

	ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);

	pr_debug("%s enabled SSC_SR=0x%08x\n",
			dir ? "receive" : "transmit",
			ssc_readl(ssc_p->ssc->regs, SR));
	return 0;
}
#ifdef CONFIG_PM
/* Save SSC state and quiesce the block before system suspend. */
static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai)
{
	struct atmel_ssc_info *ssc_p;

	if (!cpu_dai->active)
		return 0;

	ssc_p = &ssc_info[cpu_dai->id];

	/* Save the status register before disabling transmit and receive */
	ssc_p->ssc_state.ssc_sr = ssc_readl(ssc_p->ssc->regs, SR);
	ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_TXDIS) | SSC_BIT(CR_RXDIS));

	/* Save the current interrupt mask, then disable unmasked interrupts */
	ssc_p->ssc_state.ssc_imr = ssc_readl(ssc_p->ssc->regs, IMR);
	ssc_writel(ssc_p->ssc->regs, IDR, ssc_p->ssc_state.ssc_imr);

	/* Save the clock/frame mode and format registers for resume. */
	ssc_p->ssc_state.ssc_cmr = ssc_readl(ssc_p->ssc->regs, CMR);
	ssc_p->ssc_state.ssc_rcmr = ssc_readl(ssc_p->ssc->regs, RCMR);
	ssc_p->ssc_state.ssc_rfmr = ssc_readl(ssc_p->ssc->regs, RFMR);
	ssc_p->ssc_state.ssc_tcmr = ssc_readl(ssc_p->ssc->regs, TCMR);
	ssc_p->ssc_state.ssc_tfmr = ssc_readl(ssc_p->ssc->regs, TFMR);

	return 0;
}
/* Restore the state saved by atmel_ssc_suspend() after resume. */
static int atmel_ssc_resume(struct snd_soc_dai *cpu_dai)
{
	struct atmel_ssc_info *ssc_p;
	u32 cr;

	if (!cpu_dai->active)
		return 0;

	ssc_p = &ssc_info[cpu_dai->id];

	/* restore SSC register settings */
	ssc_writel(ssc_p->ssc->regs, TFMR, ssc_p->ssc_state.ssc_tfmr);
	ssc_writel(ssc_p->ssc->regs, TCMR, ssc_p->ssc_state.ssc_tcmr);
	ssc_writel(ssc_p->ssc->regs, RFMR, ssc_p->ssc_state.ssc_rfmr);
	ssc_writel(ssc_p->ssc->regs, RCMR, ssc_p->ssc_state.ssc_rcmr);
	ssc_writel(ssc_p->ssc->regs, CMR, ssc_p->ssc_state.ssc_cmr);

	/* re-enable interrupts */
	ssc_writel(ssc_p->ssc->regs, IER, ssc_p->ssc_state.ssc_imr);

	/* Re-enable receive and transmit as appropriate */
	cr = 0;
	cr |=
	    (ssc_p->ssc_state.ssc_sr & SSC_BIT(SR_RXEN)) ? SSC_BIT(CR_RXEN) : 0;
	cr |=
	    (ssc_p->ssc_state.ssc_sr & SSC_BIT(SR_TXEN)) ? SSC_BIT(CR_TXEN) : 0;
	ssc_writel(ssc_p->ssc->regs, CR, cr);

	return 0;
}
#else /* CONFIG_PM */
# define atmel_ssc_suspend NULL
# define atmel_ssc_resume NULL
#endif /* CONFIG_PM */
/*
 * DAI probe: request the SSC device for this DAI and publish the
 * per-SSC state as driver data.  On failure, return the error before
 * setting drvdata so no state is published for a dead DAI.
 */
static int atmel_ssc_probe(struct snd_soc_dai *dai)
{
	struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];

	/*
	 * Request SSC device
	 */
	ssc_p->ssc = ssc_request(dai->id);
	if (IS_ERR(ssc_p->ssc)) {
		printk(KERN_ERR "ASoC: Failed to request SSC %d\n", dai->id);
		return PTR_ERR(ssc_p->ssc);
	}

	snd_soc_dai_set_drvdata(dai, ssc_p);
	return 0;
}
/* DAI remove: release the SSC device acquired in atmel_ssc_probe(). */
static int atmel_ssc_remove(struct snd_soc_dai *dai)
{
	struct atmel_ssc_info *ssc_p = snd_soc_dai_get_drvdata(dai);

	ssc_free(ssc_p->ssc);
	return 0;
}
/* Rates and sample formats advertised by every SSC DAI below. */
#define ATMEL_SSC_RATES (SNDRV_PCM_RATE_8000_96000)

#define ATMEL_SSC_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
	SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)

/* DAI callbacks shared by all SSC instances. */
static const struct snd_soc_dai_ops atmel_ssc_dai_ops = {
	.startup	= atmel_ssc_startup,
	.shutdown	= atmel_ssc_shutdown,
	.prepare	= atmel_ssc_prepare,
	.hw_params	= atmel_ssc_hw_params,
	.set_fmt	= atmel_ssc_set_dai_fmt,
	.set_clkdiv	= atmel_ssc_set_dai_clkdiv,
};
/* One DAI driver per SSC instance; index matches platform device id. */
static struct snd_soc_dai_driver atmel_ssc_dai[NUM_SSC_DEVICES] = {
	{
		.name = "atmel-ssc-dai.0",
		.probe = atmel_ssc_probe,
		.remove = atmel_ssc_remove,
		.suspend = atmel_ssc_suspend,
		.resume = atmel_ssc_resume,
		.playback = {
			.channels_min = 1,
			.channels_max = 2,
			.rates = ATMEL_SSC_RATES,
			.formats = ATMEL_SSC_FORMATS,},
		.capture = {
			.channels_min = 1,
			.channels_max = 2,
			.rates = ATMEL_SSC_RATES,
			.formats = ATMEL_SSC_FORMATS,},
		.ops = &atmel_ssc_dai_ops,
	},
#if NUM_SSC_DEVICES == 3
	{
		.name = "atmel-ssc-dai.1",
		.probe = atmel_ssc_probe,
		.remove = atmel_ssc_remove,
		.suspend = atmel_ssc_suspend,
		.resume = atmel_ssc_resume,
		.playback = {
			.channels_min = 1,
			.channels_max = 2,
			.rates = ATMEL_SSC_RATES,
			.formats = ATMEL_SSC_FORMATS,},
		.capture = {
			.channels_min = 1,
			.channels_max = 2,
			.rates = ATMEL_SSC_RATES,
			.formats = ATMEL_SSC_FORMATS,},
		.ops = &atmel_ssc_dai_ops,
	},
	{
		.name = "atmel-ssc-dai.2",
		.probe = atmel_ssc_probe,
		.remove = atmel_ssc_remove,
		.suspend = atmel_ssc_suspend,
		.resume = atmel_ssc_resume,
		.playback = {
			.channels_min = 1,
			.channels_max = 2,
			.rates = ATMEL_SSC_RATES,
			.formats = ATMEL_SSC_FORMATS,},
		.capture = {
			.channels_min = 1,
			.channels_max = 2,
			.rates = ATMEL_SSC_RATES,
			.formats = ATMEL_SSC_FORMATS,},
		.ops = &atmel_ssc_dai_ops,
	},
#endif
};
/* Register the DAI whose index matches this platform device's id. */
static __devinit int asoc_ssc_probe(struct platform_device *pdev)
{
	/* id comes from atmel_ssc_set_audio(), which validates the range */
	BUG_ON(pdev->id < 0);
	BUG_ON(pdev->id >= ARRAY_SIZE(atmel_ssc_dai));
	return snd_soc_register_dai(&pdev->dev, &atmel_ssc_dai[pdev->id]);
}

/* Unregister the DAI registered by asoc_ssc_probe(). */
static int __devexit asoc_ssc_remove(struct platform_device *pdev)
{
	snd_soc_unregister_dai(&pdev->dev);
	return 0;
}
/* Platform driver bound to the "atmel-ssc-dai" devices created below. */
static struct platform_driver asoc_ssc_driver = {
	.driver = {
		.name = "atmel-ssc-dai",
		.owner = THIS_MODULE,
	},
	.probe = asoc_ssc_probe,
	.remove = __devexit_p(asoc_ssc_remove),
};
/**
* atmel_ssc_set_audio - Allocate the specified SSC for audio use.
*/
/*
 * Allocate SSC @ssc_id for audio use: create the (singleton) PCM
 * platform device on first call, then create an "atmel-ssc-dai"
 * platform device for this SSC, parented on the SSC device if it can
 * be requested.  Returns 0 on success or a negative errno.
 */
int atmel_ssc_set_audio(int ssc_id)
{
	struct ssc_device *ssc;
	static struct platform_device *dma_pdev;
	struct platform_device *ssc_pdev;
	int ret;

	if (ssc_id < 0 || ssc_id >= ARRAY_SIZE(atmel_ssc_dai))
		return -EINVAL;

	/* Allocate a dummy device for DMA if we don't have one already */
	if (!dma_pdev) {
		dma_pdev = platform_device_alloc("atmel-pcm-audio", -1);
		if (!dma_pdev)
			return -ENOMEM;

		ret = platform_device_add(dma_pdev);
		if (ret < 0) {
			platform_device_put(dma_pdev);
			dma_pdev = NULL;
			return ret;
		}
	}

	ssc_pdev = platform_device_alloc("atmel-ssc-dai", ssc_id);
	if (!ssc_pdev)
		return -ENOMEM;

	/* Grab the SSC briefly, just to parent the DAI device off it;
	 * failure to do so is not fatal. */
	ssc = ssc_request(ssc_id);
	if (IS_ERR(ssc))
		pr_warn("Unable to parent ASoC SSC DAI on SSC: %ld\n",
			PTR_ERR(ssc));
	else {
		ssc_pdev->dev.parent = &(ssc->pdev->dev);
		ssc_free(ssc);
	}

	ret = platform_device_add(ssc_pdev);
	if (ret < 0)
		platform_device_put(ssc_pdev);

	return ret;
}
EXPORT_SYMBOL_GPL(atmel_ssc_set_audio);
module_platform_driver(asoc_ssc_driver);
/* Module information */
MODULE_AUTHOR("Sedji Gaouaou, sedji.gaouaou@atmel.com, www.atmel.com");
MODULE_DESCRIPTION("ATMEL SSC ASoC Interface");
MODULE_LICENSE("GPL");
| gpl-2.0 |
m-labs/linux-milkymist | kernel/trace/trace_functions_graph.c | 4822 | 35705 | /*
*
* Function graph tracer.
* Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
* Mostly borrowed from function tracer which
* is Copyright (c) Steven Rostedt <srostedt@redhat.com>
*
*/
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include "trace.h"
#include "trace_output.h"
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;
/* Per-CPU state used while rendering the graph trace output. */
struct fgraph_cpu_data {
	pid_t		last_pid;	/* pid of last printed entry */
	int		depth;		/* current call nesting depth */
	int		depth_irq;	/* depth at which an irq started */
	int		ignore;		/* skip the next return entry */
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

/* Iterator-private state for the graph tracer output. */
struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};
#define TRACE_GRAPH_INDENT 2
/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN 0x1
#define TRACE_GRAPH_PRINT_CPU 0x2
#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
#define TRACE_GRAPH_PRINT_PROC 0x8
#define TRACE_GRAPH_PRINT_DURATION 0x10
#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
#define TRACE_GRAPH_PRINT_IRQS 0x40
/* Tracer options toggled through trace_options (funcgraph-*). */
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

/* trace_array the graph tracer records into; set via set_graph_array() */
static struct trace_array *graph_array;

/*
 * DURATION column is being also used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
	DURATION_FILL_FULL  = -1,
	DURATION_FILL_START = -2,
	DURATION_FILL_END   = -3,
};
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
u32 flags);
/* Add a function return address to the trace stack on thread info.*/
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		/* fixed mojibake: "¤t" was a garbled "&current" */
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	/* reserve the slot before filling it in */
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}
/* Retrieve a function return address to the trace stack on thread info.*/
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	/* fixed mojibake: "¤t" was a garbled "&current" */
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/* ensure the tracer is done with the entry before we pop it */
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
/*
 * Write a function-entry event into the ring buffer.
 * Returns 1 if the event was committed, 0 otherwise.
 */
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	/* only commit if the event passes the current filter */
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}
/*
 * Nonzero when the current context is a hard irq that should not be
 * traced (funcgraph-irqs disabled and not in an already-traced irq).
 */
static inline int ftrace_graph_ignore_irqs(void)
{
	if (ftrace_graph_skip_irqs && !trace_recursion_test(TRACE_IRQ_BIT))
		return in_irq();

	return 0;
}
/*
 * Function-entry hook called by the function graph infrastructure.
 * Returns nonzero if the entry was recorded (the matching return will
 * then be traced too).
 */
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* trace it when it is-nested-in or is a function enabled. */
	if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
	      ftrace_graph_ignore_irqs())
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	/* per-cpu recursion guard: only record at depth 1 */
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
/*
 * Entry callback used when a duration threshold is active: entries are
 * not recorded (only returns exceeding the threshold are), but we must
 * return nonzero so the return handler still fires.
 */
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (!tracing_thresh)
		return trace_graph_entry(trace);

	return 1;
}
/*
 * Emit a paired entry/return record for @ip with identical call and
 * return timestamps (zero duration). The designated initializers zero
 * every field not listed (e.g. the overrun count in the return record),
 * which the output code relies on.
 */
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}
/*
 * trace_graph_function - record a single function call in graph format.
 * @parent_ip is accepted for callback-signature compatibility but is
 * not used here.
 */
void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}
/*
 * __trace_graph_return - write a function-return record into @tr's
 * buffer. Silently drops the event when tracing is disabled on this
 * CPU or the ring buffer had no room.
 */
void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	/* Commit unless an event filter discarded the record. */
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}
/*
 * trace_graph_return - function-graph return callback.
 * Records the return event with interrupts disabled, skipping it when
 * another user on this CPU already holds the buffer.
 */
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	/* Only record when we are the sole writer on this CPU. */
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
/* Publish @tr as the trace_array used by the graph entry/return callbacks. */
void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}
/*
 * Return callback used when a duration threshold is active: record the
 * event only when its duration reaches tracing_thresh.
 */
void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	unsigned long long duration = trace->rettime - trace->calltime;

	/* With no threshold configured, every return is recorded. */
	if (!tracing_thresh || duration >= tracing_thresh)
		trace_graph_return(trace);
}
/*
 * Tracer ->init callback: register the graph entry/return handlers
 * (the threshold-filtering variants when tracing_thresh is set) and
 * start recording cmdlines so pids can be resolved to comm names.
 */
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
/* Tracer ->reset callback: undo what graph_trace_init() set up. */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
static int max_bytes_for_cpu;
/*
 * Emit the cpu column, right-aligned to max_bytes_for_cpu digits.
 * The leading space makes the column stand out to the right a bit
 * when trace output is pasted into email.
 */
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	if (!trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#define TRACE_GRAPH_PROCINFO_LENGTH 14
/*
 * Emit the "comm-pid" column, centered within
 * TRACE_GRAPH_PROCINFO_LENGTH characters.
 *
 * Fix: pid_str was declared [11], which is one byte short for a
 * negative pid ("-2147483648" is 11 chars + NUL), and was filled with
 * an unbounded sprintf(). Size it correctly and use snprintf().
 */
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' == 12 bytes worst case */
	char pid_str[12];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	/* Truncate the comm so "comm-pid" fits the fixed column width. */
	comm[7] = '\0';
	snprintf(pid_str, sizeof(pid_str), "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
if (!trace_seq_putc(s, ' '))
return 0;
return trace_print_lat_fmt(s, entry);
}
/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	/* Without per-cpu state we cannot detect a switch; print nothing. */
	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	/* -1 means this is the first event seen on this cpu. */
	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:
 ------------------------------------------
 | 1) migration/0--1 => sshd-1755
 ------------------------------------------
 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/*
 * get_return_for_leaf - look ahead for the return event matching @curr.
 *
 * Returns the next TRACE_GRAPH_RET entry when it belongs to the same
 * pid and function as @curr (i.e. @curr is a "leaf" call that can be
 * printed on a single line), otherwise NULL. When no ring-buffer
 * iterator exists this must consume @curr in order to peek at the next
 * event — which is why callers copy their entry to the stack first.
 */
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = iter->buffer_iter[iter->cpu];

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->tr->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
/* Print the absolute timestamp column as "seconds.microseconds | ". */
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	/* do_div() leaves the seconds in t and returns the ns remainder. */
	unsigned long rem_usecs = do_div(t, NSEC_PER_SEC) / 1000;

	return trace_seq_printf(s, "%5lu.%06lu | ",
				(unsigned long)t, rem_usecs);
}
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
enum trace_type type, int cpu, pid_t pid, u32 flags)
{
int ret;
struct trace_seq *s = &iter->seq;
if (addr < (unsigned long)__irqentry_text_start ||
addr >= (unsigned long)__irqentry_text_end)
return TRACE_TYPE_UNHANDLED;
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
/* Absolute time */
if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
ret = print_graph_abs_time(iter->ts, s);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Cpu */
if (flags & TRACE_GRAPH_PRINT_CPU) {
ret = print_graph_cpu(s, cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Proc */
if (flags & TRACE_GRAPH_PRINT_PROC) {
ret = print_graph_proc(s, pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
}
/* No overhead */
ret = print_graph_duration(DURATION_FILL_START, s, flags);
if (ret != TRACE_TYPE_HANDLED)
return ret;
if (type == TRACE_GRAPH_ENT)
ret = trace_seq_printf(s, "==========>");
else
ret = trace_seq_printf(s, "<==========");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
ret = print_graph_duration(DURATION_FILL_END, s, flags);
if (ret != TRACE_TYPE_HANDLED)
return ret;
ret = trace_seq_printf(s, "\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
unsigned long nsecs_rem = do_div(duration, 1000);
/* log10(ULONG_MAX) + '\0' */
char msecs_str[21];
char nsecs_str[5];
int ret, len;
int i;
sprintf(msecs_str, "%lu", (unsigned long) duration);
/* Print msecs */
ret = trace_seq_printf(s, "%s", msecs_str);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
len = strlen(msecs_str);
/* Print nsecs (we don't want to exceed 7 numbers) */
if (len < 7) {
size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
ret = trace_seq_printf(s, ".%s", nsecs_str);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
len += strlen(nsecs_str);
}
ret = trace_seq_printf(s, " us ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
/* Print remaining spaces to fit the row's width */
for (i = len; i < 7; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
return TRACE_TYPE_HANDLED;
}
/*
 * Emit the duration column for one event, or fill it with spaces for
 * the DURATION_FILL_* pseudo-durations. @duration is in nanoseconds
 * (it is printed in microseconds by trace_print_graph_duration()).
 */
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (duration) {
	case DURATION_FILL_FULL:
		ret = trace_seq_printf(s, " | ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_START:
		ret = trace_seq_printf(s, " ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_END:
		ret = trace_seq_printf(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs (100000 ns) */
		if (duration > 100000ULL)
			ret = trace_seq_printf(s, "! ");
		/* Duration exceeded 10 usecs (10000 ns) */
		else if (duration > 10000ULL)
			ret = trace_seq_printf(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_printf(s, " ");

	/* Catching here any failure happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "| ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
struct ftrace_graph_ent_entry *entry,
struct ftrace_graph_ret_entry *ret_entry,
struct trace_seq *s, u32 flags)
{
struct fgraph_data *data = iter->private;
struct ftrace_graph_ret *graph_ret;
struct ftrace_graph_ent *call;
unsigned long long duration;
int ret;
int i;
graph_ret = &ret_entry->ret;
call = &entry->graph_ent;
duration = graph_ret->rettime - graph_ret->calltime;
if (data) {
struct fgraph_cpu_data *cpu_data;
int cpu = iter->cpu;
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
/*
* Comments display at + 1 to depth. Since
* this is a leaf function, keep the comments
* equal to this depth.
*/
cpu_data->depth = call->depth - 1;
/* No need to keep this function around for this depth */
if (call->depth < FTRACE_RETFUNC_DEPTH)
cpu_data->enter_funcs[call->depth] = 0;
}
/* Overhead and duration */
ret = print_graph_duration(duration, s, flags);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
struct ftrace_graph_ent_entry *entry,
struct trace_seq *s, int cpu, u32 flags)
{
struct ftrace_graph_ent *call = &entry->graph_ent;
struct fgraph_data *data = iter->private;
int ret;
int i;
if (data) {
struct fgraph_cpu_data *cpu_data;
int cpu = iter->cpu;
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
cpu_data->depth = call->depth;
/* Save this function pointer to see if the exit matches */
if (call->depth < FTRACE_RETFUNC_DEPTH)
cpu_data->enter_funcs[call->depth] = call->func;
}
/* No time */
ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
if (ret != TRACE_TYPE_HANDLED)
return ret;
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
/*
* we already consumed the current entry to check the next one
* and see if this is a leaf.
*/
return TRACE_TYPE_NO_CONSUME;
}
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
int type, unsigned long addr, u32 flags)
{
struct fgraph_data *data = iter->private;
struct trace_entry *ent = iter->ent;
int cpu = iter->cpu;
int ret;
/* Pid */
if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
if (type) {
/* Interrupt */
ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
return 0;
/* Absolute time */
if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
ret = print_graph_abs_time(iter->ts, s);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Cpu */
if (flags & TRACE_GRAPH_PRINT_CPU) {
ret = print_graph_cpu(s, cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Proc */
if (flags & TRACE_GRAPH_PRINT_PROC) {
ret = print_graph_proc(s, ent->pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Latency format */
if (trace_flags & TRACE_ITER_LATENCY_FMT) {
ret = print_graph_lat_fmt(s, ent);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
return 0;
}
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	/* Addresses outside the irqentry text section are not irq entries. */
	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
/*
 * Format one TRACE_GRAPH_ENT event: as a one-line leaf ("func();") when
 * its own return immediately follows, otherwise as an open brace line.
 * Records in @data whether the seq buffer overflowed, because the event
 * has already been consumed and would otherwise be lost.
 *
 * Fix: "ret" was declared static, creating shared state across calls
 * (and across concurrent readers of different trace files). It is fully
 * assigned before use on every path, so make it an ordinary automatic
 * variable.
 */
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
struct trace_entry *ent, struct trace_iterator *iter,
u32 flags)
{
unsigned long long duration = trace->rettime - trace->calltime;
struct fgraph_data *data = iter->private;
pid_t pid = ent->pid;
int cpu = iter->cpu;
int func_match = 1;
int ret;
int i;
if (check_irq_return(iter, flags, trace->depth))
return TRACE_TYPE_HANDLED;
if (data) {
struct fgraph_cpu_data *cpu_data;
int cpu = iter->cpu;
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
/*
* Comments display at + 1 to depth. This is the
* return from a function, we now want the comments
* to display at the same level of the bracket.
*/
cpu_data->depth = trace->depth - 1;
if (trace->depth < FTRACE_RETFUNC_DEPTH) {
if (cpu_data->enter_funcs[trace->depth] != trace->func)
func_match = 0;
cpu_data->enter_funcs[trace->depth] = 0;
}
}
if (print_graph_prologue(iter, s, 0, 0, flags))
return TRACE_TYPE_PARTIAL_LINE;
/* Overhead and duration */
ret = print_graph_duration(duration, s, flags);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
/* Closing brace */
for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/*
* If the return function does not have a matching entry,
* then the entry was lost. Instead of just printing
* the '}' and letting the user guess what function this
* belongs to, write out the function name.
*/
if (func_match) {
ret = trace_seq_printf(s, "}\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
} else {
ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Overrun */
if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
ret = trace_seq_printf(s, " (Overruns: %lu)\n",
trace->overrun);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
cpu, pid, flags);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
struct trace_iterator *iter, u32 flags)
{
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
struct fgraph_data *data = iter->private;
struct trace_event *event;
int depth = 0;
int ret;
int i;
if (data)
depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
if (print_graph_prologue(iter, s, 0, 0, flags))
return TRACE_TYPE_PARTIAL_LINE;
/* No time */
ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
if (ret != TRACE_TYPE_HANDLED)
return ret;
/* Indentation */
if (depth > 0)
for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* The comment */
ret = trace_seq_printf(s, "/* ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
switch (iter->ent->type) {
case TRACE_BPRINT:
ret = trace_print_bprintk_msg_only(iter);
if (ret != TRACE_TYPE_HANDLED)
return ret;
break;
case TRACE_PRINT:
ret = trace_print_printk_msg_only(iter);
if (ret != TRACE_TYPE_HANDLED)
return ret;
break;
default:
event = ftrace_find_event(ent->type);
if (!event)
return TRACE_TYPE_UNHANDLED;
ret = event->funcs->trace(iter, sym_flags, event);
if (ret != TRACE_TYPE_HANDLED)
return ret;
}
/* Strip ending newline */
if (s->buffer[s->len - 1] == '\n') {
s->buffer[s->len - 1] = '\0';
s->len--;
}
ret = trace_seq_printf(s, " */\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
struct ftrace_graph_ent_entry *field;
struct fgraph_data *data = iter->private;
struct trace_entry *entry = iter->ent;
struct trace_seq *s = &iter->seq;
int cpu = iter->cpu;
int ret;
if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
return TRACE_TYPE_HANDLED;
}
/*
* If the last output failed, there's a possibility we need
* to print out the missing entry which would never go out.
*/
if (data && data->failed) {
field = &data->ent;
iter->cpu = data->cpu;
ret = print_graph_entry(field, s, iter, flags);
if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
ret = TRACE_TYPE_NO_CONSUME;
}
iter->cpu = cpu;
return ret;
}
switch (entry->type) {
case TRACE_GRAPH_ENT: {
/*
* print_graph_entry() may consume the current event,
* thus @field may become invalid, so we need to save it.
* sizeof(struct ftrace_graph_ent_entry) is very small,
* it can be safely saved at the stack.
*/
struct ftrace_graph_ent_entry saved;
trace_assign_type(field, entry);
saved = *field;
return print_graph_entry(&saved, s, iter, flags);
}
case TRACE_GRAPH_RET: {
struct ftrace_graph_ret_entry *field;
trace_assign_type(field, entry);
return print_graph_return(&field->ret, s, entry, iter, flags);
}
case TRACE_STACK:
case TRACE_FN:
/* dont trace stack and functions as comments */
return TRACE_TYPE_UNHANDLED;
default:
return print_graph_comment(s, entry, iter, flags);
}
return TRACE_TYPE_HANDLED;
}
/* Default print_line entry point: use the user-selected tracer flags. */
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}
/* trace_event ->trace callback; @flags and @event are unused here. */
static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}
static void print_lat_header(struct seq_file *s, u32 flags)
{
static const char spaces[] = " " /* 16 spaces */
" " /* 4 spaces */
" "; /* 17 spaces */
int size = 0;
if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
size += 16;
if (flags & TRACE_GRAPH_PRINT_CPU)
size += 4;
if (flags & TRACE_GRAPH_PRINT_PROC)
size += 17;
seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
seq_printf(s, "#%.*s||| / \n", size, spaces);
}
static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
if (lat)
print_lat_header(s, flags);
/* 1st line */
seq_printf(s, "#");
if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
seq_printf(s, " TIME ");
if (flags & TRACE_GRAPH_PRINT_CPU)
seq_printf(s, " CPU");
if (flags & TRACE_GRAPH_PRINT_PROC)
seq_printf(s, " TASK/PID ");
if (lat)
seq_printf(s, "||||");
if (flags & TRACE_GRAPH_PRINT_DURATION)
seq_printf(s, " DURATION ");
seq_printf(s, " FUNCTION CALLS\n");
/* 2nd line */
seq_printf(s, "#");
if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
seq_printf(s, " | ");
if (flags & TRACE_GRAPH_PRINT_CPU)
seq_printf(s, " | ");
if (flags & TRACE_GRAPH_PRINT_PROC)
seq_printf(s, " | | ");
if (lat)
seq_printf(s, "||||");
if (flags & TRACE_GRAPH_PRINT_DURATION)
seq_printf(s, " | | ");
seq_printf(s, " | | | |\n");
}
/* Header printer used when the tracer's own option flags apply. */
void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}
void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
struct trace_iterator *iter = s->private;
if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
return;
if (trace_flags & TRACE_ITER_LATENCY_FMT) {
/* print nothing if the buffers are empty */
if (trace_empty(iter))
return;
print_trace_header(s, iter);
}
__print_graph_headers_flags(s, flags);
}
/*
 * Tracer/pipe ->open callback: allocate the per-cpu bookkeeping used by
 * the output code. On allocation failure iter->private stays NULL; the
 * printers all check for that and merely lose cross-event state.
 */
void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;		/* no previous pid seen yet */
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;	/* not inside irq code */
	}

	iter->private = data;

	return;

out_err_free:
	kfree(data);
out_err:
	pr_warning("function graph tracer: not enough memory\n");
}
/* Free the per-cpu bookkeeping allocated by graph_trace_open(). */
void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (!data)
		return;

	free_percpu(data->cpu_data);
	kfree(data);
}
/*
 * Tracer ->set_flag callback: toggling the irq-display option also
 * updates ftrace_graph_skip_irqs so the record-time fast path can drop
 * irq events.
 */
static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}
static struct trace_event_functions graph_functions = {
.trace = print_graph_function_event,
};
static struct trace_event graph_trace_entry_event = {
.type = TRACE_GRAPH_ENT,
.funcs = &graph_functions,
};
static struct trace_event graph_trace_ret_event = {
.type = TRACE_GRAPH_RET,
.funcs = &graph_functions
};
static struct tracer graph_trace __read_mostly = {
.name = "function_graph",
.open = graph_trace_open,
.pipe_open = graph_trace_open,
.close = graph_trace_close,
.pipe_close = graph_trace_close,
.wait_pipe = poll_wait_pipe,
.init = graph_trace_init,
.reset = graph_trace_reset,
.print_line = print_graph_function,
.print_header = print_graph_headers,
.flags = &tracer_flags,
.set_flag = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_function_graph,
#endif
};
/*
 * Boot-time registration of the function_graph tracer and its two
 * output event types. A nonzero return marks the initcall as failed.
 */
static __init int init_graph_trace(void)
{
	/* Width in digits needed to print the largest cpu id. */
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}
device_initcall(init_graph_trace);
| gpl-2.0 |
InfinitiveOS-Devices/android_kernel_sony_apq8064 | drivers/ata/pata_samsung_cf.c | 5590 | 17664 | /*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* PATA driver for Samsung SoCs.
* Supports CF Interface in True IDE mode. Currently only PIO mode has been
* implemented; UDMA support has to be added.
*
* Based on:
* PATA driver for AT91SAM9260 Static Memory Controller
* PATA driver for Toshiba SCC controller
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <plat/ata.h>
#include <plat/regs-ata.h>
#define DRV_NAME "pata_samsung_cf"
#define DRV_VERSION "0.1"
enum s3c_cpu_type {
TYPE_S3C64XX,
TYPE_S5PC100,
TYPE_S5PV210,
};
/*
* struct s3c_ide_info - S3C PATA instance.
* @clk: The clock resource for this controller.
* @ide_addr: The area mapped for the hardware registers.
* @sfr_addr: The area mapped for the special function registers.
* @irq: The IRQ number we are using.
* @cpu_type: The exact type of this controller.
* @fifo_status_reg: The ATA_FIFO_STATUS register offset.
*/
struct s3c_ide_info {
struct clk *clk;
void __iomem *ide_addr;
void __iomem *sfr_addr;
unsigned int irq;
enum s3c_cpu_type cpu_type;
unsigned int fifo_status_reg;
};
/*
 * Configure the byte-swap bit in the ATA_CFG register: a nonzero @mode
 * clears S3C_ATA_CFG_SWAP, a zero @mode sets it.
 */
static void pata_s3c_set_endian(void __iomem *s3c_ide_regbase, u8 mode)
{
	u32 cfg = readl(s3c_ide_regbase + S3C_ATA_CFG);

	if (mode)
		cfg &= ~S3C_ATA_CFG_SWAP;
	else
		cfg |= S3C_ATA_CFG_SWAP;

	writel(cfg, s3c_ide_regbase + S3C_ATA_CFG);
}
/* Put the CF/ATA mux into True-IDE operating mode. */
static void pata_s3c_cfg_mode(void __iomem *s3c_ide_sfrbase)
{
	/* Select true-ide as the internal operating mode */
	writel(readl(s3c_ide_sfrbase + S3C_CFATA_MUX) | S3C_CFATA_MUX_TRUEIDE,
	       s3c_ide_sfrbase + S3C_CFATA_MUX);
}
/*
 * Pack the computed ATA timing into the PIO_TIME register layout:
 * bits [19:12] 8-bit recovery time, [11:4] 8-bit active time,
 * [3:0] setup time. @info is unused but kept for the call signature.
 */
static unsigned long
pata_s3c_setup_timing(struct s3c_ide_info *info, const struct ata_timing *ata)
{
	ulong setup   = ata->setup & 0xf;
	ulong active  = ata->act8b & 0xff;
	ulong recover = ata->rec8b & 0xff;

	return (recover << 12) | (active << 4) | setup;
}
/*
 * libata ->set_piomode callback: program IORDY usage and the PIO timing
 * register for @adev's selected PIO mode.
 */
static void pata_s3c_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct s3c_ide_info *info = ap->host->private_data;
	struct ata_timing timing;
	int cycle_time;
	ulong ata_cfg = readl(info->ide_addr + S3C_ATA_CFG);
	ulong piotime;

	/* Enables IORDY if mode requires it */
	if (ata_pio_need_iordy(adev))
		ata_cfg |= S3C_ATA_CFG_IORDYEN;
	else
		ata_cfg &= ~S3C_ATA_CFG_IORDYEN;

	/* Controller clock period in ns, from the clock rate in Hz. */
	cycle_time = (int)(1000000000UL / clk_get_rate(info->clk));

	ata_timing_compute(adev, adev->pio_mode, &timing,
			   cycle_time * 1000, 0);

	piotime = pata_s3c_setup_timing(info, &timing);

	writel(ata_cfg, info->ide_addr + S3C_ATA_CFG);
	writel(piotime, info->ide_addr + S3C_ATA_PIO_TIME);
}
/*
 * Waits until the IDE controller is able to perform next read/write
 * operation to the disk. Needed for 64XX series boards only.
 *
 * Returns 0 when the FIFO status reports ready, -EBUSY on timeout.
 *
 * Fix: if this task is scheduled away inside the polling loop, jiffies
 * can pass the deadline even though the controller became ready in the
 * meantime; re-check the status once after the loop before failing.
 */
static int wait_for_host_ready(struct s3c_ide_info *info)
{
	ulong timeout;
	void __iomem *fifo_reg = info->ide_addr + info->fifo_status_reg;

	/* wait for maximum of 20 msec */
	timeout = jiffies + msecs_to_jiffies(20);
	while (time_before(jiffies, timeout)) {
		if ((readl(fifo_reg) >> 28) == 0)
			return 0;
	}

	/* Final check in case we were preempted past the deadline. */
	if ((readl(fifo_reg) >> 28) == 0)
		return 0;

	return -EBUSY;
}
/*
 * Writes to one of the task file registers, first waiting for the
 * controller to report ready (see wait_for_host_ready()).
 */
static void ata_outb(struct ata_host *host, u8 addr, void __iomem *reg)
{
	struct s3c_ide_info *info = host->private_data;

	wait_for_host_ready(info);
	writeb(addr, reg);
}
/*
 * Reads from one of the task file registers.
 * The first readb() of @reg is discarded; the actual value is then
 * fetched from the S3C_ATA_PIO_RDATA register once the host is ready
 * again (presumably the first access only latches the read — TODO
 * confirm against the SoC manual).
 */
static u8 ata_inb(struct ata_host *host, void __iomem *reg)
{
	struct s3c_ide_info *info = host->private_data;
	u8 temp;

	wait_for_host_ready(info);
	(void) readb(reg);	/* dummy read, value discarded */
	wait_for_host_ready(info);
	temp = readb(info->ide_addr + S3C_ATA_PIO_RDATA);
	return temp;
}
/*
* pata_s3c_tf_load - send taskfile registers to host controller
*/
static void pata_s3c_tf_load(struct ata_port *ap,
const struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
if (tf->ctl != ap->last_ctl) {
ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
ata_outb(ap->host, tf->hob_feature, ioaddr->feature_addr);
ata_outb(ap->host, tf->hob_nsect, ioaddr->nsect_addr);
ata_outb(ap->host, tf->hob_lbal, ioaddr->lbal_addr);
ata_outb(ap->host, tf->hob_lbam, ioaddr->lbam_addr);
ata_outb(ap->host, tf->hob_lbah, ioaddr->lbah_addr);
}
if (is_addr) {
ata_outb(ap->host, tf->feature, ioaddr->feature_addr);
ata_outb(ap->host, tf->nsect, ioaddr->nsect_addr);
ata_outb(ap->host, tf->lbal, ioaddr->lbal_addr);
ata_outb(ap->host, tf->lbam, ioaddr->lbam_addr);
ata_outb(ap->host, tf->lbah, ioaddr->lbah_addr);
}
if (tf->flags & ATA_TFLAG_DEVICE)
ata_outb(ap->host, tf->device, ioaddr->device_addr);
ata_wait_idle(ap);
}
/*
 * pata_s3c_tf_read - read the device's ATA taskfile shadow registers.
 *
 * For LBA48 the HOB bit is set in the control register to expose the
 * previous (high-order) register contents, then cleared again.
 */
static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_host *host = ap->host;
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->feature = ata_inb(host, ioaddr->error_addr);
	tf->nsect = ata_inb(host, ioaddr->nsect_addr);
	tf->lbal = ata_inb(host, ioaddr->lbal_addr);
	tf->lbam = ata_inb(host, ioaddr->lbam_addr);
	tf->lbah = ata_inb(host, ioaddr->lbah_addr);
	tf->device = ata_inb(host, ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		/* Expose the high-order bytes via the HOB bit. */
		ata_outb(host, tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = ata_inb(host, ioaddr->error_addr);
		tf->hob_nsect = ata_inb(host, ioaddr->nsect_addr);
		tf->hob_lbal = ata_inb(host, ioaddr->lbal_addr);
		tf->hob_lbam = ata_inb(host, ioaddr->lbam_addr);
		tf->hob_lbah = ata_inb(host, ioaddr->lbah_addr);
		/* Restore normal access and refresh the ctl cache. */
		ata_outb(host, tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
	}
}
/*
 * pata_s3c_exec_command - issue an ATA command to the host controller.
 */
static void pata_s3c_exec_command(struct ata_port *ap,
			const struct ata_taskfile *tf)
{
	/* Write the command register, then pause per the SFF protocol. */
	ata_outb(ap->host, tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
/*
* pata_s3c_check_status - Read device status register
*/
static u8 pata_s3c_check_status(struct ata_port *ap)
{
return ata_inb(ap->host, ap->ioaddr.status_addr);
}
/*
* pata_s3c_check_altstatus - Read alternate device status register
*/
static u8 pata_s3c_check_altstatus(struct ata_port *ap)
{
return ata_inb(ap->host, ap->ioaddr.altstatus_addr);
}
/*
* pata_s3c_data_xfer - Transfer data by PIO
*/
/*
 * Transfer data by PIO, one 16-bit word at a time.
 * Reads use the same dummy-read/latch sequence as ata_inb(); writes
 * go straight to the data register after the FIFO drain wait.
 * Returns the number of bytes actually transferred (words * 2).
 */
unsigned int pata_s3c_data_xfer(struct ata_device *dev, unsigned char *buf,
unsigned int buflen, int rw)
{
struct ata_port *ap = dev->link->ap;
struct s3c_ide_info *info = ap->host->private_data;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned int words = buflen >> 1, i;
u16 *data_ptr = (u16 *)buf;
/* Requires wait same as in ata_inb/ata_outb */
if (rw == READ)
for (i = 0; i < words; i++, data_ptr++) {
wait_for_host_ready(info);
/* Dummy read latches the word into the PIO read-data register. */
(void) readw(data_addr);
wait_for_host_ready(info);
*data_ptr = readw(info->ide_addr
+ S3C_ATA_PIO_RDATA);
}
else
for (i = 0; i < words; i++, data_ptr++) {
wait_for_host_ready(info);
writew(*data_ptr, data_addr);
}
/* NOTE(review): an odd trailing byte is reported but silently
 * dropped (return value excludes it) — confirm callers never pass
 * odd buflen on this controller. */
if (buflen & 0x01)
dev_err(ap->dev, "unexpected trailing data\n");
return words << 1;
}
/*
 * pata_s3c_dev_select - select device 0 or 1 on the ATA bus.
 */
static void pata_s3c_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 devsel = ATA_DEVICE_OBS;

	if (device)
		devsel |= ATA_DEV1;
	ata_outb(ap->host, devsel, ap->ioaddr.device_addr);
	ata_sff_pause(ap);
}
/*
 * pata_s3c_devchk - PATA device presence detection.
 *
 * Writes alternating 0x55/0xaa patterns to the nsect and lbal
 * scratch registers and checks whether the last pair reads back,
 * which indicates a responding device.
 */
static unsigned int pata_s3c_devchk(struct ata_port *ap,
			unsigned int device)
{
	struct ata_host *host = ap->host;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect_val, lbal_val;

	pata_s3c_dev_select(ap, device);

	ata_outb(host, 0x55, ioaddr->nsect_addr);
	ata_outb(host, 0xaa, ioaddr->lbal_addr);
	ata_outb(host, 0xaa, ioaddr->nsect_addr);
	ata_outb(host, 0x55, ioaddr->lbal_addr);
	ata_outb(host, 0x55, ioaddr->nsect_addr);
	ata_outb(host, 0xaa, ioaddr->lbal_addr);

	nsect_val = ata_inb(host, ioaddr->nsect_addr);
	lbal_val = ata_inb(host, ioaddr->lbal_addr);

	if (nsect_val == 0x55 && lbal_val == 0xaa)
		return 1;	/* we found a device */

	return 0;	/* nothing found */
}
/*
 * pata_s3c_wait_after_reset - wait for devices to become ready after reset.
 *
 * Returns 0 when the master device is ready, otherwise the negative
 * errno from ata_sff_wait_ready(). -ENODEV also covers the case of a
 * missing D7 pulldown resistor where TF status reads 0xff; it is
 * propagated like any other failure.
 */
static int pata_s3c_wait_after_reset(struct ata_link *link,
			unsigned long deadline)
{
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	/*
	 * Always check readiness of the master device. The original
	 * "if (rc) return rc; return 0;" tail was dead code — the
	 * result is simply passed through.
	 */
	return ata_sff_wait_ready(link, deadline);
}
/*
* pata_s3c_bus_softreset - PATA device software reset
*/
static unsigned int pata_s3c_bus_softreset(struct ata_port *ap,
unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
/* software reset. causes dev0 to be selected */
ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
udelay(20);
ata_outb(ap->host, ap->ctl | ATA_SRST, ioaddr->ctl_addr);
udelay(20);
ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
ap->last_ctl = ap->ctl;
return pata_s3c_wait_after_reset(&ap->link, deadline);
}
/*
* pata_s3c_softreset - reset host port via ATA SRST
*/
static int pata_s3c_softreset(struct ata_link *link, unsigned int *classes,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
unsigned int devmask = 0;
int rc;
u8 err;
/* determine if device 0 is present */
if (pata_s3c_devchk(ap, 0))
devmask |= (1 << 0);
/* select device 0 again */
pata_s3c_dev_select(ap, 0);
/* issue bus reset */
rc = pata_s3c_bus_softreset(ap, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && rc != -ENODEV) {
ata_link_err(link, "SRST failed (errno=%d)\n", rc);
return rc;
}
/* determine by signature whether we have ATA or ATAPI devices */
classes[0] = ata_sff_dev_classify(&ap->link.device[0],
devmask & (1 << 0), &err);
return 0;
}
/*
* pata_s3c_set_devctl - Write device control register
*/
static void pata_s3c_set_devctl(struct ata_port *ap, u8 ctl)
{
/* Route through ata_outb() so the controller FIFO wait applies. */
ata_outb(ap->host, ctl, ap->ioaddr.ctl_addr);
}
/* SCSI host template: standard SFF PIO-only defaults. */
static struct scsi_host_template pata_s3c_sht = {
ATA_PIO_SHT(DRV_NAME),
};
/*
 * Port ops for S3C64XX: every taskfile access is overridden because
 * this controller requires the indirect latch/read sequence
 * (ata_inb/ata_outb) instead of plain MMIO.
 */
static struct ata_port_operations pata_s3c_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_check_status = pata_s3c_check_status,
.sff_check_altstatus = pata_s3c_check_altstatus,
.sff_tf_load = pata_s3c_tf_load,
.sff_tf_read = pata_s3c_tf_read,
.sff_data_xfer = pata_s3c_data_xfer,
.sff_exec_command = pata_s3c_exec_command,
.sff_dev_select = pata_s3c_dev_select,
.sff_set_devctl = pata_s3c_set_devctl,
.softreset = pata_s3c_softreset,
.set_piomode = pata_s3c_set_piomode,
};
/*
 * Port ops for S5PC100/S5PV210: default SFF accessors work there,
 * only the PIO timing setup is chip specific.
 */
static struct ata_port_operations pata_s5p_port_ops = {
.inherits = &ata_sff_port_ops,
.set_piomode = pata_s3c_set_piomode,
};
static void pata_s3c_enable(void *s3c_ide_regbase, bool state)
{
u32 temp = readl(s3c_ide_regbase + S3C_ATA_CTRL);
temp = state ? (temp | 1) : (temp & ~1);
writel(temp, s3c_ide_regbase + S3C_ATA_CTRL);
}
/*
 * Interrupt handler: acknowledge all pending controller IRQ bits
 * by writing them back, then hand off to the generic SFF handler.
 */
static irqreturn_t pata_s3c_irq(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct s3c_ide_info *info = host->private_data;
	u32 pending;

	pending = readl(info->ide_addr + S3C_ATA_IRQ);
	writel(pending, info->ide_addr + S3C_ATA_IRQ);

	return ata_sff_interrupt(irq, dev_instance);
}
/*
 * One-time controller initialization: endianness setup, interface
 * enable, and clearing/masking of the IRQ status bits, per SoC type.
 */
static void pata_s3c_hwinit(struct s3c_ide_info *info,
struct s3c_ide_platdata *pdata)
{
switch (info->cpu_type) {
case TYPE_S3C64XX:
/* Configure as big endian */
pata_s3c_cfg_mode(info->sfr_addr);
pata_s3c_set_endian(info->ide_addr, 1);
pata_s3c_enable(info->ide_addr, true);
msleep(100);
/* Remove IRQ Status */
writel(0x1f, info->ide_addr + S3C_ATA_IRQ);
writel(0x1b, info->ide_addr + S3C_ATA_IRQ_MSK);
break;
case TYPE_S5PC100:
/* S5PC100 needs the SFR mode setup first, then shares the
 * little-endian init path with S5PV210. */
pata_s3c_cfg_mode(info->sfr_addr);
/* FALLTHROUGH */
case TYPE_S5PV210:
/* Configure as little endian */
pata_s3c_set_endian(info->ide_addr, 0);
pata_s3c_enable(info->ide_addr, true);
msleep(100);
/* Remove IRQ Status */
writel(0x3f, info->ide_addr + S3C_ATA_IRQ);
writel(0x3f, info->ide_addr + S3C_ATA_IRQ_MSK);
break;
default:
BUG();
}
}
/*
 * Probe: map registers, grab the controller clock, allocate and wire
 * up a single-port libata host, then activate it. Memory/region and
 * ioremap use devm_* and need no explicit cleanup; the clock does.
 */
static int __init pata_s3c_probe(struct platform_device *pdev)
{
struct s3c_ide_platdata *pdata = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct s3c_ide_info *info;
struct resource *res;
struct ata_port *ap;
struct ata_host *host;
enum s3c_cpu_type cpu_type;
int ret;
cpu_type = platform_get_device_id(pdev)->driver_data;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(dev, "failed to allocate memory for device data\n");
return -ENOMEM;
}
info->irq = platform_get_irq(pdev, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "failed to get mem resource\n");
return -EINVAL;
}
if (!devm_request_mem_region(dev, res->start,
resource_size(res), DRV_NAME)) {
dev_err(dev, "error requesting register region\n");
return -EBUSY;
}
info->ide_addr = devm_ioremap(dev, res->start, resource_size(res));
if (!info->ide_addr) {
dev_err(dev, "failed to map IO base address\n");
return -ENOMEM;
}
/* NOTE(review): plain clk_get, so remove/error paths must clk_put. */
info->clk = clk_get(&pdev->dev, "cfcon");
if (IS_ERR(info->clk)) {
dev_err(dev, "failed to get access to cf controller clock\n");
ret = PTR_ERR(info->clk);
info->clk = NULL;
return ret;
}
clk_enable(info->clk);
/* init ata host */
host = ata_host_alloc(dev, 1);
if (!host) {
dev_err(dev, "failed to allocate ide host\n");
ret = -ENOMEM;
goto stop_clk;
}
ap = host->ports[0];
ap->pio_mask = ATA_PIO4;
/* Per-SoC register layout: S3C64XX/S5PC100 place the ATA block at
 * an offset behind a config SFR block; the FIFO status register
 * offset also differs. */
if (cpu_type == TYPE_S3C64XX) {
ap->ops = &pata_s3c_port_ops;
info->sfr_addr = info->ide_addr + 0x1800;
info->ide_addr += 0x1900;
info->fifo_status_reg = 0x94;
} else if (cpu_type == TYPE_S5PC100) {
ap->ops = &pata_s5p_port_ops;
info->sfr_addr = info->ide_addr + 0x1800;
info->ide_addr += 0x1900;
info->fifo_status_reg = 0x84;
} else {
ap->ops = &pata_s5p_port_ops;
info->fifo_status_reg = 0x84;
}
info->cpu_type = cpu_type;
/* Without a valid IRQ, fall back to polled PIO. */
if (info->irq <= 0) {
ap->flags |= ATA_FLAG_PIO_POLLING;
info->irq = 0;
ata_port_desc(ap, "no IRQ, using PIO polling\n");
}
/* All taskfile "ports" live in the memory-mapped register block. */
ap->ioaddr.cmd_addr = info->ide_addr + S3C_ATA_CMD;
ap->ioaddr.data_addr = info->ide_addr + S3C_ATA_PIO_DTR;
ap->ioaddr.error_addr = info->ide_addr + S3C_ATA_PIO_FED;
ap->ioaddr.feature_addr = info->ide_addr + S3C_ATA_PIO_FED;
ap->ioaddr.nsect_addr = info->ide_addr + S3C_ATA_PIO_SCR;
ap->ioaddr.lbal_addr = info->ide_addr + S3C_ATA_PIO_LLR;
ap->ioaddr.lbam_addr = info->ide_addr + S3C_ATA_PIO_LMR;
ap->ioaddr.lbah_addr = info->ide_addr + S3C_ATA_PIO_LHR;
ap->ioaddr.device_addr = info->ide_addr + S3C_ATA_PIO_DVR;
ap->ioaddr.status_addr = info->ide_addr + S3C_ATA_PIO_CSD;
ap->ioaddr.command_addr = info->ide_addr + S3C_ATA_PIO_CSD;
ap->ioaddr.altstatus_addr = info->ide_addr + S3C_ATA_PIO_DAD;
ap->ioaddr.ctl_addr = info->ide_addr + S3C_ATA_PIO_DAD;
ata_port_desc(ap, "mmio cmd 0x%llx ",
(unsigned long long)res->start);
host->private_data = info;
if (pdata && pdata->setup_gpio)
pdata->setup_gpio();
/* Set endianness and enable the interface */
pata_s3c_hwinit(info, pdata);
platform_set_drvdata(pdev, host);
return ata_host_activate(host, info->irq,
info->irq ? pata_s3c_irq : NULL,
0, &pata_s3c_sht);
stop_clk:
clk_disable(info->clk);
clk_put(info->clk);
return ret;
}
/* Teardown: detach the libata host, then release the clock taken in probe. */
static int __exit pata_s3c_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct s3c_ide_info *info = host->private_data;
ata_host_detach(host);
clk_disable(info->clk);
clk_put(info->clk);
return 0;
}
#ifdef CONFIG_PM
/* PM suspend: quiesce all ports via the generic libata helper. */
static int pata_s3c_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct ata_host *host = platform_get_drvdata(pdev);
return ata_host_suspend(host, PMSG_SUSPEND);
}
/* PM resume: redo the controller hardware init, then resume the host. */
static int pata_s3c_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct ata_host *host = platform_get_drvdata(pdev);
struct s3c_ide_platdata *pdata = pdev->dev.platform_data;
struct s3c_ide_info *info = host->private_data;
pata_s3c_hwinit(info, pdata);
ata_host_resume(host);
return 0;
}
/* System sleep callbacks (compiled only with CONFIG_PM). */
static const struct dev_pm_ops pata_s3c_pm_ops = {
.suspend = pata_s3c_suspend,
.resume = pata_s3c_resume,
};
#endif
/* driver device registration */
/* Supported platform device names, mapped to their SoC type enum. */
static struct platform_device_id pata_s3c_driver_ids[] = {
{
.name = "s3c64xx-pata",
.driver_data = TYPE_S3C64XX,
}, {
.name = "s5pc100-pata",
.driver_data = TYPE_S5PC100,
}, {
.name = "s5pv210-pata",
.driver_data = TYPE_S5PV210,
},
{ }
};
MODULE_DEVICE_TABLE(platform, pata_s3c_driver_ids);
/* Platform driver glue; probe is bound via platform_driver_probe(). */
static struct platform_driver pata_s3c_driver = {
.remove = __exit_p(pata_s3c_remove),
.id_table = pata_s3c_driver_ids,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &pata_s3c_pm_ops,
#endif
},
};
/* Module entry: register the driver, probing any matching devices. */
static int __init pata_s3c_init(void)
{
return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe);
}
/* Module exit: unregister the platform driver. */
static void __exit pata_s3c_exit(void)
{
platform_driver_unregister(&pata_s3c_driver);
}
module_init(pata_s3c_init);
module_exit(pata_s3c_exit);
MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>");
MODULE_DESCRIPTION("low-level driver for Samsung PATA controller");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
Alucard24/SGS4-SAMMY-Kernel | drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c | 7382 | 3507 | /* nec-terratec-cinergy-xs.h - Keytable for nec_terratec_cinergy_xs Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Terratec Cinergy Hybrid T USB XS FM
Mauro Carvalho Chehab <mchehab@redhat.com>
*/
/* Scancode-to-keycode table. The grey remote uses 16-bit NEC codes
 * (0x14xx); the black remote uses longer 0x04ebxx codes. */
static struct rc_map_table nec_terratec_cinergy_xs[] = {
/* Terratec Grey IR, with most keys in orange */
{ 0x1441, KEY_HOME},
{ 0x1401, KEY_POWER2},
{ 0x1442, KEY_MENU}, /* DVD menu */
{ 0x1443, KEY_SUBTITLE},
{ 0x1444, KEY_TEXT}, /* Teletext */
{ 0x1445, KEY_DELETE},
{ 0x1402, KEY_1},
{ 0x1403, KEY_2},
{ 0x1404, KEY_3},
{ 0x1405, KEY_4},
{ 0x1406, KEY_5},
{ 0x1407, KEY_6},
{ 0x1408, KEY_7},
{ 0x1409, KEY_8},
{ 0x140a, KEY_9},
{ 0x140c, KEY_0},
{ 0x140b, KEY_TUNER}, /* AV */
{ 0x140d, KEY_MODE}, /* A.B */
{ 0x1446, KEY_TV},
{ 0x1447, KEY_DVD},
{ 0x1449, KEY_VIDEO},
{ 0x144a, KEY_RADIO}, /* Music */
{ 0x144b, KEY_CAMERA}, /* PIC */
{ 0x1410, KEY_UP},
{ 0x1411, KEY_LEFT},
{ 0x1412, KEY_OK},
{ 0x1413, KEY_RIGHT},
{ 0x1414, KEY_DOWN},
{ 0x140f, KEY_EPG},
{ 0x1416, KEY_INFO},
{ 0x144d, KEY_BACKSPACE},
{ 0x141c, KEY_VOLUMEUP},
{ 0x141e, KEY_VOLUMEDOWN},
{ 0x144c, KEY_PLAY},
{ 0x141d, KEY_MUTE},
{ 0x141b, KEY_CHANNELUP},
{ 0x141f, KEY_CHANNELDOWN},
{ 0x1417, KEY_RED},
{ 0x1418, KEY_GREEN},
{ 0x1419, KEY_YELLOW},
{ 0x141a, KEY_BLUE},
{ 0x1458, KEY_RECORD},
{ 0x1448, KEY_STOP},
{ 0x1440, KEY_PAUSE},
{ 0x1454, KEY_LAST},
{ 0x144e, KEY_REWIND},
{ 0x144f, KEY_FASTFORWARD},
{ 0x145c, KEY_NEXT},
/* Terratec Black IR, with most keys in black */
{ 0x04eb01, KEY_POWER2},
{ 0x04eb02, KEY_1},
{ 0x04eb03, KEY_2},
{ 0x04eb04, KEY_3},
{ 0x04eb05, KEY_4},
{ 0x04eb06, KEY_5},
{ 0x04eb07, KEY_6},
{ 0x04eb08, KEY_7},
{ 0x04eb09, KEY_8},
{ 0x04eb0a, KEY_9},
{ 0x04eb0c, KEY_0},
{ 0x04eb0b, KEY_TEXT}, /* TXT */
{ 0x04eb0d, KEY_REFRESH}, /* Refresh */
{ 0x04eb0e, KEY_HOME},
{ 0x04eb0f, KEY_EPG},
{ 0x04eb10, KEY_UP},
{ 0x04eb11, KEY_LEFT},
{ 0x04eb12, KEY_OK},
{ 0x04eb13, KEY_RIGHT},
{ 0x04eb14, KEY_DOWN},
{ 0x04eb15, KEY_BACKSPACE},
{ 0x04eb16, KEY_INFO},
{ 0x04eb17, KEY_RED},
{ 0x04eb18, KEY_GREEN},
{ 0x04eb19, KEY_YELLOW},
{ 0x04eb1a, KEY_BLUE},
{ 0x04eb1c, KEY_VOLUMEUP},
{ 0x04eb1e, KEY_VOLUMEDOWN},
{ 0x04eb1d, KEY_MUTE},
{ 0x04eb1b, KEY_CHANNELUP},
{ 0x04eb1f, KEY_CHANNELDOWN},
{ 0x04eb40, KEY_RECORD},
{ 0x04eb4c, KEY_PLAY},
{ 0x04eb58, KEY_PAUSE},
{ 0x04eb54, KEY_REWIND},
{ 0x04eb48, KEY_STOP},
{ 0x04eb5c, KEY_NEXT},
};
/* rc-core registration descriptor wrapping the table above. */
static struct rc_map_list nec_terratec_cinergy_xs_map = {
.map = {
.scan = nec_terratec_cinergy_xs,
.size = ARRAY_SIZE(nec_terratec_cinergy_xs),
.rc_type = RC_TYPE_NEC,
.name = RC_MAP_NEC_TERRATEC_CINERGY_XS,
}
};
/* Module init: register the keymap with rc-core. */
static int __init init_rc_map_nec_terratec_cinergy_xs(void)
{
return rc_map_register(&nec_terratec_cinergy_xs_map);
}
/* Module exit: unregister the keymap. */
static void __exit exit_rc_map_nec_terratec_cinergy_xs(void)
{
rc_map_unregister(&nec_terratec_cinergy_xs_map);
}
module_init(init_rc_map_nec_terratec_cinergy_xs)
module_exit(exit_rc_map_nec_terratec_cinergy_xs)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
MoKee/android_kernel_google_msm | drivers/isdn/icn/icn.c | 7382 | 42513 | /* $Id: icn.c,v 1.65.6.8 2001/09/23 22:24:55 kai Exp $
*
* ISDN low-level module for the ICN active ISDN-Card.
*
* Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include "icn.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
static int portbase = ICN_BASEADDR;
static unsigned long membase = ICN_MEMADDR;
static char *icn_id = "\0";
static char *icn_id2 = "\0";
MODULE_DESCRIPTION("ISDN4Linux: Driver for ICN active ISDN card");
MODULE_AUTHOR("Fritz Elfert");
MODULE_LICENSE("GPL");
module_param(portbase, int, 0);
MODULE_PARM_DESC(portbase, "Port address of first card");
module_param(membase, ulong, 0);
MODULE_PARM_DESC(membase, "Shared memory address of all cards");
module_param(icn_id, charp, 0);
MODULE_PARM_DESC(icn_id, "ID-String of first card");
module_param(icn_id2, charp, 0);
MODULE_PARM_DESC(icn_id2, "ID-String of first card, second S0 (4B only)");
/*
* Verbose bootcode- and protocol-downloading.
*/
#undef BOOT_DEBUG
/*
* Verbose Shmem-Mapping.
*/
#undef MAP_DEBUG
static char
*revision = "$Revision: 1.65.6.8 $";
static int icn_addcard(int, char *, char *);
/*
 * icn_free_queue - flush one B-channel's transmit state.
 * @card:    card the channel belongs to
 * @channel: channel number
 *
 * Purges the send queue, resets the byte count and pending ACK
 * length, and frees any partially transmitted skb.
 */
static void
icn_free_queue(icn_card *card, int channel)
{
	struct sk_buff *pending;

	skb_queue_purge(&card->spqueue[channel]);
	card->xlen[channel] = 0;
	card->sndcount[channel] = 0;

	pending = card->xskb[channel];
	if (pending) {
		card->xskb[channel] = NULL;
		dev_kfree_skb(pending);
	}
}
/*
 * icn_shiftout - clock a value into a shift register, MSB first.
 * @port:     output port (only bit 0 of each write is significant)
 * @val:      value to shift out
 * @firstbit: bit number of the highest bit to send
 * @bitcount: number of bits to send
 */
static inline void
icn_shiftout(unsigned short port,
	     unsigned long val,
	     int firstbit,
	     int bitcount)
{
	int bit;

	for (bit = firstbit; bit > firstbit - bitcount; bit--)
		OUTB_P(((val >> bit) & 1) ? 0xff : 0, port);
}
/*
* disable a cards shared memory
*/
static inline void
icn_disable_ram(icn_card *card)
{
/* Writing 0 to the map-control port hides the shared RAM window. */
OUTB_P(0, ICN_MAPRAM);
}
/*
* enable a cards shared memory
*/
static inline void
icn_enable_ram(icn_card *card)
{
/* Writing 0xff to the map-control port exposes the shared RAM window. */
OUTB_P(0xff, ICN_MAPRAM);
}
/*
* Map a cards channel0 (Bank0/Bank8) or channel1 (Bank4/Bank12)
*
* must called with holding the devlock
*/
static inline void
icn_map_channel(icn_card *card, int channel)
{
#ifdef MAP_DEBUG
printk(KERN_DEBUG "icn_map_channel %d %d\n", dev.channel, channel);
#endif
/* Fast path: the requested card/channel is already mapped. */
if ((channel == dev.channel) && (card == dev.mcard))
return;
/* Unmap the previously mapped card before switching banks. */
if (dev.mcard)
icn_disable_ram(dev.mcard);
icn_shiftout(ICN_BANK, chan2bank[channel], 3, 4);	/* Select Bank */
icn_enable_ram(card);
/* Record the new global mapping state. */
dev.mcard = card;
dev.channel = channel;
#ifdef MAP_DEBUG
printk(KERN_DEBUG "icn_map_channel done\n");
#endif
}
/*
 * icn_lock_channel - take a reference on the channel lock.
 *
 * Succeeds (returns 1, chanlock incremented) only when the requested
 * card/channel is the one currently mapped; returns 0 otherwise.
 * Must be called with dev.devlock held.
 */
static inline int
icn_lock_channel(icn_card *card, int channel)
{
	int locked = 0;

#ifdef MAP_DEBUG
	printk(KERN_DEBUG "icn_lock_channel %d\n", channel);
#endif
	if ((dev.channel == channel) && (card == dev.mcard)) {
		dev.chanlock++;
		locked = 1;
#ifdef MAP_DEBUG
		printk(KERN_DEBUG "icn_lock_channel %d OK\n", channel);
#endif
	} else {
#ifdef MAP_DEBUG
		printk(KERN_DEBUG "icn_lock_channel %d FAILED, dc=%d\n", channel, dev.channel);
#endif
	}
	return locked;
}
/*
 * __icn_release_channel - drop one channel lock reference.
 *
 * Never decrements below zero. Must be called with dev.devlock held.
 */
static inline void
__icn_release_channel(void)
{
#ifdef MAP_DEBUG
	printk(KERN_DEBUG "icn_release_channel l=%d\n", dev.chanlock);
#endif
	if (dev.chanlock > 0)
		dev.chanlock--;
}
/*
 * icn_release_channel - locked wrapper around __icn_release_channel().
 */
static inline void
icn_release_channel(void)
{
	unsigned long flags;

	spin_lock_irqsave(&dev.devlock, flags);
	__icn_release_channel();
	spin_unlock_irqrestore(&dev.devlock, flags);
}
/*
* Try to map and lock a cards channel.
* Return 1 on success, 0 on failure.
*/
static inline int
icn_trymaplock_channel(icn_card *card, int channel)
{
ulong flags;
#ifdef MAP_DEBUG
printk(KERN_DEBUG "trymaplock c=%d dc=%d l=%d\n", channel, dev.channel,
dev.chanlock);
#endif
spin_lock_irqsave(&dev.devlock, flags);
/* Succeed when nobody holds the lock, or when the requested
 * card/channel is already the mapped one. */
if ((!dev.chanlock) ||
((dev.channel == channel) && (dev.mcard == card))) {
dev.chanlock++;
icn_map_channel(card, channel);
spin_unlock_irqrestore(&dev.devlock, flags);
#ifdef MAP_DEBUG
printk(KERN_DEBUG "trymaplock %d OK\n", channel);
#endif
return 1;
}
spin_unlock_irqrestore(&dev.devlock, flags);
#ifdef MAP_DEBUG
printk(KERN_DEBUG "trymaplock %d FAILED\n", channel);
#endif
return 0;
}
/*
 * icn_maprelease_channel - drop one lock reference and, if this was
 * the last holder, map the given card/channel without re-locking it.
 */
static inline void
icn_maprelease_channel(icn_card *card, int channel)
{
	unsigned long flags;

#ifdef MAP_DEBUG
	printk(KERN_DEBUG "map_release c=%d l=%d\n", channel, dev.chanlock);
#endif
	spin_lock_irqsave(&dev.devlock, flags);
	if (dev.chanlock > 0)
		dev.chanlock--;
	if (!dev.chanlock)
		icn_map_channel(card, channel);
	spin_unlock_irqrestore(&dev.devlock, flags);
}
/* Get Data from the B-Channel, assemble fragmented packets and put them
* into receive-queue. Wake up any B-Channel-reading processes.
* This routine is called via timer-callback from icn_pollbchan().
*/
static void
icn_pollbchan_receive(int channel, icn_card *card)
{
/* Second half of a 4B card uses shmem channels 2/3. */
int mch = channel + ((card->secondhalf) ? 2 : 0);
int eflag;
int cnt;
struct sk_buff *skb;
if (icn_trymaplock_channel(card, mch)) {
while (rbavl) {
cnt = readb(&rbuf_l);
/* Guard the 4000-byte reassembly buffer against overflow. */
if ((card->rcvidx[channel] + cnt) > 4000) {
printk(KERN_WARNING
"icn: (%s) bogus packet on ch%d, dropping.\n",
CID,
channel + 1);
card->rcvidx[channel] = 0;
eflag = 0;
} else {
/* Append this fragment; eflag != 0 means more follow. */
memcpy_fromio(&card->rcvbuf[channel][card->rcvidx[channel]],
&rbuf_d, cnt);
card->rcvidx[channel] += cnt;
eflag = readb(&rbuf_f);
}
rbnext;
/* Release the mapping while delivering the packet upstream. */
icn_maprelease_channel(card, mch & 2);
if (!eflag) {
/* Complete packet assembled: hand it to the link level. */
if ((cnt = card->rcvidx[channel])) {
if (!(skb = dev_alloc_skb(cnt))) {
printk(KERN_WARNING "icn: receive out of memory\n");
break;
}
memcpy(skb_put(skb, cnt), card->rcvbuf[channel], cnt);
card->rcvidx[channel] = 0;
card->interface.rcvcallb_skb(card->myid, channel, skb);
}
}
if (!icn_trymaplock_channel(card, mch))
break;
}
icn_maprelease_channel(card, mch & 2);
}
}
/* Send data-packet to B-Channel, split it up into fragments of
* ICN_FRAGSIZE length. If last fragment is sent out, signal
* success to upper layers via statcallb with ISDN_STAT_BSENT argument.
* This routine is called via timer-callback from icn_pollbchan() or
* directly from icn_sendbuf().
*/
static void
icn_pollbchan_send(int channel, icn_card *card)
{
/* Second half of a 4B card uses shmem channels 2/3. */
int mch = channel + ((card->secondhalf) ? 2 : 0);
int cnt;
unsigned long flags;
struct sk_buff *skb;
isdn_ctrl cmd;
/* Nothing queued, no partial skb: nothing to do. */
if (!(card->sndcount[channel] || card->xskb[channel] ||
!skb_queue_empty(&card->spqueue[channel])))
return;
if (icn_trymaplock_channel(card, mch)) {
while (sbfree &&
(card->sndcount[channel] ||
!skb_queue_empty(&card->spqueue[channel]) ||
card->xskb[channel])) {
/* Per-channel xmit_lock serializes against icn_sendbuf(). */
spin_lock_irqsave(&card->lock, flags);
if (card->xmit_lock[channel]) {
spin_unlock_irqrestore(&card->lock, flags);
break;
}
card->xmit_lock[channel]++;
spin_unlock_irqrestore(&card->lock, flags);
/* Resume a partially sent skb, or dequeue a fresh one. */
skb = card->xskb[channel];
if (!skb) {
skb = skb_dequeue(&card->spqueue[channel]);
if (skb) {
/* Pop ACK-flag off skb.
* Store length to xlen.
*/
if (*(skb_pull(skb, 1)))
card->xlen[channel] = skb->len;
else
card->xlen[channel] = 0;
}
}
if (!skb)
break;
/* Fragment flag: 0xff = more fragments follow, 0 = last. */
if (skb->len > ICN_FRAGSIZE) {
writeb(0xff, &sbuf_f);
cnt = ICN_FRAGSIZE;
} else {
writeb(0x0, &sbuf_f);
cnt = skb->len;
}
writeb(cnt, &sbuf_l);
memcpy_toio(&sbuf_d, skb->data, cnt);
skb_pull(skb, cnt);
sbnext; /* switch to next buffer */
icn_maprelease_channel(card, mch & 2);
spin_lock_irqsave(&card->lock, flags);
card->sndcount[channel] -= cnt;
if (!skb->len) {
/* Whole skb transmitted: free it and, if the link level
 * asked for it (xlen != 0), confirm with BSENT. */
if (card->xskb[channel])
card->xskb[channel] = NULL;
card->xmit_lock[channel] = 0;
spin_unlock_irqrestore(&card->lock, flags);
dev_kfree_skb(skb);
if (card->xlen[channel]) {
cmd.command = ISDN_STAT_BSENT;
cmd.driver = card->myid;
cmd.arg = channel;
cmd.parm.length = card->xlen[channel];
card->interface.statcallb(&cmd);
}
} else {
/* Remember the remainder for the next poll. */
card->xskb[channel] = skb;
card->xmit_lock[channel] = 0;
spin_unlock_irqrestore(&card->lock, flags);
}
if (!icn_trymaplock_channel(card, mch))
break;
}
icn_maprelease_channel(card, mch & 2);
}
}
/*
 * icn_pollbchan - timer callback servicing both B-channels.
 *
 * Runs receive and send for each active channel, then re-arms itself
 * while at least one B-channel remains open; otherwise it clears the
 * ICN_FLAGS_RBTIMER flag and stops.
 */
static void
icn_pollbchan(unsigned long data)
{
	icn_card *card = (icn_card *) data;
	unsigned long flags;

	if (card->flags & ICN_FLAGS_B1ACTIVE) {
		icn_pollbchan_receive(0, card);
		icn_pollbchan_send(0, card);
	}
	if (card->flags & ICN_FLAGS_B2ACTIVE) {
		icn_pollbchan_receive(1, card);
		icn_pollbchan_send(1, card);
	}

	if (!(card->flags & (ICN_FLAGS_B1ACTIVE | ICN_FLAGS_B2ACTIVE))) {
		card->flags &= ~ICN_FLAGS_RBTIMER;
		return;
	}

	/* Still active: schedule b-channel polling again. */
	spin_lock_irqsave(&card->lock, flags);
	mod_timer(&card->rb_timer, jiffies + ICN_TIMER_BCREAD);
	card->flags |= ICN_FLAGS_RBTIMER;
	spin_unlock_irqrestore(&card->lock, flags);
}
typedef struct icn_stat {
char *statstr;	/* status prefix string sent by the card */
int command;	/* ISDN_STAT_* command to forward to the link level */
int action;	/* handler case selector in icn_parse_status() */
} icn_stat;
/* *INDENT-OFF* */
/* First-match (strncmp prefix) mapping of card status messages to
 * ISDN_STAT_* events and icn_parse_status() actions. */
static icn_stat icn_stat_table[] =
{
{"BCON_", ISDN_STAT_BCONN, 1}, /* B-Channel connected */
{"BDIS_", ISDN_STAT_BHUP, 2}, /* B-Channel disconnected */
/*
** add d-channel connect and disconnect support to link-level
*/
{"DCON_", ISDN_STAT_DCONN, 10}, /* D-Channel connected */
{"DDIS_", ISDN_STAT_DHUP, 11}, /* D-Channel disconnected */
{"DCAL_I", ISDN_STAT_ICALL, 3}, /* Incoming call dialup-line */
{"DSCA_I", ISDN_STAT_ICALL, 3}, /* Incoming call 1TR6-SPV */
{"FCALL", ISDN_STAT_ICALL, 4}, /* Leased line connection up */
{"CIF", ISDN_STAT_CINF, 5}, /* Charge-info, 1TR6-type */
{"AOC", ISDN_STAT_CINF, 6}, /* Charge-info, DSS1-type */
{"CAU", ISDN_STAT_CAUSE, 7}, /* Cause code */
{"TEI OK", ISDN_STAT_RUN, 0}, /* Card connected to wallplug */
{"E_L1: ACT FAIL", ISDN_STAT_BHUP, 8}, /* Layer-1 activation failed */
{"E_L2: DATA LIN", ISDN_STAT_BHUP, 8}, /* Layer-2 data link lost */
{"E_L1: ACTIVATION FAILED",
ISDN_STAT_BHUP, 8}, /* Layer-1 activation failed */
{NULL, 0, -1}
};
/* *INDENT-ON* */
/*
* Check Statusqueue-Pointer from isdn-cards.
* If there are new status-replies from the interface, check
* them against B-Channel-connects/disconnects and set flags accordingly.
* Wake-Up any processes, who are reading the status-device.
* If there are B-Channels open, initiate a timer-callback to
* icn_pollbchan().
* This routine is called periodically via timer.
*/
/*
 * Match one status line from the card against icn_stat_table and
 * dispatch the corresponding action; most cases end by forwarding
 * an isdn_ctrl event through interface.statcallb().
 */
static void
icn_parse_status(u_char *status, int channel, icn_card *card)
{
icn_stat *s = icn_stat_table;
int action = -1;
unsigned long flags;
isdn_ctrl cmd;
/* Prefix-match the message against the status table. */
while (s->statstr) {
if (!strncmp(status, s->statstr, strlen(s->statstr))) {
cmd.command = s->command;
action = s->action;
break;
}
s++;
}
if (action == -1)
return;
cmd.driver = card->myid;
cmd.arg = channel;
switch (action) {
/* D-channel disconnect: flush queues; report BHUP if B was up. */
case 11:
spin_lock_irqsave(&card->lock, flags);
icn_free_queue(card, channel);
card->rcvidx[channel] = 0;
if (card->flags &
((channel) ? ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE)) {
isdn_ctrl ncmd;
card->flags &= ~((channel) ?
ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE);
memset(&ncmd, 0, sizeof(ncmd));
ncmd.driver = card->myid;
ncmd.arg = channel;
ncmd.command = ISDN_STAT_BHUP;
spin_unlock_irqrestore(&card->lock, flags);
card->interface.statcallb(&cmd);
} else
spin_unlock_irqrestore(&card->lock, flags);
break;
/* B-channel connected: mark channel active. */
case 1:
spin_lock_irqsave(&card->lock, flags);
icn_free_queue(card, channel);
card->flags |= (channel) ?
ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE;
spin_unlock_irqrestore(&card->lock, flags);
break;
/* B-channel disconnected: mark inactive, flush state. */
case 2:
spin_lock_irqsave(&card->lock, flags);
card->flags &= ~((channel) ?
ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE);
icn_free_queue(card, channel);
card->rcvidx[channel] = 0;
spin_unlock_irqrestore(&card->lock, flags);
break;
/* Incoming call: parse "phone,si1,si2,eazmsn" out of the message.
 * NOTE(review): the strchr() results are dereferenced without a
 * NULL check — a malformed message would oops; confirm the firmware
 * always sends all three commas. */
case 3:
{
char *t = status + 6;
char *s = strchr(t, ',');
*s++ = '\0';
strlcpy(cmd.parm.setup.phone, t,
sizeof(cmd.parm.setup.phone));
s = strchr(t = s, ',');
*s++ = '\0';
if (!strlen(t))
cmd.parm.setup.si1 = 0;
else
cmd.parm.setup.si1 =
simple_strtoul(t, NULL, 10);
s = strchr(t = s, ',');
*s++ = '\0';
if (!strlen(t))
cmd.parm.setup.si2 = 0;
else
cmd.parm.setup.si2 =
simple_strtoul(t, NULL, 10);
strlcpy(cmd.parm.setup.eazmsn, s,
sizeof(cmd.parm.setup.eazmsn));
}
cmd.parm.setup.plan = 0;
cmd.parm.setup.screen = 0;
break;
/* Leased line up: synthesize call parameters. */
case 4:
sprintf(cmd.parm.setup.phone, "LEASED%d", card->myid);
sprintf(cmd.parm.setup.eazmsn, "%d", channel + 1);
cmd.parm.setup.si1 = 7;
cmd.parm.setup.si2 = 0;
cmd.parm.setup.plan = 0;
cmd.parm.setup.screen = 0;
break;
/* 1TR6 charge info: pass the raw digits. */
case 5:
strlcpy(cmd.parm.num, status + 3, sizeof(cmd.parm.num));
break;
/* DSS1 AOC: hex value converted to decimal string. */
case 6:
snprintf(cmd.parm.num, sizeof(cmd.parm.num), "%d",
(int) simple_strtoul(status + 7, NULL, 16));
break;
/* Cause code: reorder the digits for the link level. */
case 7:
status += 3;
if (strlen(status) == 4)
snprintf(cmd.parm.num, sizeof(cmd.parm.num), "%s%c%c",
status + 2, *status, *(status + 1));
else
strlcpy(cmd.parm.num, status + 1, sizeof(cmd.parm.num));
break;
/* Layer-1/2 failure: hang up both channels, reporting BHUP and
 * DHUP for each (the final statcallb after the switch delivers
 * the last DHUP for channel 1). */
case 8:
spin_lock_irqsave(&card->lock, flags);
card->flags &= ~ICN_FLAGS_B1ACTIVE;
icn_free_queue(card, 0);
card->rcvidx[0] = 0;
spin_unlock_irqrestore(&card->lock, flags);
cmd.arg = 0;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
cmd.command = ISDN_STAT_DHUP;
cmd.arg = 0;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
cmd.command = ISDN_STAT_BHUP;
spin_lock_irqsave(&card->lock, flags);
card->flags &= ~ICN_FLAGS_B2ACTIVE;
icn_free_queue(card, 1);
card->rcvidx[1] = 0;
spin_unlock_irqrestore(&card->lock, flags);
cmd.arg = 1;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
cmd.command = ISDN_STAT_DHUP;
cmd.arg = 1;
cmd.driver = card->myid;
break;
}
/* Deliver the (possibly last) event prepared above. */
card->interface.statcallb(&cmd);
return;
}
/*
 * Append one character from the card to the status-message ring
 * buffer, translating the 0xff record terminator to '\n'. When the
 * writer catches up with the reader, the oldest character is
 * overwritten (read pointer pushed ahead, wrapping at the end).
 */
static void
icn_putmsg(icn_card *card, unsigned char c)
{
ulong flags;
spin_lock_irqsave(&card->lock, flags);
*card->msg_buf_write++ = (c == 0xff) ? '\n' : c;
if (card->msg_buf_write == card->msg_buf_read) {
if (++card->msg_buf_read > card->msg_buf_end)
card->msg_buf_read = card->msg_buf;
}
if (card->msg_buf_write > card->msg_buf_end)
card->msg_buf_write = card->msg_buf;
spin_unlock_irqrestore(&card->lock, flags);
}
/*
 * D-channel poll timer: drain the card's message buffer, parse
 * status lines, detect the firmware banner ("DRV1.") to set the
 * protocol type and firmware revision, then reschedule itself.
 */
static void
icn_polldchan(unsigned long data)
{
icn_card *card = (icn_card *) data;
int mch = card->secondhalf ? 2 : 0;
int avail = 0;
int left;
u_char c;
int ch;
unsigned long flags;
int i;
u_char *p;
isdn_ctrl cmd;
if (icn_trymaplock_channel(card, mch)) {
avail = msg_avail;
for (left = avail, i = readb(&msg_o); left > 0; i++, left--) {
c = readb(&dev.shmem->comm_buffers.iopc_buf[i & 0xff]);
icn_putmsg(card, c);
if (c == 0xff) {
/* End of record: terminate and process the line. */
card->imsg[card->iptr] = 0;
card->iptr = 0;
/* "0<ch>;" prefix means a per-channel status message. */
if (card->imsg[0] == '0' && card->imsg[1] >= '0' &&
card->imsg[1] <= '2' && card->imsg[2] == ';') {
ch = (card->imsg[1] - '0') - 1;
p = &card->imsg[3];
icn_parse_status(p, ch, card);
} else {
p = card->imsg;
if (!strncmp(p, "DRV1.", 5)) {
u_char vstr[10];
u_char *q = vstr;
printk(KERN_INFO "icn: (%s) %s\n", CID, p);
if (!strncmp(p + 7, "TC", 2)) {
card->ptype = ISDN_PTYPE_1TR6;
card->interface.features |= ISDN_FEATURE_P_1TR6;
printk(KERN_INFO
"icn: (%s) 1TR6-Protocol loaded and running\n", CID);
}
if (!strncmp(p + 7, "EC", 2)) {
card->ptype = ISDN_PTYPE_EURO;
card->interface.features |= ISDN_FEATURE_P_EURO;
printk(KERN_INFO
"icn: (%s) Euro-Protocol loaded and running\n", CID);
}
/* Extract the digits after "BRV" as the firmware rev. */
p = strstr(card->imsg, "BRV") + 3;
while (*p) {
if (*p >= '0' && *p <= '9')
*q++ = *p;
p++;
}
*q = '\0';
strcat(vstr, "000");
vstr[3] = '\0';
card->fw_rev = (int) simple_strtoul(vstr, NULL, 10);
continue;
}
}
} else {
/* Accumulate message bytes, capped at 59 + terminator. */
card->imsg[card->iptr] = c;
if (card->iptr < 59)
card->iptr++;
}
}
/* Advance the card's read pointer past what we consumed. */
writeb((readb(&msg_o) + avail) & 0xff, &msg_o);
icn_release_channel();
}
if (avail) {
cmd.command = ISDN_STAT_STAVAIL;
cmd.driver = card->myid;
cmd.arg = avail;
card->interface.statcallb(&cmd);
}
spin_lock_irqsave(&card->lock, flags);
if (card->flags & (ICN_FLAGS_B1ACTIVE | ICN_FLAGS_B2ACTIVE))
if (!(card->flags & ICN_FLAGS_RBTIMER)) {
/* schedule b-channel polling */
card->flags |= ICN_FLAGS_RBTIMER;
del_timer(&card->rb_timer);
card->rb_timer.function = icn_pollbchan;
card->rb_timer.data = (unsigned long) card;
card->rb_timer.expires = jiffies + ICN_TIMER_BCREAD;
add_timer(&card->rb_timer);
}
/* schedule again */
mod_timer(&card->st_timer, jiffies + ICN_TIMER_DCREAD);
spin_unlock_irqrestore(&card->lock, flags);
}
/* Append a packet to the transmit buffer-queue.
* Parameters:
* channel = Number of B-channel
* skb = pointer to sk_buff
* card = pointer to card-struct
* Return:
* Number of bytes transferred, -E??? on error
*/
static int
icn_sendbuf(int channel, int ack, struct sk_buff *skb, icn_card *card)
{
	int len = skb->len;
	unsigned long flags;
	struct sk_buff *nskb;

	if (len > 4000) {
		printk(KERN_WARNING
		       "icn: Send packet too large\n");
		return -EINVAL;
	}
	if (len) {
		/* Fix: '&' binds tighter than '?:', so the original
		 * expression evaluated (card->flags & channel) and then
		 * selected a non-zero flag constant as the whole condition,
		 * making the !-test always false.  The intended check is
		 * against the per-channel ACTIVE flag. */
		if (!(card->flags &
		      ((channel) ? ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE)))
			return 0;
		if (card->sndcount[channel] > ICN_MAX_SQUEUE)
			return 0;
#warning TODO test headroom or use skb->nb to flag ACK
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb) {
			/* Push ACK flag as one
			 * byte in front of data.
			 */
			*(skb_push(nskb, 1)) = ack ? 1 : 0;
			skb_queue_tail(&card->spqueue[channel], nskb);
			dev_kfree_skb(skb);
		} else
			len = 0;
		spin_lock_irqsave(&card->lock, flags);
		card->sndcount[channel] += len;
		spin_unlock_irqrestore(&card->lock, flags);
	}
	return len;
}
/*
* Check card's status after starting the bootstrap loader.
* On entry, the card's shared memory has already to be mapped.
* Return:
* 0 on success (Boot loader ready)
* -EIO on failure (timeout)
*/
/*
 * Poll the card's handshake flags until the bootstrap loader signals
 * readiness.  The shared memory must already be mapped on entry.
 * Returns 0 when the loader is ready, -EIO after repeated timeouts.
 */
static int
icn_check_loader(int cardnumber)
{
	int tries = 0;

	for (;;) {
#ifdef BOOT_DEBUG
		printk(KERN_DEBUG "Loader %d ?\n", cardnumber);
#endif
		/* both handshake registers clear -> loader is ready */
		if (!readb(&dev.shmem->data_control.scns) &&
		    !readb(&dev.shmem->data_control.scnr)) {
#ifdef BOOT_DEBUG
			printk(KERN_DEBUG "Loader %d OK\n", cardnumber);
#endif
			icn_release_channel();
			return 0;
		}
		if (tries++ > 5) {
			printk(KERN_WARNING
			       "icn: Boot-Loader %d timed out.\n",
			       cardnumber);
			icn_release_channel();
			return -EIO;
		}
#ifdef BOOT_DEBUG
		printk(KERN_DEBUG "Loader %d TO?\n", cardnumber);
#endif
		msleep_interruptible(ICN_BOOT_TIMEOUT1);
	}
}
/* Load the boot-code into the interface-card's memory and start it.
* Always called from user-process.
*
* Parameters:
* buffer = pointer to packet
* Return:
* 0 if successfully loaded
*/
/*
 * SLEEP(sec): in BOOT_DEBUG builds, sleep for 'sec' seconds in
 * interruptible 1s steps, tracing each call; in normal builds it
 * expands to nothing.
 */
#ifdef BOOT_DEBUG
#define SLEEP(sec) { \
		int slsec = sec; \
		printk(KERN_DEBUG "SLEEP(%d)\n", slsec); \
		while (slsec) { \
			msleep_interruptible(1000); \
			slsec--; \
		} \
	}
#else
#define SLEEP(sec)
#endif
/*
 * Copy the stage-1 bootstrap code from userspace into the card's shared
 * memory and start it.  For double-S0 (4B) cards the loader is written
 * into both memory banks.  Always called from user context.
 * Returns 0 on success, negative errno otherwise.
 */
static int
icn_loadboot(u_char __user *buffer, icn_card *card)
{
	int ret;
	u_char *codebuf;
	unsigned long flags;
#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "icn_loadboot called, buffaddr=%08lx\n", (ulong) buffer);
#endif
	/* stage the userspace boot image in a kernel buffer first */
	if (!(codebuf = kmalloc(ICN_CODE_STAGE1, GFP_KERNEL))) {
		printk(KERN_WARNING "icn: Could not allocate code buffer\n");
		ret = -ENOMEM;
		goto out;
	}
	if (copy_from_user(codebuf, buffer, ICN_CODE_STAGE1)) {
		ret = -EFAULT;
		goto out_kfree;
	}
	/* claim the I/O ports once; a 4B's two halves share the region */
	if (!card->rvalid) {
		if (!request_region(card->port, ICN_PORTLEN, card->regname)) {
			printk(KERN_WARNING
			       "icn: (%s) ports 0x%03x-0x%03x in use.\n",
			       CID,
			       card->port,
			       card->port + ICN_PORTLEN);
			ret = -EBUSY;
			goto out_kfree;
		}
		card->rvalid = 1;
		if (card->doubleS0)
			card->other->rvalid = 1;
	}
	/* claim and map the 16k shared-memory window once for all cards */
	if (!dev.mvalid) {
		if (!request_mem_region(dev.memaddr, 0x4000, "icn-isdn (all cards)")) {
			printk(KERN_WARNING
			       "icn: memory at 0x%08lx in use.\n", dev.memaddr);
			ret = -EBUSY;
			goto out_kfree;
		}
		dev.shmem = ioremap(dev.memaddr, 0x4000);
		dev.mvalid = 1;
	}
	OUTB_P(0, ICN_RUN);	/* Reset Controller */
	OUTB_P(0, ICN_MAPRAM);	/* Disable RAM */
	icn_shiftout(ICN_CFG, 0x0f, 3, 4);	/* Windowsize= 16k */
	icn_shiftout(ICN_CFG, dev.memaddr, 23, 10);	/* Set RAM-Addr. */
#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "shmem=%08lx\n", dev.memaddr);
#endif
	SLEEP(1);
#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "Map Bank 0\n");
#endif
	spin_lock_irqsave(&dev.devlock, flags);
	icn_map_channel(card, 0);	/* Select Bank 0 */
	icn_lock_channel(card, 0);	/* Lock Bank 0 */
	spin_unlock_irqrestore(&dev.devlock, flags);
	SLEEP(1);
	memcpy_toio(dev.shmem, codebuf, ICN_CODE_STAGE1);	/* Copy code */
#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "Bootloader transferred\n");
#endif
	if (card->doubleS0) {
		/* second half of a 4B card lives in bank 8 */
		SLEEP(1);
#ifdef BOOT_DEBUG
		printk(KERN_DEBUG "Map Bank 8\n");
#endif
		spin_lock_irqsave(&dev.devlock, flags);
		__icn_release_channel();
		icn_map_channel(card, 2);	/* Select Bank 8 */
		icn_lock_channel(card, 2);	/* Lock Bank 8 */
		spin_unlock_irqrestore(&dev.devlock, flags);
		SLEEP(1);
		memcpy_toio(dev.shmem, codebuf, ICN_CODE_STAGE1);	/* Copy code */
#ifdef BOOT_DEBUG
		printk(KERN_DEBUG "Bootloader transferred\n");
#endif
	}
	SLEEP(1);
	OUTB_P(0xff, ICN_RUN);	/* Start Boot-Code */
	/* wait for the loader(s) to signal readiness */
	if ((ret = icn_check_loader(card->doubleS0 ? 2 : 1))) {
		goto out_kfree;
	}
	if (!card->doubleS0) {
		ret = 0;
		goto out_kfree;
	}
	/* reached only, if we have a Double-S0-Card */
#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "Map Bank 0\n");
#endif
	spin_lock_irqsave(&dev.devlock, flags);
	icn_map_channel(card, 0);	/* Select Bank 0 */
	icn_lock_channel(card, 0);	/* Lock Bank 0 */
	spin_unlock_irqrestore(&dev.devlock, flags);
	SLEEP(1);
	ret = (icn_check_loader(1));
out_kfree:
	kfree(codebuf);
out:
	return ret;
}
/*
 * Download the stage-2 protocol firmware (ICN_CODE_STAGE2 bytes) from
 * userspace to the card in 256-byte chunks, then wait for the protocol
 * to come up.  On success the status poll timer(s) are installed.
 * Returns 0 on success, negative errno on fault or timeout.
 */
static int
icn_loadproto(u_char __user *buffer, icn_card *card)
{
	register u_char __user *p = buffer;
	u_char codebuf[256];
	uint left = ICN_CODE_STAGE2;
	uint cnt;
	int timer;
	unsigned long flags;
#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "icn_loadproto called\n");
#endif
	if (!access_ok(VERIFY_READ, buffer, ICN_CODE_STAGE2))
		return -EFAULT;
	timer = 0;
	/* map and lock the bank belonging to this card half */
	spin_lock_irqsave(&dev.devlock, flags);
	if (card->secondhalf) {
		icn_map_channel(card, 2);
		icn_lock_channel(card, 2);
	} else {
		icn_map_channel(card, 0);
		icn_lock_channel(card, 0);
	}
	spin_unlock_irqrestore(&dev.devlock, flags);
	while (left) {
		if (sbfree) {	/* If there is a free buffer... */
			cnt = left;
			if (cnt > 256)
				cnt = 256;
			if (copy_from_user(codebuf, p, cnt)) {
				icn_maprelease_channel(card, 0);
				return -EFAULT;
			}
			memcpy_toio(&sbuf_l, codebuf, cnt);	/* copy data */
			sbnext;	/* switch to next buffer */
			p += cnt;
			left -= cnt;
			timer = 0;	/* progress made: reset timeout counter */
		} else {
#ifdef BOOT_DEBUG
			printk(KERN_DEBUG "boot 2 !sbfree\n");
#endif
			if (timer++ > 5) {
				icn_maprelease_channel(card, 0);
				return -EIO;
			}
			schedule_timeout_interruptible(10);
		}
	}
	/* signal end-of-download to the card */
	writeb(0x20, &sbuf_n);
	timer = 0;
	/* wait for the card to consume both command handshake registers */
	while (1) {
		if (readb(&cmd_o) || readb(&cmd_i)) {
#ifdef BOOT_DEBUG
			printk(KERN_DEBUG "Proto?\n");
#endif
			if (timer++ > 5) {
				printk(KERN_WARNING
				       "icn: (%s) Protocol timed out.\n",
				       CID);
#ifdef BOOT_DEBUG
				printk(KERN_DEBUG "Proto TO!\n");
#endif
				icn_maprelease_channel(card, 0);
				return -EIO;
			}
#ifdef BOOT_DEBUG
			printk(KERN_DEBUG "Proto TO?\n");
#endif
			msleep_interruptible(ICN_BOOT_TIMEOUT1);
		} else {
			/* 4B cards start polling only after the second half
			 * is loaded; single cards start immediately */
			if ((card->secondhalf) || (!card->doubleS0)) {
#ifdef BOOT_DEBUG
				printk(KERN_DEBUG "Proto loaded, install poll-timer %d\n",
				       card->secondhalf);
#endif
				spin_lock_irqsave(&card->lock, flags);
				init_timer(&card->st_timer);
				card->st_timer.expires = jiffies + ICN_TIMER_DCREAD;
				card->st_timer.function = icn_polldchan;
				card->st_timer.data = (unsigned long) card;
				add_timer(&card->st_timer);
				card->flags |= ICN_FLAGS_RUNNING;
				if (card->doubleS0) {
					init_timer(&card->other->st_timer);
					card->other->st_timer.expires = jiffies + ICN_TIMER_DCREAD;
					card->other->st_timer.function = icn_polldchan;
					card->other->st_timer.data = (unsigned long) card->other;
					add_timer(&card->other->st_timer);
					card->other->flags |= ICN_FLAGS_RUNNING;
				}
				spin_unlock_irqrestore(&card->lock, flags);
			}
			icn_maprelease_channel(card, 0);
			return 0;
		}
	}
}
/* Read the Status-replies from the Interface */
/*
 * Copy queued status bytes from the card's message ring buffer to
 * userspace.  Stops early when the ring is drained.  Returns the number
 * of bytes copied, or -EFAULT if a userspace write fails.
 */
static int
icn_readstatus(u_char __user *buf, int len, icn_card *card)
{
	int n = 0;
	u_char __user *dst = buf;

	while (n < len) {
		if (card->msg_buf_read == card->msg_buf_write)
			break;
		if (put_user(*card->msg_buf_read++, dst))
			return -EFAULT;
		/* wrap the read pointer at the end of the ring */
		if (card->msg_buf_read > card->msg_buf_end)
			card->msg_buf_read = card->msg_buf;
		dst++;
		n++;
	}
	return n;
}
/* Put command-strings into the command-queue of the Interface */
/*
 * Write a command string into the card's command buffer, echoing each
 * byte (with a '>' prompt) into the driver's status ring so userspace
 * can observe the dialogue.
 * buf/len = command bytes; user != 0 means buf is a userspace pointer.
 * Returns the number of bytes actually transferred, or -EFAULT.
 */
static int
icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
{
	int mch = card->secondhalf ? 2 : 0;
	int pp;
	int i;
	int count;
	int xcount;	/* bytes written to the card */
	int ocount;	/* bytes echoed to the status ring */
	int loop;
	unsigned long flags;
	int lastmap_channel;
	struct icn_card *lastmap_card;
	u_char *p;
	isdn_ctrl cmd;
	u_char msg[0x100];
	ocount = 1;
	xcount = loop = 0;
	while (len) {
		/* NOTE(review): 'buf' is never advanced, so a retry pass
		 * copies the same initial bytes again while 'len' has already
		 * been decremented -- verify against upstream behavior. */
		count = cmd_free;
		if (count > len)
			count = len;
		if (user) {
			if (copy_from_user(msg, buf, count))
				return -EFAULT;
		} else
			memcpy(msg, buf, count);
		/* remember the current mapping so it can be restored below */
		spin_lock_irqsave(&dev.devlock, flags);
		lastmap_card = dev.mcard;
		lastmap_channel = dev.channel;
		icn_map_channel(card, mch);
		icn_putmsg(card, '>');
		for (p = msg, pp = readb(&cmd_i), i = count; i > 0; i--, p++, pp++) {
			/* '\n' terminates a command; the card expects 0xff */
			writeb((*p == '\n') ? 0xff : *p,
			       &dev.shmem->comm_buffers.pcio_buf[pp & 0xff]);
			len--;
			xcount++;
			icn_putmsg(card, *p);
			if ((*p == '\n') && (i > 1)) {
				icn_putmsg(card, '>');
				ocount++;
			}
			ocount++;
		}
		/* advance the card's command-in index */
		writeb((readb(&cmd_i) + count) & 0xff, &cmd_i);
		if (lastmap_card)
			icn_map_channel(lastmap_card, lastmap_channel);
		spin_unlock_irqrestore(&dev.devlock, flags);
		if (len) {
			mdelay(1);
			if (loop++ > 20)
				break;
		} else
			break;
	}
	if (len && (!user))
		printk(KERN_WARNING "icn: writemsg incomplete!\n");
	/* tell the link-level how many status bytes became available */
	cmd.command = ISDN_STAT_STAVAIL;
	cmd.driver = card->myid;
	cmd.arg = ocount;
	card->interface.statcallb(&cmd);
	return xcount;
}
/*
* Delete card's pending timers, send STOP to linklevel
*/
/*
 * Stop a running card: clear the RUNNING flag, cancel its timers and
 * send ISDN_STAT_STOP to the link-level.  For a 4B card the other half
 * is stopped as well.  No-op if the card is not running.
 */
static void
icn_stopcard(icn_card *card)
{
	unsigned long flags;
	isdn_ctrl cmd;

	spin_lock_irqsave(&card->lock, flags);
	if (!(card->flags & ICN_FLAGS_RUNNING)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}
	card->flags &= ~ICN_FLAGS_RUNNING;
	del_timer(&card->st_timer);
	del_timer(&card->rb_timer);
	spin_unlock_irqrestore(&card->lock, flags);
	cmd.command = ISDN_STAT_STOP;
	cmd.driver = card->myid;
	card->interface.statcallb(&cmd);
	if (card->doubleS0)
		icn_stopcard(card->other);
}
/* Stop every registered card. */
static void
icn_stopallcards(void)
{
	icn_card *c;

	for (c = cards; c; c = c->next)
		icn_stopcard(c);
}
/*
* Unmap all cards, because some of them may be mapped accidetly during
* autoprobing of some network drivers (SMC-driver?)
*/
static void
icn_disable_cards(void)
{
	icn_card *card;

	/* Reset every card whose I/O region we can temporarily claim;
	 * cards with busy ports are only reported. */
	for (card = cards; card; card = card->next) {
		if (!request_region(card->port, ICN_PORTLEN, "icn-isdn")) {
			printk(KERN_WARNING
			       "icn: (%s) ports 0x%03x-0x%03x in use.\n",
			       CID,
			       card->port,
			       card->port + ICN_PORTLEN);
			continue;
		}
		OUTB_P(0, ICN_RUN);	/* Reset Controller */
		OUTB_P(0, ICN_MAPRAM);	/* Disable RAM */
		release_region(card->port, ICN_PORTLEN);
	}
}
/*
 * Dispatch a command from the ISDN link-level to the card.
 * c    = command descriptor (command code, arg, parameters)
 * card = target card
 * Returns 0 (or a positive ioctl result) on success, negative errno on
 * error.
 */
static int
icn_command(isdn_ctrl *c, icn_card *card)
{
	ulong a;
	ulong flags;
	int i;
	char cbuf[60];	/* scratch buffer for card command strings */
	isdn_ctrl cmd;
	icn_cdef cdef;
	char __user *arg;
	switch (c->command) {
	case ISDN_CMD_IOCTL:
		/* first ulong of parm.num carries the ioctl argument,
		 * possibly a userspace pointer */
		memcpy(&a, c->parm.num, sizeof(ulong));
		arg = (char __user *)a;
		switch (c->arg) {
		case ICN_IOCTL_SETMMIO:
			/* relocate the shared-memory window of ALL cards */
			if (dev.memaddr != (a & 0x0ffc000)) {
				if (!request_mem_region(a & 0x0ffc000, 0x4000, "icn-isdn (all cards)")) {
					printk(KERN_WARNING
					       "icn: memory at 0x%08lx in use.\n",
					       a & 0x0ffc000);
					return -EINVAL;
				}
				/* only probed for availability here; mapped
				 * again later by icn_loadboot() */
				release_mem_region(a & 0x0ffc000, 0x4000);
				icn_stopallcards();
				spin_lock_irqsave(&card->lock, flags);
				if (dev.mvalid) {
					iounmap(dev.shmem);
					release_mem_region(dev.memaddr, 0x4000);
				}
				dev.mvalid = 0;
				dev.memaddr = a & 0x0ffc000;
				spin_unlock_irqrestore(&card->lock, flags);
				printk(KERN_INFO
				       "icn: (%s) mmio set to 0x%08lx\n",
				       CID,
				       dev.memaddr);
			}
			break;
		case ICN_IOCTL_GETMMIO:
			return (long) dev.memaddr;
		case ICN_IOCTL_SETPORT:
			/* only the hardware's selectable base ports are valid */
			if (a == 0x300 || a == 0x310 || a == 0x320 || a == 0x330
			    || a == 0x340 || a == 0x350 || a == 0x360 ||
			    a == 0x308 || a == 0x318 || a == 0x328 || a == 0x338
			    || a == 0x348 || a == 0x358 || a == 0x368) {
				if (card->port != (unsigned short) a) {
					if (!request_region((unsigned short) a, ICN_PORTLEN, "icn-isdn")) {
						printk(KERN_WARNING
						       "icn: (%s) ports 0x%03x-0x%03x in use.\n",
						       CID, (int) a, (int) a + ICN_PORTLEN);
						return -EINVAL;
					}
					release_region((unsigned short) a, ICN_PORTLEN);
					icn_stopcard(card);
					spin_lock_irqsave(&card->lock, flags);
					if (card->rvalid)
						release_region(card->port, ICN_PORTLEN);
					card->port = (unsigned short) a;
					card->rvalid = 0;
					if (card->doubleS0) {
						/* both halves share one I/O base */
						card->other->port = (unsigned short) a;
						card->other->rvalid = 0;
					}
					spin_unlock_irqrestore(&card->lock, flags);
					printk(KERN_INFO
					       "icn: (%s) port set to 0x%03x\n",
					       CID, card->port);
				}
			} else
				return -EINVAL;
			break;
		case ICN_IOCTL_GETPORT:
			return (int) card->port;
		case ICN_IOCTL_GETDOUBLE:
			return (int) card->doubleS0;
		case ICN_IOCTL_DEBUGVAR:
			/* NOTE(review): 'arg' is not advanced after the first
			 * copy (only 'a' is), so the second copy_to_user()
			 * overwrites the first value at the same address --
			 * verify intent against the userspace tool. */
			if (copy_to_user(arg,
					 &card,
					 sizeof(ulong)))
				return -EFAULT;
			a += sizeof(ulong);
			{
				ulong l = (ulong)&dev;
				if (copy_to_user(arg,
						 &l,
						 sizeof(ulong)))
					return -EFAULT;
			}
			return 0;
		case ICN_IOCTL_LOADBOOT:
			/* on the very first download, quiesce all cards */
			if (dev.firstload) {
				icn_disable_cards();
				dev.firstload = 0;
			}
			icn_stopcard(card);
			return (icn_loadboot(arg, card));
		case ICN_IOCTL_LOADPROTO:
			icn_stopcard(card);
			if ((i = (icn_loadproto(arg, card))))
				return i;
			/* 4B: also load the second half's protocol image */
			if (card->doubleS0)
				i = icn_loadproto(arg + ICN_CODE_STAGE2, card->other);
			return i;
			break;
		case ICN_IOCTL_ADDCARD:
			if (!dev.firstload)
				return -EBUSY;
			if (copy_from_user(&cdef,
					   arg,
					   sizeof(cdef)))
				return -EFAULT;
			return (icn_addcard(cdef.port, cdef.id1, cdef.id2));
			break;
		case ICN_IOCTL_LEASEDCFG:
			if (a) {
				if (!card->leased) {
					card->leased = 1;
					/* wait until the protocol is up */
					while (card->ptype == ISDN_PTYPE_UNKNOWN) {
						msleep_interruptible(ICN_BOOT_TIMEOUT1);
					}
					msleep_interruptible(ICN_BOOT_TIMEOUT1);
					sprintf(cbuf, "00;FV2ON\n01;EAZ%c\n02;EAZ%c\n",
						(a & 1) ? '1' : 'C', (a & 2) ? '2' : 'C');
					i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
					printk(KERN_INFO
					       "icn: (%s) Leased-line mode enabled\n",
					       CID);
					cmd.command = ISDN_STAT_RUN;
					cmd.driver = card->myid;
					cmd.arg = 0;
					card->interface.statcallb(&cmd);
				}
			} else {
				if (card->leased) {
					card->leased = 0;
					sprintf(cbuf, "00;FV2OFF\n");
					i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
					printk(KERN_INFO
					       "icn: (%s) Leased-line mode disabled\n",
					       CID);
					cmd.command = ISDN_STAT_RUN;
					cmd.driver = card->myid;
					cmd.arg = 0;
					card->interface.statcallb(&cmd);
				}
			}
			return 0;
		default:
			return -EINVAL;
		}
		break;
	case ISDN_CMD_DIAL:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (card->leased)
			break;	/* no dialing on leased lines */
		if ((c->arg & 255) < ICN_BCH) {
			char *p;
			char dial[50];
			char dcode[4];
			a = c->arg;
			p = c->parm.setup.phone;
			if (*p == 's' || *p == 'S') {
				/* Dial for SPV */
				p++;
				strcpy(dcode, "SCA");
			} else
				/* Normal Dial */
				strcpy(dcode, "CAL");
			strcpy(dial, p);
			sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
				dcode, dial, c->parm.setup.si1,
				c->parm.setup.si2, c->parm.setup.eazmsn);
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_ACCEPTD:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (c->arg < ICN_BCH) {
			a = c->arg + 1;
			/* newer firmware: select L2 protocol before connect */
			if (card->fw_rev >= 300) {
				switch (card->l2_proto[a - 1]) {
				case ISDN_PROTO_L2_X75I:
					sprintf(cbuf, "%02d;BX75\n", (int) a);
					break;
				case ISDN_PROTO_L2_HDLC:
					sprintf(cbuf, "%02d;BTRA\n", (int) a);
					break;
				}
				i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
			}
			sprintf(cbuf, "%02d;DCON_R\n", (int) a);
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_ACCEPTB:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (c->arg < ICN_BCH) {
			a = c->arg + 1;
			/* the 'else' below pairs with the fw_rev test */
			if (card->fw_rev >= 300)
				switch (card->l2_proto[a - 1]) {
				case ISDN_PROTO_L2_X75I:
					sprintf(cbuf, "%02d;BCON_R,BX75\n", (int) a);
					break;
				case ISDN_PROTO_L2_HDLC:
					sprintf(cbuf, "%02d;BCON_R,BTRA\n", (int) a);
					break;
				} else
				sprintf(cbuf, "%02d;BCON_R\n", (int) a);
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_HANGUP:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (c->arg < ICN_BCH) {
			a = c->arg + 1;
			sprintf(cbuf, "%02d;BDIS_R\n%02d;DDIS_R\n", (int) a, (int) a);
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_SETEAZ:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (card->leased)
			break;
		if (c->arg < ICN_BCH) {
			a = c->arg + 1;
			/* Euro uses MSN listen masks, 1TR6 uses EAZ digits */
			if (card->ptype == ISDN_PTYPE_EURO) {
				sprintf(cbuf, "%02d;MS%s%s\n", (int) a,
					c->parm.num[0] ? "N" : "ALL", c->parm.num);
			} else
				sprintf(cbuf, "%02d;EAZ%s\n", (int) a,
					c->parm.num[0] ? (char *)(c->parm.num) : "0123456789");
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_CLREAZ:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (card->leased)
			break;
		if (c->arg < ICN_BCH) {
			a = c->arg + 1;
			if (card->ptype == ISDN_PTYPE_EURO)
				sprintf(cbuf, "%02d;MSNC\n", (int) a);
			else
				sprintf(cbuf, "%02d;EAZC\n", (int) a);
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_SETL2:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if ((c->arg & 255) < ICN_BCH) {
			a = c->arg;
			/* high byte selects the layer-2 protocol */
			switch (a >> 8) {
			case ISDN_PROTO_L2_X75I:
				sprintf(cbuf, "%02d;BX75\n", (int) (a & 255) + 1);
				break;
			case ISDN_PROTO_L2_HDLC:
				sprintf(cbuf, "%02d;BTRA\n", (int) (a & 255) + 1);
				break;
			default:
				return -EINVAL;
			}
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
			card->l2_proto[a & 255] = (a >> 8);
		}
		break;
	case ISDN_CMD_SETL3:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		return 0;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
* Find card with given driverId
*/
/* Return the registered card whose driver-id matches, or NULL. */
static inline icn_card *
icn_findcard(int driverid)
{
	icn_card *c;

	for (c = cards; c; c = c->next)
		if (c->myid == driverid)
			return c;
	return (icn_card *) 0;
}
/*
* Wrapper functions for interface to linklevel
*/
/* Link-level wrapper: route a command to the addressed card. */
static int
if_command(isdn_ctrl *c)
{
	icn_card *card = icn_findcard(c->driver);

	if (!card) {
		printk(KERN_ERR
		       "icn: if_command %d called with invalid driverId %d!\n",
		       c->command, c->driver);
		return -ENODEV;
	}
	return icn_command(c, card);
}
/* Link-level wrapper: forward a userspace command write to a running card. */
static int
if_writecmd(const u_char __user *buf, int len, int id, int channel)
{
	icn_card *card = icn_findcard(id);

	if (!card) {
		printk(KERN_ERR
		       "icn: if_writecmd called with invalid driverId!\n");
		return -ENODEV;
	}
	if (!(card->flags & ICN_FLAGS_RUNNING))
		return -ENODEV;
	return icn_writecmd(buf, len, 1, card);
}
/* Link-level wrapper: read queued status bytes from a running card. */
static int
if_readstatus(u_char __user *buf, int len, int id, int channel)
{
	icn_card *card = icn_findcard(id);

	if (!card) {
		printk(KERN_ERR
		       "icn: if_readstatus called with invalid driverId!\n");
		return -ENODEV;
	}
	if (!(card->flags & ICN_FLAGS_RUNNING))
		return -ENODEV;
	return icn_readstatus(buf, len, card);
}
/* Link-level wrapper: queue an outgoing skb on a running card's channel. */
static int
if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
{
	icn_card *card = icn_findcard(id);

	if (!card) {
		printk(KERN_ERR
		       "icn: if_sendbuf called with invalid driverId!\n");
		return -ENODEV;
	}
	if (!(card->flags & ICN_FLAGS_RUNNING))
		return -ENODEV;
	return icn_sendbuf(channel, ack, skb, card);
}
/*
* Allocate a new card-struct, initialize it
* link it into cards-list and register it at linklevel.
*/
/*
 * Allocate and initialize a new card struct, link it into the global
 * 'cards' list and register it with the ISDN link-level.
 * Returns the new card, or NULL on allocation/registration failure.
 */
static icn_card *
icn_initcard(int port, char *id)
{
	icn_card *card;
	int i;
	if (!(card = kzalloc(sizeof(icn_card), GFP_KERNEL))) {
		printk(KERN_WARNING
		       "icn: (%s) Could not allocate card-struct.\n", id);
		return (icn_card *) 0;
	}
	spin_lock_init(&card->lock);
	card->port = port;
	/* link-level callbacks and capabilities */
	card->interface.owner = THIS_MODULE;
	card->interface.hl_hdrlen = 1;
	card->interface.channels = ICN_BCH;
	card->interface.maxbufsize = 4000;
	card->interface.command = if_command;
	card->interface.writebuf_skb = if_sendbuf;
	card->interface.writecmd = if_writecmd;
	card->interface.readstat = if_readstatus;
	card->interface.features = ISDN_FEATURE_L2_X75I |
		ISDN_FEATURE_L2_HDLC |
		ISDN_FEATURE_L3_TRANS |
		ISDN_FEATURE_P_UNKNOWN;
	card->ptype = ISDN_PTYPE_UNKNOWN;
	strlcpy(card->interface.id, id, sizeof(card->interface.id));
	/* status-message ring buffer starts out empty */
	card->msg_buf_write = card->msg_buf;
	card->msg_buf_read = card->msg_buf;
	card->msg_buf_end = &card->msg_buf[sizeof(card->msg_buf) - 1];
	for (i = 0; i < ICN_BCH; i++) {
		card->l2_proto[i] = ISDN_PROTO_L2_X75I;
		skb_queue_head_init(&card->spqueue[i]);
	}
	card->next = cards;
	cards = card;
	if (!register_isdn(&card->interface)) {
		cards = cards->next;	/* unlink again on failure */
		printk(KERN_WARNING
		       "icn: Unable to register %s\n", id);
		kfree(card);
		return (icn_card *) 0;
	}
	/* presumably register_isdn() stored the assigned driver id in
	 * interface.channels -- TODO confirm against isdn4linux */
	card->myid = card->interface.channels;
	sprintf(card->regname, "icn-isdn (%s)", card->interface.id);
	return card;
}
/*
 * Register one (2B) or two (4B) logical cards at the same I/O port.
 * id2 empty -> single ICN-2B; otherwise the second half of an ICN-4B is
 * registered as well and both halves are cross-linked.
 * Returns 0 on (possibly partial) success, -EIO if the first half
 * cannot be initialized.
 */
static int
icn_addcard(int port, char *id1, char *id2)
{
	icn_card *card;
	icn_card *card2;

	if (!(card = icn_initcard(port, id1))) {
		return -EIO;
	}
	if (!strlen(id2)) {
		printk(KERN_INFO
		       "icn: (%s) ICN-2B, port 0x%x added\n",
		       card->interface.id, port);
		return 0;
	}
	if (!(card2 = icn_initcard(port, id2))) {
		/* Fix: card2 is NULL in this branch, so the message must
		 * use the requested id string, not card2->interface.id
		 * (which was a NULL-pointer dereference). */
		printk(KERN_INFO
		       "icn: (%s) half ICN-4B, port 0x%x added\n",
		       id2, port);
		return 0;
	}
	/* cross-link the two halves of the 4B */
	card->doubleS0 = 1;
	card->secondhalf = 0;
	card->other = card2;
	card2->doubleS0 = 1;
	card2->secondhalf = 1;
	card2->other = card;
	printk(KERN_INFO
	       "icn: (%s and %s) ICN-4B, port 0x%x added\n",
	       card->interface.id, card2->interface.id, port);
	return 0;
}
#ifndef MODULE
/*
 * Parse the "icn=" kernel command-line option:
 *   icn=<portbase>[,<membase>[,<id1>[,<id2>]]]
 * The id strings are copied into static storage referenced by
 * icn_id/icn_id2.
 */
static int __init
icn_setup(char *line)
{
	char *p, *str;
	int ints[3];
	static char sid[20];
	static char sid2[20];

	str = get_options(line, 2, ints);
	if (ints[0])
		portbase = ints[1];
	if (ints[0] > 1)
		membase = (unsigned long)ints[2];
	if (str && *str) {
		/* Fix: bound the copies -- the id string comes from the
		 * kernel command line and may exceed the 20-byte buffers,
		 * which the original unbounded strcpy() would overflow. */
		strlcpy(sid, str, sizeof(sid));
		icn_id = sid;
		if ((p = strchr(sid, ','))) {
			*p++ = 0;
			strlcpy(sid2, p, sizeof(sid2));
			icn_id2 = sid2;
		}
	}
	return (1);
}
__setup("icn=", icn_setup);
#endif /* MODULE */
/*
 * Module init: reset the global device state, extract the revision
 * string and register the first card(s) from the module/boot parameters.
 */
static int __init icn_init(void)
{
	char *p;
	char rev[21];
	memset(&dev, 0, sizeof(icn_dev));
	/* shared-memory base must be 16k-aligned within the ISA window */
	dev.memaddr = (membase & 0x0ffc000);
	dev.channel = -1;
	dev.mcard = NULL;
	dev.firstload = 1;
	spin_lock_init(&dev.devlock);
	/* pull the version number out of the CVS "$Revision: ... $" tag */
	if ((p = strchr(revision, ':'))) {
		strncpy(rev, p + 1, 20);
		rev[20] = '\0';	/* strncpy does not terminate on truncation */
		p = strchr(rev, '$');
		if (p)
			*p = 0;
	} else
		strcpy(rev, " ??? ");
	printk(KERN_NOTICE "ICN-ISDN-driver Rev%smem=0x%08lx\n", rev,
	       dev.memaddr);
	return (icn_addcard(portbase, icn_id, icn_id2));
}
/*
 * Module unload: stop all cards, notify the link-level, reset the
 * hardware, release I/O regions and queues, free the card structs and
 * finally unmap the shared memory window.
 */
static void __exit icn_exit(void)
{
	isdn_ctrl cmd;
	icn_card *card = cards;
	icn_card *last, *tmpcard;
	int i;
	unsigned long flags;
	icn_stopallcards();
	while (card) {
		cmd.command = ISDN_STAT_UNLOAD;
		cmd.driver = card->myid;
		card->interface.statcallb(&cmd);
		spin_lock_irqsave(&card->lock, flags);
		if (card->rvalid) {
			OUTB_P(0, ICN_RUN);	/* Reset Controller */
			OUTB_P(0, ICN_MAPRAM);	/* Disable RAM */
			/* both halves of a 4B share one I/O region:
			 * release it only once (on the second half) */
			if (card->secondhalf || (!card->doubleS0)) {
				release_region(card->port, ICN_PORTLEN);
				card->rvalid = 0;
			}
			for (i = 0; i < ICN_BCH; i++)
				icn_free_queue(card, i);
		}
		tmpcard = card->next;
		spin_unlock_irqrestore(&card->lock, flags);
		card = tmpcard;
	}
	/* second pass: free the card structs themselves */
	card = cards;
	cards = NULL;
	while (card) {
		last = card;
		card = card->next;
		kfree(last);
	}
	if (dev.mvalid) {
		iounmap(dev.shmem);
		release_mem_region(dev.memaddr, 0x4000);
	}
	printk(KERN_NOTICE "ICN-ISDN-driver unloaded\n");
}
/* Module entry points: icn_init() at load, icn_exit() at unload. */
module_init(icn_init);
module_exit(icn_exit);
| gpl-2.0 |
erorcun/android_kernel_oneplus_msm8974-3.10 | drivers/isdn/icn/icn.c | 7382 | 42513 | /* $Id: icn.c,v 1.65.6.8 2001/09/23 22:24:55 kai Exp $
*
* ISDN low-level module for the ICN active ISDN-Card.
*
* Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include "icn.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
static int portbase = ICN_BASEADDR;
static unsigned long membase = ICN_MEMADDR;
static char *icn_id = "\0";
static char *icn_id2 = "\0";
MODULE_DESCRIPTION("ISDN4Linux: Driver for ICN active ISDN card");
MODULE_AUTHOR("Fritz Elfert");
MODULE_LICENSE("GPL");
module_param(portbase, int, 0);
MODULE_PARM_DESC(portbase, "Port address of first card");
module_param(membase, ulong, 0);
MODULE_PARM_DESC(membase, "Shared memory address of all cards");
module_param(icn_id, charp, 0);
MODULE_PARM_DESC(icn_id, "ID-String of first card");
module_param(icn_id2, charp, 0);
MODULE_PARM_DESC(icn_id2, "ID-String of first card, second S0 (4B only)");
/*
* Verbose bootcode- and protocol-downloading.
*/
#undef BOOT_DEBUG
/*
* Verbose Shmem-Mapping.
*/
#undef MAP_DEBUG
static char
*revision = "$Revision: 1.65.6.8 $";
static int icn_addcard(int, char *, char *);
/*
* Free send-queue completely.
* Parameter:
* card = pointer to card struct
* channel = channel number
*/
/* Drain a channel's send queue and drop any partially-sent skb. */
static void
icn_free_queue(icn_card *card, int channel)
{
	struct sk_buff *pending;

	skb_queue_purge(&card->spqueue[channel]);
	card->xlen[channel] = 0;
	card->sndcount[channel] = 0;
	pending = card->xskb[channel];
	if (pending) {
		card->xskb[channel] = NULL;
		dev_kfree_skb(pending);
	}
}
/* Put a value into a shift-register, highest bit first.
* Parameters:
* port = port for output (bit 0 is significant)
* val = value to be output
* firstbit = Bit-Number of highest bit
* bitcount = Number of bits to output
*/
static inline void
icn_shiftout(unsigned short port,
	     unsigned long val,
	     int firstbit,
	     int bitcount)
{
	int bit;

	/* Emit 'bitcount' bits of 'val', MSB (bit 'firstbit') first; each
	 * bit is written to bit 0 of the port as 0xff (set) or 0 (clear). */
	for (bit = firstbit; bit > firstbit - bitcount; bit--)
		OUTB_P((u_char) ((val >> bit) & 1) ? 0xff : 0, port);
}
/*
* disable a cards shared memory
*/
static inline void
icn_disable_ram(icn_card *card)
{
	/* writing 0 to the MAPRAM register unmaps the card's shared RAM */
	OUTB_P(0, ICN_MAPRAM);
}
/*
* enable a cards shared memory
*/
static inline void
icn_enable_ram(icn_card *card)
{
	/* writing 0xff to the MAPRAM register maps the card's shared RAM */
	OUTB_P(0xff, ICN_MAPRAM);
}
/*
* Map a cards channel0 (Bank0/Bank8) or channel1 (Bank4/Bank12)
*
* must called with holding the devlock
*/
static inline void
icn_map_channel(icn_card *card, int channel)
{
#ifdef MAP_DEBUG
	printk(KERN_DEBUG "icn_map_channel %d %d\n", dev.channel, channel);
#endif
	/* already mapped: nothing to do */
	if ((channel == dev.channel) && (card == dev.mcard))
		return;
	/* unmap whichever card currently owns the window */
	if (dev.mcard)
		icn_disable_ram(dev.mcard);
	icn_shiftout(ICN_BANK, chan2bank[channel], 3, 4);	/* Select Bank */
	icn_enable_ram(card);
	dev.mcard = card;
	dev.channel = channel;
#ifdef MAP_DEBUG
	printk(KERN_DEBUG "icn_map_channel done\n");
#endif
}
/*
* Lock a cards channel.
* Return 0 if requested card/channel is unmapped (failure).
* Return 1 on success.
*
* must called with holding the devlock
*/
static inline int
icn_lock_channel(icn_card *card, int channel)
{
	register int retval;
#ifdef MAP_DEBUG
	printk(KERN_DEBUG "icn_lock_channel %d\n", channel);
#endif
	/* the lock count is only raised if the requested card/channel is
	 * the one currently mapped; otherwise the lock attempt fails */
	if ((dev.channel == channel) && (card == dev.mcard)) {
		dev.chanlock++;
		retval = 1;
#ifdef MAP_DEBUG
		printk(KERN_DEBUG "icn_lock_channel %d OK\n", channel);
#endif
	} else {
		retval = 0;
#ifdef MAP_DEBUG
		printk(KERN_DEBUG "icn_lock_channel %d FAILED, dc=%d\n", channel, dev.channel);
#endif
	}
	return retval;
}
/*
* Release current card/channel lock
*
* must called with holding the devlock
*/
static inline void
__icn_release_channel(void)
{
#ifdef MAP_DEBUG
	printk(KERN_DEBUG "icn_release_channel l=%d\n", dev.chanlock);
#endif
	/* drop one lock reference; never go below zero */
	if (dev.chanlock > 0)
		dev.chanlock--;
}
/*
* Release current card/channel lock
*/
static inline void
icn_release_channel(void)
{
	ulong flags;
	/* locked variant of __icn_release_channel() */
	spin_lock_irqsave(&dev.devlock, flags);
	__icn_release_channel();
	spin_unlock_irqrestore(&dev.devlock, flags);
}
/*
* Try to map and lock a cards channel.
* Return 1 on success, 0 on failure.
*/
static inline int
icn_trymaplock_channel(icn_card *card, int channel)
{
	ulong flags;
#ifdef MAP_DEBUG
	printk(KERN_DEBUG "trymaplock c=%d dc=%d l=%d\n", channel, dev.channel,
	       dev.chanlock);
#endif
	spin_lock_irqsave(&dev.devlock, flags);
	/* succeed if nobody holds a lock, or if the requested channel is
	 * already the mapped-and-locked one (recursive lock) */
	if ((!dev.chanlock) ||
	    ((dev.channel == channel) && (dev.mcard == card))) {
		dev.chanlock++;
		icn_map_channel(card, channel);
		spin_unlock_irqrestore(&dev.devlock, flags);
#ifdef MAP_DEBUG
		printk(KERN_DEBUG "trymaplock %d OK\n", channel);
#endif
		return 1;
	}
	spin_unlock_irqrestore(&dev.devlock, flags);
#ifdef MAP_DEBUG
	printk(KERN_DEBUG "trymaplock %d FAILED\n", channel);
#endif
	return 0;
}
/*
* Release current card/channel lock,
* then map same or other channel without locking.
*/
static inline void
icn_maprelease_channel(icn_card *card, int channel)
{
	ulong flags;
#ifdef MAP_DEBUG
	printk(KERN_DEBUG "map_release c=%d l=%d\n", channel, dev.chanlock);
#endif
	spin_lock_irqsave(&dev.devlock, flags);
	if (dev.chanlock > 0)
		dev.chanlock--;
	/* once fully unlocked, remap the requested channel (unlocked) */
	if (!dev.chanlock)
		icn_map_channel(card, channel);
	spin_unlock_irqrestore(&dev.devlock, flags);
}
/* Get Data from the B-Channel, assemble fragmented packets and put them
* into receive-queue. Wake up any B-Channel-reading processes.
* This routine is called via timer-callback from icn_pollbchan().
*/
static void
icn_pollbchan_receive(int channel, icn_card *card)
{
	/* second half of a 4B uses banks 2/3 */
	int mch = channel + ((card->secondhalf) ? 2 : 0);
	int eflag;
	int cnt;
	struct sk_buff *skb;
	if (icn_trymaplock_channel(card, mch)) {
		while (rbavl) {
			cnt = readb(&rbuf_l);
			/* guard the 4000-byte reassembly buffer */
			if ((card->rcvidx[channel] + cnt) > 4000) {
				printk(KERN_WARNING
				       "icn: (%s) bogus packet on ch%d, dropping.\n",
				       CID,
				       channel + 1);
				card->rcvidx[channel] = 0;
				eflag = 0;
			} else {
				memcpy_fromio(&card->rcvbuf[channel][card->rcvidx[channel]],
					      &rbuf_d, cnt);
				card->rcvidx[channel] += cnt;
				/* fragment flag: non-zero means more follows */
				eflag = readb(&rbuf_f);
			}
			rbnext;
			icn_maprelease_channel(card, mch & 2);
			/* eflag == 0: packet complete, push it upstream */
			if (!eflag) {
				if ((cnt = card->rcvidx[channel])) {
					if (!(skb = dev_alloc_skb(cnt))) {
						printk(KERN_WARNING "icn: receive out of memory\n");
						break;
					}
					memcpy(skb_put(skb, cnt), card->rcvbuf[channel], cnt);
					card->rcvidx[channel] = 0;
					card->interface.rcvcallb_skb(card->myid, channel, skb);
				}
			}
			if (!icn_trymaplock_channel(card, mch))
				break;
		}
		icn_maprelease_channel(card, mch & 2);
	}
}
/* Send data-packet to B-Channel, split it up into fragments of
* ICN_FRAGSIZE length. If last fragment is sent out, signal
* success to upper layers via statcallb with ISDN_STAT_BSENT argument.
* This routine is called via timer-callback from icn_pollbchan() or
* directly from icn_sendbuf().
*/
static void
icn_pollbchan_send(int channel, icn_card *card)
{
	/* second half of a 4B uses banks 2/3 */
	int mch = channel + ((card->secondhalf) ? 2 : 0);
	int cnt;
	unsigned long flags;
	struct sk_buff *skb;
	isdn_ctrl cmd;
	/* nothing queued and no partial skb pending: nothing to do */
	if (!(card->sndcount[channel] || card->xskb[channel] ||
	      !skb_queue_empty(&card->spqueue[channel])))
		return;
	if (icn_trymaplock_channel(card, mch)) {
		while (sbfree &&
		       (card->sndcount[channel] ||
			!skb_queue_empty(&card->spqueue[channel]) ||
			card->xskb[channel])) {
			/* xmit_lock serializes senders on this channel */
			spin_lock_irqsave(&card->lock, flags);
			if (card->xmit_lock[channel]) {
				spin_unlock_irqrestore(&card->lock, flags);
				break;
			}
			card->xmit_lock[channel]++;
			spin_unlock_irqrestore(&card->lock, flags);
			/* resume a partially-sent skb, else dequeue a new one */
			skb = card->xskb[channel];
			if (!skb) {
				skb = skb_dequeue(&card->spqueue[channel]);
				if (skb) {
					/* Pop ACK-flag off skb.
					 * Store length to xlen.
					 */
					if (*(skb_pull(skb, 1)))
						card->xlen[channel] = skb->len;
					else
						card->xlen[channel] = 0;
				}
			}
			if (!skb)
				break;
			/* fragment flag tells the card more data follows */
			if (skb->len > ICN_FRAGSIZE) {
				writeb(0xff, &sbuf_f);
				cnt = ICN_FRAGSIZE;
			} else {
				writeb(0x0, &sbuf_f);
				cnt = skb->len;
			}
			writeb(cnt, &sbuf_l);
			memcpy_toio(&sbuf_d, skb->data, cnt);
			skb_pull(skb, cnt);
			sbnext;	/* switch to next buffer */
			icn_maprelease_channel(card, mch & 2);
			spin_lock_irqsave(&card->lock, flags);
			card->sndcount[channel] -= cnt;
			if (!skb->len) {
				/* skb fully sent: free it and, if an ACK was
				 * requested (xlen != 0), report BSENT */
				if (card->xskb[channel])
					card->xskb[channel] = NULL;
				card->xmit_lock[channel] = 0;
				spin_unlock_irqrestore(&card->lock, flags);
				dev_kfree_skb(skb);
				if (card->xlen[channel]) {
					cmd.command = ISDN_STAT_BSENT;
					cmd.driver = card->myid;
					cmd.arg = channel;
					cmd.parm.length = card->xlen[channel];
					card->interface.statcallb(&cmd);
				}
			} else {
				/* remember the remainder for the next pass */
				card->xskb[channel] = skb;
				card->xmit_lock[channel] = 0;
				spin_unlock_irqrestore(&card->lock, flags);
			}
			if (!icn_trymaplock_channel(card, mch))
				break;
		}
		icn_maprelease_channel(card, mch & 2);
	}
}
/* Send/Receive Data to/from the B-Channel.
* This routine is called via timer-callback.
* It schedules itself while any B-Channel is open.
*/
static void
icn_pollbchan(unsigned long data)
{
	icn_card *card = (icn_card *) data;
	unsigned long flags;
	if (card->flags & ICN_FLAGS_B1ACTIVE) {
		icn_pollbchan_receive(0, card);
		icn_pollbchan_send(0, card);
	}
	if (card->flags & ICN_FLAGS_B2ACTIVE) {
		icn_pollbchan_receive(1, card);
		icn_pollbchan_send(1, card);
	}
	if (card->flags & (ICN_FLAGS_B1ACTIVE | ICN_FLAGS_B2ACTIVE)) {
		/* schedule b-channel polling again */
		spin_lock_irqsave(&card->lock, flags);
		mod_timer(&card->rb_timer, jiffies + ICN_TIMER_BCREAD);
		card->flags |= ICN_FLAGS_RBTIMER;
		spin_unlock_irqrestore(&card->lock, flags);
	} else
		/* no active channel left: stop rescheduling */
		card->flags &= ~ICN_FLAGS_RBTIMER;
}
/* Mapping entry: status-string prefix from the card -> link-level
 * status code and internal action number (dispatched in icn_parse_status()). */
typedef struct icn_stat {
	char *statstr;	/* prefix matched at the start of a status line */
	int command;	/* ISDN_STAT_* code reported to the link-level */
	int action;	/* dispatch index inside icn_parse_status() */
} icn_stat;
/* *INDENT-OFF* */
/* Table of recognized card status messages; scanned linearly until the
 * first prefix match, terminated by the NULL sentinel entry. */
static icn_stat icn_stat_table[] =
{
	{"BCON_", ISDN_STAT_BCONN, 1},	/* B-Channel connected */
	{"BDIS_", ISDN_STAT_BHUP, 2},	/* B-Channel disconnected */
	/*
	** add d-channel connect and disconnect support to link-level
	*/
	{"DCON_", ISDN_STAT_DCONN, 10},	/* D-Channel connected */
	{"DDIS_", ISDN_STAT_DHUP, 11},	/* D-Channel disconnected */
	{"DCAL_I", ISDN_STAT_ICALL, 3},	/* Incoming call dialup-line */
	{"DSCA_I", ISDN_STAT_ICALL, 3},	/* Incoming call 1TR6-SPV */
	{"FCALL", ISDN_STAT_ICALL, 4},	/* Leased line connection up */
	{"CIF", ISDN_STAT_CINF, 5},	/* Charge-info, 1TR6-type */
	{"AOC", ISDN_STAT_CINF, 6},	/* Charge-info, DSS1-type */
	{"CAU", ISDN_STAT_CAUSE, 7},	/* Cause code */
	{"TEI OK", ISDN_STAT_RUN, 0},	/* Card connected to wallplug */
	{"E_L1: ACT FAIL", ISDN_STAT_BHUP, 8},	/* Layer-1 activation failed */
	{"E_L2: DATA LIN", ISDN_STAT_BHUP, 8},	/* Layer-2 data link lost */
	{"E_L1: ACTIVATION FAILED",
	 ISDN_STAT_BHUP, 8},	/* Layer-1 activation failed */
	{NULL, 0, -1}
};
/* *INDENT-ON* */
/*
 * Parse one complete status line received from the card for the given
 * channel, map it through icn_stat_table, update channel flags and
 * forward the corresponding isdn_ctrl event to the link level.
 * Unknown status strings are silently ignored.
 * Called from icn_polldchan() (timer context).
 */
static void
icn_parse_status(u_char *status, int channel, icn_card *card)
{
	icn_stat *s = icn_stat_table;
	int action = -1;
	unsigned long flags;
	isdn_ctrl cmd;

	/* Find the first table entry whose prefix matches the status line. */
	while (s->statstr) {
		if (!strncmp(status, s->statstr, strlen(s->statstr))) {
			cmd.command = s->command;
			action = s->action;
			break;
		}
		s++;
	}
	if (action == -1)	/* unrecognized message: drop it */
		return;
	cmd.driver = card->myid;
	cmd.arg = channel;
	switch (action) {
	case 11:	/* D-Channel disconnected */
		spin_lock_irqsave(&card->lock, flags);
		icn_free_queue(card, channel);
		card->rcvidx[channel] = 0;
		if (card->flags &
		    ((channel) ? ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE)) {
			isdn_ctrl ncmd;
			card->flags &= ~((channel) ?
					 ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE);
			memset(&ncmd, 0, sizeof(ncmd));
			ncmd.driver = card->myid;
			ncmd.arg = channel;
			ncmd.command = ISDN_STAT_BHUP;
			spin_unlock_irqrestore(&card->lock, flags);
			/* NOTE(review): ncmd (ISDN_STAT_BHUP) is built here but
			 * cmd is what gets delivered; looks like ncmd was meant.
			 * Preserved as-is -- confirm against link-level docs. */
			card->interface.statcallb(&cmd);
		} else
			spin_unlock_irqrestore(&card->lock, flags);
		break;
	case 1:		/* B-Channel connected */
		spin_lock_irqsave(&card->lock, flags);
		icn_free_queue(card, channel);
		card->flags |= (channel) ?
			ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE;
		spin_unlock_irqrestore(&card->lock, flags);
		break;
	case 2:		/* B-Channel disconnected */
		spin_lock_irqsave(&card->lock, flags);
		card->flags &= ~((channel) ?
				 ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE);
		icn_free_queue(card, channel);
		card->rcvidx[channel] = 0;
		spin_unlock_irqrestore(&card->lock, flags);
		break;
	case 3:		/* Incoming call: "phone,si1,si2,eazmsn" after prefix */
	{
		/* NOTE(review): assumes the firmware message is well formed --
		 * each strchr() result is dereferenced without a NULL check. */
		char *t = status + 6;
		char *s = strchr(t, ',');
		*s++ = '\0';
		strlcpy(cmd.parm.setup.phone, t,
			sizeof(cmd.parm.setup.phone));
		s = strchr(t = s, ',');
		*s++ = '\0';
		if (!strlen(t))
			cmd.parm.setup.si1 = 0;
		else
			cmd.parm.setup.si1 =
				simple_strtoul(t, NULL, 10);
		s = strchr(t = s, ',');
		*s++ = '\0';
		if (!strlen(t))
			cmd.parm.setup.si2 = 0;
		else
			cmd.parm.setup.si2 =
				simple_strtoul(t, NULL, 10);
		strlcpy(cmd.parm.setup.eazmsn, s,
			sizeof(cmd.parm.setup.eazmsn));
	}
	cmd.parm.setup.plan = 0;
	cmd.parm.setup.screen = 0;
	break;
	case 4:		/* Leased-line connection came up */
		sprintf(cmd.parm.setup.phone, "LEASED%d", card->myid);
		sprintf(cmd.parm.setup.eazmsn, "%d", channel + 1);
		cmd.parm.setup.si1 = 7;
		cmd.parm.setup.si2 = 0;
		cmd.parm.setup.plan = 0;
		cmd.parm.setup.screen = 0;
		break;
	case 5:		/* 1TR6 charge info: pass the raw digits */
		strlcpy(cmd.parm.num, status + 3, sizeof(cmd.parm.num));
		break;
	case 6:		/* DSS1 AOC charge info: hex value -> decimal string */
		snprintf(cmd.parm.num, sizeof(cmd.parm.num), "%d",
			 (int) simple_strtoul(status + 7, NULL, 16));
		break;
	case 7:		/* Cause code: reorder 4-char codes, pass others */
		status += 3;
		if (strlen(status) == 4)
			snprintf(cmd.parm.num, sizeof(cmd.parm.num), "%s%c%c",
				 status + 2, *status, *(status + 1));
		else
			strlcpy(cmd.parm.num, status + 1, sizeof(cmd.parm.num));
		break;
	case 8:		/* Layer-1/2 failure: hang up both channels (BHUP+DHUP) */
		spin_lock_irqsave(&card->lock, flags);
		card->flags &= ~ICN_FLAGS_B1ACTIVE;
		icn_free_queue(card, 0);
		card->rcvidx[0] = 0;
		spin_unlock_irqrestore(&card->lock, flags);
		cmd.arg = 0;
		cmd.driver = card->myid;
		card->interface.statcallb(&cmd);
		cmd.command = ISDN_STAT_DHUP;
		cmd.arg = 0;
		cmd.driver = card->myid;
		card->interface.statcallb(&cmd);
		cmd.command = ISDN_STAT_BHUP;
		spin_lock_irqsave(&card->lock, flags);
		card->flags &= ~ICN_FLAGS_B2ACTIVE;
		icn_free_queue(card, 1);
		card->rcvidx[1] = 0;
		spin_unlock_irqrestore(&card->lock, flags);
		cmd.arg = 1;
		cmd.driver = card->myid;
		card->interface.statcallb(&cmd);
		cmd.command = ISDN_STAT_DHUP;
		cmd.arg = 1;
		cmd.driver = card->myid;
		break;
	}
	/* Final delivery of the (possibly updated) command to the link level. */
	card->interface.statcallb(&cmd);
	return;
}
/*
 * Append one character to the card's status ring buffer; 0xff from the
 * firmware is translated to a newline.  On overrun the oldest character
 * is dropped by advancing the read pointer.
 */
static void
icn_putmsg(icn_card *card, unsigned char c)
{
	unsigned long lock_flags;
	unsigned char stored = (c == 0xff) ? '\n' : c;

	spin_lock_irqsave(&card->lock, lock_flags);
	*card->msg_buf_write = stored;
	card->msg_buf_write++;
	/* Writer caught up with reader: discard the oldest byte. */
	if (card->msg_buf_write == card->msg_buf_read) {
		card->msg_buf_read++;
		if (card->msg_buf_read > card->msg_buf_end)
			card->msg_buf_read = card->msg_buf;
	}
	/* Wrap the write pointer at the end of the ring. */
	if (card->msg_buf_write > card->msg_buf_end)
		card->msg_buf_write = card->msg_buf;
	spin_unlock_irqrestore(&card->lock, lock_flags);
}
/*
 * D-channel poll timer callback: drain the card's message buffer, feed
 * complete lines to icn_parse_status(), detect the "DRV1." firmware
 * banner (protocol type and firmware revision), notify the link level
 * of available status bytes, and (re)arm the B-channel and D-channel
 * poll timers.
 */
static void
icn_polldchan(unsigned long data)
{
	icn_card *card = (icn_card *) data;
	int mch = card->secondhalf ? 2 : 0;
	int avail = 0;
	int left;
	u_char c;
	int ch;
	unsigned long flags;
	int i;
	u_char *p;
	isdn_ctrl cmd;

	if (icn_trymaplock_channel(card, mch)) {
		avail = msg_avail;
		/* Consume 'avail' bytes from the shared-memory message ring. */
		for (left = avail, i = readb(&msg_o); left > 0; i++, left--) {
			c = readb(&dev.shmem->comm_buffers.iopc_buf[i & 0xff]);
			icn_putmsg(card, c);
			if (c == 0xff) {
				/* 0xff terminates a message line. */
				card->imsg[card->iptr] = 0;
				card->iptr = 0;
				/* "0<ch>;..." is a per-channel status line. */
				if (card->imsg[0] == '0' && card->imsg[1] >= '0' &&
				    card->imsg[1] <= '2' && card->imsg[2] == ';') {
					ch = (card->imsg[1] - '0') - 1;
					p = &card->imsg[3];
					icn_parse_status(p, ch, card);
				} else {
					p = card->imsg;
					/* Firmware banner: detect protocol and revision. */
					if (!strncmp(p, "DRV1.", 5)) {
						u_char vstr[10];
						u_char *q = vstr;
						printk(KERN_INFO "icn: (%s) %s\n", CID, p);
						if (!strncmp(p + 7, "TC", 2)) {
							card->ptype = ISDN_PTYPE_1TR6;
							card->interface.features |= ISDN_FEATURE_P_1TR6;
							printk(KERN_INFO
							       "icn: (%s) 1TR6-Protocol loaded and running\n", CID);
						}
						if (!strncmp(p + 7, "EC", 2)) {
							card->ptype = ISDN_PTYPE_EURO;
							card->interface.features |= ISDN_FEATURE_P_EURO;
							printk(KERN_INFO
							       "icn: (%s) Euro-Protocol loaded and running\n", CID);
						}
						/* Extract digits after "BRV" as the firmware
						 * revision (padded/truncated to 3 digits).
						 * NOTE(review): assumes "BRV" is present --
						 * strstr() result is used without NULL check. */
						p = strstr(card->imsg, "BRV") + 3;
						while (*p) {
							if (*p >= '0' && *p <= '9')
								*q++ = *p;
							p++;
						}
						*q = '\0';
						strcat(vstr, "000");
						vstr[3] = '\0';
						card->fw_rev = (int) simple_strtoul(vstr, NULL, 10);
						continue;
					}
				}
			} else {
				/* Accumulate the line; cap at 59 chars. */
				card->imsg[card->iptr] = c;
				if (card->iptr < 59)
					card->iptr++;
			}
		}
		/* Advance the ring's read index past the consumed bytes. */
		writeb((readb(&msg_o) + avail) & 0xff, &msg_o);
		icn_release_channel();
	}
	if (avail) {
		/* Tell the link level new status bytes can be read. */
		cmd.command = ISDN_STAT_STAVAIL;
		cmd.driver = card->myid;
		cmd.arg = avail;
		card->interface.statcallb(&cmd);
	}
	spin_lock_irqsave(&card->lock, flags);
	if (card->flags & (ICN_FLAGS_B1ACTIVE | ICN_FLAGS_B2ACTIVE))
		if (!(card->flags & ICN_FLAGS_RBTIMER)) {
			/* schedule b-channel polling */
			card->flags |= ICN_FLAGS_RBTIMER;
			del_timer(&card->rb_timer);
			card->rb_timer.function = icn_pollbchan;
			card->rb_timer.data = (unsigned long) card;
			card->rb_timer.expires = jiffies + ICN_TIMER_BCREAD;
			add_timer(&card->rb_timer);
		}
	/* schedule again */
	mod_timer(&card->st_timer, jiffies + ICN_TIMER_DCREAD);
	spin_unlock_irqrestore(&card->lock, flags);
}
/* Append a packet to the transmit buffer-queue.
 * Parameters:
 *   channel = Number of B-channel
 *   ack     = nonzero if the link level wants a transmit confirmation
 *   skb     = pointer to sk_buff
 *   card    = pointer to card-struct
 * Return:
 *   Number of bytes queued, 0 if the channel is inactive or the queue
 *   is full, -EINVAL if the packet is too large.
 */
static int
icn_sendbuf(int channel, int ack, struct sk_buff *skb, icn_card *card)
{
	int len = skb->len;
	unsigned long flags;
	struct sk_buff *nskb;

	if (len > 4000) {
		printk(KERN_WARNING
		       "icn: Send packet too large\n");
		return -EINVAL;
	}
	if (len) {
		/*
		 * BUGFIX: this test used to read
		 *     card->flags & (channel) ? ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE
		 * Since ?: binds looser than &, that parsed as
		 *     (card->flags & channel) ? B2 : B1
		 * which is a nonzero constant either way, so the
		 * "channel not active" check could never trigger.
		 * Parenthesize as done everywhere else in this driver.
		 */
		if (!(card->flags &
		      ((channel) ? ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE)))
			return 0;
		if (card->sndcount[channel] > ICN_MAX_SQUEUE)
			return 0;
#warning TODO test headroom or use skb->nb to flag ACK
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb) {
			/* Push ACK flag as one
			 * byte in front of data.
			 */
			*(skb_push(nskb, 1)) = ack ? 1 : 0;
			skb_queue_tail(&card->spqueue[channel], nskb);
			dev_kfree_skb(skb);
		} else
			len = 0;	/* clone failed: report nothing queued */
		spin_lock_irqsave(&card->lock, flags);
		card->sndcount[channel] += len;
		spin_unlock_irqrestore(&card->lock, flags);
	}
	return len;
}
/*
 * Check card's status after starting the bootstrap loader.
 * On entry, the card's shared memory has already to be mapped.
 * Return:
 *   0 on success (boot loader signalled ready)
 *   -EIO on failure (timeout)
 */
static int
icn_check_loader(int cardnumber)
{
	int tries = 0;

	for (;;) {
#ifdef BOOT_DEBUG
		printk(KERN_DEBUG "Loader %d ?\n", cardnumber);
#endif
		/* Both handshake bytes cleared means the loader is ready. */
		if (!readb(&dev.shmem->data_control.scns) &&
		    !readb(&dev.shmem->data_control.scnr)) {
#ifdef BOOT_DEBUG
			printk(KERN_DEBUG "Loader %d OK\n", cardnumber);
#endif
			icn_release_channel();
			return 0;
		}
		if (tries++ > 5) {
			printk(KERN_WARNING
			       "icn: Boot-Loader %d timed out.\n",
			       cardnumber);
			icn_release_channel();
			return -EIO;
		}
#ifdef BOOT_DEBUG
		printk(KERN_DEBUG "Loader %d TO?\n", cardnumber);
#endif
		msleep_interruptible(ICN_BOOT_TIMEOUT1);
	}
}
/* Load the boot-code into the interface-card's memory and start it.
* Always called from user-process.
*
* Parameters:
* buffer = pointer to packet
* Return:
* 0 if successfully loaded
*/
/*
 * SLEEP(sec): in BOOT_DEBUG builds, delay 'sec' seconds in interruptible
 * one-second steps and log the call; otherwise it compiles to nothing.
 */
#ifdef BOOT_DEBUG
#define SLEEP(sec) { \
		int slsec = sec; \
		printk(KERN_DEBUG "SLEEP(%d)\n", slsec); \
		while (slsec) { \
			msleep_interruptible(1000); \
			slsec--; \
		} \
	}
#else
#define SLEEP(sec)
#endif
/*
 * Copy the stage-1 boot loader from userspace into the card's shared
 * memory and start it.  Claims the card's I/O ports and the shared
 * memory window on first use.  For double-S0 cards the loader is copied
 * into both memory banks.
 * Returns 0 on success, -ENOMEM/-EFAULT/-EBUSY/-EIO on failure.
 */
static int
icn_loadboot(u_char __user *buffer, icn_card *card)
{
	int ret;
	u_char *codebuf;
	unsigned long flags;

#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "icn_loadboot called, buffaddr=%08lx\n", (ulong) buffer);
#endif
	if (!(codebuf = kmalloc(ICN_CODE_STAGE1, GFP_KERNEL))) {
		printk(KERN_WARNING "icn: Could not allocate code buffer\n");
		ret = -ENOMEM;
		goto out;
	}
	if (copy_from_user(codebuf, buffer, ICN_CODE_STAGE1)) {
		ret = -EFAULT;
		goto out_kfree;
	}
	/* Claim the card's I/O port range once. */
	if (!card->rvalid) {
		if (!request_region(card->port, ICN_PORTLEN, card->regname)) {
			printk(KERN_WARNING
			       "icn: (%s) ports 0x%03x-0x%03x in use.\n",
			       CID,
			       card->port,
			       card->port + ICN_PORTLEN);
			ret = -EBUSY;
			goto out_kfree;
		}
		card->rvalid = 1;
		if (card->doubleS0)
			card->other->rvalid = 1;
	}
	/* Claim and map the 16k shared-memory window once (shared by all cards). */
	if (!dev.mvalid) {
		if (!request_mem_region(dev.memaddr, 0x4000, "icn-isdn (all cards)")) {
			printk(KERN_WARNING
			       "icn: memory at 0x%08lx in use.\n", dev.memaddr);
			ret = -EBUSY;
			goto out_kfree;
		}
		dev.shmem = ioremap(dev.memaddr, 0x4000);
		dev.mvalid = 1;
	}
	OUTB_P(0, ICN_RUN);	/* Reset Controller */
	OUTB_P(0, ICN_MAPRAM);	/* Disable RAM */
	icn_shiftout(ICN_CFG, 0x0f, 3, 4);	/* Windowsize= 16k */
	icn_shiftout(ICN_CFG, dev.memaddr, 23, 10);	/* Set RAM-Addr. */
#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "shmem=%08lx\n", dev.memaddr);
#endif
	SLEEP(1);
#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "Map Bank 0\n");
#endif
	spin_lock_irqsave(&dev.devlock, flags);
	icn_map_channel(card, 0);	/* Select Bank 0 */
	icn_lock_channel(card, 0);	/* Lock Bank 0 */
	spin_unlock_irqrestore(&dev.devlock, flags);
	SLEEP(1);
	memcpy_toio(dev.shmem, codebuf, ICN_CODE_STAGE1);	/* Copy code */
#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "Bootloader transferred\n");
#endif
	if (card->doubleS0) {
		/* Second half of a 4B card: repeat the copy into bank 8. */
		SLEEP(1);
#ifdef BOOT_DEBUG
		printk(KERN_DEBUG "Map Bank 8\n");
#endif
		spin_lock_irqsave(&dev.devlock, flags);
		__icn_release_channel();
		icn_map_channel(card, 2);	/* Select Bank 8 */
		icn_lock_channel(card, 2);	/* Lock Bank 8 */
		spin_unlock_irqrestore(&dev.devlock, flags);
		SLEEP(1);
		memcpy_toio(dev.shmem, codebuf, ICN_CODE_STAGE1);	/* Copy code */
#ifdef BOOT_DEBUG
		printk(KERN_DEBUG "Bootloader transferred\n");
#endif
	}
	SLEEP(1);
	OUTB_P(0xff, ICN_RUN);	/* Start Boot-Code */
	if ((ret = icn_check_loader(card->doubleS0 ? 2 : 1))) {
		goto out_kfree;
	}
	if (!card->doubleS0) {
		ret = 0;
		goto out_kfree;
	}
	/* reached only, if we have a Double-S0-Card */
#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "Map Bank 0\n");
#endif
	spin_lock_irqsave(&dev.devlock, flags);
	icn_map_channel(card, 0);	/* Select Bank 0 */
	icn_lock_channel(card, 0);	/* Lock Bank 0 */
	spin_unlock_irqrestore(&dev.devlock, flags);
	SLEEP(1);
	ret = (icn_check_loader(1));
out_kfree:
	kfree(codebuf);
out:
	return ret;
}
/*
 * Copy the stage-2 protocol firmware from userspace to the card in
 * 256-byte chunks, wait for the card to accept it, then install the
 * D-channel poll timer(s) and mark the card running.
 * Returns 0 on success, -EFAULT on bad user buffer, -EIO on timeout.
 */
static int
icn_loadproto(u_char __user *buffer, icn_card *card)
{
	register u_char __user *p = buffer;
	u_char codebuf[256];
	uint left = ICN_CODE_STAGE2;
	uint cnt;
	int timer;
	unsigned long flags;

#ifdef BOOT_DEBUG
	printk(KERN_DEBUG "icn_loadproto called\n");
#endif
	if (!access_ok(VERIFY_READ, buffer, ICN_CODE_STAGE2))
		return -EFAULT;
	timer = 0;
	/* Map and lock the bank belonging to this (half-)card. */
	spin_lock_irqsave(&dev.devlock, flags);
	if (card->secondhalf) {
		icn_map_channel(card, 2);
		icn_lock_channel(card, 2);
	} else {
		icn_map_channel(card, 0);
		icn_lock_channel(card, 0);
	}
	spin_unlock_irqrestore(&dev.devlock, flags);
	while (left) {
		if (sbfree) {	/* If there is a free buffer... */
			cnt = left;
			if (cnt > 256)
				cnt = 256;
			if (copy_from_user(codebuf, p, cnt)) {
				icn_maprelease_channel(card, 0);
				return -EFAULT;
			}
			memcpy_toio(&sbuf_l, codebuf, cnt);	/* copy data */
			sbnext;	/* switch to next buffer */
			p += cnt;
			left -= cnt;
			timer = 0;
		} else {
#ifdef BOOT_DEBUG
			printk(KERN_DEBUG "boot 2 !sbfree\n");
#endif
			/* No free buffer: retry a few times, then give up. */
			if (timer++ > 5) {
				icn_maprelease_channel(card, 0);
				return -EIO;
			}
			schedule_timeout_interruptible(10);
		}
	}
	writeb(0x20, &sbuf_n);
	timer = 0;
	/* Wait for the protocol code to come up (command ring goes idle). */
	while (1) {
		if (readb(&cmd_o) || readb(&cmd_i)) {
#ifdef BOOT_DEBUG
			printk(KERN_DEBUG "Proto?\n");
#endif
			if (timer++ > 5) {
				printk(KERN_WARNING
				       "icn: (%s) Protocol timed out.\n",
				       CID);
#ifdef BOOT_DEBUG
				printk(KERN_DEBUG "Proto TO!\n");
#endif
				icn_maprelease_channel(card, 0);
				return -EIO;
			}
#ifdef BOOT_DEBUG
			printk(KERN_DEBUG "Proto TO?\n");
#endif
			msleep_interruptible(ICN_BOOT_TIMEOUT1);
		} else {
			/* Only the last half of a double card (or a single
			 * card) installs the poll timers. */
			if ((card->secondhalf) || (!card->doubleS0)) {
#ifdef BOOT_DEBUG
				printk(KERN_DEBUG "Proto loaded, install poll-timer %d\n",
				       card->secondhalf);
#endif
				spin_lock_irqsave(&card->lock, flags);
				init_timer(&card->st_timer);
				card->st_timer.expires = jiffies + ICN_TIMER_DCREAD;
				card->st_timer.function = icn_polldchan;
				card->st_timer.data = (unsigned long) card;
				add_timer(&card->st_timer);
				card->flags |= ICN_FLAGS_RUNNING;
				if (card->doubleS0) {
					init_timer(&card->other->st_timer);
					card->other->st_timer.expires = jiffies + ICN_TIMER_DCREAD;
					card->other->st_timer.function = icn_polldchan;
					card->other->st_timer.data = (unsigned long) card->other;
					add_timer(&card->other->st_timer);
					card->other->flags |= ICN_FLAGS_RUNNING;
				}
				spin_unlock_irqrestore(&card->lock, flags);
			}
			icn_maprelease_channel(card, 0);
			return 0;
		}
	}
}
/* Read the Status-replies from the Interface.
 * Copies up to 'len' queued status bytes to userspace, stopping early
 * when the ring buffer is drained.  Returns the number of bytes copied
 * or -EFAULT on a failed user-space access.
 */
static int
icn_readstatus(u_char __user *buf, int len, icn_card *card)
{
	int copied = 0;
	u_char __user *dst = buf;

	while (copied < len) {
		if (card->msg_buf_read == card->msg_buf_write)
			break;	/* ring empty: return what we have */
		if (put_user(*card->msg_buf_read++, dst))
			return -EFAULT;
		if (card->msg_buf_read > card->msg_buf_end)
			card->msg_buf_read = card->msg_buf;
		dst++;
		copied++;
	}
	return copied;
}
/* Put command-strings into the command-queue of the Interface.
 * Copies 'len' bytes from 'buf' (userspace if 'user' != 0) into the
 * card's command ring in chunks limited by the free ring space, echoing
 * everything into the status ring for readers.  Retries with a short
 * delay while the ring is full; gives up after ~20 retries.
 * Returns the number of bytes actually written or -EFAULT.
 */
static int
icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
{
	int mch = card->secondhalf ? 2 : 0;
	int pp;
	int i;
	int count;
	int xcount;
	int ocount;
	int loop;
	unsigned long flags;
	int lastmap_channel;
	struct icn_card *lastmap_card;
	u_char *p;
	isdn_ctrl cmd;
	u_char msg[0x100];

	ocount = 1;
	xcount = loop = 0;
	while (len) {
		/* Write at most as much as the command ring can take. */
		count = cmd_free;
		if (count > len)
			count = len;
		if (user) {
			if (copy_from_user(msg, buf, count))
				return -EFAULT;
		} else
			memcpy(msg, buf, count);
		/* Temporarily map our bank; restore the previous mapping after. */
		spin_lock_irqsave(&dev.devlock, flags);
		lastmap_card = dev.mcard;
		lastmap_channel = dev.channel;
		icn_map_channel(card, mch);
		icn_putmsg(card, '>');
		for (p = msg, pp = readb(&cmd_i), i = count; i > 0; i--, p++, pp
			     ++) {
			writeb((*p == '\n') ? 0xff : *p,
			       &dev.shmem->comm_buffers.pcio_buf[pp & 0xff]);
			len--;
			xcount++;
			icn_putmsg(card, *p);
			if ((*p == '\n') && (i > 1)) {
				icn_putmsg(card, '>');
				ocount++;
			}
			ocount++;
		}
		/* Advance the ring's write index past the copied bytes. */
		writeb((readb(&cmd_i) + count) & 0xff, &cmd_i);
		if (lastmap_card)
			icn_map_channel(lastmap_card, lastmap_channel);
		spin_unlock_irqrestore(&dev.devlock, flags);
		if (len) {
			mdelay(1);
			if (loop++ > 20)
				break;
		} else
			break;
	}
	if (len && (!user))
		printk(KERN_WARNING "icn: writemsg incomplete!\n");
	/* Tell the link level how many status bytes were echoed. */
	cmd.command = ISDN_STAT_STAVAIL;
	cmd.driver = card->myid;
	cmd.arg = ocount;
	card->interface.statcallb(&cmd);
	return xcount;
}
/*
 * Delete card's pending timers, send STOP to linklevel.
 * For a double-S0 card the sibling half is stopped as well.
 */
static void
icn_stopcard(icn_card *card)
{
	unsigned long flags;
	isdn_ctrl cmd;

	spin_lock_irqsave(&card->lock, flags);
	if (!(card->flags & ICN_FLAGS_RUNNING)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}
	card->flags &= ~ICN_FLAGS_RUNNING;
	del_timer(&card->st_timer);
	del_timer(&card->rb_timer);
	spin_unlock_irqrestore(&card->lock, flags);
	cmd.command = ISDN_STAT_STOP;
	cmd.driver = card->myid;
	card->interface.statcallb(&cmd);
	if (card->doubleS0)
		icn_stopcard(card->other);
}
/* Stop every registered card in the global list. */
static void
icn_stopallcards(void)
{
	icn_card *c;

	for (c = cards; c; c = c->next)
		icn_stopcard(c);
}
/*
 * Unmap all cards, because some of them may be mapped accidentally during
 * autoprobing of some network drivers (SMC-driver?)
 */
static void
icn_disable_cards(void)
{
	icn_card *card;

	/* Note: the local must be named 'card' -- the CID macro expands to
	 * card->interface.id. */
	for (card = cards; card; card = card->next) {
		if (!request_region(card->port, ICN_PORTLEN, "icn-isdn")) {
			printk(KERN_WARNING
			       "icn: (%s) ports 0x%03x-0x%03x in use.\n",
			       CID,
			       card->port,
			       card->port + ICN_PORTLEN);
			continue;
		}
		OUTB_P(0, ICN_RUN);	/* Reset Controller */
		OUTB_P(0, ICN_MAPRAM);	/* Disable RAM */
		release_region(card->port, ICN_PORTLEN);
	}
}
/*
 * Main command entry point from the link level: handles ioctls
 * (memory/port configuration, firmware load, leased-line setup) and the
 * standard ISDN commands (dial, accept, hangup, EAZ and layer-2/3
 * protocol selection) by writing command strings to the card.
 * Returns 0 or a negative errno; some ioctls return positive values.
 */
static int
icn_command(isdn_ctrl *c, icn_card *card)
{
	ulong a;
	ulong flags;
	int i;
	char cbuf[60];
	isdn_ctrl cmd;
	icn_cdef cdef;
	char __user *arg;

	switch (c->command) {
	case ISDN_CMD_IOCTL:
		/* parm.num carries a user pointer encoded as an ulong. */
		memcpy(&a, c->parm.num, sizeof(ulong));
		arg = (char __user *)a;
		switch (c->arg) {
		case ICN_IOCTL_SETMMIO:
			/* Move the shared-memory window; availability is
			 * probed, all cards stopped, old mapping dropped. */
			if (dev.memaddr != (a & 0x0ffc000)) {
				if (!request_mem_region(a & 0x0ffc000, 0x4000, "icn-isdn (all cards)")) {
					printk(KERN_WARNING
					       "icn: memory at 0x%08lx in use.\n",
					       a & 0x0ffc000);
					return -EINVAL;
				}
				release_mem_region(a & 0x0ffc000, 0x4000);
				icn_stopallcards();
				spin_lock_irqsave(&card->lock, flags);
				if (dev.mvalid) {
					iounmap(dev.shmem);
					release_mem_region(dev.memaddr, 0x4000);
				}
				dev.mvalid = 0;
				dev.memaddr = a & 0x0ffc000;
				spin_unlock_irqrestore(&card->lock, flags);
				printk(KERN_INFO
				       "icn: (%s) mmio set to 0x%08lx\n",
				       CID,
				       dev.memaddr);
			}
			break;
		case ICN_IOCTL_GETMMIO:
			return (long) dev.memaddr;
		case ICN_IOCTL_SETPORT:
			/* Only the hardware's jumper-selectable ports allowed. */
			if (a == 0x300 || a == 0x310 || a == 0x320 || a == 0x330
			    || a == 0x340 || a == 0x350 || a == 0x360 ||
			    a == 0x308 || a == 0x318 || a == 0x328 || a == 0x338
			    || a == 0x348 || a == 0x358 || a == 0x368) {
				if (card->port != (unsigned short) a) {
					if (!request_region((unsigned short) a, ICN_PORTLEN, "icn-isdn")) {
						printk(KERN_WARNING
						       "icn: (%s) ports 0x%03x-0x%03x in use.\n",
						       CID, (int) a, (int) a + ICN_PORTLEN);
						return -EINVAL;
					}
					release_region((unsigned short) a, ICN_PORTLEN);
					icn_stopcard(card);
					spin_lock_irqsave(&card->lock, flags);
					if (card->rvalid)
						release_region(card->port, ICN_PORTLEN);
					card->port = (unsigned short) a;
					card->rvalid = 0;
					if (card->doubleS0) {
						card->other->port = (unsigned short) a;
						card->other->rvalid = 0;
					}
					spin_unlock_irqrestore(&card->lock, flags);
					printk(KERN_INFO
					       "icn: (%s) port set to 0x%03x\n",
					       CID, card->port);
				}
			} else
				return -EINVAL;
			break;
		case ICN_IOCTL_GETPORT:
			return (int) card->port;
		case ICN_IOCTL_GETDOUBLE:
			return (int) card->doubleS0;
		case ICN_IOCTL_DEBUGVAR:
			/* Copy the card and dev struct addresses to userspace. */
			if (copy_to_user(arg,
					 &card,
					 sizeof(ulong)))
				return -EFAULT;
			a += sizeof(ulong);
			{
				ulong l = (ulong)&dev;
				/* NOTE(review): 'a' was advanced but 'arg' was
				 * not, so this overwrites the first word --
				 * preserved as-is; confirm intended layout. */
				if (copy_to_user(arg,
						 &l,
						 sizeof(ulong)))
					return -EFAULT;
			}
			return 0;
		case ICN_IOCTL_LOADBOOT:
			if (dev.firstload) {
				icn_disable_cards();
				dev.firstload = 0;
			}
			icn_stopcard(card);
			return (icn_loadboot(arg, card));
		case ICN_IOCTL_LOADPROTO:
			icn_stopcard(card);
			if ((i = (icn_loadproto(arg, card))))
				return i;
			if (card->doubleS0)
				i = icn_loadproto(arg + ICN_CODE_STAGE2, card->other);
			return i;
			break;
		case ICN_IOCTL_ADDCARD:
			if (!dev.firstload)
				return -EBUSY;
			if (copy_from_user(&cdef,
					   arg,
					   sizeof(cdef)))
				return -EFAULT;
			return (icn_addcard(cdef.port, cdef.id1, cdef.id2));
			break;
		case ICN_IOCTL_LEASEDCFG:
			if (a) {
				if (!card->leased) {
					card->leased = 1;
					/* Wait for the protocol to identify itself. */
					while (card->ptype == ISDN_PTYPE_UNKNOWN) {
						msleep_interruptible(ICN_BOOT_TIMEOUT1);
					}
					msleep_interruptible(ICN_BOOT_TIMEOUT1);
					sprintf(cbuf, "00;FV2ON\n01;EAZ%c\n02;EAZ%c\n",
						(a & 1) ? '1' : 'C', (a & 2) ? '2' : 'C');
					i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
					printk(KERN_INFO
					       "icn: (%s) Leased-line mode enabled\n",
					       CID);
					cmd.command = ISDN_STAT_RUN;
					cmd.driver = card->myid;
					cmd.arg = 0;
					card->interface.statcallb(&cmd);
				}
			} else {
				if (card->leased) {
					card->leased = 0;
					sprintf(cbuf, "00;FV2OFF\n");
					i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
					printk(KERN_INFO
					       "icn: (%s) Leased-line mode disabled\n",
					       CID);
					cmd.command = ISDN_STAT_RUN;
					cmd.driver = card->myid;
					cmd.arg = 0;
					card->interface.statcallb(&cmd);
				}
			}
			return 0;
		default:
			return -EINVAL;
		}
		break;
	case ISDN_CMD_DIAL:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (card->leased)
			break;
		if ((c->arg & 255) < ICN_BCH) {
			char *p;
			char dial[50];
			char dcode[4];

			a = c->arg;
			p = c->parm.setup.phone;
			if (*p == 's' || *p == 'S') {
				/* Dial for SPV */
				p++;
				strcpy(dcode, "SCA");
			} else
				/* Normal Dial */
				strcpy(dcode, "CAL");
			strcpy(dial, p);
			sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
				dcode, dial, c->parm.setup.si1,
				c->parm.setup.si2, c->parm.setup.eazmsn);
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_ACCEPTD:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (c->arg < ICN_BCH) {
			a = c->arg + 1;
			/* Newer firmware selects layer-2 protocol explicitly. */
			if (card->fw_rev >= 300) {
				switch (card->l2_proto[a - 1]) {
				case ISDN_PROTO_L2_X75I:
					sprintf(cbuf, "%02d;BX75\n", (int) a);
					break;
				case ISDN_PROTO_L2_HDLC:
					sprintf(cbuf, "%02d;BTRA\n", (int) a);
					break;
				}
				/* NOTE(review): if l2_proto matches neither
				 * case, cbuf is sent uninitialized here --
				 * preserved as-is. */
				i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
			}
			sprintf(cbuf, "%02d;DCON_R\n", (int) a);
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_ACCEPTB:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (c->arg < ICN_BCH) {
			a = c->arg + 1;
			if (card->fw_rev >= 300)
				switch (card->l2_proto[a - 1]) {
				case ISDN_PROTO_L2_X75I:
					sprintf(cbuf, "%02d;BCON_R,BX75\n", (int) a);
					break;
				case ISDN_PROTO_L2_HDLC:
					sprintf(cbuf, "%02d;BCON_R,BTRA\n", (int) a);
					break;
				} else
				sprintf(cbuf, "%02d;BCON_R\n", (int) a);
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_HANGUP:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (c->arg < ICN_BCH) {
			a = c->arg + 1;
			sprintf(cbuf, "%02d;BDIS_R\n%02d;DDIS_R\n", (int) a, (int) a);
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_SETEAZ:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (card->leased)
			break;
		if (c->arg < ICN_BCH) {
			a = c->arg + 1;
			if (card->ptype == ISDN_PTYPE_EURO) {
				sprintf(cbuf, "%02d;MS%s%s\n", (int) a,
					c->parm.num[0] ? "N" : "ALL", c->parm.num);
			} else
				sprintf(cbuf, "%02d;EAZ%s\n", (int) a,
					c->parm.num[0] ? (char *)(c->parm.num) : "0123456789");
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_CLREAZ:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if (card->leased)
			break;
		if (c->arg < ICN_BCH) {
			a = c->arg + 1;
			if (card->ptype == ISDN_PTYPE_EURO)
				sprintf(cbuf, "%02d;MSNC\n", (int) a);
			else
				sprintf(cbuf, "%02d;EAZC\n", (int) a);
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
		}
		break;
	case ISDN_CMD_SETL2:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		if ((c->arg & 255) < ICN_BCH) {
			/* arg: low byte = channel, high byte = protocol id */
			a = c->arg;
			switch (a >> 8) {
			case ISDN_PROTO_L2_X75I:
				sprintf(cbuf, "%02d;BX75\n", (int) (a & 255) + 1);
				break;
			case ISDN_PROTO_L2_HDLC:
				sprintf(cbuf, "%02d;BTRA\n", (int) (a & 255) + 1);
				break;
			default:
				return -EINVAL;
			}
			i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
			card->l2_proto[a & 255] = (a >> 8);
		}
		break;
	case ISDN_CMD_SETL3:
		if (!(card->flags & ICN_FLAGS_RUNNING))
			return -ENODEV;
		return 0;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
* Find card with given driverId
*/
static inline icn_card *
icn_findcard(int driverid)
{
icn_card *p = cards;
while (p) {
if (p->myid == driverid)
return p;
p = p->next;
}
return (icn_card *) 0;
}
/*
 * Wrapper functions for interface to linklevel
 */
static int
if_command(isdn_ctrl *c)
{
	icn_card *card = icn_findcard(c->driver);

	if (!card) {
		printk(KERN_ERR
		       "icn: if_command %d called with invalid driverId %d!\n",
		       c->command, c->driver);
		return -ENODEV;
	}
	return icn_command(c, card);
}
/* Link-level entry: write a command string to the card for driver 'id'. */
static int
if_writecmd(const u_char __user *buf, int len, int id, int channel)
{
	icn_card *card = icn_findcard(id);

	if (!card) {
		printk(KERN_ERR
		       "icn: if_writecmd called with invalid driverId!\n");
		return -ENODEV;
	}
	if (!(card->flags & ICN_FLAGS_RUNNING))
		return -ENODEV;
	return icn_writecmd(buf, len, 1, card);
}
/* Link-level entry: read queued status bytes for driver 'id'. */
static int
if_readstatus(u_char __user *buf, int len, int id, int channel)
{
	icn_card *card = icn_findcard(id);

	if (!card) {
		printk(KERN_ERR
		       "icn: if_readstatus called with invalid driverId!\n");
		return -ENODEV;
	}
	if (!(card->flags & ICN_FLAGS_RUNNING))
		return -ENODEV;
	return icn_readstatus(buf, len, card);
}
/* Link-level entry: queue a data packet for transmission on 'channel'. */
static int
if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
{
	icn_card *card = icn_findcard(id);

	if (!card) {
		printk(KERN_ERR
		       "icn: if_sendbuf called with invalid driverId!\n");
		return -ENODEV;
	}
	if (!(card->flags & ICN_FLAGS_RUNNING))
		return -ENODEV;
	return icn_sendbuf(channel, ack, skb, card);
}
/*
 * Allocate a new card-struct, initialize it
 * link it into cards-list and register it at linklevel.
 * Returns the new card or NULL on allocation/registration failure.
 */
static icn_card *
icn_initcard(int port, char *id)
{
	icn_card *card;
	int i;

	if (!(card = kzalloc(sizeof(icn_card), GFP_KERNEL))) {
		printk(KERN_WARNING
		       "icn: (%s) Could not allocate card-struct.\n", id);
		return (icn_card *) 0;
	}
	spin_lock_init(&card->lock);
	card->port = port;
	card->interface.owner = THIS_MODULE;
	card->interface.hl_hdrlen = 1;	/* one byte reserved for the ACK flag */
	card->interface.channels = ICN_BCH;
	card->interface.maxbufsize = 4000;
	card->interface.command = if_command;
	card->interface.writebuf_skb = if_sendbuf;
	card->interface.writecmd = if_writecmd;
	card->interface.readstat = if_readstatus;
	card->interface.features = ISDN_FEATURE_L2_X75I |
		ISDN_FEATURE_L2_HDLC |
		ISDN_FEATURE_L3_TRANS |
		ISDN_FEATURE_P_UNKNOWN;
	card->ptype = ISDN_PTYPE_UNKNOWN;
	strlcpy(card->interface.id, id, sizeof(card->interface.id));
	/* Status ring buffer starts empty (read == write). */
	card->msg_buf_write = card->msg_buf;
	card->msg_buf_read = card->msg_buf;
	card->msg_buf_end = &card->msg_buf[sizeof(card->msg_buf) - 1];
	for (i = 0; i < ICN_BCH; i++) {
		card->l2_proto[i] = ISDN_PROTO_L2_X75I;
		skb_queue_head_init(&card->spqueue[i]);
	}
	/* Link into the global list before registering; unlink on failure. */
	card->next = cards;
	cards = card;
	if (!register_isdn(&card->interface)) {
		cards = cards->next;
		printk(KERN_WARNING
		       "icn: Unable to register %s\n", id);
		kfree(card);
		return (icn_card *) 0;
	}
	/* NOTE(review): register_isdn() appears to return the assigned driver
	 * id in interface.channels -- confirm against the isdn4linux API. */
	card->myid = card->interface.channels;
	sprintf(card->regname, "icn-isdn (%s)", card->interface.id);
	return card;
}
/*
 * Register one ICN-2B card (id2 empty) or both halves of an ICN-4B
 * card at the given port.  Returns 0 on (possibly partial) success,
 * -EIO if the first half could not be initialized.
 */
static int
icn_addcard(int port, char *id1, char *id2)
{
	icn_card *card;
	icn_card *card2;

	if (!(card = icn_initcard(port, id1))) {
		return -EIO;
	}
	if (!strlen(id2)) {
		printk(KERN_INFO
		       "icn: (%s) ICN-2B, port 0x%x added\n",
		       card->interface.id, port);
		return 0;
	}
	if (!(card2 = icn_initcard(port, id2))) {
		/*
		 * BUGFIX: this message used to print card2->interface.id,
		 * dereferencing the NULL pointer this branch just tested
		 * for.  Report the requested id string instead.
		 */
		printk(KERN_INFO
		       "icn: (%s) half ICN-4B, port 0x%x added\n",
		       id2, port);
		return 0;
	}
	/* Cross-link the two halves of the double-S0 card. */
	card->doubleS0 = 1;
	card->secondhalf = 0;
	card->other = card2;
	card2->doubleS0 = 1;
	card2->secondhalf = 1;
	card2->other = card;
	printk(KERN_INFO
	       "icn: (%s and %s) ICN-4B, port 0x%x added\n",
	       card->interface.id, card2->interface.id, port);
	return 0;
}
#ifndef MODULE
/*
 * Parse the "icn=" kernel command line option:
 *   icn=<portbase>[,<membase>[,<id1>[,<id2>]]]
 * The id strings are kept in static buffers referenced by icn_id/icn_id2.
 */
static int __init
icn_setup(char *line)
{
	char *p, *str;
	int ints[3];
	static char sid[20];
	static char sid2[20];

	str = get_options(line, 2, ints);
	if (ints[0])
		portbase = ints[1];
	if (ints[0] > 1)
		membase = (unsigned long)ints[2];
	if (str && *str) {
		/*
		 * BUGFIX: the id strings come from the (untrusted-length)
		 * kernel command line; the original unchecked strcpy could
		 * overflow the 20-byte static buffers.  Bound the copies.
		 */
		strlcpy(sid, str, sizeof(sid));
		icn_id = sid;
		if ((p = strchr(sid, ','))) {
			*p++ = 0;
			strlcpy(sid2, p, sizeof(sid2));
			icn_id2 = sid2;
		}
	}
	return (1);
}
__setup("icn=", icn_setup);
#endif /* MODULE */
/*
 * Module/driver init: reset the global device state, log the driver
 * revision (extracted from the CVS keyword string) and register the
 * card(s) configured via module parameters or the "icn=" option.
 */
static int __init icn_init(void)
{
	char *p;
	char rev[21];

	memset(&dev, 0, sizeof(icn_dev));
	dev.memaddr = (membase & 0x0ffc000);
	dev.channel = -1;
	dev.mcard = NULL;
	dev.firstload = 1;
	spin_lock_init(&dev.devlock);

	p = strchr(revision, ':');
	if (p) {
		/* Copy the text after ':' and cut it at the trailing '$'. */
		strncpy(rev, p + 1, 20);
		rev[20] = '\0';
		p = strchr(rev, '$');
		if (p)
			*p = 0;
	} else {
		strcpy(rev, " ??? ");
	}
	printk(KERN_NOTICE "ICN-ISDN-driver Rev%smem=0x%08lx\n", rev,
	       dev.memaddr);
	return icn_addcard(portbase, icn_id, icn_id2);
}
/*
 * Module exit: stop all cards, notify the link level of the unload,
 * reset the hardware, release I/O ports and queues, free all card
 * structures and unmap the shared-memory window.
 */
static void __exit icn_exit(void)
{
	isdn_ctrl cmd;
	icn_card *card = cards;
	icn_card *last, *tmpcard;
	int i;
	unsigned long flags;

	icn_stopallcards();
	/* First pass: notify link level, reset hardware, release resources. */
	while (card) {
		cmd.command = ISDN_STAT_UNLOAD;
		cmd.driver = card->myid;
		card->interface.statcallb(&cmd);
		spin_lock_irqsave(&card->lock, flags);
		if (card->rvalid) {
			OUTB_P(0, ICN_RUN);	/* Reset Controller */
			OUTB_P(0, ICN_MAPRAM);	/* Disable RAM */
			/* The port range is shared by both halves of a 4B
			 * card; release it only once (on the second half). */
			if (card->secondhalf || (!card->doubleS0)) {
				release_region(card->port, ICN_PORTLEN);
				card->rvalid = 0;
			}
			for (i = 0; i < ICN_BCH; i++)
				icn_free_queue(card, i);
		}
		tmpcard = card->next;
		spin_unlock_irqrestore(&card->lock, flags);
		card = tmpcard;
	}
	/* Second pass: free the card structures themselves. */
	card = cards;
	cards = NULL;
	while (card) {
		last = card;
		card = card->next;
		kfree(last);
	}
	if (dev.mvalid) {
		iounmap(dev.shmem);
		release_mem_region(dev.memaddr, 0x4000);
	}
	printk(KERN_NOTICE "ICN-ISDN-driver unloaded\n");
}
module_init(icn_init);
module_exit(icn_exit);
| gpl-2.0 |
QduZ9zEVr6/kernel-msm | arch/mips/cavium-octeon/executive/cvmx-helper-loop.c | 9942 | 2712 | /***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Functions for LOOP initialization, configuration,
* and monitoring.
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-pip-defs.h>
/**
* Probe a LOOP interface and determine the number of ports
* connected to it. The LOOP interface should still be down
* after this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_loop_probe(int interface)
{
union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
int num_ports = 4;
int port;
/* We need to disable length checking so packet < 64 bytes and jumbo
frames don't get errors */
for (port = 0; port < num_ports; port++) {
union cvmx_pip_prt_cfgx port_cfg;
int ipd_port = cvmx_helper_get_ipd_port(interface, port);
port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
port_cfg.s.maxerr_en = 0;
port_cfg.s.minerr_en = 0;
cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
}
/* Disable FCS stripping for loopback ports */
ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
ipd_sub_port_fcs.s.port_bit2 = 0;
cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
return num_ports;
}
/**
 * Bringup and enable a LOOP interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @interface: Interface to bring up
 *
 * Returns Zero on success, negative on failure
 */
int __cvmx_helper_loop_enable(int interface)
{
	/* Do nothing: loopback ports need no bringup beyond the probe. */
	return 0;
}
| gpl-2.0 |
CyanogenMod/android_kernel_motorola_otus | drivers/pnp/core.c | 11734 | 4880 | /*
* core.c - contains all core device and protocol registration functions
*
* Copyright 2002 Adam Belay <ambx1@neo.rr.com>
*/
#include <linux/pnp.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include "base.h"
static LIST_HEAD(pnp_protocols);
LIST_HEAD(pnp_global);
DEFINE_SPINLOCK(pnp_lock);
/*
* ACPI or PNPBIOS should tell us about all platform devices, so we can
* skip some blind probes. ISAPNP typically enumerates only plug-in ISA
* devices, not built-in things like COM ports.
*/
int pnp_platform_devices;
EXPORT_SYMBOL(pnp_platform_devices);
/* Allocate zero-initialized kernel memory for the PnP layer,
 * logging an error on failure.  Returns NULL when out of memory. */
void *pnp_alloc(long size)
{
	void *mem = kzalloc(size, GFP_KERNEL);

	if (!mem)
		printk(KERN_ERR "pnp: Out of Memory\n");
	return mem;
}
/**
 * pnp_protocol_register - adds a pnp protocol to the pnp layer
 * @protocol: pointer to the corresponding pnp_protocol structure
 *
 * Ex protocols: ISAPNP, PNPBIOS, etc
 *
 * Assigns the lowest unused protocol number and registers the
 * protocol's device.  Returns the device_register() result.
 */
int pnp_register_protocol(struct pnp_protocol *protocol)
{
	int nodenum;
	struct list_head *pos;

	INIT_LIST_HEAD(&protocol->devices);
	INIT_LIST_HEAD(&protocol->cards);
	nodenum = 0;
	spin_lock(&pnp_lock);

	/* assign the lowest unused number */
	list_for_each(pos, &pnp_protocols) {
		struct pnp_protocol *cur = to_pnp_protocol(pos);
		if (cur->number == nodenum) {
			/* Candidate taken: bump it and restart the scan
			 * (resetting pos makes list_for_each begin again). */
			pos = &pnp_protocols;
			nodenum++;
		}
	}

	list_add_tail(&protocol->protocol_list, &pnp_protocols);
	spin_unlock(&pnp_lock);

	protocol->number = nodenum;
	dev_set_name(&protocol->dev, "pnp%d", nodenum);
	return device_register(&protocol->dev);
}
/**
 * pnp_unregister_protocol - removes a pnp protocol from the pnp layer
 * @protocol: pointer to the corresponding pnp_protocol structure
 */
void pnp_unregister_protocol(struct pnp_protocol *protocol)
{
	/* Unlink from the global protocol list before tearing down the
	 * driver-model device registered by pnp_register_protocol(). */
	spin_lock(&pnp_lock);
	list_del(&protocol->protocol_list);
	spin_unlock(&pnp_lock);
	device_unregister(&protocol->dev);
}
/* Free the singly linked chain of pnp_id structures hanging off @dev. */
static void pnp_free_ids(struct pnp_dev *dev)
{
	struct pnp_id *id = dev->id;

	while (id) {
		struct pnp_id *victim = id;

		id = id->next;
		kfree(victim);
	}
}
/* Unlink a single resource from its device's list and free it. */
void pnp_free_resource(struct pnp_resource *pnp_res)
{
	list_del(&pnp_res->list);
	kfree(pnp_res);
}
/* Release every resource attached to @dev; the _safe iterator is
 * required because pnp_free_resource() removes entries as we walk. */
void pnp_free_resources(struct pnp_dev *dev)
{
	struct pnp_resource *pnp_res, *tmp;

	list_for_each_entry_safe(pnp_res, tmp, &dev->resources, list) {
		pnp_free_resource(pnp_res);
	}
}
/*
 * Driver-core release callback, invoked when the last reference to the
 * embedded struct device is dropped: free everything a pnp_dev owns.
 */
static void pnp_release_device(struct device *dmdev)
{
	struct pnp_dev *dev = to_pnp_dev(dmdev);

	pnp_free_ids(dev);
	pnp_free_resources(dev);
	pnp_free_options(dev);
	kfree(dev);
}
/*
 * Allocate and initialise a pnp_dev under @protocol with device number
 * @id and initial EISA id string @pnpid. Returns NULL on allocation
 * failure (of the device itself or of its first id).
 */
struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id,
			      const char *pnpid)
{
	struct pnp_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->resources);
	INIT_LIST_HEAD(&dev->options);
	dev->protocol = protocol;
	dev->number = id;
	/* legacy ISA DMA limit */
	dev->dma_mask = DMA_BIT_MASK(24);

	dev->dev.parent = &dev->protocol->dev;
	dev->dev.bus = &pnp_bus_type;
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.coherent_dma_mask = dev->dma_mask;
	dev->dev.release = &pnp_release_device;
	dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number);

	if (!pnp_add_id(dev, pnpid)) {
		kfree(dev);
		return NULL;
	}

	return dev;
}
/*
 * Core registration shared by card and non-card devices: apply quirk
 * fixups, link the device into the global and per-protocol lists, then
 * hand it to the driver core. Returns device_register()'s result.
 */
int __pnp_add_device(struct pnp_dev *dev)
{
	pnp_fixup_device(dev);
	dev->status = PNP_READY;
	spin_lock(&pnp_lock);
	list_add_tail(&dev->global_list, &pnp_global);
	list_add_tail(&dev->protocol_list, &dev->protocol->devices);
	spin_unlock(&pnp_lock);
	/* ask the protocol (if it can say) whether this device may wake
	 * the system */
	if (dev->protocol->can_wakeup)
		device_set_wakeup_capable(&dev->dev,
				dev->protocol->can_wakeup(dev));
	return device_register(&dev->dev);
}
/*
 * pnp_add_device - adds a pnp device to the pnp layer
 * @dev: pointer to dev to add
 *
 * adds to driver model, name database, fixups, interface, etc.
 * Card-attached devices are rejected; they register via the card code.
 */
int pnp_add_device(struct pnp_dev *dev)
{
	int ret;
	char buf[128];
	int len = 0;
	struct pnp_id *id;

	if (dev->card)
		return -EINVAL;

	ret = __pnp_add_device(dev);
	if (ret)
		return ret;

	/* Build a debug string of all IDs; scnprintf silently truncates
	 * once buf is full. */
	buf[0] = '\0';
	for (id = dev->id; id; id = id->next)
		len += scnprintf(buf + len, sizeof(buf) - len, " %s", id->id);

	dev_printk(KERN_DEBUG, &dev->dev, "%s device, IDs%s (%s)\n",
		   dev->protocol->name, buf,
		   dev->active ? "active" : "disabled");
	return 0;
}
/* Reverse of __pnp_add_device: unlink from both lists under the lock,
 * then drop the driver-model registration (which may free the dev). */
void __pnp_remove_device(struct pnp_dev *dev)
{
	spin_lock(&pnp_lock);
	list_del(&dev->global_list);
	list_del(&dev->protocol_list);
	spin_unlock(&pnp_lock);
	device_unregister(&dev->dev);
}
/* Register the pnp bus type early (subsys_initcall below) so protocol
 * drivers can attach devices to it during their own init. */
static int __init pnp_init(void)
{
	return bus_register(&pnp_bus_type);
}
subsys_initcall(pnp_init);
int pnp_debug;
#if defined(CONFIG_PNP_DEBUG_MESSAGES)
module_param_named(debug, pnp_debug, int, 0644);
#endif
| gpl-2.0 |
gospo/net-next | drivers/w1/slaves/w1_ds2406.c | 215 | 3460 | /*
* w1_ds2406.c - w1 family 12 (DS2406) driver
* based on w1_ds2413.c by Mariusz Bialonczyk <manio@skyboo.net>
*
* Copyright (c) 2014 Scott Alfter <scott@alfter.us>
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/crc16.h>
#include "../w1.h"
#include "../w1_int.h"
#include "../w1_family.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Scott Alfter <scott@alfter.us>");
MODULE_DESCRIPTION("w1 family 12 driver for DS2406 2 Pin IO");
#define W1_F12_FUNC_READ_STATUS 0xAA
#define W1_F12_FUNC_WRITE_STATUS 0x55
/*
 * Read the DS2406 channel-info byte: issue CHANNEL ACCESS (0xAA) with a
 * fixed control byte, read the info byte plus the device's inverted
 * CRC16, and verify the CRC before reporting. On success a single ASCII
 * byte '0'..'3' (PIO-A/PIO-B sensed levels) is placed in @buf and 1 is
 * returned; a bad CRC yields -EIO.
 */
static ssize_t w1_f12_read_state(
	struct file *filp, struct kobject *kobj,
	struct bin_attribute *bin_attr,
	char *buf, loff_t off, size_t count)
{
	u8 w1_buf[6] = {W1_F12_FUNC_READ_STATUS, 7, 0, 0, 0, 0};
	struct w1_slave *sl = kobj_to_w1_slave(kobj);
	u16 crc = 0;
	int i;
	ssize_t rtnval = 1;

	if (off != 0)
		return 0;
	if (!buf)
		return -EINVAL;
	/* Never report (or store) more bytes than the caller asked for;
	 * the old code returned 1 even for a zero-length read. */
	if (count == 0)
		return 0;

	mutex_lock(&sl->master->bus_mutex);

	if (w1_reset_select_slave(sl)) {
		mutex_unlock(&sl->master->bus_mutex);
		return -EIO;
	}

	/* send command + 2 address/control bytes, read info byte + CRC16 */
	w1_write_block(sl->master, w1_buf, 3);
	w1_read_block(sl->master, w1_buf + 3, 3);
	/* CRC16 over everything incl. the inverted CRC folds to 0xb001 */
	for (i = 0; i < 6; i++)
		crc = crc16_byte(crc, w1_buf[i]);
	if (crc == 0xb001) /* good read? */
		*buf = ((w1_buf[3] >> 5) & 3) | 0x30;
	else
		rtnval = -EIO;

	mutex_unlock(&sl->master->bus_mutex);
	return rtnval;
}
/*
 * Set the DS2406 PIO output latches: expects exactly one byte whose low
 * two bits select the two channel states. Issues WRITE STATUS (0x55),
 * verifies the CRC16 echoed by the device, and only then sends the
 * 0xFF confirmation byte that commits the new status. Returns 1 on
 * success, -EIO on bus/CRC failure, -EFAULT on a malformed request.
 */
static ssize_t w1_f12_write_output(
	struct file *filp, struct kobject *kobj,
	struct bin_attribute *bin_attr,
	char *buf, loff_t off, size_t count)
{
	struct w1_slave *sl = kobj_to_w1_slave(kobj);
	u8 w1_buf[6]={W1_F12_FUNC_WRITE_STATUS, 7, 0, 0, 0, 0};
	u16 crc=0;
	int i;
	ssize_t rtnval=1;

	if (count != 1 || off != 0)
		return -EFAULT;

	mutex_lock(&sl->master->bus_mutex);

	if (w1_reset_select_slave(sl)) {
		mutex_unlock(&sl->master->bus_mutex);
		return -EIO;
	}

	/* status byte: caller's two bits go to [6:5], low bits kept set */
	w1_buf[3] = (((*buf)&3)<<5)|0x1F;
	w1_write_block(sl->master, w1_buf, 4);
	w1_read_block(sl->master, w1_buf+4, 2);
	/* CRC16 over cmd+addr+data+returned CRC folds to 0xb001 if good */
	for (i=0; i<6; i++)
		crc=crc16_byte(crc, w1_buf[i]);
	if (crc==0xb001) /* good read? */
		w1_write_8(sl->master, 0xFF);
	else
		rtnval=-EIO;

	mutex_unlock(&sl->master->bus_mutex);
	return rtnval;
}
#define NB_SYSFS_BIN_FILES 2
/* sysfs binary attributes: "state" (read-only sensed levels) and
 * "output" (read/write output latches; write handler only). */
static struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
	{
		.attr = {
			.name = "state",
			.mode = S_IRUGO,
		},
		.size = 1,
		.read = w1_f12_read_state,
	},
	{
		.attr = {
			.name = "output",
			.mode = S_IRUGO | S_IWUSR | S_IWGRP,
		},
		.size = 1,
		.write = w1_f12_write_output,
	}
};
/*
 * Create both sysfs attribute files for a newly bound slave, rolling
 * back the already-created ones on failure. Returns 0 or the first
 * sysfs_create_bin_file() error.
 */
static int w1_f12_add_slave(struct w1_slave *sl)
{
	int err = 0;
	int i;

	for (i = 0; i < NB_SYSFS_BIN_FILES; ++i) {
		err = sysfs_create_bin_file(
			&sl->dev.kobj,
			&(w1_f12_sysfs_bin_files[i]));
		if (err)
			break;
	}
	/*
	 * Unwind only the files that were actually created. The previous
	 * loop incremented i past the failing index before exiting, so
	 * the rollback also tried to remove the attribute whose creation
	 * had just failed.
	 */
	if (err)
		while (--i >= 0)
			sysfs_remove_bin_file(&sl->dev.kobj,
				&(w1_f12_sysfs_bin_files[i]));
	return err;
}
/* Remove the attribute files in reverse creation order. */
static void w1_f12_remove_slave(struct w1_slave *sl)
{
	int i = NB_SYSFS_BIN_FILES;

	while (i-- > 0)
		sysfs_remove_bin_file(&sl->dev.kobj,
				      &w1_f12_sysfs_bin_files[i]);
}
/* Slave lifecycle callbacks for family 0x12 (DS2406) devices. */
static struct w1_family_ops w1_f12_fops = {
	.add_slave = w1_f12_add_slave,
	.remove_slave = w1_f12_remove_slave,
};
/* Family descriptor registered/unregistered by module_w1_family(). */
static struct w1_family w1_family_12 = {
	.fid = W1_FAMILY_DS2406,
	.fops = &w1_f12_fops,
};
module_w1_family(w1_family_12);
| gpl-2.0 |
k2wl/5282 | net/core/dst.c | 727 | 10009 | /*
* net/core/dst.c Protocol independent destination cache.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/dst.h>
/*
* Theory of operations:
* 1) We use a list, protected by a spinlock, to add
* new entries from both BH and non-BH context.
* 2) In order to keep spinlock held for a small delay,
* we use a second list where are stored long lived
* entries, that are handled by the garbage collect thread
* fired by a workqueue.
* 3) This list is guarded by a mutex,
* so that the gc_task and dst_dev_event() can be synchronized.
*/
/*
* We want to keep lock & list close together
* to dirty as few cache lines as possible in __dst_free().
* As this is not a very strong hint, we dont force an alignment on SMP.
*/
static struct {
spinlock_t lock;
struct dst_entry *list;
unsigned long timer_inc;
unsigned long timer_expires;
} dst_garbage = {
.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);
static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);
static DEFINE_MUTEX(dst_gc_mutex);
/*
* long lived entries are maintained in this list, guarded by dst_gc_mutex
*/
static struct dst_entry *dst_busy_list;
/*
 * Garbage-collect the dst busy list: destroy entries whose refcount
 * has dropped to zero, keep the still-referenced ones, then pick up any
 * entries that arrived on dst_garbage.list meanwhile and repeat.
 * Finally reschedule itself with an adaptive delay. Runs with
 * dst_gc_mutex held so it is serialized against dst_dev_event().
 */
static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			/* still referenced: re-link onto the kept list */
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;
		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 1)
				continue;
			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		/* fresh entries were queued while we scanned: take them
		 * and run the sweep again */
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}
/* Packet sink installed as input/output on dead or fresh dst entries:
 * drop the skb unconditionally. */
int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard);
/* All-zero read-only metrics block new entries initially point at. */
const u32 dst_default_metrics[RTAX_MAX];

/*
 * Allocate and initialise a dst_entry from @ops' slab cache. May
 * trigger the ops' gc callback when past gc_thresh; returns NULL if
 * gc refuses or allocation fails. Starts with @initial_ref references,
 * input/output wired to dst_discard, and counts the entry unless
 * DST_NOCOUNT is set in @flags.
 */
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, int flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	dst->neighbour = NULL;
	dst->hh = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
	return dst;
}
EXPORT_SYMBOL(dst_alloc);
/* Mark @dst dead: route its traffic to dst_discard (when the device is
 * gone or down) and flag it obsolete for the gc sweep. */
static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev==NULL) is required, when
	   protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP))
		dst->input = dst->output = dst_discard;
	dst->obsolete = 2;
}
/*
 * Queue @dst for garbage collection: mark it dead, push it on the
 * dst_garbage list, and if the gc worker is currently on a long delay,
 * reset it to the minimum so the new entry is reaped promptly.
 */
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		cancel_delayed_work(&dst_gc_work);
		schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);
/*
 * Tear down @dst: release its hh cache and neighbour, run the ops'
 * destroy hook, drop the device reference and free the entry. Then
 * walk down the ->child chain: an unreferenced NOHASH child is killed
 * in the same pass (goto again); a still-referenced NOHASH child is
 * returned so the caller can queue it for freeing; a hashed child is
 * left alone. Returns NULL when nothing remains to free.
 */
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
	struct dst_entry *child;
	struct neighbour *neigh;
	struct hh_cache *hh;

	smp_rmb();

again:
	neigh = dst->neighbour;
	hh = dst->hh;
	child = dst->child;

	dst->hh = NULL;
	if (hh)
		hh_cache_put(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in his hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);
/*
 * Drop one reference on @dst (NULL tolerated). Uncached entries are
 * destroyed immediately once the last reference goes away, with any
 * surviving NOHASH child handed to __dst_free().
 */
void dst_release(struct dst_entry *dst)
{
	int newrefcnt;

	if (!dst)
		return;

	newrefcnt = atomic_dec_return(&dst->__refcnt);
	WARN_ON(newrefcnt < 0);
	if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE)) {
		dst = dst_destroy(dst);
		if (dst)
			__dst_free(dst);
	}
}
EXPORT_SYMBOL(dst_release);
/*
 * Copy-on-write helper for dst metrics: clone the read-only block @old
 * into a freshly allocated writable array and swing dst->_metrics to it
 * with cmpxchg. If another CPU raced us, free our copy and return the
 * winner's block (or NULL when that block is itself read-only).
 * Returns NULL on allocation failure.
 */
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

	if (p) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			/* lost the race: somebody else installed a block */
			kfree(p);
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		}
	}
	return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
/* Caller asserts that dst_metrics_read_only(dst) is false. */
/* Swap the writable metrics block @old back to the shared read-only
 * defaults and free it — but only if we win the cmpxchg race. */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);
/**
* skb_dst_set_noref - sets skb dst, without a reference
* @skb: buffer
* @dst: dst entry
*
* Sets skb dst, assuming a reference was not taken on dst
* skb_dst_drop() should not dst_release() this dst
*/
void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	/* Only valid inside an RCU section: the reference-less attach
	 * relies on RCU keeping the dst alive. */
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	/* If dst not in cache, we must take a reference, because
	 * dst_release() will destroy dst as soon as its refcount becomes zero
	 */
	if (unlikely(dst->flags & DST_NOCACHE)) {
		dst_hold(dst);
		skb_dst_set(skb, dst);
	} else {
		/* tag the pointer so skb_dst_drop() skips dst_release() */
		skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
	}
}
EXPORT_SYMBOL(skb_dst_set_noref);
/* Dirty hack. We did it in 2.2 (in __dst_free),
* we have _very_ good reasons not to repeat
* this mistake in 2.3, but we have no choice
* now. _It_ _is_ _explicit_ _deliberate_
* _race_ _condition_.
*
* Commented and originally written by Alexey.
*/
/*
 * Detach @dst from @dev when the device goes down (blackhole the
 * traffic) or is being unregistered (re-point dst — and its neighbour,
 * if bound to the same device — at the namespace loopback device so the
 * old device's refcount can reach zero).
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
		if (dst->neighbour && dst->neighbour->dev == dev) {
			dst->neighbour->dev = dst->dev;
			dev_hold(dst->dev);
			dev_put(dev);
		}
	}
}
/*
 * Netdevice notifier: on DOWN/UNREGISTER, walk the busy list and then
 * drain the pending garbage list, detaching every dst from the affected
 * device via dst_ifdown(). The drained garbage entries are appended to
 * the busy list for the gc worker. Serialized with dst_gc_task() by
 * dst_gc_mutex.
 */
static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		/* splice the drained garbage entries onto the busy list */
		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

/* Boot-time hook: watch netdevice state changes for dst cleanup. */
void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}
| gpl-2.0 |
gerbert/linux-tas5715 | arch/blackfin/kernel/setup.c | 983 | 40323 | /*
* Copyright 2004-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>
#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <uapi/linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif
#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/clocks.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>
#include <asm/irq_handler.h>
#include <asm/pda.h>
#ifdef CONFIG_BF60x
#include <mach/pm.h>
#endif
#ifdef CONFIG_SCB_PRIORITY
#include <asm/scb.h>
#endif
u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);
unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);
#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif
char __initdata command_line[COMMAND_LINE_SIZE];
struct blackfin_initial_pda __initdata initial_pda;
/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX 128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM 1
#define BFIN_MEMMAP_RESERVED 2
static struct bfin_memmap {
int nr_map;
struct bfin_memmap_entry {
unsigned long long addr; /* start of memory segment */
unsigned long long size;
unsigned long type;
} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;
/* for memmap sanitization */
struct change_member {
struct bfin_memmap_entry *pentry; /* pointer to original entry */
unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;
DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);
static int early_init_clkin_hz(char *buf);
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
/* Build the CPLB tables: the shared set first, then the per-CPU
 * instruction/data tables for every possible CPU. */
void __init generate_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	/* Generate per-CPU I&D CPLB tables */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		generate_cplb_tables_cpu(cpu);
}
#endif
/*
 * Enable the configured I/D caches on @cpu using its CPLB tables,
 * snapshot the memory-control registers, then report the resulting
 * cacheability of external memory and L2 SRAM.
 */
void bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif
#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif
	bfin_setup_cpudata(cpu);
	/*
	 * In cache coherence emulation mode, we need to have the
	 * D-cache enabled before running any atomic operation which
	 * might involve cache invalidation (i.e. spinlock, rwlock).
	 * So printk's are deferred until then.
	 */
#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO " External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	       " cacheable"
# else
	       " uncacheable"
# endif
	       " in instruction cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO " L2 SRAM :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
		       " cacheable"
# else
		       " uncacheable"
# endif
		       " in instruction cache\n");
#else
	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif
#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO " External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
	       " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
	       " cacheable (write-through)"
# else
	       " uncacheable"
# endif
	       " in data cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO " L2 SRAM :"
# if defined CONFIG_BFIN_L2_WRITEBACK
		       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
		       " cacheable (write-through)"
# else
		       " uncacheable"
# endif
		       " in data cache\n");
#else
	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}
/* Record @cpu's current I/D memory-control register values in its
 * per-CPU cpudata. */
void bfin_setup_cpudata(unsigned int cpu)
{
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

	cpudata->imemctl = bfin_read_IMEM_CONTROL();
	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}
/* Boot-CPU cache bring-up: generate CPLB tables when any cache is
 * configured, then enable caches on CPU 0. */
void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}
/*
 * Copy the kernel's L1 text, L1 data banks A/B and L2 sections from
 * their load addresses into on-chip SRAM, using early DMA for the L1
 * regions (core writes into L1 instruction memory fault — see the
 * comment below).
 */
void __init bfin_relocate_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
	unsigned long l2_len = (unsigned long)_l2_len;

	early_shadow_stamp();

	/*
	 * due to the ALIGN(4) in the arch/blackfin/kernel/vmlinux.lds.S
	 * we know that everything about l1 text/data is nice and aligned,
	 * so copy by 4 byte chunks, and don't worry about overlapping
	 * src/dest.
	 *
	 * We can't use the dma_memcpy functions, since they can call
	 * scheduler functions which might be in L1 :( and core writes
	 * into L1 instruction cause bad access errors, so we are stuck,
	 * we are required to use DMA, but can't use the common dma
	 * functions. We can't use memcpy either - since that might be
	 * going to be in the relocated L1
	 */

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);

	early_dma_memcpy_done();

#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
	blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
#endif

	/* if necessary, copy L2 text/data to L2 SRAM */
	if (L2_LENGTH && l2_len)
		memcpy(_stext_l2, _l2_lma, l2_len);
}
#ifdef CONFIG_SMP
/*
 * SMP only: replicate the L1 text and data images into Core B's L1
 * SRAM (fixed COREB_* addresses) via early DMA, mirroring
 * bfin_relocate_l1_mem() for the boot core.
 */
void __init bfin_relocate_coreb_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
				text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
				data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
				data_b_l1_len);

	early_dma_memcpy_done();

#ifdef CONFIG_ICACHE_FLUSH_L1
	/* rebase the flush routine's address into Core B's L1 window */
	blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
			(unsigned long)_stext_l1 + COREB_L1_CODE_START;
#endif
}
#endif
#ifdef CONFIG_ROMKERNEL
/* ROM kernel (XIP): copy the writable data and init-data sections from
 * their ROM load addresses into RAM. */
void __init bfin_relocate_xip_data(void)
{
	early_shadow_stamp();

	/* data is copied minus the init-task area already set up in RAM */
	memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
	memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
}
#endif
/* Append one region (start/size/type) to the boot memmap, refusing
 * when the fixed-size table is already full. */
static void __init add_memory_region(unsigned long long start,
				     unsigned long long size, int type)
{
	int idx = bfin_memmap.nr_map;

	if (idx == BFIN_MEMMAP_MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	bfin_memmap.map[idx].addr = start;
	bfin_memmap.map[idx].size = size;
	bfin_memmap.map[idx].type = type;
	bfin_memmap.nr_map = idx + 1;
}
/*
* Sanitize the boot memmap, removing overlaps.
*/
/*
 * Sanitize the boot memmap in place: sort all region start/end points,
 * sweep across them tracking which regions overlap, and emit a new
 * non-overlapping map where the highest type number wins in any
 * overlap. Returns 0 on success, -1 when there is nothing to do or the
 * input contains a wrapping (addr+size overflow) entry.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
		Visually we're performing the following (1,2,3,4 = memory types)
		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/
	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining affect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space left for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}
/* Dump the current boot memmap (one line per region, tagged with @who
 * as the source of the map) at debug level. */
static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
			bfin_memmap.map[i].addr,
			bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
		switch (bfin_memmap.map[i].type) {
		case BFIN_MEMMAP_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BFIN_MEMMAP_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
			break;
		}
	}
}
/*
 * Parse one "memmap=<size>[@$]<start>" boot argument:
 *   @ adds a RAM region, $ adds a RESERVED region.
 * Any other separator is silently ignored, matching the documented
 * syntax in parse_cmdline_early().
 */
static __init int parse_memmap(char *arg)
{
	unsigned long long start_at, mem_size;

	if (!arg)
		return -EINVAL;

	mem_size = memparse(arg, &arg);
	switch (*arg) {
	case '@':
		start_at = memparse(arg + 1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
		break;
	case '$':
		start_at = memparse(arg + 1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
		break;
	}

	return 0;
}
/*
* Initial parsing of the command line. Currently, we support:
* - Controlling the linux memory size: mem=xxx[KMG]
* - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
* $ -> reserved memory is dcacheable
* # -> reserved memory is icacheable
* - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
* @ from <start> to <start>+<mem>, type RAM
* $ from <start> to <start>+<mem>, type RESERVED
*/
/*
 * Hand-rolled early command-line scanner: walk the string and, at each
 * word boundary, recognise mem=, max_mem=, clkin_hz=, earlyprintk= and
 * memmap= options (see the syntax comment above). Runs before the
 * generic __setup() machinery is available.
 */
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		/* options are only recognised right after a space */
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					/* optional $/# suffixes select the
					 * cacheability of reserved memory */
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
#endif
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}
/*
 * Setup memory defaults from user config.
 * The physical memory layout looks like:
 *
 * [_rambase, _ramstart]: kernel image
 * [memory_start, memory_end]: dynamic memory managed by kernel
 * [memory_end, _ramend]: reserved memory
 * [memory_mtd_start(memory_end),
 * memory_mtd_start + mtd_size]: rootfs (if any)
 * [_ramend - DMA_UNCACHED_REGION,
 * _ramend]: uncached DMA region
 * [_ramend, physical_mem_end]: memory not managed by kernel
 */
/* Compute the kernel-managed memory window and (optionally) relocate an
 * attached MTD filesystem image to the top of memory.  Reads/writes the
 * global layout variables described in the map above. */
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
	unsigned long mtd_phys = 0;
#endif
	unsigned long max_mem;

	_rambase = CONFIG_BOOT_LOAD;
	_ramstart = (unsigned long)_end;	/* kernel image ends here */

	/* The uncached DMA region is carved off the top of RAM; it must fit. */
	if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
		console_init();
		panic("DMA region exceeds memory limit: %lu.",
			_ramend - _ramstart);
	}
	max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;

#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	/* Due to a Hardware Anomaly we need to limit the size of usable
	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
	if (max_mem >= 56 * 1024 * 1024)
		max_mem = 56 * 1024 * 1024;
# else
	if (max_mem >= 60 * 1024 * 1024)
		max_mem = 60 * 1024 * 1024;
# endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */
#endif /* ANOMALY_05000263 */

#ifdef CONFIG_MPU
	/* Round up to multiple of 4MB */
	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
	memory_start = PAGE_ALIGN(_ramstart);
#endif

#if defined(CONFIG_MTD_UCLINUX)
	/* generic memory mapped MTD driver */
	memory_mtd_end = memory_end;

	/* The MTD image is appended directly after the kernel image;
	 * probe the known filesystem magics to find its real size. */
	mtd_phys = _ramstart;
	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
	if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
		mtd_size =
			PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif

# if defined(CONFIG_CRAMFS)
	if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
		mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
	if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
	    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
		mtd_size =
			PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));

		/* ROM_FS is XIP, so if we found it, we need to limit memory */
		if (memory_end > max_mem) {
			pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
				(max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
			memory_end = max_mem;
		}
	}
# endif /* CONFIG_ROMFS_FS */

	/* Since the default MTD_UCLINUX has no magic number, we just blindly
	 * read 8 past the end of the kernel's image, and look at it.
	 * When no image is attached, mtd_size is set to a random number
	 * Do some basic sanity checks before operating on things
	 */
	if (mtd_size == 0 || memory_end <= mtd_size) {
		pr_emerg("Could not find valid ram mtd attached.\n");
	} else {
		memory_end -= mtd_size;

		/* Relocate MTD image to the top of memory after the uncached memory area */
		uclinux_ram_map.phys = memory_mtd_start = memory_end;
		uclinux_ram_map.size = mtd_size;

		pr_info("Found mtd parition at 0x%p, (len=0x%lx), moving to 0x%p\n",
			_end, mtd_size, (void *)memory_mtd_start);

		dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
	}
#endif /* CONFIG_MTD_UCLINUX */

	/* We need to limit memory, since everything could have a text section
	 * of userspace in it, and expose anomaly 05000263. If the anomaly
	 * doesn't exist, or we don't need to - then dont.
	 */
	if (memory_end > max_mem) {
		pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
			(max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
		memory_end = max_mem;
	}

#ifdef CONFIG_MPU
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
	page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
		ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
#else
	page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
#endif
	page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)0;

	printk(KERN_INFO "Board Memory: %ldMB\n", (physical_mem_end - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", (_ramend - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);

	printk(KERN_INFO "Memory map:\n"
		" fixedcode = 0x%p-0x%p\n"
		" text = 0x%p-0x%p\n"
		" rodata = 0x%p-0x%p\n"
		" bss = 0x%p-0x%p\n"
		" data = 0x%p-0x%p\n"
		" stack = 0x%p-0x%p\n"
		" init = 0x%p-0x%p\n"
		" available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
		" rootfs = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
		" DMA Zone = 0x%p-0x%p\n"
#endif
		, (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
		_stext, _etext,
		__start_rodata, __end_rodata,
		__bss_start, __bss_stop,
		_sdata, _edata,
		(void *)&init_thread_union,
		(void *)((int)(&init_thread_union) + THREAD_SIZE),
		__init_begin, __init_end,
		(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
		, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
		, (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
		);
}
/*
 * Scan the boot memory map and record the lowest and highest usable
 * page frame numbers into min_low_pfn / max_pfn.
 */
void __init find_min_max_pfn(void)
{
	int idx;

	max_pfn = 0;
	min_low_pfn = PFN_DOWN(memory_end);

	for (idx = 0; idx < bfin_memmap.nr_map; idx++) {
		unsigned long first, last;

		/* Only RAM regions contribute to the PFN range */
		if (bfin_memmap.map[idx].type != BFIN_MEMMAP_RAM)
			continue;

		first = PFN_UP(bfin_memmap.map[idx].addr);
		last = PFN_DOWN(bfin_memmap.map[idx].addr +
				bfin_memmap.map[idx].size);
		if (first >= last)
			continue;	/* region too small to hold a full page */

		if (last > max_pfn)
			max_pfn = last;
		if (first < min_low_pfn)
			min_low_pfn = first;
	}
}
/* Hand all usable RAM regions to the boot-time allocator, then reserve
 * everything below memory_start (kernel image, bootmem bitmap). */
static __init void setup_bootmem_allocator(void)
{
	int bootmap_size;
	int i;
	unsigned long start_pfn, end_pfn;
	unsigned long curr_pfn, last_pfn, size;

	/* mark memory between memory_start and memory_end usable */
	add_memory_region(memory_start,
		memory_end - memory_start, BFIN_MEMMAP_RAM);
	/* sanity check for overlap */
	sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
	print_memory_map("boot memmap");

	/* initialize globals in linux/bootmem.h */
	find_min_max_pfn();
	/* pfn of the last usable page frame */
	if (max_pfn > memory_end >> PAGE_SHIFT)
		max_pfn = memory_end >> PAGE_SHIFT;
	/* pfn of last page frame directly mapped by kernel */
	max_low_pfn = max_pfn;
	/* pfn of the first usable page frame after kernel image */
	if (min_low_pfn < memory_start >> PAGE_SHIFT)
		min_low_pfn = memory_start >> PAGE_SHIFT;
	start_pfn = CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT;
	end_pfn = memory_end >> PAGE_SHIFT;

	/*
	 * give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
		memory_start >> PAGE_SHIFT,	/* map goes here */
		start_pfn, end_pfn);

	/* register the memmap regions with the bootmem allocator */
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		/*
		 * Reserve usable memory
		 */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
		if (curr_pfn >= end_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
			bfin_memmap.map[i].size);

		if (last_pfn > end_pfn)
			last_pfn = end_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/* reserve memory before memory_start, including bootmap */
	reserve_bootmem(CONFIG_PHY_RAM_BASE_ADDRESS,
		memory_start + bootmap_size + PAGE_SIZE - 1 - CONFIG_PHY_RAM_BASE_ADDRESS,
		BOOTMEM_DEFAULT);
}
/*
 * Decode one EBIU_SDBCTL bank-size (EBSZ) nibble into megabytes.
 * Unrecognized encodings evaluate to 0.  Written as a GCC statement
 * expression so it can be used as a value.
 */
#define EBSZ_TO_MEG(ebsz) \
({ \
	int meg = 0; \
	switch (ebsz & 0xf) { \
		case 0x1: meg = 16; break; \
		case 0x3: meg = 32; break; \
		case 0x5: meg = 64; break; \
		case 0x7: meg = 128; break; \
		case 0x9: meg = 256; break; \
		case 0xb: meg = 512; break; \
	} \
	meg; \
})
/*
 * Probe the memory controller (SDRAM or DDR, depending on the part) and
 * return the size of attached external memory in megabytes.
 *
 * Fix: in the CONFIG_BF60x branch 'ret' was declared uninitialized, so an
 * unexpected DEVSZ encoding made the function return an indeterminate
 * value (undefined behavior).  It is now initialized to 0, matching the
 * EBIU_DDRCTL1 branch.  The intentional fallthroughs in the DEVWD switch
 * (each narrower device width doubles the chip count) are now annotated.
 */
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
	int ret = 0;
	u32 sdbctl = bfin_read_EBIU_SDBCTL();
	/* BF561 has four SDRAM banks; sum each bank's EBSZ field */
	ret += EBSZ_TO_MEG(sdbctl >> 0);
	ret += EBSZ_TO_MEG(sdbctl >> 8);
	ret += EBSZ_TO_MEG(sdbctl >> 16);
	ret += EBSZ_TO_MEG(sdbctl >> 24);
	return ret;
# else
	return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
	u32 ddrctl = bfin_read_EBIU_DDRCTL1();
	int ret = 0;
	/* Device size field -> megabytes per 8-bit device */
	switch (ddrctl & 0xc0000) {
	case DEVSZ_64:
		ret = 64 / 8;
		break;
	case DEVSZ_128:
		ret = 128 / 8;
		break;
	case DEVSZ_256:
		ret = 256 / 8;
		break;
	case DEVSZ_512:
		ret = 512 / 8;
		break;
	}
	/* Narrower devices mean more of them on the 16-bit bus */
	switch (ddrctl & 0x30000) {
	case DEVWD_4:
		ret *= 2;
		/* fallthrough */
	case DEVWD_8:
		ret *= 2;
		/* fallthrough */
	case DEVWD_16:
		break;
	}
	if ((ddrctl & 0xc000) == 0x4000)
		ret *= 2;
	return ret;
#elif defined(CONFIG_BF60x)
	u32 ddrctl = bfin_read_DMC0_CFG();
	int ret = 0;	/* was uninitialized: unknown DEVSZ returned garbage */
	switch (ddrctl & 0xf00) {
	case DEVSZ_64:
		ret = 64 / 8;
		break;
	case DEVSZ_128:
		ret = 128 / 8;
		break;
	case DEVSZ_256:
		ret = 256 / 8;
		break;
	case DEVSZ_512:
		ret = 512 / 8;
		break;
	case DEVSZ_1G:
		ret = 1024 / 8;
		break;
	case DEVSZ_2G:
		ret = 2048 / 8;
		break;
	}
	return ret;
#endif
	BUG();
}
/*
 * Weak no-op stub: board/machine code may override this to register
 * platform devices that must exist very early in setup_arch().
 */
__attribute__((weak))
void __init native_machine_early_platform_add_devices(void)
{
}
#ifdef CONFIG_BF60x
/* Look up the named clock and return its rate in Hz; 0 if unavailable. */
static inline u_long bfin_get_clk(char *name)
{
	u_long rate = 0;
	struct clk *clkp = clk_get(NULL, name);

	if (!IS_ERR(clkp)) {
		rate = clk_get_rate(clkp);
		clk_put(clkp);
	}
	return rate;
}
#endif
/*
 * Main architecture setup entry point.  Validates the CPU, captures the
 * boot command line, sizes and lays out memory, programs async-memory
 * banks, checks hardware anomalies, reports boot/reset causes, brings up
 * the bootmem allocator and paging, installs the fixed-code atomic
 * sequences, and initializes exception vectors and caches.
 */
void __init setup_arch(char **cmdline_p)
{
	u32 mmr;
	unsigned long sclk, cclk;

	native_machine_early_platform_add_devices();

	enable_shadow_console();

	/* Check to make sure we are running on the right processor */
	mmr = bfin_cpuid();
	if (unlikely(CPUID != bfin_cpuid()))
		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
			CPU, bfin_cpuid(), bfin_revid());

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
	/* Built-in command line overrides whatever the bootloader passed */
	strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif

	/* Keep a copy of command line */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	memset(&bfin_memmap, 0, sizeof(bfin_memmap));

#ifdef CONFIG_BF60x
	/* Should init clock device before parse command early */
	clk_init();
#endif

	/* If the user does not specify things on the command line, use
	 * what the bootloader set things up as
	 */
	physical_mem_end = 0;
	parse_cmdline_early(&command_line[0]);

	if (_ramend == 0)
		_ramend = get_mem_size() * 1024 * 1024;

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

	memory_setup();

#ifndef CONFIG_BF60x
	/* Initialize Async memory banks */
	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
	bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
	bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif
#endif
#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
	bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
	bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
	bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
	bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
		~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
#endif

	cclk = get_cclk();
	sclk = get_sclk();

	if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
		panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
	/* Anomaly workaround: dummy reads of the IMDMA status registers */
	if (ANOMALY_05000266) {
		bfin_read_IMDMA_D0_IRQ_STATUS();
		bfin_read_IMDMA_D1_IRQ_STATUS();
	}
#endif

	mmr = bfin_read_TBUFCTL();
	printk(KERN_INFO "Hardware Trace %s and %sabled\n",
		(mmr & 0x1) ? "active" : "off",
		(mmr & 0x2) ? "en" : "dis");

#ifndef CONFIG_BF60x
	mmr = bfin_read_SYSCR();
	printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);

	/* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
	defined(CONFIG_BF538) || defined(CONFIG_BF539)
	_bfin_swrst = bfin_read_SWRST();
#else
	/* Clear boot mode field */
	_bfin_swrst = mmr & ~0xf;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

#ifdef CONFIG_SMP
	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
	if (_bfin_swrst & RESET_DOUBLE) {
#endif
		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		/* We assume the crashing kernel, and the current symbol table match */
		printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
			initial_pda.seqstat_doublefault & SEQSTAT_EXCAUSE,
			initial_pda.retx_doublefault);
		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n",
			initial_pda.dcplb_doublefault_addr);
		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n",
			initial_pda.icplb_doublefault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			initial_pda.retx);
	} else if (_bfin_swrst & RESET_WDOG)
		printk(KERN_INFO "Recovering from Watchdog event\n");
	else if (_bfin_swrst & RESET_SOFTWARE)
		printk(KERN_NOTICE "Reset caused by Software reset\n");
#endif

	printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
	if (bfin_compiled_revid() == 0xffff)
		printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
	else if (bfin_compiled_revid() == -1)
		printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
	else
		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());

	/* Warn when the silicon revision does not match the compiled-for one */
	if (likely(CPUID == bfin_cpuid())) {
		if (bfin_revid() != bfin_compiled_revid()) {
			if (bfin_compiled_revid() == -1)
				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
					bfin_revid());
			else if (bfin_compiled_revid() != 0xffff) {
				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
					bfin_compiled_revid(), bfin_revid());
				if (bfin_compiled_revid() > bfin_revid())
					panic("Error: you are missing anomaly workarounds for this rev");
			}
		}
		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
				CPU, bfin_revid());
	}

	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

#ifdef CONFIG_BF60x
	printk(KERN_INFO "Processor Speed: %lu MHz core clock, %lu MHz SCLk, %lu MHz SCLK0, %lu MHz SCLK1 and %lu MHz DCLK\n",
		cclk / 1000000, bfin_get_clk("SYSCLK") / 1000000, get_sclk0() / 1000000, get_sclk1() / 1000000, get_dclk() / 1000000);
#else
	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
		cclk / 1000000, sclk / 1000000);
#endif

	setup_bootmem_allocator();

	paging_init();

	/* Copy atomic sequences to their fixed location, and sanity check that
	   these locations are the ones that we advertise to userspace. */
	memcpy((void *)FIXED_CODE_START, &fixed_code_start,
		FIXED_CODE_END - FIXED_CODE_START);
	BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
		!= SIGRETURN_STUB - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
		!= ATOMIC_XCHG32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
		!= ATOMIC_CAS32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
		!= ATOMIC_ADD32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
		!= ATOMIC_SUB32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
		!= ATOMIC_IOR32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
		!= ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
		!= ATOMIC_XOR32 - FIXED_CODE_START);
	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
		!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
	platform_init_cpus();
#endif
	init_exception_vectors();
	bfin_cache_init();	/* Initialize caches for the boot CPU */
#ifdef CONFIG_SCB_PRIORITY
	init_scb();
#endif
}
/* Register a sysfs CPU node for each possible CPU. */
static int __init topology_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);

	return 0;
}

subsys_initcall(topology_init);
/* Get the input clock frequency */
/* Cached CLKIN rate in Hz; defaults to the build-time value and can be
 * overridden with "clkin_hz=" on the kernel command line. */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
#ifndef CONFIG_BF60x
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}
#endif
/* early_param handler: parse the user-supplied CLKIN rate. */
static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	/* When the kernel reprograms the PLL it was configured against
	 * CONFIG_CLKIN_HZ, so a different runtime value cannot be honored. */
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);
#ifndef CONFIG_BF60x
/* Get the voltage input multiplier */
/* Compute the PLL VCO frequency from CLKIN, the DF (divide) bit and the
 * MSEL multiplier field of PLL_CTL.  Result is cached. */
static u_long get_vco(void)
{
	static u_long cached_vco;
	u_long msel, pll_ctl;

	/* The assumption here is that VCO never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_vco)
		return cached_vco;

	pll_ctl = bfin_read_PLL_CTL();
	/* MSEL occupies bits 9..14; the 0 encoding means multiply by 64 */
	msel = (pll_ctl >> 9) & 0x3F;
	if (0 == msel)
		msel = 64;

	cached_vco = get_clkin_hz();
	cached_vco >>= (1 & pll_ctl);	/* DF bit: CLKIN is halved when set */
	cached_vco *= msel;
	return cached_vco;
}
#endif
/* Get the Core clock */
/* Returns the core clock rate in Hz.  On pre-BF60x parts the result is
 * derived from the VCO and the CSEL/SSEL divider fields of PLL_DIV, and
 * cached until PLL_DIV changes. */
u_long get_cclk(void)
{
#ifdef CONFIG_BF60x
	return bfin_get_clk("CCLK");
#else
	static u_long cached_cclk_pll_div, cached_cclk;
	u_long csel, ssel;

	/* PLL bypassed: the core runs straight off CLKIN */
	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV();
	if (ssel == cached_cclk_pll_div)
		return cached_cclk;
	else
		cached_cclk_pll_div = ssel;

	csel = ((ssel >> 4) & 0x03);	/* CSEL: core divider (VCO >> csel) */
	ssel &= 0xf;			/* SSEL: system divider */
	if (ssel && ssel < (1 << csel))	/* SCLK > CCLK */
		cached_cclk = get_vco() / ssel;
	else
		cached_cclk = get_vco() >> csel;
	return cached_cclk;
#endif
}
EXPORT_SYMBOL(get_cclk);
#ifdef CONFIG_BF60x
/* Get the bf60x clock of SCLK0 domain */
u_long get_sclk0(void)
{
	return bfin_get_clk("SCLK0");
}
EXPORT_SYMBOL(get_sclk0);

/* Get the bf60x clock of SCLK1 domain */
u_long get_sclk1(void)
{
	return bfin_get_clk("SCLK1");
}
EXPORT_SYMBOL(get_sclk1);

/* Get the bf60x DRAM clock */
u_long get_dclk(void)
{
	return bfin_get_clk("DCLK");
}
EXPORT_SYMBOL(get_dclk);
#endif
/* Get the default system clock */
/* Returns the system clock rate in Hz; on pre-BF60x parts it is the VCO
 * divided by SSEL, computed once and cached. */
u_long get_sclk(void)
{
#ifdef CONFIG_BF60x
	return get_sclk0();
#else
	static u_long cached_sclk;
	u_long ssel;

	/* The assumption here is that SCLK never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_sclk)
		return cached_sclk;

	/* PLL bypassed: SCLK is CLKIN */
	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV() & 0xf;
	if (0 == ssel) {
		printk(KERN_WARNING "Invalid System Clock\n");
		ssel = 1;	/* avoid division by zero; fall back to VCO/1 */
	}

	cached_sclk = get_vco() / ssel;
	return cached_sclk;
#endif
}
EXPORT_SYMBOL(get_sclk);
/* Convert a system-clock cycle count to microseconds. */
unsigned long sclk_to_usecs(unsigned long sclk)
{
	/* 64-bit intermediate to avoid overflow: cycles * 1e6 / sclk_rate */
	u64 usecs = (u64)sclk * USEC_PER_SEC;

	do_div(usecs, get_sclk());
	return usecs;
}
EXPORT_SYMBOL(sclk_to_usecs);
/* Convert microseconds to a system-clock cycle count. */
unsigned long usecs_to_sclk(unsigned long usecs)
{
	/* 64-bit intermediate to avoid overflow: usecs * sclk_rate / 1e6 */
	u64 cycles = (u64)usecs * get_sclk();

	do_div(cycles, USEC_PER_SEC);
	return cycles;
}
EXPORT_SYMBOL(usecs_to_sclk);
/*
 * Get CPU information for use by the procfs.
 */
/* seq_file .show callback for /proc/cpuinfo: prints per-CPU identity,
 * clock, cache and (after the last CPU) board-wide memory information. */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *cpu, *mmu, *fpu, *vendor, *cache;
	uint32_t revid;
	int cpu_num = *(unsigned int *)v;
	u_long sclk, cclk;
	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

	cpu = CPU;
	mmu = "none";
	fpu = "none";
	revid = bfin_revid();

	sclk = get_sclk();
	cclk = get_cclk();

	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
	case 0xca:	/* ADI's JEDEC manufacturer code */
		vendor = "Analog Devices";
		break;
	default:
		vendor = "unknown";
		break;
	}

	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

	if (CPUID == bfin_cpuid())
		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
	else
		seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
			CPUID, bfin_cpuid());

	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
		"stepping\t: %d ",
		cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
		"mpu on",
#else
		"mpu off",
#endif
		revid);

	if (bfin_revid() != bfin_compiled_revid()) {
		if (bfin_compiled_revid() == -1)
			seq_printf(m, "(Compiled for Rev none)");
		else if (bfin_compiled_revid() == 0xffff)
			seq_printf(m, "(Compiled for Rev any)");
		else
			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
	}

	seq_printf(m, "\ncpu MHz\t\t: %lu.%06lu/%lu.%06lu\n",
		cclk/1000000, cclk%1000000,
		sclk/1000000, sclk%1000000);
	seq_printf(m, "bogomips\t: %lu.%02lu\n"
		"Calibration\t: %lu loops\n",
		(loops_per_jiffy * HZ) / 500000,
		((loops_per_jiffy * HZ) / 5000) % 100,
		(loops_per_jiffy * HZ));

	/* Check cache configuration (data bank A/B cache-vs-SRAM modes) */
	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
	case ACACHE_BSRAM:
		cache = "dbank-A/B\t: cache/sram";
		dcache_size = 16;
		dsup_banks = 1;
		break;
	case ACACHE_BCACHE:
		cache = "dbank-A/B\t: cache/cache";
		dcache_size = 32;
		dsup_banks = 2;
		break;
	case ASRAM_BSRAM:
		cache = "dbank-A/B\t: sram/sram";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	default:
		cache = "unknown";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	}

	/* Is it turned on? Report size 0 when a cache is not fully enabled. */
	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
		dcache_size = 0;

	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
		icache_size = 0;

	seq_printf(m, "cache size\t: %d KB(L1 icache) "
		"%d KB(L1 dcache) %d KB(L2 cache)\n",
		icache_size, dcache_size, 0);
	seq_printf(m, "%s\n", cache);
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
		"cacheable"
#else
		"uncacheable"
#endif
		" in instruction cache\n");
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
		"cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
		"cacheable (write-through)"
#else
		"uncacheable"
#endif
		" in data cache\n");

	if (icache_size)
		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
			BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
	else
		seq_printf(m, "icache setup\t: off\n");

	seq_printf(m,
		"dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
		dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
		BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
	seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
	seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
#endif
	seq_printf(m, "\n");

	/* Board-wide information is printed only once, after the last CPU */
	if (cpu_num != num_possible_cpus() - 1)
		return 0;

	if (L2_LENGTH) {
		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
			"cacheable"
#else
			"uncacheable"
#endif
			" in instruction cache\n");
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
			"cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
			"cacheable (write-through)"
#else
			"uncacheable"
#endif
			" in data cache\n");
	}
	seq_printf(m, "board name\t: %s\n", bfin_board_name);
	seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
		physical_mem_end >> 10, 0ul, physical_mem_end);
	seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
		((int)memory_end - (int)_rambase) >> 10,
		_rambase, memory_end);

	return 0;
}
/* seq_file .start: position on the first online CPU, or end iteration. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = cpumask_first(cpu_online_mask);

	return (*pos < num_online_cpus()) ? pos : NULL;
}
/* seq_file .next: advance to the next online CPU and revalidate. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = cpumask_next(*pos, cpu_online_mask);
	return c_start(m, pos);
}

/* seq_file .stop: nothing to clean up. */
static void c_stop(struct seq_file *m, void *v)
{
}

/* Iterator used by fs/proc to render /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
/*
 * Capture the bootloader-provided command line (passed in R0) very early.
 *
 * Fix: strncpy() does not NUL-terminate when the source is at least
 * COMMAND_LINE_SIZE long, which would leave command_line unterminated
 * and make later string operations on it read out of bounds.  Force
 * termination after the copy.
 */
void __init cmdline_init(const char *r0)
{
	early_shadow_stamp();
	if (r0) {
		strncpy(command_line, r0, COMMAND_LINE_SIZE);
		command_line[COMMAND_LINE_SIZE - 1] = '\0';
	}
}
| gpl-2.0 |
anomalchik/android_kernel_xiaomi | arch/arm/mach-ks8695/time.c | 1239 | 4865 | /*
* arch/arm/mach-ks8695/time.c
*
* Copyright (C) 2006 Ben Dooks <ben@simtec.co.uk>
* Copyright (C) 2006 Simtec Electronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/clockchips.h>
#include <asm/mach/time.h>
#include <asm/system_misc.h>
#include <mach/regs-irq.h>
#include "generic.h"
#define KS8695_TMR_OFFSET (0xF0000 + 0xE400)
#define KS8695_TMR_VA (KS8695_IO_VA + KS8695_TMR_OFFSET)
#define KS8695_TMR_PA (KS8695_IO_PA + KS8695_TMR_OFFSET)
/*
* Timer registers
*/
#define KS8695_TMCON (0x00) /* Timer Control Register */
#define KS8695_T1TC (0x04) /* Timer 1 Timeout Count Register */
#define KS8695_T0TC (0x08) /* Timer 0 Timeout Count Register */
#define KS8695_T1PD (0x0C) /* Timer 1 Pulse Count Register */
#define KS8695_T0PD (0x10) /* Timer 0 Pulse Count Register */
/* Timer Control Register */
#define TMCON_T1EN (1 << 1) /* Timer 1 Enable */
#define TMCON_T0EN (1 << 0) /* Timer 0 Enable */
/* Timer0 Timeout Counter Register */
#define T0TC_WATCHDOG (0xff) /* Enable watchdog mode */
static void ks8695_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
u32 tmcon;
if (mode == CLOCK_EVT_FEAT_PERIODIC) {
u32 rate = DIV_ROUND_CLOSEST(KS8695_CLOCK_RATE, HZ);
u32 half = DIV_ROUND_CLOSEST(rate, 2);
/* Disable timer 1 */
tmcon = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON);
tmcon &= ~TMCON_T1EN;
writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);
/* Both registers need to count down */
writel_relaxed(half, KS8695_TMR_VA + KS8695_T1TC);
writel_relaxed(half, KS8695_TMR_VA + KS8695_T1PD);
/* Re-enable timer1 */
tmcon |= TMCON_T1EN;
writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);
}
}
/*
 * clock_event_device .set_next_event callback: arm timer 1 to fire after
 * 'cycles' clock ticks.  The interval is split evenly between the timeout
 * and pulse counters, both of which count down.
 */
static int ks8695_set_next_event(unsigned long cycles,
				 struct clock_event_device *evt)
{
	u32 period = DIV_ROUND_CLOSEST(cycles, 2);
	u32 ctrl;

	/* Stop timer 1 before reprogramming it */
	ctrl = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON);
	ctrl &= ~TMCON_T1EN;
	writel_relaxed(ctrl, KS8695_TMR_VA + KS8695_TMCON);

	/* Load half the interval into each down-counter */
	writel_relaxed(period, KS8695_TMR_VA + KS8695_T1TC);
	writel_relaxed(period, KS8695_TMR_VA + KS8695_T1PD);

	/* Restart timer 1 */
	ctrl |= TMCON_T1EN;
	writel_relaxed(ctrl, KS8695_TMR_VA + KS8695_TMCON);

	return 0;
}
/* Clock event device backed by hardware timer 1 (see callbacks above). */
static struct clock_event_device clockevent_ks8695 = {
	.name = "ks8695_t1tc",
	.rating = 300, /* Reasonably fast and accurate clock event */
	.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
	.set_next_event = ks8695_set_next_event,
	.set_mode = ks8695_set_mode,
};
/*
 * IRQ handler for the timer: forward the tick to the clock event core.
 */
static irqreturn_t ks8695_timer_interrupt(int irq, void *dev_id)
{
	clockevent_ks8695.event_handler(&clockevent_ks8695);
	return IRQ_HANDLED;
}
/* Tick interrupt action for timer 1; registered in ks8695_timer_init(). */
static struct irqaction ks8695_timer_irq = {
	.name = "ks8695_tick",
	.flags = IRQF_DISABLED | IRQF_TIMER,
	.handler = ks8695_timer_interrupt,
};
/* One-time hardware init: stop both timers, then register timer 1 with
 * the clockevents framework. */
static void ks8695_timer_setup(void)
{
	unsigned long tmcon;

	/* Disable timer 0 and 1 */
	tmcon = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON);
	tmcon &= ~TMCON_T0EN;
	tmcon &= ~TMCON_T1EN;
	writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);

	/*
	 * Use timer 1 to fire IRQs on the timeline, minimum 2 cycles
	 * (one on each counter) maximum 2*2^32, but the API will only
	 * accept up to a 32bit full word (0xFFFFFFFFU).
	 */
	clockevents_config_and_register(&clockevent_ks8695,
		KS8695_CLOCK_RATE, 2,
		0xFFFFFFFFU);
}
/* Machine timer-init entry point: program the hardware, then hook up the
 * tick interrupt. */
void __init ks8695_timer_init(void)
{
	ks8695_timer_setup();

	/* Enable timer interrupts */
	setup_irq(KS8695_IRQ_TIMER1, &ks8695_timer_irq);
}
/* Machine restart hook.  Mode 's' performs a CPU soft restart; otherwise
 * timer 0 is armed in watchdog mode so the chip resets itself. */
void ks8695_restart(char mode, const char *cmd)
{
	unsigned int reg;

	if (mode == 's')
		soft_restart(0);

	/* disable timer0 */
	reg = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON);
	writel_relaxed(reg & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);

	/* enable watchdog mode */
	/* NOTE(review): (10 << 8) appears to be a short timeout count in the
	 * upper bits of T0TC - confirm against the KS8695 datasheet */
	writel_relaxed((10 << 8) | T0TC_WATCHDOG, KS8695_TMR_VA + KS8695_T0TC);

	/* re-enable timer0 */
	writel_relaxed(reg | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
}
| gpl-2.0 |
npf-ati/linux-2.6-imx | arch/blackfin/mach-bf537/boards/cm_bf537e.c | 1751 | 20384 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2008-2009 Bluetechnix
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
#include <linux/usb/isp1362.h>
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
#include <asm/bfin_sport.h>
/*
 * Name the Board for the /proc/cpuinfo
 */
const char bfin_board_name[] = "Bluetechnix CM BF537E";	/* exported board name string */
#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
/* all SPI peripherals info goes here */
#if IS_ENABLED(CONFIG_MTD_M25P80)
/* Partition layout for the on-board SPI flash (offsets/sizes in bytes) */
static struct mtd_partition bfin_spi_flash_partitions[] = {
	{
		.name = "bootloader(spi)",
		.size = 0x00020000,
		.offset = 0,
		.mask_flags = MTD_CAP_ROM	/* bootloader area kept read-only */
	}, {
		.name = "linux kernel(spi)",
		.size = 0xe0000,
		.offset = 0x20000
	}, {
		.name = "file system(spi)",
		.size = 0x700000,
		.offset = 0x00100000,
	}
};

static struct flash_platform_data bfin_spi_flash_data = {
	.name = "m25p80",
	.parts = bfin_spi_flash_partitions,
	.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
	.type = "m25p64",
};

/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
	.enable_dma = 0, /* PIO transfers: DMA is disabled for this chip */
};
#endif

#if IS_ENABLED(CONFIG_MMC_SPI)
/* PIO-only controller settings for the MMC-over-SPI slot */
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
	.enable_dma = 0,
};
#endif
/* SPI slave devices registered at init; entries depend on kernel config.
 * NOTE(review): the m25p80 and mmc_spi entries both claim bus 0 /
 * chip_select 1 - verify these configs are never enabled together. */
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if IS_ENABLED(CONFIG_MTD_M25P80)
	{
		/* the modalias must be the same as spi device driver name */
		.modalias = "m25p80", /* Name of spi_driver for this device */
		.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0, /* Framework bus number */
		.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
		.platform_data = &bfin_spi_flash_data,
		.controller_data = &spi_flash_chip_info,
		.mode = SPI_MODE_3,
	},
#endif

#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
	{
		.modalias = "ad183x",
		.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 4,
	},
#endif

#if IS_ENABLED(CONFIG_MMC_SPI)
	{
		.modalias = "mmc_spi",
		.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,
		.controller_data = &mmc_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
};
/* SPI (0) */
/* MMIO, DMA channel and IRQ resources for the on-chip SPI0 controller */
static struct resource bfin_spi0_resource[] = {
	[0] = {
		.start = SPI0_REGBASE,
		.end = SPI0_REGBASE + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = CH_SPI,
		.end = CH_SPI,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = IRQ_SPI,
		.end = IRQ_SPI,
		.flags = IORESOURCE_IRQ,
	},
};

/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
	.num_chipselect = 8,
	.enable_dma = 1, /* master has the ability to do dma transfer */
	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};

static struct platform_device bfin_spi0_device = {
	.name = "bfin-spi",
	.id = 0, /* Bus number */
	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
	.resource = bfin_spi0_resource,
	.dev = {
		.platform_data = &bfin_spi0_info, /* Passed to driver */
	},
};
#endif /* spi master and devices */
#if IS_ENABLED(CONFIG_SPI_BFIN_SPORT)
/* SPORT SPI controller data */
/* SPI-over-SPORT0: GPIO chip selects, PIO only */
static struct bfin5xx_spi_master bfin_sport_spi0_info = {
	.num_chipselect = MAX_BLACKFIN_GPIOS,
	.enable_dma = 0, /* this master does not support DMA */
	.pin_req = {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_DRPRI,
		P_SPORT0_RSCLK, P_SPORT0_TFS, P_SPORT0_RFS, 0},
};

static struct resource bfin_sport_spi0_resource[] = {
	[0] = {
		.start = SPORT0_TCR1,
		.end = SPORT0_TCR1 + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_SPORT0_ERROR,
		.end = IRQ_SPORT0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_sport_spi0_device = {
	.name = "bfin-sport-spi",
	.id = 1, /* Bus number */
	.num_resources = ARRAY_SIZE(bfin_sport_spi0_resource),
	.resource = bfin_sport_spi0_resource,
	.dev = {
		.platform_data = &bfin_sport_spi0_info, /* Passed to driver */
	},
};

/* SPI-over-SPORT1: same configuration on the second SPORT */
static struct bfin5xx_spi_master bfin_sport_spi1_info = {
	.num_chipselect = MAX_BLACKFIN_GPIOS,
	.enable_dma = 0, /* this master does not support DMA */
	.pin_req = {P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_DRPRI,
		P_SPORT1_RSCLK, P_SPORT1_TFS, P_SPORT1_RFS, 0},
};

static struct resource bfin_sport_spi1_resource[] = {
	[0] = {
		.start = SPORT1_TCR1,
		.end = SPORT1_TCR1 + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_SPORT1_ERROR,
		.end = IRQ_SPORT1_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_sport_spi1_device = {
	.name = "bfin-sport-spi",
	.id = 2, /* Bus number */
	.num_resources = ARRAY_SIZE(bfin_sport_spi1_resource),
	.resource = bfin_sport_spi1_resource,
	.dev = {
		.platform_data = &bfin_sport_spi1_info, /* Passed to driver */
	},
};
#endif /* sport spi master and devices */
#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif
#if IS_ENABLED(CONFIG_FB_HITACHI_TX09)
static struct platform_device hitachi_fb_device = {
.name = "hitachi-tx09",
};
#endif
#if IS_ENABLED(CONFIG_SMC91X)
#include <linux/smc91x.h>
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};
static struct resource smc91x_resources[] = {
{
.start = 0x20200300,
.end = 0x20200300 + 16,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF14,
.end = IRQ_PF14,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
.dev = {
.platform_data = &smc91x_info,
},
};
#endif
#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
static struct resource isp1362_hcd_resources[] = {
{
.start = 0x20308000,
.end = 0x20308000,
.flags = IORESOURCE_MEM,
}, {
.start = 0x20308004,
.end = 0x20308004,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PG15,
.end = IRQ_PG15,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
static struct isp1362_platform_data isp1362_priv = {
.sel15Kres = 1,
.clknotstop = 0,
.oc_enable = 0,
.int_act_high = 0,
.int_edge_triggered = 0,
.remote_wakeup_connected = 0,
.no_power_switching = 1,
.power_switching_mode = 0,
};
static struct platform_device isp1362_hcd_device = {
.name = "isp1362-hcd",
.id = 0,
.dev = {
.platform_data = &isp1362_priv,
},
.num_resources = ARRAY_SIZE(isp1362_hcd_resources),
.resource = isp1362_hcd_resources,
};
#endif
#if IS_ENABLED(CONFIG_USB_NET2272)
static struct resource net2272_bfin_resources[] = {
{
.start = 0x20300000,
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PG13,
.end = IRQ_PG13,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device net2272_bfin_device = {
.name = "net2272",
.id = -1,
.num_resources = ARRAY_SIZE(net2272_bfin_resources),
.resource = net2272_bfin_resources,
};
#endif
#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
static struct mtd_partition cm_partitions[] = {
{
.name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
.name = "linux kernel(nor)",
.size = 0x100000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct physmap_flash_data cm_flash_data = {
.width = 2,
.parts = cm_partitions,
.nr_parts = ARRAY_SIZE(cm_partitions),
};
static unsigned cm_flash_gpios[] = { GPIO_PF4 };
static struct resource cm_flash_resource[] = {
{
.name = "cfi_probe",
.start = 0x20000000,
.end = 0x201fffff,
.flags = IORESOURCE_MEM,
}, {
.start = (unsigned long)cm_flash_gpios,
.end = ARRAY_SIZE(cm_flash_gpios),
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device cm_flash_device = {
.name = "gpio-addr-flash",
.id = 0,
.dev = {
.platform_data = &cm_flash_data,
},
.num_resources = ARRAY_SIZE(cm_flash_resource),
.resource = cm_flash_resource,
};
#endif
#if IS_ENABLED(CONFIG_SERIAL_BFIN)
#ifdef CONFIG_SERIAL_BFIN_UART0
/* UART0: MMIO window, TX/RX/error IRQs, TX/RX DMA channels. */
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_TX,
.end = IRQ_UART0_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART0_CTSRTS
{
/*
 * Flow-control GPIO placeholder; -1 leaves it unassigned.
 * Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
 */
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
{
/*
 * Flow-control GPIO placeholder; -1 leaves it unassigned.
 * Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
 */
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
#endif
};
/* Zero-terminated pin list handed to peripheral_request_list() by the driver. */
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
/* UART1: same resource layout as UART0 above. */
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_TX,
.end = IRQ_UART1_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART1_CTSRTS
{
/*
 * Flow-control GPIO placeholder; -1 leaves it unassigned.
 * Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
 */
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
{
/*
 * Flow-control GPIO placeholder; -1 leaves it unassigned.
 * Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
 */
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
#endif
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if IS_ENABLED(CONFIG_BFIN_SIR)
#ifdef CONFIG_BFIN_SIR0
/* IrDA SIR over UART0: hard-coded UART0 register block + RX IRQ/DMA. */
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
/* IrDA SIR over UART1. */
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif
#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
/* TWI (I2C) controller: pin list, register block and interrupt. */
static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
.dev = {
.platform_data = &bfin_twi0_pins,
},
};
#endif
#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT) \
|| IS_ENABLED(CONFIG_BFIN_SPORT)
/* SPORT0 pin list shared by the sport-uart and raw sport drivers. */
unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
};
#endif
#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
/* SPORT0 used as an extra UART. */
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
/* SPORT1 used as an extra UART. */
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if IS_ENABLED(CONFIG_BFIN_SPORT)
/* Raw SPORT0 access device: registers, RX/TX/error IRQs, DMA channels. */
static struct resource bfin_sport0_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_TX,
.end = IRQ_SPORT0_TX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_SPORT0_TX,
.end = CH_SPORT0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_SPORT0_RX,
.end = CH_SPORT0_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sport0_device = {
.name = "bfin_sport_raw",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_resources),
.resource = bfin_sport0_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#if IS_ENABLED(CONFIG_BFIN_MAC)
#include <linux/bfin_mac.h>
/* On-chip Ethernet MAC, MII mode, single PHY at address 1. */
static const unsigned short bfin_mac_peripherals[] = P_MII0;
static struct bfin_phydev_platform_data bfin_phydev_data[] = {
{
.addr = 1,
.irq = IRQ_MAC_PHYINT,
},
};
static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
.phydev_number = 1,
.phydev_data = bfin_phydev_data,
.phy_mode = PHY_INTERFACE_MODE_MII,
.mac_peripherals = bfin_mac_peripherals,
};
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
.dev = {
.platform_data = &bfin_mii_bus_data,
}
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
.dev = {
/* NOTE(review): platform_data here is the MII bus *device*, not a
 * plain data struct — presumably the bfin_mac driver dereferences
 * it to find its bus; confirm against the driver. */
.platform_data = &bfin_mii_bus,
}
};
#endif
#if IS_ENABLED(CONFIG_PATA_PLATFORM)
#define PATA_INT IRQ_PF14
static struct pata_platform_info bfin_pata_platform_data = {
.ioport_shift = 2,
};
/* PATA: command block, control block, and the shared PF14 interrupt. */
static struct resource bfin_pata_resources[] = {
{
.start = 0x2030C000,
.end = 0x2030C01F,
.flags = IORESOURCE_MEM,
},
{
.start = 0x2030D018,
.end = 0x2030D01B,
.flags = IORESOURCE_MEM,
},
{
.start = PATA_INT,
.end = PATA_INT,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device bfin_pata_device = {
.name = "pata_platform",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_pata_resources),
.resource = bfin_pata_resources,
.dev = {
.platform_data = &bfin_pata_platform_data,
}
};
#endif
/* Core-clock vs. minimum VDDint pairs from the processor datasheet. */
static const unsigned int cclk_vlev_datasheet[] =
{
VRPAIR(VLEV_085, 250000000),
VRPAIR(VLEV_090, 376000000),
VRPAIR(VLEV_095, 426000000),
VRPAIR(VLEV_100, 426000000),
VRPAIR(VLEV_105, 476000000),
VRPAIR(VLEV_110, 476000000),
VRPAIR(VLEV_115, 476000000),
VRPAIR(VLEV_120, 500000000),
VRPAIR(VLEV_125, 533000000),
VRPAIR(VLEV_130, 600000000),
};
static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
.tuple_tab = cclk_vlev_datasheet,
.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
.vr_settling_time = 25 /* us */,
};
/* Dynamic power management controller (voltage/frequency scaling). */
static struct platform_device bfin_dpmc = {
.name = "bfin dpmc",
.dev = {
.platform_data = &bfin_dmpc_vreg_data,
},
};
/*
 * Every device registered at arch_initcall time; each entry is compiled in
 * only when the matching driver is enabled in the kernel config.
 */
static struct platform_device *cm_bf537e_devices[] __initdata = {
&bfin_dpmc,
#if IS_ENABLED(CONFIG_BFIN_SPORT)
&bfin_sport0_device,
#endif
#if IS_ENABLED(CONFIG_FB_HITACHI_TX09)
&hitachi_fb_device,
#endif
#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
&rtc_device,
#endif
#if IS_ENABLED(CONFIG_SERIAL_BFIN)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if IS_ENABLED(CONFIG_BFIN_SIR)
#ifdef CONFIG_BFIN_SIR0
&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
&bfin_sir1_device,
#endif
#endif
#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
&i2c_bfin_twi_device,
#endif
#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
&isp1362_hcd_device,
#endif
#if IS_ENABLED(CONFIG_SMC91X)
&smc91x_device,
#endif
#if IS_ENABLED(CONFIG_BFIN_MAC)
&bfin_mii_bus,
&bfin_mac_device,
#endif
#if IS_ENABLED(CONFIG_USB_NET2272)
&net2272_bfin_device,
#endif
#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
&bfin_spi0_device,
#endif
#if IS_ENABLED(CONFIG_SPI_BFIN_SPORT)
&bfin_sport_spi0_device,
&bfin_sport_spi1_device,
#endif
#if IS_ENABLED(CONFIG_PATA_PLATFORM)
&bfin_pata_device,
#endif
#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
&cm_flash_device,
#endif
};
/*
 * Take the NET2272 USB device controller out of reset.
 * Returns 0 on success (or when the driver is disabled), otherwise the
 * gpio_request() error code.  The GPIO is deliberately held for the
 * lifetime of the system to keep the chip out of reset.
 */
static int __init net2272_init(void)
{
#if IS_ENABLED(CONFIG_USB_NET2272)
	int err = gpio_request(GPIO_PG14, "net2272");

	if (err)
		return err;

	/* Pulse PG14 low for 2ms to reset the chip, then release it. */
	gpio_direction_output(GPIO_PG14, 0);
	mdelay(2);
	gpio_set_value(GPIO_PG14, 1);
#endif
	return 0;
}
/*
 * Board init: register every platform device, the SPI board info, and
 * perform board-specific IRQ/USB setup.
 *
 * Fix: the return value of platform_add_devices() was previously ignored,
 * so a failed registration was silently reported as success; propagate it.
 */
static int __init cm_bf537e_init(void)
{
	int ret;

	printk(KERN_INFO "%s(): registering device resources\n", __func__);

	ret = platform_add_devices(cm_bf537e_devices,
				   ARRAY_SIZE(cm_bf537e_devices));
	if (ret)
		return ret;
#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
	spi_register_board_info(bfin_spi_board_info,
				ARRAY_SIZE(bfin_spi_board_info));
#endif
#if IS_ENABLED(CONFIG_PATA_PLATFORM)
	/* Leave the PATA IRQ disabled until the driver enables it. */
	irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif
	if (net2272_init())
		pr_warning("unable to configure net2272; it probably won't work\n");
	return 0;
}
arch_initcall(cm_bf537e_init);
/*
 * Devices that must exist before normal initcalls run: the serial/sport
 * consoles used for early printk output.
 */
static struct platform_device *cm_bf537e_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
};
/* Register the early-console devices before normal initcall processing. */
void __init native_machine_early_platform_add_devices(void)
{
	const size_t ndevs = ARRAY_SIZE(cm_bf537e_early_devices);

	printk(KERN_INFO "register early platform devices\n");
	early_platform_add_devices(cm_bf537e_early_devices, ndevs);
}
/*
 * Hook used by the Blackfin MAC driver to obtain a board MAC address.
 * Always returns 1 (failure) — presumably this board has no persistent
 * MAC storage, so the driver must fall back to another source; confirm
 * against the bfin_mac driver's handling of a non-zero return.
 */
int bfin_get_ether_addr(char *addr)
{
return 1;
}
EXPORT_SYMBOL(bfin_get_ether_addr);
| gpl-2.0 |
KunYi/linux_samx6i | scripts/kconfig/lxdialog/inputbox.c | 2519 | 7168 | /*
* inputbox.c -- implements the input box
*
* ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
* MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "dialog.h"
/* Shared output buffer: receives the string the user typed in the input box. */
char dialog_input_result[MAX_LEN + 1];
/*
 * Draw the "Ok"/"Help" buttons on the dialog's bottom row and leave the
 * cursor inside the selected one (selected: 0 = Ok, 1 = Help).
 */
static void print_buttons(WINDOW * dialog, int height, int width, int selected)
{
	int btn_y = height - 2;
	int btn_x = width / 2 - 11;

	print_button(dialog, gettext(" Ok "), btn_y, btn_x, selected == 0);
	print_button(dialog, gettext(" Help "), btn_y, btn_x + 14, selected == 1);

	/* Buttons are 14 columns apart; park the cursor in the chosen one. */
	wmove(dialog, btn_y, btn_x + 1 + 14 * selected);
	wrefresh(dialog);
}
/*
 * Display a dialog box for inputting a string.
 *
 * Returns 0 for "Ok", 1 for "Help", KEY_ESC on cancel, or
 * -ERRDISPLAYTOOSMALL if the terminal cannot fit the dialog.
 * The typed string is left in dialog_input_result.
 *
 * State: 'pos' is the caret position within the string, 'show_x' the first
 * visible character of the scrolled field, 'input_x' the caret's column
 * inside the field, and 'button' is -1 (field), 0 (Ok) or 1 (Help).
 */
int dialog_inputbox(const char *title, const char *prompt, int height, int width,
const char *init)
{
int i, x, y, box_y, box_x, box_width;
int input_x = 0, key = 0, button = -1;
int show_x, len, pos;
char *instr = dialog_input_result;
WINDOW *dialog;
if (!init)
instr[0] = '\0';
else
strcpy(instr, init);
/* Re-entered after a terminal resize: re-check the size and redraw. */
do_resize:
if (getmaxy(stdscr) <= (height - INPUTBOX_HEIGTH_MIN))
return -ERRDISPLAYTOOSMALL;
if (getmaxx(stdscr) <= (width - INPUTBOX_WIDTH_MIN))
return -ERRDISPLAYTOOSMALL;
/* center dialog box on screen */
x = (getmaxx(stdscr) - width) / 2;
y = (getmaxy(stdscr) - height) / 2;
draw_shadow(stdscr, y, x, height, width);
dialog = newwin(height, width, y, x);
keypad(dialog, TRUE);
draw_box(dialog, 0, 0, height, width,
dlg.dialog.atr, dlg.border.atr);
wattrset(dialog, dlg.border.atr);
mvwaddch(dialog, height - 3, 0, ACS_LTEE);
for (i = 0; i < width - 2; i++)
waddch(dialog, ACS_HLINE);
wattrset(dialog, dlg.dialog.atr);
waddch(dialog, ACS_RTEE);
print_title(dialog, title, width);
wattrset(dialog, dlg.dialog.atr);
print_autowrap(dialog, prompt, width - 2, 1, 3);
/* Draw the input field box */
box_width = width - 6;
getyx(dialog, y, x);
box_y = y + 2;
box_x = (width - box_width) / 2;
draw_box(dialog, y + 1, box_x - 1, 3, box_width + 2,
dlg.dialog.atr, dlg.border.atr);
print_buttons(dialog, height, width, 0);
/* Set up the initial value */
wmove(dialog, box_y, box_x);
wattrset(dialog, dlg.inputbox.atr);
len = strlen(instr);
pos = len;
/* If the initial string overflows the field, show only its tail. */
if (len >= box_width) {
show_x = len - box_width + 1;
input_x = box_width - 1;
for (i = 0; i < box_width - 1; i++)
waddch(dialog, instr[show_x + i]);
} else {
show_x = 0;
input_x = len;
waddstr(dialog, instr);
}
wmove(dialog, box_y, box_x + input_x);
wrefresh(dialog);
/* Main event loop: edit keys first (field focused), then navigation. */
while (key != KEY_ESC) {
key = wgetch(dialog);
if (button == -1) { /* Input box selected */
switch (key) {
case TAB:
case KEY_UP:
case KEY_DOWN:
break;
case KEY_BACKSPACE:
case 127:
/* Delete the character before the caret, shifting the tail left. */
if (pos) {
wattrset(dialog, dlg.inputbox.atr);
if (input_x == 0) {
show_x--;
} else
input_x--;
if (pos < len) {
for (i = pos - 1; i < len; i++) {
instr[i] = instr[i+1];
}
}
pos--;
len--;
instr[len] = '\0';
wmove(dialog, box_y, box_x);
for (i = 0; i < box_width; i++) {
if (!instr[show_x + i]) {
waddch(dialog, ' ');
break;
}
waddch(dialog, instr[show_x + i]);
}
wmove(dialog, box_y, input_x + box_x);
wrefresh(dialog);
}
continue;
case KEY_LEFT:
/* Move caret left, scrolling the field when at its left edge. */
if (pos > 0) {
if (input_x > 0) {
wmove(dialog, box_y, --input_x + box_x);
} else if (input_x == 0) {
show_x--;
wmove(dialog, box_y, box_x);
for (i = 0; i < box_width; i++) {
if (!instr[show_x + i]) {
waddch(dialog, ' ');
break;
}
waddch(dialog, instr[show_x + i]);
}
wmove(dialog, box_y, box_x);
}
pos--;
}
continue;
case KEY_RIGHT:
/* Move caret right, scrolling the field when at its right edge. */
if (pos < len) {
if (input_x < box_width - 1) {
wmove(dialog, box_y, ++input_x + box_x);
} else if (input_x == box_width - 1) {
show_x++;
wmove(dialog, box_y, box_x);
for (i = 0; i < box_width; i++) {
if (!instr[show_x + i]) {
waddch(dialog, ' ');
break;
}
waddch(dialog, instr[show_x + i]);
}
wmove(dialog, box_y, input_x + box_x);
}
pos++;
}
continue;
default:
/* Printable character: insert at the caret (bounded by MAX_LEN). */
if (key < 0x100 && isprint(key)) {
if (len < MAX_LEN) {
wattrset(dialog, dlg.inputbox.atr);
if (pos < len) {
for (i = len; i > pos; i--)
instr[i] = instr[i-1];
instr[pos] = key;
} else {
instr[len] = key;
}
pos++;
len++;
instr[len] = '\0';
if (input_x == box_width - 1) {
show_x++;
} else {
input_x++;
}
wmove(dialog, box_y, box_x);
for (i = 0; i < box_width; i++) {
if (!instr[show_x + i]) {
waddch(dialog, ' ');
break;
}
waddch(dialog, instr[show_x + i]);
}
wmove(dialog, box_y, input_x + box_x);
wrefresh(dialog);
} else
flash(); /* Alarm user about overflow */
continue;
}
}
}
/* Keys handled regardless of focus: hotkeys and button navigation. */
switch (key) {
case 'O':
case 'o':
delwin(dialog);
return 0;
case 'H':
case 'h':
delwin(dialog);
return 1;
case KEY_UP:
case KEY_LEFT:
switch (button) {
case -1:
button = 1; /* Indicates "Help" button is selected */
print_buttons(dialog, height, width, 1);
break;
case 0:
button = -1; /* Indicates input box is selected */
print_buttons(dialog, height, width, 0);
wmove(dialog, box_y, box_x + input_x);
wrefresh(dialog);
break;
case 1:
button = 0; /* Indicates "OK" button is selected */
print_buttons(dialog, height, width, 0);
break;
}
break;
case TAB:
case KEY_DOWN:
case KEY_RIGHT:
switch (button) {
case -1:
button = 0; /* Indicates "OK" button is selected */
print_buttons(dialog, height, width, 0);
break;
case 0:
button = 1; /* Indicates "Help" button is selected */
print_buttons(dialog, height, width, 1);
break;
case 1:
button = -1; /* Indicates input box is selected */
print_buttons(dialog, height, width, 0);
wmove(dialog, box_y, box_x + input_x);
wrefresh(dialog);
break;
}
break;
case ' ':
case '\n':
delwin(dialog);
return (button == -1 ? 0 : button);
case 'X':
case 'x':
key = KEY_ESC;
break;
case KEY_ESC:
key = on_key_esc(dialog);
break;
case KEY_RESIZE:
delwin(dialog);
on_key_resize();
goto do_resize;
}
}
delwin(dialog);
return KEY_ESC; /* ESC pressed */
}
| gpl-2.0 |
percy-g2/bbbandroid-kernel | drivers/pci/hotplug/ibmphp_ebda.c | 2519 | 35513 | /*
* IBM Hot Plug Controller Driver
*
* Written By: Tong Yu, IBM Corporation
*
* Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001-2003 IBM Corp.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <gregkh@us.ibm.com>
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/init.h>
#include "ibmphp.h"
/*
* POST builds data blocks(in this data block definition, a char-1
* byte, short(or word)-2 byte, long(dword)-4 byte) in the Extended
* BIOS Data Area which describe the configuration of the hot-plug
* controllers and resources used by the PCI Hot-Plug devices.
*
* This file walks EBDA, maps data block from physical addr,
* reconstruct linked lists about all system resource(MEM, PFM, IO)
* already assigned by POST, as well as linked lists about hot plug
* controllers (ctlr#, slot#, bus&slot features...)
*/
/* Global lists */
LIST_HEAD (ibmphp_ebda_pci_rsrc_head);
LIST_HEAD (ibmphp_slot_head);
/* Local variables */
static struct ebda_hpc_list *hpc_list_ptr;
static struct ebda_rsrc_list *rsrc_list_ptr;
static struct rio_table_hdr *rio_table_ptr = NULL;
static LIST_HEAD (ebda_hpc_head);
static LIST_HEAD (bus_info_head);
static LIST_HEAD (rio_vg_head);
static LIST_HEAD (rio_lo_head);
static LIST_HEAD (opt_vg_head);
static LIST_HEAD (opt_lo_head);
static void __iomem *io_mem;
/* Local functions */
static int ebda_rsrc_controller (void);
static int ebda_rsrc_rsrc (void);
static int ebda_rio_table (void);
/* Allocate a zeroed hot-plug-controller list header; NULL on OOM. */
static struct ebda_hpc_list * __init alloc_ebda_hpc_list (void)
{
	struct ebda_hpc_list *list;

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	return list;
}
/*
 * Allocate a controller along with its slot and bus arrays.
 * Returns NULL on any allocation failure, freeing whatever was
 * already allocated.
 */
static struct controller *alloc_ebda_hpc (u32 slot_count, u32 bus_count)
{
	struct controller *ctlr;
	struct ebda_hpc_slot *slot_arr;
	struct ebda_hpc_bus *bus_arr;

	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	slot_arr = kcalloc(slot_count, sizeof(*slot_arr), GFP_KERNEL);
	if (!slot_arr)
		goto free_ctlr;
	ctlr->slots = slot_arr;

	bus_arr = kcalloc(bus_count, sizeof(*bus_arr), GFP_KERNEL);
	if (!bus_arr)
		goto free_slots;
	ctlr->buses = bus_arr;

	return ctlr;

free_slots:
	kfree(ctlr->slots);
free_ctlr:
	kfree(ctlr);
	return NULL;
}
/* Free a controller and both of its embedded arrays. */
static void free_ebda_hpc (struct controller *controller)
{
	kfree (controller->buses);
	kfree (controller->slots);
	kfree (controller);
}
/* Allocate a zeroed resource-list header; NULL on OOM. */
static struct ebda_rsrc_list * __init alloc_ebda_rsrc_list (void)
{
	struct ebda_rsrc_list *list;

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	return list;
}
/* Allocate one zeroed EBDA PCI resource descriptor; NULL on OOM. */
static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void)
{
	struct ebda_pci_rsrc *rsrc;

	rsrc = kzalloc(sizeof(*rsrc), GFP_KERNEL);
	return rsrc;
}
/* Debug dump of every bus_info entry collected from the EBDA. */
static void __init print_bus_info (void)
{
struct bus_info *ptr;
list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
debug ("%s - slot_min = %x\n", __func__, ptr->slot_min);
debug ("%s - slot_max = %x\n", __func__, ptr->slot_max);
debug ("%s - slot_count = %x\n", __func__, ptr->slot_count);
debug ("%s - bus# = %x\n", __func__, ptr->busno);
debug ("%s - current_speed = %x\n", __func__, ptr->current_speed);
debug ("%s - controller_id = %x\n", __func__, ptr->controller_id);
debug ("%s - slots_at_33_conv = %x\n", __func__, ptr->slots_at_33_conv);
debug ("%s - slots_at_66_conv = %x\n", __func__, ptr->slots_at_66_conv);
debug ("%s - slots_at_66_pcix = %x\n", __func__, ptr->slots_at_66_pcix);
debug ("%s - slots_at_100_pcix = %x\n", __func__, ptr->slots_at_100_pcix);
debug ("%s - slots_at_133_pcix = %x\n", __func__, ptr->slots_at_133_pcix);
}
}
/* Debug dump of the expansion-box ("lo") RIO detail list. */
static void print_lo_info (void)
{
struct rio_detail *ptr;
debug ("print_lo_info ----\n");
list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) {
debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
debug ("%s - owner_id = %x\n", __func__, ptr->owner_id);
debug ("%s - first_slot_num = %x\n", __func__, ptr->first_slot_num);
debug ("%s - wpindex = %x\n", __func__, ptr->wpindex);
debug ("%s - chassis_num = %x\n", __func__, ptr->chassis_num);
}
}
/* Debug dump of the chassis ("vg") RIO detail list. */
static void print_vg_info (void)
{
struct rio_detail *ptr;
debug ("%s ---\n", __func__);
list_for_each_entry(ptr, &rio_vg_head, rio_detail_list) {
debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
debug ("%s - owner_id = %x\n", __func__, ptr->owner_id);
debug ("%s - first_slot_num = %x\n", __func__, ptr->first_slot_num);
debug ("%s - wpindex = %x\n", __func__, ptr->wpindex);
debug ("%s - chassis_num = %x\n", __func__, ptr->chassis_num);
}
}
/* Debug dump of every PCI resource range discovered in the EBDA. */
static void __init print_ebda_pci_rsrc (void)
{
	struct ebda_pci_rsrc *res;

	list_for_each_entry(res, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list)
		debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
			__func__, res->rsrc_type, res->bus_num, res->dev_fun,
			res->start_addr, res->end_addr);
}
/* Debug dump of every hot-plug slot number on the global slot list. */
static void __init print_ibm_slot (void)
{
	struct slot *slot_cur;

	list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list)
		debug ("%s - slot_number: %x\n", __func__, slot_cur->number);
}
/* Debug dump of the reorganized (opt) chassis list. */
static void __init print_opt_vg (void)
{
struct opt_rio *ptr;
debug ("%s ---\n", __func__);
list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
debug ("%s - rio_type %x\n", __func__, ptr->rio_type);
debug ("%s - chassis_num: %x\n", __func__, ptr->chassis_num);
debug ("%s - first_slot_num: %x\n", __func__, ptr->first_slot_num);
debug ("%s - middle_num: %x\n", __func__, ptr->middle_num);
}
}
/*
 * Debug dump of every hot-plug controller: its slots, buses, and the
 * type-specific addressing info (the union member used depends on
 * ctlr_type: 0 = ISA, 1 = PCI, 2/4 = WPEG/i2c-attached).
 */
static void __init print_ebda_hpc (void)
{
struct controller *hpc_ptr;
u16 index;
list_for_each_entry(hpc_ptr, &ebda_hpc_head, ebda_hpc_list) {
for (index = 0; index < hpc_ptr->slot_count; index++) {
debug ("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num);
debug ("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num);
debug ("%s - index into ctlr addr: %x\n", __func__, hpc_ptr->slots[index].ctl_index);
debug ("%s - cap of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_cap);
}
for (index = 0; index < hpc_ptr->bus_count; index++) {
debug ("%s - bus# of each bus controlled by this ctlr: %x\n", __func__, hpc_ptr->buses[index].bus_num);
}
debug ("%s - type of hpc: %x\n", __func__, hpc_ptr->ctlr_type);
switch (hpc_ptr->ctlr_type) {
case 1:
debug ("%s - bus: %x\n", __func__, hpc_ptr->u.pci_ctlr.bus);
debug ("%s - dev_fun: %x\n", __func__, hpc_ptr->u.pci_ctlr.dev_fun);
debug ("%s - irq: %x\n", __func__, hpc_ptr->irq);
break;
case 0:
debug ("%s - io_start: %x\n", __func__, hpc_ptr->u.isa_ctlr.io_start);
debug ("%s - io_end: %x\n", __func__, hpc_ptr->u.isa_ctlr.io_end);
debug ("%s - irq: %x\n", __func__, hpc_ptr->irq);
break;
case 2:
case 4:
debug ("%s - wpegbbar: %lx\n", __func__, hpc_ptr->u.wpeg_ctlr.wpegbbar);
debug ("%s - i2c_addr: %x\n", __func__, hpc_ptr->u.wpeg_ctlr.i2c_addr);
debug ("%s - irq: %x\n", __func__, hpc_ptr->irq);
break;
}
}
}
/*
 * Locate and walk the Extended BIOS Data Area.
 *
 * The EBDA segment pointer lives at 40:0E; the first byte of the EBDA is
 * its size in KiB.  Starting at offset 0x180, blocks are chained by a
 * 16-bit "next" offset.  We care about two block ids: 0x4853 ('HS', hot
 * swap — controller and resource sub-blocks) and 0x4752 ('GR', RIO table).
 *
 * Returns 0 on success, -ENOMEM on mapping/allocation failure, -ENODEV
 * when no usable tables are found.
 */
int __init ibmphp_access_ebda (void)
{
u8 format, num_ctlrs, rio_complete, hs_complete, ebda_sz;
u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base;
int rc = 0;
rio_complete = 0;
hs_complete = 0;
/* Read the EBDA segment from the BIOS data area word at 40:0E. */
io_mem = ioremap ((0x40 << 4) + 0x0e, 2);
if (!io_mem )
return -ENOMEM;
ebda_seg = readw (io_mem);
iounmap (io_mem);
debug ("returned ebda segment: %x\n", ebda_seg);
io_mem = ioremap(ebda_seg<<4, 1);
if (!io_mem)
return -ENOMEM;
ebda_sz = readb(io_mem);
iounmap(io_mem);
debug("ebda size: %d(KiB)\n", ebda_sz);
if (ebda_sz == 0)
return -ENOMEM;
/* Map the whole EBDA once; all offsets below are relative to it. */
io_mem = ioremap(ebda_seg<<4, (ebda_sz * 1024));
if (!io_mem )
return -ENOMEM;
next_offset = 0x180;
for (;;) {
offset = next_offset;
/* Make sure what we read is still in the mapped section */
if (WARN(offset > (ebda_sz * 1024 - 4),
"ibmphp_ebda: next read is beyond ebda_sz\n"))
break;
next_offset = readw (io_mem + offset); /* offset of next blk */
offset += 2;
if (next_offset == 0) /* 0 indicate it's last blk */
break;
blk_id = readw (io_mem + offset); /* this blk id */
offset += 2;
/* check if it is hot swap block or rio block */
if (blk_id != 0x4853 && blk_id != 0x4752)
continue;
/* found hs table */
if (blk_id == 0x4853) {
debug ("now enter hot swap block---\n");
debug ("hot blk id: %x\n", blk_id);
format = readb (io_mem + offset);
offset += 1;
if (format != 4)
goto error_nodev;
debug ("hot blk format: %x\n", format);
/* hot swap sub blk */
base = offset;
/* First sub-block: 'RC' — hot-plug controller descriptors. */
sub_addr = base;
re = readw (io_mem + sub_addr); /* next sub blk */
sub_addr += 2;
rc_id = readw (io_mem + sub_addr); /* sub blk id */
sub_addr += 2;
if (rc_id != 0x5243)
goto error_nodev;
/* rc sub blk signature */
num_ctlrs = readb (io_mem + sub_addr);
sub_addr += 1;
hpc_list_ptr = alloc_ebda_hpc_list ();
if (!hpc_list_ptr) {
rc = -ENOMEM;
goto out;
}
hpc_list_ptr->format = format;
hpc_list_ptr->num_ctlrs = num_ctlrs;
hpc_list_ptr->phys_addr = sub_addr; /* offset of RSRC_CONTROLLER blk */
debug ("info about hpc descriptor---\n");
debug ("hot blk format: %x\n", format);
debug ("num of controller: %x\n", num_ctlrs);
debug ("offset of hpc data structure enteries: %x\n ", sub_addr);
/* Second sub-block: 'RE' — resource entries. */
sub_addr = base + re; /* re sub blk */
/* FIXME: rc is never used/checked */
rc = readw (io_mem + sub_addr); /* next sub blk */
sub_addr += 2;
re_id = readw (io_mem + sub_addr); /* sub blk id */
sub_addr += 2;
if (re_id != 0x5245)
goto error_nodev;
/* signature of re */
num_entries = readw (io_mem + sub_addr);
sub_addr += 2; /* offset of RSRC_ENTRIES blk */
rsrc_list_ptr = alloc_ebda_rsrc_list ();
if (!rsrc_list_ptr ) {
rc = -ENOMEM;
goto out;
}
rsrc_list_ptr->format = format;
rsrc_list_ptr->num_entries = num_entries;
rsrc_list_ptr->phys_addr = sub_addr;
debug ("info about rsrc descriptor---\n");
debug ("format: %x\n", format);
debug ("num of rsrc: %x\n", num_entries);
debug ("offset of rsrc data structure enteries: %x\n ", sub_addr);
hs_complete = 1;
} else {
/* found rio table, blk_id == 0x4752 */
debug ("now enter io table ---\n");
debug ("rio blk id: %x\n", blk_id);
rio_table_ptr = kzalloc(sizeof(struct rio_table_hdr), GFP_KERNEL);
if (!rio_table_ptr) {
rc = -ENOMEM;
goto out;
}
rio_table_ptr->ver_num = readb (io_mem + offset);
rio_table_ptr->scal_count = readb (io_mem + offset + 1);
rio_table_ptr->riodev_count = readb (io_mem + offset + 2);
rio_table_ptr->offset = offset +3 ;
debug("info about rio table hdr ---\n");
debug("ver_num: %x\nscal_count: %x\nriodev_count: %x\noffset of rio table: %x\n ",
rio_table_ptr->ver_num, rio_table_ptr->scal_count,
rio_table_ptr->riodev_count, rio_table_ptr->offset);
rio_complete = 1;
}
}
if (!hs_complete && !rio_complete)
goto error_nodev;
/* Only version-3 RIO tables are parsed further. */
if (rio_table_ptr) {
if (rio_complete && rio_table_ptr->ver_num == 3) {
rc = ebda_rio_table ();
if (rc)
goto out;
}
}
rc = ebda_rsrc_controller ();
if (rc)
goto out;
rc = ebda_rsrc_rsrc ();
goto out;
error_nodev:
rc = -ENODEV;
out:
iounmap (io_mem);
return rc;
}
/*
* map info of scalability details and rio details from physical address
*/
/*
 * Map the RIO (remote I/O) detail entries from the EBDA image into
 * driver-private lists.  Scalability entries (12 bytes each) are skipped;
 * each 15-byte RIO device entry is read field-by-field and, depending on
 * its rio_type, queued on the chassis list (rio_vg_head, types 4/5) or the
 * expansion-box list (rio_lo_head, types 6/7).  Other types are discarded.
 * Returns 0 on success or -ENOMEM.
 */
static int __init ebda_rio_table (void)
{
	u16 offset;
	u8 i;
	struct rio_detail *rio_detail_ptr;

	offset = rio_table_ptr->offset;
	/* skip over the scalability entries; only the RIO details matter here */
	offset += 12 * rio_table_ptr->scal_count;
	for (i = 0; i < rio_table_ptr->riodev_count; i++) {
		rio_detail_ptr = kzalloc(sizeof(struct rio_detail), GFP_KERNEL);
		if (!rio_detail_ptr)
			return -ENOMEM;
		/* NOTE(review): bbar is a 32-bit read at an odd offset --
		 * presumably the EBDA layout is packed; confirm against spec. */
		rio_detail_ptr->rio_node_id = readb (io_mem + offset);
		rio_detail_ptr->bbar = readl (io_mem + offset + 1);
		rio_detail_ptr->rio_type = readb (io_mem + offset + 5);
		rio_detail_ptr->owner_id = readb (io_mem + offset + 6);
		rio_detail_ptr->port0_node_connect = readb (io_mem + offset + 7);
		rio_detail_ptr->port0_port_connect = readb (io_mem + offset + 8);
		rio_detail_ptr->port1_node_connect = readb (io_mem + offset + 9);
		rio_detail_ptr->port1_port_connect = readb (io_mem + offset + 10);
		rio_detail_ptr->first_slot_num = readb (io_mem + offset + 11);
		rio_detail_ptr->status = readb (io_mem + offset + 12);
		rio_detail_ptr->wpindex = readb (io_mem + offset + 13);
		rio_detail_ptr->chassis_num = readb (io_mem + offset + 14);
		/* types 4/5: chassis -- queue on the chassis list */
		if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5)
			list_add (&rio_detail_ptr->rio_detail_list, &rio_vg_head);
		/* types 6/7: expansion box -- queue on the expansion list */
		else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7)
			list_add (&rio_detail_ptr->rio_detail_list, &rio_lo_head);
		else
			/* any other type is of no interest -- drop it */
			kfree (rio_detail_ptr);
		offset += 15;
	}
	print_lo_info ();
	print_vg_info ();
	return 0;
}
/*
* reorganizing linked list of chassis
*/
/* Find the chassis entry with the given chassis number, or NULL if none. */
static struct opt_rio *search_opt_vg (u8 chassis_num)
{
	struct opt_rio *entry = NULL;
	struct opt_rio *found = NULL;

	list_for_each_entry(entry, &opt_vg_head, opt_rio_list) {
		if (entry->chassis_num == chassis_num) {
			found = entry;
			break;
		}
	}
	return found;
}
/*
 * Fold the per-wpeg chassis RIO details (rio_vg_head) into one opt_rio
 * entry per chassis number, tracking the smallest first-slot number and
 * the largest one seen ("middle_num") for that chassis.
 * Returns 0 on success or -ENOMEM.
 */
static int __init combine_wpg_for_chassis (void)
{
	struct opt_rio *opt_rio_ptr = NULL;
	struct rio_detail *rio_detail_ptr = NULL;

	list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) {
		opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num);
		if (!opt_rio_ptr) {
			/* first wpeg seen for this chassis -- create its entry */
			opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL);
			if (!opt_rio_ptr)
				return -ENOMEM;
			opt_rio_ptr->rio_type = rio_detail_ptr->rio_type;
			opt_rio_ptr->chassis_num = rio_detail_ptr->chassis_num;
			opt_rio_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
			opt_rio_ptr->middle_num = rio_detail_ptr->first_slot_num;
			list_add (&opt_rio_ptr->opt_rio_list, &opt_vg_head);
		} else {
			/* chassis already known: widen its slot-number range */
			opt_rio_ptr->first_slot_num = min (opt_rio_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
			opt_rio_ptr->middle_num = max (opt_rio_ptr->middle_num, rio_detail_ptr->first_slot_num);
		}
	}
	print_opt_vg ();
	return 0;
}
/*
* reorganizing linked list of expansion box
*/
/* Find the expansion-box entry with the given chassis number, or NULL. */
static struct opt_rio_lo *search_opt_lo (u8 chassis_num)
{
	struct opt_rio_lo *entry = NULL;
	struct opt_rio_lo *found = NULL;

	list_for_each_entry(entry, &opt_lo_head, opt_rio_lo_list) {
		if (entry->chassis_num == chassis_num) {
			found = entry;
			break;
		}
	}
	return found;
}
/*
 * Fold the per-wpeg expansion-box RIO details (rio_lo_head) into one
 * opt_rio_lo entry per chassis number, widening the slot-number range as
 * more wpegs are seen.  pack_count records whether one (1) or more than
 * one (2) wpeg was merged for the box.  Returns 0 or -ENOMEM.
 */
static int combine_wpg_for_expansion (void)
{
	struct opt_rio_lo *opt_rio_lo_ptr = NULL;
	struct rio_detail *rio_detail_ptr = NULL;

	list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) {
		opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num);
		if (!opt_rio_lo_ptr) {
			/* first wpeg seen for this expansion box */
			opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL);
			if (!opt_rio_lo_ptr)
				return -ENOMEM;
			opt_rio_lo_ptr->rio_type = rio_detail_ptr->rio_type;
			opt_rio_lo_ptr->chassis_num = rio_detail_ptr->chassis_num;
			opt_rio_lo_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
			opt_rio_lo_ptr->middle_num = rio_detail_ptr->first_slot_num;
			opt_rio_lo_ptr->pack_count = 1;
			list_add (&opt_rio_lo_ptr->opt_rio_lo_list, &opt_lo_head);
		} else {
			/* box already known: widen range, note multiple wpegs */
			opt_rio_lo_ptr->first_slot_num = min (opt_rio_lo_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
			opt_rio_lo_ptr->middle_num = max (opt_rio_lo_ptr->middle_num, rio_detail_ptr->first_slot_num);
			opt_rio_lo_ptr->pack_count = 2;
		}
	}
	return 0;
}
/* Since we don't know the max slot number per each chassis, hence go
* through the list of all chassis to find out the range
* Arguments: slot_num, 1st slot number of the chassis we think we are on,
* var (0 = chassis, 1 = expansion box)
*/
/*
 * Check whether any chassis (var == 0) or expansion box (var != 0) has a
 * first-slot number lying strictly between first_slot and slot_num; if so
 * slot_num cannot belong to the unit starting at first_slot.
 * Returns 0 when the range is clear, -ENODEV otherwise.
 */
static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
{
	struct opt_rio *vg;
	struct opt_rio_lo *lo;

	if (var) {
		list_for_each_entry(lo, &opt_lo_head, opt_rio_lo_list) {
			if (first_slot < lo->first_slot_num &&
			    slot_num >= lo->first_slot_num)
				return -ENODEV;
		}
	} else {
		list_for_each_entry(vg, &opt_vg_head, opt_rio_list) {
			if (first_slot < vg->first_slot_num &&
			    slot_num >= vg->first_slot_num)
				return -ENODEV;
		}
	}
	return 0;
}
/* Locate the expansion box (RXE) that owns slot_num, or NULL if none. */
static struct opt_rio_lo * find_rxe_num (u8 slot_num)
{
	struct opt_rio_lo *lo;

	list_for_each_entry(lo, &opt_lo_head, opt_rio_lo_list) {
		if (slot_num < lo->first_slot_num)
			continue;
		/* slot is at/after this box's first slot and no other unit
		 * starts in between -- it belongs to this box */
		if (first_slot_num (slot_num, lo->first_slot_num, 1) == 0)
			return lo;
	}
	return NULL;
}
/* Locate the chassis that owns slot_num, or NULL if none. */
static struct opt_rio * find_chassis_num (u8 slot_num)
{
	struct opt_rio *vg;

	list_for_each_entry(vg, &opt_vg_head, opt_rio_list) {
		if (slot_num < vg->first_slot_num)
			continue;
		/* slot is at/after this chassis' first slot and no other unit
		 * starts in between -- it belongs to this chassis */
		if (first_slot_num (slot_num, vg->first_slot_num, 0) == 0)
			return vg;
	}
	return NULL;
}
/* This routine will find out how many slots are in the chassis, so that
* the slot numbers for rxe100 would start from 1, and not from 7, or 6 etc
*/
/* This routine will find out how many slots are in the chassis, so that
 * the slot numbers for rxe100 would start from 1, and not from 7, or 6 etc.
 * It scans all known slots and returns one past the highest ending slot
 * number of any non-type-4 controller that still lies below slot_num.
 */
static u8 calculate_first_slot (u8 slot_num)
{
	u8 first_slot = 1;
	struct slot * slot_cur;

	list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) {
		if (slot_cur->ctrl) {
			/* only controllers other than type 4 (wpeg) contribute */
			if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num))
				first_slot = slot_cur->ctrl->ending_slot_num;
		}
	}
	return first_slot + 1;
}
#define SLOT_NAME_SIZE 30
/*
 * Build the sysfs-visible slot name ("chassis<N>slot<M>" or "rxe<N>slot<M>")
 * for a slot, using the RIO tables when present to work out which chassis
 * or expansion box the slot physically lives in.
 * Returns a pointer to a static buffer (not thread-safe), or NULL on error.
 */
static char *create_file_name (struct slot * slot_cur)
{
	struct opt_rio *opt_vg_ptr = NULL;
	struct opt_rio_lo *opt_lo_ptr = NULL;
	static char str[SLOT_NAME_SIZE];
	int which = 0; /* rxe = 1, chassis = 0 */
	u8 number = 1; /* either chassis or rxe # */
	u8 first_slot = 1;
	u8 slot_num;
	u8 flag = 0;

	if (!slot_cur) {
		err ("Structure passed is empty\n");
		return NULL;
	}
	slot_num = slot_cur->number;
	memset (str, 0, sizeof(str));
	/* only a version-3 RIO table carries usable chassis/RXE info */
	if (rio_table_ptr) {
		if (rio_table_ptr->ver_num == 3) {
			opt_vg_ptr = find_chassis_num (slot_num);
			opt_lo_ptr = find_rxe_num (slot_num);
		}
	}
	if (opt_vg_ptr) {
		if (opt_lo_ptr) {
			/* slot matches both: pick whichever unit starts closer to it */
			if ((slot_num - opt_vg_ptr->first_slot_num) > (slot_num - opt_lo_ptr->first_slot_num)) {
				number = opt_lo_ptr->chassis_num;
				first_slot = opt_lo_ptr->first_slot_num;
				which = 1; /* it is RXE */
			} else {
				first_slot = opt_vg_ptr->first_slot_num;
				number = opt_vg_ptr->chassis_num;
				which = 0;
			}
		} else {
			/* chassis only */
			first_slot = opt_vg_ptr->first_slot_num;
			number = opt_vg_ptr->chassis_num;
			which = 0;
		}
		++flag;
	} else if (opt_lo_ptr) {
		/* expansion box only */
		number = opt_lo_ptr->chassis_num;
		first_slot = opt_lo_ptr->first_slot_num;
		which = 1;
		++flag;
	} else if (rio_table_ptr) {
		if (rio_table_ptr->ver_num == 3) {
			/* if both NULL and we DO have correct RIO table in BIOS */
			return NULL;
		}
	}
	if (!flag) {
		/* no RIO info: fall back to controller type.
		 * NOTE(review): slot_cur->ctrl is dereferenced without a NULL
		 * check here, unlike calculate_first_slot() -- confirm callers
		 * guarantee ctrl is set at this point. */
		if (slot_cur->ctrl->ctlr_type == 4) {
			first_slot = calculate_first_slot (slot_num);
			which = 1;
		} else {
			which = 0;
		}
	}

	sprintf(str, "%s%dslot%d",
		which == 0 ? "chassis" : "rxe",
		number, slot_num - first_slot + 1);
	return str;
}
/*
 * Populate the hotplug core's slot-info structure (power, attention,
 * latch, adapter presence) from the hardware status of the slot.
 * Returns 0 on success or a negative errno from the HPC read.
 */
static int fillslotinfo(struct hotplug_slot *hotplug_slot)
{
	struct slot *slot;
	int rc = 0;

	if (!hotplug_slot || !hotplug_slot->private)
		return -EINVAL;
	slot = hotplug_slot->private;
	/* refresh slot->status / slot->ext_status from the controller */
	rc = ibmphp_hpc_readslot(slot, READ_ALLSTAT, NULL);
	if (rc)
		return rc;
	// power - enabled:1 not:0
	hotplug_slot->info->power_status = SLOT_POWER(slot->status);
	// attention - off:0, on:1, blinking:2
	hotplug_slot->info->attention_status = SLOT_ATTN(slot->status, slot->ext_status);
	// latch - open:1 closed:0
	hotplug_slot->info->latch_status = SLOT_LATCH(slot->status);
	// pci board - present:1 not:0
	if (SLOT_PRESENT (slot->status))
		hotplug_slot->info->adapter_status = 1;
	else
		hotplug_slot->info->adapter_status = 0;
/*
	if (slot->bus_on->supported_bus_mode
		&& (slot->bus_on->supported_speed == BUS_SPEED_66))
		hotplug_slot->info->max_bus_speed_status = BUS_SPEED_66PCIX;
	else
		hotplug_slot->info->max_bus_speed_status = slot->bus_on->supported_speed;
*/
	return rc;
}
/*
 * Hotplug-core release callback: tear down and free one slot.
 * Frees the hotplug_slot and its info, detaches the slot from its
 * controller and bus, unconfigures the card, and frees the slot itself.
 */
static void release_slot(struct hotplug_slot *hotplug_slot)
{
	struct slot *slot;

	if (!hotplug_slot || !hotplug_slot->private)
		return;
	slot = hotplug_slot->private;
	kfree(slot->hotplug_slot->info);
	kfree(slot->hotplug_slot);
	slot->ctrl = NULL;
	slot->bus_on = NULL;
	/* we don't want to actually remove the resources, since free_resources will do just that */
	ibmphp_unconfigure_card(&slot, -1);
	kfree (slot);
}
static struct pci_driver ibmphp_driver;
/*
* map info (ctlr-id, slot count, slot#.. bus count, bus#, ctlr type...) of
* each hpc from physical address to a list of hot plug controllers based on
* hpc descriptors.
*/
/*
 * Walk the EBDA hot plug controller (HPC) descriptors.  For each
 * controller: read its id and the per-slot / per-bus tables, read the
 * controller-type-specific access information (PCI / ISA / WPEG), build
 * the bus_info list, create a struct slot per physical slot, and finally
 * register every slot with the PCI hotplug core.
 * Returns 0 on success or a negative errno; on failure the partially
 * acquired resources of the current controller are released via the
 * goto-cleanup chain at the bottom.
 */
static int __init ebda_rsrc_controller (void)
{
	u16 addr, addr_slot, addr_bus;
	u8 ctlr_id, temp, bus_index;
	u16 ctlr, slot, bus;
	u16 slot_num, bus_num, index;
	struct hotplug_slot *hp_slot_ptr;
	struct controller *hpc_ptr;
	struct ebda_hpc_bus *bus_ptr;
	struct ebda_hpc_slot *slot_ptr;
	struct bus_info *bus_info_ptr1, *bus_info_ptr2;
	int rc;
	struct slot *tmp_slot;
	char name[SLOT_NAME_SIZE];

	addr = hpc_list_ptr->phys_addr;
	for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) {
		bus_index = 1;
		/* descriptor header: id, slot count, slot table, bus count, bus table */
		ctlr_id = readb (io_mem + addr);
		addr += 1;
		slot_num = readb (io_mem + addr);
		addr += 1;
		addr_slot = addr;	/* offset of slot structure */
		addr += (slot_num * 4);
		bus_num = readb (io_mem + addr);
		addr += 1;
		addr_bus = addr;	/* offset of bus */
		addr += (bus_num * 9);	/* offset of ctlr_type */
		temp = readb (io_mem + addr);
		addr += 1;
		/* init hpc structure */
		hpc_ptr = alloc_ebda_hpc (slot_num, bus_num);
		if (!hpc_ptr ) {
			rc = -ENOMEM;
			goto error_no_hpc;
		}
		hpc_ptr->ctlr_id = ctlr_id;
		hpc_ptr->ctlr_relative_id = ctlr;
		hpc_ptr->slot_count = slot_num;
		hpc_ptr->bus_count = bus_num;
		debug ("now enter ctlr data structure ---\n");
		debug ("ctlr id: %x\n", ctlr_id);
		debug ("ctlr_relative_id: %x\n", hpc_ptr->ctlr_relative_id);
		debug ("count of slots controlled by this ctlr: %x\n", slot_num);
		debug ("count of buses controlled by this ctlr: %x\n", bus_num);
		/* init slot structure, fetch slot, bus, cap... */
		slot_ptr = hpc_ptr->slots;
		for (slot = 0; slot < slot_num; slot++) {
			/* slot table is column-wise: four arrays of slot_num bytes each */
			slot_ptr->slot_num = readb (io_mem + addr_slot);
			slot_ptr->slot_bus_num = readb (io_mem + addr_slot + slot_num);
			slot_ptr->ctl_index = readb (io_mem + addr_slot + 2*slot_num);
			slot_ptr->slot_cap = readb (io_mem + addr_slot + 3*slot_num);
			// create bus_info lined list --- if only one slot per bus: slot_min = slot_max
			bus_info_ptr2 = ibmphp_find_same_bus_num (slot_ptr->slot_bus_num);
			if (!bus_info_ptr2) {
				/* first slot seen on this bus: create a bus_info entry */
				bus_info_ptr1 = kzalloc(sizeof(struct bus_info), GFP_KERNEL);
				if (!bus_info_ptr1) {
					rc = -ENOMEM;
					goto error_no_hp_slot;
				}
				bus_info_ptr1->slot_min = slot_ptr->slot_num;
				bus_info_ptr1->slot_max = slot_ptr->slot_num;
				bus_info_ptr1->slot_count += 1;
				bus_info_ptr1->busno = slot_ptr->slot_bus_num;
				bus_info_ptr1->index = bus_index++;
				bus_info_ptr1->current_speed = 0xff;
				bus_info_ptr1->current_bus_mode = 0xff;
				bus_info_ptr1->controller_id = hpc_ptr->ctlr_id;
				list_add_tail (&bus_info_ptr1->bus_info_list, &bus_info_head);
			} else {
				/* bus already known: widen its slot-number range */
				bus_info_ptr2->slot_min = min (bus_info_ptr2->slot_min, slot_ptr->slot_num);
				bus_info_ptr2->slot_max = max (bus_info_ptr2->slot_max, slot_ptr->slot_num);
				bus_info_ptr2->slot_count += 1;
			}
			// end of creating the bus_info linked list
			slot_ptr++;
			addr_slot += 1;
		}
		/* init bus structure */
		bus_ptr = hpc_ptr->buses;
		for (bus = 0; bus < bus_num; bus++) {
			bus_ptr->bus_num = readb (io_mem + addr_bus + bus);
			bus_ptr->slots_at_33_conv = readb (io_mem + addr_bus + bus_num + 8 * bus);
			bus_ptr->slots_at_66_conv = readb (io_mem + addr_bus + bus_num + 8 * bus + 1);
			bus_ptr->slots_at_66_pcix = readb (io_mem + addr_bus + bus_num + 8 * bus + 2);
			bus_ptr->slots_at_100_pcix = readb (io_mem + addr_bus + bus_num + 8 * bus + 3);
			bus_ptr->slots_at_133_pcix = readb (io_mem + addr_bus + bus_num + 8 * bus + 4);
			/* copy the speed capabilities into the matching bus_info entry */
			bus_info_ptr2 = ibmphp_find_same_bus_num (bus_ptr->bus_num);
			if (bus_info_ptr2) {
				bus_info_ptr2->slots_at_33_conv = bus_ptr->slots_at_33_conv;
				bus_info_ptr2->slots_at_66_conv = bus_ptr->slots_at_66_conv;
				bus_info_ptr2->slots_at_66_pcix = bus_ptr->slots_at_66_pcix;
				bus_info_ptr2->slots_at_100_pcix = bus_ptr->slots_at_100_pcix;
				bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix;
			}
			bus_ptr++;
		}
		hpc_ptr->ctlr_type = temp;
		/* controller-type-specific data: how the HPC is reached */
		switch (hpc_ptr->ctlr_type) {
		case 1:		/* PCI-attached controller */
			hpc_ptr->u.pci_ctlr.bus = readb (io_mem + addr);
			hpc_ptr->u.pci_ctlr.dev_fun = readb (io_mem + addr + 1);
			hpc_ptr->irq = readb (io_mem + addr + 2);
			addr += 3;
			debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n",
				hpc_ptr->u.pci_ctlr.bus,
				hpc_ptr->u.pci_ctlr.dev_fun, hpc_ptr->irq);
			break;
		case 0:		/* ISA-attached controller: claim its I/O region */
			hpc_ptr->u.isa_ctlr.io_start = readw (io_mem + addr);
			hpc_ptr->u.isa_ctlr.io_end = readw (io_mem + addr + 2);
			if (!request_region (hpc_ptr->u.isa_ctlr.io_start,
					(hpc_ptr->u.isa_ctlr.io_end - hpc_ptr->u.isa_ctlr.io_start + 1),
					"ibmphp")) {
				rc = -ENODEV;
				goto error_no_hp_slot;
			}
			hpc_ptr->irq = readb (io_mem + addr + 4);
			addr += 5;
			break;
		case 2:		/* WPEG-attached controller (i2c) */
		case 4:
			hpc_ptr->u.wpeg_ctlr.wpegbbar = readl (io_mem + addr);
			hpc_ptr->u.wpeg_ctlr.i2c_addr = readb (io_mem + addr + 4);
			hpc_ptr->irq = readb (io_mem + addr + 5);
			addr += 6;
			break;
		default:
			rc = -ENODEV;
			goto error_no_hp_slot;
		}
		//reorganize chassis' linked list
		combine_wpg_for_chassis ();
		combine_wpg_for_expansion ();
		hpc_ptr->revision = 0xff;
		hpc_ptr->options = 0xff;
		hpc_ptr->starting_slot_num = hpc_ptr->slots[0].slot_num;
		hpc_ptr->ending_slot_num = hpc_ptr->slots[slot_num-1].slot_num;
		// register slots with hpc core as well as create linked list of ibm slot
		for (index = 0; index < hpc_ptr->slot_count; index++) {
			hp_slot_ptr = kzalloc(sizeof(*hp_slot_ptr), GFP_KERNEL);
			if (!hp_slot_ptr) {
				rc = -ENOMEM;
				goto error_no_hp_slot;
			}
			hp_slot_ptr->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
			if (!hp_slot_ptr->info) {
				rc = -ENOMEM;
				goto error_no_hp_info;
			}
			tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL);
			if (!tmp_slot) {
				rc = -ENOMEM;
				goto error_no_slot;
			}
			tmp_slot->flag = 1;
			tmp_slot->capabilities = hpc_ptr->slots[index].slot_cap;
			/* decode the maximum supported bus speed from the cap bits */
			if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_133_MAX) == EBDA_SLOT_133_MAX)
				tmp_slot->supported_speed = 3;
			else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_100_MAX) == EBDA_SLOT_100_MAX)
				tmp_slot->supported_speed = 2;
			else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_66_MAX) == EBDA_SLOT_66_MAX)
				tmp_slot->supported_speed = 1;
			if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_PCIX_CAP) == EBDA_SLOT_PCIX_CAP)
				tmp_slot->supported_bus_mode = 1;
			else
				tmp_slot->supported_bus_mode = 0;
			tmp_slot->bus = hpc_ptr->slots[index].slot_bus_num;
			bus_info_ptr1 = ibmphp_find_same_bus_num (hpc_ptr->slots[index].slot_bus_num);
			if (!bus_info_ptr1) {
				kfree(tmp_slot);
				rc = -ENODEV;
				goto error;
			}
			tmp_slot->bus_on = bus_info_ptr1;
			bus_info_ptr1 = NULL;
			tmp_slot->ctrl = hpc_ptr;
			tmp_slot->ctlr_index = hpc_ptr->slots[index].ctl_index;
			tmp_slot->number = hpc_ptr->slots[index].slot_num;
			tmp_slot->hotplug_slot = hp_slot_ptr;
			hp_slot_ptr->private = tmp_slot;
			hp_slot_ptr->release = release_slot;
			rc = fillslotinfo(hp_slot_ptr);
			if (rc)
				goto error;
			rc = ibmphp_init_devno ((struct slot **) &hp_slot_ptr->private);
			if (rc)
				goto error;
			hp_slot_ptr->ops = &ibmphp_hotplug_slot_ops;
			// end of registering ibm slot with hotplug core
			list_add (& ((struct slot *)(hp_slot_ptr->private))->ibm_slot_list, &ibmphp_slot_head);
		}
		print_bus_info ();
		list_add (&hpc_ptr->ebda_hpc_list, &ebda_hpc_head );
	}			/* each hpc */
	/* all controllers parsed: register every slot with the hotplug core */
	list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) {
		snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot));
		pci_hp_register(tmp_slot->hotplug_slot,
			pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name);
	}
	print_ebda_hpc ();
	print_ibm_slot ();
	return 0;

/* unwind in reverse order of acquisition for the failing iteration */
error:
	kfree (hp_slot_ptr->private);
error_no_slot:
	kfree (hp_slot_ptr->info);
error_no_hp_info:
	kfree (hp_slot_ptr);
error_no_hp_slot:
	free_ebda_hpc (hpc_ptr);
error_no_hpc:
	iounmap (io_mem);
	return rc;
}
/*
* map info (bus, devfun, start addr, end addr..) of i/o, memory,
* pfm from the physical addr to a list of resource.
*/
/*
 * map info (bus, devfun, start addr, end addr..) of i/o, memory,
 * pfm from the physical addr to a list of resource.
 * I/O entries use 16-bit address fields (6 bytes of payload); memory and
 * prefetchable-memory entries use 32-bit fields (10 bytes of payload).
 * Returns 0 on success or -ENOMEM (after unmapping io_mem).
 */
static int __init ebda_rsrc_rsrc (void)
{
	u16 addr;
	short rsrc;
	u8 type, rsrc_type;
	struct ebda_pci_rsrc *rsrc_ptr;

	addr = rsrc_list_ptr->phys_addr;
	debug ("now entering rsrc land\n");
	debug ("offset of rsrc: %x\n", rsrc_list_ptr->phys_addr);
	for (rsrc = 0; rsrc < rsrc_list_ptr->num_entries; rsrc++) {
		type = readb (io_mem + addr);
		addr += 1;
		rsrc_type = type & EBDA_RSRC_TYPE_MASK;
		if (rsrc_type == EBDA_IO_RSRC_TYPE) {
			/* I/O resource: 16-bit start/end addresses */
			rsrc_ptr = alloc_ebda_pci_rsrc ();
			if (!rsrc_ptr) {
				iounmap (io_mem);
				return -ENOMEM;
			}
			rsrc_ptr->rsrc_type = type;
			rsrc_ptr->bus_num = readb (io_mem + addr);
			rsrc_ptr->dev_fun = readb (io_mem + addr + 1);
			rsrc_ptr->start_addr = readw (io_mem + addr + 2);
			rsrc_ptr->end_addr = readw (io_mem + addr + 4);
			addr += 6;
			debug ("rsrc from io type ----\n");
			debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
				rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr);
			list_add (&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head);
		}
		if (rsrc_type == EBDA_MEM_RSRC_TYPE || rsrc_type == EBDA_PFM_RSRC_TYPE) {
			/* memory or prefetchable memory: 32-bit start/end addresses */
			rsrc_ptr = alloc_ebda_pci_rsrc ();
			if (!rsrc_ptr ) {
				iounmap (io_mem);
				return -ENOMEM;
			}
			rsrc_ptr->rsrc_type = type;
			rsrc_ptr->bus_num = readb (io_mem + addr);
			rsrc_ptr->dev_fun = readb (io_mem + addr + 1);
			rsrc_ptr->start_addr = readl (io_mem + addr + 2);
			rsrc_ptr->end_addr = readl (io_mem + addr + 6);
			addr += 10;
			debug ("rsrc from mem or pfm ---\n");
			debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
				rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr);
			list_add (&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head);
		}
	}
	/* the resource-list header is no longer needed once entries are parsed */
	kfree (rsrc_list_ptr);
	rsrc_list_ptr = NULL;
	print_ebda_pci_rsrc ();
	return 0;
}
/* Return the number of hot plug controllers reported by the EBDA. */
u16 ibmphp_get_total_controllers (void)
{
	return hpc_list_ptr->num_ctlrs;
}
/* Look up a slot by its physical slot number; NULL if not found. */
struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num)
{
	struct slot *cur = NULL;
	struct slot *result = NULL;

	list_for_each_entry(cur, &ibmphp_slot_head, ibm_slot_list) {
		if (cur->number == physical_num) {
			result = cur;
			break;
		}
	}
	return result;
}
/* To find:
* - the smallest slot number
* - the largest slot number
* - the total number of the slots based on each bus
* (if only one slot per bus slot_min = slot_max )
*/
/* Find the bus_info entry for bus number `num`; NULL if none exists. */
struct bus_info *ibmphp_find_same_bus_num (u32 num)
{
	struct bus_info *cur = NULL;
	struct bus_info *result = NULL;

	list_for_each_entry(cur, &bus_info_head, bus_info_list) {
		if (cur->busno == num) {
			result = cur;
			break;
		}
	}
	return result;
}
/* Finding relative bus number, in order to map corresponding
* bus register
*/
/* Return the relative index of bus `num`, or -ENODEV if unknown. */
int ibmphp_get_bus_index (u8 num)
{
	struct bus_info *cur;
	int index = -ENODEV;

	list_for_each_entry(cur, &bus_info_head, bus_info_list) {
		if (cur->busno == num) {
			index = cur->index;
			break;
		}
	}
	return index;
}
/*
 * Free every bus_info entry on the global list.  Uses the _safe iterator
 * because each entry is freed while walking; the list head itself is left
 * as-is since the whole subsystem is being torn down.
 */
void ibmphp_free_bus_info_queue (void)
{
	struct bus_info *bus_info;
	struct list_head *list;
	struct list_head *next;

	list_for_each_safe (list, next, &bus_info_head ) {
		bus_info = list_entry (list, struct bus_info, bus_info_list);
		kfree (bus_info);
	}
}
/*
 * Free every controller on the ebda_hpc list.  ISA controllers give back
 * their claimed I/O region; if any PCI-type controller exists the PCI
 * driver is unregistered exactly once (pci_flag guards against repeats).
 */
void ibmphp_free_ebda_hpc_queue (void)
{
	struct controller *controller = NULL;
	struct list_head *list;
	struct list_head *next;
	int pci_flag = 0;

	list_for_each_safe (list, next, &ebda_hpc_head) {
		controller = list_entry (list, struct controller, ebda_hpc_list);
		if (controller->ctlr_type == 0)
			release_region (controller->u.isa_ctlr.io_start, (controller->u.isa_ctlr.io_end - controller->u.isa_ctlr.io_start + 1));
		else if ((controller->ctlr_type == 1) && (!pci_flag)) {
			++pci_flag;
			pci_unregister_driver (&ibmphp_driver);
		}
		free_ebda_hpc (controller);
	}
}
/* Free every EBDA PCI resource entry on the global list. */
void ibmphp_free_ebda_pci_rsrc_queue (void)
{
	struct list_head *pos;
	struct list_head *tmp;

	list_for_each_safe (pos, tmp, &ibmphp_ebda_pci_rsrc_head) {
		struct ebda_pci_rsrc *rsrc =
			list_entry (pos, struct ebda_pci_rsrc, ebda_pci_rsrc_list);
		kfree (rsrc);
	}
}
/* PCI match table: only IBM's own hotplug controller device/subdevice,
 * further restricted to the PCI hotplug controller class code. */
static struct pci_device_id id_table[] = {
	{
		.vendor		= PCI_VENDOR_ID_IBM,
		.device		= HPC_DEVICE_ID,
		.subvendor	= PCI_VENDOR_ID_IBM,
		.subdevice	= HPC_SUBSYSTEM_ID,
		.class		= ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
	}, {}
};

MODULE_DEVICE_TABLE(pci, id_table);

static int ibmphp_probe (struct pci_dev *, const struct pci_device_id *);

/* PCI driver used only to claim PCI-attached hotplug controllers. */
static struct pci_driver ibmphp_driver = {
	.name		= "ibmphp",
	.id_table	= id_table,
	.probe		= ibmphp_probe,
};
/*
 * Register the ibmphp PCI driver, but only if at least one PCI-type
 * hotplug controller was discovered.  Returns 0 or the registration error.
 */
int ibmphp_register_pci (void)
{
	struct controller *ctrl;

	list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
		if (ctrl->ctlr_type != 1)
			continue;
		return pci_register_driver(&ibmphp_driver);
	}
	return 0;
}
/*
 * PCI probe callback: match the probed device against the PCI-type
 * controllers parsed from the EBDA (by bus number and devfn) and record
 * the pci_dev on the matching controller.  Returns 0 on a match,
 * -ENODEV otherwise.
 */
static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
{
	struct controller *ctrl;

	debug ("inside ibmphp_probe\n");

	list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
		if (ctrl->ctlr_type == 1) {
			if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) {
				ctrl->ctrl_dev = dev;
				debug ("found device!!!\n");
				debug ("dev->device = %x, dev->subsystem_device = %x\n", dev->device, dev->subsystem_device);
				return 0;
			}
		}
	}
	return -ENODEV;
}
| gpl-2.0 |
vakkov/android_kernel_samsung_tuna | drivers/net/mace.c | 3543 | 27978 | /*
* Network device driver for the MACE ethernet controller on
* Apple Powermacs. Assumes it's under a DBDMA controller.
*
* Copyright (C) 1996 Paul Mackerras.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/macio.h>
#include "mace.h"
static int port_aaui = -1;
#define N_RX_RING 8
#define N_TX_RING 6
#define MAX_TX_ACTIVE 1
#define NCMDS_TX 1 /* dma commands per element in tx ring */
#define RX_BUFLEN (ETH_FRAME_LEN + 8)
#define TX_TIMEOUT HZ /* 1 second */
/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV 0x0941
/* Bits in transmit DMA status */
#define TX_DMA_ERR 0x80
/* Per-device private state for one MACE ethernet controller. */
struct mace_data {
	volatile struct mace __iomem *mace;	/* mapped MACE chip registers */
	volatile struct dbdma_regs __iomem *tx_dma; /* mapped transmit DBDMA regs */
	int tx_dma_intr;			/* transmit DMA interrupt number */
	volatile struct dbdma_regs __iomem *rx_dma; /* mapped receive DBDMA regs */
	int rx_dma_intr;			/* receive DMA interrupt number */
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct sk_buff *rx_bufs[N_RX_RING];	/* skbs attached to the rx ring */
	int rx_fill;				/* index of last filled rx slot */
	int rx_empty;				/* index of next rx slot to drain */
	struct sk_buff *tx_bufs[N_TX_RING];	/* skbs queued on the tx ring */
	int tx_fill;				/* next free tx ring slot */
	int tx_empty;				/* oldest outstanding tx ring slot */
	unsigned char maccc;			/* saved copy of the maccc register */
	unsigned char tx_fullup;		/* set when the tx ring is full */
	unsigned char tx_active;		/* count of tx commands in flight */
	unsigned char tx_bad_runt;		/* flag used during runt-frame recovery */
	struct timer_list tx_timeout;		/* watchdog for stuck transmits */
	int timeout_active;			/* nonzero while tx_timeout is armed */
	int port_aaui;				/* nonzero: use the AAUI port */
	int chipid;				/* chip revision read at probe time */
	struct macio_dev *mdev;			/* underlying macio device */
	spinlock_t lock;			/* serializes register/ring access */
};
/*
* Number of bytes of private data per MACE: allow enough for
* the rx and tx dma commands plus a branch dma command each,
* and another 16 bytes to allow us to align the dma command
* buffers on a 16 byte boundary.
*/
#define PRIV_BYTES (sizeof(struct mace_data) \
+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(unsigned long data);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);
/*
* If we can't get a skbuff when we need it, we use this area for DMA.
*/
static unsigned char *dummy_buf;
/* net_device operations: standard ethernet helpers plus MACE callbacks. */
static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_set_multicast_list	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
/*
 * Probe one MACE device: validate the OF resources, read the MAC address
 * property, map the chip and both DBDMA register blocks, carve the DMA
 * command lists out of the netdev private area, request the three
 * interrupts and register the net device.  All failures unwind via the
 * goto-cleanup chain at the bottom.
 */
static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	struct device_node *mace = macio_get_of_node(mdev);
	struct net_device *dev;
	struct mace_data *mp;
	const unsigned char *addr;
	int j, rev, rc = -EBUSY;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
		       mace->full_name);
		return -ENODEV;
	}

	addr = of_get_property(mace, "mac-address", NULL);
	if (addr == NULL) {
		addr = of_get_property(mace, "local-mac-address", NULL);
		if (addr == NULL) {
			printk(KERN_ERR "Can't get mac-address for MACE %s\n",
			       mace->full_name);
			return -ENODEV;
		}
	}

	/*
	 * lazy allocate the driver-wide dummy buffer. (Note that we
	 * never have more than one MACE in the system anyway)
	 */
	if (dummy_buf == NULL) {
		dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
		if (dummy_buf == NULL) {
			printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n");
			return -ENOMEM;
		}
	}

	if (macio_request_resources(mdev, "mace")) {
		printk(KERN_ERR "MACE: can't request IO resources !\n");
		return -EBUSY;
	}

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		printk(KERN_ERR "MACE: can't allocate ethernet device !\n");
		rc = -ENOMEM;
		goto err_release;
	}
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);

	mp = netdev_priv(dev);
	mp->mdev = mdev;
	macio_set_drvdata(mdev, dev);

	dev->base_addr = macio_resource_start(mdev, 0);
	mp->mace = ioremap(dev->base_addr, 0x1000);
	if (mp->mace == NULL) {
		printk(KERN_ERR "MACE: can't map IO resources !\n");
		rc = -ENOMEM;
		goto err_free;
	}
	dev->irq = macio_irq(mdev, 0);

	/* addresses starting 00:A0 are stored bit-reversed in the property --
	 * presumably a firmware quirk; un-reverse each byte in that case */
	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j) {
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
	}
	mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
			in_8(&mp->mace->chipid_lo);

	mp = netdev_priv(dev);
	mp->maccc = ENXMT | ENRCV;

	mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
	if (mp->tx_dma == NULL) {
		printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
		rc = -ENOMEM;
		goto err_unmap_io;
	}
	mp->tx_dma_intr = macio_irq(mdev, 1);

	mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
	if (mp->rx_dma == NULL) {
		printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
		rc = -ENOMEM;
		goto err_unmap_tx_dma;
	}
	mp->rx_dma_intr = macio_irq(mdev, 2);

	/* DMA command lists live in the netdev private area, 16-byte aligned */
	mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
	mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

	memset((char *) mp->tx_cmds, 0,
	       (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
	init_timer(&mp->tx_timeout);
	spin_lock_init(&mp->lock);
	mp->timeout_active = 0;

	if (port_aaui >= 0)
		mp->port_aaui = port_aaui;
	else {
		/* Apple Network Server uses the AAUI port */
		if (of_machine_is_compatible("AAPL,ShinerESB"))
			mp->port_aaui = 1;
		else {
#ifdef CONFIG_MACE_AAUI_PORT
			mp->port_aaui = 1;
#else
			mp->port_aaui = 0;
#endif
		}
	}

	dev->netdev_ops = &mace_netdev_ops;

	/*
	 * Most of what is below could be moved to mace_open()
	 */
	mace_reset(dev);

	rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
		goto err_unmap_rx_dma;
	}
	rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
		goto err_free_irq;
	}
	rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
		goto err_free_tx_irq;
	}

	rc = register_netdev(dev);
	if (rc) {
		printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
		goto err_free_rx_irq;
	}

	printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
	       dev->name, dev->dev_addr,
	       mp->chipid >> 8, mp->chipid & 0xff);

	return 0;

 err_free_rx_irq:
	free_irq(macio_irq(mdev, 2), dev);
 err_free_tx_irq:
	free_irq(macio_irq(mdev, 1), dev);
 err_free_irq:
	free_irq(macio_irq(mdev, 0), dev);
 err_unmap_rx_dma:
	iounmap(mp->rx_dma);
 err_unmap_tx_dma:
	iounmap(mp->tx_dma);
 err_unmap_io:
	iounmap(mp->mace);
 err_free:
	free_netdev(dev);
 err_release:
	macio_release_resources(mdev);

	return rc;
}
/*
 * Remove one MACE device: unregister the netdev, release the three
 * interrupts, unmap all register blocks, free the netdev and give the
 * macio resources back.  Exact reverse of mace_probe().
 */
static int __devexit mace_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct mace_data *mp;

	BUG_ON(dev == NULL);

	macio_set_drvdata(mdev, NULL);

	mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(mp->tx_dma_intr, dev);
	free_irq(mp->rx_dma_intr, dev);

	iounmap(mp->rx_dma);
	iounmap(mp->tx_dma);
	iounmap(mp->mace);

	free_netdev(dev);

	macio_release_resources(mdev);

	return 0;
}
/*
 * Reset a DBDMA channel by clearing its WAKE/FLUSH/PAUSE/RUN bits, then
 * wait (bounded, up to ~200us) for the RUN bit to drop.
 */
static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
	int i;

	out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

	/*
	 * Yes this looks peculiar, but apparently it needs to be this
	 * way on some machines.
	 */
	for (i = 200; i > 0; --i)
		if (ld_le32(&dma->control) & RUN)
			udelay(1);
}
/*
 * Hard-reset the MACE chip and bring it to a known quiescent state:
 * soft-reset, mask all interrupts, program FIFO/transmit defaults,
 * reload the station address, clear the multicast filter and select
 * the AAUI or GPSI media port.  tx/rx stay disabled (maccc = 0).
 */
static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	int i;

	/* soft-reset the chip; poll up to 200 times for SWRST to clear */
	i = 200;
	while (--i) {
		out_8(&mb->biucc, SWRST);
		if (in_8(&mb->biucc) & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "mace: cannot reset chip!\n");
		return;
	}

	out_8(&mb->imr, 0xff);	/* disable all intrs for now */
	i = in_8(&mb->ir);	/* read ir -- presumably acks pending irqs; confirm vs datasheet */
	out_8(&mb->maccc, 0);	/* turn off tx, rx */
	out_8(&mb->biucc, XMTSP_64);
	out_8(&mb->utr, RTRD);
	out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
	out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
	out_8(&mb->rcvfc, 0);

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, LOGADDR);
	else {
		out_8(&mb->iac, ADDRCHG | LOGADDR);
		/* wait for the chip to acknowledge the address-change request */
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		out_8(&mb->ladrf, 0);

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);

	if (mp->port_aaui)
		out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
	else
		out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
/*
 * Write the station address into the chip's physical-address registers
 * and mirror it into dev->dev_addr.  Caller must hold mp->lock (or be in
 * a context where the chip is quiesced, as in mace_reset).
 */
static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address; the broken revision must not use ADDRCHG */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, PHYADDR);
	else {
		out_8(&mb->iac, ADDRCHG | PHYADDR);
		/* wait for the chip to acknowledge the address-change request */
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	/* successive writes to padr step through the six address bytes */
	for (i = 0; i < 6; ++i)
		out_8(&mb->padr, dev->dev_addr[i] = p[i]);
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);
}
static int mace_set_address(struct net_device *dev, void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
unsigned long flags;
spin_lock_irqsave(&mp->lock, flags);
__mace_set_address(dev, addr);
/* note: setting ADDRCHG clears ENRCV */
out_8(&mb->maccc, mp->maccc);
spin_unlock_irqrestore(&mp->lock, flags);
return 0;
}
/* Release every sk_buff still attached to the rx and tx rings. */
static inline void mace_clean_rings(struct mace_data *mp)
{
	int idx;

	/* Drop any receive buffers still attached to the rx ring. */
	for (idx = 0; idx < N_RX_RING; ++idx) {
		if (mp->rx_bufs[idx] == NULL)
			continue;
		dev_kfree_skb(mp->rx_bufs[idx]);
		mp->rx_bufs[idx] = NULL;
	}

	/* Drop transmit buffers still queued between tx_empty and tx_fill. */
	idx = mp->tx_empty;
	while (idx != mp->tx_fill) {
		dev_kfree_skb(mp->tx_bufs[idx]);
		idx = (idx + 1) % N_TX_RING;
	}
}
/*
 * ndo_open: reset the chip, build the receive DBDMA command ring
 * (one INPUT_LAST+INTR_ALWAYS command per buffer, terminated by a
 * DBDMA_STOP and a branch back to the start), start rx DMA, prime
 * the tx ring's wrap branch, reset tx state, and finally enable
 * the transceiver and all interrupts except receive (receive
 * completion is signalled through the rx DMA channel instead).
 * Returns 0; buffer-allocation failures fall back to dummy_buf
 * rather than failing the open.
 */
static int mace_open(struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
volatile struct dbdma_cmd *cp;
int i;
struct sk_buff *skb;
unsigned char *data;
/* reset the chip */
mace_reset(dev);
/* initialize list of sk_buffs for receiving and set up recv dma */
mace_clean_rings(mp);
memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
cp = mp->rx_cmds;
for (i = 0; i < N_RX_RING - 1; ++i) {
skb = dev_alloc_skb(RX_BUFLEN + 2);
if (!skb) {
/* no memory: receive into the shared scratch buffer and drop later */
data = dummy_buf;
} else {
skb_reserve(skb, 2); /* so IP header lands on 4-byte bdry */
data = skb->data;
}
mp->rx_bufs[i] = skb;
st_le16(&cp->req_count, RX_BUFLEN);
st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
st_le32(&cp->phy_addr, virt_to_bus(data));
cp->xfer_status = 0;
++cp;
}
/* last ring slot is a stop command, not a buffer */
mp->rx_bufs[i] = NULL;
st_le16(&cp->command, DBDMA_STOP);
mp->rx_fill = i;
mp->rx_empty = 0;
/* Put a branch back to the beginning of the receive command list */
++cp;
st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));
/* start rx dma */
out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
out_le32(&rd->control, (RUN << 16) | RUN);
/* put a branch at the end of the tx command list */
cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));
/* reset tx dma */
out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
mp->tx_fill = 0;
mp->tx_empty = 0;
mp->tx_fullup = 0;
mp->tx_active = 0;
mp->tx_bad_runt = 0;
/* turn it on! */
out_8(&mb->maccc, mp->maccc);
/* enable all interrupts except receive interrupts */
out_8(&mb->imr, RCVINT);
return 0;
}
/*
 * ndo_stop: quiesce the chip (tx/rx off, all interrupts masked),
 * stop both DBDMA channels by clearing their run bits, and release
 * every sk_buff still held in the rings.  Always returns 0.
 */
static int mace_close(struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
/* disable rx and tx */
out_8(&mb->maccc, 0);
out_8(&mb->imr, 0xff); /* disable all intrs */
/* disable rx and tx dma */
st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
mace_clean_rings(mp);
return 0;
}
/*
 * (Re)arm the transmit watchdog timer to fire TX_TIMEOUT jiffies
 * from now; an already-pending timer is cancelled first.  All
 * visible call sites invoke this with mp->lock held, which
 * serialises timeout_active against mace_tx_timeout().
 */
static inline void mace_set_timeout(struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
if (mp->timeout_active)
del_timer(&mp->tx_timeout);
mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
mp->tx_timeout.function = mace_tx_timeout;
mp->tx_timeout.data = (unsigned long) dev;
add_timer(&mp->tx_timeout);
mp->timeout_active = 1;
}
/*
 * ndo_start_xmit: queue one skb on the tx DBDMA ring.
 * Returns NETDEV_TX_BUSY (and stops the queue) when the ring is
 * full, NETDEV_TX_OK otherwise.  The DMA command block is filled
 * in outside the lock; the lock is retaken to publish tx_fill and
 * to kick the DMA channel when the chip isn't in the bad-runt
 * recovery state and fewer than MAX_TX_ACTIVE frames are in flight
 * (otherwise mace_interrupt() will start this command later).
 */
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
volatile struct dbdma_cmd *cp, *np;
unsigned long flags;
int fill, next, len;
/* see if there's a free slot in the tx ring */
spin_lock_irqsave(&mp->lock, flags);
fill = mp->tx_fill;
next = fill + 1;
if (next >= N_TX_RING)
next = 0;
if (next == mp->tx_empty) {
netif_stop_queue(dev);
mp->tx_fullup = 1;
spin_unlock_irqrestore(&mp->lock, flags);
return NETDEV_TX_BUSY; /* can't take it at the moment */
}
spin_unlock_irqrestore(&mp->lock, flags);
/* partially fill in the dma command block */
len = skb->len;
if (len > ETH_FRAME_LEN) {
/* truncate oversized frames rather than overrunning the ring buffer */
printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
len = ETH_FRAME_LEN;
}
mp->tx_bufs[fill] = skb;
cp = mp->tx_cmds + NCMDS_TX * fill;
st_le16(&cp->req_count, len);
st_le32(&cp->phy_addr, virt_to_bus(skb->data));
/* make the slot after ours a stop command so DMA halts there */
np = mp->tx_cmds + NCMDS_TX * next;
out_le16(&np->command, DBDMA_STOP);
/* poke the tx dma channel */
spin_lock_irqsave(&mp->lock, flags);
mp->tx_fill = next;
if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
out_le16(&cp->xfer_status, 0);
out_le16(&cp->command, OUTPUT_LAST);
out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
++mp->tx_active;
mace_set_timeout(dev);
}
/* stop the queue pre-emptively if the next slot would fill the ring */
if (++next >= N_TX_RING)
next = 0;
if (next == mp->tx_empty)
netif_stop_queue(dev);
spin_unlock_irqrestore(&mp->lock, flags);
return NETDEV_TX_OK;
}
/*
 * ndo_set_multicast: program promiscuous mode or the 64-bit logical
 * address (multicast hash) filter.  For each multicast address the
 * top 6 bits of its little-endian CRC-32 select one bit in the
 * 8-byte ladrf filter; IFF_ALLMULTI sets all 64 bits.  As with the
 * station address, the LOGADDR/ADDRCHG handshake clears ENRCV, so
 * maccc is rewritten at the end.  Runs under mp->lock.
 */
static void mace_set_multicast(struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
int i;
u32 crc;
unsigned long flags;
spin_lock_irqsave(&mp->lock, flags);
mp->maccc &= ~PROM;
if (dev->flags & IFF_PROMISC) {
mp->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
struct netdev_hw_addr *ha;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0xff;
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(6, ha->addr);
i = crc >> 26; /* bit number in multicast_filter */
multicast_filter[i >> 3] |= 1 << (i & 7);
}
}
#if 0
printk("Multicast filter :");
for (i = 0; i < 8; i++)
printk("%02x ", multicast_filter[i]);
printk("\n");
#endif
/* load the filter via the same handshake used for the station address */
if (mp->chipid == BROKEN_ADDRCHG_REV)
out_8(&mb->iac, LOGADDR);
else {
out_8(&mb->iac, ADDRCHG | LOGADDR);
while ((in_8(&mb->iac) & ADDRCHG) != 0)
;
}
for (i = 0; i < 8; ++i)
out_8(&mb->ladrf, multicast_filter[i]);
if (mp->chipid != BROKEN_ADDRCHG_REV)
out_8(&mb->iac, 0);
}
/* reset maccc */
out_8(&mb->maccc, mp->maccc);
spin_unlock_irqrestore(&mp->lock, flags);
}
/*
 * Fold the miscellaneous interrupt causes in 'intr' into the netdev
 * statistics.  The mpc/rntpc hardware counters are 8-bit and clear
 * on read, so they are always read and accumulated; the MPCO/RNTPCO
 * bits signal that a counter overflowed, adding 256 to the total.
 * Babble/jabber reports are rate-limited to 4 messages each via
 * function-static counters (callers hold mp->lock, which serialises
 * the increments).
 */
static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
{
volatile struct mace __iomem *mb = mp->mace;
static int mace_babbles, mace_jabbers;
if (intr & MPCO)
dev->stats.rx_missed_errors += 256;
dev->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */
if (intr & RNTPCO)
dev->stats.rx_length_errors += 256;
dev->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */
if (intr & CERR)
++dev->stats.tx_heartbeat_errors;
if (intr & BABBLE)
if (mace_babbles++ < 4)
printk(KERN_DEBUG "mace: babbling transmitter\n");
if (intr & JABBER)
if (mace_jabbers++ < 4)
printk(KERN_DEBUG "mace: jabbering transceiver\n");
}
/*
 * Main chip interrupt handler.  Under mp->lock it:
 *  1. reads and dispatches miscellaneous interrupt causes;
 *  2. reaps completed transmit frames: for each valid transmit
 *     status word it checks for the chip's stuck-two-bytes quirk
 *     (documented inline), updates error/byte/packet statistics,
 *     and frees the skb;
 *  3. wakes the queue if ring space was reclaimed;
 *  4. starts further queued tx DMA commands, up to MAX_TX_ACTIVE
 *     frames in flight.
 * Receive completion is handled separately in mace_rxdma_intr().
 */
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
volatile struct dbdma_cmd *cp;
int intr, fs, i, stat, x;
int xcount, dstat;
unsigned long flags;
/* static int mace_last_fs, mace_last_xcount; */
spin_lock_irqsave(&mp->lock, flags);
intr = in_8(&mb->ir); /* read interrupt register */
in_8(&mb->xmtrc); /* get retries */
mace_handle_misc_intrs(mp, intr, dev);
i = mp->tx_empty;
/* XMTSV set means a transmit status word is ready to be read */
while (in_8(&mb->pr) & XMTSV) {
del_timer(&mp->tx_timeout);
mp->timeout_active = 0;
/*
 * Clear any interrupt indication associated with this status
 * word. This appears to unlatch any error indication from
 * the DMA controller.
 */
intr = in_8(&mb->ir);
if (intr != 0)
mace_handle_misc_intrs(mp, intr, dev);
if (mp->tx_bad_runt) {
/* this status word belongs to the deliberate runt sent below;
 * consume it and restore normal auto-padding */
fs = in_8(&mb->xmtfs);
mp->tx_bad_runt = 0;
out_8(&mb->xmtfc, AUTO_PAD_XMIT);
continue;
}
dstat = ld_le32(&td->status);
/* stop DMA controller */
out_le32(&td->control, RUN << 16);
/*
 * xcount is the number of complete frames which have been
 * written to the fifo but for which status has not been read.
 */
xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
if (xcount == 0 || (dstat & DEAD)) {
/*
 * If a packet was aborted before the DMA controller has
 * finished transferring it, it seems that there are 2 bytes
 * which are stuck in some buffer somewhere. These will get
 * transmitted as soon as we read the frame status (which
 * reenables the transmit data transfer request). Turning
 * off the DMA controller and/or resetting the MACE doesn't
 * help. So we disable auto-padding and FCS transmission
 * so the two bytes will only be a runt packet which should
 * be ignored by other stations.
 */
out_8(&mb->xmtfc, DXMTFCS);
}
fs = in_8(&mb->xmtfs);
if ((fs & XMTSV) == 0) {
printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
fs, xcount, dstat);
mace_reset(dev);
/*
 * XXX mace likes to hang the machine after a xmtfs error.
 * This is hard to reproduce, reseting *may* help
 */
}
cp = mp->tx_cmds + NCMDS_TX * i;
stat = ld_le16(&cp->xfer_status);
if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
/*
 * Check whether there were in fact 2 bytes written to
 * the transmit FIFO.
 */
udelay(1);
x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
if (x != 0) {
/* there were two bytes with an end-of-packet indication */
mp->tx_bad_runt = 1;
mace_set_timeout(dev);
} else {
/*
 * Either there weren't the two bytes buffered up, or they
 * didn't have an end-of-packet indication.
 * We flush the transmit FIFO just in case (by setting the
 * XMTFWU bit with the transmitter disabled).
 */
out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
udelay(1);
out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
out_8(&mb->xmtfc, AUTO_PAD_XMIT);
}
}
/* dma should have finished */
if (i == mp->tx_fill) {
/* got a status word but no frame was queued at this slot */
printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
fs, xcount, dstat);
continue;
}
/* Update stats */
if (fs & (UFLO|LCOL|LCAR|RTRY)) {
++dev->stats.tx_errors;
if (fs & LCAR)
++dev->stats.tx_carrier_errors;
if (fs & (UFLO|LCOL|RTRY))
++dev->stats.tx_aborted_errors;
} else {
dev->stats.tx_bytes += mp->tx_bufs[i]->len;
++dev->stats.tx_packets;
}
dev_kfree_skb_irq(mp->tx_bufs[i]);
--mp->tx_active;
if (++i >= N_TX_RING)
i = 0;
#if 0
mace_last_fs = fs;
mace_last_xcount = xcount;
#endif
}
/* at least one frame was reaped: ring has space again */
if (i != mp->tx_empty) {
mp->tx_fullup = 0;
netif_wake_queue(dev);
}
mp->tx_empty = i;
/* i now points at the first queued-but-not-started command */
i += mp->tx_active;
if (i >= N_TX_RING)
i -= N_TX_RING;
if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
do {
/* set up the next one */
cp = mp->tx_cmds + NCMDS_TX * i;
out_le16(&cp->xfer_status, 0);
out_le16(&cp->command, OUTPUT_LAST);
++mp->tx_active;
if (++i >= N_TX_RING)
i = 0;
} while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
mace_set_timeout(dev);
}
spin_unlock_irqrestore(&mp->lock, flags);
return IRQ_HANDLED;
}
/*
 * Transmit watchdog (timer callback armed by mace_set_timeout()).
 * If transmits are genuinely stuck it resets the chip and both DMA
 * channels, restarts receive DMA from its current command pointer,
 * drops the frame at the head of the tx ring (counted as a tx
 * error), restarts any remaining queued transmit, and re-enables
 * the chip.  Bails out early if nothing is actually pending.
 */
static void mace_tx_timeout(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
volatile struct dbdma_cmd *cp;
unsigned long flags;
int i;
spin_lock_irqsave(&mp->lock, flags);
mp->timeout_active = 0;
/* nothing in flight and no runt recovery pending: spurious timeout */
if (mp->tx_active == 0 && !mp->tx_bad_runt)
goto out;
/* update various counters */
mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);
cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;
/* turn off both tx and rx and reset the chip */
out_8(&mb->maccc, 0);
printk(KERN_ERR "mace: transmit timeout - resetting\n");
dbdma_reset(td);
mace_reset(dev);
/* restart rx dma */
cp = bus_to_virt(ld_le32(&rd->cmdptr));
dbdma_reset(rd);
out_le16(&cp->xfer_status, 0);
out_le32(&rd->cmdptr, virt_to_bus(cp));
out_le32(&rd->control, (RUN << 16) | RUN);
/* fix up the transmit side */
i = mp->tx_empty;
mp->tx_active = 0;
++dev->stats.tx_errors;
if (mp->tx_bad_runt) {
mp->tx_bad_runt = 0;
} else if (i != mp->tx_fill) {
/* discard the frame that timed out */
dev_kfree_skb(mp->tx_bufs[i]);
if (++i >= N_TX_RING)
i = 0;
mp->tx_empty = i;
}
mp->tx_fullup = 0;
netif_wake_queue(dev);
/* restart the next queued frame, if any */
if (i != mp->tx_fill) {
cp = mp->tx_cmds + NCMDS_TX * i;
out_le16(&cp->xfer_status, 0);
out_le16(&cp->command, OUTPUT_LAST);
out_le32(&td->cmdptr, virt_to_bus(cp));
out_le32(&td->control, (RUN << 16) | RUN);
++mp->tx_active;
mace_set_timeout(dev);
}
/* turn it back on */
out_8(&mb->imr, RCVINT);
out_8(&mb->maccc, mp->maccc);
out:
spin_unlock_irqrestore(&mp->lock, flags);
}
/*
 * Tx DMA channel interrupt: transmit completion is processed from
 * the chip interrupt (mace_interrupt()), so this handler only
 * acknowledges the IRQ.
 */
static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
/*
 * Rx DMA channel interrupt.  Two passes under mp->lock:
 *  1. reap completed receive commands from rx_empty to rx_fill,
 *     validating the 4-byte receive frame status appended to the
 *     data, trimming the trailing status/FCS octets, and handing
 *     good frames to the stack with netif_rx();
 *  2. refill reaped slots with fresh buffers (or dummy_buf when
 *     allocation fails), rewrite their DMA commands, and wake the
 *     channel if any slot was recycled.
 */
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct mace_data *mp = netdev_priv(dev);
volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
volatile struct dbdma_cmd *cp, *np;
int i, nb, stat, next;
struct sk_buff *skb;
unsigned frame_status;
static int mace_lost_status;
unsigned char *data;
unsigned long flags;
spin_lock_irqsave(&mp->lock, flags);
for (i = mp->rx_empty; i != mp->rx_fill; ) {
cp = mp->rx_cmds + i;
stat = ld_le16(&cp->xfer_status);
if ((stat & ACTIVE) == 0) {
/* this slot has no status yet; if the NEXT slot does, the
 * controller skipped writing ours - note it and press on */
next = i + 1;
if (next >= N_RX_RING)
next = 0;
np = mp->rx_cmds + next;
if (next != mp->rx_fill &&
(ld_le16(&np->xfer_status) & ACTIVE) != 0) {
printk(KERN_DEBUG "mace: lost a status word\n");
++mace_lost_status;
} else
break;
}
/* bytes actually transferred = requested - residual */
nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
out_le16(&cp->command, DBDMA_STOP);
/* got a packet, have a look at it */
skb = mp->rx_bufs[i];
if (!skb) {
/* frame landed in dummy_buf because allocation failed earlier */
++dev->stats.rx_dropped;
} else if (nb > 8) {
data = skb->data;
/* receive frame status trails the data; low 16 bits checked here */
frame_status = (data[nb-3] << 8) + data[nb-4];
if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
++dev->stats.rx_errors;
if (frame_status & RS_OFLO)
++dev->stats.rx_over_errors;
if (frame_status & RS_FRAMERR)
++dev->stats.rx_frame_errors;
if (frame_status & RS_FCSERR)
++dev->stats.rx_crc_errors;
} else {
/* Mace feature AUTO_STRIP_RCV is on by default, dropping the
 * FCS on frames with 802.3 headers. This means that Ethernet
 * frames have 8 extra octets at the end, while 802.3 frames
 * have only 4. We need to correctly account for this. */
if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
nb -= 4;
else /* Ethernet header; mace includes FCS */
nb -= 8;
skb_put(skb, nb);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_bytes += skb->len;
netif_rx(skb);
mp->rx_bufs[i] = NULL;
++dev->stats.rx_packets;
}
} else {
/* shorter than the 4-byte status + minimal data: runt */
++dev->stats.rx_errors;
++dev->stats.rx_length_errors;
}
/* advance to next */
if (++i >= N_RX_RING)
i = 0;
}
mp->rx_empty = i;
/* second pass: refill the slots we just consumed */
i = mp->rx_fill;
for (;;) {
next = i + 1;
if (next >= N_RX_RING)
next = 0;
if (next == mp->rx_empty)
break;
cp = mp->rx_cmds + i;
skb = mp->rx_bufs[i];
if (!skb) {
skb = dev_alloc_skb(RX_BUFLEN + 2);
if (skb) {
skb_reserve(skb, 2);
mp->rx_bufs[i] = skb;
}
}
st_le16(&cp->req_count, RX_BUFLEN);
data = skb? skb->data: dummy_buf;
st_le32(&cp->phy_addr, virt_to_bus(data));
out_le16(&cp->xfer_status, 0);
out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
if ((ld_le32(&rd->status) & ACTIVE) != 0) {
out_le32(&rd->control, (PAUSE << 16) | PAUSE);
while ((in_le32(&rd->status) & ACTIVE) != 0)
;
}
#endif
i = next;
}
if (i != mp->rx_fill) {
/* wake the channel so it picks up the recycled commands */
out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
mp->rx_fill = i;
}
spin_unlock_irqrestore(&mp->lock, flags);
return IRQ_HANDLED;
}
/* Open Firmware match table: binds this driver to "mace" device-tree
 * nodes; exported for module autoloading via MODULE_DEVICE_TABLE. */
static struct of_device_id mace_match[] =
{
{
.name = "mace",
},
{},
};
MODULE_DEVICE_TABLE (of, mace_match);
/* macio bus driver descriptor; probe/remove are defined earlier in
 * this file (outside the visible chunk). */
static struct macio_driver mace_driver =
{
.driver = {
.name = "mace",
.owner = THIS_MODULE,
.of_match_table = mace_match,
},
.probe = mace_probe,
.remove = mace_remove,
};
/* Module init: register the macio driver; device setup happens in
 * the per-device probe callback. */
static int __init mace_init(void)
{
return macio_register_driver(&mace_driver);
}
/* Module exit: unregister the driver and free the shared fallback
 * receive buffer (dummy_buf is allocated outside this chunk,
 * presumably during probe - verified against the full file). */
static void __exit mace_cleanup(void)
{
macio_unregister_driver(&mace_driver);
kfree(dummy_buf);
dummy_buf = NULL;
}
MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
MODULE_LICENSE("GPL");
module_init(mace_init);
module_exit(mace_cleanup);
| gpl-2.0 |
PureNexusProject/android_kernel_htc_flounder | arch/xtensa/kernel/stacktrace.c | 3543 | 2335 | /*
* arch/xtensa/kernel/stacktrace.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
/*
 * Walk the call stack starting at 'sp', invoking fn(frame, data)
 * for each frame until fn returns non-zero or the walk leaves the
 * THREAD_SIZE-aligned stack region.  spill_registers() forces the
 * live register windows out to the stack first so the in-memory
 * frames are current.  For each frame, a0 (used as the return
 * address via MAKE_PC_FROM_RA) and a1 (the caller's stack pointer)
 * are read from the spill area just below sp; a non-increasing a1
 * terminates the walk as a corrupt/terminal frame.
 */
void walk_stackframe(unsigned long *sp,
int (*fn)(struct stackframe *frame, void *data),
void *data)
{
unsigned long a0, a1;
unsigned long sp_end;
a1 = (unsigned long)sp;
sp_end = ALIGN(a1, THREAD_SIZE);
spill_registers();
while (a1 < sp_end) {
struct stackframe frame;
sp = (unsigned long *)a1;
a0 = *(sp - 4);
a1 = *(sp - 3);
/* stack must grow strictly upward in addresses as we unwind */
if (a1 <= (unsigned long)sp)
break;
frame.pc = MAKE_PC_FROM_RA(a0, a1);
frame.sp = a1;
if (fn(&frame, data))
return;
}
}
#ifdef CONFIG_STACKTRACE
/* Context passed to stack_trace_cb(): the output trace plus a
 * countdown of initial frames to skip. */
struct stack_trace_data {
struct stack_trace *trace;
unsigned skip;
};
/*
 * Per-frame callback for save_stack_trace_tsk(): records PCs that
 * lie within kernel text into the stack_trace entry array, after
 * skipping the requested number of leading frames.  Returns
 * non-zero (stop walking) once max_entries have been collected.
 */
static int stack_trace_cb(struct stackframe *frame, void *data)
{
	struct stack_trace_data *td = data;
	struct stack_trace *trace = td->trace;
	unsigned long pc = frame->pc;

	/* Honour the caller-requested number of frames to skip. */
	if (td->skip != 0) {
		td->skip--;
		return 0;
	}

	/* Ignore addresses that do not point into kernel code. */
	if (!kernel_text_address(pc))
		return 0;

	trace->entries[trace->nr_entries] = pc;
	trace->nr_entries++;

	return trace->nr_entries >= trace->max_entries ? 1 : 0;
}
/*
 * Capture a stack trace for 'task' into 'trace', honouring
 * trace->skip, by walking from the task's saved stack pointer.
 */
void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
struct stack_trace_data trace_data = {
.trace = trace,
.skip = trace->skip,
};
walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
/* Capture a stack trace of the current task. */
void save_stack_trace(struct stack_trace *trace)
{
save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
#ifdef CONFIG_FRAME_POINTER
/* Context for return_address_cb(): the result slot plus a countdown
 * of frames to skip before taking the address. */
struct return_addr_data {
unsigned long addr;
unsigned skip;
};
/*
 * Per-frame callback for return_address(): after skipping the
 * requested frames, captures the first PC that lies within kernel
 * text and returns 1 to stop the walk.
 */
static int return_address_cb(struct stackframe *frame, void *data)
{
	struct return_addr_data *rad = data;
	unsigned long pc = frame->pc;

	/* Skip the requested number of frames first. */
	if (rad->skip) {
		--rad->skip;
		return 0;
	}

	/* Only a kernel-text PC terminates the walk. */
	if (kernel_text_address(pc)) {
		rad->addr = pc;
		return 1;
	}
	return 0;
}
/*
 * Return the kernel-text return address 'level' frames up the
 * current call stack (level+1 skips this function's own frame),
 * or 0 if none was found (addr is zero-initialized).
 */
unsigned long return_address(unsigned level)
{
struct return_addr_data r = {
.skip = level + 1,
};
walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
return r.addr;
}
EXPORT_SYMBOL(return_address);
#endif
| gpl-2.0 |
nazgee/igep-kernel | drivers/net/smc-mca.c | 3543 | 16490 | /* smc-mca.c: A SMC Ultra ethernet driver for linux. */
/*
Most of this driver, except for ultramca_probe is nearly
verbatim from smc-ultra.c by Donald Becker. The rest is
written and copyright 1996 by David Weis, weisd3458@uni.edu
This is a driver for the SMC Ultra and SMC EtherEZ ethercards.
This driver uses the cards in the 8390-compatible, shared memory mode.
Most of the run-time complexity is handled by the generic code in
8390.c.
This driver enables the shared memory only when doing the actual data
transfers to avoid a bug in early version of the card that corrupted
data transferred by a AHA1542.
This driver does not support the programmed-I/O data transfer mode of
the EtherEZ. That support (if available) is smc-ez.c. Nor does it
use the non-8390-compatible "Altego" mode. (No support currently planned.)
Changelog:
Paul Gortmaker : multiple card support for module users.
David Weis : Micro Channel-ized it.
Tom Sightler : Added support for IBM PS/2 Ethernet Adapter/A
Christopher Turcksin : Changed MCA-probe so that multiple adapters are
found correctly (Jul 16, 1997)
Chris Beauregard : Tried to merge the two changes above (Dec 15, 1997)
Tom Sightler : Fixed minor detection bug caused by above merge
Tom Sightler : Added support for three more Western Digital
MCA-adapters
Tom Sightler : Added support for 2.2.x mca_find_unused_adapter
Hartmut Schmidt : - Modified parameter detection to handle each
card differently depending on a switch-list
- 'card_ver' removed from the adapter list
- Some minor bug fixes
*/
#include <linux/mca.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/io.h>
#include <asm/system.h>
#include "8390.h"
#define DRV_NAME "smc-mca"
static int ultramca_open(struct net_device *dev);
static void ultramca_reset_8390(struct net_device *dev);
static void ultramca_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr,
int ring_page);
static void ultramca_block_input(struct net_device *dev, int count,
struct sk_buff *skb,
int ring_offset);
static void ultramca_block_output(struct net_device *dev, int count,
const unsigned char *buf,
const int start_page);
static int ultramca_close_card(struct net_device *dev);
#define START_PG 0x00 /* First page of TX buffer */
#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */
#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */
#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */
#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
#define ULTRA_IO_EXTENT 32
#define EN0_ERWCNT 0x08 /* Early receive warning count. */
#define _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A 0
#define _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A 1
#define _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A 2
#define _6fc1_WD_Starcard_PLUS_A_WD8003ST_A 3
#define _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A 4
#define _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A 5
#define _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A 6
#define _efe5_IBM_PS2_Adapter_A_for_Ethernet 7
/* Adapter descriptor type (id + human-readable name). */
struct smc_mca_adapters_t {
unsigned int id;
char *name;
};
#define MAX_ULTRAMCA_CARDS 4 /* Max number of Ultra cards per module */
/* Optional per-card I/O base / IRQ overrides supplied at modprobe time;
 * zero entries mean "accept whatever the POS registers report". */
static int ultra_io[MAX_ULTRAMCA_CARDS];
static int ultra_irq[MAX_ULTRAMCA_CARDS];
MODULE_LICENSE("GPL");
module_param_array(ultra_io, int, NULL, 0);
module_param_array(ultra_irq, int, NULL, 0);
MODULE_PARM_DESC(ultra_io, "SMC Ultra/EtherEZ MCA I/O base address(es)");
MODULE_PARM_DESC(ultra_irq, "SMC Ultra/EtherEZ MCA IRQ number(s)");
/* Decode table: the high nibble of POS register 2 indexes into the
 * adapter's I/O base address (see ultramca_probe()). */
static const struct {
unsigned int base_addr;
} addr_table[] = {
{ 0x0800 },
{ 0x1800 },
{ 0x2800 },
{ 0x3800 },
{ 0x4800 },
{ 0x5800 },
{ 0x6800 },
{ 0x7800 },
{ 0x8800 },
{ 0x9800 },
{ 0xa800 },
{ 0xb800 },
{ 0xc800 },
{ 0xd800 },
{ 0xe800 },
{ 0xf800 }
};
#define MEM_MASK 64
/* Decode table: POS register 3 (with MEM_MASK cleared) selects the
 * shared-memory window start and its size in 256-byte pages. */
static const struct {
unsigned char mem_index;
unsigned long mem_start;
unsigned char num_pages;
} mem_table[] = {
{ 16, 0x0c0000, 40 },
{ 18, 0x0c4000, 40 },
{ 20, 0x0c8000, 40 },
{ 22, 0x0cc000, 40 },
{ 24, 0x0d0000, 40 },
{ 26, 0x0d4000, 40 },
{ 28, 0x0d8000, 40 },
{ 30, 0x0dc000, 40 },
{144, 0xfc0000, 40 },
{148, 0xfc8000, 40 },
{154, 0xfd0000, 40 },
{156, 0xfd8000, 40 },
{ 0, 0x0c0000, 20 },
{ 1, 0x0c2000, 20 },
{ 2, 0x0c4000, 20 },
{ 3, 0x0c6000, 20 }
};
#define IRQ_MASK 243
/* Decode table: bits of POS register 5 select the IRQ line; "new"
 * vs "old" column depends on the board family (see ultramca_probe()). */
static const struct {
unsigned char new_irq;
unsigned char old_irq;
} irq_table[] = {
{ 3, 3 },
{ 4, 4 },
{ 10, 10 },
{ 14, 15 }
};
/* MCA adapter POS IDs this driver claims; zero-terminated, index
 * parallel to smc_mca_adapter_names[] below. */
static short smc_mca_adapter_ids[] __initdata = {
0x61c8,
0x61c9,
0x6fc0,
0x6fc1,
0x6fc2,
0xefd4,
0xefd5,
0xefe5,
0x0000
};
/* Human-readable names, NULL-terminated, same order as the ids. */
static char *smc_mca_adapter_names[] __initdata = {
"SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
"SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
"WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
"WD Starcard PLUS/A (WD8003ST/A)",
"WD Ethercard PLUS 10T/A (WD8003W/A)",
"IBM PS/2 Adapter/A for Ethernet UTP/AUI (WD8013WP/A)",
"IBM PS/2 Adapter/A for Ethernet BNC/AUI (WD8013EP/A)",
"IBM PS/2 Adapter/A for Ethernet",
NULL
};
/* Count of adapters successfully probed; also indexes ultra_io/ultra_irq. */
static int ultra_found = 0;
/* Netdev operations: open/stop are local; everything else is
 * delegated to the generic 8390 (ei_*) core. */
static const struct net_device_ops ultramca_netdev_ops = {
.ndo_open = ultramca_open,
.ndo_stop = ultramca_close_card,
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
#endif
};
/*
 * MCA bus probe: decode the slot's POS registers into an I/O base,
 * IRQ and shared-memory window (per-board-family decode tables
 * above), reject the slot if it doesn't match any user-supplied
 * ultra_io/ultra_irq override, then allocate an 8390 netdev, read
 * the station address from the board, map the shared memory, wire
 * up the 8390 callbacks and register the device.  Returns 0 on
 * success or a negative errno; all error paths unwind claim,
 * region and mapping in reverse order.
 */
static int __init ultramca_probe(struct device *gen_dev)
{
unsigned short ioaddr;
struct net_device *dev;
unsigned char reg4, num_pages;
struct mca_device *mca_dev = to_mca_device(gen_dev);
char slot = mca_dev->slot;
unsigned char pos2 = 0xff, pos3 = 0xff, pos4 = 0xff, pos5 = 0xff;
int i, rc;
int adapter = mca_dev->index;
int tbase = 0;
int tirq = 0;
int base_addr = ultra_io[ultra_found];
int irq = ultra_irq[ultra_found];
if (base_addr || irq) {
printk(KERN_INFO "Probing for SMC MCA adapter");
if (base_addr) {
printk(KERN_INFO " at I/O address 0x%04x%c",
base_addr, irq ? ' ' : '\n');
}
if (irq) {
printk(KERN_INFO "using irq %d\n", irq);
}
}
tirq = 0;
tbase = 0;
/* If we're trying to match a specificied irq or io address,
 * we'll reject the adapter found unless it's the one we're
 * looking for */
pos2 = mca_device_read_stored_pos(mca_dev, 2); /* io_addr */
pos3 = mca_device_read_stored_pos(mca_dev, 3); /* shared mem */
pos4 = mca_device_read_stored_pos(mca_dev, 4); /* ROM bios addr range */
pos5 = mca_device_read_stored_pos(mca_dev, 5); /* irq, media and RIPL */
/* Test the following conditions:
 * - If an irq parameter is supplied, compare it
 * with the irq of the adapter we found
 * - If a base_addr paramater is given, compare it
 * with the base_addr of the adapter we found
 * - Check that the irq and the base_addr of the
 * adapter we found is not already in use by
 * this driver
 */
switch (mca_dev->index) {
/* Elite/A and PS/2 WP boards use the table-driven "new" decode */
case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
{
tbase = addr_table[(pos2 & 0xf0) >> 4].base_addr;
tirq = irq_table[(pos5 & 0xc) >> 2].new_irq;
break;
}
/* older WD boards encode the base directly and use the "old" IRQ column */
case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
{
tbase = ((pos2 & 0x0fe) * 0x10);
tirq = irq_table[(pos5 & 3)].old_irq;
break;
}
}
if(!tirq || !tbase ||
(irq && irq != tirq) ||
(base_addr && tbase != base_addr))
/* FIXME: we're trying to force the ordering of the
 * devices here, there should be a way of getting this
 * to happen */
return -ENXIO;
/* Adapter found. */
dev = alloc_ei_netdev();
if(!dev)
return -ENODEV;
SET_NETDEV_DEV(dev, gen_dev);
mca_device_set_name(mca_dev, smc_mca_adapter_names[adapter]);
mca_device_set_claim(mca_dev, 1);
printk(KERN_INFO "smc_mca: %s found in slot %d\n",
smc_mca_adapter_names[adapter], slot + 1);
ultra_found++;
dev->base_addr = ioaddr = mca_device_transform_ioport(mca_dev, tbase);
dev->irq = mca_device_transform_irq(mca_dev, tirq);
dev->mem_start = 0;
num_pages = 40;
switch (adapter) { /* card-# in const array above [hs] */
case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
{
for (i = 0; i < 16; i++) { /* taking 16 counts
 * up to 15 [hs] */
if (mem_table[i].mem_index == (pos3 & ~MEM_MASK)) {
dev->mem_start = (unsigned long)
mca_device_transform_memory(mca_dev, (void *)mem_table[i].mem_start);
num_pages = mem_table[i].num_pages;
}
}
break;
}
case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
{
dev->mem_start = (unsigned long)
mca_device_transform_memory(mca_dev, (void *)((pos3 & 0xfc) * 0x1000));
num_pages = 0x40;
break;
}
case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
{
/* courtesy of gamera@quartz.ocn.ne.jp, pos3 indicates
 * the index of the 0x2000 step.
 * beware different number of pages [hs]
 */
dev->mem_start = (unsigned long)
mca_device_transform_memory(mca_dev, (void *)(0xc0000 + (0x2000 * (pos3 & 0xf))));
num_pages = 0x20 + (2 * (pos3 & 0x10));
break;
}
}
/* sanity check, shouldn't happen */
if (dev->mem_start == 0) {
rc = -ENODEV;
goto err_unclaim;
}
if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) {
rc = -ENODEV;
goto err_unclaim;
}
/* latch the station-address register set, then read the MAC */
reg4 = inb(ioaddr + 4) & 0x7f;
outb(reg4, ioaddr + 4);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + 8 + i);
printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %pM",
slot + 1, ioaddr, dev->dev_addr);
/* Switch from the station address to the alternate register set
 * and read the useful registers there.
 */
outb(0x80 | reg4, ioaddr + 4);
/* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot.
 */
outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
/* Switch back to the station address register set so that
 * the MS-DOS driver can find the card after a warm boot.
 */
outb(reg4, ioaddr + 4);
dev_set_drvdata(gen_dev, dev);
/* The 8390 isn't at the base address, so fake the offset
 */
dev->base_addr = ioaddr + ULTRA_NIC_OFFSET;
/* fill in the generic 8390 state and shared-memory callbacks */
ei_status.name = "SMC Ultra MCA";
ei_status.word16 = 1;
ei_status.tx_start_page = START_PG;
ei_status.rx_start_page = START_PG + TX_PAGES;
ei_status.stop_page = num_pages;
ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG) * 256);
if (!ei_status.mem) {
rc = -ENOMEM;
goto err_release_region;
}
dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG) * 256;
printk(", IRQ %d memory %#lx-%#lx.\n",
dev->irq, dev->mem_start, dev->mem_end - 1);
ei_status.reset_8390 = &ultramca_reset_8390;
ei_status.block_input = &ultramca_block_input;
ei_status.block_output = &ultramca_block_output;
ei_status.get_8390_hdr = &ultramca_get_8390_hdr;
ei_status.priv = slot;
dev->netdev_ops = &ultramca_netdev_ops;
NS8390_init(dev, 0);
rc = register_netdev(dev);
if (rc)
goto err_unmap;
return 0;
err_unmap:
iounmap(ei_status.mem);
err_release_region:
release_region(ioaddr, ULTRA_IO_EXTENT);
err_unclaim:
mca_device_set_claim(mca_dev, 0);
free_netdev(dev);
return rc;
}
/*
 * ndo_open: request the IRQ, enable the card's shared memory and
 * interrupts via the ASIC registers, then hand off to the generic
 * 8390 ei_open().  Returns 0 or the request_irq() error.
 */
static int ultramca_open(struct net_device *dev)
{
int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
int retval;
if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
return retval;
outb(ULTRA_MEMENB, ioaddr); /* Enable memory */
outb(0x80, ioaddr + 5); /* ??? */
outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
outb(0x04, ioaddr + 5); /* ??? */
/* Set the early receive warning level in window 0 high enough not
 * to receive ERW interrupts.
 */
/* outb_p(E8390_NODMA + E8390_PAGE0, dev->base_addr);
 * outb(0xff, dev->base_addr + EN0_ERWCNT);
 */
ei_open(dev);
return 0;
}
/*
 * 8390 core reset callback: pulse the board reset bit in the ASIC
 * command register, clear the tx-in-progress flag, and re-enable
 * interrupts and shared memory.
 */
static void ultramca_reset_8390(struct net_device *dev)
{
int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
outb(ULTRA_RESET, ioaddr);
if (ei_debug > 1)
printk("resetting Ultra, t=%ld...", jiffies);
ei_status.txing = 0;
outb(0x80, ioaddr + 5); /* ??? */
outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
if (ei_debug > 1)
printk("reset done\n");
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
* we don't need to be concerned with ring wrap as the header will be at
* the start of a page, so we optimize accordingly.
*/
/* 8390 header-fetch callback: copy the 4-byte packet header from
 * the start of the given ring page in shared memory; the single
 * readl() is a faster equivalent of the memcpy_fromio() shown in
 * the #ifdef'd-out branch. */
static void ultramca_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG) << 8);
#ifdef notdef
/* Officially this is what we are doing, but the readl() is faster */
memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
#else
((unsigned int*)hdr)[0] = readl(hdr_start);
#endif
}
/* Block input and output are easy on shared memory ethercards, the only
* complication is when the ring buffer wraps.
*/
/* 8390 block-input callback: copy 'count' bytes of packet data from
 * shared memory into the skb, splitting the copy in two when the
 * read crosses the end of the receive ring (wrap back to the page
 * just after the TX area). */
static void ultramca_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
void __iomem *xfer_start = ei_status.mem + ring_offset - START_PG * 256;
if (ring_offset + count > ei_status.stop_page * 256) {
/* We must wrap the input move. */
int semi_count = ei_status.stop_page * 256 - ring_offset;
memcpy_fromio(skb->data, xfer_start, semi_count);
count -= semi_count;
memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
} else {
memcpy_fromio(skb->data, xfer_start, count);
}
}
/* 8390 block-output callback: copy a frame into the transmit area
 * of shared memory; no wrap handling is needed on the TX side. */
static void ultramca_block_output(struct net_device *dev, int count, const unsigned char *buf,
int start_page)
{
void __iomem *shmem = ei_status.mem + ((start_page - START_PG) << 8);
memcpy_toio(shmem, buf, count);
}
/*
 * ndo_stop: stop the queue, mask the board's interrupts, release
 * the IRQ and reinitialize the 8390 into its stopped state.
 * Always returns 0.
 */
static int ultramca_close_card(struct net_device *dev)
{
int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
netif_stop_queue(dev);
if (ei_debug > 1)
printk("%s: Shutting down ethercard.\n", dev->name);
outb(0x00, ioaddr + 6); /* Disable interrupts. */
free_irq(dev->irq, dev);
NS8390_init(dev, 0);
/* We should someday disable shared memory and change to 8-bit mode
 * "just in case"...
 */
return 0;
}
/* Bus-level remove: unregister the netdev, release the MCA claim, the I/O
 * region and the shared-memory mapping, then free the device.
 * Always returns 0.
 */
static int ultramca_remove(struct device *gen_dev)
{
struct mca_device *mca_dev = to_mca_device(gen_dev);
struct net_device *dev = dev_get_drvdata(gen_dev);
if (dev) {
/* NB: ultra_close_card() does free_irq */
int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET;
unregister_netdev(dev);
mca_device_set_claim(mca_dev, 0);
release_region(ioaddr, ULTRA_IO_EXTENT);
/* ei_status is the per-device 8390 state (macro over dev priv) */
iounmap(ei_status.mem);
free_netdev(dev);
}
return 0;
}
/* MCA bus glue for the SMC Ultra adapter family. */
static struct mca_driver ultra_driver = {
.id_table = smc_mca_adapter_ids,
.driver = {
.name = "smc-mca",
.bus = &mca_bus_type,
.probe = ultramca_probe,
.remove = ultramca_remove,
}
};
/* Module entry point: register the MCA driver; fail with -ENXIO when the
 * machine has no MCA bus or probing found no adapter.
 * NOTE(review): when no adapter is found the driver appears to remain
 * registered while init returns -ENXIO - confirm whether an unregister
 * is needed on that path.
 */
static int __init ultramca_init_module(void)
{
if(!MCA_bus)
return -ENXIO;
mca_register_driver(&ultra_driver);
return ultra_found ? 0 : -ENXIO;
}
/* Module exit: drop the bus registration (removes all bound devices). */
static void __exit ultramca_cleanup_module(void)
{
mca_unregister_driver(&ultra_driver);
}
module_init(ultramca_init_module);
module_exit(ultramca_cleanup_module);
| gpl-2.0 |
allenbh/ntrdma | net/caif/cfcnfg.c | 4311 | 14653 | /*
* Copyright (C) ST-Ericsson AB 2010
* Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfctrl.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cffrml.h>
#include <net/caif/cfserl.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/caif_dev.h>
#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
/* Information about CAIF physical interfaces held by Config Module in order
* to manage physical interfaces
*/
struct cfcnfg_phyinfo {
/* Linkage on cfcnfg->phys, traversed under RCU */
struct list_head node;
bool up;
/* Pointer to the layer below the MUX (framing layer) */
struct cflayer *frm_layer;
/* Pointer to the lowest actual physical layer */
struct cflayer *phy_layer;
/* Unique identifier of the physical interface */
unsigned int id;
/* Preference of the physical in interface */
enum cfcnfg_phy_preference pref;
/* Information about the physical device */
struct dev_info dev_info;
/* Interface index */
int ifindex;
/* Protocol head room added for CAIF link layer */
int head_room;
/* Use Start of frame checksum */
bool use_fcs;
};
/* Per-namespace CAIF configuration object: owns the control layer, the
 * MUX layer and the list of known physical interfaces.
 */
struct cfcnfg {
struct cflayer layer;
struct cflayer *ctrl;  /* CAIF control protocol layer */
struct cflayer *mux;   /* channel multiplexer below ctrl */
struct list_head phys; /* RCU list of cfcnfg_phyinfo */
struct mutex lock;     /* serializes add/del of phy layers */
};
static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
enum cfctrl_srv serv, u8 phyid,
struct cflayer *adapt_layer);
static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
struct cflayer *adapt_layer);
static void cfctrl_resp_func(void);
static void cfctrl_enum_resp(void);
/* Allocate and wire up a CAIF configuration object: a MUX layer with the
 * control protocol layer on top of it, plus the callbacks that route
 * cfctrl responses back into this module. Returns NULL on failure.
 */
struct cfcnfg *cfcnfg_create(void)
{
struct cfcnfg *this;
struct cfctrl_rsp *resp;
might_sleep();
/* Initiate this layer */
/* NOTE(review): GFP_ATOMIC despite might_sleep() above - GFP_KERNEL
 * looks permissible here; confirm.
 */
this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
if (!this)
return NULL;
this->mux = cfmuxl_create();
if (!this->mux)
goto out_of_mem;
this->ctrl = cfctrl_create();
if (!this->ctrl)
goto out_of_mem;
/* Initiate response functions */
resp = cfctrl_get_respfuncs(this->ctrl);
resp->enum_rsp = cfctrl_enum_resp;
resp->linkerror_ind = cfctrl_resp_func;
resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
resp->sleep_rsp = cfctrl_resp_func;
resp->wake_rsp = cfctrl_resp_func;
resp->restart_rsp = cfctrl_resp_func;
resp->radioset_rsp = cfctrl_resp_func;
resp->linksetup_rsp = cfcnfg_linkup_rsp;
resp->reject_rsp = cfcnfg_reject_rsp;
INIT_LIST_HEAD(&this->phys);
/* Control protocol occupies channel 0 on the MUX */
cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
layer_set_dn(this->ctrl, this->mux);
layer_set_up(this->ctrl, this);
mutex_init(&this->lock);
return this;
out_of_mem:
/* NOTE(review): this path frees ctrl with kfree() whereas
 * cfcnfg_remove() uses cfctrl_remove() - confirm equivalence.
 */
synchronize_rcu();
kfree(this->mux);
kfree(this->ctrl);
kfree(this);
return NULL;
}
/* Tear down a configuration object created by cfcnfg_create().
 * Waits for RCU readers before freeing; safe to call with NULL.
 */
void cfcnfg_remove(struct cfcnfg *cfg)
{
might_sleep();
if (cfg) {
synchronize_rcu();
kfree(cfg->mux);
cfctrl_remove(cfg->ctrl);
kfree(cfg);
}
}
/* No-op stub used for cfctrl responses this module does not act on. */
static void cfctrl_resp_func(void)
{
}
/* Look up a physical interface by id. Caller must hold rcu_read_lock()
 * (or cnfg->lock on the update side). Returns NULL when not found.
 */
static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg,
u8 phyid)
{
struct cfcnfg_phyinfo *phy;
list_for_each_entry_rcu(phy, &cnfg->phys, node)
if (phy->id == phyid)
return phy;
return NULL;
}
/* No-op stub for the enumerate response. */
static void cfctrl_enum_resp(void)
{
}
/* Pick a physical interface matching the requested preference; if none
 * matches, fall back to any interface that is up. Caller must hold
 * rcu_read_lock(). Returns NULL when no interface is up.
 */
static struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
enum cfcnfg_phy_preference phy_pref)
{
/* Try to match with specified preference */
struct cfcnfg_phyinfo *phy;
list_for_each_entry_rcu(phy, &cnfg->phys, node) {
if (phy->up && phy->pref == phy_pref &&
phy->frm_layer != NULL)
return &phy->dev_info;
}
/* Otherwise just return something */
list_for_each_entry_rcu(phy, &cnfg->phys, node)
if (phy->up)
return &phy->dev_info;
return NULL;
}
/* Map a network interface index to the CAIF phy id of an interface that
 * is up. Caller must hold rcu_read_lock(). Returns -ENODEV if absent.
 */
static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
{
struct cfcnfg_phyinfo *phy;
list_for_each_entry_rcu(phy, &cnfg->phys, node)
if (phy->ifindex == ifi && phy->up)
return phy->id;
return -ENODEV;
}
/* Disconnect a client (adaptation) layer: cancel any pending control
 * request, detach its service layer from the MUX and send a link-down
 * request for its channel. Notifies the client with DEINIT_RSP after an
 * RCU grace period. Always returns 0.
 */
int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
{
u8 channel_id;
struct cfcnfg *cfg = get_cfcnfg(net);
caif_assert(adap_layer != NULL);
cfctrl_cancel_req(cfg->ctrl, adap_layer);
channel_id = adap_layer->id;
if (channel_id != 0) {
struct cflayer *servl;
servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
if (servl != NULL)
layer_set_up(servl, NULL);
} else
/* channel 0 is the control channel: nothing was connected */
pr_debug("nothing to disconnect\n");
/* Do RCU sync before initiating cleanup */
synchronize_rcu();
if (adap_layer->ctrlcmd != NULL)
adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
return 0;
}
EXPORT_SYMBOL(caif_disconnect_client);
/* Link-destroy responses require no action here. */
static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
{
}
/* Protocol header size (bytes) added per service type, indexed by
 * CFCTRL_SRV_*; used to compute the head room reported to clients.
 */
static const int protohead[CFCTRL_SRV_MASK] = {
[CFCTRL_SRV_VEI] = 4,
[CFCTRL_SRV_DATAGRAM] = 7,
[CFCTRL_SRV_UTIL] = 4,
[CFCTRL_SRV_RFM] = 3,
[CFCTRL_SRV_DBG] = 3,
};
/* Translate a socket-level connect request (s) into control-protocol
 * link parameters (l). Resolves the physical interface either from the
 * explicit ifindex or from the link-selector preference.
 * Returns 0 on success or a negative errno. Caller must hold
 * rcu_read_lock() for the phy lookups.
 */
static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
struct caif_connect_request *s,
struct cfctrl_link_param *l)
{
struct dev_info *dev_info;
enum cfcnfg_phy_preference pref;
int res;
memset(l, 0, sizeof(*l));
/* In caif protocol low value is high priority */
l->priority = CAIF_PRIO_MAX - s->priority + 1;
if (s->ifindex != 0) {
res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
if (res < 0)
return res;
l->phyid = res;
} else {
switch (s->link_selector) {
case CAIF_LINK_HIGH_BANDW:
pref = CFPHYPREF_HIGH_BW;
break;
case CAIF_LINK_LOW_LATENCY:
pref = CFPHYPREF_LOW_LAT;
break;
default:
return -EINVAL;
}
dev_info = cfcnfg_get_phyid(cnfg, pref);
if (dev_info == NULL)
return -ENODEV;
l->phyid = dev_info->id;
}
switch (s->protocol) {
case CAIFPROTO_AT:
l->linktype = CFCTRL_SRV_VEI;
l->endpoint = (s->sockaddr.u.at.type >> 2) & 0x3;
l->chtype = s->sockaddr.u.at.type & 0x3;
break;
case CAIFPROTO_DATAGRAM:
l->linktype = CFCTRL_SRV_DATAGRAM;
l->chtype = 0x00;
l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
break;
case CAIFPROTO_DATAGRAM_LOOP:
l->linktype = CFCTRL_SRV_DATAGRAM;
/* chtype 0x03 selects loopback */
l->chtype = 0x03;
l->endpoint = 0x00;
l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
break;
case CAIFPROTO_RFM:
l->linktype = CFCTRL_SRV_RFM;
/* NOTE(review): RFM connid is stored through the datagram
 * member of the union - relies on both layouts placing
 * connid first; confirm against cfctrl.h.
 */
l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
sizeof(l->u.rfm.volume)-1);
l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
break;
case CAIFPROTO_UTIL:
l->linktype = CFCTRL_SRV_UTIL;
l->endpoint = 0x00;
l->chtype = 0x00;
strncpy(l->u.utility.name, s->sockaddr.u.util.service,
sizeof(l->u.utility.name)-1);
l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
caif_assert(sizeof(l->u.utility.name) > 10);
/* Clamp the parameter blob to the control-frame field size */
l->u.utility.paramlen = s->param.size;
if (l->u.utility.paramlen > sizeof(l->u.utility.params))
l->u.utility.paramlen = sizeof(l->u.utility.params);
memcpy(l->u.utility.params, s->param.data,
l->u.utility.paramlen);
break;
case CAIFPROTO_DEBUG:
l->linktype = CFCTRL_SRV_DBG;
l->endpoint = s->sockaddr.u.dbg.service;
l->chtype = s->sockaddr.u.dbg.type;
break;
default:
return -EINVAL;
}
return 0;
}
/* Resolve a connect request into link parameters, validate the client
 * layer and physical interface under RCU, then hand the setup request to
 * the CAIF control layer. Reports the interface index and the protocol
 * head/tail room through the out parameters.
 * Returns the result of cfctrl_linkup_request() on success, or a
 * negative errno.
 *
 * Fix: the two "&param" argument expressions had been corrupted into
 * the mis-encoded sequence "(pilcrow)m" (an HTML-entity mangling of
 * "&para"), which does not compile; the address-of expressions are
 * restored.
 */
int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
struct cflayer *adap_layer, int *ifindex,
int *proto_head, int *proto_tail)
{
struct cflayer *frml;
struct cfcnfg_phyinfo *phy;
int err;
struct cfctrl_link_param param;
struct cfcnfg *cfg = get_cfcnfg(net);
rcu_read_lock();
err = caif_connect_req_to_link_param(cfg, conn_req, &param);
if (err)
goto unlock;
phy = cfcnfg_get_phyinfo_rcu(cfg, param.phyid);
if (!phy) {
err = -ENODEV;
goto unlock;
}
/* The client layer must supply receive and control callbacks */
err = -EINVAL;
if (adap_layer == NULL) {
pr_err("adap_layer is zero\n");
goto unlock;
}
if (adap_layer->receive == NULL) {
pr_err("adap_layer->receive is NULL\n");
goto unlock;
}
if (adap_layer->ctrlcmd == NULL) {
pr_err("adap_layer->ctrlcmd == NULL\n");
goto unlock;
}
err = -ENODEV;
frml = phy->frm_layer;
if (frml == NULL) {
pr_err("Specified PHY type does not exist!\n");
goto unlock;
}
caif_assert(param.phyid == phy->id);
caif_assert(phy->frm_layer->id ==
param.phyid);
caif_assert(phy->phy_layer->id ==
param.phyid);
*ifindex = phy->ifindex;
*proto_tail = 2;
*proto_head = protohead[param.linktype] + phy->head_room;
rcu_read_unlock();
/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
cfctrl_enum_req(cfg->ctrl, param.phyid);
return cfctrl_linkup_request(cfg->ctrl, &param, adap_layer);
unlock:
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(caif_connect_client);
/* Link setup was rejected by the modem: tell the client layer, if it
 * registered a control callback.
 */
static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
struct cflayer *adapt_layer)
{
if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
adapt_layer->ctrlcmd(adapt_layer,
CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
}
/* Link-setup response from the modem: create the service layer matching
 * the negotiated service type, insert it between the MUX and the client
 * layer, and signal INIT_RSP to the client. On any failure the client
 * is either notified (INIT_FAIL_RSP) or a link-down is requested.
 */
static void
cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
u8 phyid, struct cflayer *adapt_layer)
{
struct cfcnfg *cnfg = container_obj(layer);
struct cflayer *servicel = NULL;
struct cfcnfg_phyinfo *phyinfo;
struct net_device *netdev;
if (channel_id == 0) {
pr_warn("received channel_id zero\n");
if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
adapt_layer->ctrlcmd(adapt_layer,
CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
return;
}
rcu_read_lock();
if (adapt_layer == NULL) {
/* Client vanished while the request was in flight */
pr_debug("link setup response but no client exist,"
"send linkdown back\n");
cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
goto unlock;
}
caif_assert(cnfg != NULL);
caif_assert(phyid != 0);
phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
if (phyinfo == NULL) {
pr_err("ERROR: Link Layer Device disappeared"
"while connecting\n");
goto unlock;
}
caif_assert(phyinfo != NULL);
caif_assert(phyinfo->id == phyid);
caif_assert(phyinfo->phy_layer != NULL);
caif_assert(phyinfo->phy_layer->id == phyid);
adapt_layer->id = channel_id;
/* Instantiate the service layer for the negotiated type */
switch (serv) {
case CFCTRL_SRV_VEI:
servicel = cfvei_create(channel_id, &phyinfo->dev_info);
break;
case CFCTRL_SRV_DATAGRAM:
servicel = cfdgml_create(channel_id,
&phyinfo->dev_info);
break;
case CFCTRL_SRV_RFM:
netdev = phyinfo->dev_info.dev;
servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
netdev->mtu);
break;
case CFCTRL_SRV_UTIL:
servicel = cfutill_create(channel_id, &phyinfo->dev_info);
break;
case CFCTRL_SRV_VIDEO:
servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
break;
case CFCTRL_SRV_DBG:
servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
break;
default:
pr_err("Protocol error. Link setup response "
"- unknown channel type\n");
goto unlock;
}
if (!servicel)
goto unlock;
/* Splice: MUX <-> service layer <-> client layer */
layer_set_dn(servicel, cnfg->mux);
cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
layer_set_up(servicel, adapt_layer);
layer_set_dn(adapt_layer, servicel);
rcu_read_unlock();
servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
return;
unlock:
rcu_read_unlock();
}
/* Register a new physical (link-layer) interface: allocate a phy id,
 * create the framing layer and splice frm -> [link_support ->] phy,
 * then publish the phyinfo on the RCU list.
 * Failures are silent apart from the "too many" warning.
 */
void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
struct net_device *dev, struct cflayer *phy_layer,
enum cfcnfg_phy_preference pref,
struct cflayer *link_support,
bool fcs, int head_room)
{
struct cflayer *frml;
struct cfcnfg_phyinfo *phyinfo = NULL;
int i;
u8 phyid;
mutex_lock(&cnfg->lock);
/* CAIF protocol allow maximum 6 link-layers */
/* NOTE(review): the loop can hand out ids 1..7 (seven ids) while the
 * comment and warning say six - confirm the intended limit.
 */
for (i = 0; i < 7; i++) {
phyid = (dev->ifindex + i) & 0x7;
if (phyid == 0)
continue;
if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL)
goto got_phyid;
}
pr_warn("Too many CAIF Link Layers (max 6)\n");
goto out;
got_phyid:
phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
if (!phyinfo)
goto out_err;
phy_layer->id = phyid;
phyinfo->pref = pref;
phyinfo->id = phyid;
phyinfo->dev_info.id = phyid;
phyinfo->dev_info.dev = dev;
phyinfo->phy_layer = phy_layer;
phyinfo->ifindex = dev->ifindex;
phyinfo->head_room = head_room;
phyinfo->use_fcs = fcs;
frml = cffrml_create(phyid, fcs);
if (!frml)
goto out_err;
phyinfo->frm_layer = frml;
layer_set_up(frml, cnfg->mux);
/* Optional link_support layer sits between framing and the phy */
if (link_support != NULL) {
link_support->id = phyid;
layer_set_dn(frml, link_support);
layer_set_up(link_support, frml);
layer_set_dn(link_support, phy_layer);
layer_set_up(phy_layer, link_support);
} else {
layer_set_dn(frml, phy_layer);
layer_set_up(phy_layer, frml);
}
list_add_rcu(&phyinfo->node, &cnfg->phys);
out:
mutex_unlock(&cnfg->lock);
return;
out_err:
kfree(phyinfo);
mutex_unlock(&cnfg->lock);
}
EXPORT_SYMBOL(cfcnfg_add_phy_layer);
/* Mark a physical interface up or down, attaching/detaching its framing
 * layer as the MUX's down-layer accordingly (with a reference held while
 * attached). Returns 0 on success (including no state change) or
 * -ENODEV if the phy id is unknown.
 */
int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
bool up)
{
struct cfcnfg_phyinfo *phyinfo;
rcu_read_lock();
phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id);
if (phyinfo == NULL) {
rcu_read_unlock();
return -ENODEV;
}
if (phyinfo->up == up) {
/* Already in the requested state - nothing to do */
rcu_read_unlock();
return 0;
}
phyinfo->up = up;
if (up) {
cffrml_hold(phyinfo->frm_layer);
cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer,
phy_layer->id);
} else {
cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
cffrml_put(phyinfo->frm_layer);
}
rcu_read_unlock();
return 0;
}
EXPORT_SYMBOL(cfcnfg_set_phy_state);
/* Unregister a physical interface: unlink it from the RCU list, wait for
 * readers, then unhook and free the framing layer (and any intermediate
 * link_support layer) and the phyinfo.
 * Returns 0 on success or if the phy id is unknown; -EAGAIN if the
 * framing layer still has references, in which case the phyinfo is put
 * back on the list for a later retry.
 */
int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
{
struct cflayer *frml, *frml_dn;
u16 phyid;
struct cfcnfg_phyinfo *phyinfo;
might_sleep();
mutex_lock(&cnfg->lock);
phyid = phy_layer->id;
phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
if (phyinfo == NULL) {
mutex_unlock(&cnfg->lock);
return 0;
}
caif_assert(phyid == phyinfo->id);
caif_assert(phy_layer == phyinfo->phy_layer);
caif_assert(phy_layer->id == phyid);
caif_assert(phyinfo->frm_layer->id == phyid);
list_del_rcu(&phyinfo->node);
synchronize_rcu();
/* Fail if reference count is not zero */
if (cffrml_refcnt_read(phyinfo->frm_layer) != 0) {
pr_info("Wait for device inuse\n");
list_add_rcu(&phyinfo->node, &cnfg->phys);
mutex_unlock(&cnfg->lock);
return -EAGAIN;
}
frml = phyinfo->frm_layer;
frml_dn = frml->dn;
cffrml_set_uplayer(frml, NULL);
cffrml_set_dnlayer(frml, NULL);
if (phy_layer != frml_dn) {
/* A link_support layer was interposed - detach it too */
layer_set_up(frml_dn, NULL);
layer_set_dn(frml_dn, NULL);
}
layer_set_up(phy_layer, NULL);
if (phyinfo->phy_layer != frml_dn)
kfree(frml_dn);
cffrml_free(frml);
kfree(phyinfo);
mutex_unlock(&cnfg->lock);
return 0;
}
EXPORT_SYMBOL(cfcnfg_del_phy_layer);
| gpl-2.0 |
cm-3470/android_kernel_samsung_gardalte | sound/soc/codecs/wm8741.c | 4567 | 14855 | /*
* wm8741.c -- WM8741 ALSA SoC Audio driver
*
* Copyright 2010 Wolfson Microelectronics plc
*
* Author: Ian Lartey <ian@opensource.wolfsonmicro.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "wm8741.h"
#define WM8741_NUM_SUPPLIES 2
/* Regulator supply names requested in wm8741_probe() */
static const char *wm8741_supply_names[WM8741_NUM_SUPPLIES] = {
"AVDD",
"DVDD",
};
#define WM8741_NUM_RATES 6
/* codec private data */
struct wm8741_priv {
enum snd_soc_control_type control_type; /* SND_SOC_I2C or SND_SOC_SPI */
struct regulator_bulk_data supplies[WM8741_NUM_SUPPLIES];
unsigned int sysclk;                    /* MCLK in Hz, 0 = not set */
struct snd_pcm_hw_constraint_list *sysclk_constraints; /* rates valid for sysclk */
};
/* Register cache defaults.
 * NOTE(review): the last entry is register 32 (ADDITIONAL_CONTROL_1)
 * stored at cache index 10 - confirm WM8741_REGISTER_COUNT and the
 * cache I/O mapping handle this sparse layout.
 */
static const u16 wm8741_reg_defaults[WM8741_REGISTER_COUNT] = {
0x0000, /* R0 - DACLLSB Attenuation */
0x0000, /* R1 - DACLMSB Attenuation */
0x0000, /* R2 - DACRLSB Attenuation */
0x0000, /* R3 - DACRMSB Attenuation */
0x0000, /* R4 - Volume Control */
0x000A, /* R5 - Format Control */
0x0000, /* R6 - Filter Control */
0x0000, /* R7 - Mode Control 1 */
0x0002, /* R8 - Mode Control 2 */
0x0000, /* R9 - Reset */
0x0002, /* R32 - ADDITONAL_CONTROL_1 */
};
/* Write the reset register, returning the chip to its default state. */
static int wm8741_reset(struct snd_soc_codec *codec)
{
return snd_soc_write(codec, WM8741_RESET, 0);
}
/* Volume: the LSB attenuation registers give fine steps, the MSB
 * registers coarse steps (TLV values are in 0.01 dB units).
 */
static const DECLARE_TLV_DB_SCALE(dac_tlv_fine, -12700, 13, 0);
static const DECLARE_TLV_DB_SCALE(dac_tlv, -12700, 400, 0);
static const struct snd_kcontrol_new wm8741_snd_controls[] = {
SOC_DOUBLE_R_TLV("Fine Playback Volume", WM8741_DACLLSB_ATTENUATION,
WM8741_DACRLSB_ATTENUATION, 1, 255, 1, dac_tlv_fine),
SOC_DOUBLE_R_TLV("Playback Volume", WM8741_DACLMSB_ATTENUATION,
WM8741_DACRMSB_ATTENUATION, 0, 511, 1, dac_tlv),
};
/* Two DACs feeding differential outputs; no power controls (SND_SOC_NOPM) */
static const struct snd_soc_dapm_widget wm8741_dapm_widgets[] = {
SND_SOC_DAPM_DAC("DACL", "Playback", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("DACR", "Playback", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_OUTPUT("VOUTLP"),
SND_SOC_DAPM_OUTPUT("VOUTLN"),
SND_SOC_DAPM_OUTPUT("VOUTRP"),
SND_SOC_DAPM_OUTPUT("VOUTRN"),
};
static const struct snd_soc_dapm_route wm8741_dapm_routes[] = {
{ "VOUTLP", NULL, "DACL" },
{ "VOUTLN", NULL, "DACL" },
{ "VOUTRP", NULL, "DACR" },
{ "VOUTRN", NULL, "DACR" },
};
/* Supported MCLK/LRCLK ratios; hw_params() matches sysclk/rate against
 * .ratio. NOTE(review): .value is presumably the register encoding for
 * the ratio but is unused in this file - confirm.
 */
static struct {
int value;
int ratio;
} lrclk_ratios[WM8741_NUM_RATES] = {
{ 1, 128 },
{ 2, 192 },
{ 3, 256 },
{ 4, 384 },
{ 5, 512 },
{ 6, 768 },
};
/* Sample rates reachable from an 11.2896 MHz MCLK.
 * 11289600 / 44100 = 256 and 11289600 / 88200 = 128, both valid
 * MCLK/LRCLK ratios (see lrclk_ratios). The previous value 88235 divides
 * MCLK to no supported integer ratio, so wm8741_hw_params() would always
 * reject it with -EINVAL.
 */
static unsigned int rates_11289[] = {
44100, 88200,
};
static struct snd_pcm_hw_constraint_list constraints_11289 = {
.count = ARRAY_SIZE(rates_11289),
.list = rates_11289,
};
/* 12.288 MHz MCLK: ratios 384, 256 and 128 respectively */
static unsigned int rates_12288[] = {
32000, 48000, 96000,
};
static struct snd_pcm_hw_constraint_list constraints_12288 = {
.count = ARRAY_SIZE(rates_12288),
.list = rates_12288,
};
/* 16.384 MHz MCLK: only 32 kHz (ratio 512) is reachable */
static unsigned int rates_16384[] = {
32000,
};
static struct snd_pcm_hw_constraint_list constraints_16384 = {
.count = ARRAY_SIZE(rates_16384),
.list = rates_16384,
};
/* Sample rates reachable from a 16.9344 MHz MCLK.
 * 16934400 / 44100 = 384 and 16934400 / 88200 = 192, both valid
 * MCLK/LRCLK ratios (see lrclk_ratios). The previous value 88235 divides
 * MCLK to no supported integer ratio, so wm8741_hw_params() would always
 * reject it with -EINVAL.
 */
static unsigned int rates_16934[] = {
44100, 88200,
};
static struct snd_pcm_hw_constraint_list constraints_16934 = {
.count = ARRAY_SIZE(rates_16934),
.list = rates_16934,
};
/* 18.432 MHz MCLK: ratios 384 and 192 respectively */
static unsigned int rates_18432[] = {
48000, 96000,
};
static struct snd_pcm_hw_constraint_list constraints_18432 = {
.count = ARRAY_SIZE(rates_18432),
.list = rates_18432,
};
/* Sample rates reachable from a 22.5792 MHz (or 33.8688 MHz) MCLK:
 * 22579200 / {44100, 88200, 176400} = {512, 256, 128}, all valid
 * MCLK/LRCLK ratios (see lrclk_ratios). The previous entries 88235 and
 * 1764000 divide MCLK to no supported integer ratio, so
 * wm8741_hw_params() would always reject them with -EINVAL.
 */
static unsigned int rates_22579[] = {
44100, 88200, 176400
};
static struct snd_pcm_hw_constraint_list constraints_22579 = {
.count = ARRAY_SIZE(rates_22579),
.list = rates_22579,
};
/* 24.576 MHz MCLK: ratios 768, 512, 256 and 128 respectively */
static unsigned int rates_24576[] = {
32000, 48000, 96000, 192000
};
static struct snd_pcm_hw_constraint_list constraints_24576 = {
.count = ARRAY_SIZE(rates_24576),
.list = rates_24576,
};
/* Sample rates reachable from a 36.864 MHz MCLK:
 * 36864000 / {48000, 96000, 192000} = {768, 384, 192}, all valid
 * MCLK/LRCLK ratios (see lrclk_ratios). The previous value 19200 gives a
 * ratio of 1920, which is not supported, so wm8741_hw_params() would
 * always reject it with -EINVAL (192000 was clearly intended).
 */
static unsigned int rates_36864[] = {
48000, 96000, 192000
};
/* Constraint list applied when sysclk is 36.864 MHz */
static struct snd_pcm_hw_constraint_list constraints_36864 = {
.count = ARRAY_SIZE(rates_36864),
.list = rates_36864,
};
/* DAI startup: restrict the stream's rate list to those reachable from
 * the configured MCLK. Fails with -EINVAL if set_sysclk() was never
 * called.
 */
static int wm8741_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);
/* The set of sample rates that can be supported depends on the
 * MCLK supplied to the CODEC - enforce this.
 */
if (!wm8741->sysclk) {
dev_err(codec->dev,
"No MCLK configured, call set_sysclk() on init\n");
return -EINVAL;
}
snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
wm8741->sysclk_constraints);
return 0;
}
/* hw_params: verify the MCLK/LRCLK ratio is supported and program the
 * word-length bits of the format control register.
 * Returns -EINVAL for an unsupported ratio or sample format.
 */
static int wm8741_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_codec *codec = rtd->codec;
struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);
/* Mask 0x1FC keeps everything except the word-length bits [1:0] */
u16 iface = snd_soc_read(codec, WM8741_FORMAT_CONTROL) & 0x1FC;
int i;
/* Find a supported LRCLK ratio */
for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) {
if (wm8741->sysclk / params_rate(params) ==
lrclk_ratios[i].ratio)
break;
}
/* Should never happen, should be handled by constraints */
if (i == ARRAY_SIZE(lrclk_ratios)) {
dev_err(codec->dev, "MCLK/fs ratio %d unsupported\n",
wm8741->sysclk / params_rate(params));
return -EINVAL;
}
/* bit size */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
break;
case SNDRV_PCM_FORMAT_S20_3LE:
iface |= 0x0001;
break;
case SNDRV_PCM_FORMAT_S24_LE:
iface |= 0x0002;
break;
case SNDRV_PCM_FORMAT_S32_LE:
iface |= 0x0003;
break;
default:
dev_dbg(codec->dev, "wm8741_hw_params: Unsupported bit size param = %d",
params_format(params));
return -EINVAL;
}
dev_dbg(codec->dev, "wm8741_hw_params: bit size param = %d",
params_format(params));
snd_soc_write(codec, WM8741_FORMAT_CONTROL, iface);
return 0;
}
/* Record the MCLK frequency and select the matching rate-constraint
 * list for startup(). 22.5792 and 33.8688 MHz share one list since both
 * reach the same rates at different ratios.
 * Returns -EINVAL for an unrecognized frequency.
 */
static int wm8741_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);
dev_dbg(codec->dev, "wm8741_set_dai_sysclk info: freq=%dHz\n", freq);
switch (freq) {
case 11289600:
wm8741->sysclk_constraints = &constraints_11289;
wm8741->sysclk = freq;
return 0;
case 12288000:
wm8741->sysclk_constraints = &constraints_12288;
wm8741->sysclk = freq;
return 0;
case 16384000:
wm8741->sysclk_constraints = &constraints_16384;
wm8741->sysclk = freq;
return 0;
case 16934400:
wm8741->sysclk_constraints = &constraints_16934;
wm8741->sysclk = freq;
return 0;
case 18432000:
wm8741->sysclk_constraints = &constraints_18432;
wm8741->sysclk = freq;
return 0;
case 22579200:
case 33868800:
wm8741->sysclk_constraints = &constraints_22579;
wm8741->sysclk = freq;
return 0;
case 24576000:
wm8741->sysclk_constraints = &constraints_24576;
wm8741->sysclk = freq;
return 0;
case 36864000:
wm8741->sysclk_constraints = &constraints_36864;
wm8741->sysclk = freq;
return 0;
}
return -EINVAL;
}
/* Program the DAI format and clock-inversion bits. Only slave mode
 * (CBS_CFS) is accepted. Returns -EINVAL for unsupported settings.
 */
static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
/* Mask 0x1C3 keeps everything except format [3:2] and inversion [5:4] */
u16 iface = snd_soc_read(codec, WM8741_FORMAT_CONTROL) & 0x1C3;
/* check master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
break;
default:
return -EINVAL;
}
/* interface format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
iface |= 0x0008;
break;
case SND_SOC_DAIFMT_RIGHT_J:
break;
case SND_SOC_DAIFMT_LEFT_J:
iface |= 0x0004;
break;
case SND_SOC_DAIFMT_DSP_A:
iface |= 0x000C;
break;
case SND_SOC_DAIFMT_DSP_B:
iface |= 0x001C;
break;
default:
return -EINVAL;
}
/* clock inversion */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_IB_IF:
iface |= 0x0010;
break;
case SND_SOC_DAIFMT_IB_NF:
iface |= 0x0020;
break;
case SND_SOC_DAIFMT_NB_IF:
iface |= 0x0030;
break;
default:
return -EINVAL;
}
dev_dbg(codec->dev, "wm8741_set_dai_fmt: Format=%x, Clock Inv=%x\n",
fmt & SND_SOC_DAIFMT_FORMAT_MASK,
((fmt & SND_SOC_DAIFMT_INV_MASK)));
snd_soc_write(codec, WM8741_FORMAT_CONTROL, iface);
return 0;
}
/* Union of all rates across MCLKs; the actual subset is constrained at
 * stream startup from the configured sysclk.
 */
#define WM8741_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | \
SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | \
SNDRV_PCM_RATE_192000)
#define WM8741_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dai_ops wm8741_dai_ops = {
.startup = wm8741_startup,
.hw_params = wm8741_hw_params,
.set_sysclk = wm8741_set_dai_sysclk,
.set_fmt = wm8741_set_dai_fmt,
};
/* Playback-only DAI; the WM8741 is a stereo DAC */
static struct snd_soc_dai_driver wm8741_dai = {
.name = "wm8741",
.playback = {
.stream_name = "Playback",
.channels_min = 2, /* Mono modes not yet supported */
.channels_max = 2,
.rates = WM8741_RATES,
.formats = WM8741_FORMATS,
},
.ops = &wm8741_dai_ops,
};
#ifdef CONFIG_PM
/* Resume: replay the register cache to the hardware. */
static int wm8741_resume(struct snd_soc_codec *codec)
{
snd_soc_cache_sync(codec);
return 0;
}
#else
#define wm8741_suspend NULL
#define wm8741_resume NULL
#endif
/* Codec probe: acquire and enable the AVDD/DVDD supplies, set up cache
 * I/O (7-bit register / 9-bit data), reset the chip and enable volume
 * latching (VU) on all four attenuation registers.
 * Uses goto-based unwind on failure.
 */
static int wm8741_probe(struct snd_soc_codec *codec)
{
struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
int i;
for (i = 0; i < ARRAY_SIZE(wm8741->supplies); i++)
wm8741->supplies[i].supply = wm8741_supply_names[i];
ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8741->supplies),
wm8741->supplies);
if (ret != 0) {
dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
goto err;
}
ret = regulator_bulk_enable(ARRAY_SIZE(wm8741->supplies),
wm8741->supplies);
if (ret != 0) {
dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
goto err_get;
}
ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8741->control_type);
if (ret != 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
goto err_enable;
}
ret = wm8741_reset(codec);
if (ret < 0) {
dev_err(codec->dev, "Failed to issue reset\n");
goto err_enable;
}
/* Change some default settings - latch VU */
snd_soc_update_bits(codec, WM8741_DACLLSB_ATTENUATION,
WM8741_UPDATELL, WM8741_UPDATELL);
snd_soc_update_bits(codec, WM8741_DACLMSB_ATTENUATION,
WM8741_UPDATELM, WM8741_UPDATELM);
snd_soc_update_bits(codec, WM8741_DACRLSB_ATTENUATION,
WM8741_UPDATERL, WM8741_UPDATERL);
snd_soc_update_bits(codec, WM8741_DACRMSB_ATTENUATION,
WM8741_UPDATERM, WM8741_UPDATERM);
dev_dbg(codec->dev, "Successful registration\n");
return ret;
err_enable:
regulator_bulk_disable(ARRAY_SIZE(wm8741->supplies), wm8741->supplies);
err_get:
regulator_bulk_free(ARRAY_SIZE(wm8741->supplies), wm8741->supplies);
err:
return ret;
}
/* Codec remove: release the regulators acquired in probe. */
static int wm8741_remove(struct snd_soc_codec *codec)
{
struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);
regulator_bulk_disable(ARRAY_SIZE(wm8741->supplies), wm8741->supplies);
regulator_bulk_free(ARRAY_SIZE(wm8741->supplies), wm8741->supplies);
return 0;
}
/* ASoC codec driver descriptor: register cache layout, controls and
 * DAPM topology for the WM8741.
 */
static struct snd_soc_codec_driver soc_codec_dev_wm8741 = {
.probe = wm8741_probe,
.remove = wm8741_remove,
.resume = wm8741_resume,
.reg_cache_size = ARRAY_SIZE(wm8741_reg_defaults),
.reg_word_size = sizeof(u16),
.reg_cache_default = wm8741_reg_defaults,
.controls = wm8741_snd_controls,
.num_controls = ARRAY_SIZE(wm8741_snd_controls),
.dapm_widgets = wm8741_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8741_dapm_widgets),
.dapm_routes = wm8741_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(wm8741_dapm_routes),
};
/* Device-tree match table, shared by the I2C and SPI drivers */
static const struct of_device_id wm8741_of_match[] = {
{ .compatible = "wlf,wm8741", },
{ }
};
MODULE_DEVICE_TABLE(of, wm8741_of_match);
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/* I2C probe: allocate per-device state (device-managed), mark the
 * control bus as I2C and register the codec.
 */
static int wm8741_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8741_priv *priv;

priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;

priv->control_type = SND_SOC_I2C;
i2c_set_clientdata(i2c, priv);

return snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8741, &wm8741_dai, 1);
}
/* I2C remove: drop the codec registration (state is devm-managed). */
static int wm8741_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
}
static const struct i2c_device_id wm8741_i2c_id[] = {
{ "wm8741", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8741_i2c_id);
static struct i2c_driver wm8741_i2c_driver = {
.driver = {
.name = "wm8741",
.owner = THIS_MODULE,
.of_match_table = wm8741_of_match,
},
.probe = wm8741_i2c_probe,
.remove = wm8741_i2c_remove,
.id_table = wm8741_i2c_id,
};
#endif
#if defined(CONFIG_SPI_MASTER)
/* SPI probe: allocate per-device state (device-managed), mark the
 * control bus as SPI and register the codec.
 */
static int __devinit wm8741_spi_probe(struct spi_device *spi)
{
struct wm8741_priv *priv;

priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;

priv->control_type = SND_SOC_SPI;
spi_set_drvdata(spi, priv);

return snd_soc_register_codec(&spi->dev,
&soc_codec_dev_wm8741, &wm8741_dai, 1);
}
/* SPI remove: drop the codec registration (state is devm-managed). */
static int __devexit wm8741_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
}
static struct spi_driver wm8741_spi_driver = {
.driver = {
.name = "wm8741",
.owner = THIS_MODULE,
.of_match_table = wm8741_of_match,
},
.probe = wm8741_spi_probe,
.remove = __devexit_p(wm8741_spi_remove),
};
#endif /* CONFIG_SPI_MASTER */
/* Module init: register whichever bus drivers are configured.
 * NOTE(review): when both buses are enabled, ret from the I2C
 * registration is overwritten by the SPI result, so an I2C failure
 * followed by an SPI success returns 0 (only logged) - confirm this is
 * the intended best-effort behaviour.
 */
static int __init wm8741_modinit(void)
{
int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
ret = i2c_add_driver(&wm8741_i2c_driver);
if (ret != 0)
pr_err("Failed to register WM8741 I2C driver: %d\n", ret);
#endif
#if defined(CONFIG_SPI_MASTER)
ret = spi_register_driver(&wm8741_spi_driver);
if (ret != 0) {
printk(KERN_ERR "Failed to register wm8741 SPI driver: %d\n",
ret);
}
#endif
return ret;
}
module_init(wm8741_modinit);
/* Module exit: unregister the bus drivers in reverse order. */
static void __exit wm8741_exit(void)
{
#if defined(CONFIG_SPI_MASTER)
spi_unregister_driver(&wm8741_spi_driver);
#endif
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
i2c_del_driver(&wm8741_i2c_driver);
#endif
}
module_exit(wm8741_exit);
MODULE_DESCRIPTION("ASoC WM8741 driver");
MODULE_AUTHOR("Ian Lartey <ian@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
reposte/android_kernel_xiaomi_msm8992 | drivers/net/ethernet/intel/ixgb/ixgb_hw.c | 4823 | 38922 | /*******************************************************************************
Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* ixgb_hw.c
* Shared functions for accessing and configuring the adapter
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci_ids.h>
#include "ixgb_hw.h"
#include "ixgb_ids.h"
#include <linux/etherdevice.h>
/* Local function prototypes */
static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value);
static void ixgb_get_bus_info(struct ixgb_hw *hw);
static bool ixgb_link_reset(struct ixgb_hw *hw);
static void ixgb_optics_reset(struct ixgb_hw *hw);
static void ixgb_optics_reset_bcm(struct ixgb_hw *hw);
static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw);
static void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
static void ixgb_clear_vfta(struct ixgb_hw *hw);
static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
static u16 ixgb_read_phy_reg(struct ixgb_hw *hw,
u32 reg_address,
u32 phy_address,
u32 device_type);
static bool ixgb_setup_fc(struct ixgb_hw *hw);
static bool mac_addr_valid(u8 *mac_addr);
/* Issue a global MAC reset via CTRL0 (with the SDP pins driven to their
 * initial 1101 pattern), wait for it to complete, then re-run any
 * board-specific optics initialization. Returns the CTRL0 value read
 * back after the reset.
 */
static u32 ixgb_mac_reset(struct ixgb_hw *hw)
{
u32 ctrl_reg;
ctrl_reg = IXGB_CTRL0_RST |
IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
IXGB_CTRL0_SDP2_DIR |
IXGB_CTRL0_SDP1_DIR |
IXGB_CTRL0_SDP0_DIR |
IXGB_CTRL0_SDP3 | /* Initial value 1101 */
IXGB_CTRL0_SDP2 |
IXGB_CTRL0_SDP0;
#ifdef HP_ZX1
/* Workaround for 82597EX reset errata */
IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg);
#else
IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
#endif
/* Delay a few ms just to allow the reset to complete */
msleep(IXGB_DELAY_AFTER_RESET);
ctrl_reg = IXGB_READ_REG(hw, CTRL0);
#ifdef DBG
/* Make sure the self-clearing global reset bit did self clear */
ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
#endif
/* Sun boards route XFP/SerDes interrupts through GPIO and need a
 * Broadcom-specific optics reset sequence.
 */
if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) {
ctrl_reg = /* Enable interrupt from XFP and SerDes */
IXGB_CTRL1_GPI0_EN |
IXGB_CTRL1_SDP6_DIR |
IXGB_CTRL1_SDP7_DIR |
IXGB_CTRL1_SDP6 |
IXGB_CTRL1_SDP7;
IXGB_WRITE_REG(hw, CTRL1, ctrl_reg);
ixgb_optics_reset_bcm(hw);
}
if (hw->phy_type == ixgb_phy_type_txn17401)
ixgb_optics_reset(hw);
return ctrl_reg;
}
/******************************************************************************
 * Reset the transmit and receive units; mask and clear all interrupts.
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Returns: true if the global-reset bit is still set in the CTRL0 value
 * read back after the MAC reset (i.e. the reset did not self-clear),
 * false otherwise or when the adapter was already stopped.
 *****************************************************************************/
bool
ixgb_adapter_stop(struct ixgb_hw *hw)
{
	u32 ctrl_reg;
	u32 icr_reg;

	ENTER();

	/* If we are stopped or resetting exit gracefully and wait to be
	 * started again before accessing the hardware.
	 */
	if (hw->adapter_stopped) {
		pr_debug("Exiting because the adapter is already stopped!!!\n");
		return false;
	}

	/* Set the Adapter Stopped flag so other driver functions stop
	 * touching the Hardware.
	 */
	hw->adapter_stopped = true;

	/* Clear interrupt mask to stop board from generating interrupts */
	pr_debug("Masking off all interrupts\n");
	IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);

	/* Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC with
	 * the global reset.
	 */
	IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
	IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
	IXGB_WRITE_FLUSH(hw);
	msleep(IXGB_DELAY_BEFORE_RESET);

	/* Issue a global reset to the MAC.  This will reset the chip's
	 * transmit, receive, DMA, and link units.  It will not effect
	 * the current PCI configuration.  The global reset bit is self-
	 * clearing, and should clear within a microsecond.
	 */
	pr_debug("Issuing a global reset to MAC\n");
	ctrl_reg = ixgb_mac_reset(hw);

	/* Mask interrupts again: the reset restored the hardware defaults */
	pr_debug("Masking off all interrupts\n");
	IXGB_WRITE_REG(hw, IMC, 0xffffffff);

	/* Clear any pending interrupt events (ICR is read-to-clear). */
	icr_reg = IXGB_READ_REG(hw, ICR);

	return ctrl_reg & IXGB_CTRL0_RST;
}
/******************************************************************************
* Identifies the vendor of the optics module on the adapter. The SR adapters
* support two different types of XPAK optics, so it is necessary to determine
* which optics are present before applying any optics-specific workarounds.
*
* hw - Struct containing variables accessed by shared code.
*
* Returns: the vendor of the XPAK optics module.
*****************************************************************************/
static ixgb_xpak_vendor
ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
{
u32 i;
u16 vendor_name[5];
ixgb_xpak_vendor xpak_vendor;
ENTER();
/* Read the first few bytes of the vendor string from the XPAK NVR
* registers. These are standard XENPAK/XPAK registers, so all XPAK
* devices should implement them. */
for (i = 0; i < 5; i++) {
vendor_name[i] = ixgb_read_phy_reg(hw,
MDIO_PMA_PMD_XPAK_VENDOR_NAME
+ i, IXGB_PHY_ADDRESS,
MDIO_MMD_PMAPMD);
}
/* Determine the actual vendor */
if (vendor_name[0] == 'I' &&
vendor_name[1] == 'N' &&
vendor_name[2] == 'T' &&
vendor_name[3] == 'E' && vendor_name[4] == 'L') {
xpak_vendor = ixgb_xpak_vendor_intel;
} else {
xpak_vendor = ixgb_xpak_vendor_infineon;
}
return xpak_vendor;
}
/******************************************************************************
 * Determine the physical layer module on the adapter.
 *
 * hw - Struct containing variables accessed by shared code.  The device_id
 *      field must be (correctly) populated before calling this routine.
 *
 * Returns: the phy type of the adapter.
 *****************************************************************************/
static ixgb_phy_type
ixgb_identify_phy(struct ixgb_hw *hw)
{
	ixgb_phy_type result;

	ENTER();

	/* The device id implies which transceiver/phy is fitted. */
	switch (hw->device_id) {
	case IXGB_DEVICE_ID_82597EX:
		pr_debug("Identified TXN17401 optics\n");
		result = ixgb_phy_type_txn17401;
		break;

	case IXGB_DEVICE_ID_82597EX_LR:
		pr_debug("Identified G6104 optics\n");
		result = ixgb_phy_type_g6104;
		break;

	case IXGB_DEVICE_ID_82597EX_SR:
	case IXGB_DEVICE_ID_82597EX_CX4:
		/* SR and CX4 boards may carry either of two XPAK optics
		 * modules; probe the module's vendor id to tell them apart.
		 */
		if (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
			pr_debug("Identified CX4\n");
		if (ixgb_identify_xpak_vendor(hw) == ixgb_xpak_vendor_intel) {
			pr_debug("Identified TXN17201 optics\n");
			result = ixgb_phy_type_txn17201;
		} else {
			pr_debug("Identified G6005 optics\n");
			result = ixgb_phy_type_g6005;
		}
		break;

	default:
		pr_debug("Unknown physical layer module\n");
		result = ixgb_phy_type_unknown;
		break;
	}

	/* Sun-branded boards use a Broadcom phy regardless of device id. */
	if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN)
		result = ixgb_phy_type_bcm;

	return result;
}
/******************************************************************************
 * Performs basic configuration of the adapter.
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Resets the controller.
 * Reads and validates the EEPROM.
 * Initializes the receive address registers.
 * Initializes the multicast table.
 * Clears all on-chip counters.
 * Calls routine to setup flow control settings.
 * Leaves the transmit and receive units disabled and uninitialized.
 *
 * Returns:
 *      true if successful,
 *      false if unrecoverable problems were encountered.
 *****************************************************************************/
bool
ixgb_init_hw(struct ixgb_hw *hw)
{
	u32 i;
	u32 ctrl_reg;
	bool status;

	ENTER();

	/* Issue a global reset to the MAC.  This will reset the chip's
	 * transmit, receive, DMA, and link units.  It will not effect
	 * the current PCI configuration.  The global reset bit is self-
	 * clearing, and should clear within a microsecond.
	 */
	pr_debug("Issuing a global reset to MAC\n");
	ctrl_reg = ixgb_mac_reset(hw);

	pr_debug("Issuing an EE reset to MAC\n");
#ifdef HP_ZX1
	/* Workaround for 82597EX reset errata: use I/O-mapped access */
	IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
#else
	IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST);
#endif

	/* Delay a few ms just to allow the reset to complete */
	msleep(IXGB_DELAY_AFTER_EE_RESET);

	/* EEPROM contents must be valid before anything below can run. */
	if (!ixgb_get_eeprom_data(hw))
		return false;

	/* Use the device id to determine the type of phy/transceiver. */
	hw->device_id = ixgb_get_ee_device_id(hw);
	hw->phy_type = ixgb_identify_phy(hw);

	/* Setup the receive addresses.
	 * Receive Address Registers (RARs 0 - 15).
	 */
	ixgb_init_rx_addrs(hw);

	/*
	 * Check that a valid MAC address has been set.
	 * If it is not valid, we fail hardware init.
	 */
	if (!mac_addr_valid(hw->curr_mac_addr)) {
		pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
		return(false);
	}

	/* tell the routines in this file they can access hardware again */
	hw->adapter_stopped = false;

	/* Fill in the bus_info structure */
	ixgb_get_bus_info(hw);

	/* Zero out the Multicast HASH table */
	pr_debug("Zeroing the MTA\n");
	for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
		IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);

	/* Zero out the VLAN Filter Table Array */
	ixgb_clear_vfta(hw);

	/* Zero all of the hardware counters (they are read-to-clear) */
	ixgb_clear_hw_cntrs(hw);

	/* Call a subroutine to setup flow control. */
	status = ixgb_setup_fc(hw);

	/* 82597EX errata: Call check-for-link in case lane deskew is locked */
	ixgb_check_for_link(hw);

	return status;
}
/******************************************************************************
 * Initializes receive address filters.
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers.  Clears the multicast table.  Assumes
 * the receiver is in reset when the routine is called.
 *****************************************************************************/
static void
ixgb_init_rx_addrs(struct ixgb_hw *hw)
{
	u32 i;

	ENTER();

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (!mac_addr_valid(hw->curr_mac_addr)) {
		/* Get the MAC address from the eeprom for later reference */
		ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
		pr_debug("Keeping Permanent MAC Addr = %pM\n",
			 hw->curr_mac_addr);
	} else {
		/* Setup the receive address. */
		pr_debug("Overriding MAC Address in RAR[0]\n");
		pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
		ixgb_rar_set(hw, hw->curr_mac_addr, 0);
	}

	/* Zero out the other 15 receive addresses.  Each RAR is a pair of
	 * 32-bit registers: low word at (i << 1), high word at (i << 1) + 1.
	 */
	pr_debug("Clearing RAR[1-15]\n");
	for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
		/* Write high reg first to disable the AV (address valid)
		 * bit before the low half of the address changes.
		 */
		IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
		IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
	}
}
/******************************************************************************
 * Updates the MAC's list of multicast addresses.
 *
 * hw - Struct containing variables accessed by shared code
 * mc_addr_list - the list of new multicast addresses
 * mc_addr_count - number of addresses
 * pad - number of bytes between addresses in the list
 *
 * The given list replaces any existing list.  Clears the last 15 receive
 * address registers and the multicast table.  Uses receive address registers
 * for the first 15 multicast addresses, and hashes the rest into the
 * multicast table.
 *****************************************************************************/
void
ixgb_mc_addr_list_update(struct ixgb_hw *hw,
			 u8 *mc_addr_list,
			 u32 mc_addr_count,
			 u32 pad)
{
	u32 hash_value;
	u32 i;
	u32 rar_used_count = 1;		/* RAR[0] is used for our MAC address */
	u8 *mca;

	ENTER();

	/* Set the new number of MC addresses that we are being requested to use. */
	hw->num_mc_addrs = mc_addr_count;

	/* Clear RAR[1-15] so no stale filter entries can match. */
	pr_debug("Clearing RAR[1-15]\n");
	for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
		IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
		IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
	}

	/* Clear the MTA */
	pr_debug("Clearing MTA\n");
	for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
		IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);

	/* Add the new addresses.  Each list entry is ETH_ALEN address
	 * bytes followed by 'pad' filler bytes.
	 */
	mca = mc_addr_list;
	for (i = 0; i < mc_addr_count; i++) {
		pr_debug("Adding the multicast addresses:\n");
		pr_debug("MC Addr #%d = %pM\n", i, mca);

		/* Place this multicast address in the RAR if there is room,
		 * else put it in the MTA
		 */
		if (rar_used_count < IXGB_RAR_ENTRIES) {
			ixgb_rar_set(hw, mca, rar_used_count);
			pr_debug("Added a multicast address to RAR[%d]\n", i);
			rar_used_count++;
		} else {
			hash_value = ixgb_hash_mc_addr(hw, mca);
			pr_debug("Hash value = 0x%03X\n", hash_value);
			ixgb_mta_set(hw, hash_value);
		}
		mca += ETH_ALEN + pad;
	}

	pr_debug("MC Update Complete\n");
}
/******************************************************************************
 * Hashes an address to determine its location in the multicast table
 *
 * hw - Struct containing variables accessed by shared code
 * mc_addr - the multicast address to hash
 *
 * Returns:
 *      The 12-bit hash value (0 on an invalid mc_filter_type).
 *****************************************************************************/
static u32
ixgb_hash_mc_addr(struct ixgb_hw *hw,
		  u8 *mc_addr)
{
	/* Per the hardware docs the hash is built from a 12-bit window of
	 * the address (bytes 4 and 5, LSB-first on the wire, e.g.
	 * 01 AA 00 12 34 56).  mc_filter_type selects the window:
	 *   type 0 -> bits [47:36], 1 -> [46:35], 2 -> [45:34], 3 -> [43:32].
	 * For every type the left shift on byte 5 is (8 - right shift on
	 * byte 4), so a single right-shift table covers all four cases.
	 */
	static const u8 addr4_rshift[] = { 4, 3, 2, 0 };
	u32 filter_type = hw->mc_filter_type;
	u32 hash_value = 0;

	ENTER();

	if (filter_type <= 3) {
		u8 rs = addr4_rshift[filter_type];

		hash_value = (mc_addr[4] >> rs) |
			     (((u16) mc_addr[5]) << (8 - rs));
	} else {
		/* Invalid mc_filter_type, what should we do? */
		pr_debug("MC filter type param set incorrectly\n");
		ASSERT(0);
	}

	return hash_value & 0xFFF;
}
/******************************************************************************
 * Sets the bit in the multicast table corresponding to the hash value.
 *
 * hw - Struct containing variables accessed by shared code
 * hash_value - Multicast address hash value
 *****************************************************************************/
static void
ixgb_mta_set(struct ixgb_hw *hw,
	     u32 hash_value)
{
	/* The MTA is a register array of 128 32-bit registers treated as
	 * a 4096-bit array.  The upper 7 bits of the hash select the
	 * register, the lower 5 bits select the bit within it; do a
	 * read-modify-write on that word.
	 */
	u32 reg_index = (hash_value >> 5) & 0x7F;
	u32 bit_index = hash_value & 0x1F;
	u32 word;

	word = IXGB_READ_REG_ARRAY(hw, MTA, reg_index);
	word |= 1u << bit_index;
	IXGB_WRITE_REG_ARRAY(hw, MTA, reg_index, word);
}
/******************************************************************************
 * Puts an ethernet address into a receive address register.
 *
 * hw - Struct containing variables accessed by shared code
 * addr - Address to put into receive address register
 * index - Receive address register to write
 *****************************************************************************/
void
ixgb_rar_set(struct ixgb_hw *hw,
	     u8 *addr,
	     u32 index)
{
	u32 low, high;

	ENTER();

	/* The hardware expects the address little endian, so pack the
	 * network-order (big endian) bytes LSB first.
	 */
	low  = (u32) addr[0];
	low |= (u32) addr[1] << 8;
	low |= (u32) addr[2] << 16;
	low |= (u32) addr[3] << 24;

	high  = (u32) addr[4];
	high |= (u32) addr[5] << 8;
	high |= IXGB_RAH_AV;	/* mark the entry as valid */

	IXGB_WRITE_REG_ARRAY(hw, RA, index << 1, low);
	IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1) + 1, high);
}
/******************************************************************************
 * Writes a value to the specified offset in the VLAN filter table.
 *
 * hw - Struct containing variables accessed by shared code
 * offset - Offset in VLAN filter table to write
 * value - Value to write into VLAN filter table
 *****************************************************************************/
void
ixgb_write_vfta(struct ixgb_hw *hw,
		u32 offset,
		u32 value)
{
	IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
}
/******************************************************************************
 * Clears the VLAN filter table.
 *
 * hw - Struct containing variables accessed by shared code
 *****************************************************************************/
static void
ixgb_clear_vfta(struct ixgb_hw *hw)
{
	u32 i;

	/* Zero every entry so no stale VLAN filters remain. */
	for (i = 0; i < IXGB_VLAN_FILTER_TBL_SIZE; i++)
		ixgb_write_vfta(hw, i, 0);
}
/******************************************************************************
 * Configures the flow control settings based on SW configuration.
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Returns: true (the current implementation has no failure path).
 *****************************************************************************/
static bool
ixgb_setup_fc(struct ixgb_hw *hw)
{
	u32 ctrl_reg;
	u32 pap_reg = 0;	/* by default, assume no pause time */
	bool status = true;

	ENTER();

	/* Get the current control reg 0 settings */
	ctrl_reg = IXGB_READ_REG(hw, CTRL0);

	/* Clear the Receive Pause Enable and Transmit Pause Enable bits */
	ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);

	/* The possible values of the "flow_control" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause frames
	 *          but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not support receiving pause frames).
	 *      3:  Both Rx and TX flow control (symmetric) are enabled.
	 *  other:  Invalid.
	 */
	switch (hw->fc.type) {
	case ixgb_fc_none:	/* 0 */
		/* Set CMDC bit to disable Rx Flow control */
		ctrl_reg |= (IXGB_CTRL0_CMDC);
		break;
	case ixgb_fc_rx_pause:	/* 1 */
		/* RX Flow control is enabled, and TX Flow control is
		 * disabled.
		 */
		ctrl_reg |= (IXGB_CTRL0_RPE);
		break;
	case ixgb_fc_tx_pause:	/* 2 */
		/* TX Flow control is enabled, and RX Flow control is
		 * disabled, by a software over-ride.
		 */
		ctrl_reg |= (IXGB_CTRL0_TPE);
		pap_reg = hw->fc.pause_time;
		break;
	case ixgb_fc_full:	/* 3 */
		/* Flow control (both RX and TX) is enabled by a software
		 * over-ride.
		 */
		ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
		pap_reg = hw->fc.pause_time;
		break;
	default:
		/* We should never get here.  The value should be 0-3. */
		pr_debug("Flow control param set incorrectly\n");
		ASSERT(0);
		break;
	}

	/* Write the new settings */
	IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);

	if (pap_reg != 0)
		IXGB_WRITE_REG(hw, PAP, pap_reg);

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 *
	 * The bitwise AND works because ixgb_fc_full (3) has the
	 * ixgb_fc_tx_pause (2) bit set.
	 */
	if (!(hw->fc.type & ixgb_fc_tx_pause)) {
		IXGB_WRITE_REG(hw, FCRTL, 0);
		IXGB_WRITE_REG(hw, FCRTH, 0);
	} else {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames. */
		if (hw->fc.send_xon) {
			IXGB_WRITE_REG(hw, FCRTL,
				       (hw->fc.low_water | IXGB_FCRTL_XONE));
		} else {
			IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
		}
		IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
	}
	return status;
}
/******************************************************************************
 * Reads a word from a device over the Management Data Interface (MDI) bus.
 * This interface is used to manage Physical layer devices.
 *
 * hw - Struct containing variables accessed by hw code
 * reg_address - Offset of device register being read.
 * phy_address - Address of device on MDI.
 * device_type - Also known as the Device ID or DID.
 *
 * Returns: Data word (16 bits) from MDI device.
 *
 * The 82597EX has support for several MDI access methods.  This routine
 * uses the new protocol MDI Single Command and Address Operation.
 * This requires that first an address cycle command is sent, followed by a
 * read command.
 *****************************************************************************/
static u16
ixgb_read_phy_reg(struct ixgb_hw *hw,
		  u32 reg_address,
		  u32 phy_address,
		  u32 device_type)
{
	u32 i;
	u32 data;
	u32 command = 0;

	ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
	ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
	ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);

	/* Setup and write the address cycle command */
	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
		   (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
		   (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));

	IXGB_WRITE_REG(hw, MSCA, command);

	/* Poll every 10 usec for completion of the address cycle; the
	 * MDI_COMMAND bit self-clears when the operation is done.  It may
	 * take as long as 64 usecs, so allow up to 100 usecs total.
	 */
	for (i = 0; i < 10; i++)
	{
		udelay(10);

		command = IXGB_READ_REG(hw, MSCA);

		if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
			break;
	}

	ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);

	/* Address cycle complete, setup and write the read command */
	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
		   (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
		   (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND));

	IXGB_WRITE_REG(hw, MSCA, command);

	/* Poll again, this time for completion of the read command;
	 * same timing budget as the address cycle above.
	 */
	for (i = 0; i < 10; i++)
	{
		udelay(10);

		command = IXGB_READ_REG(hw, MSCA);

		if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
			break;
	}

	ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);

	/* Operation is complete, get the data from the MDIO Read/Write Data
	 * register and return.
	 */
	data = IXGB_READ_REG(hw, MSRWD);
	data >>= IXGB_MSRWD_READ_DATA_SHIFT;
	return((u16) data);
}
/******************************************************************************
 * Writes a word to a device over the Management Data Interface (MDI) bus.
 * This interface is used to manage Physical layer devices.
 *
 * hw - Struct containing variables accessed by hw code
 * reg_address - Offset of device register being written.
 * phy_address - Address of device on MDI.
 * device_type - Also known as the Device ID or DID.
 * data - 16-bit value to be written
 *
 * Returns: void.
 *
 * The 82597EX has support for several MDI access methods.  This routine
 * uses the new protocol MDI Single Command and Address Operation.
 * This requires that first an address cycle command is sent, followed by a
 * write command.
 *****************************************************************************/
static void
ixgb_write_phy_reg(struct ixgb_hw *hw,
		   u32 reg_address,
		   u32 phy_address,
		   u32 device_type,
		   u16 data)
{
	u32 i;
	u32 command = 0;

	ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
	ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
	ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);

	/* Put the data in the MDIO Read/Write Data register first; the
	 * write command below transfers it onto the MDI bus.
	 */
	IXGB_WRITE_REG(hw, MSRWD, (u32)data);

	/* Setup and write the address cycle command */
	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
		   (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
		   (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));

	IXGB_WRITE_REG(hw, MSCA, command);

	/* Poll every 10 usec for completion of the address cycle; the
	 * MDI_COMMAND bit self-clears when the operation is done.  It may
	 * take as long as 64 usecs, so allow up to 100 usecs total.
	 */
	for (i = 0; i < 10; i++)
	{
		udelay(10);

		command = IXGB_READ_REG(hw, MSCA);

		if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
			break;
	}

	ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);

	/* Address cycle complete, setup and write the write command */
	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
		   (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
		   (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));

	IXGB_WRITE_REG(hw, MSCA, command);

	/* Poll again, this time for completion of the write command;
	 * same timing budget as the address cycle above.
	 */
	for (i = 0; i < 10; i++)
	{
		udelay(10);

		command = IXGB_READ_REG(hw, MSCA);

		if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
			break;
	}

	ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);

	/* Operation is complete, return. */
}
/******************************************************************************
 * Checks to see if the link status of the hardware has changed.
 *
 * hw - Struct containing variables accessed by hw code
 *
 * Called by any function that needs to check the link status of the adapter.
 *****************************************************************************/
void
ixgb_check_for_link(struct ixgb_hw *hw)
{
	u32 xpcss;
	u32 status;

	ENTER();

	xpcss = IXGB_READ_REG(hw, XPCSS);
	status = IXGB_READ_REG(hw, STATUS);

	if (status & IXGB_STATUS_LU) {
		if (xpcss & IXGB_XPCSS_ALIGN_STATUS) {
			/* Link up and lanes aligned: healthy link. */
			hw->link_up = true;
			return;
		}
		pr_debug("XPCSS Not Aligned while Status:LU is set\n");
	}

	/* Either the link is down or the lanes are misaligned.  82597EX
	 * errata: the lane deskew problem may prevent link, so reset the
	 * link before reporting link down.
	 */
	hw->link_up = ixgb_link_reset(hw);

	/* Anything else for 10 Gig?? */
}
/******************************************************************************
 * Check for a bad link condition that may have occurred.
 * The indication is that the RFC / LFC registers may be incrementing
 * continually.  A full adapter reset is required to recover.
 *
 * hw - Struct containing variables accessed by hw code
 *
 * Called by any function that needs to check the link status of the adapter.
 *****************************************************************************/
bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
{
	bool bad_link = false;

	if (hw->phy_type == ixgb_phy_type_txn17401) {
		u32 lfc = IXGB_READ_REG(hw, LFC);
		u32 rfc = IXGB_READ_REG(hw, RFC);

		/* More than 250 new local/remote fault counts since the
		 * last poll means the link is unusable.
		 */
		if (lfc > hw->lastLFC + 250 || rfc > hw->lastRFC + 250) {
			pr_debug("BAD LINK! too many LFC/RFC since last check\n");
			bad_link = true;
		}
		hw->lastLFC = lfc;
		hw->lastRFC = rfc;
	}

	return bad_link;
}
/******************************************************************************
 * Clears all hardware statistics counters.
 *
 * hw - Struct containing variables accessed by shared code
 *
 * The statistics registers are read-to-clear, so simply reading each one
 * resets it; the values read are intentionally discarded.  temp_reg is
 * volatile so the compiler cannot elide the reads.
 *****************************************************************************/
static void
ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
	volatile u32 temp_reg;

	ENTER();

	/* if we are stopped or resetting exit gracefully */
	if (hw->adapter_stopped) {
		pr_debug("Exiting because the adapter is stopped!!!\n");
		return;
	}

	temp_reg = IXGB_READ_REG(hw, TPRL);
	temp_reg = IXGB_READ_REG(hw, TPRH);
	temp_reg = IXGB_READ_REG(hw, GPRCL);
	temp_reg = IXGB_READ_REG(hw, GPRCH);
	temp_reg = IXGB_READ_REG(hw, BPRCL);
	temp_reg = IXGB_READ_REG(hw, BPRCH);
	temp_reg = IXGB_READ_REG(hw, MPRCL);
	temp_reg = IXGB_READ_REG(hw, MPRCH);
	temp_reg = IXGB_READ_REG(hw, UPRCL);
	temp_reg = IXGB_READ_REG(hw, UPRCH);
	temp_reg = IXGB_READ_REG(hw, VPRCL);
	temp_reg = IXGB_READ_REG(hw, VPRCH);
	temp_reg = IXGB_READ_REG(hw, JPRCL);
	temp_reg = IXGB_READ_REG(hw, JPRCH);
	temp_reg = IXGB_READ_REG(hw, GORCL);
	temp_reg = IXGB_READ_REG(hw, GORCH);
	temp_reg = IXGB_READ_REG(hw, TORL);
	temp_reg = IXGB_READ_REG(hw, TORH);
	temp_reg = IXGB_READ_REG(hw, RNBC);
	temp_reg = IXGB_READ_REG(hw, RUC);
	temp_reg = IXGB_READ_REG(hw, ROC);
	temp_reg = IXGB_READ_REG(hw, RLEC);
	temp_reg = IXGB_READ_REG(hw, CRCERRS);
	temp_reg = IXGB_READ_REG(hw, ICBC);
	temp_reg = IXGB_READ_REG(hw, ECBC);
	temp_reg = IXGB_READ_REG(hw, MPC);
	temp_reg = IXGB_READ_REG(hw, TPTL);
	temp_reg = IXGB_READ_REG(hw, TPTH);
	temp_reg = IXGB_READ_REG(hw, GPTCL);
	temp_reg = IXGB_READ_REG(hw, GPTCH);
	temp_reg = IXGB_READ_REG(hw, BPTCL);
	temp_reg = IXGB_READ_REG(hw, BPTCH);
	temp_reg = IXGB_READ_REG(hw, MPTCL);
	temp_reg = IXGB_READ_REG(hw, MPTCH);
	temp_reg = IXGB_READ_REG(hw, UPTCL);
	temp_reg = IXGB_READ_REG(hw, UPTCH);
	temp_reg = IXGB_READ_REG(hw, VPTCL);
	temp_reg = IXGB_READ_REG(hw, VPTCH);
	temp_reg = IXGB_READ_REG(hw, JPTCL);
	temp_reg = IXGB_READ_REG(hw, JPTCH);
	temp_reg = IXGB_READ_REG(hw, GOTCL);
	temp_reg = IXGB_READ_REG(hw, GOTCH);
	temp_reg = IXGB_READ_REG(hw, TOTL);
	temp_reg = IXGB_READ_REG(hw, TOTH);
	temp_reg = IXGB_READ_REG(hw, DC);
	temp_reg = IXGB_READ_REG(hw, PLT64C);
	temp_reg = IXGB_READ_REG(hw, TSCTC);
	temp_reg = IXGB_READ_REG(hw, TSCTFC);
	temp_reg = IXGB_READ_REG(hw, IBIC);
	temp_reg = IXGB_READ_REG(hw, RFC);
	temp_reg = IXGB_READ_REG(hw, LFC);
	temp_reg = IXGB_READ_REG(hw, PFRC);
	temp_reg = IXGB_READ_REG(hw, PFTC);
	temp_reg = IXGB_READ_REG(hw, MCFRC);
	temp_reg = IXGB_READ_REG(hw, MCFTC);
	temp_reg = IXGB_READ_REG(hw, XONRXC);
	temp_reg = IXGB_READ_REG(hw, XONTXC);
	temp_reg = IXGB_READ_REG(hw, XOFFRXC);
	temp_reg = IXGB_READ_REG(hw, XOFFTXC);
	temp_reg = IXGB_READ_REG(hw, RJC);
}
/******************************************************************************
 * Turns on the software controllable LED
 *
 * hw - Struct containing variables accessed by shared code
 *****************************************************************************/
void
ixgb_led_on(struct ixgb_hw *hw)
{
	/* The LED lights when software-definable pin 0 (SDP0) is cleared. */
	IXGB_WRITE_REG(hw, CTRL0,
		       IXGB_READ_REG(hw, CTRL0) & ~IXGB_CTRL0_SDP0);
}
/******************************************************************************
 * Turns off the software controllable LED
 *
 * hw - Struct containing variables accessed by shared code
 *****************************************************************************/
void
ixgb_led_off(struct ixgb_hw *hw)
{
	/* The LED goes dark when software-definable pin 0 (SDP0) is set. */
	IXGB_WRITE_REG(hw, CTRL0,
		       IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_SDP0);
}
/******************************************************************************
 * Gets the current PCI bus type, speed, and width of the hardware
 *
 * hw - Struct containing variables accessed by shared code
 *****************************************************************************/
static void
ixgb_get_bus_info(struct ixgb_hw *hw)
{
	u32 status = IXGB_READ_REG(hw, STATUS);

	/* Bus type, speed and width are all encoded in the STATUS register. */
	hw->bus.type = (status & IXGB_STATUS_PCIX_MODE) ?
		       ixgb_bus_type_pcix : ixgb_bus_type_pci;

	if (hw->bus.type == ixgb_bus_type_pci) {
		/* Plain PCI: only 33 or 66 MHz are possible. */
		hw->bus.speed = (status & IXGB_STATUS_PCI_SPD) ?
				ixgb_bus_speed_66 : ixgb_bus_speed_33;
	} else {
		u32 pcix_spd = status & IXGB_STATUS_PCIX_SPD_MASK;

		if (pcix_spd == IXGB_STATUS_PCIX_SPD_66)
			hw->bus.speed = ixgb_bus_speed_66;
		else if (pcix_spd == IXGB_STATUS_PCIX_SPD_100)
			hw->bus.speed = ixgb_bus_speed_100;
		else if (pcix_spd == IXGB_STATUS_PCIX_SPD_133)
			hw->bus.speed = ixgb_bus_speed_133;
		else
			hw->bus.speed = ixgb_bus_speed_reserved;
	}

	hw->bus.width = (status & IXGB_STATUS_BUS64) ?
			ixgb_bus_width_64 : ixgb_bus_width_32;
}
/******************************************************************************
 * Tests a MAC address to ensure it is a valid Individual Address
 *
 * mac_addr - pointer to MAC address.
 *
 * Returns: true if the address is neither multicast, broadcast, nor all
 * zeros.  The checks run in this order so the debug output matches the
 * first failing test (broadcast addresses are also multicast and are
 * reported as such).
 *****************************************************************************/
static bool
mac_addr_valid(u8 *mac_addr)
{
	ENTER();

	if (is_multicast_ether_addr(mac_addr)) {
		pr_debug("MAC address is multicast\n");
		return false;
	}
	if (is_broadcast_ether_addr(mac_addr)) {
		pr_debug("MAC address is broadcast\n");
		return false;
	}
	if (is_zero_ether_addr(mac_addr)) {
		pr_debug("MAC address is all zeros\n");
		return false;
	}
	return true;
}
/******************************************************************************
 * Resets the 10GbE link.  Waits the settle time and returns the state of
 * the link.
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Returns: true if link-up and lane alignment were both observed before
 * the retry budgets ran out, false otherwise.
 *****************************************************************************/
static bool
ixgb_link_reset(struct ixgb_hw *hw)
{
	bool link_status = false;
	u8 wait_retries = MAX_RESET_ITERATIONS;
	u8 lrst_retries = MAX_RESET_ITERATIONS;

	/* NOTE(review): wait_retries is not reinitialized between outer
	 * iterations, so the inner polling budget is shared across all
	 * link-reset attempts -- confirm this is intentional.
	 */
	do {
		/* Reset the link */
		IXGB_WRITE_REG(hw, CTRL0,
			       IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST);

		/* Wait for link-up and lane re-alignment; the link counts
		 * as up only when STATUS.LU and XPCSS alignment are both set.
		 */
		do {
			udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET);
			link_status =
			    ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU)
			     && (IXGB_READ_REG(hw, XPCSS) &
				 IXGB_XPCSS_ALIGN_STATUS)) ? true : false;
		} while (!link_status && --wait_retries);

	} while (!link_status && --lrst_retries);

	return link_status;
}
/******************************************************************************
 * Resets the 10GbE optics module.
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Only acts on TXN17401 optics; other phy types are left untouched.
 *****************************************************************************/
static void
ixgb_optics_reset(struct ixgb_hw *hw)
{
	u16 mdio_reg;

	if (hw->phy_type != ixgb_phy_type_txn17401)
		return;

	/* Set the PMA/PMD reset bit, then read the control register back. */
	ixgb_write_phy_reg(hw,
			   MDIO_CTRL1,
			   IXGB_PHY_ADDRESS,
			   MDIO_MMD_PMAPMD,
			   MDIO_CTRL1_RESET);

	mdio_reg = ixgb_read_phy_reg(hw,
				     MDIO_CTRL1,
				     IXGB_PHY_ADDRESS,
				     MDIO_MMD_PMAPMD);
}
/******************************************************************************
 * Resets the 10GbE optics module for Sun variant NIC.
 *
 * hw - Struct containing variables accessed by shared code
 *****************************************************************************/
#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG 0xC803
#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL 0x0164
#define IXGB_BCM8704_USER_CTRL_REG 0xC800
#define IXGB_BCM8704_USER_CTRL_REG_VAL 0x7FBF
#define IXGB_BCM8704_USER_DEV3_ADDR 0x0003
#define IXGB_SUN_PHY_ADDRESS 0x0000
#define IXGB_SUN_PHY_RESET_DELAY 305
static void
ixgb_optics_reset_bcm(struct ixgb_hw *hw)
{
	u32 ctrl0;
	int i;

	/* Drive the optics reset pins: SDP2 low, SDP3 high */
	ctrl0 = IXGB_READ_REG(hw, CTRL0);
	ctrl0 &= ~IXGB_CTRL0_SDP2;
	ctrl0 |= IXGB_CTRL0_SDP3;
	IXGB_WRITE_REG(hw, CTRL0, ctrl0);
	IXGB_WRITE_FLUSH(hw);

	/* SerDes needs extra delay */
	msleep(IXGB_SUN_PHY_RESET_DELAY);

	/* Broadcom 7408L configuration */
	/* Reference clock config */
	ixgb_write_phy_reg(hw,
			   IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
			   IXGB_SUN_PHY_ADDRESS,
			   IXGB_BCM8704_USER_DEV3_ADDR,
			   IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL);
	/* we must read the registers twice */
	for (i = 0; i < 2; i++)
		ixgb_read_phy_reg(hw,
				  IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
				  IXGB_SUN_PHY_ADDRESS,
				  IXGB_BCM8704_USER_DEV3_ADDR);

	ixgb_write_phy_reg(hw,
			   IXGB_BCM8704_USER_CTRL_REG,
			   IXGB_SUN_PHY_ADDRESS,
			   IXGB_BCM8704_USER_DEV3_ADDR,
			   IXGB_BCM8704_USER_CTRL_REG_VAL);
	/* again, read back twice */
	for (i = 0; i < 2; i++)
		ixgb_read_phy_reg(hw,
				  IXGB_BCM8704_USER_CTRL_REG,
				  IXGB_SUN_PHY_ADDRESS,
				  IXGB_BCM8704_USER_DEV3_ADDR);

	/* SerDes needs extra delay */
	msleep(IXGB_SUN_PHY_RESET_DELAY);
}
| gpl-2.0 |
hiikezoe/android_kernel_kyocera_dm015k | arch/arm/mach-at91/board-gsia18s.c | 4823 | 14026 | /*
* Copyright (C) 2010 Christian Glindkamp <christian.glindkamp@taskit.de>
* taskit GmbH
* 2010 Igor Plyatov <plyatov@gmail.com>
* GeoSIG Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/w1-gpio.h>
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/board.h>
#include <mach/at91sam9_smc.h>
#include <mach/gsia18s.h>
#include <mach/stamp9g20.h>
#include "sam9_smc.h"
#include "generic.h"
static void __init gsia18s_init_early(void)
{
	/* Board USART wiring: { peripheral id, ttySn number, handshake pins } */
	static const struct {
		unsigned id;
		unsigned portnr;
		unsigned pins;
	} uart_cfg[] = {
		/*
		 * USART0 on ttyS1 (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI).
		 * Used for Internal Analog Modem.
		 */
		{ AT91SAM9260_ID_US0, 1,
		  ATMEL_UART_CTS | ATMEL_UART_RTS |
		  ATMEL_UART_DTR | ATMEL_UART_DSR |
		  ATMEL_UART_DCD | ATMEL_UART_RI },
		/*
		 * USART1 on ttyS2 (Rx, Tx, CTS, RTS).
		 * Used for GPS or WiFi or Data stream.
		 */
		{ AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS },
		/*
		 * USART2 on ttyS3 (Rx, Tx, CTS, RTS).
		 * Used for External Modem.
		 */
		{ AT91SAM9260_ID_US2, 3, ATMEL_UART_CTS | ATMEL_UART_RTS },
		/*
		 * USART3 on ttyS4 (Rx, Tx, RTS).
		 * Used for RS-485.
		 */
		{ AT91SAM9260_ID_US3, 4, ATMEL_UART_RTS },
		/*
		 * USART4 on ttyS5 (Rx, Tx).
		 * Used for TRX433 Radio Module.
		 */
		{ AT91SAM9260_ID_US4, 5, 0 },
	};
	int i;

	stamp9g20_init_early();

	for (i = 0; i < ARRAY_SIZE(uart_cfg); i++)
		at91_register_uart(uart_cfg[i].id, uart_cfg[i].portnr,
				   uart_cfg[i].pins);
}
/*
 * Two USB Host ports
 */
static struct at91_usbh_data __initdata usbh_data = {
	.ports		= 2,
	/* neither VBUS switching nor overcurrent sensing is wired on this board */
	.vbus_pin	= {-EINVAL, -EINVAL},
	.overcurrent_pin= {-EINVAL, -EINVAL},
};

/*
 * USB Device port
 */
static struct at91_udc_data __initdata udc_data = {
	.vbus_pin	= AT91_PIN_PA22,	/* VBUS sense input */
	.pullup_pin	= -EINVAL,		/* pull-up driven by UDC */
};

/*
 * MACB Ethernet device
 */
static struct macb_platform_data __initdata macb_data = {
	.phy_irq_pin	= AT91_PIN_PA28,	/* PHY interrupt line */
	.is_rmii	= 1,			/* reduced MII interface */
};

/*
 * LEDs and GPOs
 *
 * General-purpose outputs ("gpo:*") and status LEDs ("led:*") are all
 * exposed through the leds-gpio driver so user space can control them
 * via /sys/class/leds.
 */
static struct gpio_led gpio_leds[] = {
	{
		.name			= "gpo:spi1reset",
		.gpio			= AT91_PIN_PC1,
		.active_low		= 0,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{
		.name			= "gpo:trig_net_out",
		.gpio			= AT91_PIN_PB20,
		.active_low		= 0,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{
		.name			= "gpo:trig_net_dir",
		.gpio			= AT91_PIN_PB19,
		.active_low		= 0,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{
		.name			= "gpo:charge_dis",
		.gpio			= AT91_PIN_PC2,
		.active_low		= 0,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{
		.name			= "led:event",
		.gpio			= AT91_PIN_PB17,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{
		.name			= "led:lan",
		.gpio			= AT91_PIN_PB18,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{
		/* error LED lit by default until software clears it */
		.name			= "led:error",
		.gpio			= AT91_PIN_PB16,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_ON,
	}
};

static struct gpio_led_platform_data gpio_led_info = {
	.leds		= gpio_leds,
	.num_leds	= ARRAY_SIZE(gpio_leds),
};

static struct platform_device leds = {
	.name	= "leds-gpio",
	.id	= 0,
	.dev	= {
		.platform_data	= &gpio_led_info,
	}
};

/* Register the on-SoC LED/GPO bank with the leds-gpio driver. */
static void __init gsia18s_leds_init(void)
{
	platform_device_register(&leds);
}
/* PCF8574 0x20 GPIO - U1 on the GS_IA18-CB_V3 board */
/*
 * Outputs behind the first I2C GPIO expander (power/setup/reset lines for
 * the hard-disk controller, WiFi and GPS modules). Expander bit numbers
 * are noted per entry; bit 4 is an input (ethernet detect) handled in
 * pcf8574x_0x20_setup().
 */
static struct gpio_led pcf_gpio_leds1[] = {
	{ /* bit 0 */
		.name			= "gpo:hdc_power",
		.gpio			= PCF_GPIO_HDC_POWER,
		.active_low		= 0,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{ /* bit 1 */
		.name			= "gpo:wifi_setup",
		.gpio			= PCF_GPIO_WIFI_SETUP,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{ /* bit 2 */
		.name			= "gpo:wifi_enable",
		.gpio			= PCF_GPIO_WIFI_ENABLE,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{ /* bit 3 */
		.name			= "gpo:wifi_reset",
		.gpio			= PCF_GPIO_WIFI_RESET,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_ON,
	},
	/* bit 4 used as GPI */
	{ /* bit 5 */
		.name			= "gpo:gps_setup",
		.gpio			= PCF_GPIO_GPS_SETUP,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{ /* bit 6 */
		.name			= "gpo:gps_standby",
		.gpio			= PCF_GPIO_GPS_STANDBY,
		.active_low		= 0,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_ON,
	},
	{ /* bit 7 */
		.name			= "gpo:gps_power",
		.gpio			= PCF_GPIO_GPS_POWER,
		.active_low		= 0,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	}
};

static struct gpio_led_platform_data pcf_gpio_led_info1 = {
	.leds		= pcf_gpio_leds1,
	.num_leds	= ARRAY_SIZE(pcf_gpio_leds1),
};

static struct platform_device pcf_leds1 = {
	.name	= "leds-gpio", /* GS_IA18-CB_board */
	.id	= 1,
	.dev	= {
		.platform_data	= &pcf_gpio_led_info1,
	}
};

/* PCF8574 0x22 GPIO - U1 on the GS_2G_OPT1-A_V0 board (Alarm) */
static struct gpio_led pcf_gpio_leds2[] = {
	{ /* bit 0 */
		.name			= "gpo:alarm_1",
		.gpio			= PCF_GPIO_ALARM1,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{ /* bit 1 */
		.name			= "gpo:alarm_2",
		.gpio			= PCF_GPIO_ALARM2,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{ /* bit 2 */
		.name			= "gpo:alarm_3",
		.gpio			= PCF_GPIO_ALARM3,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	{ /* bit 3 */
		.name			= "gpo:alarm_4",
		.gpio			= PCF_GPIO_ALARM4,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	/* bits 4, 5, 6 not used */
	{ /* bit 7 */
		.name			= "gpo:alarm_v_relay_on",
		.gpio			= PCF_GPIO_ALARM_V_RELAY_ON,
		.active_low		= 0,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
};

static struct gpio_led_platform_data pcf_gpio_led_info2 = {
	.leds		= pcf_gpio_leds2,
	.num_leds	= ARRAY_SIZE(pcf_gpio_leds2),
};

static struct platform_device pcf_leds2 = {
	.name	= "leds-gpio",
	.id	= 2,
	.dev	= {
		.platform_data	= &pcf_gpio_led_info2,
	}
};

/* PCF8574 0x24 GPIO U1 on the GS_2G-OPT23-A_V0 board (Modem) */
static struct gpio_led pcf_gpio_leds3[] = {
	{ /* bit 0 */
		.name			= "gpo:modem_power",
		.gpio			= PCF_GPIO_MODEM_POWER,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
	},
	/* bits 1 and 2 not used */
	{ /* bit 3 */
		.name			= "gpo:modem_reset",
		.gpio			= PCF_GPIO_MODEM_RESET,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_ON,
	},
	/* bits 4, 5 and 6 not used */
	{ /* bit 7 */
		.name			= "gpo:trx_reset",
		.gpio			= PCF_GPIO_TRX_RESET,
		.active_low		= 1,
		.default_trigger	= "none",
		.default_state		= LEDS_GPIO_DEFSTATE_ON,
	}
};

static struct gpio_led_platform_data pcf_gpio_led_info3 = {
	.leds		= pcf_gpio_leds3,
	.num_leds	= ARRAY_SIZE(pcf_gpio_leds3),
};

static struct platform_device pcf_leds3 = {
	.name	= "leds-gpio",
	.id	= 3,
	.dev	= {
		.platform_data	= &pcf_gpio_led_info3,
	}
};

/* Register the three expander-backed LED banks with leds-gpio. */
static void __init gsia18s_pcf_leds_init(void)
{
	platform_device_register(&pcf_leds1);
	platform_device_register(&pcf_leds2);
	platform_device_register(&pcf_leds3);
}
/*
 * SPI busses.
 *
 * All chip selects are exposed to user space through spidev; the
 * comments per entry describe what each chip select is wired to.
 */
static struct spi_board_info gsia18s_spi_devices[] = {
	{ /* User accessible spi0, cs0 used for communication with MSP RTC */
		.modalias	= "spidev",
		.bus_num	= 0,
		.chip_select	= 0,
		.max_speed_hz	= 580000,
		.mode		= SPI_MODE_1,
	},
	{ /* User accessible spi1, cs0 used for communication with int. DSP */
		.modalias	= "spidev",
		.bus_num	= 1,
		.chip_select	= 0,
		.max_speed_hz	= 5600000,
		.mode		= SPI_MODE_0,
	},
	{ /* User accessible spi1, cs1 used for communication with ext. DSP */
		.modalias	= "spidev",
		.bus_num	= 1,
		.chip_select	= 1,
		.max_speed_hz	= 5600000,
		.mode		= SPI_MODE_0,
	},
	{ /* User accessible spi1, cs2 used for communication with ext. DSP */
		.modalias	= "spidev",
		.bus_num	= 1,
		.chip_select	= 2,
		.max_speed_hz	= 5600000,
		.mode		= SPI_MODE_0,
	},
	{ /* User accessible spi1, cs3 used for communication with ext. DSP */
		.modalias	= "spidev",
		.bus_num	= 1,
		.chip_select	= 3,
		.max_speed_hz	= 5600000,
		.mode		= SPI_MODE_0,
	}
};

/*
 * GPI Buttons
 *
 * All inputs are wakeup sources; the GPIOs themselves are configured
 * (pull-up, deglitch) in gsia18s_add_device_buttons().
 */
static struct gpio_keys_button buttons[] = {
	{
		.gpio		= GPIO_TRIG_NET_IN,
		.code		= BTN_1,
		.desc		= "TRIG_NET_IN",
		.type		= EV_KEY,
		.active_low	= 0,
		.wakeup		= 1,
	},
	{ /* SW80 on the GS_IA18_S-MN board*/
		.gpio		= GPIO_CARD_UNMOUNT_0,
		.code		= BTN_2,
		.desc		= "Card umount 0",
		.type		= EV_KEY,
		.active_low	= 1,
		.wakeup		= 1,
	},
	{ /* SW79 on the GS_IA18_S-MN board*/
		.gpio		= GPIO_CARD_UNMOUNT_1,
		.code		= BTN_3,
		.desc		= "Card umount 1",
		.type		= EV_KEY,
		.active_low	= 1,
		.wakeup		= 1,
	},
	{ /* SW280 on the GS_IA18-CB board*/
		.gpio		= GPIO_KEY_POWER,
		.code		= KEY_POWER,
		.desc		= "Power Off Button",
		.type		= EV_KEY,
		.active_low	= 0,
		.wakeup		= 1,
	}
};

static struct gpio_keys_platform_data button_data = {
	.buttons	= buttons,
	.nbuttons	= ARRAY_SIZE(buttons),
};

static struct platform_device button_device = {
	.name		= "gpio-keys",
	.id		= -1,
	.num_resources	= 0,
	.dev		= {
		.platform_data	= &button_data,
	}
};
/*
 * Configure the button GPIOs (input direction, optional pull-up,
 * deglitch filter) and register the gpio-keys device.
 */
static void __init gsia18s_add_device_buttons(void)
{
	/* { pin, enable internal pull-up } — all pins get deglitching */
	static const struct {
		unsigned pin;
		int pullup;
	} gpi_pins[] = {
		{ GPIO_TRIG_NET_IN,	1 },
		{ GPIO_CARD_UNMOUNT_0,	1 },
		{ GPIO_CARD_UNMOUNT_1,	1 },
		{ GPIO_KEY_POWER,	0 },	/* externally pulled */
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(gpi_pins); i++) {
		at91_set_gpio_input(gpi_pins[i].pin, gpi_pins[i].pullup);
		at91_set_deglitch(gpi_pins[i].pin, 1);
	}

	platform_device_register(&button_device);
}
/*
 * I2C
 */
/*
 * pcf8574x_0x20_setup - post-probe hook for the 0x20 PCF8574 expander.
 *
 * Claims expander bit 4 ("eth_det", ethernet-cable detect), configures
 * it as an input and exports it to sysfs as active-low so user space
 * can poll it.
 *
 * Returns 0 on success or a negative error code.
 *
 * Fix: the original returned straight out of the later error paths,
 * leaking the gpio_request() — a failed setup could then never be
 * retried because the GPIO stayed claimed. All post-request failures
 * now release the GPIO before returning.
 */
static int pcf8574x_0x20_setup(struct i2c_client *client, int gpio,
				unsigned int ngpio, void *context)
{
	int status;

	status = gpio_request(gpio + PCF_GPIO_ETH_DETECT, "eth_det");
	if (status < 0) {
		pr_err("error: can't request GPIO%d\n",
			gpio + PCF_GPIO_ETH_DETECT);
		return status;
	}
	status = gpio_direction_input(gpio + PCF_GPIO_ETH_DETECT);
	if (status < 0) {
		pr_err("error: can't setup GPIO%d as input\n",
			gpio + PCF_GPIO_ETH_DETECT);
		goto err_free;
	}
	status = gpio_export(gpio + PCF_GPIO_ETH_DETECT, false);
	if (status < 0) {
		pr_err("error: can't export GPIO%d\n",
			gpio + PCF_GPIO_ETH_DETECT);
		goto err_free;
	}
	status = gpio_sysfs_set_active_low(gpio + PCF_GPIO_ETH_DETECT, 1);
	if (status < 0) {
		pr_err("error: gpio_sysfs_set active_low(GPIO%d, 1)\n",
			gpio + PCF_GPIO_ETH_DETECT);
		goto err_free;
	}
	return 0;

err_free:
	/* gpio_free() also unexports the GPIO if it was exported */
	gpio_free(gpio + PCF_GPIO_ETH_DETECT);
	return status;
}
/*
 * Remove hook for the 0x20 expander: release the eth_det GPIO claimed
 * in pcf8574x_0x20_setup(). Always succeeds.
 */
static int pcf8574x_0x20_teardown(struct i2c_client *client, int gpio,
				unsigned ngpio, void *context)
{
	gpio_free(gpio + PCF_GPIO_ETH_DETECT);
	return 0;
}
/* Platform data for the three PCF8574 expanders. Only the 0x20 part
 * has an input (bit 4, latched via n_latch) and setup/teardown hooks. */
static struct pcf857x_platform_data pcf20_pdata = {
	.gpio_base	= GS_IA18_S_PCF_GPIO_BASE0,
	.n_latch	= (1 << 4),	/* bit 4 is an input (eth detect) */
	.setup		= pcf8574x_0x20_setup,
	.teardown	= pcf8574x_0x20_teardown,
};

static struct pcf857x_platform_data pcf22_pdata = {
	.gpio_base	= GS_IA18_S_PCF_GPIO_BASE1,
};

static struct pcf857x_platform_data pcf24_pdata = {
	.gpio_base	= GS_IA18_S_PCF_GPIO_BASE2,
};

/* I2C devices on the board: three GPIO expanders and two EEPROMs. */
static struct i2c_board_info __initdata gsia18s_i2c_devices[] = {
	{ /* U1 on the GS_IA18-CB_V3 board */
		I2C_BOARD_INFO("pcf8574", 0x20),
		.platform_data = &pcf20_pdata,
	},
	{ /* U1 on the GS_2G_OPT1-A_V0 board (Alarm) */
		I2C_BOARD_INFO("pcf8574", 0x22),
		.platform_data = &pcf22_pdata,
	},
	{ /* U1 on the GS_2G-OPT23-A_V0 board (Modem) */
		I2C_BOARD_INFO("pcf8574", 0x24),
		.platform_data = &pcf24_pdata,
	},
	{ /* U161 on the GS_IA18_S-MN board */
		I2C_BOARD_INFO("24c1024", 0x50),
	},
	{ /* U162 on the GS_IA18_S-MN board */
		I2C_BOARD_INFO("24c01", 0x53),
	},
};

/*
 * Compact Flash
 */
static struct at91_cf_data __initdata gsia18s_cf1_data = {
	.irq_pin	= AT91_PIN_PA27,
	.det_pin	= AT91_PIN_PB30,
	.vcc_pin	= -EINVAL,		/* VCC not switchable */
	.rst_pin	= AT91_PIN_PB31,
	.chipselect	= 5,
	.flags		= AT91_CF_TRUE_IDE,
};
/* Power Off by RTC */
/*
 * Raise PA25 to ask the RTC to cut the supply, then busy-wait until
 * power actually drops. Never returns.
 */
static void gsia18s_power_off(void)
{
	pr_notice("Power supply will be switched off automatically now or after 60 seconds without ArmDAS.\n");
	at91_set_gpio_output(AT91_PIN_PA25, 1);
	/* Spin to death... */
	for (;;)
		;
}
/* Install the board's pm_power_off handler. Always returns 0. */
static int __init gsia18s_power_off_init(void)
{
	pm_power_off = gsia18s_power_off;
	return 0;
}
/* ---------------------------------------------------------------------------*/
/*
 * Board-level device registration. The base stamp9g20 devices come
 * first; the registration order of the remaining devices is kept as-is.
 */
static void __init gsia18s_board_init(void)
{
	stamp9g20_board_init();
	at91_add_device_usbh(&usbh_data);
	at91_add_device_udc(&udc_data);
	at91_add_device_eth(&macb_data);
	gsia18s_leds_init();
	gsia18s_pcf_leds_init();
	gsia18s_add_device_buttons();
	at91_add_device_i2c(gsia18s_i2c_devices,
			    ARRAY_SIZE(gsia18s_i2c_devices));
	at91_add_device_cf(&gsia18s_cf1_data);
	at91_add_device_spi(gsia18s_spi_devices,
			    ARRAY_SIZE(gsia18s_spi_devices));
	gsia18s_power_off_init();
}
/* Machine record: ties the GS_IA18_S board to its init callbacks. */
MACHINE_START(GSIA18S, "GS_IA18_S")
	.timer		= &at91sam926x_timer,
	.map_io		= at91_map_io,
	.init_early	= gsia18s_init_early,
	.init_irq	= at91_init_irq_default,
	.init_machine	= gsia18s_board_init,
MACHINE_END
| gpl-2.0 |
qhh7812/android_kernel_htc_ville-lp | drivers/staging/rts_pstor/rtsx_chip.c | 5591 | 54172 | /* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author:
* wwang (wei_wang@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/vmalloc.h>
#include "rtsx.h"
#include "rtsx_transport.h"
#include "rtsx_scsi.h"
#include "rtsx_card.h"
#include "rtsx_chip.h"
#include "rtsx_sys.h"
#include "general.h"
#include "sd.h"
#include "xd.h"
#include "ms.h"
/*
 * PHY calibration sequence (RTS5208, early silicon; called from
 * rtsx_reset_chip() when driver_first_load and ic_version < IC_VER_C).
 * The register values are vendor magic numbers; the exact sequence and
 * the 10 ms settle time after the first write appear to be required —
 * do not reorder.
 */
static void rtsx_calibration(struct rtsx_chip *chip)
{
	rtsx_write_phy_register(chip, 0x1B, 0x135E);
	wait_timeout(10);
	rtsx_write_phy_register(chip, 0x00, 0x0280);
	rtsx_write_phy_register(chip, 0x01, 0x7112);
	rtsx_write_phy_register(chip, 0x01, 0x7110);
	rtsx_write_phy_register(chip, 0x01, 0x7112);
	rtsx_write_phy_register(chip, 0x01, 0x7113);
	rtsx_write_phy_register(chip, 0x00, 0x0288);
}
/* Mask all card-insertion interrupts (XD/SD/MS) in the BIER register. */
void rtsx_disable_card_int(struct rtsx_chip *chip)
{
	u32 mask = XD_INT_EN | SD_INT_EN | MS_INT_EN;

	rtsx_writel(chip, RTSX_BIER, rtsx_readl(chip, RTSX_BIER) & ~mask);
}
void rtsx_enable_card_int(struct rtsx_chip *chip)
{
u32 reg = rtsx_readl(chip, RTSX_BIER);
int i;
for (i = 0; i <= chip->max_lun; i++) {
if (chip->lun2card[i] & XD_CARD)
reg |= XD_INT_EN;
if (chip->lun2card[i] & SD_CARD)
reg |= SD_INT_EN;
if (chip->lun2card[i] & MS_CARD)
reg |= MS_INT_EN;
}
if (chip->hw_bypass_sd)
reg &= ~((u32)SD_INT_EN);
rtsx_writel(chip, RTSX_BIER, reg);
}
/*
 * Program the full bus-interrupt enable mask: transfer done/fail,
 * per-card insertion (unless DISABLE_CARD_INT), delink (IC rev C+),
 * overcurrent (when SUPPORT_OCP) and, for polling DMA mode, data-done.
 */
void rtsx_enable_bus_int(struct rtsx_chip *chip)
{
	u32 reg = 0;
#ifndef DISABLE_CARD_INT
	int i;
#endif

	reg = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN;

#ifndef DISABLE_CARD_INT
	/* Enable insertion interrupts for every card type mapped to a LUN */
	for (i = 0; i <= chip->max_lun; i++) {
		RTSX_DEBUGP("lun2card[%d] = 0x%02x\n", i, chip->lun2card[i]);
		if (chip->lun2card[i] & XD_CARD)
			reg |= XD_INT_EN;
		if (chip->lun2card[i] & SD_CARD)
			reg |= SD_INT_EN;
		if (chip->lun2card[i] & MS_CARD)
			reg |= MS_INT_EN;
	}
	if (chip->hw_bypass_sd)
		reg &= ~((u32)SD_INT_EN);
#endif

	if (chip->ic_version >= IC_VER_C)
		reg |= DELINK_INT_EN;

#ifdef SUPPORT_OCP
	/* RTS5209 has separate SD/MS overcurrent interrupts */
	if (CHECK_PID(chip, 0x5209)) {
		if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
			reg |= MS_OC_INT_EN | SD_OC_INT_EN;
		} else {
			reg |= SD_OC_INT_EN;
		}
	} else {
		reg |= OC_INT_EN;
	}
#endif

	/* Without ADMA the driver polls per-descriptor data-done interrupts */
	if (!chip->adma_mode)
		reg |= DATA_DONE_INT_EN;

	/* Enable Bus Interrupt */
	rtsx_writel(chip, RTSX_BIER, reg);
	RTSX_DEBUGP("RTSX_BIER: 0x%08x\n", reg);
}
/* Mask every bus interrupt by clearing BIER. */
void rtsx_disable_bus_int(struct rtsx_chip *chip)
{
	rtsx_writel(chip, RTSX_BIER, 0);
}
/*
 * Legacy SDIO handover (RTS5208 before IC rev C): when an SDIO function
 * exists and SD is to be ignored, hand the shared bus to the SDIO host
 * instead of resetting the SD card; otherwise schedule an SD reset.
 *
 * NOTE(review): RTSX_WRITE_REG is a project macro that returns
 * STATUS_FAIL from this function on a register-write error, so every
 * such line is also an implicit error exit.
 */
static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
{
	if (chip->ignore_sd && CHK_SDIO_EXIST(chip)) {
		/* Pull-up configuration differs between ASIC and FPGA parts */
		if (chip->asic_code) {
			RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
				       MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
		} else {
			RTSX_WRITE_REG(chip, FPGA_PULL_CTL, 0xFF, FPGA_SD_PULL_CTL_EN);
		}
		RTSX_WRITE_REG(chip, CARD_SHARE_MODE, 0xFF, CARD_SHARE_48_SD);
		/* Enable SDIO internal clock */
		RTSX_WRITE_REG(chip, 0xFF2C, 0x01, 0x01);
		RTSX_WRITE_REG(chip, SDIO_CTRL, 0xFF, SDIO_BUS_CTRL | SDIO_CD_CTRL);
		chip->sd_int = 1;
		chip->sd_io = 1;
	} else {
		chip->need_reset |= SD_CARD;
	}
	return STATUS_SUCCESS;
}
#ifdef HW_AUTO_SWITCH_SD_BUS
/*
 * Hardware-assisted SDIO handover: decide, from chip state and the
 * per-PID "bypass SD" bit, whether the inserted device is an SDIO
 * function (keep bus auto-switch enabled and mark sd_io) or a plain
 * SD card (disable auto-switch and schedule an SD reset).
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL. RTSX_READ_REG/RTSX_WRITE_REG
 * are project macros that return STATUS_FAIL from this function on a
 * register access error.
 */
static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
{
	u8 tmp;
	int sw_bypass_sd = 0;
	int retval;

	/* On first load the decision comes from a sticky hardware bit;
	 * afterwards from whether SDIO was active when we suspended. */
	if (chip->driver_first_load) {
		if (CHECK_PID(chip, 0x5288)) {
			RTSX_READ_REG(chip, 0xFE5A, &tmp);
			if (tmp & 0x08)
				sw_bypass_sd = 1;
		} else if (CHECK_PID(chip, 0x5208)) {
			RTSX_READ_REG(chip, 0xFE70, &tmp);
			if (tmp & 0x80)
				sw_bypass_sd = 1;
		} else if (CHECK_PID(chip, 0x5209)) {
			RTSX_READ_REG(chip, SDIO_CFG, &tmp);
			if (tmp & SDIO_BUS_AUTO_SWITCH)
				sw_bypass_sd = 1;
		}
	} else {
		if (chip->sdio_in_charge)
			sw_bypass_sd = 1;
	}
	RTSX_DEBUGP("chip->sdio_in_charge = %d\n", chip->sdio_in_charge);
	RTSX_DEBUGP("chip->driver_first_load = %d\n", chip->driver_first_load);
	RTSX_DEBUGP("sw_bypass_sd = %d\n", sw_bypass_sd);

	if (sw_bypass_sd) {
		u8 cd_toggle_mask = 0;

		/* Card-detect toggle bit position differs per PID */
		RTSX_READ_REG(chip, TLPTISTAT, &tmp);
		if (CHECK_PID(chip, 0x5209)) {
			cd_toggle_mask = 0x10;
		} else {
			cd_toggle_mask = 0x08;
		}

		if (tmp & cd_toggle_mask) {
			/* Card was swapped while away: treat as plain SD */
			/* Disable sdio_bus_auto_switch */
			if (CHECK_PID(chip, 0x5288)) {
				RTSX_WRITE_REG(chip, 0xFE5A, 0x08, 0x00);
			} else if (CHECK_PID(chip, 0x5208)) {
				RTSX_WRITE_REG(chip, 0xFE70, 0x80, 0x00);
			} else {
				RTSX_WRITE_REG(chip, SDIO_CFG, SDIO_BUS_AUTO_SWITCH, 0);
			}
			RTSX_WRITE_REG(chip, TLPTISTAT, 0xFF, tmp);

			chip->need_reset |= SD_CARD;
		} else {
			RTSX_DEBUGP("Chip inserted with SDIO!\n");

			if (chip->asic_code) {
				retval = sd_pull_ctl_enable(chip);
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}
			} else {
				RTSX_WRITE_REG(chip, FPGA_PULL_CTL, FPGA_SD_PULL_CTL_BIT | 0x20, 0);
			}
			retval = card_share_mode(chip, SD_CARD);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}

			/* Enable sdio_bus_auto_switch */
			if (CHECK_PID(chip, 0x5288)) {
				RTSX_WRITE_REG(chip, 0xFE5A, 0x08, 0x08);
			} else if (CHECK_PID(chip, 0x5208)) {
				RTSX_WRITE_REG(chip, 0xFE70, 0x80, 0x80);
			} else {
				RTSX_WRITE_REG(chip, SDIO_CFG,
					       SDIO_BUS_AUTO_SWITCH, SDIO_BUS_AUTO_SWITCH);
			}

			chip->chip_insert_with_sdio = 1;
			chip->sd_io = 1;
		}
	} else {
		/* Plain SD path: clear the toggle flag, schedule a reset */
		if (CHECK_PID(chip, 0x5209)) {
			RTSX_WRITE_REG(chip, TLPTISTAT, 0x10, 0x10);
		} else {
			RTSX_WRITE_REG(chip, TLPTISTAT, 0x08, 0x08);
		}

		chip->need_reset |= SD_CARD;
	}

	return STATUS_SUCCESS;
}
#endif
/*
 * rtsx_reset_chip - bring the card-reader core into its initial working
 * state: program the command-buffer address, tune the PHY (RTS5209),
 * configure overcurrent protection, GPIO/LED, SSC clocking, ASPM and
 * PCIe config registers, enable interrupts, perform the SDIO/SD bus
 * handover and schedule detected cards for reset.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 *
 * Fix: two rtsx_read_phy_register() call sites had their "&reg"
 * argument corrupted into the mojibake byte sequence "®" (an HTML
 * entity artifact), which does not compile; the address-of argument is
 * restored. No other token is changed.
 *
 * NOTE(review): RTSX_WRITE_REG/RTSX_READ_REG are project macros that
 * return STATUS_FAIL from this function on a register access error, so
 * each such line is also an implicit error exit.
 */
int rtsx_reset_chip(struct rtsx_chip *chip)
{
	int retval;

	/* Tell the chip where the host command buffer lives */
	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	rtsx_disable_aspm(chip);

	if (CHECK_PID(chip, 0x5209) && chip->asic_code) {
		u16 val;

		/* optimize PHY */
		retval = rtsx_write_phy_register(chip, 0x00, 0xB966);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		retval = rtsx_write_phy_register(chip, 0x01, 0x713F);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		retval = rtsx_write_phy_register(chip, 0x03, 0xA549);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		retval = rtsx_write_phy_register(chip, 0x06, 0xB235);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		retval = rtsx_write_phy_register(chip, 0x07, 0xEF40);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		retval = rtsx_write_phy_register(chip, 0x1E, 0xF8EB);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		retval = rtsx_write_phy_register(chip, 0x19, 0xFE6C);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		wait_timeout(1);
		retval = rtsx_write_phy_register(chip, 0x0A, 0x05C0);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = rtsx_write_cfg_dw(chip, 1, 0x110, 0xFFFF, 0xFFFF);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		/* PHY register 0x08 low 6 bits hold the PHY voltage trim */
		retval = rtsx_read_phy_register(chip, 0x08, &val);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		RTSX_DEBUGP("Read from phy 0x08: 0x%04x\n", val);
		if (chip->phy_voltage) {
			chip->phy_voltage &= 0x3F;
			RTSX_DEBUGP("chip->phy_voltage = 0x%x\n", chip->phy_voltage);
			val &= ~0x3F;
			val |= chip->phy_voltage;
			RTSX_DEBUGP("Write to phy 0x08: 0x%04x\n", val);
			retval = rtsx_write_phy_register(chip, 0x08, val);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}
		} else {
			chip->phy_voltage = (u8)(val & 0x3F);
			RTSX_DEBUGP("Default, chip->phy_voltage = 0x%x\n", chip->phy_voltage);
		}
	}

	RTSX_WRITE_REG(chip, HOST_SLEEP_STATE, 0x03, 0x00);

	/* Disable card clock */
	RTSX_WRITE_REG(chip, CARD_CLK_EN, 0x1E, 0);

#ifdef SUPPORT_OCP
	/* SSC power on, OCD power on */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		RTSX_WRITE_REG(chip, FPDCTL, OC_POWER_DOWN, 0);
	} else {
		RTSX_WRITE_REG(chip, FPDCTL, OC_POWER_DOWN, MS_OC_POWER_DOWN);
	}

	if (CHECK_PID(chip, 0x5209)) {
		RTSX_WRITE_REG(chip, OCPPARA1, SD_OCP_TIME_MASK | MS_OCP_TIME_MASK,
			       SD_OCP_TIME_800 | MS_OCP_TIME_800);
		RTSX_WRITE_REG(chip, OCPPARA2, SD_OCP_THD_MASK | MS_OCP_THD_MASK,
			       chip->sd_400mA_ocp_thd | (chip->ms_ocp_thd << 4));
		if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
			RTSX_WRITE_REG(chip, OCPGLITCH, SD_OCP_GLITCH_MASK | MS_OCP_GLITCH_MASK,
				       SD_OCP_GLITCH_10000 | MS_OCP_GLITCH_10000);
		} else {
			RTSX_WRITE_REG(chip, OCPGLITCH, SD_OCP_GLITCH_MASK, SD_OCP_GLITCH_10000);
		}
		RTSX_WRITE_REG(chip, OCPCTL, 0xFF,
			       SD_OCP_INT_EN | SD_DETECT_EN | MS_OCP_INT_EN | MS_DETECT_EN);
	} else {
		RTSX_WRITE_REG(chip, OCPPARA1, OCP_TIME_MASK, OCP_TIME_800);
		RTSX_WRITE_REG(chip, OCPPARA2, OCP_THD_MASK, OCP_THD_244_946);
		RTSX_WRITE_REG(chip, OCPCTL, 0xFF, CARD_OC_INT_EN | CARD_DETECT_EN);
	}
#else
	/* OC power down */
	RTSX_WRITE_REG(chip, FPDCTL, OC_POWER_DOWN, OC_POWER_DOWN);
#endif

	if (!CHECK_PID(chip, 0x5288)) {
		RTSX_WRITE_REG(chip, CARD_GPIO_DIR, 0xFF, 0x03);
	}

	/* Turn off LED */
	RTSX_WRITE_REG(chip, CARD_GPIO, 0xFF, 0x03);

	/* Reset delink mode */
	RTSX_WRITE_REG(chip, CHANGE_LINK_STATE, 0x0A, 0);

	/* Card driving select */
	RTSX_WRITE_REG(chip, CARD_DRIVE_SEL, 0xFF, chip->card_drive_sel);

	if (CHECK_PID(chip, 0x5209)) {
		RTSX_WRITE_REG(chip, SD30_DRIVE_SEL, 0x07, chip->sd30_drive_sel_3v3);
	}

#ifdef LED_AUTO_BLINK
	RTSX_WRITE_REG(chip, CARD_AUTO_BLINK, 0xFF,
		       LED_BLINK_SPEED | BLINK_EN | LED_GPIO0);
#endif

	if (chip->asic_code) {
		/* Enable SSC Clock */
		RTSX_WRITE_REG(chip, SSC_CTL1, 0xFF, SSC_8X_EN | SSC_SEL_4M);
		RTSX_WRITE_REG(chip, SSC_CTL2, 0xFF, 0x12);
	}

	/* Disable cd_pwr_save (u_force_rst_core_en=0, u_cd_rst_core_en=0)
	   0xFE5B
	   bit[1] u_cd_rst_core_en rst_value = 0
	   bit[2] u_force_rst_core_en rst_value = 0
	   bit[5] u_mac_phy_rst_n_dbg rst_value = 1
	   bit[4] u_non_sticky_rst_n_dbg rst_value = 0
	*/
	RTSX_WRITE_REG(chip, CHANGE_LINK_STATE, 0x16, 0x10);

	/* Enable ASPM */
	if (chip->aspm_l0s_l1_en) {
		if (chip->dynamic_aspm) {
			if (CHK_SDIO_EXIST(chip)) {
				if (CHECK_PID(chip, 0x5209)) {
					retval = rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
					if (retval != STATUS_SUCCESS) {
						TRACE_RET(chip, STATUS_FAIL);
					}
				} else if (CHECK_PID(chip, 0x5288)) {
					retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
					if (retval != STATUS_SUCCESS) {
						TRACE_RET(chip, STATUS_FAIL);
					}
				}
			}
		} else {
			if (CHECK_PID(chip, 0x5208)) {
				RTSX_WRITE_REG(chip, ASPM_FORCE_CTL, 0xFF, 0x3F);
			}
			retval = rtsx_write_config_byte(chip, LCTLR, chip->aspm_l0s_l1_en);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}

			chip->aspm_level[0] = chip->aspm_l0s_l1_en;
			if (CHK_SDIO_EXIST(chip)) {
				chip->aspm_level[1] = chip->aspm_l0s_l1_en;
				if (CHECK_PID(chip, 0x5288)) {
					retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
				} else {
					retval = rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
				}
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}
			}

			chip->aspm_enabled = 1;
		}
	} else {
		if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
			retval = rtsx_write_phy_register(chip, 0x07, 0x0129);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}
		}
		retval = rtsx_write_config_byte(chip, LCTLR, chip->aspm_l0s_l1_en);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	retval = rtsx_write_config_byte(chip, 0x81, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (CHK_SDIO_EXIST(chip)) {
		if (CHECK_PID(chip, 0x5288)) {
			retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF00, 0x0100);
		} else {
			retval = rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFF00, 0x0100);
		}
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	if (CHECK_PID(chip, 0x5209)) {
		retval = rtsx_write_cfg_dw(chip, 0, 0x70C, 0xFF000000, 0x5B);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	if (CHECK_PID(chip, 0x5288)) {
		if (!CHK_SDIO_EXIST(chip)) {
			retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFFFF, 0x0103);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}
			retval = rtsx_write_cfg_dw(chip, 2, 0x84, 0xFF, 0x03);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}
		}
	}

	RTSX_WRITE_REG(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);

	RTSX_WRITE_REG(chip, PERST_GLITCH_WIDTH, 0xFF, 0x80);

	if (CHECK_PID(chip, 0x5209)) {
		RTSX_WRITE_REG(chip, PWD_SUSPEND_EN, 0xFF, 0xFF);
		RTSX_WRITE_REG(chip, PWR_GATE_CTRL, PWR_GATE_EN, PWR_GATE_EN);
	}

	/* Enable PCIE interrupt */
	if (chip->asic_code) {
		if (CHECK_PID(chip, 0x5208)) {
			if (chip->phy_debug_mode) {
				RTSX_WRITE_REG(chip, CDRESUMECTL, 0x77, 0);
				rtsx_disable_bus_int(chip);
			} else {
				rtsx_enable_bus_int(chip);
			}

			if (chip->ic_version >= IC_VER_D) {
				u16 reg;

				/* was mojibake "®" in the corrupted source:
				 * restore the address-of argument */
				retval = rtsx_read_phy_register(chip, 0x00, &reg);
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}
				reg &= 0xFE7F;
				reg |= 0x80;
				retval = rtsx_write_phy_register(chip, 0x00, reg);
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}

				retval = rtsx_read_phy_register(chip, 0x1C, &reg);
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}
				reg &= 0xFFF7;
				retval = rtsx_write_phy_register(chip, 0x1C, reg);
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}
			}

			if (chip->driver_first_load && (chip->ic_version < IC_VER_C)) {
				rtsx_calibration(chip);
			}
		} else {
			rtsx_enable_bus_int(chip);
		}
	} else {
		rtsx_enable_bus_int(chip);
	}

#ifdef HW_INT_WRITE_CLR
	if (CHECK_PID(chip, 0x5209)) {
		/* Set interrupt write clear */
		RTSX_WRITE_REG(chip, NFTS_TX_CTRL, 0x02, 0);
	}
#endif

	chip->need_reset = 0;

	chip->int_reg = rtsx_readl(chip, RTSX_BIPR);

#ifdef HW_INT_WRITE_CLR
	if (CHECK_PID(chip, 0x5209)) {
		/* Clear interrupt flag */
		rtsx_writel(chip, RTSX_BIPR, chip->int_reg);
	}
#endif

	if (chip->hw_bypass_sd)
		goto NextCard;
	RTSX_DEBUGP("In rtsx_reset_chip, chip->int_reg = 0x%x\n", chip->int_reg);
	if (chip->int_reg & SD_EXIST) {
#ifdef HW_AUTO_SWITCH_SD_BUS
		if (CHECK_PID(chip, 0x5208) && (chip->ic_version < IC_VER_C)) {
			retval = rtsx_pre_handle_sdio_old(chip);
		} else {
			retval = rtsx_pre_handle_sdio_new(chip);
		}
		RTSX_DEBUGP("chip->need_reset = 0x%x (rtsx_reset_chip)\n", (unsigned int)(chip->need_reset));
#else  /* HW_AUTO_SWITCH_SD_BUS */
		retval = rtsx_pre_handle_sdio_old(chip);
#endif  /* HW_AUTO_SWITCH_SD_BUS */
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	} else {
		chip->sd_io = 0;
		RTSX_WRITE_REG(chip, SDIO_CTRL, SDIO_BUS_CTRL | SDIO_CD_CTRL, 0);
	}

NextCard:
	if (chip->int_reg & XD_EXIST)
		chip->need_reset |= XD_CARD;
	if (chip->int_reg & MS_EXIST)
		chip->need_reset |= MS_CARD;
	if (chip->int_reg & CARD_EXIST) {
		RTSX_WRITE_REG(chip, SSC_CTL1, SSC_RSTB, SSC_RSTB);
	}

	RTSX_DEBUGP("In rtsx_init_chip, chip->need_reset = 0x%x\n", (unsigned int)(chip->need_reset));

	RTSX_WRITE_REG(chip, RCCTL, 0x01, 0x00);

	if (CHECK_PID(chip, 0x5208) || CHECK_PID(chip, 0x5288)) {
		/* Turn off main power when entering S3/S4 state */
		RTSX_WRITE_REG(chip, MAIN_PWR_OFF_CTL, 0x03, 0x03);
	}

	if (chip->remote_wakeup_en && !chip->auto_delink_en) {
		RTSX_WRITE_REG(chip, WAKE_SEL_CTL, 0x07, 0x07);
		if (chip->aux_pwr_exist) {
			RTSX_WRITE_REG(chip, PME_FORCE_CTL, 0xFF, 0x33);
		}
	} else {
		RTSX_WRITE_REG(chip, WAKE_SEL_CTL, 0x07, 0x04);
		RTSX_WRITE_REG(chip, PME_FORCE_CTL, 0xFF, 0x30);
	}

	if (CHECK_PID(chip, 0x5208) && (chip->ic_version >= IC_VER_D)) {
		RTSX_WRITE_REG(chip, PETXCFG, 0x1C, 0x14);
	} else if (CHECK_PID(chip, 0x5209)) {
		if (chip->force_clkreq_0) {
			RTSX_WRITE_REG(chip, PETXCFG, 0x08, 0x08);
		} else {
			RTSX_WRITE_REG(chip, PETXCFG, 0x08, 0x00);
		}
	}

	if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
		retval = rtsx_clr_phy_reg_bit(chip, 0x1C, 2);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	if (chip->ft2_fast_mode) {
		/* Two-step card power-on for factory-test fast mode */
		RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0xFF, MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON);
		udelay(chip->pmos_pwr_on_interval);
		RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0xFF, MS_POWER_ON | SD_POWER_ON);

		wait_timeout(200);
	}

	/* Reset card */
	rtsx_reset_detected_cards(chip, 0);

	chip->driver_first_load = 0;

	return STATUS_SUCCESS;
}
/*
 * Validate a packed SD speed-priority word: each of the four bytes
 * must be a priority value in 0x01..0x04. Returns 1 when valid,
 * 0 when any byte is out of range.
 */
static inline int check_sd_speed_prior(u32 sd_speed_prior)
{
	int shift;

	for (shift = 0; shift < 32; shift += 8) {
		u8 prior = (u8)(sd_speed_prior >> shift);

		if ((prior < 0x01) || (prior > 0x04))
			return 0;
	}
	return 1;
}
/*
 * Validate a packed SD current-priority word: each of the four bytes
 * must be at most 0x03. Returns 1 when valid, 0 otherwise.
 */
static inline int check_sd_current_prior(u32 sd_current_prior)
{
	int shift;

	for (shift = 0; shift < 32; shift += 8) {
		u8 prior = (u8)(sd_current_prior >> shift);

		if (prior > 0x03)
			return 0;
	}
	return 1;
}
/*
 * rts5209_init - chip-specific probe/initialization for the RTS5209.
 *
 * Detects ASIC vs FPGA silicon, reads the IC version, then decodes the
 * hardware-setting dword at config offset 0x724 (LUN topology, SDIO
 * presence, SD bypass, and - when use_hw_setting is on - clock, ASPM,
 * delink and bus-speed options).  Finally, on HP platforms that watch
 * BIOS hotplug, auto-delink is disabled when the platform registers at
 * PCI 0x1C.2 offsets 0x58/0x5B read 0x00/0x01.
 *
 * Returns STATUS_SUCCESS, or STATUS_FAIL via TRACE_RET on a config read
 * failure.
 *
 * Bug fixed: the addresses passed to rtsx_read_pci_cfg_byte() had been
 * mangled by an encoding error - "&reg58"/"&reg5b" had become the "(R)"
 * glyph, which does not compile.
 */
static int rts5209_init(struct rtsx_chip *chip)
{
	int retval;
	u32 lval = 0;
	u8 val = 0;

	/* Bit 4 of register 0x1C clear => real ASIC, set => FPGA board */
	val = rtsx_readb(chip, 0x1C);
	if ((val & 0x10) == 0) {
		chip->asic_code = 1;
	} else {
		chip->asic_code = 0;
	}
	chip->ic_version = val & 0x0F;
	chip->phy_debug_mode = 0;
	chip->aux_pwr_exist = 0;
	chip->ms_power_class_en = 0x03;

	/* Hardware-setting dword; its four bytes are decoded below */
	retval = rtsx_read_cfg_dw(chip, 0, 0x724, &lval);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}
	RTSX_DEBUGP("dw in 0x724: 0x%x\n", lval);

	/* Byte 0: LUN mode, SDIO existence and SD bypass flags.
	 * Bit 7 set means "use defaults" (SDIO present, no bypass). */
	val = (u8)lval;
	if (!(val & 0x80)) {
		if (val & 0x08)
			chip->lun_mode = DEFAULT_SINGLE;
		else
			chip->lun_mode = SD_MS_2LUN;
		if (val & 0x04) {
			SET_SDIO_EXIST(chip);
		} else {
			CLR_SDIO_EXIST(chip);
		}
		if (val & 0x02) {
			chip->hw_bypass_sd = 0;
		} else {
			chip->hw_bypass_sd = 1;
		}
	} else {
		SET_SDIO_EXIST(chip);
		chip->hw_bypass_sd = 0;
	}

	if (chip->use_hw_setting) {
		u8 clk;

		chip->aspm_l0s_l1_en = (val >> 5) & 0x03;

		/* Byte 1: SDR50 clock code, auto-delink, SS enable, MS clock */
		val = (u8)(lval >> 8);
		clk = (val >> 5) & 0x07;
		if (clk != 0x07) {
			chip->asic_sd_sdr50_clk = 98 - clk * 2;
		}
		if (val & 0x10) {
			chip->auto_delink_en = 1;
		} else {
			chip->auto_delink_en = 0;
		}
		/* ss_en == 2 means "forced off by module parameter" */
		if (chip->ss_en == 2) {
			chip->ss_en = 0;
		} else {
			if (val & 0x08) {
				chip->ss_en = 1;
			} else {
				chip->ss_en = 0;
			}
		}
		clk = val & 0x07;
		if (clk != 0x07)
			chip->asic_ms_hg_clk = (59 - clk) * 2;

		/* Byte 2: HS/DDR50 clock codes and SD bus-speed enables */
		val = (u8)(lval >> 16);
		clk = (val >> 6) & 0x03;
		if (clk != 0x03) {
			chip->asic_sd_hs_clk = (49 - clk * 2) * 2;
			chip->asic_mmc_52m_clk = (49 - clk * 2) * 2;
		}
		clk = (val >> 4) & 0x03;
		if (clk != 0x03)
			chip->asic_sd_ddr50_clk = (48 - clk * 2) * 2;
		if (val & 0x01) {
			chip->sdr104_en = 1;
		} else {
			chip->sdr104_en = 0;
		}
		if (val & 0x02) {
			chip->ddr50_en = 1;
		} else {
			chip->ddr50_en = 0;
		}
		if (val & 0x04) {
			chip->sdr50_en = 1;
		} else {
			chip->sdr50_en = 0;
		}

		/* Byte 3: SDR104 clock code, power-down-in-SS, MS power class */
		val = (u8)(lval >> 24);
		clk = (val >> 5) & 0x07;
		if (clk != 0x07)
			chip->asic_sd_sdr104_clk = 206 - clk * 3;
		if (val & 0x10) {
			chip->power_down_in_ss = 1;
		} else {
			chip->power_down_in_ss = 0;
		}
		chip->ms_power_class_en = val & 0x03;
	}

	/* HP BIOS hotplug quirk: read failures here are not fatal, so a
	 * negative retval just leaves auto-delink as configured. */
	if (chip->hp_watch_bios_hotplug && chip->auto_delink_en) {
		u8 reg58, reg5b;

		retval = rtsx_read_pci_cfg_byte(0x00,
				0x1C, 0x02, 0x58, &reg58);
		if (retval < 0) {
			return STATUS_SUCCESS;
		}
		retval = rtsx_read_pci_cfg_byte(0x00,
				0x1C, 0x02, 0x5B, &reg5b);
		if (retval < 0) {
			return STATUS_SUCCESS;
		}
		RTSX_DEBUGP("reg58 = 0x%x, reg5b = 0x%x\n", reg58, reg5b);
		if ((reg58 == 0x00) && (reg5b == 0x01)) {
			chip->auto_delink_en = 0;
		}
	}

	return STATUS_SUCCESS;
}
/*
 * rts5208_init - chip-specific probe/initialization for the RTS5208.
 *
 * Uses a CLK_SEL write/readback to tell ASIC from FPGA, reads the IC
 * version (from PHY register 0x1C on ASIC, register 0xFE80 on FPGA),
 * and latches aux-power presence, hardware SD bypass, SDIO existence
 * and - when use_hw_setting is on - the auto-delink enable.
 *
 * Returns STATUS_SUCCESS, or STATUS_FAIL through the register-access
 * macros / TRACE_RET on error.
 *
 * Bug fixed: the "&reg" argument to rtsx_read_phy_register() had been
 * corrupted into the "(R)" glyph by an encoding error.
 */
static int rts5208_init(struct rtsx_chip *chip)
{
	int retval;
	u16 reg = 0;
	u8 val = 0;

	/* CLK_SEL readback of 0 identifies real ASIC silicon */
	RTSX_WRITE_REG(chip, CLK_SEL, 0x03, 0x03);
	RTSX_READ_REG(chip, CLK_SEL, &val);
	if (val == 0) {
		chip->asic_code = 1;
	} else {
		chip->asic_code = 0;
	}

	if (chip->asic_code) {
		retval = rtsx_read_phy_register(chip, 0x1C, &reg);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		RTSX_DEBUGP("Value of phy register 0x1C is 0x%x\n", reg);
		chip->ic_version = (reg >> 4) & 0x07;
		if (reg & PHY_DEBUG_MODE) {
			chip->phy_debug_mode = 1;
		} else {
			chip->phy_debug_mode = 0;
		}
	} else {
		RTSX_READ_REG(chip, 0xFE80, &val);
		chip->ic_version = val;
		chip->phy_debug_mode = 0;
	}

	RTSX_READ_REG(chip, PDINFO, &val);
	RTSX_DEBUGP("PDINFO: 0x%x\n", val);
	if (val & AUX_PWR_DETECTED) {
		chip->aux_pwr_exist = 1;
	} else {
		chip->aux_pwr_exist = 0;
	}

	RTSX_READ_REG(chip, 0xFE50, &val);
	if (val & 0x01) {
		chip->hw_bypass_sd = 1;
	} else {
		chip->hw_bypass_sd = 0;
	}

	/* Config byte 0x0E bit 7 flags the SDIO function */
	rtsx_read_config_byte(chip, 0x0E, &val);
	if (val & 0x80) {
		SET_SDIO_EXIST(chip);
	} else {
		CLR_SDIO_EXIST(chip);
	}

	if (chip->use_hw_setting) {
		RTSX_READ_REG(chip, CHANGE_LINK_STATE, &val);
		if (val & 0x80) {
			chip->auto_delink_en = 1;
		} else {
			chip->auto_delink_en = 0;
		}
	}

	return STATUS_SUCCESS;
}
/*
 * rts5288_init - chip-specific probe/initialization for the RTS5288.
 *
 * Detects ASIC vs FPGA, package type (QFN vs LQFP), aux-power presence,
 * hardware SD bypass and SDIO existence, then - when use_hw_setting is
 * on - imports the auto-delink enable and picks the LUN topology from
 * the package type.
 *
 * Returns STATUS_SUCCESS, or STATUS_FAIL via the register-access macros
 * (RTSX_READ_REG/RTSX_WRITE_REG contain a hidden early return on error).
 */
static int rts5288_init(struct rtsx_chip *chip)
{
	int retval;
	u8 val = 0, max_func;
	u32 lval = 0;

	/* Same ASIC/FPGA probe as rts5208_init: a CLK_SEL readback of 0
	 * is taken to mean real ASIC silicon */
	RTSX_WRITE_REG(chip, CLK_SEL, 0x03, 0x03);
	RTSX_READ_REG(chip, CLK_SEL, &val);
	if (val == 0) {
		chip->asic_code = 1;
	} else {
		chip->asic_code = 0;
	}
	chip->ic_version = 0;
	chip->phy_debug_mode = 0;

	/* Auxiliary power presence is latched in PDINFO */
	RTSX_READ_REG(chip, PDINFO, &val);
	RTSX_DEBUGP("PDINFO: 0x%x\n", val);
	if (val & AUX_PWR_DETECTED) {
		chip->aux_pwr_exist = 1;
	} else {
		chip->aux_pwr_exist = 0;
	}

	/* Package type; it decides the LUN topology further down */
	RTSX_READ_REG(chip, CARD_SHARE_MODE, &val);
	RTSX_DEBUGP("CARD_SHARE_MODE: 0x%x\n", val);
	if (val & 0x04) {
		chip->baro_pkg = QFN;
	} else {
		chip->baro_pkg = LQFP;
	}

	RTSX_READ_REG(chip, 0xFE5A, &val);
	if (val & 0x10) {
		chip->hw_bypass_sd = 1;
	} else {
		chip->hw_bypass_sd = 0;
	}

	/* Config dword 0x718 bits 31:29 hold the max PCI function number;
	 * a value of 2 implies the SDIO function is present */
	retval = rtsx_read_cfg_dw(chip, 0, 0x718, &lval);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}
	max_func = (u8)((lval >> 29) & 0x07);
	RTSX_DEBUGP("Max function number: %d\n", max_func);
	if (max_func == 0x02) {
		SET_SDIO_EXIST(chip);
	} else {
		CLR_SDIO_EXIST(chip);
	}

	if (chip->use_hw_setting) {
		RTSX_READ_REG(chip, CHANGE_LINK_STATE, &val);
		if (val & 0x80) {
			chip->auto_delink_en = 1;
		} else {
			chip->auto_delink_en = 0;
		}
		if (CHECK_BARO_PKG(chip, LQFP)) {
			chip->lun_mode = SD_MS_1LUN;
		} else {
			chip->lun_mode = DEFAULT_SINGLE;
		}
	}

	return STATUS_SUCCESS;
}
/*
 * rtsx_init_chip - one-time software and hardware initialization.
 *
 * Clears all per-card soft state, sanitizes the tunable priority words
 * and DDR tx-phase parameters, powers the SSC block up, dispatches to
 * the PID-specific init routine, maps cards to LUNs according to the
 * detected lun_mode, and finally resets the chip.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL (via TRACE_RET); the register
 * access macros also return early on failure.
 */
int rtsx_init_chip(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	struct xd_info *xd_card = &(chip->xd_card);
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	unsigned int i;

	RTSX_DEBUGP("Vendor ID: 0x%04x, Product ID: 0x%04x\n",
		     chip->vendor_id, chip->product_id);

	chip->ic_version = 0;
#ifdef _MSG_TRACE
	chip->msg_idx = 0;
#endif

	/* Start from a clean slate: wipe all per-card soft state */
	memset(xd_card, 0, sizeof(struct xd_info));
	memset(sd_card, 0, sizeof(struct sd_info));
	memset(ms_card, 0, sizeof(struct ms_info));

	chip->xd_reset_counter = 0;
	chip->sd_reset_counter = 0;
	chip->ms_reset_counter = 0;
	chip->xd_show_cnt = MAX_SHOW_CNT;
	chip->sd_show_cnt = MAX_SHOW_CNT;
	chip->ms_show_cnt = MAX_SHOW_CNT;
	chip->sd_io = 0;
	chip->auto_delink_cnt = 0;
	chip->auto_delink_allowed = 1;
	rtsx_set_stat(chip, RTSX_STAT_INIT);
	chip->aspm_enabled = 0;
	chip->chip_insert_with_sdio = 0;
	chip->sdio_aspm = 0;
	chip->sdio_idle = 0;
	chip->sdio_counter = 0;
	chip->cur_card = 0;
	chip->phy_debug_mode = 0;
	chip->sdio_func_exist = 0;
	memset(chip->sdio_raw_data, 0, 12);

	for (i = 0; i < MAX_ALLOWED_LUN_CNT; i++) {
		set_sense_type(chip, i, SENSE_TYPE_NO_SENSE);
		chip->rw_fail_cnt[i] = 0;
	}

	/* Fall back to sane defaults when the module parameters hold
	 * malformed priority words */
	if (!check_sd_speed_prior(chip->sd_speed_prior)) {
		chip->sd_speed_prior = 0x01040203;
	}
	RTSX_DEBUGP("sd_speed_prior = 0x%08x\n", chip->sd_speed_prior);
	if (!check_sd_current_prior(chip->sd_current_prior)) {
		chip->sd_current_prior = 0x00010203;
	}
	RTSX_DEBUGP("sd_current_prior = 0x%08x\n", chip->sd_current_prior);

	/* DDR tx phase codes are 5-bit values; clamp anything else to 0 */
	if ((chip->sd_ddr_tx_phase > 31) || (chip->sd_ddr_tx_phase < 0)) {
		chip->sd_ddr_tx_phase = 0;
	}
	if ((chip->mmc_ddr_tx_phase > 31) || (chip->mmc_ddr_tx_phase < 0)) {
		chip->mmc_ddr_tx_phase = 0;
	}

	/* Power the SSC block back on before touching the clock divider */
	RTSX_WRITE_REG(chip, FPDCTL, SSC_POWER_DOWN, 0);
	wait_timeout(200);
	RTSX_WRITE_REG(chip, CLK_DIV, 0x07, 0x07);
	RTSX_DEBUGP("chip->use_hw_setting = %d\n", chip->use_hw_setting);

	/* PID-specific probing/initialization */
	if (CHECK_PID(chip, 0x5209)) {
		retval = rts5209_init(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	} else if (CHECK_PID(chip, 0x5208)) {
		retval = rts5208_init(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	} else if (CHECK_PID(chip, 0x5288)) {
		retval = rts5288_init(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	/* ss_en == 2 is the "force off" sentinel from module parameters */
	if (chip->ss_en == 2) {
		chip->ss_en = 0;
	}

	RTSX_DEBUGP("chip->asic_code = %d\n", chip->asic_code);
	RTSX_DEBUGP("chip->ic_version = 0x%x\n", chip->ic_version);
	RTSX_DEBUGP("chip->phy_debug_mode = %d\n", chip->phy_debug_mode);
	RTSX_DEBUGP("chip->aux_pwr_exist = %d\n", chip->aux_pwr_exist);
	RTSX_DEBUGP("chip->sdio_func_exist = %d\n", chip->sdio_func_exist);
	RTSX_DEBUGP("chip->hw_bypass_sd = %d\n", chip->hw_bypass_sd);
	RTSX_DEBUGP("chip->aspm_l0s_l1_en = %d\n", chip->aspm_l0s_l1_en);
	RTSX_DEBUGP("chip->lun_mode = %d\n", chip->lun_mode);
	RTSX_DEBUGP("chip->auto_delink_en = %d\n", chip->auto_delink_en);
	RTSX_DEBUGP("chip->ss_en = %d\n", chip->ss_en);
	RTSX_DEBUGP("chip->baro_pkg = %d\n", chip->baro_pkg);

	/* Map card types to SCSI LUNs according to the LUN topology;
	 * 0xFF marks a card type with no LUN on this configuration */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		chip->card2lun[SD_CARD] = 0;
		chip->card2lun[MS_CARD] = 1;
		chip->card2lun[XD_CARD] = 0xFF;
		chip->lun2card[0] = SD_CARD;
		chip->lun2card[1] = MS_CARD;
		chip->max_lun = 1;
		SET_SDIO_IGNORED(chip);
	} else if (CHECK_LUN_MODE(chip, SD_MS_1LUN)) {
		chip->card2lun[SD_CARD] = 0;
		chip->card2lun[MS_CARD] = 0;
		chip->card2lun[XD_CARD] = 0xFF;
		chip->lun2card[0] = SD_CARD | MS_CARD;
		chip->max_lun = 0;
	} else {
		chip->card2lun[XD_CARD] = 0;
		chip->card2lun[SD_CARD] = 0;
		chip->card2lun[MS_CARD] = 0;
		chip->lun2card[0] = XD_CARD | SD_CARD | MS_CARD;
		chip->max_lun = 0;
	}

	retval = rtsx_reset_chip(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}
	return STATUS_SUCCESS;
}
/*
 * rtsx_release_chip - tear down per-chip soft state on driver unload.
 * Frees the xD and MS logical-to-physical mapping tables and clears the
 * card presence/readiness bitmaps.
 */
void rtsx_release_chip(struct rtsx_chip *chip)
{
	xd_free_l2p_tbl(chip);
	ms_free_l2p_tbl(chip);

	chip->card_ready = 0;
	chip->card_exist = 0;
}
#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
/*
 * Software LED blink: called once per polling tick; toggles the LED GPIO
 * every LED_TOGGLE_INTERVAL ticks while a card is present and blinking
 * is enabled.
 */
static inline void rtsx_blink_led(struct rtsx_chip *chip)
{
	if (!chip->card_exist || !chip->blink_led)
		return;

	if (chip->led_toggle_counter >= LED_TOGGLE_INTERVAL) {
		chip->led_toggle_counter = 0;
		toggle_gpio(chip, LED_GPIO);
	} else {
		chip->led_toggle_counter++;
	}
}
#endif
/*
 * rtsx_monitor_aspm_config - track host-driven ASPM changes (RTS5208).
 *
 * Reads the PCIe Link Control register of the reader function (and of
 * the SDIO function when one exists and is not ignored) and caches the
 * values in chip->aspm_level[].  When either register changed since the
 * last poll, the chip's ASPM force-control register is reprogrammed to
 * match, and aspm_enabled/sdio_aspm soft state is updated.
 *
 * Bug fixed: the "&reg0" argument to rtsx_read_config_byte() had been
 * corrupted into the "(R)" glyph by an encoding error.
 */
static void rtsx_monitor_aspm_config(struct rtsx_chip *chip)
{
	int maybe_support_aspm, reg_changed;
	u32 tmp = 0;
	u8 reg0 = 0, reg1 = 0;

	maybe_support_aspm = 0;
	reg_changed = 0;

	rtsx_read_config_byte(chip, LCTLR, &reg0);
	if (chip->aspm_level[0] != reg0) {
		reg_changed = 1;
		chip->aspm_level[0] = reg0;
	}

	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip)) {
		/* SDIO function's Link Control lives at cfg dword 0xC0 */
		rtsx_read_cfg_dw(chip, 1, 0xC0, &tmp);
		reg1 = (u8)tmp;
		if (chip->aspm_level[1] != reg1) {
			reg_changed = 1;
			chip->aspm_level[1] = reg1;
		}
		/* ASPM is only usable when both functions enable it */
		if ((reg0 & 0x03) && (reg1 & 0x03)) {
			maybe_support_aspm = 1;
		}
	} else {
		if (reg0 & 0x03) {
			maybe_support_aspm = 1;
		}
	}

	if (reg_changed) {
		if (maybe_support_aspm) {
			chip->aspm_l0s_l1_en = 0x03;
		}
		RTSX_DEBUGP("aspm_level[0] = 0x%02x, aspm_level[1] = 0x%02x\n",
			      chip->aspm_level[0], chip->aspm_level[1]);
		if (chip->aspm_l0s_l1_en) {
			chip->aspm_enabled = 1;
		} else {
			chip->aspm_enabled = 0;
			chip->sdio_aspm = 0;
		}
		rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFF,
			0x30 | chip->aspm_level[0] | (chip->aspm_level[1] << 2));
	}
}
/*
 * rtsx_polling_func - periodic housekeeping, run from the polling thread.
 *
 * Handles (in order): suspend/delink short-circuits, deferred over-
 * current events, SD lock/erase completion, card init, selective-
 * suspend entry, ASPM monitoring (5208), idle detection and power-down,
 * per-state work (LED blink / remaining work / SDIO switch), over-
 * current shutdown, and the staged auto-delink machinery.
 *
 * NOTE(review): the body is a long, order-dependent state machine; this
 * update adds documentation only and leaves the code untouched.
 */
void rtsx_polling_func(struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
	struct sd_info *sd_card = &(chip->sd_card);
#endif
	int ss_allowed;

	if (rtsx_chk_stat(chip, RTSX_STAT_SUSPEND))
		return;

	/* Already delinking: skip straight to the delink state machine */
	if (rtsx_chk_stat(chip, RTSX_STAT_DELINK))
		goto Delink_Stage;

	/* Dummy config read; presumably keeps the config space alive -
	 * TODO confirm why this is needed when polling_config is set */
	if (chip->polling_config) {
		u8 val;
		rtsx_read_config_byte(chip, 0, &val);
	}

	if (rtsx_chk_stat(chip, RTSX_STAT_SS))
		return;

#ifdef SUPPORT_OCP
	/* Over-current interrupt recorded by the ISR: cut power to the
	 * affected slot(s) outside interrupt context */
	if (chip->ocp_int) {
		rtsx_read_register(chip, OCPSTAT, &(chip->ocp_stat));
		if (CHECK_PID(chip, 0x5209) &&
				CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
			if (chip->ocp_int & SD_OC_INT)
				sd_power_off_card3v3(chip);
			if (chip->ocp_int & MS_OC_INT)
				ms_power_off_card3v3(chip);
		} else {
			if (chip->card_exist & SD_CARD) {
				sd_power_off_card3v3(chip);
			} else if (chip->card_exist & MS_CARD) {
				ms_power_off_card3v3(chip);
			} else if (chip->card_exist & XD_CARD) {
				xd_power_off_card3v3(chip);
			}
		}
		chip->ocp_int = 0;
	}
#endif

#ifdef SUPPORT_SD_LOCK
	/* Poll DAT0 to detect completion of a pending SD erase; on
	 * completion schedule a reinit so the lock state is refreshed */
	if (sd_card->sd_erase_status) {
		if (chip->card_exist & SD_CARD) {
			u8 val;
			if (CHECK_PID(chip, 0x5209)) {
				rtsx_read_register(chip, SD_BUS_STAT, &val);
				if (val & SD_DAT0_STATUS) {
					sd_card->sd_erase_status = SD_NOT_ERASE;
					sd_card->sd_lock_notify = 1;
					chip->need_reinit |= SD_CARD;
				}
			} else {
				rtsx_read_register(chip, 0xFD30, &val);
				if (val & 0x02) {
					sd_card->sd_erase_status = SD_NOT_ERASE;
					sd_card->sd_lock_notify = 1;
					chip->need_reinit |= SD_CARD;
				}
			}
		} else {
			sd_card->sd_erase_status = SD_NOT_ERASE;
		}
	}
#endif

	rtsx_init_cards(chip);

	/* Decide whether selective suspend may be entered: never on 5288,
	 * and never while a non-idle SDIO function is active */
	if (chip->ss_en) {
		ss_allowed = 1;
		if (CHECK_PID(chip, 0x5288)) {
			ss_allowed = 0;
		} else {
			if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip)) {
				u32 val;
				rtsx_read_cfg_dw(chip, 1, 0x04, &val);
				if (val & 0x07) {
					ss_allowed = 0;
				}
			}
		}
	} else {
		ss_allowed = 0;
	}

	/* Enter selective suspend once the chip has been idle for the
	 * configured period */
	if (ss_allowed && !chip->sd_io) {
		if (rtsx_get_stat(chip) != RTSX_STAT_IDLE) {
			chip->ss_counter = 0;
		} else {
			if (chip->ss_counter <
				(chip->ss_idle_period / POLLING_INTERVAL)) {
				chip->ss_counter++;
			} else {
				rtsx_exclusive_enter_ss(chip);
				return;
			}
		}
	}

	if (CHECK_PID(chip, 0x5208)) {
		rtsx_monitor_aspm_config(chip);

#ifdef SUPPORT_SDIO_ASPM
		/* Let the idle SDIO function drop into ASPM on its own */
		if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) &&
			chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
			if (chip->sd_io) {
				dynamic_configure_sdio_aspm(chip);
			} else {
				if (!chip->sdio_aspm) {
					RTSX_DEBUGP("SDIO enter ASPM!\n");
					rtsx_write_register(chip,
						ASPM_FORCE_CTL, 0xFC,
						0x30 | (chip->aspm_level[1] << 2));
					chip->sdio_aspm = 1;
				}
			}
		}
#endif
	}

	/* Idle detection: after IDLE_MAX_COUNT quiet ticks, drop to the
	 * IDLE state, kill the LED and optionally power down */
	if (chip->idle_counter < IDLE_MAX_COUNT) {
		chip->idle_counter++;
	} else {
		if (rtsx_get_stat(chip) != RTSX_STAT_IDLE) {
			RTSX_DEBUGP("Idle state!\n");
			rtsx_set_stat(chip, RTSX_STAT_IDLE);
#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
			chip->led_toggle_counter = 0;
#endif
			rtsx_force_power_on(chip, SSC_PDCTL);
			turn_off_led(chip, LED_GPIO);
			if (chip->auto_power_down && !chip->card_ready && !chip->sd_io) {
				rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
			}
		}
	}

	switch (rtsx_get_stat(chip)) {
	case RTSX_STAT_RUN:
#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
		rtsx_blink_led(chip);
#endif
		do_remaining_work(chip);
		break;

	case RTSX_STAT_IDLE:
		if (chip->sd_io && !chip->sd_int) {
			try_to_switch_sdio_ctrl(chip);
		}
		rtsx_enable_aspm(chip);
		break;

	default:
		break;
	}

#ifdef SUPPORT_OCP
	/* Latched over-current status: disable output and cut power for
	 * the affected card, marking it failed */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
#ifdef CONFIG_RTS_PSTOR_DEBUG
		if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER | MS_OC_NOW | MS_OC_EVER)) {
			RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n", chip->ocp_stat);
		}
#endif
		if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
			if (chip->card_exist & SD_CARD) {
				rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN, 0);
				card_power_off(chip, SD_CARD);
				chip->card_fail |= SD_CARD;
			}
		}
		if (chip->ocp_stat & (MS_OC_NOW | MS_OC_EVER)) {
			if (chip->card_exist & MS_CARD) {
				rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN, 0);
				card_power_off(chip, MS_CARD);
				chip->card_fail |= MS_CARD;
			}
		}
	} else {
		if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
			RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n", chip->ocp_stat);
			if (chip->card_exist & SD_CARD) {
				rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN, 0);
				chip->card_fail |= SD_CARD;
			} else if (chip->card_exist & MS_CARD) {
				rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN, 0);
				chip->card_fail |= MS_CARD;
			} else if (chip->card_exist & XD_CARD) {
				rtsx_write_register(chip, CARD_OE, XD_OUTPUT_EN, 0);
				chip->card_fail |= XD_CARD;
			}
			card_power_off(chip, SD_CARD);
		}
	}
#endif

Delink_Stage:
	/* Staged auto-delink: stage 1 initiates (forced delink when a
	 * "false" card is stuck in the slot), stage 2 forces delink,
	 * stage 3 bounds the sequence.  The counter is reset whenever a
	 * card becomes ready or auto-delink is not applicable. */
	if (chip->auto_delink_en && chip->auto_delink_allowed &&
		!chip->card_ready && !chip->card_ejected && !chip->sd_io) {
		int enter_L1 = chip->auto_delink_in_L1 && (chip->aspm_l0s_l1_en || chip->ss_en);
		int delink_stage1_cnt = chip->delink_stage1_step;
		int delink_stage2_cnt = delink_stage1_cnt + chip->delink_stage2_step;
		int delink_stage3_cnt = delink_stage2_cnt + chip->delink_stage3_step;

		if (chip->auto_delink_cnt <= delink_stage3_cnt) {
			if (chip->auto_delink_cnt == delink_stage1_cnt) {
				rtsx_set_stat(chip, RTSX_STAT_DELINK);
				if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
					rtsx_set_phy_reg_bit(chip, 0x1C, 2);
				}
				if (chip->card_exist) {
					RTSX_DEBUGP("False card inserted, do force delink\n");
					if (enter_L1) {
						rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 1);
					}
					rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0x0A);
					if (enter_L1) {
						rtsx_enter_L1(chip);
					}
					/* skip stages 2/3 - force delink done */
					chip->auto_delink_cnt = delink_stage3_cnt + 1;
				} else {
					RTSX_DEBUGP("No card inserted, do delink\n");
					if (enter_L1) {
						rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 1);
					}
#ifdef HW_INT_WRITE_CLR
					if (CHECK_PID(chip, 0x5209)) {
						rtsx_writel(chip, RTSX_BIPR, 0xFFFFFFFF);
						RTSX_DEBUGP("RTSX_BIPR: 0x%x\n", rtsx_readl(chip, RTSX_BIPR));
					}
#endif
					rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 0x02);
					if (enter_L1) {
						rtsx_enter_L1(chip);
					}
				}
			}
			if (chip->auto_delink_cnt == delink_stage2_cnt) {
				RTSX_DEBUGP("Try to do force delink\n");
				if (enter_L1) {
					rtsx_exit_L1(chip);
				}
				if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
					rtsx_set_phy_reg_bit(chip, 0x1C, 2);
				}
				rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0x0A);
			}
			chip->auto_delink_cnt++;
		}
	} else {
		chip->auto_delink_cnt = 0;
	}
}
/*
 * rtsx_undo_delink - cancel a (forced) delink in progress.
 * Clears the force-delink request bits in CHANGE_LINK_STATE and blocks
 * further auto-delink attempts until state is re-initialized.
 */
void rtsx_undo_delink(struct rtsx_chip *chip)
{
	chip->auto_delink_allowed = 0;
	rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0x00);
}
/**
* rtsx_stop_cmd - stop command transfer and DMA transfer
* @chip: Realtek's card reader chip
* @card: flash card type
*
* Stop command transfer and DMA transfer.
* This function is called in error handler.
*/
void rtsx_stop_cmd(struct rtsx_chip *chip, int card)
{
int i;
for (i = 0; i <= 8; i++) {
int addr = RTSX_HCBAR + i * 4;
u32 reg;
reg = rtsx_readl(chip, addr);
RTSX_DEBUGP("BAR (0x%02x): 0x%08x\n", addr, reg);
}
rtsx_writel(chip, RTSX_HCBCTLR, STOP_CMD);
rtsx_writel(chip, RTSX_HDBCTLR, STOP_DMA);
for (i = 0; i < 16; i++) {
u16 addr = 0xFE20 + (u16)i;
u8 val;
rtsx_read_register(chip, addr, &val);
RTSX_DEBUGP("0x%04X: 0x%02x\n", addr, val);
}
rtsx_write_register(chip, DMACTL, 0x80, 0x80);
rtsx_write_register(chip, RBCTL, 0x80, 0x80);
}
#define MAX_RW_REG_CNT 1024

/*
 * rtsx_write_register - masked write of one 8-bit chip register.
 *
 * Builds a write command in the host access-internal-memory register
 * (HAIMR): bits 31:30 = 11 (write), 29:16 = address, 15:8 = mask,
 * 7:0 = data; then polls until the controller clears the busy bit
 * (bit 31) and verifies the data byte echoed back.
 *
 * Returns STATUS_SUCCESS, STATUS_FAIL when the readback data byte does
 * not match, or STATUS_TIMEDOUT after MAX_RW_REG_CNT polls.
 *
 * Fixed: "3 << 30" and "1 << 31" shift into the sign bit of a signed
 * int, which is undefined behavior in C; the constants are now
 * unsigned.
 */
int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data)
{
	int i;
	u32 val = 3u << 30;

	val |= (u32)(addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;
	rtsx_writel(chip, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_readl(chip, RTSX_HAIMR);
		if ((val & (1u << 31)) == 0) {
			/* busy cleared; the low byte echoes the value
			 * actually written */
			if (data != (u8)val) {
				TRACE_RET(chip, STATUS_FAIL);
			}
			return STATUS_SUCCESS;
		}
	}

	TRACE_RET(chip, STATUS_TIMEDOUT);
}
/*
 * rtsx_read_register - read one 8-bit chip register via HAIMR.
 *
 * Issues a read command (bits 31:30 = 10, 29:16 = address), polls until
 * the busy bit (bit 31) clears, then returns the low data byte through
 * @data (which may be NULL if the caller only wants the access cycle).
 *
 * Returns STATUS_SUCCESS or STATUS_TIMEDOUT after MAX_RW_REG_CNT polls.
 *
 * Fixed: "2 << 30" and "1 << 31" overflow/shift into the sign bit of a
 * signed int - undefined behavior in C; the constants are now unsigned.
 */
int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data)
{
	u32 val = 2u << 30;
	int i;

	if (data) {
		*data = 0;
	}

	val |= (u32)(addr & 0x3FFF) << 16;
	rtsx_writel(chip, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_readl(chip, RTSX_HAIMR);
		if ((val & (1u << 31)) == 0) {
			break;
		}
	}
	if (i >= MAX_RW_REG_CNT) {
		TRACE_RET(chip, STATUS_TIMEDOUT);
	}

	if (data) {
		*data = (u8)(val & 0xFF);
	}

	return STATUS_SUCCESS;
}
/*
 * rtsx_write_cfg_dw - masked write of one config-space dword.
 *
 * Stages each byte of @val whose corresponding @mask byte is non-zero
 * into CFGDATA0..3, then triggers the write with a byte-enable bitmap
 * in CFGRWCTL and polls for completion.  @func_no selects the PCI
 * function (0..3).  When the mask is entirely zero the hardware access
 * is skipped.
 *
 * Returns STATUS_SUCCESS; register failures return early through the
 * RTSX_WRITE_REG/RTSX_READ_REG macros.
 */
int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask, u32 val)
{
	u8 mode = 0, tmp;
	int i;

	/* Stage only the bytes selected by the mask; "mode" becomes the
	 * byte-enable bitmap for the transaction */
	for (i = 0; i < 4; i++) {
		if (mask & 0xFF) {
			RTSX_WRITE_REG(chip, CFGDATA0 + i,
				0xFF, (u8)(val & mask & 0xFF));
			mode |= (1 << i);
		}
		mask >>= 8;
		val >>= 8;
	}

	if (mode) {
		RTSX_WRITE_REG(chip, CFGADDR0, 0xFF, (u8)addr);
		RTSX_WRITE_REG(chip, CFGADDR1, 0xFF, (u8)(addr >> 8));
		/* 0x80 starts the transfer; bits 5:4 select the function */
		RTSX_WRITE_REG(chip, CFGRWCTL, 0xFF,
			0x80 | mode | ((func_no & 0x03) << 4));
		/* Busy-wait until the hardware clears the start bit */
		for (i = 0; i < MAX_RW_REG_CNT; i++) {
			RTSX_READ_REG(chip, CFGRWCTL, &tmp);
			if ((tmp & 0x80) == 0) {
				break;
			}
		}
	}

	return STATUS_SUCCESS;
}
/*
 * rtsx_read_cfg_dw - read one config-space dword of PCI function
 * @func_no at offset @addr.  Triggers the access through CFGRWCTL,
 * polls for completion, then assembles the result little-endian from
 * CFGDATA0..3.  @val may be NULL to discard the data.
 *
 * Returns STATUS_SUCCESS; register failures return early through the
 * RTSX_WRITE_REG/RTSX_READ_REG macros.
 */
int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val)
{
	int i;
	u8 tmp;
	u32 data = 0;

	RTSX_WRITE_REG(chip, CFGADDR0, 0xFF, (u8)addr);
	RTSX_WRITE_REG(chip, CFGADDR1, 0xFF, (u8)(addr >> 8));
	/* 0x80 starts a read; bits 5:4 select the function */
	RTSX_WRITE_REG(chip, CFGRWCTL, 0xFF, 0x80 | ((func_no & 0x03) << 4));

	/* Busy-wait until the hardware clears the start bit */
	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		RTSX_READ_REG(chip, CFGRWCTL, &tmp);
		if ((tmp & 0x80) == 0) {
			break;
		}
	}

	/* Assemble the dword, byte 0 = least significant */
	for (i = 0; i < 4; i++) {
		RTSX_READ_REG(chip, CFGDATA0 + i, &tmp);
		data |= (u32)tmp << (i * 8);
	}

	if (val) {
		*val = data;
	}

	return STATUS_SUCCESS;
}
/*
 * rtsx_write_cfg_seq - write an arbitrary byte range of config space.
 *
 * Scatters @len bytes from @buf, starting at (possibly unaligned)
 * offset @addr of function @func, into dword-sized data/mask pairs and
 * issues one masked rtsx_write_cfg_dw() per dword, so bytes outside the
 * requested range are left untouched.
 *
 * Returns STATUS_SUCCESS, STATUS_NOMEM on allocation failure or NULL
 * @buf, STATUS_FAIL when a dword write fails.
 *
 * Fixed: "0xFF << 24" (and "buf[i] << 24" with the top bit set) shifts
 * into the sign bit of a signed int - undefined behavior; the operands
 * are now unsigned.
 */
int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf, int len)
{
	u32 *data, *mask;
	u16 offset = addr % 4;
	u16 aligned_addr = addr - offset;
	int dw_len, i, j;
	int retval;

	RTSX_DEBUGP("%s\n", __func__);

	if (!buf) {
		TRACE_RET(chip, STATUS_NOMEM);
	}

	/* Number of dwords covering [addr, addr + len), rounded up */
	if ((len + offset) % 4) {
		dw_len = (len + offset) / 4 + 1;
	} else {
		dw_len = (len + offset) / 4;
	}
	RTSX_DEBUGP("dw_len = %d\n", dw_len);

	data = vzalloc(dw_len * 4);
	if (!data) {
		TRACE_RET(chip, STATUS_NOMEM);
	}
	mask = vzalloc(dw_len * 4);
	if (!mask) {
		vfree(data);
		TRACE_RET(chip, STATUS_NOMEM);
	}

	/* Scatter the byte stream into data dwords with matching
	 * byte-enable masks */
	j = 0;
	for (i = 0; i < len; i++) {
		mask[j] |= 0xFFu << (offset * 8);
		data[j] |= (u32)buf[i] << (offset * 8);
		if (++offset == 4) {
			j++;
			offset = 0;
		}
	}

	RTSX_DUMP(mask, dw_len * 4);
	RTSX_DUMP(data, dw_len * 4);

	for (i = 0; i < dw_len; i++) {
		retval = rtsx_write_cfg_dw(chip, func, aligned_addr + i * 4, mask[i], data[i]);
		if (retval != STATUS_SUCCESS) {
			vfree(data);
			vfree(mask);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	vfree(data);
	vfree(mask);
	return STATUS_SUCCESS;
}
/*
 * rtsx_read_cfg_seq - read an arbitrary byte range of config space.
 *
 * Reads all dwords covering [addr, addr + len) of function @func with
 * rtsx_read_cfg_dw(), then gathers the requested bytes into @buf (which
 * may be NULL to discard the data).
 *
 * Returns STATUS_SUCCESS, STATUS_NOMEM on allocation failure, or
 * STATUS_FAIL when a dword read fails.
 *
 * Cleanup: dropped the redundant (u32 *) cast of vmalloc()'s void *
 * return, matching the uncast vzalloc() calls in rtsx_write_cfg_seq().
 */
int rtsx_read_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf, int len)
{
	u32 *data;
	u16 offset = addr % 4;
	u16 aligned_addr = addr - offset;
	int dw_len, i, j;
	int retval;

	RTSX_DEBUGP("%s\n", __func__);

	/* Number of dwords covering [addr, addr + len), rounded up */
	if ((len + offset) % 4) {
		dw_len = (len + offset) / 4 + 1;
	} else {
		dw_len = (len + offset) / 4;
	}
	RTSX_DEBUGP("dw_len = %d\n", dw_len);

	/* No need to zero: every dword is overwritten below */
	data = vmalloc(dw_len * 4);
	if (!data) {
		TRACE_RET(chip, STATUS_NOMEM);
	}

	for (i = 0; i < dw_len; i++) {
		retval = rtsx_read_cfg_dw(chip, func, aligned_addr + i * 4, data + i);
		if (retval != STATUS_SUCCESS) {
			vfree(data);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	/* Gather the requested bytes out of the dword buffer */
	if (buf) {
		j = 0;
		for (i = 0; i < len; i++) {
			buf[i] = (u8)(data[j] >> (offset * 8));
			if (++offset == 4) {
				j++;
				offset = 0;
			}
		}
	}

	vfree(data);
	return STATUS_SUCCESS;
}
/*
 * rtsx_write_phy_register - write one 16-bit PHY register.
 *
 * Stages the value in PHYDATA0/1 (little-endian) and the address in
 * PHYADDR, kicks off the transaction via PHYRWCTL (0x81 = start+write)
 * and busy-waits up to 100000 polls for the start bit to clear.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL on timeout; register-access
 * failures return early through the RTSX_WRITE_REG/RTSX_READ_REG macros.
 */
int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val)
{
	int i, finished = 0;
	u8 tmp;

	RTSX_WRITE_REG(chip, PHYDATA0, 0xFF, (u8)val);
	RTSX_WRITE_REG(chip, PHYDATA1, 0xFF, (u8)(val >> 8));
	RTSX_WRITE_REG(chip, PHYADDR, 0xFF, addr);
	RTSX_WRITE_REG(chip, PHYRWCTL, 0xFF, 0x81);

	for (i = 0; i < 100000; i++) {
		RTSX_READ_REG(chip, PHYRWCTL, &tmp);
		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}
/*
 * rtsx_read_phy_register - read one 16-bit PHY register.
 *
 * Triggers a read via PHYRWCTL (0x80 = start), busy-waits up to 100000
 * polls for completion, then assembles the value from PHYDATA0 (low
 * byte) and PHYDATA1 (high byte).  @val may be NULL to discard the
 * result.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL on timeout; register-access
 * failures return early through the RTSX_WRITE_REG/RTSX_READ_REG macros.
 */
int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val)
{
	int i, finished = 0;
	u16 data = 0;
	u8 tmp;

	RTSX_WRITE_REG(chip, PHYADDR, 0xFF, addr);
	RTSX_WRITE_REG(chip, PHYRWCTL, 0xFF, 0x80);

	for (i = 0; i < 100000; i++) {
		RTSX_READ_REG(chip, PHYRWCTL, &tmp);
		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_READ_REG(chip, PHYDATA0, &tmp);
	data = tmp;
	RTSX_READ_REG(chip, PHYDATA1, &tmp);
	data |= (u16)tmp << 8;

	if (val)
		*val = data;

	return STATUS_SUCCESS;
}
/*
 * rtsx_read_efuse - read one byte from the on-chip eFuse array.
 *
 * Writes 0x80|addr to EFUSE_CTRL to start the read, polls (up to 100
 * microsecond-spaced attempts) for the busy bit to clear, then fetches
 * the byte from EFUSE_DATA.  @val may be NULL to discard the result.
 *
 * Returns STATUS_SUCCESS or STATUS_TIMEDOUT; register-access failures
 * return early through the RTSX_WRITE_REG/RTSX_READ_REG macros.
 */
int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val)
{
	int i;
	u8 data = 0;

	RTSX_WRITE_REG(chip, EFUSE_CTRL, 0xFF, 0x80|addr);

	for (i = 0; i < 100; i++) {
		RTSX_READ_REG(chip, EFUSE_CTRL, &data);
		if (!(data & 0x80))
			break;
		udelay(1);
	}

	if (data & 0x80) {
		TRACE_RET(chip, STATUS_TIMEDOUT);
	}

	RTSX_READ_REG(chip, EFUSE_DATA, &data);
	if (val)
		*val = data;

	return STATUS_SUCCESS;
}
/*
 * rtsx_write_efuse - program one byte of the on-chip eFuse array.
 *
 * eFuse bits can only be blown from 1 to 0, one bit per transaction:
 * for every zero bit in @val, a pattern with just that bit cleared is
 * written via EFUSE_DATA and a program command (0xA0|addr) is issued,
 * followed by a busy-poll and a settling delay.  Bits that are 1 in
 * @val are skipped.
 *
 * Returns STATUS_SUCCESS or STATUS_TIMEDOUT; register-access failures
 * return early through the RTSX_WRITE_REG/RTSX_READ_REG macros.
 */
int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val)
{
	int i, j;
	u8 data = 0, tmp = 0xFF;

	for (i = 0; i < 8; i++) {
		if (val & (u8)(1 << i))
			continue;

		/* Accumulate cleared bits so far; program one more bit */
		tmp &= (~(u8)(1 << i));
		RTSX_DEBUGP("Write 0x%x to 0x%x\n", tmp, addr);

		RTSX_WRITE_REG(chip, EFUSE_DATA, 0xFF, tmp);
		RTSX_WRITE_REG(chip, EFUSE_CTRL, 0xFF, 0xA0|addr);

		for (j = 0; j < 100; j++) {
			RTSX_READ_REG(chip, EFUSE_CTRL, &data);
			if (!(data & 0x80))
				break;
			wait_timeout(3);
		}

		if (data & 0x80) {
			TRACE_RET(chip, STATUS_TIMEDOUT);
		}

		wait_timeout(5);
	}

	return STATUS_SUCCESS;
}
/*
 * Clear bit @bit of PHY register @reg using a read-modify-write; the
 * write is skipped when the bit is already clear.  Returns
 * STATUS_SUCCESS or STATUS_FAIL.
 */
int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
{
	u16 value;
	int retval;

	retval = rtsx_read_phy_register(chip, reg, &value);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (!(value & (1 << bit)))
		return STATUS_SUCCESS;

	value &= ~(1 << bit);
	retval = rtsx_write_phy_register(chip, reg, value);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}
/*
 * Set bit @bit of PHY register @reg using a read-modify-write; the
 * write is skipped when the bit is already set.  Returns
 * STATUS_SUCCESS or STATUS_FAIL.
 */
int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
{
	u16 value;
	int retval;

	retval = rtsx_read_phy_register(chip, reg, &value);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (value & (1 << bit))
		return STATUS_SUCCESS;

	value |= (1 << bit);
	retval = rtsx_write_phy_register(chip, reg, value);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}
/*
 * Check whether a delink occurred: IRQSTAT0's LINK_RDY_INT bit is
 * latched on delink.  When set, acknowledge it (write-1-to-clear) and
 * report STATUS_FAIL; otherwise report STATUS_SUCCESS.
 */
int rtsx_check_link_ready(struct rtsx_chip *chip)
{
	u8 irq_stat;

	RTSX_READ_REG(chip, IRQSTAT0, &irq_stat);
	RTSX_DEBUGP("IRQSTAT0: 0x%x\n", irq_stat);

	if (!(irq_stat & LINK_RDY_INT))
		return STATUS_SUCCESS;

	RTSX_DEBUGP("Delinked!\n");
	rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
	return STATUS_FAIL;
}
/*
 * Program the PCI power-management D-state of the reader function (and,
 * when an SDIO function exists, of that function as well) via the PMCSR
 * registers in config space (offset 0x84 for SDIO, 0x44/0x45 for the
 * reader itself).
 */
static void rtsx_handle_pm_dstate(struct rtsx_chip *chip, u8 dstate)
{
	u32 ultmp;

	RTSX_DEBUGP("%04x set pm_dstate to %d\n", chip->product_id, dstate);

	if (CHK_SDIO_EXIST(chip)) {
		/* The SDIO interface is function 2 on 5288, 1 elsewhere */
		u8 func_no = CHECK_PID(chip, 0x5288) ? 2 : 1;

		rtsx_read_cfg_dw(chip, func_no, 0x84, &ultmp);
		RTSX_DEBUGP("pm_dstate of function %d: 0x%x\n", (int)func_no, ultmp);
		rtsx_write_cfg_dw(chip, func_no, 0x84, 0xFF, dstate);
	}

	rtsx_write_config_byte(chip, 0x44, dstate);
	rtsx_write_config_byte(chip, 0x45, 0);
}
/* Put the device into PM D2 state (used here to allow the link to drop
 * into L1). */
void rtsx_enter_L1(struct rtsx_chip *chip)
{
	rtsx_handle_pm_dstate(chip, 2);
}
/* Return the device to PM D0 by clearing the PMCSR bytes directly. */
void rtsx_exit_L1(struct rtsx_chip *chip)
{
	rtsx_write_config_byte(chip, 0x44, 0);
	rtsx_write_config_byte(chip, 0x45, 0);
}
/*
 * rtsx_enter_ss - enter selective-suspend (idle power-saving) state.
 *
 * Acknowledges any pending delink interrupt, optionally powers the card
 * and analog blocks down, puts the SDIO function into L1-capable mode,
 * arms either the auto-delink hardware or the card-insert wake
 * interrupt, drops the link into L1 and records the SS state.
 */
void rtsx_enter_ss(struct rtsx_chip *chip)
{
	RTSX_DEBUGP("Enter Selective Suspend State!\n");

	/* Clear a stale delink indication before sleeping */
	rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);

	if (chip->power_down_in_ss) {
		rtsx_power_off_card(chip);
		rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
	}

	/* Let the SDIO function enter L1 (cfg 0xC0 high byte = 0x01) */
	if (CHK_SDIO_EXIST(chip)) {
		if (CHECK_PID(chip, 0x5288)) {
			rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF00, 0x0100);
		} else {
			rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFF00, 0x0100);
		}
	}

	if (chip->auto_delink_en) {
		/* Hardware wakes us through the delink machinery */
		rtsx_write_register(chip, HOST_SLEEP_STATE, 0x01, 0x01);
	} else {
		/* Otherwise wake on card-insert interrupt instead */
		if (!chip->phy_debug_mode) {
			u32 tmp;
			tmp = rtsx_readl(chip, RTSX_BIER);
			tmp |= CARD_INT;
			rtsx_writel(chip, RTSX_BIER, tmp);
		}
		rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 0);
	}

	rtsx_enter_L1(chip);

	RTSX_CLR_DELINK(chip);
	rtsx_set_stat(chip, RTSX_STAT_SS);
}
/*
 * rtsx_exit_ss - leave selective-suspend state.
 *
 * Brings the link out of L1, restores power if it was dropped on entry,
 * and schedules a reinit of all card types - with a reset (1) when a
 * delink happened during SS, without (0) when only power was cycled.
 */
void rtsx_exit_ss(struct rtsx_chip *chip)
{
	RTSX_DEBUGP("Exit Selective Suspend State!\n");

	rtsx_exit_L1(chip);

	if (chip->power_down_in_ss) {
		rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
		udelay(1000);
	}

	if (RTSX_TST_DELINK(chip)) {
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 1);
		RTSX_CLR_DELINK(chip);
	} else if (chip->power_down_in_ss) {
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 0);
	}
}
/*
 * rtsx_pre_handle_interrupt - decode the bus interrupt status word.
 *
 * Reads BIPR (acknowledging it on 5209 with write-clear interrupts),
 * filters it against the enabled sources, and translates card insert/
 * remove/exist bits into the need_reset / need_release / need_reinit
 * bitmaps consumed by the polling thread.  Also wakes the chip from
 * selective suspend and records over-current interrupt bits.
 *
 * Returns STATUS_FAIL when the interrupt is not ours (no enabled bit
 * set, all-ones readback, or - in legacy-IRQ mode on 5209 - interrupts
 * disabled in config space), STATUS_SUCCESS otherwise.
 */
int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
{
	u32 status, int_enable;
	int exit_ss = 0;
#ifdef SUPPORT_OCP
	u32 ocp_int = 0;

	/* Which over-current bits apply depends on PID and LUN mode */
	if (CHECK_PID(chip, 0x5209)) {
		if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
			ocp_int = MS_OC_INT | SD_OC_INT;
		} else {
			ocp_int = SD_OC_INT;
		}
	} else {
		ocp_int = OC_INT;
	}
#endif

	/* Any interrupt activity cancels pending selective suspend */
	if (chip->ss_en) {
		chip->ss_counter = 0;
		if (rtsx_get_stat(chip) == RTSX_STAT_SS) {
			exit_ss = 1;
			rtsx_exit_L1(chip);
			rtsx_set_stat(chip, RTSX_STAT_RUN);
		}
	}

	int_enable = rtsx_readl(chip, RTSX_BIER);
	chip->int_reg = rtsx_readl(chip, RTSX_BIPR);

#ifdef HW_INT_WRITE_CLR
	/* 5209 interrupts are write-1-to-clear: ack what we just read */
	if (CHECK_PID(chip, 0x5209)) {
		rtsx_writel(chip, RTSX_BIPR, chip->int_reg);
	}
#endif

	/* Not our interrupt: nothing enabled fired, or the device is
	 * gone (all-ones readback) */
	if (((chip->int_reg & int_enable) == 0) || (chip->int_reg == 0xFFFFFFFF))
		return STATUS_FAIL;

	if (!chip->msi_en) {
		/* Legacy IRQ on 5209: config 0x05 bit 2 set means
		 * interrupts are disabled, so this cannot be ours */
		if (CHECK_PID(chip, 0x5209)) {
			u8 val;
			rtsx_read_config_byte(chip, 0x05, &val);
			if (val & 0x04) {
				return STATUS_FAIL;
			}
		}
	}

	/* Keep the enabled interrupt bits plus the low status bits */
	status = chip->int_reg &= (int_enable | 0x7FFFFF);

	if (status & CARD_INT) {
		chip->auto_delink_cnt = 0;

		if (status & SD_INT) {
			if (status & SD_EXIST) {
				set_bit(SD_NR, &(chip->need_reset));
			} else {
				set_bit(SD_NR, &(chip->need_release));
				chip->sd_reset_counter = 0;
				chip->sd_show_cnt = 0;
				clear_bit(SD_NR, &(chip->need_reset));
			}
		} else {
			/* If multi-luns, it's possible that
			   when plugging/unplugging one card
			   there is another card which still
			   exists in the slot. In this case,
			   all existed cards should be reset.
			*/
			if (exit_ss && (status & SD_EXIST))
				set_bit(SD_NR, &(chip->need_reinit));
		}

		/* 5288 in LQFP package has no xD slot */
		if (!CHECK_PID(chip, 0x5288) || CHECK_BARO_PKG(chip, QFN)) {
			if (status & XD_INT) {
				if (status & XD_EXIST) {
					set_bit(XD_NR, &(chip->need_reset));
				} else {
					set_bit(XD_NR, &(chip->need_release));
					chip->xd_reset_counter = 0;
					chip->xd_show_cnt = 0;
					clear_bit(XD_NR, &(chip->need_reset));
				}
			} else {
				if (exit_ss && (status & XD_EXIST))
					set_bit(XD_NR, &(chip->need_reinit));
			}
		}

		if (status & MS_INT) {
			if (status & MS_EXIST) {
				set_bit(MS_NR, &(chip->need_reset));
			} else {
				set_bit(MS_NR, &(chip->need_release));
				chip->ms_reset_counter = 0;
				chip->ms_show_cnt = 0;
				clear_bit(MS_NR, &(chip->need_reset));
			}
		} else {
			if (exit_ss && (status & MS_EXIST))
				set_bit(MS_NR, &(chip->need_reinit));
		}
	}

#ifdef SUPPORT_OCP
	/* Remember over-current bits; handled later by the poll thread */
	chip->ocp_int = ocp_int & status;
#endif

	/* During SDIO pass-through the DATA_DONE interrupt belongs to
	 * the SDIO host, not to us */
	if (chip->sd_io) {
		if (chip->int_reg & DATA_DONE_INT)
			chip->int_reg &= ~(u32)DATA_DONE_INT;
	}

	return STATUS_SUCCESS;
}
/*
 * rtsx_do_before_power_down - quiesce the chip for suspend/shutdown.
 *
 * Marks the chip suspended, releases all cards, masks bus interrupts
 * and the LED, arms the SDIO bus auto-switch when an SDIO transfer was
 * in charge, forces CLKREQ, programs the requested host sleep state
 * (@pm_stat is PM_S1 or PM_S3), optionally delinks, and finally powers
 * the analog blocks down and clears cached clock/card state.
 */
void rtsx_do_before_power_down(struct rtsx_chip *chip, int pm_stat)
{
	int retval;

	RTSX_DEBUGP("rtsx_do_before_power_down, pm_stat = %d\n", pm_stat);

	rtsx_set_stat(chip, RTSX_STAT_SUSPEND);

	/* Need the SSC block up to carry out the sequence below */
	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS)
		return;

	rtsx_release_cards(chip);
	rtsx_disable_bus_int(chip);
	turn_off_led(chip, LED_GPIO);

#ifdef HW_AUTO_SWITCH_SD_BUS
	/* Hand the SD bus to the SDIO host over suspend; the enable
	 * register differs per PID */
	if (chip->sd_io) {
		chip->sdio_in_charge = 1;
		if (CHECK_PID(chip, 0x5208)) {
			rtsx_write_register(chip, TLPTISTAT, 0x08, 0x08);
			/* Enable sdio_bus_auto_switch */
			rtsx_write_register(chip, 0xFE70, 0x80, 0x80);
		} else if (CHECK_PID(chip, 0x5288)) {
			rtsx_write_register(chip, TLPTISTAT, 0x08, 0x08);
			/* Enable sdio_bus_auto_switch */
			rtsx_write_register(chip, 0xFE5A, 0x08, 0x08);
		} else if (CHECK_PID(chip, 0x5209)) {
			rtsx_write_register(chip, TLPTISTAT, 0x10, 0x10);
			/* Enable sdio_bus_auto_switch */
			rtsx_write_register(chip, SDIO_CFG, SDIO_BUS_AUTO_SWITCH, SDIO_BUS_AUTO_SWITCH);
		}
	}
#endif

	if (CHECK_PID(chip, 0x5208) && (chip->ic_version >= IC_VER_D)) {
		/* u_force_clkreq_0 */
		rtsx_write_register(chip, PETXCFG, 0x08, 0x08);
	} else if (CHECK_PID(chip, 0x5209)) {
		/* u_force_clkreq_0 */
		rtsx_write_register(chip, PETXCFG, 0x08, 0x08);
	}

	if (pm_stat == PM_S1) {
		RTSX_DEBUGP("Host enter S1\n");
		rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, HOST_ENTER_S1);
	} else if (pm_stat == PM_S3) {
		if (chip->s3_pwr_off_delay > 0) {
			wait_timeout(chip->s3_pwr_off_delay);
		}
		RTSX_DEBUGP("Host enter S3\n");
		rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, HOST_ENTER_S3);
	}

	if (chip->do_delink_before_power_down && chip->auto_delink_en) {
		rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 2);
	}

	rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);

	/* Invalidate cached hardware state for the next resume */
	chip->cur_clk = 0;
	chip->cur_card = 0;
	chip->card_exist = 0;
}
/*
 * rtsx_enable_aspm - (re)enable ASPM when dynamic ASPM is configured.
 *
 * No-op unless aspm_l0s_l1_en and dynamic_aspm are set and ASPM is
 * currently off.  On the RTS5208 the cached level is pushed through
 * ASPM_FORCE_CTL (plus a PHY tweak on ASIC parts); other chips get the
 * Link Control register written directly.  An existing SDIO function's
 * Link Control is updated as well.
 */
void rtsx_enable_aspm(struct rtsx_chip *chip)
{
	if (!chip->aspm_l0s_l1_en || !chip->dynamic_aspm)
		return;
	if (chip->aspm_enabled)
		return;

	RTSX_DEBUGP("Try to enable ASPM\n");
	chip->aspm_enabled = 1;

	if (chip->asic_code && CHECK_PID(chip, 0x5208))
		rtsx_write_phy_register(chip, 0x07, 0);

	if (CHECK_PID(chip, 0x5208)) {
		rtsx_write_register(chip, ASPM_FORCE_CTL, 0xF3,
			0x30 | chip->aspm_level[0]);
	} else {
		rtsx_write_config_byte(chip, LCTLR, chip->aspm_l0s_l1_en);
	}

	if (CHK_SDIO_EXIST(chip)) {
		u16 val = chip->aspm_l0s_l1_en | 0x0100;

		if (CHECK_PID(chip, 0x5288)) {
			rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFFFF, val);
		} else {
			rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFFFF, val);
		}
	}
}
/*
 * rtsx_disable_aspm - turn ASPM off when dynamic ASPM is configured.
 *
 * On the RTS5208 the current host ASPM configuration is refreshed
 * first.  No-op unless aspm_l0s_l1_en and dynamic_aspm are set and
 * ASPM is currently on; otherwise the force-control register (5208) or
 * the Link Control register is cleared, with a short settling delay.
 */
void rtsx_disable_aspm(struct rtsx_chip *chip)
{
	if (CHECK_PID(chip, 0x5208))
		rtsx_monitor_aspm_config(chip);

	if (!chip->aspm_l0s_l1_en || !chip->dynamic_aspm)
		return;
	if (!chip->aspm_enabled)
		return;

	RTSX_DEBUGP("Try to disable ASPM\n");
	chip->aspm_enabled = 0;

	if (chip->asic_code && CHECK_PID(chip, 0x5208))
		rtsx_write_phy_register(chip, 0x07, 0x0129);

	if (CHECK_PID(chip, 0x5208)) {
		rtsx_write_register(chip, ASPM_FORCE_CTL, 0xF3, 0x30);
	} else {
		rtsx_write_config_byte(chip, LCTLR, 0x00);
	}

	wait_timeout(1);
}
/*
 * rtsx_read_ppbuf - read @buf_len bytes out of the chip's ping-pong
 * buffer (PPBUF_BASE2) into @buf.
 *
 * The buffer is fetched in command batches of at most 256 register
 * reads: full 256-byte chunks first, then one partial chunk for the
 * remainder.
 *
 * Returns STATUS_SUCCESS, STATUS_ERROR for a NULL @buf, or STATUS_FAIL
 * when a command batch fails.
 *
 * Cleanup: the final memcpy() of the partial chunk was sitting outside
 * its "if (buf_len %% 256)" block; it has been moved inside so the
 * command buffer is only touched when a partial chunk was actually
 * transferred (the stray copy was a 0-byte no-op, but misleading).
 */
int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
{
	int retval;
	int i, j;
	u16 reg_addr;
	u8 *ptr;

	if (!buf) {
		TRACE_RET(chip, STATUS_ERROR);
	}

	ptr = buf;
	reg_addr = PPBUF_BASE2;

	/* Full 256-byte chunks */
	for (i = 0; i < buf_len/256; i++) {
		rtsx_init_cmd(chip);
		for (j = 0; j < 256; j++)
			rtsx_add_cmd(chip, READ_REG_CMD, reg_addr++, 0, 0);
		retval = rtsx_send_cmd(chip, 0, 250);
		if (retval < 0) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		memcpy(ptr, rtsx_get_cmd_data(chip), 256);
		ptr += 256;
	}

	/* Trailing partial chunk, if any */
	if (buf_len%256) {
		rtsx_init_cmd(chip);
		for (j = 0; j < buf_len%256; j++)
			rtsx_add_cmd(chip, READ_REG_CMD, reg_addr++, 0, 0);
		retval = rtsx_send_cmd(chip, 0, 250);
		if (retval < 0) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		memcpy(ptr, rtsx_get_cmd_data(chip), buf_len%256);
	}

	return STATUS_SUCCESS;
}
/*
 * rtsx_write_ppbuf - copy @buf_len bytes from @buf into the chip's
 * ping-pong buffer (PPBUF_BASE2).
 *
 * Mirrors rtsx_read_ppbuf(): the data is pushed in command batches of
 * at most 256 register writes, full chunks first, then the remainder.
 * Returns STATUS_SUCCESS, STATUS_ERROR for a NULL @buf, or STATUS_FAIL
 * when a command batch fails.
 */
int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
{
	u16 reg = PPBUF_BASE2;
	u8 *src = buf;
	int chunk, k, retval;

	if (!buf) {
		TRACE_RET(chip, STATUS_ERROR);
	}

	/* Full 256-byte chunks */
	for (chunk = 0; chunk < buf_len / 256; chunk++) {
		rtsx_init_cmd(chip);
		for (k = 0; k < 256; k++) {
			rtsx_add_cmd(chip, WRITE_REG_CMD, reg, 0xFF, *src);
			reg++;
			src++;
		}
		retval = rtsx_send_cmd(chip, 0, 250);
		if (retval < 0) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	/* Trailing partial chunk, if any */
	if (buf_len % 256) {
		rtsx_init_cmd(chip);
		for (k = 0; k < buf_len % 256; k++) {
			rtsx_add_cmd(chip, WRITE_REG_CMD, reg, 0xFF, *src);
			reg++;
			src++;
		}
		retval = rtsx_send_cmd(chip, 0, 250);
		if (retval < 0) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}
int rtsx_check_chip_exist(struct rtsx_chip *chip)
{
if (rtsx_readl(chip, 0) == 0xFFFFFFFF) {
TRACE_RET(chip, STATUS_FAIL);
}
return STATUS_SUCCESS;
}
/*
 * Power on chip sub-blocks by clearing their power-down bits in the
 * FPDCTL register.  @ctl selects the blocks: SSC_PDCTL maps to
 * SSC_POWER_DOWN; with SUPPORT_OCP, OC_PDCTL maps to SD_OC_POWER_DOWN
 * (plus MS_OC_POWER_DOWN in SD_MS_2LUN mode).
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL on a register-write error.
 */
int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl)
{
	int retval;
	u8 mask = 0;

	if (ctl & SSC_PDCTL)
		mask |= SSC_POWER_DOWN;

#ifdef SUPPORT_OCP
	if (ctl & OC_PDCTL) {
		mask |= SD_OC_POWER_DOWN;
		if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
			mask |= MS_OC_POWER_DOWN;
		}
	}
#endif

	if (mask) {
		/* Writing 0 into the masked bits powers those blocks on. */
		retval = rtsx_write_register(chip, FPDCTL, mask, 0);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		/* NOTE(review): 5288 apparently needs settle time after
		 * power-up — wait_timeout(200); units assumed ms, confirm. */
		if (CHECK_PID(chip, 0x5288))
			wait_timeout(200);
	}

	return STATUS_SUCCESS;
}
/*
 * Power down chip sub-blocks by setting their power-down bits in the
 * FPDCTL register; the mirror image of rtsx_force_power_on().  @ctl
 * selects the blocks via SSC_PDCTL and (with SUPPORT_OCP) OC_PDCTL.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL on a register-write error.
 */
int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl)
{
	u8 mask = 0;
	int retval;

	if (ctl & SSC_PDCTL)
		mask |= SSC_POWER_DOWN;

#ifdef SUPPORT_OCP
	if (ctl & OC_PDCTL) {
		mask |= SD_OC_POWER_DOWN;
		if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
			mask |= MS_OC_POWER_DOWN;
	}
#endif

	if (mask) {
		/* Set every selected power-down bit (value == mask). */
		retval = rtsx_write_register(chip, FPDCTL, mask, mask);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}
| gpl-2.0 |
varunchitre15/android_kernel_sony_msm8930 | fs/exportfs/expfs.c | 7383 | 12608 | /*
* Copyright (C) Neil Brown 2002
* Copyright (C) Christoph Hellwig 2007
*
* This file contains the code mapping from inodes to NFS file handles,
* and for mapping back from file handles to dentries.
*
* For details on why we do all the strange and hairy things in here
* take a look at Documentation/filesystems/nfs/Exporting.
*/
#include <linux/exportfs.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sched.h>
#define dprintk(fmt, args...) do{}while(0)
/* Generic fallback implementation, defined later in this file. */
static int get_name(struct vfsmount *mnt, struct dentry *dentry, char *name,
		struct dentry *child);

/*
 * Find the name of @child within directory @dir, preferring the
 * filesystem's own ->get_name export operation and falling back to the
 * generic readdir-based get_name().
 */
static int exportfs_get_name(struct vfsmount *mnt, struct dentry *dir,
		char *name, struct dentry *child)
{
	const struct export_operations *nop = dir->d_sb->s_export_op;

	if (nop->get_name)
		return nop->get_name(dir, name, child);
	else
		return get_name(mnt, dir, name, child);
}
/*
 * Check if the dentry or any of its aliases is acceptable.
 *
 * Returns a referenced dentry (either @result itself or an alias, in
 * which case @result has been dropped), or NULL if nothing acceptable
 * was found — in that case the caller still owns its reference to
 * @result.
 */
static struct dentry *
find_acceptable_alias(struct dentry *result,
		int (*acceptable)(void *context, struct dentry *dentry),
		void *context)
{
	struct dentry *dentry, *toput = NULL;
	struct inode *inode;

	/* Fast path: the dentry we were handed is itself acceptable. */
	if (acceptable(context, result))
		return result;

	inode = result->d_inode;
	spin_lock(&inode->i_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		/*
		 * Pin the alias, then drop i_lock before invoking the
		 * callback (presumably because it may sleep — it is caller
		 * supplied).  The previous candidate is only released once
		 * the lock is no longer held.
		 */
		dget(dentry);
		spin_unlock(&inode->i_lock);
		if (toput)
			dput(toput);
		if (dentry != result && acceptable(context, dentry)) {
			/* Hand back the alias; drop the original. */
			dput(result);
			return dentry;
		}
		spin_lock(&inode->i_lock);
		toput = dentry;
	}
	spin_unlock(&inode->i_lock);

	if (toput)
		dput(toput);
	return NULL;
}
/*
 * Find root of a disconnected subtree and return a reference to it.
 *
 * Walks up from @dentry while the parent is still marked
 * DCACHE_DISCONNECTED, stopping at either a connected parent or an
 * IS_ROOT dentry.  The returned dentry carries a reference.
 */
static struct dentry *
find_disconnected_root(struct dentry *dentry)
{
	dget(dentry);
	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dget_parent(dentry);

		/* Parent is connected: current dentry is the subtree root. */
		if (!(parent->d_flags & DCACHE_DISCONNECTED)) {
			dput(parent);
			break;
		}

		dput(dentry);
		dentry = parent;
	}
	return dentry;
}
/*
 * Make sure target_dir is fully connected to the dentry tree.
 *
 * It may already be, as the flag isn't always updated when connection happens.
 *
 * @nbuf is a caller-supplied scratch buffer of at least NAME_MAX+1
 * bytes used for name lookups.  Returns 0 once @target_dir is
 * connected, otherwise a negative errno (-ESTALE, -EACCES, or whatever
 * get_parent/get_name/lookup reported).
 */
static int
reconnect_path(struct vfsmount *mnt, struct dentry *target_dir, char *nbuf)
{
	int noprogress = 0;
	int err = -ESTALE;

	/*
	 * It is possible that a confused file system might not let us complete
	 * the path to the root. For example, if get_parent returns a directory
	 * in which we cannot find a name for the child. While this implies a
	 * very sick filesystem we don't want it to cause knfsd to spin. Hence
	 * the noprogress counter. If we go through the loop 10 times (2 is
	 * probably enough) without getting anywhere, we just give up
	 */
	while (target_dir->d_flags & DCACHE_DISCONNECTED && noprogress++ < 10) {
		struct dentry *pd = find_disconnected_root(target_dir);

		if (!IS_ROOT(pd)) {
			/* must have found a connected parent - great */
			spin_lock(&pd->d_lock);
			pd->d_flags &= ~DCACHE_DISCONNECTED;
			spin_unlock(&pd->d_lock);
			noprogress = 0;
		} else if (pd == mnt->mnt_sb->s_root) {
			printk(KERN_ERR "export: Eeek filesystem root is not connected, impossible\n");
			spin_lock(&pd->d_lock);
			pd->d_flags &= ~DCACHE_DISCONNECTED;
			spin_unlock(&pd->d_lock);
			noprogress = 0;
		} else {
			/*
			 * We have hit the top of a disconnected path, try to
			 * find parent and connect.
			 *
			 * Racing with some other process renaming a directory
			 * isn't much of a problem here. If someone renames
			 * the directory, it will end up properly connected,
			 * which is what we want
			 *
			 * Getting the parent can't be supported generically,
			 * the locking is too icky.
			 *
			 * Instead we just return EACCES. If server reboots
			 * or inodes get flushed, you lose
			 */
			struct dentry *ppd = ERR_PTR(-EACCES);
			struct dentry *npd;

			mutex_lock(&pd->d_inode->i_mutex);
			if (mnt->mnt_sb->s_export_op->get_parent)
				ppd = mnt->mnt_sb->s_export_op->get_parent(pd);
			mutex_unlock(&pd->d_inode->i_mutex);

			if (IS_ERR(ppd)) {
				err = PTR_ERR(ppd);
				dprintk("%s: get_parent of %ld failed, err %d\n",
					__func__, pd->d_inode->i_ino, err);
				dput(pd);
				break;
			}

			dprintk("%s: find name of %lu in %lu\n", __func__,
				pd->d_inode->i_ino, ppd->d_inode->i_ino);
			err = exportfs_get_name(mnt, ppd, nbuf, pd);
			if (err) {
				dput(ppd);
				dput(pd);
				if (err == -ENOENT)
					/* some race between get_parent and
					 * get_name? just try again
					 */
					continue;
				break;
			}
			dprintk("%s: found name: %s\n", __func__, nbuf);
			mutex_lock(&ppd->d_inode->i_mutex);
			npd = lookup_one_len(nbuf, ppd, strlen(nbuf));
			mutex_unlock(&ppd->d_inode->i_mutex);
			if (IS_ERR(npd)) {
				err = PTR_ERR(npd);
				dprintk("%s: lookup failed: %d\n",
					__func__, err);
				dput(ppd);
				dput(pd);
				break;
			}
			/* we didn't really want npd, we really wanted
			 * a side-effect of the lookup.
			 * hopefully, npd == pd, though it isn't really
			 * a problem if it isn't
			 */
			if (npd == pd)
				noprogress = 0;
			else
				printk("%s: npd != pd\n", __func__);
			dput(npd);
			dput(ppd);
			if (IS_ROOT(pd)) {
				/* something went wrong, we have to give up */
				dput(pd);
				break;
			}
		}
		dput(pd);
	}

	if (target_dir->d_flags & DCACHE_DISCONNECTED) {
		/* something went wrong - oh-well */
		if (!err)
			err = -ESTALE;
		return err;
	}

	return 0;
}
/* State shared between get_name() and its filldir_one() callback. */
struct getdents_callback {
	char *name;		/* name that was found. It already points to a
				   buffer NAME_MAX+1 is size */
	unsigned long ino;	/* the inum we are looking for */
	int found;		/* inode matched? */
	int sequence;		/* sequence counter — detects lack of progress */
};
/*
 * A rather strange filldir function to capture
 * the name matching the specified inode number.
 */
static int filldir_one(void * __buf, const char * name, int len,
			loff_t pos, u64 ino, unsigned int d_type)
{
	struct getdents_callback *buf = __buf;
	int result = 0;

	buf->sequence++;
	if (buf->ino == ino) {
		/* Copy the matching entry out; per getdents_callback the
		 * destination holds NAME_MAX+1 bytes, so len fits. */
		memcpy(buf->name, name, len);
		buf->name[len] = '\0';
		buf->found = 1;
		result = -1;	/* non-zero stops the readdir iteration */
	}
	return result;
}
/**
 * get_name - default export_operations->get_name function
 * @mnt: the mount the directory lives on
 * @dentry: the directory in which to find a name
 * @name: a pointer to a %NAME_MAX+1 char buffer to store the name
 * @child: the dentry for the child directory.
 *
 * calls readdir on the parent until it finds an entry with
 * the same inode number as the child, and returns that.
 *
 * Returns 0 on success or a negative errno (-ENOTDIR, -EINVAL,
 * -ENOENT, or a readdir error).
 */
static int get_name(struct vfsmount *mnt, struct dentry *dentry,
		char *name, struct dentry *child)
{
	const struct cred *cred = current_cred();
	struct inode *dir = dentry->d_inode;
	int error;
	struct file *file;
	struct getdents_callback buffer;

	error = -ENOTDIR;
	if (!dir || !S_ISDIR(dir->i_mode))
		goto out;
	error = -EINVAL;
	if (!dir->i_fop)
		goto out;
	/*
	 * Open the directory ...
	 */
	file = dentry_open(dget(dentry), mntget(mnt), O_RDONLY, cred);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -EINVAL;
	if (!file->f_op->readdir)
		goto out_close;

	buffer.name = name;
	buffer.ino = child->d_inode->i_ino;
	buffer.found = 0;
	buffer.sequence = 0;
	while (1) {
		int old_seq = buffer.sequence;

		error = vfs_readdir(file, filldir_one, &buffer);
		if (buffer.found) {
			error = 0;
			break;
		}

		if (error < 0)
			break;

		error = -ENOENT;
		/* No entries consumed this pass => end of directory. */
		if (old_seq == buffer.sequence)
			break;
	}

out_close:
	fput(file);
out:
	return error;
}
/**
 * export_encode_fh - default export_operations->encode_fh function
 * @dentry: the dentry to encode
 * @fid: where to store the file handle fragment
 * @max_len: maximum length to store there
 * @connectable: whether to store parent information
 *
 * This default encode_fh function assumes that the 32 inode number
 * is suitable for locating an inode, and that the generation number
 * can be used to check that it is still valid. It places them in the
 * filehandle fragment where export_decode_fh expects to find them.
 *
 * Returns the fileid type, or 255 when the buffer is too small (in
 * which case *max_len is updated to the required length).
 */
static int export_encode_fh(struct dentry *dentry, struct fid *fid,
		int *max_len, int connectable)
{
	struct inode * inode = dentry->d_inode;
	int len = *max_len;
	int type = FILEID_INO32_GEN;

	if (connectable && (len < 4)) {
		*max_len = 4;
		return 255;
	} else if (len < 2) {
		*max_len = 2;
		return 255;
	}

	len = 2;
	fid->i32.ino = inode->i_ino;
	fid->i32.gen = inode->i_generation;
	/* Directories can be reconnected via ->get_parent, so parent info
	 * is only stored for non-directories. */
	if (connectable && !S_ISDIR(inode->i_mode)) {
		struct inode *parent;

		/* d_lock stabilises d_parent for the two reads below. */
		spin_lock(&dentry->d_lock);
		parent = dentry->d_parent->d_inode;
		fid->i32.parent_ino = parent->i_ino;
		fid->i32.parent_gen = parent->i_generation;
		spin_unlock(&dentry->d_lock);
		len = 4;
		type = FILEID_INO32_GEN_PARENT;
	}
	*max_len = len;
	return type;
}
/*
 * exportfs_encode_fh - encode a file handle fragment for @dentry
 *
 * Delegates to the filesystem's ->encode_fh() when one is provided,
 * otherwise falls back to the generic export_encode_fh().  Returns the
 * fileid type chosen by the encoder.
 */
int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len,
		int connectable)
{
	const struct export_operations *nop = dentry->d_sb->s_export_op;

	if (nop->encode_fh)
		return nop->encode_fh(dentry, fid->raw, max_len, connectable);

	return export_encode_fh(dentry, fid, max_len, connectable);
}
EXPORT_SYMBOL_GPL(exportfs_encode_fh);
/*
 * exportfs_decode_fh - map a file handle back to a usable dentry
 *
 * Decodes @fid via the filesystem's export operations, reconnects the
 * result (or its parent) to the dentry tree where needed, and returns a
 * dentry that satisfies @acceptable.  On failure returns an ERR_PTR
 * (-ESTALE, -EACCES, or an error from the export ops).
 */
struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
		int fh_len, int fileid_type,
		int (*acceptable)(void *, struct dentry *), void *context)
{
	const struct export_operations *nop = mnt->mnt_sb->s_export_op;
	struct dentry *result, *alias;
	char nbuf[NAME_MAX+1];
	int err;

	/*
	 * Try to get any dentry for the given file handle from the filesystem.
	 */
	if (!nop || !nop->fh_to_dentry)
		return ERR_PTR(-ESTALE);
	result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
	/* Normalise a NULL return into -ESTALE before the IS_ERR check. */
	if (!result)
		result = ERR_PTR(-ESTALE);
	if (IS_ERR(result))
		return result;

	if (S_ISDIR(result->d_inode->i_mode)) {
		/*
		 * This request is for a directory.
		 *
		 * On the positive side there is only one dentry for each
		 * directory inode. On the negative side this implies that we
		 * to ensure our dentry is connected all the way up to the
		 * filesystem root.
		 */
		if (result->d_flags & DCACHE_DISCONNECTED) {
			err = reconnect_path(mnt, result, nbuf);
			if (err)
				goto err_result;
		}

		if (!acceptable(context, result)) {
			err = -EACCES;
			goto err_result;
		}

		return result;
	} else {
		/*
		 * It's not a directory. Life is a little more complicated.
		 */
		struct dentry *target_dir, *nresult;

		/*
		 * See if either the dentry we just got from the filesystem
		 * or any alias for it is acceptable. This is always true
		 * if this filesystem is exported without the subtreecheck
		 * option. If the filesystem is exported with the subtree
		 * check option there's a fair chance we need to look at
		 * the parent directory in the file handle and make sure
		 * it's connected to the filesystem root.
		 */
		alias = find_acceptable_alias(result, acceptable, context);
		if (alias)
			return alias;

		/*
		 * Try to extract a dentry for the parent directory from the
		 * file handle. If this fails we'll have to give up.
		 */
		err = -ESTALE;
		if (!nop->fh_to_parent)
			goto err_result;

		target_dir = nop->fh_to_parent(mnt->mnt_sb, fid,
				fh_len, fileid_type);
		if (!target_dir)
			goto err_result;
		err = PTR_ERR(target_dir);
		if (IS_ERR(target_dir))
			goto err_result;

		/*
		 * And as usual we need to make sure the parent directory is
		 * connected to the filesystem root. The VFS really doesn't
		 * like disconnected directories..
		 */
		err = reconnect_path(mnt, target_dir, nbuf);
		if (err) {
			dput(target_dir);
			goto err_result;
		}

		/*
		 * Now that we've got both a well-connected parent and a
		 * dentry for the inode we're after, make sure that our
		 * inode is actually connected to the parent.
		 */
		err = exportfs_get_name(mnt, target_dir, nbuf, result);
		if (!err) {
			mutex_lock(&target_dir->d_inode->i_mutex);
			nresult = lookup_one_len(nbuf, target_dir,
					strlen(nbuf));
			mutex_unlock(&target_dir->d_inode->i_mutex);
			if (!IS_ERR(nresult)) {
				/* Prefer the connected dentry from the
				 * lookup over the disconnected original. */
				if (nresult->d_inode) {
					dput(result);
					result = nresult;
				} else
					dput(nresult);
			}
		}

		/*
		 * At this point we are done with the parent, but it's pinned
		 * by the child dentry anyway.
		 */
		dput(target_dir);

		/*
		 * And finally make sure the dentry is actually acceptable
		 * to NFSD.
		 */
		alias = find_acceptable_alias(result, acceptable, context);
		if (!alias) {
			err = -EACCES;
			goto err_result;
		}

		return alias;
	}

 err_result:
	dput(result);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(exportfs_decode_fh);
MODULE_LICENSE("GPL");
| gpl-2.0 |
1N4148/android_kernel_golden | drivers/net/phy/davicom.c | 8151 | 5160 | /*
* drivers/net/phy/davicom.c
*
* Driver for Davicom PHYs
*
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#define MII_DM9161_SCR 0x10
#define MII_DM9161_SCR_INIT 0x0610
#define MII_DM9161_SCR_RMII 0x0100
/* DM9161 Interrupt Register */
#define MII_DM9161_INTR 0x15
#define MII_DM9161_INTR_PEND 0x8000
#define MII_DM9161_INTR_DPLX_MASK 0x0800
#define MII_DM9161_INTR_SPD_MASK 0x0400
#define MII_DM9161_INTR_LINK_MASK 0x0200
#define MII_DM9161_INTR_MASK 0x0100
#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
#define MII_DM9161_INTR_SPD_CHANGE 0x0008
#define MII_DM9161_INTR_LINK_CHANGE 0x0004
#define MII_DM9161_INTR_INIT 0x0000
#define MII_DM9161_INTR_STOP \
(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
| MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
/* DM9161 10BT Configuration/Status */
#define MII_DM9161_10BTCSR 0x12
#define MII_DM9161_10BTCSR_INIT 0x7800
MODULE_DESCRIPTION("Davicom PHY driver");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
#define DM9161_DELAY 1
/*
 * Enable or disable DM9161 interrupt generation according to
 * phydev->interrupts, using a read-modify-write of the interrupt
 * register's mask bits (MII_DM9161_INTR_STOP; active-low enables).
 * Returns 0 or a negative MDIO error.
 */
static int dm9161_config_intr(struct phy_device *phydev)
{
	int reg = phy_read(phydev, MII_DM9161_INTR);

	if (reg < 0)
		return reg;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
		reg &= ~MII_DM9161_INTR_STOP;
	else
		reg |= MII_DM9161_INTR_STOP;

	return phy_write(phydev, MII_DM9161_INTR, reg);
}
/*
 * Configure autonegotiation: isolate the PHY from the MII bus first,
 * then program the advertisement via the generic helper.  Returns 0 or
 * a negative MDIO error.
 */
static int dm9161_config_aneg(struct phy_device *phydev)
{
	int ret;

	/* Isolate the PHY while reconfiguring. */
	ret = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
	if (ret < 0)
		return ret;

	/* Apply the new autonegotiation settings. */
	ret = genphy_config_aneg(phydev);
	if (ret < 0)
		return ret;

	return 0;
}
/*
 * One-time PHY initialisation: isolate the device, program the
 * specified-configuration register for the selected MII/RMII interface
 * mode, reset the 10BT configuration to its defaults, and finally
 * re-enable the PHY with autonegotiation on.  Returns 0 or a negative
 * error (-EINVAL for unsupported interface modes, or an MDIO error).
 */
static int dm9161_config_init(struct phy_device *phydev)
{
	int err, temp;

	/* Isolate the PHY */
	err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
	if (err < 0)
		return err;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_MII:
		temp = MII_DM9161_SCR_INIT;
		break;
	case PHY_INTERFACE_MODE_RMII:
		/* RMII mode needs the extra SCR_RMII bit set. */
		temp = MII_DM9161_SCR_INIT | MII_DM9161_SCR_RMII;
		break;
	default:
		return -EINVAL;
	}

	/* Do not bypass the scrambler/descrambler */
	err = phy_write(phydev, MII_DM9161_SCR, temp);
	if (err < 0)
		return err;

	/* Clear 10BTCSR to default */
	err = phy_write(phydev, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT);
	if (err < 0)
		return err;

	/* Reconnect the PHY, and enable Autonegotiation */
	err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
	if (err < 0)
		return err;

	return 0;
}
/*
 * Acknowledge a pending interrupt.  Reading MII_DM9161_INTR clears the
 * pending state; the value itself is discarded.  Returns 0 on success
 * or the negative MDIO read error.
 */
static int dm9161_ack_interrupt(struct phy_device *phydev)
{
	int ret = phy_read(phydev, MII_DM9161_INTR);

	if (ret < 0)
		return ret;

	return 0;
}
/* DM9161E: basic features, custom init/aneg, polled (no interrupt ops). */
static struct phy_driver dm9161e_driver = {
	.phy_id		= 0x0181b880,
	.name		= "Davicom DM9161E",
	.phy_id_mask	= 0x0ffffff0,
	.features	= PHY_BASIC_FEATURES,
	.config_init	= dm9161_config_init,
	.config_aneg	= dm9161_config_aneg,
	.read_status	= genphy_read_status,
	.driver		= { .owner = THIS_MODULE,},
};

/* DM9161A: identical configuration to the DM9161E, different PHY ID. */
static struct phy_driver dm9161a_driver = {
	.phy_id		= 0x0181b8a0,
	.name		= "Davicom DM9161A",
	.phy_id_mask	= 0x0ffffff0,
	.features	= PHY_BASIC_FEATURES,
	.config_init	= dm9161_config_init,
	.config_aneg	= dm9161_config_aneg,
	.read_status	= genphy_read_status,
	.driver		= { .owner = THIS_MODULE,},
};

/* DM9131: generic aneg, but supports interrupts via the DM9161 ops. */
static struct phy_driver dm9131_driver = {
	.phy_id		= 0x00181b80,
	.name		= "Davicom DM9131",
	.phy_id_mask	= 0x0ffffff0,
	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.ack_interrupt	= dm9161_ack_interrupt,
	.config_intr	= dm9161_config_intr,
	.driver		= { .owner = THIS_MODULE,},
};
/*
 * Module init: register the three PHY drivers, unwinding any earlier
 * registrations (in reverse order) if a later one fails.
 */
static int __init davicom_init(void)
{
	int ret;

	ret = phy_driver_register(&dm9161e_driver);
	if (ret)
		goto err1;

	ret = phy_driver_register(&dm9161a_driver);
	if (ret)
		goto err2;

	ret = phy_driver_register(&dm9131_driver);
	if (ret)
		goto err3;
	return 0;

 err3:
	phy_driver_unregister(&dm9161a_driver);
 err2:
	phy_driver_unregister(&dm9161e_driver);
 err1:
	return ret;
}
/* Module exit: unregister all three PHY drivers. */
static void __exit davicom_exit(void)
{
	phy_driver_unregister(&dm9161e_driver);
	phy_driver_unregister(&dm9161a_driver);
	phy_driver_unregister(&dm9131_driver);
}

module_init(davicom_init);
module_exit(davicom_exit);
/* PHY ID/mask pairs this module supports, for MDIO-bus auto-loading. */
static struct mdio_device_id __maybe_unused davicom_tbl[] = {
	{ 0x0181b880, 0x0ffffff0 },	/* DM9161E */
	{ 0x0181b8a0, 0x0ffffff0 },	/* DM9161A */
	{ 0x00181b80, 0x0ffffff0 },	/* DM9131 */
	{ }
};

MODULE_DEVICE_TABLE(mdio, davicom_tbl);
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.