repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
ahmedammar/linux-xlnx | fs/squashfs/zlib_wrapper.c | 10109 | 3556 | /*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
* Phillip Lougher <phillip@squashfs.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* zlib_wrapper.c
*/
#include <linux/mutex.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/vmalloc.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
static void *zlib_init(struct squashfs_sb_info *dummy, void *buff, int len)
{
z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
if (stream == NULL)
goto failed;
stream->workspace = vmalloc(zlib_inflate_workspacesize());
if (stream->workspace == NULL)
goto failed;
return stream;
failed:
ERROR("Failed to allocate zlib workspace\n");
kfree(stream);
return ERR_PTR(-ENOMEM);
}
static void zlib_free(void *strm)
{
z_stream *stream = strm;
if (stream)
vfree(stream->workspace);
kfree(stream);
}
static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
struct buffer_head **bh, int b, int offset, int length, int srclength,
int pages)
{
int zlib_err, zlib_init = 0;
int k = 0, page = 0;
z_stream *stream = msblk->stream;
mutex_lock(&msblk->read_data_mutex);
stream->avail_out = 0;
stream->avail_in = 0;
do {
if (stream->avail_in == 0 && k < b) {
int avail = min(length, msblk->devblksize - offset);
length -= avail;
wait_on_buffer(bh[k]);
if (!buffer_uptodate(bh[k]))
goto release_mutex;
stream->next_in = bh[k]->b_data + offset;
stream->avail_in = avail;
offset = 0;
}
if (stream->avail_out == 0 && page < pages) {
stream->next_out = buffer[page++];
stream->avail_out = PAGE_CACHE_SIZE;
}
if (!zlib_init) {
zlib_err = zlib_inflateInit(stream);
if (zlib_err != Z_OK) {
ERROR("zlib_inflateInit returned unexpected "
"result 0x%x, srclength %d\n",
zlib_err, srclength);
goto release_mutex;
}
zlib_init = 1;
}
zlib_err = zlib_inflate(stream, Z_SYNC_FLUSH);
if (stream->avail_in == 0 && k < b)
put_bh(bh[k++]);
} while (zlib_err == Z_OK);
if (zlib_err != Z_STREAM_END) {
ERROR("zlib_inflate error, data probably corrupt\n");
goto release_mutex;
}
zlib_err = zlib_inflateEnd(stream);
if (zlib_err != Z_OK) {
ERROR("zlib_inflate error, data probably corrupt\n");
goto release_mutex;
}
if (k < b) {
ERROR("zlib_uncompress error, data remaining\n");
goto release_mutex;
}
length = stream->total_out;
mutex_unlock(&msblk->read_data_mutex);
return length;
release_mutex:
mutex_unlock(&msblk->read_data_mutex);
for (; k < b; k++)
put_bh(bh[k]);
return -EIO;
}
const struct squashfs_decompressor squashfs_zlib_comp_ops = {
.init = zlib_init,
.free = zlib_free,
.decompress = zlib_uncompress,
.id = ZLIB_COMPRESSION,
.name = "zlib",
.supported = 1
};
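/*
 * Editor's illustrative sketch (not part of the original file): how a
 * caller would typically drive the ops table above. In the real filesystem
 * the init/free calls happen at mount/unmount time and decompress runs once
 * per compressed block; this hypothetical round-trip wrapper only shows the
 * calling convention.
 */
static inline int squashfs_zlib_example_round_trip(
	struct squashfs_sb_info *msblk, void **buffer,
	struct buffer_head **bh, int b, int offset, int length,
	int srclength, int pages)
{
	const struct squashfs_decompressor *ops = &squashfs_zlib_comp_ops;
	void *strm = ops->init(msblk, NULL, 0); /* z_stream + inflate workspace */
	int n;

	if (IS_ERR(strm))
		return PTR_ERR(strm);
	msblk->stream = strm;
	n = ops->decompress(msblk, buffer, bh, b, offset, length,
			srclength, pages); /* bytes produced, or -EIO */
	ops->free(strm);
	return n;
}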
| gpl-2.0 |
Surge1223/android_kernel_moto_shamu | arch/mips/math-emu/dp_flong.c | 10365 | 1886 | /* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754dp.h"
ieee754dp ieee754dp_flong(s64 x)
{
u64 xm;
int xe;
int xs;
CLEARCX;
if (x == 0)
return ieee754dp_zero(0);
if (x == 1 || x == -1)
return ieee754dp_one(x < 0);
if (x == 10 || x == -10)
return ieee754dp_ten(x < 0);
xs = (x < 0);
if (xs) {
if (x == (1ULL << 63))
xm = (1ULL << 63); /* max neg can't be safely negated */
else
xm = -x;
} else {
xm = x;
}
/* normalize */
xe = DP_MBITS + 3;
if (xm >> (DP_MBITS + 1 + 3)) {
/* shunt out overflow bits */
while (xm >> (DP_MBITS + 1 + 3)) {
XDPSRSX1();
}
} else {
/* normalize in grs extended double precision */
while ((xm >> (DP_MBITS + 3)) == 0) {
xm <<= 1;
xe--;
}
}
DPNORMRET1(xs, xe, xm, "dp_flong", x);
}
ieee754dp ieee754dp_fulong(u64 u)
{
if ((s64) u < 0)
return ieee754dp_add(ieee754dp_1e63(),
ieee754dp_flong(u & ~(1ULL << 63)));
return ieee754dp_flong(u);
}
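/*
 * Editor's worked example (illustration only, not in the original file):
 * for u = (1ULL << 63) + 5 the cast (s64)u is negative, so the unsigned
 * conversion above is evaluated as
 *
 *	ieee754dp_1e63() + ieee754dp_flong(5) = 2^63 + 5.0
 *
 * i.e. the top bit is peeled off and re-added as the exact constant 2^63,
 * instead of letting the signed conversion misread u as a negative value.
 */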
| gpl-2.0 |
Jackeagle/CAF-Kernel | drivers/input/serio/hp_sdc_mlc.c | 12925 | 9419 | /*
* Access to HP-HIL MLC through HP System Device Controller.
*
* Copyright (c) 2001 Brian S. Julin
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL").
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
* References:
* HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A
* System Device Controller Microprocessor Firmware Theory of Operation
* for Part Number 1820-4784 Revision B. Dwg No. A-1820-4784-2
*
*/
#include <linux/hil_mlc.h>
#include <linux/hp_sdc.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/semaphore.h>
#define PREFIX "HP SDC MLC: "
static hil_mlc hp_sdc_mlc;
MODULE_AUTHOR("Brian S. Julin <bri@calyx.com>");
MODULE_DESCRIPTION("Glue for onboard HIL MLC in HP-PARISC machines");
MODULE_LICENSE("Dual BSD/GPL");
static struct hp_sdc_mlc_priv_s {
int emtestmode;
hp_sdc_transaction trans;
u8 tseq[16];
int got5x;
} hp_sdc_mlc_priv;
/************************* Interrupt context ******************************/
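/*
 * Editor's note (not in the original driver): the ISR below reassembles
 * HIL packets from the SDC byte stream. A "5x" status byte supplies the
 * loop address nibble (remembered via got5x); subsequent HILDATA bytes are
 * merged into mlc->ipacket[] at idx = 15 - icount until imatch is seen or
 * an error byte finishes the transaction and wakes the waiter through
 * up(&mlc->isem).
 */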
static void hp_sdc_mlc_isr (int irq, void *dev_id,
uint8_t status, uint8_t data)
{
int idx;
hil_mlc *mlc = &hp_sdc_mlc;
write_lock(&mlc->lock);
if (mlc->icount < 0) {
printk(KERN_WARNING PREFIX "HIL Overflow!\n");
up(&mlc->isem);
goto out;
}
idx = 15 - mlc->icount;
if ((status & HP_SDC_STATUS_IRQMASK) == HP_SDC_STATUS_HILDATA) {
mlc->ipacket[idx] |= data | HIL_ERR_INT;
mlc->icount--;
if (hp_sdc_mlc_priv.got5x || !idx)
goto check;
if ((mlc->ipacket[idx - 1] & HIL_PKT_ADDR_MASK) !=
(mlc->ipacket[idx] & HIL_PKT_ADDR_MASK)) {
mlc->ipacket[idx] &= ~HIL_PKT_ADDR_MASK;
mlc->ipacket[idx] |= (mlc->ipacket[idx - 1]
& HIL_PKT_ADDR_MASK);
}
goto check;
}
/* We know status is 5X */
if (data & HP_SDC_HIL_ISERR)
goto err;
mlc->ipacket[idx] =
(data & HP_SDC_HIL_R1MASK) << HIL_PKT_ADDR_SHIFT;
hp_sdc_mlc_priv.got5x = 1;
goto out;
check:
hp_sdc_mlc_priv.got5x = 0;
if (mlc->imatch == 0)
goto done;
if ((mlc->imatch == (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_POL))
&& (mlc->ipacket[idx] == (mlc->imatch | idx)))
goto done;
if (mlc->ipacket[idx] == mlc->imatch)
goto done;
goto out;
err:
printk(KERN_DEBUG PREFIX "err code %x\n", data);
switch (data) {
case HP_SDC_HIL_RC_DONE:
printk(KERN_WARNING PREFIX "Bastard SDC reconfigured loop!\n");
break;
case HP_SDC_HIL_ERR:
mlc->ipacket[idx] |= HIL_ERR_INT | HIL_ERR_PERR |
HIL_ERR_FERR | HIL_ERR_FOF;
break;
case HP_SDC_HIL_TO:
mlc->ipacket[idx] |= HIL_ERR_INT | HIL_ERR_LERR;
break;
case HP_SDC_HIL_RC:
printk(KERN_WARNING PREFIX "Bastard SDC decided to reconfigure loop!\n");
break;
default:
printk(KERN_WARNING PREFIX "Unknown HIL Error status (%x)!\n", data);
break;
}
/* No more data will be coming due to an error. */
done:
tasklet_schedule(mlc->tasklet);
up(&mlc->isem);
out:
write_unlock(&mlc->lock);
}
/******************** Tasklet or userspace context functions ****************/
static int hp_sdc_mlc_in(hil_mlc *mlc, suseconds_t timeout)
{
struct hp_sdc_mlc_priv_s *priv;
int rc = 2;
priv = mlc->priv;
/* Try to down the semaphore */
if (down_trylock(&mlc->isem)) {
struct timeval tv;
if (priv->emtestmode) {
mlc->ipacket[0] =
HIL_ERR_INT | (mlc->opacket &
(HIL_PKT_CMD |
HIL_PKT_ADDR_MASK |
HIL_PKT_DATA_MASK));
mlc->icount = 14;
/* printk(KERN_DEBUG PREFIX ">[%x]\n", mlc->ipacket[0]); */
goto wasup;
}
do_gettimeofday(&tv);
tv.tv_usec += USEC_PER_SEC * (tv.tv_sec - mlc->instart.tv_sec);
if (tv.tv_usec - mlc->instart.tv_usec > mlc->intimeout) {
/* printk("!%i %i",
tv.tv_usec - mlc->instart.tv_usec,
mlc->intimeout);
*/
rc = 1;
up(&mlc->isem);
}
goto done;
}
wasup:
up(&mlc->isem);
rc = 0;
done:
return rc;
}
static int hp_sdc_mlc_cts(hil_mlc *mlc)
{
struct hp_sdc_mlc_priv_s *priv;
priv = mlc->priv;
/* Try to down the semaphores -- they should be up. */
BUG_ON(down_trylock(&mlc->isem));
BUG_ON(down_trylock(&mlc->osem));
up(&mlc->isem);
up(&mlc->osem);
if (down_trylock(&mlc->csem)) {
if (priv->trans.act.semaphore != &mlc->csem)
goto poll;
else
goto busy;
}
if (!(priv->tseq[4] & HP_SDC_USE_LOOP))
goto done;
poll:
priv->trans.act.semaphore = &mlc->csem;
priv->trans.actidx = 0;
priv->trans.idx = 1;
priv->trans.endidx = 5;
priv->tseq[0] =
HP_SDC_ACT_POSTCMD | HP_SDC_ACT_DATAIN | HP_SDC_ACT_SEMAPHORE;
priv->tseq[1] = HP_SDC_CMD_READ_USE;
priv->tseq[2] = 1;
priv->tseq[3] = 0;
priv->tseq[4] = 0;
__hp_sdc_enqueue_transaction(&priv->trans);
busy:
return 1;
done:
priv->trans.act.semaphore = &mlc->osem;
up(&mlc->csem);
return 0;
}
static void hp_sdc_mlc_out(hil_mlc *mlc)
{
struct hp_sdc_mlc_priv_s *priv;
priv = mlc->priv;
/* Try to down the semaphore -- it should be up. */
BUG_ON(down_trylock(&mlc->osem));
if (mlc->opacket & HIL_DO_ALTER_CTRL)
goto do_control;
do_data:
if (priv->emtestmode) {
up(&mlc->osem);
return;
}
/* Shouldn't be sending commands when loop may be busy */
BUG_ON(down_trylock(&mlc->csem));
up(&mlc->csem);
priv->trans.actidx = 0;
priv->trans.idx = 1;
priv->trans.act.semaphore = &mlc->osem;
priv->trans.endidx = 6;
priv->tseq[0] =
HP_SDC_ACT_DATAREG | HP_SDC_ACT_POSTCMD | HP_SDC_ACT_SEMAPHORE;
priv->tseq[1] = 0x7;
priv->tseq[2] =
(mlc->opacket &
(HIL_PKT_ADDR_MASK | HIL_PKT_CMD))
>> HIL_PKT_ADDR_SHIFT;
priv->tseq[3] =
(mlc->opacket & HIL_PKT_DATA_MASK)
>> HIL_PKT_DATA_SHIFT;
priv->tseq[4] = 0; /* No timeout */
if (priv->tseq[3] == HIL_CMD_DHR)
priv->tseq[4] = 1;
priv->tseq[5] = HP_SDC_CMD_DO_HIL;
goto enqueue;
do_control:
priv->emtestmode = mlc->opacket & HIL_CTRL_TEST;
/* we cannot emulate this, it should not be used. */
BUG_ON((mlc->opacket & (HIL_CTRL_APE | HIL_CTRL_IPF)) == HIL_CTRL_APE);
if ((mlc->opacket & HIL_CTRL_ONLY) == HIL_CTRL_ONLY)
goto control_only;
/* Should not send command/data after engaging APE */
BUG_ON(mlc->opacket & HIL_CTRL_APE);
/* Disengaging APE this way would not be valid either since
* the loop must be allowed to idle.
*
* So, it works out that we really never actually send control
* and data when using SDC, we just send the data.
*/
goto do_data;
control_only:
priv->trans.actidx = 0;
priv->trans.idx = 1;
priv->trans.act.semaphore = &mlc->osem;
priv->trans.endidx = 4;
priv->tseq[0] =
HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAOUT | HP_SDC_ACT_SEMAPHORE;
priv->tseq[1] = HP_SDC_CMD_SET_LPC;
priv->tseq[2] = 1;
/* priv->tseq[3] = (mlc->ddc + 1) | HP_SDC_LPS_ACSUCC; */
priv->tseq[3] = 0;
if (mlc->opacket & HIL_CTRL_APE) {
priv->tseq[3] |= HP_SDC_LPC_APE_IPF;
BUG_ON(down_trylock(&mlc->csem));
}
enqueue:
hp_sdc_enqueue_transaction(&priv->trans);
}
static int __init hp_sdc_mlc_init(void)
{
hil_mlc *mlc = &hp_sdc_mlc;
int err;
#ifdef __mc68000__
if (!MACH_IS_HP300)
return -ENODEV;
#endif
printk(KERN_INFO PREFIX "Registering the System Domain Controller's HIL MLC.\n");
hp_sdc_mlc_priv.emtestmode = 0;
hp_sdc_mlc_priv.trans.seq = hp_sdc_mlc_priv.tseq;
hp_sdc_mlc_priv.trans.act.semaphore = &mlc->osem;
hp_sdc_mlc_priv.got5x = 0;
mlc->cts = &hp_sdc_mlc_cts;
mlc->in = &hp_sdc_mlc_in;
mlc->out = &hp_sdc_mlc_out;
mlc->priv = &hp_sdc_mlc_priv;
err = hil_mlc_register(mlc);
if (err) {
printk(KERN_WARNING PREFIX "Failed to register MLC structure with hil_mlc\n");
return err;
}
if (hp_sdc_request_hil_irq(&hp_sdc_mlc_isr)) {
printk(KERN_WARNING PREFIX "Request for raw HIL ISR hook denied\n");
if (hil_mlc_unregister(mlc))
printk(KERN_ERR PREFIX "Failed to unregister MLC structure with hil_mlc.\n"
"This is bad. Could cause an oops.\n");
return -EBUSY;
}
return 0;
}
static void __exit hp_sdc_mlc_exit(void)
{
hil_mlc *mlc = &hp_sdc_mlc;
if (hp_sdc_release_hil_irq(&hp_sdc_mlc_isr))
printk(KERN_ERR PREFIX "Failed to release the raw HIL ISR hook.\n"
"This is bad. Could cause an oops.\n");
if (hil_mlc_unregister(mlc))
printk(KERN_ERR PREFIX "Failed to unregister MLC structure with hil_mlc.\n"
"This is bad. Could cause an oops.\n");
}
module_init(hp_sdc_mlc_init);
module_exit(hp_sdc_mlc_exit);
| gpl-2.0 |
TeamCarbonXtremeARMv7/android_kernel_samsung_golden | drivers/lguest/hypercalls.c | 13181 | 9535 | /*P:500
* Just as userspace programs request kernel operations through a system
* call, the Guest requests Host operations through a "hypercall". You might
* notice this nomenclature doesn't really follow any logic, but the name has
* been around for long enough that we're stuck with it. As you'd expect, this
code is basically one big switch statement.
:*/
/* Copyright (C) 2006 Rusty Russell IBM Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/ktime.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include "lg.h"
/*H:120
* This is the core hypercall routine: where the Guest gets what it wants.
* Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both.
*/
static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
{
switch (args->arg0) {
case LHCALL_FLUSH_ASYNC:
/*
* This call does nothing, except by breaking out of the Guest
* it makes us process all the asynchronous hypercalls.
*/
break;
case LHCALL_SEND_INTERRUPTS:
/*
* This call does nothing too, but by breaking out of the Guest
* it makes us process any pending interrupts.
*/
break;
case LHCALL_LGUEST_INIT:
/*
* You can't get here unless you're already initialized. Don't
* do that.
*/
kill_guest(cpu, "already have lguest_data");
break;
case LHCALL_SHUTDOWN: {
char msg[128];
/*
* Shutdown is such a trivial hypercall that we do it in five
* lines right here.
*
* If the lgread fails, it will call kill_guest() itself; the
* kill_guest() with the message will be ignored.
*/
__lgread(cpu, msg, args->arg1, sizeof(msg));
msg[sizeof(msg)-1] = '\0';
kill_guest(cpu, "CRASH: %s", msg);
if (args->arg2 == LGUEST_SHUTDOWN_RESTART)
cpu->lg->dead = ERR_PTR(-ERESTART);
break;
}
case LHCALL_FLUSH_TLB:
/* FLUSH_TLB comes in two flavors, depending on the argument: */
if (args->arg1)
guest_pagetable_clear_all(cpu);
else
guest_pagetable_flush_user(cpu);
break;
/*
* All these calls simply pass the arguments through to the right
* routines.
*/
case LHCALL_NEW_PGTABLE:
guest_new_pagetable(cpu, args->arg1);
break;
case LHCALL_SET_STACK:
guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
break;
case LHCALL_SET_PTE:
#ifdef CONFIG_X86_PAE
guest_set_pte(cpu, args->arg1, args->arg2,
__pte(args->arg3 | (u64)args->arg4 << 32));
#else
guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
#endif
break;
case LHCALL_SET_PGD:
guest_set_pgd(cpu->lg, args->arg1, args->arg2);
break;
#ifdef CONFIG_X86_PAE
case LHCALL_SET_PMD:
guest_set_pmd(cpu->lg, args->arg1, args->arg2);
break;
#endif
case LHCALL_SET_CLOCKEVENT:
guest_set_clockevent(cpu, args->arg1);
break;
case LHCALL_TS:
/* This sets the TS flag, as we saw used in run_guest(). */
cpu->ts = args->arg1;
break;
case LHCALL_HALT:
/* Similarly, this sets the halted flag for run_guest(). */
cpu->halted = 1;
break;
case LHCALL_NOTIFY:
cpu->pending_notify = args->arg1;
break;
default:
/* It should be an architecture-specific hypercall. */
if (lguest_arch_do_hcall(cpu, args))
kill_guest(cpu, "Bad hypercall %li\n", args->arg0);
}
}
/*H:124
* Asynchronous hypercalls are easy: we just look in the array in the
* Guest's "struct lguest_data" to see if any new ones are marked "ready".
*
* We are careful to do these in order: obviously we respect the order the
* Guest put them in the ring, but we also promise the Guest that they will
* happen before any normal hypercall (which is why we check this before
* checking for a normal hcall).
*/
static void do_async_hcalls(struct lg_cpu *cpu)
{
unsigned int i;
u8 st[LHCALL_RING_SIZE];
/* For simplicity, we copy the entire call status array in at once. */
if (copy_from_user(&st, &cpu->lg->lguest_data->hcall_status, sizeof(st)))
return;
/* We process "struct lguest_data"s hcalls[] ring once. */
for (i = 0; i < ARRAY_SIZE(st); i++) {
struct hcall_args args;
/*
* We remember where we were up to from last time. This makes
* sure that the hypercalls are done in the order the Guest
* places them in the ring.
*/
unsigned int n = cpu->next_hcall;
/* 0xFF means there's no call here (yet). */
if (st[n] == 0xFF)
break;
/*
* OK, we have a hypercall. Increment the "next_hcall" cursor,
* and wrap back to 0 if we reach the end.
*/
if (++cpu->next_hcall == LHCALL_RING_SIZE)
cpu->next_hcall = 0;
/*
* Copy the hypercall arguments into a local copy of the
* hcall_args struct.
*/
if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n],
sizeof(struct hcall_args))) {
kill_guest(cpu, "Fetching async hypercalls");
break;
}
/* Do the hypercall, same as a normal one. */
do_hcall(cpu, &args);
/* Mark the hypercall done. */
if (put_user(0xFF, &cpu->lg->lguest_data->hcall_status[n])) {
kill_guest(cpu, "Writing result for async hypercall");
break;
}
/*
* Stop doing hypercalls if they want to notify the Launcher:
* it needs to service this first.
*/
if (cpu->pending_notify)
break;
}
}
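/*
 * Editor's illustrative sketch (not part of this file): the Guest-side
 * producer that fills the ring consumed above. It follows the pattern of
 * the Guest's async_hcall() in arch/x86/lguest/boot.c; the details are a
 * hedged reconstruction, quoted as a comment because the symbols live on
 * the Guest side:
 *
 *	static unsigned int next_call;
 *	if (lguest_data.hcall_status[next_call] != 0xFF)
 *		hcall(call, arg1, arg2, arg3);	(ring full: go synchronous)
 *	else {
 *		lguest_data.hcalls[next_call] = args;
 *		wmb();		(arguments land before the ready mark)
 *		lguest_data.hcall_status[next_call] = 0;
 *		if (++next_call == LHCALL_RING_SIZE)
 *			next_call = 0;
 *	}
 */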
/*
* Last of all, we look at what happens first of all. The very first time the
* Guest makes a hypercall, we end up here to set things up:
*/
static void initialize(struct lg_cpu *cpu)
{
/*
* You can't do anything until you're initialized. The Guest knows the
* rules, so we're unforgiving here.
*/
if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) {
kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0);
return;
}
if (lguest_arch_init_hypercalls(cpu))
kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
/*
* The Guest tells us where we're not to deliver interrupts by putting
* the range of addresses into "struct lguest_data".
*/
if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
|| get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
/*
* We write the current time into the Guest's data page once so it can
* set its clock.
*/
write_timestamp(cpu);
/* page_tables.c will also do some setup. */
page_table_guest_data_init(cpu);
/*
* This is the one case where the above accesses might have been the
* first write to a Guest page. This may have caused a copy-on-write
* fault, but the old page might be (read-only) in the Guest
* pagetable.
*/
guest_pagetable_clear_all(cpu);
}
/*:*/
/*M:013
* If a Guest reads from a page (so creates a mapping) that it has never
* written to, and then the Launcher writes to it (ie. the output of a virtual
* device), the Guest will still see the old page. In practice, this never
* happens: why would the Guest read a page which it has never written to? But
* a similar scenario might one day bite us, so it's worth mentioning.
*
* Note that if we used a shared anonymous mapping in the Launcher instead of
* mapping /dev/zero private, we wouldn't worry about copy-on-write. And we
* need that to switch the Launcher to processes (away from threads) anyway.
:*/
/*H:100
* Hypercalls
*
* Remember from the Guest, hypercalls come in two flavors: normal and
* asynchronous. This file handles both types.
*/
void do_hypercalls(struct lg_cpu *cpu)
{
/* Not initialized yet? This hypercall must do it. */
if (unlikely(!cpu->lg->lguest_data)) {
/* Set up the "struct lguest_data" */
initialize(cpu);
/* Hcall is done. */
cpu->hcall = NULL;
return;
}
/*
* The Guest has initialized.
*
* Look in the hypercall ring for the async hypercalls:
*/
do_async_hcalls(cpu);
/*
* If we stopped reading the hypercall ring because the Guest did a
* NOTIFY to the Launcher, we want to return now. Otherwise we do
* the hypercall.
*/
if (!cpu->pending_notify) {
do_hcall(cpu, cpu->hcall);
/*
* Tricky point: we reset the hcall pointer to mark the
* hypercall as "done". We use the hcall pointer rather than
* the trap number to indicate a hypercall is pending.
* Normally it doesn't matter: the Guest will run again and
* update the trap number before we come back here.
*
* However, if we are signalled or the Guest sends I/O to the
* Launcher, the run_guest() loop will exit without running the
* Guest. When it comes back it would try to re-run the
* hypercall. Finding that bug sucked.
*/
cpu->hcall = NULL;
}
}
/*
* This routine supplies the Guest with time: it's used for wallclock time at
* initial boot and as a rough time source if the TSC isn't available.
*/
void write_timestamp(struct lg_cpu *cpu)
{
struct timespec now;
ktime_get_real_ts(&now);
if (copy_to_user(&cpu->lg->lguest_data->time,
&now, sizeof(struct timespec)))
kill_guest(cpu, "Writing timestamp");
}
| gpl-2.0 |
fabianbergmark/linux-sctp | fs/gfs2/main.c | 126 | 6076 | /*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfs2_ondisk.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/atomic.h>
#include <linux/mempool.h>
#include "gfs2.h"
#include "incore.h"
#include "super.h"
#include "sys.h"
#include "util.h"
#include "glock.h"
#include "quota.h"
#include "recovery.h"
#include "dir.h"
#include "glops.h"
struct workqueue_struct *gfs2_control_wq;
static void gfs2_init_inode_once(void *foo)
{
struct gfs2_inode *ip = foo;
inode_init_once(&ip->i_inode);
init_rwsem(&ip->i_rw_mutex);
INIT_LIST_HEAD(&ip->i_trunc_list);
ip->i_qadata = NULL;
memset(&ip->i_res, 0, sizeof(ip->i_res));
RB_CLEAR_NODE(&ip->i_res.rs_node);
ip->i_hash_cache = NULL;
}
static void gfs2_init_glock_once(void *foo)
{
struct gfs2_glock *gl = foo;
INIT_HLIST_BL_NODE(&gl->gl_list);
spin_lock_init(&gl->gl_lockref.lock);
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_lru);
INIT_LIST_HEAD(&gl->gl_ail_list);
atomic_set(&gl->gl_ail_count, 0);
atomic_set(&gl->gl_revokes, 0);
}
static void gfs2_init_gl_aspace_once(void *foo)
{
struct gfs2_glock *gl = foo;
struct address_space *mapping = (struct address_space *)(gl + 1);
gfs2_init_glock_once(gl);
address_space_init_once(mapping);
}
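/*
 * Editor's note (not in the original file): constructors handed to
 * kmem_cache_create() run once per object when its slab page is populated,
 * not on every allocation, so the three helpers above may only initialize
 * state that stays valid across free/alloc cycles (list heads, spinlocks,
 * counters), never per-use data.
 */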
/**
* init_gfs2_fs - Register GFS2 as a filesystem
*
* Returns: 0 on success, error code on failure
*/
static int __init init_gfs2_fs(void)
{
int error;
gfs2_str2qstr(&gfs2_qdot, ".");
gfs2_str2qstr(&gfs2_qdotdot, "..");
gfs2_quota_hash_init();
error = gfs2_sys_init();
if (error)
return error;
error = list_lru_init(&gfs2_qd_lru);
if (error)
goto fail_lru;
error = gfs2_glock_init();
if (error)
goto fail;
error = -ENOMEM;
gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
sizeof(struct gfs2_glock),
0, 0,
gfs2_init_glock_once);
if (!gfs2_glock_cachep)
goto fail;
gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
sizeof(struct gfs2_glock) +
sizeof(struct address_space),
0, 0, gfs2_init_gl_aspace_once);
if (!gfs2_glock_aspace_cachep)
goto fail;
gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
sizeof(struct gfs2_inode),
0, SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|
SLAB_ACCOUNT,
gfs2_init_inode_once);
if (!gfs2_inode_cachep)
goto fail;
gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
sizeof(struct gfs2_bufdata),
0, 0, NULL);
if (!gfs2_bufdata_cachep)
goto fail;
gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
sizeof(struct gfs2_rgrpd),
0, 0, NULL);
if (!gfs2_rgrpd_cachep)
goto fail;
gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
sizeof(struct gfs2_quota_data),
0, 0, NULL);
if (!gfs2_quotad_cachep)
goto fail;
gfs2_qadata_cachep = kmem_cache_create("gfs2_qadata",
sizeof(struct gfs2_qadata),
0, 0, NULL);
if (!gfs2_qadata_cachep)
goto fail;
register_shrinker(&gfs2_qd_shrinker);
error = register_filesystem(&gfs2_fs_type);
if (error)
goto fail;
error = register_filesystem(&gfs2meta_fs_type);
if (error)
goto fail_unregister;
error = -ENOMEM;
gfs_recovery_wq = alloc_workqueue("gfs_recovery",
WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
if (!gfs_recovery_wq)
goto fail_wq;
gfs2_control_wq = alloc_workqueue("gfs2_control",
WQ_UNBOUND | WQ_FREEZABLE, 0);
if (!gfs2_control_wq)
goto fail_recovery;
gfs2_freeze_wq = alloc_workqueue("freeze_workqueue", 0, 0);
if (!gfs2_freeze_wq)
goto fail_control;
gfs2_page_pool = mempool_create_page_pool(64, 0);
if (!gfs2_page_pool)
goto fail_freeze;
gfs2_register_debugfs();
pr_info("GFS2 installed\n");
return 0;
fail_freeze:
destroy_workqueue(gfs2_freeze_wq);
fail_control:
destroy_workqueue(gfs2_control_wq);
fail_recovery:
destroy_workqueue(gfs_recovery_wq);
fail_wq:
unregister_filesystem(&gfs2meta_fs_type);
fail_unregister:
unregister_filesystem(&gfs2_fs_type);
fail:
list_lru_destroy(&gfs2_qd_lru);
fail_lru:
unregister_shrinker(&gfs2_qd_shrinker);
gfs2_glock_exit();
if (gfs2_qadata_cachep)
kmem_cache_destroy(gfs2_qadata_cachep);
if (gfs2_quotad_cachep)
kmem_cache_destroy(gfs2_quotad_cachep);
if (gfs2_rgrpd_cachep)
kmem_cache_destroy(gfs2_rgrpd_cachep);
if (gfs2_bufdata_cachep)
kmem_cache_destroy(gfs2_bufdata_cachep);
if (gfs2_inode_cachep)
kmem_cache_destroy(gfs2_inode_cachep);
if (gfs2_glock_aspace_cachep)
kmem_cache_destroy(gfs2_glock_aspace_cachep);
if (gfs2_glock_cachep)
kmem_cache_destroy(gfs2_glock_cachep);
gfs2_sys_uninit();
return error;
}
/**
* exit_gfs2_fs - Unregister the file system
*
*/
static void __exit exit_gfs2_fs(void)
{
unregister_shrinker(&gfs2_qd_shrinker);
gfs2_glock_exit();
gfs2_unregister_debugfs();
unregister_filesystem(&gfs2_fs_type);
unregister_filesystem(&gfs2meta_fs_type);
destroy_workqueue(gfs_recovery_wq);
destroy_workqueue(gfs2_control_wq);
destroy_workqueue(gfs2_freeze_wq);
list_lru_destroy(&gfs2_qd_lru);
rcu_barrier();
mempool_destroy(gfs2_page_pool);
kmem_cache_destroy(gfs2_qadata_cachep);
kmem_cache_destroy(gfs2_quotad_cachep);
kmem_cache_destroy(gfs2_rgrpd_cachep);
kmem_cache_destroy(gfs2_bufdata_cachep);
kmem_cache_destroy(gfs2_inode_cachep);
kmem_cache_destroy(gfs2_glock_aspace_cachep);
kmem_cache_destroy(gfs2_glock_cachep);
gfs2_sys_uninit();
}
MODULE_DESCRIPTION("Global File System");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
module_init(init_gfs2_fs);
module_exit(exit_gfs2_fs);
| gpl-2.0 |
CyanogenMod/android_kernel_toshiba_betelgeuse | drivers/staging/dt3155v4l/dt3155v4l.c | 382 | 29387 | /***************************************************************************
* Copyright (C) 2006-2010 by Marin Mitov *
* mitov@issp.bas.bg *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#include <linux/version.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>
#include "dt3155v4l.h"
#define DT3155_VENDOR_ID 0x8086
#define DT3155_DEVICE_ID 0x1223
/* DT3155_CHUNK_SIZE is 4 MiB (1 << 22): enough for 8 full-size buffers */
#define DT3155_CHUNK_SIZE (1U << 22)
#define DT3155_COH_FLAGS (GFP_KERNEL | GFP_DMA32 | __GFP_COLD | __GFP_NOWARN)
#define DT3155_BUF_SIZE (768 * 576)
#ifdef CONFIG_DT3155_STREAMING
#define DT3155_CAPTURE_METHOD V4L2_CAP_STREAMING
#else
#define DT3155_CAPTURE_METHOD V4L2_CAP_READWRITE
#endif
/* global initializers (for all boards) */
#ifdef CONFIG_DT3155_CCIR
static const u8 csr2_init = VT_50HZ;
#define DT3155_CURRENT_NORM V4L2_STD_625_50
static const unsigned int img_width = 768;
static const unsigned int img_height = 576;
static const unsigned int frames_per_sec = 25;
static const struct v4l2_fmtdesc frame_std[] = {
{
.index = 0,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.flags = 0,
.description = "CCIR/50Hz 8 bits gray",
.pixelformat = V4L2_PIX_FMT_GREY,
},
};
#else
static const u8 csr2_init = VT_60HZ;
#define DT3155_CURRENT_NORM V4L2_STD_525_60
static const unsigned int img_width = 640;
static const unsigned int img_height = 480;
static const unsigned int frames_per_sec = 30;
static const struct v4l2_fmtdesc frame_std[] = {
{
.index = 0,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.flags = 0,
.description = "RS-170/60Hz 8 bits gray",
.pixelformat = V4L2_PIX_FMT_GREY,
},
};
#endif
#define NUM_OF_FORMATS ARRAY_SIZE(frame_std)
static u8 config_init = ACQ_MODE_EVEN;
/**
* read_i2c_reg - reads an internal i2c register
*
* @addr: dt3155 mmio base address
* @index: index (internal address) of register to read
* @data: pointer to byte the read data will be placed in
*
* returns: zero on success or error code
*
* This function starts reading the specified (by index) register
* and busy waits for the process to finish. The result is placed
* in a byte pointed by data.
*/
static int
read_i2c_reg(void __iomem *addr, u8 index, u8 *data)
{
u32 tmp = index;
iowrite32((tmp<<17) | IIC_READ, addr + IIC_CSR2);
mmiowb();
udelay(45); /* wait at least 43 usec for NEW_CYCLE to clear */
if (ioread32(addr + IIC_CSR2) & NEW_CYCLE) {
/* error: NEW_CYCLE not cleared */
printk(KERN_ERR "dt3155: NEW_CYCLE not cleared\n");
return -EIO;
}
tmp = ioread32(addr + IIC_CSR1);
if (tmp & DIRECT_ABORT) {
/* error: DIRECT_ABORT set */
printk(KERN_ERR "dt3155: DIRECT_ABORT set\n");
/* reset DIRECT_ABORT bit */
iowrite32(DIRECT_ABORT, addr + IIC_CSR1);
return -EIO;
}
*data = tmp>>24;
return 0;
}
/**
* write_i2c_reg - writes to an internal i2c register
*
* @addr: dt3155 mmio base address
* @index: index (internal address) of the register to write
* @data: data to be written
*
* returns: zero on success or error code
*
* This function starts writing to the specified (by index) register
* and busy waits for the process to finish.
*/
static int
write_i2c_reg(void __iomem *addr, u8 index, u8 data)
{
u32 tmp = index;
iowrite32((tmp<<17) | IIC_WRITE | data, addr + IIC_CSR2);
mmiowb();
udelay(65); /* wait at least 63 usec for NEW_CYCLE to clear */
if (ioread32(addr + IIC_CSR2) & NEW_CYCLE) {
/* error: NEW_CYCLE not cleared */
printk(KERN_ERR "dt3155: NEW_CYCLE not cleared\n");
return -EIO;
}
if (ioread32(addr + IIC_CSR1) & DIRECT_ABORT) {
/* error: DIRECT_ABORT set */
printk(KERN_ERR "dt3155: DIRECT_ABORT set\n");
/* reset DIRECT_ABORT bit */
iowrite32(DIRECT_ABORT, addr + IIC_CSR1);
return -EIO;
}
return 0;
}
/**
* write_i2c_reg_nowait - writes to an internal i2c register
*
* @addr: dt3155 mmio base address
* @index: index (internal address) of the register to write
* @data: data to be written
*
* This function starts writing to the specified (by index) register
* and then returns.
*/
static void write_i2c_reg_nowait(void __iomem *addr, u8 index, u8 data)
{
u32 tmp = index;
iowrite32((tmp<<17) | IIC_WRITE | data, addr + IIC_CSR2);
mmiowb();
}
/**
* wait_i2c_reg - waits the read/write to finish
*
* @addr: dt3155 mmio base address
*
* returns: zero on success or error code
*
* This function waits for a pending read/write cycle to finish.
*/
static int wait_i2c_reg(void __iomem *addr)
{
if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
udelay(65); /* wait at least 63 usec for NEW_CYCLE to clear */
if (ioread32(addr + IIC_CSR2) & NEW_CYCLE) {
/* error: NEW_CYCLE not cleared */
printk(KERN_ERR "dt3155: NEW_CYCLE not cleared\n");
return -EIO;
}
if (ioread32(addr + IIC_CSR1) & DIRECT_ABORT) {
/* error: DIRECT_ABORT set */
printk(KERN_ERR "dt3155: DIRECT_ABORT set\n");
/* reset DIRECT_ABORT bit */
iowrite32(DIRECT_ABORT, addr + IIC_CSR1);
return -EIO;
}
return 0;
}
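/*
 * Editor's illustrative sketch (not part of the original driver): the
 * helpers above compose into a synchronous read-modify-write sequence.
 * The wrapper name and the choice of CSR2 are assumptions made only for
 * illustration.
 */
static int __maybe_unused dt3155_example_set_csr2_bits(void __iomem *addr,
		u8 bits)
{
	u8 val;
	int err;

	err = wait_i2c_reg(addr); /* let any queued i2c cycle drain first */
	if (err)
		return err;
	err = read_i2c_reg(addr, CSR2, &val); /* busy-waits ~45 usec */
	if (err)
		return err;
	return write_i2c_reg(addr, CSR2, val | bits); /* busy-waits ~65 usec */
}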
static int
dt3155_start_acq(struct dt3155_priv *pd)
{
struct vb2_buffer *vb = pd->curr_buf;
dma_addr_t dma_addr;
dma_addr = vb2_dma_contig_plane_paddr(vb, 0);
iowrite32(dma_addr, pd->regs + EVEN_DMA_START);
iowrite32(dma_addr + img_width, pd->regs + ODD_DMA_START);
iowrite32(img_width, pd->regs + EVEN_DMA_STRIDE);
iowrite32(img_width, pd->regs + ODD_DMA_STRIDE);
/* enable interrupts, clear all irq flags */
iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START |
FLD_END_EVEN | FLD_END_ODD, pd->regs + INT_CSR);
iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
FLD_DN_ODD | FLD_DN_EVEN | CAP_CONT_EVEN | CAP_CONT_ODD,
pd->regs + CSR1);
wait_i2c_reg(pd->regs);
write_i2c_reg(pd->regs, CONFIG, pd->config);
write_i2c_reg(pd->regs, EVEN_CSR, CSR_ERROR | CSR_DONE);
write_i2c_reg(pd->regs, ODD_CSR, CSR_ERROR | CSR_DONE);
/* start the board */
write_i2c_reg(pd->regs, CSR2, pd->csr2 | BUSY_EVEN | BUSY_ODD);
return 0; /* success */
}
/*
* driver-specific callbacks (vb2_ops)
*/
static int
dt3155_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
unsigned int *num_planes, unsigned long sizes[],
void *alloc_ctxs[])
{
struct dt3155_priv *pd = vb2_get_drv_priv(q);
void *ret;
if (*num_buffers == 0)
*num_buffers = 1;
*num_planes = 1;
sizes[0] = img_width * img_height;
if (pd->q->alloc_ctx[0])
return 0;
ret = vb2_dma_contig_init_ctx(&pd->pdev->dev);
if (IS_ERR(ret))
return PTR_ERR(ret);
pd->q->alloc_ctx[0] = ret;
return 0;
}
static void
dt3155_wait_prepare(struct vb2_queue *q)
{
struct dt3155_priv *pd = vb2_get_drv_priv(q);
mutex_unlock(pd->vdev->lock);
}
static void
dt3155_wait_finish(struct vb2_queue *q)
{
struct dt3155_priv *pd = vb2_get_drv_priv(q);
mutex_lock(pd->vdev->lock);
}
static int
dt3155_buf_prepare(struct vb2_buffer *vb)
{
vb2_set_plane_payload(vb, 0, img_width * img_height);
return 0;
}
static int
dt3155_start_streaming(struct vb2_queue *q)
{
return 0;
}
static int
dt3155_stop_streaming(struct vb2_queue *q)
{
struct dt3155_priv *pd = vb2_get_drv_priv(q);
struct vb2_buffer *vb;
spin_lock_irq(&pd->lock);
while (!list_empty(&pd->dmaq)) {
vb = list_first_entry(&pd->dmaq, typeof(*vb), done_entry);
list_del(&vb->done_entry);
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
spin_unlock_irq(&pd->lock);
msleep(45); /* the irq handler will stop the hardware */
return 0;
}
static void
dt3155_buf_queue(struct vb2_buffer *vb)
{
struct dt3155_priv *pd = vb2_get_drv_priv(vb->vb2_queue);
/* pd->q->streaming = 1 when dt3155_buf_queue() is invoked */
spin_lock_irq(&pd->lock);
if (pd->curr_buf)
list_add_tail(&vb->done_entry, &pd->dmaq);
else {
pd->curr_buf = vb;
dt3155_start_acq(pd);
}
spin_unlock_irq(&pd->lock);
}
/*
* end driver-specific callbacks
*/
const struct vb2_ops q_ops = {
.queue_setup = dt3155_queue_setup,
.wait_prepare = dt3155_wait_prepare,
.wait_finish = dt3155_wait_finish,
.buf_prepare = dt3155_buf_prepare,
.start_streaming = dt3155_start_streaming,
.stop_streaming = dt3155_stop_streaming,
.buf_queue = dt3155_buf_queue,
};
static irqreturn_t
dt3155_irq_handler_even(int irq, void *dev_id)
{
struct dt3155_priv *ipd = dev_id;
struct vb2_buffer *ivb;
dma_addr_t dma_addr;
u32 tmp;
tmp = ioread32(ipd->regs + INT_CSR) & (FLD_START | FLD_END_ODD);
if (!tmp)
return IRQ_NONE; /* not our irq */
if ((tmp & FLD_START) && !(tmp & FLD_END_ODD)) {
iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START,
ipd->regs + INT_CSR);
ipd->field_count++;
return IRQ_HANDLED; /* start of field irq */
}
if ((tmp & FLD_START) && (tmp & FLD_END_ODD)) {
if (!ipd->stats.start_before_end++)
printk(KERN_ERR "dt3155: irq: START before END\n");
}
/* check for corrupted fields */
/* write_i2c_reg(ipd->regs, EVEN_CSR, CSR_ERROR | CSR_DONE); */
/* write_i2c_reg(ipd->regs, ODD_CSR, CSR_ERROR | CSR_DONE); */
tmp = ioread32(ipd->regs + CSR1) & (FLD_CRPT_EVEN | FLD_CRPT_ODD);
if (tmp) {
if (!ipd->stats.corrupted_fields++)
printk(KERN_ERR "dt3155: corrupted field %u\n", tmp);
iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
FLD_DN_ODD | FLD_DN_EVEN |
CAP_CONT_EVEN | CAP_CONT_ODD,
ipd->regs + CSR1);
mmiowb();
}
spin_lock(&ipd->lock);
if (ipd->curr_buf) {
do_gettimeofday(&ipd->curr_buf->v4l2_buf.timestamp);
ipd->curr_buf->v4l2_buf.sequence = (ipd->field_count) >> 1;
vb2_buffer_done(ipd->curr_buf, VB2_BUF_STATE_DONE);
}
if (!ipd->q->streaming || list_empty(&ipd->dmaq))
goto stop_dma;
ivb = list_first_entry(&ipd->dmaq, typeof(*ivb), done_entry);
list_del(&ivb->done_entry);
ipd->curr_buf = ivb;
dma_addr = vb2_dma_contig_plane_paddr(ivb, 0);
iowrite32(dma_addr, ipd->regs + EVEN_DMA_START);
iowrite32(dma_addr + img_width, ipd->regs + ODD_DMA_START);
iowrite32(img_width, ipd->regs + EVEN_DMA_STRIDE);
iowrite32(img_width, ipd->regs + ODD_DMA_STRIDE);
mmiowb();
/* enable interrupts, clear all irq flags */
iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START |
FLD_END_EVEN | FLD_END_ODD, ipd->regs + INT_CSR);
spin_unlock(&ipd->lock);
return IRQ_HANDLED;
stop_dma:
ipd->curr_buf = NULL;
/* stop the board */
write_i2c_reg_nowait(ipd->regs, CSR2, ipd->csr2);
iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
FLD_DN_ODD | FLD_DN_EVEN, ipd->regs + CSR1);
/* disable interrupts, clear all irq flags */
iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD, ipd->regs + INT_CSR);
spin_unlock(&ipd->lock);
return IRQ_HANDLED;
}
static int
dt3155_open(struct file *filp)
{
int ret = 0;
struct dt3155_priv *pd = video_drvdata(filp);
printk(KERN_INFO "dt3155: open(): minor: %i, users: %i\n",
pd->vdev->minor, pd->users);
if (!pd->users) {
pd->q = kzalloc(sizeof(*pd->q), GFP_KERNEL);
if (!pd->q) {
printk(KERN_ERR "dt3155: error: alloc queue\n");
ret = -ENOMEM;
goto err_alloc_queue;
}
pd->q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
pd->q->io_modes = VB2_READ | VB2_MMAP;
pd->q->ops = &q_ops;
pd->q->mem_ops = &vb2_dma_contig_memops;
pd->q->drv_priv = pd;
pd->curr_buf = NULL;
pd->field_count = 0;
vb2_queue_init(pd->q); /* cannot fail */
INIT_LIST_HEAD(&pd->dmaq);
spin_lock_init(&pd->lock);
/* disable all irqs, clear all irq flags */
iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD,
pd->regs + INT_CSR);
pd->irq_handler = dt3155_irq_handler_even;
ret = request_irq(pd->pdev->irq, pd->irq_handler,
IRQF_SHARED, DT3155_NAME, pd);
if (ret) {
printk(KERN_ERR "dt3155: error: request_irq\n");
goto err_request_irq;
}
}
pd->users++;
return 0; /* success */
err_request_irq:
kfree(pd->q);
pd->q = NULL;
err_alloc_queue:
return ret;
}
static int
dt3155_release(struct file *filp)
{
struct dt3155_priv *pd = video_drvdata(filp);
printk(KERN_INFO "dt3155: release(): minor: %i, users: %i\n",
pd->vdev->minor, pd->users - 1);
pd->users--;
BUG_ON(pd->users < 0);
if (!pd->users) {
vb2_queue_release(pd->q);
free_irq(pd->pdev->irq, pd);
if (pd->q->alloc_ctx[0])
vb2_dma_contig_cleanup_ctx(pd->q->alloc_ctx[0]);
kfree(pd->q);
pd->q = NULL;
}
return 0;
}
static ssize_t
dt3155_read(struct file *filp, char __user *user, size_t size, loff_t *loff)
{
struct dt3155_priv *pd = video_drvdata(filp);
return vb2_read(pd->q, user, size, loff, filp->f_flags & O_NONBLOCK);
}
static unsigned int
dt3155_poll(struct file *filp, struct poll_table_struct *polltbl)
{
struct dt3155_priv *pd = video_drvdata(filp);
return vb2_poll(pd->q, filp, polltbl);
}
static int
dt3155_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct dt3155_priv *pd = video_drvdata(filp);
return vb2_mmap(pd->q, vma);
}
static const struct v4l2_file_operations dt3155_fops = {
.owner = THIS_MODULE,
.open = dt3155_open,
.release = dt3155_release,
.read = dt3155_read,
.poll = dt3155_poll,
.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
.mmap = dt3155_mmap,
};
static int
dt3155_ioc_streamon(struct file *filp, void *p, enum v4l2_buf_type type)
{
struct dt3155_priv *pd = video_drvdata(filp);
return vb2_streamon(pd->q, type);
}
static int
dt3155_ioc_streamoff(struct file *filp, void *p, enum v4l2_buf_type type)
{
struct dt3155_priv *pd = video_drvdata(filp);
return vb2_streamoff(pd->q, type);
}
static int
dt3155_ioc_querycap(struct file *filp, void *p, struct v4l2_capability *cap)
{
struct dt3155_priv *pd = video_drvdata(filp);
strcpy(cap->driver, DT3155_NAME);
strcpy(cap->card, DT3155_NAME " frame grabber");
sprintf(cap->bus_info, "PCI:%s", pci_name(pd->pdev));
cap->version =
KERNEL_VERSION(DT3155_VER_MAJ, DT3155_VER_MIN, DT3155_VER_EXT);
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
DT3155_CAPTURE_METHOD;
return 0;
}
static int
dt3155_ioc_enum_fmt_vid_cap(struct file *filp, void *p, struct v4l2_fmtdesc *f)
{
if (f->index >= NUM_OF_FORMATS)
return -EINVAL;
*f = frame_std[f->index];
return 0;
}
static int
dt3155_ioc_g_fmt_vid_cap(struct file *filp, void *p, struct v4l2_format *f)
{
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
f->fmt.pix.width = img_width;
f->fmt.pix.height = img_height;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_GREY;
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.bytesperline = f->fmt.pix.width;
f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height;
f->fmt.pix.colorspace = 0;
f->fmt.pix.priv = 0;
return 0;
}
static int
dt3155_ioc_try_fmt_vid_cap(struct file *filp, void *p, struct v4l2_format *f)
{
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (f->fmt.pix.width == img_width &&
f->fmt.pix.height == img_height &&
f->fmt.pix.pixelformat == V4L2_PIX_FMT_GREY &&
f->fmt.pix.field == V4L2_FIELD_NONE &&
f->fmt.pix.bytesperline == f->fmt.pix.width &&
f->fmt.pix.sizeimage == f->fmt.pix.width * f->fmt.pix.height)
return 0;
else
return -EINVAL;
}
static int
dt3155_ioc_s_fmt_vid_cap(struct file *filp, void *p, struct v4l2_format *f)
{
return dt3155_ioc_g_fmt_vid_cap(filp, p, f);
}
static int
dt3155_ioc_reqbufs(struct file *filp, void *p, struct v4l2_requestbuffers *b)
{
struct dt3155_priv *pd = video_drvdata(filp);
return vb2_reqbufs(pd->q, b);
}
static int
dt3155_ioc_querybuf(struct file *filp, void *p, struct v4l2_buffer *b)
{
struct dt3155_priv *pd = video_drvdata(filp);
return vb2_querybuf(pd->q, b);
}
static int
dt3155_ioc_qbuf(struct file *filp, void *p, struct v4l2_buffer *b)
{
struct dt3155_priv *pd = video_drvdata(filp);
return vb2_qbuf(pd->q, b);
}
static int
dt3155_ioc_dqbuf(struct file *filp, void *p, struct v4l2_buffer *b)
{
struct dt3155_priv *pd = video_drvdata(filp);
return vb2_dqbuf(pd->q, b, filp->f_flags & O_NONBLOCK);
}
static int
dt3155_ioc_querystd(struct file *filp, void *p, v4l2_std_id *norm)
{
*norm = DT3155_CURRENT_NORM;
return 0;
}
static int
dt3155_ioc_g_std(struct file *filp, void *p, v4l2_std_id *norm)
{
*norm = DT3155_CURRENT_NORM;
return 0;
}
static int
dt3155_ioc_s_std(struct file *filp, void *p, v4l2_std_id *norm)
{
if (*norm & DT3155_CURRENT_NORM)
return 0;
return -EINVAL;
}
static int
dt3155_ioc_enum_input(struct file *filp, void *p, struct v4l2_input *input)
{
if (input->index)
return -EINVAL;
strcpy(input->name, "Coax in");
input->type = V4L2_INPUT_TYPE_CAMERA;
/*
* FIXME: input->std = 0 according to v4l2 API
* VIDIOC_G_STD, VIDIOC_S_STD, VIDIOC_QUERYSTD and VIDIOC_ENUMSTD
* should return -EINVAL
*/
input->std = DT3155_CURRENT_NORM;
input->status = 0;/* FIXME: add sync detection & V4L2_IN_ST_NO_H_LOCK */
return 0;
}
static int
dt3155_ioc_g_input(struct file *filp, void *p, unsigned int *i)
{
*i = 0;
return 0;
}
static int
dt3155_ioc_s_input(struct file *filp, void *p, unsigned int i)
{
if (i)
return -EINVAL;
return 0;
}
static int
dt3155_ioc_g_parm(struct file *filp, void *p, struct v4l2_streamparm *parms)
{
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
parms->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
parms->parm.capture.capturemode = 0;
parms->parm.capture.timeperframe.numerator = 1001;
parms->parm.capture.timeperframe.denominator = frames_per_sec * 1000;
parms->parm.capture.extendedmode = 0;
parms->parm.capture.readbuffers = 1; /* FIXME: 2 buffers? */
return 0;
}
static int
dt3155_ioc_s_parm(struct file *filp, void *p, struct v4l2_streamparm *parms)
{
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
parms->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
parms->parm.capture.capturemode = 0;
parms->parm.capture.timeperframe.numerator = 1001;
parms->parm.capture.timeperframe.denominator = frames_per_sec * 1000;
parms->parm.capture.extendedmode = 0;
parms->parm.capture.readbuffers = 1; /* FIXME: 2 buffers? */
return 0;
}
static const struct v4l2_ioctl_ops dt3155_ioctl_ops = {
.vidioc_streamon = dt3155_ioc_streamon,
.vidioc_streamoff = dt3155_ioc_streamoff,
.vidioc_querycap = dt3155_ioc_querycap,
/*
.vidioc_g_priority = dt3155_ioc_g_priority,
.vidioc_s_priority = dt3155_ioc_s_priority,
*/
.vidioc_enum_fmt_vid_cap = dt3155_ioc_enum_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = dt3155_ioc_try_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = dt3155_ioc_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = dt3155_ioc_s_fmt_vid_cap,
.vidioc_reqbufs = dt3155_ioc_reqbufs,
.vidioc_querybuf = dt3155_ioc_querybuf,
.vidioc_qbuf = dt3155_ioc_qbuf,
.vidioc_dqbuf = dt3155_ioc_dqbuf,
.vidioc_querystd = dt3155_ioc_querystd,
.vidioc_g_std = dt3155_ioc_g_std,
.vidioc_s_std = dt3155_ioc_s_std,
.vidioc_enum_input = dt3155_ioc_enum_input,
.vidioc_g_input = dt3155_ioc_g_input,
.vidioc_s_input = dt3155_ioc_s_input,
/*
.vidioc_queryctrl = dt3155_ioc_queryctrl,
.vidioc_g_ctrl = dt3155_ioc_g_ctrl,
.vidioc_s_ctrl = dt3155_ioc_s_ctrl,
.vidioc_querymenu = dt3155_ioc_querymenu,
.vidioc_g_ext_ctrls = dt3155_ioc_g_ext_ctrls,
.vidioc_s_ext_ctrls = dt3155_ioc_s_ext_ctrls,
*/
.vidioc_g_parm = dt3155_ioc_g_parm,
.vidioc_s_parm = dt3155_ioc_s_parm,
/*
.vidioc_cropcap = dt3155_ioc_cropcap,
.vidioc_g_crop = dt3155_ioc_g_crop,
.vidioc_s_crop = dt3155_ioc_s_crop,
.vidioc_enum_framesizes = dt3155_ioc_enum_framesizes,
.vidioc_enum_frameintervals = dt3155_ioc_enum_frameintervals,
*/
};
static int __devinit
dt3155_init_board(struct pci_dev *pdev)
{
struct dt3155_priv *pd = pci_get_drvdata(pdev);
void *buf_cpu;
dma_addr_t buf_dma;
int i;
u8 tmp;
pci_set_master(pdev); /* dt3155 needs it */
/* resetting the adapter */
iowrite32(FLD_CRPT_ODD | FLD_CRPT_EVEN | FLD_DN_ODD | FLD_DN_EVEN,
pd->regs + CSR1);
mmiowb();
msleep(20);
/* initialize the adapter registers */
iowrite32(FIFO_EN | SRST, pd->regs + CSR1);
mmiowb();
iowrite32(0xEEEEEE01, pd->regs + EVEN_PIXEL_FMT);
iowrite32(0xEEEEEE01, pd->regs + ODD_PIXEL_FMT);
iowrite32(0x00000020, pd->regs + FIFO_TRIGER);
iowrite32(0x00000103, pd->regs + XFER_MODE);
iowrite32(0, pd->regs + RETRY_WAIT_CNT);
iowrite32(0, pd->regs + INT_CSR);
iowrite32(1, pd->regs + EVEN_FLD_MASK);
iowrite32(1, pd->regs + ODD_FLD_MASK);
iowrite32(0, pd->regs + MASK_LENGTH);
iowrite32(0x0005007C, pd->regs + FIFO_FLAG_CNT);
iowrite32(0x01010101, pd->regs + IIC_CLK_DUR);
mmiowb();
/* verifying that we have a DT3155 board (not just a SAA7116 chip) */
read_i2c_reg(pd->regs, DT_ID, &tmp);
if (tmp != DT3155_ID)
return -ENODEV;
/* initialize AD LUT */
write_i2c_reg(pd->regs, AD_ADDR, 0);
for (i = 0; i < 256; i++)
write_i2c_reg(pd->regs, AD_LUT, i);
/* initialize ADC references */
/* FIXME: pos_ref & neg_ref depend on VT_50HZ */
write_i2c_reg(pd->regs, AD_ADDR, AD_CMD_REG);
write_i2c_reg(pd->regs, AD_CMD, VIDEO_CNL_1 | SYNC_CNL_1 | SYNC_LVL_3);
write_i2c_reg(pd->regs, AD_ADDR, AD_POS_REF);
write_i2c_reg(pd->regs, AD_CMD, 34);
write_i2c_reg(pd->regs, AD_ADDR, AD_NEG_REF);
write_i2c_reg(pd->regs, AD_CMD, 0);
/* initialize PM LUT */
write_i2c_reg(pd->regs, CONFIG, pd->config | PM_LUT_PGM);
for (i = 0; i < 256; i++) {
write_i2c_reg(pd->regs, PM_LUT_ADDR, i);
write_i2c_reg(pd->regs, PM_LUT_DATA, i);
}
write_i2c_reg(pd->regs, CONFIG, pd->config | PM_LUT_PGM | PM_LUT_SEL);
for (i = 0; i < 256; i++) {
write_i2c_reg(pd->regs, PM_LUT_ADDR, i);
write_i2c_reg(pd->regs, PM_LUT_DATA, i);
}
write_i2c_reg(pd->regs, CONFIG, pd->config); /* ACQ_MODE_EVEN */
/* select channel 1 for input and set the sync level */
write_i2c_reg(pd->regs, AD_ADDR, AD_CMD_REG);
write_i2c_reg(pd->regs, AD_CMD, VIDEO_CNL_1 | SYNC_CNL_1 | SYNC_LVL_3);
/* allocate memory, and initialize the DMA machine */
buf_cpu = dma_alloc_coherent(&pdev->dev, DT3155_BUF_SIZE, &buf_dma,
GFP_KERNEL);
if (!buf_cpu) {
printk(KERN_ERR "dt3155: dma_alloc_coherent "
"(in dt3155_init_board) failed\n");
return -ENOMEM;
}
iowrite32(buf_dma, pd->regs + EVEN_DMA_START);
iowrite32(buf_dma, pd->regs + ODD_DMA_START);
iowrite32(0, pd->regs + EVEN_DMA_STRIDE);
iowrite32(0, pd->regs + ODD_DMA_STRIDE);
/* Perform a pseudo even field acquire */
iowrite32(FIFO_EN | SRST | CAP_CONT_ODD, pd->regs + CSR1);
write_i2c_reg(pd->regs, CSR2, pd->csr2 | SYNC_SNTL);
write_i2c_reg(pd->regs, CONFIG, pd->config);
write_i2c_reg(pd->regs, EVEN_CSR, CSR_SNGL);
write_i2c_reg(pd->regs, CSR2, pd->csr2 | BUSY_EVEN | SYNC_SNTL);
msleep(100);
read_i2c_reg(pd->regs, CSR2, &tmp);
write_i2c_reg(pd->regs, EVEN_CSR, CSR_ERROR | CSR_SNGL | CSR_DONE);
write_i2c_reg(pd->regs, ODD_CSR, CSR_ERROR | CSR_SNGL | CSR_DONE);
write_i2c_reg(pd->regs, CSR2, pd->csr2);
iowrite32(FIFO_EN | SRST | FLD_DN_EVEN | FLD_DN_ODD, pd->regs + CSR1);
/* deallocate memory */
dma_free_coherent(&pdev->dev, DT3155_BUF_SIZE, buf_cpu, buf_dma);
if (tmp & BUSY_EVEN) {
printk(KERN_ERR "dt3155: BUSY_EVEN not cleared\n");
return -EIO;
}
return 0;
}
static struct video_device dt3155_vdev = {
.name = DT3155_NAME,
.fops = &dt3155_fops,
.ioctl_ops = &dt3155_ioctl_ops,
.minor = -1,
.release = video_device_release,
.tvnorms = DT3155_CURRENT_NORM,
.current_norm = DT3155_CURRENT_NORM,
};
/* same as in drivers/base/dma-coherent.c */
struct dma_coherent_mem {
void *virt_base;
dma_addr_t device_base;
int size;
int flags;
unsigned long *bitmap;
};
static int __devinit
dt3155_alloc_coherent(struct device *dev, size_t size, int flags)
{
struct dma_coherent_mem *mem;
dma_addr_t dev_base;
int pages = size >> PAGE_SHIFT;
int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
if ((flags & DMA_MEMORY_MAP) == 0)
goto out;
if (!size)
goto out;
if (dev->dma_mem)
goto out;
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
goto out;
mem->virt_base = dma_alloc_coherent(dev, size, &dev_base,
DT3155_COH_FLAGS);
if (!mem->virt_base)
goto err_alloc_coherent;
mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
if (!mem->bitmap)
goto err_bitmap;
/* coherent_dma_mask is already set to 32 bits */
mem->device_base = dev_base;
mem->size = pages;
mem->flags = flags;
dev->dma_mem = mem;
return DMA_MEMORY_MAP;
err_bitmap:
dma_free_coherent(dev, size, mem->virt_base, dev_base);
err_alloc_coherent:
kfree(mem);
out:
return 0;
}
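/*
 * Editor's note (not in the original driver): a DMA_MEMORY_MAP return above
 * means dev->dma_mem is installed, so later dma_alloc_coherent() calls for
 * this device are carved out of the preallocated DT3155_CHUNK_SIZE chunk
 * (via dma_alloc_from_coherent()), keeping per-frame buffers physically
 * contiguous without touching the page allocator at streaming time.
 */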
static void __devexit
dt3155_free_coherent(struct device *dev)
{
struct dma_coherent_mem *mem = dev->dma_mem;
if (!mem)
return;
dev->dma_mem = NULL;
dma_free_coherent(dev, mem->size << PAGE_SHIFT,
mem->virt_base, mem->device_base);
kfree(mem->bitmap);
kfree(mem);
}
static int __devinit
dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int err;
struct dt3155_priv *pd;
printk(KERN_INFO "dt3155: probe()\n");
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_ERR "dt3155: cannot set dma_mask\n");
return -ENODEV;
}
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_ERR "dt3155: cannot set dma_coherent_mask\n");
return -ENODEV;
}
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
printk(KERN_ERR "dt3155: cannot allocate dt3155_priv\n");
return -ENOMEM;
}
pd->vdev = video_device_alloc();
if (!pd->vdev) {
printk(KERN_ERR "dt3155: cannot allocate vdev structure\n");
goto err_video_device_alloc;
}
*pd->vdev = dt3155_vdev;
pci_set_drvdata(pdev, pd); /* for use in dt3155_remove() */
video_set_drvdata(pd->vdev, pd); /* for use in video_fops */
pd->users = 0;
pd->pdev = pdev;
INIT_LIST_HEAD(&pd->dmaq);
mutex_init(&pd->mux);
pd->vdev->lock = &pd->mux; /* for locking v4l2_file_operations */
spin_lock_init(&pd->lock);
pd->csr2 = csr2_init;
pd->config = config_init;
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "dt3155: pci_dev not enabled\n");
goto err_enable_dev;
}
err = pci_request_region(pdev, 0, pci_name(pdev));
if (err)
goto err_req_region;
pd->regs = pci_iomap(pdev, 0, pci_resource_len(pd->pdev, 0));
if (!pd->regs) {
err = -ENOMEM;
printk(KERN_ERR "dt3155: pci_iomap failed\n");
goto err_pci_iomap;
}
err = dt3155_init_board(pdev);
if (err) {
printk(KERN_ERR "dt3155: dt3155_init_board failed\n");
goto err_init_board;
}
err = video_register_device(pd->vdev, VFL_TYPE_GRABBER, -1);
if (err) {
printk(KERN_ERR "dt3155: Cannot register video device\n");
goto err_init_board;
}
err = dt3155_alloc_coherent(&pdev->dev, DT3155_CHUNK_SIZE,
DMA_MEMORY_MAP);
if (err) /* dt3155_alloc_coherent() returns DMA_MEMORY_MAP on success */
printk(KERN_INFO "dt3155: preallocated 8 buffers\n");
printk(KERN_INFO "dt3155: /dev/video%i is ready\n", pd->vdev->minor);
return 0; /* success */
err_init_board:
pci_iounmap(pdev, pd->regs);
err_pci_iomap:
pci_release_region(pdev, 0);
err_req_region:
pci_disable_device(pdev);
err_enable_dev:
video_device_release(pd->vdev);
err_video_device_alloc:
kfree(pd);
return err;
}
static void __devexit
dt3155_remove(struct pci_dev *pdev)
{
struct dt3155_priv *pd = pci_get_drvdata(pdev);
printk(KERN_INFO "dt3155: remove()\n");
dt3155_free_coherent(&pdev->dev);
video_unregister_device(pd->vdev);
pci_iounmap(pdev, pd->regs);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
/*
* video_device_release() is invoked automatically
* see: struct video_device dt3155_vdev
*/
kfree(pd);
}
static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
{ PCI_DEVICE(DT3155_VENDOR_ID, DT3155_DEVICE_ID) },
{ 0, /* zero marks the end */ },
};
MODULE_DEVICE_TABLE(pci, pci_ids);
static struct pci_driver pci_driver = {
.name = DT3155_NAME,
.id_table = pci_ids,
.probe = dt3155_probe,
.remove = __devexit_p(dt3155_remove),
};
static int __init
dt3155_init_module(void)
{
int err;
printk(KERN_INFO "dt3155: ==================\n");
printk(KERN_INFO "dt3155: init()\n");
err = pci_register_driver(&pci_driver);
if (err) {
printk(KERN_ERR "dt3155: cannot register pci_driver\n");
return err;
}
return 0; /* success */
}
static void __exit
dt3155_exit_module(void)
{
pci_unregister_driver(&pci_driver);
printk(KERN_INFO "dt3155: exit()\n");
printk(KERN_INFO "dt3155: ==================\n");
}
module_init(dt3155_init_module);
module_exit(dt3155_exit_module);
MODULE_DESCRIPTION("video4linux pci-driver for dt3155 frame grabber");
MODULE_AUTHOR("Marin Mitov <mitov@issp.bas.bg>");
MODULE_VERSION(DT3155_VERSION);
MODULE_LICENSE("GPL");
| gpl-2.0 |
ProtouProject/android_kernel_htc_protou | sound/core/memory.c | 382 | 1939 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
* Misc memory accessors
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/export.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <sound/core.h>
int copy_to_user_fromio(void __user *dst, const volatile void __iomem *src, size_t count)
{
#if defined(__i386__) || defined(CONFIG_SPARC32)
return copy_to_user(dst, (const void __force*)src, count) ? -EFAULT : 0;
#else
char buf[256];
while (count) {
size_t c = count;
if (c > sizeof(buf))
c = sizeof(buf);
memcpy_fromio(buf, (void __iomem *)src, c);
if (copy_to_user(dst, buf, c))
return -EFAULT;
count -= c;
dst += c;
src += c;
}
return 0;
#endif
}
EXPORT_SYMBOL(copy_to_user_fromio);
int copy_from_user_toio(volatile void __iomem *dst, const void __user *src, size_t count)
{
#if defined(__i386__) || defined(CONFIG_SPARC32)
return copy_from_user((void __force *)dst, src, count) ? -EFAULT : 0;
#else
char buf[256];
while (count) {
size_t c = count;
if (c > sizeof(buf))
c = sizeof(buf);
if (copy_from_user(buf, src, c))
return -EFAULT;
memcpy_toio(dst, buf, c);
count -= c;
dst += c;
src += c;
}
return 0;
#endif
}
EXPORT_SYMBOL(copy_from_user_toio);
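/* Usage sketch (illustration only, not part of this file): a driver
* read() handler could use the helper above to bounce a chip's MMIO
* sample buffer out to user space. struct example_chip, its iobase
* field and the 0x100 offset are hypothetical.
*/
#if 0 /* illustration only */
static ssize_t example_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct example_chip *chip = file->private_data; /* hypothetical */
/* MMIO cannot be handed to copy_to_user() directly on most arches */
if (copy_to_user_fromio(buf, chip->iobase + 0x100, count))
return -EFAULT;
return count;
}
#endif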
| gpl-2.0 |
croniccorey/OnePlus2-Kernel | drivers/soc/qcom/ddr-health.c | 894 | 3311 | /* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <soc/qcom/rpm-smd.h>
#define RPM_MISC_REQ_TYPE 0x6373696d
#define RPM_MISC_REQ_DDR_HEALTH 0x31726464
static uint32_t mem_size;
static int ddr_health_set(const char *val, struct kernel_param *kp);
module_param_call(mem_size, ddr_health_set, param_get_uint,
&mem_size, 0644);
struct ddr_health {
uint64_t addr;
uint32_t size;
uint32_t reserved;
};
static struct mutex lock;
static struct ddr_health *ddr_health;
static struct msm_rpm_kvp rpm_kvp;
static int ddr_health_set(const char *val, struct kernel_param *kp)
{
int ret;
void *virt;
uint64_t old_addr = 0;
uint32_t old_size = 0;
mutex_lock(&lock);
ret = param_set_uint(val, kp);
if (ret) {
pr_err("ddr-health: error setting value %d\n", ret);
mutex_unlock(&lock);
return ret;
}
if (rpm_kvp.data) {
ddr_health = (struct ddr_health *)rpm_kvp.data;
old_addr = ddr_health->addr;
old_size = ddr_health->size;
}
rpm_kvp.key = RPM_MISC_REQ_DDR_HEALTH;
if (mem_size) {
virt = kzalloc(mem_size, GFP_KERNEL);
if (!virt) {
pr_err("ddr-health: failed to alloc mem request %x\n",
mem_size);
mutex_unlock(&lock);
return -ENOMEM;
}
ddr_health->addr = (uint64_t)virt_to_phys(virt);
ddr_health->size = mem_size;
rpm_kvp.length = sizeof(struct ddr_health);
rpm_kvp.data = (void *)ddr_health;
ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
RPM_MISC_REQ_TYPE, 0, &rpm_kvp, 1);
if (ret) {
pr_err("ddr-health: send buf to RPM failed %d, %x\n",
ret, mem_size);
kfree(virt);
goto err;
}
} else {
ddr_health->addr = 0;
ddr_health->size = 0;
rpm_kvp.length = sizeof(struct ddr_health);
rpm_kvp.data = (void *)ddr_health;
ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
RPM_MISC_REQ_TYPE, 0, &rpm_kvp, 1);
if (ret) {
pr_err("ddr-health: send nobuf to RPM failed %d, %x\n",
ret, mem_size);
goto err;
}
}
if (old_addr)
kfree(phys_to_virt((phys_addr_t)old_addr));
mutex_unlock(&lock);
return 0;
err:
ddr_health->addr = old_addr;
ddr_health->size = old_size;
mutex_unlock(&lock);
return ret;
}
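/* Usage sketch (an assumption based on module_param_call() semantics,
* not a documented interface): writing the parameter from user space,
* e.g. "echo 1048576 > /sys/module/ddr_health/parameters/mem_size",
* invokes ddr_health_set() above, which allocates a 1 MB buffer and
* hands its physical address to the RPM; writing 0 releases it.
*/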
static int __init ddr_health_init(void)
{
mutex_init(&lock);
ddr_health = kzalloc(sizeof(*ddr_health), GFP_KERNEL);
if (!ddr_health) {
pr_err("ddr-health: failed to alloc mem\n");
return -ENOMEM;
}
return 0;
}
module_init(ddr_health_init);
static void __exit ddr_health_exit(void)
{
if (rpm_kvp.data) {
ddr_health = (struct ddr_health *)rpm_kvp.data;
if (ddr_health->addr)
kfree(phys_to_virt((phys_addr_t)ddr_health->addr));
}
kfree(ddr_health);
mutex_destroy(&lock);
}
module_exit(ddr_health_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DDR health monitor driver");
| gpl-2.0 |
XCage15/linux | drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 894 | 64963 | /* 10G controller driver for Samsung SoCs
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Siva Reddy Kallam <siva.kallam@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/sxgbe_platform.h>
#include "sxgbe_common.h"
#include "sxgbe_desc.h"
#include "sxgbe_dma.h"
#include "sxgbe_mtl.h"
#include "sxgbe_reg.h"
#define SXGBE_ALIGN(x) L1_CACHE_ALIGN(x)
#define JUMBO_LEN 9000
/* Module parameters */
#define TX_TIMEO 5000
#define DMA_TX_SIZE 512
#define DMA_RX_SIZE 1024
#define TC_DEFAULT 64
#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
/* The default timer value as per the sxgbe specification 1 sec(1000 ms) */
#define SXGBE_DEFAULT_LPI_TIMER 1000
static int debug = -1;
static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
module_param(debug, int, S_IRUGO | S_IWUSR);
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);
#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
/**
* sxgbe_verify_args - verify the driver parameters.
* Description: it verifies if some wrong parameter is passed to the driver.
* Note that wrong parameters are replaced with the default values.
*/
static void sxgbe_verify_args(void)
{
if (unlikely(eee_timer < 0))
eee_timer = SXGBE_DEFAULT_LPI_TIMER;
}
static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
{
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
priv->hw->mac->set_eee_mode(priv->ioaddr);
}
void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
/* Exit and disable EEE in case we are in LPI state. */
priv->hw->mac->reset_eee_mode(priv->ioaddr);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
}
/**
* sxgbe_eee_ctrl_timer
* @arg : data hook
* Description:
* If there is no data transfer and if we are not in LPI state,
* then MAC Transmitter can be moved to LPI state.
*/
static void sxgbe_eee_ctrl_timer(unsigned long arg)
{
struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;
sxgbe_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
}
/**
* sxgbe_eee_init
* @priv: private device pointer
* Description:
* If the EEE support has been enabled while configuring the driver,
* if the MAC actually supports EEE (from the HW cap reg) and the
* PHY can also manage EEE, then enable the LPI state and start the
* timer to verify if the tx path can enter the LPI state.
*/
bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
{
bool ret = false;
/* MAC core supports the EEE feature. */
if (priv->hw_cap.eee) {
/* Check if the PHY supports EEE */
if (phy_init_eee(priv->phydev, 1))
return false;
priv->eee_active = 1;
setup_timer(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer,
(unsigned long)priv);
priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
add_timer(&priv->eee_ctrl_timer);
priv->hw->mac->set_eee_timer(priv->ioaddr,
SXGBE_DEFAULT_LPI_TIMER,
priv->tx_lpi_timer);
pr_info("Energy-Efficient Ethernet initialized\n");
ret = true;
}
return ret;
}
static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
{
/* When the EEE has already been initialised we have to
* modify the PLS bit in the LPI ctrl & status reg according
* to the PHY link status.
*/
if (priv->eee_enabled)
priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
}
/**
* sxgbe_clk_csr_set - dynamically set the MDC clock
* @priv: driver private structure
* Description: this is to dynamically set the MDC clock according to the csr
* clock input.
*/
static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
{
u32 clk_rate = clk_get_rate(priv->sxgbe_clk);
/* assign the proper divider, this will be used during
* mdio communication
*/
if (clk_rate < SXGBE_CSR_F_150M)
priv->clk_csr = SXGBE_CSR_100_150M;
else if (clk_rate <= SXGBE_CSR_F_250M)
priv->clk_csr = SXGBE_CSR_150_250M;
else if (clk_rate <= SXGBE_CSR_F_300M)
priv->clk_csr = SXGBE_CSR_250_300M;
else if (clk_rate <= SXGBE_CSR_F_350M)
priv->clk_csr = SXGBE_CSR_300_350M;
else if (clk_rate <= SXGBE_CSR_F_400M)
priv->clk_csr = SXGBE_CSR_350_400M;
else if (clk_rate <= SXGBE_CSR_F_500M)
priv->clk_csr = SXGBE_CSR_400_500M;
}
/* minimum number of free TX descriptors required to wake up TX process */
#define SXGBE_TX_THRESH(x) (x->dma_tx_size/4)
static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
{
return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
}
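/* Worked example of the ring arithmetic above (illustrative values):
* with tx_qsize = 512, dirty_tx = 10 and cur_tx = 14, four descriptors
* are in flight, so 10 + 512 - 14 - 1 = 507 remain usable; the "- 1"
* keeps one slot unused so a full ring can be told apart from an
* empty one.
*/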
/**
* sxgbe_adjust_link
* @dev: net device structure
* Description: it adjusts the link parameters.
*/
static void sxgbe_adjust_link(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
u8 new_state = 0;
u8 speed = 0xff;
if (!phydev)
return;
/* The SXGBE does not support auto-negotiation or half-duplex
* mode, so duplex changes are not handled here; only speed and
* link status are.
*/
if (phydev->link) {
if (phydev->speed != priv->speed) {
new_state = 1;
switch (phydev->speed) {
case SPEED_10000:
speed = SXGBE_SPEED_10G;
break;
case SPEED_2500:
speed = SXGBE_SPEED_2_5G;
break;
case SPEED_1000:
speed = SXGBE_SPEED_1G;
break;
default:
netif_err(priv, link, dev,
"Speed (%d) not supported\n",
phydev->speed);
}
priv->speed = phydev->speed;
priv->hw->mac->set_speed(priv->ioaddr, speed);
}
if (!priv->oldlink) {
new_state = 1;
priv->oldlink = 1;
}
} else if (priv->oldlink) {
new_state = 1;
priv->oldlink = 0;
priv->speed = SPEED_UNKNOWN;
}
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
/* Alter the MAC settings for EEE */
sxgbe_eee_adjust(priv);
}
/**
* sxgbe_init_phy - PHY initialization
* @dev: net device structure
* Description: it initializes the driver's PHY state, and attaches the PHY
* to the mac driver.
* Return value:
* 0 on success
*/
static int sxgbe_init_phy(struct net_device *ndev)
{
char phy_id_fmt[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
struct phy_device *phydev;
struct sxgbe_priv_data *priv = netdev_priv(ndev);
int phy_iface = priv->plat->interface;
/* assign default link status */
priv->oldlink = 0;
priv->speed = SPEED_UNKNOWN;
priv->oldduplex = DUPLEX_UNKNOWN;
if (priv->plat->phy_bus_name)
snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
priv->plat->phy_bus_name, priv->plat->bus_id);
else
snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
priv->plat->bus_id);
snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);
phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);
if (IS_ERR(phydev)) {
netdev_err(ndev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
/* Stop Advertising 1000BASE Capability if interface is not GMII */
if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
(phy_iface == PHY_INTERFACE_MODE_RMII))
phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
if (phydev->phy_id == 0) {
phy_disconnect(phydev);
return -ENODEV;
}
netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
__func__, phydev->phy_id, phydev->link);
/* save phy device in private structure */
priv->phydev = phydev;
return 0;
}
/**
* sxgbe_clear_descriptors: clear descriptors
* @priv: driver private structure
* Description: this function is called to clear the tx and rx descriptors
* in case of both basic and extended descriptors are used.
*/
static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
{
int i, j;
unsigned int txsize = priv->dma_tx_size;
unsigned int rxsize = priv->dma_rx_size;
/* Clear the Rx/Tx descriptors */
for (j = 0; j < SXGBE_RX_QUEUES; j++) {
for (i = 0; i < rxsize; i++)
priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
priv->use_riwt, priv->mode,
(i == rxsize - 1));
}
for (j = 0; j < SXGBE_TX_QUEUES; j++) {
for (i = 0; i < txsize; i++)
priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
}
}
static int sxgbe_init_rx_buffers(struct net_device *dev,
struct sxgbe_rx_norm_desc *p, int i,
unsigned int dma_buf_sz,
struct sxgbe_rx_queue *rx_ring)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
struct sk_buff *skb;
skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
if (!skb)
return -ENOMEM;
rx_ring->rx_skbuff[i] = skb;
rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
dma_buf_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
netdev_err(dev, "%s: DMA mapping error\n", __func__);
dev_kfree_skb_any(skb);
return -EINVAL;
}
p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];
return 0;
}
/**
* sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
* @dev: net device structure
* @p: RX descriptor whose buffer is freed
* @i: index of the buffer within the ring
* @dma_buf_sz: buffer size
* @rx_ring: ring the buffer belongs to
* Description: this function frees a single DMA RX buffer
*/
static void sxgbe_free_rx_buffers(struct net_device *dev,
struct sxgbe_rx_norm_desc *p, int i,
unsigned int dma_buf_sz,
struct sxgbe_rx_queue *rx_ring)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
kfree_skb(rx_ring->rx_skbuff[i]);
dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i],
dma_buf_sz, DMA_FROM_DEVICE);
}
/**
* init_tx_ring - init the TX descriptor ring
* @dev: device structure
* @queue_no: queue number
* @tx_ring: ring to be initialised
* @tx_rsize: ring size
* Description: this function initializes the DMA TX descriptors
*/
static int init_tx_ring(struct device *dev, u8 queue_no,
struct sxgbe_tx_queue *tx_ring, int tx_rsize)
{
/* TX ring is not allocated */
if (!tx_ring) {
dev_err(dev, "No memory for TX queue of SXGBE\n");
return -ENOMEM;
}
/* allocate memory for TX descriptors */
tx_ring->dma_tx = dma_zalloc_coherent(dev,
tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
&tx_ring->dma_tx_phy, GFP_KERNEL);
if (!tx_ring->dma_tx)
return -ENOMEM;
/* allocate memory for TX skbuff array */
tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
sizeof(dma_addr_t), GFP_KERNEL);
if (!tx_ring->tx_skbuff_dma)
goto dmamem_err;
tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
sizeof(struct sk_buff *), GFP_KERNEL);
if (!tx_ring->tx_skbuff)
goto dmamem_err;
/* assign queue number */
tx_ring->queue_no = queue_no;
/* initialise counters */
tx_ring->dirty_tx = 0;
tx_ring->cur_tx = 0;
/* initialise TX queue lock */
spin_lock_init(&tx_ring->tx_lock);
return 0;
dmamem_err:
dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
tx_ring->dma_tx, tx_ring->dma_tx_phy);
return -ENOMEM;
}
/**
* free_rx_ring - free the RX descriptor ring
* @dev: device structure
* @rx_ring: ring to be freed
* @rx_rsize: ring size
* Description: this function frees the DMA RX descriptor ring
*/
static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
int rx_rsize)
{
dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
rx_ring->dma_rx, rx_ring->dma_rx_phy);
kfree(rx_ring->rx_skbuff_dma);
kfree(rx_ring->rx_skbuff);
}
/**
* init_rx_ring - init the RX descriptor ring
* @dev: net device structure
* @queue_no: queue number
* @rx_ring: ring to be initialised
* @rx_rsize: ring size
* Description: this function initializes the DMA RX descriptors
*/
static int init_rx_ring(struct net_device *dev, u8 queue_no,
struct sxgbe_rx_queue *rx_ring, int rx_rsize)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
int desc_index;
unsigned int bfsize = 0;
unsigned int ret = 0;
/* Set the max buffer size according to the MTU. */
bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);
/* RX ring is not allocated */
if (rx_ring == NULL) {
netdev_err(dev, "No memory for RX queue\n");
return -ENOMEM;
}
/* assign queue number */
rx_ring->queue_no = queue_no;
/* allocate memory for RX descriptors */
rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
&rx_ring->dma_rx_phy, GFP_KERNEL);
if (rx_ring->dma_rx == NULL)
return -ENOMEM;
/* allocate memory for RX skbuff array */
rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
sizeof(dma_addr_t), GFP_KERNEL);
if (!rx_ring->rx_skbuff_dma) {
ret = -ENOMEM;
goto err_free_dma_rx;
}
rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
sizeof(struct sk_buff *), GFP_KERNEL);
if (!rx_ring->rx_skbuff) {
ret = -ENOMEM;
goto err_free_skbuff_dma;
}
/* initialise the buffers */
for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
struct sxgbe_rx_norm_desc *p;
p = rx_ring->dma_rx + desc_index;
ret = sxgbe_init_rx_buffers(dev, p, desc_index,
bfsize, rx_ring);
if (ret)
goto err_free_rx_buffers;
}
/* initialise counters */
rx_ring->cur_rx = 0;
rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
priv->dma_buf_sz = bfsize;
return 0;
err_free_rx_buffers:
while (--desc_index >= 0) {
struct sxgbe_rx_norm_desc *p;
p = rx_ring->dma_rx + desc_index;
sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
}
kfree(rx_ring->rx_skbuff);
err_free_skbuff_dma:
kfree(rx_ring->rx_skbuff_dma);
err_free_dma_rx:
dma_free_coherent(priv->device,
rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
rx_ring->dma_rx, rx_ring->dma_rx_phy);
return ret;
}
/**
* free_tx_ring - free the TX descriptor ring
* @dev: device structure
* @tx_ring: ring to be freed
* @tx_rsize: ring size
* Description: this function frees the DMA TX descriptor ring
*/
static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
int tx_rsize)
{
dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
tx_ring->dma_tx, tx_ring->dma_tx_phy);
}
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @netd: net device structure
* Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
static int init_dma_desc_rings(struct net_device *netd)
{
int queue_num, ret;
struct sxgbe_priv_data *priv = netdev_priv(netd);
int tx_rsize = priv->dma_tx_size;
int rx_rsize = priv->dma_rx_size;
/* Allocate memory for queue structures and TX descs */
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
ret = init_tx_ring(priv->device, queue_num,
priv->txq[queue_num], tx_rsize);
if (ret) {
dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
goto txalloc_err;
}
/* save private pointer in each ring; this
* pointer is needed when cleaning the TX queue
*/
priv->txq[queue_num]->priv_ptr = priv;
}
/* Allocate memory for queue structures and RX descs */
SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
ret = init_rx_ring(netd, queue_num,
priv->rxq[queue_num], rx_rsize);
if (ret) {
netdev_err(netd, "RX DMA ring allocation failed!!\n");
goto rxalloc_err;
}
/* save private pointer in each ring; this
* pointer is needed when cleaning the RX queue
*/
priv->rxq[queue_num]->priv_ptr = priv;
}
sxgbe_clear_descriptors(priv);
return 0;
txalloc_err:
while (queue_num--)
free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
return ret;
rxalloc_err:
while (queue_num--)
free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
/* also release the TX rings allocated above so they are not leaked */
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
return ret;
}
static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
{
int dma_desc;
struct sxgbe_priv_data *priv = txqueue->priv_ptr;
int tx_rsize = priv->dma_tx_size;
for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;
if (txqueue->tx_skbuff_dma[dma_desc])
dma_unmap_single(priv->device,
txqueue->tx_skbuff_dma[dma_desc],
priv->hw->desc->get_tx_len(tdesc),
DMA_TO_DEVICE);
dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
txqueue->tx_skbuff[dma_desc] = NULL;
txqueue->tx_skbuff_dma[dma_desc] = 0;
}
}
static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
{
int queue_num;
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
tx_free_ring_skbufs(tqueue);
}
}
static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
{
int queue_num;
int tx_rsize = priv->dma_tx_size;
int rx_rsize = priv->dma_rx_size;
/* Release the DMA TX buffers */
dma_free_tx_skbufs(priv);
/* Release the TX ring memory also */
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
}
/* Release the RX ring memory also */
SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
}
}
static int txring_mem_alloc(struct sxgbe_priv_data *priv)
{
int queue_num;
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
priv->txq[queue_num] = devm_kmalloc(priv->device,
sizeof(struct sxgbe_tx_queue), GFP_KERNEL);
if (!priv->txq[queue_num])
return -ENOMEM;
}
return 0;
}
static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
{
int queue_num;
SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
priv->rxq[queue_num] = devm_kmalloc(priv->device,
sizeof(struct sxgbe_rx_queue), GFP_KERNEL);
if (!priv->rxq[queue_num])
return -ENOMEM;
}
return 0;
}
/**
* sxgbe_mtl_operation_mode - HW MTL operation mode
* @priv: driver private structure
* Description: it sets the MTL operation mode: tx/rx MTL thresholds
* or Store-And-Forward capability.
*/
static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
{
int queue_num;
/* TX/RX threshold control */
if (likely(priv->plat->force_sf_dma_mode)) {
/* set TC mode for TX QUEUES */
SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
SXGBE_MTL_SFMODE);
priv->tx_tc = SXGBE_MTL_SFMODE;
/* set TC mode for RX QUEUES */
SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
SXGBE_MTL_SFMODE);
priv->rx_tc = SXGBE_MTL_SFMODE;
} else if (unlikely(priv->plat->force_thresh_dma_mode)) {
/* set TC mode for TX QUEUES */
SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
priv->tx_tc);
/* set TC mode for RX QUEUES */
SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
priv->rx_tc);
} else {
pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
}
}
/**
* sxgbe_tx_queue_clean:
* @tqueue: TX queue to be cleaned
* Description: it reclaims resources after transmission completes.
*/
static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
{
struct sxgbe_priv_data *priv = tqueue->priv_ptr;
unsigned int tx_rsize = priv->dma_tx_size;
struct netdev_queue *dev_txq;
u8 queue_no = tqueue->queue_no;
dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
spin_lock(&tqueue->tx_lock);
priv->xstats.tx_clean++;
while (tqueue->dirty_tx != tqueue->cur_tx) {
unsigned int entry = tqueue->dirty_tx % tx_rsize;
struct sk_buff *skb = tqueue->tx_skbuff[entry];
struct sxgbe_tx_norm_desc *p;
p = tqueue->dma_tx + entry;
/* Check if the descriptor is owned by the DMA. */
if (priv->hw->desc->get_tx_owner(p))
break;
if (netif_msg_tx_done(priv))
pr_debug("%s: curr %d, dirty %d\n",
__func__, tqueue->cur_tx, tqueue->dirty_tx);
if (likely(tqueue->tx_skbuff_dma[entry])) {
dma_unmap_single(priv->device,
tqueue->tx_skbuff_dma[entry],
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
tqueue->tx_skbuff_dma[entry] = 0;
}
if (likely(skb)) {
dev_kfree_skb(skb);
tqueue->tx_skbuff[entry] = NULL;
}
priv->hw->desc->release_tx_desc(p);
tqueue->dirty_tx++;
}
/* wake up queue */
if (unlikely(netif_tx_queue_stopped(dev_txq) &&
sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
netif_tx_lock(priv->dev);
if (netif_tx_queue_stopped(dev_txq) &&
sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
if (netif_msg_tx_done(priv))
pr_debug("%s: restart transmit\n", __func__);
netif_tx_wake_queue(dev_txq);
}
netif_tx_unlock(priv->dev);
}
spin_unlock(&tqueue->tx_lock);
}
/**
* sxgbe_tx_all_clean:
* @priv: driver private structure
* Description: it reclaims resources after transmission completes.
*/
static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
{
u8 queue_num;
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
sxgbe_tx_queue_clean(tqueue);
}
if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
sxgbe_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
}
}
/**
* sxgbe_restart_tx_queue: irq tx error mng function
* @priv: driver private structure
* Description: it cleans the descriptors and restarts the transmission
* in case of errors.
*/
static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
{
struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
queue_num);
/* stop the queue */
netif_tx_stop_queue(dev_txq);
/* stop the tx dma */
priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);
/* free the skbuffs of the ring */
tx_free_ring_skbufs(tx_ring);
/* initialise counters */
tx_ring->cur_tx = 0;
tx_ring->dirty_tx = 0;
/* start the tx dma */
priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);
priv->dev->stats.tx_errors++;
/* wakeup the queue */
netif_tx_wake_queue(dev_txq);
}
/**
* sxgbe_reset_all_tx_queues: irq tx error mng function
* @priv: driver private structure
* Description: it cleans all the descriptors and
* restarts the transmission on all queues in case of errors.
*/
static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
{
int queue_num;
/* On TX timeout of net device, resetting of all queues
* may not be proper way, revisit this later if needed
*/
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
sxgbe_restart_tx_queue(priv, queue_num);
}
/**
* sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
* @priv: driver private structure
* Description:
* newer SXGBE chip generations have a register that indicates the
* presence of optional features/functions.
* This can also be used to override the value passed through the
* platform code.
*/
static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
{
int rval = 0;
struct sxgbe_hw_features *features = &priv->hw_cap;
/* Read First Capability Register CAP[0] */
rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
if (rval) {
features->pmt_remote_wake_up =
SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
features->tx_csum_offload =
SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
features->rx_csum_offload =
SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
features->eee = SXGBE_HW_FEAT_EEE(rval);
}
/* Read Second Capability Register CAP[1] */
rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
if (rval) {
features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
}
/* Read Third Capability Register CAP[2] */
rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
if (rval) {
features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
}
return rval;
}
/**
* sxgbe_check_ether_addr: check if the MAC addr is valid
* @priv: driver private structure
* Description:
* it verifies that the MAC address is valid; on failure it reads the
* address from the hardware and, failing that, generates a random one
*/
static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
{
if (!is_valid_ether_addr(priv->dev->dev_addr)) {
priv->hw->mac->get_umac_addr((void __iomem *)
priv->ioaddr,
priv->dev->dev_addr, 0);
if (!is_valid_ether_addr(priv->dev->dev_addr))
eth_hw_addr_random(priv->dev);
}
dev_info(priv->device, "device MAC address %pM\n",
priv->dev->dev_addr);
}
/**
* sxgbe_init_dma_engine: DMA init.
* @priv: driver private structure
* Description:
* It inits the DMA invoking the specific SXGBE callback.
* Some DMA parameters can be passed from the platform;
* if they are not passed, sensible defaults are used.
*/
static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
{
int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
int queue_num;
if (priv->plat->dma_cfg) {
pbl = priv->plat->dma_cfg->pbl;
fixed_burst = priv->plat->dma_cfg->fixed_burst;
burst_map = priv->plat->dma_cfg->burst_map;
}
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
priv->hw->dma->cha_init(priv->ioaddr, queue_num,
fixed_burst, pbl,
(priv->txq[queue_num])->dma_tx_phy,
(priv->rxq[queue_num])->dma_rx_phy,
priv->dma_tx_size, priv->dma_rx_size);
return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
}
/**
* sxgbe_init_mtl_engine: MTL init.
* @priv: driver private structure
* Description:
* It inits the MTL invoking the specific SXGBE callback.
*/
static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
{
int queue_num;
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
priv->hw_cap.tx_mtl_qsize);
priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
}
}
/**
* sxgbe_disable_mtl_engine: MTL disable.
* @priv: driver private structure
* Description:
* It disables the MTL queues by invoking the specific SXGBE callback.
*/
static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
{
int queue_num;
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
}
/**
* sxgbe_tx_timer: mitigation sw timer for tx.
* @data: data pointer
* Description:
* This is the timer handler that directly invokes sxgbe_tx_queue_clean().
*/
static void sxgbe_tx_timer(unsigned long data)
{
struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data;
sxgbe_tx_queue_clean(p);
}
/**
* sxgbe_tx_init_coalesce: init tx mitigation options.
* @priv: driver private structure
* Description:
* This inits the transmit coalesce parameters: i.e. timer rate,
* timer handler and default threshold used for enabling the
* interrupt on completion bit.
*/
static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
{
u8 queue_num;
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
struct sxgbe_tx_queue *p = priv->txq[queue_num];
p->tx_coal_frames = SXGBE_TX_FRAMES;
p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
setup_timer(&p->txtimer, sxgbe_tx_timer,
(unsigned long)&priv->txq[queue_num]);
p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
add_timer(&p->txtimer);
}
}
static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
{
u8 queue_num;
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
struct sxgbe_tx_queue *p = priv->txq[queue_num];
del_timer_sync(&p->txtimer);
}
}
/**
* sxgbe_open - open entry point of the driver
* @dev : pointer to the device structure.
* Description:
* This function is the open entry point of the driver.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
static int sxgbe_open(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
int ret, queue_num;
clk_prepare_enable(priv->sxgbe_clk);
sxgbe_check_ether_addr(priv);
/* Init the phy */
ret = sxgbe_init_phy(dev);
if (ret) {
netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
__func__, ret);
goto phy_error;
}
/* Create and initialize the TX/RX descriptors chains. */
priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
priv->tx_tc = TC_DEFAULT;
priv->rx_tc = TC_DEFAULT;
ret = init_dma_desc_rings(dev);
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptor ring init failed\n", __func__);
goto dma_desc_error;
}
/* DMA initialization and SW reset */
ret = sxgbe_init_dma_engine(priv);
if (ret < 0) {
netdev_err(dev, "%s: DMA initialization failed\n", __func__);
goto init_error;
}
/* MTL initialization */
sxgbe_init_mtl_engine(priv);
/* Copy the MAC addr into the HW */
priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->ioaddr);
SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
}
/* Request the IRQ lines */
ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, priv->irq, ret);
goto init_error;
}
/* If the LPI irq is different from the mac irq
* register a dedicated handler
*/
if (priv->lpi_irq != dev->irq) {
ret = devm_request_irq(priv->device, priv->lpi_irq,
sxgbe_common_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
__func__, priv->lpi_irq, ret);
goto init_error;
}
}
/* Request TX DMA irq lines */
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
ret = devm_request_irq(priv->device,
(priv->txq[queue_num])->irq_no,
sxgbe_tx_interrupt, 0,
dev->name, priv->txq[queue_num]);
if (unlikely(ret < 0)) {
netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
__func__, priv->irq, ret);
goto init_error;
}
}
/* Request RX DMA irq lines */
SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
ret = devm_request_irq(priv->device,
(priv->rxq[queue_num])->irq_no,
sxgbe_rx_interrupt, 0,
dev->name, priv->rxq[queue_num]);
if (unlikely(ret < 0)) {
netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
__func__, priv->irq, ret);
goto init_error;
}
}
/* Enable the MAC Rx/Tx */
priv->hw->mac->enable_tx(priv->ioaddr, true);
priv->hw->mac->enable_rx(priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
sxgbe_mtl_operation_mode(priv);
/* Extra statistics */
memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));
priv->xstats.tx_threshold = priv->tx_tc;
priv->xstats.rx_threshold = priv->rx_tc;
/* Start the ball rolling... */
netdev_dbg(dev, "DMA RX/TX processes started...\n");
priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
if (priv->phydev)
phy_start(priv->phydev);
/* initialise TX coalesce parameters */
sxgbe_tx_init_coalesce(priv);
if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
}
priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
priv->eee_enabled = sxgbe_eee_init(priv);
napi_enable(&priv->napi);
netif_start_queue(dev);
return 0;
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
if (priv->phydev)
phy_disconnect(priv->phydev);
phy_error:
clk_disable_unprepare(priv->sxgbe_clk);
return ret;
}
/**
* sxgbe_release - close entry point of the driver
* @dev : device pointer.
* Description:
* This is the stop entry point of the driver.
*/
static int sxgbe_release(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
if (priv->eee_enabled)
del_timer_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
if (priv->phydev) {
phy_stop(priv->phydev);
phy_disconnect(priv->phydev);
priv->phydev = NULL;
}
netif_tx_stop_all_queues(dev);
napi_disable(&priv->napi);
/* delete TX timers */
sxgbe_tx_del_timer(priv);
/* Stop TX/RX DMA and clear the descriptors */
priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
/* disable MTL queue */
sxgbe_disable_mtl_engine(priv);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
/* Disable the MAC Rx/Tx */
priv->hw->mac->enable_tx(priv->ioaddr, false);
priv->hw->mac->enable_rx(priv->ioaddr, false);
clk_disable_unprepare(priv->sxgbe_clk);
return 0;
}
/* Prepare first Tx descriptor for doing TSO operation */
static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
struct sxgbe_tx_norm_desc *first_desc,
struct sk_buff *skb)
{
unsigned int total_hdr_len, tcp_hdr_len;
/* Write first Tx descriptor with appropriate value */
tcp_hdr_len = tcp_hdrlen(skb);
total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;
first_desc->tdes01 = dma_map_single(priv->device, skb->data,
total_hdr_len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, first_desc->tdes01))
pr_err("%s: TX dma mapping failed!!\n", __func__);
first_desc->tdes23.tx_rd_des23.first_desc = 1;
priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
tcp_hdr_len,
skb->len - total_hdr_len);
}
/**
* sxgbe_xmit: Tx entry point of the driver
* @skb : the socket buffer
* @dev : device pointer
* Description : this is the tx entry point of the driver.
* It programs the chain or the ring and supports oversized frames
* and SG feature.
*/
static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned int entry, frag_num;
int cksum_flag = 0;
struct netdev_queue *dev_txq;
unsigned txq_index = skb_get_queue_mapping(skb);
struct sxgbe_priv_data *priv = netdev_priv(dev);
unsigned int tx_rsize = priv->dma_tx_size;
struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
int nr_frags = skb_shinfo(skb)->nr_frags;
int no_pagedlen = skb_headlen(skb);
int is_jumbo = 0;
u16 cur_mss = skb_shinfo(skb)->gso_size;
u32 ctxt_desc_req = 0;
/* get the TX queue handle */
dev_txq = netdev_get_tx_queue(dev, txq_index);
if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
ctxt_desc_req = 1;
if (unlikely(skb_vlan_tag_present(skb) ||
((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
tqueue->hwts_tx_en)))
ctxt_desc_req = 1;
/* get the spinlock */
spin_lock(&tqueue->tx_lock);
if (priv->tx_path_in_lpi_mode)
sxgbe_disable_eee_mode(priv);
if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
if (!netif_tx_queue_stopped(dev_txq)) {
netif_tx_stop_queue(dev_txq);
netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
__func__, txq_index);
}
/* release the spin lock in case of BUSY */
spin_unlock(&tqueue->tx_lock);
return NETDEV_TX_BUSY;
}
entry = tqueue->cur_tx % tx_rsize;
tx_desc = tqueue->dma_tx + entry;
first_desc = tx_desc;
if (ctxt_desc_req)
ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;
/* save the skb address */
tqueue->tx_skbuff[entry] = skb;
if (!is_jumbo) {
if (likely(skb_is_gso(skb))) {
/* TSO support */
if (unlikely(tqueue->prev_mss != cur_mss)) {
priv->hw->desc->tx_ctxt_desc_set_mss(
ctxt_desc, cur_mss);
priv->hw->desc->tx_ctxt_desc_set_tcmssv(
ctxt_desc);
priv->hw->desc->tx_ctxt_desc_reset_ostc(
ctxt_desc);
priv->hw->desc->tx_ctxt_desc_set_ctxt(
ctxt_desc);
priv->hw->desc->tx_ctxt_desc_set_owner(
ctxt_desc);
entry = (++tqueue->cur_tx) % tx_rsize;
first_desc = tqueue->dma_tx + entry;
tqueue->prev_mss = cur_mss;
}
sxgbe_tso_prepare(priv, first_desc, skb);
} else {
tx_desc->tdes01 = dma_map_single(priv->device,
skb->data, no_pagedlen, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, tx_desc->tdes01))
netdev_err(dev, "%s: TX dma mapping failed!!\n",
__func__);
priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
no_pagedlen, cksum_flag);
}
}
for (frag_num = 0; frag_num < nr_frags; frag_num++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
int len = skb_frag_size(frag);
entry = (++tqueue->cur_tx) % tx_rsize;
tx_desc = tqueue->dma_tx + entry;
tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
tqueue->tx_skbuff[entry] = NULL;
/* prepare the descriptor */
priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
len, cksum_flag);
/* memory barrier to flush descriptor */
wmb();
/* set the owner */
priv->hw->desc->set_tx_owner(tx_desc);
}
/* close the descriptors */
priv->hw->desc->close_tx_desc(tx_desc);
/* memory barrier to flush descriptor */
wmb();
tqueue->tx_count_frames += nr_frags + 1;
if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
priv->hw->desc->clear_tx_ic(tx_desc);
priv->xstats.tx_reset_ic_bit++;
mod_timer(&tqueue->txtimer,
SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
} else {
tqueue->tx_count_frames = 0;
}
/* set owner for first desc */
priv->hw->desc->set_tx_owner(first_desc);
/* memory barrier to flush descriptor */
wmb();
tqueue->cur_tx++;
/* display current ring */
netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
__func__, tqueue->cur_tx % tx_rsize,
tqueue->dirty_tx % tx_rsize, entry,
first_desc, nr_frags);
if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
__func__);
netif_tx_stop_queue(dev_txq);
}
dev->stats.tx_bytes += skb->len;
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
tqueue->hwts_tx_en)) {
/* declare that device is doing timestamping */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
priv->hw->desc->tx_enable_tstamp(first_desc);
}
if (!tqueue->hwts_tx_en)
skb_tx_timestamp(skb);
priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
spin_unlock(&tqueue->tx_lock);
return NETDEV_TX_OK;
}
/**
* sxgbe_rx_refill: refill used skb preallocated buffers
* @priv: driver private structure
* Description : this is to reallocate the skb for the reception process
* that is based on zero-copy.
*/
static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
{
unsigned int rxsize = priv->dma_rx_size;
int bfsize = priv->dma_buf_sz;
u8 qnum = priv->cur_rx_qnum;
for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
priv->rxq[qnum]->dirty_rx++) {
unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
struct sxgbe_rx_norm_desc *p;
p = priv->rxq[qnum]->dma_rx + entry;
if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
if (unlikely(skb == NULL))
break;
priv->rxq[qnum]->rx_skbuff[entry] = skb;
priv->rxq[qnum]->rx_skbuff_dma[entry] =
dma_map_single(priv->device, skb->data, bfsize,
DMA_FROM_DEVICE);
p->rdes23.rx_rd_des23.buf2_addr =
priv->rxq[qnum]->rx_skbuff_dma[entry];
}
/* Added memory barrier for RX descriptor modification */
wmb();
priv->hw->desc->set_rx_owner(p);
priv->hw->desc->set_rx_int_on_com(p);
/* Added memory barrier for RX descriptor modification */
wmb();
}
}
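/* Worked example (illustrative values): with rxsize = 1024,
* dirty_rx = 96 and cur_rx = 100, the loop above refills entries
* 96..99 and advances dirty_rx to 100, handing ownership of those
* descriptors back to the DMA engine.
*/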
/**
* sxgbe_rx: receive the frames from the remote host
* @priv: driver private structure
* @limit: napi budget.
* Description: this is the function called by the napi poll method.
* It gets all the frames inside the ring.
*/
static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
{
u8 qnum = priv->cur_rx_qnum;
unsigned int rxsize = priv->dma_rx_size;
unsigned int entry = priv->rxq[qnum]->cur_rx;
unsigned int next_entry = 0;
unsigned int count = 0;
int checksum;
int status;
while (count < limit) {
struct sxgbe_rx_norm_desc *p;
struct sk_buff *skb;
int frame_len;
p = priv->rxq[qnum]->dma_rx + entry;
if (priv->hw->desc->get_rx_owner(p))
break;
count++;
next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
prefetch(priv->rxq[qnum]->dma_rx + next_entry);
/* Read the status of the incoming frame and also get checksum
* value based on whether it is enabled in SXGBE hardware or
* not.
*/
status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
&checksum);
if (unlikely(status < 0)) {
entry = next_entry;
continue;
}
if (unlikely(!priv->rxcsum_insertion))
checksum = CHECKSUM_NONE;
skb = priv->rxq[qnum]->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(priv->dev, "rx descriptor is not consistent\n");
break;
}
prefetch(skb->data - NET_IP_ALIGN);
priv->rxq[qnum]->rx_skbuff[entry] = NULL;
frame_len = priv->hw->desc->get_rx_frame_len(p);
skb_put(skb, frame_len);
skb->ip_summed = checksum;
if (checksum == CHECKSUM_NONE)
netif_receive_skb(skb);
else
napi_gro_receive(&priv->napi, skb);
entry = next_entry;
}
sxgbe_rx_refill(priv);
return count;
}
/**
* sxgbe_poll - sxgbe poll method (NAPI)
* @napi : pointer to the napi structure.
* @budget : maximum number of packets that the current CPU can receive from
* all interfaces.
* Description :
* To look at the incoming frames and clear the tx resources.
*/
static int sxgbe_poll(struct napi_struct *napi, int budget)
{
struct sxgbe_priv_data *priv = container_of(napi,
struct sxgbe_priv_data, napi);
int work_done = 0;
u8 qnum = priv->cur_rx_qnum;
priv->xstats.napi_poll++;
/* first, clean the tx queues */
sxgbe_tx_all_clean(priv);
work_done = sxgbe_rx(priv, budget);
if (work_done < budget) {
napi_complete(napi);
priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
}
return work_done;
}
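/* NAPI contract illustrated: when fewer than "budget" packets are
* processed, the poll completes and the RX DMA interrupt for this
* queue is re-enabled; otherwise the core keeps calling sxgbe_poll()
* with the interrupt still masked.
*/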
/**
* sxgbe_tx_timeout
* @dev : Pointer to net device structure
* Description: this function is called when a packet transmission fails to
* complete within a reasonable time. The driver will mark the error in the
* netdev structure and arrange for the device to be reset to a sane state
* in order to transmit a new packet.
*/
static void sxgbe_tx_timeout(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
sxgbe_reset_all_tx_queues(priv);
}
/**
* sxgbe_common_interrupt - main ISR
* @irq: interrupt number.
* @dev_id: to pass the net device pointer.
* Description: this is the main driver interrupt service routine.
* It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
* interrupts.
*/
static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
{
struct net_device *netdev = (struct net_device *)dev_id;
struct sxgbe_priv_data *priv = netdev_priv(netdev);
int status;
status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
/* For LPI we need to save the tx status */
if (status & TX_ENTRY_LPI_MODE) {
priv->xstats.tx_lpi_entry_n++;
priv->tx_path_in_lpi_mode = true;
}
if (status & TX_EXIT_LPI_MODE) {
priv->xstats.tx_lpi_exit_n++;
priv->tx_path_in_lpi_mode = false;
}
if (status & RX_ENTRY_LPI_MODE)
priv->xstats.rx_lpi_entry_n++;
if (status & RX_EXIT_LPI_MODE)
priv->xstats.rx_lpi_exit_n++;
return IRQ_HANDLED;
}
/**
* sxgbe_tx_interrupt - TX DMA ISR
* @irq: interrupt number.
* @dev_id: to pass the net device pointer.
* Description: this is the tx dma interrupt service routine.
*/
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
{
int status;
struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
struct sxgbe_priv_data *priv = txq->priv_ptr;
/* get the channel status */
status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
&priv->xstats);
/* check for normal path */
if (likely((status & handle_tx)))
napi_schedule(&priv->napi);
/* check for unrecoverable error */
if (unlikely((status & tx_hard_error)))
sxgbe_restart_tx_queue(priv, txq->queue_no);
/* check for TC configuration change */
if (unlikely((status & tx_bump_tc) &&
(priv->tx_tc != SXGBE_MTL_SFMODE) &&
(priv->tx_tc < 512))) {
/* step of TX TC is 32 till 128, otherwise 64 */
priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
txq->queue_no, priv->tx_tc);
priv->xstats.tx_threshold = priv->tx_tc;
}
return IRQ_HANDLED;
}
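/* TC bump example for the logic above (illustrative): starting from
* TC_DEFAULT = 64 the threshold grows 64 -> 96 -> 128 -> 192 -> 256
* -> ..., i.e. in steps of 32 below 128 and of 64 from 128 upwards,
* bounded by the tx_tc < 512 check.
*/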
/**
* sxgbe_rx_interrupt - RX DMA ISR
* @irq: interrupt number.
* @dev_id: to pass the net device pointer.
* Description: this is the rx dma interrupt service routine.
*/
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
{
int status;
struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
struct sxgbe_priv_data *priv = rxq->priv_ptr;
/* get the channel status */
status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
&priv->xstats);
if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
__napi_schedule(&priv->napi);
}
/* check for TC configuration change */
if (unlikely((status & rx_bump_tc) &&
(priv->rx_tc != SXGBE_MTL_SFMODE) &&
(priv->rx_tc < 128))) {
/* step of TC is 32 */
priv->rx_tc += 32;
priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
rxq->queue_no, priv->rx_tc);
priv->xstats.rx_threshold = priv->rx_tc;
}
return IRQ_HANDLED;
}
static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
{
u64 val = readl(ioaddr + reg_lo);
val |= ((u64)readl(ioaddr + reg_hi)) << 32;
return val;
}
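/* Worked example (illustrative values): if the LO register reads
* 0x00000010 and the HI register reads 0x00000002, the helper above
* returns 0x0000000200000010, i.e. the two 32-bit halves of an MMC
* counter combined into one 64-bit value.
*/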
/* sxgbe_get_stats64 - entry point to see statistical information of device
* @dev : device pointer.
* @stats : pointer to hold all the statistical information of device.
* Description:
* This function is a driver entry point called whenever device
* statistics are queried (e.g. via ifconfig). Statistics include the
* number of bytes sent or received, errors, etc.
* Return value:
* This function returns various statistical information of device.
*/
static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->ioaddr;
u64 count;
spin_lock(&priv->stats_lock);
/* Freeze the counter registers before reading value otherwise it may
* get updated by hardware while we are reading them
*/
writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);
stats->rx_bytes = sxgbe_get_stat64(ioaddr,
SXGBE_MMC_RXOCTETLO_GCNT_REG,
SXGBE_MMC_RXOCTETHI_GCNT_REG);
stats->rx_packets = sxgbe_get_stat64(ioaddr,
SXGBE_MMC_RXFRAMELO_GBCNT_REG,
SXGBE_MMC_RXFRAMEHI_GBCNT_REG);
stats->multicast = sxgbe_get_stat64(ioaddr,
SXGBE_MMC_RXMULTILO_GCNT_REG,
SXGBE_MMC_RXMULTIHI_GCNT_REG);
stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
SXGBE_MMC_RXCRCERRLO_REG,
SXGBE_MMC_RXCRCERRHI_REG);
stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
SXGBE_MMC_RXLENERRLO_REG,
SXGBE_MMC_RXLENERRHI_REG);
stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);
stats->tx_bytes = sxgbe_get_stat64(ioaddr,
SXGBE_MMC_TXOCTETLO_GCNT_REG,
SXGBE_MMC_TXOCTETHI_GCNT_REG);
count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
SXGBE_MMC_TXFRAMEHI_GBCNT_REG);
stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
SXGBE_MMC_TXFRAMEHI_GCNT_REG);
stats->tx_errors = count - stats->tx_errors;
stats->tx_packets = count;
stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
SXGBE_MMC_TXUFLWHI_GBCNT_REG);
writel(0, ioaddr + SXGBE_MMC_CTL_REG);
spin_unlock(&priv->stats_lock);
return stats;
}
/* sxgbe_set_features - entry point to set offload features of the device.
* @dev : device pointer.
* @features : features which are required to be set.
* Description:
* This function is a driver entry point and called by Linux kernel whenever
* any device features are set or reset by user.
* Return value:
* This function returns 0 after setting or resetting device features.
*/
static int sxgbe_set_features(struct net_device *dev,
netdev_features_t features)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_RXCSUM) {
if (features & NETIF_F_RXCSUM) {
priv->hw->mac->enable_rx_csum(priv->ioaddr);
priv->rxcsum_insertion = true;
} else {
priv->hw->mac->disable_rx_csum(priv->ioaddr);
priv->rxcsum_insertion = false;
}
}
return 0;
}
/* sxgbe_change_mtu - entry point to change MTU size for the device.
* @dev : device pointer.
* @new_mtu : the new MTU size for the device.
* Description: the Maximum Transfer Unit (MTU) is used by the network layer
* to drive packet transmission. Ethernet has an MTU of 1500 octets
* (ETH_DATA_LEN). This value can be changed with ifconfig.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
/* RFC 791, page 25, "Every internet module must be able to forward
* a datagram of 68 octets without further fragmentation."
*/
if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
MIN_MTU, MAX_MTU);
return -EINVAL;
}
/* Return if the buffer sizes will not change */
if (dev->mtu == new_mtu)
return 0;
dev->mtu = new_mtu;
if (!netif_running(dev))
return 0;
/* The receive ring buffer size depends on the MTU, so if the MTU
* changes the receive ring buffers must be reinitialised. Hence
* bring the interface down and back up again
*/
sxgbe_release(dev);
return sxgbe_open(dev);
}
static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n)
{
unsigned long data;
data = (addr[5] << 8) | addr[4];
/* For the MAC Addr registers we have to set the Address Enable (AE)
* bit that has no effect on the High Reg 0 where the bit 31 (MO)
* is RO.
*/
writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
}
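/* Worked example (illustrative; assumes SXGBE_HI_REG_AE is bit 31):
* for addr = 00:11:22:33:44:55 the high word is (0x55 << 8) | 0x44 =
* 0x5544, written as 0x80005544 once the AE bit is set, and the low
* word is (0x33 << 24) | (0x22 << 16) | (0x11 << 8) | 0x00 =
* 0x33221100.
*/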
/**
* sxgbe_set_rx_mode - entry point for setting different receive mode of
* a device. unicast, multicast addressing
* @dev : pointer to the device structure
* Description:
* This function is a driver entry point which gets called by the kernel
* whenever different receive mode like unicast, multicast and promiscuous
* must be enabled/disabled.
* Return value:
* void.
*/
static void sxgbe_set_rx_mode(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
unsigned int value = 0;
u32 mc_filter[2];
struct netdev_hw_addr *ha;
int reg = 1;
netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
__func__, netdev_mc_count(dev), netdev_uc_count(dev));
if (dev->flags & IFF_PROMISC) {
value = SXGBE_FRAME_FILTER_PR;
} else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
(dev->flags & IFF_ALLMULTI)) {
value = SXGBE_FRAME_FILTER_PM; /* pass all multi */
writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);
} else if (!netdev_mc_empty(dev)) {
/* Hash filter for multicast */
value = SXGBE_FRAME_FILTER_HMC;
memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
/* The upper 6 bits of the calculated CRC are used to
* index the contents of the hash table
*/
int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
/* The most significant bit determines the register to
* use (H/L) while the other 5 bits determine the bit
* within the register.
*/
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
}
/* Handle multiple unicast addresses (perfect filtering) */
if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES) {
/* Switch to promiscuous mode if more than 16 addrs
* are required
*/
value |= SXGBE_FRAME_FILTER_PR;
} else {
netdev_for_each_uc_addr(ha, dev) {
sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
reg++;
}
}
#ifdef FRAME_FILTER_DEBUG
/* Enable Receive all mode (to debug filtering_fail errors) */
value |= SXGBE_FRAME_FILTER_RA;
#endif
writel(value, ioaddr + SXGBE_FRAME_FILTER);
netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
readl(ioaddr + SXGBE_FRAME_FILTER),
readl(ioaddr + SXGBE_HASH_HIGH),
readl(ioaddr + SXGBE_HASH_LOW));
}
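/* Worked example of the hash filter computation above (illustrative,
* with an assumed CRC result purely for the sake of the numbers): if
* bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26 yields bit_nr = 37
* (0b100101), then bit_nr >> 5 = 1 selects mc_filter[1] (written to
* the HIGH register) and bit_nr & 31 = 5 selects bit 5 within it.
*/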
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
* sxgbe_poll_controller - entry point for polling receive by device
* @dev : pointer to the device structure
* Description:
* This function is used by NETCONSOLE and other diagnostic tools
* to allow network I/O with interrupts disabled.
* Return value:
* Void.
*/
static void sxgbe_poll_controller(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
disable_irq(priv->irq);
sxgbe_rx_interrupt(priv->irq, dev);
enable_irq(priv->irq);
}
#endif
/* sxgbe_ioctl - Entry point for the Ioctl
* @dev: Device pointer.
* @rq: An IOCTL specific structure, that can contain a pointer to
* a proprietary structure used to pass information to the driver.
* @cmd: IOCTL command
* Description:
* Currently it supports the phy_mii_ioctl(...) and HW time stamping.
*/
static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
int ret = -EOPNOTSUPP;
if (!netif_running(dev))
return -EINVAL;
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
if (!priv->phydev)
return -EINVAL;
ret = phy_mii_ioctl(priv->phydev, rq, cmd);
break;
default:
break;
}
return ret;
}
static const struct net_device_ops sxgbe_netdev_ops = {
.ndo_open = sxgbe_open,
.ndo_start_xmit = sxgbe_xmit,
.ndo_stop = sxgbe_release,
.ndo_get_stats64 = sxgbe_get_stats64,
.ndo_change_mtu = sxgbe_change_mtu,
.ndo_set_features = sxgbe_set_features,
.ndo_set_rx_mode = sxgbe_set_rx_mode,
.ndo_tx_timeout = sxgbe_tx_timeout,
.ndo_do_ioctl = sxgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sxgbe_poll_controller,
#endif
.ndo_set_mac_address = eth_mac_addr,
};
/* Get the hardware ops */
static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
{
ops_ptr->mac = sxgbe_get_core_ops();
ops_ptr->desc = sxgbe_get_desc_ops();
ops_ptr->dma = sxgbe_get_dma_ops();
ops_ptr->mtl = sxgbe_get_mtl_ops();
/* set the MDIO communication Address/Data registers */
ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG;
ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG;
/* Assign the default link settings: SXGBE defines no default
* values to be set in registers, so use 0 for port and duplex
*/
ops_ptr->link.port = 0;
ops_ptr->link.duplex = 0;
ops_ptr->link.speed = SXGBE_SPEED_10G;
}
/**
* sxgbe_hw_init - Init the GMAC device
* @priv: driver private structure
* Description: this function checks the HW capability
* (if supported) and sets the driver's features.
*/
static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
{
u32 ctrl_ids;
priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
if (!priv->hw)
return -ENOMEM;
/* get the hardware ops */
sxgbe_get_ops(priv->hw);
/* get the controller id */
ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
priv->hw->ctrl_uid, priv->hw->ctrl_id);
/* get the H/W features */
if (!sxgbe_get_hw_features(priv))
pr_info("Hardware features not found\n");
if (priv->hw_cap.tx_csum_offload)
pr_info("TX Checksum offload supported\n");
if (priv->hw_cap.rx_csum_offload)
pr_info("RX Checksum offload supported\n");
return 0;
}
static int sxgbe_sw_reset(void __iomem *addr)
{
int retry_count = 10;
writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
while (retry_count--) {
if (!(readl(addr + SXGBE_DMA_MODE_REG) &
SXGBE_DMA_SOFT_RESET))
break;
mdelay(10);
}
if (retry_count < 0)
return -EBUSY;
return 0;
}
/**
* sxgbe_drv_probe
* @device: device pointer
* @plat_dat: platform data pointer
* @addr: iobase memory address
* Description: this is the main probe function, used to call
* alloc_etherdev and allocate/set up the private structure.
*/
struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
struct sxgbe_plat_data *plat_dat,
void __iomem *addr)
{
struct sxgbe_priv_data *priv;
struct net_device *ndev;
int ret;
u8 queue_num;
ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
if (!ndev)
return NULL;
SET_NETDEV_DEV(ndev, device);
priv = netdev_priv(ndev);
priv->device = device;
priv->dev = ndev;
sxgbe_set_ethtool_ops(ndev);
priv->plat = plat_dat;
priv->ioaddr = addr;
ret = sxgbe_sw_reset(priv->ioaddr);
if (ret)
goto error_free_netdev;
/* Verify driver arguments */
sxgbe_verify_args();
/* Init MAC and get the capabilities */
ret = sxgbe_hw_init(priv);
if (ret)
goto error_free_netdev;
/* allocate memory resources for Descriptor rings */
ret = txring_mem_alloc(priv);
if (ret)
goto error_free_hw;
ret = rxring_mem_alloc(priv);
if (ret)
goto error_free_hw;
ndev->netdev_ops = &sxgbe_netdev_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GRO;
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);
/* assign filtering support */
ndev->priv_flags |= IFF_UNICAST_FLT;
priv->msg_enable = netif_msg_init(debug, default_msg_level);
/* Enable TCP segmentation offload for all DMA channels */
if (priv->hw_cap.tcpseg_offload) {
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
}
}
/* Enable Rx checksum offload */
if (priv->hw_cap.rx_csum_offload) {
priv->hw->mac->enable_rx_csum(priv->ioaddr);
priv->rxcsum_insertion = true;
}
/* Initialise pause frame settings */
priv->rx_pause = 1;
priv->tx_pause = 1;
/* Rx Watchdog is available; enabling it depends on the platform data */
if (!priv->plat->riwt_off) {
priv->use_riwt = 1;
pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
}
netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
spin_lock_init(&priv->stats_lock);
priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
if (IS_ERR(priv->sxgbe_clk)) {
netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
__func__);
goto error_napi_del;
}
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and is fixed. Otherwise the driver will try to
* set the MDC clock dynamically according to the actual csr
* clock input.
*/
if (!priv->plat->clk_csr)
sxgbe_clk_csr_set(priv);
else
priv->clk_csr = priv->plat->clk_csr;
/* MDIO bus Registration */
ret = sxgbe_mdio_register(ndev);
if (ret < 0) {
netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
__func__, priv->plat->bus_id);
goto error_clk_put;
}
ret = register_netdev(ndev);
if (ret) {
pr_err("%s: ERROR %i registering the device\n", __func__, ret);
goto error_mdio_unregister;
}
sxgbe_check_ether_addr(priv);
return priv;
error_mdio_unregister:
sxgbe_mdio_unregister(ndev);
error_clk_put:
clk_put(priv->sxgbe_clk);
error_napi_del:
netif_napi_del(&priv->napi);
error_free_hw:
kfree(priv->hw);
error_free_netdev:
free_netdev(ndev);
return NULL;
}
/**
* sxgbe_drv_remove
* @ndev: net device pointer
* Description: this function resets the TX/RX processes, disables the MAC
* RX/TX, changes the link status and releases the DMA descriptor rings.
*/
int sxgbe_drv_remove(struct net_device *ndev)
{
struct sxgbe_priv_data *priv = netdev_priv(ndev);
u8 queue_num;
netdev_info(ndev, "%s: removing driver\n", __func__);
SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
}
priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
priv->hw->mac->enable_tx(priv->ioaddr, false);
priv->hw->mac->enable_rx(priv->ioaddr, false);
unregister_netdev(ndev);
sxgbe_mdio_unregister(ndev);
clk_put(priv->sxgbe_clk);
netif_napi_del(&priv->napi);
kfree(priv->hw);
free_netdev(ndev);
return 0;
}
#ifdef CONFIG_PM
int sxgbe_suspend(struct net_device *ndev)
{
return 0;
}
int sxgbe_resume(struct net_device *ndev)
{
return 0;
}
int sxgbe_freeze(struct net_device *ndev)
{
return -ENOSYS;
}
int sxgbe_restore(struct net_device *ndev)
{
return -ENOSYS;
}
#endif /* CONFIG_PM */
/* Driver is configured as Platform driver */
static int __init sxgbe_init(void)
{
int ret;
ret = sxgbe_register_platform();
if (ret)
goto err;
return 0;
err:
pr_err("driver registration failed\n");
return ret;
}
static void __exit sxgbe_exit(void)
{
sxgbe_unregister_platform();
}
module_init(sxgbe_init);
module_exit(sxgbe_exit);
#ifndef MODULE
static int __init sxgbe_cmdline_opt(char *str)
{
char *opt;
if (!str || !*str)
return -EINVAL;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
}
return 0;
err:
pr_err("%s: ERROR broken module parameter conversion\n", __func__);
return -EINVAL;
}
__setup("sxgbeeth=", sxgbe_cmdline_opt);
#endif /* MODULE */
MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
blueskycoco/sam | arch/s390/kernel/traps.c | 2174 | 20838 | /*
* arch/s390/kernel/traps.c
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
*
* Derived from "arch/i386/kernel/traps.c"
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/tracehook.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include "entry.h"
pgm_check_handler_t *pgm_check_table[128];
int show_unhandled_signals;
extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_asce_exception;
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
/*
* For show_trace we have three different stacks to consider:
* - the panic stack which is used if the kernel stack has overflowed
* - the asynchronous interrupt stack (cpu related)
* - the synchronous kernel stack (process related)
* The stack trace can start at any of the three stacks and can potentially
* touch all of them. The order is: panic stack, async stack, sync stack.
*/
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
struct stack_frame *sf;
struct pt_regs *regs;
while (1) {
sp = sp & PSW_ADDR_INSN;
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
/* Follow the backchain. */
while (1) {
low = sp;
sp = sf->back_chain & PSW_ADDR_INSN;
if (!sp)
break;
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
if (sp <= low || sp > high - sizeof(*regs))
return sp;
regs = (struct pt_regs *) sp;
printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
low = sp;
sp = regs->gprs[15];
}
}
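/* Sketch of the s390 stack frame layout this walker relies on
* (descriptive only; see the s390 struct stack_frame definition):
* back_chain at offset 0 points to the caller's frame, and gprs[8]
* corresponds to the saved r14, i.e. the return address printed above.
* A zero back_chain marks the bottom of a stack, where an interrupt
* frame (struct pt_regs) may follow and the walk continues on its
* gprs[15].
*/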
static void show_trace(struct task_struct *task, unsigned long *stack)
{
register unsigned long __r15 asm ("15");
unsigned long sp;
sp = (unsigned long) stack;
if (!sp)
sp = task ? task->thread.ksp : __r15;
printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
S390_lowcore.panic_stack);
#endif
sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
S390_lowcore.async_stack);
if (task)
__show_trace(sp, (unsigned long) task_stack_page(task),
(unsigned long) task_stack_page(task) + THREAD_SIZE);
else
__show_trace(sp, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
if (!task)
task = current;
debug_show_held_locks(task);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
register unsigned long * __r15 asm ("15");
unsigned long *stack;
int i;
if (!sp)
stack = task ? (unsigned long *) task->thread.ksp : __r15;
else
stack = sp;
for (i = 0; i < kstack_depth_to_print; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break;
if (i && ((i * sizeof (long) % 32) == 0))
printk("\n ");
printk(LONG, *stack++);
}
printk("\n");
show_trace(task, sp);
}
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
printk("Last Breaking-Event-Address:\n");
printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}
/*
* The architecture-independent dump_stack generator
*/
void dump_stack(void)
{
printk("CPU: %d %s %s %.*s\n",
task_thread_info(current)->cpu, print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
current->comm, current->pid, current,
(void *) current->thread.ksp);
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}
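/* Worked example (illustrative): mask_bits() divides by the lowest set
* bit of @bits, which is equivalent to shifting the field down to
* bit 0. E.g. for a hypothetical 2-bit field at bits 13-12,
* bits = 0x3000, so (~bits + 1) & bits = 0x1000 and
* (psw.mask & 0x3000) / 0x1000 yields the field value 0..3.
*/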
void show_registers(struct pt_regs *regs)
{
char *mode;
mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
printk("%s PSW : %p %p",
mode, (void *) regs->psw.mask,
(void *) regs->psw.addr);
print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
"P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
printk("\n%s GPRS: " FOURLONG, mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
printk(" " FOURLONG,
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
printk(" " FOURLONG,
regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
printk(" " FOURLONG,
regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
show_code(regs);
}
void show_regs(struct pt_regs *regs)
{
print_modules();
printk("CPU: %d %s %s %.*s\n",
task_thread_info(current)->cpu, print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
current->comm, current->pid, current,
(void *) current->thread.ksp);
show_registers(regs);
/* Show stack backtrace if pt_regs is from kernel mode */
if (!(regs->psw.mask & PSW_MASK_PSTATE))
show_trace(NULL, (unsigned long *) regs->gprs[15]);
show_last_breaking_event(regs);
}
static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
oops_enter();
debug_stop_all();
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
printk("DEBUG_PAGEALLOC");
#endif
printk("\n");
notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
show_regs(regs);
bust_spinlocks(0);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception: panic_on_oops");
oops_exit();
do_exit(SIGSEGV);
}
static inline void report_user_fault(struct pt_regs *regs, long int_code,
int signr)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
if (!unhandled_signal(current, signr))
return;
if (!printk_ratelimit())
return;
printk("User process fault: interruption code 0x%lX ", int_code);
print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
printk("\n");
show_regs(regs);
}
int is_valid_bugaddr(unsigned long addr)
{
return 1;
}
static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str,
struct pt_regs *regs, siginfo_t *info)
{
if (notify_die(DIE_TRAP, str, regs, pgm_int_code,
pgm_int_code, signr) == NOTIFY_STOP)
return;
if (regs->psw.mask & PSW_MASK_PSTATE) {
struct task_struct *tsk = current;
tsk->thread.trap_no = pgm_int_code & 0xffff;
force_sig_info(signr, info, tsk);
report_user_fault(regs, pgm_int_code, signr);
} else {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (fixup)
regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
else {
enum bug_trap_type btt;
btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
if (btt == BUG_TRAP_TYPE_WARN)
return;
die(str, regs, pgm_int_code);
}
}
}
static inline void __user *get_psw_address(struct pt_regs *regs,
long pgm_int_code)
{
return (void __user *)
((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
}
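/* Example (hedged; based on the program-interruption layout as used
* here): the upper halfword of pgm_int_code carries the instruction
* length in bytes, and psw.addr points past the faulting instruction,
* so e.g. with psw.addr = 0x1000a006 and pgm_int_code >> 16 == 4 the
* reported fault address is 0x1000a002.
*/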
void __kprobes do_per_trap(struct pt_regs *regs)
{
if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
return;
if (tracehook_consider_fatal_signal(current, SIGTRAP))
force_sig(SIGTRAP, current);
}
static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
unsigned long trans_exc_code)
{
if (regs->psw.mask & PSW_MASK_PSTATE) {
report_user_fault(regs, pgm_int_code, SIGSEGV);
do_exit(SIGSEGV);
} else
die("Unknown program exception", regs, pgm_int_code);
}
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs, long pgm_int_code, \
unsigned long trans_exc_code) \
{ \
siginfo_t info; \
info.si_signo = signr; \
info.si_errno = 0; \
info.si_code = sicode; \
info.si_addr = get_psw_address(regs, pgm_int_code); \
do_trap(pgm_int_code, signr, str, regs, &info); \
}
DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
"addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
"execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
"fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
"fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
"HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
"HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
"HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
"HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
"HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
"operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
"privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
"special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
"translation exception")
static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
int fpc, long pgm_int_code)
{
siginfo_t si;
si.si_signo = SIGFPE;
si.si_errno = 0;
si.si_addr = location;
si.si_code = 0;
/* FPC[2] is Data Exception Code */
if ((fpc & 0x00000300) == 0) {
/* bits 6 and 7 of DXC are 0 iff IEEE exception */
if (fpc & 0x8000) /* invalid fp operation */
si.si_code = FPE_FLTINV;
else if (fpc & 0x4000) /* div by 0 */
si.si_code = FPE_FLTDIV;
else if (fpc & 0x2000) /* overflow */
si.si_code = FPE_FLTOVF;
else if (fpc & 0x1000) /* underflow */
si.si_code = FPE_FLTUND;
else if (fpc & 0x0800) /* inexact */
si.si_code = FPE_FLTRES;
}
do_trap(pgm_int_code, SIGFPE,
"floating point exception", regs, &si);
}
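/* Illustrative decoding of the DXC checks above: with fpc = 0x00008000,
* (fpc & 0x00000300) == 0 identifies an IEEE exception and the 0x8000
* flag maps it to FPE_FLTINV; a DXC of 0x0800 would instead map to
* FPE_FLTRES (inexact).
*/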
static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
unsigned long trans_exc_code)
{
siginfo_t info;
__u8 opcode[6];
__u16 __user *location;
int signal = 0;
location = get_psw_address(regs, pgm_int_code);
if (regs->psw.mask & PSW_MASK_PSTATE) {
if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
return;
if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
if (tracehook_consider_fatal_signal(current, SIGTRAP))
force_sig(SIGTRAP, current);
else
signal = SIGILL;
#ifdef CONFIG_MATHEMU
} else if (opcode[0] == 0xb3) {
if (get_user(*((__u16 *) (opcode+2)), location+1))
return;
signal = math_emu_b3(opcode, regs);
} else if (opcode[0] == 0xed) {
if (get_user(*((__u32 *) (opcode+2)),
(__u32 __user *)(location+1)))
return;
signal = math_emu_ed(opcode, regs);
} else if (*((__u16 *) opcode) == 0xb299) {
if (get_user(*((__u16 *) (opcode+2)), location+1))
return;
signal = math_emu_srnm(opcode, regs);
} else if (*((__u16 *) opcode) == 0xb29c) {
if (get_user(*((__u16 *) (opcode+2)), location+1))
return;
signal = math_emu_stfpc(opcode, regs);
} else if (*((__u16 *) opcode) == 0xb29d) {
if (get_user(*((__u16 *) (opcode+2)), location+1))
return;
signal = math_emu_lfpc(opcode, regs);
#endif
} else
signal = SIGILL;
} else {
/*
* If we get an illegal op in kernel mode, send it through the
* kprobes notifier. If kprobes doesn't pick it up, fall back
* to SIGILL.
*/
if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code,
3, SIGTRAP) != NOTIFY_STOP)
signal = SIGILL;
}
#ifdef CONFIG_MATHEMU
if (signal == SIGFPE)
do_fp_trap(regs, location,
current->thread.fp_regs.fpc, pgm_int_code);
else if (signal == SIGSEGV) {
info.si_signo = signal;
info.si_errno = 0;
info.si_code = SEGV_MAPERR;
info.si_addr = (void __user *) location;
do_trap(pgm_int_code, signal,
"user address fault", regs, &info);
} else
#endif
if (signal) {
info.si_signo = signal;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
info.si_addr = (void __user *) location;
do_trap(pgm_int_code, signal,
"illegal operation", regs, &info);
}
}
#ifdef CONFIG_MATHEMU
asmlinkage void specification_exception(struct pt_regs *regs,
long pgm_int_code,
unsigned long trans_exc_code)
{
__u8 opcode[6];
__u16 __user *location = NULL;
int signal = 0;
location = (__u16 __user *) get_psw_address(regs, pgm_int_code);
if (regs->psw.mask & PSW_MASK_PSTATE) {
get_user(*((__u16 *) opcode), location);
switch (opcode[0]) {
case 0x28: /* LDR Rx,Ry */
signal = math_emu_ldr(opcode);
break;
case 0x38: /* LER Rx,Ry */
signal = math_emu_ler(opcode);
break;
case 0x60: /* STD R,D(X,B) */
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_std(opcode, regs);
break;
case 0x68: /* LD R,D(X,B) */
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_ld(opcode, regs);
break;
case 0x70: /* STE R,D(X,B) */
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_ste(opcode, regs);
break;
case 0x78: /* LE R,D(X,B) */
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_le(opcode, regs);
break;
default:
signal = SIGILL;
break;
}
} else
signal = SIGILL;
if (signal == SIGFPE)
do_fp_trap(regs, location,
current->thread.fp_regs.fpc, pgm_int_code);
else if (signal) {
siginfo_t info;
info.si_signo = signal;
info.si_errno = 0;
info.si_code = ILL_ILLOPN;
info.si_addr = location;
do_trap(pgm_int_code, signal,
"specification exception", regs, &info);
}
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
#endif
static void data_exception(struct pt_regs *regs, long pgm_int_code,
unsigned long trans_exc_code)
{
__u16 __user *location;
int signal = 0;
location = get_psw_address(regs, pgm_int_code);
if (MACHINE_HAS_IEEE)
asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
#ifdef CONFIG_MATHEMU
else if (regs->psw.mask & PSW_MASK_PSTATE) {
__u8 opcode[6];
get_user(*((__u16 *) opcode), location);
switch (opcode[0]) {
case 0x28: /* LDR Rx,Ry */
signal = math_emu_ldr(opcode);
break;
case 0x38: /* LER Rx,Ry */
signal = math_emu_ler(opcode);
break;
case 0x60: /* STD R,D(X,B) */
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_std(opcode, regs);
break;
case 0x68: /* LD R,D(X,B) */
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_ld(opcode, regs);
break;
case 0x70: /* STE R,D(X,B) */
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_ste(opcode, regs);
break;
case 0x78: /* LE R,D(X,B) */
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_le(opcode, regs);
break;
case 0xb3:
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_b3(opcode, regs);
break;
case 0xed:
get_user(*((__u32 *) (opcode+2)),
(__u32 __user *)(location+1));
signal = math_emu_ed(opcode, regs);
break;
case 0xb2:
if (opcode[1] == 0x99) {
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_srnm(opcode, regs);
} else if (opcode[1] == 0x9c) {
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_stfpc(opcode, regs);
} else if (opcode[1] == 0x9d) {
get_user(*((__u16 *) (opcode+2)), location+1);
signal = math_emu_lfpc(opcode, regs);
} else
signal = SIGILL;
break;
default:
signal = SIGILL;
break;
}
}
#endif
if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
signal = SIGFPE;
else
signal = SIGILL;
if (signal == SIGFPE)
do_fp_trap(regs, location,
current->thread.fp_regs.fpc, pgm_int_code);
else if (signal) {
siginfo_t info;
info.si_signo = signal;
info.si_errno = 0;
info.si_code = ILL_ILLOPN;
info.si_addr = location;
do_trap(pgm_int_code, signal, "data exception", regs, &info);
}
}
static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
unsigned long trans_exc_code)
{
siginfo_t info;
/* Set user psw back to home space mode. */
if (regs->psw.mask & PSW_MASK_PSTATE)
regs->psw.mask |= PSW_ASC_HOME;
/* Send SIGILL. */
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_PRVOPC;
info.si_addr = get_psw_address(regs, pgm_int_code);
do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
}
asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
bust_spinlocks(1);
printk("Kernel stack overflow.\n");
show_regs(regs);
bust_spinlocks(0);
panic("Corrupt kernel stack, can't continue.");
}
/* init is done in lowcore.S and head.S */
void __init trap_init(void)
{
int i;
for (i = 0; i < 128; i++)
pgm_check_table[i] = &default_trap_handler;
pgm_check_table[1] = &illegal_op;
pgm_check_table[2] = &privileged_op;
pgm_check_table[3] = &execute_exception;
pgm_check_table[4] = &do_protection_exception;
pgm_check_table[5] = &addressing_exception;
pgm_check_table[6] = &specification_exception;
pgm_check_table[7] = &data_exception;
pgm_check_table[8] = &overflow_exception;
pgm_check_table[9] = ÷_exception;
pgm_check_table[0x0A] = &overflow_exception;
pgm_check_table[0x0B] = ÷_exception;
pgm_check_table[0x0C] = &hfp_overflow_exception;
pgm_check_table[0x0D] = &hfp_underflow_exception;
pgm_check_table[0x0E] = &hfp_significance_exception;
pgm_check_table[0x0F] = &hfp_divide_exception;
pgm_check_table[0x10] = &do_dat_exception;
pgm_check_table[0x11] = &do_dat_exception;
pgm_check_table[0x12] = &translation_exception;
pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
pgm_check_table[0x38] = &do_asce_exception;
pgm_check_table[0x39] = &do_dat_exception;
pgm_check_table[0x3A] = &do_dat_exception;
pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
pgm_check_table[0x15] = &operand_exception;
pgm_check_table[0x1C] = &space_switch_exception;
pgm_check_table[0x1D] = &hfp_sqrt_exception;
/* Enable machine checks early. */
local_mcck_enable();
}
| gpl-2.0 |
rabeeh/linux-linaro-stable-mx6-unmaintained-will-be-deleted | drivers/net/phy/icplus.c | 2430 | 6769 | /*
* Driver for ICPlus PHYs
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
MODULE_DESCRIPTION("ICPlus IP175C/IP101A/IP101G/IP1001 PHY drivers");
MODULE_AUTHOR("Michael Barkowski");
MODULE_LICENSE("GPL");
/* IP101A/G - IP1001 */
#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */
#define IP1001_RXPHASE_SEL (1<<0) /* Add delay on RX_CLK */
#define IP1001_TXPHASE_SEL (1<<1) /* Add delay on TX_CLK */
#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
#define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */
#define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED
static int ip175c_config_init(struct phy_device *phydev)
{
int err, i;
static int full_reset_performed = 0;
if (full_reset_performed == 0) {
/* master reset */
err = mdiobus_write(phydev->bus, 30, 0, 0x175c);
if (err < 0)
return err;
/* ensure no bus delays overlap reset period */
err = mdiobus_read(phydev->bus, 30, 0);
/* data sheet specifies reset period is 2 msec */
mdelay(2);
/* enable IP175C mode */
err = mdiobus_write(phydev->bus, 29, 31, 0x175c);
if (err < 0)
return err;
/* Set MII0 speed and duplex (in PHY mode) */
err = mdiobus_write(phydev->bus, 29, 22, 0x420);
if (err < 0)
return err;
/* reset switch ports */
for (i = 0; i < 5; i++) {
err = mdiobus_write(phydev->bus, i,
MII_BMCR, BMCR_RESET);
if (err < 0)
return err;
}
for (i = 0; i < 5; i++)
err = mdiobus_read(phydev->bus, i, MII_BMCR);
mdelay(2);
full_reset_performed = 1;
}
if (phydev->addr != 4) {
phydev->state = PHY_RUNNING;
phydev->speed = SPEED_100;
phydev->duplex = DUPLEX_FULL;
phydev->link = 1;
netif_carrier_on(phydev->attached_dev);
}
return 0;
}
static int ip1xx_reset(struct phy_device *phydev)
{
int bmcr;
/* Software Reset PHY */
bmcr = phy_read(phydev, MII_BMCR);
if (bmcr < 0)
return bmcr;
bmcr |= BMCR_RESET;
bmcr = phy_write(phydev, MII_BMCR, bmcr);
if (bmcr < 0)
return bmcr;
do {
bmcr = phy_read(phydev, MII_BMCR);
if (bmcr < 0)
return bmcr;
} while (bmcr & BMCR_RESET);
return 0;
}
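/* Note: the reset poll above spins until BMCR_RESET clears with no
* upper bound. A bounded variant (illustrative sketch only, not part
* of this driver) could look like:
*
*	int timeout = 100;
*	do {
*		bmcr = phy_read(phydev, MII_BMCR);
*		if (bmcr < 0)
*			return bmcr;
*	} while ((bmcr & BMCR_RESET) && --timeout);
*	if (!timeout)
*		return -ETIMEDOUT;
*/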
static int ip1001_config_init(struct phy_device *phydev)
{
int c;
c = ip1xx_reset(phydev);
if (c < 0)
return c;
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP1001_SPEC_CTRL_STATUS_2);
if (c < 0)
return c;
c |= IP1001_APS_ON;
c = phy_write(phydev, IP1001_SPEC_CTRL_STATUS_2, c);
if (c < 0)
return c;
if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
(phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
(phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
if (c < 0)
return c;
c &= ~(IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
c |= (IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
c |= IP1001_RXPHASE_SEL;
else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
c |= IP1001_TXPHASE_SEL;
c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
if (c < 0)
return c;
}
return 0;
}
static int ip101a_g_config_init(struct phy_device *phydev)
{
int c;
c = ip1xx_reset(phydev);
if (c < 0)
return c;
/* INTR pin used: speed/link/duplex will cause an interrupt */
c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
if (c < 0)
return c;
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
if (c < 0)
return c;
c |= IP101A_G_APS_ON;
return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
}
static int ip175c_read_status(struct phy_device *phydev)
{
if (phydev->addr == 4) /* WAN port */
genphy_read_status(phydev);
else
/* Don't need to read status for switch ports */
phydev->irq = PHY_IGNORE_INTERRUPT;
return 0;
}
static int ip175c_config_aneg(struct phy_device *phydev)
{
if (phydev->addr == 4) /* WAN port */
genphy_config_aneg(phydev);
return 0;
}
static int ip101a_g_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
if (err < 0)
return err;
return 0;
}
static struct phy_driver icplus_driver[] = {
{
.phy_id = 0x02430d80,
.name = "ICPlus IP175C",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
.config_init = &ip175c_config_init,
.config_aneg = &ip175c_config_aneg,
.read_status = &ip175c_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = 0x02430d90,
.name = "ICPlus IP1001",
.phy_id_mask = 0x0ffffff0,
.features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause,
.config_init = &ip1001_config_init,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = 0x02430c54,
.name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause,
.flags = PHY_HAS_INTERRUPT,
.ack_interrupt = ip101a_g_ack_interrupt,
.config_init = &ip101a_g_config_init,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
} };
static int __init icplus_init(void)
{
return phy_drivers_register(icplus_driver,
ARRAY_SIZE(icplus_driver));
}
static void __exit icplus_exit(void)
{
phy_drivers_unregister(icplus_driver,
ARRAY_SIZE(icplus_driver));
}
module_init(icplus_init);
module_exit(icplus_exit);
static struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ 0x02430d80, 0x0ffffff0 },
{ 0x02430d90, 0x0ffffff0 },
{ 0x02430c54, 0x0ffffff0 },
{ }
};
MODULE_DEVICE_TABLE(mdio, icplus_tbl);
| gpl-2.0 |
AnguisCaptor/PwnKernel_Shamu_M | drivers/net/wireless/hostap/hostap_pci.c | 2686 | 11028 | #define PRISM2_PCI
/* Host AP driver's support for Intersil Prism2.5 PCI cards is based on
* driver patches from Reyk Floeter <reyk@vantronix.net> and
* Andy Warner <andyw@pobox.com> */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <asm/io.h>
#include "hostap_wlan.h"
static char *dev_info = "hostap_pci";
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN "
"PCI cards.");
MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards");
MODULE_LICENSE("GPL");
/* struct local_info::hw_priv */
struct hostap_pci_priv {
void __iomem *mem_start;
};
/* FIX: do we need mb/wmb/rmb with memory operations? */
static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
/* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
/* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
{ 0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID },
/* Samsung MagicLAN SWL-2210P */
{ 0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
#ifdef PRISM2_IO_DEBUG
static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
writeb(v, hw_priv->mem_start + a);
spin_unlock_irqrestore(&local->lock, flags);
}
static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
u8 v;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
v = readb(hw_priv->mem_start + a);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
spin_unlock_irqrestore(&local->lock, flags);
return v;
}
static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
writew(v, hw_priv->mem_start + a);
spin_unlock_irqrestore(&local->lock, flags);
}
static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
u16 v;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
v = readw(hw_priv->mem_start + a);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
spin_unlock_irqrestore(&local->lock, flags);
return v;
}
#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
#define HFA384X_OUTW_DATA(v,a) hfa384x_outw_debug(dev, (a), le16_to_cpu((v)))
#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw_debug(dev, (a)))
#else /* PRISM2_IO_DEBUG */
static inline void hfa384x_outb(struct net_device *dev, int a, u8 v)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
iface = netdev_priv(dev);
hw_priv = iface->local->hw_priv;
writeb(v, hw_priv->mem_start + a);
}
static inline u8 hfa384x_inb(struct net_device *dev, int a)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
iface = netdev_priv(dev);
hw_priv = iface->local->hw_priv;
return readb(hw_priv->mem_start + a);
}
static inline void hfa384x_outw(struct net_device *dev, int a, u16 v)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
iface = netdev_priv(dev);
hw_priv = iface->local->hw_priv;
writew(v, hw_priv->mem_start + a);
}
static inline u16 hfa384x_inw(struct net_device *dev, int a)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
iface = netdev_priv(dev);
hw_priv = iface->local->hw_priv;
return readw(hw_priv->mem_start + a);
}
#define HFA384X_OUTB(v,a) hfa384x_outb(dev, (a), (v))
#define HFA384X_INB(a) hfa384x_inb(dev, (a))
#define HFA384X_OUTW(v,a) hfa384x_outw(dev, (a), (v))
#define HFA384X_INW(a) hfa384x_inw(dev, (a))
#define HFA384X_OUTW_DATA(v,a) hfa384x_outw(dev, (a), le16_to_cpu((v)))
#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw(dev, (a)))
#endif /* PRISM2_IO_DEBUG */
static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
int len)
{
u16 d_off;
__le16 *pos;
d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
pos = (__le16 *) buf;
for ( ; len > 1; len -= 2)
*pos++ = HFA384X_INW_DATA(d_off);
if (len & 1)
*((char *) pos) = HFA384X_INB(d_off);
return 0;
}
static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
{
u16 d_off;
__le16 *pos;
d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
pos = (__le16 *) buf;
for ( ; len > 1; len -= 2)
HFA384X_OUTW_DATA(*pos++, d_off);
if (len & 1)
HFA384X_OUTB(*((char *) pos), d_off);
return 0;
}
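/* Example of the BAP copy loops above (illustrative): for len = 5 the
* word loop transfers bytes 0-3 as two 16-bit accesses and the final
* "len & 1" byte access transfers byte 4, so odd-length buffers are
* handled without accessing past the end.
*/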
/* FIX: This might change at some point.. */
#include "hostap_hw.c"
static void prism2_pci_cor_sreset(local_info_t *local)
{
struct net_device *dev = local->dev;
u16 reg;
reg = HFA384X_INB(HFA384X_PCICOR_OFF);
printk(KERN_DEBUG "%s: Original COR value: 0x%x\n", dev->name, reg);
/* linux-wlan-ng uses extremely long hold and settle times for
* COR sreset. A comment in the driver code mentions that the long
* delays appear to be necessary. However, at least IBM 22P6901 seems
* to work fine with shorter delays.
*
* Longer delays can be configured by uncommenting the following line: */
/* #define PRISM2_PCI_USE_LONG_DELAYS */
#ifdef PRISM2_PCI_USE_LONG_DELAYS
int i;
HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
mdelay(250);
HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
mdelay(500);
/* Wait for f/w to complete initialization (CMD:BUSY == 0) */
i = 2000000 / 10;
while ((HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) && --i)
udelay(10);
#else /* PRISM2_PCI_USE_LONG_DELAYS */
HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
mdelay(2);
HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
mdelay(2);
#endif /* PRISM2_PCI_USE_LONG_DELAYS */
if (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) {
printk(KERN_DEBUG "%s: COR sreset timeout\n", dev->name);
}
}
static void prism2_pci_genesis_reset(local_info_t *local, int hcr)
{
struct net_device *dev = local->dev;
HFA384X_OUTW(0x00C5, HFA384X_PCICOR_OFF);
mdelay(10);
HFA384X_OUTW(hcr, HFA384X_PCIHCR_OFF);
mdelay(10);
HFA384X_OUTW(0x0045, HFA384X_PCICOR_OFF);
mdelay(10);
}
static struct prism2_helper_functions prism2_pci_funcs =
{
.card_present = NULL,
.cor_sreset = prism2_pci_cor_sreset,
.genesis_reset = prism2_pci_genesis_reset,
.hw_type = HOSTAP_HW_PCI,
};
static int prism2_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
unsigned long phymem;
void __iomem *mem = NULL;
local_info_t *local = NULL;
struct net_device *dev = NULL;
static int cards_found /* = 0 */;
int irq_registered = 0;
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
if (hw_priv == NULL)
return -ENOMEM;
if (pci_enable_device(pdev))
goto err_out_free;
phymem = pci_resource_start(pdev, 0);
if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2")) {
printk(KERN_ERR "prism2: Cannot reserve PCI memory region\n");
goto err_out_disable;
}
mem = pci_ioremap_bar(pdev, 0);
if (mem == NULL) {
printk(KERN_ERR "prism2: Cannot remap PCI memory region\n");
goto fail;
}
dev = prism2_init_local_data(&prism2_pci_funcs, cards_found,
&pdev->dev);
if (dev == NULL)
goto fail;
iface = netdev_priv(dev);
local = iface->local;
local->hw_priv = hw_priv;
cards_found++;
dev->irq = pdev->irq;
hw_priv->mem_start = mem;
dev->base_addr = (unsigned long) mem;
prism2_pci_cor_sreset(local);
pci_set_drvdata(pdev, dev);
if (request_irq(dev->irq, prism2_interrupt, IRQF_SHARED, dev->name,
dev)) {
printk(KERN_WARNING "%s: request_irq failed\n", dev->name);
goto fail;
} else
irq_registered = 1;
if (!local->pri_only && prism2_hw_config(dev, 1)) {
printk(KERN_DEBUG "%s: hardware initialization failed\n",
dev_info);
goto fail;
}
printk(KERN_INFO "%s: Intersil Prism2.5 PCI: "
"mem=0x%lx, irq=%d\n", dev->name, phymem, dev->irq);
return hostap_hw_ready(dev);
fail:
if (irq_registered && dev)
free_irq(dev->irq, dev);
if (mem)
iounmap(mem);
release_mem_region(phymem, pci_resource_len(pdev, 0));
err_out_disable:
pci_disable_device(pdev);
prism2_free_local_data(dev);
err_out_free:
kfree(hw_priv);
return -ENODEV;
}
static void prism2_pci_remove(struct pci_dev *pdev)
{
struct net_device *dev;
struct hostap_interface *iface;
void __iomem *mem_start;
struct hostap_pci_priv *hw_priv;
dev = pci_get_drvdata(pdev);
iface = netdev_priv(dev);
hw_priv = iface->local->hw_priv;
/* Reset the hardware, and ensure interrupts are disabled. */
prism2_pci_cor_sreset(iface->local);
hfa384x_disable_interrupts(dev);
if (dev->irq)
free_irq(dev->irq, dev);
mem_start = hw_priv->mem_start;
prism2_free_local_data(dev);
kfree(hw_priv);
iounmap(mem_start);
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
pci_disable_device(pdev);
}
#ifdef CONFIG_PM
static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (netif_running(dev)) {
netif_stop_queue(dev);
netif_device_detach(dev);
}
prism2_suspend(dev);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int prism2_pci_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
int err;
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
dev->name);
return err;
}
pci_restore_state(pdev);
prism2_hw_config(dev, 0);
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
}
return 0;
}
#endif /* CONFIG_PM */
MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
static struct pci_driver prism2_pci_driver = {
.name = "hostap_pci",
.id_table = prism2_pci_id_table,
.probe = prism2_pci_probe,
.remove = prism2_pci_remove,
#ifdef CONFIG_PM
.suspend = prism2_pci_suspend,
.resume = prism2_pci_resume,
#endif /* CONFIG_PM */
};
module_pci_driver(prism2_pci_driver);
| gpl-2.0 |
mbadarkhe/davinci-next | drivers/gpu/drm/nouveau/core/engine/graph/nv35.c | 2686 | 5330 | #include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include "nv20.h"
#include "regs.h"
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nouveau_oclass
nv35_graph_sclass[] = {
{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
{ 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
{ 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
{ 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
{ 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
{ 0x0497, &nv04_graph_ofuncs, NULL }, /* rankine */
{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static int
nv35_graph_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv20_graph_chan *chan;
int ret, i;
ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x577c,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
chan->chid = nouveau_fifo_chan(parent)->chid;
nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
nv_wo32(chan, 0x040c, 0x00000101);
nv_wo32(chan, 0x0420, 0x00000111);
nv_wo32(chan, 0x0424, 0x00000060);
nv_wo32(chan, 0x0440, 0x00000080);
nv_wo32(chan, 0x0444, 0xffff0000);
nv_wo32(chan, 0x0448, 0x00000001);
nv_wo32(chan, 0x045c, 0x44400000);
nv_wo32(chan, 0x0488, 0xffff0000);
for (i = 0x04dc; i < 0x04e4; i += 4)
nv_wo32(chan, i, 0x0fff0000);
nv_wo32(chan, 0x04e8, 0x00011100);
for (i = 0x0504; i < 0x0544; i += 4)
nv_wo32(chan, i, 0x07ff0000);
nv_wo32(chan, 0x054c, 0x4b7fffff);
nv_wo32(chan, 0x0588, 0x00000080);
nv_wo32(chan, 0x058c, 0x30201000);
nv_wo32(chan, 0x0590, 0x70605040);
nv_wo32(chan, 0x0594, 0xb8a89888);
nv_wo32(chan, 0x0598, 0xf8e8d8c8);
nv_wo32(chan, 0x05ac, 0xb0000000);
for (i = 0x0604; i < 0x0644; i += 4)
nv_wo32(chan, i, 0x00010588);
for (i = 0x0644; i < 0x0684; i += 4)
nv_wo32(chan, i, 0x00030303);
for (i = 0x06c4; i < 0x0704; i += 4)
nv_wo32(chan, i, 0x0008aae4);
for (i = 0x0704; i < 0x0744; i += 4)
nv_wo32(chan, i, 0x01012000);
for (i = 0x0744; i < 0x0784; i += 4)
nv_wo32(chan, i, 0x00080008);
nv_wo32(chan, 0x0860, 0x00040000);
nv_wo32(chan, 0x0864, 0x00010000);
for (i = 0x0868; i < 0x0878; i += 4)
nv_wo32(chan, i, 0x00040004);
for (i = 0x1f1c; i <= 0x308c; i += 16) {
nv_wo32(chan, i + 0, 0x10700ff9);
nv_wo32(chan, i + 4, 0x0436086c);
nv_wo32(chan, i + 8, 0x000c001b);
}
for (i = 0x30bc; i < 0x30cc; i += 4)
nv_wo32(chan, i, 0x0000ffff);
nv_wo32(chan, 0x3450, 0x3f800000);
nv_wo32(chan, 0x380c, 0x3f800000);
nv_wo32(chan, 0x3820, 0x3f800000);
nv_wo32(chan, 0x384c, 0x40000000);
nv_wo32(chan, 0x3850, 0x3f800000);
nv_wo32(chan, 0x3854, 0x3f000000);
nv_wo32(chan, 0x385c, 0x40000000);
nv_wo32(chan, 0x3860, 0x3f800000);
nv_wo32(chan, 0x3868, 0xbf800000);
nv_wo32(chan, 0x3870, 0xbf800000);
return 0;
}
static struct nouveau_oclass
nv35_graph_cclass = {
.handle = NV_ENGCTX(GR, 0x35),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv35_graph_context_ctor,
.dtor = _nouveau_graph_context_dtor,
.init = nv20_graph_context_init,
.fini = nv20_graph_context_fini,
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static int
nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv20_graph_priv *priv;
int ret;
ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
nv_subdev(priv)->intr = nv20_graph_intr;
nv_engine(priv)->cclass = &nv35_graph_cclass;
nv_engine(priv)->sclass = nv35_graph_sclass;
nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
return 0;
}
struct nouveau_oclass
nv35_graph_oclass = {
.handle = NV_ENGINE(GR, 0x35),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv35_graph_ctor,
.dtor = nv20_graph_dtor,
.init = nv30_graph_init,
.fini = _nouveau_graph_fini,
},
};
| gpl-2.0 |
dirtybit/libcustomperf | drivers/gpu/drm/nouveau/core/core/client.c | 2686 | 3272 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/object.h>
#include <core/client.h>
#include <core/handle.h>
#include <core/option.h>
#include <engine/device.h>
static void
nouveau_client_dtor(struct nouveau_object *object)
{
struct nouveau_client *client = (void *)object;
nouveau_object_ref(NULL, &client->device);
nouveau_handle_destroy(client->root);
nouveau_namedb_destroy(&client->base);
}
static struct nouveau_oclass
nouveau_client_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.dtor = nouveau_client_dtor,
},
};
int
nouveau_client_create_(const char *name, u64 devname, const char *cfg,
const char *dbg, int length, void **pobject)
{
struct nouveau_object *device;
struct nouveau_client *client;
int ret;
device = (void *)nouveau_device_find(devname);
if (!device)
return -ENODEV;
ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
NV_CLIENT_CLASS, NULL,
(1ULL << NVDEV_ENGINE_DEVICE),
length, pobject);
client = *pobject;
if (ret)
return ret;
ret = nouveau_handle_create(nv_object(client), ~0, ~0,
nv_object(client), &client->root);
if (ret)
return ret;
/* prevent init/fini from being called; the OS is in charge of this */
atomic_set(&nv_object(client)->usecount, 2);
nouveau_object_ref(device, &client->device);
snprintf(client->name, sizeof(client->name), "%s", name);
client->debug = nouveau_dbgopt(dbg, "CLIENT");
return 0;
}
int
nouveau_client_init(struct nouveau_client *client)
{
int ret;
nv_debug(client, "init running\n");
ret = nouveau_handle_init(client->root);
nv_debug(client, "init completed with %d\n", ret);
return ret;
}
int
nouveau_client_fini(struct nouveau_client *client, bool suspend)
{
const char *name[2] = { "fini", "suspend" };
int ret;
nv_debug(client, "%s running\n", name[suspend]);
ret = nouveau_handle_fini(client->root, suspend);
nv_debug(client, "%s completed with %d\n", name[suspend], ret);
return ret;
}
const char *
nouveau_client_name(void *obj)
{
const char *client_name = "unknown";
struct nouveau_client *client = nouveau_client(obj);
if (client)
client_name = client->name;
return client_name;
}
| gpl-2.0 |
Jackeagle/kernel_stock_e53g | drivers/md/dm-cache-policy-cleaner.c | 2942 | 10303 | /*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
* writeback cache policy supporting flushing out dirty cache blocks.
*
* This file is released under the GPL.
*/
#include "dm-cache-policy.h"
#include "dm.h"
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
/*----------------------------------------------------------------*/
#define DM_MSG_PREFIX "cache cleaner"
/* Cache entry struct. */
struct wb_cache_entry {
struct list_head list;
struct hlist_node hlist;
dm_oblock_t oblock;
dm_cblock_t cblock;
bool dirty:1;
bool pending:1;
};
struct hash {
struct hlist_head *table;
dm_block_t hash_bits;
unsigned nr_buckets;
};
struct policy {
struct dm_cache_policy policy;
spinlock_t lock;
struct list_head free;
struct list_head clean;
struct list_head clean_pending;
struct list_head dirty;
/*
* We know exactly how many cblocks will be needed,
* so we can allocate them up front.
*/
dm_cblock_t cache_size, nr_cblocks_allocated;
struct wb_cache_entry *cblocks;
struct hash chash;
};
/*----------------------------------------------------------------------------*/
/*
* Low-level functions.
*/
static unsigned next_power(unsigned n, unsigned min)
{
return roundup_pow_of_two(max(n, min));
}
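/* Worked example (illustrative): next_power(100, 16) rounds 100 up to
* 128, while next_power(3, 16) returns the minimum, 16.
*/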
static struct policy *to_policy(struct dm_cache_policy *p)
{
return container_of(p, struct policy, policy);
}
static struct list_head *list_pop(struct list_head *q)
{
struct list_head *r = q->next;
list_del(r);
return r;
}
/*----------------------------------------------------------------------------*/
/* Allocate/free various resources. */
static int alloc_hash(struct hash *hash, unsigned elts)
{
hash->nr_buckets = next_power(elts >> 4, 16);
hash->hash_bits = ffs(hash->nr_buckets) - 1;
hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);
return hash->table ? 0 : -ENOMEM;
}
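/* Illustrative sizing example: for a cache of 1024 blocks,
* elts >> 4 = 64, so nr_buckets = next_power(64, 16) = 64 and
* hash_bits = ffs(64) - 1 = 6, giving hash_64() a 6-bit bucket index.
*/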
static void free_hash(struct hash *hash)
{
vfree(hash->table);
}
static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size)
{
int r = -ENOMEM;
p->cblocks = vzalloc(sizeof(*p->cblocks) * from_cblock(cache_size));
if (p->cblocks) {
unsigned u = from_cblock(cache_size);
while (u--)
list_add(&p->cblocks[u].list, &p->free);
p->nr_cblocks_allocated = 0;
/* Cache entries hash. */
r = alloc_hash(&p->chash, from_cblock(cache_size));
if (r)
vfree(p->cblocks);
}
return r;
}
static void free_cache_blocks_and_hash(struct policy *p)
{
free_hash(&p->chash);
vfree(p->cblocks);
}
static struct wb_cache_entry *alloc_cache_entry(struct policy *p)
{
struct wb_cache_entry *e;
BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));
e = list_entry(list_pop(&p->free), struct wb_cache_entry, list);
p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);
return e;
}
/*----------------------------------------------------------------------------*/
/* Hash functions (lookup, insert, remove). */
static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
{
struct hash *hash = &p->chash;
unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
struct wb_cache_entry *cur;
struct hlist_head *bucket = &hash->table[h];
hlist_for_each_entry(cur, bucket, hlist) {
if (cur->oblock == oblock) {
/* Move to the front of the bucket for faster access. */
hlist_del(&cur->hlist);
hlist_add_head(&cur->hlist, bucket);
return cur;
}
}
return NULL;
}
static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e)
{
unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);
hlist_add_head(&e->hlist, &p->chash.table[h]);
}
static void remove_cache_hash_entry(struct wb_cache_entry *e)
{
hlist_del(&e->hlist);
}
/* Public interface (see dm-cache-policy.h) */
static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
bool can_block, bool can_migrate, bool discarded_oblock,
struct bio *bio, struct policy_result *result)
{
struct policy *p = to_policy(pe);
struct wb_cache_entry *e;
unsigned long flags;
result->op = POLICY_MISS;
if (can_block)
spin_lock_irqsave(&p->lock, flags);
else if (!spin_trylock_irqsave(&p->lock, flags))
return -EWOULDBLOCK;
e = lookup_cache_entry(p, oblock);
if (e) {
result->op = POLICY_HIT;
result->cblock = e->cblock;
}
spin_unlock_irqrestore(&p->lock, flags);
return 0;
}
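/*
 * Note that the cleaner never requests promotions or demotions: wb_map()
 * reports POLICY_HIT for blocks already resident in the cache and
 * POLICY_MISS otherwise, leaving the loaded mapping untouched.
 */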
static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
{
int r;
struct policy *p = to_policy(pe);
struct wb_cache_entry *e;
unsigned long flags;
if (!spin_trylock_irqsave(&p->lock, flags))
return -EWOULDBLOCK;
e = lookup_cache_entry(p, oblock);
if (e) {
*cblock = e->cblock;
r = 0;
} else
r = -ENOENT;
spin_unlock_irqrestore(&p->lock, flags);
return r;
}
static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bool set)
{
struct policy *p = to_policy(pe);
struct wb_cache_entry *e;
e = lookup_cache_entry(p, oblock);
BUG_ON(!e);
if (set) {
if (!e->dirty) {
e->dirty = true;
list_move(&e->list, &p->dirty);
}
} else {
if (e->dirty) {
e->pending = false;
e->dirty = false;
list_move(&e->list, &p->clean);
}
}
}
static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
struct policy *p = to_policy(pe);
unsigned long flags;
spin_lock_irqsave(&p->lock, flags);
__set_clear_dirty(pe, oblock, true);
spin_unlock_irqrestore(&p->lock, flags);
}
static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
struct policy *p = to_policy(pe);
unsigned long flags;
spin_lock_irqsave(&p->lock, flags);
__set_clear_dirty(pe, oblock, false);
spin_unlock_irqrestore(&p->lock, flags);
}
static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
{
insert_cache_hash_entry(p, e);
if (e->dirty)
list_add(&e->list, &p->dirty);
else
list_add(&e->list, &p->clean);
}
static int wb_load_mapping(struct dm_cache_policy *pe,
dm_oblock_t oblock, dm_cblock_t cblock,
uint32_t hint, bool hint_valid)
{
int r;
struct policy *p = to_policy(pe);
struct wb_cache_entry *e = alloc_cache_entry(p);
if (e) {
e->cblock = cblock;
e->oblock = oblock;
e->dirty = false; /* blocks default to clean */
add_cache_entry(p, e);
r = 0;
} else
r = -ENOMEM;
return r;
}
static void wb_destroy(struct dm_cache_policy *pe)
{
struct policy *p = to_policy(pe);
free_cache_blocks_and_hash(p);
kfree(p);
}
static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
{
struct wb_cache_entry *r = lookup_cache_entry(p, oblock);
BUG_ON(!r);
remove_cache_hash_entry(r);
list_del(&r->list);
return r;
}
static void wb_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
struct policy *p = to_policy(pe);
struct wb_cache_entry *e;
unsigned long flags;
spin_lock_irqsave(&p->lock, flags);
e = __wb_force_remove_mapping(p, oblock);
list_add_tail(&e->list, &p->free);
BUG_ON(!from_cblock(p->nr_cblocks_allocated));
p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
spin_unlock_irqrestore(&p->lock, flags);
}
static void wb_force_mapping(struct dm_cache_policy *pe,
dm_oblock_t current_oblock, dm_oblock_t oblock)
{
struct policy *p = to_policy(pe);
struct wb_cache_entry *e;
unsigned long flags;
spin_lock_irqsave(&p->lock, flags);
e = __wb_force_remove_mapping(p, current_oblock);
e->oblock = oblock;
add_cache_entry(p, e);
spin_unlock_irqrestore(&p->lock, flags);
}
static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
{
struct list_head *l;
struct wb_cache_entry *r;
if (list_empty(&p->dirty))
return NULL;
l = list_pop(&p->dirty);
r = container_of(l, struct wb_cache_entry, list);
list_add(l, &p->clean_pending);
return r;
}
static int wb_writeback_work(struct dm_cache_policy *pe,
dm_oblock_t *oblock,
dm_cblock_t *cblock)
{
int r = -ENOENT;
struct policy *p = to_policy(pe);
struct wb_cache_entry *e;
unsigned long flags;
spin_lock_irqsave(&p->lock, flags);
e = get_next_dirty_entry(p);
if (e) {
*oblock = e->oblock;
*cblock = e->cblock;
r = 0;
}
spin_unlock_irqrestore(&p->lock, flags);
return r;
}
static dm_cblock_t wb_residency(struct dm_cache_policy *pe)
{
return to_policy(pe)->nr_cblocks_allocated;
}
/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct policy *p)
{
p->policy.destroy = wb_destroy;
p->policy.map = wb_map;
p->policy.lookup = wb_lookup;
p->policy.set_dirty = wb_set_dirty;
p->policy.clear_dirty = wb_clear_dirty;
p->policy.load_mapping = wb_load_mapping;
p->policy.walk_mappings = NULL;
p->policy.remove_mapping = wb_remove_mapping;
p->policy.writeback_work = wb_writeback_work;
p->policy.force_mapping = wb_force_mapping;
p->policy.residency = wb_residency;
p->policy.tick = NULL;
}
static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
int r;
struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return NULL;
init_policy_functions(p);
INIT_LIST_HEAD(&p->free);
INIT_LIST_HEAD(&p->clean);
INIT_LIST_HEAD(&p->clean_pending);
INIT_LIST_HEAD(&p->dirty);
p->cache_size = cache_size;
spin_lock_init(&p->lock);
/* Allocate cache entry structs and add them to free list. */
r = alloc_cache_blocks_with_hash(p, cache_size);
if (!r)
return &p->policy;
kfree(p);
return NULL;
}
/*----------------------------------------------------------------------------*/
static struct dm_cache_policy_type wb_policy_type = {
.name = "cleaner",
.version = {1, 0, 0},
.hint_size = 0,
.owner = THIS_MODULE,
.create = wb_create
};
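/*
 * Usage sketch (assuming the dm-cache target syntax from
 * Documentation/device-mapper/cache.txt): the policy is selected by name
 * in the dm table and takes no policy arguments, e.g.
 *
 *   dmsetup create cached --table "0 <origin sectors> cache \
 *       <metadata dev> <cache dev> <origin dev> 512 0 cleaner 0"
 */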
static int __init wb_init(void)
{
int r = dm_cache_policy_register(&wb_policy_type);
if (r < 0)
DMERR("register failed %d", r);
else
DMINFO("version %u.%u.%u loaded",
wb_policy_type.version[0],
wb_policy_type.version[1],
wb_policy_type.version[2]);
return r;
}
static void __exit wb_exit(void)
{
dm_cache_policy_unregister(&wb_policy_type);
}
module_init(wb_init);
module_exit(wb_exit);
MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("cleaner cache policy");
| gpl-2.0 |
lexmazter/dellstreak5-kernel-3.4 | drivers/net/wireless/ath/ath9k/ar5008_phy.c | 4734 | 46498 | /*
* Copyright (c) 2008-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "hw.h"
#include "hw-ops.h"
#include "../regd.h"
#include "ar9002_phy.h"
/* All code below is for AR5008, AR9001, AR9002 */
static const int firstep_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
static const int cycpwrThr1_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
/*
* register values to turn OFDM weak signal detection OFF
*/
static const int m1ThreshLow_off = 127;
static const int m2ThreshLow_off = 127;
static const int m1Thresh_off = 127;
static const int m2Thresh_off = 127;
static const int m2CountThr_off = 31;
static const int m2CountThrLow_off = 63;
static const int m1ThreshLowExt_off = 127;
static const int m2ThreshLowExt_off = 127;
static const int m1ThreshExt_off = 127;
static const int m2ThreshExt_off = 127;
static void ar5008_rf_bank_setup(u32 *bank, struct ar5416IniArray *array,
int col)
{
int i;
for (i = 0; i < array->ia_rows; i++)
bank[i] = INI_RA(array, i, col);
}
#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) \
ar5008_write_rf_array(ah, iniarray, regData, &(regWr))
static void ar5008_write_rf_array(struct ath_hw *ah, struct ar5416IniArray *array,
u32 *data, unsigned int *writecnt)
{
int r;
ENABLE_REGWRITE_BUFFER(ah);
for (r = 0; r < array->ia_rows; r++) {
REG_WRITE(ah, INI_RA(array, r, 0), data[r]);
DO_DELAY(*writecnt);
}
REGWRITE_BUFFER_FLUSH(ah);
}
/**
 * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
 * @rfBuf: RF register buffer to modify
 * @reg32: field value to insert
 * @numBits: width of the field in bits
 * @firstBit: 1-based bit position of the field within the buffer
 * @column: byte column within each 32-bit buffer entry
 *
 * Performs analog "swizzling" of parameters into their location.
 * Used on external AR2133/AR5133 radios.
 */
static void ar5008_hw_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
u32 numBits, u32 firstBit,
u32 column)
{
u32 tmp32, mask, arrayEntry, lastBit;
int32_t bitPosition, bitsLeft;
tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
arrayEntry = (firstBit - 1) / 8;
bitPosition = (firstBit - 1) % 8;
bitsLeft = numBits;
while (bitsLeft > 0) {
lastBit = (bitPosition + bitsLeft > 8) ?
8 : bitPosition + bitsLeft;
mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
(column * 8);
rfBuf[arrayEntry] &= ~mask;
rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
(column * 8)) & mask;
bitsLeft -= 8 - bitPosition;
tmp32 = tmp32 >> (8 - bitPosition);
bitPosition = 0;
arrayEntry++;
}
}
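/*
 * Worked example (illustrative): the force-bias write below calls this
 * with numBits = 3, firstBit = 181, column = 3, so arrayEntry =
 * 180 / 8 = 22 and bitPosition = 180 % 8 = 4. All three bits fit in a
 * single pass: mask = (0x7f ^ 0x0f) << 24 = 0x70000000, i.e. bits
 * 28..30 of rfBuf[22].
 */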
/*
* Fix for an orientation sensitivity issue on the 2.4 GHz band,
* applied by increasing rf_pwd_icsyndiv.
*
* Theoretical Rules:
* if 2 GHz band
* if forceBiasAuto
* if synth_freq < 2412
* bias = 0
* else if 2412 <= synth_freq <= 2422
* bias = 1
* else // synth_freq > 2422
* bias = 2
* else if forceBias > 0
* bias = forceBias & 7
* else
* no change, use value from ini file
* else
* no change, invalid band
*
* 1st Mod:
* 2422 also uses value of 2
* <approved>
*
* 2nd Mod:
* Less than 2412 uses value of 0, 2412 and above uses value of 2
*/
static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 tmp_reg;
int reg_writes = 0;
u32 new_bias = 0;
if (!AR_SREV_5416(ah) || synth_freq >= 3000)
return;
BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
if (synth_freq < 2412)
new_bias = 0;
else if (synth_freq < 2422)
new_bias = 1;
else
new_bias = 2;
/* pre-reverse this field */
tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
ath_dbg(common, CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
new_bias, synth_freq);
/* swizzle rf_pwd_icsyndiv */
ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
/* write Bank 6 with new params */
REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
}
/**
* ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
* @ah: atheros hardware structure
* @chan: the channel to tune to
*
* For the external AR2133/AR5133 radios, takes the MHz channel value and
* sets the channel value. Assumes writes have been enabled to the analog
* bus and that the bank6 register cache is in ah->analogBank6Data.
*/
static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 channelSel = 0;
u32 bModeSynth = 0;
u32 aModeRefSel = 0;
u32 reg32 = 0;
u16 freq;
struct chan_centers centers;
ath9k_hw_get_channel_centers(ah, chan, ¢ers);
freq = centers.synth_center;
if (freq < 4800) {
u32 txctl;
if (((freq - 2192) % 5) == 0) {
channelSel = ((freq - 672) * 2 - 3040) / 10;
bModeSynth = 0;
} else if (((freq - 2224) % 5) == 0) {
channelSel = ((freq - 704) * 2 - 3040) / 10;
bModeSynth = 1;
} else {
ath_err(common, "Invalid channel %u MHz\n", freq);
return -EINVAL;
}
channelSel = (channelSel << 2) & 0xff;
channelSel = ath9k_hw_reverse_bits(channelSel, 8);
txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
if (freq == 2484) {
REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
} else {
REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
}
} else if ((freq % 20) == 0 && freq >= 5120) {
channelSel =
ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
aModeRefSel = ath9k_hw_reverse_bits(1, 2);
} else if ((freq % 10) == 0) {
channelSel =
ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
aModeRefSel = ath9k_hw_reverse_bits(2, 2);
else
aModeRefSel = ath9k_hw_reverse_bits(1, 2);
} else if ((freq % 5) == 0) {
channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
aModeRefSel = ath9k_hw_reverse_bits(1, 2);
} else {
ath_err(common, "Invalid channel %u MHz\n", freq);
return -EINVAL;
}
ar5008_hw_force_bias(ah, freq);
reg32 =
(channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
(1 << 5) | 0x1;
REG_WRITE(ah, AR_PHY(0x37), reg32);
ah->curchan = chan;
ah->curchan_rad_index = -1;
return 0;
}
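/*
 * Worked example (illustrative): for 2.4 GHz channel 1, freq = 2412 and
 * (freq - 2192) % 5 == 0, so channelSel = ((2412 - 672) * 2 - 3040) / 10
 * = 44 with bModeSynth = 0. (44 << 2) & 0xff = 0xb0, bit-reversed to
 * 0x0d, giving reg32 = (0x0d << 8) | (1 << 5) | 0x1 = 0xd21.
 */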
/**
* ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
* @ah: atheros hardware structure
* @chan: the channel in use
*
* For non single-chip solutions. Converts to a baseband spur frequency
* given the input channel frequency and computes the register settings
* below.
*/
static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan)
{
int bb_spur = AR_NO_SPUR;
int bin, cur_bin;
int spur_freq_sd;
int spur_delta_phase;
int denominator;
int upper, lower, cur_vit_mask;
int tmp, new;
int i;
static int pilot_mask_reg[4] = {
AR_PHY_TIMING7, AR_PHY_TIMING8,
AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
static int chan_mask_reg[4] = {
AR_PHY_TIMING9, AR_PHY_TIMING10,
AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
static int inc[4] = { 0, 100, 0, 0 };
int8_t mask_m[123];
int8_t mask_p[123];
int8_t mask_amt;
int tmp_mask;
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
memset(&mask_m, 0, sizeof(int8_t) * 123);
memset(&mask_p, 0, sizeof(int8_t) * 123);
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
if (AR_NO_SPUR == cur_bb_spur)
break;
cur_bb_spur = cur_bb_spur - (chan->channel * 10);
if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
bb_spur = cur_bb_spur;
break;
}
}
if (AR_NO_SPUR == bb_spur)
return;
bin = bb_spur * 32;
tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
AR_PHY_SPUR_REG_MASK_RATE_SELECT |
AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
REG_WRITE(ah, AR_PHY_SPUR_REG, new);
spur_delta_phase = ((bb_spur * 524288) / 100) &
AR_PHY_TIMING11_SPUR_DELTA_PHASE;
denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
REG_WRITE(ah, AR_PHY_TIMING11, new);
cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
for (i = 0; i < 4; i++) {
int pilot_mask = 0;
int chan_mask = 0;
int bp = 0;
for (bp = 0; bp < 30; bp++) {
if ((cur_bin > lower) && (cur_bin < upper)) {
pilot_mask = pilot_mask | 0x1 << bp;
chan_mask = chan_mask | 0x1 << bp;
}
cur_bin += 100;
}
cur_bin += inc[i];
REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
REG_WRITE(ah, chan_mask_reg[i], chan_mask);
}
cur_vit_mask = 6100;
upper = bin + 120;
lower = bin - 120;
for (i = 0; i < 123; i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);
if (tmp_v < 75)
mask_amt = 1;
else
mask_amt = 0;
if (cur_vit_mask < 0)
mask_m[abs(cur_vit_mask / 100)] = mask_amt;
else
mask_p[cur_vit_mask / 100] = mask_amt;
}
cur_vit_mask -= 100;
}
tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
| (mask_m[48] << 26) | (mask_m[49] << 24)
| (mask_m[50] << 22) | (mask_m[51] << 20)
| (mask_m[52] << 18) | (mask_m[53] << 16)
| (mask_m[54] << 14) | (mask_m[55] << 12)
| (mask_m[56] << 10) | (mask_m[57] << 8)
| (mask_m[58] << 6) | (mask_m[59] << 4)
| (mask_m[60] << 2) | (mask_m[61] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
tmp_mask = (mask_m[31] << 28)
| (mask_m[32] << 26) | (mask_m[33] << 24)
| (mask_m[34] << 22) | (mask_m[35] << 20)
| (mask_m[36] << 18) | (mask_m[37] << 16)
| (mask_m[48] << 14) | (mask_m[39] << 12)
| (mask_m[40] << 10) | (mask_m[41] << 8)
| (mask_m[42] << 6) | (mask_m[43] << 4)
| (mask_m[44] << 2) | (mask_m[45] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
| (mask_m[18] << 26) | (mask_m[18] << 24)
| (mask_m[20] << 22) | (mask_m[20] << 20)
| (mask_m[22] << 18) | (mask_m[22] << 16)
| (mask_m[24] << 14) | (mask_m[24] << 12)
| (mask_m[25] << 10) | (mask_m[26] << 8)
| (mask_m[27] << 6) | (mask_m[28] << 4)
| (mask_m[29] << 2) | (mask_m[30] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
| (mask_m[2] << 26) | (mask_m[3] << 24)
| (mask_m[4] << 22) | (mask_m[5] << 20)
| (mask_m[6] << 18) | (mask_m[7] << 16)
| (mask_m[8] << 14) | (mask_m[9] << 12)
| (mask_m[10] << 10) | (mask_m[11] << 8)
| (mask_m[12] << 6) | (mask_m[13] << 4)
| (mask_m[14] << 2) | (mask_m[15] << 0);
REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
tmp_mask = (mask_p[15] << 28)
| (mask_p[14] << 26) | (mask_p[13] << 24)
| (mask_p[12] << 22) | (mask_p[11] << 20)
| (mask_p[10] << 18) | (mask_p[9] << 16)
| (mask_p[8] << 14) | (mask_p[7] << 12)
| (mask_p[6] << 10) | (mask_p[5] << 8)
| (mask_p[4] << 6) | (mask_p[3] << 4)
| (mask_p[2] << 2) | (mask_p[1] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
tmp_mask = (mask_p[30] << 28)
| (mask_p[29] << 26) | (mask_p[28] << 24)
| (mask_p[27] << 22) | (mask_p[26] << 20)
| (mask_p[25] << 18) | (mask_p[24] << 16)
| (mask_p[23] << 14) | (mask_p[22] << 12)
| (mask_p[21] << 10) | (mask_p[20] << 8)
| (mask_p[19] << 6) | (mask_p[18] << 4)
| (mask_p[17] << 2) | (mask_p[16] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
tmp_mask = (mask_p[45] << 28)
| (mask_p[44] << 26) | (mask_p[43] << 24)
| (mask_p[42] << 22) | (mask_p[41] << 20)
| (mask_p[40] << 18) | (mask_p[39] << 16)
| (mask_p[38] << 14) | (mask_p[37] << 12)
| (mask_p[36] << 10) | (mask_p[35] << 8)
| (mask_p[34] << 6) | (mask_p[33] << 4)
| (mask_p[32] << 2) | (mask_p[31] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
| (mask_p[59] << 26) | (mask_p[58] << 24)
| (mask_p[57] << 22) | (mask_p[56] << 20)
| (mask_p[55] << 18) | (mask_p[54] << 16)
| (mask_p[53] << 14) | (mask_p[52] << 12)
| (mask_p[51] << 10) | (mask_p[50] << 8)
| (mask_p[49] << 6) | (mask_p[48] << 4)
| (mask_p[47] << 2) | (mask_p[46] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
}
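/*
 * Worked example (illustrative): a spur 2.0 MHz above the channel centre
 * gives bb_spur = 20 (units of 100 kHz), hence bin = 640,
 * spur_delta_phase = (20 * 524288) / 100 = 104857 and, on 2 GHz
 * (denominator 440), spur_freq_sd = (20 * 2048) / 440 = 93.
 */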
/**
* ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
* @ah: atheros hardware structure
*
* Only required for older devices with external AR2133/AR5133 radios.
*/
static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
{
#define ATH_ALLOC_BANK(bank, size) do { \
bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
if (!bank) { \
ath_err(common, "Cannot allocate RF banks\n"); \
return -ENOMEM; \
} \
} while (0)
struct ath_common *common = ath9k_hw_common(ah);
BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
return 0;
#undef ATH_ALLOC_BANK
}
/**
* ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
* @ah: atheros hardware structure
*
* For the external AR2133/AR5133 radio banks.
*/
static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
{
#define ATH_FREE_BANK(bank) do { \
kfree(bank); \
bank = NULL; \
} while (0)
BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
ATH_FREE_BANK(ah->analogBank0Data);
ATH_FREE_BANK(ah->analogBank1Data);
ATH_FREE_BANK(ah->analogBank2Data);
ATH_FREE_BANK(ah->analogBank3Data);
ATH_FREE_BANK(ah->analogBank6Data);
ATH_FREE_BANK(ah->analogBank6TPCData);
ATH_FREE_BANK(ah->analogBank7Data);
ATH_FREE_BANK(ah->bank6Temp);
#undef ATH_FREE_BANK
}
/**
* ar5008_hw_set_rf_regs - programs rf registers based on EEPROM
* @ah: atheros hardware structure
* @chan: the channel being programmed
* @modesIndex: mode index into the ini register tables
*
* Used for the external AR2133/AR5133 radios.
*
* Reads the EEPROM header info from the device structure and programs
* all rf registers. This routine requires access to the analog
* rf device. This is not required for single-chip devices.
*/
static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
struct ath9k_channel *chan,
u16 modesIndex)
{
u32 eepMinorRev;
u32 ob5GHz = 0, db5GHz = 0;
u32 ob2GHz = 0, db2GHz = 0;
int regWrites = 0;
/*
* Software does not need to program bank data
* for single chip devices, that is AR9280 or anything
* after that.
*/
if (AR_SREV_9280_20_OR_LATER(ah))
return true;
/* Setup rf parameters */
eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
/* Setup Bank 0 Write */
ar5008_rf_bank_setup(ah->analogBank0Data, &ah->iniBank0, 1);
/* Setup Bank 1 Write */
ar5008_rf_bank_setup(ah->analogBank1Data, &ah->iniBank1, 1);
/* Setup Bank 2 Write */
ar5008_rf_bank_setup(ah->analogBank2Data, &ah->iniBank2, 1);
/* Setup Bank 3 Write */
ar5008_rf_bank_setup(ah->analogBank3Data, &ah->iniBank3,
modesIndex);
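/* Setup Bank 6 Write from the TPC table */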
{
int i;
for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
ah->analogBank6Data[i] =
INI_RA(&ah->iniBank6TPC, i, modesIndex);
}
}
/* Only the 5 or 2 GHz OB/DB need to be set for a mode */
if (eepMinorRev >= 2) {
if (IS_CHAN_2GHZ(chan)) {
ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
ob2GHz, 3, 197, 0);
ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
db2GHz, 3, 194, 0);
} else {
ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
ob5GHz, 3, 203, 0);
ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
db5GHz, 3, 200, 0);
}
}
/* Setup Bank 7 Write */
ar5008_rf_bank_setup(ah->analogBank7Data, &ah->iniBank7, 1);
/* Write Analog registers */
REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
regWrites);
REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
regWrites);
REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
regWrites);
REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
regWrites);
REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
regWrites);
REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
regWrites);
return true;
}
static void ar5008_hw_init_bb(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 synthDelay;
synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
if (IS_CHAN_B(chan))
synthDelay = (4 * synthDelay) / 22;
else
synthDelay /= 10;
if (IS_CHAN_HALF_RATE(chan))
synthDelay *= 2;
else if (IS_CHAN_QUARTER_RATE(chan))
synthDelay *= 4;
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
udelay(synthDelay + BASE_ACTIVATE_DELAY);
}
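/*
 * Illustrative numbers: if the AR_PHY_RX_DELAY field reads 100, the wait
 * is 100 / 10 = 10 us for OFDM channels or (4 * 100) / 22 = 18 us for
 * CCK-only channels, doubled/quadrupled for half/quarter-rate channels,
 * plus BASE_ACTIVATE_DELAY.
 */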
static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
{
int rx_chainmask, tx_chainmask;
rx_chainmask = ah->rxchainmask;
tx_chainmask = ah->txchainmask;
switch (rx_chainmask) {
case 0x5:
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
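/* fall through */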
case 0x3:
if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
break;
}
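/* fall through */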
case 0x1:
case 0x2:
case 0x7:
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
break;
default:
ENABLE_REGWRITE_BUFFER(ah);
break;
}
REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
REGWRITE_BUFFER_FLUSH(ah);
if (tx_chainmask == 0x5) {
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
}
if (AR_SREV_9100(ah))
REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
}
static void ar5008_hw_override_ini(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 val;
/*
* Set the RX_ABORT and RX_DIS bits and clear them only after
* RXE is set for the MAC. This prevents frames with corrupted
* descriptor status.
*/
REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
if (AR_SREV_9280_20_OR_LATER(ah)) {
val = REG_READ(ah, AR_PCU_MISC_MODE2);
if (!AR_SREV_9271(ah))
val &= ~AR_PCU_MISC_MODE2_HWWAR1;
if (AR_SREV_9287_11_OR_LATER(ah))
val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
}
REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
if (AR_SREV_9280_20_OR_LATER(ah))
return;
/*
* Disable BB clock gating
* Necessary to avoid issues on AR5416 2.0
*/
REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
/*
* Disable RIFS search on some chips to avoid baseband
* hang issues.
*/
if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
val &= ~AR_PHY_RIFS_INIT_DELAY;
REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
}
}
static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 phymode;
u32 enableDacFifo = 0;
if (AR_SREV_9285_12_OR_LATER(ah))
enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
AR_PHY_FC_ENABLE_DAC_FIFO);
phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
| AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
if (IS_CHAN_HT40(chan)) {
phymode |= AR_PHY_FC_DYN2040_EN;
if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
(chan->chanmode == CHANNEL_G_HT40PLUS))
phymode |= AR_PHY_FC_DYN2040_PRI_CH;
}
REG_WRITE(ah, AR_PHY_TURBO, phymode);
ath9k_hw_set11nmac2040(ah);
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
REGWRITE_BUFFER_FLUSH(ah);
}
static int ar5008_hw_process_ini(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
int i, regWrites = 0;
u32 modesIndex, freqIndex;
switch (chan->chanmode) {
case CHANNEL_A:
case CHANNEL_A_HT20:
modesIndex = 1;
freqIndex = 1;
break;
case CHANNEL_A_HT40PLUS:
case CHANNEL_A_HT40MINUS:
modesIndex = 2;
freqIndex = 1;
break;
case CHANNEL_G:
case CHANNEL_G_HT20:
case CHANNEL_B:
modesIndex = 4;
freqIndex = 2;
break;
case CHANNEL_G_HT40PLUS:
case CHANNEL_G_HT40MINUS:
modesIndex = 3;
freqIndex = 2;
break;
default:
return -EINVAL;
}
/*
* Set correct baseband to analog shift setting to
* access analog chips.
*/
REG_WRITE(ah, AR_PHY(0), 0x00000007);
/* Write ADDAC shifts */
REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
if (ah->eep_ops->set_addac)
ah->eep_ops->set_addac(ah, chan);
REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
ENABLE_REGWRITE_BUFFER(ah);
for (i = 0; i < ah->iniModes.ia_rows; i++) {
u32 reg = INI_RA(&ah->iniModes, i, 0);
u32 val = INI_RA(&ah->iniModes, i, modesIndex);
if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
val &= ~AR_AN_TOP2_PWDCLKIND;
REG_WRITE(ah, reg, val);
if (reg >= 0x7800 && reg < 0x78a0
&& ah->config.analog_shiftreg
&& (common->bus_ops->ath_bus_type != ATH_USB)) {
udelay(100);
}
DO_DELAY(regWrites);
}
REGWRITE_BUFFER_FLUSH(ah);
if (AR_SREV_9280(ah) || AR_SREV_9287_11_OR_LATER(ah))
REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
AR_SREV_9287_11_OR_LATER(ah))
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
if (AR_SREV_9271_10(ah)) {
REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENA);
REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_ADC_ON, 0xa);
}
ENABLE_REGWRITE_BUFFER(ah);
/* Write common array parameters */
for (i = 0; i < ah->iniCommon.ia_rows; i++) {
u32 reg = INI_RA(&ah->iniCommon, i, 0);
u32 val = INI_RA(&ah->iniCommon, i, 1);
REG_WRITE(ah, reg, val);
if (reg >= 0x7800 && reg < 0x78a0
&& ah->config.analog_shiftreg
&& (common->bus_ops->ath_bus_type != ATH_USB)) {
udelay(100);
}
DO_DELAY(regWrites);
}
REGWRITE_BUFFER_FLUSH(ah);
REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex,
regWrites);
ar5008_hw_override_ini(ah, chan);
ar5008_hw_set_channel_regs(ah, chan);
ar5008_hw_init_chain_masks(ah);
ath9k_olc_init(ah);
ath9k_hw_apply_txpower(ah, chan, false);
/* Write analog registers */
if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
ath_err(ath9k_hw_common(ah), "ar5416SetRfRegs failed\n");
return -EIO;
}
return 0;
}
static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
{
u32 rfMode = 0;
if (chan == NULL)
return;
rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
if (!AR_SREV_9280_20_OR_LATER(ah))
rfMode |= (IS_CHAN_5GHZ(chan)) ?
AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
REG_WRITE(ah, AR_PHY_MODE, rfMode);
}
static void ar5008_hw_mark_phy_inactive(struct ath_hw *ah)
{
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
}
static void ar5008_hw_set_delta_slope(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 coef_scaled, ds_coef_exp, ds_coef_man;
u32 clockMhzScaled = 0x64000000;
struct chan_centers centers;
if (IS_CHAN_HALF_RATE(chan))
clockMhzScaled = clockMhzScaled >> 1;
else if (IS_CHAN_QUARTER_RATE(chan))
clockMhzScaled = clockMhzScaled >> 2;
ath9k_hw_get_channel_centers(ah, chan, ¢ers);
coef_scaled = clockMhzScaled / centers.synth_center;
ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
&ds_coef_exp);
REG_RMW_FIELD(ah, AR_PHY_TIMING3,
AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
REG_RMW_FIELD(ah, AR_PHY_TIMING3,
AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
coef_scaled = (9 * coef_scaled) / 10;
ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
&ds_coef_exp);
REG_RMW_FIELD(ah, AR_PHY_HALFGI,
AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
REG_RMW_FIELD(ah, AR_PHY_HALFGI,
AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
}
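/*
 * Worked example (illustrative): clockMhzScaled = 0x64000000 is
 * 100 << 24, so a 2412 MHz synth centre gives coef_scaled =
 * 0x64000000 / 2412 = 695572; the half-GI pass then rescales it to
 * (9 * 695572) / 10 = 626014 before the mantissa/exponent split.
 */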
static bool ar5008_hw_rfbus_req(struct ath_hw *ah)
{
REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
}
static void ar5008_hw_rfbus_done(struct ath_hw *ah)
{
u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
if (IS_CHAN_B(ah->curchan))
synthDelay = (4 * synthDelay) / 22;
else
synthDelay /= 10;
udelay(synthDelay + BASE_ACTIVATE_DELAY);
REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
}
static void ar5008_restore_chainmask(struct ath_hw *ah)
{
int rx_chainmask = ah->rxchainmask;
if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
}
}
static u32 ar9160_hw_compute_pll_control(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 pll;
pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
if (chan && IS_CHAN_HALF_RATE(chan))
pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
else if (chan && IS_CHAN_QUARTER_RATE(chan))
pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
if (chan && IS_CHAN_5GHZ(chan))
pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
else
pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
return pll;
}
static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 pll;
pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
if (chan && IS_CHAN_HALF_RATE(chan))
pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
else if (chan && IS_CHAN_QUARTER_RATE(chan))
pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
if (chan && IS_CHAN_5GHZ(chan))
pll |= SM(0xa, AR_RTC_PLL_DIV);
else
pll |= SM(0xb, AR_RTC_PLL_DIV);
return pll;
}
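/*
 * Illustrative results: for a full-rate 5 GHz channel the AR9160 variant
 * above yields REFDIV = 0x5 with DIV = 0x50 (0x58 on 2 GHz), while the
 * AR5008 variant yields AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2 with
 * DIV = 0xa (0xb on 2 GHz); CLKSEL is only set for half/quarter-rate
 * channels.
 */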
static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
enum ath9k_ani_cmd cmd,
int param)
{
struct ar5416AniState *aniState = &ah->curchan->ani;
struct ath_common *common = ath9k_hw_common(ah);
switch (cmd & ah->ani_function) {
case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
u32 level = param;
if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(ah->totalSizeDesired));
return false;
}
REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
AR_PHY_DESIRED_SZ_TOT_DES,
ah->totalSizeDesired[level]);
REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
AR_PHY_AGC_CTL1_COARSE_LOW,
ah->coarse_low[level]);
REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
AR_PHY_AGC_CTL1_COARSE_HIGH,
ah->coarse_high[level]);
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRPWR,
ah->firpwr[level]);
if (level > aniState->noiseImmunityLevel)
ah->stats.ast_ani_niup++;
else if (level < aniState->noiseImmunityLevel)
ah->stats.ast_ani_nidown++;
aniState->noiseImmunityLevel = level;
break;
}
case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
static const int m1ThreshLow[] = { 127, 50 };
static const int m2ThreshLow[] = { 127, 40 };
static const int m1Thresh[] = { 127, 0x4d };
static const int m2Thresh[] = { 127, 0x40 };
static const int m2CountThr[] = { 31, 16 };
static const int m2CountThrLow[] = { 63, 48 };
u32 on = param ? 1 : 0;
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
m1ThreshLow[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
m2ThreshLow[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M1_THRESH,
m1Thresh[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2_THRESH,
m2Thresh[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2COUNT_THR,
m2CountThr[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
m2CountThrLow[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
m1ThreshLow[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
m2ThreshLow[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH,
m1Thresh[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH,
m2Thresh[on]);
if (on)
REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
else
REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
if (on)
ah->stats.ast_ani_ofdmon++;
else
ah->stats.ast_ani_ofdmoff++;
aniState->ofdmWeakSigDetectOff = !on;
}
break;
}
case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
static const int weakSigThrCck[] = { 8, 6 };
u32 high = param ? 1 : 0;
REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
weakSigThrCck[high]);
if (high != aniState->cckWeakSigThreshold) {
if (high)
ah->stats.ast_ani_cckhigh++;
else
ah->stats.ast_ani_ccklow++;
aniState->cckWeakSigThreshold = high;
}
break;
}
case ATH9K_ANI_FIRSTEP_LEVEL:{
static const int firstep[] = { 0, 4, 8 };
u32 level = param;
if (level >= ARRAY_SIZE(firstep)) {
ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep));
return false;
}
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP,
firstep[level]);
if (level > aniState->firstepLevel)
ah->stats.ast_ani_stepup++;
else if (level < aniState->firstepLevel)
ah->stats.ast_ani_stepdown++;
aniState->firstepLevel = level;
break;
}
case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
static const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1)) {
ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1));
return false;
}
REG_RMW_FIELD(ah, AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1,
cycpwrThr1[level]);
if (level > aniState->spurImmunityLevel)
ah->stats.ast_ani_spurup++;
else if (level < aniState->spurImmunityLevel)
ah->stats.ast_ani_spurdown++;
aniState->spurImmunityLevel = level;
break;
}
case ATH9K_ANI_PRESENT:
break;
default:
ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
ath_dbg(common, ANI, "ANI parameters:\n");
ath_dbg(common, ANI,
"noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
aniState->noiseImmunityLevel,
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff);
ath_dbg(common, ANI,
"cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
aniState->cckWeakSigThreshold,
aniState->firstepLevel,
aniState->listenTime);
ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
return true;
}
static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
enum ath9k_ani_cmd cmd,
int param)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
struct ar5416AniState *aniState = &chan->ani;
s32 value, value2;
switch (cmd & ah->ani_function) {
case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
/*
* on == 1 means ofdm weak signal detection is ON
* on == 1 is the default, for less noise immunity
*
* on == 0 means ofdm weak signal detection is OFF
* on == 0 means more noise immunity
*/
u32 on = param ? 1 : 0;
/*
* make register setting for default
* (weak sig detect ON) come from INI file
*/
int m1ThreshLow = on ?
aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
int m2ThreshLow = on ?
aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
int m1Thresh = on ?
aniState->iniDef.m1Thresh : m1Thresh_off;
int m2Thresh = on ?
aniState->iniDef.m2Thresh : m2Thresh_off;
int m2CountThr = on ?
aniState->iniDef.m2CountThr : m2CountThr_off;
int m2CountThrLow = on ?
aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
int m1ThreshLowExt = on ?
aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
int m2ThreshLowExt = on ?
aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
int m1ThreshExt = on ?
aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
int m2ThreshExt = on ?
aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
m1ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
m2ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M1_THRESH, m1Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2_THRESH, m2Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
m2CountThrLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
if (on)
REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
else
REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
!aniState->ofdmWeakSigDetectOff ?
"on" : "off",
on ? "on" : "off");
if (on)
ah->stats.ast_ani_ofdmon++;
else
ah->stats.ast_ani_ofdmoff++;
aniState->ofdmWeakSigDetectOff = !on;
}
break;
}
case ATH9K_ANI_FIRSTEP_LEVEL:{
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
ath_dbg(common, ANI,
"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep_table));
return false;
}
/*
* make register setting relative to default
* from INI file & cap value
*/
value = firstep_table[level] -
firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
aniState->iniDef.firstep;
if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
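/*
 * Worked example (illustrative, taking the table comment's default
 * level ATH9K_ANI_FIRSTEP_LVL_NEW == 2, i.e. firstep_table[2] == 0):
 * level 0 requests iniDef.firstep - 4, level 8 requests
 * iniDef.firstep + 12, each clamped to the MIN/MAX limits above.
 */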
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP,
value);
/*
* we need to set first step low register too
* make register setting relative to default
* from INI file & cap value
*/
value2 = firstep_table[level] -
firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
aniState->iniDef.firstepLow;
if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
if (level != aniState->firstepLevel) {
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
level,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value,
aniState->iniDef.firstep);
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
level,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value2,
aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
ah->stats.ast_ani_stepup++;
else if (level < aniState->firstepLevel)
ah->stats.ast_ani_stepdown++;
aniState->firstepLevel = level;
}
break;
}
case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
ath_dbg(common, ANI,
"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1_table));
return false;
}
/*
* make register setting relative to default
* from INI file & cap value
*/
value = cycpwrThr1_table[level] -
cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
aniState->iniDef.cycpwrThr1;
if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1,
value);
/*
* set AR_PHY_EXT_CCA for extension channel
* make register setting relative to default
* from INI file & cap value
*/
value2 = cycpwrThr1_table[level] -
cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
aniState->iniDef.cycpwrThr1Ext;
if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
if (level != aniState->spurImmunityLevel) {
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
level,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value,
aniState->iniDef.cycpwrThr1);
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
level,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value2,
aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
ah->stats.ast_ani_spurup++;
else if (level < aniState->spurImmunityLevel)
ah->stats.ast_ani_spurdown++;
aniState->spurImmunityLevel = level;
}
break;
}
case ATH9K_ANI_MRC_CCK:
/*
* You should not see this, as AR5008, AR9001 and AR9002
* do not have hardware support for MRC CCK.
*/
WARN_ON(1);
break;
case ATH9K_ANI_PRESENT:
break;
default:
ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff ? "on" : "off",
aniState->firstepLevel,
!aniState->mrcCCKOff ? "on" : "off",
aniState->listenTime,
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
return true;
}
static void ar5008_hw_do_getnf(struct ath_hw *ah,
int16_t nfarray[NUM_NF_READINGS])
{
int16_t nf;
nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
nfarray[0] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
nfarray[1] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
nfarray[2] = sign_extend32(nf, 8);
if (!IS_CHAN_HT40(ah->curchan))
return;
nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
nfarray[3] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
nfarray[4] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
nfarray[5] = sign_extend32(nf, 8);
}
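/*
 * Illustrative conversion: the CCA minimum-power fields are 9-bit two's
 * complement, so a raw reading of 0x1a1 (417) sign-extends to
 * 417 - 512 = -95, i.e. a noise floor of -95 dBm.
 */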
/*
* Initialize the ANI register values with default (ini) values.
* This routine is called during a (full) hardware reset after
* all the registers are initialised from the INI.
*/
static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
struct ar5416AniState *aniState = &chan->ani;
struct ath9k_ani_default *iniDef;
u32 val;
iniDef = &aniState->iniDef;
ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
chan->channel,
chan->channelFlags);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
val = REG_READ(ah, AR_PHY_SFCORR_LOW);
iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
val = REG_READ(ah, AR_PHY_SFCORR_EXT);
iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
iniDef->firstep = REG_READ_FIELD(ah,
AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP);
iniDef->firstepLow = REG_READ_FIELD(ah,
AR_PHY_FIND_SIG_LOW,
AR_PHY_FIND_SIG_FIRSTEP_LOW);
iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1);
iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
AR_PHY_EXT_CCA,
AR_PHY_EXT_TIMING5_CYCPWR_THR1);
/* these levels just got reset to defaults by the INI */
aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
aniState->mrcCCKOff = true; /* not available on pre AR9003 */
}
static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
{
ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_5416_2GHZ;
ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_5416_2GHZ;
ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_5416_2GHZ;
ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_5416_5GHZ;
ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_5416_5GHZ;
ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_5416_5GHZ;
}
static void ar5008_hw_set_radar_params(struct ath_hw *ah,
struct ath_hw_radar_conf *conf)
{
u32 radar_0 = 0, radar_1 = 0;
if (!conf) {
REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
return;
}
radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);
REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
if (conf->ext_channel)
REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
else
REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
}
static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
{
struct ath_hw_radar_conf *conf = &ah->radar_conf;
conf->fir_power = -33;
conf->radar_rssi = 20;
conf->pulse_height = 10;
conf->pulse_rssi = 24;
conf->pulse_inband = 15;
conf->pulse_maxlen = 255;
conf->pulse_inband_step = 12;
conf->radar_inband = 8;
}
void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
static const u32 ar5416_cca_regs[6] = {
AR_PHY_CCA,
AR_PHY_CH1_CCA,
AR_PHY_CH2_CCA,
AR_PHY_EXT_CCA,
AR_PHY_CH1_EXT_CCA,
AR_PHY_CH2_EXT_CCA
};
priv_ops->rf_set_freq = ar5008_hw_set_channel;
priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;
priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks;
priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks;
priv_ops->set_rf_regs = ar5008_hw_set_rf_regs;
priv_ops->set_channel_regs = ar5008_hw_set_channel_regs;
priv_ops->init_bb = ar5008_hw_init_bb;
priv_ops->process_ini = ar5008_hw_process_ini;
priv_ops->set_rfmode = ar5008_hw_set_rfmode;
priv_ops->mark_phy_inactive = ar5008_hw_mark_phy_inactive;
priv_ops->set_delta_slope = ar5008_hw_set_delta_slope;
priv_ops->rfbus_req = ar5008_hw_rfbus_req;
priv_ops->rfbus_done = ar5008_hw_rfbus_done;
priv_ops->restore_chainmask = ar5008_restore_chainmask;
priv_ops->do_getnf = ar5008_hw_do_getnf;
priv_ops->set_radar_params = ar5008_hw_set_radar_params;
if (modparam_force_new_ani) {
priv_ops->ani_control = ar5008_hw_ani_control_new;
priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
} else
priv_ops->ani_control = ar5008_hw_ani_control_old;
if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
else
priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
ar5008_hw_set_nf_limits(ah);
ar5008_hw_set_radar_conf(ah);
memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs));
}
| gpl-2.0 |
xXminiWHOOPERxX/xXminiWHOOPERxX-Kernel-m4 | arch/arm/mach-ep93xx/vision_ep9307.c | 4734 | 10316 | /*
* arch/arm/mach-ep93xx/vision_ep9307.c
* Vision Engraving Systems EP9307 SoM support.
*
* Copyright (C) 2008-2011 Vision Engraving Systems
* H Hartley Sweeten <hsweeten@visionengravers.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/i2c/pca953x.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/spi/mmc_spi.h>
#include <linux/mmc/host.h>
#include <mach/hardware.h>
#include <mach/fb.h>
#include <mach/ep93xx_spi.h>
#include <mach/gpio-ep93xx.h>
#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
#include "soc.h"
/*************************************************************************
* Static I/O mappings for the FPGA
*************************************************************************/
#define VISION_PHYS_BASE EP93XX_CS7_PHYS_BASE
#define VISION_VIRT_BASE 0xfebff000
static struct map_desc vision_io_desc[] __initdata = {
{
.virtual = VISION_VIRT_BASE,
.pfn = __phys_to_pfn(VISION_PHYS_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
},
};
static void __init vision_map_io(void)
{
ep93xx_map_io();
iotable_init(vision_io_desc, ARRAY_SIZE(vision_io_desc));
}
/*************************************************************************
* Ethernet
*************************************************************************/
static struct ep93xx_eth_data vision_eth_data __initdata = {
.phy_id = 1,
};
/*************************************************************************
* Framebuffer
*************************************************************************/
#define VISION_LCD_ENABLE EP93XX_GPIO_LINE_EGPIO1
static int vision_lcd_setup(struct platform_device *pdev)
{
int err;
err = gpio_request_one(VISION_LCD_ENABLE, GPIOF_INIT_HIGH,
dev_name(&pdev->dev));
if (err)
return err;
ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_RAS |
EP93XX_SYSCON_DEVCFG_RASONP3 |
EP93XX_SYSCON_DEVCFG_EXVC);
return 0;
}
static void vision_lcd_teardown(struct platform_device *pdev)
{
gpio_free(VISION_LCD_ENABLE);
}
static void vision_lcd_blank(int blank_mode, struct fb_info *info)
{
if (blank_mode)
gpio_set_value(VISION_LCD_ENABLE, 0);
else
gpio_set_value(VISION_LCD_ENABLE, 1);
}
static struct ep93xxfb_mach_info ep93xxfb_info __initdata = {
.num_modes = EP93XXFB_USE_MODEDB,
.bpp = 16,
.flags = EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING,
.setup = vision_lcd_setup,
.teardown = vision_lcd_teardown,
.blank = vision_lcd_blank,
};
/*************************************************************************
* GPIO Expanders
*************************************************************************/
#define PCA9539_74_GPIO_BASE (EP93XX_GPIO_LINE_MAX + 1)
#define PCA9539_75_GPIO_BASE (PCA9539_74_GPIO_BASE + 16)
#define PCA9539_76_GPIO_BASE (PCA9539_75_GPIO_BASE + 16)
#define PCA9539_77_GPIO_BASE (PCA9539_76_GPIO_BASE + 16)
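/*
 * Each PCA9539 provides 16 GPIO lines, so the expander bases are spaced
 * 16 apart, stacked directly above the SoC's own GPIO range.
 */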
static struct pca953x_platform_data pca953x_74_gpio_data = {
.gpio_base = PCA9539_74_GPIO_BASE,
.irq_base = EP93XX_BOARD_IRQ(0),
};
static struct pca953x_platform_data pca953x_75_gpio_data = {
.gpio_base = PCA9539_75_GPIO_BASE,
.irq_base = -1,
};
static struct pca953x_platform_data pca953x_76_gpio_data = {
.gpio_base = PCA9539_76_GPIO_BASE,
.irq_base = -1,
};
static struct pca953x_platform_data pca953x_77_gpio_data = {
.gpio_base = PCA9539_77_GPIO_BASE,
.irq_base = -1,
};
/*************************************************************************
* I2C Bus
*************************************************************************/
static struct i2c_gpio_platform_data vision_i2c_gpio_data __initdata = {
.sda_pin = EP93XX_GPIO_LINE_EEDAT,
.scl_pin = EP93XX_GPIO_LINE_EECLK,
};
static struct i2c_board_info vision_i2c_info[] __initdata = {
{
I2C_BOARD_INFO("isl1208", 0x6f),
.irq = IRQ_EP93XX_EXT1,
}, {
I2C_BOARD_INFO("pca9539", 0x74),
.platform_data = &pca953x_74_gpio_data,
}, {
I2C_BOARD_INFO("pca9539", 0x75),
.platform_data = &pca953x_75_gpio_data,
}, {
I2C_BOARD_INFO("pca9539", 0x76),
.platform_data = &pca953x_76_gpio_data,
}, {
I2C_BOARD_INFO("pca9539", 0x77),
.platform_data = &pca953x_77_gpio_data,
},
};
/*************************************************************************
* SPI Flash
*************************************************************************/
#define VISION_SPI_FLASH_CS EP93XX_GPIO_LINE_EGPIO7
static struct mtd_partition vision_spi_flash_partitions[] = {
{
.name = "SPI bootstrap",
.offset = 0,
.size = SZ_4K,
}, {
.name = "Bootstrap config",
.offset = MTDPART_OFS_APPEND,
.size = SZ_4K,
}, {
.name = "System config",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
static struct flash_platform_data vision_spi_flash_data = {
.name = "SPI Flash",
.parts = vision_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(vision_spi_flash_partitions),
};
static int vision_spi_flash_hw_setup(struct spi_device *spi)
{
return gpio_request_one(VISION_SPI_FLASH_CS, GPIOF_INIT_HIGH,
spi->modalias);
}
static void vision_spi_flash_hw_cleanup(struct spi_device *spi)
{
gpio_free(VISION_SPI_FLASH_CS);
}
static void vision_spi_flash_hw_cs_control(struct spi_device *spi, int value)
{
gpio_set_value(VISION_SPI_FLASH_CS, value);
}
static struct ep93xx_spi_chip_ops vision_spi_flash_hw = {
.setup = vision_spi_flash_hw_setup,
.cleanup = vision_spi_flash_hw_cleanup,
.cs_control = vision_spi_flash_hw_cs_control,
};
/*************************************************************************
* SPI SD/MMC host
*************************************************************************/
#define VISION_SPI_MMC_CS EP93XX_GPIO_LINE_G(2)
#define VISION_SPI_MMC_WP EP93XX_GPIO_LINE_F(0)
#define VISION_SPI_MMC_CD EP93XX_GPIO_LINE_EGPIO15
static struct gpio vision_spi_mmc_gpios[] = {
{ VISION_SPI_MMC_WP, GPIOF_DIR_IN, "mmc_spi:wp" },
{ VISION_SPI_MMC_CD, GPIOF_DIR_IN, "mmc_spi:cd" },
};
static int vision_spi_mmc_init(struct device *pdev,
irqreturn_t (*func)(int, void *), void *pdata)
{
int err;
err = gpio_request_array(vision_spi_mmc_gpios,
ARRAY_SIZE(vision_spi_mmc_gpios));
if (err)
return err;
err = gpio_set_debounce(VISION_SPI_MMC_CD, 1);
if (err)
goto exit_err;
err = request_irq(gpio_to_irq(VISION_SPI_MMC_CD), func,
IRQ_TYPE_EDGE_BOTH, "mmc_spi:cd", pdata);
if (err)
goto exit_err;
return 0;
exit_err:
gpio_free_array(vision_spi_mmc_gpios, ARRAY_SIZE(vision_spi_mmc_gpios));
return err;
}
static void vision_spi_mmc_exit(struct device *pdev, void *pdata)
{
free_irq(gpio_to_irq(VISION_SPI_MMC_CD), pdata);
gpio_free_array(vision_spi_mmc_gpios, ARRAY_SIZE(vision_spi_mmc_gpios));
}
static int vision_spi_mmc_get_ro(struct device *pdev)
{
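/* high == card's write-protect switch active */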
return !!gpio_get_value(VISION_SPI_MMC_WP);
}
static int vision_spi_mmc_get_cd(struct device *pdev)
{
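/* low == card present */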
return !gpio_get_value(VISION_SPI_MMC_CD);
}
static struct mmc_spi_platform_data vision_spi_mmc_data = {
.init = vision_spi_mmc_init,
.exit = vision_spi_mmc_exit,
.get_ro = vision_spi_mmc_get_ro,
.get_cd = vision_spi_mmc_get_cd,
.detect_delay = 100,
.powerup_msecs = 100,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
};
static int vision_spi_mmc_hw_setup(struct spi_device *spi)
{
return gpio_request_one(VISION_SPI_MMC_CS, GPIOF_INIT_HIGH,
spi->modalias);
}
static void vision_spi_mmc_hw_cleanup(struct spi_device *spi)
{
gpio_free(VISION_SPI_MMC_CS);
}
static void vision_spi_mmc_hw_cs_control(struct spi_device *spi, int value)
{
gpio_set_value(VISION_SPI_MMC_CS, value);
}
static struct ep93xx_spi_chip_ops vision_spi_mmc_hw = {
.setup = vision_spi_mmc_hw_setup,
.cleanup = vision_spi_mmc_hw_cleanup,
.cs_control = vision_spi_mmc_hw_cs_control,
};
/*************************************************************************
* SPI Bus
*************************************************************************/
static struct spi_board_info vision_spi_board_info[] __initdata = {
{
.modalias = "sst25l",
.platform_data = &vision_spi_flash_data,
.controller_data = &vision_spi_flash_hw,
.max_speed_hz = 20000000,
.bus_num = 0,
.chip_select = 0,
.mode = SPI_MODE_3,
}, {
.modalias = "mmc_spi",
.platform_data = &vision_spi_mmc_data,
.controller_data = &vision_spi_mmc_hw,
.max_speed_hz = 20000000,
.bus_num = 0,
.chip_select = 1,
.mode = SPI_MODE_3,
},
};
static struct ep93xx_spi_info vision_spi_master __initdata = {
.num_chipselect = ARRAY_SIZE(vision_spi_board_info),
};
/*************************************************************************
* Machine Initialization
*************************************************************************/
static void __init vision_init_machine(void)
{
ep93xx_init_devices();
ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_64M);
ep93xx_register_eth(&vision_eth_data, 1);
ep93xx_register_fb(&ep93xxfb_info);
ep93xx_register_pwm(1, 0);
/*
* Request the gpio expander's interrupt gpio line now to prevent
* the kernel from doing a WARN in gpiolib:gpio_ensure_requested().
*/
if (gpio_request_one(EP93XX_GPIO_LINE_F(7), GPIOF_DIR_IN,
"pca9539:74"))
pr_warn("cannot request interrupt gpio for pca9539:74\n");
vision_i2c_info[1].irq = gpio_to_irq(EP93XX_GPIO_LINE_F(7));
ep93xx_register_i2c(&vision_i2c_gpio_data, vision_i2c_info,
ARRAY_SIZE(vision_i2c_info));
ep93xx_register_spi(&vision_spi_master, vision_spi_board_info,
ARRAY_SIZE(vision_spi_board_info));
}
MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307")
/* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
.atag_offset = 0x100,
.map_io = vision_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = vision_init_machine,
.restart = ep93xx_restart,
MACHINE_END
| gpl-2.0 |
Hundsbuah/Note3_Samsung_Source_Drops | arch/arm/mach-davinci/board-dm355-evm.c | 4734 | 8918 | /*
* TI DaVinci EVM board support
*
* Author: Kevin Hilman, Deep Root Systems, LLC
*
* 2007 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/videodev2.h>
#include <media/tvp514x.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/nand.h>
#include <mach/mmc.h>
#include <mach/usb.h>
#include "davinci.h"
/* NOTE: this is geared for the standard config, with a socketed
* 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors. If you
* swap chips, maybe with a different block size, partitioning may
* need to be changed.
*/
#define NAND_BLOCK_SIZE SZ_128K
static struct mtd_partition davinci_nand_partitions[] = {
{
/* UBL (a few copies) plus U-Boot */
.name = "bootloader",
.offset = 0,
.size = 15 * NAND_BLOCK_SIZE,
.mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
/* U-Boot environment */
.name = "params",
.offset = MTDPART_OFS_APPEND,
.size = 1 * NAND_BLOCK_SIZE,
.mask_flags = 0,
}, {
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = SZ_4M,
.mask_flags = 0,
}, {
.name = "filesystem1",
.offset = MTDPART_OFS_APPEND,
.size = SZ_512M,
.mask_flags = 0,
}, {
.name = "filesystem2",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
.mask_flags = 0,
}
/* two blocks with bad block table (and mirror) at the end */
};
static struct davinci_nand_pdata davinci_nand_data = {
.mask_chipsel = BIT(14),
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW,
.bbt_options = NAND_BBT_USE_FLASH,
.ecc_bits = 4,
};
static struct resource davinci_nand_resources[] = {
{
.start = DM355_ASYNC_EMIF_DATA_CE0_BASE,
.end = DM355_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}, {
.start = DM355_ASYNC_EMIF_CONTROL_BASE,
.end = DM355_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device davinci_nand_device = {
.name = "davinci_nand",
.id = 0,
.num_resources = ARRAY_SIZE(davinci_nand_resources),
.resource = davinci_nand_resources,
.dev = {
.platform_data = &davinci_nand_data,
},
};
static struct davinci_i2c_platform_data i2c_pdata = {
.bus_freq = 400 /* kHz */,
.bus_delay = 0 /* usec */,
.sda_pin = 15,
.scl_pin = 14,
};
static struct snd_platform_data dm355_evm_snd_data;
static int dm355evm_mmc_gpios = -EINVAL;
static void dm355evm_mmcsd_gpios(unsigned gpio)
{
gpio_request(gpio + 0, "mmc0_ro");
gpio_request(gpio + 1, "mmc0_cd");
gpio_request(gpio + 2, "mmc1_ro");
gpio_request(gpio + 3, "mmc1_cd");
/* we "know" these are input-only so we don't
* need to call gpio_direction_input()
*/
dm355evm_mmc_gpios = gpio;
}
static struct i2c_board_info dm355evm_i2c_info[] = {
{ I2C_BOARD_INFO("dm355evm_msp", 0x25),
.platform_data = dm355evm_mmcsd_gpios,
},
/* { plus irq }, */
{ I2C_BOARD_INFO("tlv320aic33", 0x1b), },
};
static void __init evm_init_i2c(void)
{
davinci_init_i2c(&i2c_pdata);
gpio_request(5, "dm355evm_msp");
gpio_direction_input(5);
dm355evm_i2c_info[0].irq = gpio_to_irq(5);
i2c_register_board_info(1, dm355evm_i2c_info,
ARRAY_SIZE(dm355evm_i2c_info));
}
static struct resource dm355evm_dm9000_rsrc[] = {
{
/* addr */
.start = 0x04014000,
.end = 0x04014001,
.flags = IORESOURCE_MEM,
}, {
/* data */
.start = 0x04014002,
.end = 0x04014003,
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_IRQ
| IORESOURCE_IRQ_HIGHEDGE /* rising (active high) */,
},
};
static struct platform_device dm355evm_dm9000 = {
.name = "dm9000",
.id = -1,
.resource = dm355evm_dm9000_rsrc,
.num_resources = ARRAY_SIZE(dm355evm_dm9000_rsrc),
};
static struct tvp514x_platform_data tvp5146_pdata = {
.clk_polarity = 0,
.hs_polarity = 1,
.vs_polarity = 1
};
#define TVP514X_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL)
/* Inputs available at the TVP5146 */
static struct v4l2_input tvp5146_inputs[] = {
{
.index = 0,
.name = "Composite",
.type = V4L2_INPUT_TYPE_CAMERA,
.std = TVP514X_STD_ALL,
},
{
.index = 1,
.name = "S-Video",
.type = V4L2_INPUT_TYPE_CAMERA,
.std = TVP514X_STD_ALL,
},
};
/*
* this is the route info for connecting each input to decoder
* output that goes to vpfe. There is a one-to-one correspondence
* with tvp5146_inputs
*/
static struct vpfe_route tvp5146_routes[] = {
{
.input = INPUT_CVBS_VI2B,
.output = OUTPUT_10BIT_422_EMBEDDED_SYNC,
},
{
.input = INPUT_SVIDEO_VI2C_VI1C,
.output = OUTPUT_10BIT_422_EMBEDDED_SYNC,
},
};
static struct vpfe_subdev_info vpfe_sub_devs[] = {
{
.name = "tvp5146",
.grp_id = 0,
.num_inputs = ARRAY_SIZE(tvp5146_inputs),
.inputs = tvp5146_inputs,
.routes = tvp5146_routes,
.can_route = 1,
.ccdc_if_params = {
.if_type = VPFE_BT656,
.hdpol = VPFE_PINPOL_POSITIVE,
.vdpol = VPFE_PINPOL_POSITIVE,
},
.board_info = {
I2C_BOARD_INFO("tvp5146", 0x5d),
.platform_data = &tvp5146_pdata,
},
}
};
static struct vpfe_config vpfe_cfg = {
.num_subdevs = ARRAY_SIZE(vpfe_sub_devs),
.i2c_adapter_id = 1,
.sub_devs = vpfe_sub_devs,
.card_name = "DM355 EVM",
.ccdc = "DM355 CCDC",
};
static struct platform_device *davinci_evm_devices[] __initdata = {
&dm355evm_dm9000,
&davinci_nand_device,
};
static struct davinci_uart_config uart_config __initdata = {
.enabled_uarts = (1 << 0),
};
static void __init dm355_evm_map_io(void)
{
/* setup input configuration for VPFE input devices */
dm355_set_vpfe_config(&vpfe_cfg);
dm355_init();
}
static int dm355evm_mmc_get_cd(int module)
{
if (!gpio_is_valid(dm355evm_mmc_gpios))
return -ENXIO;
/* low == card present */
return !gpio_get_value_cansleep(dm355evm_mmc_gpios + 2 * module + 1);
}
static int dm355evm_mmc_get_ro(int module)
{
if (!gpio_is_valid(dm355evm_mmc_gpios))
return -ENXIO;
/* high == card's write protect switch active */
return gpio_get_value_cansleep(dm355evm_mmc_gpios + 2 * module + 0);
}
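/*
* Illustration of the indexing used by the two helpers above: with the
* base gpio passed to dm355evm_mmcsd_gpios() called g, the request order
* gives mmc0 RO = g + 0, mmc0 CD = g + 1, mmc1 RO = g + 2 and
* mmc1 CD = g + 3, i.e. exactly what "2 * module + 0/1" selects.
*/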
static struct davinci_mmc_config dm355evm_mmc_config = {
.get_cd = dm355evm_mmc_get_cd,
.get_ro = dm355evm_mmc_get_ro,
.wires = 4,
.max_freq = 50000000,
.caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
.version = MMC_CTLR_VERSION_1,
};
/* Don't connect anything to J10 unless you're only using USB host
* mode *and* have to do so with some kind of gender-bender. If
* you have proper Mini-B or Mini-A cables (or Mini-A adapters)
* the ID pin won't need any help.
*/
#ifdef CONFIG_USB_MUSB_PERIPHERAL
#define USB_ID_VALUE 0 /* ID pulled high; *should* float */
#else
#define USB_ID_VALUE 1 /* ID pulled low */
#endif
static struct spi_eeprom at25640a = {
.byte_len = SZ_64K / 8,
.name = "at25640a",
.page_size = 32,
.flags = EE_ADDR2,
};
static struct spi_board_info dm355_evm_spi_info[] __initconst = {
{
.modalias = "at25",
.platform_data = &at25640a,
.max_speed_hz = 10 * 1000 * 1000, /* at 3v3 */
.bus_num = 0,
.chip_select = 0,
.mode = SPI_MODE_0,
},
};
static __init void dm355_evm_init(void)
{
struct clk *aemif;
gpio_request(1, "dm9000");
gpio_direction_input(1);
dm355evm_dm9000_rsrc[2].start = gpio_to_irq(1);
aemif = clk_get(&dm355evm_dm9000.dev, "aemif");
if (IS_ERR(aemif))
WARN("%s: unable to get AEMIF clock\n", __func__);
else
clk_enable(aemif);
platform_add_devices(davinci_evm_devices,
ARRAY_SIZE(davinci_evm_devices));
evm_init_i2c();
davinci_serial_init(&uart_config);
/* NOTE: NAND flash timings set by the UBL are slower than
* needed by MT29F16G08FAA chips ... EMIF.A1CR is 0x40400204
* but could be 0x0400008c for about 25% faster page reads.
*/
gpio_request(2, "usb_id_toggle");
gpio_direction_output(2, USB_ID_VALUE);
/* irlml6401 switches over 1A in under 8 msec */
davinci_setup_usb(1000, 8);
davinci_setup_mmc(0, &dm355evm_mmc_config);
davinci_setup_mmc(1, &dm355evm_mmc_config);
dm355_init_spi0(BIT(0), dm355_evm_spi_info,
ARRAY_SIZE(dm355_evm_spi_info));
/* DM355 EVM uses ASP1; line-out is a stereo mini-jack */
dm355_init_asp1(ASP1_TX_EVT_EN | ASP1_RX_EVT_EN, &dm355_evm_snd_data);
}
MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
.atag_offset = 0x100,
.map_io = dm355_evm_map_io,
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm355_evm_init,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
| gpl-2.0 |
JASON-RR/jrr_TW_5.0.1_kernel | arch/arm/mach-omap1/board-voiceblue.c | 4734 | 7402 | /*
* linux/arch/arm/mach-omap1/board-voiceblue.c
*
* Modified from board-generic.c
*
* Copyright (C) 2004 2N Telekomunikace, Ladislav Michl <michl@2n.cz>
*
* Code for OMAP5910 based VoiceBlue board (VoIP to GSM gateway).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mtd/physmap.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#include <linux/smc91x.h>
#include <linux/export.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <plat/board-voiceblue.h>
#include <plat/flash.h>
#include <plat/mux.h>
#include <plat/tc.h>
#include <plat/usb.h>
#include <mach/hardware.h>
#include "common.h"
static struct plat_serial8250_port voiceblue_ports[] = {
{
.mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x40000),
.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
.iotype = UPIO_MEM,
.regshift = 1,
.uartclk = 3686400,
},
{
.mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x50000),
.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
.iotype = UPIO_MEM,
.regshift = 1,
.uartclk = 3686400,
},
{
.mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x60000),
.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
.iotype = UPIO_MEM,
.regshift = 1,
.uartclk = 3686400,
},
{
.mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x70000),
.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
.iotype = UPIO_MEM,
.regshift = 1,
.uartclk = 3686400,
},
{ },
};
static struct platform_device serial_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM1,
};
static int __init ext_uart_init(void)
{
if (!machine_is_voiceblue())
return -ENODEV;
voiceblue_ports[0].irq = gpio_to_irq(12);
voiceblue_ports[1].irq = gpio_to_irq(13);
voiceblue_ports[2].irq = gpio_to_irq(14);
voiceblue_ports[3].irq = gpio_to_irq(15);
serial_device.dev.platform_data = voiceblue_ports;
return platform_device_register(&serial_device);
}
arch_initcall(ext_uart_init);
static struct physmap_flash_data voiceblue_flash_data = {
.width = 2,
.set_vpp = omap1_set_vpp,
};
static struct resource voiceblue_flash_resource = {
.start = OMAP_CS0_PHYS,
.end = OMAP_CS0_PHYS + SZ_32M - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device voiceblue_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &voiceblue_flash_data,
},
.num_resources = 1,
.resource = &voiceblue_flash_resource,
};
static struct smc91x_platdata voiceblue_smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};
static struct resource voiceblue_smc91x_resources[] = {
[0] = {
.start = OMAP_CS2_PHYS + 0x300,
.end = OMAP_CS2_PHYS + 0x300 + 16,
.flags = IORESOURCE_MEM,
},
[1] = {
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
};
static struct platform_device voiceblue_smc91x_device = {
.name = "smc91x",
.id = 0,
.dev = {
.platform_data = &voiceblue_smc91x_info,
},
.num_resources = ARRAY_SIZE(voiceblue_smc91x_resources),
.resource = voiceblue_smc91x_resources,
};
static struct platform_device *voiceblue_devices[] __initdata = {
&voiceblue_flash_device,
&voiceblue_smc91x_device,
};
static struct omap_usb_config voiceblue_usb_config __initdata = {
.hmc_mode = 3,
.register_host = 1,
.register_dev = 1,
.pins[0] = 2,
.pins[1] = 6,
.pins[2] = 6,
};
static struct omap_board_config_kernel voiceblue_config[] = {
};
#define MACHINE_PANICED 1
#define MACHINE_REBOOTING 2
#define MACHINE_REBOOT 4
static unsigned long machine_state;
static int panic_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
if (test_and_set_bit(MACHINE_PANICED, &machine_state))
return NOTIFY_DONE;
/* Flash power LED */
omap_writeb(0x78, OMAP_LPG1_LCR);
omap_writeb(0x01, OMAP_LPG1_PMR); /* Enable clock */
return NOTIFY_DONE;
}
static struct notifier_block panic_block = {
.notifier_call = panic_event,
};
static int __init voiceblue_setup(void)
{
if (!machine_is_voiceblue())
return -ENODEV;
/* Setup panic notifier */
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
return 0;
}
postcore_initcall(voiceblue_setup);
static int wdt_gpio_state;
void voiceblue_wdt_enable(void)
{
gpio_direction_output(0, 0);
gpio_set_value(0, 1);
gpio_set_value(0, 0);
wdt_gpio_state = 0;
}
void voiceblue_wdt_disable(void)
{
gpio_set_value(0, 0);
gpio_set_value(0, 1);
gpio_set_value(0, 0);
gpio_direction_input(0);
}
void voiceblue_wdt_ping(void)
{
if (test_bit(MACHINE_REBOOT, &machine_state))
return;
wdt_gpio_state = !wdt_gpio_state;
gpio_set_value(0, wdt_gpio_state);
}
static void voiceblue_restart(char mode, const char *cmd)
{
/*
* Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
* "Global Software Reset Affects Traffic Controller Frequency".
*/
if (cpu_is_omap5912()) {
omap_writew(omap_readw(DPLL_CTL) & ~(1 << 4), DPLL_CTL);
omap_writew(0x8, ARM_RSTCT1);
}
set_bit(MACHINE_REBOOT, &machine_state);
voiceblue_wdt_enable();
while (1) ;
}
EXPORT_SYMBOL(voiceblue_wdt_enable);
EXPORT_SYMBOL(voiceblue_wdt_disable);
EXPORT_SYMBOL(voiceblue_wdt_ping);
static void __init voiceblue_init(void)
{
/* mux pins for uarts */
omap_cfg_reg(UART1_TX);
omap_cfg_reg(UART1_RTS);
omap_cfg_reg(UART2_TX);
omap_cfg_reg(UART2_RTS);
omap_cfg_reg(UART3_TX);
omap_cfg_reg(UART3_RX);
/* Watchdog */
gpio_request(0, "Watchdog");
/* smc91x reset */
gpio_request(7, "SMC91x reset");
gpio_direction_output(7, 1);
udelay(2); /* wait at least 100ns */
gpio_set_value(7, 0);
mdelay(50); /* 50ms until PHY ready */
/* smc91x interrupt pin */
gpio_request(8, "SMC91x irq");
/* 16C554 reset */
gpio_request(6, "16C554 reset");
gpio_direction_output(6, 0);
/* 16C554 interrupt pins */
gpio_request(12, "16C554 irq");
gpio_request(13, "16C554 irq");
gpio_request(14, "16C554 irq");
gpio_request(15, "16C554 irq");
irq_set_irq_type(gpio_to_irq(12), IRQ_TYPE_EDGE_RISING);
irq_set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING);
irq_set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING);
irq_set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING);
voiceblue_smc91x_resources[1].start = gpio_to_irq(8);
voiceblue_smc91x_resources[1].end = gpio_to_irq(8);
platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices));
omap_board_config = voiceblue_config;
omap_board_config_size = ARRAY_SIZE(voiceblue_config);
omap_serial_init();
omap1_usb_init(&voiceblue_usb_config);
omap_register_i2c_bus(1, 100, NULL, 0);
/* There is a good chance the board is coming up, so enable the power LED
* (it is connected through an inverter) */
omap_writeb(0x00, OMAP_LPG1_LCR);
omap_writeb(0x00, OMAP_LPG1_PMR); /* Disable clock */
}
MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
/* Maintainer: Ladislav Michl <michl@2n.cz> */
.atag_offset = 0x100,
.map_io = omap15xx_map_io,
.init_early = omap1_init_early,
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = voiceblue_init,
.timer = &omap1_timer,
.restart = voiceblue_restart,
MACHINE_END
| gpl-2.0 |
compulab/cm-qs600-kernel | drivers/net/wireless/ath/ath9k/ar5008_phy.c | 4734 | 46498 | /*
* Copyright (c) 2008-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "hw.h"
#include "hw-ops.h"
#include "../regd.h"
#include "ar9002_phy.h"
/* All code below is for AR5008, AR9001, AR9002 */
static const int firstep_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
static const int cycpwrThr1_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
/*
* register values to turn OFDM weak signal detection OFF
*/
static const int m1ThreshLow_off = 127;
static const int m2ThreshLow_off = 127;
static const int m1Thresh_off = 127;
static const int m2Thresh_off = 127;
static const int m2CountThr_off = 31;
static const int m2CountThrLow_off = 63;
static const int m1ThreshLowExt_off = 127;
static const int m2ThreshLowExt_off = 127;
static const int m1ThreshExt_off = 127;
static const int m2ThreshExt_off = 127;
static void ar5008_rf_bank_setup(u32 *bank, struct ar5416IniArray *array,
int col)
{
int i;
for (i = 0; i < array->ia_rows; i++)
bank[i] = INI_RA(array, i, col);
}
#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) \
ar5008_write_rf_array(ah, iniarray, regData, &(regWr))
static void ar5008_write_rf_array(struct ath_hw *ah, struct ar5416IniArray *array,
u32 *data, unsigned int *writecnt)
{
int r;
ENABLE_REGWRITE_BUFFER(ah);
for (r = 0; r < array->ia_rows; r++) {
REG_WRITE(ah, INI_RA(array, r, 0), data[r]);
DO_DELAY(*writecnt);
}
REGWRITE_BUFFER_FLUSH(ah);
}
/**
* ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
* @rfBuf: analog bank scratch buffer to modify
* @reg32: value whose bits are inserted into the buffer
* @numBits: number of bits to insert
* @firstBit: first bit position within the bank (1-based)
* @column: byte column within each 32-bit buffer entry
*
* Performs analog "swizzling" of parameters into their location.
* Used on external AR2133/AR5133 radios.
*/
static void ar5008_hw_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
u32 numBits, u32 firstBit,
u32 column)
{
u32 tmp32, mask, arrayEntry, lastBit;
int32_t bitPosition, bitsLeft;
tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
arrayEntry = (firstBit - 1) / 8;
bitPosition = (firstBit - 1) % 8;
bitsLeft = numBits;
while (bitsLeft > 0) {
lastBit = (bitPosition + bitsLeft > 8) ?
8 : bitPosition + bitsLeft;
mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
(column * 8);
rfBuf[arrayEntry] &= ~mask;
rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
(column * 8)) & mask;
bitsLeft -= 8 - bitPosition;
tmp32 = tmp32 >> (8 - bitPosition);
bitPosition = 0;
arrayEntry++;
}
}
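/*
* Worked example of the swizzling above: reg32 = 5 (101b), numBits = 3,
* firstBit = 181, column = 3. reverse_bits(5, 3) = 5, arrayEntry =
* 180 / 8 = 22, bitPosition = 180 % 8 = 4. The first (and only) pass
* builds mask = (0x7f ^ 0x0f) << 24 = 0x70000000 and ORs in
* (5 << 4) << 24 = 0x50000000, i.e. the three bits land in byte 3 of
* rfBuf[22] at bit offset 4.
*/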
/*
* Fix on 2.4 GHz band for orientation sensitivity issue by increasing
* rf_pwd_icsyndiv.
*
* Theoretical Rules:
* if 2 GHz band
* if forceBiasAuto
* if synth_freq < 2412
* bias = 0
* else if 2412 <= synth_freq <= 2422
* bias = 1
* else // synth_freq > 2422
* bias = 2
* else if forceBias > 0
* bias = forceBias & 7
* else
* no change, use value from ini file
* else
* no change, invalid band
*
* 1st Mod:
* 2422 also uses value of 2
* <approved>
*
* 2nd Mod:
* Less than 2412 uses value of 0, 2412 and above uses value of 2
*/
static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 tmp_reg;
int reg_writes = 0;
u32 new_bias = 0;
if (!AR_SREV_5416(ah) || synth_freq >= 3000)
return;
BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
if (synth_freq < 2412)
new_bias = 0;
else if (synth_freq < 2422)
new_bias = 1;
else
new_bias = 2;
/* pre-reverse this field */
tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
ath_dbg(common, CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
new_bias, synth_freq);
/* swizzle rf_pwd_icsyndiv */
ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
/* write Bank 6 with new params */
REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
}
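/*
* Illustrative mapping implemented above on AR5416: synth_freq 2407 ->
* bias 0, 2412 (channel 1) -> bias 1, 2437 (channel 6) -> bias 2. For
* bias 1, ath9k_hw_reverse_bits(1, 3) pre-reverses 001b to 100b = 4
* before the value is swizzled into bank 6 at bit 181.
*/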
/**
* ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
* @ah: atheros hardware structure
* @chan: channel to tune to
*
* For the external AR2133/AR5133 radios, takes the MHz channel value and sets
* the channel value. Assumes writes are enabled to the analog bus and bank6 register
* cache in ah->analogBank6Data.
*/
static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 channelSel = 0;
u32 bModeSynth = 0;
u32 aModeRefSel = 0;
u32 reg32 = 0;
u16 freq;
struct chan_centers centers;
ath9k_hw_get_channel_centers(ah, chan, &centers);
freq = centers.synth_center;
if (freq < 4800) {
u32 txctl;
if (((freq - 2192) % 5) == 0) {
channelSel = ((freq - 672) * 2 - 3040) / 10;
bModeSynth = 0;
} else if (((freq - 2224) % 5) == 0) {
channelSel = ((freq - 704) * 2 - 3040) / 10;
bModeSynth = 1;
} else {
ath_err(common, "Invalid channel %u MHz\n", freq);
return -EINVAL;
}
channelSel = (channelSel << 2) & 0xff;
channelSel = ath9k_hw_reverse_bits(channelSel, 8);
txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
if (freq == 2484) {
REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
} else {
REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
}
} else if ((freq % 20) == 0 && freq >= 5120) {
channelSel =
ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
aModeRefSel = ath9k_hw_reverse_bits(1, 2);
} else if ((freq % 10) == 0) {
channelSel =
ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
aModeRefSel = ath9k_hw_reverse_bits(2, 2);
else
aModeRefSel = ath9k_hw_reverse_bits(1, 2);
} else if ((freq % 5) == 0) {
channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
aModeRefSel = ath9k_hw_reverse_bits(1, 2);
} else {
ath_err(common, "Invalid channel %u MHz\n", freq);
return -EINVAL;
}
ar5008_hw_force_bias(ah, freq);
reg32 =
(channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
(1 << 5) | 0x1;
REG_WRITE(ah, AR_PHY(0x37), reg32);
ah->curchan = chan;
ah->curchan_rad_index = -1;
return 0;
}
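/*
* Worked example for the 2.4 GHz path above: freq = 2412 satisfies
* ((freq - 2192) % 5) == 0, so channelSel = ((2412 - 672) * 2 - 3040) / 10
* = 44 and bModeSynth = 0. (44 << 2) & 0xff = 0xb0, which reversed over
* 8 bits is 0x0d, so reg32 = (0x0d << 8) | (1 << 5) | 0x1 = 0xd21
* (aModeRefSel stays 0 on 2.4 GHz).
*/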
/**
* ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
* @ah: atheros hardware structure
* @chan: channel being configured
*
* For non single-chip solutions. Converts to baseband spur frequency given the
* input channel frequency and computes the register settings below.
*/
static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan)
{
int bb_spur = AR_NO_SPUR;
int bin, cur_bin;
int spur_freq_sd;
int spur_delta_phase;
int denominator;
int upper, lower, cur_vit_mask;
int tmp, new;
int i;
static int pilot_mask_reg[4] = {
AR_PHY_TIMING7, AR_PHY_TIMING8,
AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
static int chan_mask_reg[4] = {
AR_PHY_TIMING9, AR_PHY_TIMING10,
AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
static int inc[4] = { 0, 100, 0, 0 };
int8_t mask_m[123];
int8_t mask_p[123];
int8_t mask_amt;
int tmp_mask;
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
memset(&mask_m, 0, sizeof(int8_t) * 123);
memset(&mask_p, 0, sizeof(int8_t) * 123);
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
if (AR_NO_SPUR == cur_bb_spur)
break;
cur_bb_spur = cur_bb_spur - (chan->channel * 10);
if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
bb_spur = cur_bb_spur;
break;
}
}
if (AR_NO_SPUR == bb_spur)
return;
bin = bb_spur * 32;
tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
AR_PHY_SPUR_REG_MASK_RATE_SELECT |
AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
REG_WRITE(ah, AR_PHY_SPUR_REG, new);
spur_delta_phase = ((bb_spur * 524288) / 100) &
AR_PHY_TIMING11_SPUR_DELTA_PHASE;
denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
REG_WRITE(ah, AR_PHY_TIMING11, new);
cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
for (i = 0; i < 4; i++) {
int pilot_mask = 0;
int chan_mask = 0;
int bp = 0;
for (bp = 0; bp < 30; bp++) {
if ((cur_bin > lower) && (cur_bin < upper)) {
pilot_mask = pilot_mask | 0x1 << bp;
chan_mask = chan_mask | 0x1 << bp;
}
cur_bin += 100;
}
cur_bin += inc[i];
REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
REG_WRITE(ah, chan_mask_reg[i], chan_mask);
}
cur_vit_mask = 6100;
upper = bin + 120;
lower = bin - 120;
for (i = 0; i < 123; i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);
if (tmp_v < 75)
mask_amt = 1;
else
mask_amt = 0;
if (cur_vit_mask < 0)
mask_m[abs(cur_vit_mask / 100)] = mask_amt;
else
mask_p[cur_vit_mask / 100] = mask_amt;
}
cur_vit_mask -= 100;
}
tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
| (mask_m[48] << 26) | (mask_m[49] << 24)
| (mask_m[50] << 22) | (mask_m[51] << 20)
| (mask_m[52] << 18) | (mask_m[53] << 16)
| (mask_m[54] << 14) | (mask_m[55] << 12)
| (mask_m[56] << 10) | (mask_m[57] << 8)
| (mask_m[58] << 6) | (mask_m[59] << 4)
| (mask_m[60] << 2) | (mask_m[61] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
tmp_mask = (mask_m[31] << 28)
| (mask_m[32] << 26) | (mask_m[33] << 24)
| (mask_m[34] << 22) | (mask_m[35] << 20)
| (mask_m[36] << 18) | (mask_m[37] << 16)
| (mask_m[48] << 14) | (mask_m[39] << 12)
| (mask_m[40] << 10) | (mask_m[41] << 8)
| (mask_m[42] << 6) | (mask_m[43] << 4)
| (mask_m[44] << 2) | (mask_m[45] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
| (mask_m[18] << 26) | (mask_m[18] << 24)
| (mask_m[20] << 22) | (mask_m[20] << 20)
| (mask_m[22] << 18) | (mask_m[22] << 16)
| (mask_m[24] << 14) | (mask_m[24] << 12)
| (mask_m[25] << 10) | (mask_m[26] << 8)
| (mask_m[27] << 6) | (mask_m[28] << 4)
| (mask_m[29] << 2) | (mask_m[30] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
| (mask_m[2] << 26) | (mask_m[3] << 24)
| (mask_m[4] << 22) | (mask_m[5] << 20)
| (mask_m[6] << 18) | (mask_m[7] << 16)
| (mask_m[8] << 14) | (mask_m[9] << 12)
| (mask_m[10] << 10) | (mask_m[11] << 8)
| (mask_m[12] << 6) | (mask_m[13] << 4)
| (mask_m[14] << 2) | (mask_m[15] << 0);
REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
tmp_mask = (mask_p[15] << 28)
| (mask_p[14] << 26) | (mask_p[13] << 24)
| (mask_p[12] << 22) | (mask_p[11] << 20)
| (mask_p[10] << 18) | (mask_p[9] << 16)
| (mask_p[8] << 14) | (mask_p[7] << 12)
| (mask_p[6] << 10) | (mask_p[5] << 8)
| (mask_p[4] << 6) | (mask_p[3] << 4)
| (mask_p[2] << 2) | (mask_p[1] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
tmp_mask = (mask_p[30] << 28)
| (mask_p[29] << 26) | (mask_p[28] << 24)
| (mask_p[27] << 22) | (mask_p[26] << 20)
| (mask_p[25] << 18) | (mask_p[24] << 16)
| (mask_p[23] << 14) | (mask_p[22] << 12)
| (mask_p[21] << 10) | (mask_p[20] << 8)
| (mask_p[19] << 6) | (mask_p[18] << 4)
| (mask_p[17] << 2) | (mask_p[16] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
tmp_mask = (mask_p[45] << 28)
| (mask_p[44] << 26) | (mask_p[43] << 24)
| (mask_p[42] << 22) | (mask_p[41] << 20)
| (mask_p[40] << 18) | (mask_p[39] << 16)
| (mask_p[38] << 14) | (mask_p[37] << 12)
| (mask_p[36] << 10) | (mask_p[35] << 8)
| (mask_p[34] << 6) | (mask_p[33] << 4)
| (mask_p[32] << 2) | (mask_p[31] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
| (mask_p[59] << 26) | (mask_p[58] << 24)
| (mask_p[57] << 22) | (mask_p[56] << 20)
| (mask_p[55] << 18) | (mask_p[54] << 16)
| (mask_p[53] << 14) | (mask_p[52] << 12)
| (mask_p[51] << 10) | (mask_p[50] << 8)
| (mask_p[49] << 6) | (mask_p[48] << 4)
| (mask_p[47] << 2) | (mask_p[46] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
}
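/*
* Sanity-check arithmetic for the code above (illustration only): a spur
* 3.0 MHz above the channel gives bb_spur = 30 (units of 100 kHz), so
* bin = 960, spur_delta_phase = (30 * 524288) / 100 = 157286 and, on
* 2.4 GHz, spur_freq_sd = (30 * 2048) / 440 = 139; the pilot/channel
* mask loops then flag the bins within +/-100 of bin 960.
*/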
/**
* ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
* @ah: atheros hardware structure
*
* Only required for older devices with external AR2133/AR5133 radios.
*/
static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
{
#define ATH_ALLOC_BANK(bank, size) do { \
bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
if (!bank) { \
ath_err(common, "Cannot allocate RF banks\n"); \
return -ENOMEM; \
} \
} while (0);
struct ath_common *common = ath9k_hw_common(ah);
BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
return 0;
#undef ATH_ALLOC_BANK
}
/**
* ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
* @ah: atheros hardware structure
*
* For the external AR2133/AR5133 radio banks.
*/
static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
{
#define ATH_FREE_BANK(bank) do { \
kfree(bank); \
bank = NULL; \
} while (0);
BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
ATH_FREE_BANK(ah->analogBank0Data);
ATH_FREE_BANK(ah->analogBank1Data);
ATH_FREE_BANK(ah->analogBank2Data);
ATH_FREE_BANK(ah->analogBank3Data);
ATH_FREE_BANK(ah->analogBank6Data);
ATH_FREE_BANK(ah->analogBank6TPCData);
ATH_FREE_BANK(ah->analogBank7Data);
ATH_FREE_BANK(ah->bank6Temp);
#undef ATH_FREE_BANK
}
/**
* ar5008_hw_set_rf_regs - programs rf registers based on EEPROM
* @ah: atheros hardware structure
* @chan: channel to program
* @modesIndex: mode index (column) into the initialization arrays
*
* Used for the external AR2133/AR5133 radios.
*
* Reads the EEPROM header info from the device structure and programs
* all rf registers. This routine requires access to the analog
* rf device. This is not required for single-chip devices.
*/
static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
struct ath9k_channel *chan,
u16 modesIndex)
{
u32 eepMinorRev;
u32 ob5GHz = 0, db5GHz = 0;
u32 ob2GHz = 0, db2GHz = 0;
int regWrites = 0;
/*
* Software does not need to program bank data
* for single chip devices, that is AR9280 or anything
* after that.
*/
if (AR_SREV_9280_20_OR_LATER(ah))
return true;
/* Setup rf parameters */
eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
/* Setup Bank 0 Write */
ar5008_rf_bank_setup(ah->analogBank0Data, &ah->iniBank0, 1);
/* Setup Bank 1 Write */
ar5008_rf_bank_setup(ah->analogBank1Data, &ah->iniBank1, 1);
/* Setup Bank 2 Write */
ar5008_rf_bank_setup(ah->analogBank2Data, &ah->iniBank2, 1);
/* Setup Bank 3 Write */
ar5008_rf_bank_setup(ah->analogBank3Data, &ah->iniBank3,
modesIndex);
{
int i;
for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
ah->analogBank6Data[i] =
INI_RA(&ah->iniBank6TPC, i, modesIndex);
}
}
/* Only the 5 or 2 GHz OB/DB need to be set for a mode */
if (eepMinorRev >= 2) {
if (IS_CHAN_2GHZ(chan)) {
ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
ob2GHz, 3, 197, 0);
ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
db2GHz, 3, 194, 0);
} else {
ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
ob5GHz, 3, 203, 0);
ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
db5GHz, 3, 200, 0);
}
}
/* Setup Bank 7 Write */
ar5008_rf_bank_setup(ah->analogBank7Data, &ah->iniBank7, 1);
/* Write Analog registers */
REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
regWrites);
REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
regWrites);
REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
regWrites);
REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
regWrites);
REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
regWrites);
REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
regWrites);
return true;
}
static void ar5008_hw_init_bb(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 synthDelay;
synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
if (IS_CHAN_B(chan))
synthDelay = (4 * synthDelay) / 22;
else
synthDelay /= 10;
if (IS_CHAN_HALF_RATE(chan))
synthDelay *= 2;
else if (IS_CHAN_QUARTER_RATE(chan))
synthDelay *= 4;
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
udelay(synthDelay + BASE_ACTIVATE_DELAY);
}
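/*
* Example of the delay scaling above: with AR_PHY_RX_DELAY reading 110,
* a CCK channel waits (4 * 110) / 22 = 20 while an OFDM channel waits
* 110 / 10 = 11 (doubled again for half rate, quadrupled for quarter
* rate) before the BASE_ACTIVATE_DELAY pad is added.
*/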
static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
{
int rx_chainmask, tx_chainmask;
rx_chainmask = ah->rxchainmask;
tx_chainmask = ah->txchainmask;
switch (rx_chainmask) {
case 0x5:
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
case 0x3:
if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
break;
}
case 0x1:
case 0x2:
case 0x7:
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
break;
default:
ENABLE_REGWRITE_BUFFER(ah);
break;
}
REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
REGWRITE_BUFFER_FLUSH(ah);
if (tx_chainmask == 0x5) {
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
}
if (AR_SREV_9100(ah))
REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
}
static void ar5008_hw_override_ini(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 val;
/*
* Set the RX_ABORT and RX_DIS bits and clear them only after
* RXE is set for the MAC. This prevents frames with corrupted
* descriptor status.
*/
REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
if (AR_SREV_9280_20_OR_LATER(ah)) {
val = REG_READ(ah, AR_PCU_MISC_MODE2);
if (!AR_SREV_9271(ah))
val &= ~AR_PCU_MISC_MODE2_HWWAR1;
if (AR_SREV_9287_11_OR_LATER(ah))
val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
}
REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
if (AR_SREV_9280_20_OR_LATER(ah))
return;
/*
* Disable BB clock gating
* Necessary to avoid issues on AR5416 2.0
*/
REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
/*
* Disable RIFS search on some chips to avoid baseband
* hang issues.
*/
if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
val &= ~AR_PHY_RIFS_INIT_DELAY;
REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
}
}
static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 phymode;
u32 enableDacFifo = 0;
if (AR_SREV_9285_12_OR_LATER(ah))
enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
AR_PHY_FC_ENABLE_DAC_FIFO);
phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
| AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
if (IS_CHAN_HT40(chan)) {
phymode |= AR_PHY_FC_DYN2040_EN;
if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
(chan->chanmode == CHANNEL_G_HT40PLUS))
phymode |= AR_PHY_FC_DYN2040_PRI_CH;
}
REG_WRITE(ah, AR_PHY_TURBO, phymode);
ath9k_hw_set11nmac2040(ah);
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
REGWRITE_BUFFER_FLUSH(ah);
}
static int ar5008_hw_process_ini(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
int i, regWrites = 0;
u32 modesIndex, freqIndex;
switch (chan->chanmode) {
case CHANNEL_A:
case CHANNEL_A_HT20:
modesIndex = 1;
freqIndex = 1;
break;
case CHANNEL_A_HT40PLUS:
case CHANNEL_A_HT40MINUS:
modesIndex = 2;
freqIndex = 1;
break;
case CHANNEL_G:
case CHANNEL_G_HT20:
case CHANNEL_B:
modesIndex = 4;
freqIndex = 2;
break;
case CHANNEL_G_HT40PLUS:
case CHANNEL_G_HT40MINUS:
modesIndex = 3;
freqIndex = 2;
break;
default:
return -EINVAL;
}
/*
* Set correct baseband to analog shift setting to
* access analog chips.
*/
REG_WRITE(ah, AR_PHY(0), 0x00000007);
/* Write ADDAC shifts */
REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
if (ah->eep_ops->set_addac)
ah->eep_ops->set_addac(ah, chan);
REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
ENABLE_REGWRITE_BUFFER(ah);
for (i = 0; i < ah->iniModes.ia_rows; i++) {
u32 reg = INI_RA(&ah->iniModes, i, 0);
u32 val = INI_RA(&ah->iniModes, i, modesIndex);
if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
val &= ~AR_AN_TOP2_PWDCLKIND;
REG_WRITE(ah, reg, val);
if (reg >= 0x7800 && reg < 0x78a0
&& ah->config.analog_shiftreg
&& (common->bus_ops->ath_bus_type != ATH_USB)) {
udelay(100);
}
DO_DELAY(regWrites);
}
REGWRITE_BUFFER_FLUSH(ah);
if (AR_SREV_9280(ah) || AR_SREV_9287_11_OR_LATER(ah))
REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
AR_SREV_9287_11_OR_LATER(ah))
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
if (AR_SREV_9271_10(ah)) {
REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENA);
REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_ADC_ON, 0xa);
}
ENABLE_REGWRITE_BUFFER(ah);
/* Write common array parameters */
for (i = 0; i < ah->iniCommon.ia_rows; i++) {
u32 reg = INI_RA(&ah->iniCommon, i, 0);
u32 val = INI_RA(&ah->iniCommon, i, 1);
REG_WRITE(ah, reg, val);
if (reg >= 0x7800 && reg < 0x78a0
&& ah->config.analog_shiftreg
&& (common->bus_ops->ath_bus_type != ATH_USB)) {
udelay(100);
}
DO_DELAY(regWrites);
}
REGWRITE_BUFFER_FLUSH(ah);
REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex,
regWrites);
ar5008_hw_override_ini(ah, chan);
ar5008_hw_set_channel_regs(ah, chan);
ar5008_hw_init_chain_masks(ah);
ath9k_olc_init(ah);
ath9k_hw_apply_txpower(ah, chan, false);
/* Write analog registers */
if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
ath_err(ath9k_hw_common(ah), "ar5416SetRfRegs failed\n");
return -EIO;
}
return 0;
}
static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
{
u32 rfMode = 0;
if (chan == NULL)
return;
rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
if (!AR_SREV_9280_20_OR_LATER(ah))
rfMode |= (IS_CHAN_5GHZ(chan)) ?
AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
REG_WRITE(ah, AR_PHY_MODE, rfMode);
}
static void ar5008_hw_mark_phy_inactive(struct ath_hw *ah)
{
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
}
static void ar5008_hw_set_delta_slope(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 coef_scaled, ds_coef_exp, ds_coef_man;
u32 clockMhzScaled = 0x64000000;
struct chan_centers centers;
if (IS_CHAN_HALF_RATE(chan))
clockMhzScaled = clockMhzScaled >> 1;
else if (IS_CHAN_QUARTER_RATE(chan))
clockMhzScaled = clockMhzScaled >> 2;
ath9k_hw_get_channel_centers(ah, chan, &centers);
coef_scaled = clockMhzScaled / centers.synth_center;
ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
&ds_coef_exp);
REG_RMW_FIELD(ah, AR_PHY_TIMING3,
AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
REG_RMW_FIELD(ah, AR_PHY_TIMING3,
AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
coef_scaled = (9 * coef_scaled) / 10;
ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
&ds_coef_exp);
REG_RMW_FIELD(ah, AR_PHY_HALFGI,
AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
REG_RMW_FIELD(ah, AR_PHY_HALFGI,
AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
}
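/*
* Rough numbers for the computation above (illustration): at
* synth_center = 2412 MHz, coef_scaled = 0x64000000 / 2412 = 695572;
* the half-GI pair is then derived from (9 * 695572) / 10 = 626014
* before both are split into mantissa/exponent form by
* ath9k_hw_get_delta_slope_vals().
*/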
static bool ar5008_hw_rfbus_req(struct ath_hw *ah)
{
REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
}
static void ar5008_hw_rfbus_done(struct ath_hw *ah)
{
u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
if (IS_CHAN_B(ah->curchan))
synthDelay = (4 * synthDelay) / 22;
else
synthDelay /= 10;
udelay(synthDelay + BASE_ACTIVATE_DELAY);
REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
}
static void ar5008_restore_chainmask(struct ath_hw *ah)
{
int rx_chainmask = ah->rxchainmask;
if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
}
}
static u32 ar9160_hw_compute_pll_control(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 pll;
pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
if (chan && IS_CHAN_HALF_RATE(chan))
pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
else if (chan && IS_CHAN_QUARTER_RATE(chan))
pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
if (chan && IS_CHAN_5GHZ(chan))
pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
else
pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
return pll;
}
static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 pll;
pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
if (chan && IS_CHAN_HALF_RATE(chan))
pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
else if (chan && IS_CHAN_QUARTER_RATE(chan))
pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
if (chan && IS_CHAN_5GHZ(chan))
pll |= SM(0xa, AR_RTC_PLL_DIV);
else
pll |= SM(0xb, AR_RTC_PLL_DIV);
return pll;
}
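/*
* For illustration, the full-rate results of the two helpers above:
* the ar5008 variant programs AR_RTC_PLL_DIV to 0xb on 2.4 GHz and 0xa
* on 5 GHz (plus REFDIV 5 and DIV2), while the ar9160 variant uses
* REFDIV 0x5 with AR_RTC_9160_PLL_DIV of 0x58 and 0x50 respectively;
* half and quarter rate only change the CLKSEL field (0x1 or 0x2).
*/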
static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
enum ath9k_ani_cmd cmd,
int param)
{
struct ar5416AniState *aniState = &ah->curchan->ani;
struct ath_common *common = ath9k_hw_common(ah);
switch (cmd & ah->ani_function) {
case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
u32 level = param;
if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(ah->totalSizeDesired));
return false;
}
REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
AR_PHY_DESIRED_SZ_TOT_DES,
ah->totalSizeDesired[level]);
REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
AR_PHY_AGC_CTL1_COARSE_LOW,
ah->coarse_low[level]);
REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
AR_PHY_AGC_CTL1_COARSE_HIGH,
ah->coarse_high[level]);
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRPWR,
ah->firpwr[level]);
if (level > aniState->noiseImmunityLevel)
ah->stats.ast_ani_niup++;
else if (level < aniState->noiseImmunityLevel)
ah->stats.ast_ani_nidown++;
aniState->noiseImmunityLevel = level;
break;
}
case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
static const int m1ThreshLow[] = { 127, 50 };
static const int m2ThreshLow[] = { 127, 40 };
static const int m1Thresh[] = { 127, 0x4d };
static const int m2Thresh[] = { 127, 0x40 };
static const int m2CountThr[] = { 31, 16 };
static const int m2CountThrLow[] = { 63, 48 };
u32 on = param ? 1 : 0;
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
m1ThreshLow[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
m2ThreshLow[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M1_THRESH,
m1Thresh[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2_THRESH,
m2Thresh[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2COUNT_THR,
m2CountThr[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
m2CountThrLow[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
m1ThreshLow[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
m2ThreshLow[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH,
m1Thresh[on]);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH,
m2Thresh[on]);
if (on)
REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
else
REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
if (on)
ah->stats.ast_ani_ofdmon++;
else
ah->stats.ast_ani_ofdmoff++;
aniState->ofdmWeakSigDetectOff = !on;
}
break;
}
case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
static const int weakSigThrCck[] = { 8, 6 };
u32 high = param ? 1 : 0;
REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
weakSigThrCck[high]);
if (high != aniState->cckWeakSigThreshold) {
if (high)
ah->stats.ast_ani_cckhigh++;
else
ah->stats.ast_ani_ccklow++;
aniState->cckWeakSigThreshold = high;
}
break;
}
case ATH9K_ANI_FIRSTEP_LEVEL:{
static const int firstep[] = { 0, 4, 8 };
u32 level = param;
if (level >= ARRAY_SIZE(firstep)) {
ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep));
return false;
}
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP,
firstep[level]);
if (level > aniState->firstepLevel)
ah->stats.ast_ani_stepup++;
else if (level < aniState->firstepLevel)
ah->stats.ast_ani_stepdown++;
aniState->firstepLevel = level;
break;
}
case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
static const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1)) {
ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1));
return false;
}
REG_RMW_FIELD(ah, AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1,
cycpwrThr1[level]);
if (level > aniState->spurImmunityLevel)
ah->stats.ast_ani_spurup++;
else if (level < aniState->spurImmunityLevel)
ah->stats.ast_ani_spurdown++;
aniState->spurImmunityLevel = level;
break;
}
case ATH9K_ANI_PRESENT:
break;
default:
ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
ath_dbg(common, ANI, "ANI parameters:\n");
ath_dbg(common, ANI,
"noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
aniState->noiseImmunityLevel,
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff);
ath_dbg(common, ANI,
"cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
aniState->cckWeakSigThreshold,
aniState->firstepLevel,
aniState->listenTime);
ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
return true;
}
static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
enum ath9k_ani_cmd cmd,
int param)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
struct ar5416AniState *aniState = &chan->ani;
s32 value, value2;
switch (cmd & ah->ani_function) {
case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
/*
* on == 1 means ofdm weak signal detection is ON
* on == 1 is the default, for less noise immunity
*
* on == 0 means ofdm weak signal detection is OFF
* on == 0 means more noise immunity
*/
u32 on = param ? 1 : 0;
/*
* make register setting for default
* (weak sig detect ON) come from INI file
*/
int m1ThreshLow = on ?
aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
int m2ThreshLow = on ?
aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
int m1Thresh = on ?
aniState->iniDef.m1Thresh : m1Thresh_off;
int m2Thresh = on ?
aniState->iniDef.m2Thresh : m2Thresh_off;
int m2CountThr = on ?
aniState->iniDef.m2CountThr : m2CountThr_off;
int m2CountThrLow = on ?
aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
int m1ThreshLowExt = on ?
aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
int m2ThreshLowExt = on ?
aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
int m1ThreshExt = on ?
aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
int m2ThreshExt = on ?
aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
m1ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
m2ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M1_THRESH, m1Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2_THRESH, m2Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
m2CountThrLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
if (on)
REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
else
REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
!aniState->ofdmWeakSigDetectOff ?
"on" : "off",
on ? "on" : "off");
if (on)
ah->stats.ast_ani_ofdmon++;
else
ah->stats.ast_ani_ofdmoff++;
aniState->ofdmWeakSigDetectOff = !on;
}
break;
}
case ATH9K_ANI_FIRSTEP_LEVEL:{
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
ath_dbg(common, ANI,
"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep_table));
return false;
}
/*
* make register setting relative to default
* from INI file & cap value
*/
value = firstep_table[level] -
firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
aniState->iniDef.firstep;
if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP,
value);
/*
* we need to set first step low register too
* make register setting relative to default
* from INI file & cap value
*/
value2 = firstep_table[level] -
firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
aniState->iniDef.firstepLow;
if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
if (level != aniState->firstepLevel) {
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
level,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value,
aniState->iniDef.firstep);
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
level,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value2,
aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
ah->stats.ast_ani_stepup++;
else if (level < aniState->firstepLevel)
ah->stats.ast_ani_stepdown++;
aniState->firstepLevel = level;
}
break;
}
case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
ath_dbg(common, ANI,
"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1_table));
return false;
}
/*
* make register setting relative to default
* from INI file & cap value
*/
value = cycpwrThr1_table[level] -
cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
aniState->iniDef.cycpwrThr1;
if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1,
value);
/*
* set AR_PHY_EXT_CCA for extension channel
* make register setting relative to default
* from INI file & cap value
*/
value2 = cycpwrThr1_table[level] -
cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
aniState->iniDef.cycpwrThr1Ext;
if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
if (level != aniState->spurImmunityLevel) {
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
level,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value,
aniState->iniDef.cycpwrThr1);
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
level,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value2,
aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
ah->stats.ast_ani_spurup++;
else if (level < aniState->spurImmunityLevel)
ah->stats.ast_ani_spurdown++;
aniState->spurImmunityLevel = level;
}
break;
}
case ATH9K_ANI_MRC_CCK:
/*
* You should not see this, as AR5008, AR9001 and AR9002
* do not have hardware support for MRC CCK.
*/
WARN_ON(1);
break;
case ATH9K_ANI_PRESENT:
break;
default:
ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff ? "on" : "off",
aniState->firstepLevel,
!aniState->mrcCCKOff ? "on" : "off",
aniState->listenTime,
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
return true;
}
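/*
* Worked example for the "relative to INI default" logic above, using a
* hypothetical iniDef.firstep of 33 and the default level of 2 noted in
* firstep_table's comment: requesting FIRSTEP level 4 yields
* value = firstep_table[4] - firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW]
* + 33 = 4 - 0 + 33 = 37, which is written after clamping to the
* ATH9K_SIG_FIRSTEP_SETTING_MIN/MAX bounds.
*/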
static void ar5008_hw_do_getnf(struct ath_hw *ah,
int16_t nfarray[NUM_NF_READINGS])
{
int16_t nf;
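/*
 * The minCCApwr fields are 9-bit two's complement readings, so
 * sign_extend32(nf, 8) replicates bit 8 upwards. For example, a raw
 * field of 0x1A5 (421) becomes 421 - 512 = -91, a typical
 * noise-floor value in dBm.
 */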
nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
nfarray[0] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
nfarray[1] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
nfarray[2] = sign_extend32(nf, 8);
if (!IS_CHAN_HT40(ah->curchan))
return;
nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
nfarray[3] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
nfarray[4] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
nfarray[5] = sign_extend32(nf, 8);
}
/*
* Initialize the ANI register values with default (ini) values.
* This routine is called during a (full) hardware reset after
* all the registers are initialised from the INI.
*/
static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
struct ar5416AniState *aniState = &chan->ani;
struct ath9k_ani_default *iniDef;
u32 val;
iniDef = &aniState->iniDef;
ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
chan->channel,
chan->channelFlags);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
val = REG_READ(ah, AR_PHY_SFCORR_LOW);
iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
val = REG_READ(ah, AR_PHY_SFCORR_EXT);
iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
iniDef->firstep = REG_READ_FIELD(ah,
AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP);
iniDef->firstepLow = REG_READ_FIELD(ah,
AR_PHY_FIND_SIG_LOW,
AR_PHY_FIND_SIG_FIRSTEP_LOW);
iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1);
iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
AR_PHY_EXT_CCA,
AR_PHY_EXT_TIMING5_CYCPWR_THR1);
/* these levels just got reset to defaults by the INI */
aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
aniState->mrcCCKOff = true; /* not available on pre AR9003 */
}
static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
{
ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_5416_2GHZ;
ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_5416_2GHZ;
ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_5416_2GHZ;
ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_5416_5GHZ;
ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_5416_5GHZ;
ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_5416_5GHZ;
}
static void ar5008_hw_set_radar_params(struct ath_hw *ah,
struct ath_hw_radar_conf *conf)
{
u32 radar_0 = 0, radar_1 = 0;
if (!conf) {
REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
return;
}
radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);
REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
if (conf->ext_channel)
REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
else
REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
}
static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
{
struct ath_hw_radar_conf *conf = &ah->radar_conf;
conf->fir_power = -33;
conf->radar_rssi = 20;
conf->pulse_height = 10;
conf->pulse_rssi = 24;
conf->pulse_inband = 15;
conf->pulse_maxlen = 255;
conf->pulse_inband_step = 12;
conf->radar_inband = 8;
}
void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
static const u32 ar5416_cca_regs[6] = {
AR_PHY_CCA,
AR_PHY_CH1_CCA,
AR_PHY_CH2_CCA,
AR_PHY_EXT_CCA,
AR_PHY_CH1_EXT_CCA,
AR_PHY_CH2_EXT_CCA
};
priv_ops->rf_set_freq = ar5008_hw_set_channel;
priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;
priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks;
priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks;
priv_ops->set_rf_regs = ar5008_hw_set_rf_regs;
priv_ops->set_channel_regs = ar5008_hw_set_channel_regs;
priv_ops->init_bb = ar5008_hw_init_bb;
priv_ops->process_ini = ar5008_hw_process_ini;
priv_ops->set_rfmode = ar5008_hw_set_rfmode;
priv_ops->mark_phy_inactive = ar5008_hw_mark_phy_inactive;
priv_ops->set_delta_slope = ar5008_hw_set_delta_slope;
priv_ops->rfbus_req = ar5008_hw_rfbus_req;
priv_ops->rfbus_done = ar5008_hw_rfbus_done;
priv_ops->restore_chainmask = ar5008_restore_chainmask;
priv_ops->do_getnf = ar5008_hw_do_getnf;
priv_ops->set_radar_params = ar5008_hw_set_radar_params;
if (modparam_force_new_ani) {
priv_ops->ani_control = ar5008_hw_ani_control_new;
priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
} else
priv_ops->ani_control = ar5008_hw_ani_control_old;
if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
else
priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
ar5008_hw_set_nf_limits(ah);
ar5008_hw_set_radar_conf(ah);
memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs));
}
| gpl-2.0 |
anoane/HTC-HD2-kernel-3.4 | arch/arm/mach-davinci/board-sffsdr.c | 4734 | 4553 | /*
* Lyrtech SFFSDR board support.
*
* Copyright (C) 2008 Philip Balister, OpenSDR <philip@opensdr.com>
* Copyright (C) 2008 Lyrtech <www.lyrtech.com>
*
* Based on DV-EVM platform, original copyright follows:
*
* Copyright (C) 2007 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <mach/common.h>
#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/mux.h>
#include <mach/usb.h>
#include "davinci.h"
#define SFFSDR_PHY_ID "davinci_mdio-0:01"
static struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
/* U-Boot Environment: Block 0
* UBL: Block 1
* U-Boot: Blocks 6-7 (256 kb)
* Integrity Kernel: Blocks 8-31 (3 Mb)
* Integrity Data: Blocks 100-END
*/
{
.name = "Linux Kernel",
.offset = 32 * SZ_128K,
.size = 16 * SZ_128K, /* 2 MB */
.mask_flags = MTD_WRITEABLE, /* Force read-only */
},
{
.name = "Linux ROOT",
.offset = MTDPART_OFS_APPEND,
.size = 256 * SZ_128K, /* 32 MB */
.mask_flags = 0, /* R/W */
},
};
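/*
 * Illustrative layout arithmetic: with SZ_128K erase blocks the kernel
 * partition starts 32 * 128 KiB = 4 MiB into the device and spans
 * 16 blocks (2 MiB); the root partition follows at MTDPART_OFS_APPEND
 * and spans 256 blocks (32 MiB).
 */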
static struct flash_platform_data davinci_sffsdr_nandflash_data = {
.parts = davinci_sffsdr_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_sffsdr_nandflash_partition),
};
static struct resource davinci_sffsdr_nandflash_resource[] = {
{
.start = DM644X_ASYNC_EMIF_DATA_CE0_BASE,
.end = DM644X_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
}, {
.start = DM644X_ASYNC_EMIF_CONTROL_BASE,
.end = DM644X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device davinci_sffsdr_nandflash_device = {
.name = "davinci_nand", /* Name of driver */
.id = 0,
.dev = {
.platform_data = &davinci_sffsdr_nandflash_data,
},
.num_resources = ARRAY_SIZE(davinci_sffsdr_nandflash_resource),
.resource = davinci_sffsdr_nandflash_resource,
};
static struct at24_platform_data eeprom_info = {
.byte_len = (64*1024) / 8,
.page_size = 32,
.flags = AT24_FLAG_ADDR16,
};
static struct i2c_board_info __initdata i2c_info[] = {
{
I2C_BOARD_INFO("24lc64", 0x50),
.platform_data = &eeprom_info,
},
/* Other I2C devices:
* MSP430, addr 0x23 (not used)
* PCA9543, addr 0x70 (setup done by U-Boot)
* ADS7828, addr 0x48 (ADC for voltage monitoring.)
*/
};
static struct davinci_i2c_platform_data i2c_pdata = {
.bus_freq = 20 /* kHz */,
.bus_delay = 100 /* usec */,
};
static void __init sffsdr_init_i2c(void)
{
davinci_init_i2c(&i2c_pdata);
i2c_register_board_info(1, i2c_info, ARRAY_SIZE(i2c_info));
}
static struct platform_device *davinci_sffsdr_devices[] __initdata = {
&davinci_sffsdr_nandflash_device,
};
static struct davinci_uart_config uart_config __initdata = {
.enabled_uarts = (1 << 0),
};
static void __init davinci_sffsdr_map_io(void)
{
dm644x_init();
}
static __init void davinci_sffsdr_init(void)
{
struct davinci_soc_info *soc_info = &davinci_soc_info;
platform_add_devices(davinci_sffsdr_devices,
ARRAY_SIZE(davinci_sffsdr_devices));
sffsdr_init_i2c();
davinci_serial_init(&uart_config);
soc_info->emac_pdata->phy_id = SFFSDR_PHY_ID;
davinci_setup_usb(0, 0); /* We support only peripheral mode. */
/* mux VLYNQ pins */
davinci_cfg_reg(DM644X_VLYNQEN);
davinci_cfg_reg(DM644X_VLYNQWD);
}
MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
/* Maintainer: Hugo Villeneuve hugo.villeneuve@lyrtech.com */
.atag_offset = 0x100,
.map_io = davinci_sffsdr_map_io,
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_sffsdr_init,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
| gpl-2.0 |
omerjerk/CodyKernel-hammerhead | drivers/spi/spi-sh.c | 4990 | 12641 | /*
* SH SPI bus driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
*
* Based on pxa2xx_spi.c:
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#define SPI_SH_TBR 0x00
#define SPI_SH_RBR 0x00
#define SPI_SH_CR1 0x08
#define SPI_SH_CR2 0x10
#define SPI_SH_CR3 0x18
#define SPI_SH_CR4 0x20
#define SPI_SH_CR5 0x28
/* CR1 */
#define SPI_SH_TBE 0x80
#define SPI_SH_TBF 0x40
#define SPI_SH_RBE 0x20
#define SPI_SH_RBF 0x10
#define SPI_SH_PFONRD 0x08
#define SPI_SH_SSDB 0x04
#define SPI_SH_SSD 0x02
#define SPI_SH_SSA 0x01
/* CR2 */
#define SPI_SH_RSTF 0x80
#define SPI_SH_LOOPBK 0x40
#define SPI_SH_CPOL 0x20
#define SPI_SH_CPHA 0x10
#define SPI_SH_L1M0 0x08
/* CR3 */
#define SPI_SH_MAX_BYTE 0xFF
/* CR4 */
#define SPI_SH_TBEI 0x80
#define SPI_SH_TBFI 0x40
#define SPI_SH_RBEI 0x20
#define SPI_SH_RBFI 0x10
#define SPI_SH_WPABRT 0x04
#define SPI_SH_SSS 0x01
/* CR8 */
#define SPI_SH_P1L0 0x80
#define SPI_SH_PP1L0 0x40
#define SPI_SH_MUXI 0x20
#define SPI_SH_MUXIRQ 0x10
#define SPI_SH_FIFO_SIZE 32
#define SPI_SH_SEND_TIMEOUT (3 * HZ)
#define SPI_SH_RECEIVE_TIMEOUT (HZ >> 3)
#undef DEBUG
struct spi_sh_data {
void __iomem *addr;
int irq;
struct spi_master *master;
struct list_head queue;
struct workqueue_struct *workqueue;
struct work_struct ws;
unsigned long cr1;
wait_queue_head_t wait;
spinlock_t lock;
int width;
};
static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
unsigned long offset)
{
if (ss->width == 8)
iowrite8(data, ss->addr + (offset >> 2));
else if (ss->width == 32)
iowrite32(data, ss->addr + offset);
}
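/*
 * Note on the register stride (a reading of the code, not taken from a
 * datasheet): the SPI_SH_* offsets above assume 32-bit spacing
 * (0x00, 0x08, 0x10, ...). On an 8-bit bus the registers are packed,
 * so the offset is scaled down with "offset >> 2"; e.g. SPI_SH_CR1
 * (0x08) maps to byte offset 0x02.
 */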
static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
{
if (ss->width == 8)
return ioread8(ss->addr + (offset >> 2));
else if (ss->width == 32)
return ioread32(ss->addr + offset);
else
return 0;
}
static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
unsigned long offset)
{
unsigned long tmp;
tmp = spi_sh_read(ss, offset);
tmp |= val;
spi_sh_write(ss, tmp, offset);
}
static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
unsigned long offset)
{
unsigned long tmp;
tmp = spi_sh_read(ss, offset);
tmp &= ~val;
spi_sh_write(ss, tmp, offset);
}
static void clear_fifo(struct spi_sh_data *ss)
{
spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
}
static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss)
{
int timeout = 100000;
while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
udelay(10);
if (timeout-- < 0)
return -ETIMEDOUT;
}
return 0;
}
static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss)
{
int timeout = 100000;
while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) {
udelay(10);
if (timeout-- < 0)
return -ETIMEDOUT;
}
return 0;
}
static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
struct spi_transfer *t)
{
int i, retval = 0;
int remain = t->len;
int cur_len;
unsigned char *data;
unsigned long tmp;
long ret;
if (t->len)
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
data = (unsigned char *)t->tx_buf;
while (remain > 0) {
cur_len = min(SPI_SH_FIFO_SIZE, remain);
for (i = 0; i < cur_len &&
!(spi_sh_read(ss, SPI_SH_CR4) &
SPI_SH_WPABRT) &&
!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF);
i++)
spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR);
if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) {
/* Abort SPI operation */
spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4);
retval = -EIO;
break;
}
cur_len = i;
remain -= cur_len;
data += cur_len;
if (remain > 0) {
ss->cr1 &= ~SPI_SH_TBE;
spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
ret = wait_event_interruptible_timeout(ss->wait,
ss->cr1 & SPI_SH_TBE,
SPI_SH_SEND_TIMEOUT);
if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
printk(KERN_ERR "%s: timeout\n", __func__);
return -ETIMEDOUT;
}
}
}
if (list_is_last(&t->transfer_list, &mesg->transfers)) {
tmp = spi_sh_read(ss, SPI_SH_CR1);
tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
spi_sh_write(ss, tmp, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
ss->cr1 &= ~SPI_SH_TBE;
spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
ret = wait_event_interruptible_timeout(ss->wait,
ss->cr1 & SPI_SH_TBE,
SPI_SH_SEND_TIMEOUT);
if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
printk(KERN_ERR "%s: timeout\n", __func__);
return -ETIMEDOUT;
}
}
return retval;
}
static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
struct spi_transfer *t)
{
int i;
int remain = t->len;
int cur_len;
unsigned char *data;
unsigned long tmp;
long ret;
if (t->len > SPI_SH_MAX_BYTE)
spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3);
else
spi_sh_write(ss, t->len, SPI_SH_CR3);
tmp = spi_sh_read(ss, SPI_SH_CR1);
tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
spi_sh_write(ss, tmp, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
spi_sh_wait_write_buffer_empty(ss);
data = (unsigned char *)t->rx_buf;
while (remain > 0) {
if (remain >= SPI_SH_FIFO_SIZE) {
ss->cr1 &= ~SPI_SH_RBF;
spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4);
ret = wait_event_interruptible_timeout(ss->wait,
ss->cr1 & SPI_SH_RBF,
SPI_SH_RECEIVE_TIMEOUT);
if (ret == 0 &&
spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
printk(KERN_ERR "%s: timeout\n", __func__);
return -ETIMEDOUT;
}
}
cur_len = min(SPI_SH_FIFO_SIZE, remain);
for (i = 0; i < cur_len; i++) {
if (spi_sh_wait_receive_buffer(ss))
break;
data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR);
}
remain -= cur_len;
data += cur_len;
}
/* deassert CS when SPI is receiving. */
if (t->len > SPI_SH_MAX_BYTE) {
clear_fifo(ss);
spi_sh_write(ss, 1, SPI_SH_CR3);
} else {
spi_sh_write(ss, 0, SPI_SH_CR3);
}
return 0;
}
static void spi_sh_work(struct work_struct *work)
{
struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws);
struct spi_message *mesg;
struct spi_transfer *t;
unsigned long flags;
int ret;
pr_debug("%s: enter\n", __func__);
spin_lock_irqsave(&ss->lock, flags);
while (!list_empty(&ss->queue)) {
mesg = list_entry(ss->queue.next, struct spi_message, queue);
list_del_init(&mesg->queue);
spin_unlock_irqrestore(&ss->lock, flags);
list_for_each_entry(t, &mesg->transfers, transfer_list) {
pr_debug("tx_buf = %p, rx_buf = %p\n",
t->tx_buf, t->rx_buf);
pr_debug("len = %d, delay_usecs = %d\n",
t->len, t->delay_usecs);
if (t->tx_buf) {
ret = spi_sh_send(ss, mesg, t);
if (ret < 0)
goto error;
}
if (t->rx_buf) {
ret = spi_sh_receive(ss, mesg, t);
if (ret < 0)
goto error;
}
mesg->actual_length += t->len;
}
spin_lock_irqsave(&ss->lock, flags);
mesg->status = 0;
mesg->complete(mesg->context);
}
clear_fifo(ss);
spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
udelay(100);
spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
SPI_SH_CR1);
clear_fifo(ss);
spin_unlock_irqrestore(&ss->lock, flags);
return;
error:
mesg->status = ret;
mesg->complete(mesg->context);
spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
SPI_SH_CR1);
clear_fifo(ss);
}
static int spi_sh_setup(struct spi_device *spi)
{
struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
if (!spi->bits_per_word)
spi->bits_per_word = 8;
pr_debug("%s: enter\n", __func__);
spi_sh_write(ss, 0xfe, SPI_SH_CR1); /* SPI cycle stop */
spi_sh_write(ss, 0x00, SPI_SH_CR1); /* CR1 init */
spi_sh_write(ss, 0x00, SPI_SH_CR3); /* CR3 init */
clear_fifo(ss);
/* 1/8 clock */
spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2);
udelay(10);
return 0;
}
static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg)
{
struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
unsigned long flags;
pr_debug("%s: enter\n", __func__);
pr_debug("\tmode = %02x\n", spi->mode);
spin_lock_irqsave(&ss->lock, flags);
mesg->actual_length = 0;
mesg->status = -EINPROGRESS;
spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
list_add_tail(&mesg->queue, &ss->queue);
queue_work(ss->workqueue, &ss->ws);
spin_unlock_irqrestore(&ss->lock, flags);
return 0;
}
static void spi_sh_cleanup(struct spi_device *spi)
{
struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
pr_debug("%s: enter\n", __func__);
spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
SPI_SH_CR1);
}
static irqreturn_t spi_sh_irq(int irq, void *_ss)
{
struct spi_sh_data *ss = (struct spi_sh_data *)_ss;
unsigned long cr1;
cr1 = spi_sh_read(ss, SPI_SH_CR1);
if (cr1 & SPI_SH_TBE)
ss->cr1 |= SPI_SH_TBE;
if (cr1 & SPI_SH_TBF)
ss->cr1 |= SPI_SH_TBF;
if (cr1 & SPI_SH_RBE)
ss->cr1 |= SPI_SH_RBE;
if (cr1 & SPI_SH_RBF)
ss->cr1 |= SPI_SH_RBF;
if (ss->cr1) {
spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4);
wake_up(&ss->wait);
}
return IRQ_HANDLED;
}
static int __devexit spi_sh_remove(struct platform_device *pdev)
{
struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev);
spi_unregister_master(ss->master);
destroy_workqueue(ss->workqueue);
free_irq(ss->irq, ss);
iounmap(ss->addr);
return 0;
}
static int __devinit spi_sh_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
struct spi_sh_data *ss;
int ret, irq;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(res == NULL)) {
dev_err(&pdev->dev, "invalid resource\n");
return -EINVAL;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "platform_get_irq error\n");
return -ENODEV;
}
master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
if (master == NULL) {
dev_err(&pdev->dev, "spi_alloc_master error.\n");
return -ENOMEM;
}
ss = spi_master_get_devdata(master);
dev_set_drvdata(&pdev->dev, ss);
switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_8BIT:
ss->width = 8;
break;
case IORESOURCE_MEM_32BIT:
ss->width = 32;
break;
default:
dev_err(&pdev->dev, "No support width\n");
ret = -ENODEV;
goto error1;
}
ss->irq = irq;
ss->master = master;
ss->addr = ioremap(res->start, resource_size(res));
if (ss->addr == NULL) {
dev_err(&pdev->dev, "ioremap error.\n");
ret = -ENOMEM;
goto error1;
}
INIT_LIST_HEAD(&ss->queue);
spin_lock_init(&ss->lock);
INIT_WORK(&ss->ws, spi_sh_work);
init_waitqueue_head(&ss->wait);
ss->workqueue = create_singlethread_workqueue(
dev_name(master->dev.parent));
if (ss->workqueue == NULL) {
dev_err(&pdev->dev, "create workqueue error\n");
ret = -EBUSY;
goto error2;
}
ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
if (ret < 0) {
dev_err(&pdev->dev, "request_irq error\n");
goto error3;
}
master->num_chipselect = 2;
master->bus_num = pdev->id;
master->setup = spi_sh_setup;
master->transfer = spi_sh_transfer;
master->cleanup = spi_sh_cleanup;
ret = spi_register_master(master);
if (ret < 0) {
printk(KERN_ERR "spi_register_master error.\n");
goto error4;
}
return 0;
error4:
free_irq(irq, ss);
error3:
destroy_workqueue(ss->workqueue);
error2:
iounmap(ss->addr);
error1:
spi_master_put(master);
return ret;
}
static struct platform_driver spi_sh_driver = {
.probe = spi_sh_probe,
.remove = __devexit_p(spi_sh_remove),
.driver = {
.name = "sh_spi",
.owner = THIS_MODULE,
},
};
module_platform_driver(spi_sh_driver);
MODULE_DESCRIPTION("SH SPI bus driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:sh_spi");
| gpl-2.0 |
supersonicninja/CAF-Kernel | arch/arm/mach-imx/clock-imx25.c | 4990 | 11026 | /*
* Copyright (C) 2009 by Sascha Hauer, Pengutronix
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/mx25.h>
#define CRM_BASE MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR)
#define CCM_MPCTL 0x00
#define CCM_UPCTL 0x04
#define CCM_CCTL 0x08
#define CCM_CGCR0 0x0C
#define CCM_CGCR1 0x10
#define CCM_CGCR2 0x14
#define CCM_PCDR0 0x18
#define CCM_PCDR1 0x1C
#define CCM_PCDR2 0x20
#define CCM_PCDR3 0x24
#define CCM_RCSR 0x28
#define CCM_CRDR 0x2C
#define CCM_DCVR0 0x30
#define CCM_DCVR1 0x34
#define CCM_DCVR2 0x38
#define CCM_DCVR3 0x3c
#define CCM_LTR0 0x40
#define CCM_LTR1 0x44
#define CCM_LTR2 0x48
#define CCM_LTR3 0x4c
static unsigned long get_rate_mpll(void)
{
ulong mpctl = __raw_readl(CRM_BASE + CCM_MPCTL);
return mxc_decode_pll(mpctl, 24000000);
}
static unsigned long get_rate_upll(void)
{
ulong mpctl = __raw_readl(CRM_BASE + CCM_UPCTL);
return mxc_decode_pll(mpctl, 24000000);
}
unsigned long get_rate_arm(struct clk *clk)
{
unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
unsigned long rate = get_rate_mpll();
if (cctl & (1 << 14))
rate = (rate * 3) >> 2;
return rate / ((cctl >> 30) + 1);
}
static unsigned long get_rate_ahb(struct clk *clk)
{
unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
return get_rate_arm(NULL) / (((cctl >> 28) & 0x3) + 1);
}
static unsigned long get_rate_ipg(struct clk *clk)
{
return get_rate_ahb(NULL) >> 1;
}
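/*
 * Each CCM_PCDRn register packs four 6-bit per-clock dividers, one per
 * byte lane. "per & 0x3" selects the lane and "per & ~0x3" the
 * register: e.g. per = 15 gives ofs = 3 * 8 = 24 and reg = 12, i.e.
 * bits 29:24 of CCM_PCDR3.
 */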
static unsigned long get_rate_per(int per)
{
unsigned long ofs = (per & 0x3) * 8;
unsigned long reg = per & ~0x3;
unsigned long val = (readl(CRM_BASE + CCM_PCDR0 + reg) >> ofs) & 0x3f;
unsigned long fref;
if (readl(CRM_BASE + 0x64) & (1 << per))
fref = get_rate_upll();
else
fref = get_rate_ahb(NULL);
return fref / (val + 1);
}
static unsigned long get_rate_uart(struct clk *clk)
{
return get_rate_per(15);
}
static unsigned long get_rate_ssi2(struct clk *clk)
{
return get_rate_per(14);
}
static unsigned long get_rate_ssi1(struct clk *clk)
{
return get_rate_per(13);
}
static unsigned long get_rate_i2c(struct clk *clk)
{
return get_rate_per(6);
}
static unsigned long get_rate_nfc(struct clk *clk)
{
return get_rate_per(8);
}
static unsigned long get_rate_gpt(struct clk *clk)
{
return get_rate_per(5);
}
static unsigned long get_rate_lcdc(struct clk *clk)
{
return get_rate_per(7);
}
static unsigned long get_rate_esdhc1(struct clk *clk)
{
return get_rate_per(3);
}
static unsigned long get_rate_esdhc2(struct clk *clk)
{
return get_rate_per(4);
}
static unsigned long get_rate_csi(struct clk *clk)
{
return get_rate_per(0);
}
static unsigned long get_rate_otg(struct clk *clk)
{
unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
unsigned long rate = get_rate_upll();
return (cctl & (1 << 23)) ? 0 : rate / ((0x3F & (cctl >> 16)) + 1);
}
static int clk_cgcr_enable(struct clk *clk)
{
u32 reg;
reg = __raw_readl(clk->enable_reg);
reg |= 1 << clk->enable_shift;
__raw_writel(reg, clk->enable_reg);
return 0;
}
static void clk_cgcr_disable(struct clk *clk)
{
u32 reg;
reg = __raw_readl(clk->enable_reg);
reg &= ~(1 << clk->enable_shift);
__raw_writel(reg, clk->enable_reg);
}
#define DEFINE_CLOCK(name, i, er, es, gr, sr, s) \
static struct clk name = { \
.id = i, \
.enable_reg = CRM_BASE + er, \
.enable_shift = es, \
.get_rate = gr, \
.set_rate = sr, \
.enable = clk_cgcr_enable, \
.disable = clk_cgcr_disable, \
.secondary = s, \
}
/*
* Note: the following IPG clock gating bits are wrongly marked "Reserved" in
* the i.MX25 Reference Manual Rev 1, table 15-13. The information below is
* taken from the Freescale released BSP.
*
* bit reg offset clock
*
* 0 CGCR1 0 AUDMUX
* 12 CGCR1 12 ESAI
* 16 CGCR1 16 GPIO1
* 17 CGCR1 17 GPIO2
* 18 CGCR1 18 GPIO3
* 23 CGCR1 23 I2C1
* 24 CGCR1 24 I2C2
* 25 CGCR1 25 I2C3
* 27 CGCR1 27 IOMUXC
* 28 CGCR1 28 KPP
* 30 CGCR1 30 OWIRE
* 36 CGCR2 4 RTIC
* 51 CGCR2 19 WDOG
*/
DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_gpt, NULL, NULL);
DEFINE_CLOCK(uart_per_clk, 0, CCM_CGCR0, 15, get_rate_uart, NULL, NULL);
DEFINE_CLOCK(ssi1_per_clk, 0, CCM_CGCR0, 13, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(ssi2_per_clk, 0, CCM_CGCR0, 14, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(esdhc1_ahb_clk, 0, CCM_CGCR0, 21, get_rate_esdhc1, NULL, NULL);
DEFINE_CLOCK(esdhc1_per_clk, 0, CCM_CGCR0, 3, get_rate_esdhc1, NULL,
&esdhc1_ahb_clk);
DEFINE_CLOCK(esdhc2_ahb_clk, 0, CCM_CGCR0, 22, get_rate_esdhc2, NULL, NULL);
DEFINE_CLOCK(esdhc2_per_clk, 0, CCM_CGCR0, 4, get_rate_esdhc2, NULL,
&esdhc2_ahb_clk);
DEFINE_CLOCK(sdma_ahb_clk, 0, CCM_CGCR0, 26, NULL, NULL, NULL);
DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL, NULL, NULL);
DEFINE_CLOCK(lcdc_ahb_clk, 0, CCM_CGCR0, 24, NULL, NULL, NULL);
DEFINE_CLOCK(lcdc_per_clk, 0, CCM_CGCR0, 7, NULL, NULL, &lcdc_ahb_clk);
DEFINE_CLOCK(csi_ahb_clk, 0, CCM_CGCR0, 18, get_rate_csi, NULL, NULL);
DEFINE_CLOCK(csi_per_clk, 0, CCM_CGCR0, 0, get_rate_csi, NULL, &csi_ahb_clk);
DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL, &uart_per_clk);
DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL, &uart_per_clk);
DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL, &uart_per_clk);
DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL, &uart_per_clk);
DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL, &uart_per_clk);
DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL, NULL);
DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL, NULL);
DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL, NULL);
DEFINE_CLOCK(fec_clk, 0, CCM_CGCR1, 15, get_rate_ipg, NULL, &fec_ahb_clk);
DEFINE_CLOCK(dryice_clk, 0, CCM_CGCR1, 8, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(lcdc_clk, 0, CCM_CGCR1, 29, get_rate_lcdc, NULL, &lcdc_per_clk);
DEFINE_CLOCK(wdt_clk, 0, CCM_CGCR2, 19, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(ssi1_clk, 0, CCM_CGCR2, 11, get_rate_ssi1, NULL, &ssi1_per_clk);
DEFINE_CLOCK(ssi2_clk, 1, CCM_CGCR2, 12, get_rate_ssi2, NULL, &ssi2_per_clk);
DEFINE_CLOCK(sdma_clk, 0, CCM_CGCR2, 6, get_rate_ipg, NULL, &sdma_ahb_clk);
DEFINE_CLOCK(esdhc1_clk, 0, CCM_CGCR1, 13, get_rate_esdhc1, NULL,
&esdhc1_per_clk);
DEFINE_CLOCK(esdhc2_clk, 1, CCM_CGCR1, 14, get_rate_esdhc2, NULL,
&esdhc2_per_clk);
DEFINE_CLOCK(audmux_clk, 0, CCM_CGCR1, 0, NULL, NULL, NULL);
DEFINE_CLOCK(csi_clk, 0, CCM_CGCR1, 4, get_rate_csi, NULL, &csi_per_clk);
DEFINE_CLOCK(can1_clk, 0, CCM_CGCR1, 2, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(can2_clk, 1, CCM_CGCR1, 3, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(iim_clk, 0, CCM_CGCR1, 26, NULL, NULL, NULL);
#define _REGISTER_CLOCK(d, n, c) \
{ \
.dev_id = d, \
.con_id = n, \
.clk = &c, \
},
static struct clk_lookup lookups[] = {
/* i.mx25 has the i.mx21 type uart */
_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
_REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
_REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
_REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk)
_REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk)
_REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
_REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
/* i.mx25 has the i.mx35 type cspi */
_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
_REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
_REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk)
_REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk)
_REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk)
_REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk)
_REGISTER_CLOCK("mxc_pwm.3", NULL, pwm4_clk)
_REGISTER_CLOCK("imx-keypad", NULL, kpp_clk)
_REGISTER_CLOCK("mx25-adc", NULL, tsc_clk)
_REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
_REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk)
_REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk)
_REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
_REGISTER_CLOCK("imxdi_rtc.0", NULL, dryice_clk)
_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
_REGISTER_CLOCK("imx2-wdt.0", NULL, wdt_clk)
_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx25.0", NULL, esdhc1_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx25.1", NULL, esdhc2_clk)
_REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk)
_REGISTER_CLOCK(NULL, "audmux", audmux_clk)
_REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
_REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
/* i.mx25 has the i.mx35 type sdma */
_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
_REGISTER_CLOCK(NULL, "iim", iim_clk)
};
int __init mx25_clocks_init(void)
{
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
/* Turn off all clocks except the ones we need to survive, namely:
* EMI, GPIO1-3 (CCM_CGCR1[18:16]), GPT1, IOMUXC (CCM_CGCR1[27]), IIM,
* SCC
*/
__raw_writel((1 << 19), CRM_BASE + CCM_CGCR0);
__raw_writel((0xf << 16) | (3 << 26), CRM_BASE + CCM_CGCR1);
__raw_writel((1 << 5), CRM_BASE + CCM_CGCR2);
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
clk_enable(&uart1_clk);
#endif
/* Clock source for lcdc and csi is upll */
__raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0),
CRM_BASE + 0x64);
/* Clock source for gpt is ahb_div */
__raw_writel(__raw_readl(CRM_BASE+0x64) & ~(1 << 5), CRM_BASE + 0x64);
clk_enable(&iim_clk);
imx_print_silicon_rev("i.MX25", mx25_revision());
clk_disable(&iim_clk);
mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
return 0;
}
| gpl-2.0 |
arkas/Samsung-GT-I5510-Kernel | kernel/drivers/isdn/mISDN/dsp_hwec.c | 4990 | 3049 | /*
* dsp_hwec.c:
* builtin mISDN dsp pipeline element for enabling the hw echocanceller
*
* Copyright (C) 2007, Nadi Sarrar
*
* Nadi Sarrar <nadi@beronet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mISDNdsp.h>
#include <linux/mISDNif.h>
#include "core.h"
#include "dsp.h"
#include "dsp_hwec.h"
static struct mISDN_dsp_element_arg args[] = {
{ "deftaps", "128", "Set the number of taps of cancellation." },
};
static struct mISDN_dsp_element dsp_hwec_p = {
.name = "hwec",
.new = NULL,
.free = NULL,
.process_tx = NULL,
.process_rx = NULL,
.num_args = ARRAY_SIZE(args),
.args = args,
};
struct mISDN_dsp_element *dsp_hwec = &dsp_hwec_p;
void dsp_hwec_enable(struct dsp *dsp, const char *arg)
{
int deftaps = 128,
len;
struct mISDN_ctrl_req cq;
if (!dsp) {
printk(KERN_ERR "%s: failed to enable hwec: dsp is NULL\n",
__func__);
return;
}
if (!arg)
goto _do;
len = strlen(arg);
if (!len)
goto _do;
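/*
 * The pipeline argument is a comma-separated "name=value" list; e.g.
 * arg = "deftaps=256" (a hypothetical example) overrides the default
 * of 128 taps in the loop below.
 */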
{
char _dup[len + 1];
char *dup, *tok, *name, *val;
int tmp;
strcpy(_dup, arg);
dup = _dup;
while ((tok = strsep(&dup, ","))) {
if (!strlen(tok))
continue;
name = strsep(&tok, "=");
val = tok;
if (!val)
continue;
if (!strcmp(name, "deftaps")) {
if (sscanf(val, "%d", &tmp) == 1)
deftaps = tmp;
}
}
}
_do:
printk(KERN_DEBUG "%s: enabling hwec with deftaps=%d\n",
__func__, deftaps);
memset(&cq, 0, sizeof(cq));
cq.op = MISDN_CTRL_HFC_ECHOCAN_ON;
cq.p1 = deftaps;
if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
__func__);
return;
}
}
void dsp_hwec_disable(struct dsp *dsp)
{
struct mISDN_ctrl_req cq;
if (!dsp) {
printk(KERN_ERR "%s: failed to disable hwec: dsp is NULL\n",
__func__);
return;
}
printk(KERN_DEBUG "%s: disabling hwec\n", __func__);
memset(&cq, 0, sizeof(cq));
cq.op = MISDN_CTRL_HFC_ECHOCAN_OFF;
if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
__func__);
return;
}
}
int dsp_hwec_init(void)
{
mISDN_dsp_element_register(dsp_hwec);
return 0;
}
void dsp_hwec_exit(void)
{
mISDN_dsp_element_unregister(dsp_hwec);
}
| gpl-2.0 |
TeamRegular/android_kernel_samsung_exynos5420 | drivers/ide/ide-ioctls.c | 7550 | 6861 | /*
* IDE ioctls handling.
*/
#include <linux/export.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/slab.h>
static const struct ide_ioctl_devset ide_ioctl_settings[] = {
{ HDIO_GET_32BIT, HDIO_SET_32BIT, &ide_devset_io_32bit },
{ HDIO_GET_KEEPSETTINGS, HDIO_SET_KEEPSETTINGS, &ide_devset_keepsettings },
{ HDIO_GET_UNMASKINTR, HDIO_SET_UNMASKINTR, &ide_devset_unmaskirq },
{ HDIO_GET_DMA, HDIO_SET_DMA, &ide_devset_using_dma },
{ -1, HDIO_SET_PIO_MODE, &ide_devset_pio_mode },
{ 0 }
};
int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
unsigned int cmd, unsigned long arg,
const struct ide_ioctl_devset *s)
{
const struct ide_devset *ds;
int err = -EOPNOTSUPP;
for (; (ds = s->setting); s++) {
if (ds->get && s->get_ioctl == cmd)
goto read_val;
else if (ds->set && s->set_ioctl == cmd)
goto set_val;
}
return err;
read_val:
mutex_lock(&ide_setting_mtx);
err = ds->get(drive);
mutex_unlock(&ide_setting_mtx);
return err >= 0 ? put_user(err, (long __user *)arg) : err;
set_val:
if (bdev != bdev->bd_contains)
err = -EINVAL;
else {
if (!capable(CAP_SYS_ADMIN))
err = -EACCES;
else {
mutex_lock(&ide_setting_mtx);
err = ide_devset_execute(drive, ds, arg);
mutex_unlock(&ide_setting_mtx);
}
}
return err;
}
EXPORT_SYMBOL_GPL(ide_setting_ioctl);
static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
unsigned long arg)
{
u16 *id = NULL;
int size = (cmd == HDIO_GET_IDENTITY) ? (ATA_ID_WORDS * 2) : 142;
int rc = 0;
if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
rc = -ENOMSG;
goto out;
}
/* ata_id_to_hd_driveid() relies on 'id' to be fully allocated. */
id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
if (id == NULL) {
rc = -ENOMEM;
goto out;
}
memcpy(id, drive->id, size);
ata_id_to_hd_driveid(id);
if (copy_to_user((void __user *)arg, id, size))
rc = -EFAULT;
kfree(id);
out:
return rc;
}
static int ide_get_nice_ioctl(ide_drive_t *drive, unsigned long arg)
{
return put_user((!!(drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)
<< IDE_NICE_DSC_OVERLAP) |
(!!(drive->dev_flags & IDE_DFLAG_NICE1)
<< IDE_NICE_1), (long __user *)arg);
}
static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
{
if (arg != (arg & ((1 << IDE_NICE_DSC_OVERLAP) | (1 << IDE_NICE_1))))
return -EPERM;
if (((arg >> IDE_NICE_DSC_OVERLAP) & 1) &&
(drive->media != ide_tape))
return -EPERM;
if ((arg >> IDE_NICE_DSC_OVERLAP) & 1)
drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
else
drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
if ((arg >> IDE_NICE_1) & 1)
drive->dev_flags |= IDE_DFLAG_NICE1;
else
drive->dev_flags &= ~IDE_DFLAG_NICE1;
return 0;
}
static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
{
u8 *buf = NULL;
int bufsize = 0, err = 0;
u8 args[4], xfer_rate = 0;
struct ide_cmd cmd;
struct ide_taskfile *tf = &cmd.tf;
if (NULL == (void *) arg) {
struct request *rq;
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
err = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
return err;
}
if (copy_from_user(args, (void __user *)arg, 4))
return -EFAULT;
memset(&cmd, 0, sizeof(cmd));
tf->feature = args[2];
if (args[0] == ATA_CMD_SMART) {
tf->nsect = args[3];
tf->lbal = args[1];
tf->lbam = 0x4f;
tf->lbah = 0xc2;
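/*
 * 0x4F in the LBA mid and 0xC2 in the LBA high registers form the
 * signature that the ATA specification requires for SMART commands.
 */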
cmd.valid.out.tf = IDE_VALID_OUT_TF;
cmd.valid.in.tf = IDE_VALID_NSECT;
} else {
tf->nsect = args[1];
cmd.valid.out.tf = IDE_VALID_FEATURE | IDE_VALID_NSECT;
cmd.valid.in.tf = IDE_VALID_NSECT;
}
tf->command = args[0];
cmd.protocol = args[3] ? ATA_PROT_PIO : ATA_PROT_NODATA;
if (args[3]) {
cmd.tf_flags |= IDE_TFLAG_IO_16BIT;
bufsize = SECTOR_SIZE * args[3];
buf = kzalloc(bufsize, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
}
if (tf->command == ATA_CMD_SET_FEATURES &&
tf->feature == SETFEATURES_XFER &&
tf->nsect >= XFER_SW_DMA_0) {
xfer_rate = ide_find_dma_mode(drive, tf->nsect);
if (xfer_rate != tf->nsect) {
err = -EINVAL;
goto abort;
}
cmd.tf_flags |= IDE_TFLAG_SET_XFER;
}
err = ide_raw_taskfile(drive, &cmd, buf, args[3]);
args[0] = tf->status;
args[1] = tf->error;
args[2] = tf->nsect;
abort:
if (copy_to_user((void __user *)arg, &args, 4))
err = -EFAULT;
if (buf) {
if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
err = -EFAULT;
kfree(buf);
}
return err;
}
static int ide_task_ioctl(ide_drive_t *drive, unsigned long arg)
{
void __user *p = (void __user *)arg;
int err = 0;
u8 args[7];
struct ide_cmd cmd;
if (copy_from_user(args, p, 7))
return -EFAULT;
memset(&cmd, 0, sizeof(cmd));
memcpy(&cmd.tf.feature, &args[1], 6);
cmd.tf.command = args[0];
cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
err = ide_no_data_taskfile(drive, &cmd);
args[0] = cmd.tf.command;
memcpy(&args[1], &cmd.tf.feature, 6);
if (copy_to_user(p, args, 7))
err = -EFAULT;
return err;
}
static int generic_drive_reset(ide_drive_t *drive)
{
struct request *rq;
int ret = 0;
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd_len = 1;
rq->cmd[0] = REQ_DRIVE_RESET;
if (blk_execute_rq(drive->queue, NULL, rq, 1))
ret = rq->errors;
blk_put_request(rq);
return ret;
}
int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
unsigned int cmd, unsigned long arg)
{
int err;
err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_ioctl_settings);
if (err != -EOPNOTSUPP)
return err;
switch (cmd) {
case HDIO_OBSOLETE_IDENTITY:
case HDIO_GET_IDENTITY:
if (bdev != bdev->bd_contains)
return -EINVAL;
return ide_get_identity_ioctl(drive, cmd, arg);
case HDIO_GET_NICE:
return ide_get_nice_ioctl(drive, arg);
case HDIO_SET_NICE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
return ide_set_nice_ioctl(drive, arg);
#ifdef CONFIG_IDE_TASK_IOCTL
case HDIO_DRIVE_TASKFILE:
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
if (drive->media == ide_disk)
return ide_taskfile_ioctl(drive, arg);
return -ENOMSG;
#endif
case HDIO_DRIVE_CMD:
if (!capable(CAP_SYS_RAWIO))
return -EACCES;
return ide_cmd_ioctl(drive, arg);
case HDIO_DRIVE_TASK:
if (!capable(CAP_SYS_RAWIO))
return -EACCES;
return ide_task_ioctl(drive, arg);
case HDIO_DRIVE_RESET:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
return generic_drive_reset(drive);
case HDIO_GET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(BUSSTATE_ON, (long __user *)arg))
return -EFAULT;
return 0;
case HDIO_SET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
return -EOPNOTSUPP;
default:
return -EINVAL;
}
}
EXPORT_SYMBOL(generic_ide_ioctl);
| gpl-2.0 |
joryb/android_kernel_samsung_jf | drivers/w1/masters/ds2490.c | 7806 | 24010 | /*
* dscore.c
*
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include "../w1_int.h"
#include "../w1.h"
/* COMMAND TYPE CODES */
#define CONTROL_CMD 0x00
#define COMM_CMD 0x01
#define MODE_CMD 0x02
/* CONTROL COMMAND CODES */
#define CTL_RESET_DEVICE 0x0000
#define CTL_START_EXE 0x0001
#define CTL_RESUME_EXE 0x0002
#define CTL_HALT_EXE_IDLE 0x0003
#define CTL_HALT_EXE_DONE 0x0004
#define CTL_FLUSH_COMM_CMDS 0x0007
#define CTL_FLUSH_RCV_BUFFER 0x0008
#define CTL_FLUSH_XMT_BUFFER 0x0009
#define CTL_GET_COMM_CMDS 0x000A
/* MODE COMMAND CODES */
#define MOD_PULSE_EN 0x0000
#define MOD_SPEED_CHANGE_EN 0x0001
#define MOD_1WIRE_SPEED 0x0002
#define MOD_STRONG_PU_DURATION 0x0003
#define MOD_PULLDOWN_SLEWRATE 0x0004
#define MOD_PROG_PULSE_DURATION 0x0005
#define MOD_WRITE1_LOWTIME 0x0006
#define MOD_DSOW0_TREC 0x0007
/* COMMUNICATION COMMAND CODES */
#define COMM_ERROR_ESCAPE 0x0601
#define COMM_SET_DURATION 0x0012
#define COMM_BIT_IO 0x0020
#define COMM_PULSE 0x0030
#define COMM_1_WIRE_RESET 0x0042
#define COMM_BYTE_IO 0x0052
#define COMM_MATCH_ACCESS 0x0064
#define COMM_BLOCK_IO 0x0074
#define COMM_READ_STRAIGHT 0x0080
#define COMM_DO_RELEASE 0x6092
#define COMM_SET_PATH 0x00A2
#define COMM_WRITE_SRAM_PAGE 0x00B2
#define COMM_WRITE_EPROM 0x00C4
#define COMM_READ_CRC_PROT_PAGE 0x00D4
#define COMM_READ_REDIRECT_PAGE_CRC 0x21E4
#define COMM_SEARCH_ACCESS 0x00F4
/* Communication command bits */
#define COMM_TYPE 0x0008
#define COMM_SE 0x0008
#define COMM_D 0x0008
#define COMM_Z 0x0008
#define COMM_CH 0x0008
#define COMM_SM 0x0008
#define COMM_R 0x0008
#define COMM_IM 0x0001
#define COMM_PS 0x4000
#define COMM_PST 0x4000
#define COMM_CIB 0x4000
#define COMM_RTS 0x4000
#define COMM_DT 0x2000
#define COMM_SPU 0x1000
#define COMM_F 0x0800
#define COMM_NTF 0x0400
#define COMM_ICP 0x0200
#define COMM_RST 0x0100
#define PULSE_PROG 0x01
#define PULSE_SPUE 0x02
#define BRANCH_MAIN 0xCC
#define BRANCH_AUX 0x33
/* Status flags */
#define ST_SPUA 0x01 /* Strong Pull-up is active */
#define ST_PRGA 0x02 /* 12V programming pulse is being generated */
#define ST_12VP 0x04 /* external 12V programming voltage is present */
#define ST_PMOD 0x08 /* DS2490 powered from USB and external sources */
#define ST_HALT 0x10 /* DS2490 is currently halted */
#define ST_IDLE 0x20 /* DS2490 is currently idle */
#define ST_EPOF 0x80
/* Result Register flags */
#define RR_DETECT 0xA5 /* New device detected */
#define RR_NRS 0x01 /* Reset no presence or ... */
#define RR_SH 0x02 /* short on reset or set path */
#define RR_APP 0x04 /* alarming presence on reset */
#define RR_VPP 0x08 /* 12V expected not seen */
#define RR_CMP 0x10 /* compare error */
#define RR_CRC 0x20 /* CRC error detected */
#define RR_RDP 0x40 /* redirected page */
#define RR_EOS 0x80 /* end of search error */
#define SPEED_NORMAL 0x00
#define SPEED_FLEXIBLE 0x01
#define SPEED_OVERDRIVE 0x02
#define NUM_EP 4
#define EP_CONTROL 0
#define EP_STATUS 1
#define EP_DATA_OUT 2
#define EP_DATA_IN 3
struct ds_device
{
struct list_head ds_entry;
struct usb_device *udev;
struct usb_interface *intf;
int ep[NUM_EP];
/* Strong PullUp
* 0: pullup not active, else duration in milliseconds
*/
int spu_sleep;
/* spu_bit contains COMM_SPU or 0 depending on if the strong pullup
* should be active or not for writes.
*/
u16 spu_bit;
struct w1_bus_master master;
};
struct ds_status
{
u8 enable;
u8 speed;
u8 pullup_dur;
u8 ppuls_dur;
u8 pulldown_slew;
u8 write1_time;
u8 write0_time;
u8 reserved0;
u8 status;
u8 command0;
u8 command1;
u8 command_buffer_status;
u8 data_out_buffer_status;
u8 data_in_buffer_status;
u8 reserved1;
u8 reserved2;
};
static struct usb_device_id ds_id_table [] = {
{ USB_DEVICE(0x04fa, 0x2490) },
{ },
};
MODULE_DEVICE_TABLE(usb, ds_id_table);
static int ds_probe(struct usb_interface *, const struct usb_device_id *);
static void ds_disconnect(struct usb_interface *);
static int ds_send_control(struct ds_device *, u16, u16);
static int ds_send_control_cmd(struct ds_device *, u16, u16);
static LIST_HEAD(ds_devices);
static DEFINE_MUTEX(ds_mutex);
static struct usb_driver ds_driver = {
.name = "DS9490R",
.probe = ds_probe,
.disconnect = ds_disconnect,
.id_table = ds_id_table,
};
static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
{
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
CONTROL_CMD, 0x40, value, index, NULL, 0, 1000);
if (err < 0) {
printk(KERN_ERR "Failed to send command control message %x.%x: err=%d.\n",
value, index, err);
return err;
}
return err;
}
static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index)
{
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
MODE_CMD, 0x40, value, index, NULL, 0, 1000);
if (err < 0) {
printk(KERN_ERR "Failed to send mode control message %x.%x: err=%d.\n",
value, index, err);
return err;
}
return err;
}
static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
{
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
COMM_CMD, 0x40, value, index, NULL, 0, 1000);
if (err < 0) {
printk(KERN_ERR "Failed to send control message %x.%x: err=%d.\n",
value, index, err);
return err;
}
return err;
}
static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
unsigned char *buf, int size)
{
int count, err;
memset(st, 0, sizeof(*st));
count = 0;
err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 100);
if (err < 0) {
printk(KERN_ERR "Failed to read 1-wire data from 0x%x: err=%d.\n", dev->ep[EP_STATUS], err);
return err;
}
if (count >= sizeof(*st))
memcpy(st, buf, sizeof(*st));
return count;
}
static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off)
{
printk(KERN_INFO "%45s: %8x\n", str, buf[off]);
}
static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
{
int i;
printk(KERN_INFO "0x%x: count=%d, status: ", dev->ep[EP_STATUS], count);
for (i=0; i<count; ++i)
printk("%02x ", buf[i]);
printk(KERN_INFO "\n");
if (count >= 16) {
ds_print_msg(buf, "enable flag", 0);
ds_print_msg(buf, "1-wire speed", 1);
ds_print_msg(buf, "strong pullup duration", 2);
ds_print_msg(buf, "programming pulse duration", 3);
ds_print_msg(buf, "pulldown slew rate control", 4);
ds_print_msg(buf, "write-1 low time", 5);
ds_print_msg(buf, "data sample offset/write-0 recovery time",
6);
ds_print_msg(buf, "reserved (test register)", 7);
ds_print_msg(buf, "device status flags", 8);
ds_print_msg(buf, "communication command byte 1", 9);
ds_print_msg(buf, "communication command byte 2", 10);
ds_print_msg(buf, "communication command buffer status", 11);
ds_print_msg(buf, "1-wire data output buffer status", 12);
ds_print_msg(buf, "1-wire data input buffer status", 13);
ds_print_msg(buf, "reserved", 14);
ds_print_msg(buf, "reserved", 15);
}
for (i = 16; i < count; ++i) {
if (buf[i] == RR_DETECT) {
ds_print_msg(buf, "new device detect", i);
continue;
}
ds_print_msg(buf, "Result Register Value: ", i);
if (buf[i] & RR_NRS)
printk(KERN_INFO "NRS: Reset no presence or ...\n");
if (buf[i] & RR_SH)
printk(KERN_INFO "SH: short on reset or set path\n");
if (buf[i] & RR_APP)
printk(KERN_INFO "APP: alarming presence on reset\n");
if (buf[i] & RR_VPP)
printk(KERN_INFO "VPP: 12V expected not seen\n");
if (buf[i] & RR_CMP)
printk(KERN_INFO "CMP: compare error\n");
if (buf[i] & RR_CRC)
printk(KERN_INFO "CRC: CRC error detected\n");
if (buf[i] & RR_RDP)
printk(KERN_INFO "RDP: redirected page\n");
if (buf[i] & RR_EOS)
printk(KERN_INFO "EOS: end of search error\n");
}
}
static void ds_reset_device(struct ds_device *dev)
{
ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0);
/* Always allow strong pullup which allow individual writes to use
* the strong pullup.
*/
if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE))
printk(KERN_ERR "ds_reset_device: "
"Error allowing strong pullup\n");
/* Chip strong pullup time was cleared. */
if (dev->spu_sleep) {
/* lower 4 bits are 0, see ds_set_pullup */
u8 del = dev->spu_sleep>>4;
if (ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del))
printk(KERN_ERR "ds_reset_device: "
"Error setting duration\n");
}
}
static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
{
int count, err;
struct ds_status st;
/* Careful on size. If size is less than what is available in
* the input buffer, the device fails the bulk transfer and
* clears the input buffer. It could read the maximum size of
* the data buffer, but then do you return the first, last, or
* some set of the middle size bytes? As long as the rest of
* the code is correct there will be size bytes waiting. A
* call to ds_wait_status will wait until the device is idle
* and any data to be received would have been available.
*/
count = 0;
err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
buf, size, &count, 1000);
if (err < 0) {
u8 buf[0x20];
int count;
printk(KERN_INFO "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
ds_dump_status(dev, buf, count);
return err;
}
#if 0
{
int i;
printk("%s: count=%d: ", __func__, count);
for (i=0; i<count; ++i)
printk("%02x ", buf[i]);
printk("\n");
}
#endif
return count;
}
static int ds_send_data(struct ds_device *dev, unsigned char *buf, int len)
{
int count, err;
count = 0;
err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000);
if (err < 0) {
printk(KERN_ERR "Failed to write 1-wire data to ep0x%x: "
"err=%d.\n", dev->ep[EP_DATA_OUT], err);
return err;
}
return err;
}
#if 0
int ds_stop_pulse(struct ds_device *dev, int limit)
{
struct ds_status st;
int count = 0, err = 0;
u8 buf[0x20];
do {
err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0);
if (err)
break;
err = ds_send_control(dev, CTL_RESUME_EXE, 0);
if (err)
break;
err = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
if (err)
break;
if ((st.status & ST_SPUA) == 0) {
err = ds_send_control_mode(dev, MOD_PULSE_EN, 0);
if (err)
break;
}
} while(++count < limit);
return err;
}
int ds_detect(struct ds_device *dev, struct ds_status *st)
{
int err;
err = ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0);
if (err)
return err;
err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM, 0);
if (err)
return err;
err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM | COMM_TYPE, 0x40);
if (err)
return err;
err = ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_PROG);
if (err)
return err;
err = ds_dump_status(dev, st);
return err;
}
#endif /* 0 */
static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
{
u8 buf[0x20];
int err, count = 0;
do {
err = ds_recv_status_nodump(dev, st, buf, sizeof(buf));
#if 0
if (err >= 0) {
int i;
printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err);
for (i=0; i<err; ++i)
printk("%02x ", buf[i]);
printk("\n");
}
#endif
} while (!(buf[0x08] & ST_IDLE) && !(err < 0) && ++count < 100);
if (err >= 16 && st->status & ST_EPOF) {
printk(KERN_INFO "Resetting device after ST_EPOF.\n");
ds_reset_device(dev);
/* Always dump the device status. */
count = 101;
}
/* Dump the status for errors or if there is extended return data.
* The extended status includes new device detection (maybe someone
* can do something with it).
*/
if (err > 16 || count >= 100 || err < 0)
ds_dump_status(dev, buf, err);
/* Extended data isn't an error. Well, a short is, but the dump
* would have already told the user that and we can't do anything
* about it in software anyway.
*/
if (count >= 100 || err < 0)
return -1;
else
return 0;
}
static int ds_reset(struct ds_device *dev)
{
int err;
/* Other potentially interesting flags for reset.
*
* COMM_NTF: Return result register feedback. This could be used to
* detect some conditions such as short, alarming presence, or
* detect if a new device was detected.
*
* COMM_SE which allows SPEED_NORMAL, SPEED_FLEXIBLE, SPEED_OVERDRIVE:
* Select the data transfer rate.
*/
err = ds_send_control(dev, COMM_1_WIRE_RESET | COMM_IM, SPEED_NORMAL);
if (err)
return err;
return 0;
}
#if 0
static int ds_set_speed(struct ds_device *dev, int speed)
{
int err;
if (speed != SPEED_NORMAL && speed != SPEED_FLEXIBLE && speed != SPEED_OVERDRIVE)
return -EINVAL;
if (speed != SPEED_OVERDRIVE)
speed = SPEED_FLEXIBLE;
speed &= 0xff;
err = ds_send_control_mode(dev, MOD_1WIRE_SPEED, speed);
if (err)
return err;
return err;
}
#endif /* 0 */
static int ds_set_pullup(struct ds_device *dev, int delay)
{
int err = 0;
u8 del = 1 + (u8)(delay >> 4);
/* Just storing delay would not get the truncation and roundup. */
int ms = del<<4;
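/*
 * Example of the rounding above (illustrative): delay = 100 ms gives
 * del = 1 + (100 >> 4) = 7 and ms = 7 << 4 = 112, i.e. the requested
 * time is rounded up to the next 16 ms hardware step.
 */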
/* Enable spu_bit if a delay is set. */
dev->spu_bit = delay ? COMM_SPU : 0;
/* If delay is zero, it has already been disabled, if the time is
* the same as the hardware was last programmed to, there is also
* nothing more to do. Compare with the recalculated value ms
* rather than del or delay which can have a different value.
*/
if (delay == 0 || ms == dev->spu_sleep)
return err;
err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del);
if (err)
return err;
dev->spu_sleep = ms;
return err;
}
static int ds_touch_bit(struct ds_device *dev, u8 bit, u8 *tbit)
{
int err;
struct ds_status st;
err = ds_send_control(dev, COMM_BIT_IO | COMM_IM | (bit ? COMM_D : 0),
0);
if (err)
return err;
ds_wait_status(dev, &st);
err = ds_recv_data(dev, tbit, sizeof(*tbit));
if (err < 0)
return err;
return 0;
}
#if 0
static int ds_write_bit(struct ds_device *dev, u8 bit)
{
int err;
struct ds_status st;
/* Set COMM_ICP to write without a readback. Note, this will
* produce one time slot, a down followed by an up with COMM_D
* only determining the timing.
*/
err = ds_send_control(dev, COMM_BIT_IO | COMM_IM | COMM_ICP |
(bit ? COMM_D : 0), 0);
if (err)
return err;
ds_wait_status(dev, &st);
return 0;
}
#endif
static int ds_write_byte(struct ds_device *dev, u8 byte)
{
int err;
struct ds_status st;
u8 rbyte;
err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM | dev->spu_bit, byte);
if (err)
return err;
if (dev->spu_bit)
msleep(dev->spu_sleep);
err = ds_wait_status(dev, &st);
if (err)
return err;
err = ds_recv_data(dev, &rbyte, sizeof(rbyte));
if (err < 0)
return err;
return !(byte == rbyte);
}
static int ds_read_byte(struct ds_device *dev, u8 *byte)
{
int err;
struct ds_status st;
err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM , 0xff);
if (err)
return err;
ds_wait_status(dev, &st);
err = ds_recv_data(dev, byte, sizeof(*byte));
if (err < 0)
return err;
return 0;
}
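/*
 * Illustrative sketch, not part of the original driver: how the
 * primitives above could combine to read the 64-bit ROM id of a bus
 * with a single slave. The READ ROM command byte 0x33 is standard
 * 1-Wire; the helper name is made up for this example.
 */
static int __maybe_unused ds_example_read_rom(struct ds_device *dev, u64 *id)
{
u8 *p = (u8 *)id;
int i, err;
err = ds_reset(dev);
if (err)
return err;
err = ds_write_byte(dev, 0x33);	/* READ ROM, single-slave buses only */
if (err)
return err;
for (i = 0; i < 8; i++) {	/* the 64-bit id arrives LSB first */
err = ds_read_byte(dev, &p[i]);
if (err)
return err;
}
return 0;
}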
static int ds_read_block(struct ds_device *dev, u8 *buf, int len)
{
struct ds_status st;
int err;
if (len > 64*1024)
return -E2BIG;
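/* Writing 0xff bytes generates read time slots on the 1-Wire bus;
 * the slave pulls the line low during a slot for each 0 bit it
 * returns, so the buffer received below ends up holding its data.
 */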
memset(buf, 0xFF, len);
err = ds_send_data(dev, buf, len);
if (err < 0)
return err;
err = ds_send_control(dev, COMM_BLOCK_IO | COMM_IM, len);
if (err)
return err;
ds_wait_status(dev, &st);
memset(buf, 0x00, len);
err = ds_recv_data(dev, buf, len);
return err;
}
static int ds_write_block(struct ds_device *dev, u8 *buf, int len)
{
int err;
struct ds_status st;
err = ds_send_data(dev, buf, len);
if (err < 0)
return err;
err = ds_send_control(dev, COMM_BLOCK_IO | COMM_IM | dev->spu_bit, len);
if (err)
return err;
if (dev->spu_bit)
msleep(dev->spu_sleep);
ds_wait_status(dev, &st);
err = ds_recv_data(dev, buf, len);
if (err < 0)
return err;
return !(err == len);
}
#if 0
static int ds_search(struct ds_device *dev, u64 init, u64 *buf, u8 id_number, int conditional_search)
{
int err;
u16 value, index;
struct ds_status st;
memset(buf, 0, 8 * id_number);	/* buf points at id_number 64-bit ids */
err = ds_send_data(dev, (unsigned char *)&init, 8);
if (err)
return err;
ds_wait_status(dev, &st);
value = COMM_SEARCH_ACCESS | COMM_IM | COMM_SM | COMM_F | COMM_RTS;
index = (conditional_search ? 0xEC : 0xF0) | (id_number << 8);
err = ds_send_control(dev, value, index);
if (err)
return err;
ds_wait_status(dev, &st);
err = ds_recv_data(dev, (unsigned char *)buf, 8*id_number);
if (err < 0)
return err;
return err/8;
}
static int ds_match_access(struct ds_device *dev, u64 init)
{
int err;
struct ds_status st;
err = ds_send_data(dev, (unsigned char *)&init, sizeof(init));
if (err)
return err;
ds_wait_status(dev, &st);
err = ds_send_control(dev, COMM_MATCH_ACCESS | COMM_IM | COMM_RST, 0x0055);
if (err)
return err;
ds_wait_status(dev, &st);
return 0;
}
static int ds_set_path(struct ds_device *dev, u64 init)
{
int err;
struct ds_status st;
u8 buf[9];
memcpy(buf, &init, 8);
buf[8] = BRANCH_MAIN;
err = ds_send_data(dev, buf, sizeof(buf));
if (err)
return err;
ds_wait_status(dev, &st);
err = ds_send_control(dev, COMM_SET_PATH | COMM_IM | COMM_RST, 0);
if (err)
return err;
ds_wait_status(dev, &st);
return 0;
}
#endif /* 0 */
static u8 ds9490r_touch_bit(void *data, u8 bit)
{
u8 ret;
struct ds_device *dev = data;
if (ds_touch_bit(dev, bit, &ret))
return 0;
return ret;
}
#if 0
static void ds9490r_write_bit(void *data, u8 bit)
{
struct ds_device *dev = data;
ds_write_bit(dev, bit);
}
static u8 ds9490r_read_bit(void *data)
{
struct ds_device *dev = data;
int err;
u8 bit = 0;
err = ds_touch_bit(dev, 1, &bit);
if (err)
return 0;
return bit & 1;
}
#endif
static void ds9490r_write_byte(void *data, u8 byte)
{
struct ds_device *dev = data;
ds_write_byte(dev, byte);
}
static u8 ds9490r_read_byte(void *data)
{
struct ds_device *dev = data;
int err;
u8 byte = 0;
err = ds_read_byte(dev, &byte);
if (err)
return 0;
return byte;
}
static void ds9490r_write_block(void *data, const u8 *buf, int len)
{
struct ds_device *dev = data;
ds_write_block(dev, (u8 *)buf, len);
}
static u8 ds9490r_read_block(void *data, u8 *buf, int len)
{
struct ds_device *dev = data;
int err;
err = ds_read_block(dev, buf, len);
if (err < 0)
return 0;
return len;
}
static u8 ds9490r_reset(void *data)
{
struct ds_device *dev = data;
int err;
err = ds_reset(dev);
if (err)
return 1;
return 0;
}
static u8 ds9490r_set_pullup(void *data, int delay)
{
struct ds_device *dev = data;
if (ds_set_pullup(dev, delay))
return 1;
return 0;
}
static int ds_w1_init(struct ds_device *dev)
{
memset(&dev->master, 0, sizeof(struct w1_bus_master));
/* Reset the device as it can be in a bad state.
* This is necessary because a block write will wait for data
* to be placed in the output buffer and block any later
* commands which will keep accumulating and the device will
* not be idle. Another case is removing the ds2490 module
* while a bus search is in progress, somehow a few commands
* get through, but the input transfers fail leaving data in
* the input buffer. This will cause the next read to fail;
* see the note in ds_recv_data.
*/
ds_reset_device(dev);
dev->master.data = dev;
dev->master.touch_bit = &ds9490r_touch_bit;
/* read_bit and write_bit in w1_bus_master are expected to set and
* sample the line level. For write_bit that means it is expected to
* set it to that value and leave it there. ds2490 only supports an
* individual time slot at the lowest level. The requirement from
* pulling the bus state down to reading the state is 15us, something
* that isn't realistic on the USB bus anyway.
dev->master.read_bit = &ds9490r_read_bit;
dev->master.write_bit = &ds9490r_write_bit;
*/
dev->master.read_byte = &ds9490r_read_byte;
dev->master.write_byte = &ds9490r_write_byte;
dev->master.read_block = &ds9490r_read_block;
dev->master.write_block = &ds9490r_write_block;
dev->master.reset_bus = &ds9490r_reset;
dev->master.set_pullup = &ds9490r_set_pullup;
return w1_add_master_device(&dev->master);
}
static void ds_w1_fini(struct ds_device *dev)
{
w1_remove_master_device(&dev->master);
}
static int ds_probe(struct usb_interface *intf,
const struct usb_device_id *udev_id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_endpoint_descriptor *endpoint;
struct usb_host_interface *iface_desc;
struct ds_device *dev;
int i, err;
dev = kmalloc(sizeof(struct ds_device), GFP_KERNEL);
if (!dev) {
printk(KERN_INFO "Failed to allocate new DS9490R structure.\n");
return -ENOMEM;
}
dev->spu_sleep = 0;
dev->spu_bit = 0;
dev->udev = usb_get_dev(udev);
if (!dev->udev) {
err = -ENOMEM;
goto err_out_free;
}
memset(dev->ep, 0, sizeof(dev->ep));
usb_set_intfdata(intf, dev);
err = usb_set_interface(dev->udev, intf->altsetting[0].desc.bInterfaceNumber, 3);
if (err) {
printk(KERN_ERR "Failed to set alternative setting 3 for %d interface: err=%d.\n",
intf->altsetting[0].desc.bInterfaceNumber, err);
goto err_out_clear;
}
err = usb_reset_configuration(dev->udev);
if (err) {
printk(KERN_ERR "Failed to reset configuration: err=%d.\n", err);
goto err_out_clear;
}
iface_desc = &intf->altsetting[0];
if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
printk(KERN_INFO "Num endpoints=%d. It is not DS9490R.\n", iface_desc->desc.bNumEndpoints);
err = -EINVAL;
goto err_out_clear;
}
/*
* This loop doesn't include the control endpoint 0,
* so we fill only endpoint entries 1-3.
*/
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
dev->ep[i+1] = endpoint->bEndpointAddress;
#if 0
printk("%d: addr=%x, size=%d, dir=%s, type=%x\n",
i, endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize),
(endpoint->bEndpointAddress & USB_DIR_IN)?"IN":"OUT",
endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
#endif
}
err = ds_w1_init(dev);
if (err)
goto err_out_clear;
mutex_lock(&ds_mutex);
list_add_tail(&dev->ds_entry, &ds_devices);
mutex_unlock(&ds_mutex);
return 0;
err_out_clear:
usb_set_intfdata(intf, NULL);
usb_put_dev(dev->udev);
err_out_free:
kfree(dev);
return err;
}
static void ds_disconnect(struct usb_interface *intf)
{
struct ds_device *dev;
dev = usb_get_intfdata(intf);
if (!dev)
return;
mutex_lock(&ds_mutex);
list_del(&dev->ds_entry);
mutex_unlock(&ds_mutex);
ds_w1_fini(dev);
usb_set_intfdata(intf, NULL);
usb_put_dev(dev->udev);
kfree(dev);
}
module_usb_driver(ds_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)");
| gpl-2.0 |
amanone/GT-I9505G-MFD | drivers/scsi/isci/unsolicited_frame_control.c | 7806 | 7914 | /*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* BSD LICENSE
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "host.h"
#include "unsolicited_frame_control.h"
#include "registers.h"
int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
{
struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
struct sci_unsolicited_frame *uf;
u32 buf_len, header_len, i;
dma_addr_t dma;
size_t size;
void *virt;
/*
* Prepare all of the memory sizes for the UF headers, UF address
* table, and UF buffers themselves.
*/
buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
/*
* The Unsolicited Frame buffers are set at the start of the UF
* memory descriptor entry. The headers and address table will be
* placed after the buffers.
*/
virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
if (!virt)
return -ENOMEM;
/*
* Program the location of the UF header table into the SCU.
* Notes:
* - The address must align on a 64-byte boundary. Guaranteed to be
* on a 64-byte boundary already, since unsolicited frame buffers are 1KB-aligned.
* - Program unused header entries to overlap with the last
* unsolicited frame. The silicon will never DMA to these unused
* headers, since we program the UF address table pointers to
* NULL.
*/
uf_control->headers.physical_address = dma + buf_len;
uf_control->headers.array = virt + buf_len;
/*
* Program the location of the UF address table into the SCU.
* Notes:
* - The address must align on a 64-bit boundary. This is already
* guaranteed, because the headers above start on a 64-byte boundary
* and each header is 64 bytes in size.
*/
uf_control->address_table.physical_address = dma + buf_len + header_len;
uf_control->address_table.array = virt + buf_len + header_len;
uf_control->get = 0;
/*
* UF buffer requirements are:
* - The last entry in the UF queue is not NULL.
* - There is a power of 2 number of entries (NULL or not-NULL)
* programmed into the queue.
* - Aligned on a 1KB boundary. */
/*
* Program the actual used UF buffers into the UF address table and
* the controller's array of UFs.
*/
for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) {
uf = &uf_control->buffers.array[i];
uf_control->address_table.array[i] = dma;
uf->buffer = virt;
uf->header = &uf_control->headers.array[i];
uf->state = UNSOLICITED_FRAME_EMPTY;
/*
* Increment the address of the physical and virtual memory
* pointers. Everything is aligned on 1k boundary with an
* increment of 1k.
*/
virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
}
return 0;
}
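/*
 * Illustrative layout of the single coherent allocation above, with
 * SCU_MAX_UNSOLICITED_FRAMES == N and 1KB frame buffers assumed:
 *
 * virt/dma + 0 : N buffers of SCU_UNSOLICITED_FRAME_BUFFER_SIZE
 * + buf_len : N scu_unsolicited_frame_header entries
 * + buf_len + header_len : N address table entries
 */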
enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
u32 frame_index,
void **frame_header)
{
if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
/* Skip the first word in the frame since this is a control word used
* by the hardware.
*/
*frame_header = &uf_control->buffers.array[frame_index].header->data;
return SCI_SUCCESS;
}
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
}
enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
u32 frame_index,
void **frame_buffer)
{
if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
*frame_buffer = uf_control->buffers.array[frame_index].buffer;
return SCI_SUCCESS;
}
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
}
bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
u32 frame_index)
{
u32 frame_get;
u32 frame_cycle;
frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;
/*
* In the event there are NULL entries in the UF table, we need to
* advance the get pointer in order to find out if this frame should
* be released (i.e. update the get pointer)
*/
while (frame_get < SCU_MAX_UNSOLICITED_FRAMES &&
lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
upper_32_bits(uf_control->address_table.array[frame_get]) == 0)
frame_get++;
/*
* The table has a NULL entry as its last element. This is
* illegal.
*/
BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
return false;
uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
if (frame_get != frame_index) {
/*
* Frames remain in use until we advance the get pointer
* so there is nothing we can do here
*/
return false;
}
/*
* The frame index is equal to the current get pointer so we
* can now free up all of the frame entries that have been released.
*/
while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
frame_get = 0;
} else
frame_get++;
}
uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
return true;
}
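/*
 * Illustrative walk-through, assuming SCU_MAX_UNSOLICITED_FRAMES is a
 * power of two such as 16: uf_control->get then packs the frame index
 * in bits 0-3 and the cycle flag in bit 4. As released frames are
 * swept up, frame_get advances; at the wrap point the cycle flag is
 * toggled and frame_get restarts at 0, which is how the hardware
 * distinguishes a full queue from an empty one.
 */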
| gpl-2.0 |
txuki2005/TaUrUs_Kernel | arch/mips/math-emu/sp_cmp.c | 10366 | 1798 | /* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754sp.h"
int ieee754sp_cmp(ieee754sp x, ieee754sp y, int cmp, int sig)
{
COMPXSP;
COMPYSP;
EXPLODEXSP;
EXPLODEYSP;
FLUSHXSP;
FLUSHYSP;
CLEARCX; /* Even clear inexact flag here */
if (ieee754sp_isnan(x) || ieee754sp_isnan(y)) {
if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN)
SETCX(IEEE754_INVALID_OPERATION);
if (cmp & IEEE754_CUN)
return 1;
if (cmp & (IEEE754_CLT | IEEE754_CGT)) {
if (sig && SETANDTESTCX(IEEE754_INVALID_OPERATION))
return ieee754si_xcpt(0, "fcmpf", x);
}
return 0;
} else {
int vx = x.bits;
int vy = y.bits;
if (vx < 0)
vx = -vx ^ SP_SIGN_BIT;
if (vy < 0)
vy = -vy ^ SP_SIGN_BIT;
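/*
 * The adjustments above remap negative IEEE bit patterns so that all
 * encodings order correctly under plain signed comparison: e.g. -0.0
 * (0x80000000) maps to 0, matching +0.0, while -1.0 (0xbf800000) maps
 * to 0xc0800000, which compares below +1.0 (0x3f800000).
 */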
if (vx < vy)
return (cmp & IEEE754_CLT) != 0;
else if (vx == vy)
return (cmp & IEEE754_CEQ) != 0;
else
return (cmp & IEEE754_CGT) != 0;
}
}
| gpl-2.0 |
agat63/AGAT_L720_kernel | arch/mips/math-emu/sp_cmp.c | 10366 | 1798 | /* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754sp.h"
int ieee754sp_cmp(ieee754sp x, ieee754sp y, int cmp, int sig)
{
COMPXSP;
COMPYSP;
EXPLODEXSP;
EXPLODEYSP;
FLUSHXSP;
FLUSHYSP;
CLEARCX; /* Even clear inexact flag here */
if (ieee754sp_isnan(x) || ieee754sp_isnan(y)) {
if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN)
SETCX(IEEE754_INVALID_OPERATION);
if (cmp & IEEE754_CUN)
return 1;
if (cmp & (IEEE754_CLT | IEEE754_CGT)) {
if (sig && SETANDTESTCX(IEEE754_INVALID_OPERATION))
return ieee754si_xcpt(0, "fcmpf", x);
}
return 0;
} else {
int vx = x.bits;
int vy = y.bits;
if (vx < 0)
vx = -vx ^ SP_SIGN_BIT;
if (vy < 0)
vy = -vy ^ SP_SIGN_BIT;
if (vx < vy)
return (cmp & IEEE754_CLT) != 0;
else if (vx == vy)
return (cmp & IEEE754_CEQ) != 0;
else
return (cmp & IEEE754_CGT) != 0;
}
}
| gpl-2.0 |
gingo21/kernel_wiko_cink_slim | arch/mips/math-emu/dp_fint.c | 10366 | 1906 | /* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754dp.h"
ieee754dp ieee754dp_fint(int x)
{
u64 xm;
int xe;
int xs;
CLEARCX;
if (x == 0)
return ieee754dp_zero(0);
if (x == 1 || x == -1)
return ieee754dp_one(x < 0);
if (x == 10 || x == -10)
return ieee754dp_ten(x < 0);
xs = (x < 0);
if (xs) {
if (x == (1 << 31))
xm = ((unsigned) 1 << 31); /* max neg can't be safely negated */
else
xm = -x;
} else {
xm = x;
}
#if 1
/* normalize - result can never be inexact or overflow */
xe = DP_MBITS;
while ((xm >> DP_MBITS) == 0) {
xm <<= 1;
xe--;
}
return builddp(xs, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
#else
/* normalize */
xe = DP_MBITS + 3;
while ((xm >> (DP_MBITS + 3)) == 0) {
xm <<= 1;
xe--;
}
DPNORMRET1(xs, xe, xm, "fint", x);
#endif
}
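/*
 * Worked example (illustrative, with DP_MBITS == 52): ieee754dp_fint(3)
 * shifts xm = 3 left until bit 52 is set, giving xm = 0x18000000000000
 * and xe = 1; masking off DP_HIDDEN_BIT leaves fraction
 * 0x8000000000000, the encoding of 1.1b * 2^1 = 3.0.
 */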
ieee754dp ieee754dp_funs(unsigned int u)
{
if ((int) u < 0)
return ieee754dp_add(ieee754dp_1e31(),
ieee754dp_fint(u & ~(1 << 31)));
return ieee754dp_fint(u);
}
| gpl-2.0 |
EPDCenterSpain/kernel_Archos_80_Titan | arch/mips/math-emu/dp_tlong.c | 10366 | 3033 | /* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754dp.h"
s64 ieee754dp_tlong(ieee754dp x)
{
COMPXDP;
CLEARCX;
EXPLODEXDP;
FLUSHXDP;
switch (xc) {
case IEEE754_CLASS_SNAN:
case IEEE754_CLASS_QNAN:
case IEEE754_CLASS_INF:
SETCX(IEEE754_INVALID_OPERATION);
return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x);
case IEEE754_CLASS_ZERO:
return 0;
case IEEE754_CLASS_DNORM:
case IEEE754_CLASS_NORM:
break;
}
if (xe >= 63) {
/* look for valid corner case */
if (xe == 63 && xs && xm == DP_HIDDEN_BIT)
return -0x8000000000000000LL;
/* Set invalid. We will only use overflow for floating
point overflow */
SETCX(IEEE754_INVALID_OPERATION);
return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x);
}
/* oh gawd */
if (xe > DP_MBITS) {
xm <<= xe - DP_MBITS;
} else if (xe < DP_MBITS) {
u64 residue;
int round;
int sticky;
int odd;
if (xe < -1) {
residue = xm;
round = 0;
sticky = residue != 0;
xm = 0;
} else {
/* Shifting a u64 64 times does not work,
* so we do it in two steps. Be aware that xe
* may be -1 */
residue = xm << (xe + 1);
residue <<= 63 - DP_MBITS;
round = (residue >> 63) != 0;
sticky = (residue << 1) != 0;
xm >>= DP_MBITS - xe;
}
odd = (xm & 0x1) != 0x0;
switch (ieee754_csr.rm) {
case IEEE754_RN:
if (round && (sticky || odd))
xm++;
break;
case IEEE754_RZ:
break;
case IEEE754_RU: /* toward +Infinity */
if ((round || sticky) && !xs)
xm++;
break;
case IEEE754_RD: /* toward -Infinity */
if ((round || sticky) && xs)
xm++;
break;
}
if ((xm >> 63) != 0) {
/* This can happen after rounding */
SETCX(IEEE754_INVALID_OPERATION);
return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x);
}
if (round || sticky)
SETCX(IEEE754_INEXACT);
}
if (xs)
return -xm;
else
return xm;
}
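/*
 * Worked example (illustrative): converting x = 2.5 (xe = 1,
 * xm = 5 << 50) leaves xm = 2 with round = 1 and sticky = 0 after the
 * shifts above, so round-to-nearest-even (IEEE754_RN) keeps xm = 2,
 * raises only IEEE754_INEXACT, and the function returns 2.
 */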
u64 ieee754dp_tulong(ieee754dp x)
{
ieee754dp hb = ieee754dp_1e63();
/* what if x < 0 ?? */
if (ieee754dp_lt(x, hb))
return (u64) ieee754dp_tlong(x);
return (u64) ieee754dp_tlong(ieee754dp_sub(x, hb)) |
(1ULL << 63);
}
| gpl-2.0 |
Split-Screen/android_kernel_huawei_msm8928 | net/ipv6/netfilter/ip6t_mh.c | 13694 | 2352 | /*
* Copyright (C)2006 USAGI/WIDE Project
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Author:
* Masahide NAKAMURA @USAGI <masahide.nakamura.cz@hitachi.com>
*
* Based on net/netfilter/xt_tcpudp.c
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/mip6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6t_mh.h>
MODULE_DESCRIPTION("Xtables: IPv6 Mobility Header match");
MODULE_LICENSE("GPL");
/* Returns 1 if the type is matched by the range, 0 otherwise */
static inline bool
type_match(u_int8_t min, u_int8_t max, u_int8_t type, bool invert)
{
return (type >= min && type <= max) ^ invert;
}
static bool mh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ip6_mh _mh;
const struct ip6_mh *mh;
const struct ip6t_mh *mhinfo = par->matchinfo;
/* Must not be a fragment. */
if (par->fragoff != 0)
return false;
mh = skb_header_pointer(skb, par->thoff, sizeof(_mh), &_mh);
if (mh == NULL) {
/* We've been asked to examine this packet, and we
can't. Hence, no choice but to drop. */
pr_debug("Dropping evil MH tinygram.\n");
par->hotdrop = true;
return false;
}
if (mh->ip6mh_proto != IPPROTO_NONE) {
pr_debug("Dropping invalid MH Payload Proto: %u\n",
mh->ip6mh_proto);
par->hotdrop = true;
return false;
}
return type_match(mhinfo->types[0], mhinfo->types[1], mh->ip6mh_type,
!!(mhinfo->invflags & IP6T_MH_INV_TYPE));
}
static int mh_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_mh *mhinfo = par->matchinfo;
/* Must specify no unknown invflags */
return (mhinfo->invflags & ~IP6T_MH_INV_MASK) ? -EINVAL : 0;
}
static struct xt_match mh_mt6_reg __read_mostly = {
.name = "mh",
.family = NFPROTO_IPV6,
.checkentry = mh_mt6_check,
.match = mh_mt6,
.matchsize = sizeof(struct ip6t_mh),
.proto = IPPROTO_MH,
.me = THIS_MODULE,
};
static int __init mh_mt6_init(void)
{
return xt_register_match(&mh_mt6_reg);
}
static void __exit mh_mt6_exit(void)
{
xt_unregister_match(&mh_mt6_reg);
}
module_init(mh_mt6_init);
module_exit(mh_mt6_exit);
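/*
 * Illustrative userspace usage, assuming the matching ip6tables "mh"
 * extension is installed (option syntax per iptables-extensions):
 *
 * ip6tables -A INPUT -p mh --mh-type binding-update -j ACCEPT
 */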
| gpl-2.0 |
Cobaltum/kernel_u8110 | drivers/usb/host/ehci-q.c | 127 | 39765 | /*
* Copyright (C) 2001-2004 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/
/*
* EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
*
* Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
* entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
* buffers needed for the larger number). We use one QH per endpoint, queue
* multiple urbs (all three types) per endpoint. URBs may need several qtds.
*
* ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
* interrupts) needs careful scheduling. Performance improvements can be
* an ongoing challenge. That's in "ehci-sched.c".
*
* USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
* or otherwise through transaction translators (TTs) in USB 2.0 hubs using
* (b) special fields in qh entries or (c) split iso entries. TTs will
* buffer low/full speed data so the host collects it at high speed.
*/
/*-------------------------------------------------------------------------*/
/* fill a qtd, returning how much of the buffer we were able to queue up */
static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
size_t len, int token, int maxpacket)
{
int i, count;
u64 addr = buf;
/* one buffer entry per 4K ... first might be short or unaligned */
qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
count = 0x1000 - (buf & 0x0fff); /* rest of that page */
if (likely (len < count)) /* ... iff needed */
count = len;
else {
buf += 0x1000;
buf &= ~0x0fff;
/* per-qtd limit: from 16K to 20K (best alignment) */
for (i = 1; count < len && i < 5; i++) {
addr = buf;
qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
(u32)(addr >> 32));
buf += 0x1000;
if ((count + 0x1000) < len)
count += 0x1000;
else
count = len;
}
/* short packets may only terminate transfers */
if (count != len)
count -= (count % maxpacket);
}
qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
qtd->length = count;
return count;
}
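/*
 * Worked example (illustrative): buf = 0x12340800, len = 0x5000,
 * maxpacket = 512. hw_buf[0] covers the 0x800 bytes left in the first
 * page, hw_buf[1..4] cover 0x1000 each, so count = 0x4800; that is
 * already a maxpacket multiple, and the remaining 0x800 bytes must be
 * queued in a following qtd.
 */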
/*-------------------------------------------------------------------------*/
static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
struct ehci_qh_hw *hw = qh->hw;
/* writes to an active overlay are unsafe */
BUG_ON(qh->qh_state != QH_STATE_IDLE);
hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
hw->hw_alt_next = EHCI_LIST_END(ehci);
/* Except for control endpoints, we make hardware maintain data
* toggle (like OHCI) ... here (re)initialize the toggle in the QH,
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
unsigned is_out, epnum;
is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
usb_settoggle (qh->dev, epnum, is_out, 1);
}
}
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb ();
hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
* overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
* recovery (including urb dequeue) would need software changes to a QH...
*/
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qtd *qtd;
if (list_empty (&qh->qtd_list))
qtd = qh->dummy;
else {
qtd = list_entry (qh->qtd_list.next,
struct ehci_qtd, qtd_list);
/* first qtd may already be partially processed */
if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
qtd = NULL;
}
if (qtd)
qh_update (ehci, qh, qtd);
}
/*-------------------------------------------------------------------------*/
static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct ehci_qh *qh = ep->hcpriv;
unsigned long flags;
spin_lock_irqsave(&ehci->lock, flags);
qh->clearing_tt = 0;
if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
&& HC_IS_RUNNING(hcd->state))
qh_link_async(ehci, qh);
spin_unlock_irqrestore(&ehci->lock, flags);
}
static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
struct urb *urb, u32 token)
{
/* If an async split transaction gets an error or is unlinked,
* the TT buffer may be left in an indeterminate state. We
* have to clear the TT buffer.
*
* Note: this routine is never called for Isochronous transfers.
*/
if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef DEBUG
struct usb_device *tt = urb->dev->tt->hub;
dev_dbg(&tt->dev,
"clear tt buffer port %d, a%d ep%d t%08x\n",
urb->dev->ttport, urb->dev->devnum,
usb_pipeendpoint(urb->pipe), token);
#endif /* DEBUG */
if (!ehci_is_TDI(ehci)
|| urb->dev->tt->hub !=
ehci_to_hcd(ehci)->self.root_hub) {
if (usb_hub_clear_tt_buffer(urb) == 0)
qh->clearing_tt = 1;
} else {
/* REVISIT ARC-derived cores don't clear the root
* hub TT buffer in this way...
*/
}
}
}
static int qtd_copy_status (
struct ehci_hcd *ehci,
struct urb *urb,
size_t length,
u32 token
)
{
int status = -EINPROGRESS;
/* count IN/OUT bytes, not SETUP (even short packets) */
if (likely (QTD_PID (token) != 2))
urb->actual_length += length - QTD_LENGTH (token);
/* don't modify error codes */
if (unlikely(urb->unlinked))
return status;
/* force cleanup after short read; not always an error */
if (unlikely (IS_SHORT_READ (token)))
status = -EREMOTEIO;
/* serious "can't proceed" faults reported by the hardware */
if (token & QTD_STS_HALT) {
if (token & QTD_STS_BABBLE) {
/* FIXME "must" disable babbling device's port too */
status = -EOVERFLOW;
/* CERR nonzero + halt --> stall */
} else if (QTD_CERR(token)) {
status = -EPIPE;
/* In theory, more than one of the following bits can be set
* since they are sticky and the transaction is retried.
* Which to test first is rather arbitrary.
*/
} else if (token & QTD_STS_MMF) {
/* fs/ls interrupt xfer missed the complete-split */
status = -EPROTO;
} else if (token & QTD_STS_DBE) {
status = (QTD_PID (token) == 1) /* IN ? */
? -ENOSR /* hc couldn't read data */
: -ECOMM; /* hc couldn't write data */
} else if (token & QTD_STS_XACT) {
/* timeout, bad CRC, wrong PID, etc */
ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
urb->dev->devpath,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out");
status = -EPROTO;
} else { /* unknown */
status = -EPROTO;
}
ehci_vdbg (ehci,
"dev%d ep%d%s qtd token %08x --> status %d\n",
usb_pipedevice (urb->pipe),
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
token, status);
}
return status;
}
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
if (likely (urb->hcpriv != NULL)) {
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
/* S-mask in a QH means it's an interrupt urb */
if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
/* ... update hc-wide periodic stats (for usbfs) */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
}
qh_put (qh);
}
if (unlikely(urb->unlinked)) {
COUNT(ehci->stats.unlink);
} else {
/* report non-error and short read status as zero */
if (status == -EINPROGRESS || status == -EREMOTEIO)
status = 0;
COUNT(ehci->stats.complete);
}
#ifdef EHCI_URB_TRACE
ehci_dbg (ehci,
"%s %s urb %p ep%d%s status %d len %d/%d\n",
__func__, urb->dev->devpath, urb,
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
status,
urb->actual_length, urb->transfer_buffer_length);
#endif
/* complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
spin_unlock (&ehci->lock);
usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
spin_lock (&ehci->lock);
}
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
/*
* Process and free completed qtds for a qh, returning URBs to drivers.
* Chases up to qh->hw_current. Returns number of completions called,
* indicating how much "real" work we did.
*/
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qtd *last, *end = qh->dummy;
struct list_head *entry, *tmp;
int last_status;
int stopped;
unsigned count = 0;
u8 state;
const __le32 halt = HALT_BIT(ehci);
struct ehci_qh_hw *hw = qh->hw;
if (unlikely (list_empty (&qh->qtd_list)))
return count;
/* completions (or tasks on other cpus) must never clobber HALT
* till we've gone through and cleaned everything up, even when
* they add urbs to this qh's queue or mark them for unlinking.
*
* NOTE: unlinking expects to be done in queue order.
*
* It's a bug for qh->qh_state to be anything other than
* QH_STATE_IDLE, unless our caller is scan_async() or
* scan_periodic().
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
stopped = (state == QH_STATE_IDLE);
rescan:
last = NULL;
last_status = -EINPROGRESS;
qh->needs_rescan = 0;
/* remove de-activated QTDs from front of queue.
* after faults (including short reads), cleanup this urb
* then let the queue advance.
* if queue is stopped, handles unlinks.
*/
list_for_each_safe (entry, tmp, &qh->qtd_list) {
struct ehci_qtd *qtd;
struct urb *urb;
u32 token = 0;
qtd = list_entry (entry, struct ehci_qtd, qtd_list);
urb = qtd->urb;
/* clean up any state from previous QTD ...*/
if (last) {
if (likely (last->urb != urb)) {
ehci_urb_done(ehci, last->urb, last_status);
count++;
last_status = -EINPROGRESS;
}
ehci_qtd_free (ehci, last);
last = NULL;
}
/* ignore urbs submitted during completions we reported */
if (qtd == end)
break;
/* hardware copies qtd out of qh overlay */
rmb ();
token = hc32_to_cpu(ehci, qtd->hw_token);
/* always clean up qtds the hc de-activated */
retry_xacterr:
if ((token & QTD_STS_ACTIVE) == 0) {
/* on STALL, error, and short reads this urb must
* complete and all its qtds must be recycled.
*/
if ((token & QTD_STS_HALT) != 0) {
/* retry transaction errors until we
* reach the software xacterr limit
*/
if ((token & QTD_STS_XACT) &&
QTD_CERR(token) == 0 &&
++qh->xacterrs < QH_XACTERR_MAX &&
!urb->unlinked) {
ehci_dbg(ehci,
"detected XactErr len %zu/%zu retry %d\n",
qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
/* reset the token in the qtd and the
* qh overlay (which still contains
* the qtd) so that we pick up from
* where we left off
*/
token &= ~QTD_STS_HALT;
token |= QTD_STS_ACTIVE |
(EHCI_TUNE_CERR << 10);
qtd->hw_token = cpu_to_hc32(ehci,
token);
wmb();
hw->hw_token = cpu_to_hc32(ehci,
token);
goto retry_xacterr;
}
stopped = 1;
/* magic dummy for some short reads; qh won't advance.
* that silicon quirk can kick in with this dummy too.
*
* other short reads won't stop the queue, including
* control transfers (status stage handles that) or
* most other single-qtd reads ... the queue stops if
* URB_SHORT_NOT_OK was set so the driver submitting
* the urbs could clean it up.
*/
} else if (IS_SHORT_READ (token)
&& !(qtd->hw_alt_next
& EHCI_LIST_END(ehci))) {
stopped = 1;
goto halt;
}
/* stop scanning when we reach qtds the hc is using */
} else if (likely (!stopped
&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
break;
/* scan the whole queue for unlinks whenever it stops */
} else {
stopped = 1;
/* cancel everything if we halt, suspend, etc */
if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
last_status = -ESHUTDOWN;
/* this qtd is active; skip it unless a previous qtd
* for its urb faulted, or its urb was canceled.
*/
else if (last_status == -EINPROGRESS && !urb->unlinked)
continue;
/* qh unlinked; token in overlay may be most current */
if (state == QH_STATE_IDLE
&& cpu_to_hc32(ehci, qtd->qtd_dma)
== hw->hw_current) {
token = hc32_to_cpu(ehci, hw->hw_token);
/* An unlink may leave an incomplete
* async transaction in the TT buffer.
* We have to clear it.
*/
ehci_clear_tt_buffer(ehci, qh, urb, token);
}
/* force halt for unlinked or blocked qh, so we'll
* patch the qh later and so that completions can't
* activate it while we "know" it's stopped.
*/
if ((halt & hw->hw_token) == 0) {
halt:
hw->hw_token |= halt;
wmb ();
}
}
/* unless we already know the urb's status, collect qtd status
* and update count of bytes transferred. in common short read
* cases with only one data qtd (including control transfers),
* queue processing won't halt. but with two or more qtds (for
* example, with a 32 KB transfer), when the first qtd gets a
* short read the second must be removed by hand.
*/
if (last_status == -EINPROGRESS) {
last_status = qtd_copy_status(ehci, urb,
qtd->length, token);
if (last_status == -EREMOTEIO
&& (qtd->hw_alt_next
& EHCI_LIST_END(ehci)))
last_status = -EINPROGRESS;
/* As part of low/full-speed endpoint-halt processing
* we must clear the TT buffer (11.17.5).
*/
if (unlikely(last_status != -EINPROGRESS &&
last_status != -EREMOTEIO)) {
/* The TT's in some hubs malfunction when they
* receive this request following a STALL (they
* stop sending isochronous packets). Since a
* STALL can't leave the TT buffer in a busy
* state (if you believe Figures 11-48 - 11-51
* in the USB 2.0 spec), we won't clear the TT
* buffer in this case. Strictly speaking this
* is a violation of the spec.
*/
if (last_status != -EPIPE)
ehci_clear_tt_buffer(ehci, qh, urb,
token);
}
}
/* if we're removing something not at the queue head,
* patch the hardware queue pointer.
*/
if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
last = list_entry (qtd->qtd_list.prev,
struct ehci_qtd, qtd_list);
last->hw_next = qtd->hw_next;
}
/* remove qtd; it's recycled after possible urb completion */
list_del (&qtd->qtd_list);
last = qtd;
/* reinit the xacterr counter for the next qtd */
qh->xacterrs = 0;
}
/* last urb's completion might still need calling */
if (likely (last != NULL)) {
ehci_urb_done(ehci, last->urb, last_status);
count++;
ehci_qtd_free (ehci, last);
}
/* Do we need to rescan for URBs dequeued during a giveback? */
if (unlikely(qh->needs_rescan)) {
/* If the QH is already unlinked, do the rescan now. */
if (state == QH_STATE_IDLE)
goto rescan;
/* Otherwise we have to wait until the QH is fully unlinked.
* Our caller will start an unlink if qh->needs_rescan is
* set. But if an unlink has already started, nothing needs
* to be done.
*/
if (state != QH_STATE_LINKED)
qh->needs_rescan = 0;
}
/* restore original state; caller must unlink or relink */
qh->qh_state = state;
/* be sure the hardware's done with the qh before refreshing
* it after fault cleanup, or recovering from silicon wrongly
* overlaying the dummy qtd (which reduces DMA chatter).
*/
if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
switch (state) {
case QH_STATE_IDLE:
qh_refresh(ehci, qh);
break;
case QH_STATE_LINKED:
/* We won't refresh a QH that's linked (after the HC
* stopped the queue). That avoids a race:
* - HC reads first part of QH;
* - CPU updates that first part and the token;
* - HC reads rest of that QH, including token
* Result: HC gets an inconsistent image, and then
* DMAs to/from the wrong memory (corrupting it).
*
* That should be rare for interrupt transfers,
* except maybe high bandwidth ...
*/
/* Tell the caller to start an unlink */
qh->needs_rescan = 1;
break;
/* otherwise, unlink already started */
}
}
return count;
}
/*-------------------------------------------------------------------------*/
// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
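/*
 * Illustrative: a high-bandwidth endpoint reporting wMaxPacketSize =
 * 0x1400 yields hb_mult() == 3 transactions per microframe of
 * max_packet() == 0x400 bytes each, i.e. up to 3KB per microframe.
 */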
/*
* reverse of qh_urb_transaction: free a list of TDs.
* used for cleanup after errors, before HC sees an URB's TDs.
*/
static void qtd_list_free (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list
) {
struct list_head *entry, *temp;
list_for_each_safe (entry, temp, qtd_list) {
struct ehci_qtd *qtd;
qtd = list_entry (entry, struct ehci_qtd, qtd_list);
list_del (&qtd->qtd_list);
ehci_qtd_free (ehci, qtd);
}
}
/*
* create a list of filled qtds for this URB; won't link into qh.
*/
static struct list_head *
qh_urb_transaction (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *head,
gfp_t flags
) {
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf;
int len, maxpacket;
int is_input;
u32 token;
/*
* URBs map to sequences of QTDs: one logical transaction
*/
qtd = ehci_qtd_alloc (ehci, flags);
if (unlikely (!qtd))
return NULL;
list_add_tail (&qtd->qtd_list, head);
qtd->urb = urb;
token = QTD_STS_ACTIVE;
token |= (EHCI_TUNE_CERR << 10);
/* for split transactions, SplitXState initialized to zero */
len = urb->transfer_buffer_length;
is_input = usb_pipein (urb->pipe);
if (usb_pipecontrol (urb->pipe)) {
/* SETUP pid */
qtd_fill(ehci, qtd, urb->setup_dma,
sizeof (struct usb_ctrlrequest),
token | (2 /* "setup" */ << 8), 8);
/* ... and always at least one more pid */
token ^= QTD_TOGGLE;
qtd_prev = qtd;
qtd = ehci_qtd_alloc (ehci, flags);
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
/* for zero length DATA stages, STATUS is always IN */
if (len == 0)
token |= (1 /* "in" */ << 8);
}
/*
* data transfer stage: buffer setup
*/
buf = urb->transfer_dma;
if (is_input)
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
/*
* buffer gets wrapped in one or more qtds;
* last one may be "short" (including zero len)
* and may serve as a control status ack
*/
for (;;) {
int this_qtd_len;
this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
len -= this_qtd_len;
buf += this_qtd_len;
/*
* short reads advance to a "magic" dummy instead of the next
* qtd ... that forces the queue to stop, for manual cleanup.
* (this will usually be overridden later.)
*/
if (is_input)
qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
/* qh makes control packets use qtd toggle; maybe switch it */
if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
token ^= QTD_TOGGLE;
if (likely (len <= 0))
break;
qtd_prev = qtd;
qtd = ehci_qtd_alloc (ehci, flags);
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
}
/*
* unless the caller requires manual cleanup after short reads,
* have the alt_next mechanism keep the queue running after the
* last data qtd (the only one, for control and most other cases).
*/
if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
|| usb_pipecontrol (urb->pipe)))
qtd->hw_alt_next = EHCI_LIST_END(ehci);
/*
* control requests may need a terminating data "status" ack;
* bulk ones may need a terminating short packet (zero length).
*/
if (likely (urb->transfer_buffer_length != 0)) {
int one_more = 0;
if (usb_pipecontrol (urb->pipe)) {
one_more = 1;
token ^= 0x0100; /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
} else if (usb_pipebulk (urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
&& !(urb->transfer_buffer_length % maxpacket)) {
one_more = 1;
}
if (one_more) {
qtd_prev = qtd;
qtd = ehci_qtd_alloc (ehci, flags);
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
/* never any data in such packets */
qtd_fill(ehci, qtd, 0, 0, token, 0);
}
}
/* by default, enable interrupt on urb completion */
if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
return head;
cleanup:
qtd_list_free (ehci, urb, head);
return NULL;
}
/*-------------------------------------------------------------------------*/
// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established. Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)
/*
* Each QH holds a qtd list; a QH is used for everything except iso.
*
* For interrupt urbs, the scheduler must set the microframe scheduling
* mask(s) each time the QH gets scheduled. For highspeed, that's
* just one microframe in the s-mask. For split interrupt transactions
* there are additional complications: c-mask, maybe FSTNs.
*/
static struct ehci_qh *
qh_make (
struct ehci_hcd *ehci,
struct urb *urb,
gfp_t flags
) {
struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
u32 info1 = 0, info2 = 0;
int is_input, type;
int maxp = 0;
struct usb_tt *tt = urb->dev->tt;
struct ehci_qh_hw *hw;
if (!qh)
return qh;
/*
* init endpoint/device data for this QH
*/
info1 |= usb_pipeendpoint (urb->pipe) << 8;
info1 |= usb_pipedevice (urb->pipe) << 0;
is_input = usb_pipein (urb->pipe);
type = usb_pipetype (urb->pipe);
maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
/* 1024 byte maxpacket is a hardware ceiling. High bandwidth
* acts like up to 3KB, but is built from smaller packets.
*/
if (max_packet(maxp) > 1024) {
ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
goto done;
}
/* Compute interrupt scheduling parameters just once, and save.
* - allowing for high bandwidth, how many nsec/uframe are used?
* - split transactions need a second CSPLIT uframe; same question
* - splits also need a schedule gap (for full/low speed I/O)
* - qh has a polling interval
*
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
qh->start = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->c_usecs = 0;
qh->gap_uf = 0;
qh->period = urb->interval >> 3;
if (qh->period == 0 && urb->interval != 1) {
/* NOTE interval 2 or 4 uframes could work.
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
urb->interval = 1;
} else if (qh->period > ehci->periodic_size) {
qh->period = ehci->periodic_size;
urb->interval = qh->period << 3;
}
} else {
int think_time;
/* gap is f(FS/LS transfer times) */
qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
is_input, 0, maxp) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
qh->c_usecs = qh->usecs + HS_USECS (0);
qh->usecs = HS_USECS (1);
} else { // SPLIT+DATA, gap, CSPLIT
qh->usecs += HS_USECS (1);
qh->c_usecs = HS_USECS (0);
}
think_time = tt ? tt->think_time : 0;
qh->tt_usecs = NS_TO_US (think_time +
usb_calc_bus_time (urb->dev->speed,
is_input, 0, max_packet (maxp)));
qh->period = urb->interval;
if (qh->period > ehci->periodic_size) {
qh->period = ehci->periodic_size;
urb->interval = qh->period;
}
}
}
/* support for tt scheduling, and access to toggles */
qh->dev = urb->dev;
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
info1 |= (1 << 12); /* EPS "low" */
/* FALL THROUGH */
case USB_SPEED_FULL:
/* EPS 0 means "full" */
if (type != PIPE_INTERRUPT)
info1 |= (EHCI_TUNE_RL_TT << 28);
if (type == PIPE_CONTROL) {
info1 |= (1 << 27); /* for TT */
info1 |= 1 << 14; /* toggle from qtd */
}
info1 |= maxp << 16;
info2 |= (EHCI_TUNE_MULT_TT << 30);
/* Some Freescale processors have an erratum in which the
* port number in the queue head was 0..N-1 instead of 1..N.
*/
if (ehci_has_fsl_portno_bug(ehci))
info2 |= (urb->dev->ttport-1) << 23;
else
info2 |= urb->dev->ttport << 23;
/* set the address of the TT; for TDI's integrated
* root hub tt, leave it zeroed.
*/
if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
info2 |= tt->hub->devnum << 16;
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
break;
case USB_SPEED_HIGH: /* no TT involved */
info1 |= (2 << 12); /* EPS "high" */
if (type == PIPE_CONTROL) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 64 << 16; /* usb2 fixed maxpacket */
info1 |= 1 << 14; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (type == PIPE_BULK) {
info1 |= (EHCI_TUNE_RL_HS << 28);
/* The USB spec says that high speed bulk endpoints
* always use 512 byte maxpacket. But some device
* vendors decided to ignore that, and MSFT is happy
* to help them do so. So now people expect to use
* such nonconformant devices with Linux too; sigh.
*/
info1 |= max_packet(maxp) << 16;
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else { /* PIPE_INTERRUPT */
info1 |= max_packet (maxp) << 16;
info2 |= hb_mult (maxp) << 30;
}
break;
default:
dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
qh_put (qh);
return NULL;
}
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
/* init as live, toggle clear, advance to dummy */
qh->qh_state = QH_STATE_IDLE;
hw = qh->hw;
hw->hw_info1 = cpu_to_hc32(ehci, info1);
hw->hw_info2 = cpu_to_hc32(ehci, info2);
usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
qh_refresh (ehci, qh);
return qh;
}
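/*
 * Illustrative scheduling outcome: a high-speed interrupt endpoint
 * whose URB requests urb->interval = 12 microframes gets
 * qh->period = 12 >> 3 = 1 frame, while a full/low-speed endpoint
 * keeps its interval in frames, clamped to ehci->periodic_size.
 */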
/*-------------------------------------------------------------------------*/
/* move qh (and its qtds) onto async queue; maybe enable queue. */
static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
__hc32 dma = QH_NEXT(ehci, qh->qh_dma);
struct ehci_qh *head;
/* Don't link a QH if there's a Clear-TT-Buffer pending */
if (unlikely(qh->clearing_tt))
return;
WARN_ON(qh->qh_state != QH_STATE_IDLE);
/* (re)start the async schedule? */
head = ehci->async;
timer_action_done (ehci, TIMER_ASYNC_OFF);
if (!head->qh_next.qh) {
u32 cmd = ehci_readl(ehci, &ehci->regs->command);
if (!(cmd & CMD_ASE)) {
/* in case a clear of CMD_ASE didn't take yet */
(void)handshake(ehci, &ehci->regs->status,
STS_ASS, 0, 150);
cmd |= CMD_ASE | CMD_RUN;
ehci_writel(ehci, cmd, &ehci->regs->command);
ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
/* posted write need not be known to HC yet ... */
}
}
/* clear halt and/or toggle; and maybe recover from silicon quirk */
qh_refresh(ehci, qh);
/* splice right after start */
qh->qh_next = head->qh_next;
qh->hw->hw_next = head->hw->hw_next;
wmb ();
head->qh_next.qh = qh;
head->hw->hw_next = dma;
qh_get(qh);
qh->xacterrs = 0;
qh->qh_state = QH_STATE_LINKED;
/* qtd completions reported later by interrupt */
}
/*-------------------------------------------------------------------------*/
/*
* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
* Returns null if it can't allocate a QH it needs to.
* If the QH has TDs (urbs) already, that's great.
*/
static struct ehci_qh *qh_append_tds (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int epnum,
void **ptr
)
{
struct ehci_qh *qh = NULL;
__hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
qh = (struct ehci_qh *) *ptr;
if (unlikely (qh == NULL)) {
/* can't sleep here, we have ehci->lock... */
qh = qh_make (ehci, urb, GFP_ATOMIC);
*ptr = qh;
}
if (likely (qh != NULL)) {
struct ehci_qtd *qtd;
if (unlikely (list_empty (qtd_list)))
qtd = NULL;
else
qtd = list_entry (qtd_list->next, struct ehci_qtd,
qtd_list);
/* control qh may need patching ... */
if (unlikely (epnum == 0)) {
/* usb_reset_device() briefly reverts to address 0 */
if (usb_pipedevice (urb->pipe) == 0)
qh->hw->hw_info1 &= ~qh_addr_mask;
}
/* just one way to queue requests: swap with the dummy qtd.
* only hc or qh_refresh() ever modify the overlay.
*/
if (likely (qtd != NULL)) {
struct ehci_qtd *dummy;
dma_addr_t dma;
__hc32 token;
/* to avoid racing the HC, use the dummy td instead of
* the first td of our list (becomes new dummy). both
* tds stay deactivated until we're done, when the
* HC is allowed to fetch the old dummy (4.10.2).
*/
token = qtd->hw_token;
qtd->hw_token = HALT_BIT(ehci);
wmb ();
dummy = qh->dummy;
dma = dummy->qtd_dma;
*dummy = *qtd;
dummy->qtd_dma = dma;
list_del (&qtd->qtd_list);
list_add (&dummy->qtd_list, qtd_list);
list_splice_tail(qtd_list, &qh->qtd_list);
ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
qh->dummy = qtd;
/* hc must see the new dummy at list end */
dma = qtd->qtd_dma;
qtd = list_entry (qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
qtd->hw_next = QTD_NEXT(ehci, dma);
/* let the hc process these next qtds */
wmb ();
dummy->hw_token = token;
urb->hcpriv = qh_get (qh);
}
}
return qh;
}
/*-------------------------------------------------------------------------*/
static int
submit_async (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
gfp_t mem_flags
) {
struct ehci_qtd *qtd;
int epnum;
unsigned long flags;
struct ehci_qh *qh = NULL;
int rc;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
epnum = urb->ep->desc.bEndpointAddress;
#ifdef EHCI_URB_TRACE
ehci_dbg (ehci,
"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
__func__, urb->dev->devpath, urb,
epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
urb->transfer_buffer_length,
qtd, urb->ep->hcpriv);
#endif
spin_lock_irqsave (&ehci->lock, flags);
if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
&ehci_to_hcd(ehci)->flags))) {
rc = -ESHUTDOWN;
goto done;
}
rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(rc))
goto done;
qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
if (unlikely(qh == NULL)) {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
rc = -ENOMEM;
goto done;
}
/* Control/bulk operations through TTs don't need scheduling,
* the HC and TT handle it when the TT has a buffer ready.
*/
if (likely (qh->qh_state == QH_STATE_IDLE))
qh_link_async(ehci, qh);
done:
spin_unlock_irqrestore (&ehci->lock, flags);
if (unlikely (qh == NULL))
qtd_list_free (ehci, urb, qtd_list);
return rc;
}
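/*
 * Usage sketch (hedged): a caller such as ehci_urb_enqueue() first turns
 * the URB into a qtd list and then hands it to submit_async().  Roughly,
 * assuming qh_urb_transaction() from earlier in this file:
 */
#if 0
	struct list_head qtd_list;

	INIT_LIST_HEAD(&qtd_list);
	if (!qh_urb_transaction(ehci, urb, &qtd_list, mem_flags))
		return -ENOMEM;		/* no qtds could be allocated */
	return submit_async(ehci, urb, &qtd_list, mem_flags);
#endif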
/*-------------------------------------------------------------------------*/
/* This function creates the qtds and submits them for the
 * SINGLE_STEP_SET_FEATURE test.
 * This is done in two parts: first the SETUP request for GetDescriptor is
 * sent; then, 15 seconds later, the IN stage starts requesting data from
 * the device.
 *
 * is_setup : input argument deciding which of the two stages is
 * performed; TRUE - SETUP and FALSE - IN+STATUS
 * Returns 0 on success
*/
#ifdef CONFIG_USB_EHCI_EHSET
static int
submit_single_step_set_feature(
struct usb_hcd *hcd,
struct urb *urb,
int is_setup
) {
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct list_head qtd_list;
struct list_head *head;
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf;
int len, maxpacket;
u32 token;
INIT_LIST_HEAD(&qtd_list);
head = &qtd_list;
/*
* URBs map to sequences of QTDs: one logical transaction
*/
qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
if (unlikely(!qtd))
return -ENOMEM;
list_add_tail(&qtd->qtd_list, head);
qtd->urb = urb;
token = QTD_STS_ACTIVE;
token |= (EHCI_TUNE_CERR << 10);
len = urb->transfer_buffer_length;
/* Check if the request is to perform just the SETUP stage (GetDescriptor);
 * in the SINGLE_STEP_SET_FEATURE test the DATA stage (IN) happens
 * 15 seconds after the SETUP.
 */
if (is_setup) {
/* SETUP pid */
qtd_fill(ehci, qtd, urb->setup_dma,
sizeof(struct usb_ctrlrequest),
token | (2 /* "setup" */ << 8), 8);
submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
return 0; /* Return now; we shall come back after 15 seconds */
}
/*---------------------------------------------------------------------
 * IN: data transfer stage: buffer setup: start the IN transaction phase
 * for the GetDescriptor SETUP which was sent 15 seconds ago.
 */
token ^= QTD_TOGGLE; /* We need to start IN with the DATA1 PID sequence */
buf = urb->transfer_dma;
token |= (1 /* "in" */ << 8); /* this is the IN stage */
maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));
qtd_fill(ehci, qtd, buf, len, token, maxpacket);
/* Our IN phase shall always be a short read; so keep the queue running
 * and let it advance to the next qtd, the zero-length OUT status */
qtd->hw_alt_next = EHCI_LIST_END(ehci);
/*----------------------------------------------------------------------
* STATUS stage for GetDesc control request
*/
token ^= 0x0100; /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
qtd_prev = qtd;
qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
if (unlikely(!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
/* don't fill any data in such packets */
qtd_fill(ehci, qtd, 0, 0, token, 0);
/* by default, enable interrupt on urb completion */
if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
return 0;
cleanup:
qtd_list_free(ehci, urb, head);
return -ENOMEM;
}
#endif
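/*
 * Illustrative sketch: the EHSET SINGLE_STEP_SET_FEATURE test invokes the
 * function above twice around the mandated 15 second pause.  A
 * hypothetical caller (timer/workqueue details elided):
 */
#if 0
	retval = submit_single_step_set_feature(hcd, urb, 1);	/* SETUP */
	if (retval)
		return retval;
	msleep(15 * 1000);	/* wait between the SETUP and IN stages */
	retval = submit_single_step_set_feature(hcd, urb, 0);	/* IN+STATUS */
#endif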
/*-------------------------------------------------------------------------*/
/* the async qh for the qtds being reclaimed is now unlinked from the HC */
static void end_unlink_async (struct ehci_hcd *ehci)
{
struct ehci_qh *qh = ehci->reclaim;
struct ehci_qh *next;
iaa_watchdog_done(ehci);
// qh->hw_next = cpu_to_hc32(qh->qh_dma);
qh->qh_state = QH_STATE_IDLE;
qh->qh_next.qh = NULL;
qh_put (qh); // refcount from reclaim
/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
next = qh->reclaim;
ehci->reclaim = next;
qh->reclaim = NULL;
qh_completions (ehci, qh);
if (!list_empty (&qh->qtd_list)
&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
qh_link_async (ehci, qh);
else {
/* it's not free to turn the async schedule on/off; leave it
* active but idle for a while once it empties.
*/
if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
&& ehci->async->qh_next.qh == NULL)
timer_action (ehci, TIMER_ASYNC_OFF);
}
qh_put(qh); /* refcount from async list */
if (next) {
ehci->reclaim = NULL;
start_unlink_async (ehci, next);
}
}
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
int cmd = ehci_readl(ehci, &ehci->regs->command);
struct ehci_qh *prev;
#ifdef DEBUG
assert_spin_locked(&ehci->lock);
if (ehci->reclaim
|| (qh->qh_state != QH_STATE_LINKED
&& qh->qh_state != QH_STATE_UNLINK_WAIT)
)
BUG ();
#endif
/* stop async schedule right now? */
if (unlikely (qh == ehci->async)) {
/* can't get here without STS_ASS set */
if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
&& !ehci->reclaim) {
/* ... and CMD_IAAD clear */
ehci_writel(ehci, cmd & ~CMD_ASE,
&ehci->regs->command);
wmb ();
// handshake later, if we need to
timer_action_done (ehci, TIMER_ASYNC_OFF);
}
return;
}
qh->qh_state = QH_STATE_UNLINK;
ehci->reclaim = qh = qh_get (qh);
prev = ehci->async;
while (prev->qh_next.qh != qh)
prev = prev->qh_next.qh;
prev->hw->hw_next = qh->hw->hw_next;
prev->qh_next = qh->qh_next;
wmb ();
/* If the controller isn't running, we don't have to wait for it */
if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) {
/* if (unlikely (qh->reclaim != 0))
* this will recurse, probably not much
*/
end_unlink_async (ehci);
return;
}
cmd |= CMD_IAAD;
ehci_writel(ehci, cmd, &ehci->regs->command);
(void)ehci_readl(ehci, &ehci->regs->command);
iaa_watchdog_start(ehci);
}
/*-------------------------------------------------------------------------*/
static void scan_async (struct ehci_hcd *ehci)
{
struct ehci_qh *qh;
enum ehci_timer_action action = TIMER_IO_WATCHDOG;
ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
qh = ehci->async->qh_next.qh;
if (likely (qh != NULL)) {
do {
/* clean any finished work for this qh */
if (!list_empty (&qh->qtd_list)
&& qh->stamp != ehci->stamp) {
int temp;
/* unlinks could happen here; completion
* reporting drops the lock. rescan using
* the latest schedule, but don't rescan
* qhs we already finished (no looping).
*/
qh = qh_get (qh);
qh->stamp = ehci->stamp;
temp = qh_completions (ehci, qh);
if (qh->needs_rescan)
unlink_async(ehci, qh);
qh_put (qh);
if (temp != 0) {
goto rescan;
}
}
/* unlink idle entries, reducing DMA usage as well
* as HCD schedule-scanning costs. delay for any qh
* we just scanned, there's a not-unusual case that it
* doesn't stay idle for long.
* (plus, avoids some kind of re-activation race.)
*/
if (list_empty(&qh->qtd_list)
&& qh->qh_state == QH_STATE_LINKED) {
if (!ehci->reclaim
&& ((ehci->stamp - qh->stamp) & 0x1fff)
>= (EHCI_SHRINK_FRAMES * 8))
start_unlink_async(ehci, qh);
else
action = TIMER_ASYNC_SHRINK;
}
qh = qh->qh_next.qh;
} while (qh);
}
if (action == TIMER_ASYNC_SHRINK)
timer_action (ehci, TIMER_ASYNC_SHRINK);
}
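/*
 * Worked example for the idle-unlink test above (a clarifying note, not
 * driver code): ehci->stamp caches FRINDEX, which counts *microframes*
 * (8 per 1 ms frame) and is compared modulo 13 bits.  Assuming a
 * hypothetical EHCI_SHRINK_FRAMES of 5, an empty linked QH is unlinked
 * once
 *
 *	((ehci->stamp - qh->stamp) & 0x1fff) >= 5 * 8
 *
 * i.e. once it has sat idle for at least 40 microframes (5 ms), with the
 * mask absorbing wraparound of the frame counter.
 */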
| gpl-2.0 |
chrisw957/gumstix-linux | drivers/ata/libahci_platform.c | 383 | 15741 | /*
* AHCI SATA platform library
*
* Copyright 2004-2005 Red Hat, Inc.
* Jeff Garzik <jgarzik@pobox.com>
* Copyright 2010 MontaVista Software, LLC.
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*/
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include "ahci.h"
static void ahci_host_stop(struct ata_host *host);
struct ata_port_operations ahci_platform_ops = {
.inherits = &ahci_ops,
.host_stop = ahci_host_stop,
};
EXPORT_SYMBOL_GPL(ahci_platform_ops);
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT("ahci_platform"),
};
/**
* ahci_platform_enable_phys - Enable PHYs
* @hpriv: host private area to store config values
*
* This function enables all the PHYs found in hpriv->phys, if any.
* If a PHY fails to be enabled, it disables all the PHYs already
* enabled in reverse order and returns an error.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
{
int rc, i;
for (i = 0; i < hpriv->nports; i++) {
if (!hpriv->phys[i])
continue;
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
goto disable_phys;
}
}
return 0;
disable_phys:
while (--i >= 0) {
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
return rc;
}
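/*
 * The function above uses the usual "unwind on failure" idiom: everything
 * brought up before the failing index is torn down again in reverse
 * order.  A minimal standalone sketch of the pattern (enable()/disable()
 * and the res[] array are hypothetical):
 */
#if 0
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = enable(res[i]);
		if (rc)
			goto unwind;
	}
	return 0;
unwind:
	while (--i >= 0)	/* only the entries already enabled */
		disable(res[i]);
	return rc;
#endif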
/**
* ahci_platform_disable_phys - Disable PHYs
* @hpriv: host private area to store config values
*
* This function disables all PHYs found in hpriv->phys.
*/
static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
{
int i;
for (i = 0; i < hpriv->nports; i++) {
if (!hpriv->phys[i])
continue;
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
}
/**
* ahci_platform_enable_clks - Enable platform clocks
* @hpriv: host private area to store config values
*
* This function enables all the clks found in hpriv->clks, starting at
* index 0. If any clk fails to enable it disables all the clks already
* enabled in reverse order, and then returns an error.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv)
{
int c, rc;
for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) {
rc = clk_prepare_enable(hpriv->clks[c]);
if (rc)
goto disable_unprepare_clk;
}
return 0;
disable_unprepare_clk:
while (--c >= 0)
clk_disable_unprepare(hpriv->clks[c]);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_clks);
/**
* ahci_platform_disable_clks - Disable platform clocks
* @hpriv: host private area to store config values
*
* This function disables all the clks found in hpriv->clks, in reverse
* order of ahci_platform_enable_clks (starting at the end of the array).
*/
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv)
{
int c;
for (c = AHCI_MAX_CLKS - 1; c >= 0; c--)
if (hpriv->clks[c])
clk_disable_unprepare(hpriv->clks[c]);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
/**
* ahci_platform_enable_resources - Enable platform resources
* @hpriv: host private area to store config values
*
* This function enables all ahci_platform managed resources in the
* following order:
* 1) Regulator
* 2) Clocks (through ahci_platform_enable_clks)
* 3) Phys
*
* If resource enabling fails at any point the previous enabled resources
* are disabled in reverse order.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
{
int rc;
if (hpriv->target_pwr) {
rc = regulator_enable(hpriv->target_pwr);
if (rc)
return rc;
}
rc = ahci_platform_enable_clks(hpriv);
if (rc)
goto disable_regulator;
rc = ahci_platform_enable_phys(hpriv);
if (rc)
goto disable_clks;
return 0;
disable_clks:
ahci_platform_disable_clks(hpriv);
disable_regulator:
if (hpriv->target_pwr)
regulator_disable(hpriv->target_pwr);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
/**
* ahci_platform_disable_resources - Disable platform resources
* @hpriv: host private area to store config values
*
* This function disables all ahci_platform managed resources in the
* following order:
* 1) Phys
* 2) Clocks (through ahci_platform_disable_clks)
* 3) Regulator
*/
void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
{
ahci_platform_disable_phys(hpriv);
ahci_platform_disable_clks(hpriv);
if (hpriv->target_pwr)
regulator_disable(hpriv->target_pwr);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_resources);
static void ahci_platform_put_resources(struct device *dev, void *res)
{
struct ahci_host_priv *hpriv = res;
int c;
if (hpriv->got_runtime_pm) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
}
for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++)
clk_put(hpriv->clks[c]);
}
/**
* ahci_platform_get_resources - Get platform resources
* @pdev: platform device to get resources for
*
* This function allocates an ahci_host_priv struct, and gets the following
* resources, storing a reference to them inside the returned struct:
*
* 1) mmio registers (IORESOURCE_MEM 0, mandatory)
* 2) regulator for controlling the target's power (optional)
* 3) 0 to AHCI_MAX_CLKS clocks, as specified in the device's devicetree node,
* or a single clock for non-devicetree platforms
* 4) phys (optional)
*
* RETURNS:
* The allocated ahci_host_priv on success, otherwise an ERR_PTR value
*/
struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct clk *clk;
struct device_node *child;
int i, enabled_ports = 0, rc = -ENOMEM;
u32 mask_port_map = 0;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
return ERR_PTR(-ENOMEM);
hpriv = devres_alloc(ahci_platform_put_resources, sizeof(*hpriv),
GFP_KERNEL);
if (!hpriv)
goto err_out;
devres_add(dev, hpriv);
hpriv->mmio = devm_ioremap_resource(dev,
platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (IS_ERR(hpriv->mmio)) {
dev_err(dev, "no mmio space\n");
rc = PTR_ERR(hpriv->mmio);
goto err_out;
}
hpriv->target_pwr = devm_regulator_get_optional(dev, "target");
if (IS_ERR(hpriv->target_pwr)) {
rc = PTR_ERR(hpriv->target_pwr);
if (rc == -EPROBE_DEFER)
goto err_out;
hpriv->target_pwr = NULL;
}
for (i = 0; i < AHCI_MAX_CLKS; i++) {
/*
* For now we must use clk_get(dev, NULL) for the first clock,
* because some platforms (da850, spear13xx) are not yet
* converted to use devicetree for clocks. For new platforms
* this is equivalent to of_clk_get(dev->of_node, 0).
*/
if (i == 0)
clk = clk_get(dev, NULL);
else
clk = of_clk_get(dev->of_node, i);
if (IS_ERR(clk)) {
rc = PTR_ERR(clk);
if (rc == -EPROBE_DEFER)
goto err_out;
break;
}
hpriv->clks[i] = clk;
}
hpriv->nports = of_get_child_count(dev->of_node);
if (hpriv->nports) {
hpriv->phys = devm_kzalloc(dev,
hpriv->nports * sizeof(*hpriv->phys),
GFP_KERNEL);
if (!hpriv->phys) {
rc = -ENOMEM;
goto err_out;
}
for_each_child_of_node(dev->of_node, child) {
u32 port;
if (!of_device_is_available(child))
continue;
if (of_property_read_u32(child, "reg", &port)) {
rc = -EINVAL;
goto err_out;
}
if (port >= hpriv->nports) {
dev_warn(dev, "invalid port number %d\n", port);
continue;
}
mask_port_map |= BIT(port);
hpriv->phys[port] = devm_of_phy_get(dev, child, NULL);
if (IS_ERR(hpriv->phys[port])) {
rc = PTR_ERR(hpriv->phys[port]);
dev_err(dev,
"couldn't get PHY in node %s: %d\n",
child->name, rc);
goto err_out;
}
enabled_ports++;
}
if (!enabled_ports) {
dev_warn(dev, "No port enabled\n");
rc = -ENODEV;
goto err_out;
}
if (!hpriv->mask_port_map)
hpriv->mask_port_map = mask_port_map;
} else {
/*
* If no sub-node was found, keep this for device tree
* compatibility
*/
struct phy *phy = devm_phy_get(dev, "sata-phy");
if (!IS_ERR(phy)) {
hpriv->phys = devm_kzalloc(dev, sizeof(*hpriv->phys),
GFP_KERNEL);
if (!hpriv->phys) {
rc = -ENOMEM;
goto err_out;
}
hpriv->phys[0] = phy;
hpriv->nports = 1;
} else {
rc = PTR_ERR(phy);
switch (rc) {
case -ENOSYS:
/* No PHY support. Check if PHY is required. */
if (of_find_property(dev->of_node, "phys", NULL)) {
dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
goto err_out;
}
/* fall through */
case -ENODEV:
/* continue normally */
hpriv->phys = NULL;
break;
default:
goto err_out;
}
}
}
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
hpriv->got_runtime_pm = true;
devres_remove_group(dev, NULL);
return hpriv;
err_out:
devres_release_group(dev, NULL);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
/**
* ahci_platform_init_host - Bring up an ahci-platform host
* @pdev: platform device pointer for the host
* @hpriv: ahci-host private data for the host
* @pi_template: template for the ata_port_info to use
*
* This function does all the usual steps needed to bring up an
* ahci-platform host; note that any necessary resources (i.e. clks, phys,
* etc.) must be initialized / enabled before calling this.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
const struct ata_port_info *pi_template)
{
struct device *dev = &pdev->dev;
struct ata_port_info pi = *pi_template;
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ata_host *host;
int i, irq, n_ports, rc;
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(dev, "no irq\n");
return -EINVAL;
}
/* prepare host */
pi.private_data = (void *)(unsigned long)hpriv->flags;
ahci_save_initial_config(dev, hpriv);
if (hpriv->cap & HOST_CAP_NCQ)
pi.flags |= ATA_FLAG_NCQ;
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
ahci_set_em_messages(hpriv, &pi);
/* CAP.NP sometimes indicates the index of the last enabled
 * port, at other times, that of the last possible port, so
 * determining the maximum port number requires looking at
 * both CAP.NP and port_map.
 */
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
host = ata_host_alloc_pinfo(dev, ppi, n_ports);
if (!host)
return -ENOMEM;
host->private_data = hpriv;
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
else
dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ata_port_desc(ap, "mmio %pR",
platform_get_resource(pdev, IORESOURCE_MEM, 0));
ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
/* set enclosure management message type */
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
ap->ops = &ata_dummy_port_ops;
}
if (hpriv->cap & HOST_CAP_64) {
rc = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (rc) {
rc = dma_coerce_mask_and_coherent(dev,
DMA_BIT_MASK(32));
if (rc) {
dev_err(dev, "Failed to enable 64-bit DMA.\n");
return rc;
}
dev_warn(dev, "Enable 32-bit DMA instead of 64-bit.\n");
}
}
rc = ahci_reset_controller(host);
if (rc)
return rc;
ahci_init_controller(host);
ahci_print_info(host, "platform");
return ahci_host_activate(host, irq, &ahci_platform_sht);
}
EXPORT_SYMBOL_GPL(ahci_platform_init_host);
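/*
 * Usage sketch (hedged): a minimal platform glue driver built on the
 * helpers above.  "my_port_info" is hypothetical; real users also supply
 * an of_match table, PM ops and a remove path.
 */
#if 0
static int my_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	hpriv = ahci_platform_get_resources(pdev);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		return rc;

	rc = ahci_platform_init_host(pdev, hpriv, &my_port_info);
	if (rc)
		ahci_platform_disable_resources(hpriv);	/* unwind */
	return rc;
}
#endif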
static void ahci_host_stop(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
ahci_platform_disable_resources(hpriv);
}
#ifdef CONFIG_PM_SLEEP
/**
* ahci_platform_suspend_host - Suspend an ahci-platform host
* @dev: device pointer for the host
*
* This function does all the usual steps needed to suspend an
* ahci-platform host; note that any necessary resources (i.e. clks, phys,
* etc.) must be disabled after calling this.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_suspend_host(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
u32 ctl;
if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
dev_err(dev, "firmware update required for suspend/resume\n");
return -EIO;
}
/*
* AHCI spec rev1.1 section 8.3.3:
* Software must disable interrupts prior to requesting a
* transition of the HBA to D3 state.
*/
ctl = readl(mmio + HOST_CTL);
ctl &= ~HOST_IRQ_EN;
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
return ata_host_suspend(host, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
/**
* ahci_platform_resume_host - Resume an ahci-platform host
* @dev: device pointer for the host
*
* This function does all the usual steps needed to resume an ahci-platform
* host; note that any necessary resources (i.e. clks, phys, etc.) must be
* initialized / enabled before calling this.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_resume_host(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
int rc;
if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
rc = ahci_reset_controller(host);
if (rc)
return rc;
ahci_init_controller(host);
}
ata_host_resume(host);
return 0;
}
EXPORT_SYMBOL_GPL(ahci_platform_resume_host);
/**
* ahci_platform_suspend - Suspend an ahci-platform device
* @dev: the platform device to suspend
*
* This function suspends the host associated with the device, followed by
* disabling all the resources of the device.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_platform_suspend_host(dev);
if (rc)
return rc;
ahci_platform_disable_resources(hpriv);
return 0;
}
EXPORT_SYMBOL_GPL(ahci_platform_suspend);
/**
* ahci_platform_resume - Resume an ahci-platform device
* @dev: the platform device to resume
*
* This function enables all the resources of the device followed by
* resuming the host associated with the device.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
rc = ahci_platform_resume_host(dev);
if (rc)
goto disable_resources;
/* We resumed so update PM runtime state */
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_resume);
#endif
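/*
 * Usage sketch (hedged): glue drivers typically wire the helpers above
 * into their dev_pm_ops; SIMPLE_DEV_PM_OPS compiles to an empty ops
 * structure when CONFIG_PM_SLEEP is not set:
 */
#if 0
static SIMPLE_DEV_PM_OPS(my_ahci_pm_ops, ahci_platform_suspend,
			 ahci_platform_resume);
#endif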
MODULE_DESCRIPTION("AHCI SATA platform library");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
tsuibin/linux-4.x.y | net/sched/act_connmark.c | 383 | 4682 | /*
* net/sched/act_connmark.c netfilter connmark retriever action
* skb mark is over-written
*
* Copyright (c) 2011 Felix Fietkau <nbd@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/act_api.h>
#include <uapi/linux/tc_act/tc_connmark.h>
#include <net/tc_act/tc_connmark.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#define CONNMARK_TAB_MASK 3
static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
const struct nf_conntrack_tuple_hash *thash;
struct nf_conntrack_tuple tuple;
enum ip_conntrack_info ctinfo;
struct tcf_connmark_info *ca = a->priv;
struct nf_conn *c;
int proto;
spin_lock(&ca->tcf_lock);
ca->tcf_tm.lastuse = jiffies;
bstats_update(&ca->tcf_bstats, skb);
if (skb->protocol == htons(ETH_P_IP)) {
if (skb->len < sizeof(struct iphdr))
goto out;
proto = NFPROTO_IPV4;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
if (skb->len < sizeof(struct ipv6hdr))
goto out;
proto = NFPROTO_IPV6;
} else {
goto out;
}
c = nf_ct_get(skb, &ctinfo);
if (c) {
skb->mark = c->mark;
/* using overlimits stats to count how many packets were marked */
ca->tcf_qstats.overlimits++;
goto out;
}
if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
proto, &tuple))
goto out;
thash = nf_conntrack_find_get(dev_net(skb->dev), ca->zone, &tuple);
if (!thash)
goto out;
c = nf_ct_tuplehash_to_ctrack(thash);
/* using overlimits stats to count how many packets were marked */
ca->tcf_qstats.overlimits++;
skb->mark = c->mark;
nf_ct_put(c);
out:
spin_unlock(&ca->tcf_lock);
return ca->tcf_action;
}
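/*
 * Usage sketch (hedged): from userspace this action is normally attached
 * to an ingress filter so that conntrack marks set elsewhere (e.g. by an
 * iptables CONNMARK rule) are restored into skb->mark before
 * classification.  Device and classifier below are illustrative only:
 *
 *	# tc qdisc add dev eth0 handle ffff: ingress
 *	# tc filter add dev eth0 parent ffff: protocol ip prio 1 \
 *		u32 match u32 0 0 action connmark
 */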
static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
[TCA_CONNMARK_PARMS] = { .len = sizeof(struct tc_connmark) },
};
static int tcf_connmark_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action *a,
int ovr, int bind)
{
struct nlattr *tb[TCA_CONNMARK_MAX + 1];
struct tcf_connmark_info *ci;
struct tc_connmark *parm;
int ret = 0;
if (!nla)
return -EINVAL;
ret = nla_parse_nested(tb, TCA_CONNMARK_MAX, nla, connmark_policy);
if (ret < 0)
return ret;
parm = nla_data(tb[TCA_CONNMARK_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
ret = tcf_hash_create(parm->index, est, a, sizeof(*ci), bind);
if (ret)
return ret;
ci = to_connmark(a);
ci->tcf_action = parm->action;
ci->zone = parm->zone;
tcf_hash_insert(a);
ret = ACT_P_CREATED;
} else {
ci = to_connmark(a);
if (bind)
return 0;
tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
/* replacing action and zone */
ci->tcf_action = parm->action;
ci->zone = parm->zone;
}
return ret;
}
static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_connmark_info *ci = a->priv;
struct tc_connmark opt = {
.index = ci->tcf_index,
.refcnt = ci->tcf_refcnt - ref,
.bindcnt = ci->tcf_bindcnt - bind,
.action = ci->tcf_action,
.zone = ci->zone,
};
struct tcf_t t;
if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
t.install = jiffies_to_clock_t(jiffies - ci->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - ci->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(ci->tcf_tm.expires);
if (nla_put(skb, TCA_CONNMARK_TM, sizeof(t), &t))
goto nla_put_failure;
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
static struct tc_action_ops act_connmark_ops = {
.kind = "connmark",
.type = TCA_ACT_CONNMARK,
.owner = THIS_MODULE,
.act = tcf_connmark,
.dump = tcf_connmark_dump,
.init = tcf_connmark_init,
};
static int __init connmark_init_module(void)
{
return tcf_register_action(&act_connmark_ops, CONNMARK_TAB_MASK);
}
static void __exit connmark_cleanup_module(void)
{
tcf_unregister_action(&act_connmark_ops);
}
module_init(connmark_init_module);
module_exit(connmark_cleanup_module);
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
MODULE_DESCRIPTION("Connection tracking mark restoring");
MODULE_LICENSE("GPL");
| gpl-2.0 |
shubhangi-shrivastava/drm-intel-nightly | drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 383 | 128858 | /*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
* All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"
#define MLX4_MAC_VALID (1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT 2
#define MLX4_VF_COUNTERS_PER_PORT 1
struct mac_res {
struct list_head list;
u64 mac;
int ref_count;
u8 smac_index;
u8 port;
};
struct vlan_res {
struct list_head list;
u16 vlan;
int ref_count;
int vlan_index;
u8 port;
};
struct res_common {
struct list_head list;
struct rb_node node;
u64 res_id;
int owner;
int state;
int from_state;
int to_state;
int removing;
};
enum {
RES_ANY_BUSY = 1
};
struct res_gid {
struct list_head list;
u8 gid[16];
enum mlx4_protocol prot;
enum mlx4_steer_type steer;
u64 reg_id;
};
enum res_qp_states {
RES_QP_BUSY = RES_ANY_BUSY,
/* QP number was allocated */
RES_QP_RESERVED,
/* ICM memory for QP context was mapped */
RES_QP_MAPPED,
/* QP is in hw ownership */
RES_QP_HW
};
struct res_qp {
struct res_common com;
struct res_mtt *mtt;
struct res_cq *rcq;
struct res_cq *scq;
struct res_srq *srq;
struct list_head mcg_list;
spinlock_t mcg_spl;
int local_qpn;
atomic_t ref_count;
u32 qpc_flags;
/* saved qp params before VST enforcement in order to restore on VGT */
u8 sched_queue;
__be32 param3;
u8 vlan_control;
u8 fvl_rx;
u8 pri_path_fl;
u8 vlan_index;
u8 feup;
};
enum res_mtt_states {
RES_MTT_BUSY = RES_ANY_BUSY,
RES_MTT_ALLOCATED,
};
static inline const char *mtt_states_str(enum res_mtt_states state)
{
switch (state) {
case RES_MTT_BUSY: return "RES_MTT_BUSY";
case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
default: return "Unknown";
}
}
struct res_mtt {
struct res_common com;
int order;
atomic_t ref_count;
};
enum res_mpt_states {
RES_MPT_BUSY = RES_ANY_BUSY,
RES_MPT_RESERVED,
RES_MPT_MAPPED,
RES_MPT_HW,
};
struct res_mpt {
struct res_common com;
struct res_mtt *mtt;
int key;
};
enum res_eq_states {
RES_EQ_BUSY = RES_ANY_BUSY,
RES_EQ_RESERVED,
RES_EQ_HW,
};
struct res_eq {
struct res_common com;
struct res_mtt *mtt;
};
enum res_cq_states {
RES_CQ_BUSY = RES_ANY_BUSY,
RES_CQ_ALLOCATED,
RES_CQ_HW,
};
struct res_cq {
struct res_common com;
struct res_mtt *mtt;
atomic_t ref_count;
};
enum res_srq_states {
RES_SRQ_BUSY = RES_ANY_BUSY,
RES_SRQ_ALLOCATED,
RES_SRQ_HW,
};
struct res_srq {
struct res_common com;
struct res_mtt *mtt;
struct res_cq *cq;
atomic_t ref_count;
};
enum res_counter_states {
RES_COUNTER_BUSY = RES_ANY_BUSY,
RES_COUNTER_ALLOCATED,
};
struct res_counter {
struct res_common com;
int port;
};
enum res_xrcdn_states {
RES_XRCD_BUSY = RES_ANY_BUSY,
RES_XRCD_ALLOCATED,
};
struct res_xrcdn {
struct res_common com;
int port;
};
enum res_fs_rule_states {
RES_FS_RULE_BUSY = RES_ANY_BUSY,
RES_FS_RULE_ALLOCATED,
};
struct res_fs_rule {
struct res_common com;
int qpn;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
struct rb_node *node = root->rb_node;
while (node) {
struct res_common *res = container_of(node, struct res_common,
node);
if (res_id < res->res_id)
node = node->rb_left;
else if (res_id > res->res_id)
node = node->rb_right;
else
return res;
}
return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
/* Figure out where to put new node */
while (*new) {
struct res_common *this = container_of(*new, struct res_common,
node);
parent = *new;
if (res->res_id < this->res_id)
new = &((*new)->rb_left);
else if (res->res_id > this->res_id)
new = &((*new)->rb_right);
else
return -EEXIST;
}
/* Add new node and rebalance tree. */
rb_link_node(&res->node, parent, new);
rb_insert_color(&res->node, root);
return 0;
}
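/*
 * Illustrative sketch (not driver code): the two helpers above are the
 * standard rbtree search/insert pair keyed on res_id.  Registering and
 * finding a resource reduces to the following, assuming the caller holds
 * mlx4_tlock() as the rest of this file does (alloc_tr() is defined
 * later in this file):
 */
#if 0
	struct res_common *res = alloc_tr(id, RES_QP, slave, 0);

	if (res && res_tracker_insert(&tracker->res_tree[RES_QP], res))
		kfree(res);	/* -EEXIST: this id is already tracked */
	...
	res = res_tracker_lookup(&tracker->res_tree[RES_QP], id);
#endif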
enum qp_transition {
QP_TRANS_INIT2RTR,
QP_TRANS_RTR2RTS,
QP_TRANS_RTS2RTS,
QP_TRANS_SQERR2RTS,
QP_TRANS_SQD2SQD,
QP_TRANS_SQD2RTS
};
/* For debug use */
static const char *resource_str(enum mlx4_resource rt)
{
switch (rt) {
case RES_QP: return "RES_QP";
case RES_CQ: return "RES_CQ";
case RES_SRQ: return "RES_SRQ";
case RES_MPT: return "RES_MPT";
case RES_MTT: return "RES_MTT";
case RES_MAC: return "RES_MAC";
case RES_VLAN: return "RES_VLAN";
case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER";
case RES_FS_RULE: return "RES_FS_RULE";
case RES_XRCD: return "RES_XRCD";
default: return "Unknown resource type !!!";
}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
enum mlx4_resource res_type, int count,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
int err = -EINVAL;
int allocated, free, reserved, guaranteed, from_free;
int from_rsvd;
if (slave > dev->persist->num_vfs)
return -EINVAL;
spin_lock(&res_alloc->alloc_lock);
allocated = (port > 0) ?
res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] :
res_alloc->allocated[slave];
free = (port > 0) ? res_alloc->res_port_free[port - 1] :
res_alloc->res_free;
reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
res_alloc->res_reserved;
guaranteed = res_alloc->guaranteed[slave];
if (allocated + count > res_alloc->quota[slave]) {
mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
slave, port, resource_str(res_type), count,
allocated, res_alloc->quota[slave]);
goto out;
}
if (allocated + count <= guaranteed) {
err = 0;
from_rsvd = count;
} else {
/* portion may need to be obtained from free area */
if (guaranteed - allocated > 0)
from_free = count - (guaranteed - allocated);
else
from_free = count;
from_rsvd = count - from_free;
if (free - from_free >= reserved)
err = 0;
else
mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
slave, port, resource_str(res_type), free,
from_free, reserved);
}
if (!err) {
/* grant the request */
if (port > 0) {
res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] += count;
res_alloc->res_port_free[port - 1] -= count;
res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
} else {
res_alloc->allocated[slave] += count;
res_alloc->res_free -= count;
res_alloc->res_reserved -= from_rsvd;
}
}
out:
spin_unlock(&res_alloc->alloc_lock);
return err;
}
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
enum mlx4_resource res_type, int count,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
int allocated, guaranteed, from_rsvd;
if (slave > dev->persist->num_vfs)
return;
spin_lock(&res_alloc->alloc_lock);
allocated = (port > 0) ?
res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] :
res_alloc->allocated[slave];
guaranteed = res_alloc->guaranteed[slave];
if (allocated - count >= guaranteed) {
from_rsvd = 0;
} else {
/* portion may need to be returned to reserved area */
if (allocated - guaranteed > 0)
from_rsvd = count - (allocated - guaranteed);
else
from_rsvd = count;
}
if (port > 0) {
res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] -= count;
res_alloc->res_port_free[port - 1] += count;
res_alloc->res_port_rsvd[port - 1] += from_rsvd;
} else {
res_alloc->allocated[slave] -= count;
res_alloc->res_free += count;
res_alloc->res_reserved += from_rsvd;
}
spin_unlock(&res_alloc->alloc_lock);
return;
}
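/*
 * Usage sketch (hedged): allocation paths in this file bracket the actual
 * firmware/ICM allocation with the two quota helpers above, releasing the
 * grant again if the allocation itself fails.  Schematically (the
 * allocator shown is illustrative):
 */
#if 0
	err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
	if (err)
		return err;

	err = __mlx4_cq_alloc_icm(dev, &cqn);
	if (err) {
		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		return err;
	}
#endif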
static inline void initialize_res_quotas(struct mlx4_dev *dev,
struct resource_allocator *res_alloc,
enum mlx4_resource res_type,
int vf, int num_instances)
{
res_alloc->guaranteed[vf] = num_instances /
(2 * (dev->persist->num_vfs + 1));
res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
if (vf == mlx4_master_func_num(dev)) {
res_alloc->res_free = num_instances;
if (res_type == RES_MTT) {
/* reserved mtts will be taken out of the PF allocation */
res_alloc->res_free += dev->caps.reserved_mtts;
res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
res_alloc->quota[vf] += dev->caps.reserved_mtts;
}
}
}
void mlx4_init_quotas(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int pf;
/* quotas for VFs are initialized in mlx4_slave_cap */
if (mlx4_is_slave(dev))
return;
if (!mlx4_is_mfunc(dev)) {
dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
mlx4_num_reserved_sqps(dev);
dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
return;
}
pf = mlx4_master_func_num(dev);
dev->quotas.qp =
priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
dev->quotas.cq =
priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
dev->quotas.srq =
priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
dev->quotas.mtt =
priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
dev->quotas.mpt =
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
/* reduce the sink counter */
return (dev->caps.max_counters - 1 -
(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
/ MLX4_MAX_PORTS;
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i, j;
int t;
int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
priv->mfunc.master.res_tracker.slave_list =
kzalloc(dev->num_slaves * sizeof(struct slave_list),
GFP_KERNEL);
if (!priv->mfunc.master.res_tracker.slave_list)
return -ENOMEM;
for (i = 0 ; i < dev->num_slaves; i++) {
for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
slave_list[i].res_list[t]);
mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
}
mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
dev->num_slaves);
for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[i];
res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
sizeof(int), GFP_KERNEL);
res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
sizeof(int), GFP_KERNEL);
if (i == RES_MAC || i == RES_VLAN)
res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
(dev->persist->num_vfs
+ 1) *
sizeof(int), GFP_KERNEL);
else
res_alloc->allocated = kzalloc((dev->persist->
num_vfs + 1) *
sizeof(int), GFP_KERNEL);
/* Reduce the sink counter */
if (i == RES_COUNTER)
res_alloc->res_free = dev->caps.max_counters - 1;
if (!res_alloc->quota || !res_alloc->guaranteed ||
!res_alloc->allocated)
goto no_mem_err;
spin_lock_init(&res_alloc->alloc_lock);
for (t = 0; t < dev->persist->num_vfs + 1; t++) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, t);
switch (i) {
case RES_QP:
initialize_res_quotas(dev, res_alloc, RES_QP,
t, dev->caps.num_qps -
dev->caps.reserved_qps -
mlx4_num_reserved_sqps(dev));
break;
case RES_CQ:
initialize_res_quotas(dev, res_alloc, RES_CQ,
t, dev->caps.num_cqs -
dev->caps.reserved_cqs);
break;
case RES_SRQ:
initialize_res_quotas(dev, res_alloc, RES_SRQ,
t, dev->caps.num_srqs -
dev->caps.reserved_srqs);
break;
case RES_MPT:
initialize_res_quotas(dev, res_alloc, RES_MPT,
t, dev->caps.num_mpts -
dev->caps.reserved_mrws);
break;
case RES_MTT:
initialize_res_quotas(dev, res_alloc, RES_MTT,
t, dev->caps.num_mtts -
dev->caps.reserved_mtts);
break;
case RES_MAC:
if (t == mlx4_master_func_num(dev)) {
int max_vfs_pport = 0;
/* Calculate the max vfs per port for both ports */
for (j = 0; j < dev->caps.num_ports;
j++) {
struct mlx4_slaves_pport slaves_pport =
mlx4_phys_to_slaves_pport(dev, j + 1);
unsigned current_slaves =
bitmap_weight(slaves_pport.slaves,
dev->caps.num_ports) - 1;
if (max_vfs_pport < current_slaves)
max_vfs_pport =
current_slaves;
}
res_alloc->quota[t] =
MLX4_MAX_MAC_NUM -
2 * max_vfs_pport;
res_alloc->guaranteed[t] = 2;
for (j = 0; j < MLX4_MAX_PORTS; j++)
res_alloc->res_port_free[j] =
MLX4_MAX_MAC_NUM;
} else {
res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
res_alloc->guaranteed[t] = 2;
}
break;
case RES_VLAN:
if (t == mlx4_master_func_num(dev)) {
res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
for (j = 0; j < MLX4_MAX_PORTS; j++)
res_alloc->res_port_free[j] =
res_alloc->quota[t];
} else {
res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
res_alloc->guaranteed[t] = 0;
}
break;
case RES_COUNTER:
res_alloc->quota[t] = dev->caps.max_counters;
if (t == mlx4_master_func_num(dev))
res_alloc->guaranteed[t] =
MLX4_PF_COUNTERS_PER_PORT *
MLX4_MAX_PORTS;
else if (t <= max_vfs_guarantee_counter)
res_alloc->guaranteed[t] =
MLX4_VF_COUNTERS_PER_PORT *
MLX4_MAX_PORTS;
else
res_alloc->guaranteed[t] = 0;
res_alloc->res_free -= res_alloc->guaranteed[t];
break;
default:
break;
}
if (i == RES_MAC || i == RES_VLAN) {
for (j = 0; j < dev->caps.num_ports; j++)
if (test_bit(j, actv_ports.ports))
res_alloc->res_port_rsvd[j] +=
res_alloc->guaranteed[t];
} else {
res_alloc->res_reserved += res_alloc->guaranteed[t];
}
}
}
spin_lock_init(&priv->mfunc.master.res_tracker.lock);
return 0;
no_mem_err:
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
}
return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
enum mlx4_res_tracker_free_type type)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
if (priv->mfunc.master.res_tracker.slave_list) {
if (type != RES_TR_FREE_STRUCTS_ONLY) {
for (i = 0; i < dev->num_slaves; i++) {
if (type == RES_TR_FREE_ALL ||
dev->caps.function != i)
mlx4_delete_all_resources_for_slave(dev, i);
}
/* free master's vlans */
i = dev->caps.function;
mlx4_reset_roce_gids(dev, i);
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
rem_slave_vlans(dev, i);
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
}
if (type != RES_TR_FREE_SLAVES_ONLY) {
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
}
kfree(priv->mfunc.master.res_tracker.slave_list);
priv->mfunc.master.res_tracker.slave_list = NULL;
}
}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox)
{
u8 sched = *(u8 *)(inbox->buf + 64);
u8 orig_index = *(u8 *)(inbox->buf + 35);
u8 new_index;
struct mlx4_priv *priv = mlx4_priv(dev);
int port;
port = (sched >> 6 & 1) + 1;
new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
*(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
u8 slave)
{
struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
int port;
if (MLX4_QP_ST_UD == ts) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (mlx4_is_eth(dev, port))
qp_ctx->pri_path.mgid_index =
mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
else
qp_ctx->pri_path.mgid_index = slave | 0x80;
} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (mlx4_is_eth(dev, port)) {
qp_ctx->pri_path.mgid_index +=
mlx4_get_base_gid_ix(dev, slave, port);
qp_ctx->pri_path.mgid_index &= 0x7f;
} else {
qp_ctx->pri_path.mgid_index = slave & 0x7F;
}
}
if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
if (mlx4_is_eth(dev, port)) {
qp_ctx->alt_path.mgid_index +=
mlx4_get_base_gid_ix(dev, slave, port);
qp_ctx->alt_path.mgid_index &= 0x7f;
} else {
qp_ctx->alt_path.mgid_index = slave & 0x7F;
}
}
}
}
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
u8 slave, int port);
static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *inbox,
u8 slave, u32 qpn)
{
struct mlx4_qp_context *qpc = inbox->buf + 8;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
u32 qp_type;
int port, err = 0;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
priv = mlx4_priv(dev);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
err = handle_counter(dev, qpc, slave, port);
if (err)
goto out;
if (MLX4_VGT != vp_oper->state.default_vlan) {
/* the reserved QPs (special, proxy, tunnel)
* do not operate over vlans
*/
if (mlx4_is_qp_reserved(dev, qpn))
return 0;
/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
if (qp_type == MLX4_QP_ST_UD ||
(qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
*(__be32 *)inbox->buf =
cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
MLX4_QP_OPTPAR_VLAN_STRIPPING);
qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
} else {
struct mlx4_update_qp_params params = {.flags = 0};
err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms);
if (err)
goto out;
}
}
if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
qpc->pri_path.vlan_control =
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
} else if (0 != vp_oper->state.default_vlan) {
qpc->pri_path.vlan_control =
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
} else { /* priority tagged */
qpc->pri_path.vlan_control =
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
}
qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
qpc->pri_path.vlan_index = vp_oper->vlan_idx;
qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
qpc->pri_path.sched_queue &= 0xC7;
qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
qpc->qos_vport = vp_oper->state.qos_vport;
}
if (vp_oper->state.spoofchk) {
qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
}
out:
return err;
}
static int mpt_mask(struct mlx4_dev *dev)
{
return dev->caps.num_mpts - 1;
}
static void *find_res(struct mlx4_dev *dev, u64 res_id,
enum mlx4_resource type)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type,
void *res)
{
struct res_common *r;
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, res_id, type);
if (!r) {
err = -ENONET;
goto exit;
}
if (r->state == RES_ANY_BUSY) {
err = -EBUSY;
goto exit;
}
if (r->owner != slave) {
err = -EPERM;
goto exit;
}
r->from_state = r->state;
r->state = RES_ANY_BUSY;
if (res)
*((struct res_common **)res) = r;
exit:
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource type,
u64 res_id, int *slave)
{
struct res_common *r;
int err = -ENOENT;
int id = res_id;
if (type == RES_QP)
id &= 0x7fffff;
spin_lock(mlx4_tlock(dev));
r = find_res(dev, id, type);
if (r) {
*slave = r->owner;
err = 0;
}
spin_unlock(mlx4_tlock(dev));
return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type)
{
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, res_id, type);
if (r)
r->state = r->from_state;
spin_unlock_irq(mlx4_tlock(dev));
}
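/*
 * Illustrative sketch: get_res() leaves the resource in RES_ANY_BUSY so
 * no concurrent command can move its state; every successful get_res()
 * must therefore be paired with a put_res().  Typical shape, using
 * res_mpt as the example type:
 */
#if 0
	struct res_mpt *mpt;

	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;
	/* ... operate on mpt while it is marked busy ... */
	put_res(dev, slave, id, RES_MPT);
#endif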
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int port);
static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
int counter_index)
{
struct res_common *r;
struct res_counter *counter;
int ret = 0;
if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
return ret;
spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, counter_index, RES_COUNTER);
if (!r || r->owner != slave)
ret = -EINVAL;
counter = container_of(r, struct res_counter, com);
if (!counter->port)
counter->port = port;
spin_unlock_irq(mlx4_tlock(dev));
return ret;
}
static int handle_unexisting_counter(struct mlx4_dev *dev,
struct mlx4_qp_context *qpc, u8 slave,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *tmp;
struct res_counter *counter;
u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry(tmp,
&tracker->slave_list[slave].res_list[RES_COUNTER],
list) {
counter = container_of(tmp, struct res_counter, com);
if (port == counter->port) {
qpc->pri_path.counter_index = counter->com.res_id;
spin_unlock_irq(mlx4_tlock(dev));
return 0;
}
}
spin_unlock_irq(mlx4_tlock(dev));
/* No existing counter, need to allocate a new counter */
err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
port);
if (err == -ENOENT) {
err = 0;
} else if (err && err != -ENOSPC) {
mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
__func__, slave, err);
} else {
qpc->pri_path.counter_index = counter_idx;
mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
__func__, slave, qpc->pri_path.counter_index);
err = 0;
}
return err;
}
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
u8 slave, int port)
{
if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
return handle_existing_counter(dev, slave, port,
qpc->pri_path.counter_index);
return handle_unexisting_counter(dev, qpc, slave, port);
}
static struct res_common *alloc_qp_tr(int id)
{
struct res_qp *ret;
ret = kzalloc(sizeof *ret, GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_QP_RESERVED;
ret->local_qpn = id;
INIT_LIST_HEAD(&ret->mcg_list);
spin_lock_init(&ret->mcg_spl);
atomic_set(&ret->ref_count, 0);
return &ret->com;
}
static struct res_common *alloc_mtt_tr(int id, int order)
{
struct res_mtt *ret;
ret = kzalloc(sizeof *ret, GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->order = order;
ret->com.state = RES_MTT_ALLOCATED;
atomic_set(&ret->ref_count, 0);
return &ret->com;
}
static struct res_common *alloc_mpt_tr(int id, int key)
{
struct res_mpt *ret;
ret = kzalloc(sizeof *ret, GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_MPT_RESERVED;
ret->key = key;
return &ret->com;
}
static struct res_common *alloc_eq_tr(int id)
{
struct res_eq *ret;
ret = kzalloc(sizeof *ret, GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_EQ_RESERVED;
return &ret->com;
}
static struct res_common *alloc_cq_tr(int id)
{
struct res_cq *ret;
ret = kzalloc(sizeof *ret, GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_CQ_ALLOCATED;
atomic_set(&ret->ref_count, 0);
return &ret->com;
}
static struct res_common *alloc_srq_tr(int id)
{
struct res_srq *ret;
ret = kzalloc(sizeof *ret, GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_SRQ_ALLOCATED;
atomic_set(&ret->ref_count, 0);
return &ret->com;
}
static struct res_common *alloc_counter_tr(int id, int port)
{
struct res_counter *ret;
ret = kzalloc(sizeof *ret, GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_COUNTER_ALLOCATED;
ret->port = port;
return &ret->com;
}
static struct res_common *alloc_xrcdn_tr(int id)
{
struct res_xrcdn *ret;
ret = kzalloc(sizeof *ret, GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_XRCD_ALLOCATED;
return &ret->com;
}
static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
struct res_fs_rule *ret;
ret = kzalloc(sizeof *ret, GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_FS_RULE_ALLOCATED;
ret->qpn = qpn;
return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
int extra)
{
struct res_common *ret;
switch (type) {
case RES_QP:
ret = alloc_qp_tr(id);
break;
case RES_MPT:
ret = alloc_mpt_tr(id, extra);
break;
case RES_MTT:
ret = alloc_mtt_tr(id, extra);
break;
case RES_EQ:
ret = alloc_eq_tr(id);
break;
case RES_CQ:
ret = alloc_cq_tr(id);
break;
case RES_SRQ:
ret = alloc_srq_tr(id);
break;
case RES_MAC:
pr_err("implementation missing\n");
return NULL;
case RES_COUNTER:
ret = alloc_counter_tr(id, extra);
break;
case RES_XRCD:
ret = alloc_xrcdn_tr(id);
break;
case RES_FS_RULE:
ret = alloc_fs_rule_tr(id, extra);
break;
default:
return NULL;
}
if (ret)
ret->owner = slave;
return ret;
}
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
struct mlx4_counter *data)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *tmp;
struct res_counter *counter;
int *counters_arr;
int i = 0, err = 0;
memset(data, 0, sizeof(*data));
counters_arr = kmalloc_array(dev->caps.max_counters,
sizeof(*counters_arr), GFP_KERNEL);
if (!counters_arr)
return -ENOMEM;
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry(tmp,
&tracker->slave_list[slave].res_list[RES_COUNTER],
list) {
counter = container_of(tmp, struct res_counter, com);
if (counter->port == port) {
counters_arr[i] = (int)tmp->res_id;
i++;
}
}
spin_unlock_irq(mlx4_tlock(dev));
counters_arr[i] = -1;
i = 0;
while (counters_arr[i] != -1) {
err = mlx4_get_counter_stats(dev, counters_arr[i], data,
0);
if (err) {
memset(data, 0, sizeof(*data));
goto table_changed;
}
i++;
}
table_changed:
kfree(counters_arr);
return 0;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
int i;
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
struct res_common **res_arr;
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct rb_root *root = &tracker->res_tree[type];
res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
if (!res_arr)
return -ENOMEM;
for (i = 0; i < count; ++i) {
res_arr[i] = alloc_tr(base + i, type, slave, extra);
if (!res_arr[i]) {
for (--i; i >= 0; --i)
kfree(res_arr[i]);
kfree(res_arr);
return -ENOMEM;
}
}
spin_lock_irq(mlx4_tlock(dev));
for (i = 0; i < count; ++i) {
if (find_res(dev, base + i, type)) {
err = -EEXIST;
goto undo;
}
err = res_tracker_insert(root, res_arr[i]);
if (err)
goto undo;
list_add_tail(&res_arr[i]->list,
&tracker->slave_list[slave].res_list[type]);
}
spin_unlock_irq(mlx4_tlock(dev));
kfree(res_arr);
return 0;
undo:
/* i indexes res_arr from 0, so unwind down to 0 (comparing against
 * base would stop too early when base > 0); also drop the entries
 * from the slave list they were added to above
 */
for (--i; i >= 0; --i) {
rb_erase(&res_arr[i]->node, root);
list_del(&res_arr[i]->list);
}
spin_unlock_irq(mlx4_tlock(dev));
for (i = 0; i < count; ++i)
kfree(res_arr[i]);
kfree(res_arr);
return err;
}
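/*
 * Usage sketch (hedged): reservation paths register each newly allocated
 * range with add_res_range() and unwind with rem_res_range() (defined
 * below) if a later step fails.  Schematically, for "count" QPs starting
 * at "base":
 */
#if 0
	err = add_res_range(dev, slave, base, count, RES_QP, 0);
	if (err)
		return err;
	...
	if (later_step_failed)
		rem_res_range(dev, slave, base, count, RES_QP, 0);
#endif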
static int remove_qp_ok(struct res_qp *res)
{
if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
!list_empty(&res->mcg_list)) {
pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
res->com.state, atomic_read(&res->ref_count));
return -EBUSY;
} else if (res->com.state != RES_QP_RESERVED) {
return -EPERM;
}
return 0;
}
static int remove_mtt_ok(struct res_mtt *res, int order)
{
if (res->com.state == RES_MTT_BUSY ||
atomic_read(&res->ref_count)) {
pr_devel("%s-%d: state %s, ref_count %d\n",
__func__, __LINE__,
mtt_states_str(res->com.state),
atomic_read(&res->ref_count));
return -EBUSY;
} else if (res->com.state != RES_MTT_ALLOCATED)
return -EPERM;
else if (res->order != order)
return -EINVAL;
return 0;
}
static int remove_mpt_ok(struct res_mpt *res)
{
if (res->com.state == RES_MPT_BUSY)
return -EBUSY;
else if (res->com.state != RES_MPT_RESERVED)
return -EPERM;
return 0;
}
static int remove_eq_ok(struct res_eq *res)
{
if (res->com.state == RES_EQ_BUSY)
return -EBUSY;
else if (res->com.state != RES_EQ_RESERVED)
return -EPERM;
return 0;
}
static int remove_counter_ok(struct res_counter *res)
{
if (res->com.state == RES_COUNTER_BUSY)
return -EBUSY;
else if (res->com.state != RES_COUNTER_ALLOCATED)
return -EPERM;
return 0;
}
static int remove_xrcdn_ok(struct res_xrcdn *res)
{
if (res->com.state == RES_XRCD_BUSY)
return -EBUSY;
else if (res->com.state != RES_XRCD_ALLOCATED)
return -EPERM;
return 0;
}
static int remove_fs_rule_ok(struct res_fs_rule *res)
{
if (res->com.state == RES_FS_RULE_BUSY)
return -EBUSY;
else if (res->com.state != RES_FS_RULE_ALLOCATED)
return -EPERM;
return 0;
}
static int remove_cq_ok(struct res_cq *res)
{
if (res->com.state == RES_CQ_BUSY)
return -EBUSY;
else if (res->com.state != RES_CQ_ALLOCATED)
return -EPERM;
return 0;
}
static int remove_srq_ok(struct res_srq *res)
{
if (res->com.state == RES_SRQ_BUSY)
return -EBUSY;
else if (res->com.state != RES_SRQ_ALLOCATED)
return -EPERM;
return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
switch (type) {
case RES_QP:
return remove_qp_ok((struct res_qp *)res);
case RES_CQ:
return remove_cq_ok((struct res_cq *)res);
case RES_SRQ:
return remove_srq_ok((struct res_srq *)res);
case RES_MPT:
return remove_mpt_ok((struct res_mpt *)res);
case RES_MTT:
return remove_mtt_ok((struct res_mtt *)res, extra);
case RES_MAC:
return -ENOSYS;
case RES_EQ:
return remove_eq_ok((struct res_eq *)res);
case RES_COUNTER:
return remove_counter_ok((struct res_counter *)res);
case RES_XRCD:
return remove_xrcdn_ok((struct res_xrcdn *)res);
case RES_FS_RULE:
return remove_fs_rule_ok((struct res_fs_rule *)res);
default:
return -EINVAL;
}
}
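/* Verify that every id in [base, base + count) is owned by the slave and
* in a removable state, then erase and free the entries in a second pass;
* both passes run under the tracker lock.
*/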
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
u64 i;
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
for (i = base; i < base + count; ++i) {
r = res_tracker_lookup(&tracker->res_tree[type], i);
if (!r) {
err = -ENOENT;
goto out;
}
if (r->owner != slave) {
err = -EPERM;
goto out;
}
err = remove_ok(r, type, extra);
if (err)
goto out;
}
for (i = base; i < base + count; ++i) {
r = res_tracker_lookup(&tracker->res_tree[type], i);
rb_erase(&r->node, &tracker->res_tree[type]);
list_del(&r->list);
kfree(r);
}
err = 0;
out:
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
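/* State-move protocol: the *_res_start_move_to() helpers validate the
* requested transition under the tracker lock, record from_state/to_state
* and park the entry in the BUSY state so concurrent wrappers back off.
* The caller commits with res_end_move() or rolls back with
* res_abort_move().
*/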
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
enum res_qp_states state, struct res_qp **qp,
int alloc)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_qp *r;
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
err = -EPERM;
else {
switch (state) {
case RES_QP_BUSY:
mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
__func__, r->com.res_id);
err = -EBUSY;
break;
case RES_QP_RESERVED:
if (r->com.state == RES_QP_MAPPED && !alloc)
break;
mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
err = -EINVAL;
break;
case RES_QP_MAPPED:
if ((r->com.state == RES_QP_RESERVED && alloc) ||
r->com.state == RES_QP_HW)
break;
else {
mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
r->com.res_id);
err = -EINVAL;
}
break;
case RES_QP_HW:
if (r->com.state != RES_QP_MAPPED)
err = -EINVAL;
break;
default:
err = -EINVAL;
}
if (!err) {
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_QP_BUSY;
if (qp)
*qp = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
enum res_mpt_states state, struct res_mpt **mpt)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_mpt *r;
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
err = -EPERM;
else {
switch (state) {
case RES_MPT_BUSY:
err = -EINVAL;
break;
case RES_MPT_RESERVED:
if (r->com.state != RES_MPT_MAPPED)
err = -EINVAL;
break;
case RES_MPT_MAPPED:
if (r->com.state != RES_MPT_RESERVED &&
r->com.state != RES_MPT_HW)
err = -EINVAL;
break;
case RES_MPT_HW:
if (r->com.state != RES_MPT_MAPPED)
err = -EINVAL;
break;
default:
err = -EINVAL;
}
if (!err) {
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_MPT_BUSY;
if (mpt)
*mpt = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
enum res_eq_states state, struct res_eq **eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_eq *r;
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
err = -EPERM;
else {
switch (state) {
case RES_EQ_BUSY:
err = -EINVAL;
break;
case RES_EQ_RESERVED:
if (r->com.state != RES_EQ_HW)
err = -EINVAL;
break;
case RES_EQ_HW:
if (r->com.state != RES_EQ_RESERVED)
err = -EINVAL;
break;
default:
err = -EINVAL;
}
if (!err) {
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_EQ_BUSY;
if (eq)
*eq = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
enum res_cq_states state, struct res_cq **cq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_cq *r;
int err;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
if (!r) {
err = -ENOENT;
} else if (r->com.owner != slave) {
err = -EPERM;
} else if (state == RES_CQ_ALLOCATED) {
if (r->com.state != RES_CQ_HW)
err = -EINVAL;
else if (atomic_read(&r->ref_count))
err = -EBUSY;
else
err = 0;
} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
err = -EINVAL;
} else {
err = 0;
}
if (!err) {
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_CQ_BUSY;
if (cq)
*cq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
enum res_srq_states state, struct res_srq **srq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_srq *r;
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
if (!r) {
err = -ENOENT;
} else if (r->com.owner != slave) {
err = -EPERM;
} else if (state == RES_SRQ_ALLOCATED) {
if (r->com.state != RES_SRQ_HW)
err = -EINVAL;
else if (atomic_read(&r->ref_count))
err = -EBUSY;
} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
err = -EINVAL;
}
if (!err) {
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_SRQ_BUSY;
if (srq)
*srq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
enum mlx4_resource type, int id)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->from_state;
spin_unlock_irq(mlx4_tlock(dev));
}
static void res_end_move(struct mlx4_dev *dev, int slave,
enum mlx4_resource type, int id)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->to_state;
spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
return mlx4_is_qp_reserved(dev, qpn) &&
(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}
static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
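/* ALLOC_RES(RES_QP): RES_OP_RESERVE grants quota and reserves a range of
* QP numbers for the slave; RES_OP_MAP_ICM moves a single QPN to the
* MAPPED state and backs it with ICM memory unless it is FW-reserved.
*/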
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int err;
int count;
int align;
int base;
int qpn;
u8 flags;
switch (op) {
case RES_OP_RESERVE:
count = get_param_l(&in_param) & 0xffffff;
/* Turn off all unsupported QP allocation flags that the
* slave tries to set.
*/
flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
align = get_param_h(&in_param);
err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
if (err)
return err;
err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
if (err) {
mlx4_release_resource(dev, slave, RES_QP, count, 0);
return err;
}
err = add_res_range(dev, slave, base, count, RES_QP, 0);
if (err) {
mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
return err;
}
set_param_l(out_param, base);
break;
case RES_OP_MAP_ICM:
qpn = get_param_l(&in_param) & 0x7fffff;
if (valid_reserved(dev, slave, qpn)) {
err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
if (err)
return err;
}
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
NULL, 1);
if (err)
return err;
if (!fw_reserved(dev, qpn)) {
err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
if (err) {
res_abort_move(dev, slave, RES_QP, qpn);
return err;
}
}
res_end_move(dev, slave, RES_QP, qpn);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int err = -EINVAL;
int base;
int order;
if (op != RES_OP_RESERVE_AND_MAP)
return err;
order = get_param_l(&in_param);
err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
if (err)
return err;
base = __mlx4_alloc_mtt_range(dev, order);
if (base == -1) {
mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
return -ENOMEM;
}
err = add_res_range(dev, slave, base, 1, RES_MTT, order);
if (err) {
mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
} else {
set_param_l(out_param, base);
}
return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int err = -EINVAL;
int index;
int id;
struct res_mpt *mpt;
switch (op) {
case RES_OP_RESERVE:
err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
if (err)
break;
index = __mlx4_mpt_reserve(dev);
if (index == -1) {
mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
break;
}
id = index & mpt_mask(dev);
err = add_res_range(dev, slave, id, 1, RES_MPT, index);
if (err) {
mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
}
set_param_l(out_param, index);
break;
case RES_OP_MAP_ICM:
index = get_param_l(&in_param);
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id,
RES_MPT_MAPPED, &mpt);
if (err)
return err;
err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
if (err) {
res_abort_move(dev, slave, RES_MPT, id);
return err;
}
res_end_move(dev, slave, RES_MPT, id);
break;
}
return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int cqn;
int err;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
if (err)
break;
err = __mlx4_cq_alloc_icm(dev, &cqn);
if (err) {
mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
break;
}
err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
if (err) {
mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
}
set_param_l(out_param, cqn);
break;
default:
err = -EINVAL;
}
return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int srqn;
int err;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
if (err)
break;
err = __mlx4_srq_alloc_icm(dev, &srqn);
if (err) {
mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
break;
}
err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
if (err) {
mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
}
set_param_l(out_param, srqn);
break;
default:
err = -EINVAL;
}
return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
u8 smac_index, u64 *mac)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->smac_index == smac_index && res->port == (u8) port) {
*mac = res->mac;
return 0;
}
}
return -ENOENT;
}
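/* Per-slave MAC bookkeeping: entries are reference counted per
* (mac, port) pair and the quota is released when the count drops to zero.
*/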
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
/* mac found. update ref count */
++res->ref_count;
return 0;
}
}
if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
return -EINVAL;
res = kzalloc(sizeof *res, GFP_KERNEL);
if (!res) {
mlx4_release_resource(dev, slave, RES_MAC, 1, port);
return -ENOMEM;
}
res->mac = mac;
res->port = (u8) port;
res->smac_index = smac_index;
res->ref_count = 1;
list_add_tail(&res->list,
&tracker->slave_list[slave].res_list[RES_MAC]);
return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
if (!--res->ref_count) {
list_del(&res->list);
mlx4_release_resource(dev, slave, RES_MAC, 1, port);
kfree(res);
}
break;
}
}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;
int i;
list_for_each_entry_safe(res, tmp, mac_list, list) {
list_del(&res->list);
/* dereference the MAC the number of times the slave referenced it */
for (i = 0; i < res->ref_count; i++)
__mlx4_unregister_mac(dev, res->port, res->mac);
mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
kfree(res);
}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int in_port)
{
int err = -EINVAL;
int port;
u64 mac;
u8 smac_index;
if (op != RES_OP_RESERVE_AND_MAP)
return err;
port = !in_port ? get_param_l(out_param) : in_port;
port = mlx4_slave_convert_port(
dev, slave, port);
if (port < 0)
return -EINVAL;
mac = in_param;
err = __mlx4_register_mac(dev, port, mac);
if (err >= 0) {
smac_index = err;
set_param_l(out_param, err);
err = 0;
}
if (!err) {
err = mac_add_to_slave(dev, slave, mac, port, smac_index);
if (err)
__mlx4_unregister_mac(dev, port, mac);
}
return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
int port, int vlan_index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *vlan_list =
&tracker->slave_list[slave].res_list[RES_VLAN];
struct vlan_res *res, *tmp;
list_for_each_entry_safe(res, tmp, vlan_list, list) {
if (res->vlan == vlan && res->port == (u8) port) {
/* vlan found. update ref count */
++res->ref_count;
return 0;
}
}
if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
return -EINVAL;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res) {
mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
return -ENOMEM;
}
res->vlan = vlan;
res->port = (u8) port;
res->vlan_index = vlan_index;
res->ref_count = 1;
list_add_tail(&res->list,
&tracker->slave_list[slave].res_list[RES_VLAN]);
return 0;
}
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *vlan_list =
&tracker->slave_list[slave].res_list[RES_VLAN];
struct vlan_res *res, *tmp;
list_for_each_entry_safe(res, tmp, vlan_list, list) {
if (res->vlan == vlan && res->port == (u8) port) {
if (!--res->ref_count) {
list_del(&res->list);
mlx4_release_resource(dev, slave, RES_VLAN,
1, port);
kfree(res);
}
break;
}
}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *vlan_list =
&tracker->slave_list[slave].res_list[RES_VLAN];
struct vlan_res *res, *tmp;
int i;
list_for_each_entry_safe(res, tmp, vlan_list, list) {
list_del(&res->list);
/* dereference the VLAN the number of times the slave referenced it */
for (i = 0; i < res->ref_count; i++)
__mlx4_unregister_vlan(dev, res->port, res->vlan);
mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
kfree(res);
}
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int in_port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
int err;
u16 vlan;
int vlan_index;
int port;
port = !in_port ? get_param_l(out_param) : in_port;
if (!port || op != RES_OP_RESERVE_AND_MAP)
return -EINVAL;
port = mlx4_slave_convert_port(
dev, slave, port);
if (port < 0)
return -EINVAL;
/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
if (!in_port && port > 0 && port <= dev->caps.num_ports) {
slave_state[slave].old_vlan_api = true;
return 0;
}
vlan = (u16) in_param;
err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
if (!err) {
set_param_l(out_param, (u32) vlan_index);
err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
if (err)
__mlx4_unregister_vlan(dev, port, vlan);
}
return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int port)
{
u32 index;
int err;
if (op != RES_OP_RESERVE)
return -EINVAL;
err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
if (err)
return err;
err = __mlx4_counter_alloc(dev, &index);
if (err) {
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
return err;
}
err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
if (err) {
__mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
} else {
set_param_l(out_param, index);
}
return err;
}
static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
u32 xrcdn;
int err;
if (op != RES_OP_RESERVE)
return -EINVAL;
err = __mlx4_xrcd_alloc(dev, &xrcdn);
if (err)
return err;
err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
if (err)
__mlx4_xrcd_free(dev, xrcdn);
else
set_param_l(out_param, xrcdn);
return err;
}
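/* Dispatch a resource-allocation command from a slave to the per-type
* handler above. The resource type is in the low byte of the in_modifier;
* the MAC and VLAN handlers also receive a port from the next byte.
*/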
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int alop = vhcr->op_modifier;
switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_MTT:
err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_MPT:
err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_CQ:
err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_SRQ:
err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_MAC:
err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param, 0);
break;
case RES_XRCD:
err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param)
{
int err;
int count;
int base;
int qpn;
switch (op) {
case RES_OP_RESERVE:
base = get_param_l(&in_param) & 0x7fffff;
count = get_param_h(&in_param);
err = rem_res_range(dev, slave, base, count, RES_QP, 0);
if (err)
break;
mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
break;
case RES_OP_MAP_ICM:
qpn = get_param_l(&in_param) & 0x7fffff;
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
NULL, 0);
if (err)
return err;
if (!fw_reserved(dev, qpn))
__mlx4_qp_free_icm(dev, qpn);
res_end_move(dev, slave, RES_QP, qpn);
if (valid_reserved(dev, slave, qpn))
err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int err = -EINVAL;
int base;
int order;
if (op != RES_OP_RESERVE_AND_MAP)
return err;
base = get_param_l(&in_param);
order = get_param_h(&in_param);
err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
if (!err) {
mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
}
return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param)
{
int err = -EINVAL;
int index;
int id;
struct res_mpt *mpt;
switch (op) {
case RES_OP_RESERVE:
index = get_param_l(&in_param);
id = index & mpt_mask(dev);
err = get_res(dev, slave, id, RES_MPT, &mpt);
if (err)
break;
index = mpt->key;
put_res(dev, slave, id, RES_MPT);
err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
if (err)
break;
mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
case RES_OP_MAP_ICM:
index = get_param_l(&in_param);
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id,
RES_MPT_RESERVED, &mpt);
if (err)
return err;
__mlx4_mpt_free_icm(dev, mpt->key);
res_end_move(dev, slave, RES_MPT, id);
return err;
default:
err = -EINVAL;
break;
}
return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int cqn;
int err;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
cqn = get_param_l(&in_param);
err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
if (err)
break;
mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int srqn;
int err;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
srqn = get_param_l(&in_param);
err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
if (err)
break;
mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int in_port)
{
int port;
int err = 0;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
port = !in_port ? get_param_l(out_param) : in_port;
port = mlx4_slave_convert_port(
dev, slave, port);
if (port < 0)
return -EINVAL;
mac_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_mac(dev, port, in_param);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
int err = 0;
port = mlx4_slave_convert_port(
dev, slave, port);
if (port < 0)
return -EINVAL;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
if (slave_state[slave].old_vlan_api)
return 0;
if (!port)
return -EINVAL;
vlan_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_vlan(dev, port, in_param);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int index;
int err;
if (op != RES_OP_RESERVE)
return -EINVAL;
index = get_param_l(&in_param);
if (index == MLX4_SINK_COUNTER_INDEX(dev))
return 0;
err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
if (err)
return err;
__mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
return err;
}
static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int xrcdn;
int err;
if (op != RES_OP_RESERVE)
return -EINVAL;
xrcdn = get_param_l(&in_param);
err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
if (err)
return err;
__mlx4_xrcd_free(dev, xrcdn);
return err;
}
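/* Mirror of mlx4_ALLOC_RES_wrapper for freeing: dispatch to the per-type
* free handler, which drops the tracker entry, returns the quota and
* releases the underlying resource.
*/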
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err = -EINVAL;
int alop = vhcr->op_modifier;
switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param);
break;
case RES_MTT:
err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_MPT:
err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param);
break;
case RES_CQ:
err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_SRQ:
err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_MAC:
err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_XRCD:
err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
default:
break;
}
return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
return (be32_to_cpu(mpt->flags) >> 9) & 1;
}
static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}
static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
return be32_to_cpu(mpt->mtt_sz);
}
static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}
static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}
static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}
static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}
static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
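/* Compute the number of MTT pages a QP needs from the SQ/RQ log sizes and
* strides in its context. QPs attached to an SRQ and RSS/XRC QPs have no
* RQ of their own, so only the SQ is counted for them.
*/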
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
int page_shift = (qpc->log_page_size & 0x3f) + 12;
int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
int log_sq_stride = qpc->sq_size_stride & 7;
int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
int log_rq_stride = qpc->rq_size_stride & 7;
int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
int sq_size;
int rq_size;
int total_pages;
int total_mem;
int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
sq_size = 1 << (log_sq_size + log_sq_stride + 4);
rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
total_mem = sq_size + rq_size;
total_pages =
roundup_pow_of_two((total_mem + (page_offset << 6)) >>
page_shift);
return total_pages;
}
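/* A slave may only reference MTT entries inside a range it owns: the
* tracked range starts at the reserved base id and spans 1 << order
* entries.
*/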
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
int size, struct res_mtt *mtt)
{
int res_start = mtt->com.res_id;
int res_size = (1 << mtt->order);
if (start < res_start || start + size > res_start + res_size)
return -EPERM;
return 0;
}
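/* SW2HW_MPT: move the MPT to the HW state after enforcing VF restrictions
* (regions only, no memory windows, no bind-enabled FMRs, PD function bits
* owned by the caller) and validating the referenced MTT range.
*/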
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int index = vhcr->in_modifier;
struct res_mtt *mtt;
struct res_mpt *mpt;
int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
int phys;
int id;
u32 pd;
int pd_slave;
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
if (err)
return err;
/* Disable memory windows for VFs. */
if (!mr_is_region(inbox->buf)) {
err = -EPERM;
goto ex_abort;
}
/* Make sure the PD's function bits are either zero or match the calling slave. */
pd = mr_get_pd(inbox->buf);
pd_slave = (pd >> 17) & 0x7f;
if (pd_slave != 0 && --pd_slave != slave) {
err = -EPERM;
goto ex_abort;
}
if (mr_is_fmr(inbox->buf)) {
/* FMR and Bind Enable are forbidden in slave devices. */
if (mr_is_bind_enabled(inbox->buf)) {
err = -EPERM;
goto ex_abort;
}
/* FMR and Memory Windows are also forbidden. */
if (!mr_is_region(inbox->buf)) {
err = -EPERM;
goto ex_abort;
}
}
phys = mr_phys_mpt(inbox->buf);
if (!phys) {
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto ex_abort;
err = check_mtt_range(dev, slave, mtt_base,
mr_get_mtt_size(inbox->buf), mtt);
if (err)
goto ex_put;
mpt->mtt = mtt;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put;
if (!phys) {
atomic_inc(&mtt->ref_count);
put_res(dev, slave, mtt->com.res_id, RES_MTT);
}
res_end_move(dev, slave, RES_MPT, id);
return 0;
ex_put:
if (!phys)
put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
res_abort_move(dev, slave, RES_MPT, id);
return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int index = vhcr->in_modifier;
struct res_mpt *mpt;
int id;
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
if (err)
return err;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_abort;
if (mpt->mtt)
atomic_dec(&mpt->mtt->ref_count);
res_end_move(dev, slave, RES_MPT, id);
return 0;
ex_abort:
res_abort_move(dev, slave, RES_MPT, id);
return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int index = vhcr->in_modifier;
struct res_mpt *mpt;
int id;
id = index & mpt_mask(dev);
err = get_res(dev, slave, id, RES_MPT, &mpt);
if (err)
return err;
if (mpt->com.from_state == RES_MPT_MAPPED) {
/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
* that, the VF must read the MPT. But since the MPT entry memory is not
* in the VF's virtual memory space, it must use QUERY_MPT to obtain the
* entry contents. To guarantee that the MPT cannot be changed, the driver
* must perform HW2SW_MPT before this query and return the MPT entry to HW
ownership following the change. The change here allows the VF to
* perform QUERY_MPT also when the entry is in SW ownership.
*/
struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
&mlx4_priv(dev)->mr_table.dmpt_table,
mpt->key, NULL);
if (!mpt_entry || !outbox->buf) {
err = -EINVAL;
goto out;
}
memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
err = 0;
} else if (mpt->com.from_state == RES_MPT_HW) {
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
} else {
err = -EBUSY;
goto out;
}
out:
put_res(dev, slave, id, RES_MPT);
return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}
static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}
static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
struct mlx4_qp_context *context)
{
u32 qpn = vhcr->in_modifier & 0xffffff;
u32 qkey = 0;
if (mlx4_get_parav_qkey(dev, qpn, &qkey))
return;
/* adjust qkey in qp context */
context->qkey = cpu_to_be32(qkey);
}
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
struct mlx4_qp_context *qpc,
struct mlx4_cmd_mailbox *inbox);
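/* RST2INIT: take references on every object the QP context points at (the
* MTT range, the receive and send CQs and, if used, the SRQ) before the
* command reaches the firmware, so none of them can be freed while the QP
* is in the HW state.
*/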
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_mtt *mtt;
struct res_qp *qp;
struct mlx4_qp_context *qpc = inbox->buf + 8;
int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
int mtt_size = qp_get_mtt_size(qpc);
struct res_cq *rcq;
struct res_cq *scq;
int rcqn = qp_get_rcqn(qpc);
int scqn = qp_get_scqn(qpc);
u32 srqn = qp_get_srqn(qpc) & 0xffffff;
int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
struct res_srq *srq;
int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)
return err;
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
if (err)
return err;
qp->local_qpn = local_qpn;
qp->sched_queue = 0;
qp->param3 = 0;
qp->vlan_control = 0;
qp->fvl_rx = 0;
qp->pri_path_fl = 0;
qp->vlan_index = 0;
qp->feup = 0;
qp->qpc_flags = be32_to_cpu(qpc->flags);
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto ex_abort;
err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
if (err)
goto ex_put_mtt;
err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
if (err)
goto ex_put_mtt;
if (scqn != rcqn) {
err = get_res(dev, slave, scqn, RES_CQ, &scq);
if (err)
goto ex_put_rcq;
} else
scq = rcq;
if (use_srq) {
err = get_res(dev, slave, srqn, RES_SRQ, &srq);
if (err)
goto ex_put_scq;
}
adjust_proxy_tun_qkey(dev, vhcr, qpc);
update_pkey_index(dev, slave, inbox);
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put_srq;
atomic_inc(&mtt->ref_count);
qp->mtt = mtt;
atomic_inc(&rcq->ref_count);
qp->rcq = rcq;
atomic_inc(&scq->ref_count);
qp->scq = scq;
if (scqn != rcqn)
put_res(dev, slave, scqn, RES_CQ);
if (use_srq) {
atomic_inc(&srq->ref_count);
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
return 0;
ex_put_srq:
if (use_srq)
put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
if (scqn != rcqn)
put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
res_abort_move(dev, slave, RES_QP, qpn);
return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}
static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
int log_eq_size = eqc->log_eq_size & 0x1f;
int page_shift = (eqc->log_page_size & 0x3f) + 12;
if (log_eq_size + 5 < page_shift)
return 1;
return 1 << (log_eq_size + 5 - page_shift);
}
static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}
static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
int page_shift = (cqc->log_page_size & 0x3f) + 12;
if (log_cq_size + 5 < page_shift)
return 1;
return 1 << (log_cq_size + 5 - page_shift);
}
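/* EQ numbers are per-slave, so the tracker key embeds the slave id:
* res_id = (slave << 10) | eqn. The entry is created here on SW2HW and
* removed again in mlx4_HW2SW_EQ_wrapper().
*/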
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int eqn = vhcr->in_modifier;
int res_id = (slave << 10) | eqn;
struct mlx4_eq_context *eqc = inbox->buf;
int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
int mtt_size = eq_get_mtt_size(eqc);
struct res_eq *eq;
struct res_mtt *mtt;
err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
if (err)
return err;
err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
if (err)
goto out_add;
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto out_move;
err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
if (err)
goto out_put;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto out_put;
atomic_inc(&mtt->ref_count);
eq->mtt = mtt;
put_res(dev, slave, mtt->com.res_id, RES_MTT);
res_end_move(dev, slave, RES_EQ, res_id);
return 0;
out_put:
put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
return err;
}
int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
u8 get = vhcr->op_modifier;
if (get != 1)
return -EPERM;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
int len, struct res_mtt **res)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_mtt *mtt;
int err = -EINVAL;
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
com.list) {
if (!check_mtt_range(dev, slave, start, len, mtt)) {
*res = mtt;
mtt->com.from_state = mtt->com.state;
mtt->com.state = RES_MTT_BUSY;
err = 0;
break;
}
}
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
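/* Sanity-check a QP state-transition mailbox coming from a slave: clear
* the FPP bit and reject rate-limit settings for VFs, validate the mgid
* indices against the slave's GID quota, and block MLX proxy special QPs
* on ports where the VF is not SMI enabled.
*/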
static int verify_qp_parameters(struct mlx4_dev *dev,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
enum qp_transition transition, u8 slave)
{
u32 qp_type;
u32 qpn;
struct mlx4_qp_context *qp_ctx;
enum mlx4_qp_optpar optpar;
int port;
int num_gids;
qp_ctx = inbox->buf + 8;
qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
optpar = be32_to_cpu(*(__be32 *) inbox->buf);
if (slave != mlx4_master_func_num(dev)) {
qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
/* setting QP rate-limit is disallowed for VFs */
if (qp_ctx->rate_limit_params)
return -EPERM;
}
switch (qp_type) {
case MLX4_QP_ST_RC:
case MLX4_QP_ST_XRC:
case MLX4_QP_ST_UC:
switch (transition) {
case QP_TRANS_INIT2RTR:
case QP_TRANS_RTR2RTS:
case QP_TRANS_RTS2RTS:
case QP_TRANS_SQD2SQD:
case QP_TRANS_SQD2RTS:
if (slave != mlx4_master_func_num(dev)) {
if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
num_gids = mlx4_get_slave_num_gids(dev, slave, port);
else
num_gids = 1;
if (qp_ctx->pri_path.mgid_index >= num_gids)
return -EINVAL;
}
if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
num_gids = mlx4_get_slave_num_gids(dev, slave, port);
else
num_gids = 1;
if (qp_ctx->alt_path.mgid_index >= num_gids)
return -EINVAL;
}
}
break;
default:
break;
}
break;
case MLX4_QP_ST_MLX:
qpn = vhcr->in_modifier & 0x7fffff;
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (transition == QP_TRANS_INIT2RTR &&
slave != mlx4_master_func_num(dev) &&
mlx4_is_qp_reserved(dev, qpn) &&
!mlx4_vf_smi_enabled(dev, slave, port)) {
/* only enabled VFs may create MLX proxy QPs */
mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
__func__, slave, port);
return -EPERM;
}
break;
default:
break;
}
return 0;
}
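/* WRITE_MTT from a slave is executed in software: locate the tracked MTT
* range containing the target entries, convert the inbox page list to
* host-endian addresses and call __mlx4_write_mtt() directly.
*/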
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_mtt mtt;
__be64 *page_list = inbox->buf;
u64 *pg_list = (u64 *)page_list;
int i;
struct res_mtt *rmtt = NULL;
int start = be64_to_cpu(page_list[0]);
int npages = vhcr->in_modifier;
int err;
err = get_containing_mtt(dev, slave, start, npages, &rmtt);
if (err)
return err;
/* Call the SW implementation of write_mtt:
* - Prepare a dummy mtt struct
* - Translate inbox contents to simple addresses in host endianness */
mtt.offset = 0; /* TBD: this is broken, but it is not handled since
we don't really use it */
mtt.order = 0;
mtt.page_shift = 0;
for (i = 0; i < npages; ++i)
pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
((u64 *)page_list + 2));
if (rmtt)
put_res(dev, slave, rmtt->com.res_id, RES_MTT);
return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int eqn = vhcr->in_modifier;
int res_id = eqn | (slave << 10);
struct res_eq *eq;
int err;
err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
if (err)
return err;
err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
if (err)
goto ex_abort;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put;
atomic_dec(&eq->mtt->ref_count);
put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
res_end_move(dev, slave, RES_EQ, res_id);
rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
return 0;
ex_put:
put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
res_abort_move(dev, slave, RES_EQ, res_id);
return err;
}
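/* Inject an event on a slave's event queue by wrapping the EQE in a
* GEN_EQE firmware command. Returns 0 without doing anything if the slave
* is invalid or inactive, or has not registered an EQ for this event type.
*/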
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_event_eq_info *event_eq;
struct mlx4_cmd_mailbox *mailbox;
u32 in_modifier = 0;
int err;
int res_id;
struct res_eq *req;
if (!priv->mfunc.master.slave_state)
return -EINVAL;
/* check that the slave is valid, is not the PF, and is active */
if (slave < 0 || slave > dev->persist->num_vfs ||
slave == dev->caps.function ||
!priv->mfunc.master.slave_state[slave].active)
return 0;
event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
/* Create the event only if the slave is registered */
if (event_eq->eqn < 0)
return 0;
mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
res_id = (slave << 10) | event_eq->eqn;
err = get_res(dev, slave, res_id, RES_EQ, &req);
if (err)
goto unlock;
if (req->com.from_state != RES_EQ_HW) {
err = -EINVAL;
goto put;
}
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto put;
}
if (eqe->type == MLX4_EVENT_TYPE_CMD) {
++event_eq->token;
eqe->event.cmd.token = cpu_to_be16(event_eq->token);
}
memcpy(mailbox->buf, (u8 *) eqe, 28);
in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
put_res(dev, slave, res_id, RES_EQ);
mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
put:
put_res(dev, slave, res_id, RES_EQ);
unlock:
mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int eqn = vhcr->in_modifier;
int res_id = eqn | (slave << 10);
struct res_eq *eq;
int err;
err = get_res(dev, slave, res_id, RES_EQ, &eq);
if (err)
return err;
if (eq->com.from_state != RES_EQ_HW) {
err = -EINVAL;
goto ex_put;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
put_res(dev, slave, res_id, RES_EQ);
return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int cqn = vhcr->in_modifier;
struct mlx4_cq_context *cqc = inbox->buf;
int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
struct res_cq *cq = NULL;
struct res_mtt *mtt;
err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
if (err)
return err;
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto out_move;
err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
if (err)
goto out_put;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto out_put;
atomic_inc(&mtt->ref_count);
cq->mtt = mtt;
put_res(dev, slave, mtt->com.res_id, RES_MTT);
res_end_move(dev, slave, RES_CQ, cqn);
return 0;
out_put:
put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
res_abort_move(dev, slave, RES_CQ, cqn);
return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int cqn = vhcr->in_modifier;
struct res_cq *cq = NULL;
err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
if (err)
return err;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto out_move;
atomic_dec(&cq->mtt->ref_count);
res_end_move(dev, slave, RES_CQ, cqn);
return 0;
out_move:
res_abort_move(dev, slave, RES_CQ, cqn);
return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int cqn = vhcr->in_modifier;
struct res_cq *cq;
int err;
err = get_res(dev, slave, cqn, RES_CQ, &cq);
if (err)
return err;
if (cq->com.from_state != RES_CQ_HW)
goto ex_put;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
put_res(dev, slave, cqn, RES_CQ);
return err;
}
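/* MODIFY_CQ with op_modifier 0 resizes the CQ: once the firmware command
* succeeds, move the tracked MTT reference from the old range to the one
* named in the new CQ context.
*/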
static int handle_resize(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd,
struct res_cq *cq)
{
int err;
struct res_mtt *orig_mtt;
struct res_mtt *mtt;
struct mlx4_cq_context *cqc = inbox->buf;
int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
if (err)
return err;
if (orig_mtt != cq->mtt) {
err = -EINVAL;
goto ex_put;
}
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto ex_put;
err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
if (err)
goto ex_put1;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put1;
atomic_dec(&orig_mtt->ref_count);
put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
atomic_inc(&mtt->ref_count);
cq->mtt = mtt;
put_res(dev, slave, mtt->com.res_id, RES_MTT);
return 0;
ex_put1:
put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int cqn = vhcr->in_modifier;
struct res_cq *cq;
int err;
err = get_res(dev, slave, cqn, RES_CQ, &cq);
if (err)
return err;
if (cq->com.from_state != RES_CQ_HW)
goto ex_put;
if (vhcr->op_modifier == 0) {
err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
goto ex_put;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
put_res(dev, slave, cqn, RES_CQ);
return err;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
int log_rq_stride = srqc->logstride & 7;
int page_shift = (srqc->log_page_size & 0x3f) + 12;
if (log_srq_size + log_rq_stride + 4 < page_shift)
return 1;
return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int srqn = vhcr->in_modifier;
struct res_mtt *mtt;
struct res_srq *srq = NULL;
struct mlx4_srq_context *srqc = inbox->buf;
int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
return -EINVAL;
err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
if (err)
return err;
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto ex_abort;
err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
mtt);
if (err)
goto ex_put_mtt;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put_mtt;
atomic_inc(&mtt->ref_count);
srq->mtt = mtt;
put_res(dev, slave, mtt->com.res_id, RES_MTT);
res_end_move(dev, slave, RES_SRQ, srqn);
return 0;
ex_put_mtt:
put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
res_abort_move(dev, slave, RES_SRQ, srqn);
return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int srqn = vhcr->in_modifier;
struct res_srq *srq = NULL;
err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
if (err)
return err;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_abort;
atomic_dec(&srq->mtt->ref_count);
if (srq->cq)
atomic_dec(&srq->cq->ref_count);
res_end_move(dev, slave, RES_SRQ, srqn);
return 0;
ex_abort:
res_abort_move(dev, slave, RES_SRQ, srqn);
return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int srqn = vhcr->in_modifier;
struct res_srq *srq;
err = get_res(dev, slave, srqn, RES_SRQ, &srq);
if (err)
return err;
if (srq->com.from_state != RES_SRQ_HW) {
err = -EBUSY;
goto out;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
put_res(dev, slave, srqn, RES_SRQ);
return err;
}
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int srqn = vhcr->in_modifier;
struct res_srq *srq;
err = get_res(dev, slave, srqn, RES_SRQ, &srq);
if (err)
return err;
if (srq->com.from_state != RES_SRQ_HW) {
err = -EBUSY;
goto out;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
put_res(dev, slave, srqn, RES_SRQ);
return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
err = get_res(dev, slave, qpn, RES_QP, &qp);
if (err)
return err;
if (qp->com.from_state != RES_QP_HW) {
err = -EBUSY;
goto out;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
put_res(dev, slave, qpn, RES_QP);
return err;
}
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_qp_context *context = inbox->buf + 8;
adjust_proxy_tun_qkey(dev, vhcr, context);
update_pkey_index(dev, slave, inbox);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
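/* Bit 6 of sched_queue selects the physical port. Rewrite it for the
* primary path (and for the alternate path when OPTPAR_ALT_ADDR_PATH is
* set) via mlx4_slave_convert_port() so the slave's virtual port number
* maps to the real one.
*/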
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
struct mlx4_qp_context *qpc,
struct mlx4_cmd_mailbox *inbox)
{
enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
u8 pri_sched_queue;
int port = mlx4_slave_convert_port(
dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
if (port < 0)
return -EINVAL;
pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
((port & 1) << 6);
if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
qpc->pri_path.sched_queue = pri_sched_queue;
}
if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
port = mlx4_slave_convert_port(
dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
+ 1) - 1;
if (port < 0)
return -EINVAL;
qpc->alt_path.sched_queue =
(qpc->alt_path.sched_queue & ~(1 << 6)) |
(port & 1) << 6;
}
return 0;
}
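/* For RoCE QPs, the smac index in the QP context must name a MAC that the
* slave actually registered, so a slave cannot transmit with a source MAC
* it does not own.
*/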
static int roce_verify_mac(struct mlx4_dev *dev, int slave,
struct mlx4_qp_context *qpc,
struct mlx4_cmd_mailbox *inbox)
{
u64 mac;
int port;
u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
u8 sched = *(u8 *)(inbox->buf + 64);
u8 smac_ix;
port = (sched >> 6 & 1) + 1;
if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
return -ENOENT;
}
return 0;
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct mlx4_qp_context *qpc = inbox->buf + 8;
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
__be32 orig_param3 = qpc->param3;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
u8 orig_vlan_index = qpc->pri_path.vlan_index;
u8 orig_feup = qpc->pri_path.feup;
err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)
return err;
err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
if (err)
return err;
if (roce_verify_mac(dev, slave, qpc, inbox))
return -EINVAL;
update_pkey_index(dev, slave, inbox);
update_gid(dev, inbox, (u8)slave);
adjust_proxy_tun_qkey(dev, vhcr, qpc);
orig_sched_queue = qpc->pri_path.sched_queue;
err = update_vport_qp_param(dev, inbox, slave, qpn);
if (err)
return err;
err = get_res(dev, slave, qpn, RES_QP, &qp);
if (err)
return err;
if (qp->com.from_state != RES_QP_HW) {
err = -EBUSY;
goto out;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
/* if no error, save the sched_queue value passed in by the VF. This is
* essentially the QoS value provided by the VF. This will be useful
* if we allow dynamic changes from VST back to VGT
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
qp->param3 = orig_param3;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;
qp->vlan_index = orig_vlan_index;
qp->feup = orig_feup;
}
put_res(dev, slave, qpn, RES_QP);
return err;
}
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
if (err)
return err;
update_pkey_index(dev, slave, inbox);
update_gid(dev, inbox, (u8)slave);
adjust_proxy_tun_qkey(dev, vhcr, context);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
if (err)
return err;
update_pkey_index(dev, slave, inbox);
update_gid(dev, inbox, (u8)slave);
adjust_proxy_tun_qkey(dev, vhcr, context);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_qp_context *context = inbox->buf + 8;
int err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
adjust_proxy_tun_qkey(dev, vhcr, context);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
if (err)
return err;
adjust_proxy_tun_qkey(dev, vhcr, context);
update_gid(dev, inbox, (u8)slave);
update_pkey_index(dev, slave, inbox);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
if (err)
return err;
adjust_proxy_tun_qkey(dev, vhcr, context);
update_gid(dev, inbox, (u8)slave);
update_pkey_index(dev, slave, inbox);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
if (err)
return err;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_abort;
atomic_dec(&qp->mtt->ref_count);
atomic_dec(&qp->rcq->ref_count);
atomic_dec(&qp->scq->ref_count);
if (qp->srq)
atomic_dec(&qp->srq->ref_count);
res_end_move(dev, slave, RES_QP, qpn);
return 0;
ex_abort:
res_abort_move(dev, slave, RES_QP, qpn);
return err;
}
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
struct res_qp *rqp, u8 *gid)
{
struct res_gid *res;
list_for_each_entry(res, &rqp->mcg_list, list) {
if (!memcmp(res->gid, gid, 16))
return res;
}
return NULL;
}
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
u8 *gid, enum mlx4_protocol prot,
enum mlx4_steer_type steer, u64 reg_id)
{
struct res_gid *res;
int err;
res = kzalloc(sizeof *res, GFP_KERNEL);
if (!res)
return -ENOMEM;
spin_lock_irq(&rqp->mcg_spl);
if (find_gid(dev, slave, rqp, gid)) {
kfree(res);
err = -EEXIST;
} else {
memcpy(res->gid, gid, 16);
res->prot = prot;
res->steer = steer;
res->reg_id = reg_id;
list_add_tail(&res->list, &rqp->mcg_list);
err = 0;
}
spin_unlock_irq(&rqp->mcg_spl);
return err;
}
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
u8 *gid, enum mlx4_protocol prot,
enum mlx4_steer_type steer, u64 *reg_id)
{
struct res_gid *res;
int err;
spin_lock_irq(&rqp->mcg_spl);
res = find_gid(dev, slave, rqp, gid);
if (!res || res->prot != prot || res->steer != steer)
err = -EINVAL;
else {
*reg_id = res->reg_id;
list_del(&res->list);
kfree(res);
err = 0;
}
spin_unlock_irq(&rqp->mcg_spl);
return err;
}
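/*
 * Attach a QP to a multicast group on behalf of a VF. The mechanism
 * depends on the device's steering mode: device-managed flow steering
 * translates the request into a flow rule, while B0 steering uses the
 * legacy attach path (after remapping the port byte of the GID for
 * Ethernet).
 */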
static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
u8 gid[16], int block_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type type, u64 *reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
int port = mlx4_slave_convert_port(dev, slave, gid[5]);
if (port < 0)
return port;
return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
block_loopback, prot,
reg_id);
}
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH) {
int port = mlx4_slave_convert_port(dev, slave, gid[5]);
if (port < 0)
return port;
gid[5] = port;
}
return mlx4_qp_attach_common(dev, qp, gid,
block_loopback, prot, type);
default:
return -EINVAL;
}
}
static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], enum mlx4_protocol prot,
enum mlx4_steer_type type, u64 reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
return mlx4_flow_detach(dev, reg_id);
case MLX4_STEERING_MODE_B0:
return mlx4_qp_detach_common(dev, qp, gid, prot, type);
default:
return -EINVAL;
}
}
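/*
 * For Ethernet steering, byte 5 of the GID carries the port number as
 * seen by the VF; rewrite it to the real physical port before the GID
 * is handed to the hardware.
 */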
static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
u8 *gid, enum mlx4_protocol prot)
{
int real_port;
if (prot != MLX4_PROT_ETH)
return 0;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
if (real_port < 0)
return -EINVAL;
gid[5] = real_port;
}
return 0;
}
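/*
 * Command layout, as decoded by the masks below:
 *   vhcr->in_modifier bits  0-23: QP number
 *   vhcr->in_modifier bits 28-30: protocol
 *   vhcr->in_modifier bit     31: block-loopback flag
 *   vhcr->op_modifier           : non-zero = attach, zero = detach
 *   gid[7] bit 1                : steering type
 */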
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_qp qp; /* dummy for calling attach/detach */
u8 *gid = inbox->buf;
enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
int err;
int qpn;
struct res_qp *rqp;
u64 reg_id = 0;
int attach = vhcr->op_modifier;
int block_loopback = vhcr->in_modifier >> 31;
u8 steer_type_mask = 2;
enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
qpn = vhcr->in_modifier & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err)
return err;
qp.qpn = qpn;
if (attach) {
err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
type, &reg_id);
if (err) {
pr_err("Fail to attach rule to qp 0x%x\n", qpn);
goto ex_put;
}
err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
if (err)
goto ex_detach;
} else {
err = mlx4_adjust_port(dev, slave, gid, prot);
if (err)
goto ex_put;
err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
if (err)
goto ex_put;
err = qp_detach(dev, &qp, gid, prot, type, reg_id);
if (err)
pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
qpn, reg_id);
}
put_res(dev, slave, qpn, RES_QP);
return err;
ex_detach:
qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
put_res(dev, slave, qpn, RES_QP);
return err;
}
/*
 * MAC validation for Flow Steering rules.
 * A VF can attach rules only with a MAC address that is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
struct list_head *rlist)
{
struct mac_res *res, *tmp;
__be64 be_mac;
/* make sure it isn't a multicast or broadcast MAC */
if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
!is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
list_for_each_entry_safe(res, tmp, rlist, list) {
be_mac = cpu_to_be64(res->mac << 16);
if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
return 0;
}
pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
eth_header->eth.dst_mac, slave);
return -EINVAL;
}
return 0;
}
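/*
 * If the rule is a single Ethernet header matching a multicast or
 * broadcast MAC (i.e. no further headers follow), set its priority to
 * the NIC domain.
 */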
static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
struct _rule_hw *eth_header)
{
if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
struct mlx4_net_trans_rule_hw_eth *eth =
(struct mlx4_net_trans_rule_hw_eth *)eth_header;
struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
next_rule->rsvd == 0;
if (last_rule)
ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
}
}
/*
 * If the rule lacks an eth header, prepend one matching a MAC address
 * assigned to the VF (the existing headers are shifted to make room).
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct list_head *rlist, int header_id)
{
struct mac_res *res, *tmp;
u8 port;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
struct mlx4_net_trans_rule_hw_eth *eth_header;
struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
__be64 be_mac = 0;
__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
port = ctrl->port;
eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
/* Clear a space in the inbox for eth header */
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_IPV4:
ip_header =
(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
memmove(ip_header, eth_header,
sizeof(*ip_header) + sizeof(*l4_header));
break;
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
(eth_header + 1);
memmove(l4_header, eth_header, sizeof(*l4_header));
break;
default:
return -EINVAL;
}
list_for_each_entry_safe(res, tmp, rlist, list) {
if (port == res->port) {
be_mac = cpu_to_be64(res->mac << 16);
break;
}
}
if (!be_mac) {
pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
port);
return -EINVAL;
}
memset(eth_header, 0, sizeof(*eth_header));
eth_header->size = sizeof(*eth_header) >> 2;
eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
return 0;
}
#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
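/*
 * A VF may only update the source MAC index of the primary address
 * path; a request that touches any other field is rejected with
 * -EPERM, and the new index is validated against the MACs registered
 * to the slave before the command is forwarded to firmware.
 */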
int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd_info)
{
int err;
u32 qpn = vhcr->in_modifier & 0xffffff;
struct res_qp *rqp;
u64 mac;
unsigned port;
u64 pri_addr_path_mask;
struct mlx4_update_qp_context *cmd;
int smac_index;
cmd = (struct mlx4_update_qp_context *)inbox->buf;
pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
(pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
return -EPERM;
/* Just change the smac for the QP */
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
return err;
}
port = (rqp->sched_queue >> 6 & 1) + 1;
if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
smac_index = cmd->qp_context.pri_path.grh_mylmc;
err = mac_find_smac_ix_in_slave(dev, slave, port,
smac_index, &mac);
if (err) {
mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
qpn, smac_index);
goto err_mac;
}
}
err = mlx4_cmd(dev, inbox->dma,
vhcr->in_modifier, 0,
MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err) {
mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
goto err_mac;
}
err_mac:
put_res(dev, slave, qpn, RES_QP);
return err;
}
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
int err;
int qpn;
struct res_qp *rqp;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
struct _rule_hw *rule_header;
int header_id;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
return -EOPNOTSUPP;
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
if (ctrl->port <= 0)
return -EINVAL;
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
return err;
}
rule_header = (struct _rule_hw *)(ctrl + 1);
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
handle_eth_header_mcast_prio(ctrl, rule_header);
if (slave == dev->caps.function)
goto execute;
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
if (validate_eth_header_mac(slave, rule_header, rlist)) {
err = -EINVAL;
goto err_put;
}
break;
case MLX4_NET_TRANS_RULE_ID_IB:
break;
case MLX4_NET_TRANS_RULE_ID_IPV4:
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
err = -EINVAL;
goto err_put;
}
vhcr->in_modifier +=
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
break;
default:
pr_err("Corrupted mailbox\n");
err = -EINVAL;
goto err_put;
}
execute:
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
vhcr->in_modifier, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
goto err_put;
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
if (err) {
mlx4_err(dev, "Fail to add flow steering resources\n");
/* detach rule*/
mlx4_cmd(dev, vhcr->out_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
goto err_put;
}
atomic_inc(&rqp->ref_count);
err_put:
put_res(dev, slave, qpn, RES_QP);
return err;
}
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct res_qp *rqp;
struct res_fs_rule *rrule;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
return -EOPNOTSUPP;
err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
if (err)
return err;
/* Release the rule from busy state before removal */
put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
if (err)
return err;
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
if (err) {
mlx4_err(dev, "Fail to remove flow steering resources\n");
goto out;
}
err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (!err)
atomic_dec(&rqp->ref_count);
out:
put_res(dev, slave, rrule->qpn, RES_QP);
return err;
}
enum {
BUSY_MAX_RETRIES = 10
};
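/*
 * Hold the counter busy (via get_res/put_res) for the duration of the
 * firmware query so it cannot be freed or handed out concurrently.
 */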
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int index = vhcr->in_modifier & 0xffff;
err = get_res(dev, slave, index, RES_COUNTER, NULL);
if (err)
return err;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
put_res(dev, slave, index, RES_COUNTER);
return err;
}
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
struct res_gid *rgid;
struct res_gid *tmp;
struct mlx4_qp qp; /* dummy for calling attach/detach */
list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
mlx4_flow_detach(dev, rgid->reg_id);
break;
case MLX4_STEERING_MODE_B0:
qp.qpn = rqp->local_qpn;
(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
rgid->prot, rgid->steer);
break;
}
list_del(&rgid->list);
kfree(rgid);
}
}
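/*
 * Mark every resource of @type owned by @slave busy and flag it for
 * removal. Resources that are already busy in another flow cannot be
 * claimed; the number of those is returned so the caller can retry.
 */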
static int _move_all_busy(struct mlx4_dev *dev, int slave,
enum mlx4_resource type, int print)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker =
&priv->mfunc.master.res_tracker;
struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
struct res_common *r;
struct res_common *tmp;
int busy;
busy = 0;
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(r, tmp, rlist, list) {
if (r->owner == slave) {
if (!r->removing) {
if (r->state == RES_ANY_BUSY) {
if (print)
mlx4_dbg(dev,
"%s id 0x%llx is busy\n",
resource_str(type),
r->res_id);
++busy;
} else {
r->from_state = r->state;
r->state = RES_ANY_BUSY;
r->removing = 1;
}
}
}
}
spin_unlock_irq(mlx4_tlock(dev));
return busy;
}
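/*
 * Retry _move_all_busy() for up to five seconds, yielding between
 * attempts; if resources are still busy after the timeout, run one
 * final pass with printing enabled so the offenders are logged.
 */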
static int move_all_busy(struct mlx4_dev *dev, int slave,
enum mlx4_resource type)
{
unsigned long begin;
int busy;
begin = jiffies;
do {
busy = _move_all_busy(dev, slave, type, 0);
if (time_after(jiffies, begin + 5 * HZ))
break;
if (busy)
cond_resched();
} while (busy);
if (busy)
busy = _move_all_busy(dev, slave, type, 1);
return busy;
}
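/*
 * Each of the rem_slave_*() helpers below walks the slave's list for
 * one resource type and unwinds every entry through its state machine
 * (for QPs: RES_QP_HW -> RES_QP_MAPPED -> RES_QP_RESERVED -> freed),
 * issuing the matching firmware command and dropping reference counts
 * at each step.
 */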
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *qp_list =
&tracker->slave_list[slave].res_list[RES_QP];
struct res_qp *qp;
struct res_qp *tmp;
int state;
u64 in_param;
int qpn;
int err;
err = move_all_busy(dev, slave, RES_QP);
if (err)
mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (qp->com.owner == slave) {
qpn = qp->com.res_id;
detach_qp(dev, slave, qp);
state = qp->com.from_state;
while (state != 0) {
switch (state) {
case RES_QP_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&qp->com.node,
&tracker->res_tree[RES_QP]);
list_del(&qp->com.list);
spin_unlock_irq(mlx4_tlock(dev));
if (!valid_reserved(dev, slave, qpn)) {
__mlx4_qp_release_range(dev, qpn, 1);
mlx4_release_resource(dev, slave,
RES_QP, 1, 0);
}
kfree(qp);
state = 0;
break;
case RES_QP_MAPPED:
if (!valid_reserved(dev, slave, qpn))
__mlx4_qp_free_icm(dev, qpn);
state = RES_QP_RESERVED;
break;
case RES_QP_HW:
in_param = slave;
err = mlx4_cmd(dev, in_param,
qp->local_qpn, 2,
MLX4_CMD_2RST_QP,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
slave, qp->local_qpn);
atomic_dec(&qp->rcq->ref_count);
atomic_dec(&qp->scq->ref_count);
atomic_dec(&qp->mtt->ref_count);
if (qp->srq)
atomic_dec(&qp->srq->ref_count);
state = RES_QP_MAPPED;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *srq_list =
&tracker->slave_list[slave].res_list[RES_SRQ];
struct res_srq *srq;
struct res_srq *tmp;
int state;
u64 in_param;
LIST_HEAD(tlist);
int srqn;
int err;
err = move_all_busy(dev, slave, RES_SRQ);
if (err)
mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (srq->com.owner == slave) {
srqn = srq->com.res_id;
state = srq->com.from_state;
while (state != 0) {
switch (state) {
case RES_SRQ_ALLOCATED:
__mlx4_srq_free_icm(dev, srqn);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&srq->com.node,
&tracker->res_tree[RES_SRQ]);
list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave,
RES_SRQ, 1, 0);
kfree(srq);
state = 0;
break;
case RES_SRQ_HW:
in_param = slave;
err = mlx4_cmd(dev, in_param, srqn, 1,
MLX4_CMD_HW2SW_SRQ,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
slave, srqn);
atomic_dec(&srq->mtt->ref_count);
if (srq->cq)
atomic_dec(&srq->cq->ref_count);
state = RES_SRQ_ALLOCATED;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *cq_list =
&tracker->slave_list[slave].res_list[RES_CQ];
struct res_cq *cq;
struct res_cq *tmp;
int state;
u64 in_param;
LIST_HEAD(tlist);
int cqn;
int err;
err = move_all_busy(dev, slave, RES_CQ);
if (err)
mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
cqn = cq->com.res_id;
state = cq->com.from_state;
while (state != 0) {
switch (state) {
case RES_CQ_ALLOCATED:
__mlx4_cq_free_icm(dev, cqn);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&cq->com.node,
&tracker->res_tree[RES_CQ]);
list_del(&cq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave,
RES_CQ, 1, 0);
kfree(cq);
state = 0;
break;
case RES_CQ_HW:
in_param = slave;
err = mlx4_cmd(dev, in_param, cqn, 1,
MLX4_CMD_HW2SW_CQ,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
slave, cqn);
atomic_dec(&cq->mtt->ref_count);
state = RES_CQ_ALLOCATED;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mpt_list =
&tracker->slave_list[slave].res_list[RES_MPT];
struct res_mpt *mpt;
struct res_mpt *tmp;
int state;
u64 in_param;
LIST_HEAD(tlist);
int mptn;
int err;
err = move_all_busy(dev, slave, RES_MPT);
if (err)
mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (mpt->com.owner == slave) {
mptn = mpt->com.res_id;
state = mpt->com.from_state;
while (state != 0) {
switch (state) {
case RES_MPT_RESERVED:
__mlx4_mpt_release(dev, mpt->key);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&mpt->com.node,
&tracker->res_tree[RES_MPT]);
list_del(&mpt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave,
RES_MPT, 1, 0);
kfree(mpt);
state = 0;
break;
case RES_MPT_MAPPED:
__mlx4_mpt_free_icm(dev, mpt->key);
state = RES_MPT_RESERVED;
break;
case RES_MPT_HW:
in_param = slave;
err = mlx4_cmd(dev, in_param, mptn, 0,
MLX4_CMD_HW2SW_MPT,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
slave, mptn);
if (mpt->mtt)
atomic_dec(&mpt->mtt->ref_count);
state = RES_MPT_MAPPED;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker =
&priv->mfunc.master.res_tracker;
struct list_head *mtt_list =
&tracker->slave_list[slave].res_list[RES_MTT];
struct res_mtt *mtt;
struct res_mtt *tmp;
int state;
LIST_HEAD(tlist);
int base;
int err;
err = move_all_busy(dev, slave, RES_MTT);
if (err)
mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (mtt->com.owner == slave) {
base = mtt->com.res_id;
state = mtt->com.from_state;
while (state != 0) {
switch (state) {
case RES_MTT_ALLOCATED:
__mlx4_free_mtt_range(dev, base,
mtt->order);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&mtt->com.node,
&tracker->res_tree[RES_MTT]);
list_del(&mtt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave, RES_MTT,
1 << mtt->order, 0);
kfree(mtt);
state = 0;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker =
&priv->mfunc.master.res_tracker;
struct list_head *fs_rule_list =
&tracker->slave_list[slave].res_list[RES_FS_RULE];
struct res_fs_rule *fs_rule;
struct res_fs_rule *tmp;
int state;
u64 base;
int err;
err = move_all_busy(dev, slave, RES_FS_RULE);
if (err)
mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (fs_rule->com.owner == slave) {
base = fs_rule->com.res_id;
state = fs_rule->com.from_state;
while (state != 0) {
switch (state) {
case RES_FS_RULE_ALLOCATED:
/* detach rule */
err = mlx4_cmd(dev, base, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&fs_rule->com.node,
&tracker->res_tree[RES_FS_RULE]);
list_del(&fs_rule->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(fs_rule);
state = 0;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *eq_list =
&tracker->slave_list[slave].res_list[RES_EQ];
struct res_eq *eq;
struct res_eq *tmp;
int err;
int state;
LIST_HEAD(tlist);
int eqn;
err = move_all_busy(dev, slave, RES_EQ);
if (err)
mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (eq->com.owner == slave) {
eqn = eq->com.res_id;
state = eq->com.from_state;
while (state != 0) {
switch (state) {
case RES_EQ_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&eq->com.node,
&tracker->res_tree[RES_EQ]);
list_del(&eq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(eq);
state = 0;
break;
case RES_EQ_HW:
err = mlx4_cmd(dev, slave, eqn & 0x3ff,
1, MLX4_CMD_HW2SW_EQ,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
slave, eqn & 0x3ff);
atomic_dec(&eq->mtt->ref_count);
state = RES_EQ_RESERVED;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *counter_list =
&tracker->slave_list[slave].res_list[RES_COUNTER];
struct res_counter *counter;
struct res_counter *tmp;
int err;
int index;
err = move_all_busy(dev, slave, RES_COUNTER);
if (err)
mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
if (counter->com.owner == slave) {
index = counter->com.res_id;
rb_erase(&counter->com.node,
&tracker->res_tree[RES_COUNTER]);
list_del(&counter->com.list);
kfree(counter);
__mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
}
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *xrcdn_list =
&tracker->slave_list[slave].res_list[RES_XRCD];
struct res_xrcdn *xrcd;
struct res_xrcdn *tmp;
int err;
int xrcdn;
err = move_all_busy(dev, slave, RES_XRCD);
if (err)
mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
if (xrcd->com.owner == slave) {
xrcdn = xrcd->com.res_id;
rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
list_del(&xrcd->com.list);
kfree(xrcd);
__mlx4_xrcd_free(dev, xrcdn);
}
}
spin_unlock_irq(mlx4_tlock(dev));
}
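/*
 * Free everything a slave still owns, e.g. when the VF goes away.
 * The order matters: flow steering rules reference QPs, and QPs hold
 * references on their CQs, SRQs and MTTs, so each class is removed
 * before the objects it pins.
 */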
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
mlx4_reset_roce_gids(dev, slave);
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
rem_slave_vlans(dev, slave);
rem_slave_macs(dev, slave);
rem_slave_fs_rule(dev, slave);
rem_slave_qps(dev, slave);
rem_slave_srqs(dev, slave);
rem_slave_cqs(dev, slave);
rem_slave_mrs(dev, slave);
rem_slave_eqs(dev, slave);
rem_slave_mtts(dev, slave);
rem_slave_counters(dev, slave);
rem_slave_xrcdns(dev, slave);
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
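/*
 * Deferred work that pushes a new VST/VGT VLAN and QoS configuration,
 * via UPDATE_QP, to every eligible QP (in RES_QP_HW, past INIT2RTR,
 * not reserved or RSS) that the slave owns on the given port.
 */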
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
struct mlx4_vf_immed_vlan_work *work =
container_of(_work, struct mlx4_vf_immed_vlan_work, work);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_update_qp_context *upd_context;
struct mlx4_dev *dev = &work->priv->dev;
struct mlx4_resource_tracker *tracker =
&work->priv->mfunc.master.res_tracker;
struct list_head *qp_list =
&tracker->slave_list[work->slave].res_list[RES_QP];
struct res_qp *qp;
struct res_qp *tmp;
u64 qp_path_mask_vlan_ctrl =
((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
(1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
int err;
int port, errors = 0;
u8 vlan_control;
if (mlx4_is_slave(dev)) {
mlx4_warn(dev, "Trying to update-qp in slave %d\n",
work->slave);
goto out;
}
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
goto out;
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
else if (!work->vlan_id)
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
else
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
upd_context = mailbox->buf;
upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (qp->com.owner == work->slave) {
if (qp->com.from_state != RES_QP_HW ||
!qp->sched_queue || /* no INIT2RTR trans yet */
mlx4_is_qp_reserved(dev, qp->local_qpn) ||
qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
spin_lock_irq(mlx4_tlock(dev));
continue;
}
port = (qp->sched_queue >> 6 & 1) + 1;
if (port != work->port) {
spin_lock_irq(mlx4_tlock(dev));
continue;
}
if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
else
upd_context->primary_addr_path_mask =
cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
if (work->vlan_id == MLX4_VGT) {
upd_context->qp_context.param3 = qp->param3;
upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
upd_context->qp_context.pri_path.feup = qp->feup;
upd_context->qp_context.pri_path.sched_queue =
qp->sched_queue;
} else {
upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
upd_context->qp_context.pri_path.vlan_control = vlan_control;
upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
upd_context->qp_context.pri_path.fvl_rx =
qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.fl =
qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
upd_context->qp_context.pri_path.feup =
qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.sched_queue =
qp->sched_queue & 0xC7;
upd_context->qp_context.pri_path.sched_queue |=
((work->qos & 0x7) << 3);
upd_context->qp_mask |=
cpu_to_be64(1ULL <<
MLX4_UPD_QP_MASK_QOS_VPP);
upd_context->qp_context.qos_vport =
work->qos_vport;
}
err = mlx4_cmd(dev, mailbox->dma,
qp->local_qpn & 0xffffff,
0, MLX4_CMD_UPDATE_QP,
MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (err) {
mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
work->slave, port, qp->local_qpn, err);
errors++;
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
mlx4_free_cmd_mailbox(dev, mailbox);
if (errors)
mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
errors, work->slave, work->port);
/* unregister previous vlan_id if needed and we had no errors
* while updating the QPs
*/
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
NO_INDX != work->orig_vlan_ix)
__mlx4_unregister_vlan(&work->priv->dev, work->port,
work->orig_vlan_id);
out:
kfree(work);
return;
}
| gpl-2.0 |
psyke83/android_kernel_samsung_msm-codeaurora | arch/powerpc/sysdev/mpic_pasemi_msi.c | 895 | 4487 | /*
* Copyright 2007, Olof Johansson, PA Semi
*
* Based on arch/powerpc/sysdev/mpic_u3msi.c:
*
* Copyright 2006, Segher Boessenkool, IBM Corporation.
* Copyright 2006-2007, Michael Ellerman, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2 of the
* License.
*
*/
#undef DEBUG
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/msi.h>
#include <asm/mpic.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>
#include "mpic.h"
/* Allocate 16 interrupts per device, to give an alignment of 16,
* since that's the size of the grouping w.r.t. affinity. If someone
* needs more than 32 MSIs down the road we'll have to rethink this,
* but it should be OK for now.
*/
#define ALLOC_CHUNK 16
#define PASEMI_MSI_ADDR 0xfc080000
/* A bit ugly, can we get this from the pci_dev somehow? */
static struct mpic *msi_mpic;
static void mpic_pasemi_msi_mask_irq(unsigned int irq)
{
pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq);
mask_msi_irq(irq);
mpic_mask_irq(irq);
}
static void mpic_pasemi_msi_unmask_irq(unsigned int irq)
{
pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq);
mpic_unmask_irq(irq);
unmask_msi_irq(irq);
}
static struct irq_chip mpic_pasemi_msi_chip = {
.shutdown = mpic_pasemi_msi_mask_irq,
.mask = mpic_pasemi_msi_mask_irq,
.unmask = mpic_pasemi_msi_unmask_irq,
.eoi = mpic_end_irq,
.set_type = mpic_set_irq_type,
.set_affinity = mpic_set_affinity,
.name = "PASEMI-MSI",
};
static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
if (type == PCI_CAP_ID_MSIX)
pr_debug("pasemi_msi: MSI-X untested, trying anyway\n");
return 0;
}
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
set_irq_msi(entry->irq, NULL);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
virq_to_hw(entry->irq), ALLOC_CHUNK);
irq_dispose_mapping(entry->irq);
}
return;
}
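/*
 * For each MSI descriptor, reserve an ALLOC_CHUNK-aligned block of
 * hwirqs from the bitmap, map the first one to a virq, and program
 * the message: the address is the fixed PASEMI_MSI_ADDR window and
 * the data is the hwirq's offset from 0x200 (see the comment on the
 * [0...511] -> MSI [512...1023] mapping below).
 */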
static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
unsigned int virq;
struct msi_desc *entry;
struct msi_msg msg;
int hwirq;
pr_debug("pasemi_msi_setup_msi_irqs, pdev %p nvec %d type %d\n",
pdev, nvec, type);
msg.address_hi = 0;
msg.address_lo = PASEMI_MSI_ADDR;
list_for_each_entry(entry, &pdev->msi_list, list) {
/* Allocate 16 interrupts for now, since that's the grouping for
* affinity. This can be changed later if it turns out 32 is too
* few MSIs for someone, but restrictions will apply to how the
* sources can be changed independently.
*/
hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap,
ALLOC_CHUNK);
if (hwirq < 0) {
pr_debug("pasemi_msi: failed allocating hwirq\n");
return hwirq;
}
virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
if (virq == NO_IRQ) {
pr_debug("pasemi_msi: failed mapping hwirq 0x%x\n",
hwirq);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq,
ALLOC_CHUNK);
return -ENOSPC;
}
/* Vector on MSI is really an offset, the hardware adds
* it to the value written at the magic address. So set
* it to 0 to remain sane.
*/
mpic_set_vector(virq, 0);
set_irq_msi(virq, entry);
set_irq_chip(virq, &mpic_pasemi_msi_chip);
set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \
"addr 0x%x\n", virq, hwirq, msg.address_lo);
/* Likewise, the device writes [0...511] into the target
* register to generate MSI [512...1023]
*/
msg.data = hwirq-0x200;
write_msi_msg(virq, &msg);
}
return 0;
}
int mpic_pasemi_msi_init(struct mpic *mpic)
{
int rc;
if (!mpic->irqhost->of_node ||
!of_device_is_compatible(mpic->irqhost->of_node,
"pasemi,pwrficient-openpic"))
return -ENODEV;
rc = mpic_msi_init_allocator(mpic);
if (rc) {
pr_debug("pasemi_msi: Error allocating bitmap!\n");
return rc;
}
pr_debug("pasemi_msi: Registering PA Semi MPIC MSI callbacks\n");
msi_mpic = mpic;
WARN_ON(ppc_md.setup_msi_irqs);
ppc_md.setup_msi_irqs = pasemi_msi_setup_msi_irqs;
ppc_md.teardown_msi_irqs = pasemi_msi_teardown_msi_irqs;
ppc_md.msi_check_device = pasemi_msi_check_device;
return 0;
}
| gpl-2.0 |
JasonMaloney-crespo4g/android_kernel_samsung_crespo | fs/btrfs/volumes.c | 1663 | 94410 | /*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
static void lock_chunks(struct btrfs_root *root)
{
mutex_lock(&root->fs_info->chunk_mutex);
}
static void unlock_chunks(struct btrfs_root *root)
{
mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device;
WARN_ON(fs_devices->opened);
while (!list_empty(&fs_devices->devices)) {
device = list_entry(fs_devices->devices.next,
struct btrfs_device, dev_list);
list_del(&device->dev_list);
kfree(device->name);
kfree(device);
}
kfree(fs_devices);
}
int btrfs_cleanup_fs_uuids(void)
{
struct btrfs_fs_devices *fs_devices;
while (!list_empty(&fs_uuids)) {
fs_devices = list_entry(fs_uuids.next,
struct btrfs_fs_devices, list);
list_del(&fs_devices->list);
free_fs_devices(fs_devices);
}
return 0;
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
u64 devid, u8 *uuid)
{
struct btrfs_device *dev;
list_for_each_entry(dev, head, dev_list) {
if (dev->devid == devid &&
(!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
return dev;
}
}
return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
struct btrfs_fs_devices *fs_devices;
list_for_each_entry(fs_devices, &fs_uuids, list) {
if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
return fs_devices;
}
return NULL;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
struct bio *head, struct bio *tail)
{
struct bio *old_head;
old_head = pending_bios->head;
pending_bios->head = head;
if (pending_bios->tail)
tail->bi_next = old_head;
else
pending_bios->tail = tail;
}
/*
* we try to collect pending bios for a device so we don't get a large
* number of procs sending bios down to the same device. This greatly
* improves the scheduler's ability to collect and merge the bios.
*
* But, it also turns into a long list of bios to process and that is sure
* to eventually make the worker thread block. The solution here is to
* make some progress and then put this work struct back at the end of
* the list if the block device is congested. This way, multiple devices
* can make progress from a single worker thread.
*/
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
struct bio *pending;
struct backing_dev_info *bdi;
struct btrfs_fs_info *fs_info;
struct btrfs_pending_bios *pending_bios;
struct bio *tail;
struct bio *cur;
int again = 0;
unsigned long num_run;
unsigned long batch_run = 0;
unsigned long limit;
unsigned long last_waited = 0;
int force_reg = 0;
struct blk_plug plug;
/*
* this function runs all the bios we've collected for
* a particular device. We don't want to wander off to
* another device without first sending all of these down.
* So, setup a plug here and finish it off before we return
*/
blk_start_plug(&plug);
bdi = blk_get_backing_dev_info(device->bdev);
fs_info = device->dev_root->fs_info;
limit = btrfs_async_submit_limit(fs_info);
limit = limit * 2 / 3;
loop:
spin_lock(&device->io_lock);
loop_lock:
num_run = 0;
/* take all the bios off the list at once and process them
* later on (without the lock held). But, remember the
* tail and other pointers so the bios can be properly reinserted
* into the list if we hit congestion
*/
if (!force_reg && device->pending_sync_bios.head) {
pending_bios = &device->pending_sync_bios;
force_reg = 1;
} else {
pending_bios = &device->pending_bios;
force_reg = 0;
}
pending = pending_bios->head;
tail = pending_bios->tail;
WARN_ON(pending && !tail);
/*
* if pending was null this time around, no bios need processing
* at all and we can stop. Otherwise it'll loop back up again
* and do an additional check so no bios are missed.
*
* device->running_pending is used to synchronize with the
* schedule_bio code.
*/
if (device->pending_sync_bios.head == NULL &&
device->pending_bios.head == NULL) {
again = 0;
device->running_pending = 0;
} else {
again = 1;
device->running_pending = 1;
}
pending_bios->head = NULL;
pending_bios->tail = NULL;
spin_unlock(&device->io_lock);
while (pending) {
rmb();
/* we want to work on both lists, but do more bios on the
* sync list than the regular list
*/
if ((num_run > 32 &&
pending_bios != &device->pending_sync_bios &&
device->pending_sync_bios.head) ||
(num_run > 64 && pending_bios == &device->pending_sync_bios &&
device->pending_bios.head)) {
spin_lock(&device->io_lock);
requeue_list(pending_bios, pending, tail);
goto loop_lock;
}
cur = pending;
pending = pending->bi_next;
cur->bi_next = NULL;
atomic_dec(&fs_info->nr_async_bios);
if (atomic_read(&fs_info->nr_async_bios) < limit &&
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait);
BUG_ON(atomic_read(&cur->bi_cnt) == 0);
submit_bio(cur->bi_rw, cur);
num_run++;
batch_run++;
if (need_resched())
cond_resched();
/*
* we made progress, there is more work to do and the bdi
* is now congested. Back off and let other work structs
* run instead
*/
if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
fs_info->fs_devices->open_devices > 1) {
struct io_context *ioc;
ioc = current->io_context;
/*
* the main goal here is that we don't want to
* block if we're going to be able to submit
* more requests without blocking.
*
* This code does two great things, it pokes into
* the elevator code from a filesystem _and_
* it makes assumptions about how batching works.
*/
if (ioc && ioc->nr_batch_requests > 0 &&
time_before(jiffies, ioc->last_waited + HZ/50UL) &&
(last_waited == 0 ||
ioc->last_waited == last_waited)) {
/*
* we want to go through our batch of
* requests and stop. So, we copy out
* the ioc->last_waited time and test
* against it before looping
*/
last_waited = ioc->last_waited;
if (need_resched())
cond_resched();
continue;
}
spin_lock(&device->io_lock);
requeue_list(pending_bios, pending, tail);
device->running_pending = 1;
spin_unlock(&device->io_lock);
btrfs_requeue_work(&device->work);
goto done;
}
}
cond_resched();
if (again)
goto loop;
spin_lock(&device->io_lock);
if (device->pending_bios.head || device->pending_sync_bios.head)
goto loop_lock;
spin_unlock(&device->io_lock);
done:
blk_finish_plug(&plug);
return 0;
}
static void pending_bios_fn(struct btrfs_work *work)
{
struct btrfs_device *device;
device = container_of(work, struct btrfs_device, work);
run_scheduled_bios(device);
}
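/*
 * Record a device found by a scan: create a new fs_devices for an
 * unknown fsid, add a btrfs_device for an unseen devid, or just
 * refresh the path of an already-known device. The highest generation
 * seen so far decides which device is considered latest.
 */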
static noinline int device_list_add(const char *path,
struct btrfs_super_block *disk_super,
u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
struct btrfs_device *device;
struct btrfs_fs_devices *fs_devices;
u64 found_transid = btrfs_super_generation(disk_super);
char *name;
fs_devices = find_fsid(disk_super->fsid);
if (!fs_devices) {
fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
if (!fs_devices)
return -ENOMEM;
INIT_LIST_HEAD(&fs_devices->devices);
INIT_LIST_HEAD(&fs_devices->alloc_list);
list_add(&fs_devices->list, &fs_uuids);
memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
fs_devices->latest_devid = devid;
fs_devices->latest_trans = found_transid;
mutex_init(&fs_devices->device_list_mutex);
device = NULL;
} else {
device = __find_device(&fs_devices->devices, devid,
disk_super->dev_item.uuid);
}
if (!device) {
if (fs_devices->opened)
return -EBUSY;
device = kzalloc(sizeof(*device), GFP_NOFS);
if (!device) {
/* we can safely leave the fs_devices entry around */
return -ENOMEM;
}
device->devid = devid;
device->work.func = pending_bios_fn;
memcpy(device->uuid, disk_super->dev_item.uuid,
BTRFS_UUID_SIZE);
spin_lock_init(&device->io_lock);
device->name = kstrdup(path, GFP_NOFS);
if (!device->name) {
kfree(device);
return -ENOMEM;
}
INIT_LIST_HEAD(&device->dev_alloc_list);
mutex_lock(&fs_devices->device_list_mutex);
list_add_rcu(&device->dev_list, &fs_devices->devices);
mutex_unlock(&fs_devices->device_list_mutex);
device->fs_devices = fs_devices;
fs_devices->num_devices++;
} else if (!device->name || strcmp(device->name, path)) {
name = kstrdup(path, GFP_NOFS);
if (!name)
return -ENOMEM;
kfree(device->name);
device->name = name;
if (device->missing) {
fs_devices->missing_devices--;
device->missing = 0;
}
}
if (found_transid > fs_devices->latest_trans) {
fs_devices->latest_devid = devid;
fs_devices->latest_trans = found_transid;
}
*fs_devices_ret = fs_devices;
return 0;
}
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
struct btrfs_fs_devices *fs_devices;
struct btrfs_device *device;
struct btrfs_device *orig_dev;
fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
if (!fs_devices)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&fs_devices->devices);
INIT_LIST_HEAD(&fs_devices->alloc_list);
INIT_LIST_HEAD(&fs_devices->list);
mutex_init(&fs_devices->device_list_mutex);
fs_devices->latest_devid = orig->latest_devid;
fs_devices->latest_trans = orig->latest_trans;
memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
/* We have held the volume lock, it is safe to get the devices. */
list_for_each_entry(orig_dev, &orig->devices, dev_list) {
device = kzalloc(sizeof(*device), GFP_NOFS);
if (!device)
goto error;
device->name = kstrdup(orig_dev->name, GFP_NOFS);
if (!device->name) {
kfree(device);
goto error;
}
device->devid = orig_dev->devid;
device->work.func = pending_bios_fn;
memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
spin_lock_init(&device->io_lock);
INIT_LIST_HEAD(&device->dev_list);
INIT_LIST_HEAD(&device->dev_alloc_list);
list_add(&device->dev_list, &fs_devices->devices);
device->fs_devices = fs_devices;
fs_devices->num_devices++;
}
return fs_devices;
error:
free_fs_devices(fs_devices);
return ERR_PTR(-ENOMEM);
}
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device, *next;
mutex_lock(&uuid_mutex);
again:
/* This is the initialized path, it is safe to release the devices. */
list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
if (device->in_fs_metadata)
continue;
if (device->bdev) {
blkdev_put(device->bdev, device->mode);
device->bdev = NULL;
fs_devices->open_devices--;
}
if (device->writeable) {
list_del_init(&device->dev_alloc_list);
device->writeable = 0;
fs_devices->rw_devices--;
}
list_del_init(&device->dev_list);
fs_devices->num_devices--;
kfree(device->name);
kfree(device);
}
if (fs_devices->seed) {
fs_devices = fs_devices->seed;
goto again;
}
mutex_unlock(&uuid_mutex);
return 0;
}
static void __free_device(struct work_struct *work)
{
struct btrfs_device *device;
device = container_of(work, struct btrfs_device, rcu_work);
if (device->bdev)
blkdev_put(device->bdev, device->mode);
kfree(device->name);
kfree(device);
}
static void free_device(struct rcu_head *head)
{
struct btrfs_device *device;
device = container_of(head, struct btrfs_device, rcu);
INIT_WORK(&device->rcu_work, __free_device);
schedule_work(&device->rcu_work);
}
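/*
 * On the last close, each device is swapped for a fresh copy with no
 * open bdev via list_replace_rcu(), and the old one is released after
 * an RCU grace period, so lockless readers of the device list never
 * see a half-closed device.
 */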
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device;
if (--fs_devices->opened > 0)
return 0;
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list) {
struct btrfs_device *new_device;
if (device->bdev)
fs_devices->open_devices--;
if (device->writeable) {
list_del_init(&device->dev_alloc_list);
fs_devices->rw_devices--;
}
if (device->can_discard)
fs_devices->num_can_discard--;
new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
BUG_ON(!new_device);
memcpy(new_device, device, sizeof(*new_device));
new_device->name = kstrdup(device->name, GFP_NOFS);
BUG_ON(device->name && !new_device->name);
new_device->bdev = NULL;
new_device->writeable = 0;
new_device->in_fs_metadata = 0;
new_device->can_discard = 0;
list_replace_rcu(&device->dev_list, &new_device->dev_list);
call_rcu(&device->rcu, free_device);
}
mutex_unlock(&fs_devices->device_list_mutex);
WARN_ON(fs_devices->open_devices);
WARN_ON(fs_devices->rw_devices);
fs_devices->opened = 0;
fs_devices->seeding = 0;
return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_fs_devices *seed_devices = NULL;
int ret;
mutex_lock(&uuid_mutex);
ret = __btrfs_close_devices(fs_devices);
if (!fs_devices->opened) {
seed_devices = fs_devices->seed;
fs_devices->seed = NULL;
}
mutex_unlock(&uuid_mutex);
while (seed_devices) {
fs_devices = seed_devices;
seed_devices = fs_devices->seed;
__btrfs_close_devices(fs_devices);
free_fs_devices(fs_devices);
}
return ret;
}
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder)
{
struct request_queue *q;
struct block_device *bdev;
struct list_head *head = &fs_devices->devices;
struct btrfs_device *device;
struct block_device *latest_bdev = NULL;
struct buffer_head *bh;
struct btrfs_super_block *disk_super;
u64 latest_devid = 0;
u64 latest_transid = 0;
u64 devid;
int seeding = 1;
int ret = 0;
flags |= FMODE_EXCL;
list_for_each_entry(device, head, dev_list) {
if (device->bdev)
continue;
if (!device->name)
continue;
bdev = blkdev_get_by_path(device->name, flags, holder);
if (IS_ERR(bdev)) {
printk(KERN_INFO "open %s failed\n", device->name);
goto error;
}
set_blocksize(bdev, 4096);
bh = btrfs_read_dev_super(bdev);
if (!bh) {
ret = -EINVAL;
goto error_close;
}
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
if (devid != device->devid)
goto error_brelse;
if (memcmp(device->uuid, disk_super->dev_item.uuid,
BTRFS_UUID_SIZE))
goto error_brelse;
device->generation = btrfs_super_generation(disk_super);
if (!latest_transid || device->generation > latest_transid) {
latest_devid = devid;
latest_transid = device->generation;
latest_bdev = bdev;
}
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
device->writeable = 0;
} else {
device->writeable = !bdev_read_only(bdev);
seeding = 0;
}
q = bdev_get_queue(bdev);
if (blk_queue_discard(q)) {
device->can_discard = 1;
fs_devices->num_can_discard++;
}
device->bdev = bdev;
device->in_fs_metadata = 0;
device->mode = flags;
if (!blk_queue_nonrot(bdev_get_queue(bdev)))
fs_devices->rotating = 1;
fs_devices->open_devices++;
if (device->writeable) {
fs_devices->rw_devices++;
list_add(&device->dev_alloc_list,
&fs_devices->alloc_list);
}
brelse(bh);
continue;
error_brelse:
brelse(bh);
error_close:
blkdev_put(bdev, flags);
error:
continue;
}
if (fs_devices->open_devices == 0) {
ret = -EIO;
goto out;
}
fs_devices->seeding = seeding;
fs_devices->opened = 1;
fs_devices->latest_bdev = latest_bdev;
fs_devices->latest_devid = latest_devid;
fs_devices->latest_trans = latest_transid;
fs_devices->total_rw_bytes = 0;
out:
return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder)
{
int ret;
mutex_lock(&uuid_mutex);
if (fs_devices->opened) {
fs_devices->opened++;
ret = 0;
} else {
ret = __btrfs_open_devices(fs_devices, flags, holder);
}
mutex_unlock(&uuid_mutex);
return ret;
}
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
struct btrfs_fs_devices **fs_devices_ret)
{
struct btrfs_super_block *disk_super;
struct block_device *bdev;
struct buffer_head *bh;
int ret;
u64 devid;
u64 transid;
mutex_lock(&uuid_mutex);
flags |= FMODE_EXCL;
bdev = blkdev_get_by_path(path, flags, holder);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
goto error;
}
ret = set_blocksize(bdev, 4096);
if (ret)
goto error_close;
bh = btrfs_read_dev_super(bdev);
if (!bh) {
ret = -EINVAL;
goto error_close;
}
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
transid = btrfs_super_generation(disk_super);
if (disk_super->label[0])
printk(KERN_INFO "device label %s ", disk_super->label);
else
printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
printk(KERN_CONT "devid %llu transid %llu %s\n",
(unsigned long long)devid, (unsigned long long)transid, path);
ret = device_list_add(path, disk_super, devid, fs_devices_ret);
brelse(bh);
error_close:
blkdev_put(bdev, flags);
error:
mutex_unlock(&uuid_mutex);
return ret;
}
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
u64 end, u64 *length)
{
struct btrfs_key key;
struct btrfs_root *root = device->dev_root;
struct btrfs_dev_extent *dev_extent;
struct btrfs_path *path;
u64 extent_end;
int ret;
int slot;
struct extent_buffer *l;
*length = 0;
if (start >= device->total_bytes)
return 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->reada = 2;
key.objectid = device->devid;
key.offset = start;
key.type = BTRFS_DEV_EXTENT_KEY;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
if (ret > 0) {
ret = btrfs_previous_item(root, path, key.objectid, key.type);
if (ret < 0)
goto out;
}
while (1) {
l = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(l)) {
ret = btrfs_next_leaf(root, path);
if (ret == 0)
continue;
if (ret < 0)
goto out;
break;
}
btrfs_item_key_to_cpu(l, &key, slot);
if (key.objectid < device->devid)
goto next;
if (key.objectid > device->devid)
break;
if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
goto next;
dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
extent_end = key.offset + btrfs_dev_extent_length(l,
dev_extent);
if (key.offset <= start && extent_end > end) {
*length = end - start + 1;
break;
} else if (key.offset <= start && extent_end > start)
*length += extent_end - start;
else if (key.offset > start && extent_end <= end)
*length += extent_end - key.offset;
else if (key.offset > start && key.offset <= end) {
*length += end - key.offset + 1;
break;
} else if (key.offset > end)
break;
next:
path->slots[0]++;
}
ret = 0;
out:
btrfs_free_path(path);
return ret;
}
/*
* find_free_dev_extent - find free space in the specified device
* @trans: transaction handler
* @device: the device which we search the free space in
* @num_bytes: the size of the free space that we need
* @start: store the start of the free space.
* @len: the size of the free space that we find, or the size of the max
* free space if we don't find suitable free space
*
* this uses a pretty simple search, the expectation is that it is
* called very infrequently and that a given device has a small number
* of extents
*
* @start is used to store the start of the free space if we find it. But if we
* don't find suitable free space, it will be used to store the start position
* of the max free space.
*
* @len is used to store the size of the free space that we find.
* But if we don't find suitable free space, it is used to store the size of
* the max free space.
*/
int find_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *len)
{
struct btrfs_key key;
struct btrfs_root *root = device->dev_root;
struct btrfs_dev_extent *dev_extent;
struct btrfs_path *path;
u64 hole_size;
u64 max_hole_start;
u64 max_hole_size;
u64 extent_end;
u64 search_start;
u64 search_end = device->total_bytes;
int ret;
int slot;
struct extent_buffer *l;
/* FIXME use last free of some kind */
/* we don't want to overwrite the superblock on the drive,
* so we make sure to start at an offset of at least 1MB
*/
search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
max_hole_start = search_start;
max_hole_size = 0;
if (search_start >= search_end) {
ret = -ENOSPC;
goto error;
}
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto error;
}
path->reada = 2;
key.objectid = device->devid;
key.offset = search_start;
key.type = BTRFS_DEV_EXTENT_KEY;
ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
if (ret < 0)
goto out;
if (ret > 0) {
ret = btrfs_previous_item(root, path, key.objectid, key.type);
if (ret < 0)
goto out;
}
while (1) {
l = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(l)) {
ret = btrfs_next_leaf(root, path);
if (ret == 0)
continue;
if (ret < 0)
goto out;
break;
}
btrfs_item_key_to_cpu(l, &key, slot);
if (key.objectid < device->devid)
goto next;
if (key.objectid > device->devid)
break;
if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
goto next;
if (key.offset > search_start) {
hole_size = key.offset - search_start;
if (hole_size > max_hole_size) {
max_hole_start = search_start;
max_hole_size = hole_size;
}
/*
* If this free space is greater than what we need, it must
* be the max free space that we have found until now, so
* max_hole_start must point to the start of this free space
* and the length of this free space is stored in
* max_hole_size. Thus, we return max_hole_start and
* max_hole_size and go back to the caller.
*/
if (hole_size >= num_bytes) {
ret = 0;
goto out;
}
}
dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
extent_end = key.offset + btrfs_dev_extent_length(l,
dev_extent);
if (extent_end > search_start)
search_start = extent_end;
next:
path->slots[0]++;
cond_resched();
}
hole_size = search_end - search_start;
if (hole_size > max_hole_size) {
max_hole_start = search_start;
max_hole_size = hole_size;
}
/* See above. */
if (hole_size < num_bytes)
ret = -ENOSPC;
else
ret = 0;
out:
btrfs_free_path(path);
error:
*start = max_hole_start;
if (len)
*len = max_hole_size;
return ret;
}
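/*
* btrfs_free_dev_extent - remove the dev extent item that covers @start
*
* Looks up the dev extent at (device->devid, @start); if the exact key is
* not found, it steps back to the previous dev extent item, which must
* still cover @start. The extent's length is subtracted from
* device->bytes_used and the item is deleted from the device tree.
*/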
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 start)
{
int ret;
struct btrfs_path *path;
struct btrfs_root *root = device->dev_root;
struct btrfs_key key;
struct btrfs_key found_key;
struct extent_buffer *leaf = NULL;
struct btrfs_dev_extent *extent = NULL;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = device->devid;
key.offset = start;
key.type = BTRFS_DEV_EXTENT_KEY;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
ret = btrfs_previous_item(root, path, key.objectid,
BTRFS_DEV_EXTENT_KEY);
if (ret)
goto out;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_extent);
BUG_ON(found_key.offset > start || found_key.offset +
btrfs_dev_extent_length(leaf, extent) < start);
} else if (ret == 0) {
leaf = path->nodes[0];
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_extent);
}
BUG_ON(ret);
if (device->bytes_used > 0)
device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
ret = btrfs_del_item(trans, root, path);
out:
btrfs_free_path(path);
return ret;
}
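/*
* btrfs_alloc_dev_extent - insert a dev extent item for @device
*
* Records that the physical range [@start, @start + @num_bytes) on this
* device backs the chunk identified by @chunk_tree, @chunk_objectid and
* @chunk_offset. The chunk tree uuid is copied into the new item so the
* extent can later be checked against the chunk tree.
*/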
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 chunk_tree, u64 chunk_objectid,
u64 chunk_offset, u64 start, u64 num_bytes)
{
int ret;
struct btrfs_path *path;
struct btrfs_root *root = device->dev_root;
struct btrfs_dev_extent *extent;
struct extent_buffer *leaf;
struct btrfs_key key;
WARN_ON(!device->in_fs_metadata);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = device->devid;
key.offset = start;
key.type = BTRFS_DEV_EXTENT_KEY;
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(*extent));
BUG_ON(ret);
leaf = path->nodes[0];
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_extent);
btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
(unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
BTRFS_UUID_SIZE);
btrfs_set_dev_extent_length(leaf, extent, num_bytes);
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
return ret;
}
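/*
* find_next_chunk - return in *offset the logical address just past the
* last chunk owned by @objectid, or 0 if no chunk item exists yet. This
* is where the next chunk for that objectid can be placed.
*/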
static noinline int find_next_chunk(struct btrfs_root *root,
u64 objectid, u64 *offset)
{
struct btrfs_path *path;
int ret;
struct btrfs_key key;
struct btrfs_chunk *chunk;
struct btrfs_key found_key;
path = btrfs_alloc_path();
BUG_ON(!path);
key.objectid = objectid;
key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto error;
BUG_ON(ret == 0);
ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
if (ret) {
*offset = 0;
} else {
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
if (found_key.objectid != objectid)
*offset = 0;
else {
chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_chunk);
*offset = found_key.offset +
btrfs_chunk_length(path->nodes[0], chunk);
}
}
ret = 0;
error:
btrfs_free_path(path);
return ret;
}
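/*
* find_next_devid - return in *objectid the next unused device id, i.e.
* the highest devid found in the chunk tree plus one, or 1 if there are
* no device items yet.
*/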
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
int ret;
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_path *path;
root = root->fs_info->chunk_root;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto error;
BUG_ON(ret == 0);
ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
BTRFS_DEV_ITEM_KEY);
if (ret) {
*objectid = 1;
} else {
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
*objectid = found_key.offset + 1;
}
ret = 0;
error:
btrfs_free_path(path);
return ret;
}
/*
* the device information is stored in the chunk root
* the btrfs_device struct should be fully filled in
*/
int btrfs_add_device(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_device *device)
{
int ret;
struct btrfs_path *path;
struct btrfs_dev_item *dev_item;
struct extent_buffer *leaf;
struct btrfs_key key;
unsigned long ptr;
root = root->fs_info->chunk_root;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = device->devid;
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(*dev_item));
if (ret)
goto out;
leaf = path->nodes[0];
dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
btrfs_set_device_id(leaf, dev_item, device->devid);
btrfs_set_device_generation(leaf, dev_item, 0);
btrfs_set_device_type(leaf, dev_item, device->type);
btrfs_set_device_io_align(leaf, dev_item, device->io_align);
btrfs_set_device_io_width(leaf, dev_item, device->io_width);
btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
btrfs_set_device_group(leaf, dev_item, 0);
btrfs_set_device_seek_speed(leaf, dev_item, 0);
btrfs_set_device_bandwidth(leaf, dev_item, 0);
btrfs_set_device_start_offset(leaf, dev_item, 0);
ptr = (unsigned long)btrfs_device_uuid(dev_item);
write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
ptr = (unsigned long)btrfs_device_fsid(dev_item);
write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
btrfs_mark_buffer_dirty(leaf);
ret = 0;
out:
btrfs_free_path(path);
return ret;
}
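/*
* btrfs_rm_dev_item - delete the device item for @device from the chunk
* root. This runs in its own transaction, which is committed before
* returning, so the on-disk device list no longer references the device.
*/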
static int btrfs_rm_dev_item(struct btrfs_root *root,
struct btrfs_device *device)
{
int ret;
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_trans_handle *trans;
root = root->fs_info->chunk_root;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
btrfs_free_path(path);
return PTR_ERR(trans);
}
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = device->devid;
lock_chunks(root);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
goto out;
if (ret > 0) {
ret = -ENOENT;
goto out;
}
ret = btrfs_del_item(trans, root, path);
if (ret)
goto out;
out:
btrfs_free_path(path);
unlock_chunks(root);
btrfs_commit_transaction(trans, root);
return ret;
}
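/*
* btrfs_rm_device - remove a device from the filesystem
*
* @device_path names the device to remove; the special string "missing"
* selects a device that is referenced by the metadata but has no backing
* bdev. The device is shrunk to zero, its items are deleted, it is
* unlinked from the in-memory device lists, and (if it was writeable)
* the magic in its super block is cleared so it is no longer detected as
* part of the FS.
*/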
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
struct btrfs_device *device;
struct btrfs_device *next_device;
struct block_device *bdev;
struct buffer_head *bh = NULL;
struct btrfs_super_block *disk_super;
struct btrfs_fs_devices *cur_devices;
u64 all_avail;
u64 devid;
u64 num_devices;
u8 *dev_uuid;
int ret = 0;
bool clear_super = false;
mutex_lock(&uuid_mutex);
mutex_lock(&root->fs_info->volume_mutex);
all_avail = root->fs_info->avail_data_alloc_bits |
root->fs_info->avail_system_alloc_bits |
root->fs_info->avail_metadata_alloc_bits;
if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
root->fs_info->fs_devices->num_devices <= 4) {
printk(KERN_ERR "btrfs: unable to go below four devices "
"on raid10\n");
ret = -EINVAL;
goto out;
}
if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
root->fs_info->fs_devices->num_devices <= 2) {
printk(KERN_ERR "btrfs: unable to go below two "
"devices on raid1\n");
ret = -EINVAL;
goto out;
}
if (strcmp(device_path, "missing") == 0) {
struct list_head *devices;
struct btrfs_device *tmp;
device = NULL;
devices = &root->fs_info->fs_devices->devices;
/*
* It is safe to read the devices since the volume_mutex
* is held.
*/
list_for_each_entry(tmp, devices, dev_list) {
if (tmp->in_fs_metadata && !tmp->bdev) {
device = tmp;
break;
}
}
bdev = NULL;
bh = NULL;
disk_super = NULL;
if (!device) {
printk(KERN_ERR "btrfs: no missing devices found to "
"remove\n");
goto out;
}
} else {
bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
root->fs_info->bdev_holder);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
goto out;
}
set_blocksize(bdev, 4096);
bh = btrfs_read_dev_super(bdev);
if (!bh) {
ret = -EINVAL;
goto error_close;
}
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
dev_uuid = disk_super->dev_item.uuid;
device = btrfs_find_device(root, devid, dev_uuid,
disk_super->fsid);
if (!device) {
ret = -ENOENT;
goto error_brelse;
}
}
if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
printk(KERN_ERR "btrfs: unable to remove the only writeable "
"device\n");
ret = -EINVAL;
goto error_brelse;
}
if (device->writeable) {
lock_chunks(root);
list_del_init(&device->dev_alloc_list);
unlock_chunks(root);
root->fs_info->fs_devices->rw_devices--;
clear_super = true;
}
ret = btrfs_shrink_device(device, 0);
if (ret)
goto error_undo;
ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
if (ret)
goto error_undo;
device->in_fs_metadata = 0;
btrfs_scrub_cancel_dev(root, device);
/*
* the device list mutex makes sure that we don't change
* the device list while someone else is writing out all
* the device supers.
*/
cur_devices = device->fs_devices;
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
list_del_rcu(&device->dev_list);
device->fs_devices->num_devices--;
if (device->missing)
root->fs_info->fs_devices->missing_devices--;
next_device = list_entry(root->fs_info->fs_devices->devices.next,
struct btrfs_device, dev_list);
if (device->bdev == root->fs_info->sb->s_bdev)
root->fs_info->sb->s_bdev = next_device->bdev;
if (device->bdev == root->fs_info->fs_devices->latest_bdev)
root->fs_info->fs_devices->latest_bdev = next_device->bdev;
if (device->bdev)
device->fs_devices->open_devices--;
call_rcu(&device->rcu, free_device);
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
if (cur_devices->open_devices == 0) {
struct btrfs_fs_devices *fs_devices;
fs_devices = root->fs_info->fs_devices;
while (fs_devices) {
if (fs_devices->seed == cur_devices)
break;
fs_devices = fs_devices->seed;
}
fs_devices->seed = cur_devices->seed;
cur_devices->seed = NULL;
lock_chunks(root);
__btrfs_close_devices(cur_devices);
unlock_chunks(root);
free_fs_devices(cur_devices);
}
/*
* at this point, the device is zero sized. We want to
* remove it from the devices list and zero out the old super
*/
if (clear_super) {
/* make sure this device isn't detected as part of
* the FS anymore
*/
memset(&disk_super->magic, 0, sizeof(disk_super->magic));
set_buffer_dirty(bh);
sync_dirty_buffer(bh);
}
ret = 0;
error_brelse:
brelse(bh);
error_close:
if (bdev)
blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
mutex_unlock(&root->fs_info->volume_mutex);
mutex_unlock(&uuid_mutex);
return ret;
error_undo:
if (device->writeable) {
lock_chunks(root);
list_add(&device->dev_alloc_list,
&root->fs_info->fs_devices->alloc_list);
unlock_chunks(root);
root->fs_info->fs_devices->rw_devices++;
}
goto error_brelse;
}
/*
* does all the dirty work required for changing the file system's UUID.
*/
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
struct btrfs_fs_devices *old_devices;
struct btrfs_fs_devices *seed_devices;
struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
struct btrfs_device *device;
u64 super_flags;
BUG_ON(!mutex_is_locked(&uuid_mutex));
if (!fs_devices->seeding)
return -EINVAL;
seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
if (!seed_devices)
return -ENOMEM;
old_devices = clone_fs_devices(fs_devices);
if (IS_ERR(old_devices)) {
kfree(seed_devices);
return PTR_ERR(old_devices);
}
list_add(&old_devices->list, &fs_uuids);
memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
seed_devices->opened = 1;
INIT_LIST_HEAD(&seed_devices->devices);
INIT_LIST_HEAD(&seed_devices->alloc_list);
mutex_init(&seed_devices->device_list_mutex);
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
synchronize_rcu);
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
list_for_each_entry(device, &seed_devices->devices, dev_list) {
device->fs_devices = seed_devices;
}
fs_devices->seeding = 0;
fs_devices->num_devices = 0;
fs_devices->open_devices = 0;
fs_devices->seed = seed_devices;
generate_random_uuid(fs_devices->fsid);
memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
super_flags = btrfs_super_flags(disk_super) &
~BTRFS_SUPER_FLAG_SEEDING;
btrfs_set_super_flags(disk_super, super_flags);
return 0;
}
/*
* store the expected generation for seed devices in device items.
*/
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dev_item *dev_item;
struct btrfs_device *device;
struct btrfs_key key;
u8 fs_uuid[BTRFS_UUID_SIZE];
u8 dev_uuid[BTRFS_UUID_SIZE];
u64 devid;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
root = root->fs_info->chunk_root;
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.offset = 0;
key.type = BTRFS_DEV_ITEM_KEY;
while (1) {
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0)
goto error;
leaf = path->nodes[0];
next_slot:
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret > 0)
break;
if (ret < 0)
goto error;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
btrfs_release_path(path);
continue;
}
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
key.type != BTRFS_DEV_ITEM_KEY)
break;
dev_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_item);
devid = btrfs_device_id(leaf, dev_item);
read_extent_buffer(leaf, dev_uuid,
(unsigned long)btrfs_device_uuid(dev_item),
BTRFS_UUID_SIZE);
read_extent_buffer(leaf, fs_uuid,
(unsigned long)btrfs_device_fsid(dev_item),
BTRFS_UUID_SIZE);
device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
BUG_ON(!device);
if (device->fs_devices->seeding) {
btrfs_set_device_generation(leaf, dev_item,
device->generation);
btrfs_mark_buffer_dirty(leaf);
}
path->slots[0]++;
goto next_slot;
}
ret = 0;
error:
btrfs_free_path(path);
return ret;
}
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
struct request_queue *q;
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
struct block_device *bdev;
struct list_head *devices;
struct super_block *sb = root->fs_info->sb;
u64 total_bytes;
int seeding_dev = 0;
int ret = 0;
if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
return -EINVAL;
bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
root->fs_info->bdev_holder);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
if (root->fs_info->fs_devices->seeding) {
seeding_dev = 1;
down_write(&sb->s_umount);
mutex_lock(&uuid_mutex);
}
filemap_write_and_wait(bdev->bd_inode->i_mapping);
mutex_lock(&root->fs_info->volume_mutex);
devices = &root->fs_info->fs_devices->devices;
/*
* we have the volume lock, so we don't need the extra
* device list mutex while reading the list here.
*/
list_for_each_entry(device, devices, dev_list) {
if (device->bdev == bdev) {
ret = -EEXIST;
goto error;
}
}
device = kzalloc(sizeof(*device), GFP_NOFS);
if (!device) {
/* we can safely leave the fs_devices entry around */
ret = -ENOMEM;
goto error;
}
device->name = kstrdup(device_path, GFP_NOFS);
if (!device->name) {
kfree(device);
ret = -ENOMEM;
goto error;
}
ret = find_next_devid(root, &device->devid);
if (ret) {
kfree(device->name);
kfree(device);
goto error;
}
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
kfree(device->name);
kfree(device);
ret = PTR_ERR(trans);
goto error;
}
lock_chunks(root);
q = bdev_get_queue(bdev);
if (blk_queue_discard(q))
device->can_discard = 1;
device->writeable = 1;
device->work.func = pending_bios_fn;
generate_random_uuid(device->uuid);
spin_lock_init(&device->io_lock);
device->generation = trans->transid;
device->io_width = root->sectorsize;
device->io_align = root->sectorsize;
device->sector_size = root->sectorsize;
device->total_bytes = i_size_read(bdev->bd_inode);
device->disk_total_bytes = device->total_bytes;
device->dev_root = root->fs_info->dev_root;
device->bdev = bdev;
device->in_fs_metadata = 1;
device->mode = FMODE_EXCL;
set_blocksize(device->bdev, 4096);
if (seeding_dev) {
sb->s_flags &= ~MS_RDONLY;
ret = btrfs_prepare_sprout(trans, root);
BUG_ON(ret);
}
device->fs_devices = root->fs_info->fs_devices;
/*
* we don't want write_supers to jump in here with our device
* half setup
*/
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
list_add(&device->dev_alloc_list,
&root->fs_info->fs_devices->alloc_list);
root->fs_info->fs_devices->num_devices++;
root->fs_info->fs_devices->open_devices++;
root->fs_info->fs_devices->rw_devices++;
if (device->can_discard)
root->fs_info->fs_devices->num_can_discard++;
root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
if (!blk_queue_nonrot(bdev_get_queue(bdev)))
root->fs_info->fs_devices->rotating = 1;
total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
btrfs_set_super_total_bytes(&root->fs_info->super_copy,
total_bytes + device->total_bytes);
total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
btrfs_set_super_num_devices(&root->fs_info->super_copy,
total_bytes + 1);
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
if (seeding_dev) {
ret = init_first_rw_device(trans, root, device);
BUG_ON(ret);
ret = btrfs_finish_sprout(trans, root);
BUG_ON(ret);
} else {
ret = btrfs_add_device(trans, root, device);
}
/*
* we've got more storage, clear any full flags on the space
* infos
*/
btrfs_clear_space_info_full(root->fs_info);
unlock_chunks(root);
btrfs_commit_transaction(trans, root);
if (seeding_dev) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
ret = btrfs_relocate_sys_chunks(root);
BUG_ON(ret);
}
out:
mutex_unlock(&root->fs_info->volume_mutex);
return ret;
error:
blkdev_put(bdev, FMODE_EXCL);
if (seeding_dev) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
}
goto out;
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device)
{
int ret;
struct btrfs_path *path;
struct btrfs_root *root;
struct btrfs_dev_item *dev_item;
struct extent_buffer *leaf;
struct btrfs_key key;
root = device->dev_root->fs_info->chunk_root;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = device->devid;
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0)
goto out;
if (ret > 0) {
ret = -ENOENT;
goto out;
}
leaf = path->nodes[0];
dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
btrfs_set_device_id(leaf, dev_item, device->devid);
btrfs_set_device_type(leaf, dev_item, device->type);
btrfs_set_device_io_align(leaf, dev_item, device->io_align);
btrfs_set_device_io_width(leaf, dev_item, device->io_width);
btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
btrfs_mark_buffer_dirty(leaf);
out:
btrfs_free_path(path);
return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size)
{
struct btrfs_super_block *super_copy =
&device->dev_root->fs_info->super_copy;
u64 old_total = btrfs_super_total_bytes(super_copy);
u64 diff = new_size - device->total_bytes;
if (!device->writeable)
return -EACCES;
if (new_size <= device->total_bytes)
return -EINVAL;
btrfs_set_super_total_bytes(super_copy, old_total + diff);
device->fs_devices->total_rw_bytes += diff;
device->total_bytes = new_size;
device->disk_total_bytes = new_size;
btrfs_clear_space_info_full(device->dev_root->fs_info);
return btrfs_update_device(trans, device);
}
int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size)
{
int ret;
lock_chunks(device->dev_root);
ret = __btrfs_grow_device(trans, device, new_size);
unlock_chunks(device->dev_root);
return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 chunk_tree, u64 chunk_objectid,
u64 chunk_offset)
{
int ret;
struct btrfs_path *path;
struct btrfs_key key;
root = root->fs_info->chunk_root;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = chunk_objectid;
key.offset = chunk_offset;
key.type = BTRFS_CHUNK_ITEM_KEY;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
BUG_ON(ret);
ret = btrfs_del_item(trans, root, path);
btrfs_free_path(path);
return ret;
}
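/*
* btrfs_del_sys_chunk - remove a chunk from the super block's
* sys_chunk_array. The array is a packed sequence of
* (struct btrfs_disk_key, struct btrfs_chunk + stripes) pairs; the
* matching entry is removed by memmove'ing the tail of the array down
* over it and shrinking the recorded array size.
*/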
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
chunk_offset)
{
struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
struct btrfs_disk_key *disk_key;
struct btrfs_chunk *chunk;
u8 *ptr;
int ret = 0;
u32 num_stripes;
u32 array_size;
u32 len = 0;
u32 cur;
struct btrfs_key key;
array_size = btrfs_super_sys_array_size(super_copy);
ptr = super_copy->sys_chunk_array;
cur = 0;
while (cur < array_size) {
disk_key = (struct btrfs_disk_key *)ptr;
btrfs_disk_key_to_cpu(&key, disk_key);
len = sizeof(*disk_key);
if (key.type == BTRFS_CHUNK_ITEM_KEY) {
chunk = (struct btrfs_chunk *)(ptr + len);
num_stripes = btrfs_stack_chunk_num_stripes(chunk);
len += btrfs_chunk_item_size(num_stripes);
} else {
ret = -EIO;
break;
}
if (key.objectid == chunk_objectid &&
key.offset == chunk_offset) {
memmove(ptr, ptr + len, array_size - (cur + len));
array_size -= len;
btrfs_set_super_sys_array_size(super_copy, array_size);
} else {
ptr += len;
cur += len;
}
}
return ret;
}
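/*
* btrfs_relocate_chunk - move everything out of a chunk and delete it
*
* Step one relocates all the extents inside the block group; step two
* deletes the dev extents, the chunk item (and the sys_chunk_array entry
* for SYSTEM chunks), the block group and the extent mapping.
*/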
static int btrfs_relocate_chunk(struct btrfs_root *root,
u64 chunk_tree, u64 chunk_objectid,
u64 chunk_offset)
{
struct extent_map_tree *em_tree;
struct btrfs_root *extent_root;
struct btrfs_trans_handle *trans;
struct extent_map *em;
struct map_lookup *map;
int ret;
int i;
root = root->fs_info->chunk_root;
extent_root = root->fs_info->extent_root;
em_tree = &root->fs_info->mapping_tree.map_tree;
ret = btrfs_can_relocate(extent_root, chunk_offset);
if (ret)
return -ENOSPC;
/* step one, relocate all the extents inside this chunk */
ret = btrfs_relocate_block_group(extent_root, chunk_offset);
if (ret)
return ret;
trans = btrfs_start_transaction(root, 0);
BUG_ON(IS_ERR(trans));
lock_chunks(root);
/*
* step two, delete the device extents and the
* chunk tree entries
*/
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, chunk_offset, 1);
read_unlock(&em_tree->lock);
BUG_ON(em->start > chunk_offset ||
em->start + em->len < chunk_offset);
map = (struct map_lookup *)em->bdev;
for (i = 0; i < map->num_stripes; i++) {
ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
map->stripes[i].physical);
BUG_ON(ret);
if (map->stripes[i].dev) {
ret = btrfs_update_device(trans, map->stripes[i].dev);
BUG_ON(ret);
}
}
ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
chunk_offset);
BUG_ON(ret);
trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
BUG_ON(ret);
}
ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
BUG_ON(ret);
write_lock(&em_tree->lock);
remove_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
kfree(map);
em->bdev = NULL;
/* once for the tree */
free_extent_map(em);
/* once for us */
free_extent_map(em);
unlock_chunks(root);
btrfs_end_transaction(trans, root);
return 0;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
struct btrfs_root *chunk_root = root->fs_info->chunk_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_chunk *chunk;
struct btrfs_key key;
struct btrfs_key found_key;
u64 chunk_tree = chunk_root->root_key.objectid;
u64 chunk_type;
bool retried = false;
int failed = 0;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
again:
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
while (1) {
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
if (ret < 0)
goto error;
BUG_ON(ret == 0);
ret = btrfs_previous_item(chunk_root, path, key.objectid,
key.type);
if (ret < 0)
goto error;
if (ret > 0)
break;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
chunk = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_chunk);
chunk_type = btrfs_chunk_type(leaf, chunk);
btrfs_release_path(path);
if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
found_key.objectid,
found_key.offset);
if (ret == -ENOSPC)
failed++;
else if (ret)
BUG();
}
if (found_key.offset == 0)
break;
key.offset = found_key.offset - 1;
}
ret = 0;
if (failed && !retried) {
failed = 0;
retried = true;
goto again;
} else if (failed && retried) {
WARN_ON(1);
ret = -ENOSPC;
}
error:
btrfs_free_path(path);
return ret;
}
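/*
* div_factor - scale @num by @factor tenths. For example (illustrative
* numbers), div_factor(1000, 3) == 300, i.e. 30% of 1000; a factor of
* 10 returns @num unchanged.
*/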
static u64 div_factor(u64 num, int factor)
{
if (factor == 10)
return num;
num *= factor;
do_div(num, 10);
return num;
}
int btrfs_balance(struct btrfs_root *dev_root)
{
int ret;
struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
struct btrfs_device *device;
u64 old_size;
u64 size_to_free;
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
struct btrfs_trans_handle *trans;
struct btrfs_key found_key;
if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mutex_lock(&dev_root->fs_info->volume_mutex);
dev_root = dev_root->fs_info->dev_root;
/* step one make some room on all the devices */
list_for_each_entry(device, devices, dev_list) {
old_size = device->total_bytes;
size_to_free = div_factor(old_size, 1);
size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
if (!device->writeable ||
device->total_bytes - device->bytes_used > size_to_free)
continue;
ret = btrfs_shrink_device(device, old_size - size_to_free);
if (ret == -ENOSPC)
break;
BUG_ON(ret);
trans = btrfs_start_transaction(dev_root, 0);
BUG_ON(IS_ERR(trans));
ret = btrfs_grow_device(trans, device, old_size);
BUG_ON(ret);
btrfs_end_transaction(trans, dev_root);
}
/* step two, relocate all the chunks */
path = btrfs_alloc_path();
BUG_ON(!path);
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
while (1) {
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
if (ret < 0)
goto error;
/*
* this shouldn't happen, it means the last relocate
* failed
*/
if (ret == 0)
break;
ret = btrfs_previous_item(chunk_root, path, 0,
BTRFS_CHUNK_ITEM_KEY);
if (ret)
break;
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
if (found_key.objectid != key.objectid)
break;
/* chunk zero is special */
if (found_key.offset == 0)
break;
btrfs_release_path(path);
ret = btrfs_relocate_chunk(chunk_root,
chunk_root->root_key.objectid,
found_key.objectid,
found_key.offset);
if (ret && ret != -ENOSPC)
goto error;
key.offset = found_key.offset - 1;
}
ret = 0;
error:
btrfs_free_path(path);
mutex_unlock(&dev_root->fs_info->volume_mutex);
return ret;
}
/*
* shrinking a device means finding all of the device extents past
* the new size, and then following the back refs to the chunks.
* The chunk relocation code actually frees the device extent.
*/
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = device->dev_root;
struct btrfs_dev_extent *dev_extent = NULL;
struct btrfs_path *path;
u64 length;
u64 chunk_tree;
u64 chunk_objectid;
u64 chunk_offset;
int ret;
int slot;
int failed = 0;
bool retried = false;
struct extent_buffer *l;
struct btrfs_key key;
struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
u64 old_total = btrfs_super_total_bytes(super_copy);
u64 old_size = device->total_bytes;
u64 diff = device->total_bytes - new_size;
if (new_size >= device->total_bytes)
return -EINVAL;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->reada = 2;
lock_chunks(root);
device->total_bytes = new_size;
if (device->writeable)
device->fs_devices->total_rw_bytes -= diff;
unlock_chunks(root);
again:
key.objectid = device->devid;
key.offset = (u64)-1;
key.type = BTRFS_DEV_EXTENT_KEY;
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto done;
ret = btrfs_previous_item(root, path, 0, key.type);
if (ret < 0)
goto done;
if (ret) {
ret = 0;
btrfs_release_path(path);
break;
}
l = path->nodes[0];
slot = path->slots[0];
btrfs_item_key_to_cpu(l, &key, path->slots[0]);
if (key.objectid != device->devid) {
btrfs_release_path(path);
break;
}
dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
length = btrfs_dev_extent_length(l, dev_extent);
if (key.offset + length <= new_size) {
btrfs_release_path(path);
break;
}
chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
btrfs_release_path(path);
ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
chunk_offset);
if (ret && ret != -ENOSPC)
goto done;
if (ret == -ENOSPC)
failed++;
key.offset -= 1;
}
if (failed && !retried) {
failed = 0;
retried = true;
goto again;
} else if (failed && retried) {
ret = -ENOSPC;
lock_chunks(root);
device->total_bytes = old_size;
if (device->writeable)
device->fs_devices->total_rw_bytes += diff;
unlock_chunks(root);
goto done;
}
/* Shrinking succeeded, else we would be at "done". */
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto done;
}
lock_chunks(root);
device->disk_total_bytes = new_size;
/* Now btrfs_update_device() will change the on-disk size. */
ret = btrfs_update_device(trans, device);
if (ret) {
unlock_chunks(root);
btrfs_end_transaction(trans, root);
goto done;
}
WARN_ON(diff > old_total);
btrfs_set_super_total_bytes(super_copy, old_total - diff);
unlock_chunks(root);
btrfs_end_transaction(trans, root);
done:
btrfs_free_path(path);
return ret;
}
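/*
* btrfs_add_system_chunk - append a chunk to the super block's
* sys_chunk_array as a (disk key, chunk item) pair. Fails with -EFBIG
* if the array would exceed BTRFS_SYSTEM_CHUNK_ARRAY_SIZE.
*/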
static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_key *key,
struct btrfs_chunk *chunk, int item_size)
{
struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
struct btrfs_disk_key disk_key;
u32 array_size;
u8 *ptr;
array_size = btrfs_super_sys_array_size(super_copy);
if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
return -EFBIG;
ptr = super_copy->sys_chunk_array + array_size;
btrfs_cpu_key_to_disk(&disk_key, key);
memcpy(ptr, &disk_key, sizeof(disk_key));
ptr += sizeof(disk_key);
memcpy(ptr, chunk, item_size);
item_size += sizeof(disk_key);
btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
return 0;
}
/*
* sort the devices in descending order by max_avail, total_avail
*/
static int btrfs_cmp_device_info(const void *a, const void *b)
{
const struct btrfs_device_info *di_a = a;
const struct btrfs_device_info *di_b = b;
if (di_a->max_avail > di_b->max_avail)
return -1;
if (di_a->max_avail < di_b->max_avail)
return 1;
if (di_a->total_avail > di_b->total_avail)
return -1;
if (di_a->total_avail < di_b->total_avail)
return 1;
return 0;
}
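/*
* __btrfs_alloc_chunk - allocate a new chunk of @type at logical @start
*
* The first pass over the alloc_list gathers the largest free hole on
* each writeable device via find_free_dev_extent(). The devices are then
* sorted by hole size, ndevs is rounded down to a multiple of
* devs_increment, and the stripe size is taken from the smallest
* selected hole, clamped so the chunk stays within max_chunk_size. The
* resulting map is inserted into the extent mapping tree, and a block
* group plus per-device dev extents are created for it.
*/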
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct map_lookup **map_ret,
u64 *num_bytes_out, u64 *stripe_size_out,
u64 start, u64 type)
{
struct btrfs_fs_info *info = extent_root->fs_info;
struct btrfs_fs_devices *fs_devices = info->fs_devices;
struct list_head *cur;
struct map_lookup *map = NULL;
struct extent_map_tree *em_tree;
struct extent_map *em;
struct btrfs_device_info *devices_info = NULL;
u64 total_avail;
int num_stripes; /* total number of stripes to allocate */
int sub_stripes; /* sub_stripes info for map */
int dev_stripes; /* stripes per dev */
int devs_max; /* max devs to use */
int devs_min; /* min devs needed */
int devs_increment; /* ndevs has to be a multiple of this */
int ncopies; /* how many copies the data has */
int ret;
u64 max_stripe_size;
u64 max_chunk_size;
u64 stripe_size;
u64 num_bytes;
int ndevs;
int i;
int j;
if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
(type & BTRFS_BLOCK_GROUP_DUP)) {
WARN_ON(1);
type &= ~BTRFS_BLOCK_GROUP_DUP;
}
if (list_empty(&fs_devices->alloc_list))
return -ENOSPC;
sub_stripes = 1;
dev_stripes = 1;
devs_increment = 1;
ncopies = 1;
devs_max = 0; /* 0 == as many as possible */
devs_min = 1;
/*
* define the properties of each RAID type.
* FIXME: move this to a global table and use it in all RAID
* calculation code
*/
if (type & (BTRFS_BLOCK_GROUP_DUP)) {
dev_stripes = 2;
ncopies = 2;
devs_max = 1;
} else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
devs_min = 2;
} else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
devs_increment = 2;
ncopies = 2;
devs_max = 2;
devs_min = 2;
} else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
sub_stripes = 2;
devs_increment = 2;
ncopies = 2;
devs_min = 4;
} else {
devs_max = 1;
}
if (type & BTRFS_BLOCK_GROUP_DATA) {
max_stripe_size = 1024 * 1024 * 1024;
max_chunk_size = 10 * max_stripe_size;
} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
max_stripe_size = 256 * 1024 * 1024;
max_chunk_size = max_stripe_size;
} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
max_stripe_size = 8 * 1024 * 1024;
max_chunk_size = 2 * max_stripe_size;
} else {
printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
type);
BUG_ON(1);
}
/* we don't want a chunk larger than 10% of writeable space */
max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
max_chunk_size);
devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
GFP_NOFS);
if (!devices_info)
return -ENOMEM;
cur = fs_devices->alloc_list.next;
/*
* in the first pass through the devices list, we gather information
* about the available holes on each device.
*/
ndevs = 0;
while (cur != &fs_devices->alloc_list) {
struct btrfs_device *device;
u64 max_avail;
u64 dev_offset;
device = list_entry(cur, struct btrfs_device, dev_alloc_list);
cur = cur->next;
if (!device->writeable) {
printk(KERN_ERR
"btrfs: read-only device in alloc_list\n");
WARN_ON(1);
continue;
}
if (!device->in_fs_metadata)
continue;
if (device->total_bytes > device->bytes_used)
total_avail = device->total_bytes - device->bytes_used;
else
total_avail = 0;
/* avail is off by max(alloc_start, 1MB), but that is the same
* for all devices, so it doesn't hurt the sorting later on
*/
ret = find_free_dev_extent(trans, device,
max_stripe_size * dev_stripes,
&dev_offset, &max_avail);
if (ret && ret != -ENOSPC)
goto error;
if (ret == 0)
max_avail = max_stripe_size * dev_stripes;
if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
continue;
devices_info[ndevs].dev_offset = dev_offset;
devices_info[ndevs].max_avail = max_avail;
devices_info[ndevs].total_avail = total_avail;
devices_info[ndevs].dev = device;
++ndevs;
}
/*
* now sort the devices by hole size / available space
*/
sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
btrfs_cmp_device_info, NULL);
/* round down to number of usable stripes */
ndevs -= ndevs % devs_increment;
if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
ret = -ENOSPC;
goto error;
}
if (devs_max && ndevs > devs_max)
ndevs = devs_max;
/*
* the primary goal is to maximize the number of stripes, so use as many
* devices as possible, even if the stripes are not maximum sized.
*/
stripe_size = devices_info[ndevs-1].max_avail;
num_stripes = ndevs * dev_stripes;
if (stripe_size * num_stripes > max_chunk_size * ncopies) {
stripe_size = max_chunk_size * ncopies;
do_div(stripe_size, num_stripes);
}
do_div(stripe_size, dev_stripes);
do_div(stripe_size, BTRFS_STRIPE_LEN);
stripe_size *= BTRFS_STRIPE_LEN;
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
if (!map) {
ret = -ENOMEM;
goto error;
}
map->num_stripes = num_stripes;
for (i = 0; i < ndevs; ++i) {
for (j = 0; j < dev_stripes; ++j) {
int s = i * dev_stripes + j;
map->stripes[s].dev = devices_info[i].dev;
map->stripes[s].physical = devices_info[i].dev_offset +
j * stripe_size;
}
}
map->sector_size = extent_root->sectorsize;
map->stripe_len = BTRFS_STRIPE_LEN;
map->io_align = BTRFS_STRIPE_LEN;
map->io_width = BTRFS_STRIPE_LEN;
map->type = type;
map->sub_stripes = sub_stripes;
*map_ret = map;
num_bytes = stripe_size * (num_stripes / ncopies);
*stripe_size_out = stripe_size;
*num_bytes_out = num_bytes;
trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
em = alloc_extent_map();
if (!em) {
ret = -ENOMEM;
goto error;
}
em->bdev = (struct block_device *)map;
em->start = start;
em->len = num_bytes;
em->block_start = 0;
em->block_len = em->len;
em_tree = &extent_root->fs_info->mapping_tree.map_tree;
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
BUG_ON(ret);
free_extent_map(em);
ret = btrfs_make_block_group(trans, extent_root, 0, type,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, num_bytes);
BUG_ON(ret);
for (i = 0; i < map->num_stripes; ++i) {
struct btrfs_device *device;
u64 dev_offset;
device = map->stripes[i].dev;
dev_offset = map->stripes[i].physical;
ret = btrfs_alloc_dev_extent(trans, device,
info->chunk_root->root_key.objectid,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, dev_offset, stripe_size);
BUG_ON(ret);
}
kfree(devices_info);
return 0;
error:
kfree(map);
kfree(devices_info);
return ret;
}
static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct map_lookup *map, u64 chunk_offset,
u64 chunk_size, u64 stripe_size)
{
u64 dev_offset;
struct btrfs_key key;
struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
struct btrfs_device *device;
struct btrfs_chunk *chunk;
struct btrfs_stripe *stripe;
size_t item_size = btrfs_chunk_item_size(map->num_stripes);
int index = 0;
int ret;
chunk = kzalloc(item_size, GFP_NOFS);
if (!chunk)
return -ENOMEM;
index = 0;
while (index < map->num_stripes) {
device = map->stripes[index].dev;
device->bytes_used += stripe_size;
ret = btrfs_update_device(trans, device);
BUG_ON(ret);
index++;
}
index = 0;
stripe = &chunk->stripe;
while (index < map->num_stripes) {
device = map->stripes[index].dev;
dev_offset = map->stripes[index].physical;
btrfs_set_stack_stripe_devid(stripe, device->devid);
btrfs_set_stack_stripe_offset(stripe, dev_offset);
memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
stripe++;
index++;
}
btrfs_set_stack_chunk_length(chunk, chunk_size);
btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
btrfs_set_stack_chunk_type(chunk, map->type);
btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
key.type = BTRFS_CHUNK_ITEM_KEY;
key.offset = chunk_offset;
ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
BUG_ON(ret);
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
item_size);
BUG_ON(ret);
}
kfree(chunk);
return 0;
}
/*
* Chunk allocation falls into two parts. The first part does the work
* that makes the newly allocated chunk usable, but does not do any
* operation that modifies the chunk tree. The second part does the work
* that requires modifying the chunk tree. This division is important for
* the bootstrap process of adding storage to a seed btrfs.
*/
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 type)
{
u64 chunk_offset;
u64 chunk_size;
u64 stripe_size;
struct map_lookup *map;
struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
int ret;
ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
&chunk_offset);
if (ret)
return ret;
ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
&stripe_size, chunk_offset, type);
if (ret)
return ret;
ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
chunk_size, stripe_size);
BUG_ON(ret);
return 0;
}
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_device *device)
{
u64 chunk_offset;
u64 sys_chunk_offset;
u64 chunk_size;
u64 sys_chunk_size;
u64 stripe_size;
u64 sys_stripe_size;
u64 alloc_profile;
struct map_lookup *map;
struct map_lookup *sys_map;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *extent_root = fs_info->extent_root;
int ret;
ret = find_next_chunk(fs_info->chunk_root,
BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
BUG_ON(ret);
alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
(fs_info->metadata_alloc_profile &
fs_info->avail_metadata_alloc_bits);
alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
&stripe_size, chunk_offset, alloc_profile);
BUG_ON(ret);
sys_chunk_offset = chunk_offset + chunk_size;
alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
(fs_info->system_alloc_profile &
fs_info->avail_system_alloc_bits);
alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
&sys_chunk_size, &sys_stripe_size,
sys_chunk_offset, alloc_profile);
BUG_ON(ret);
ret = btrfs_add_device(trans, fs_info->chunk_root, device);
BUG_ON(ret);
/*
* Modifying the chunk tree needs to allocate new blocks from both
* the system block group and the metadata block group. So we can
* only do operations that require modifying the chunk tree after
* both block groups have been created.
*/
ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
chunk_size, stripe_size);
BUG_ON(ret);
ret = __finish_chunk_alloc(trans, extent_root, sys_map,
sys_chunk_offset, sys_chunk_size,
sys_stripe_size);
BUG_ON(ret);
return 0;
}
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
struct extent_map *em;
struct map_lookup *map;
struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
int readonly = 0;
int i;
read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
read_unlock(&map_tree->map_tree.lock);
if (!em)
return 1;
if (btrfs_test_opt(root, DEGRADED)) {
free_extent_map(em);
return 0;
}
map = (struct map_lookup *)em->bdev;
for (i = 0; i < map->num_stripes; i++) {
if (!map->stripes[i].dev->writeable) {
readonly = 1;
break;
}
}
free_extent_map(em);
return readonly;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
extent_map_tree_init(&tree->map_tree);
}
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
struct extent_map *em;
while (1) {
write_lock(&tree->map_tree.lock);
em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
if (em)
remove_extent_mapping(&tree->map_tree, em);
write_unlock(&tree->map_tree.lock);
if (!em)
break;
kfree(em->bdev);
/* once for us */
free_extent_map(em);
/* once for the tree */
free_extent_map(em);
}
}
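/*
* btrfs_num_copies - how many copies of the data at @logical exist:
* num_stripes for DUP/RAID1, sub_stripes for RAID10, otherwise 1.
*/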
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
struct extent_map *em;
struct map_lookup *map;
struct extent_map_tree *em_tree = &map_tree->map_tree;
int ret;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, len);
read_unlock(&em_tree->lock);
BUG_ON(!em);
BUG_ON(em->start > logical || em->start + em->len < logical);
map = (struct map_lookup *)em->bdev;
if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
ret = map->num_stripes;
else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
ret = map->sub_stripes;
else
ret = 1;
free_extent_map(em);
return ret;
}
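/*
* find_live_mirror - pick a stripe with a live bdev from the @num
* stripes starting at @first, preferring @optimal if it is usable.
*/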
static int find_live_mirror(struct map_lookup *map, int first, int num,
int optimal)
{
int i;
if (map->stripes[optimal].dev->bdev)
return optimal;
for (i = first; i < first + num; i++) {
if (map->stripes[i].dev->bdev)
return i;
}
/* we couldn't find one that doesn't fail. Just return something
* and the io error handling code will clean up eventually
*/
return optimal;
}
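/*
* __btrfs_map_block - map a logical range to physical stripes
*
* For example (illustrative numbers), on RAID0 with two stripes and a
* 64K stripe_len, offset 192K within the chunk gives stripe_nr 3, so
* stripe_index = 3 % 2 = 1 and the data lives on stripe 1, 64K into its
* dev extent. Reads pick a single mirror; writes and discards fan out
* to every stripe that holds a copy of the range.
*/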
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
u64 logical, u64 *length,
struct btrfs_multi_bio **multi_ret,
int mirror_num)
{
struct extent_map *em;
struct map_lookup *map;
struct extent_map_tree *em_tree = &map_tree->map_tree;
u64 offset;
u64 stripe_offset;
u64 stripe_end_offset;
u64 stripe_nr;
u64 stripe_nr_orig;
u64 stripe_nr_end;
int stripes_allocated = 8;
int stripes_required = 1;
int stripe_index;
int i;
int num_stripes;
int max_errors = 0;
struct btrfs_multi_bio *multi = NULL;
if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
stripes_allocated = 1;
again:
if (multi_ret) {
multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
GFP_NOFS);
if (!multi)
return -ENOMEM;
atomic_set(&multi->error, 0);
}
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, *length);
read_unlock(&em_tree->lock);
if (!em) {
printk(KERN_CRIT "unable to find logical %llu len %llu\n",
(unsigned long long)logical,
(unsigned long long)*length);
BUG();
}
BUG_ON(em->start > logical || em->start + em->len < logical);
map = (struct map_lookup *)em->bdev;
offset = logical - em->start;
if (mirror_num > map->num_stripes)
mirror_num = 0;
/* if our multi bio struct is too small, back off and try again */
if (rw & REQ_WRITE) {
if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_DUP)) {
stripes_required = map->num_stripes;
max_errors = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
stripes_required = map->sub_stripes;
max_errors = 1;
}
}
if (rw & REQ_DISCARD) {
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID10)) {
stripes_required = map->num_stripes;
}
}
if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
stripes_allocated < stripes_required) {
stripes_allocated = map->num_stripes;
free_extent_map(em);
kfree(multi);
goto again;
}
stripe_nr = offset;
/*
* stripe_nr counts the total number of stripes we have to stride
* to get to this block
*/
do_div(stripe_nr, map->stripe_len);
stripe_offset = stripe_nr * map->stripe_len;
BUG_ON(offset < stripe_offset);
/* stripe_offset is the offset of this block in its stripe */
stripe_offset = offset - stripe_offset;
if (rw & REQ_DISCARD)
*length = min_t(u64, em->len - offset, *length);
else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10 |
BTRFS_BLOCK_GROUP_DUP)) {
/* we limit the length of each bio to what fits in a stripe */
*length = min_t(u64, em->len - offset,
map->stripe_len - stripe_offset);
} else {
*length = em->len - offset;
}
if (!multi_ret)
goto out;
num_stripes = 1;
stripe_index = 0;
stripe_nr_orig = stripe_nr;
stripe_nr_end = (offset + *length + map->stripe_len - 1) &
(~(map->stripe_len - 1));
do_div(stripe_nr_end, map->stripe_len);
stripe_end_offset = stripe_nr_end * map->stripe_len -
(offset + *length);
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
if (rw & REQ_DISCARD)
num_stripes = min_t(u64, map->num_stripes,
stripe_nr_end - stripe_nr_orig);
stripe_index = do_div(stripe_nr, map->num_stripes);
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
if (rw & (REQ_WRITE | REQ_DISCARD))
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
else {
stripe_index = find_live_mirror(map, 0,
map->num_stripes,
current->pid % map->num_stripes);
}
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
if (rw & (REQ_WRITE | REQ_DISCARD))
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
int factor = map->num_stripes / map->sub_stripes;
stripe_index = do_div(stripe_nr, factor);
stripe_index *= map->sub_stripes;
if (rw & REQ_WRITE)
num_stripes = map->sub_stripes;
else if (rw & REQ_DISCARD)
num_stripes = min_t(u64, map->sub_stripes *
(stripe_nr_end - stripe_nr_orig),
map->num_stripes);
else if (mirror_num)
stripe_index += mirror_num - 1;
else {
stripe_index = find_live_mirror(map, stripe_index,
map->sub_stripes, stripe_index +
current->pid % map->sub_stripes);
}
} else {
/*
* after this do_div call, stripe_nr is the number of stripes
* on this device we have to walk to find the data, and
* stripe_index is the number of our device in the stripe array
*/
stripe_index = do_div(stripe_nr, map->num_stripes);
}
BUG_ON(stripe_index >= map->num_stripes);
if (rw & REQ_DISCARD) {
for (i = 0; i < num_stripes; i++) {
multi->stripes[i].physical =
map->stripes[stripe_index].physical +
stripe_offset + stripe_nr * map->stripe_len;
multi->stripes[i].dev = map->stripes[stripe_index].dev;
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
u64 stripes;
u32 last_stripe = 0;
int j;
div_u64_rem(stripe_nr_end - 1,
map->num_stripes,
&last_stripe);
for (j = 0; j < map->num_stripes; j++) {
u32 test;
div_u64_rem(stripe_nr_end - 1 - j,
map->num_stripes, &test);
if (test == stripe_index)
break;
}
stripes = stripe_nr_end - 1 - j;
do_div(stripes, map->num_stripes);
multi->stripes[i].length = map->stripe_len *
(stripes - stripe_nr + 1);
if (i == 0) {
multi->stripes[i].length -=
stripe_offset;
stripe_offset = 0;
}
if (stripe_index == last_stripe)
multi->stripes[i].length -=
stripe_end_offset;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
u64 stripes;
int j;
int factor = map->num_stripes /
map->sub_stripes;
u32 last_stripe = 0;
div_u64_rem(stripe_nr_end - 1,
factor, &last_stripe);
last_stripe *= map->sub_stripes;
for (j = 0; j < factor; j++) {
u32 test;
div_u64_rem(stripe_nr_end - 1 - j,
factor, &test);
if (test ==
stripe_index / map->sub_stripes)
break;
}
stripes = stripe_nr_end - 1 - j;
do_div(stripes, factor);
multi->stripes[i].length = map->stripe_len *
(stripes - stripe_nr + 1);
if (i < map->sub_stripes) {
multi->stripes[i].length -=
stripe_offset;
if (i == map->sub_stripes - 1)
stripe_offset = 0;
}
if (stripe_index >= last_stripe &&
stripe_index <= (last_stripe +
map->sub_stripes - 1)) {
multi->stripes[i].length -=
stripe_end_offset;
}
} else
multi->stripes[i].length = *length;
stripe_index++;
if (stripe_index == map->num_stripes) {
/* This could only happen for RAID0/10 */
stripe_index = 0;
stripe_nr++;
}
}
} else {
for (i = 0; i < num_stripes; i++) {
multi->stripes[i].physical =
map->stripes[stripe_index].physical +
stripe_offset +
stripe_nr * map->stripe_len;
multi->stripes[i].dev =
map->stripes[stripe_index].dev;
stripe_index++;
}
}
if (multi_ret) {
*multi_ret = multi;
multi->num_stripes = num_stripes;
multi->max_errors = max_errors;
}
out:
free_extent_map(em);
return 0;
}
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
u64 logical, u64 *length,
struct btrfs_multi_bio **multi_ret, int mirror_num)
{
return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
mirror_num);
}
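/*
* btrfs_rmap_block - reverse mapping: given a @physical byte on device
* @devid inside the chunk at @chunk_start, compute the logical
* address(es) that map to it. The caller gets an allocated array in
* *logical with *naddrs entries and must free it.
*/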
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
u64 chunk_start, u64 physical, u64 devid,
u64 **logical, int *naddrs, int *stripe_len)
{
struct extent_map_tree *em_tree = &map_tree->map_tree;
struct extent_map *em;
struct map_lookup *map;
u64 *buf;
u64 bytenr;
u64 length;
u64 stripe_nr;
int i, j, nr = 0;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, chunk_start, 1);
read_unlock(&em_tree->lock);
BUG_ON(!em || em->start != chunk_start);
map = (struct map_lookup *)em->bdev;
length = em->len;
if (map->type & BTRFS_BLOCK_GROUP_RAID10)
do_div(length, map->num_stripes / map->sub_stripes);
else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
do_div(length, map->num_stripes);
buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
BUG_ON(!buf);
for (i = 0; i < map->num_stripes; i++) {
if (devid && map->stripes[i].dev->devid != devid)
continue;
if (map->stripes[i].physical > physical ||
map->stripes[i].physical + length <= physical)
continue;
stripe_nr = physical - map->stripes[i].physical;
do_div(stripe_nr, map->stripe_len);
if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
stripe_nr = stripe_nr * map->num_stripes + i;
do_div(stripe_nr, map->sub_stripes);
} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
stripe_nr = stripe_nr * map->num_stripes + i;
}
bytenr = chunk_start + stripe_nr * map->stripe_len;
WARN_ON(nr >= map->num_stripes);
for (j = 0; j < nr; j++) {
if (buf[j] == bytenr)
break;
}
if (j == nr) {
WARN_ON(nr >= map->num_stripes);
buf[nr++] = bytenr;
}
}
*logical = buf;
*naddrs = nr;
*stripe_len = map->stripe_len;
free_extent_map(em);
return 0;
}
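/*
* end_bio_multi_stripe - completion handler for the cloned per-stripe
* bios. Errors are counted; once the last stripe completes, the
* original bio is ended with -EIO only if more stripes failed than
* max_errors allows.
*/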
static void end_bio_multi_stripe(struct bio *bio, int err)
{
struct btrfs_multi_bio *multi = bio->bi_private;
int is_orig_bio = 0;
if (err)
atomic_inc(&multi->error);
if (bio == multi->orig_bio)
is_orig_bio = 1;
if (atomic_dec_and_test(&multi->stripes_pending)) {
if (!is_orig_bio) {
bio_put(bio);
bio = multi->orig_bio;
}
bio->bi_private = multi->private;
bio->bi_end_io = multi->end_io;
/* only send an error to the higher layers if it is
* beyond the tolerance of the multi-bio
*/
if (atomic_read(&multi->error) > multi->max_errors) {
err = -EIO;
} else if (err) {
/*
* this bio is actually up to date, we didn't
* go over the max number of errors
*/
set_bit(BIO_UPTODATE, &bio->bi_flags);
err = 0;
}
kfree(multi);
bio_endio(bio, err);
} else if (!is_orig_bio) {
bio_put(bio);
}
}
struct async_sched {
struct bio *bio;
int rw;
struct btrfs_fs_info *info;
struct btrfs_work work;
};
/*
* see run_scheduled_bios for a description of why bios are collected for
* async submit.
*
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
static noinline int schedule_bio(struct btrfs_root *root,
struct btrfs_device *device,
int rw, struct bio *bio)
{
int should_queue = 1;
struct btrfs_pending_bios *pending_bios;
/* don't bother with additional async steps for reads, right now */
if (!(rw & REQ_WRITE)) {
bio_get(bio);
submit_bio(rw, bio);
bio_put(bio);
return 0;
}
/*
* nr_async_bios allows us to reliably return congestion to the
* higher layers. Otherwise, the async bio makes it appear we have
* made progress against dirty pages when we've really just put it
* on a queue for later
*/
atomic_inc(&root->fs_info->nr_async_bios);
WARN_ON(bio->bi_next);
bio->bi_next = NULL;
bio->bi_rw |= rw;
spin_lock(&device->io_lock);
if (bio->bi_rw & REQ_SYNC)
pending_bios = &device->pending_sync_bios;
else
pending_bios = &device->pending_bios;
if (pending_bios->tail)
pending_bios->tail->bi_next = bio;
pending_bios->tail = bio;
if (!pending_bios->head)
pending_bios->head = bio;
if (device->running_pending)
should_queue = 0;
spin_unlock(&device->io_lock);
if (should_queue)
btrfs_queue_worker(&root->fs_info->submit_workers,
&device->work);
return 0;
}
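/*
* btrfs_map_bio - map @bio's logical range and submit it to the devices
*
* The range must fit within one stripe. For multi-device profiles the
* bio is cloned once per stripe and each clone is sent to its device,
* either directly or through schedule_bio() for async submission.
*/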
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
int mirror_num, int async_submit)
{
struct btrfs_mapping_tree *map_tree;
struct btrfs_device *dev;
struct bio *first_bio = bio;
u64 logical = (u64)bio->bi_sector << 9;
u64 length = 0;
u64 map_length;
struct btrfs_multi_bio *multi = NULL;
int ret;
int dev_nr = 0;
int total_devs = 1;
length = bio->bi_size;
map_tree = &root->fs_info->mapping_tree;
map_length = length;
ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
mirror_num);
BUG_ON(ret);
total_devs = multi->num_stripes;
if (map_length < length) {
printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
"len %llu\n", (unsigned long long)logical,
(unsigned long long)length,
(unsigned long long)map_length);
BUG();
}
multi->end_io = first_bio->bi_end_io;
multi->private = first_bio->bi_private;
multi->orig_bio = first_bio;
atomic_set(&multi->stripes_pending, multi->num_stripes);
while (dev_nr < total_devs) {
if (total_devs > 1) {
if (dev_nr < total_devs - 1) {
bio = bio_clone(first_bio, GFP_NOFS);
BUG_ON(!bio);
} else {
bio = first_bio;
}
bio->bi_private = multi;
bio->bi_end_io = end_bio_multi_stripe;
}
bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
dev = multi->stripes[dev_nr].dev;
if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
bio->bi_bdev = dev->bdev;
if (async_submit)
schedule_bio(root, dev, rw, bio);
else
submit_bio(rw, bio);
} else {
bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
bio->bi_sector = logical >> 9;
bio_endio(bio, -EIO);
}
dev_nr++;
}
if (total_devs == 1)
kfree(multi);
return 0;
}
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
u8 *uuid, u8 *fsid)
{
struct btrfs_device *device;
struct btrfs_fs_devices *cur_devices;
cur_devices = root->fs_info->fs_devices;
while (cur_devices) {
if (!fsid ||
!memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
device = __find_device(&cur_devices->devices,
devid, uuid);
if (device)
return device;
}
cur_devices = cur_devices->seed;
}
return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
u64 devid, u8 *dev_uuid)
{
struct btrfs_device *device;
struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
device = kzalloc(sizeof(*device), GFP_NOFS);
if (!device)
return NULL;
list_add(&device->dev_list,
&fs_devices->devices);
device->dev_root = root->fs_info->dev_root;
device->devid = devid;
device->work.func = pending_bios_fn;
device->fs_devices = fs_devices;
device->missing = 1;
fs_devices->num_devices++;
fs_devices->missing_devices++;
spin_lock_init(&device->io_lock);
INIT_LIST_HEAD(&device->dev_alloc_list);
memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
return device;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk)
{
struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
struct map_lookup *map;
struct extent_map *em;
u64 logical;
u64 length;
u64 devid;
u8 uuid[BTRFS_UUID_SIZE];
int num_stripes;
int ret;
int i;
logical = key->offset;
length = btrfs_chunk_length(leaf, chunk);
read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
read_unlock(&map_tree->map_tree.lock);
/* already mapped? */
if (em && em->start <= logical && em->start + em->len > logical) {
free_extent_map(em);
return 0;
} else if (em) {
free_extent_map(em);
}
em = alloc_extent_map();
if (!em)
return -ENOMEM;
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
if (!map) {
free_extent_map(em);
return -ENOMEM;
}
em->bdev = (struct block_device *)map;
em->start = logical;
em->len = length;
em->block_start = 0;
em->block_len = em->len;
map->num_stripes = num_stripes;
map->io_width = btrfs_chunk_io_width(leaf, chunk);
map->io_align = btrfs_chunk_io_align(leaf, chunk);
map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
map->type = btrfs_chunk_type(leaf, chunk);
map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
for (i = 0; i < num_stripes; i++) {
map->stripes[i].physical =
btrfs_stripe_offset_nr(leaf, chunk, i);
devid = btrfs_stripe_devid_nr(leaf, chunk, i);
read_extent_buffer(leaf, uuid, (unsigned long)
btrfs_stripe_dev_uuid_nr(chunk, i),
BTRFS_UUID_SIZE);
map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
NULL);
if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
kfree(map);
free_extent_map(em);
return -EIO;
}
if (!map->stripes[i].dev) {
map->stripes[i].dev =
add_missing_dev(root, devid, uuid);
if (!map->stripes[i].dev) {
kfree(map);
free_extent_map(em);
return -EIO;
}
}
map->stripes[i].dev->in_fs_metadata = 1;
}
write_lock(&map_tree->map_tree.lock);
ret = add_extent_mapping(&map_tree->map_tree, em);
write_unlock(&map_tree->map_tree.lock);
BUG_ON(ret);
free_extent_map(em);
return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
struct btrfs_dev_item *dev_item,
struct btrfs_device *device)
{
unsigned long ptr;
device->devid = btrfs_device_id(leaf, dev_item);
device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
device->total_bytes = device->disk_total_bytes;
device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
device->type = btrfs_device_type(leaf, dev_item);
device->io_align = btrfs_device_io_align(leaf, dev_item);
device->io_width = btrfs_device_io_width(leaf, dev_item);
device->sector_size = btrfs_device_sector_size(leaf, dev_item);
ptr = (unsigned long)btrfs_device_uuid(dev_item);
read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
return 0;
}
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
struct btrfs_fs_devices *fs_devices;
int ret;
mutex_lock(&uuid_mutex);
fs_devices = root->fs_info->fs_devices->seed;
while (fs_devices) {
if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
ret = 0;
goto out;
}
fs_devices = fs_devices->seed;
}
fs_devices = find_fsid(fsid);
if (!fs_devices) {
ret = -ENOENT;
goto out;
}
fs_devices = clone_fs_devices(fs_devices);
if (IS_ERR(fs_devices)) {
ret = PTR_ERR(fs_devices);
goto out;
}
ret = __btrfs_open_devices(fs_devices, FMODE_READ,
root->fs_info->bdev_holder);
if (ret)
goto out;
if (!fs_devices->seeding) {
__btrfs_close_devices(fs_devices);
free_fs_devices(fs_devices);
ret = -EINVAL;
goto out;
}
fs_devices->seed = root->fs_info->fs_devices->seed;
root->fs_info->fs_devices->seed = fs_devices;
out:
mutex_unlock(&uuid_mutex);
return ret;
}
static int read_one_dev(struct btrfs_root *root,
struct extent_buffer *leaf,
struct btrfs_dev_item *dev_item)
{
struct btrfs_device *device;
u64 devid;
int ret;
u8 fs_uuid[BTRFS_UUID_SIZE];
u8 dev_uuid[BTRFS_UUID_SIZE];
devid = btrfs_device_id(leaf, dev_item);
read_extent_buffer(leaf, dev_uuid,
(unsigned long)btrfs_device_uuid(dev_item),
BTRFS_UUID_SIZE);
read_extent_buffer(leaf, fs_uuid,
(unsigned long)btrfs_device_fsid(dev_item),
BTRFS_UUID_SIZE);
if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
ret = open_seed_devices(root, fs_uuid);
if (ret && !btrfs_test_opt(root, DEGRADED))
return ret;
}
device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
if (!device || !device->bdev) {
if (!btrfs_test_opt(root, DEGRADED))
return -EIO;
if (!device) {
printk(KERN_WARNING "warning devid %llu missing\n",
(unsigned long long)devid);
device = add_missing_dev(root, devid, dev_uuid);
if (!device)
return -ENOMEM;
} else if (!device->missing) {
/*
* this happens when a device that was properly setup
* in the device info lists suddenly goes bad.
* device->bdev is NULL, and so we have to set
* device->missing to one here
*/
root->fs_info->fs_devices->missing_devices++;
device->missing = 1;
}
}
if (device->fs_devices != root->fs_info->fs_devices) {
BUG_ON(device->writeable);
if (device->generation !=
btrfs_device_generation(leaf, dev_item))
return -EINVAL;
}
fill_device_from_item(leaf, dev_item, device);
device->dev_root = root->fs_info->dev_root;
device->in_fs_metadata = 1;
if (device->writeable)
device->fs_devices->total_rw_bytes += device->total_bytes;
ret = 0;
return ret;
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
struct extent_buffer *sb;
struct btrfs_disk_key *disk_key;
struct btrfs_chunk *chunk;
u8 *ptr;
unsigned long sb_ptr;
int ret = 0;
u32 num_stripes;
u32 array_size;
u32 len = 0;
u32 cur;
struct btrfs_key key;
sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
BTRFS_SUPER_INFO_SIZE);
if (!sb)
return -ENOMEM;
btrfs_set_buffer_uptodate(sb);
btrfs_set_buffer_lockdep_class(sb, 0);
write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
array_size = btrfs_super_sys_array_size(super_copy);
ptr = super_copy->sys_chunk_array;
sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
cur = 0;
while (cur < array_size) {
disk_key = (struct btrfs_disk_key *)ptr;
btrfs_disk_key_to_cpu(&key, disk_key);
len = sizeof(*disk_key); ptr += len;
sb_ptr += len;
cur += len;
if (key.type == BTRFS_CHUNK_ITEM_KEY) {
chunk = (struct btrfs_chunk *)sb_ptr;
ret = read_one_chunk(root, &key, sb, chunk);
if (ret)
break;
num_stripes = btrfs_chunk_num_stripes(sb, chunk);
len = btrfs_chunk_item_size(num_stripes);
} else {
ret = -EIO;
break;
}
ptr += len;
sb_ptr += len;
cur += len;
}
free_extent_buffer(sb);
return ret;
}
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key key;
struct btrfs_key found_key;
int ret;
int slot;
root = root->fs_info->chunk_root;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
/* first we search for all of the device items, and then we
* read in all of the chunk items. This way we can create chunk
* mappings that reference all of the devices that are found
*/
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.offset = 0;
key.type = 0;
again:
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto error;
while (1) {
leaf = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret == 0)
continue;
if (ret < 0)
goto error;
break;
}
btrfs_item_key_to_cpu(leaf, &found_key, slot);
if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
break;
if (found_key.type == BTRFS_DEV_ITEM_KEY) {
struct btrfs_dev_item *dev_item;
dev_item = btrfs_item_ptr(leaf, slot,
struct btrfs_dev_item);
ret = read_one_dev(root, leaf, dev_item);
if (ret)
goto error;
}
} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
struct btrfs_chunk *chunk;
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
ret = read_one_chunk(root, &found_key, leaf, chunk);
if (ret)
goto error;
}
path->slots[0]++;
}
if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
key.objectid = 0;
btrfs_release_path(path);
goto again;
}
ret = 0;
error:
btrfs_free_path(path);
return ret;
}
| gpl-2.0 |
SteveLinCH/linux | drivers/usb/misc/usblcd.c | 1919 | 11979 | /*****************************************************************************
* USBLCD Kernel Driver *
* Version 1.05 *
* (C) 2005 Georges Toth <g.toth@e-biz.lu> *
* *
* This file is licensed under the GPL. See COPYING in the package. *
* Based on usb-skeleton.c 2.0 by Greg Kroah-Hartman (greg@kroah.com) *
* *
* *
* 28.02.05 Complete rewrite of the original usblcd.c driver, *
* based on usb_skeleton.c. *
* This new driver allows more than one USB-LCD to be connected *
* and controlled at once. *
*****************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#define DRIVER_VERSION "USBLCD Driver Version 1.05"
#define USBLCD_MINOR 144
#define IOCTL_GET_HARD_VERSION 1
#define IOCTL_GET_DRV_VERSION 2
static DEFINE_MUTEX(lcd_mutex);
static const struct usb_device_id id_table[] = {
{ .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static DEFINE_MUTEX(open_disc_mutex);
struct usb_lcd {
struct usb_device *udev; /* init: probe_lcd */
struct usb_interface *interface; /* the interface for
this device */
unsigned char *bulk_in_buffer; /* the buffer to receive
data */
size_t bulk_in_size; /* the size of the
receive buffer */
__u8 bulk_in_endpointAddr; /* the address of the
bulk in endpoint */
__u8 bulk_out_endpointAddr; /* the address of the
bulk out endpoint */
struct kref kref;
struct semaphore limit_sem; /* to stop writes at
full throttle from
using up all RAM */
struct usb_anchor submitted; /* URBs to wait for
before suspend */
};
#define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
#define USB_LCD_CONCURRENT_WRITES 5
static struct usb_driver lcd_driver;
static void lcd_delete(struct kref *kref)
{
struct usb_lcd *dev = to_lcd_dev(kref);
usb_put_dev(dev->udev);
kfree(dev->bulk_in_buffer);
kfree(dev);
}
static int lcd_open(struct inode *inode, struct file *file)
{
struct usb_lcd *dev;
struct usb_interface *interface;
int subminor, r;
mutex_lock(&lcd_mutex);
subminor = iminor(inode);
interface = usb_find_interface(&lcd_driver, subminor);
if (!interface) {
mutex_unlock(&lcd_mutex);
printk(KERN_ERR "USBLCD: %s - error, can't find device for minor %d\n",
__func__, subminor);
return -ENODEV;
}
mutex_lock(&open_disc_mutex);
dev = usb_get_intfdata(interface);
if (!dev) {
mutex_unlock(&open_disc_mutex);
mutex_unlock(&lcd_mutex);
return -ENODEV;
}
/* increment our usage count for the device */
kref_get(&dev->kref);
mutex_unlock(&open_disc_mutex);
/* grab a power reference */
r = usb_autopm_get_interface(interface);
if (r < 0) {
kref_put(&dev->kref, lcd_delete);
mutex_unlock(&lcd_mutex);
return r;
}
/* save our object in the file's private structure */
file->private_data = dev;
mutex_unlock(&lcd_mutex);
return 0;
}
static int lcd_release(struct inode *inode, struct file *file)
{
struct usb_lcd *dev;
dev = file->private_data;
if (dev == NULL)
return -ENODEV;
/* decrement the count on our device */
usb_autopm_put_interface(dev->interface);
kref_put(&dev->kref, lcd_delete);
return 0;
}
static ssize_t lcd_read(struct file *file, char __user * buffer,
size_t count, loff_t *ppos)
{
struct usb_lcd *dev;
int retval = 0;
int bytes_read;
dev = file->private_data;
/* do a blocking bulk read to get data from the device */
retval = usb_bulk_msg(dev->udev,
usb_rcvbulkpipe(dev->udev,
dev->bulk_in_endpointAddr),
dev->bulk_in_buffer,
min(dev->bulk_in_size, count),
&bytes_read, 10000);
/* if the read was successful, copy the data to userspace */
if (!retval) {
if (copy_to_user(buffer, dev->bulk_in_buffer, bytes_read))
retval = -EFAULT;
else
retval = bytes_read;
}
return retval;
}
static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct usb_lcd *dev;
u16 bcdDevice;
char buf[30];
dev = file->private_data;
if (dev == NULL)
return -ENODEV;
switch (cmd) {
case IOCTL_GET_HARD_VERSION:
mutex_lock(&lcd_mutex);
bcdDevice = le16_to_cpu((dev->udev)->descriptor.bcdDevice);
sprintf(buf, "%1d%1d.%1d%1d",
(bcdDevice & 0xF000)>>12,
(bcdDevice & 0xF00)>>8,
(bcdDevice & 0xF0)>>4,
(bcdDevice & 0xF));
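/*
* Example (sample value assumed): a bcdDevice of 0x0105 is rendered
* as "01.05", one nibble per digit.
*/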
mutex_unlock(&lcd_mutex);
if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0)
return -EFAULT;
break;
case IOCTL_GET_DRV_VERSION:
strcpy(buf, DRIVER_VERSION);
if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0)
return -EFAULT;
break;
default:
return -ENOTTY;
break;
}
return 0;
}
static void lcd_write_bulk_callback(struct urb *urb)
{
struct usb_lcd *dev;
int status = urb->status;
dev = urb->context;
/* sync/async unlink faults aren't errors */
if (status &&
!(status == -ENOENT ||
status == -ECONNRESET ||
status == -ESHUTDOWN)) {
dev_dbg(&dev->interface->dev,
"nonzero write bulk status received: %d\n", status);
}
/* free up our allocated buffer */
usb_free_coherent(urb->dev, urb->transfer_buffer_length,
urb->transfer_buffer, urb->transfer_dma);
up(&dev->limit_sem);
}
static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
size_t count, loff_t *ppos)
{
struct usb_lcd *dev;
int retval = 0, r;
struct urb *urb = NULL;
char *buf = NULL;
dev = file->private_data;
/* verify that we actually have some data to write */
if (count == 0)
goto exit;
r = down_interruptible(&dev->limit_sem);
if (r < 0)
return -EINTR;
/* create a urb, and a buffer for it, and copy the data to the urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
retval = -ENOMEM;
goto err_no_buf;
}
buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
goto error;
}
if (copy_from_user(buf, user_buffer, count)) {
retval = -EFAULT;
goto error;
}
/* initialize the urb properly */
usb_fill_bulk_urb(urb, dev->udev,
usb_sndbulkpipe(dev->udev,
dev->bulk_out_endpointAddr),
buf, count, lcd_write_bulk_callback, dev);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->submitted);
/* send the data out the bulk port */
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval) {
dev_err(&dev->udev->dev,
"%s - failed submitting write urb, error %d\n",
__func__, retval);
goto error_unanchor;
}
/* release our reference to this urb,
the USB core will eventually free it entirely */
usb_free_urb(urb);
exit:
return count;
error_unanchor:
usb_unanchor_urb(urb);
error:
usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
usb_free_urb(urb);
err_no_buf:
up(&dev->limit_sem);
return retval;
}
static const struct file_operations lcd_fops = {
.owner = THIS_MODULE,
.read = lcd_read,
.write = lcd_write,
.open = lcd_open,
.unlocked_ioctl = lcd_ioctl,
.release = lcd_release,
.llseek = noop_llseek,
};
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with the driver core
*/
static struct usb_class_driver lcd_class = {
.name = "lcd%d",
.fops = &lcd_fops,
.minor_base = USBLCD_MINOR,
};
static int lcd_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_lcd *dev = NULL;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
size_t buffer_size;
int i;
int retval = -ENOMEM;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
dev_err(&interface->dev, "Out of memory\n");
goto error;
}
kref_init(&dev->kref);
sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
init_usb_anchor(&dev->submitted);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;
if (le16_to_cpu(dev->udev->descriptor.idProduct) != 0x0001) {
dev_warn(&interface->dev, "USBLCD model not supported.\n");
retval = -ENODEV;
goto error;
}
/* set up the endpoint information */
/* use only the first bulk-in and bulk-out endpoints */
iface_desc = interface->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (!dev->bulk_in_endpointAddr &&
usb_endpoint_is_bulk_in(endpoint)) {
/* we found a bulk in endpoint */
buffer_size = usb_endpoint_maxp(endpoint);
dev->bulk_in_size = buffer_size;
dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!dev->bulk_in_buffer) {
dev_err(&interface->dev,
"Could not allocate bulk_in_buffer\n");
goto error;
}
}
if (!dev->bulk_out_endpointAddr &&
usb_endpoint_is_bulk_out(endpoint)) {
/* we found a bulk out endpoint */
dev->bulk_out_endpointAddr = endpoint->bEndpointAddress;
}
}
if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) {
dev_err(&interface->dev,
"Could not find both bulk-in and bulk-out endpoints\n");
goto error;
}
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
/* we can register the device now, as it is ready */
retval = usb_register_dev(interface, &lcd_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(&interface->dev,
"Not able to get a minor for this device.\n");
usb_set_intfdata(interface, NULL);
goto error;
}
i = le16_to_cpu(dev->udev->descriptor.bcdDevice);
dev_info(&interface->dev, "USBLCD Version %1d%1d.%1d%1d found "
"at address %d\n", (i & 0xF000)>>12, (i & 0xF00)>>8,
(i & 0xF0)>>4, (i & 0xF), dev->udev->devnum);
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "USB LCD device now attached to USBLCD-%d\n",
interface->minor);
return 0;
error:
if (dev)
kref_put(&dev->kref, lcd_delete);
return retval;
}
static void lcd_draw_down(struct usb_lcd *dev)
{
int time;
time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
if (!time)
usb_kill_anchored_urbs(&dev->submitted);
}
static int lcd_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usb_lcd *dev = usb_get_intfdata(intf);
if (!dev)
return 0;
lcd_draw_down(dev);
return 0;
}
static int lcd_resume(struct usb_interface *intf)
{
return 0;
}
static void lcd_disconnect(struct usb_interface *interface)
{
struct usb_lcd *dev;
int minor = interface->minor;
mutex_lock(&open_disc_mutex);
dev = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
mutex_unlock(&open_disc_mutex);
/* give back our minor */
usb_deregister_dev(interface, &lcd_class);
/* decrement our usage count */
kref_put(&dev->kref, lcd_delete);
dev_info(&interface->dev, "USB LCD #%d now disconnected\n", minor);
}
static struct usb_driver lcd_driver = {
.name = "usblcd",
.probe = lcd_probe,
.disconnect = lcd_disconnect,
.suspend = lcd_suspend,
.resume = lcd_resume,
.id_table = id_table,
.supports_autosuspend = 1,
};
module_usb_driver(lcd_driver);
MODULE_AUTHOR("Georges Toth <g.toth@e-biz.lu>");
MODULE_DESCRIPTION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
| gpl-2.0 |
fbocharov/au-linux-kernel-spring-2016 | linux/drivers/gpu/drm/radeon/uvd_v3_1.c | 2431 | 2120 | /*
* Copyright 2013 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König <christian.koenig@amd.com>
*/
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "nid.h"
/**
* uvd_v3_1_semaphore_emit - emit semaphore command
*
* @rdev: radeon_device pointer
* @ring: radeon_ring pointer
* @semaphore: semaphore to emit commands for
* @emit_wait: true if we should emit a wait command
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
uint64_t addr = semaphore->gpu_addr;
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
return true;
}
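/*
* Illustrative note on the encoding above: the 8-byte-aligned semaphore
* address is split into two 20-bit register writes. Bits [22:3] go to
* UVD_SEMA_ADDR_LOW and bits [42:23] to UVD_SEMA_ADDR_HIGH, covering a
* 43-bit GPU address space.
*/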
| gpl-2.0 |
SomethingExplosive/android_kernel_samsung_tuna | arch/arm/mach-ixp4xx/coyote-pci.c | 2431 | 1607 | /*
* arch/arm/mach-ixp4xx/coyote-pci.c
*
* PCI setup routines for ADI Engineering Coyote platform
*
* Copyright (C) 2002 Jungo Software Technologies.
* Copyright (C) 2003 MontaVista Software, Inc.
*
* Maintainer: Deepak Saxena <dsaxena@mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#define SLOT0_DEVID 14
#define SLOT1_DEVID 15
/* PCI controller GPIO to IRQ pin mappings */
#define SLOT0_INTA 6
#define SLOT1_INTA 11
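/*
* Resulting routing (summary of the defines above): a device in PCI
* slot 14 (SLOT0) interrupts via GPIO 6, and a device in slot 15
* (SLOT1) via GPIO 11; coyote_map_irq() below returns the matching
* IXP4XX_GPIO_IRQ() for each slot.
*/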
void __init coyote_pci_preinit(void)
{
irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT0_INTA), IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT1_INTA), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init coyote_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
if (slot == SLOT0_DEVID)
return IXP4XX_GPIO_IRQ(SLOT0_INTA);
else if (slot == SLOT1_DEVID)
return IXP4XX_GPIO_IRQ(SLOT1_INTA);
else return -1;
}
struct hw_pci coyote_pci __initdata = {
.nr_controllers = 1,
.preinit = coyote_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = coyote_map_irq,
};
int __init coyote_pci_init(void)
{
if (machine_is_adi_coyote())
pci_common_init(&coyote_pci);
return 0;
}
subsys_initcall(coyote_pci_init);
| gpl-2.0 |
flzyup/gnexus_kernel | drivers/media/rc/keymaps/rc-encore-enltv.c | 2943 | 2901 | /* encore-enltv.h - Keytable for encore_enltv Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
/* Encore ENLTV-FM - black plastic, white front cover with white glowing buttons
Juan Pablo Sormani <sorman@gmail.com> */
static struct rc_map_table encore_enltv[] = {
/* The power button does nothing, not even in the Windows app,
although it does send data (used for BIOS wakeup?) */
{ 0x0d, KEY_MUTE },
{ 0x1e, KEY_TV },
{ 0x00, KEY_VIDEO },
{ 0x01, KEY_AUDIO }, /* music */
{ 0x02, KEY_CAMERA }, /* picture */
{ 0x1f, KEY_1 },
{ 0x03, KEY_2 },
{ 0x04, KEY_3 },
{ 0x05, KEY_4 },
{ 0x1c, KEY_5 },
{ 0x06, KEY_6 },
{ 0x07, KEY_7 },
{ 0x08, KEY_8 },
{ 0x1d, KEY_9 },
{ 0x0a, KEY_0 },
{ 0x09, KEY_LIST }, /* -/-- */
{ 0x0b, KEY_LAST }, /* recall */
{ 0x14, KEY_HOME }, /* win start menu */
{ 0x15, KEY_EXIT }, /* exit */
{ 0x16, KEY_CHANNELUP }, /* UP */
{ 0x12, KEY_CHANNELDOWN }, /* DOWN */
{ 0x0c, KEY_VOLUMEUP }, /* RIGHT */
{ 0x17, KEY_VOLUMEDOWN }, /* LEFT */
{ 0x18, KEY_ENTER }, /* OK */
{ 0x0e, KEY_ESC },
{ 0x13, KEY_CYCLEWINDOWS }, /* desktop */
{ 0x11, KEY_TAB },
{ 0x19, KEY_SWITCHVIDEOMODE }, /* switch */
{ 0x1a, KEY_MENU },
{ 0x1b, KEY_ZOOM }, /* fullscreen */
{ 0x44, KEY_TIME }, /* time shift */
{ 0x40, KEY_MODE }, /* source */
{ 0x5a, KEY_RECORD },
{ 0x42, KEY_PLAY }, /* play/pause */
{ 0x45, KEY_STOP },
{ 0x43, KEY_CAMERA }, /* camera icon */
{ 0x48, KEY_REWIND },
{ 0x4a, KEY_FASTFORWARD },
{ 0x49, KEY_PREVIOUS },
{ 0x4b, KEY_NEXT },
{ 0x4c, KEY_FAVORITES }, /* tv wall */
{ 0x4d, KEY_SOUND }, /* DVD sound */
{ 0x4e, KEY_LANGUAGE }, /* DVD lang */
{ 0x4f, KEY_TEXT }, /* DVD text */
{ 0x50, KEY_SLEEP }, /* shutdown */
{ 0x51, KEY_MODE }, /* stereo > main */
{ 0x52, KEY_SELECT }, /* stereo > sap */
{ 0x53, KEY_TEXT }, /* teletext */
{ 0x59, KEY_RED }, /* AP1 */
{ 0x41, KEY_GREEN }, /* AP2 */
{ 0x47, KEY_YELLOW }, /* AP3 */
{ 0x57, KEY_BLUE }, /* AP4 */
};
static struct rc_map_list encore_enltv_map = {
.map = {
.scan = encore_enltv,
.size = ARRAY_SIZE(encore_enltv),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_ENCORE_ENLTV,
}
};
static int __init init_rc_map_encore_enltv(void)
{
return rc_map_register(&encore_enltv_map);
}
static void __exit exit_rc_map_encore_enltv(void)
{
rc_map_unregister(&encore_enltv_map);
}
module_init(init_rc_map_encore_enltv)
module_exit(exit_rc_map_encore_enltv)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
Dee-UK/RK3188_tablet_kernel_sources | drivers/staging/line6/midi.c | 3199 | 10657 | /*
* Line6 Linux USB driver - 0.9.1beta
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*/
#include <linux/slab.h>
#include <linux/usb.h>
#include <sound/core.h>
#include <sound/rawmidi.h>
#include "audio.h"
#include "driver.h"
#include "midi.h"
#include "pod.h"
#include "usbdefs.h"
#define line6_rawmidi_substream_midi(substream) \
((struct snd_line6_midi *)((substream)->rmidi->private_data))
static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
int length);
/*
Pass data received via USB to MIDI.
*/
void line6_midi_receive(struct usb_line6 *line6, unsigned char *data,
int length)
{
if (line6->line6midi->substream_receive)
snd_rawmidi_receive(line6->line6midi->substream_receive,
data, length);
}
/*
Read data from MIDI buffer and transmit them via USB.
*/
static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
{
struct usb_line6 *line6 =
line6_rawmidi_substream_midi(substream)->line6;
struct snd_line6_midi *line6midi = line6->line6midi;
struct MidiBuffer *mb = &line6midi->midibuf_out;
unsigned long flags;
unsigned char chunk[line6->max_packet_size];
int req, done;
spin_lock_irqsave(&line6->line6midi->midi_transmit_lock, flags);
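/* First pass: drain the ALSA rawmidi buffer into our MIDI ring buffer. */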
for (;;) {
req = min(line6_midibuf_bytes_free(mb), line6->max_packet_size);
done = snd_rawmidi_transmit_peek(substream, chunk, req);
if (done == 0)
break;
#ifdef CONFIG_LINE6_USB_DUMP_MIDI
line6_write_hexdump(line6, 's', chunk, done);
#endif
line6_midibuf_write(mb, chunk, done);
snd_rawmidi_transmit_ack(substream, done);
}
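/* Second pass: read back complete messages, skip those filtered by the
transmit mask, and send what remains to the device. */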
for (;;) {
done = line6_midibuf_read(mb, chunk, line6->max_packet_size);
if (done == 0)
break;
if (line6_midibuf_skip_message
(mb, line6midi->midi_mask_transmit))
continue;
send_midi_async(line6, chunk, done);
}
spin_unlock_irqrestore(&line6->line6midi->midi_transmit_lock, flags);
}
/*
Notification of completion of MIDI transmission.
*/
static void midi_sent(struct urb *urb)
{
unsigned long flags;
int status;
int num;
struct usb_line6 *line6 = (struct usb_line6 *)urb->context;
status = urb->status;
kfree(urb->transfer_buffer);
usb_free_urb(urb);
if (status == -ESHUTDOWN)
return;
spin_lock_irqsave(&line6->line6midi->send_urb_lock, flags);
num = --line6->line6midi->num_active_send_urbs;
if (num == 0) {
line6_midi_transmit(line6->line6midi->substream_transmit);
num = line6->line6midi->num_active_send_urbs;
}
if (num == 0)
wake_up(&line6->line6midi->send_wait);
spin_unlock_irqrestore(&line6->line6midi->send_urb_lock, flags);
}
/*
Send an asynchronous MIDI message.
Assumes that line6->line6midi->send_urb_lock is held
(i.e., this function is serialized).
*/
static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
int length)
{
struct urb *urb;
int retval;
unsigned char *transfer_buffer;
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (urb == NULL) {
dev_err(line6->ifcdev, "Out of memory\n");
return -ENOMEM;
}
#ifdef CONFIG_LINE6_USB_DUMP_CTRL
line6_write_hexdump(line6, 'S', data, length);
#endif
transfer_buffer = kmalloc(length, GFP_ATOMIC);
if (transfer_buffer == NULL) {
usb_free_urb(urb);
dev_err(line6->ifcdev, "Out of memory\n");
return -ENOMEM;
}
memcpy(transfer_buffer, data, length);
usb_fill_int_urb(urb, line6->usbdev,
usb_sndbulkpipe(line6->usbdev,
line6->ep_control_write),
transfer_buffer, length, midi_sent, line6,
line6->interval);
urb->actual_length = 0;
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval < 0) {
dev_err(line6->ifcdev, "usb_submit_urb failed\n");
usb_free_urb(urb);
return -EINVAL;
}
++line6->line6midi->num_active_send_urbs;
switch (line6->usbdev->descriptor.idProduct) {
case LINE6_DEVID_BASSPODXT:
case LINE6_DEVID_BASSPODXTLIVE:
case LINE6_DEVID_BASSPODXTPRO:
case LINE6_DEVID_PODXT:
case LINE6_DEVID_PODXTLIVE:
case LINE6_DEVID_PODXTPRO:
case LINE6_DEVID_POCKETPOD:
line6_pod_midi_postprocess((struct usb_line6_pod *)line6, data,
length);
break;
case LINE6_DEVID_VARIAX:
break;
default:
MISSING_CASE;
}
return 0;
}
static int line6_midi_output_open(struct snd_rawmidi_substream *substream)
{
return 0;
}
static int line6_midi_output_close(struct snd_rawmidi_substream *substream)
{
return 0;
}
static void line6_midi_output_trigger(struct snd_rawmidi_substream *substream,
int up)
{
unsigned long flags;
struct usb_line6 *line6 =
line6_rawmidi_substream_midi(substream)->line6;
line6->line6midi->substream_transmit = substream;
spin_lock_irqsave(&line6->line6midi->send_urb_lock, flags);
if (line6->line6midi->num_active_send_urbs == 0)
line6_midi_transmit(substream);
spin_unlock_irqrestore(&line6->line6midi->send_urb_lock, flags);
}
static void line6_midi_output_drain(struct snd_rawmidi_substream *substream)
{
struct usb_line6 *line6 =
line6_rawmidi_substream_midi(substream)->line6;
struct snd_line6_midi *midi = line6->line6midi;
wait_event_interruptible(midi->send_wait,
midi->num_active_send_urbs == 0);
}
static int line6_midi_input_open(struct snd_rawmidi_substream *substream)
{
return 0;
}
static int line6_midi_input_close(struct snd_rawmidi_substream *substream)
{
return 0;
}
static void line6_midi_input_trigger(struct snd_rawmidi_substream *substream,
int up)
{
struct usb_line6 *line6 =
line6_rawmidi_substream_midi(substream)->line6;
if (up)
line6->line6midi->substream_receive = substream;
else
line6->line6midi->substream_receive = NULL;
}
static struct snd_rawmidi_ops line6_midi_output_ops = {
.open = line6_midi_output_open,
.close = line6_midi_output_close,
.trigger = line6_midi_output_trigger,
.drain = line6_midi_output_drain,
};
static struct snd_rawmidi_ops line6_midi_input_ops = {
.open = line6_midi_input_open,
.close = line6_midi_input_close,
.trigger = line6_midi_input_trigger,
};
/*
Cleanup the Line6 MIDI device.
*/
static void line6_cleanup_midi(struct snd_rawmidi *rmidi)
{
}
/* Create a MIDI device */
static int snd_line6_new_midi(struct snd_line6_midi *line6midi)
{
struct snd_rawmidi *rmidi;
int err;
err = snd_rawmidi_new(line6midi->line6->card, "Line6 MIDI", 0, 1, 1,
&rmidi);
if (err < 0)
return err;
rmidi->private_data = line6midi;
rmidi->private_free = line6_cleanup_midi;
strcpy(rmidi->id, line6midi->line6->properties->id);
strcpy(rmidi->name, line6midi->line6->properties->name);
rmidi->info_flags =
SNDRV_RAWMIDI_INFO_OUTPUT |
SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX;
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
&line6_midi_output_ops);
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT,
&line6_midi_input_ops);
return 0;
}
/*
"read" request on "midi_mask_transmit" special file.
*/
static ssize_t midi_get_midi_mask_transmit(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6 *line6 = usb_get_intfdata(interface);
return sprintf(buf, "%d\n", line6->line6midi->midi_mask_transmit);
}
/*
"write" request on "midi_mask" special file.
*/
static ssize_t midi_set_midi_mask_transmit(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6 *line6 = usb_get_intfdata(interface);
unsigned long value;
int ret;
ret = strict_strtoul(buf, 10, &value);
if (ret)
return ret;
line6->line6midi->midi_mask_transmit = value;
return count;
}
/*
"read" request on "midi_mask_receive" special file.
*/
static ssize_t midi_get_midi_mask_receive(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6 *line6 = usb_get_intfdata(interface);
return sprintf(buf, "%d\n", line6->line6midi->midi_mask_receive);
}
/*
"write" request on "midi_mask" special file.
*/
static ssize_t midi_set_midi_mask_receive(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6 *line6 = usb_get_intfdata(interface);
unsigned long value;
int ret;
ret = strict_strtoul(buf, 10, &value);
if (ret)
return ret;
line6->line6midi->midi_mask_receive = value;
return count;
}
static DEVICE_ATTR(midi_mask_transmit, S_IWUSR | S_IRUGO,
midi_get_midi_mask_transmit, midi_set_midi_mask_transmit);
static DEVICE_ATTR(midi_mask_receive, S_IWUSR | S_IRUGO,
midi_get_midi_mask_receive, midi_set_midi_mask_receive);
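/*
* Usage sketch (device path illustrative, not from this driver): the
* masks are exposed as sysfs attributes on the USB interface, e.g.
*
*	cat /sys/bus/usb/devices/.../midi_mask_transmit
*	echo 1 > /sys/bus/usb/devices/.../midi_mask_transmit
*/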
/* MIDI device destructor */
static int snd_line6_midi_free(struct snd_device *device)
{
struct snd_line6_midi *line6midi = device->device_data;
device_remove_file(line6midi->line6->ifcdev,
&dev_attr_midi_mask_transmit);
device_remove_file(line6midi->line6->ifcdev,
&dev_attr_midi_mask_receive);
line6_midibuf_destroy(&line6midi->midibuf_in);
line6_midibuf_destroy(&line6midi->midibuf_out);
return 0;
}
/*
Initialize the Line6 MIDI subsystem.
*/
int line6_init_midi(struct usb_line6 *line6)
{
static struct snd_device_ops midi_ops = {
.dev_free = snd_line6_midi_free,
};
int err;
struct snd_line6_midi *line6midi;
if (!(line6->properties->capabilities & LINE6_BIT_CONTROL)) {
/* skip MIDI initialization and report success */
return 0;
}
line6midi = kzalloc(sizeof(struct snd_line6_midi), GFP_KERNEL);
if (line6midi == NULL)
return -ENOMEM;
err = line6_midibuf_init(&line6midi->midibuf_in, MIDI_BUFFER_SIZE, 0);
if (err < 0)
return err;
err = line6_midibuf_init(&line6midi->midibuf_out, MIDI_BUFFER_SIZE, 1);
if (err < 0)
return err;
line6midi->line6 = line6;
line6midi->midi_mask_transmit = 1;
line6midi->midi_mask_receive = 4;
line6->line6midi = line6midi;
err = snd_device_new(line6->card, SNDRV_DEV_RAWMIDI, line6midi,
&midi_ops);
if (err < 0)
return err;
snd_card_set_dev(line6->card, line6->ifcdev);
err = snd_line6_new_midi(line6midi);
if (err < 0)
return err;
err = device_create_file(line6->ifcdev, &dev_attr_midi_mask_transmit);
if (err < 0)
return err;
err = device_create_file(line6->ifcdev, &dev_attr_midi_mask_receive);
if (err < 0)
return err;
init_waitqueue_head(&line6midi->send_wait);
spin_lock_init(&line6midi->send_urb_lock);
spin_lock_init(&line6midi->midi_transmit_lock);
return 0;
}
| gpl-2.0 |
mdeejay/android_kernel_lge_x3 | fs/ncpfs/symlink.c | 3199 | 4347 | /*
* linux/fs/ncpfs/symlink.c
*
* Code for allowing symbolic links on NCPFS (i.e. NetWare)
* Symbolic links are not supported on native NetWare, so we use an
* infrequently-used flag (Sh) and store a two-word magic header in
* the file to make sure we don't accidentally use a non-link file
* as a link.
*
* When using the NFS namespace, we set the mode to indicate a symlink and
* don't bother with the magic numbers.
*
* from linux/fs/ext2/symlink.c
*
* Copyright (C) 1998-99, Frank A. Vorstenbosch
*
* ncpfs symlink handling code
* NLS support (c) 1999 Petr Vandrovec
* Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
*
*/
#include <asm/uaccess.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include "ncp_fs.h"
/* these magic numbers must appear in the symlink file -- this makes it a bit
more resilient against the magic attributes being set on random files. */
#define NCP_SYMLINK_MAGIC0 cpu_to_le32(0x6c6d7973) /* "symlnk->" */
#define NCP_SYMLINK_MAGIC1 cpu_to_le32(0x3e2d6b6e)
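/* Resulting on-disk layout in "kludge" mode (illustrative):
*
*	offset 0: 73 79 6d 6c 6e 6b 2d 3e ("symlnk->", the two magic words)
*	offset 8: the link target, in the server's character set
*/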
/* ----- read a symbolic link ------------------------------------------ */
static int ncp_symlink_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
int error, length, len;
char *link, *rawlink;
char *buf = kmap(page);
error = -ENOMEM;
rawlink = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
if (!rawlink)
goto fail;
if (ncp_make_open(inode,O_RDONLY))
goto failEIO;
error=ncp_read_kernel(NCP_SERVER(inode),NCP_FINFO(inode)->file_handle,
0,NCP_MAX_SYMLINK_SIZE,rawlink,&length);
ncp_inode_close(inode);
/* Close file handle if no other users... */
ncp_make_closed(inode);
if (error)
goto failEIO;
if (NCP_FINFO(inode)->flags & NCPI_KLUDGE_SYMLINK) {
if (length<NCP_MIN_SYMLINK_SIZE ||
((__le32 *)rawlink)[0]!=NCP_SYMLINK_MAGIC0 ||
((__le32 *)rawlink)[1]!=NCP_SYMLINK_MAGIC1)
goto failEIO;
link = rawlink + 8;
length -= 8;
} else {
link = rawlink;
}
len = NCP_MAX_SYMLINK_SIZE;
error = ncp_vol2io(NCP_SERVER(inode), buf, &len, link, length, 0);
kfree(rawlink);
if (error)
goto fail;
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
return 0;
failEIO:
error = -EIO;
kfree(rawlink);
fail:
SetPageError(page);
kunmap(page);
unlock_page(page);
return error;
}
/*
* symlinks can't do much...
*/
const struct address_space_operations ncp_symlink_aops = {
.readpage = ncp_symlink_readpage,
};
/* ----- create a new symbolic link -------------------------------------- */
int ncp_symlink(struct inode *dir, struct dentry *dentry, const char *symname) {
struct inode *inode;
char *rawlink;
int length, err, i, outlen;
int kludge;
int mode;
__le32 attr;
unsigned int hdr;
DPRINTK("ncp_symlink(dir=%p,dentry=%p,symname=%s)\n",dir,dentry,symname);
if (ncp_is_nfs_extras(NCP_SERVER(dir), NCP_FINFO(dir)->volNumber))
kludge = 0;
else
#ifdef CONFIG_NCPFS_EXTRAS
if (NCP_SERVER(dir)->m.flags & NCP_MOUNT_SYMLINKS)
kludge = 1;
else
#endif
/* EPERM is returned by VFS if symlink procedure does not exist */
return -EPERM;
rawlink = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
if (!rawlink)
return -ENOMEM;
if (kludge) {
mode = 0;
attr = aSHARED | aHIDDEN;
((__le32 *)rawlink)[0]=NCP_SYMLINK_MAGIC0;
((__le32 *)rawlink)[1]=NCP_SYMLINK_MAGIC1;
hdr = 8;
} else {
mode = S_IFLNK | S_IRWXUGO;
attr = 0;
hdr = 0;
}
length = strlen(symname);
/* map to/from the server charset; do not touch upper/lower case, as
the symlink can point outside the ncp filesystem */
outlen = NCP_MAX_SYMLINK_SIZE - hdr;
err = ncp_io2vol(NCP_SERVER(dir), rawlink + hdr, &outlen, symname, length, 0);
if (err)
goto failfree;
outlen += hdr;
err = -EIO;
if (ncp_create_new(dir,dentry,mode,0,attr)) {
goto failfree;
}
inode=dentry->d_inode;
if (ncp_make_open(inode, O_WRONLY))
goto failfree;
if (ncp_write_kernel(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle,
0, outlen, rawlink, &i) || i!=outlen) {
goto fail;
}
ncp_inode_close(inode);
ncp_make_closed(inode);
kfree(rawlink);
return 0;
fail:
ncp_inode_close(inode);
ncp_make_closed(inode);
failfree:
kfree(rawlink);
return err;
}
/* ----- EOF ----- */
| gpl-2.0 |
xboxfanj/android_kernel_oneplus_one | arch/arm/mach-sa1100/pleb.c | 4735 | 2894 | /*
* linux/arch/arm/mach-sa1100/pleb.c
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <mach/hardware.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>
#include <asm/mach/serial_sa1100.h>
#include <mach/irqs.h>
#include "generic.h"
/*
* Ethernet IRQ mappings
*/
#define PLEB_ETH0_P (0x20000300) /* Ethernet 0 in PCMCIA0 IO */
#define PLEB_ETH0_V (0xf6000300)
#define GPIO_ETH0_IRQ GPIO_GPIO(21)
#define GPIO_ETH0_EN GPIO_GPIO(26)
#define IRQ_GPIO_ETH0_IRQ IRQ_GPIO21
static struct resource smc91x_resources[] = {
[0] = DEFINE_RES_MEM(PLEB_ETH0_P, 0x04000000),
#if 0 /* Autoprobe instead, to get rising/falling edge characteristic right */
[1] = DEFINE_RES_IRQ(IRQ_GPIO_ETH0_IRQ),
#endif
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
};
static struct platform_device *devices[] __initdata = {
&smc91x_device,
};
/*
* Pleb's memory map:
* flash memory (typically 4 or 8 MB) is selected by
* the two lowest SA1100 chip-select outputs.
*/
static struct resource pleb_flash_resources[] = {
[0] = DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_8M),
[1] = DEFINE_RES_MEM(SA1100_CS1_PHYS, SZ_8M),
};
static struct mtd_partition pleb_partitions[] = {
{
.name = "blob",
.offset = 0,
.size = 0x00020000,
}, {
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = 0x000e0000,
}, {
.name = "rootfs",
.offset = MTDPART_OFS_APPEND,
.size = 0x00300000,
}
};
static struct flash_platform_data pleb_flash_data = {
.map_name = "cfi_probe",
.parts = pleb_partitions,
.nr_parts = ARRAY_SIZE(pleb_partitions),
};
static void __init pleb_init(void)
{
sa11x0_register_mtd(&pleb_flash_data, pleb_flash_resources,
ARRAY_SIZE(pleb_flash_resources));
platform_add_devices(devices, ARRAY_SIZE(devices));
}
static void __init pleb_map_io(void)
{
sa1100_map_io();
sa1100_register_uart(0, 3);
sa1100_register_uart(1, 1);
GAFR |= (GPIO_UART_TXD | GPIO_UART_RXD);
GPDR |= GPIO_UART_TXD;
GPDR &= ~GPIO_UART_RXD;
PPAR |= PPAR_UPR;
/*
* Fix expansion memory timing for network card
*/
MECR = ((2<<10) | (2<<5) | (2<<0));
/*
* Enable the SMC ethernet controller
*/
GPDR |= GPIO_ETH0_EN; /* set to output */
GPCR = GPIO_ETH0_EN; /* clear MCLK (enable smc) */
GPDR &= ~GPIO_ETH0_IRQ;
irq_set_irq_type(GPIO_ETH0_IRQ, IRQ_TYPE_EDGE_FALLING);
}
MACHINE_START(PLEB, "PLEB")
.map_io = pleb_map_io,
.nr_irqs = SA1100_NR_IRQS,
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = pleb_init,
.restart = sa11x0_restart,
MACHINE_END
| gpl-2.0 |
gwindlord/android_kernel_lenovo_b8000 | arch/arm/mach-omap1/clock.c | 4735 | 13778 | /*
* linux/arch/arm/mach-omap1/clock.c
*
* Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
* Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
*
* Modified to use omap shared clock framework by
* Tony Lindgren <tony@atomide.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <asm/mach-types.h>
#include <plat/cpu.h>
#include <plat/usb.h>
#include <plat/clock.h>
#include <plat/sram.h>
#include <plat/clkdev_omap.h>
#include <mach/hardware.h>
#include "iomap.h"
#include "clock.h"
#include "opp.h"
__u32 arm_idlect1_mask;
struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
/*
* Omap1 specific clock functions
*/
unsigned long omap1_uart_recalc(struct clk *clk)
{
unsigned int val = __raw_readl(clk->enable_reg);
return val & clk->enable_bit ? 48000000 : 12000000;
}
unsigned long omap1_sossi_recalc(struct clk *clk)
{
u32 div = omap_readl(MOD_CONF_CTRL_1);
div = (div >> 17) & 0x7;
div++;
return clk->parent->rate / div;
}
static void omap1_clk_allow_idle(struct clk *clk)
{
struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
if (!(clk->flags & CLOCK_IDLE_CONTROL))
return;
if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
arm_idlect1_mask |= 1 << iclk->idlect_shift;
}
static void omap1_clk_deny_idle(struct clk *clk)
{
struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
if (!(clk->flags & CLOCK_IDLE_CONTROL))
return;
if (iclk->no_idle_count++ == 0)
arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}
static __u16 verify_ckctl_value(__u16 newval)
{
/* This function checks the following limitations set
* by the hardware (all conditions must be true):
* DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
* ARM_CK >= TC_CK
* DSP_CK >= TC_CK
* DSPMMU_CK >= TC_CK
*
* In addition, the following rules are enforced:
* LCD_CK <= TC_CK
* ARMPER_CK <= TC_CK
*
* However, maximum frequencies are not checked for!
*/
__u8 per_exp;
__u8 lcd_exp;
__u8 arm_exp;
__u8 dsp_exp;
__u8 tc_exp;
__u8 dspmmu_exp;
per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;
if (dspmmu_exp < dsp_exp)
dspmmu_exp = dsp_exp;
if (dspmmu_exp > dsp_exp+1)
dspmmu_exp = dsp_exp+1;
if (tc_exp < arm_exp)
tc_exp = arm_exp;
if (tc_exp < dspmmu_exp)
tc_exp = dspmmu_exp;
if (tc_exp > lcd_exp)
lcd_exp = tc_exp;
if (tc_exp > per_exp)
per_exp = tc_exp;
newval &= 0xf000;
newval |= per_exp << CKCTL_PERDIV_OFFSET;
newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
newval |= tc_exp << CKCTL_TCDIV_OFFSET;
newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;
return newval;
}
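/*
* Worked example (illustrative): requesting arm_exp = 2 (ARM_CK = ref/4)
* together with tc_exp = 0 (TC_CK = ref/1) would violate ARM_CK >= TC_CK,
* so tc_exp is raised to 2; lcd_exp and per_exp are then raised to at
* least 2 as well, keeping LCD_CK and ARMPER_CK <= TC_CK.
*/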
static int calc_dsor_exp(struct clk *clk, unsigned long rate)
{
/* Note: If the target frequency is too low, this function will return 4,
* which is an invalid value. The caller must check for this value and act
* accordingly.
*
* Note: This function does not check for the following limitations set
* by the hardware (all conditions must be true):
* DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
* ARM_CK >= TC_CK
* DSP_CK >= TC_CK
* DSPMMU_CK >= TC_CK
*/
unsigned long realrate;
struct clk * parent;
unsigned dsor_exp;
parent = clk->parent;
if (unlikely(parent == NULL))
return -EIO;
realrate = parent->rate;
for (dsor_exp=0; dsor_exp<4; dsor_exp++) {
if (realrate <= rate)
break;
realrate /= 2;
}
return dsor_exp;
}
unsigned long omap1_ckctl_recalc(struct clk *clk)
{
/* Calculate divisor encoded as 2-bit exponent */
int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
return clk->parent->rate / dsor;
}
unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
{
int dsor;
/* Calculate divisor encoded as 2-bit exponent
*
* The clock control bits are in DSP domain,
* so api_ck is needed for access.
* Note that DSP_CKCTL virt addr = phys addr, so
* we must use __raw_readw() instead of omap_readw().
*/
omap1_clk_enable(api_ck_p);
dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
omap1_clk_disable(api_ck_p);
return clk->parent->rate / dsor;
}
/* MPU virtual clock functions */
int omap1_select_table_rate(struct clk *clk, unsigned long rate)
{
/* Find the highest supported frequency <= rate and switch to it */
struct mpu_rate * ptr;
unsigned long dpll1_rate, ref_rate;
dpll1_rate = ck_dpll1_p->rate;
ref_rate = ck_ref_p->rate;
for (ptr = omap1_rate_table; ptr->rate; ptr++) {
if (!(ptr->flags & cpu_mask))
continue;
if (ptr->xtal != ref_rate)
continue;
/* Can check only after xtal frequency check */
if (ptr->rate <= rate)
break;
}
if (!ptr->rate)
return -EINVAL;
/*
* In most cases we should not need to reprogram DPLL.
* Reprogramming the DPLL is tricky, it must be done from SRAM.
*/
omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
ck_dpll1_p->rate = ptr->pll_rate;
return 0;
}
int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
{
int dsor_exp;
u16 regval;
dsor_exp = calc_dsor_exp(clk, rate);
if (dsor_exp > 3)
dsor_exp = -EINVAL;
if (dsor_exp < 0)
return dsor_exp;
regval = __raw_readw(DSP_CKCTL);
regval &= ~(3 << clk->rate_offset);
regval |= dsor_exp << clk->rate_offset;
__raw_writew(regval, DSP_CKCTL);
clk->rate = clk->parent->rate / (1 << dsor_exp);
return 0;
}
long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
int dsor_exp = calc_dsor_exp(clk, rate);
if (dsor_exp < 0)
return dsor_exp;
if (dsor_exp > 3)
dsor_exp = 3;
return clk->parent->rate / (1 << dsor_exp);
}
int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
int dsor_exp;
u16 regval;
dsor_exp = calc_dsor_exp(clk, rate);
if (dsor_exp > 3)
dsor_exp = -EINVAL;
if (dsor_exp < 0)
return dsor_exp;
regval = omap_readw(ARM_CKCTL);
regval &= ~(3 << clk->rate_offset);
regval |= dsor_exp << clk->rate_offset;
regval = verify_ckctl_value(regval);
omap_writew(regval, ARM_CKCTL);
clk->rate = clk->parent->rate / (1 << dsor_exp);
return 0;
}
long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
{
/* Find the highest supported frequency <= rate */
struct mpu_rate * ptr;
long highest_rate;
unsigned long ref_rate;
ref_rate = ck_ref_p->rate;
highest_rate = -EINVAL;
for (ptr = omap1_rate_table; ptr->rate; ptr++) {
if (!(ptr->flags & cpu_mask))
continue;
if (ptr->xtal != ref_rate)
continue;
highest_rate = ptr->rate;
/* Can check only after xtal frequency check */
if (ptr->rate <= rate)
break;
}
return highest_rate;
}
static unsigned calc_ext_dsor(unsigned long rate)
{
unsigned dsor;
/* MCLK and BCLK divisor selection is not linear:
* freq = 96MHz / dsor
*
* Mapping between dsor and the RATIO_SEL register field:
* RATIO_SEL 0..6: dsor = RATIO_SEL + 2 (dsor 2..8)
* RATIO_SEL above 6: dsor = 8 + (RATIO_SEL - 6) * 2
* Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
* cannot be used.
*/
for (dsor = 2; dsor < 96; ++dsor) {
if ((dsor & 1) && dsor > 8)
continue;
if (rate >= 96000000 / dsor)
break;
}
return dsor;
}
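/*
* Worked example (values illustrative): a requested rate of 8 MHz yields
* dsor = 12 (9 and 11 are skipped as unusable odd divisors), so the
* resulting frequency is 96 MHz / 12 = 8 MHz, and omap1_set_ext_clk_rate()
* below encodes RATIO_SEL = (12 - 8) / 2 + 6 = 8.
*/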
/* XXX Only needed on 1510 */
int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
{
unsigned int val;
val = __raw_readl(clk->enable_reg);
if (rate == 12000000)
val &= ~(1 << clk->enable_bit);
else if (rate == 48000000)
val |= (1 << clk->enable_bit);
else
return -EINVAL;
__raw_writel(val, clk->enable_reg);
clk->rate = rate;
return 0;
}
/* External clock (MCLK & BCLK) functions */
int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
{
unsigned dsor;
__u16 ratio_bits;
dsor = calc_ext_dsor(rate);
clk->rate = 96000000 / dsor;
if (dsor > 8)
ratio_bits = ((dsor - 8) / 2 + 6) << 2;
else
ratio_bits = (dsor - 2) << 2;
ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
__raw_writew(ratio_bits, clk->enable_reg);
return 0;
}
int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
{
u32 l;
int div;
unsigned long p_rate;
p_rate = clk->parent->rate;
/* Round towards slower frequency */
div = (p_rate + rate - 1) / rate;
div--;
if (div < 0 || div > 7)
return -EINVAL;
l = omap_readl(MOD_CONF_CTRL_1);
l &= ~(7 << 17);
l |= div << 17;
omap_writel(l, MOD_CONF_CTRL_1);
clk->rate = p_rate / (div + 1);
return 0;
}
long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
return 96000000 / calc_ext_dsor(rate);
}
void omap1_init_ext_clk(struct clk *clk)
{
unsigned dsor;
__u16 ratio_bits;
/* Determine current rate and ensure clock is based on 96MHz APLL */
ratio_bits = __raw_readw(clk->enable_reg) & ~1;
__raw_writew(ratio_bits, clk->enable_reg);
ratio_bits = (ratio_bits & 0xfc) >> 2;
if (ratio_bits > 6)
dsor = (ratio_bits - 6) * 2 + 8;
else
dsor = ratio_bits + 2;
clk->rate = 96000000 / dsor;
}
int omap1_clk_enable(struct clk *clk)
{
int ret = 0;
if (clk->usecount++ == 0) {
if (clk->parent) {
ret = omap1_clk_enable(clk->parent);
if (ret)
goto err;
if (clk->flags & CLOCK_NO_IDLE_PARENT)
omap1_clk_deny_idle(clk->parent);
}
ret = clk->ops->enable(clk);
if (ret) {
if (clk->parent)
omap1_clk_disable(clk->parent);
goto err;
}
}
return ret;
err:
clk->usecount--;
return ret;
}
void omap1_clk_disable(struct clk *clk)
{
if (clk->usecount > 0 && !(--clk->usecount)) {
clk->ops->disable(clk);
if (likely(clk->parent)) {
omap1_clk_disable(clk->parent);
if (clk->flags & CLOCK_NO_IDLE_PARENT)
omap1_clk_allow_idle(clk->parent);
}
}
}
static int omap1_clk_enable_generic(struct clk *clk)
{
__u16 regval16;
__u32 regval32;
if (unlikely(clk->enable_reg == NULL)) {
printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
clk->name);
return -EINVAL;
}
if (clk->flags & ENABLE_REG_32BIT) {
regval32 = __raw_readl(clk->enable_reg);
regval32 |= (1 << clk->enable_bit);
__raw_writel(regval32, clk->enable_reg);
} else {
regval16 = __raw_readw(clk->enable_reg);
regval16 |= (1 << clk->enable_bit);
__raw_writew(regval16, clk->enable_reg);
}
return 0;
}
static void omap1_clk_disable_generic(struct clk *clk)
{
__u16 regval16;
__u32 regval32;
if (clk->enable_reg == NULL)
return;
if (clk->flags & ENABLE_REG_32BIT) {
regval32 = __raw_readl(clk->enable_reg);
regval32 &= ~(1 << clk->enable_bit);
__raw_writel(regval32, clk->enable_reg);
} else {
regval16 = __raw_readw(clk->enable_reg);
regval16 &= ~(1 << clk->enable_bit);
__raw_writew(regval16, clk->enable_reg);
}
}
const struct clkops clkops_generic = {
.enable = omap1_clk_enable_generic,
.disable = omap1_clk_disable_generic,
};
static int omap1_clk_enable_dsp_domain(struct clk *clk)
{
int retval;
retval = omap1_clk_enable(api_ck_p);
if (!retval) {
retval = omap1_clk_enable_generic(clk);
omap1_clk_disable(api_ck_p);
}
return retval;
}
static void omap1_clk_disable_dsp_domain(struct clk *clk)
{
if (omap1_clk_enable(api_ck_p) == 0) {
omap1_clk_disable_generic(clk);
omap1_clk_disable(api_ck_p);
}
}
const struct clkops clkops_dspck = {
.enable = omap1_clk_enable_dsp_domain,
.disable = omap1_clk_disable_dsp_domain,
};
/* XXX SYSC register handling does not belong in the clock framework */
static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
{
int ret;
struct uart_clk *uclk;
ret = omap1_clk_enable_generic(clk);
if (ret == 0) {
/* Set smart idle acknowledgement mode */
uclk = (struct uart_clk *)clk;
omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
uclk->sysc_addr);
}
return ret;
}
/* XXX SYSC register handling does not belong in the clock framework */
static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
{
struct uart_clk *uclk;
/* Set force idle acknowledgement mode */
uclk = (struct uart_clk *)clk;
omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);
omap1_clk_disable_generic(clk);
}
/* XXX SYSC register handling does not belong in the clock framework */
const struct clkops clkops_uart_16xx = {
.enable = omap1_clk_enable_uart_functional_16xx,
.disable = omap1_clk_disable_uart_functional_16xx,
};
long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
{
if (clk->round_rate != NULL)
return clk->round_rate(clk, rate);
return clk->rate;
}
int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
{
int ret = -EINVAL;
if (clk->set_rate)
ret = clk->set_rate(clk, rate);
return ret;
}
/*
* Omap1 clock reset and init functions
*/
#ifdef CONFIG_OMAP_RESET_CLOCKS
void omap1_clk_disable_unused(struct clk *clk)
{
__u32 regval32;
/* Clocks in the DSP domain need api_ck. Just assume the bootloader
 * has not enabled any DSP clocks */
if (clk->enable_reg == DSP_IDLECT2) {
printk(KERN_INFO "Skipping reset check for DSP domain "
"clock \"%s\"\n", clk->name);
return;
}
/* Is the clock already disabled? */
if (clk->flags & ENABLE_REG_32BIT)
regval32 = __raw_readl(clk->enable_reg);
else
regval32 = __raw_readw(clk->enable_reg);
if ((regval32 & (1 << clk->enable_bit)) == 0)
return;
printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
clk->ops->disable(clk);
printk(" done\n");
}
#endif
| gpl-2.0 |
lawnn/kernel10c | sound/firewire/speakers.c | 4991 | 20587 | /*
* OXFW970-based speakers driver
*
* Copyright (c) Clemens Ladisch <clemens@ladisch.de>
* Licensed under the terms of the GNU General Public License, version 2.
*/
#include <linux/device.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <sound/control.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "cmp.h"
#include "fcp.h"
#include "amdtp.h"
#include "lib.h"
#define OXFORD_FIRMWARE_ID_ADDRESS (CSR_REGISTER_BASE + 0x50000)
/* 0x970?vvvv or 0x971?vvvv, where vvvv = firmware version */
#define OXFORD_HARDWARE_ID_ADDRESS (CSR_REGISTER_BASE + 0x90020)
#define OXFORD_HARDWARE_ID_OXFW970 0x39443841
#define OXFORD_HARDWARE_ID_OXFW971 0x39373100
#define VENDOR_GRIFFIN 0x001292
#define VENDOR_LACIE 0x00d04b
#define SPECIFIER_1394TA 0x00a02d
#define VERSION_AVC 0x010001
struct device_info {
const char *driver_name;
const char *short_name;
const char *long_name;
int (*pcm_constraints)(struct snd_pcm_runtime *runtime);
unsigned int mixer_channels;
u8 mute_fb_id;
u8 volume_fb_id;
};
struct fwspk {
struct snd_card *card;
struct fw_unit *unit;
const struct device_info *device_info;
struct snd_pcm_substream *pcm;
struct mutex mutex;
struct cmp_connection connection;
struct amdtp_out_stream stream;
bool stream_running;
bool mute;
s16 volume[6];
s16 volume_min;
s16 volume_max;
};
MODULE_DESCRIPTION("FireWire speakers driver");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL v2");
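/*
 * The FireWave supports 2- or 6-channel streams, but not every
 * rate/channel combination: the two rules below constrain each other
 * (stereo is limited to 48/96 kHz, and 32/44.1 kHz require all six
 * channels), so both are installed as hw rules rather than as a
 * static rate mask.
 */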
static int firewave_rate_constraint(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
static unsigned int stereo_rates[] = { 48000, 96000 };
struct snd_interval *channels =
hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *rate =
hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
/* two channels work only at 48/96 kHz */
if (snd_interval_max(channels) < 6)
return snd_interval_list(rate, 2, stereo_rates, 0);
return 0;
}
static int firewave_channels_constraint(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
static const struct snd_interval all_channels = { .min = 6, .max = 6 };
struct snd_interval *rate =
hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval *channels =
hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
/* 32/44.1 kHz work only with all six channels */
if (snd_interval_max(rate) < 48000)
return snd_interval_refine(channels, &all_channels);
return 0;
}
static int firewave_constraints(struct snd_pcm_runtime *runtime)
{
static unsigned int channels_list[] = { 2, 6 };
static struct snd_pcm_hw_constraint_list channels_list_constraint = {
.count = 2,
.list = channels_list,
};
int err;
runtime->hw.rates = SNDRV_PCM_RATE_32000 |
SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000 |
SNDRV_PCM_RATE_96000;
runtime->hw.channels_max = 6;
err = snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_CHANNELS,
&channels_list_constraint);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
firewave_rate_constraint, NULL,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
firewave_channels_constraint, NULL,
SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
return err;
return 0;
}
static int lacie_speakers_constraints(struct snd_pcm_runtime *runtime)
{
runtime->hw.rates = SNDRV_PCM_RATE_32000 |
SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000 |
SNDRV_PCM_RATE_88200 |
SNDRV_PCM_RATE_96000;
return 0;
}
static int fwspk_open(struct snd_pcm_substream *substream)
{
static const struct snd_pcm_hardware hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_BATCH |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER,
.formats = AMDTP_OUT_PCM_FORMAT_BITS,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 4 * 1024 * 1024,
.period_bytes_min = 1,
.period_bytes_max = UINT_MAX,
.periods_min = 1,
.periods_max = UINT_MAX,
};
struct fwspk *fwspk = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
runtime->hw = hardware;
err = fwspk->device_info->pcm_constraints(runtime);
if (err < 0)
return err;
err = snd_pcm_limit_hw_rates(runtime);
if (err < 0)
return err;
err = snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_PERIOD_TIME,
5000, UINT_MAX);
if (err < 0)
return err;
err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
if (err < 0)
return err;
return 0;
}
static int fwspk_close(struct snd_pcm_substream *substream)
{
return 0;
}
static void fwspk_stop_stream(struct fwspk *fwspk)
{
if (fwspk->stream_running) {
amdtp_out_stream_stop(&fwspk->stream);
cmp_connection_break(&fwspk->connection);
fwspk->stream_running = false;
}
}
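/*
 * Select the sampling frequency with an AV/C INPUT PLUG SIGNAL FORMAT
 * control command. The 8-byte frame below is, roughly: ctype, subunit,
 * opcode, plug number, format (audio/AM824), sfc, and two don't-care
 * SYT bytes; the call succeeds only if the target answers ACCEPTED
 * (0x09).
 */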
static int fwspk_set_rate(struct fwspk *fwspk, unsigned int sfc)
{
u8 *buf;
int err;
buf = kmalloc(8, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf[0] = 0x00; /* AV/C, CONTROL */
buf[1] = 0xff; /* unit */
buf[2] = 0x19; /* INPUT PLUG SIGNAL FORMAT */
buf[3] = 0x00; /* plug 0 */
buf[4] = 0x90; /* format: audio */
buf[5] = 0x00 | sfc; /* AM824, frequency */
buf[6] = 0xff; /* SYT (not used) */
buf[7] = 0xff;
err = fcp_avc_transaction(fwspk->unit, buf, 8, buf, 8,
BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5));
if (err < 0)
goto error;
if (err < 6 || buf[0] != 0x09 /* ACCEPTED */) {
dev_err(&fwspk->unit->device, "failed to set sample rate\n");
err = -EIO;
goto error;
}
err = 0;
error:
kfree(buf);
return err;
}
static int fwspk_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct fwspk *fwspk = substream->private_data;
int err;
mutex_lock(&fwspk->mutex);
fwspk_stop_stream(fwspk);
mutex_unlock(&fwspk->mutex);
err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
params_buffer_bytes(hw_params));
if (err < 0)
goto error;
amdtp_out_stream_set_rate(&fwspk->stream, params_rate(hw_params));
amdtp_out_stream_set_pcm(&fwspk->stream, params_channels(hw_params));
amdtp_out_stream_set_pcm_format(&fwspk->stream,
params_format(hw_params));
err = fwspk_set_rate(fwspk, fwspk->stream.sfc);
if (err < 0)
goto err_buffer;
return 0;
err_buffer:
snd_pcm_lib_free_vmalloc_buffer(substream);
error:
return err;
}
static int fwspk_hw_free(struct snd_pcm_substream *substream)
{
struct fwspk *fwspk = substream->private_data;
mutex_lock(&fwspk->mutex);
fwspk_stop_stream(fwspk);
mutex_unlock(&fwspk->mutex);
return snd_pcm_lib_free_vmalloc_buffer(substream);
}
static int fwspk_prepare(struct snd_pcm_substream *substream)
{
struct fwspk *fwspk = substream->private_data;
int err;
mutex_lock(&fwspk->mutex);
if (amdtp_out_streaming_error(&fwspk->stream))
fwspk_stop_stream(fwspk);
if (!fwspk->stream_running) {
err = cmp_connection_establish(&fwspk->connection,
amdtp_out_stream_get_max_payload(&fwspk->stream));
if (err < 0)
goto err_mutex;
err = amdtp_out_stream_start(&fwspk->stream,
fwspk->connection.resources.channel,
fwspk->connection.speed);
if (err < 0)
goto err_connection;
fwspk->stream_running = true;
}
mutex_unlock(&fwspk->mutex);
amdtp_out_stream_pcm_prepare(&fwspk->stream);
return 0;
err_connection:
cmp_connection_break(&fwspk->connection);
err_mutex:
mutex_unlock(&fwspk->mutex);
return err;
}
static int fwspk_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct fwspk *fwspk = substream->private_data;
struct snd_pcm_substream *pcm;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
pcm = substream;
break;
case SNDRV_PCM_TRIGGER_STOP:
pcm = NULL;
break;
default:
return -EINVAL;
}
amdtp_out_stream_pcm_trigger(&fwspk->stream, pcm);
return 0;
}
static snd_pcm_uframes_t fwspk_pointer(struct snd_pcm_substream *substream)
{
struct fwspk *fwspk = substream->private_data;
return amdtp_out_stream_pcm_pointer(&fwspk->stream);
}
static int fwspk_create_pcm(struct fwspk *fwspk)
{
static struct snd_pcm_ops ops = {
.open = fwspk_open,
.close = fwspk_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = fwspk_hw_params,
.hw_free = fwspk_hw_free,
.prepare = fwspk_prepare,
.trigger = fwspk_trigger,
.pointer = fwspk_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
struct snd_pcm *pcm;
int err;
err = snd_pcm_new(fwspk->card, "OXFW970", 0, 1, 0, &pcm);
if (err < 0)
return err;
pcm->private_data = fwspk;
strcpy(pcm->name, fwspk->device_info->short_name);
fwspk->pcm = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
fwspk->pcm->ops = &ops;
return 0;
}
enum control_action { CTL_READ, CTL_WRITE };
enum control_attribute {
CTL_MIN = 0x02,
CTL_MAX = 0x03,
CTL_CURRENT = 0x10,
};
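/*
 * Read or write the mute control of the device's feature function
 * block over FCP. The same 11-byte frame serves both directions; only
 * the ctype (STATUS vs. CONTROL), the expected response code and the
 * last data byte differ. 0x70 and 0x60 are the AV/C encodings for
 * mute on and off.
 */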
static int fwspk_mute_command(struct fwspk *fwspk, bool *value,
enum control_action action)
{
u8 *buf;
u8 response_ok;
int err;
buf = kmalloc(11, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (action == CTL_READ) {
buf[0] = 0x01; /* AV/C, STATUS */
response_ok = 0x0c; /* STABLE */
} else {
buf[0] = 0x00; /* AV/C, CONTROL */
response_ok = 0x09; /* ACCEPTED */
}
buf[1] = 0x08; /* audio unit 0 */
buf[2] = 0xb8; /* FUNCTION BLOCK */
buf[3] = 0x81; /* function block type: feature */
buf[4] = fwspk->device_info->mute_fb_id; /* function block ID */
buf[5] = 0x10; /* control attribute: current */
buf[6] = 0x02; /* selector length */
buf[7] = 0x00; /* audio channel number */
buf[8] = 0x01; /* control selector: mute */
buf[9] = 0x01; /* control data length */
if (action == CTL_READ)
buf[10] = 0xff;
else
buf[10] = *value ? 0x70 : 0x60;
err = fcp_avc_transaction(fwspk->unit, buf, 11, buf, 11, 0x3fe);
if (err < 0)
goto error;
if (err < 11) {
dev_err(&fwspk->unit->device, "short FCP response\n");
err = -EIO;
goto error;
}
if (buf[0] != response_ok) {
dev_err(&fwspk->unit->device, "mute command failed\n");
err = -EIO;
goto error;
}
if (action == CTL_READ)
*value = buf[10] == 0x70;
err = 0;
error:
kfree(buf);
return err;
}
static int fwspk_volume_command(struct fwspk *fwspk, s16 *value,
unsigned int channel,
enum control_attribute attribute,
enum control_action action)
{
u8 *buf;
u8 response_ok;
int err;
buf = kmalloc(12, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (action == CTL_READ) {
buf[0] = 0x01; /* AV/C, STATUS */
response_ok = 0x0c; /* STABLE */
} else {
buf[0] = 0x00; /* AV/C, CONTROL */
response_ok = 0x09; /* ACCEPTED */
}
buf[1] = 0x08; /* audio unit 0 */
buf[2] = 0xb8; /* FUNCTION BLOCK */
buf[3] = 0x81; /* function block type: feature */
buf[4] = fwspk->device_info->volume_fb_id; /* function block ID */
buf[5] = attribute; /* control attribute */
buf[6] = 0x02; /* selector length */
buf[7] = channel; /* audio channel number */
buf[8] = 0x02; /* control selector: volume */
buf[9] = 0x02; /* control data length */
if (action == CTL_READ) {
buf[10] = 0xff;
buf[11] = 0xff;
} else {
buf[10] = *value >> 8;
buf[11] = *value;
}
err = fcp_avc_transaction(fwspk->unit, buf, 12, buf, 12, 0x3fe);
if (err < 0)
goto error;
if (err < 12) {
dev_err(&fwspk->unit->device, "short FCP response\n");
err = -EIO;
goto error;
}
if (buf[0] != response_ok) {
dev_err(&fwspk->unit->device, "volume command failed\n");
err = -EIO;
goto error;
}
if (action == CTL_READ)
*value = (buf[10] << 8) | buf[11];
err = 0;
error:
kfree(buf);
return err;
}
static int fwspk_mute_get(struct snd_kcontrol *control,
struct snd_ctl_elem_value *value)
{
struct fwspk *fwspk = control->private_data;
value->value.integer.value[0] = !fwspk->mute;
return 0;
}
static int fwspk_mute_put(struct snd_kcontrol *control,
struct snd_ctl_elem_value *value)
{
struct fwspk *fwspk = control->private_data;
bool mute;
int err;
mute = !value->value.integer.value[0];
if (mute == fwspk->mute)
return 0;
err = fwspk_mute_command(fwspk, &mute, CTL_WRITE);
if (err < 0)
return err;
fwspk->mute = mute;
return 1;
}
static int fwspk_volume_info(struct snd_kcontrol *control,
struct snd_ctl_elem_info *info)
{
struct fwspk *fwspk = control->private_data;
info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
info->count = fwspk->device_info->mixer_channels;
info->value.integer.min = fwspk->volume_min;
info->value.integer.max = fwspk->volume_max;
return 0;
}
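/*
 * The device appears to order its channels front L/R, center, LFE,
 * rear L/R, while the ALSA control is in front L/R, rear L/R, center,
 * LFE order; this table translates between the two (inferred from the
 * indices, not from documentation).
 */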
static const u8 channel_map[6] = { 0, 1, 4, 5, 2, 3 };
static int fwspk_volume_get(struct snd_kcontrol *control,
struct snd_ctl_elem_value *value)
{
struct fwspk *fwspk = control->private_data;
unsigned int i;
for (i = 0; i < fwspk->device_info->mixer_channels; ++i)
value->value.integer.value[channel_map[i]] = fwspk->volume[i];
return 0;
}
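/*
 * Writes are complicated by a master control: when every channel is
 * set to the same value, one write to channel 0 (the master) replaces
 * the per-channel writes. changed_channels is a bitmask with bit 0
 * for the master and bits 1..n for the individual channels.
 */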
static int fwspk_volume_put(struct snd_kcontrol *control,
struct snd_ctl_elem_value *value)
{
struct fwspk *fwspk = control->private_data;
unsigned int i, changed_channels;
bool equal_values = true;
s16 volume;
int err;
for (i = 0; i < fwspk->device_info->mixer_channels; ++i) {
if (value->value.integer.value[i] < fwspk->volume_min ||
value->value.integer.value[i] > fwspk->volume_max)
return -EINVAL;
if (value->value.integer.value[i] !=
value->value.integer.value[0])
equal_values = false;
}
changed_channels = 0;
for (i = 0; i < fwspk->device_info->mixer_channels; ++i)
if (value->value.integer.value[channel_map[i]] !=
fwspk->volume[i])
changed_channels |= 1 << (i + 1);
if (equal_values && changed_channels != 0)
changed_channels = 1 << 0;
for (i = 0; i <= fwspk->device_info->mixer_channels; ++i) {
volume = value->value.integer.value[channel_map[i ? i - 1 : 0]];
if (changed_channels & (1 << i)) {
err = fwspk_volume_command(fwspk, &volume, i,
CTL_CURRENT, CTL_WRITE);
if (err < 0)
return err;
}
if (i > 0)
fwspk->volume[i - 1] = volume;
}
return changed_channels != 0;
}
static int fwspk_create_mixer(struct fwspk *fwspk)
{
static const struct snd_kcontrol_new controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Switch",
.info = snd_ctl_boolean_mono_info,
.get = fwspk_mute_get,
.put = fwspk_mute_put,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Volume",
.info = fwspk_volume_info,
.get = fwspk_volume_get,
.put = fwspk_volume_put,
},
};
unsigned int i, first_ch;
int err;
err = fwspk_volume_command(fwspk, &fwspk->volume_min,
0, CTL_MIN, CTL_READ);
if (err < 0)
return err;
err = fwspk_volume_command(fwspk, &fwspk->volume_max,
0, CTL_MAX, CTL_READ);
if (err < 0)
return err;
err = fwspk_mute_command(fwspk, &fwspk->mute, CTL_READ);
if (err < 0)
return err;
first_ch = fwspk->device_info->mixer_channels == 1 ? 0 : 1;
for (i = 0; i < fwspk->device_info->mixer_channels; ++i) {
err = fwspk_volume_command(fwspk, &fwspk->volume[i],
first_ch + i, CTL_CURRENT, CTL_READ);
if (err < 0)
return err;
}
for (i = 0; i < ARRAY_SIZE(controls); ++i) {
err = snd_ctl_add(fwspk->card,
snd_ctl_new1(&controls[i], fwspk));
if (err < 0)
return err;
}
return 0;
}
static u32 fwspk_read_firmware_version(struct fw_unit *unit)
{
__be32 data;
int err;
err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
OXFORD_FIRMWARE_ID_ADDRESS, &data, 4);
return err >= 0 ? be32_to_cpu(data) : 0;
}
static void fwspk_card_free(struct snd_card *card)
{
struct fwspk *fwspk = card->private_data;
amdtp_out_stream_destroy(&fwspk->stream);
cmp_connection_destroy(&fwspk->connection);
fw_unit_put(fwspk->unit);
mutex_destroy(&fwspk->mutex);
}
static const struct device_info *__devinit fwspk_detect(struct fw_device *dev)
{
static const struct device_info griffin_firewave = {
.driver_name = "FireWave",
.short_name = "FireWave",
.long_name = "Griffin FireWave Surround",
.pcm_constraints = firewave_constraints,
.mixer_channels = 6,
.mute_fb_id = 0x01,
.volume_fb_id = 0x02,
};
static const struct device_info lacie_speakers = {
.driver_name = "FWSpeakers",
.short_name = "FireWire Speakers",
.long_name = "LaCie FireWire Speakers",
.pcm_constraints = lacie_speakers_constraints,
.mixer_channels = 1,
.mute_fb_id = 0x01,
.volume_fb_id = 0x01,
};
struct fw_csr_iterator i;
int key, value;
fw_csr_iterator_init(&i, dev->config_rom);
while (fw_csr_iterator_next(&i, &key, &value))
if (key == CSR_VENDOR)
switch (value) {
case VENDOR_GRIFFIN:
return &griffin_firewave;
case VENDOR_LACIE:
return &lacie_speakers;
}
return NULL;
}
static int __devinit fwspk_probe(struct device *unit_dev)
{
struct fw_unit *unit = fw_unit(unit_dev);
struct fw_device *fw_dev = fw_parent_device(unit);
struct snd_card *card;
struct fwspk *fwspk;
u32 firmware;
int err;
err = snd_card_create(-1, NULL, THIS_MODULE, sizeof(*fwspk), &card);
if (err < 0)
return err;
snd_card_set_dev(card, unit_dev);
fwspk = card->private_data;
fwspk->card = card;
mutex_init(&fwspk->mutex);
fwspk->unit = fw_unit_get(unit);
fwspk->device_info = fwspk_detect(fw_dev);
if (!fwspk->device_info) {
err = -ENODEV;
goto err_unit;
}
err = cmp_connection_init(&fwspk->connection, unit, 0);
if (err < 0)
goto err_unit;
err = amdtp_out_stream_init(&fwspk->stream, unit, CIP_NONBLOCKING);
if (err < 0)
goto err_connection;
card->private_free = fwspk_card_free;
strcpy(card->driver, fwspk->device_info->driver_name);
strcpy(card->shortname, fwspk->device_info->short_name);
firmware = fwspk_read_firmware_version(unit);
snprintf(card->longname, sizeof(card->longname),
"%s (OXFW%x %04x), GUID %08x%08x at %s, S%d",
fwspk->device_info->long_name,
firmware >> 20, firmware & 0xffff,
fw_dev->config_rom[3], fw_dev->config_rom[4],
dev_name(&unit->device), 100 << fw_dev->max_speed);
strcpy(card->mixername, "OXFW970");
err = fwspk_create_pcm(fwspk);
if (err < 0)
goto error;
err = fwspk_create_mixer(fwspk);
if (err < 0)
goto error;
err = snd_card_register(card);
if (err < 0)
goto error;
dev_set_drvdata(unit_dev, fwspk);
return 0;
err_connection:
cmp_connection_destroy(&fwspk->connection);
err_unit:
fw_unit_put(fwspk->unit);
mutex_destroy(&fwspk->mutex);
error:
snd_card_free(card);
return err;
}
static int __devexit fwspk_remove(struct device *dev)
{
struct fwspk *fwspk = dev_get_drvdata(dev);
amdtp_out_stream_pcm_abort(&fwspk->stream);
snd_card_disconnect(fwspk->card);
mutex_lock(&fwspk->mutex);
fwspk_stop_stream(fwspk);
mutex_unlock(&fwspk->mutex);
snd_card_free_when_closed(fwspk->card);
return 0;
}
static void fwspk_bus_reset(struct fw_unit *unit)
{
struct fwspk *fwspk = dev_get_drvdata(&unit->device);
fcp_bus_reset(fwspk->unit);
if (cmp_connection_update(&fwspk->connection) < 0) {
amdtp_out_stream_pcm_abort(&fwspk->stream);
mutex_lock(&fwspk->mutex);
fwspk_stop_stream(fwspk);
mutex_unlock(&fwspk->mutex);
return;
}
amdtp_out_stream_update(&fwspk->stream);
}
static const struct ieee1394_device_id fwspk_id_table[] = {
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
IEEE1394_MATCH_MODEL_ID |
IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
.vendor_id = VENDOR_GRIFFIN,
.model_id = 0x00f970,
.specifier_id = SPECIFIER_1394TA,
.version = VERSION_AVC,
},
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
IEEE1394_MATCH_MODEL_ID |
IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
.vendor_id = VENDOR_LACIE,
.model_id = 0x00f970,
.specifier_id = SPECIFIER_1394TA,
.version = VERSION_AVC,
},
{ }
};
MODULE_DEVICE_TABLE(ieee1394, fwspk_id_table);
static struct fw_driver fwspk_driver = {
.driver = {
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.bus = &fw_bus_type,
.probe = fwspk_probe,
.remove = __devexit_p(fwspk_remove),
},
.update = fwspk_bus_reset,
.id_table = fwspk_id_table,
};
static int __init alsa_fwspk_init(void)
{
return driver_register(&fwspk_driver.driver);
}
static void __exit alsa_fwspk_exit(void)
{
driver_unregister(&fwspk_driver.driver);
}
module_init(alsa_fwspk_init);
module_exit(alsa_fwspk_exit);
| gpl-2.0 |
TaichiN/android_kernel_google_msm | arch/mips/cobalt/led.c | 9343 | 1733 | /*
* Registration of Cobalt LED platform device.
*
* Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <cobalt.h>
static struct resource cobalt_led_resource __initdata = {
.start = 0x1c000000,
.end = 0x1c000000,
.flags = IORESOURCE_MEM,
};
static __init int cobalt_led_add(void)
{
struct platform_device *pdev;
int retval;
if (cobalt_board_id == COBALT_BRD_ID_QUBE1 ||
cobalt_board_id == COBALT_BRD_ID_QUBE2)
pdev = platform_device_alloc("cobalt-qube-leds", -1);
else
pdev = platform_device_alloc("cobalt-raq-leds", -1);
if (!pdev)
return -ENOMEM;
retval = platform_device_add_resources(pdev, &cobalt_led_resource, 1);
if (retval)
goto err_free_device;
retval = platform_device_add(pdev);
if (retval)
goto err_free_device;
return 0;
err_free_device:
platform_device_put(pdev);
return retval;
}
device_initcall(cobalt_led_add);
| gpl-2.0 |
sudosurootdev/kernel_lge_lgl24 | arch/mips/math-emu/sp_scalb.c | 10367 | 1464 | /* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754sp.h"
ieee754sp ieee754sp_scalb(ieee754sp x, int n)
{
COMPXSP;
CLEARCX;
EXPLODEXSP;
switch (xc) {
case IEEE754_CLASS_SNAN:
return ieee754sp_nanxcpt(x, "scalb", x, n);
case IEEE754_CLASS_QNAN:
case IEEE754_CLASS_INF:
case IEEE754_CLASS_ZERO:
return x;
case IEEE754_CLASS_DNORM:
SPDNORMX;
break;
case IEEE754_CLASS_NORM:
break;
}
SPNORMRET2(xs, xe + n, xm << 3, "scalb", x, n);
}
ieee754sp ieee754sp_ldexp(ieee754sp x, int n)
{
return ieee754sp_scalb(x, n);
}
| gpl-2.0 |
arrdalan/dfv_android_tuna_kernel | drivers/infiniband/hw/ipath/ipath_wc_ppc64.c | 13439 | 2170 | /*
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file is conditionally built on PowerPC only. Otherwise weak symbol
* versions of the functions exported from here are used.
*/
#include "ipath_kernel.h"
/**
* ipath_enable_wc - enable write combining for MMIO writes to the device
* @dd: infinipath device
*
* Nothing to do on PowerPC, so just return without error.
*/
int ipath_enable_wc(struct ipath_devdata *dd)
{
return 0;
}
/**
* ipath_unordered_wc - indicate whether write combining is unordered
*
* Because our performance depends on our ability to do write
* combining mmio writes in the most efficient way, we need to
* know if we are on a processor that may reorder stores when
* write combining.
*/
int ipath_unordered_wc(void)
{
return 1;
}
| gpl-2.0 |
XileForce/Linaro-LSK | arch/powerpc/xmon/ppc-dis.c | 13695 | 5571 | /* ppc-dis.c -- Disassemble PowerPC instructions
Copyright 1994, 1995, 2000, 2001, 2002, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
Written by Ian Lance Taylor, Cygnus Support
This file is part of GDB, GAS, and the GNU binutils.
GDB, GAS, and the GNU binutils are free software; you can redistribute
them and/or modify them under the terms of the GNU General Public
License as published by the Free Software Foundation; either version
2, or (at your option) any later version.
GDB, GAS, and the GNU binutils are distributed in the hope that they
will be useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this file; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
#include <asm/cputable.h>
#include "nonstdio.h"
#include "ansidecl.h"
#include "ppc.h"
#include "dis-asm.h"
/* Print a PowerPC or POWER instruction. */
int
print_insn_powerpc (unsigned long insn, unsigned long memaddr)
{
const struct powerpc_opcode *opcode;
const struct powerpc_opcode *opcode_end;
unsigned long op;
int dialect;
dialect = PPC_OPCODE_PPC | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON
| PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC;
if (cpu_has_feature(CPU_FTRS_POWER5))
dialect |= PPC_OPCODE_POWER5;
if (cpu_has_feature(CPU_FTRS_CELL))
dialect |= PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC;
if (cpu_has_feature(CPU_FTRS_POWER6))
dialect |= PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC;
/* Get the major opcode of the instruction. */
op = PPC_OP (insn);
/* Find the first match in the opcode table. We could speed this up
a bit by doing a binary search on the major opcode. */
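/* A sketch of that optimization (untested, and assuming the table is
   sorted by major opcode): binary-search powerpc_opcodes for the first
   entry whose PPC_OP() equals op, and start the linear scan there
   instead of at the beginning of the table.  */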
opcode_end = powerpc_opcodes + powerpc_num_opcodes;
again:
for (opcode = powerpc_opcodes; opcode < opcode_end; opcode++)
{
unsigned long table_op;
const unsigned char *opindex;
const struct powerpc_operand *operand;
int invalid;
int need_comma;
int need_paren;
table_op = PPC_OP (opcode->opcode);
if (op < table_op)
break;
if (op > table_op)
continue;
if ((insn & opcode->mask) != opcode->opcode
|| (opcode->flags & dialect) == 0)
continue;
/* Make two passes over the operands. First see if any of them
have extraction functions, and, if they do, make sure the
instruction is valid. */
invalid = 0;
for (opindex = opcode->operands; *opindex != 0; opindex++)
{
operand = powerpc_operands + *opindex;
if (operand->extract)
(*operand->extract) (insn, dialect, &invalid);
}
if (invalid)
continue;
/* The instruction is valid. */
printf("%s", opcode->name);
if (opcode->operands[0] != 0)
printf("\t");
/* Now extract and print the operands. */
need_comma = 0;
need_paren = 0;
for (opindex = opcode->operands; *opindex != 0; opindex++)
{
long value;
operand = powerpc_operands + *opindex;
/* Operands that are marked FAKE are simply ignored. We
already made sure that the extract function considered
the instruction to be valid. */
if ((operand->flags & PPC_OPERAND_FAKE) != 0)
continue;
/* Extract the value from the instruction. */
if (operand->extract)
value = (*operand->extract) (insn, dialect, &invalid);
else
{
value = (insn >> operand->shift) & ((1 << operand->bits) - 1);
if ((operand->flags & PPC_OPERAND_SIGNED) != 0
&& (value & (1 << (operand->bits - 1))) != 0)
value -= 1 << operand->bits;
}
/* If the operand is optional, and the value is zero, don't
print anything. */
if ((operand->flags & PPC_OPERAND_OPTIONAL) != 0
&& (operand->flags & PPC_OPERAND_NEXT) == 0
&& value == 0)
continue;
if (need_comma)
{
printf(",");
need_comma = 0;
}
/* Print the operand as directed by the flags. */
if ((operand->flags & PPC_OPERAND_GPR) != 0
|| ((operand->flags & PPC_OPERAND_GPR_0) != 0 && value != 0))
printf("r%ld", value);
else if ((operand->flags & PPC_OPERAND_FPR) != 0)
printf("f%ld", value);
else if ((operand->flags & PPC_OPERAND_VR) != 0)
printf("v%ld", value);
else if ((operand->flags & PPC_OPERAND_RELATIVE) != 0)
print_address (memaddr + value);
else if ((operand->flags & PPC_OPERAND_ABSOLUTE) != 0)
print_address (value & 0xffffffff);
else if ((operand->flags & PPC_OPERAND_CR) == 0
|| (dialect & PPC_OPCODE_PPC) == 0)
printf("%ld", value);
else
{
if (operand->bits == 3)
printf("cr%ld", value);
else
{
static const char *cbnames[4] = { "lt", "gt", "eq", "so" };
int cr;
int cc;
cr = value >> 2;
if (cr != 0)
printf("4*cr%d+", cr);
cc = value & 3;
printf("%s", cbnames[cc]);
}
}
if (need_paren)
{
printf(")");
need_paren = 0;
}
if ((operand->flags & PPC_OPERAND_PARENS) == 0)
need_comma = 1;
else
{
printf("(");
need_paren = 1;
}
}
/* We have found and printed an instruction; return. */
return 4;
}
if ((dialect & PPC_OPCODE_ANY) != 0)
{
dialect = ~PPC_OPCODE_ANY;
goto again;
}
/* We could not find a match. */
printf(".long 0x%lx", insn);
return 4;
}
| gpl-2.0 |
jcbless/linux-xlnx | drivers/video/fbdev/i810/i810-i2c.c | 13695 | 5076 | /*-*- linux-c -*-
* linux/drivers/video/i810-i2c.c -- Intel 810/815 I2C support
*
* Copyright (C) 2004 Antonino Daplas <adaplas@pol.net>
* All Rights Reserved
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/fb.h>
#include "i810.h"
#include "i810_regs.h"
#include "i810_main.h"
#include "../edid.h"
/* bit locations in the registers */
#define SCL_DIR_MASK 0x0001
#define SCL_DIR 0x0002
#define SCL_VAL_MASK 0x0004
#define SCL_VAL_OUT 0x0008
#define SCL_VAL_IN 0x0010
#define SDA_DIR_MASK 0x0100
#define SDA_DIR 0x0200
#define SDA_VAL_MASK 0x0400
#define SDA_VAL_OUT 0x0800
#define SDA_VAL_IN 0x1000
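/*
 * Each write to a GPIO register carries both new direction/value bits
 * and per-field write-enable masks (the *_MASK bits above); fields
 * whose mask bit is clear are left untouched. The helpers below rely
 * on this to emulate open-drain I2C: a line is driven low by turning
 * it into an output, and released high by reverting it to an input.
 */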
#define DEBUG /* define this for verbose EDID parsing output */
#ifdef DEBUG
#define DPRINTK(fmt, args...) printk(fmt,## args)
#else
#define DPRINTK(fmt, args...)
#endif
static void i810i2c_setscl(void *data, int state)
{
struct i810fb_i2c_chan *chan = data;
struct i810fb_par *par = chan->par;
u8 __iomem *mmio = par->mmio_start_virtual;
if (state)
i810_writel(mmio, chan->ddc_base, SCL_DIR_MASK | SCL_VAL_MASK);
else
i810_writel(mmio, chan->ddc_base, SCL_DIR | SCL_DIR_MASK | SCL_VAL_MASK);
i810_readl(mmio, chan->ddc_base); /* flush posted write */
}
static void i810i2c_setsda(void *data, int state)
{
struct i810fb_i2c_chan *chan = data;
struct i810fb_par *par = chan->par;
u8 __iomem *mmio = par->mmio_start_virtual;
if (state)
i810_writel(mmio, chan->ddc_base, SDA_DIR_MASK | SDA_VAL_MASK);
else
i810_writel(mmio, chan->ddc_base, SDA_DIR | SDA_DIR_MASK | SDA_VAL_MASK);
i810_readl(mmio, chan->ddc_base); /* flush posted write */
}
static int i810i2c_getscl(void *data)
{
struct i810fb_i2c_chan *chan = data;
struct i810fb_par *par = chan->par;
u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(mmio, chan->ddc_base, SCL_DIR_MASK);
i810_writel(mmio, chan->ddc_base, 0);
return ((i810_readl(mmio, chan->ddc_base) & SCL_VAL_IN) != 0);
}
static int i810i2c_getsda(void *data)
{
struct i810fb_i2c_chan *chan = data;
struct i810fb_par *par = chan->par;
u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(mmio, chan->ddc_base, SDA_DIR_MASK);
i810_writel(mmio, chan->ddc_base, 0);
return ((i810_readl(mmio, chan->ddc_base) & SDA_VAL_IN) != 0);
}
static int i810_setup_i2c_bus(struct i810fb_i2c_chan *chan, const char *name)
{
int rc;
strcpy(chan->adapter.name, name);
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = &chan->par->dev->dev;
chan->algo.setsda = i810i2c_setsda;
chan->algo.setscl = i810i2c_setscl;
chan->algo.getsda = i810i2c_getsda;
chan->algo.getscl = i810i2c_getscl;
chan->algo.udelay = 10;
chan->algo.timeout = (HZ/2);
chan->algo.data = chan;
i2c_set_adapdata(&chan->adapter, chan);
/* Raise SCL and SDA */
chan->algo.setsda(chan, 1);
chan->algo.setscl(chan, 1);
udelay(20);
rc = i2c_bit_add_bus(&chan->adapter);
if (rc == 0)
dev_dbg(&chan->par->dev->dev, "I2C bus %s registered.\n",name);
else {
dev_warn(&chan->par->dev->dev, "Failed to register I2C bus "
"%s.\n", name);
chan->par = NULL;
}
return rc;
}
void i810_create_i2c_busses(struct i810fb_par *par)
{
par->chan[0].par = par;
par->chan[1].par = par;
par->chan[2].par = par;
par->chan[0].ddc_base = GPIOA;
i810_setup_i2c_bus(&par->chan[0], "I810-DDC");
par->chan[1].ddc_base = GPIOB;
i810_setup_i2c_bus(&par->chan[1], "I810-I2C");
par->chan[2].ddc_base = GPIOC;
i810_setup_i2c_bus(&par->chan[2], "I810-GPIOC");
}
void i810_delete_i2c_busses(struct i810fb_par *par)
{
if (par->chan[0].par)
i2c_del_adapter(&par->chan[0].adapter);
par->chan[0].par = NULL;
if (par->chan[1].par)
i2c_del_adapter(&par->chan[1].adapter);
par->chan[1].par = NULL;
if (par->chan[2].par)
i2c_del_adapter(&par->chan[2].adapter);
par->chan[2].par = NULL;
}
int i810_probe_i2c_connector(struct fb_info *info, u8 **out_edid, int conn)
{
struct i810fb_par *par = info->par;
u8 *edid = NULL;
DPRINTK("i810-i2c: Probe DDC%i Bus\n", conn+1);
if (conn < par->ddc_num) {
edid = fb_ddc_read(&par->chan[conn].adapter);
} else {
const u8 *e = fb_firmware_edid(info->device);
if (e != NULL) {
DPRINTK("i810-i2c: Getting EDID from BIOS\n");
edid = kmemdup(e, EDID_LENGTH, GFP_KERNEL);
}
}
*out_edid = edid;
return (edid) ? 0 : 1;
}
| gpl-2.0 |
curbthepain/revkernel_us990 | arch/x86/math-emu/fpu_tags.c | 14463 | 2777 | /*---------------------------------------------------------------------------+
| fpu_tags.c |
| |
| Set FPU register tags. |
| |
| Copyright (C) 1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail billm@jacobi.maths.monash.edu.au |
| |
| |
+---------------------------------------------------------------------------*/
#include "fpu_emu.h"
#include "fpu_system.h"
#include "exception.h"
void FPU_pop(void)
{
fpu_tag_word |= 3 << ((top & 7) * 2);
top++;
}
int FPU_gettag0(void)
{
return (fpu_tag_word >> ((top & 7) * 2)) & 3;
}
int FPU_gettagi(int stnr)
{
return (fpu_tag_word >> (((top + stnr) & 7) * 2)) & 3;
}
int FPU_gettag(int regnr)
{
return (fpu_tag_word >> ((regnr & 7) * 2)) & 3;
}
void FPU_settag0(int tag)
{
int regnr = top;
regnr &= 7;
fpu_tag_word &= ~(3 << (regnr * 2));
fpu_tag_word |= (tag & 3) << (regnr * 2);
}
void FPU_settagi(int stnr, int tag)
{
int regnr = stnr + top;
regnr &= 7;
fpu_tag_word &= ~(3 << (regnr * 2));
fpu_tag_word |= (tag & 3) << (regnr * 2);
}
void FPU_settag(int regnr, int tag)
{
regnr &= 7;
fpu_tag_word &= ~(3 << (regnr * 2));
fpu_tag_word |= (tag & 3) << (regnr * 2);
}
int FPU_Special(FPU_REG const *ptr)
{
int exp = exponent(ptr);
if (exp == EXP_BIAS + EXP_UNDER)
return TW_Denormal;
else if (exp != EXP_BIAS + EXP_OVER)
return TW_NaN;
else if ((ptr->sigh == 0x80000000) && (ptr->sigl == 0))
return TW_Infinity;
return TW_NaN;
}
int isNaN(FPU_REG const *ptr)
{
return ((exponent(ptr) == EXP_BIAS + EXP_OVER)
&& !((ptr->sigh == 0x80000000) && (ptr->sigl == 0)));
}
int FPU_empty_i(int stnr)
{
int regnr = (top + stnr) & 7;
return ((fpu_tag_word >> (regnr * 2)) & 3) == TAG_Empty;
}
int FPU_stackoverflow(FPU_REG ** st_new_ptr)
{
*st_new_ptr = &st(-1);
return ((fpu_tag_word >> (((top - 1) & 7) * 2)) & 3) != TAG_Empty;
}
void FPU_copy_to_regi(FPU_REG const *r, u_char tag, int stnr)
{
reg_copy(r, &st(stnr));
FPU_settagi(stnr, tag);
}
void FPU_copy_to_reg1(FPU_REG const *r, u_char tag)
{
reg_copy(r, &st(1));
FPU_settagi(1, tag);
}
void FPU_copy_to_reg0(FPU_REG const *r, u_char tag)
{
int regnr = top;
regnr &= 7;
reg_copy(r, &st(0));
fpu_tag_word &= ~(3 << (regnr * 2));
fpu_tag_word |= (tag & 3) << (regnr * 2);
}
| gpl-2.0 |
AndreyPopovNew/asuswrt-merlin-rt-n | release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/pcmcia/ibmtr_cs.c | 128 | 11760 | /*======================================================================
A PCMCIA token-ring driver for IBM-based cards
This driver supports the IBM PCMCIA Token-Ring Card.
Written by Steve Kipisz, kipisz@vnet.ibm.com or
bungy@ibm.net
Written 1995,1996.
This code is based on pcnet_cs.c from David Hinds.
V2.2.0 February 1999 - Mike Phillips phillim@amtrak.com
Linux V2.2.x presented significant changes to the underlying
ibmtr.c code. Mainly the code became a lot more organized and
modular.
This caused the old PCMCIA Token Ring driver to give up and go
home early. Instead of just patching the old code to make it
work, the PCMCIA code has been streamlined, updated and possibly
improved.
This code now contains only the parts required for Card Services.
All we do here is set the card up enough so that the real ibmtr.c
driver can find it and work with it properly.
i.e. We set up the io port, irq, mmio memory and shared ram
memory. This enables ibmtr_probe in ibmtr.c to find the card and
configure it as though it was a normal ISA and/or PnP card.
CHANGES
v2.2.5 April 1999 Mike Phillips (phillim@amtrak.com)
Obscure bug fix, required changes to ibmtr.c not ibmtr_cs.c
v2.2.7 May 1999 Mike Phillips (phillim@amtrak.com)
Updated to version 2.2.7 to match the first version of the kernel
that the modification to ibmtr.c were incorporated into.
v2.2.17 July 2000 Burt Silverman (burts@us.ibm.com)
Address translation feature of PCMCIA controller is usable so
memory windows can be placed in high memory (meaning above
0xFFFFF).
======================================================================*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/ibmtr.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#define PCMCIA
#include "../tokenring/ibmtr.c"
/*====================================================================*/
/* Parameters that can be set with 'insmod' */
/* MMIO base address */
static u_long mmiobase = 0xce000;
/* SRAM base address */
static u_long srambase = 0xd0000;
/* SRAM size 8,16,32,64 */
static u_long sramsize = 64;
/* Ringspeed 4,16 */
static int ringspeed = 16;
module_param(mmiobase, ulong, 0);
module_param(srambase, ulong, 0);
module_param(sramsize, ulong, 0);
module_param(ringspeed, int, 0);
MODULE_LICENSE("GPL");
/*====================================================================*/
static int ibmtr_config(struct pcmcia_device *link);
static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase);
static void ibmtr_release(struct pcmcia_device *link);
static void ibmtr_detach(struct pcmcia_device *p_dev);
/*====================================================================*/
typedef struct ibmtr_dev_t {
struct pcmcia_device *p_dev;
struct net_device *dev;
window_handle_t sram_win_handle;
struct tok_info *ti;
} ibmtr_dev_t;
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strcpy(info->driver, "ibmtr_cs");
}
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
};
static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) {
ibmtr_dev_t *info = dev_id;
struct net_device *dev = info->dev;
return tok_interrupt(irq, dev);
};
/*======================================================================
ibmtr_attach() creates an "instance" of the driver, allocating
local data structures for one device. The device is registered
with Card Services.
======================================================================*/
static int __devinit ibmtr_attach(struct pcmcia_device *link)
{
ibmtr_dev_t *info;
struct net_device *dev;
dev_dbg(&link->dev, "ibmtr_attach()\n");
/* Create new token-ring device */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) return -ENOMEM;
dev = alloc_trdev(sizeof(struct tok_info));
if (!dev) {
kfree(info);
return -ENOMEM;
}
info->p_dev = link;
link->priv = info;
info->ti = netdev_priv(dev);
link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
link->resource[0]->end = 4;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
info->dev = dev;
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
return ibmtr_config(link);
} /* ibmtr_attach */
/*======================================================================
This deletes a driver "instance". The device is de-registered
with Card Services. If it has been released, all local data
structures are freed. Otherwise, the structures will be freed
when the device is released.
======================================================================*/
static void ibmtr_detach(struct pcmcia_device *link)
{
struct ibmtr_dev_t *info = link->priv;
struct net_device *dev = info->dev;
struct tok_info *ti = netdev_priv(dev);
dev_dbg(&link->dev, "ibmtr_detach\n");
/*
* When the card removal interrupt hits tok_interrupt(),
* bail out early, so we don't crash the machine
*/
ti->sram_phys |= 1;
unregister_netdev(dev);
del_timer_sync(&(ti->tr_timer));
ibmtr_release(link);
free_netdev(dev);
kfree(info);
} /* ibmtr_detach */
/*======================================================================
ibmtr_config() is scheduled to run after a CARD_INSERTION event
is received, to configure the PCMCIA socket, and to make the
token-ring device available to the system.
======================================================================*/
static int __devinit ibmtr_config(struct pcmcia_device *link)
{
ibmtr_dev_t *info = link->priv;
struct net_device *dev = info->dev;
struct tok_info *ti = netdev_priv(dev);
win_req_t req;
int i, ret;
dev_dbg(&link->dev, "ibmtr_config\n");
link->conf.ConfigIndex = 0x61;
link->io_lines = 16;
/* Determine if this is PRIMARY or ALTERNATE. */
/* Try PRIMARY card at 0xA20-0xA23 */
link->resource[0]->start = 0xA20;
i = pcmcia_request_io(link);
if (i != 0) {
/* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */
link->resource[0]->start = 0xA24;
ret = pcmcia_request_io(link);
if (ret)
goto failed;
}
dev->base_addr = link->resource[0]->start;
ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt);
if (ret)
goto failed;
dev->irq = link->irq;
ti->irq = link->irq;
ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
/* Allocate the MMIO memory window */
req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
req.Attributes |= WIN_USE_WAIT;
req.Base = 0;
req.Size = 0x2000;
req.AccessSpeed = 250;
ret = pcmcia_request_window(link, &req, &link->win);
if (ret)
goto failed;
ret = pcmcia_map_mem_page(link, link->win, mmiobase);
if (ret)
goto failed;
ti->mmio = ioremap(req.Base, req.Size);
/* Allocate the SRAM memory window */
req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
req.Attributes |= WIN_USE_WAIT;
req.Base = 0;
req.Size = sramsize * 1024;
req.AccessSpeed = 250;
ret = pcmcia_request_window(link, &req, &info->sram_win_handle);
if (ret)
goto failed;
ret = pcmcia_map_mem_page(link, info->sram_win_handle, srambase);
if (ret)
goto failed;
ti->sram_base = srambase >> 12;
ti->sram_virt = ioremap(req.Base, req.Size);
ti->sram_phys = req.Base;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
/* Set up the Token-Ring Controller Configuration Register and
turn on the card. Check the "Local Area Network Credit Card
Adapters Technical Reference" SC30-3585 for this info. */
ibmtr_hw_setup(dev, mmiobase);
SET_NETDEV_DEV(dev, &link->dev);
i = ibmtr_probe_card(dev);
if (i != 0) {
printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
goto failed;
}
printk(KERN_INFO
"%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
dev->name, dev->base_addr, dev->irq,
(u_long)ti->mmio, (u_long)(ti->sram_base << 12),
dev->dev_addr);
return 0;
failed:
ibmtr_release(link);
return -ENODEV;
} /* ibmtr_config */
/*======================================================================
After a card is removed, ibmtr_release() will unregister the net
device, and release the PCMCIA configuration. If the device is
still open, this will be postponed until it is closed.
======================================================================*/
static void ibmtr_release(struct pcmcia_device *link)
{
ibmtr_dev_t *info = link->priv;
struct net_device *dev = info->dev;
dev_dbg(&link->dev, "ibmtr_release\n");
if (link->win) {
struct tok_info *ti = netdev_priv(dev);
iounmap(ti->mmio);
}
pcmcia_disable_device(link);
}
static int ibmtr_suspend(struct pcmcia_device *link)
{
ibmtr_dev_t *info = link->priv;
struct net_device *dev = info->dev;
if (link->open)
netif_device_detach(dev);
return 0;
}
static int __devinit ibmtr_resume(struct pcmcia_device *link)
{
ibmtr_dev_t *info = link->priv;
struct net_device *dev = info->dev;
if (link->open) {
ibmtr_probe(dev); /* really? */
netif_device_attach(dev);
}
return 0;
}
/*====================================================================*/
static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase)
{
int i;
/* Bizarre IBM behavior: there are 16 bits of information we
need to set, but the card only allows us to send 4 bits at a
time. For each byte sent to base_addr, bits 7-4 tell the
card which part of the 16 bits we are setting, bits 3-0 contain
the actual information */
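/* As a hand-worked example (not from the reference manual): with the
   default mmiobase of 0xce000, sramsize of 64 and ringspeed of 16 on
   the PRIMARY port, the four writes below are 0x0c, 0x1e, 0x26 and
   0x3e. */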
/* First nibble provides 4 bits of mmio */
i = (mmiobase >> 16) & 0x0F;
outb(i, dev->base_addr);
/* Second nibble provides 3 bits of mmio */
i = 0x10 | ((mmiobase >> 12) & 0x0E);
outb(i, dev->base_addr);
/* Third nibble, hard-coded values */
i = 0x26;
outb(i, dev->base_addr);
/* Fourth nibble sets shared ram page size */
/* 8 = 00, 16 = 01, 32 = 10, 64 = 11 */
i = (sramsize >> 4) & 0x07;
i = ((i == 4) ? 3 : i) << 2;
i |= 0x30;
if (ringspeed == 16)
i |= 2;
if (dev->base_addr == 0xA24)
i |= 1;
outb(i, dev->base_addr);
/* 0x40 will release the card for use */
outb(0x40, dev->base_addr);
}
static struct pcmcia_device_id ibmtr_ids[] = {
PCMCIA_DEVICE_PROD_ID12("3Com", "TokenLink Velocity PC Card", 0x41240e5b, 0x82c3734e),
PCMCIA_DEVICE_PROD_ID12("IBM", "TOKEN RING", 0xb569a6e5, 0xbf8eed47),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, ibmtr_ids);
static struct pcmcia_driver ibmtr_cs_driver = {
.owner = THIS_MODULE,
.drv = {
.name = "ibmtr_cs",
},
.probe = ibmtr_attach,
.remove = ibmtr_detach,
.id_table = ibmtr_ids,
.suspend = ibmtr_suspend,
.resume = ibmtr_resume,
};
static int __init init_ibmtr_cs(void)
{
return pcmcia_register_driver(&ibmtr_cs_driver);
}
static void __exit exit_ibmtr_cs(void)
{
pcmcia_unregister_driver(&ibmtr_cs_driver);
}
module_init(init_ibmtr_cs);
module_exit(exit_ibmtr_cs);
| gpl-2.0 |
iamroot12b/kernel | drivers/staging/comedi/drivers/ni_labpc_pci.c | 128 | 3629 | /*
* comedi/drivers/ni_labpc_pci.c
* Driver for National Instruments Lab-PC PCI-1200
* Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Driver: ni_labpc_pci
* Description: National Instruments Lab-PC PCI-1200
* Devices: [National Instruments] PCI-1200 (ni_pci-1200)
* Author: Frank Mori Hess <fmhess@users.sourceforge.net>
* Status: works
*
* This is the PCI-specific support split off from the ni_labpc driver.
*
* Configuration Options: not applicable, uses PCI auto config
*
* NI manuals:
* 340914a (pci-1200)
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include "../comedidev.h"
#include "ni_labpc.h"
enum labpc_pci_boardid {
BOARD_NI_PCI1200,
};
static const struct labpc_boardinfo labpc_pci_boards[] = {
[BOARD_NI_PCI1200] = {
.name = "ni_pci-1200",
.ai_speed = 10000,
.ai_scan_up = 1,
.has_ao = 1,
.is_labpc1200 = 1,
},
};
/* ripped from mite.h and mite_setup2() to avoid mite dependency */
#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
#define WENAB (1 << 7) /* window enable */
static int labpc_pci_mite_init(struct pci_dev *pcidev)
{
void __iomem *mite_base;
u32 main_phys_addr;
/* ioremap the MITE registers (BAR 0) temporarily */
mite_base = pci_ioremap_bar(pcidev, 0);
if (!mite_base)
return -ENOMEM;
/* set data window to main registers (BAR 1) */
main_phys_addr = pci_resource_start(pcidev, 1);
writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
/* finished with MITE registers */
iounmap(mite_base);
return 0;
}
static int labpc_pci_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct labpc_boardinfo *board = NULL;
int ret;
if (context < ARRAY_SIZE(labpc_pci_boards))
board = &labpc_pci_boards[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
ret = labpc_pci_mite_init(pcidev);
if (ret)
return ret;
dev->mmio = pci_ioremap_bar(pcidev, 1);
if (!dev->mmio)
return -ENOMEM;
return labpc_common_attach(dev, pcidev->irq, IRQF_SHARED);
}
static struct comedi_driver labpc_pci_comedi_driver = {
.driver_name = "labpc_pci",
.module = THIS_MODULE,
.auto_attach = labpc_pci_auto_attach,
.detach = comedi_pci_detach,
};
static const struct pci_device_id labpc_pci_table[] = {
{ PCI_VDEVICE(NI, 0x161), BOARD_NI_PCI1200 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, labpc_pci_table);
static int labpc_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &labpc_pci_comedi_driver,
id->driver_data);
}
static struct pci_driver labpc_pci_driver = {
.name = "labpc_pci",
.id_table = labpc_pci_table,
.probe = labpc_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(labpc_pci_comedi_driver, labpc_pci_driver);
MODULE_DESCRIPTION("Comedi: National Instruments Lab-PC PCI-1200 driver");
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_LICENSE("GPL");
| gpl-2.0 |
leitec/openwrt-leitec | target/linux/ramips/files/arch/mips/ralink/rt288x/mach-wzr-agl300nh.c | 128 | 1851 | /*
* Buffalo WZR-AGL300NH board support
*
* Copyright (C) 2010 Joonas Lahtinen <joonas.lahtinen@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <asm/mach-ralink/machine.h>
#include <asm/mach-ralink/rt288x.h>
#include <asm/mach-ralink/rt288x_regs.h>
#include "devices.h"
/*
* MTD layout from stock firmware:
* mtd0: 00030000 00010000 "uboot"
* mtd1: 00010000 00010000 "uboot_environ"
* mtd2: 00010000 00010000 "factory_default"
* mtd3: 000b0000 00010000 "linux"
* mtd4: 002f0000 00010000 "rootfs"
* mtd5: 00010000 00010000 "user_property"
*/
static struct mtd_partition wzr_agl300nh_partitions[] = {
{
.name = "uboot",
.offset = 0,
.size = 0x030000,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "uboot_environ",
.offset = 0x030000,
.size = 0x010000,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "factory_default",
.offset = 0x040000,
.size = 0x010000,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "linux",
.offset = 0x050000,
.size = 0x0b0000,
}, {
.name = "rootfs",
.offset = 0x100000,
.size = 0x2f0000,
}, {
.name = "user_property",
.offset = 0x3f0000,
.size = 0x010000,
}
};
static void __init wzr_agl300nh_init(void)
{
rt288x_gpio_init(RT2880_GPIO_MODE_UART0);
rt288x_flash0_data.nr_parts = ARRAY_SIZE(wzr_agl300nh_partitions);
rt288x_flash0_data.parts = wzr_agl300nh_partitions;
rt288x_register_flash(0);
rt288x_register_wifi();
rt288x_register_wdt();
}
MIPS_MACHINE(RAMIPS_MACH_WZR_AGL300NH, "WZR-AGL300NH",
"Buffalo WZR-AGL300NH", wzr_agl300nh_init);
| gpl-2.0 |
poitee/Grouper | drivers/hwmon/pmbus/ucd9200.c | 384 | 5640 | /*
* Hardware monitoring driver for ucd9200 series Digital PWM System Controllers
*
* Copyright (C) 2011 Ericsson AB.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c/pmbus.h>
#include "pmbus.h"
#define UCD9200_PHASE_INFO 0xd2
#define UCD9200_DEVICE_ID 0xfd
enum chips { ucd9200, ucd9220, ucd9222, ucd9224, ucd9240, ucd9244, ucd9246,
ucd9248 };
static const struct i2c_device_id ucd9200_id[] = {
{"ucd9200", ucd9200},
{"ucd9220", ucd9220},
{"ucd9222", ucd9222},
{"ucd9224", ucd9224},
{"ucd9240", ucd9240},
{"ucd9244", ucd9244},
{"ucd9246", ucd9246},
{"ucd9248", ucd9248},
{}
};
MODULE_DEVICE_TABLE(i2c, ucd9200_id);
static int ucd9200_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
struct pmbus_driver_info *info;
const struct i2c_device_id *mid;
int i, j, ret;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA))
return -ENODEV;
ret = i2c_smbus_read_block_data(client, UCD9200_DEVICE_ID,
block_buffer);
if (ret < 0) {
dev_err(&client->dev, "Failed to read device ID\n");
return ret;
}
block_buffer[ret] = '\0';
dev_info(&client->dev, "Device ID %s\n", block_buffer);
for (mid = ucd9200_id; mid->name[0]; mid++) {
if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
break;
}
if (!mid->name[0]) {
dev_err(&client->dev, "Unsupported device\n");
return -ENODEV;
}
if (id->driver_data != ucd9200 && id->driver_data != mid->driver_data)
dev_notice(&client->dev,
"Device mismatch: Configured %s, detected %s\n",
id->name, mid->name);
info = kzalloc(sizeof(struct pmbus_driver_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
ret = i2c_smbus_read_block_data(client, UCD9200_PHASE_INFO,
block_buffer);
if (ret < 0) {
dev_err(&client->dev, "Failed to read phase information\n");
goto out;
}
/*
* Calculate number of configured pages (rails) from PHASE_INFO
* register.
* Rails have to be sequential, so we can abort after finding
* the first unconfigured rail.
*/
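/*
 * For example, a PHASE_INFO response of { 2, 2, 1, 0, ... } describes
 * three rails (with 2, 2 and 1 phases) and leaves info->pages == 3.
 */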
info->pages = 0;
for (i = 0; i < ret; i++) {
if (!block_buffer[i])
break;
info->pages++;
}
if (!info->pages) {
dev_err(&client->dev, "No rails configured\n");
ret = -ENODEV;
goto out;
}
dev_info(&client->dev, "%d rails configured\n", info->pages);
/*
* Set PHASE registers on all pages to 0xff to ensure that phase
* specific commands will apply to all phases of a given page (rail).
* This only affects the READ_IOUT and READ_TEMPERATURE2 registers.
* READ_IOUT will return the sum of currents of all phases of a rail,
* and READ_TEMPERATURE2 will return the maximum temperature detected
 * for the phases of the rail.
*/
for (i = 0; i < info->pages; i++) {
/*
* Setting PAGE & PHASE fails once in a while for no obvious
* reason, so we need to retry a couple of times.
*/
for (j = 0; j < 3; j++) {
ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
if (ret < 0)
continue;
ret = i2c_smbus_write_byte_data(client, PMBUS_PHASE,
0xff);
if (ret < 0)
continue;
break;
}
if (ret < 0) {
dev_err(&client->dev,
"Failed to initialize PHASE registers\n");
goto out;
}
}
if (info->pages > 1)
i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
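	/* Page 0 exposes the input-side sensors in addition to its rail
	 * readings; the remaining pages only report their own rail's
	 * output and temperature values. */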
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT |
PMBUS_HAVE_IIN | PMBUS_HAVE_PIN |
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP |
PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
for (i = 1; i < info->pages; i++)
info->func[i] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_POUT |
PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
/* ucd9240 supports a single fan */
if (mid->driver_data == ucd9240)
info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12;
ret = pmbus_do_probe(client, mid, info);
if (ret < 0)
goto out;
return 0;
out:
kfree(info);
return ret;
}
static int ucd9200_remove(struct i2c_client *client)
{
int ret;
const struct pmbus_driver_info *info;
info = pmbus_get_driver_info(client);
ret = pmbus_do_remove(client);
kfree(info);
return ret;
}
/* Driver structure registered with the I2C core */
static struct i2c_driver ucd9200_driver = {
.driver = {
.name = "ucd9200",
},
.probe = ucd9200_probe,
.remove = ucd9200_remove,
.id_table = ucd9200_id,
};
static int __init ucd9200_init(void)
{
return i2c_add_driver(&ucd9200_driver);
}
static void __exit ucd9200_exit(void)
{
i2c_del_driver(&ucd9200_driver);
}
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for TI UCD922x, UCD924x");
MODULE_LICENSE("GPL");
module_init(ucd9200_init);
module_exit(ucd9200_exit);
| gpl-2.0 |
Epirex/Samsung_STE_Kernel | net/bluetooth/l2cap_sock.c | 640 | 23372 | /*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP sockets. */
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
static const struct proto_ops l2cap_sock_ops;
static void l2cap_sock_init(struct sock *sk, struct sock *parent);
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct sockaddr_l2 la;
int len, err = 0;
BT_DBG("sk %p", sk);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
memset(&la, 0, sizeof(la));
len = min_t(unsigned int, sizeof(la), alen);
memcpy(&la, addr, len);
if (la.l2_cid && la.l2_psm)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
if (la.l2_psm) {
__u16 psm = __le16_to_cpu(la.l2_psm);
/* PSM must be odd and lsb of upper byte must be 0 */
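		/* e.g. 0x1001 passes, while 0x1002 (even) or 0x0101
		 * (upper-byte lsb set) are rejected */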
if ((psm & 0x0101) != 0x0001) {
err = -EINVAL;
goto done;
}
/* Restrict usage of well-known PSMs */
if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto done;
}
}
if (la.l2_cid)
err = l2cap_add_scid(chan, la.l2_cid);
else
err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
if (err < 0)
goto done;
if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
__le16_to_cpu(la.l2_psm) == 0x0003)
chan->sec_level = BT_SECURITY_SDP;
bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
chan->state = BT_BOUND;
sk->sk_state = BT_BOUND;
done:
release_sock(sk);
return err;
}
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct sockaddr_l2 la;
int len, err = 0;
BT_DBG("sk %p", sk);
if (!addr || alen < sizeof(addr->sa_family) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
memset(&la, 0, sizeof(la));
len = min_t(unsigned int, sizeof(la), alen);
memcpy(&la, addr, len);
if (la.l2_cid && la.l2_psm)
return -EINVAL;
lock_sock(sk);
if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
&& !(la.l2_psm || la.l2_cid)) {
err = -EINVAL;
goto done;
}
switch (chan->mode) {
case L2CAP_MODE_BASIC:
break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
if (!disable_ertm)
break;
/* fall through */
default:
err = -ENOTSUPP;
goto done;
}
switch (sk->sk_state) {
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
/* Already connecting */
goto wait;
case BT_CONNECTED:
/* Already connected */
err = -EISCONN;
goto done;
case BT_OPEN:
case BT_BOUND:
/* Can connect */
break;
default:
err = -EBADFD;
goto done;
}
/* PSM must be odd and lsb of upper byte must be 0 */
if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
chan->chan_type != L2CAP_CHAN_RAW) {
err = -EINVAL;
goto done;
}
/* Set destination address and psm */
bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
chan->psm = la.l2_psm;
chan->dcid = la.l2_cid;
err = l2cap_chan_connect(l2cap_pi(sk)->chan);
if (err)
goto done;
wait:
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
return err;
}
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
|| sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
switch (chan->mode) {
case L2CAP_MODE_BASIC:
break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
if (!disable_ertm)
break;
/* fall through */
default:
err = -ENOTSUPP;
goto done;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
chan->state = BT_LISTEN;
sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
return err;
}
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
long timeo;
int err = 0;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
nsk = bt_accept_dequeue(sk, newsock);
if (nsk)
break;
if (!timeo) {
err = -EAGAIN;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", nsk);
done:
release_sock(sk);
return err;
}
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
BT_DBG("sock %p, sk %p", sock, sk);
memset(la, 0, sizeof(struct sockaddr_l2));
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_l2);
if (peer) {
la->l2_psm = chan->psm;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
la->l2_cid = cpu_to_le16(chan->dcid);
} else {
la->l2_psm = chan->sport;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
la->l2_cid = cpu_to_le16(chan->scid);
}
return 0;
}
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
int len, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case L2CAP_OPTIONS:
memset(&opts, 0, sizeof(opts));
opts.imtu = chan->imtu;
opts.omtu = chan->omtu;
opts.flush_to = chan->flush_to;
opts.mode = chan->mode;
opts.fcs = chan->fcs;
opts.max_tx = chan->max_tx;
opts.txwin_size = (__u16)chan->tx_win;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
err = -EFAULT;
break;
case L2CAP_LM:
switch (chan->sec_level) {
case BT_SECURITY_LOW:
opt = L2CAP_LM_AUTH;
break;
case BT_SECURITY_MEDIUM:
opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
break;
case BT_SECURITY_HIGH:
opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
L2CAP_LM_SECURE;
break;
default:
opt = 0;
break;
}
if (chan->role_switch)
opt |= L2CAP_LM_MASTER;
if (chan->force_reliable)
opt |= L2CAP_LM_RELIABLE;
if (put_user(opt, (u32 __user *) optval))
err = -EFAULT;
break;
case L2CAP_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
!(sk->sk_state == BT_CONNECT2 &&
bt_sk(sk)->defer_setup)) {
err = -ENOTCONN;
break;
}
memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = chan->conn->hcon->handle;
memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct bt_security sec;
struct bt_power pwr;
int len, err = 0;
BT_DBG("sk %p", sk);
if (level == SOL_L2CAP)
return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case BT_SECURITY:
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
chan->chan_type != L2CAP_CHAN_RAW) {
err = -EINVAL;
break;
}
memset(&sec, 0, sizeof(sec));
sec.level = chan->sec_level;
if (sk->sk_state == BT_CONNECTED)
sec.key_size = chan->conn->hcon->enc_key_size;
len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
err = -EFAULT;
break;
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
err = -EFAULT;
break;
case BT_FLUSHABLE:
if (put_user(chan->flushable, (u32 __user *) optval))
err = -EFAULT;
break;
case BT_POWER:
if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
&& sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
pwr.force_active = chan->force_active;
len = min_t(unsigned int, len, sizeof(pwr));
if (copy_to_user(optval, (char *) &pwr, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
int len, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
case L2CAP_OPTIONS:
if (sk->sk_state == BT_CONNECTED) {
err = -EINVAL;
break;
}
opts.imtu = chan->imtu;
opts.omtu = chan->omtu;
opts.flush_to = chan->flush_to;
opts.mode = chan->mode;
opts.fcs = chan->fcs;
opts.max_tx = chan->max_tx;
opts.txwin_size = (__u16)chan->tx_win;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
err = -EFAULT;
break;
}
if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
err = -EINVAL;
break;
}
chan->mode = opts.mode;
switch (chan->mode) {
case L2CAP_MODE_BASIC:
clear_bit(CONF_STATE2_DEVICE, &chan->conf_state);
break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
if (!disable_ertm)
break;
/* fall through */
default:
err = -EINVAL;
break;
}
chan->imtu = opts.imtu;
chan->omtu = opts.omtu;
chan->fcs = opts.fcs;
chan->max_tx = opts.max_tx;
chan->tx_win = (__u8)opts.txwin_size;
break;
case L2CAP_LM:
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt & L2CAP_LM_AUTH)
chan->sec_level = BT_SECURITY_LOW;
if (opt & L2CAP_LM_ENCRYPT)
chan->sec_level = BT_SECURITY_MEDIUM;
if (opt & L2CAP_LM_SECURE)
chan->sec_level = BT_SECURITY_HIGH;
chan->role_switch = (opt & L2CAP_LM_MASTER);
chan->force_reliable = (opt & L2CAP_LM_RELIABLE);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct bt_security sec;
struct bt_power pwr;
struct l2cap_conn *conn;
int len, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
if (level == SOL_L2CAP)
return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
lock_sock(sk);
switch (optname) {
case BT_SECURITY:
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
chan->chan_type != L2CAP_CHAN_RAW) {
err = -EINVAL;
break;
}
sec.level = BT_SECURITY_LOW;
len = min_t(unsigned int, sizeof(sec), optlen);
if (copy_from_user((char *) &sec, optval, len)) {
err = -EFAULT;
break;
}
if (sec.level < BT_SECURITY_LOW ||
sec.level > BT_SECURITY_HIGH) {
err = -EINVAL;
break;
}
chan->sec_level = sec.level;
conn = chan->conn;
if (conn && chan->scid == L2CAP_CID_LE_DATA) {
if (!conn->hcon->out) {
err = -EINVAL;
break;
}
if (smp_conn_security(conn, sec.level))
break;
err = 0;
sk->sk_state = BT_CONFIG;
}
break;
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
bt_sk(sk)->defer_setup = opt;
break;
case BT_FLUSHABLE:
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt > BT_FLUSHABLE_ON) {
err = -EINVAL;
break;
}
if (opt == BT_FLUSHABLE_OFF) {
struct l2cap_conn *conn = chan->conn;
			/* Proceed only when we have an l2cap_conn and
			 * the LM supports the No Flush feature */
if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
err = -EINVAL;
break;
}
}
chan->flushable = opt;
break;
case BT_POWER:
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
chan->chan_type != L2CAP_CHAN_RAW) {
err = -EINVAL;
break;
}
pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
len = min_t(unsigned int, sizeof(pwr), optlen);
if (copy_from_user((char *) &pwr, optval, len)) {
err = -EFAULT;
break;
}
chan->force_active = pwr.force_active;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
err = sock_error(sk);
if (err)
return err;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
if (sk->sk_state != BT_CONNECTED) {
release_sock(sk);
return -ENOTCONN;
}
err = l2cap_chan_send(chan, msg, len);
release_sock(sk);
return err;
}
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct l2cap_pinfo *pi = l2cap_pi(sk);
int err;
lock_sock(sk);
if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
sk->sk_state = BT_CONFIG;
__l2cap_connect_rsp_defer(pi->chan);
release_sock(sk);
return 0;
}
release_sock(sk);
if (sock->type == SOCK_STREAM)
err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
else
err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
if (pi->chan->mode != L2CAP_MODE_ERTM)
return err;
/* Attempt to put pending rx data in the socket buffer */
lock_sock(sk);
if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
goto done;
if (pi->rx_busy_skb) {
if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
pi->rx_busy_skb = NULL;
else
goto done;
}
/* Restore data flow when half of the receive buffer is
* available. This avoids resending large numbers of
* frames.
*/
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
l2cap_chan_busy(pi->chan, 0);
done:
release_sock(sk);
return err;
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void l2cap_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
l2cap_chan_destroy(l2cap_pi(sk)->chan);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
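/* Wait for outstanding ERTM frames to be acked, close the channel and
 * honour SO_LINGER before reporting status to the caller. */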
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
lock_sock(sk);
if (!sk->sk_shutdown) {
if (chan->mode == L2CAP_MODE_ERTM)
err = __l2cap_wait_ack(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_chan_close(chan, 0);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
sk->sk_lingertime);
}
if (!err && sk->sk_err)
err = -sk->sk_err;
release_sock(sk);
return err;
}
static int l2cap_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
err = l2cap_sock_shutdown(sock, 2);
sock_orphan(sk);
l2cap_sock_kill(sk);
return err;
}
static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
{
struct sock *sk, *parent = data;
sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
GFP_ATOMIC);
if (!sk)
return NULL;
l2cap_sock_init(sk, parent);
return l2cap_pi(sk)->chan;
}
static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
{
int err;
struct sock *sk = data;
struct l2cap_pinfo *pi = l2cap_pi(sk);
if (pi->rx_busy_skb)
return -ENOMEM;
err = sock_queue_rcv_skb(sk, skb);
/* For ERTM, handle one skb that doesn't fit into the recv
* buffer. This is important to do because the data frames
* have already been acked, so the skb cannot be discarded.
*
* Notify the l2cap core that the buffer is full, so the
* LOCAL_BUSY state is entered and no more frames are
* acked and reassembled until there is buffer space
* available.
*/
if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
pi->rx_busy_skb = skb;
l2cap_chan_busy(pi->chan, 1);
err = 0;
}
return err;
}
static void l2cap_sock_close_cb(void *data)
{
struct sock *sk = data;
l2cap_sock_kill(sk);
}
static void l2cap_sock_state_change_cb(void *data, int state)
{
struct sock *sk = data;
sk->sk_state = state;
}
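/* Callbacks through which the L2CAP core hands events to the socket layer */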
static struct l2cap_ops l2cap_chan_ops = {
.name = "L2CAP Socket Interface",
.new_connection = l2cap_sock_new_connection_cb,
.recv = l2cap_sock_recv_cb,
.close = l2cap_sock_close_cb,
.state_change = l2cap_sock_state_change_cb,
};
static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
if (l2cap_pi(sk)->rx_busy_skb) {
kfree_skb(l2cap_pi(sk)->rx_busy_skb);
l2cap_pi(sk)->rx_busy_skb = NULL;
}
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_chan *chan = pi->chan;
BT_DBG("sk %p", sk);
if (parent) {
struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
sk->sk_type = parent->sk_type;
bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
chan->chan_type = pchan->chan_type;
chan->imtu = pchan->imtu;
chan->omtu = pchan->omtu;
chan->conf_state = pchan->conf_state;
chan->mode = pchan->mode;
chan->fcs = pchan->fcs;
chan->max_tx = pchan->max_tx;
chan->tx_win = pchan->tx_win;
chan->sec_level = pchan->sec_level;
chan->role_switch = pchan->role_switch;
chan->force_reliable = pchan->force_reliable;
chan->flushable = pchan->flushable;
chan->force_active = pchan->force_active;
} else {
switch (sk->sk_type) {
case SOCK_RAW:
chan->chan_type = L2CAP_CHAN_RAW;
break;
case SOCK_DGRAM:
chan->chan_type = L2CAP_CHAN_CONN_LESS;
break;
case SOCK_SEQPACKET:
case SOCK_STREAM:
chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
break;
}
chan->imtu = L2CAP_DEFAULT_MTU;
chan->omtu = 0;
if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
chan->mode = L2CAP_MODE_ERTM;
set_bit(CONF_STATE2_DEVICE, &chan->conf_state);
} else {
chan->mode = L2CAP_MODE_BASIC;
}
chan->max_tx = L2CAP_DEFAULT_MAX_TX;
chan->fcs = L2CAP_FCS_CRC16;
chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
chan->sec_level = BT_SECURITY_LOW;
chan->role_switch = 0;
chan->force_reliable = 0;
chan->flushable = BT_FLUSHABLE_OFF;
chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
}
/* Default config options */
chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
chan->data = sk;
chan->ops = &l2cap_chan_ops;
}
static struct proto l2cap_proto = {
.name = "L2CAP",
.owner = THIS_MODULE,
.obj_size = sizeof(struct l2cap_pinfo)
};
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
struct l2cap_chan *chan;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = l2cap_sock_destruct;
sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
chan = l2cap_chan_create(sk);
if (!chan) {
l2cap_sock_kill(sk);
return NULL;
}
l2cap_pi(sk)->chan = chan;
return sk;
}
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
return -EPERM;
sock->ops = &l2cap_sock_ops;
sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
if (!sk)
return -ENOMEM;
l2cap_sock_init(sk, NULL);
return 0;
}
static const struct proto_ops l2cap_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = l2cap_sock_release,
.bind = l2cap_sock_bind,
.connect = l2cap_sock_connect,
.listen = l2cap_sock_listen,
.accept = l2cap_sock_accept,
.getname = l2cap_sock_getname,
.sendmsg = l2cap_sock_sendmsg,
.recvmsg = l2cap_sock_recvmsg,
.poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.shutdown = l2cap_sock_shutdown,
.setsockopt = l2cap_sock_setsockopt,
.getsockopt = l2cap_sock_getsockopt
};
static const struct net_proto_family l2cap_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = l2cap_sock_create,
};
int __init l2cap_init_sockets(void)
{
int err;
err = proto_register(&l2cap_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
if (err < 0)
goto error;
BT_INFO("L2CAP socket layer initialized");
return 0;
error:
BT_ERR("L2CAP socket registration failed");
proto_unregister(&l2cap_proto);
return err;
}
void l2cap_cleanup_sockets(void)
{
if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
BT_ERR("L2CAP socket unregistration failed");
proto_unregister(&l2cap_proto);
}
| gpl-2.0 |
attn1/cm-kernel-vivo-2.6.35 | drivers/media/video/cx88/cx88-blackbird.c | 896 | 38992 | /*
*
* Support for a cx23416 mpeg encoder via cx2388x host port.
* "blackbird" reference design.
*
* (c) 2004 Jelle Foks <jelle@foks.us>
* (c) 2004 Gerd Knorr <kraxel@bytesex.org>
*
* (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
* - video_ioctl2 conversion
*
 * Includes parts from the ivtv driver (http://ivtv.sourceforge.net/),
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/smp_lock.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/cx2341x.h>
#include "cx88.h"
MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
static unsigned int mpegbufs = 32;
module_param(mpegbufs,int,0644);
MODULE_PARM_DESC(mpegbufs,"number of mpeg buffers, range 2-32");
static unsigned int debug;
module_param(debug,int,0644);
MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
#define dprintk(level, fmt, arg...) do { \
	if (debug >= level) \
		printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name, ## arg); \
} while (0)
/* ------------------------------------------------------------------ */
#define BLACKBIRD_FIRM_IMAGE_SIZE 376836
/* defines below are from ivtv-driver.h */
#define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF
/* Firmware API commands */
#define IVTV_API_STD_TIMEOUT 500
enum blackbird_capture_type {
BLACKBIRD_MPEG_CAPTURE,
BLACKBIRD_RAW_CAPTURE,
BLACKBIRD_RAW_PASSTHRU_CAPTURE
};
enum blackbird_capture_bits {
BLACKBIRD_RAW_BITS_NONE = 0x00,
BLACKBIRD_RAW_BITS_YUV_CAPTURE = 0x01,
BLACKBIRD_RAW_BITS_PCM_CAPTURE = 0x02,
BLACKBIRD_RAW_BITS_VBI_CAPTURE = 0x04,
BLACKBIRD_RAW_BITS_PASSTHRU_CAPTURE = 0x08,
BLACKBIRD_RAW_BITS_TO_HOST_CAPTURE = 0x10
};
enum blackbird_capture_end {
	BLACKBIRD_END_AT_GOP, /* stop at the end of the GOP, generate irq */
BLACKBIRD_END_NOW, /* stop immediately, no irq */
};
enum blackbird_framerate {
BLACKBIRD_FRAMERATE_NTSC_30, /* NTSC: 30fps */
BLACKBIRD_FRAMERATE_PAL_25 /* PAL: 25fps */
};
enum blackbird_stream_port {
BLACKBIRD_OUTPUT_PORT_MEMORY,
BLACKBIRD_OUTPUT_PORT_STREAMING,
BLACKBIRD_OUTPUT_PORT_SERIAL
};
enum blackbird_data_xfer_status {
BLACKBIRD_MORE_BUFFERS_FOLLOW,
BLACKBIRD_LAST_BUFFER,
};
enum blackbird_picture_mask {
BLACKBIRD_PICTURE_MASK_NONE,
BLACKBIRD_PICTURE_MASK_I_FRAMES,
BLACKBIRD_PICTURE_MASK_I_P_FRAMES = 0x3,
BLACKBIRD_PICTURE_MASK_ALL_FRAMES = 0x7,
};
enum blackbird_vbi_mode_bits {
BLACKBIRD_VBI_BITS_SLICED,
BLACKBIRD_VBI_BITS_RAW,
};
enum blackbird_vbi_insertion_bits {
BLACKBIRD_VBI_BITS_INSERT_IN_XTENSION_USR_DATA,
BLACKBIRD_VBI_BITS_INSERT_IN_PRIVATE_PACKETS = 0x1 << 1,
BLACKBIRD_VBI_BITS_SEPARATE_STREAM = 0x2 << 1,
BLACKBIRD_VBI_BITS_SEPARATE_STREAM_USR_DATA = 0x4 << 1,
BLACKBIRD_VBI_BITS_SEPARATE_STREAM_PRV_DATA = 0x5 << 1,
};
enum blackbird_dma_unit {
BLACKBIRD_DMA_BYTES,
BLACKBIRD_DMA_FRAMES,
};
enum blackbird_dma_transfer_status_bits {
BLACKBIRD_DMA_TRANSFER_BITS_DONE = 0x01,
BLACKBIRD_DMA_TRANSFER_BITS_ERROR = 0x04,
BLACKBIRD_DMA_TRANSFER_BITS_LL_ERROR = 0x10,
};
enum blackbird_pause {
BLACKBIRD_PAUSE_ENCODING,
BLACKBIRD_RESUME_ENCODING,
};
enum blackbird_copyright {
BLACKBIRD_COPYRIGHT_OFF,
BLACKBIRD_COPYRIGHT_ON,
};
enum blackbird_notification_type {
BLACKBIRD_NOTIFICATION_REFRESH,
};
enum blackbird_notification_status {
BLACKBIRD_NOTIFICATION_OFF,
BLACKBIRD_NOTIFICATION_ON,
};
enum blackbird_notification_mailbox {
BLACKBIRD_NOTIFICATION_NO_MAILBOX = -1,
};
enum blackbird_field1_lines {
BLACKBIRD_FIELD1_SAA7114 = 0x00EF, /* 239 */
BLACKBIRD_FIELD1_SAA7115 = 0x00F0, /* 240 */
BLACKBIRD_FIELD1_MICRONAS = 0x0105, /* 261 */
};
enum blackbird_field2_lines {
BLACKBIRD_FIELD2_SAA7114 = 0x00EF, /* 239 */
BLACKBIRD_FIELD2_SAA7115 = 0x00F0, /* 240 */
BLACKBIRD_FIELD2_MICRONAS = 0x0106, /* 262 */
};
enum blackbird_custom_data_type {
BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
BLACKBIRD_CUSTOM_PRIVATE_PACKET,
};
enum blackbird_mute {
BLACKBIRD_UNMUTE,
BLACKBIRD_MUTE,
};
enum blackbird_mute_video_mask {
BLACKBIRD_MUTE_VIDEO_V_MASK = 0x0000FF00,
BLACKBIRD_MUTE_VIDEO_U_MASK = 0x00FF0000,
BLACKBIRD_MUTE_VIDEO_Y_MASK = 0xFF000000,
};
enum blackbird_mute_video_shift {
BLACKBIRD_MUTE_VIDEO_V_SHIFT = 8,
BLACKBIRD_MUTE_VIDEO_U_SHIFT = 16,
BLACKBIRD_MUTE_VIDEO_Y_SHIFT = 24,
};
/* Registers */
#define IVTV_REG_ENC_SDRAM_REFRESH (0x07F8 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_ENC_SDRAM_PRECHARGE (0x07FC /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_SPU (0x9050 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_HW_BLOCKS (0x9054 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_VPU (0x9058 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_APU (0xA064 /*| IVTV_REG_OFFSET*/)
/* ------------------------------------------------------------------ */
static void host_setup(struct cx88_core *core)
{
/* toggle reset of the host */
cx_write(MO_GPHST_SOFT_RST, 1);
udelay(100);
cx_write(MO_GPHST_SOFT_RST, 0);
udelay(100);
/* host port setup */
cx_write(MO_GPHST_WSC, 0x44444444U);
cx_write(MO_GPHST_XFR, 0);
cx_write(MO_GPHST_WDTH, 15);
cx_write(MO_GPHST_HDSHK, 0);
cx_write(MO_GPHST_MUX16, 0x44448888U);
cx_write(MO_GPHST_MODE, 0);
}
/* ------------------------------------------------------------------ */
#define P1_MDATA0 0x390000
#define P1_MDATA1 0x390001
#define P1_MDATA2 0x390002
#define P1_MDATA3 0x390003
#define P1_MADDR2 0x390004
#define P1_MADDR1 0x390005
#define P1_MADDR0 0x390006
#define P1_RDATA0 0x390008
#define P1_RDATA1 0x390009
#define P1_RDATA2 0x39000A
#define P1_RDATA3 0x39000B
#define P1_RADDR0 0x39000C
#define P1_RADDR1 0x39000D
#define P1_RRDWR 0x39000E
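/* Busy-wait (up to ~1 ms) for GPIO0 bit 1, which the encoder uses to
 * signal host-port readiness, to reach the requested state. */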
static int wait_ready_gpio0_bit1(struct cx88_core *core, u32 state)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1);
u32 gpio0,need;
need = state ? 2 : 0;
for (;;) {
gpio0 = cx_read(MO_GP0_IO) & 2;
if (need == gpio0)
return 0;
if (time_after(jiffies,timeout))
return -1;
udelay(1);
}
}
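/* Single-dword write to encoder memory through the indirect host-port
 * window; the trailing cx_read()s make sure the posted writes have
 * reached the chip before we poll for completion. */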
static int memory_write(struct cx88_core *core, u32 address, u32 value)
{
/* Warning: address is dword address (4 bytes) */
cx_writeb(P1_MDATA0, (unsigned int)value);
cx_writeb(P1_MDATA1, (unsigned int)(value >> 8));
cx_writeb(P1_MDATA2, (unsigned int)(value >> 16));
cx_writeb(P1_MDATA3, (unsigned int)(value >> 24));
cx_writeb(P1_MADDR2, (unsigned int)(address >> 16) | 0x40);
cx_writeb(P1_MADDR1, (unsigned int)(address >> 8));
cx_writeb(P1_MADDR0, (unsigned int)address);
cx_read(P1_MDATA0);
cx_read(P1_MADDR0);
return wait_ready_gpio0_bit1(core,1);
}
static int memory_read(struct cx88_core *core, u32 address, u32 *value)
{
int retval;
u32 val;
/* Warning: address is dword address (4 bytes) */
cx_writeb(P1_MADDR2, (unsigned int)(address >> 16) & ~0xC0);
cx_writeb(P1_MADDR1, (unsigned int)(address >> 8));
cx_writeb(P1_MADDR0, (unsigned int)address);
cx_read(P1_MADDR0);
retval = wait_ready_gpio0_bit1(core,1);
cx_writeb(P1_MDATA3, 0);
val = (unsigned char)cx_read(P1_MDATA3) << 24;
cx_writeb(P1_MDATA2, 0);
val |= (unsigned char)cx_read(P1_MDATA2) << 16;
cx_writeb(P1_MDATA1, 0);
val |= (unsigned char)cx_read(P1_MDATA1) << 8;
cx_writeb(P1_MDATA0, 0);
val |= (unsigned char)cx_read(P1_MDATA0);
*value = val;
return retval;
}
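/* register_write()/register_read() use the same host-port handshake to
 * access the cx23416's internal registers rather than its memory. */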
static int register_write(struct cx88_core *core, u32 address, u32 value)
{
cx_writeb(P1_RDATA0, (unsigned int)value);
cx_writeb(P1_RDATA1, (unsigned int)(value >> 8));
cx_writeb(P1_RDATA2, (unsigned int)(value >> 16));
cx_writeb(P1_RDATA3, (unsigned int)(value >> 24));
cx_writeb(P1_RADDR0, (unsigned int)address);
cx_writeb(P1_RADDR1, (unsigned int)(address >> 8));
cx_writeb(P1_RRDWR, 1);
cx_read(P1_RDATA0);
cx_read(P1_RADDR0);
return wait_ready_gpio0_bit1(core,1);
}
static int register_read(struct cx88_core *core, u32 address, u32 *value)
{
int retval;
u32 val;
cx_writeb(P1_RADDR0, (unsigned int)address);
cx_writeb(P1_RADDR1, (unsigned int)(address >> 8));
cx_writeb(P1_RRDWR, 0);
cx_read(P1_RADDR0);
retval = wait_ready_gpio0_bit1(core,1);
val = (unsigned char)cx_read(P1_RDATA0);
val |= (unsigned char)cx_read(P1_RDATA1) << 8;
val |= (unsigned char)cx_read(P1_RDATA2) << 16;
val |= (unsigned char)cx_read(P1_RDATA3) << 24;
*value = val;
return retval;
}
/* ------------------------------------------------------------------ */
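/*
 * Mailbox handshake with the cx23416 firmware.  The dword at
 * (mailbox - 4) must still contain the 0x12345678 signature; the flag
 * dword at the mailbox base is used as: bit 0 = mailbox claimed,
 * bit 1 = command and arguments written, bit 2 = firmware finished the
 * command.  Offsets: +1 command code, +2 result, +3 timeout, +4.. data.
 */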
static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA])
{
struct cx8802_dev *dev = priv;
unsigned long timeout;
u32 value, flag, retval;
int i;
dprintk(1,"%s: 0x%X\n", __func__, command);
	/* This may not be 100% safe if reading an arbitrary memory
	   location can have side effects */
memory_read(dev->core, dev->mailbox - 4, &value);
if (value != 0x12345678) {
dprintk(0, "Firmware and/or mailbox pointer not initialized or corrupted\n");
return -1;
}
memory_read(dev->core, dev->mailbox, &flag);
if (flag) {
dprintk(0, "ERROR: Mailbox appears to be in use (%x)\n", flag);
return -1;
}
flag |= 1; /* tell 'em we're working on it */
memory_write(dev->core, dev->mailbox, flag);
/* write command + args + fill remaining with zeros */
memory_write(dev->core, dev->mailbox + 1, command); /* command code */
memory_write(dev->core, dev->mailbox + 3, IVTV_API_STD_TIMEOUT); /* timeout */
for (i = 0; i < in; i++) {
memory_write(dev->core, dev->mailbox + 4 + i, data[i]);
dprintk(1, "API Input %d = %d\n", i, data[i]);
}
for (; i < CX2341X_MBOX_MAX_DATA; i++)
memory_write(dev->core, dev->mailbox + 4 + i, 0);
flag |= 3; /* tell 'em we're done writing */
memory_write(dev->core, dev->mailbox, flag);
/* wait for firmware to handle the API command */
timeout = jiffies + msecs_to_jiffies(10);
for (;;) {
memory_read(dev->core, dev->mailbox, &flag);
if (0 != (flag & 4))
break;
if (time_after(jiffies,timeout)) {
dprintk(0, "ERROR: API Mailbox timeout\n");
return -1;
}
udelay(10);
}
/* read output values */
for (i = 0; i < out; i++) {
memory_read(dev->core, dev->mailbox + 4 + i, data + i);
dprintk(1, "API Output %d = %d\n", i, data[i]);
}
memory_read(dev->core, dev->mailbox + 2, &retval);
dprintk(1, "API result = %d\n",retval);
flag = 0;
memory_write(dev->core, dev->mailbox, flag);
return retval;
}
/* ------------------------------------------------------------------ */
/* We don't need to call the API often, so using just one mailbox will probably suffice */
static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
u32 inputcnt, u32 outputcnt, ...)
{
u32 data[CX2341X_MBOX_MAX_DATA];
va_list vargs;
int i, err;
va_start(vargs, outputcnt);
for (i = 0; i < inputcnt; i++) {
data[i] = va_arg(vargs, int);
}
err = blackbird_mbox_func(dev, command, inputcnt, outputcnt, data);
for (i = 0; i < outputcnt; i++) {
int *vptr = va_arg(vargs, int *);
*vptr = data[i];
}
va_end(vargs);
return err;
}
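/* Scan the encoder memory for the four-dword mailbox signature and
 * return the dword offset just past it (the mailbox base), or -1. */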
static int blackbird_find_mailbox(struct cx8802_dev *dev)
{
u32 signature[4]={0x12345678, 0x34567812, 0x56781234, 0x78123456};
int signaturecnt=0;
u32 value;
int i;
for (i = 0; i < BLACKBIRD_FIRM_IMAGE_SIZE; i++) {
memory_read(dev->core, i, &value);
if (value == signature[signaturecnt])
signaturecnt++;
else
signaturecnt = 0;
if (4 == signaturecnt) {
dprintk(1, "Mailbox signature found\n");
return i+1;
}
}
dprintk(0, "Mailbox signature values not found!\n");
return -1;
}
static int blackbird_load_firmware(struct cx8802_dev *dev)
{
static const unsigned char magic[8] = {
0xa7, 0x0d, 0x00, 0x00, 0x66, 0xbb, 0x55, 0xaa
};
const struct firmware *firmware;
int i, retval = 0;
u32 value = 0;
u32 checksum = 0;
u32 *dataptr;
retval = register_write(dev->core, IVTV_REG_VPU, 0xFFFFFFED);
retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_REFRESH, 0x80000640);
retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_PRECHARGE, 0x1A);
msleep(1);
retval |= register_write(dev->core, IVTV_REG_APU, 0);
if (retval < 0)
dprintk(0, "Error with register_write\n");
retval = request_firmware(&firmware, CX2341X_FIRM_ENC_FILENAME,
&dev->pci->dev);
if (retval != 0) {
dprintk(0, "ERROR: Hotplug firmware request failed (%s).\n",
CX2341X_FIRM_ENC_FILENAME);
dprintk(0, "Please fix your hotplug setup, the board will "
"not work without firmware loaded!\n");
return -1;
}
if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) {
dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d)\n",
firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
release_firmware(firmware);
return -1;
}
if (0 != memcmp(firmware->data, magic, 8)) {
dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n");
release_firmware(firmware);
return -1;
}
/* transfer to the chip */
dprintk(1,"Loading firmware ...\n");
dataptr = (u32*)firmware->data;
for (i = 0; i < (firmware->size >> 2); i++) {
value = *dataptr;
checksum += ~value;
memory_write(dev->core, i, value);
dataptr++;
}
/* read back to verify with the checksum */
for (i--; i >= 0; i--) {
memory_read(dev->core, i, &value);
checksum -= ~value;
}
if (checksum) {
dprintk(0, "ERROR: Firmware load failed (checksum mismatch).\n");
release_firmware(firmware);
return -1;
}
release_firmware(firmware);
dprintk(0, "Firmware upload successful.\n");
retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
retval |= register_read(dev->core, IVTV_REG_SPU, &value);
retval |= register_write(dev->core, IVTV_REG_SPU, value & 0xFFFFFFFE);
msleep(1);
retval |= register_read(dev->core, IVTV_REG_VPU, &value);
retval |= register_write(dev->core, IVTV_REG_VPU, value & 0xFFFFFFE8);
if (retval < 0)
dprintk(0, "Error with register_write\n");
return 0;
}
/**
Settings used by the windows tv app for PVR2000:
=================================================================================================================
Profile | Codec | Resolution | CBR/VBR | Video Qlty | V. Bitrate | Frmrate | Audio Codec | A. Bitrate | A. Mode
-----------------------------------------------------------------------------------------------------------------
MPEG-1 | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 2000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
MPEG-2 | MPEG2 | 720x576PAL | VBR | 600 :Good | 4000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
VCD | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 1150 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
DVD | MPEG2 | 720x576PAL | VBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
DB* DVD | MPEG2 | 720x576PAL | CBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
=================================================================================================================
*DB: "DirectBurn"
*/
static void blackbird_codec_settings(struct cx8802_dev *dev)
{
/* assign frame size */
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
dev->height, dev->width);
dev->params.width = dev->width;
dev->params.height = dev->height;
dev->params.is_50hz = (dev->core->tvnorm & V4L2_STD_625_50) != 0;
cx2341x_update(dev, blackbird_mbox_func, NULL, &dev->params);
}
static int blackbird_initialize_codec(struct cx8802_dev *dev)
{
struct cx88_core *core = dev->core;
int version;
int retval;
dprintk(1,"Initialize codec\n");
retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
if (retval < 0) {
dev->mpeg_active = 0;
/* ping was not successful, reset and upload firmware */
cx_write(MO_SRST_IO, 0); /* SYS_RSTO=0 */
cx_write(MO_SRST_IO, 1); /* SYS_RSTO=1 */
retval = blackbird_load_firmware(dev);
if (retval < 0)
return retval;
retval = blackbird_find_mailbox(dev);
if (retval < 0)
return -1;
dev->mailbox = retval;
retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
if (retval < 0) {
dprintk(0, "ERROR: Firmware ping failed!\n");
return -1;
}
retval = blackbird_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1, &version);
if (retval < 0) {
dprintk(0, "ERROR: Firmware get encoder version failed!\n");
return -1;
}
dprintk(0, "Firmware version is 0x%08x\n", version);
}
cx_write(MO_PINMUX_IO, 0x88); /* 656-8bit IO and enable MPEG parallel IO */
cx_clear(MO_INPUT_FORMAT, 0x100); /* chroma subcarrier lock to normal? */
cx_write(MO_VBOS_CONTROL, 0x84A00); /* no 656 mode, 8-bit pixels, disable VBI */
cx_clear(MO_OUTPUT_FORMAT, 0x0008); /* Normal Y-limits to let the mpeg encoder sync */
blackbird_codec_settings(dev);
blackbird_api_cmd(dev, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, 0,
BLACKBIRD_FIELD1_SAA7115,
BLACKBIRD_FIELD2_SAA7115
);
blackbird_api_cmd(dev, CX2341X_ENC_SET_PLACEHOLDER, 12, 0,
BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
return 0;
}
static int blackbird_start_codec(struct file *file, void *priv)
{
struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
struct cx88_core *core = dev->core;
	/* wait for the AUD_STATUS low nibble to stop changing before starting */
u32 reg;
int i;
int lastchange = -1;
int lastval = 0;
for (i = 0; (i < 10) && (i < (lastchange + 4)); i++) {
reg = cx_read(AUD_STATUS);
dprintk(1, "AUD_STATUS:%dL: 0x%x\n", i, reg);
if ((reg & 0x0F) != lastval) {
lastval = reg & 0x0F;
lastchange = i;
}
msleep(100);
}
/* unmute audio source */
cx_clear(AUD_VOL_CTL, (1 << 6));
blackbird_api_cmd(dev, CX2341X_ENC_REFRESH_INPUT, 0, 0);
/* initialize the video input */
blackbird_api_cmd(dev, CX2341X_ENC_INITIALIZE_INPUT, 0, 0);
/* start capturing to the host interface */
blackbird_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0,
BLACKBIRD_MPEG_CAPTURE,
BLACKBIRD_RAW_BITS_NONE
);
dev->mpeg_active = 1;
return 0;
}
static int blackbird_stop_codec(struct cx8802_dev *dev)
{
blackbird_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
BLACKBIRD_END_NOW,
BLACKBIRD_MPEG_CAPTURE,
BLACKBIRD_RAW_BITS_NONE
);
dev->mpeg_active = 0;
return 0;
}
/* ------------------------------------------------------------------ */
static int bb_buf_setup(struct videobuf_queue *q,
unsigned int *count, unsigned int *size)
{
struct cx8802_fh *fh = q->priv_data;
fh->dev->ts_packet_size = 188 * 4; /* was: 512 */
fh->dev->ts_packet_count = mpegbufs; /* was: 100 */
*size = fh->dev->ts_packet_size * fh->dev->ts_packet_count;
*count = fh->dev->ts_packet_count;
return 0;
}
static int
bb_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
enum v4l2_field field)
{
struct cx8802_fh *fh = q->priv_data;
return cx8802_buf_prepare(q, fh->dev, (struct cx88_buffer*)vb, field);
}
static void
bb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
struct cx8802_fh *fh = q->priv_data;
cx8802_buf_queue(fh->dev, (struct cx88_buffer*)vb);
}
static void
bb_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
cx88_free_buffer(q, (struct cx88_buffer*)vb);
}
static struct videobuf_queue_ops blackbird_qops = {
.buf_setup = bb_buf_setup,
.buf_prepare = bb_buf_prepare,
.buf_queue = bb_buf_queue,
.buf_release = bb_buf_release,
};
/* ------------------------------------------------------------------ */
static const u32 *ctrl_classes[] = {
cx88_user_ctrls,
cx2341x_mpeg_ctrls,
NULL
};
static int blackbird_queryctrl(struct cx8802_dev *dev, struct v4l2_queryctrl *qctrl)
{
qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
if (qctrl->id == 0)
return -EINVAL;
/* Standard V4L2 controls */
if (cx8800_ctrl_query(dev->core, qctrl) == 0)
return 0;
/* MPEG V4L2 controls */
if (cx2341x_ctrl_query(&dev->params, qctrl))
qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
return 0;
}
/* ------------------------------------------------------------------ */
/* IOCTL Handlers */
static int vidioc_querymenu (struct file *file, void *priv,
struct v4l2_querymenu *qmenu)
{
struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
struct v4l2_queryctrl qctrl;
qctrl.id = qmenu->id;
blackbird_queryctrl(dev, &qctrl);
return v4l2_ctrl_query_menu(qmenu, &qctrl,
cx2341x_ctrl_get_menu(&dev->params, qmenu->id));
}
static int vidioc_querycap (struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
struct cx88_core *core = dev->core;
strcpy(cap->driver, "cx88_blackbird");
strlcpy(cap->card, core->board.name, sizeof(cap->card));
sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci));
cap->version = CX88_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
if (UNSET != core->board.tuner_type)
cap->capabilities |= V4L2_CAP_TUNER;
return 0;
}
static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (f->index != 0)
return -EINVAL;
strlcpy(f->description, "MPEG", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_MPEG;
return 0;
}
static int vidioc_g_fmt_vid_cap (struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx8802_fh *fh = priv;
struct cx8802_dev *dev = fh->dev;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
	f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024 */
f->fmt.pix.colorspace = 0;
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
f->fmt.pix.field = fh->mpegq.field;
dprintk(0,"VIDIOC_G_FMT: w: %d, h: %d, f: %d\n",
dev->width, dev->height, fh->mpegq.field );
return 0;
}
static int vidioc_try_fmt_vid_cap (struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx8802_fh *fh = priv;
struct cx8802_dev *dev = fh->dev;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
	f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024 */
f->fmt.pix.colorspace = 0;
dprintk(0,"VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n",
dev->width, dev->height, fh->mpegq.field );
return 0;
}
static int vidioc_s_fmt_vid_cap (struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx8802_fh *fh = priv;
struct cx8802_dev *dev = fh->dev;
struct cx88_core *core = dev->core;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
	f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024 */
f->fmt.pix.colorspace = 0;
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
fh->mpegq.field = f->fmt.pix.field;
cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
f->fmt.pix.height, f->fmt.pix.width);
dprintk(0,"VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field );
return 0;
}
static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p)
{
struct cx8802_fh *fh = priv;
return (videobuf_reqbufs(&fh->mpegq, p));
}
static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
struct cx8802_fh *fh = priv;
return (videobuf_querybuf(&fh->mpegq, p));
}
static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
struct cx8802_fh *fh = priv;
return (videobuf_qbuf(&fh->mpegq, p));
}
static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
struct cx8802_fh *fh = priv;
return (videobuf_dqbuf(&fh->mpegq, p,
file->f_flags & O_NONBLOCK));
}
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct cx8802_fh *fh = priv;
return videobuf_streamon(&fh->mpegq);
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct cx8802_fh *fh = priv;
return videobuf_streamoff(&fh->mpegq);
}
static int vidioc_g_ext_ctrls (struct file *file, void *priv,
struct v4l2_ext_controls *f)
{
struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
return -EINVAL;
return cx2341x_ext_ctrls(&dev->params, 0, f, VIDIOC_G_EXT_CTRLS);
}
static int vidioc_s_ext_ctrls (struct file *file, void *priv,
struct v4l2_ext_controls *f)
{
struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
struct cx2341x_mpeg_params p;
int err;
if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
return -EINVAL;
if (dev->mpeg_active)
blackbird_stop_codec(dev);
p = dev->params;
err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_S_EXT_CTRLS);
if (!err) {
err = cx2341x_update(dev, blackbird_mbox_func, &dev->params, &p);
dev->params = p;
}
return err;
}
static int vidioc_try_ext_ctrls (struct file *file, void *priv,
struct v4l2_ext_controls *f)
{
struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
struct cx2341x_mpeg_params p;
int err;
if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
return -EINVAL;
p = dev->params;
err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS);
return err;
}
static int vidioc_s_frequency (struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct cx8802_fh *fh = priv;
struct cx8802_dev *dev = fh->dev;
struct cx88_core *core = dev->core;
if (dev->mpeg_active)
blackbird_stop_codec(dev);
cx88_set_freq (core,f);
blackbird_initialize_codec(dev);
cx88_set_scale(dev->core, dev->width, dev->height,
fh->mpegq.field);
return 0;
}
static int vidioc_log_status (struct file *file, void *priv)
{
struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
struct cx88_core *core = dev->core;
char name[32 + 2];
snprintf(name, sizeof(name), "%s/2", core->name);
printk("%s/2: ============ START LOG STATUS ============\n",
core->name);
call_all(core, core, log_status);
cx2341x_log_status(&dev->params, name);
printk("%s/2: ============= END LOG STATUS =============\n",
core->name);
return 0;
}
static int vidioc_queryctrl (struct file *file, void *priv,
struct v4l2_queryctrl *qctrl)
{
struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
if (blackbird_queryctrl(dev, qctrl) == 0)
return 0;
qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
if (unlikely(qctrl->id == 0))
return -EINVAL;
return cx8800_ctrl_query(dev->core, qctrl);
}
static int vidioc_enum_input (struct file *file, void *priv,
struct v4l2_input *i)
{
struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
return cx88_enum_input (core,i);
}
static int vidioc_g_ctrl (struct file *file, void *priv,
struct v4l2_control *ctl)
{
struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
return
cx88_get_control(core,ctl);
}
static int vidioc_s_ctrl (struct file *file, void *priv,
struct v4l2_control *ctl)
{
struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
return
cx88_set_control(core,ctl);
}
static int vidioc_g_frequency (struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct cx8802_fh *fh = priv;
struct cx88_core *core = fh->dev->core;
if (unlikely(UNSET == core->board.tuner_type))
return -EINVAL;
f->type = V4L2_TUNER_ANALOG_TV;
f->frequency = core->freq;
call_all(core, tuner, g_frequency, f);
return 0;
}
static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
{
struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
*i = core->input;
return 0;
}
static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
{
struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
if (i >= 4)
return -EINVAL;
mutex_lock(&core->lock);
cx88_newstation(core);
cx88_video_mux(core,i);
mutex_unlock(&core->lock);
return 0;
}
static int vidioc_g_tuner (struct file *file, void *priv,
struct v4l2_tuner *t)
{
struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
u32 reg;
if (unlikely(UNSET == core->board.tuner_type))
return -EINVAL;
if (0 != t->index)
return -EINVAL;
strcpy(t->name, "Television");
t->type = V4L2_TUNER_ANALOG_TV;
t->capability = V4L2_TUNER_CAP_NORM;
t->rangehigh = 0xffffffffUL;
cx88_get_stereo(core ,t);
reg = cx_read(MO_DEVICE_STATUS);
t->signal = (reg & (1<<5)) ? 0xffff : 0x0000;
return 0;
}
static int vidioc_s_tuner (struct file *file, void *priv,
struct v4l2_tuner *t)
{
struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
if (UNSET == core->board.tuner_type)
return -EINVAL;
if (0 != t->index)
return -EINVAL;
cx88_set_stereo(core, t->audmode, 1);
return 0;
}
static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *id)
{
struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
mutex_lock(&core->lock);
cx88_set_tvnorm(core,*id);
mutex_unlock(&core->lock);
return 0;
}
/* FIXME: cx88_ioctl_hook not implemented */
static int mpeg_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct cx8802_dev *dev = video_drvdata(file);
struct cx8802_fh *fh;
struct cx8802_driver *drv = NULL;
int err;
dprintk( 1, "%s\n", __func__);
lock_kernel();
/* Make sure we can acquire the hardware */
drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
if (drv) {
err = drv->request_acquire(drv);
if(err != 0) {
dprintk(1,"%s: Unable to acquire hardware, %d\n", __func__, err);
unlock_kernel();
return err;
}
}
if (!atomic_read(&dev->core->mpeg_users) && blackbird_initialize_codec(dev) < 0) {
if (drv)
drv->request_release(drv);
unlock_kernel();
return -EINVAL;
}
dprintk(1, "open dev=%s\n", video_device_node_name(vdev));
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh),GFP_KERNEL);
if (NULL == fh) {
if (drv)
drv->request_release(drv);
unlock_kernel();
return -ENOMEM;
}
file->private_data = fh;
fh->dev = dev;
videobuf_queue_sg_init(&fh->mpegq, &blackbird_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx88_buffer),
fh);
/* FIXME: locking against other video device */
cx88_set_scale(dev->core, dev->width, dev->height,
fh->mpegq.field);
unlock_kernel();
atomic_inc(&dev->core->mpeg_users);
return 0;
}
static int mpeg_release(struct file *file)
{
struct cx8802_fh *fh = file->private_data;
struct cx8802_dev *dev = fh->dev;
struct cx8802_driver *drv = NULL;
if (dev->mpeg_active && atomic_read(&dev->core->mpeg_users) == 1)
blackbird_stop_codec(dev);
cx8802_cancel_buffers(fh->dev);
/* stop mpeg capture */
videobuf_stop(&fh->mpegq);
videobuf_mmap_free(&fh->mpegq);
file->private_data = NULL;
kfree(fh);
/* Make sure we release the hardware */
drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
if (drv)
drv->request_release(drv);
atomic_dec(&dev->core->mpeg_users);
return 0;
}
static ssize_t
mpeg_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
struct cx8802_fh *fh = file->private_data;
struct cx8802_dev *dev = fh->dev;
if (!dev->mpeg_active)
blackbird_start_codec(file, fh);
return videobuf_read_stream(&fh->mpegq, data, count, ppos, 0,
file->f_flags & O_NONBLOCK);
}
static unsigned int
mpeg_poll(struct file *file, struct poll_table_struct *wait)
{
struct cx8802_fh *fh = file->private_data;
struct cx8802_dev *dev = fh->dev;
if (!dev->mpeg_active)
blackbird_start_codec(file, fh);
return videobuf_poll_stream(file, &fh->mpegq, wait);
}
static int
mpeg_mmap(struct file *file, struct vm_area_struct * vma)
{
struct cx8802_fh *fh = file->private_data;
return videobuf_mmap_mapper(&fh->mpegq, vma);
}
static const struct v4l2_file_operations mpeg_fops =
{
.owner = THIS_MODULE,
.open = mpeg_open,
.release = mpeg_release,
.read = mpeg_read,
.poll = mpeg_poll,
.mmap = mpeg_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_querymenu = vidioc_querymenu,
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
.vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
.vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
.vidioc_s_frequency = vidioc_s_frequency,
.vidioc_log_status = vidioc_log_status,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_s_std = vidioc_s_std,
};
static struct video_device cx8802_mpeg_template = {
.name = "cx8802",
.fops = &mpeg_fops,
.ioctl_ops = &mpeg_ioctl_ops,
.tvnorms = CX88_NORMS,
.current_norm = V4L2_STD_NTSC_M,
};
/* ------------------------------------------------------------------ */
/* The CX8802 MPEG API will call this when we can use the hardware */
static int cx8802_blackbird_advise_acquire(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
int err = 0;
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
/* By default, core setup will leave the cx22702 out of reset, on the bus.
* We left the hardware on power up with the cx22702 active.
* We're being given access to re-arrange the GPIOs.
* Take the bus off the cx22702 and put the cx23416 on it.
*/
/* Toggle reset on cx22702 leaving i2c active */
cx_set(MO_GP0_IO, 0x00000080);
udelay(1000);
cx_clear(MO_GP0_IO, 0x00000080);
udelay(50);
cx_set(MO_GP0_IO, 0x00000080);
udelay(1000);
/* tri-state the cx22702 pins */
cx_set(MO_GP0_IO, 0x00000004);
udelay(1000);
break;
default:
err = -ENODEV;
}
return err;
}
/* The CX8802 MPEG API will call this when we need to release the hardware */
static int cx8802_blackbird_advise_release(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
int err = 0;
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
/* Exit leaving the cx23416 on the bus */
break;
default:
err = -ENODEV;
}
return err;
}
static void blackbird_unregister_video(struct cx8802_dev *dev)
{
if (dev->mpeg_dev) {
if (video_is_registered(dev->mpeg_dev))
video_unregister_device(dev->mpeg_dev);
else
video_device_release(dev->mpeg_dev);
dev->mpeg_dev = NULL;
}
}
static int blackbird_register_video(struct cx8802_dev *dev)
{
int err;
dev->mpeg_dev = cx88_vdev_init(dev->core,dev->pci,
&cx8802_mpeg_template,"mpeg");
video_set_drvdata(dev->mpeg_dev, dev);
err = video_register_device(dev->mpeg_dev,VFL_TYPE_GRABBER, -1);
if (err < 0) {
printk(KERN_INFO "%s/2: can't register mpeg device\n",
dev->core->name);
return err;
}
printk(KERN_INFO "%s/2: registered device %s [mpeg]\n",
dev->core->name, video_device_node_name(dev->mpeg_dev));
return 0;
}
/* ----------------------------------------------------------- */
static int cx8802_blackbird_probe(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
struct cx8802_dev *dev = core->dvbdev;
int err;
dprintk( 1, "%s\n", __func__);
dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
core->boardnr,
core->name,
core->pci_bus,
core->pci_slot);
err = -ENODEV;
if (!(core->board.mpeg & CX88_MPEG_BLACKBIRD))
goto fail_core;
dev->width = 720;
dev->height = 576;
cx2341x_fill_defaults(&dev->params);
dev->params.port = CX2341X_PORT_STREAMING;
cx8802_mpeg_template.current_norm = core->tvnorm;
if (core->tvnorm & V4L2_STD_525_60) {
dev->height = 480;
} else {
dev->height = 576;
}
/* blackbird stuff */
printk("%s/2: cx23416 based mpeg encoder (blackbird reference design)\n",
core->name);
host_setup(dev->core);
blackbird_initialize_codec(dev);
blackbird_register_video(dev);
/* initial device configuration: needed ? */
mutex_lock(&dev->core->lock);
// init_controls(core);
cx88_set_tvnorm(core,core->tvnorm);
cx88_video_mux(core,0);
mutex_unlock(&dev->core->lock);
return 0;
fail_core:
return err;
}
static int cx8802_blackbird_remove(struct cx8802_driver *drv)
{
/* blackbird */
blackbird_unregister_video(drv->core->dvbdev);
return 0;
}
static struct cx8802_driver cx8802_blackbird_driver = {
.type_id = CX88_MPEG_BLACKBIRD,
.hw_access = CX8802_DRVCTL_SHARED,
.probe = cx8802_blackbird_probe,
.remove = cx8802_blackbird_remove,
.advise_acquire = cx8802_blackbird_advise_acquire,
.advise_release = cx8802_blackbird_advise_release,
};
static int __init blackbird_init(void)
{
printk(KERN_INFO "cx2388x blackbird driver version %d.%d.%d loaded\n",
(CX88_VERSION_CODE >> 16) & 0xff,
(CX88_VERSION_CODE >> 8) & 0xff,
CX88_VERSION_CODE & 0xff);
#ifdef SNAPSHOT
printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
#endif
return cx8802_register_driver(&cx8802_blackbird_driver);
}
static void __exit blackbird_fini(void)
{
cx8802_unregister_driver(&cx8802_blackbird_driver);
}
module_init(blackbird_init);
module_exit(blackbird_fini);
module_param_named(video_debug,cx8802_mpeg_template.debug, int, 0644);
MODULE_PARM_DESC(video_debug,"enable debug messages [video]");
/* ----------------------------------------------------------- */
/*
* Local variables:
* c-basic-offset: 8
* End:
* kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
*/
| gpl-2.0 |
KanoComputing/raspberrypi-linux | arch/parisc/kernel/traps.c | 896 | 21773 | /*
* linux/arch/parisc/traps.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
*/
/*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <asm/assembly.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include "../math-emu/math-emu.h" /* for handle_fpe() */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif
static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
struct pt_regs *regs);
static int printbinary(char *buf, unsigned long x, int nbits)
{
unsigned long mask = 1UL << (nbits - 1);
while (mask != 0) {
*buf++ = (mask & x ? '1' : '0');
mask >>= 1;
}
*buf = '\0';
return nbits;
}
#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx" /* fpregs are 64-bit always */
#define PRINTREGS(lvl,r,f,fmt,x) \
printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \
lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \
(r)[(x)+2], (r)[(x)+3])
static void print_gr(char *level, struct pt_regs *regs)
{
int i;
char buf[64];
printk("%s\n", level);
printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
printbinary(buf, regs->gr[0], 32);
printk("%sPSW: %s %s\n", level, buf, print_tainted());
for (i = 0; i < 32; i += 4)
PRINTREGS(level, regs->gr, "r", RFMT, i);
}
static void print_fr(char *level, struct pt_regs *regs)
{
int i;
char buf[64];
struct { u32 sw[2]; } s;
/* FR are 64bit everywhere. Need to use asm to get the content
* of fpsr/fper1, and we assume that we won't have a FP Identify
* in our way, otherwise we're screwed.
* The fldd is used to restore the T-bit if there was one, as the
* store clears it anyway.
* PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
asm volatile ("fstd %%fr0,0(%1) \n\t"
"fldd 0(%1),%%fr0 \n\t"
: "=m" (s) : "r" (&s) : "r0");
printk("%s\n", level);
printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
printbinary(buf, s.sw[0], 32);
printk("%sFPSR: %s\n", level, buf);
printk("%sFPER1: %08x\n", level, s.sw[1]);
/* here we'll print fr0 again, tho it'll be meaningless */
for (i = 0; i < 32; i += 4)
PRINTREGS(level, regs->fr, "fr", FFMT, i);
}
void show_regs(struct pt_regs *regs)
{
int i, user;
char *level;
unsigned long cr30, cr31;
user = user_mode(regs);
level = user ? KERN_DEBUG : KERN_CRIT;
show_regs_print_info(level);
print_gr(level, regs);
for (i = 0; i < 8; i += 4)
PRINTREGS(level, regs->sr, "sr", RFMT, i);
if (user)
print_fr(level, regs);
cr30 = mfctl(30);
cr31 = mfctl(31);
printk("%s\n", level);
printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
level, regs->iir, regs->isr, regs->ior);
printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
level, current_thread_info()->cpu, cr30, cr31);
printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
if (user) {
printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
} else {
printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
parisc_show_stack(current, NULL, regs);
}
}
static DEFINE_RATELIMIT_STATE(_hppa_rs,
DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
#define parisc_printk_ratelimited(critical, regs, fmt, ...) { \
if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
printk(fmt, ##__VA_ARGS__); \
show_regs(regs); \
} \
}
static void do_show_stack(struct unwind_frame_info *info)
{
int i = 1;
printk(KERN_CRIT "Backtrace:\n");
while (i <= 16) {
if (unwind_once(info) < 0 || info->ip == 0)
break;
if (__kernel_text_address(info->ip)) {
printk(KERN_CRIT " [<" RFMT ">] %pS\n",
info->ip, (void *) info->ip);
i++;
}
}
printk(KERN_CRIT "\n");
}
static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
struct pt_regs *regs)
{
struct unwind_frame_info info;
struct task_struct *t;
t = task ? task : current;
if (regs) {
unwind_frame_init(&info, t, regs);
goto show_stack;
}
if (t == current) {
unsigned long sp;
HERE:
asm volatile ("copy %%r30, %0" : "=r"(sp));
{
struct pt_regs r;
memset(&r, 0, sizeof(struct pt_regs));
r.iaoq[0] = (unsigned long)&&HERE;
r.gr[2] = (unsigned long)__builtin_return_address(0);
r.gr[30] = sp;
unwind_frame_init(&info, current, &r);
}
} else {
unwind_frame_init_from_blocked_task(&info, t);
}
show_stack:
do_show_stack(&info);
}
void show_stack(struct task_struct *t, unsigned long *sp)
{
return parisc_show_stack(t, sp, NULL);
}
int is_valid_bugaddr(unsigned long iaoq)
{
return 1;
}
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
if (user_mode(regs)) {
if (err == 0)
return; /* STFU */
parisc_printk_ratelimited(1, regs,
KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
return;
}
oops_in_progress = 1;
oops_enter();
/* Amuse the user in a SPARC fashion */
if (err) printk(KERN_CRIT
" _______________________________ \n"
" < Your System ate a SPARC! Gah! >\n"
" ------------------------------- \n"
" \\ ^__^\n"
" (__)\\ )\\/\\\n"
" U ||----w |\n"
" || ||\n");
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
/* maybe the kernel hasn't booted very far yet and hasn't been able
* to initialize the serial or STI console. In that case we should
* re-enable the pdc console, so that the user will be able to
* identify the problem. */
if (!console_drivers)
pdc_console_restart();
if (err)
printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
current->comm, task_pid_nr(current), str, err);
/* Wot's wrong wif bein' racy? */
if (current->thread.flags & PARISC_KERNEL_DEATH) {
printk(KERN_CRIT "%s() recursion detected.\n", __func__);
local_irq_enable();
while (1);
}
current->thread.flags |= PARISC_KERNEL_DEATH;
show_regs(regs);
dump_stack();
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops) {
printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
ssleep(5);
panic("Fatal exception");
}
oops_exit();
do_exit(SIGSEGV);
}
/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
struct siginfo si;
si.si_signo = SIGTRAP;
si.si_errno = 0;
si.si_code = wot;
si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
force_sig_info(SIGTRAP, &si, current);
}
static void handle_break(struct pt_regs *regs)
{
unsigned iir = regs->iir;
if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
/* check if a BUG() or WARN() trapped here. */
enum bug_trap_type tt;
tt = report_bug(regs->iaoq[0] & ~3, regs);
if (tt == BUG_TRAP_TYPE_WARN) {
regs->iaoq[0] += 4;
regs->iaoq[1] += 4;
return; /* return to next instruction when WARN_ON(). */
}
die_if_kernel("Unknown kernel breakpoint", regs,
(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
}
if (unlikely(iir != GDB_BREAK_INSN))
parisc_printk_ratelimited(0, regs,
KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
iir & 31, (iir>>13) & ((1<<13)-1),
task_pid_nr(current), current->comm);
/* send standard GDB signal */
handle_gdb_break(regs, TRAP_BRKPT);
}
static void default_trap(int code, struct pt_regs *regs)
{
printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
show_regs(regs);
}
void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
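/* After an HPMC the machine state is in the PDC PIM area rather than
 * a normal trap frame; copy it into a pt_regs so the generic dump and
 * terminate paths can use it. Wide (PA2.0) and narrow (PA1.1)
 * firmware lay the PIM data out differently, hence the two branches. */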
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
register int i;
extern unsigned int hpmc_pim_data[];
struct pdc_hpmc_pim_11 *pim_narrow;
struct pdc_hpmc_pim_20 *pim_wide;
if (boot_cpu_data.cpu_type >= pcxu) {
pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
/*
* Note: The following code will probably generate a
* bunch of truncation error warnings from the compiler.
* Could be handled with an ifdef, but perhaps there
* is a better way.
*/
regs->gr[0] = pim_wide->cr[22];
for (i = 1; i < 32; i++)
regs->gr[i] = pim_wide->gr[i];
for (i = 0; i < 32; i++)
regs->fr[i] = pim_wide->fr[i];
for (i = 0; i < 8; i++)
regs->sr[i] = pim_wide->sr[i];
regs->iasq[0] = pim_wide->cr[17];
regs->iasq[1] = pim_wide->iasq_back;
regs->iaoq[0] = pim_wide->cr[18];
regs->iaoq[1] = pim_wide->iaoq_back;
regs->sar = pim_wide->cr[11];
regs->iir = pim_wide->cr[19];
regs->isr = pim_wide->cr[20];
regs->ior = pim_wide->cr[21];
}
else {
pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
regs->gr[0] = pim_narrow->cr[22];
for (i = 1; i < 32; i++)
regs->gr[i] = pim_narrow->gr[i];
for (i = 0; i < 32; i++)
regs->fr[i] = pim_narrow->fr[i];
for (i = 0; i < 8; i++)
regs->sr[i] = pim_narrow->sr[i];
regs->iasq[0] = pim_narrow->cr[17];
regs->iasq[1] = pim_narrow->iasq_back;
regs->iaoq[0] = pim_narrow->cr[18];
regs->iaoq[1] = pim_narrow->iaoq_back;
regs->sar = pim_narrow->cr[11];
regs->iir = pim_narrow->cr[19];
regs->isr = pim_narrow->cr[20];
regs->ior = pim_narrow->cr[21];
}
/*
* The following fields only have meaning if we came through
* another path. So just zero them here.
*/
regs->ksp = 0;
regs->kpc = 0;
regs->orig_r28 = 0;
}
/*
* This routine is called as a last resort when everything else
* has gone clearly wrong. We get called for faults in kernel space,
* and HPMC's.
*/
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
static DEFINE_SPINLOCK(terminate_lock);
oops_in_progress = 1;
set_eiem(0);
local_irq_disable();
spin_lock(&terminate_lock);
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
/* restart pdc console if necessary */
if (!console_drivers)
pdc_console_restart();
/* Not all paths will gutter the processor... */
switch(code){
case 1:
transfer_pim_to_trap_frame(regs);
break;
default:
/* Fall through */
break;
}
{
/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
struct unwind_frame_info info;
unwind_frame_init(&info, current, regs);
do_show_stack(&info);
}
printk("\n");
printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
msg, code, regs, offset);
show_regs(regs);
spin_unlock(&terminate_lock);
/* put soft power button back under hardware control;
* if the user had pressed it once at any time, the
* system will shut down immediately right here. */
pdc_soft_power_button(0);
/* Call kernel panic() so reboot timeouts work properly
* FIXME: This function should be on the list of
* panic notifiers, and we should call panic
* directly from the location that we wish.
* e.g. We should not call panic from
* parisc_terminate, but rather the other way around.
* This hack works, prints the panic message twice,
* and it enables reboot timers!
*/
panic(msg);
}
void notrace handle_interruption(int code, struct pt_regs *regs)
{
unsigned long fault_address = 0;
unsigned long fault_space = 0;
struct siginfo si;
if (code == 1)
pdc_console_restart(); /* switch back to pdc if HPMC */
else
local_irq_enable();
/* Security check:
* If the priority level is still user, and the
* faulting space is not equal to the active space
* then the user is attempting something in a space
* that does not belong to them. Kill the process.
*
* This is normally the situation when the user
* attempts to jump into the kernel space at the
* wrong offset, be it at the gateway page or a
* random location.
*
* We cannot normally signal the process because it
* could *be* on the gateway page, and processes
* executing on the gateway page can't have signals
* delivered.
*
* We merely readjust the address into the users
* space, at a destination address of zero, and
* allow processing to continue.
*/
if (((unsigned long)regs->iaoq[0] & 3) &&
((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
/* Kill the user process later */
regs->iaoq[0] = 0 | 3;
regs->iaoq[1] = regs->iaoq[0] + 4;
regs->iasq[0] = regs->iasq[1] = regs->sr[7];
regs->gr[0] &= ~PSW_B;
return;
}
#if 0
printk(KERN_CRIT "Interruption # %d\n", code);
#endif
switch(code) {
case 1:
/* High-priority machine check (HPMC) */
/* set up a new led state on systems shipped with a LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
parisc_terminate("High Priority Machine Check (HPMC)",
regs, code, 0);
/* NOT REACHED */
case 2:
/* Power failure interrupt */
printk(KERN_CRIT "Power failure interrupt !\n");
return;
case 3:
/* Recovery counter trap */
regs->gr[0] &= ~PSW_R;
if (user_space(regs))
handle_gdb_break(regs, TRAP_TRACE);
/* else this must be the start of a syscall - just let it run */
return;
case 5:
/* Low-priority machine check */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
flush_cache_all();
flush_tlb_all();
cpu_lpmc(5, regs);
return;
case 6:
/* Instruction TLB miss fault/Instruction page fault */
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
break;
case 8:
/* Illegal instruction trap */
die_if_kernel("Illegal instruction", regs, code);
si.si_code = ILL_ILLOPC;
goto give_sigill;
case 9:
/* Break instruction trap */
handle_break(regs);
return;
case 10:
/* Privileged operation trap */
die_if_kernel("Privileged operation", regs, code);
si.si_code = ILL_PRVOPC;
goto give_sigill;
case 11:
/* Privileged register trap */
if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
/* This is a MFCTL cr26/cr27 to gr instruction.
* PCXS traps on this, so we need to emulate it.
*/
if (regs->iir & 0x00200000)
regs->gr[regs->iir & 0x1f] = mfctl(27);
else
regs->gr[regs->iir & 0x1f] = mfctl(26);
regs->iaoq[0] = regs->iaoq[1];
regs->iaoq[1] += 4;
regs->iasq[0] = regs->iasq[1];
return;
}
die_if_kernel("Privileged register usage", regs, code);
si.si_code = ILL_PRVREG;
give_sigill:
si.si_signo = SIGILL;
si.si_errno = 0;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGILL, &si, current);
return;
case 12:
/* Overflow Trap, let the userland signal handler do the cleanup */
si.si_signo = SIGFPE;
si.si_code = FPE_INTOVF;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGFPE, &si, current);
return;
case 13:
/* Conditional Trap
The condition succeeds in an instruction which traps
on condition */
if(user_mode(regs)){
si.si_signo = SIGFPE;
/* Set to zero, and let the userspace app figure it out from
the insn pointed to by si_addr */
si.si_code = 0;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGFPE, &si, current);
return;
}
/* The kernel doesn't want to handle condition codes */
break;
case 14:
/* Assist Exception Trap, i.e. floating point exception. */
die_if_kernel("Floating point exception", regs, 0); /* quiet */
__inc_irq_stat(irq_fpassist_count);
handle_fpe(regs);
return;
case 15:
/* Data TLB miss fault/Data page fault */
/* Fall through */
case 16:
/* Non-access instruction TLB miss fault */
/* The instruction TLB entry needed for the target address of the FIC
is absent, and hardware can't find it, so we get to cleanup */
/* Fall through */
case 17:
/* Non-access data TLB miss fault/Non-access data page fault */
/* FIXME:
Still need to add slow path emulation code here!
If the insn used a non-shadow register, then the tlb
handlers could not have their side-effect (e.g. probe
writing to a target register) emulated since rfir would
erase the changes to said register. Instead we have to
setup everything, call this function we are in, and emulate
by hand. Technically we need to emulate:
fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
*/
fault_address = regs->ior;
fault_space = regs->isr;
break;
case 18:
/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
/* Check for unaligned access */
if (check_unaligned(regs)) {
handle_unaligned(regs);
return;
}
/* Fall Through */
case 26:
/* PCXL: Data memory access rights trap */
fault_address = regs->ior;
fault_space = regs->isr;
break;
case 19:
/* Data memory break trap */
regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
/* fall thru */
case 21:
/* Page reference trap */
handle_gdb_break(regs, TRAP_HWBKPT);
return;
case 25:
/* Taken branch trap */
regs->gr[0] &= ~PSW_T;
if (user_space(regs))
handle_gdb_break(regs, TRAP_BRANCH);
/* else this must be the start of a syscall - just let it
* run.
*/
return;
case 7:
/* Instruction access rights */
/* PCXL: Instruction memory protection trap */
/*
* This could be caused by either: 1) a process attempting
* to execute within a vma that does not have execute
* permission, or 2) an access rights violation caused by a
* flush only translation set up by ptep_get_and_clear().
* So we check the vma permissions to differentiate the two.
* If the vma indicates we have execute permission, then
* the cause is the latter one. In this case, we need to
* call do_page_fault() to fix the problem.
*/
if (user_mode(regs)) {
struct vm_area_struct *vma;
down_read(¤t->mm->mmap_sem);
vma = find_vma(current->mm,regs->iaoq[0]);
if (vma && (regs->iaoq[0] >= vma->vm_start)
&& (vma->vm_flags & VM_EXEC)) {
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
up_read(¤t->mm->mmap_sem);
break; /* call do_page_fault() */
}
up_read(¤t->mm->mmap_sem);
}
/* Fall Through */
case 27:
/* Data memory protection ID trap */
if (code == 27 && !user_mode(regs) &&
fixup_exception(regs))
return;
die_if_kernel("Protection id trap", regs, code);
si.si_code = SEGV_MAPERR;
si.si_signo = SIGSEGV;
si.si_errno = 0;
if (code == 7)
si.si_addr = (void __user *) regs->iaoq[0];
else
si.si_addr = (void __user *) regs->ior;
force_sig_info(SIGSEGV, &si, current);
return;
case 28:
/* Unaligned data reference trap */
handle_unaligned(regs);
return;
default:
if (user_mode(regs)) {
parisc_printk_ratelimited(0, regs, KERN_DEBUG
"handle_interruption() pid=%d command='%s'\n",
task_pid_nr(current), current->comm);
/* SIGBUS, for lack of a better one. */
si.si_signo = SIGBUS;
si.si_code = BUS_OBJERR;
si.si_errno = 0;
si.si_addr = (void __user *) regs->ior;
force_sig_info(SIGBUS, &si, current);
return;
}
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Unexpected interruption", regs, code, 0);
/* NOT REACHED */
}
if (user_mode(regs)) {
if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
parisc_printk_ratelimited(0, regs, KERN_DEBUG
"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
code, fault_space,
task_pid_nr(current), current->comm);
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = SEGV_MAPERR;
si.si_addr = (void __user *) regs->ior;
force_sig_info(SIGSEGV, &si, current);
return;
}
}
else {
/*
* The kernel should never fault on its own address space,
* unless pagefault_disable() was called before.
*/
if (fault_space == 0 && !in_atomic())
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
}
}
do_page_fault(regs, code, fault_address);
}
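/* Sanity-check the interrupt vector table handed over by the early
 * boot code ("cows can fly" is the magic cookie planted there), then
 * patch in the HPMC handler length and a checksum: ivap[5] is set to
 * the negated sum so that the handler words plus the first eight
 * vector words sum to zero, presumably how firmware validates the
 * HPMC handler. */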
int __init check_ivt(void *iva)
{
extern u32 os_hpmc_size;
extern const u32 os_hpmc[];
int i;
u32 check = 0;
u32 *ivap;
u32 *hpmcp;
u32 length;
if (strcmp((char *)iva, "cows can fly"))
return -1;
ivap = (u32 *)iva;
for (i = 0; i < 8; i++)
*ivap++ = 0;
/* Compute Checksum for HPMC handler */
length = os_hpmc_size;
ivap[7] = length;
hpmcp = (u32 *)os_hpmc;
for (i=0; i<length/4; i++)
check += *hpmcp++;
for (i=0; i<8; i++)
check += ivap[i];
ivap[5] = -check;
return 0;
}
#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;
void __init trap_init(void)
{
void *iva;
if (boot_cpu_data.cpu_type >= pcxu)
iva = (void *) &fault_vector_20;
else
#ifdef CONFIG_64BIT
panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
iva = (void *) &fault_vector_11;
#endif
if (check_ivt(iva))
panic("IVT invalid");
}
| gpl-2.0 |
networkosnet/linux | drivers/staging/comedi/drivers/dt2811.c | 896 | 10973 | /*
comedi/drivers/dt2811.c
Hardware driver for Data Translation DT2811
COMEDI - Linux Control and Measurement Device Interface
History:
Base Version - David A. Schleef <ds@schleef.org>
December 1998 - Updated to work. David does not have a DT2811
board any longer so this was suffering from bitrot.
Update performed by ...
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
/*
Driver: dt2811
Description: Data Translation DT2811
Author: ds
Devices: [Data Translation] DT2811-PGL (dt2811-pgl), DT2811-PGH (dt2811-pgh)
Status: works
Configuration options:
[0] - I/O port base address
[1] - IRQ, although this is currently unused
[2] - A/D reference
0 = single-ended
1 = differential
2 = pseudo-differential (common reference)
[3] - A/D range
0 = [-5, 5]
1 = [-2.5, 2.5]
2 = [0, 5]
[4] - D/A 0 range (same choices)
[5] - D/A 1 range (same choices)
*/
#include <linux/module.h>
#include "../comedidev.h"
static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = {
4, {
UNI_RANGE(5),
UNI_RANGE(2.5),
UNI_RANGE(1.25),
UNI_RANGE(0.625)
}
};
static const struct comedi_lrange range_dt2811_pgh_ai_2_5_bipolar = {
4, {
BIP_RANGE(2.5),
BIP_RANGE(1.25),
BIP_RANGE(0.625),
BIP_RANGE(0.3125)
}
};
static const struct comedi_lrange range_dt2811_pgh_ai_5_bipolar = {
4, {
BIP_RANGE(5),
BIP_RANGE(2.5),
BIP_RANGE(1.25),
BIP_RANGE(0.625)
}
};
static const struct comedi_lrange range_dt2811_pgl_ai_5_unipolar = {
4, {
UNI_RANGE(5),
UNI_RANGE(0.5),
UNI_RANGE(0.05),
UNI_RANGE(0.01)
}
};
static const struct comedi_lrange range_dt2811_pgl_ai_2_5_bipolar = {
4, {
BIP_RANGE(2.5),
BIP_RANGE(0.25),
BIP_RANGE(0.025),
BIP_RANGE(0.005)
}
};
static const struct comedi_lrange range_dt2811_pgl_ai_5_bipolar = {
4, {
BIP_RANGE(5),
BIP_RANGE(0.5),
BIP_RANGE(0.05),
BIP_RANGE(0.01)
}
};
/*
0x00 ADCSR R/W A/D Control/Status Register
bit 7 - (R) 1 indicates A/D conversion done
reading ADDAT clears bit
(W) ignored
bit 6 - (R) 1 indicates A/D error
(W) ignored
bit 5 - (R) 1 indicates A/D busy, cleared at end
of conversion
(W) ignored
bit 4 - (R) 0
(W)
bit 3 - (R) 0
bit 2 - (R/W) 1 indicates interrupts enabled
bits 1,0 - (R/W) mode bits
00 single conversion on ADGCR load
01 continuous conversion, internal clock,
(clock enabled on ADGCR load)
10 continuous conversion, internal clock,
external trigger
11 continuous conversion, external clock,
external trigger
0x01 ADGCR R/W A/D Gain/Channel Register
bit 6,7 - (R/W) gain select
00 gain=1, both PGH, PGL models
01 gain=2 PGH, 10 PGL
10 gain=4 PGH, 100 PGL
11 gain=8 PGH, 500 PGL
bit 4,5 - reserved
bit 3-0 - (R/W) channel select
channel number from 0-15
0x02,0x03 (R) ADDAT A/D Data Register
(W) DADAT0 D/A Data Register 0
0x02 low byte
0x03 high byte
0x04,0x05 (W) DADAT0 D/A Data Register 1
0x06 (R) DIO0 Digital Input Port 0
(W) DIO1 Digital Output Port 1
0x07 TMRCTR (R/W) Timer/Counter Register
bits 6,7 - reserved
bits 5-3 - Timer frequency control (mantissa)
543 divisor frequency (kHz)
000 1 600
001 10 60
010 2 300
011 3 200
100 4 150
101 5 120
110 6 100
111 12 50
bits 2-0 - Timer frequency control (exponent)
210 multiply divisor/divide frequency by
000 1
001 10
010 100
011 1000
100 10000
101 100000
110 1000000
111 10000000
*/
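/*
 * Worked example of the map above: a single conversion on channel 3
 * at gain 1 is just a write of 0x03 to ADGCR with the ADCSR mode bits
 * at 00, a poll of the busy bit, then a read of ADDATLO/ADDATHI --
 * which is exactly what dt2811_ai_insn() below does.
 */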
#define TIMEOUT 10000
#define DT2811_ADCSR 0
#define DT2811_ADGCR 1
#define DT2811_ADDATLO 2
#define DT2811_ADDATHI 3
#define DT2811_DADAT0LO 2
#define DT2811_DADAT0HI 3
#define DT2811_DADAT1LO 4
#define DT2811_DADAT1HI 5
#define DT2811_DIO 6
#define DT2811_TMRCTR 7
/*
* flags
*/
/* ADCSR */
#define DT2811_ADDONE 0x80
#define DT2811_ADERROR 0x40
#define DT2811_ADBUSY 0x20
#define DT2811_CLRERROR 0x10
#define DT2811_INTENB 0x04
#define DT2811_ADMODE 0x03
struct dt2811_board {
const char *name;
const struct comedi_lrange *bip_5;
const struct comedi_lrange *bip_2_5;
const struct comedi_lrange *unip_5;
};
enum { card_2811_pgh, card_2811_pgl };
struct dt2811_private {
int ntrig;
int curadchan;
enum {
adc_singleended, adc_diff, adc_pseudo_diff
} adc_mux;
enum {
dac_bipolar_5, dac_bipolar_2_5, dac_unipolar_5
} dac_range[2];
const struct comedi_lrange *range_type_list[2];
};
static const struct comedi_lrange *dac_range_types[] = {
&range_bipolar5,
&range_bipolar2_5,
&range_unipolar5
};
static int dt2811_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inb(dev->iobase + DT2811_ADCSR);
if ((status & DT2811_ADBUSY) == 0)
return 0;
return -EBUSY;
}
static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int chan = CR_CHAN(insn->chanspec);
int ret;
int i;
for (i = 0; i < insn->n; i++) {
outb(chan, dev->iobase + DT2811_ADGCR);
ret = comedi_timeout(dev, s, insn, dt2811_ai_eoc, 0);
if (ret)
return ret;
data[i] = inb(dev->iobase + DT2811_ADDATLO);
data[i] |= inb(dev->iobase + DT2811_ADDATHI) << 8;
data[i] &= 0xfff;
}
return i;
}
static int dt2811_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int val = s->readback[chan];
int i;
for (i = 0; i < insn->n; i++) {
val = data[i];
outb(val & 0xff, dev->iobase + DT2811_DADAT0LO + 2 * chan);
outb((val >> 8) & 0xff,
dev->iobase + DT2811_DADAT0HI + 2 * chan);
}
s->readback[chan] = val;
return insn->n;
}
static int dt2811_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
data[1] = inb(dev->iobase + DT2811_DIO);
return insn->n;
}
static int dt2811_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
outb(s->state, dev->iobase + DT2811_DIO);
data[1] = s->state;
return insn->n;
}
/*
options[0] Board base address
options[1] IRQ
options[2] Input configuration
0 == single-ended
1 == differential
2 == pseudo-differential
options[3] Analog input range configuration
0 == bipolar 5 (-5V -- +5V)
1 == bipolar 2.5V (-2.5V -- +2.5V)
2 == unipolar 5V (0V -- +5V)
options[4] Analog output 0 range configuration
0 == bipolar 5 (-5V -- +5V)
1 == bipolar 2.5V (-2.5V -- +2.5V)
2 == unipolar 5V (0V -- +5V)
options[5] Analog output 1 range configuration
0 == bipolar 5 (-5V -- +5V)
1 == bipolar 2.5V (-2.5V -- +2.5V)
2 == unipolar 5V (0V -- +5V)
*/
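/* A hypothetical attach via comedi's userspace tool, assuming the
 * board is jumpered for I/O base 0x220:
 * comedi_config /dev/comedi0 dt2811-pgl 0x220,0,0,0,0,0
 * (base, IRQ, single-ended, bipolar 5V inputs, both DACs bipolar 5V) */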
static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
/* int i; */
const struct dt2811_board *board = dev->board_ptr;
struct dt2811_private *devpriv;
int ret;
struct comedi_subdevice *s;
ret = comedi_request_region(dev, it->options[0], 0x8);
if (ret)
return ret;
#if 0
outb(0, dev->iobase + DT2811_ADCSR);
udelay(100);
i = inb(dev->iobase + DT2811_ADDATLO);
i = inb(dev->iobase + DT2811_ADDATHI);
#endif
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
switch (it->options[2]) {
case 0:
devpriv->adc_mux = adc_singleended;
break;
case 1:
devpriv->adc_mux = adc_diff;
break;
case 2:
devpriv->adc_mux = adc_pseudo_diff;
break;
default:
devpriv->adc_mux = adc_singleended;
break;
}
switch (it->options[4]) {
case 0:
devpriv->dac_range[0] = dac_bipolar_5;
break;
case 1:
devpriv->dac_range[0] = dac_bipolar_2_5;
break;
case 2:
devpriv->dac_range[0] = dac_unipolar_5;
break;
default:
devpriv->dac_range[0] = dac_bipolar_5;
break;
}
switch (it->options[5]) {
case 0:
devpriv->dac_range[1] = dac_bipolar_5;
break;
case 1:
devpriv->dac_range[1] = dac_bipolar_2_5;
break;
case 2:
devpriv->dac_range[1] = dac_unipolar_5;
break;
default:
devpriv->dac_range[1] = dac_bipolar_5;
break;
}
s = &dev->subdevices[0];
/* initialize the ADC subdevice */
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = devpriv->adc_mux == adc_diff ? 8 : 16;
s->insn_read = dt2811_ai_insn;
s->maxdata = 0xfff;
switch (it->options[3]) {
case 0:
default:
s->range_table = board->bip_5;
break;
case 1:
s->range_table = board->bip_2_5;
break;
case 2:
s->range_table = board->unip_5;
break;
}
s = &dev->subdevices[1];
/* ao subdevice */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 0xfff;
s->range_table_list = devpriv->range_type_list;
devpriv->range_type_list[0] = dac_range_types[devpriv->dac_range[0]];
devpriv->range_type_list[1] = dac_range_types[devpriv->dac_range[1]];
s->insn_write = dt2811_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
s = &dev->subdevices[2];
/* di subdevice */
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 8;
s->insn_bits = dt2811_di_insn_bits;
s->maxdata = 1;
s->range_table = &range_digital;
s = &dev->subdevices[3];
/* do subdevice */
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->insn_bits = dt2811_do_insn_bits;
s->maxdata = 1;
s->state = 0;
s->range_table = &range_digital;
return 0;
}
static const struct dt2811_board boardtypes[] = {
{
.name = "dt2811-pgh",
.bip_5 = &range_dt2811_pgh_ai_5_bipolar,
.bip_2_5 = &range_dt2811_pgh_ai_2_5_bipolar,
.unip_5 = &range_dt2811_pgh_ai_5_unipolar,
}, {
.name = "dt2811-pgl",
.bip_5 = &range_dt2811_pgl_ai_5_bipolar,
.bip_2_5 = &range_dt2811_pgl_ai_2_5_bipolar,
.unip_5 = &range_dt2811_pgl_ai_5_unipolar,
},
};
static struct comedi_driver dt2811_driver = {
.driver_name = "dt2811",
.module = THIS_MODULE,
.attach = dt2811_attach,
.detach = comedi_legacy_detach,
.board_name = &boardtypes[0].name,
.num_names = ARRAY_SIZE(boardtypes),
.offset = sizeof(struct dt2811_board),
};
module_comedi_driver(dt2811_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ISTweak/android_kernel_sharp_is05 | drivers/hwmon/w83791d.c | 1152 | 51461 | /*
w83791d.c - Part of lm_sensors, Linux kernel modules for hardware
monitoring
Copyright (C) 2006-2007 Charles Spirakis <bezaur@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Supports following chips:
Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
w83791d 10 5 5 3 0x71 0x5ca3 yes no
The w83791d chip appears to be part way between the 83781d and the
83792d. Thus, this file is derived from both the w83792d.c and
w83781d.c files.
The w83791g chip is the same as the w83791d but lead-free.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-vid.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#define NUMBER_OF_VIN 10
#define NUMBER_OF_FANIN 5
#define NUMBER_OF_TEMPIN 3
#define NUMBER_OF_PWM 5
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
/* Insmod parameters */
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
static int reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to one to force a hardware chip reset");
static int init;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to one to force extra software initialization");
/* The W83791D registers */
static const u8 W83791D_REG_IN[NUMBER_OF_VIN] = {
0x20, /* VCOREA in DataSheet */
0x21, /* VINR0 in DataSheet */
0x22, /* +3.3VIN in DataSheet */
0x23, /* VDD5V in DataSheet */
0x24, /* +12VIN in DataSheet */
0x25, /* -12VIN in DataSheet */
0x26, /* -5VIN in DataSheet */
0xB0, /* 5VSB in DataSheet */
0xB1, /* VBAT in DataSheet */
0xB2 /* VINR1 in DataSheet */
};
static const u8 W83791D_REG_IN_MAX[NUMBER_OF_VIN] = {
0x2B, /* VCOREA High Limit in DataSheet */
0x2D, /* VINR0 High Limit in DataSheet */
0x2F, /* +3.3VIN High Limit in DataSheet */
0x31, /* VDD5V High Limit in DataSheet */
0x33, /* +12VIN High Limit in DataSheet */
0x35, /* -12VIN High Limit in DataSheet */
0x37, /* -5VIN High Limit in DataSheet */
0xB4, /* 5VSB High Limit in DataSheet */
0xB6, /* VBAT High Limit in DataSheet */
0xB8 /* VINR1 High Limit in DataSheet */
};
static const u8 W83791D_REG_IN_MIN[NUMBER_OF_VIN] = {
0x2C, /* VCOREA Low Limit in DataSheet */
0x2E, /* VINR0 Low Limit in DataSheet */
0x30, /* +3.3VIN Low Limit in DataSheet */
0x32, /* VDD5V Low Limit in DataSheet */
0x34, /* +12VIN Low Limit in DataSheet */
0x36, /* -12VIN Low Limit in DataSheet */
0x38, /* -5VIN Low Limit in DataSheet */
0xB5, /* 5VSB Low Limit in DataSheet */
0xB7, /* VBAT Low Limit in DataSheet */
0xB9 /* VINR1 Low Limit in DataSheet */
};
static const u8 W83791D_REG_FAN[NUMBER_OF_FANIN] = {
0x28, /* FAN 1 Count in DataSheet */
0x29, /* FAN 2 Count in DataSheet */
0x2A, /* FAN 3 Count in DataSheet */
0xBA, /* FAN 4 Count in DataSheet */
0xBB, /* FAN 5 Count in DataSheet */
};
static const u8 W83791D_REG_FAN_MIN[NUMBER_OF_FANIN] = {
0x3B, /* FAN 1 Count Low Limit in DataSheet */
0x3C, /* FAN 2 Count Low Limit in DataSheet */
0x3D, /* FAN 3 Count Low Limit in DataSheet */
0xBC, /* FAN 4 Count Low Limit in DataSheet */
0xBD, /* FAN 5 Count Low Limit in DataSheet */
};
static const u8 W83791D_REG_PWM[NUMBER_OF_PWM] = {
0x81, /* PWM 1 duty cycle register in DataSheet */
0x83, /* PWM 2 duty cycle register in DataSheet */
0x94, /* PWM 3 duty cycle register in DataSheet */
0xA0, /* PWM 4 duty cycle register in DataSheet */
0xA1, /* PWM 5 duty cycle register in DataSheet */
};
static const u8 W83791D_REG_TEMP_TARGET[3] = {
0x85, /* PWM 1 target temperature for temp 1 */
0x86, /* PWM 2 target temperature for temp 2 */
0x96, /* PWM 3 target temperature for temp 3 */
};
static const u8 W83791D_REG_TEMP_TOL[2] = {
0x87, /* PWM 1/2 temperature tolerance */
0x97, /* PWM 3 temperature tolerance */
};
static const u8 W83791D_REG_FAN_CFG[2] = {
0x84, /* FAN 1/2 configuration */
0x95, /* FAN 3 configuration */
};
static const u8 W83791D_REG_FAN_DIV[3] = {
0x47, /* contains FAN1 and FAN2 Divisor */
0x4b, /* contains FAN3 Divisor */
0x5C, /* contains FAN4 and FAN5 Divisor */
};
#define W83791D_REG_BANK 0x4E
#define W83791D_REG_TEMP2_CONFIG 0xC2
#define W83791D_REG_TEMP3_CONFIG 0xCA
static const u8 W83791D_REG_TEMP1[3] = {
0x27, /* TEMP 1 in DataSheet */
0x39, /* TEMP 1 Over in DataSheet */
0x3A, /* TEMP 1 Hyst in DataSheet */
};
static const u8 W83791D_REG_TEMP_ADD[2][6] = {
{0xC0, /* TEMP 2 in DataSheet */
0xC1, /* TEMP 2(0.5 deg) in DataSheet */
0xC5, /* TEMP 2 Over High part in DataSheet */
0xC6, /* TEMP 2 Over Low part in DataSheet */
0xC3, /* TEMP 2 Thyst High part in DataSheet */
0xC4}, /* TEMP 2 Thyst Low part in DataSheet */
{0xC8, /* TEMP 3 in DataSheet */
0xC9, /* TEMP 3(0.5 deg) in DataSheet */
0xCD, /* TEMP 3 Over High part in DataSheet */
0xCE, /* TEMP 3 Over Low part in DataSheet */
0xCB, /* TEMP 3 Thyst High part in DataSheet */
0xCC} /* TEMP 3 Thyst Low part in DataSheet */
};
#define W83791D_REG_BEEP_CONFIG 0x4D
static const u8 W83791D_REG_BEEP_CTRL[3] = {
0x56, /* BEEP Control Register 1 */
0x57, /* BEEP Control Register 2 */
0xA3, /* BEEP Control Register 3 */
};
#define W83791D_REG_GPIO 0x15
#define W83791D_REG_CONFIG 0x40
#define W83791D_REG_VID_FANDIV 0x47
#define W83791D_REG_DID_VID4 0x49
#define W83791D_REG_WCHIPID 0x58
#define W83791D_REG_CHIPMAN 0x4F
#define W83791D_REG_PIN 0x4B
#define W83791D_REG_I2C_SUBADDR 0x4A
#define W83791D_REG_ALARM1 0xA9 /* realtime status register1 */
#define W83791D_REG_ALARM2 0xAA /* realtime status register2 */
#define W83791D_REG_ALARM3 0xAB /* realtime status register3 */
#define W83791D_REG_VBAT 0x5D
#define W83791D_REG_I2C_ADDR 0x48
/* The SMBus locks itself. The Winbond W83791D has a bank select register
(index 0x4e), but the driver only accesses registers in bank 0. Since
we don't switch banks, we don't need any special code to handle
locking access between bank switches */
static inline int w83791d_read(struct i2c_client *client, u8 reg)
{
return i2c_smbus_read_byte_data(client, reg);
}
static inline int w83791d_write(struct i2c_client *client, u8 reg, u8 value)
{
return i2c_smbus_write_byte_data(client, reg, value);
}
/* The analog voltage inputs have 16mV LSB. Since the sysfs output is
in mV as would be measured on the chip input pin, need to just
multiply/divide by 16 to translate from/to register values. */
#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8) / 16), 0, 255))
#define IN_FROM_REG(val) ((val) * 16)
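/* Example: a raw reading of 192 reports 192 * 16 = 3072 mV, and
 * storing a 3072 mV limit writes back (3072 + 8) / 16 = 192; the +8
 * rounds to the nearest LSB. */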
static u8 fan_to_reg(long rpm, int div)
{
if (rpm == 0)
return 255;
rpm = SENSORS_LIMIT(rpm, 1, 1000000);
return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define FAN_FROM_REG(val,div) ((val) == 0 ? -1 : \
((val) == 255 ? 0 : \
1350000 / ((val) * (div))))
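/* 1350000 is presumably the usual Winbond timebase: a 22.5 kHz fan
 * counter clock times 60 s/min. Adding rpm * div / 2 before dividing
 * rounds to the nearest register count. */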
/* for temp1 which is 8-bit resolution, LSB = 1 degree Celsius */
#define TEMP1_FROM_REG(val) ((val) * 1000)
#define TEMP1_TO_REG(val) ((val) <= -128000 ? -128 : \
(val) >= 127000 ? 127 : \
(val) < 0 ? ((val) - 500) / 1000 : \
((val) + 500) / 1000)
/* for temp2 and temp3 which are 9-bit resolution, LSB = 0.5 degree Celsius
Assumes the top 8 bits are the integral amount and the bottom 8 bits
are the fractional amount. Since we only have 0.5 degree resolution,
the bottom 7 bits will always be zero */
#define TEMP23_FROM_REG(val) ((val) / 128 * 500)
#define TEMP23_TO_REG(val) ((val) <= -128000 ? 0x8000 : \
(val) >= 127500 ? 0x7F80 : \
(val) < 0 ? ((val) - 250) / 500 * 128 : \
((val) + 250) / 500 * 128)
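/* Example: 25.5 degrees C -> (25500 + 250) / 500 * 128 = 6528
 * (0x1980, i.e. 0x19 integral and 0x80 fractional); converting back,
 * 6528 / 128 * 500 = 25500. */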
/* for thermal cruise target temp, 7-bits, LSB = 1 degree Celsius */
#define TARGET_TEMP_TO_REG(val) ((val) < 0 ? 0 : \
(val) >= 127000 ? 127 : \
((val) + 500) / 1000)
/* for thermal cruise temp tolerance, 4-bits, LSB = 1 degree Celsius */
#define TOL_TEMP_TO_REG(val) ((val) < 0 ? 0 : \
(val) >= 15000 ? 15 : \
((val) + 500) / 1000)
#define BEEP_MASK_TO_REG(val) ((val) & 0xffffff)
#define BEEP_MASK_FROM_REG(val) ((val) & 0xffffff)
#define DIV_FROM_REG(val) (1 << (val))
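/* Round the requested divisor down to the nearest power of two and
 * return its base-2 log, e.g. 7 -> divisor 4 -> register value 2. */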
static u8 div_to_reg(int nr, long val)
{
int i;
/* fan divisors max out at 128 */
val = SENSORS_LIMIT(val, 1, 128) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
val >>= 1;
}
return (u8) i;
}
struct w83791d_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* array of 2 pointers to subclients */
struct i2c_client *lm75[2];
/* volts */
u8 in[NUMBER_OF_VIN]; /* Register value */
u8 in_max[NUMBER_OF_VIN]; /* Register value */
u8 in_min[NUMBER_OF_VIN]; /* Register value */
/* fans */
u8 fan[NUMBER_OF_FANIN]; /* Register value */
u8 fan_min[NUMBER_OF_FANIN]; /* Register value */
u8 fan_div[NUMBER_OF_FANIN]; /* Register encoding, shifted right */
/* Temperature sensors */
s8 temp1[3]; /* current, over, thyst */
s16 temp_add[2][3]; /* fixed point value. Top 8 bits are the
integral part, bottom 8 bits are the
fractional part. We only use the top
9 bits as the resolution is only
to the 0.5 degree C...
two sensors with three values
(cur, over, hyst) */
/* PWMs */
u8 pwm[5]; /* pwm duty cycle */
u8 pwm_enable[3]; /* pwm enable status for fan 1-3
(fan 4-5 only support manual mode) */
u8 temp_target[3]; /* pwm 1-3 target temperature */
u8 temp_tolerance[3]; /* pwm 1-3 temperature tolerance */
/* Misc */
u32 alarms; /* realtime status register encoding,combined */
u8 beep_enable; /* Global beep enable */
u32 beep_mask; /* Mask off specific beeps */
u8 vid; /* Register encoding, combined */
u8 vrm; /* hwmon-vid */
};
static int w83791d_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83791d_remove(struct i2c_client *client);
/* note: 'register' is a C keyword and cannot be a parameter name */
static int w83791d_read(struct i2c_client *client, u8 reg);
static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
static struct w83791d_data *w83791d_update_device(struct device *dev);
#ifdef DEBUG
static void w83791d_print_debug(struct w83791d_data *data, struct device *dev);
#endif
static void w83791d_init_client(struct i2c_client *client);
static const struct i2c_device_id w83791d_id[] = {
{ "w83791d", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83791d_id);
static struct i2c_driver w83791d_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "w83791d",
},
.probe = w83791d_probe,
.remove = w83791d_remove,
.id_table = w83791d_id,
.detect = w83791d_detect,
.address_list = normal_i2c,
};
/* following are the sysfs callback functions */
#define show_in_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
struct w83791d_data *data = w83791d_update_device(dev); \
int nr = sensor_attr->index; \
return sprintf(buf,"%d\n", IN_FROM_REG(data->reg[nr])); \
}
show_in_reg(in);
show_in_reg(in_min);
show_in_reg(in_max);
#define store_in_reg(REG, reg) \
static ssize_t store_in_##reg(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
struct i2c_client *client = to_i2c_client(dev); \
struct w83791d_data *data = i2c_get_clientdata(client); \
unsigned long val = simple_strtoul(buf, NULL, 10); \
int nr = sensor_attr->index; \
\
mutex_lock(&data->update_lock); \
data->in_##reg[nr] = IN_TO_REG(val); \
w83791d_write(client, W83791D_REG_IN_##REG[nr], data->in_##reg[nr]); \
mutex_unlock(&data->update_lock); \
\
return count; \
}
store_in_reg(MIN, min);
store_in_reg(MAX, max);
static struct sensor_device_attribute sda_in_input[] = {
SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0),
SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1),
SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2),
SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3),
SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4),
SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5),
SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6),
SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7),
SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8),
SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9),
};
static struct sensor_device_attribute sda_in_min[] = {
SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
};
static struct sensor_device_attribute sda_in_max[] = {
SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
};
static ssize_t show_beep(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr =
to_sensor_dev_attr(attr);
struct w83791d_data *data = w83791d_update_device(dev);
int bitnr = sensor_attr->index;
return sprintf(buf, "%d\n", (data->beep_mask >> bitnr) & 1);
}
static ssize_t store_beep(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr =
to_sensor_dev_attr(attr);
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
int bitnr = sensor_attr->index;
int bytenr = bitnr / 8;
long val = simple_strtol(buf, NULL, 10) ? 1 : 0;
mutex_lock(&data->update_lock);
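/* Re-read the hardware byte holding this bit so bits changed
 * elsewhere aren't clobbered, then set or clear just the one
 * requested bit before writing the byte back. */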
data->beep_mask &= ~(0xff << (bytenr * 8));
data->beep_mask |= w83791d_read(client, W83791D_REG_BEEP_CTRL[bytenr])
<< (bytenr * 8);
data->beep_mask &= ~(1 << bitnr);
data->beep_mask |= val << bitnr;
w83791d_write(client, W83791D_REG_BEEP_CTRL[bytenr],
(data->beep_mask >> (bytenr * 8)) & 0xff);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr =
to_sensor_dev_attr(attr);
struct w83791d_data *data = w83791d_update_device(dev);
int bitnr = sensor_attr->index;
return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
}
/* Note: The bitmask for the beep enable/disable is different than
the bitmask for the alarm. */
static struct sensor_device_attribute sda_in_beep[] = {
SENSOR_ATTR(in0_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 0),
SENSOR_ATTR(in1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 13),
SENSOR_ATTR(in2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 2),
SENSOR_ATTR(in3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 3),
SENSOR_ATTR(in4_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 8),
SENSOR_ATTR(in5_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 9),
SENSOR_ATTR(in6_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 10),
SENSOR_ATTR(in7_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 16),
SENSOR_ATTR(in8_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 17),
SENSOR_ATTR(in9_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 14),
};
static struct sensor_device_attribute sda_in_alarm[] = {
SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0),
SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1),
SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2),
SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3),
SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8),
SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9),
SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10),
SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 19),
SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 20),
SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 14),
};
#define show_fan_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
struct w83791d_data *data = w83791d_update_device(dev); \
int nr = sensor_attr->index; \
return sprintf(buf,"%d\n", \
FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
}
show_fan_reg(fan);
show_fan_reg(fan_min);
static ssize_t store_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
unsigned long val = simple_strtoul(buf, NULL, 10);
int nr = sensor_attr->index;
mutex_lock(&data->update_lock);
data->fan_min[nr] = fan_to_reg(val, DIV_FROM_REG(data->fan_div[nr]));
w83791d_write(client, W83791D_REG_FAN_MIN[nr], data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%u\n", DIV_FROM_REG(data->fan_div[nr]));
}
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t store_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
int nr = sensor_attr->index;
unsigned long min;
u8 tmp_fan_div;
u8 fan_div_reg;
u8 vbat_reg;
int indx = 0;
u8 keep_mask = 0;
u8 new_shift = 0;
/* Save fan_min */
min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
mutex_lock(&data->update_lock);
data->fan_div[nr] = div_to_reg(nr, simple_strtoul(buf, NULL, 10));
switch (nr) {
case 0:
indx = 0;
keep_mask = 0xcf;
new_shift = 4;
break;
case 1:
indx = 0;
keep_mask = 0x3f;
new_shift = 6;
break;
case 2:
indx = 1;
keep_mask = 0x3f;
new_shift = 6;
break;
case 3:
indx = 2;
keep_mask = 0xf8;
new_shift = 0;
break;
case 4:
indx = 2;
keep_mask = 0x8f;
new_shift = 4;
break;
#ifdef DEBUG
default:
dev_warn(dev, "store_fan_div: Unexpected nr seen: %d\n", nr);
count = -EINVAL;
goto err_exit;
#endif
}
fan_div_reg = w83791d_read(client, W83791D_REG_FAN_DIV[indx])
& keep_mask;
tmp_fan_div = (data->fan_div[nr] << new_shift) & ~keep_mask;
w83791d_write(client, W83791D_REG_FAN_DIV[indx],
fan_div_reg | tmp_fan_div);
/* Bit 2 of fans 0-2 is stored in the vbat register (bits 5-7) */
if (nr < 3) {
keep_mask = ~(1 << (nr + 5));
vbat_reg = w83791d_read(client, W83791D_REG_VBAT)
& keep_mask;
tmp_fan_div = (data->fan_div[nr] << (3 + nr)) & ~keep_mask;
w83791d_write(client, W83791D_REG_VBAT,
vbat_reg | tmp_fan_div);
}
/* Restore fan_min */
data->fan_min[nr] = fan_to_reg(min, DIV_FROM_REG(data->fan_div[nr]));
w83791d_write(client, W83791D_REG_FAN_MIN[nr], data->fan_min[nr]);
#ifdef DEBUG
err_exit:
#endif
mutex_unlock(&data->update_lock);
return count;
}
static struct sensor_device_attribute sda_fan_input[] = {
SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2),
SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3),
SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4),
};
static struct sensor_device_attribute sda_fan_min[] = {
SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 0),
SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 1),
SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 2),
SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 3),
SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 4),
};
static struct sensor_device_attribute sda_fan_div[] = {
SENSOR_ATTR(fan1_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 0),
SENSOR_ATTR(fan2_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 1),
SENSOR_ATTR(fan3_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 2),
SENSOR_ATTR(fan4_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 3),
SENSOR_ATTR(fan5_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 4),
};
static struct sensor_device_attribute sda_fan_beep[] = {
SENSOR_ATTR(fan1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 6),
SENSOR_ATTR(fan2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 7),
SENSOR_ATTR(fan3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 11),
SENSOR_ATTR(fan4_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 21),
SENSOR_ATTR(fan5_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 22),
};
static struct sensor_device_attribute sda_fan_alarm[] = {
SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6),
SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7),
SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11),
SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 21),
SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 22),
};
/* read/write PWMs */
static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%u\n", data->pwm[nr]);
}
static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
int nr = sensor_attr->index;
unsigned long val;
if (strict_strtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
w83791d_write(client, W83791D_REG_PWM[nr], data->pwm[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static struct sensor_device_attribute sda_pwm[] = {
SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO,
show_pwm, store_pwm, 0),
SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO,
show_pwm, store_pwm, 1),
SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO,
show_pwm, store_pwm, 2),
SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO,
show_pwm, store_pwm, 3),
SENSOR_ATTR(pwm5, S_IWUSR | S_IRUGO,
show_pwm, store_pwm, 4),
};
static ssize_t show_pwmenable(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%u\n", data->pwm_enable[nr] + 1);
}
static ssize_t store_pwmenable(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
int nr = sensor_attr->index;
unsigned long val;
u8 reg_cfg_tmp;
u8 reg_idx = 0;
u8 val_shift = 0;
u8 keep_mask = 0;
int ret = strict_strtoul(buf, 10, &val);
if (ret || val < 1 || val > 3)
return -EINVAL;
mutex_lock(&data->update_lock);
data->pwm_enable[nr] = val - 1;
switch (nr) {
case 0:
reg_idx = 0;
val_shift = 2;
keep_mask = 0xf3;
break;
case 1:
reg_idx = 0;
val_shift = 4;
keep_mask = 0xcf;
break;
case 2:
reg_idx = 1;
val_shift = 2;
keep_mask = 0xf3;
break;
}
reg_cfg_tmp = w83791d_read(client, W83791D_REG_FAN_CFG[reg_idx]);
reg_cfg_tmp = (reg_cfg_tmp & keep_mask) |
data->pwm_enable[nr] << val_shift;
w83791d_write(client, W83791D_REG_FAN_CFG[reg_idx], reg_cfg_tmp);
mutex_unlock(&data->update_lock);
return count;
}
static struct sensor_device_attribute sda_pwmenable[] = {
SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
show_pwmenable, store_pwmenable, 0),
SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
show_pwmenable, store_pwmenable, 1),
SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO,
show_pwmenable, store_pwmenable, 2),
};
/* For Smart Fan I / Thermal Cruise */
static ssize_t show_temp_target(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct w83791d_data *data = w83791d_update_device(dev);
int nr = sensor_attr->index;
return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp_target[nr]));
}
static ssize_t store_temp_target(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
int nr = sensor_attr->index;
unsigned long val;
u8 target_mask;
if (strict_strtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
data->temp_target[nr] = TARGET_TEMP_TO_REG(val);
target_mask = w83791d_read(client,
W83791D_REG_TEMP_TARGET[nr]) & 0x80;
w83791d_write(client, W83791D_REG_TEMP_TARGET[nr],
data->temp_target[nr] | target_mask);
mutex_unlock(&data->update_lock);
return count;
}
static struct sensor_device_attribute sda_temp_target[] = {
SENSOR_ATTR(temp1_target, S_IWUSR | S_IRUGO,
show_temp_target, store_temp_target, 0),
SENSOR_ATTR(temp2_target, S_IWUSR | S_IRUGO,
show_temp_target, store_temp_target, 1),
SENSOR_ATTR(temp3_target, S_IWUSR | S_IRUGO,
show_temp_target, store_temp_target, 2),
};
static ssize_t show_temp_tolerance(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct w83791d_data *data = w83791d_update_device(dev);
int nr = sensor_attr->index;
return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp_tolerance[nr]));
}
static ssize_t store_temp_tolerance(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
int nr = sensor_attr->index;
unsigned long val;
u8 target_mask;
u8 reg_idx = 0;
u8 val_shift = 0;
u8 keep_mask = 0;
if (strict_strtoul(buf, 10, &val))
return -EINVAL;
switch (nr) {
case 0:
reg_idx = 0;
val_shift = 0;
keep_mask = 0xf0;
break;
case 1:
reg_idx = 0;
val_shift = 4;
keep_mask = 0x0f;
break;
case 2:
reg_idx = 1;
val_shift = 0;
keep_mask = 0xf0;
break;
}
mutex_lock(&data->update_lock);
data->temp_tolerance[nr] = TOL_TEMP_TO_REG(val);
target_mask = w83791d_read(client,
W83791D_REG_TEMP_TOL[reg_idx]) & keep_mask;
w83791d_write(client, W83791D_REG_TEMP_TOL[reg_idx],
(data->temp_tolerance[nr] << val_shift) | target_mask);
mutex_unlock(&data->update_lock);
return count;
}
static struct sensor_device_attribute sda_temp_tolerance[] = {
SENSOR_ATTR(temp1_tolerance, S_IWUSR | S_IRUGO,
show_temp_tolerance, store_temp_tolerance, 0),
SENSOR_ATTR(temp2_tolerance, S_IWUSR | S_IRUGO,
show_temp_tolerance, store_temp_tolerance, 1),
SENSOR_ATTR(temp3_tolerance, S_IWUSR | S_IRUGO,
show_temp_tolerance, store_temp_tolerance, 2),
};
/* read/write the temperature1, includes measured value and limits */
static ssize_t show_temp1(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp1[attr->index]));
}
static ssize_t store_temp1(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
long val = simple_strtol(buf, NULL, 10);
int nr = attr->index;
mutex_lock(&data->update_lock);
data->temp1[nr] = TEMP1_TO_REG(val);
w83791d_write(client, W83791D_REG_TEMP1[nr], data->temp1[nr]);
mutex_unlock(&data->update_lock);
return count;
}
/* read/write temperature2-3, includes measured value and limits */
static ssize_t show_temp23(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
struct w83791d_data *data = w83791d_update_device(dev);
int nr = attr->nr;
int index = attr->index;
return sprintf(buf, "%d\n", TEMP23_FROM_REG(data->temp_add[nr][index]));
}
static ssize_t store_temp23(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
long val = simple_strtol(buf, NULL, 10);
int nr = attr->nr;
int index = attr->index;
mutex_lock(&data->update_lock);
data->temp_add[nr][index] = TEMP23_TO_REG(val);
w83791d_write(client, W83791D_REG_TEMP_ADD[nr][index * 2],
data->temp_add[nr][index] >> 8);
w83791d_write(client, W83791D_REG_TEMP_ADD[nr][index * 2 + 1],
data->temp_add[nr][index] & 0x80);
mutex_unlock(&data->update_lock);
return count;
}
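/*
* Editor's note: minimal sketch of the register split performed by
* store_temp23() above. The 16-bit limit value is left-justified: the high
* byte goes to the first register, and only bit 7 of the low byte (the
* 0.5 degree step) is written to the second. The helper below is
* hypothetical, not part of the driver.
*/
static void w83791d_write_temp23_raw(struct i2c_client *client, int nr,
int index, u16 raw)
{
w83791d_write(client, W83791D_REG_TEMP_ADD[nr][index * 2], raw >> 8);
w83791d_write(client, W83791D_REG_TEMP_ADD[nr][index * 2 + 1], raw & 0x80);
}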
static struct sensor_device_attribute_2 sda_temp_input[] = {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp1, NULL, 0, 0),
SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp23, NULL, 0, 0),
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp23, NULL, 1, 0),
};
static struct sensor_device_attribute_2 sda_temp_max[] = {
SENSOR_ATTR_2(temp1_max, S_IRUGO | S_IWUSR,
show_temp1, store_temp1, 0, 1),
SENSOR_ATTR_2(temp2_max, S_IRUGO | S_IWUSR,
show_temp23, store_temp23, 0, 1),
SENSOR_ATTR_2(temp3_max, S_IRUGO | S_IWUSR,
show_temp23, store_temp23, 1, 1),
};
static struct sensor_device_attribute_2 sda_temp_max_hyst[] = {
SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO | S_IWUSR,
show_temp1, store_temp1, 0, 2),
SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR,
show_temp23, store_temp23, 0, 2),
SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO | S_IWUSR,
show_temp23, store_temp23, 1, 2),
};
/* Note: The bitmask for the beep enable/disable is different from
the bitmask for the alarm. */
static struct sensor_device_attribute sda_temp_beep[] = {
SENSOR_ATTR(temp1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 4),
SENSOR_ATTR(temp2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 5),
SENSOR_ATTR(temp3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 1),
};
static struct sensor_device_attribute sda_temp_alarm[] = {
SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4),
SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5),
SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
};
/* get the realtime status of all sensor items: voltage, temp, fan */
static ssize_t show_alarms_reg(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
/* Beep control */
#define GLOBAL_BEEP_ENABLE_SHIFT 15
#define GLOBAL_BEEP_ENABLE_MASK (1 << GLOBAL_BEEP_ENABLE_SHIFT)
static ssize_t show_beep_enable(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%d\n", data->beep_enable);
}
static ssize_t show_beep_mask(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%d\n", BEEP_MASK_FROM_REG(data->beep_mask));
}
static ssize_t store_beep_mask(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
long val = simple_strtol(buf, NULL, 10);
int i;
mutex_lock(&data->update_lock);
/* The beep_enable state overrides any enabling request from
the masks */
data->beep_mask = BEEP_MASK_TO_REG(val) & ~GLOBAL_BEEP_ENABLE_MASK;
data->beep_mask |= (data->beep_enable << GLOBAL_BEEP_ENABLE_SHIFT);
val = data->beep_mask;
for (i = 0; i < 3; i++) {
w83791d_write(client, W83791D_REG_BEEP_CTRL[i], (val & 0xff));
val >>= 8;
}
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t store_beep_enable(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->beep_enable = val ? 1 : 0;
/* Keep the full mask value in sync with the current enable */
data->beep_mask &= ~GLOBAL_BEEP_ENABLE_MASK;
data->beep_mask |= (data->beep_enable << GLOBAL_BEEP_ENABLE_SHIFT);
/* The global control is in the second beep control register
so only need to update that register */
val = (data->beep_mask >> 8) & 0xff;
w83791d_write(client, W83791D_REG_BEEP_CTRL[1], val);
mutex_unlock(&data->update_lock);
return count;
}
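/*
* Editor's note: a worked illustration of the layout assumed by
* store_beep_enable() above. With GLOBAL_BEEP_ENABLE_SHIFT == 15, the
* global enable flag is bit 7 of the middle byte of the 24-bit beep mask,
* i.e. it lives in W83791D_REG_BEEP_CTRL[1]. The helper below is
* hypothetical.
*/
static inline u8 w83791d_beep_ctrl1(u32 beep_mask)
{
/* byte that store_beep_enable() writes to W83791D_REG_BEEP_CTRL[1] */
return (beep_mask >> 8) & 0xff;
}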
static struct sensor_device_attribute sda_beep_ctrl[] = {
SENSOR_ATTR(beep_enable, S_IRUGO | S_IWUSR,
show_beep_enable, store_beep_enable, 0),
SENSOR_ATTR(beep_mask, S_IRUGO | S_IWUSR,
show_beep_mask, store_beep_mask, 1)
};
/* cpu voltage regulation information */
static ssize_t show_vid_reg(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
static ssize_t show_vrm_reg(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct w83791d_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->vrm);
}
static ssize_t store_vrm_reg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83791d_data *data = dev_get_drvdata(dev);
/* No lock needed as vrm is internal to the driver
(not read from a chip register) and so is not
updated in w83791d_update_device() */
data->vrm = simple_strtoul(buf, NULL, 10);
return count;
}
static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
#define IN_UNIT_ATTRS(X) \
&sda_in_input[X].dev_attr.attr, \
&sda_in_min[X].dev_attr.attr, \
&sda_in_max[X].dev_attr.attr, \
&sda_in_beep[X].dev_attr.attr, \
&sda_in_alarm[X].dev_attr.attr
#define FAN_UNIT_ATTRS(X) \
&sda_fan_input[X].dev_attr.attr, \
&sda_fan_min[X].dev_attr.attr, \
&sda_fan_div[X].dev_attr.attr, \
&sda_fan_beep[X].dev_attr.attr, \
&sda_fan_alarm[X].dev_attr.attr
#define TEMP_UNIT_ATTRS(X) \
&sda_temp_input[X].dev_attr.attr, \
&sda_temp_max[X].dev_attr.attr, \
&sda_temp_max_hyst[X].dev_attr.attr, \
&sda_temp_beep[X].dev_attr.attr, \
&sda_temp_alarm[X].dev_attr.attr
static struct attribute *w83791d_attributes[] = {
IN_UNIT_ATTRS(0),
IN_UNIT_ATTRS(1),
IN_UNIT_ATTRS(2),
IN_UNIT_ATTRS(3),
IN_UNIT_ATTRS(4),
IN_UNIT_ATTRS(5),
IN_UNIT_ATTRS(6),
IN_UNIT_ATTRS(7),
IN_UNIT_ATTRS(8),
IN_UNIT_ATTRS(9),
FAN_UNIT_ATTRS(0),
FAN_UNIT_ATTRS(1),
FAN_UNIT_ATTRS(2),
TEMP_UNIT_ATTRS(0),
TEMP_UNIT_ATTRS(1),
TEMP_UNIT_ATTRS(2),
&dev_attr_alarms.attr,
&sda_beep_ctrl[0].dev_attr.attr,
&sda_beep_ctrl[1].dev_attr.attr,
&dev_attr_cpu0_vid.attr,
&dev_attr_vrm.attr,
&sda_pwm[0].dev_attr.attr,
&sda_pwm[1].dev_attr.attr,
&sda_pwm[2].dev_attr.attr,
&sda_pwmenable[0].dev_attr.attr,
&sda_pwmenable[1].dev_attr.attr,
&sda_pwmenable[2].dev_attr.attr,
&sda_temp_target[0].dev_attr.attr,
&sda_temp_target[1].dev_attr.attr,
&sda_temp_target[2].dev_attr.attr,
&sda_temp_tolerance[0].dev_attr.attr,
&sda_temp_tolerance[1].dev_attr.attr,
&sda_temp_tolerance[2].dev_attr.attr,
NULL
};
static const struct attribute_group w83791d_group = {
.attrs = w83791d_attributes,
};
/* Separate group of attributes for fan/pwm 4-5. Their pins can also be
in use for GPIO in which case their sysfs-interface should not be made
available */
static struct attribute *w83791d_attributes_fanpwm45[] = {
FAN_UNIT_ATTRS(3),
FAN_UNIT_ATTRS(4),
&sda_pwm[3].dev_attr.attr,
&sda_pwm[4].dev_attr.attr,
NULL
};
static const struct attribute_group w83791d_group_fanpwm45 = {
.attrs = w83791d_attributes_fanpwm45,
};
static int w83791d_detect_subclients(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct w83791d_data *data = i2c_get_clientdata(client);
int address = client->addr;
int i, id, err;
u8 val;
id = i2c_adapter_id(adapter);
if (force_subclients[0] == id && force_subclients[1] == address) {
for (i = 2; i <= 3; i++) {
if (force_subclients[i] < 0x48 ||
force_subclients[i] > 0x4f) {
dev_err(&client->dev,
"invalid subclient "
"address %d; must be 0x48-0x4f\n",
force_subclients[i]);
err = -ENODEV;
goto error_sc_0;
}
}
w83791d_write(client, W83791D_REG_I2C_SUBADDR,
(force_subclients[2] & 0x07) |
((force_subclients[3] & 0x07) << 4));
}
val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
if (!(val & 0x08)) {
data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
}
if (!(val & 0x80)) {
if ((data->lm75[0] != NULL) &&
((val & 0x7) == ((val >> 4) & 0x7))) {
dev_err(&client->dev,
"duplicate addresses 0x%x, "
"use force_subclient\n",
data->lm75[0]->addr);
err = -ENODEV;
goto error_sc_1;
}
data->lm75[1] = i2c_new_dummy(adapter,
0x48 + ((val >> 4) & 0x7));
}
return 0;
/* Undo inits in case of errors */
error_sc_1:
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
error_sc_0:
return err;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int val1, val2;
unsigned short address = client->addr;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
return -ENODEV;
}
if (w83791d_read(client, W83791D_REG_CONFIG) & 0x80)
return -ENODEV;
val1 = w83791d_read(client, W83791D_REG_BANK);
val2 = w83791d_read(client, W83791D_REG_CHIPMAN);
/* Check for Winbond ID if in bank 0 */
if (!(val1 & 0x07)) {
if ((!(val1 & 0x80) && val2 != 0xa3) ||
((val1 & 0x80) && val2 != 0x5c)) {
return -ENODEV;
}
}
/* If Winbond chip, address of chip and W83791D_REG_I2C_ADDR
should match */
if (w83791d_read(client, W83791D_REG_I2C_ADDR) != address)
return -ENODEV;
/* We want bank 0 and Vendor ID high byte */
val1 = w83791d_read(client, W83791D_REG_BANK) & 0x78;
w83791d_write(client, W83791D_REG_BANK, val1 | 0x80);
/* Verify it is a Winbond w83791d */
val1 = w83791d_read(client, W83791D_REG_WCHIPID);
val2 = w83791d_read(client, W83791D_REG_CHIPMAN);
if (val1 != 0x71 || val2 != 0x5c)
return -ENODEV;
strlcpy(info->type, "w83791d", I2C_NAME_SIZE);
return 0;
}
static int w83791d_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct w83791d_data *data;
struct device *dev = &client->dev;
int i, err;
u8 has_fanpwm45;
#ifdef DEBUG
int val1;
val1 = w83791d_read(client, W83791D_REG_DID_VID4);
dev_dbg(dev, "Device ID version: %d.%d (0x%02x)\n",
(val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1);
#endif
data = kzalloc(sizeof(struct w83791d_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto error0;
}
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
err = w83791d_detect_subclients(client);
if (err)
goto error1;
/* Initialize the chip */
w83791d_init_client(client);
/* If the fan_div is changed, make sure there is a rational
fan_min in place */
for (i = 0; i < NUMBER_OF_FANIN; i++) {
data->fan_min[i] = w83791d_read(client, W83791D_REG_FAN_MIN[i]);
}
/* Register sysfs hooks */
if ((err = sysfs_create_group(&client->dev.kobj, &w83791d_group)))
goto error3;
/* Check if pins of fan/pwm 4-5 are in use as GPIO */
has_fanpwm45 = w83791d_read(client, W83791D_REG_GPIO) & 0x10;
if (has_fanpwm45) {
err = sysfs_create_group(&client->dev.kobj,
&w83791d_group_fanpwm45);
if (err)
goto error4;
}
/* Everything is ready, now register the working device */
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto error5;
}
return 0;
error5:
if (has_fanpwm45)
sysfs_remove_group(&client->dev.kobj, &w83791d_group_fanpwm45);
error4:
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
error3:
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
error1:
kfree(data);
error0:
return err;
}
static int w83791d_remove(struct i2c_client *client)
{
struct w83791d_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
kfree(data);
return 0;
}
static void w83791d_init_client(struct i2c_client *client)
{
struct w83791d_data *data = i2c_get_clientdata(client);
u8 tmp;
u8 old_beep;
/* The difference between reset and init is that reset
does a hard reset of the chip via index 0x40, bit 7,
but init simply forces certain registers to have "sane"
values. The hope is that the BIOS has done the right
thing (which is why the default is reset=0, init=0),
but if not, reset is the hard hammer and init
is the soft mallet both of which are trying to whack
things into place...
NOTE: The data sheet makes a distinction between
"power on defaults" and "reset by MR". As far as I can tell,
the hard reset puts everything into a power-on state so I'm
not sure what "reset by MR" means or how it can happen.
*/
if (reset || init) {
/* keep some BIOS settings when we... */
old_beep = w83791d_read(client, W83791D_REG_BEEP_CONFIG);
if (reset) {
/* ... reset the chip and ... */
w83791d_write(client, W83791D_REG_CONFIG, 0x80);
}
/* ... disable power-on abnormal beep */
w83791d_write(client, W83791D_REG_BEEP_CONFIG, old_beep | 0x80);
/* disable the global beep (not done by hard reset) */
tmp = w83791d_read(client, W83791D_REG_BEEP_CTRL[1]);
w83791d_write(client, W83791D_REG_BEEP_CTRL[1], tmp & 0xef);
if (init) {
/* Make sure monitoring is turned on for add-ons */
tmp = w83791d_read(client, W83791D_REG_TEMP2_CONFIG);
if (tmp & 1) {
w83791d_write(client, W83791D_REG_TEMP2_CONFIG,
tmp & 0xfe);
}
tmp = w83791d_read(client, W83791D_REG_TEMP3_CONFIG);
if (tmp & 1) {
w83791d_write(client, W83791D_REG_TEMP3_CONFIG,
tmp & 0xfe);
}
/* Start monitoring */
tmp = w83791d_read(client, W83791D_REG_CONFIG) & 0xf7;
w83791d_write(client, W83791D_REG_CONFIG, tmp | 0x01);
}
}
data->vrm = vid_which_vrm();
}
static struct w83791d_data *w83791d_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83791d_data *data = i2c_get_clientdata(client);
int i, j;
u8 reg_array_tmp[3];
u8 vbat_reg;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + (HZ * 3))
|| !data->valid) {
dev_dbg(dev, "Starting w83791d device update\n");
/* Update the voltages measured value and limits */
for (i = 0; i < NUMBER_OF_VIN; i++) {
data->in[i] = w83791d_read(client,
W83791D_REG_IN[i]);
data->in_max[i] = w83791d_read(client,
W83791D_REG_IN_MAX[i]);
data->in_min[i] = w83791d_read(client,
W83791D_REG_IN_MIN[i]);
}
/* Update the fan counts and limits */
for (i = 0; i < NUMBER_OF_FANIN; i++) {
/* Update the Fan measured value and limits */
data->fan[i] = w83791d_read(client,
W83791D_REG_FAN[i]);
data->fan_min[i] = w83791d_read(client,
W83791D_REG_FAN_MIN[i]);
}
/* Update the fan divisor */
for (i = 0; i < 3; i++) {
reg_array_tmp[i] = w83791d_read(client,
W83791D_REG_FAN_DIV[i]);
}
data->fan_div[0] = (reg_array_tmp[0] >> 4) & 0x03;
data->fan_div[1] = (reg_array_tmp[0] >> 6) & 0x03;
data->fan_div[2] = (reg_array_tmp[1] >> 6) & 0x03;
data->fan_div[3] = reg_array_tmp[2] & 0x07;
data->fan_div[4] = (reg_array_tmp[2] >> 4) & 0x07;
/* The fan divisors for fans 0-2 get bit 2 from
bits 5-7, respectively, of the vbat register */
vbat_reg = w83791d_read(client, W83791D_REG_VBAT);
for (i = 0; i < 3; i++)
data->fan_div[i] |= (vbat_reg >> (3 + i)) & 0x04;
/* Update PWM duty cycle */
for (i = 0; i < NUMBER_OF_PWM; i++) {
data->pwm[i] = w83791d_read(client,
W83791D_REG_PWM[i]);
}
/* Update PWM enable status */
for (i = 0; i < 2; i++) {
reg_array_tmp[i] = w83791d_read(client,
W83791D_REG_FAN_CFG[i]);
}
data->pwm_enable[0] = (reg_array_tmp[0] >> 2) & 0x03;
data->pwm_enable[1] = (reg_array_tmp[0] >> 4) & 0x03;
data->pwm_enable[2] = (reg_array_tmp[1] >> 2) & 0x03;
/* Update PWM target temperature */
for (i = 0; i < 3; i++) {
data->temp_target[i] = w83791d_read(client,
W83791D_REG_TEMP_TARGET[i]) & 0x7f;
}
/* Update PWM temperature tolerance */
for (i = 0; i < 2; i++) {
reg_array_tmp[i] = w83791d_read(client,
W83791D_REG_TEMP_TOL[i]);
}
data->temp_tolerance[0] = reg_array_tmp[0] & 0x0f;
data->temp_tolerance[1] = (reg_array_tmp[0] >> 4) & 0x0f;
data->temp_tolerance[2] = reg_array_tmp[1] & 0x0f;
/* Update the first temperature sensor */
for (i = 0; i < 3; i++) {
data->temp1[i] = w83791d_read(client,
W83791D_REG_TEMP1[i]);
}
/* Update the rest of the temperature sensors */
for (i = 0; i < 2; i++) {
for (j = 0; j < 3; j++) {
data->temp_add[i][j] =
(w83791d_read(client,
W83791D_REG_TEMP_ADD[i][j * 2]) << 8) |
w83791d_read(client,
W83791D_REG_TEMP_ADD[i][j * 2 + 1]);
}
}
/* Update the realtime status */
data->alarms =
w83791d_read(client, W83791D_REG_ALARM1) +
(w83791d_read(client, W83791D_REG_ALARM2) << 8) +
(w83791d_read(client, W83791D_REG_ALARM3) << 16);
/* Update the beep configuration information */
data->beep_mask =
w83791d_read(client, W83791D_REG_BEEP_CTRL[0]) +
(w83791d_read(client, W83791D_REG_BEEP_CTRL[1]) << 8) +
(w83791d_read(client, W83791D_REG_BEEP_CTRL[2]) << 16);
/* Extract global beep enable flag */
data->beep_enable =
(data->beep_mask >> GLOBAL_BEEP_ENABLE_SHIFT) & 0x01;
/* Update the cpu voltage information */
i = w83791d_read(client, W83791D_REG_VID_FANDIV);
data->vid = i & 0x0f;
data->vid |= (w83791d_read(client, W83791D_REG_DID_VID4) & 0x01)
<< 4;
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
#ifdef DEBUG
w83791d_print_debug(data, dev);
#endif
return data;
}
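/*
* Editor's note: worked example of the 3-bit fan divisor assembly done in
* w83791d_update_device() above, specialized for fan 0 (hypothetical
* helper). Bits 1:0 come from FAN_DIV[0] bits 5:4; bit 2 comes from VBAT
* bit 5, extracted by (vbat_reg >> 3) & 0x04.
*/
static u8 w83791d_fan0_div(u8 fan_div_reg0, u8 vbat_reg)
{
u8 div = (fan_div_reg0 >> 4) & 0x03; /* low two bits */
div |= (vbat_reg >> 3) & 0x04; /* bit 2 from VBAT bit 5 */
return div;
}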
#ifdef DEBUG
static void w83791d_print_debug(struct w83791d_data *data, struct device *dev)
{
int i = 0, j = 0;
dev_dbg(dev, "======Start of w83791d debug values======\n");
dev_dbg(dev, "%d set of Voltages: ===>\n", NUMBER_OF_VIN);
for (i = 0; i < NUMBER_OF_VIN; i++) {
dev_dbg(dev, "vin[%d] is: 0x%02x\n", i, data->in[i]);
dev_dbg(dev, "vin[%d] min is: 0x%02x\n", i, data->in_min[i]);
dev_dbg(dev, "vin[%d] max is: 0x%02x\n", i, data->in_max[i]);
}
dev_dbg(dev, "%d set of Fan Counts/Divisors: ===>\n", NUMBER_OF_FANIN);
for (i = 0; i < NUMBER_OF_FANIN; i++) {
dev_dbg(dev, "fan[%d] is: 0x%02x\n", i, data->fan[i]);
dev_dbg(dev, "fan[%d] min is: 0x%02x\n", i, data->fan_min[i]);
dev_dbg(dev, "fan_div[%d] is: 0x%02x\n", i, data->fan_div[i]);
}
/* temperature math is signed, but only print out the
bits that matter */
dev_dbg(dev, "%d set of Temperatures: ===>\n", NUMBER_OF_TEMPIN);
for (i = 0; i < 3; i++) {
dev_dbg(dev, "temp1[%d] is: 0x%02x\n", i, (u8) data->temp1[i]);
}
for (i = 0; i < 2; i++) {
for (j = 0; j < 3; j++) {
dev_dbg(dev, "temp_add[%d][%d] is: 0x%04x\n", i, j,
(u16) data->temp_add[i][j]);
}
}
dev_dbg(dev, "Misc Information: ===>\n");
dev_dbg(dev, "alarm is: 0x%08x\n", data->alarms);
dev_dbg(dev, "beep_mask is: 0x%08x\n", data->beep_mask);
dev_dbg(dev, "beep_enable is: %d\n", data->beep_enable);
dev_dbg(dev, "vid is: 0x%02x\n", data->vid);
dev_dbg(dev, "vrm is: 0x%02x\n", data->vrm);
dev_dbg(dev, "=======End of w83791d debug values========\n");
dev_dbg(dev, "\n");
}
#endif
static int __init sensors_w83791d_init(void)
{
return i2c_add_driver(&w83791d_driver);
}
static void __exit sensors_w83791d_exit(void)
{
i2c_del_driver(&w83791d_driver);
}
MODULE_AUTHOR("Charles Spirakis <bezaur@gmail.com>");
MODULE_DESCRIPTION("W83791D driver");
MODULE_LICENSE("GPL");
module_init(sensors_w83791d_init);
module_exit(sensors_w83791d_exit);
| gpl-2.0 |
dbones/linux | arch/powerpc/platforms/40x/virtex.c | 1408 | 1433 | /*
* Xilinx Virtex (IIpro & 4FX) based board support
*
* Copyright 2007 Secret Lab Technologies Ltd.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/xilinx_intc.h>
#include <asm/xilinx_pci.h>
#include <asm/ppc4xx.h>
static const struct of_device_id xilinx_of_bus_ids[] __initconst = {
{ .compatible = "xlnx,plb-v46-1.00.a", },
{ .compatible = "xlnx,plb-v34-1.01.a", },
{ .compatible = "xlnx,plb-v34-1.02.a", },
{ .compatible = "xlnx,opb-v20-1.10.c", },
{ .compatible = "xlnx,dcr-v29-1.00.a", },
{ .compatible = "xlnx,compound", },
{}
};
static int __init virtex_device_probe(void)
{
of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
return 0;
}
machine_device_initcall(virtex, virtex_device_probe);
static int __init virtex_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (!of_flat_dt_is_compatible(root, "xlnx,virtex"))
return 0;
return 1;
}
define_machine(virtex) {
.name = "Xilinx Virtex",
.probe = virtex_probe,
.setup_arch = xilinx_pci_init,
.init_IRQ = xilinx_intc_init_tree,
.get_irq = xilinx_intc_get_irq,
.restart = ppc4xx_reset_system,
.calibrate_decr = generic_calibrate_decr,
};
| gpl-2.0 |
mongoose700/xen-coalesce-kernel | arch/arm/mach-s3c64xx/setup-ide.c | 2176 | 1280 | /* linux/arch/arm/mach-s3c64xx/setup-ide.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S3C64XX setup information for IDE
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
#include <plat/gpio-cfg.h>
#include <mach/gpio-samsung.h>
#include <linux/platform_data/ata-samsung_cf.h>
void s3c64xx_ide_setup_gpio(void)
{
u32 reg;
reg = readl(S3C_MEM_SYS_CFG) & (~0x3f);
/* Independent CF interface, CF chip select configuration */
writel(reg | MEM_SYS_CFG_INDEP_CF |
MEM_SYS_CFG_EBI_FIX_PRI_CFCON, S3C_MEM_SYS_CFG);
s3c_gpio_cfgpin(S3C64XX_GPB(4), S3C_GPIO_SFN(4));
/* Set XhiDATA[15:0] pins as CF Data[15:0] */
s3c_gpio_cfgpin_range(S3C64XX_GPK(0), 16, S3C_GPIO_SFN(5));
/* Set XhiADDR[2:0] pins as CF ADDR[2:0] */
s3c_gpio_cfgpin_range(S3C64XX_GPL(0), 3, S3C_GPIO_SFN(6));
/* Set Xhi ctrl pins as CF ctrl pins(IORDY, IOWR, IORD, CE[0:1]) */
s3c_gpio_cfgpin(S3C64XX_GPM(5), S3C_GPIO_SFN(1));
s3c_gpio_cfgpin_range(S3C64XX_GPM(0), 5, S3C_GPIO_SFN(6));
}
| gpl-2.0 |
okrt/note8-jb-kernel | drivers/ssb/driver_chipcommon.c | 2432 | 14813 | /*
* Sonics Silicon Backplane
* Broadcom ChipCommon core driver
*
* Copyright 2005, Broadcom Corporation
* Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/pci.h>
#include "ssb_private.h"
/* Clock sources */
enum ssb_clksrc {
/* PCI clock */
SSB_CHIPCO_CLKSRC_PCI,
/* Crystal slow clock oscillator */
SSB_CHIPCO_CLKSRC_XTALOS,
/* Low power oscillator */
SSB_CHIPCO_CLKSRC_LOPWROS,
};
static inline u32 chipco_write32_masked(struct ssb_chipcommon *cc, u16 offset,
u32 mask, u32 value)
{
value &= mask;
value |= chipco_read32(cc, offset) & ~mask;
chipco_write32(cc, offset, value);
return value;
}
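/*
* Editor's note: illustrative (hypothetical) call site for
* chipco_write32_masked() above: set SSB_CHIPCO_CORECTL_UARTCLK0 while
* leaving every other bit of SSB_CHIPCO_CORECTL untouched. Both names are
* taken from later in this file.
*/
static inline void chipco_set_uartclk0(struct ssb_chipcommon *cc)
{
chipco_write32_masked(cc, SSB_CHIPCO_CORECTL,
SSB_CHIPCO_CORECTL_UARTCLK0,
SSB_CHIPCO_CORECTL_UARTCLK0);
}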
void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
enum ssb_clkmode mode)
{
struct ssb_device *ccdev = cc->dev;
struct ssb_bus *bus;
u32 tmp;
if (!ccdev)
return;
bus = ccdev->bus;
/* We support SLOW only on 6..9 */
if (ccdev->id.revision >= 10 && mode == SSB_CLKMODE_SLOW)
mode = SSB_CLKMODE_DYNAMIC;
if (cc->capabilities & SSB_CHIPCO_CAP_PMU)
return; /* PMU controls clockmode; a separate function is needed */
SSB_WARN_ON(ccdev->id.revision >= 20);
/* chipcommon cores prior to rev6 don't support dynamic clock control */
if (ccdev->id.revision < 6)
return;
/* ChipCommon cores rev10+ need testing */
if (ccdev->id.revision >= 10)
return;
if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
return;
switch (mode) {
case SSB_CLKMODE_SLOW: /* For revs 6..9 only */
tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
tmp |= SSB_CHIPCO_SLOWCLKCTL_FSLOW;
chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp);
break;
case SSB_CLKMODE_FAST:
if (ccdev->id.revision < 10) {
ssb_pci_xtal(bus, SSB_GPIO_XTAL, 1); /* Force crystal on */
tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW;
tmp |= SSB_CHIPCO_SLOWCLKCTL_IPLL;
chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp);
} else {
chipco_write32(cc, SSB_CHIPCO_SYSCLKCTL,
(chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL) |
SSB_CHIPCO_SYSCLKCTL_FORCEHT));
/* udelay(150); TODO: not available in early init */
}
break;
case SSB_CLKMODE_DYNAMIC:
if (ccdev->id.revision < 10) {
tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW;
tmp &= ~SSB_CHIPCO_SLOWCLKCTL_IPLL;
tmp &= ~SSB_CHIPCO_SLOWCLKCTL_ENXTAL;
if ((tmp & SSB_CHIPCO_SLOWCLKCTL_SRC) !=
SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL)
tmp |= SSB_CHIPCO_SLOWCLKCTL_ENXTAL;
chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp);
/* For dynamic control, we have to release our xtal_pu
* "force on" */
if (tmp & SSB_CHIPCO_SLOWCLKCTL_ENXTAL)
ssb_pci_xtal(bus, SSB_GPIO_XTAL, 0);
} else {
chipco_write32(cc, SSB_CHIPCO_SYSCLKCTL,
(chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL) &
~SSB_CHIPCO_SYSCLKCTL_FORCEHT));
}
break;
default:
SSB_WARN_ON(1);
}
}
/* Get the Slow Clock Source */
static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc)
{
struct ssb_bus *bus = cc->dev->bus;
u32 uninitialized_var(tmp);
if (cc->dev->id.revision < 6) {
if (bus->bustype == SSB_BUSTYPE_SSB ||
bus->bustype == SSB_BUSTYPE_PCMCIA)
return SSB_CHIPCO_CLKSRC_XTALOS;
if (bus->bustype == SSB_BUSTYPE_PCI) {
pci_read_config_dword(bus->host_pci, SSB_GPIO_OUT, &tmp);
if (tmp & 0x10)
return SSB_CHIPCO_CLKSRC_PCI;
return SSB_CHIPCO_CLKSRC_XTALOS;
}
}
if (cc->dev->id.revision < 10) {
tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
tmp &= 0x7;
if (tmp == 0)
return SSB_CHIPCO_CLKSRC_LOPWROS;
if (tmp == 1)
return SSB_CHIPCO_CLKSRC_XTALOS;
if (tmp == 2)
return SSB_CHIPCO_CLKSRC_PCI;
}
return SSB_CHIPCO_CLKSRC_XTALOS;
}
/* Get maximum or minimum (depending on get_max flag) slowclock frequency. */
static int chipco_pctl_clockfreqlimit(struct ssb_chipcommon *cc, int get_max)
{
int uninitialized_var(limit);
enum ssb_clksrc clocksrc;
int divisor = 1;
u32 tmp;
clocksrc = chipco_pctl_get_slowclksrc(cc);
if (cc->dev->id.revision < 6) {
switch (clocksrc) {
case SSB_CHIPCO_CLKSRC_PCI:
divisor = 64;
break;
case SSB_CHIPCO_CLKSRC_XTALOS:
divisor = 32;
break;
default:
SSB_WARN_ON(1);
}
} else if (cc->dev->id.revision < 10) {
switch (clocksrc) {
case SSB_CHIPCO_CLKSRC_LOPWROS:
break;
case SSB_CHIPCO_CLKSRC_XTALOS:
case SSB_CHIPCO_CLKSRC_PCI:
tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
divisor = (tmp >> 16) + 1;
divisor *= 4;
break;
}
} else {
tmp = chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL);
divisor = (tmp >> 16) + 1;
divisor *= 4;
}
switch (clocksrc) {
case SSB_CHIPCO_CLKSRC_LOPWROS:
if (get_max)
limit = 43000;
else
limit = 25000;
break;
case SSB_CHIPCO_CLKSRC_XTALOS:
if (get_max)
limit = 20200000;
else
limit = 19800000;
break;
case SSB_CHIPCO_CLKSRC_PCI:
if (get_max)
limit = 34000000;
else
limit = 25000000;
break;
}
limit /= divisor;
return limit;
}
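/*
* Editor's note: worked example for chipco_pctl_clockfreqlimit() above.
* On a core with revision < 6 clocked from PCI, the divisor is 64, so the
* returned limits are 34000000 / 64 = 531250 Hz with get_max set and
* 25000000 / 64 = 390625 Hz without it.
*/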
static void chipco_powercontrol_init(struct ssb_chipcommon *cc)
{
struct ssb_bus *bus = cc->dev->bus;
if (bus->chip_id == 0x4321) {
if (bus->chip_rev == 0)
chipco_write32(cc, SSB_CHIPCO_CHIPCTL, 0x3A4);
else if (bus->chip_rev == 1)
chipco_write32(cc, SSB_CHIPCO_CHIPCTL, 0xA4);
}
if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
return;
if (cc->dev->id.revision >= 10) {
/* Set Idle Power clock rate to 1 MHz */
chipco_write32(cc, SSB_CHIPCO_SYSCLKCTL,
(chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL) &
0x0000FFFF) | 0x00040000);
} else {
int maxfreq;
maxfreq = chipco_pctl_clockfreqlimit(cc, 1);
chipco_write32(cc, SSB_CHIPCO_PLLONDELAY,
(maxfreq * 150 + 999999) / 1000000);
chipco_write32(cc, SSB_CHIPCO_FREFSELDELAY,
(maxfreq * 15 + 999999) / 1000000);
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */
static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc)
{
struct ssb_bus *bus = cc->dev->bus;
switch (bus->chip_id) {
case 0x4312:
case 0x4322:
case 0x4328:
return 7000;
case 0x4325:
/* TODO: */
default:
return 15000;
}
}
/* http://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */
static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
{
struct ssb_bus *bus = cc->dev->bus;
int minfreq;
unsigned int tmp;
u32 pll_on_delay;
if (bus->bustype != SSB_BUSTYPE_PCI)
return;
if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
cc->fast_pwrup_delay = pmu_fast_powerup_delay(cc);
return;
}
if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
return;
minfreq = chipco_pctl_clockfreqlimit(cc, 0);
pll_on_delay = chipco_read32(cc, SSB_CHIPCO_PLLONDELAY);
tmp = (((pll_on_delay + 2) * 1000000) + (minfreq - 1)) / minfreq;
SSB_WARN_ON(tmp & ~0xFFFF);
cc->fast_pwrup_delay = tmp;
}
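/*
* Editor's note: the expression in calc_fast_powerup_delay() above is a
* ceiling division; it is equivalent to the kernel helper
* DIV_ROUND_UP((pll_on_delay + 2) * 1000000, minfreq), i.e. the PLL
* power-up delay converted to microseconds, rounded up. For example, with
* pll_on_delay = 100 and minfreq = 390625 Hz this gives
* (102000000 + 390624) / 390625 = 262 us, well inside the 16-bit field
* that the SSB_WARN_ON() checks.
*/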
void ssb_chipcommon_init(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return; /* We don't have a ChipCommon */
if (cc->dev->id.revision >= 11)
cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
if (cc->dev->id.revision >= 20) {
chipco_write32(cc, SSB_CHIPCO_GPIOPULLUP, 0);
chipco_write32(cc, SSB_CHIPCO_GPIOPULLDOWN, 0);
}
ssb_pmu_init(cc);
chipco_powercontrol_init(cc);
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
calc_fast_powerup_delay(cc);
}
void ssb_chipco_suspend(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return;
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_SLOW);
}
void ssb_chipco_resume(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return;
chipco_powercontrol_init(cc);
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
}
/* Get the processor clock */
void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc,
u32 *plltype, u32 *n, u32 *m)
{
*n = chipco_read32(cc, SSB_CHIPCO_CLOCK_N);
*plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT);
switch (*plltype) {
case SSB_PLLTYPE_2:
case SSB_PLLTYPE_4:
case SSB_PLLTYPE_6:
case SSB_PLLTYPE_7:
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_MIPS);
break;
case SSB_PLLTYPE_3:
/* 5350 uses m2 to control mips */
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2);
break;
default:
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB);
break;
}
}
/* Get the bus clock */
void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc,
u32 *plltype, u32 *n, u32 *m)
{
*n = chipco_read32(cc, SSB_CHIPCO_CLOCK_N);
*plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT);
switch (*plltype) {
case SSB_PLLTYPE_6: /* 100/200 or 120/240 only */
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_MIPS);
break;
case SSB_PLLTYPE_3: /* 25 MHz, 2 dividers */
if (cc->dev->bus->chip_id != 0x5365) {
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2);
break;
}
/* Fallthrough */
default:
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB);
}
}
void ssb_chipco_timing_init(struct ssb_chipcommon *cc,
unsigned long ns)
{
struct ssb_device *dev = cc->dev;
struct ssb_bus *bus = dev->bus;
u32 tmp;
/* set register for external IO to control LED. */
chipco_write32(cc, SSB_CHIPCO_PROG_CFG, 0x11);
tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; /* Waitcount-3 = 10ns */
tmp |= DIV_ROUND_UP(40, ns) << SSB_PROG_WCNT_1_SHIFT; /* Waitcount-1 = 40ns */
tmp |= DIV_ROUND_UP(240, ns); /* Waitcount-0 = 240ns */
chipco_write32(cc, SSB_CHIPCO_PROG_WAITCNT, tmp); /* 0x01020a0c for a 100 MHz clock */
/* Set timing for the flash */
tmp = DIV_ROUND_UP(10, ns) << SSB_FLASH_WCNT_3_SHIFT; /* Waitcount-3 = 10ns */
tmp |= DIV_ROUND_UP(10, ns) << SSB_FLASH_WCNT_1_SHIFT; /* Waitcount-1 = 10ns */
tmp |= DIV_ROUND_UP(120, ns); /* Waitcount-0 = 120ns */
if ((bus->chip_id == 0x5365) ||
(dev->id.revision < 9))
chipco_write32(cc, SSB_CHIPCO_FLASH_WAITCNT, tmp);
if ((bus->chip_id == 0x5365) ||
(dev->id.revision < 9) ||
((bus->chip_id == 0x5350) && (bus->chip_rev == 0)))
chipco_write32(cc, SSB_CHIPCO_PCMCIA_MEMWAIT, tmp);
if (bus->chip_id == 0x5350) {
/* Enable EXTIF */
tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; /* Waitcount-3 = 10ns */
tmp |= DIV_ROUND_UP(20, ns) << SSB_PROG_WCNT_2_SHIFT; /* Waitcount-2 = 20ns */
tmp |= DIV_ROUND_UP(100, ns) << SSB_PROG_WCNT_1_SHIFT; /* Waitcount-1 = 100ns */
tmp |= DIV_ROUND_UP(120, ns); /* Waitcount-0 = 120ns */
chipco_write32(cc, SSB_CHIPCO_PROG_WAITCNT, tmp); /* 0x01020a0c for a 100 MHz clock */
}
}
/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
void ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, u32 ticks)
{
/* instant NMI */
chipco_write32(cc, SSB_CHIPCO_WATCHDOG, ticks);
}
void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
chipco_write32_masked(cc, SSB_CHIPCO_IRQMASK, mask, value);
}
u32 ssb_chipco_irq_status(struct ssb_chipcommon *cc, u32 mask)
{
return chipco_read32(cc, SSB_CHIPCO_IRQSTAT) & mask;
}
u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask)
{
return chipco_read32(cc, SSB_CHIPCO_GPIOIN) & mask;
}
u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value);
}
u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value);
}
u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
}
EXPORT_SYMBOL(ssb_chipco_gpio_control);
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value);
}
u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value);
}
#ifdef CONFIG_SSB_SERIAL
int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
struct ssb_serial_port *ports)
{
struct ssb_bus *bus = cc->dev->bus;
int nr_ports = 0;
u32 plltype;
unsigned int irq;
u32 baud_base, div;
u32 i, n;
unsigned int ccrev = cc->dev->id.revision;
plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT);
irq = ssb_mips_irq(cc->dev);
if (plltype == SSB_PLLTYPE_1) {
/* PLL clock */
baud_base = ssb_calc_clock_rate(plltype,
chipco_read32(cc, SSB_CHIPCO_CLOCK_N),
chipco_read32(cc, SSB_CHIPCO_CLOCK_M2));
div = 1;
} else {
if (ccrev == 20) {
/* BCM5354 uses constant 25MHz clock */
baud_base = 25000000;
div = 48;
/* Set the override bit so we don't divide it */
chipco_write32(cc, SSB_CHIPCO_CORECTL,
chipco_read32(cc, SSB_CHIPCO_CORECTL)
| SSB_CHIPCO_CORECTL_UARTCLK0);
} else if ((ccrev >= 11) && (ccrev != 15)) {
/* Fixed ALP clock */
baud_base = 20000000;
if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
/* FIXME: baud_base is different for devices with a PMU */
SSB_WARN_ON(1);
}
div = 1;
if (ccrev >= 21) {
/* Turn off UART clock before switching clocksource. */
chipco_write32(cc, SSB_CHIPCO_CORECTL,
chipco_read32(cc, SSB_CHIPCO_CORECTL)
& ~SSB_CHIPCO_CORECTL_UARTCLKEN);
}
/* Set the override bit so we don't divide it */
chipco_write32(cc, SSB_CHIPCO_CORECTL,
chipco_read32(cc, SSB_CHIPCO_CORECTL)
| SSB_CHIPCO_CORECTL_UARTCLK0);
if (ccrev >= 21) {
/* Re-enable the UART clock. */
chipco_write32(cc, SSB_CHIPCO_CORECTL,
chipco_read32(cc, SSB_CHIPCO_CORECTL)
| SSB_CHIPCO_CORECTL_UARTCLKEN);
}
} else if (ccrev >= 3) {
/* Internal backplane clock */
baud_base = ssb_clockspeed(bus);
div = chipco_read32(cc, SSB_CHIPCO_CLKDIV)
& SSB_CHIPCO_CLKDIV_UART;
} else {
/* Fixed internal backplane clock */
baud_base = 88000000;
div = 48;
}
/* Clock source depends on strapping if UartClkOverride is unset */
if ((ccrev > 0) &&
!(chipco_read32(cc, SSB_CHIPCO_CORECTL) & SSB_CHIPCO_CORECTL_UARTCLK0)) {
if ((cc->capabilities & SSB_CHIPCO_CAP_UARTCLK) ==
SSB_CHIPCO_CAP_UARTCLK_INT) {
/* Internal divided backplane clock */
baud_base /= div;
} else {
/* Assume external clock of 1.8432 MHz */
baud_base = 1843200;
}
}
}
/* Determine the registers of the UARTs */
n = (cc->capabilities & SSB_CHIPCO_CAP_NRUART);
for (i = 0; i < n; i++) {
void __iomem *cc_mmio;
void __iomem *uart_regs;
cc_mmio = cc->dev->bus->mmio + (cc->dev->core_index * SSB_CORE_SIZE);
uart_regs = cc_mmio + SSB_CHIPCO_UART0_DATA;
/* Offset changed after rev 0 */
if (ccrev == 0)
uart_regs += (i * 8);
else
uart_regs += (i * 256);
nr_ports++;
ports[i].regs = uart_regs;
ports[i].irq = irq;
ports[i].baud_base = baud_base;
ports[i].reg_shift = 0;
}
return nr_ports;
}
#endif /* CONFIG_SSB_SERIAL */
| gpl-2.0 |
yun3195/android_kernel_ZTE_Z5S | security/selinux/netnode.c | 2688 | 8462 | /*
* Network node table
*
* SELinux must keep a mapping of network nodes to labels/SIDs. This
* mapping is maintained as part of the normal policy but a fast cache is
* needed to reduce the lookup overhead since most of these queries happen on
* a per-packet basis.
*
* Author: Paul Moore <paul@paul-moore.com>
*
* This code is heavily based on the "netif" concept originally developed by
* James Morris <jmorris@redhat.com>
* (see security/selinux/netif.c for more information)
*
*/
/*
* (c) Copyright Hewlett-Packard Development Company, L.P., 2007
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include "netnode.h"
#include "objsec.h"
#define SEL_NETNODE_HASH_SIZE 256
#define SEL_NETNODE_HASH_BKT_LIMIT 16
struct sel_netnode_bkt {
unsigned int size;
struct list_head list;
};
struct sel_netnode {
struct netnode_security_struct nsec;
struct list_head list;
struct rcu_head rcu;
};
/* NOTE: we are using a combined hash table for both IPv4 and IPv6; the reason
* for this is that I suspect most users will not make heavy use of both
* address families at the same time, so one table will usually end up wasted.
* If this becomes a problem we can always add a hash table for each address
* family later */
static LIST_HEAD(sel_netnode_list);
static DEFINE_SPINLOCK(sel_netnode_lock);
static struct sel_netnode_bkt sel_netnode_hash[SEL_NETNODE_HASH_SIZE];
/**
* sel_netnode_hashfn_ipv4 - IPv4 hashing function for the node table
* @addr: IPv4 address
*
* Description:
* This is the IPv4 hashing function for the node interface table, it returns
* the bucket number for the given IP address.
*
*/
static unsigned int sel_netnode_hashfn_ipv4(__be32 addr)
{
/* at some point we should determine if the mismatch in byte order
* affects the hash function dramatically */
return (addr & (SEL_NETNODE_HASH_SIZE - 1));
}
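/*
* Editor's note: a worked example of the byte-order remark above. For the
* IPv4 address 192.168.1.77 the __be32 value is the byte sequence
* c0 a8 01 4d, so "addr & 0xff" selects the first octet (bucket 0xc0 ==
* 192) on little-endian hosts but the last octet (bucket 0x4d == 77) on
* big-endian ones; the hash is valid either way, just distributed
* differently.
*/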
/**
* sel_netnode_hashfn_ipv6 - IPv6 hashing function for the node table
* @addr: IPv6 address
*
* Description:
* This is the IPv6 hashing function for the node interface table, it returns
* the bucket number for the given IP address.
*
*/
static unsigned int sel_netnode_hashfn_ipv6(const struct in6_addr *addr)
{
/* just hash the least significant 32 bits to keep things fast (they
* are the most likely to be different anyway); we can revisit this
* later if needed */
return (addr->s6_addr32[3] & (SEL_NETNODE_HASH_SIZE - 1));
}
/**
* sel_netnode_find - Search for a node record
* @addr: IP address
* @family: address family
*
* Description:
* Search the network node table and return the record matching @addr. If an
* entry can not be found in the table return NULL.
*
*/
static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
{
unsigned int idx;
struct sel_netnode *node;
switch (family) {
case PF_INET:
idx = sel_netnode_hashfn_ipv4(*(__be32 *)addr);
break;
case PF_INET6:
idx = sel_netnode_hashfn_ipv6(addr);
break;
default:
BUG();
return NULL;
}
list_for_each_entry_rcu(node, &sel_netnode_hash[idx].list, list)
if (node->nsec.family == family)
switch (family) {
case PF_INET:
if (node->nsec.addr.ipv4 == *(__be32 *)addr)
return node;
break;
case PF_INET6:
if (ipv6_addr_equal(&node->nsec.addr.ipv6,
addr))
return node;
break;
}
return NULL;
}
/**
* sel_netnode_insert - Insert a new node into the table
* @node: the new node record
*
* Description:
* Add a new node record to the network address hash table.
*
*/
static void sel_netnode_insert(struct sel_netnode *node)
{
unsigned int idx;
switch (node->nsec.family) {
case PF_INET:
idx = sel_netnode_hashfn_ipv4(node->nsec.addr.ipv4);
break;
case PF_INET6:
idx = sel_netnode_hashfn_ipv6(&node->nsec.addr.ipv6);
break;
default:
BUG();
}
/* we need to impose a limit on the growth of the hash table so check
* this bucket to make sure it is within the specified bounds */
list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
struct sel_netnode *tail;
tail = list_entry(
rcu_dereference(sel_netnode_hash[idx].list.prev),
struct sel_netnode, list);
list_del_rcu(&tail->list);
kfree_rcu(tail, rcu);
} else
sel_netnode_hash[idx].size++;
}
/**
* sel_netnode_sid_slow - Lookup the SID of a network address using the policy
* @addr: the IP address
* @family: the address family
* @sid: node SID
*
* Description:
* This function determines the SID of a network address by querying the
* security policy. The result is added to the network address table to
* speed up future queries. Returns zero on success, negative values on
* failure.
*
*/
static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
{
int ret = -ENOMEM;
struct sel_netnode *node;
struct sel_netnode *new = NULL;
spin_lock_bh(&sel_netnode_lock);
node = sel_netnode_find(addr, family);
if (node != NULL) {
*sid = node->nsec.sid;
spin_unlock_bh(&sel_netnode_lock);
return 0;
}
new = kzalloc(sizeof(*new), GFP_ATOMIC);
if (new == NULL)
goto out;
switch (family) {
case PF_INET:
ret = security_node_sid(PF_INET,
addr, sizeof(struct in_addr), sid);
new->nsec.addr.ipv4 = *(__be32 *)addr;
break;
case PF_INET6:
ret = security_node_sid(PF_INET6,
addr, sizeof(struct in6_addr), sid);
new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
break;
default:
BUG();
}
if (ret != 0)
goto out;
new->nsec.family = family;
new->nsec.sid = *sid;
sel_netnode_insert(new);
out:
spin_unlock_bh(&sel_netnode_lock);
if (unlikely(ret)) {
printk(KERN_WARNING
"SELinux: failure in sel_netnode_sid_slow(),"
" unable to determine network node label\n");
kfree(new);
}
return ret;
}
/**
* sel_netnode_sid - Lookup the SID of a network address
* @addr: the IP address
* @family: the address family
* @sid: node SID
*
* Description:
* This function determines the SID of a network address using the fastest
* method possible. First the address table is queried, but if an entry
* can't be found then the policy is queried and the result is added to the
* table to speed up future queries. Returns zero on success, negative values
* on failure.
*
*/
int sel_netnode_sid(void *addr, u16 family, u32 *sid)
{
struct sel_netnode *node;
rcu_read_lock();
node = sel_netnode_find(addr, family);
if (node != NULL) {
*sid = node->nsec.sid;
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
return sel_netnode_sid_slow(addr, family, sid);
}
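/*
* Editor's note: minimal, hypothetical usage sketch for sel_netnode_sid()
* above; daddr is an IPv4 destination address in network byte order and
* the resolved SID is returned through *sid.
*/
static int example_ipv4_node_sid(__be32 daddr, u32 *sid)
{
return sel_netnode_sid(&daddr, PF_INET, sid);
}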
/**
* sel_netnode_flush - Flush the entire network address table
*
* Description:
* Remove all entries from the network address table.
*
*/
static void sel_netnode_flush(void)
{
unsigned int idx;
struct sel_netnode *node, *node_tmp;
spin_lock_bh(&sel_netnode_lock);
for (idx = 0; idx < SEL_NETNODE_HASH_SIZE; idx++) {
list_for_each_entry_safe(node, node_tmp,
&sel_netnode_hash[idx].list, list) {
list_del_rcu(&node->list);
kfree_rcu(node, rcu);
}
sel_netnode_hash[idx].size = 0;
}
spin_unlock_bh(&sel_netnode_lock);
}
static int sel_netnode_avc_callback(u32 event, u32 ssid, u32 tsid,
u16 class, u32 perms, u32 *retained)
{
if (event == AVC_CALLBACK_RESET) {
sel_netnode_flush();
synchronize_net();
}
return 0;
}
static __init int sel_netnode_init(void)
{
int iter;
int ret;
if (!selinux_enabled)
return 0;
for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
INIT_LIST_HEAD(&sel_netnode_hash[iter].list);
sel_netnode_hash[iter].size = 0;
}
ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET,
SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
if (ret != 0)
panic("avc_add_callback() failed, error %d\n", ret);
return ret;
}
__initcall(sel_netnode_init);
| gpl-2.0 |
OneEducation/kernel-rk310-kitkat-firefly | arch/mips/mm/pgtable-64.c | 3968 | 2228 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999, 2000 by Silicon Graphics
* Copyright (C) 2003 by Ralf Baechle
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
void pgd_init(unsigned long page)
{
unsigned long *p, *end;
unsigned long entry;
#ifdef __PAGETABLE_PMD_FOLDED
entry = (unsigned long)invalid_pte_table;
#else
entry = (unsigned long)invalid_pmd_table;
#endif
p = (unsigned long *) page;
end = p + PTRS_PER_PGD;
do {
p[0] = entry;
p[1] = entry;
p[2] = entry;
p[3] = entry;
p[4] = entry;
p += 8;
p[-3] = entry;
p[-2] = entry;
p[-1] = entry;
} while (p != end);
}
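/*
* Editor's note: the loop in pgd_init() above is hand-unrolled to write
* eight entries per iteration (p[0]..p[4], then p[-3]..p[-1] after the
* pointer bump by 8). A straightforward, un-unrolled sketch of the same
* fill (hypothetical helper, entry passed in explicitly):
*/
static inline void pgd_init_simple(unsigned long page, unsigned long entry)
{
unsigned long *p = (unsigned long *)page;
int i;
for (i = 0; i < PTRS_PER_PGD; i++)
p[i] = entry;
}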
#ifndef __PAGETABLE_PMD_FOLDED
void pmd_init(unsigned long addr, unsigned long pagetable)
{
unsigned long *p, *end;
p = (unsigned long *) addr;
end = p + PTRS_PER_PMD;
do {
p[0] = pagetable;
p[1] = pagetable;
p[2] = pagetable;
p[3] = pagetable;
p[4] = pagetable;
p += 8;
p[-3] = pagetable;
p[-2] = pagetable;
p[-1] = pagetable;
} while (p != end);
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp)
{
if (!pmd_trans_splitting(*pmdp)) {
pmd_t pmd = pmd_mksplitting(*pmdp);
set_pmd_at(vma->vm_mm, address, pmdp, pmd);
}
}
#endif
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;
pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
return pmd;
}
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
flush_tlb_all();
}
void __init pagetable_init(void)
{
unsigned long vaddr;
pgd_t *pgd_base;
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
#endif
pgd_base = swapper_pg_dir;
/*
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
}
| gpl-2.0 |
Split-Screen/android_kernel_moto_shamu | arch/cris/arch-v32/drivers/pci/bios.c | 4224 | 2375 | #include <linux/pci.h>
#include <linux/kernel.h>
#include <arch/hwregs/intr_vect.h>
void pcibios_fixup_bus(struct pci_bus *b)
{
}
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long prot;
/* Leave vm_pgoff as-is; the PCI space address is the physical
* address on this platform.
*/
prot = pgprot_val(vma->vm_page_prot);
vma->vm_page_prot = __pgprot(prot);
/* Write-combine setting is ignored; it is changed via the mtrr
* interfaces on this platform.
*/
if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
return 0;
}
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
resource_size_t start = res->start;
if ((res->flags & IORESOURCE_IO) && (start & 0x300))
start = (start + 0x3ff) & ~0x3ff;
return start;
}
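/*
* Editor's note: worked example for pcibios_align_resource() above. An
* I/O request starting at 0x358 has bits 8-9 set (0x358 & 0x300 ==
* 0x300), so it is rounded up to the next 1K boundary:
* (0x358 + 0x3ff) & ~0x3ff == 0x400. This is the convention inherited
* from x86 for steering allocations clear of legacy ISA I/O aliases.
*/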
int pcibios_enable_resources(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
int idx;
struct resource *r;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for (idx = 0; idx < 6; idx++) {
/* Only set up the requested stuff */
if (!(mask & (1<<idx)))
continue;
r = &dev->resource[idx];
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (dev->resource[PCI_ROM_RESOURCE].start)
cmd |= PCI_COMMAND_MEMORY;
if (cmd != old_cmd) {
printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
int pcibios_enable_irq(struct pci_dev *dev)
{
dev->irq = EXT_INTR_VECT;
return 0;
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
int err;
if ((err = pcibios_enable_resources(dev, mask)) < 0)
return err;
if (!dev->msi_enabled)
pcibios_enable_irq(dev);
return 0;
}
| gpl-2.0 |
childofthehorn/android_kernel_oneplus_msm8994 | drivers/md/dm-log.c | 4480 | 20440 | /*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the LGPL.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "dirty region log"
static LIST_HEAD(_log_types);
static DEFINE_SPINLOCK(_lock);
static struct dm_dirty_log_type *__find_dirty_log_type(const char *name)
{
struct dm_dirty_log_type *log_type;
list_for_each_entry(log_type, &_log_types, list)
if (!strcmp(name, log_type->name))
return log_type;
return NULL;
}
static struct dm_dirty_log_type *_get_dirty_log_type(const char *name)
{
struct dm_dirty_log_type *log_type;
spin_lock(&_lock);
log_type = __find_dirty_log_type(name);
if (log_type && !try_module_get(log_type->module))
log_type = NULL;
spin_unlock(&_lock);
return log_type;
}
/*
* get_type
* @type_name
*
* Attempt to retrieve the dm_dirty_log_type by name. If not already
* available, attempt to load the appropriate module.
*
* Log modules are named "dm-log-" followed by the 'type_name'.
* Modules may contain multiple types.
* This function will first try the module "dm-log-<type_name>",
* then truncate 'type_name' on the last '-' and try again.
*
* For example, if type_name was "clustered-disk", it would search
* 'dm-log-clustered-disk' then 'dm-log-clustered'.
*
* Returns: dirty_log_type* on success, NULL on failure
*/
static struct dm_dirty_log_type *get_type(const char *type_name)
{
char *p, *type_name_dup;
struct dm_dirty_log_type *log_type;
if (!type_name)
return NULL;
log_type = _get_dirty_log_type(type_name);
if (log_type)
return log_type;
type_name_dup = kstrdup(type_name, GFP_KERNEL);
if (!type_name_dup) {
DMWARN("No memory left to attempt log module load for \"%s\"",
type_name);
return NULL;
}
while (request_module("dm-log-%s", type_name_dup) ||
!(log_type = _get_dirty_log_type(type_name))) {
p = strrchr(type_name_dup, '-');
if (!p)
break;
p[0] = '\0';
}
if (!log_type)
DMWARN("Module for logging type \"%s\" not found.", type_name);
kfree(type_name_dup);
return log_type;
}
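/*
 * Trace of the loop above for the hypothetical "clustered-disk" case
 * from the comment: request_module("dm-log-clustered-disk") fails, the
 * duplicate name is truncated at the last '-', "dm-log-clustered"
 * loads and registers the "clustered-disk" type, and
 * _get_dirty_log_type() then succeeds on the original, untruncated
 * name.
 */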
static void put_type(struct dm_dirty_log_type *type)
{
if (!type)
return;
spin_lock(&_lock);
if (!__find_dirty_log_type(type->name))
goto out;
module_put(type->module);
out:
spin_unlock(&_lock);
}
int dm_dirty_log_type_register(struct dm_dirty_log_type *type)
{
int r = 0;
spin_lock(&_lock);
if (!__find_dirty_log_type(type->name))
list_add(&type->list, &_log_types);
else
r = -EEXIST;
spin_unlock(&_lock);
return r;
}
EXPORT_SYMBOL(dm_dirty_log_type_register);
int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
{
spin_lock(&_lock);
if (!__find_dirty_log_type(type->name)) {
spin_unlock(&_lock);
return -EINVAL;
}
list_del(&type->list);
spin_unlock(&_lock);
return 0;
}
EXPORT_SYMBOL(dm_dirty_log_type_unregister);
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
struct dm_target *ti,
int (*flush_callback_fn)(struct dm_target *ti),
unsigned int argc, char **argv)
{
struct dm_dirty_log_type *type;
struct dm_dirty_log *log;
log = kmalloc(sizeof(*log), GFP_KERNEL);
if (!log)
return NULL;
type = get_type(type_name);
if (!type) {
kfree(log);
return NULL;
}
log->flush_callback_fn = flush_callback_fn;
log->type = type;
if (type->ctr(log, ti, argc, argv)) {
kfree(log);
put_type(type);
return NULL;
}
return log;
}
EXPORT_SYMBOL(dm_dirty_log_create);
void dm_dirty_log_destroy(struct dm_dirty_log *log)
{
log->type->dtr(log);
put_type(log->type);
kfree(log);
}
EXPORT_SYMBOL(dm_dirty_log_destroy);
/*-----------------------------------------------------------------
* Persistent and core logs share a lot of their implementation.
* FIXME: need a reload method to be called from a resume
*---------------------------------------------------------------*/
/*
* Magic for persistent mirrors: "MiRr"
*/
#define MIRROR_MAGIC 0x4D695272
/*
* The on-disk version of the metadata.
*/
#define MIRROR_DISK_VERSION 2
#define LOG_OFFSET 2
struct log_header_disk {
__le32 magic;
/*
* Simple, incrementing version. No backward
* compatibility.
*/
__le32 version;
__le64 nr_regions;
} __packed;
struct log_header_core {
uint32_t magic;
uint32_t version;
uint64_t nr_regions;
};
struct log_c {
struct dm_target *ti;
int touched_dirtied;
int touched_cleaned;
int flush_failed;
uint32_t region_size;
unsigned int region_count;
region_t sync_count;
unsigned bitset_uint32_count;
uint32_t *clean_bits;
uint32_t *sync_bits;
uint32_t *recovering_bits; /* FIXME: this seems excessive */
int sync_search;
/* Resync flag */
enum sync {
DEFAULTSYNC, /* Synchronize if necessary */
NOSYNC, /* Devices known to be already in sync */
FORCESYNC, /* Force a sync to happen */
} sync;
struct dm_io_request io_req;
/*
* Disk log fields
*/
int log_dev_failed;
int log_dev_flush_failed;
struct dm_dev *log_dev;
struct log_header_core header;
struct dm_io_region header_location;
struct log_header_disk *disk_header;
};
/*
* The touched member needs to be updated every time we access
* one of the bitsets.
*/
static inline int log_test_bit(uint32_t *bs, unsigned bit)
{
return test_bit_le(bit, bs) ? 1 : 0;
}
static inline void log_set_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
__set_bit_le(bit, bs);
l->touched_cleaned = 1;
}
static inline void log_clear_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
__clear_bit_le(bit, bs);
l->touched_dirtied = 1;
}
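/*
 * Note the pairing above: setting a clean_bits bit marks a region
 * clean (touched_cleaned), clearing one marks it dirty
 * (touched_dirtied). disk_flush() below uses these flags to skip the
 * header write entirely when nothing changed, and to skip the disk
 * cache flush when no region was dirtied.
 */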
/*----------------------------------------------------------------
* Header IO
*--------------------------------------------------------------*/
static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk)
{
disk->magic = cpu_to_le32(core->magic);
disk->version = cpu_to_le32(core->version);
disk->nr_regions = cpu_to_le64(core->nr_regions);
}
static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk)
{
core->magic = le32_to_cpu(disk->magic);
core->version = le32_to_cpu(disk->version);
core->nr_regions = le64_to_cpu(disk->nr_regions);
}
static int rw_header(struct log_c *lc, int rw)
{
lc->io_req.bi_rw = rw;
return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
static int flush_header(struct log_c *lc)
{
struct dm_io_region null_location = {
.bdev = lc->header_location.bdev,
.sector = 0,
.count = 0,
};
lc->io_req.bi_rw = WRITE_FLUSH;
return dm_io(&lc->io_req, 1, &null_location, NULL);
}
static int read_header(struct log_c *log)
{
int r;
r = rw_header(log, READ);
if (r)
return r;
header_from_disk(&log->header, log->disk_header);
/* New log required? */
if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
log->header.magic = MIRROR_MAGIC;
log->header.version = MIRROR_DISK_VERSION;
log->header.nr_regions = 0;
}
#ifdef __LITTLE_ENDIAN
if (log->header.version == 1)
log->header.version = 2;
#endif
if (log->header.version != MIRROR_DISK_VERSION) {
DMWARN("incompatible disk log version");
return -EINVAL;
}
return 0;
}
static int _check_region_size(struct dm_target *ti, uint32_t region_size)
{
if (region_size < 2 || region_size > ti->len)
return 0;
if (!is_power_of_2(region_size))
return 0;
return 1;
}
/*----------------------------------------------------------------
* core log constructor/destructor
*
* argv contains region_size followed optionally by [no]sync
*--------------------------------------------------------------*/
#define BYTE_SHIFT 3
static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
unsigned int argc, char **argv,
struct dm_dev *dev)
{
enum sync sync = DEFAULTSYNC;
struct log_c *lc;
uint32_t region_size;
unsigned int region_count;
size_t bitset_size, buf_size;
int r;
char dummy;
if (argc < 1 || argc > 2) {
DMWARN("wrong number of arguments to dirty region log");
return -EINVAL;
}
if (argc > 1) {
if (!strcmp(argv[1], "sync"))
sync = FORCESYNC;
else if (!strcmp(argv[1], "nosync"))
sync = NOSYNC;
else {
DMWARN("unrecognised sync argument to "
"dirty region log: %s", argv[1]);
return -EINVAL;
}
}
if (sscanf(argv[0], "%u%c", ®ion_size, &dummy) != 1 ||
!_check_region_size(ti, region_size)) {
DMWARN("invalid region size %s", argv[0]);
return -EINVAL;
}
region_count = dm_sector_div_up(ti->len, region_size);
lc = kmalloc(sizeof(*lc), GFP_KERNEL);
if (!lc) {
DMWARN("couldn't allocate core log");
return -ENOMEM;
}
lc->ti = ti;
lc->touched_dirtied = 0;
lc->touched_cleaned = 0;
lc->flush_failed = 0;
lc->region_size = region_size;
lc->region_count = region_count;
lc->sync = sync;
/*
* Work out how many "unsigned long"s we need to hold the bitset.
*/
bitset_size = dm_round_up(region_count,
sizeof(*lc->clean_bits) << BYTE_SHIFT);
bitset_size >>= BYTE_SHIFT;
lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
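/*
 * Worked example (illustrative numbers): with region_count = 1000 and
 * 32-bit words, dm_round_up(1000, 32) = 1024 bits, so bitset_size is
 * 128 bytes and bitset_uint32_count is 32.
 */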
/*
* Disk log?
*/
if (!dev) {
lc->clean_bits = vmalloc(bitset_size);
if (!lc->clean_bits) {
DMWARN("couldn't allocate clean bitset");
kfree(lc);
return -ENOMEM;
}
lc->disk_header = NULL;
} else {
lc->log_dev = dev;
lc->log_dev_failed = 0;
lc->log_dev_flush_failed = 0;
lc->header_location.bdev = lc->log_dev->bdev;
lc->header_location.sector = 0;
/*
* Buffer holds both header and bitset.
*/
buf_size =
dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
bdev_logical_block_size(lc->header_location.
bdev));
if (buf_size > i_size_read(dev->bdev->bd_inode)) {
DMWARN("log device %s too small: need %llu bytes",
dev->name, (unsigned long long)buf_size);
kfree(lc);
return -EINVAL;
}
lc->header_location.count = buf_size >> SECTOR_SHIFT;
lc->io_req.mem.type = DM_IO_VMA;
lc->io_req.notify.fn = NULL;
lc->io_req.client = dm_io_client_create();
if (IS_ERR(lc->io_req.client)) {
r = PTR_ERR(lc->io_req.client);
DMWARN("couldn't allocate disk io client");
kfree(lc);
return r;
}
lc->disk_header = vmalloc(buf_size);
if (!lc->disk_header) {
DMWARN("couldn't allocate disk log buffer");
dm_io_client_destroy(lc->io_req.client);
kfree(lc);
return -ENOMEM;
}
lc->io_req.mem.ptr.vma = lc->disk_header;
lc->clean_bits = (void *)lc->disk_header +
(LOG_OFFSET << SECTOR_SHIFT);
}
memset(lc->clean_bits, -1, bitset_size);
lc->sync_bits = vmalloc(bitset_size);
if (!lc->sync_bits) {
DMWARN("couldn't allocate sync bitset");
if (!dev)
vfree(lc->clean_bits);
else
dm_io_client_destroy(lc->io_req.client);
vfree(lc->disk_header);
kfree(lc);
return -ENOMEM;
}
memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
lc->sync_count = (sync == NOSYNC) ? region_count : 0;
lc->recovering_bits = vzalloc(bitset_size);
if (!lc->recovering_bits) {
DMWARN("couldn't allocate sync bitset");
vfree(lc->sync_bits);
if (!dev)
vfree(lc->clean_bits);
else
dm_io_client_destroy(lc->io_req.client);
vfree(lc->disk_header);
kfree(lc);
return -ENOMEM;
}
lc->sync_search = 0;
log->context = lc;
return 0;
}
static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti,
unsigned int argc, char **argv)
{
return create_log_context(log, ti, argc, argv, NULL);
}
static void destroy_log_context(struct log_c *lc)
{
vfree(lc->sync_bits);
vfree(lc->recovering_bits);
kfree(lc);
}
static void core_dtr(struct dm_dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
vfree(lc->clean_bits);
destroy_log_context(lc);
}
/*----------------------------------------------------------------
* disk log constructor/destructor
*
* argv contains log_device region_size followed optionally by [no]sync
*--------------------------------------------------------------*/
static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
unsigned int argc, char **argv)
{
int r;
struct dm_dev *dev;
if (argc < 2 || argc > 3) {
DMWARN("wrong number of arguments to disk dirty region log");
return -EINVAL;
}
r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
if (r)
return r;
r = create_log_context(log, ti, argc - 1, argv + 1, dev);
if (r) {
dm_put_device(ti, dev);
return r;
}
return 0;
}
static void disk_dtr(struct dm_dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
dm_put_device(lc->ti, lc->log_dev);
vfree(lc->disk_header);
dm_io_client_destroy(lc->io_req.client);
destroy_log_context(lc);
}
static void fail_log_device(struct log_c *lc)
{
if (lc->log_dev_failed)
return;
lc->log_dev_failed = 1;
dm_table_event(lc->ti->table);
}
static int disk_resume(struct dm_dirty_log *log)
{
int r;
unsigned i;
struct log_c *lc = (struct log_c *) log->context;
size_t size = lc->bitset_uint32_count * sizeof(uint32_t);
/* read the disk header */
r = read_header(lc);
if (r) {
DMWARN("%s: Failed to read header on dirty region log device",
lc->log_dev->name);
fail_log_device(lc);
/*
* If the log device cannot be read, we must assume
* all regions are out-of-sync. If we simply return
* here, the state will be uninitialized and could
* lead us to return 'in-sync' status for regions
* that are actually 'out-of-sync'.
*/
lc->header.nr_regions = 0;
}
/* set or clear any new bits -- device has grown */
if (lc->sync == NOSYNC)
for (i = lc->header.nr_regions; i < lc->region_count; i++)
/* FIXME: amazingly inefficient */
log_set_bit(lc, lc->clean_bits, i);
else
for (i = lc->header.nr_regions; i < lc->region_count; i++)
/* FIXME: amazingly inefficient */
log_clear_bit(lc, lc->clean_bits, i);
/* clear any old bits -- device has shrunk */
for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
log_clear_bit(lc, lc->clean_bits, i);
/* copy clean across to sync */
memcpy(lc->sync_bits, lc->clean_bits, size);
lc->sync_count = memweight(lc->clean_bits,
lc->bitset_uint32_count * sizeof(uint32_t));
lc->sync_search = 0;
/* set the correct number of regions in the header */
lc->header.nr_regions = lc->region_count;
header_to_disk(&lc->header, lc->disk_header);
/* write the new header */
r = rw_header(lc, WRITE);
if (!r) {
r = flush_header(lc);
if (r)
lc->log_dev_flush_failed = 1;
}
if (r) {
DMWARN("%s: Failed to write header on dirty region log device",
lc->log_dev->name);
fail_log_device(lc);
}
return r;
}
static uint32_t core_get_region_size(struct dm_dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
return lc->region_size;
}
static int core_resume(struct dm_dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
lc->sync_search = 0;
return 0;
}
static int core_is_clean(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
return log_test_bit(lc->clean_bits, region);
}
static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
{
struct log_c *lc = (struct log_c *) log->context;
return log_test_bit(lc->sync_bits, region);
}
static int core_flush(struct dm_dirty_log *log)
{
/* no op */
return 0;
}
static int disk_flush(struct dm_dirty_log *log)
{
int r, i;
struct log_c *lc = log->context;
/* only write if the log has changed */
if (!lc->touched_cleaned && !lc->touched_dirtied)
return 0;
if (lc->touched_cleaned && log->flush_callback_fn &&
log->flush_callback_fn(lc->ti)) {
/*
* At this point it is impossible to determine which
* regions are clean and which are dirty (without
* re-reading the log off disk). So mark all of them
* dirty.
*/
lc->flush_failed = 1;
for (i = 0; i < lc->region_count; i++)
log_clear_bit(lc, lc->clean_bits, i);
}
r = rw_header(lc, WRITE);
if (r)
fail_log_device(lc);
else {
if (lc->touched_dirtied) {
r = flush_header(lc);
if (r) {
lc->log_dev_flush_failed = 1;
fail_log_device(lc);
} else
lc->touched_dirtied = 0;
}
lc->touched_cleaned = 0;
}
return r;
}
static void core_mark_region(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
log_clear_bit(lc, lc->clean_bits, region);
}
static void core_clear_region(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
if (likely(!lc->flush_failed))
log_set_bit(lc, lc->clean_bits, region);
}
static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
struct log_c *lc = (struct log_c *) log->context;
if (lc->sync_search >= lc->region_count)
return 0;
do {
*region = find_next_zero_bit_le(lc->sync_bits,
lc->region_count,
lc->sync_search);
lc->sync_search = *region + 1;
if (*region >= lc->region_count)
return 0;
} while (log_test_bit(lc->recovering_bits, *region));
log_set_bit(lc, lc->recovering_bits, *region);
return 1;
}
static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
int in_sync)
{
struct log_c *lc = (struct log_c *) log->context;
log_clear_bit(lc, lc->recovering_bits, region);
if (in_sync) {
log_set_bit(lc, lc->sync_bits, region);
lc->sync_count++;
} else if (log_test_bit(lc->sync_bits, region)) {
lc->sync_count--;
log_clear_bit(lc, lc->sync_bits, region);
}
}
static region_t core_get_sync_count(struct dm_dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
return lc->sync_count;
}
#define DMEMIT_SYNC \
if (lc->sync != DEFAULTSYNC) \
DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
static int core_status(struct dm_dirty_log *log, status_type_t status,
char *result, unsigned int maxlen)
{
int sz = 0;
struct log_c *lc = log->context;
switch (status) {
case STATUSTYPE_INFO:
DMEMIT("1 %s", log->type->name);
break;
case STATUSTYPE_TABLE:
DMEMIT("%s %u %u ", log->type->name,
lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
DMEMIT_SYNC;
}
return sz;
}
static int disk_status(struct dm_dirty_log *log, status_type_t status,
char *result, unsigned int maxlen)
{
int sz = 0;
struct log_c *lc = log->context;
switch (status) {
case STATUSTYPE_INFO:
DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
lc->log_dev_flush_failed ? 'F' :
lc->log_dev_failed ? 'D' :
'A');
break;
case STATUSTYPE_TABLE:
DMEMIT("%s %u %s %u ", log->type->name,
lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
lc->region_size);
DMEMIT_SYNC;
}
return sz;
}
static struct dm_dirty_log_type _core_type = {
.name = "core",
.module = THIS_MODULE,
.ctr = core_ctr,
.dtr = core_dtr,
.resume = core_resume,
.get_region_size = core_get_region_size,
.is_clean = core_is_clean,
.in_sync = core_in_sync,
.flush = core_flush,
.mark_region = core_mark_region,
.clear_region = core_clear_region,
.get_resync_work = core_get_resync_work,
.set_region_sync = core_set_region_sync,
.get_sync_count = core_get_sync_count,
.status = core_status,
};
static struct dm_dirty_log_type _disk_type = {
.name = "disk",
.module = THIS_MODULE,
.ctr = disk_ctr,
.dtr = disk_dtr,
.postsuspend = disk_flush,
.resume = disk_resume,
.get_region_size = core_get_region_size,
.is_clean = core_is_clean,
.in_sync = core_in_sync,
.flush = disk_flush,
.mark_region = core_mark_region,
.clear_region = core_clear_region,
.get_resync_work = core_get_resync_work,
.set_region_sync = core_set_region_sync,
.get_sync_count = core_get_sync_count,
.status = disk_status,
};
static int __init dm_dirty_log_init(void)
{
int r;
r = dm_dirty_log_type_register(&_core_type);
if (r)
DMWARN("couldn't register core log");
r = dm_dirty_log_type_register(&_disk_type);
if (r) {
DMWARN("couldn't register disk type");
dm_dirty_log_type_unregister(&_core_type);
}
return r;
}
static void __exit dm_dirty_log_exit(void)
{
dm_dirty_log_type_unregister(&_disk_type);
dm_dirty_log_type_unregister(&_core_type);
}
module_init(dm_dirty_log_init);
module_exit(dm_dirty_log_exit);
MODULE_DESCRIPTION(DM_NAME " dirty region log");
MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
DarkforestGroup/sony-kernel-msm7x30-ics | arch/x86/crypto/twofish_glue.c | 4736 | 3202 | /*
* Glue Code for assembler optimized version of TWOFISH
*
* Originally Twofish for GPG
* By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998
* 256-bit key length added March 20, 1999
* Some modifications to reduce the text size by Werner Koch, April, 1998
* Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com>
* Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net>
*
* The original author has disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
* Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
* through http://www.counterpane.com/twofish.html
*
* For background information on multiplication in finite fields, used for
* the matrix operations in the key schedule, see the book _Contemporary
* Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
* Third Edition.
*/
#include <crypto/twofish.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_enc_blk(tfm, dst, src);
}
static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_dec_blk(tfm, dst, src);
}
static struct crypto_alg alg = {
.cra_name = "twofish",
.cra_driver_name = "twofish-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = TF_MIN_KEY_SIZE,
.cia_max_keysize = TF_MAX_KEY_SIZE,
.cia_setkey = twofish_setkey,
.cia_encrypt = twofish_encrypt,
.cia_decrypt = twofish_decrypt
}
}
};
static int __init init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
MODULE_ALIAS("twofish");
MODULE_ALIAS("twofish-asm");
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_klte | drivers/video/via/via-gpio.c | 5248 | 7531 | /*
* Support for viafb GPIO ports.
*
* Copyright 2009 Jonathan Corbet <corbet@lwn.net>
* Distributable under version 2 of the GNU General Public License.
*/
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/via-core.h>
#include <linux/via-gpio.h>
#include <linux/export.h>
/*
* The ports we know about. Note that the port-25 gpios are not
* mentioned in the datasheet.
*/
struct viafb_gpio {
char *vg_name; /* Data sheet name */
u16 vg_io_port;
u8 vg_port_index;
int vg_mask_shift;
};
static struct viafb_gpio viafb_all_gpios[] = {
{
.vg_name = "VGPIO0", /* Guess - not in datasheet */
.vg_io_port = VIASR,
.vg_port_index = 0x25,
.vg_mask_shift = 1
},
{
.vg_name = "VGPIO1",
.vg_io_port = VIASR,
.vg_port_index = 0x25,
.vg_mask_shift = 0
},
{
.vg_name = "VGPIO2", /* aka DISPCLKI0 */
.vg_io_port = VIASR,
.vg_port_index = 0x2c,
.vg_mask_shift = 1
},
{
.vg_name = "VGPIO3", /* aka DISPCLKO0 */
.vg_io_port = VIASR,
.vg_port_index = 0x2c,
.vg_mask_shift = 0
},
{
.vg_name = "VGPIO4", /* DISPCLKI1 */
.vg_io_port = VIASR,
.vg_port_index = 0x3d,
.vg_mask_shift = 1
},
{
.vg_name = "VGPIO5", /* DISPCLKO1 */
.vg_io_port = VIASR,
.vg_port_index = 0x3d,
.vg_mask_shift = 0
},
};
#define VIAFB_NUM_GPIOS ARRAY_SIZE(viafb_all_gpios)
/*
* This structure controls the active GPIOs, which may be a subset
* of those which are known.
*/
struct viafb_gpio_cfg {
struct gpio_chip gpio_chip;
struct viafb_dev *vdev;
struct viafb_gpio *active_gpios[VIAFB_NUM_GPIOS];
const char *gpio_names[VIAFB_NUM_GPIOS];
};
/*
* GPIO access functions
*/
static void via_gpio_set(struct gpio_chip *chip, unsigned int nr,
int value)
{
struct viafb_gpio_cfg *cfg = container_of(chip,
struct viafb_gpio_cfg,
gpio_chip);
u8 reg;
struct viafb_gpio *gpio;
unsigned long flags;
spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
gpio = cfg->active_gpios[nr];
reg = via_read_reg(VIASR, gpio->vg_port_index);
reg |= 0x40 << gpio->vg_mask_shift; /* output enable */
if (value)
reg |= 0x10 << gpio->vg_mask_shift;
else
reg &= ~(0x10 << gpio->vg_mask_shift);
via_write_reg(VIASR, gpio->vg_port_index, reg);
spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
}
static int via_gpio_dir_out(struct gpio_chip *chip, unsigned int nr,
int value)
{
via_gpio_set(chip, nr, value);
return 0;
}
/*
* Set the input direction. I'm not sure this is right; we should
* be able to do input without disabling output.
*/
static int via_gpio_dir_input(struct gpio_chip *chip, unsigned int nr)
{
struct viafb_gpio_cfg *cfg = container_of(chip,
struct viafb_gpio_cfg,
gpio_chip);
struct viafb_gpio *gpio;
unsigned long flags;
spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
gpio = cfg->active_gpios[nr];
via_write_reg_mask(VIASR, gpio->vg_port_index, 0,
0x40 << gpio->vg_mask_shift);
spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
return 0;
}
static int via_gpio_get(struct gpio_chip *chip, unsigned int nr)
{
struct viafb_gpio_cfg *cfg = container_of(chip,
struct viafb_gpio_cfg,
gpio_chip);
u8 reg;
struct viafb_gpio *gpio;
unsigned long flags;
spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
gpio = cfg->active_gpios[nr];
reg = via_read_reg(VIASR, gpio->vg_port_index);
spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
return reg & (0x04 << gpio->vg_mask_shift);
}
static struct viafb_gpio_cfg viafb_gpio_config = {
.gpio_chip = {
.label = "VIAFB onboard GPIO",
.owner = THIS_MODULE,
.direction_output = via_gpio_dir_out,
.set = via_gpio_set,
.direction_input = via_gpio_dir_input,
.get = via_gpio_get,
.base = -1,
.ngpio = 0,
.can_sleep = 0
}
};
/*
* Manage the software enable bit.
*/
static void viafb_gpio_enable(struct viafb_gpio *gpio)
{
via_write_reg_mask(VIASR, gpio->vg_port_index, 0x02, 0x02);
}
static void viafb_gpio_disable(struct viafb_gpio *gpio)
{
via_write_reg_mask(VIASR, gpio->vg_port_index, 0, 0x02);
}
#ifdef CONFIG_PM
static int viafb_gpio_suspend(void *private)
{
return 0;
}
static int viafb_gpio_resume(void *private)
{
int i;
for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i += 2)
viafb_gpio_enable(viafb_gpio_config.active_gpios[i]);
return 0;
}
static struct viafb_pm_hooks viafb_gpio_pm_hooks = {
.suspend = viafb_gpio_suspend,
.resume = viafb_gpio_resume
};
#endif /* CONFIG_PM */
/*
* Look up a specific gpio and return the number it was assigned.
*/
int viafb_gpio_lookup(const char *name)
{
int i;
for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i++)
if (!strcmp(name, viafb_gpio_config.active_gpios[i]->vg_name))
return viafb_gpio_config.gpio_chip.base + i;
return -1;
}
EXPORT_SYMBOL_GPL(viafb_gpio_lookup);
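/*
 * A minimal usage sketch (hypothetical consumer, not part of this
 * driver): once the chip is registered, a client can claim a pin by
 * its datasheet name and drive it through the generic gpiolib calls:
 *
 *	int gpio = viafb_gpio_lookup("VGPIO3");
 *
 *	if (gpio >= 0 && gpio_request(gpio, "some-consumer") == 0)
 *		gpio_direction_output(gpio, 1);
 */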
/*
* Platform device stuff.
*/
static __devinit int viafb_gpio_probe(struct platform_device *platdev)
{
struct viafb_dev *vdev = platdev->dev.platform_data;
struct via_port_cfg *port_cfg = vdev->port_cfg;
int i, ngpio = 0, ret;
struct viafb_gpio *gpio;
unsigned long flags;
/*
* Set up entries for all GPIOs which have been configured to
* operate as such (as opposed to as i2c ports).
*/
for (i = 0; i < VIAFB_NUM_PORTS; i++) {
if (port_cfg[i].mode != VIA_MODE_GPIO)
continue;
for (gpio = viafb_all_gpios;
gpio < viafb_all_gpios + VIAFB_NUM_GPIOS; gpio++)
if (gpio->vg_port_index == port_cfg[i].ioport_index) {
viafb_gpio_config.active_gpios[ngpio] = gpio;
viafb_gpio_config.gpio_names[ngpio] =
gpio->vg_name;
ngpio++;
}
}
viafb_gpio_config.gpio_chip.ngpio = ngpio;
viafb_gpio_config.gpio_chip.names = viafb_gpio_config.gpio_names;
viafb_gpio_config.vdev = vdev;
if (ngpio == 0) {
printk(KERN_INFO "viafb: no GPIOs configured\n");
return 0;
}
/*
* Enable the ports. They come in pairs, with a single
* enable bit for both.
*/
spin_lock_irqsave(&viafb_gpio_config.vdev->reg_lock, flags);
for (i = 0; i < ngpio; i += 2)
viafb_gpio_enable(viafb_gpio_config.active_gpios[i]);
spin_unlock_irqrestore(&viafb_gpio_config.vdev->reg_lock, flags);
/*
* Get registered.
*/
viafb_gpio_config.gpio_chip.base = -1; /* Dynamic */
ret = gpiochip_add(&viafb_gpio_config.gpio_chip);
if (ret) {
printk(KERN_ERR "viafb: failed to add gpios (%d)\n", ret);
viafb_gpio_config.gpio_chip.ngpio = 0;
}
#ifdef CONFIG_PM
viafb_pm_register(&viafb_gpio_pm_hooks);
#endif
return ret;
}
static int viafb_gpio_remove(struct platform_device *platdev)
{
unsigned long flags;
int ret = 0, i;
#ifdef CONFIG_PM
viafb_pm_unregister(&viafb_gpio_pm_hooks);
#endif
/*
* Get unregistered.
*/
if (viafb_gpio_config.gpio_chip.ngpio > 0) {
ret = gpiochip_remove(&viafb_gpio_config.gpio_chip);
if (ret) { /* Somebody still using it? */
printk(KERN_ERR "Viafb: GPIO remove failed\n");
return ret;
}
}
/*
* Disable the ports.
*/
spin_lock_irqsave(&viafb_gpio_config.vdev->reg_lock, flags);
for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i += 2)
viafb_gpio_disable(viafb_gpio_config.active_gpios[i]);
viafb_gpio_config.gpio_chip.ngpio = 0;
spin_unlock_irqrestore(&viafb_gpio_config.vdev->reg_lock, flags);
return ret;
}
static struct platform_driver via_gpio_driver = {
.driver = {
.name = "viafb-gpio",
},
.probe = viafb_gpio_probe,
.remove = viafb_gpio_remove,
};
int viafb_gpio_init(void)
{
return platform_driver_register(&via_gpio_driver);
}
void viafb_gpio_exit(void)
{
platform_driver_unregister(&via_gpio_driver);
}
| gpl-2.0 |
barome/HD_mako | drivers/tty/serial/mcf.c | 5248 | 17708 | /****************************************************************************/
/*
* mcf.c -- Freescale ColdFire UART driver
*
* (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
/****************************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/nettel.h>
/****************************************************************************/
/*
* Some boards implement the DTR/DCD lines using GPIO lines, most
* don't. Dummy out the access macros for those that don't. Those
* that do should define these macros somewhere in their board-
* specific include files.
*/
#if !defined(mcf_getppdcd)
#define mcf_getppdcd(p) (1)
#endif
#if !defined(mcf_getppdtr)
#define mcf_getppdtr(p) (1)
#endif
#if !defined(mcf_setppdtr)
#define mcf_setppdtr(p, v) do { } while (0)
#endif
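/*
 * A board that does route these signals through GPIOs might provide,
 * in its board-specific include file, something along these lines
 * (the GPIO numbers here are made up for illustration):
 *
 * #define mcf_getppdcd(p)	(gpio_get_value(40 + (p)) ? 1 : 0)
 * #define mcf_setppdtr(p, v)	gpio_set_value(44 + (p), (v) ? 1 : 0)
 */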
/****************************************************************************/
/*
* Local per-uart structure.
*/
struct mcf_uart {
struct uart_port port;
unsigned int sigs; /* Local copy of line sigs */
unsigned char imr; /* Local IMR mirror */
};
/****************************************************************************/
static unsigned int mcf_tx_empty(struct uart_port *port)
{
return (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXEMPTY) ?
TIOCSER_TEMT : 0;
}
/****************************************************************************/
static unsigned int mcf_get_mctrl(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned int sigs;
sigs = (readb(port->membase + MCFUART_UIPR) & MCFUART_UIPR_CTS) ?
0 : TIOCM_CTS;
sigs |= (pp->sigs & TIOCM_RTS);
sigs |= (mcf_getppdcd(port->line) ? TIOCM_CD : 0);
sigs |= (mcf_getppdtr(port->line) ? TIOCM_DTR : 0);
return sigs;
}
/****************************************************************************/
static void mcf_set_mctrl(struct uart_port *port, unsigned int sigs)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
pp->sigs = sigs;
mcf_setppdtr(port->line, (sigs & TIOCM_DTR));
if (sigs & TIOCM_RTS)
writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP1);
else
writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP0);
}
/****************************************************************************/
static void mcf_start_tx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
pp->imr |= MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
}
/****************************************************************************/
static void mcf_stop_tx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
pp->imr &= ~MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
}
/****************************************************************************/
static void mcf_stop_rx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
pp->imr &= ~MCFUART_UIR_RXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
}
/****************************************************************************/
static void mcf_break_ctl(struct uart_port *port, int break_state)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (break_state == -1)
writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR);
else
writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR);
spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
static void mcf_enable_ms(struct uart_port *port)
{
}
/****************************************************************************/
static int mcf_startup(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* Reset UART, get it into known state... */
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
/* Enable the UART transmitter and receiver */
writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
port->membase + MCFUART_UCR);
/* Enable RX interrupts now */
pp->imr = MCFUART_UIR_RXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
/****************************************************************************/
static void mcf_shutdown(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* Disable all interrupts now */
pp->imr = 0;
writeb(pp->imr, port->membase + MCFUART_UIMR);
/* Disable UART transmitter and receiver */
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
unsigned long flags;
unsigned int baud, baudclk;
#if defined(CONFIG_M5272)
unsigned int baudfr;
#endif
unsigned char mr1, mr2;
baud = uart_get_baud_rate(port, termios, old, 0, 230400);
#if defined(CONFIG_M5272)
baudclk = (MCF_BUSCLK / baud) / 32;
baudfr = (((MCF_BUSCLK / baud) + 1) / 2) % 16;
#else
baudclk = ((MCF_BUSCLK / baud) + 16) / 32;
#endif
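/*
 * Worked example (illustrative numbers): with a hypothetical 66 MHz
 * MCF_BUSCLK and 115200 baud, the non-5272 formula gives
 * (66000000 / 115200 + 16) / 32 = 18; adding 16 before the divide
 * rounds to the nearest 32x prescaler divisor (~0.5% rate error here).
 */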
mr1 = MCFUART_MR1_RXIRQRDY | MCFUART_MR1_RXERRCHAR;
mr2 = 0;
switch (termios->c_cflag & CSIZE) {
case CS5: mr1 |= MCFUART_MR1_CS5; break;
case CS6: mr1 |= MCFUART_MR1_CS6; break;
case CS7: mr1 |= MCFUART_MR1_CS7; break;
case CS8:
default: mr1 |= MCFUART_MR1_CS8; break;
}
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & CMSPAR) {
if (termios->c_cflag & PARODD)
mr1 |= MCFUART_MR1_PARITYMARK;
else
mr1 |= MCFUART_MR1_PARITYSPACE;
} else {
if (termios->c_cflag & PARODD)
mr1 |= MCFUART_MR1_PARITYODD;
else
mr1 |= MCFUART_MR1_PARITYEVEN;
}
} else {
mr1 |= MCFUART_MR1_PARITYNONE;
}
if (termios->c_cflag & CSTOPB)
mr2 |= MCFUART_MR2_STOP2;
else
mr2 |= MCFUART_MR2_STOP1;
if (termios->c_cflag & CRTSCTS) {
mr1 |= MCFUART_MR1_RXRTS;
mr2 |= MCFUART_MR2_TXCTS;
}
spin_lock_irqsave(&port->lock, flags);
uart_update_timeout(port, termios->c_cflag, baud);
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR);
writeb(mr1, port->membase + MCFUART_UMR);
writeb(mr2, port->membase + MCFUART_UMR);
writeb((baudclk & 0xff00) >> 8, port->membase + MCFUART_UBG1);
writeb((baudclk & 0xff), port->membase + MCFUART_UBG2);
#if defined(CONFIG_M5272)
writeb((baudfr & 0x0f), port->membase + MCFUART_UFPD);
#endif
writeb(MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER,
port->membase + MCFUART_UCSR);
writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
port->membase + MCFUART_UCR);
spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
static void mcf_rx_chars(struct mcf_uart *pp)
{
struct uart_port *port = &pp->port;
unsigned char status, ch, flag;
while ((status = readb(port->membase + MCFUART_USR)) & MCFUART_USR_RXREADY) {
ch = readb(port->membase + MCFUART_URB);
flag = TTY_NORMAL;
port->icount.rx++;
if (status & MCFUART_USR_RXERR) {
writeb(MCFUART_UCR_CMDRESETERR,
port->membase + MCFUART_UCR);
if (status & MCFUART_USR_RXBREAK) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
} else if (status & MCFUART_USR_RXPARITY) {
port->icount.parity++;
} else if (status & MCFUART_USR_RXOVERRUN) {
port->icount.overrun++;
} else if (status & MCFUART_USR_RXFRAMING) {
port->icount.frame++;
}
status &= port->read_status_mask;
if (status & MCFUART_USR_RXBREAK)
flag = TTY_BREAK;
else if (status & MCFUART_USR_RXPARITY)
flag = TTY_PARITY;
else if (status & MCFUART_USR_RXFRAMING)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, ch))
continue;
uart_insert_char(port, status, MCFUART_USR_RXOVERRUN, ch, flag);
}
tty_flip_buffer_push(port->state->port.tty);
}
/****************************************************************************/
static void mcf_tx_chars(struct mcf_uart *pp)
{
struct uart_port *port = &pp->port;
struct circ_buf *xmit = &port->state->xmit;
if (port->x_char) {
/* Send special char - probably flow control */
writeb(port->x_char, port->membase + MCFUART_UTB);
port->x_char = 0;
port->icount.tx++;
return;
}
while (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY) {
if (xmit->head == xmit->tail)
break;
writeb(xmit->buf[xmit->tail], port->membase + MCFUART_UTB);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (xmit->head == xmit->tail) {
pp->imr &= ~MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
}
}
/****************************************************************************/
static irqreturn_t mcf_interrupt(int irq, void *data)
{
struct uart_port *port = data;
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned int isr;
irqreturn_t ret = IRQ_NONE;
isr = readb(port->membase + MCFUART_UISR) & pp->imr;
spin_lock(&port->lock);
if (isr & MCFUART_UIR_RXREADY) {
mcf_rx_chars(pp);
ret = IRQ_HANDLED;
}
if (isr & MCFUART_UIR_TXREADY) {
mcf_tx_chars(pp);
ret = IRQ_HANDLED;
}
spin_unlock(&port->lock);
return ret;
}
/****************************************************************************/
static void mcf_config_port(struct uart_port *port, int flags)
{
port->type = PORT_MCF;
port->fifosize = MCFUART_TXFIFOSIZE;
/* Clear mask, so no surprise interrupts. */
writeb(0, port->membase + MCFUART_UIMR);
if (request_irq(port->irq, mcf_interrupt, 0, "UART", port))
printk(KERN_ERR "MCF: unable to attach ColdFire UART %d "
"interrupt vector=%d\n", port->line, port->irq);
}
/****************************************************************************/
static const char *mcf_type(struct uart_port *port)
{
return (port->type == PORT_MCF) ? "ColdFire UART" : NULL;
}
/****************************************************************************/
static int mcf_request_port(struct uart_port *port)
{
/* UARTs always present */
return 0;
}
/****************************************************************************/
static void mcf_release_port(struct uart_port *port)
{
/* Nothing to release... */
}
/****************************************************************************/
static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
{
if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_MCF))
return -EINVAL;
return 0;
}
/****************************************************************************/
/*
* Define the basic serial functions we support.
*/
static const struct uart_ops mcf_uart_ops = {
.tx_empty = mcf_tx_empty,
.get_mctrl = mcf_get_mctrl,
.set_mctrl = mcf_set_mctrl,
.start_tx = mcf_start_tx,
.stop_tx = mcf_stop_tx,
.stop_rx = mcf_stop_rx,
.enable_ms = mcf_enable_ms,
.break_ctl = mcf_break_ctl,
.startup = mcf_startup,
.shutdown = mcf_shutdown,
.set_termios = mcf_set_termios,
.type = mcf_type,
.request_port = mcf_request_port,
.release_port = mcf_release_port,
.config_port = mcf_config_port,
.verify_port = mcf_verify_port,
};
static struct mcf_uart mcf_ports[4];
#define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
/****************************************************************************/
#if defined(CONFIG_SERIAL_MCF_CONSOLE)
/****************************************************************************/
int __init early_mcf_setup(struct mcf_platform_uart *platp)
{
struct uart_port *port;
int i;
for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
port = &mcf_ports[i].port;
port->line = i;
port->type = PORT_MCF;
port->mapbase = platp[i].mapbase;
port->membase = (platp[i].membase) ? platp[i].membase :
(unsigned char __iomem *) port->mapbase;
port->iotype = SERIAL_IO_MEM;
port->irq = platp[i].irq;
port->uartclk = MCF_BUSCLK;
port->flags = ASYNC_BOOT_AUTOCONF;
port->ops = &mcf_uart_ops;
}
return 0;
}
/****************************************************************************/
static void mcf_console_putc(struct console *co, const char c)
{
struct uart_port *port = &(mcf_ports + co->index)->port;
int i;
for (i = 0; (i < 0x10000); i++) {
if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY)
break;
}
writeb(c, port->membase + MCFUART_UTB);
for (i = 0; (i < 0x10000); i++) {
if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY)
break;
}
}
/****************************************************************************/
static void mcf_console_write(struct console *co, const char *s, unsigned int count)
{
for (; (count); count--, s++) {
mcf_console_putc(co, *s);
if (*s == '\n')
mcf_console_putc(co, '\r');
}
}
/****************************************************************************/
static int __init mcf_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = CONFIG_SERIAL_MCF_BAUDRATE;
int bits = 8;
int parity = 'n';
int flow = 'n';
if ((co->index < 0) || (co->index >= MCF_MAXPORTS))
co->index = 0;
port = &mcf_ports[co->index].port;
if (port->membase == 0)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
/****************************************************************************/
static struct uart_driver mcf_driver;
static struct console mcf_console = {
.name = "ttyS",
.write = mcf_console_write,
.device = uart_console_device,
.setup = mcf_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &mcf_driver,
};
static int __init mcf_console_init(void)
{
register_console(&mcf_console);
return 0;
}
console_initcall(mcf_console_init);
#define MCF_CONSOLE &mcf_console
/****************************************************************************/
#else
/****************************************************************************/
#define MCF_CONSOLE NULL
/****************************************************************************/
#endif /* CONFIG_SERIAL_MCF_CONSOLE */
/****************************************************************************/
/*
* Define the mcf UART driver structure.
*/
static struct uart_driver mcf_driver = {
.owner = THIS_MODULE,
.driver_name = "mcf",
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
.nr = MCF_MAXPORTS,
.cons = MCF_CONSOLE,
};
/****************************************************************************/
static int __devinit mcf_probe(struct platform_device *pdev)
{
struct mcf_platform_uart *platp = pdev->dev.platform_data;
struct uart_port *port;
int i;
for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
port = &mcf_ports[i].port;
port->line = i;
port->type = PORT_MCF;
port->mapbase = platp[i].mapbase;
port->membase = (platp[i].membase) ? platp[i].membase :
(unsigned char __iomem *) platp[i].mapbase;
port->iotype = SERIAL_IO_MEM;
port->irq = platp[i].irq;
port->uartclk = MCF_BUSCLK;
port->ops = &mcf_uart_ops;
port->flags = ASYNC_BOOT_AUTOCONF;
uart_add_one_port(&mcf_driver, port);
}
return 0;
}
/****************************************************************************/
static int __devexit mcf_remove(struct platform_device *pdev)
{
struct uart_port *port;
int i;
for (i = 0; (i < MCF_MAXPORTS); i++) {
port = &mcf_ports[i].port;
if (port)
uart_remove_one_port(&mcf_driver, port);
}
return 0;
}
/****************************************************************************/
static struct platform_driver mcf_platform_driver = {
.probe = mcf_probe,
.remove = __devexit_p(mcf_remove),
.driver = {
.name = "mcfuart",
.owner = THIS_MODULE,
},
};
/****************************************************************************/
static int __init mcf_init(void)
{
int rc;
printk("ColdFire internal UART serial driver\n");
rc = uart_register_driver(&mcf_driver);
if (rc)
return rc;
rc = platform_driver_register(&mcf_platform_driver);
if (rc)
return rc;
return 0;
}
/****************************************************************************/
static void __exit mcf_exit(void)
{
platform_driver_unregister(&mcf_platform_driver);
uart_unregister_driver(&mcf_driver);
}
/****************************************************************************/
module_init(mcf_init);
module_exit(mcf_exit);
MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
MODULE_DESCRIPTION("Freescale ColdFire UART driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mcfuart");
/****************************************************************************/
| gpl-2.0 |
TeamExodus/kernel_xiaomi_cancro | arch/sparc/prom/init_32.c | 7296 | 1768 | /*
* init.c: Initialize internal variables used by the PROM
* library functions.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
struct linux_romvec *romvec;
EXPORT_SYMBOL(romvec);
enum prom_major_version prom_vers;
unsigned int prom_rev, prom_prev;
/* The root node of the prom device tree. */
phandle prom_root_node;
EXPORT_SYMBOL(prom_root_node);
/* Pointer to the device tree operations structure. */
struct linux_nodeops *prom_nodeops;
/* You must call prom_init() before you attempt to use any of the
* routines in the prom library. It gets passed the pointer to the
* PROM vector and halts via prom_halt() on failure.
*/
extern void prom_meminit(void);
extern void prom_ranges_init(void);
void __init prom_init(struct linux_romvec *rp)
{
romvec = rp;
switch(romvec->pv_romvers) {
case 0:
prom_vers = PROM_V0;
break;
case 2:
prom_vers = PROM_V2;
break;
case 3:
prom_vers = PROM_V3;
break;
default:
prom_printf("PROMLIB: Bad PROM version %d\n",
romvec->pv_romvers);
prom_halt();
break;
}
prom_rev = romvec->pv_plugin_revision;
prom_prev = romvec->pv_printrev;
prom_nodeops = romvec->pv_nodeops;
prom_root_node = prom_getsibling(0);
if ((prom_root_node == 0) || ((s32)prom_root_node == -1))
prom_halt();
if((((unsigned long) prom_nodeops) == 0) ||
(((unsigned long) prom_nodeops) == -1))
prom_halt();
prom_meminit();
prom_ranges_init();
printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n",
romvec->pv_romvers, prom_rev);
/* Initialization successful. */
}
| gpl-2.0 |
MoKee/android_kernel_samsung_lentislte | sound/pci/ctxfi/cttimer.c | 8320 | 11344 | /*
* PCM timer handling on ctxfi
*
* This source file is released under GPL v2 license (no other versions).
* See the COPYING file included in the main directory of this source
* distribution for the license terms and conditions.
*/
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "ctatc.h"
#include "cthardware.h"
#include "cttimer.h"
static bool use_system_timer;
MODULE_PARM_DESC(use_system_timer, "Force to use system-timer");
module_param(use_system_timer, bool, S_IRUGO);
struct ct_timer_ops {
void (*init)(struct ct_timer_instance *);
void (*prepare)(struct ct_timer_instance *);
void (*start)(struct ct_timer_instance *);
void (*stop)(struct ct_timer_instance *);
void (*free_instance)(struct ct_timer_instance *);
void (*interrupt)(struct ct_timer *);
void (*free_global)(struct ct_timer *);
};
/* timer instance -- assigned to each PCM stream */
struct ct_timer_instance {
spinlock_t lock;
struct ct_timer *timer_base;
struct ct_atc_pcm *apcm;
struct snd_pcm_substream *substream;
struct timer_list timer;
struct list_head instance_list;
struct list_head running_list;
unsigned int position;
unsigned int frag_count;
unsigned int running:1;
unsigned int need_update:1;
};
/* timer instance manager */
struct ct_timer {
spinlock_t lock; /* global timer lock (for xfitimer) */
spinlock_t list_lock; /* lock for instance list */
struct ct_atc *atc;
struct ct_timer_ops *ops;
struct list_head instance_head;
struct list_head running_head;
unsigned int wc; /* current wallclock */
unsigned int irq_handling:1; /* in IRQ handling */
unsigned int reprogram:1; /* need to reprogram the interval */
unsigned int running:1; /* global timer running */
};
/*
* system-timer-based updates
*/
static void ct_systimer_callback(unsigned long data)
{
struct ct_timer_instance *ti = (struct ct_timer_instance *)data;
struct snd_pcm_substream *substream = ti->substream;
struct snd_pcm_runtime *runtime = substream->runtime;
struct ct_atc_pcm *apcm = ti->apcm;
unsigned int period_size = runtime->period_size;
unsigned int buffer_size = runtime->buffer_size;
unsigned long flags;
unsigned int position, dist, interval;
position = substream->ops->pointer(substream);
dist = (position + buffer_size - ti->position) % buffer_size;
if (dist >= period_size ||
position / period_size != ti->position / period_size) {
apcm->interrupt(apcm);
ti->position = position;
}
/* Add extra HZ*5/1000 to avoid overrun issue when recording
* at 8kHz in 8-bit format or at 88kHz in 24-bit format. */
interval = ((period_size - (position % period_size))
* HZ + (runtime->rate - 1)) / runtime->rate + HZ * 5 / 1000;
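/*
 * Worked example (illustrative numbers): at 8 kHz with a 256-frame
 * period and HZ=250, the worst case is (256 * 250 + 7999) / 8000 + 1
 * = 9 jiffies, i.e. 36 ms for a 32 ms period.
 */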
spin_lock_irqsave(&ti->lock, flags);
if (ti->running)
mod_timer(&ti->timer, jiffies + interval);
spin_unlock_irqrestore(&ti->lock, flags);
}
static void ct_systimer_init(struct ct_timer_instance *ti)
{
setup_timer(&ti->timer, ct_systimer_callback,
(unsigned long)ti);
}
static void ct_systimer_start(struct ct_timer_instance *ti)
{
struct snd_pcm_runtime *runtime = ti->substream->runtime;
unsigned long flags;
spin_lock_irqsave(&ti->lock, flags);
ti->running = 1;
mod_timer(&ti->timer,
jiffies + (runtime->period_size * HZ +
(runtime->rate - 1)) / runtime->rate);
spin_unlock_irqrestore(&ti->lock, flags);
}
static void ct_systimer_stop(struct ct_timer_instance *ti)
{
unsigned long flags;
spin_lock_irqsave(&ti->lock, flags);
ti->running = 0;
del_timer(&ti->timer);
spin_unlock_irqrestore(&ti->lock, flags);
}
static void ct_systimer_prepare(struct ct_timer_instance *ti)
{
ct_systimer_stop(ti);
try_to_del_timer_sync(&ti->timer);
}
#define ct_systimer_free ct_systimer_prepare
static struct ct_timer_ops ct_systimer_ops = {
.init = ct_systimer_init,
.free_instance = ct_systimer_free,
.prepare = ct_systimer_prepare,
.start = ct_systimer_start,
.stop = ct_systimer_stop,
};
/*
* Handling multiple streams using a global emu20k1 timer irq
*/
#define CT_TIMER_FREQ 48000
#define MIN_TICKS 1
#define MAX_TICKS ((1 << 13) - 1)
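/*
 * Ticks are in wall-clock units of CT_TIMER_FREQ (48 kHz), so the
 * 13-bit hardware counter above caps one programmed interval at
 * roughly 170 ms ((2^13 - 1) / 48000 s).
 */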
static void ct_xfitimer_irq_rearm(struct ct_timer *atimer, int ticks)
{
struct hw *hw = atimer->atc->hw;
if (ticks > MAX_TICKS)
ticks = MAX_TICKS;
hw->set_timer_tick(hw, ticks);
if (!atimer->running)
hw->set_timer_irq(hw, 1);
atimer->running = 1;
}
static void ct_xfitimer_irq_stop(struct ct_timer *atimer)
{
if (atimer->running) {
struct hw *hw = atimer->atc->hw;
hw->set_timer_irq(hw, 0);
hw->set_timer_tick(hw, 0);
atimer->running = 0;
}
}
static inline unsigned int ct_xfitimer_get_wc(struct ct_timer *atimer)
{
struct hw *hw = atimer->atc->hw;
return hw->get_wc(hw);
}
/*
* reprogram the timer interval;
* checks the running instance list and determines the next timer interval.
* also updates the each stream position, returns the number of streams
* to call snd_pcm_period_elapsed() appropriately
*
* call this inside the lock and irq disabled
*/
static int ct_xfitimer_reprogram(struct ct_timer *atimer, int can_update)
{
struct ct_timer_instance *ti;
unsigned int min_intr = (unsigned int)-1;
int updates = 0;
unsigned int wc, diff;
if (list_empty(&atimer->running_head)) {
ct_xfitimer_irq_stop(atimer);
atimer->reprogram = 0; /* clear flag */
return 0;
}
wc = ct_xfitimer_get_wc(atimer);
diff = wc - atimer->wc;
atimer->wc = wc;
list_for_each_entry(ti, &atimer->running_head, running_list) {
if (ti->frag_count > diff)
ti->frag_count -= diff;
else {
unsigned int pos;
unsigned int period_size, rate;
period_size = ti->substream->runtime->period_size;
rate = ti->substream->runtime->rate;
pos = ti->substream->ops->pointer(ti->substream);
if (pos / period_size != ti->position / period_size) {
ti->need_update = 1;
ti->position = pos;
updates++;
}
pos %= period_size;
pos = period_size - pos;
ti->frag_count = div_u64((u64)pos * CT_TIMER_FREQ +
rate - 1, rate);
}
if (ti->need_update && !can_update)
min_intr = 0; /* pending to the next irq */
if (ti->frag_count < min_intr)
min_intr = ti->frag_count;
}
if (min_intr < MIN_TICKS)
min_intr = MIN_TICKS;
ct_xfitimer_irq_rearm(atimer, min_intr);
atimer->reprogram = 0; /* clear flag */
return updates;
}
/* look through the instance list and call period_elapsed if needed */
static void ct_xfitimer_check_period(struct ct_timer *atimer)
{
struct ct_timer_instance *ti;
unsigned long flags;
spin_lock_irqsave(&atimer->list_lock, flags);
list_for_each_entry(ti, &atimer->instance_head, instance_list) {
if (ti->running && ti->need_update) {
ti->need_update = 0;
ti->apcm->interrupt(ti->apcm);
}
}
spin_unlock_irqrestore(&atimer->list_lock, flags);
}
/* Handle timer-interrupt */
static void ct_xfitimer_callback(struct ct_timer *atimer)
{
int update;
unsigned long flags;
spin_lock_irqsave(&atimer->lock, flags);
atimer->irq_handling = 1;
do {
update = ct_xfitimer_reprogram(atimer, 1);
spin_unlock(&atimer->lock);
if (update)
ct_xfitimer_check_period(atimer);
spin_lock(&atimer->lock);
} while (atimer->reprogram);
atimer->irq_handling = 0;
spin_unlock_irqrestore(&atimer->lock, flags);
}
static void ct_xfitimer_prepare(struct ct_timer_instance *ti)
{
ti->frag_count = ti->substream->runtime->period_size;
ti->running = 0;
ti->need_update = 0;
}
/* start/stop the timer */
static void ct_xfitimer_update(struct ct_timer *atimer)
{
unsigned long flags;
spin_lock_irqsave(&atimer->lock, flags);
if (atimer->irq_handling) {
/* reached from IRQ handler; let it handle later */
atimer->reprogram = 1;
spin_unlock_irqrestore(&atimer->lock, flags);
return;
}
ct_xfitimer_irq_stop(atimer);
ct_xfitimer_reprogram(atimer, 0);
spin_unlock_irqrestore(&atimer->lock, flags);
}
static void ct_xfitimer_start(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
unsigned long flags;
spin_lock_irqsave(&atimer->lock, flags);
if (list_empty(&ti->running_list))
atimer->wc = ct_xfitimer_get_wc(atimer);
ti->running = 1;
ti->need_update = 0;
list_add(&ti->running_list, &atimer->running_head);
spin_unlock_irqrestore(&atimer->lock, flags);
ct_xfitimer_update(atimer);
}
static void ct_xfitimer_stop(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
unsigned long flags;
spin_lock_irqsave(&atimer->lock, flags);
list_del_init(&ti->running_list);
ti->running = 0;
spin_unlock_irqrestore(&atimer->lock, flags);
ct_xfitimer_update(atimer);
}
static void ct_xfitimer_free_global(struct ct_timer *atimer)
{
ct_xfitimer_irq_stop(atimer);
}
static struct ct_timer_ops ct_xfitimer_ops = {
.prepare = ct_xfitimer_prepare,
.start = ct_xfitimer_start,
.stop = ct_xfitimer_stop,
.interrupt = ct_xfitimer_callback,
.free_global = ct_xfitimer_free_global,
};
/*
* timer instance
*/
struct ct_timer_instance *
ct_timer_instance_new(struct ct_timer *atimer, struct ct_atc_pcm *apcm)
{
struct ct_timer_instance *ti;
ti = kzalloc(sizeof(*ti), GFP_KERNEL);
if (!ti)
return NULL;
spin_lock_init(&ti->lock);
INIT_LIST_HEAD(&ti->instance_list);
INIT_LIST_HEAD(&ti->running_list);
ti->timer_base = atimer;
ti->apcm = apcm;
ti->substream = apcm->substream;
if (atimer->ops->init)
atimer->ops->init(ti);
spin_lock_irq(&atimer->list_lock);
list_add(&ti->instance_list, &atimer->instance_head);
spin_unlock_irq(&atimer->list_lock);
return ti;
}
void ct_timer_prepare(struct ct_timer_instance *ti)
{
if (ti->timer_base->ops->prepare)
ti->timer_base->ops->prepare(ti);
ti->position = 0;
ti->running = 0;
}
void ct_timer_start(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
atimer->ops->start(ti);
}
void ct_timer_stop(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
atimer->ops->stop(ti);
}
void ct_timer_instance_free(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
atimer->ops->stop(ti); /* to be sure */
if (atimer->ops->free_instance)
atimer->ops->free_instance(ti);
spin_lock_irq(&atimer->list_lock);
list_del(&ti->instance_list);
spin_unlock_irq(&atimer->list_lock);
kfree(ti);
}
/*
* timer manager
*/
static void ct_timer_interrupt(void *data, unsigned int status)
{
struct ct_timer *timer = data;
/* Interval timer interrupt */
if ((status & IT_INT) && timer->ops->interrupt)
timer->ops->interrupt(timer);
}
struct ct_timer *ct_timer_new(struct ct_atc *atc)
{
struct ct_timer *atimer;
struct hw *hw;
atimer = kzalloc(sizeof(*atimer), GFP_KERNEL);
if (!atimer)
return NULL;
spin_lock_init(&atimer->lock);
spin_lock_init(&atimer->list_lock);
INIT_LIST_HEAD(&atimer->instance_head);
INIT_LIST_HEAD(&atimer->running_head);
atimer->atc = atc;
hw = atc->hw;
if (!use_system_timer && hw->set_timer_irq) {
snd_printd(KERN_INFO "ctxfi: Use xfi-native timer\n");
atimer->ops = &ct_xfitimer_ops;
hw->irq_callback_data = atimer;
hw->irq_callback = ct_timer_interrupt;
} else {
snd_printd(KERN_INFO "ctxfi: Use system timer\n");
atimer->ops = &ct_systimer_ops;
}
return atimer;
}
void ct_timer_free(struct ct_timer *atimer)
{
struct hw *hw = atimer->atc->hw;
hw->irq_callback = NULL;
if (atimer->ops->free_global)
atimer->ops->free_global(atimer);
kfree(atimer);
}
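/*
 * Illustrative sketch (not part of the original file): the expected
 * lifecycle of a timer instance as driven by the PCM code. The my_atc
 * and my_apcm variables are hypothetical stand-ins for the driver's
 * real objects.
 */
#if 0
static int example_timer_lifecycle(struct ct_atc *my_atc,
				   struct ct_atc_pcm *my_apcm)
{
	struct ct_timer *timer;
	struct ct_timer_instance *ti;

	timer = ct_timer_new(my_atc);	/* picks xfi-native or system timer */
	if (!timer)
		return -ENOMEM;

	ti = ct_timer_instance_new(timer, my_apcm);
	if (!ti) {
		ct_timer_free(timer);
		return -ENOMEM;
	}

	ct_timer_prepare(ti);	/* at PCM prepare time */
	ct_timer_start(ti);	/* at PCM trigger START */
	ct_timer_stop(ti);	/* at PCM trigger STOP */

	ct_timer_instance_free(ti);
	ct_timer_free(timer);
	return 0;
}
#endif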
| gpl-2.0 |
phenomx4/android_kernel_lge_fx3 | drivers/isdn/capi/capilib.c | 9600 | 4630 |
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/isdn/capilli.h>
#define DBG(format, arg...) do { \
printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
} while (0)
struct capilib_msgidqueue {
struct capilib_msgidqueue *next;
u16 msgid;
};
struct capilib_ncci {
struct list_head list;
u16 applid;
u32 ncci;
u32 winsize;
int nmsg;
struct capilib_msgidqueue *msgidqueue;
struct capilib_msgidqueue *msgidlast;
struct capilib_msgidqueue *msgidfree;
struct capilib_msgidqueue msgidpool[CAPI_MAXDATAWINDOW];
};
// ---------------------------------------------------------------------------
// NCCI Handling
static inline void mq_init(struct capilib_ncci *np)
{
u_int i;
np->msgidqueue = NULL;
np->msgidlast = NULL;
np->nmsg = 0;
memset(np->msgidpool, 0, sizeof(np->msgidpool));
np->msgidfree = &np->msgidpool[0];
for (i = 1; i < np->winsize; i++) {
np->msgidpool[i].next = np->msgidfree;
np->msgidfree = &np->msgidpool[i];
}
}
static inline int mq_enqueue(struct capilib_ncci *np, u16 msgid)
{
struct capilib_msgidqueue *mq;
if ((mq = np->msgidfree) == NULL)
return 0;
np->msgidfree = mq->next;
mq->msgid = msgid;
mq->next = NULL;
if (np->msgidlast)
np->msgidlast->next = mq;
np->msgidlast = mq;
if (!np->msgidqueue)
np->msgidqueue = mq;
np->nmsg++;
return 1;
}
static inline int mq_dequeue(struct capilib_ncci *np, u16 msgid)
{
struct capilib_msgidqueue **pp;
for (pp = &np->msgidqueue; *pp; pp = &(*pp)->next) {
if ((*pp)->msgid == msgid) {
struct capilib_msgidqueue *mq = *pp;
*pp = mq->next;
if (mq == np->msgidlast)
np->msgidlast = NULL;
mq->next = np->msgidfree;
np->msgidfree = mq;
np->nmsg--;
return 1;
}
}
return 0;
}
void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize)
{
struct capilib_ncci *np;
np = kmalloc(sizeof(*np), GFP_ATOMIC);
if (!np) {
printk(KERN_WARNING "capilib_new_ncci: no memory.\n");
return;
}
if (winsize > CAPI_MAXDATAWINDOW) {
printk(KERN_ERR "capi_new_ncci: winsize %d too big\n",
winsize);
winsize = CAPI_MAXDATAWINDOW;
}
np->applid = applid;
np->ncci = ncci;
np->winsize = winsize;
mq_init(np);
list_add_tail(&np->list, head);
DBG("kcapi: appl %d ncci 0x%x up", applid, ncci);
}
EXPORT_SYMBOL(capilib_new_ncci);
void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci)
{
struct list_head *l;
struct capilib_ncci *np;
list_for_each(l, head) {
np = list_entry(l, struct capilib_ncci, list);
if (np->applid != applid)
continue;
if (np->ncci != ncci)
continue;
printk(KERN_INFO "kcapi: appl %d ncci 0x%x down\n", applid, ncci);
list_del(&np->list);
kfree(np);
return;
}
printk(KERN_ERR "capilib_free_ncci: ncci 0x%x not found\n", ncci);
}
EXPORT_SYMBOL(capilib_free_ncci);
void capilib_release_appl(struct list_head *head, u16 applid)
{
struct list_head *l, *n;
struct capilib_ncci *np;
list_for_each_safe(l, n, head) {
np = list_entry(l, struct capilib_ncci, list);
if (np->applid != applid)
continue;
printk(KERN_INFO "kcapi: appl %d ncci 0x%x forced down\n", applid, np->ncci);
list_del(&np->list);
kfree(np);
}
}
EXPORT_SYMBOL(capilib_release_appl);
void capilib_release(struct list_head *head)
{
struct list_head *l, *n;
struct capilib_ncci *np;
list_for_each_safe(l, n, head) {
np = list_entry(l, struct capilib_ncci, list);
printk(KERN_INFO "kcapi: appl %d ncci 0x%x forced down\n", np->applid, np->ncci);
list_del(&np->list);
kfree(np);
}
}
EXPORT_SYMBOL(capilib_release);
u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid)
{
struct list_head *l;
struct capilib_ncci *np;
list_for_each(l, head) {
np = list_entry(l, struct capilib_ncci, list);
if (np->applid != applid)
continue;
if (np->ncci != ncci)
continue;
if (mq_enqueue(np, msgid) == 0)
return CAPI_SENDQUEUEFULL;
return CAPI_NOERROR;
}
printk(KERN_ERR "capilib_data_b3_req: ncci 0x%x not found\n", ncci);
return CAPI_NOERROR;
}
EXPORT_SYMBOL(capilib_data_b3_req);
void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid)
{
struct list_head *l;
struct capilib_ncci *np;
list_for_each(l, head) {
np = list_entry(l, struct capilib_ncci, list);
if (np->applid != applid)
continue;
if (np->ncci != ncci)
continue;
if (mq_dequeue(np, msgid) == 0) {
printk(KERN_ERR "kcapi: msgid %hu ncci 0x%x not on queue\n",
msgid, ncci);
}
return;
}
printk(KERN_ERR "capilib_data_b3_conf: ncci 0x%x not found\n", ncci);
}
EXPORT_SYMBOL(capilib_data_b3_conf);
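/*
 * Illustrative sketch (not part of the original file): how a CAPI driver
 * is expected to use the helpers above for DATA_B3 flow control. The
 * ncci_head list and the applid/ncci/msgid values are hypothetical.
 */
#if 0
static LIST_HEAD(ncci_head);

static void example_data_b3_flow(void)
{
	u16 applid = 1, msgid = 42;
	u32 ncci = 0x10101;

	/* on CONNECT_B3_ACTIVE_IND: track the NCCI with an 8-message window */
	capilib_new_ncci(&ncci_head, applid, ncci, 8);

	/* on DATA_B3_REQ: reserve a window slot; a full queue means the
	 * caller must hold the message back */
	if (capilib_data_b3_req(&ncci_head, applid, ncci, msgid)
	    == CAPI_SENDQUEUEFULL)
		return;

	/* on DATA_B3_CONF: release the slot reserved for msgid */
	capilib_data_b3_conf(&ncci_head, applid, ncci, msgid);

	/* on DISCONNECT_B3_IND: drop the NCCI */
	capilib_free_ncci(&ncci_head, applid, ncci);
}
#endif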
| gpl-2.0 |
upichie/ghost | arch/powerpc/platforms/52xx/mpc52xx_pm.c | 12160 | 5307 | #include <linux/init.h>
#include <linux/suspend.h>
#include <linux/io.h>
#include <asm/time.h>
#include <asm/cacheflush.h>
#include <asm/mpc52xx.h>
/* these are defined in mpc52xx_sleep.S, and only used here */
extern void mpc52xx_deep_sleep(void __iomem *sram, void __iomem *sdram_regs,
struct mpc52xx_cdm __iomem *, struct mpc52xx_intr __iomem*);
extern void mpc52xx_ds_sram(void);
extern const long mpc52xx_ds_sram_size;
extern void mpc52xx_ds_cached(void);
extern const long mpc52xx_ds_cached_size;
static void __iomem *mbar;
static void __iomem *sdram;
static struct mpc52xx_cdm __iomem *cdm;
static struct mpc52xx_intr __iomem *intr;
static struct mpc52xx_gpio_wkup __iomem *gpiow;
static void __iomem *sram;
static int sram_size;
struct mpc52xx_suspend mpc52xx_suspend;
static int mpc52xx_pm_valid(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_STANDBY:
return 1;
default:
return 0;
}
}
int mpc52xx_set_wakeup_gpio(u8 pin, u8 level)
{
u16 tmp;
/* enable gpio */
out_8(&gpiow->wkup_gpioe, in_8(&gpiow->wkup_gpioe) | (1 << pin));
/* set as input */
out_8(&gpiow->wkup_ddr, in_8(&gpiow->wkup_ddr) & ~(1 << pin));
/* enable deep sleep interrupt */
out_8(&gpiow->wkup_inten, in_8(&gpiow->wkup_inten) | (1 << pin));
/* low/high level creates wakeup interrupt */
tmp = in_be16(&gpiow->wkup_itype);
tmp &= ~(0x3 << (pin * 2));
tmp |= (!level + 1) << (pin * 2);
out_be16(&gpiow->wkup_itype, tmp);
/* master enable */
out_8(&gpiow->wkup_maste, 1);
return 0;
}
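/*
 * Illustrative sketch (not part of the original file): a board setup
 * routine arming a wakeup GPIO and hooking the board suspend callback.
 * The pin number, the callback body, and its assumed void return type
 * are hypothetical.
 */
#if 0
static void my_board_suspend_prepare(void __iomem *mbar)
{
	/* board-specific quiesce/wakeup programming goes here */
}

static void example_pm_setup(void)
{
	/* arm WKUP pin 7; 'level' selects the active wakeup level */
	mpc52xx_set_wakeup_gpio(7, 0);
	mpc52xx_suspend.board_suspend_prepare = my_board_suspend_prepare;
}
#endif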
int mpc52xx_pm_prepare(void)
{
struct device_node *np;
const struct of_device_id immr_ids[] = {
{ .compatible = "fsl,mpc5200-immr", },
{ .compatible = "fsl,mpc5200b-immr", },
{ .type = "soc", .compatible = "mpc5200", }, /* lite5200 */
{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
{}
};
struct resource res;
/* map the whole register space */
np = of_find_matching_node(NULL, immr_ids);
if (of_address_to_resource(np, 0, &res)) {
pr_err("mpc52xx_pm_prepare(): could not get IMMR address\n");
of_node_put(np);
return -ENOSYS;
}
mbar = ioremap(res.start, 0xc000); /* we should map whole region including SRAM */
of_node_put(np);
if (!mbar) {
pr_err("mpc52xx_pm_prepare(): could not map registers\n");
return -ENOSYS;
}
/* these offsets are from mpc5200 users manual */
sdram = mbar + 0x100;
cdm = mbar + 0x200;
intr = mbar + 0x500;
gpiow = mbar + 0xc00;
sram = mbar + 0x8000; /* Those will be handled by the */
sram_size = 0x4000; /* bestcomm driver soon */
/* call board suspend code, if applicable */
if (mpc52xx_suspend.board_suspend_prepare)
mpc52xx_suspend.board_suspend_prepare(mbar);
else {
printk(KERN_ALERT "%s: %i: don't know how to wake up the board\n",
__func__, __LINE__);
goto out_unmap;
}
return 0;
out_unmap:
iounmap(mbar);
return -ENOSYS;
}
char saved_sram[0x4000];
int mpc52xx_pm_enter(suspend_state_t state)
{
u32 clk_enables;
u32 msr, hid0;
u32 intr_main_mask;
void __iomem * irq_0x500 = (void __iomem *)CONFIG_KERNEL_START + 0x500;
unsigned long irq_0x500_stop = (unsigned long)irq_0x500 + mpc52xx_ds_cached_size;
char saved_0x500[mpc52xx_ds_cached_size];
/* disable all interrupts in PIC */
intr_main_mask = in_be32(&intr->main_mask);
out_be32(&intr->main_mask, intr_main_mask | 0x1ffff);
/* don't let DEC expire any time soon */
mtspr(SPRN_DEC, 0x7fffffff);
/* save SRAM */
memcpy(saved_sram, sram, sram_size);
/* copy low level suspend code to sram */
memcpy(sram, mpc52xx_ds_sram, mpc52xx_ds_sram_size);
out_8(&cdm->ccs_sleep_enable, 1);
out_8(&cdm->osc_sleep_enable, 1);
out_8(&cdm->ccs_qreq_test, 1);
/* disable all but SDRAM and bestcomm (SRAM) clocks */
clk_enables = in_be32(&cdm->clk_enables);
out_be32(&cdm->clk_enables, clk_enables & 0x00088000);
/* disable power management */
msr = mfmsr();
mtmsr(msr & ~MSR_POW);
/* enable sleep mode, disable others */
hid0 = mfspr(SPRN_HID0);
mtspr(SPRN_HID0, (hid0 & ~(HID0_DOZE | HID0_NAP | HID0_DPM)) | HID0_SLEEP);
/* save original, copy our irq handler, flush from dcache and invalidate icache */
memcpy(saved_0x500, irq_0x500, mpc52xx_ds_cached_size);
memcpy(irq_0x500, mpc52xx_ds_cached, mpc52xx_ds_cached_size);
flush_icache_range((unsigned long)irq_0x500, irq_0x500_stop);
/* call low-level sleep code */
mpc52xx_deep_sleep(sram, sdram, cdm, intr);
/* restore original irq handler */
memcpy(irq_0x500, saved_0x500, mpc52xx_ds_cached_size);
flush_icache_range((unsigned long)irq_0x500, irq_0x500_stop);
/* restore old power mode */
mtmsr(msr & ~MSR_POW);
mtspr(SPRN_HID0, hid0);
mtmsr(msr);
out_be32(&cdm->clk_enables, clk_enables);
out_8(&cdm->ccs_sleep_enable, 0);
out_8(&cdm->osc_sleep_enable, 0);
/* restore SRAM */
memcpy(sram, saved_sram, sram_size);
/* reenable interrupts in PIC */
out_be32(&intr->main_mask, intr_main_mask);
return 0;
}
void mpc52xx_pm_finish(void)
{
/* call board resume code */
if (mpc52xx_suspend.board_resume_finish)
mpc52xx_suspend.board_resume_finish(mbar);
iounmap(mbar);
}
static const struct platform_suspend_ops mpc52xx_pm_ops = {
.valid = mpc52xx_pm_valid,
.prepare = mpc52xx_pm_prepare,
.enter = mpc52xx_pm_enter,
.finish = mpc52xx_pm_finish,
};
int __init mpc52xx_pm_init(void)
{
suspend_set_ops(&mpc52xx_pm_ops);
return 0;
}
| gpl-2.0 |
aatjitra/M7 | drivers/mmc/core/sdio_ops.c | 385 | 3817 | /*
* linux/drivers/mmc/sdio_ops.c
*
* Copyright 2006-2007 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include "core.h"
#include "sdio_ops.h"
int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
struct mmc_command cmd = {0};
int i, err = 0;
BUG_ON(!host);
cmd.opcode = SD_IO_SEND_OP_COND;
cmd.arg = ocr;
cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR;
for (i = 100; i; i--) {
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
break;
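/* if we're just probing, do a single pass */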
if (ocr == 0)
break;
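/* otherwise wait until reset completes */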
if (mmc_host_is_spi(host)) {
if (cmd.resp[1] & MMC_CARD_BUSY)
break;
} else {
if (cmd.resp[0] & MMC_CARD_BUSY)
break;
}
err = -ETIMEDOUT;
mmc_delay(10);
}
if (rocr)
*rocr = cmd.resp[mmc_host_is_spi(host) ? 1 : 0];
return err;
}
static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
unsigned addr, u8 in, u8 *out)
{
struct mmc_command cmd = {0};
int err;
BUG_ON(!host);
BUG_ON(fn > 7);
if (addr & ~0x1FFFF)
return -EINVAL;
cmd.opcode = SD_IO_RW_DIRECT;
cmd.arg = write ? 0x80000000 : 0x00000000;
cmd.arg |= fn << 28;
cmd.arg |= (write && out) ? 0x08000000 : 0x00000000;
cmd.arg |= addr << 9;
cmd.arg |= in;
cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
return err;
if (mmc_host_is_spi(host)) {
} else {
if (cmd.resp[0] & R5_ERROR)
return -EIO;
if (cmd.resp[0] & R5_FUNCTION_NUMBER)
return -EINVAL;
if (cmd.resp[0] & R5_OUT_OF_RANGE)
return -ERANGE;
}
if (out) {
if (mmc_host_is_spi(host))
*out = (cmd.resp[0] >> 8) & 0xFF;
else
*out = cmd.resp[0] & 0xFF;
}
return 0;
}
int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
unsigned addr, u8 in, u8 *out)
{
BUG_ON(!card);
return mmc_io_rw_direct_host(card->host, write, fn, addr, in, out);
}
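/*
 * Illustrative sketch (not part of the original file): reading a single
 * CCCR register over function 0 with the direct helper. 'card' is a
 * hypothetical, already-initialized SDIO card.
 */
#if 0
static int example_read_cccr_io_enable(struct mmc_card *card, u8 *val)
{
	/* read (write=0) one byte from function 0 at SDIO_CCCR_IOEx */
	return mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IOEx, 0, val);
}
#endif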
int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
{
struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
struct scatterlist sg;
BUG_ON(!card);
BUG_ON(fn > 7);
WARN_ON(blksz == 0);
if (addr & ~0x1FFFF)
return -EINVAL;
mrq.cmd = &cmd;
mrq.data = &data;
cmd.opcode = SD_IO_RW_EXTENDED;
cmd.arg = write ? 0x80000000 : 0x00000000;
cmd.arg |= fn << 28;
cmd.arg |= incr_addr ? 0x04000000 : 0x00000000;
cmd.arg |= addr << 9;
if (blocks == 0)
cmd.arg |= (blksz == 512) ? 0 : blksz;
else
cmd.arg |= 0x08000000 | blocks;
cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
data.blksz = blksz;
data.blocks = blocks ? blocks : 1;
data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
sg_init_one(&sg, buf, data.blksz * data.blocks);
mmc_set_data_timeout(&data, card);
mmc_wait_for_req(card->host, &mrq);
if (cmd.error)
return cmd.error;
if (data.error)
return data.error;
if (mmc_host_is_spi(card->host)) {
} else {
if (cmd.resp[0] & R5_ERROR)
return -EIO;
if (cmd.resp[0] & R5_FUNCTION_NUMBER)
return -EINVAL;
if (cmd.resp[0] & R5_OUT_OF_RANGE)
return -ERANGE;
}
return 0;
}
int sdio_reset(struct mmc_host *host)
{
int ret;
u8 abort;
ret = mmc_io_rw_direct_host(host, 0, 0, SDIO_CCCR_ABORT, 0, &abort);
if (ret)
abort = 0x08;
else
abort |= 0x08;
ret = mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL);
return ret;
}
| gpl-2.0 |
CyanogenMod/android_kernel_oneplus_msm8994 | drivers/base/power/opp.c | 1153 | 23100 | /*
* Generic OPP Interface
*
* Copyright (C) 2009-2010 Texas Instruments Incorporated.
* Nishanth Menon
* Romit Dasgupta
* Kevin Hilman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/pm_opp.h>
#include <linux/of.h>
#include <linux/export.h>
/*
* Internal data structure organization with the OPP layer library is as
* follows:
* dev_opp_list (root)
* |- device 1 (represents voltage domain 1)
* | |- opp 1 (availability, freq, voltage)
* | |- opp 2 ..
* ... ...
* | `- opp n ..
* |- device 2 (represents the next voltage domain)
* ...
* `- device m (represents mth voltage domain)
* device 1, 2.. are represented by dev_opp structure while each opp
* is represented by the opp structure.
*/
/**
* struct opp - Generic OPP description structure
* @node: opp list node. The nodes are maintained throughout the lifetime
* of boot. It is expected only an optimal set of OPPs are
* added to the library by the SoC framework.
* RCU usage: opp list is traversed with RCU locks. node
* modification is possible at runtime, hence the modifications
* are protected by the dev_opp_list_lock for integrity.
* IMPORTANT: the opp nodes should be maintained in increasing
* order.
* @available: true/false - marks if this OPP as available or not
* @rate: Frequency in hertz
* @u_volt: Nominal voltage in microvolts corresponding to this OPP
* @dev_opp: points back to the device_opp struct this opp belongs to
* @head: RCU callback head used for deferred freeing
*
* This structure stores the OPP information for a given device.
*/
struct opp {
struct list_head node;
bool available;
unsigned long rate;
unsigned long u_volt;
struct device_opp *dev_opp;
struct rcu_head head;
};
/**
* struct device_opp - Device opp structure
* @node: list node - contains the devices with OPPs that
* have been registered. Nodes once added are not modified in this
* list.
* RCU usage: nodes are not modified in the list of device_opp,
* however addition is possible and is secured by dev_opp_list_lock
* @dev: device pointer
* @head: notifier head to notify the OPP availability changes.
* @opp_list: list of opps
*
* This is an internal data structure maintaining the link to opps attached to
* a device. This structure is not meant to be shared to users as it is
* meant for book keeping and private to OPP library
*/
struct device_opp {
struct list_head node;
struct device *dev;
struct srcu_notifier_head head;
struct list_head opp_list;
};
/*
* The root of the list of all devices. All device_opp structures branch off
* from here, with each device_opp containing the list of opp it supports in
* various states of availability.
*/
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
/**
* find_device_opp() - find device_opp struct using device pointer
* @dev: device pointer used to lookup device OPPs
*
* Search list of device OPPs for one containing matching device. Does a RCU
* reader operation to grab the pointer needed.
*
* Returns pointer to 'struct device_opp' if found, otherwise ERR_PTR(-ENODEV)
* or ERR_PTR(-EINVAL) based on the type of error.
*
* Locking: This function must be called under rcu_read_lock(). device_opp
* is a RCU protected pointer. This means that device_opp is valid as long
* as we are under RCU lock.
*/
static struct device_opp *find_device_opp(struct device *dev)
{
struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
if (unlikely(IS_ERR_OR_NULL(dev))) {
pr_err("%s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
if (tmp_dev_opp->dev == dev) {
dev_opp = tmp_dev_opp;
break;
}
}
return dev_opp;
}
/**
* dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
* @opp: opp for which the voltage has to be returned
*
* Return voltage in micro volt corresponding to the opp, else
* return 0
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. This means that opp which could have been fetched by
* opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
* under RCU lock. The pointer returned by the opp_find_freq family must be
* used in the same section as the usage of this function with the pointer
* prior to unlocking with rcu_read_unlock() to maintain the integrity of the
* pointer.
*/
unsigned long dev_pm_opp_get_voltage(struct opp *opp)
{
struct opp *tmp_opp;
unsigned long v = 0;
tmp_opp = rcu_dereference(opp);
if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
pr_err("%s: Invalid parameters\n", __func__);
else
v = tmp_opp->u_volt;
return v;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
/**
* dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
* @opp: opp for which the frequency has to be returned
*
* Return frequency in hertz corresponding to the opp, else
* return 0
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. This means that opp which could have been fetched by
* opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
* under RCU lock. The pointer returned by the opp_find_freq family must be
* used in the same section as the usage of this function with the pointer
* prior to unlocking with rcu_read_unlock() to maintain the integrity of the
* pointer.
*/
unsigned long dev_pm_opp_get_freq(struct opp *opp)
{
struct opp *tmp_opp;
unsigned long f = 0;
tmp_opp = rcu_dereference(opp);
if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
pr_err("%s: Invalid parameters\n", __func__);
else
f = tmp_opp->rate;
return f;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
* dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
* @dev: device for which we do this operation
*
* This function returns the number of available opps if there are any,
* 0 if there are none, or the corresponding error value.
*
* Locking: This function must be called under rcu_read_lock(). This function
* internally references two RCU protected structures: device_opp and opp which
* are safe as long as we are under a common RCU locked section.
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
struct device_opp *dev_opp;
struct opp *temp_opp;
int count = 0;
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
return r;
}
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available)
count++;
}
return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
/**
* dev_pm_opp_find_freq_exact() - search for an exact frequency
* @dev: device for which we do this operation
* @freq: frequency to search for
* @available: true/false - match for available opp
*
* Searches for exact match in the opp list and returns pointer to the matching
* opp if found, else returns ERR_PTR in case of error and should be handled
* using IS_ERR. Error return values can be:
* EINVAL: for bad pointer
* ERANGE: no match found for search
* ENODEV: if device not found in list of registered devices
*
* Note: available is a modifier for the search. If available=true, the match
* is for an exact frequency that is available in the stored OPP table. If
* false, the match is for an exact frequency that is not available.
*
* This provides a mechanism to enable an opp which is not available currently
* or the opposite as well.
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
* returned will remain valid for use with opp_get_{voltage, freq} only while
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
struct opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq,
bool available)
{
struct device_opp *dev_opp;
struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
return ERR_PTR(r);
}
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available == available &&
temp_opp->rate == freq) {
opp = temp_opp;
break;
}
}
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
/**
* dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
* @dev: device for which we do this operation
* @freq: Start frequency
*
* Search for the matching ceil *available* OPP from a starting freq
* for a device.
*
* Returns matching *opp and refreshes *freq accordingly, else returns
* ERR_PTR in case of error and should be handled using IS_ERR. Error return
* values can be:
* EINVAL: for bad pointer
* ERANGE: no match found for search
* ENODEV: if device not found in list of registered devices
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
* returned will remain valid for use with opp_get_{voltage, freq} only while
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
struct opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
struct device_opp *dev_opp;
struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
}
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
return ERR_CAST(dev_opp);
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available && temp_opp->rate >= *freq) {
opp = temp_opp;
*freq = opp->rate;
break;
}
}
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
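/*
 * Illustrative sketch (not part of the original file): the RCU-locked
 * lookup pattern the comments above require. Both the search and the
 * dev_pm_opp_get_voltage() call sit inside one rcu_read_lock() section,
 * since the opp pointer is only valid while the lock is held.
 */
#if 0
static int example_get_voltage_for(struct device *dev, unsigned long *freq,
				   unsigned long *u_volt)
{
	struct opp *opp;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*u_volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();
	return 0;
}
#endif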
/**
* dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
* @dev: device for which we do this operation
* @freq: Start frequency
*
* Search for the matching floor *available* OPP from a starting freq
* for a device.
*
* Returns matching *opp and refreshes *freq accordingly, else returns
* ERR_PTR in case of error and should be handled using IS_ERR. Error return
* values can be:
* EINVAL: for bad pointer
* ERANGE: no match found for search
* ENODEV: if device not found in list of registered devices
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
* returned will remain valid for use with opp_get_{voltage, freq} only while
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
struct opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
struct device_opp *dev_opp;
struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
}
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
return ERR_CAST(dev_opp);
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available) {
/* go to the next node, before choosing prev */
if (temp_opp->rate > *freq)
break;
else
opp = temp_opp;
}
}
if (!IS_ERR(opp))
*freq = opp->rate;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
*
* This function adds an opp definition to the opp list and returns status.
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
struct device_opp *dev_opp = NULL;
struct opp *opp, *new_opp;
struct list_head *head;
/* allocate new OPP node */
new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
return -ENOMEM;
}
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
/* Check for existing list for 'dev' */
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
/*
* Allocate a new device OPP table. In the infrequent case
* where a new device is needed to be added, we pay this
* penalty.
*/
dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
if (!dev_opp) {
mutex_unlock(&dev_opp_list_lock);
kfree(new_opp);
dev_warn(dev,
"%s: Unable to create device OPP structure\n",
__func__);
return -ENOMEM;
}
dev_opp->dev = dev;
srcu_init_notifier_head(&dev_opp->head);
INIT_LIST_HEAD(&dev_opp->opp_list);
/* Secure the device list modification */
list_add_rcu(&dev_opp->node, &dev_opp_list);
}
/* populate the opp table */
new_opp->dev_opp = dev_opp;
new_opp->rate = freq;
new_opp->u_volt = u_volt;
new_opp->available = true;
/* Insert new OPP in order of increasing frequency */
head = &dev_opp->opp_list;
list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
if (new_opp->rate < opp->rate)
break;
else
head = &opp->node;
}
list_add_rcu(&new_opp->node, head);
mutex_unlock(&dev_opp_list_lock);
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
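/*
 * Illustrative sketch (not part of the original file): registering a
 * small OPP table at probe time. The frequencies and voltages are made
 * up for the example.
 */
#if 0
static int example_register_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 300000000, 1025000); /* 300 MHz @ 1.025 V */
	if (ret)
		return ret;
	return dev_pm_opp_add(dev, 600000000, 1200000); /* 600 MHz @ 1.2 V */
}
#endif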
/**
* opp_set_availability() - helper to set the availability of an opp
* @dev: device for which we do this operation
* @freq: OPP frequency to modify availability
* @availability_req: availability status requested for this opp
*
* Set the availability of an OPP with an RCU operation, opp_{enable,disable}
* share a common logic which is isolated here.
*
* Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
* copy operation, returns 0 if no modification was done OR modification was
* successful.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks to
* keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
static int opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
/* keep the node allocated */
new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create OPP\n", __func__);
return -ENOMEM;
}
mutex_lock(&dev_opp_list_lock);
/* Find the device_opp */
list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
if (dev == tmp_dev_opp->dev) {
dev_opp = tmp_dev_opp;
break;
}
}
if (IS_ERR(dev_opp)) {
r = PTR_ERR(dev_opp);
dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
goto unlock;
}
/* Do we have the frequency? */
list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
if (tmp_opp->rate == freq) {
opp = tmp_opp;
break;
}
}
if (IS_ERR(opp)) {
r = PTR_ERR(opp);
goto unlock;
}
/* Is update really needed? */
if (opp->available == availability_req)
goto unlock;
/* copy the old data over */
*new_opp = *opp;
/* plug in new node */
new_opp->available = availability_req;
list_replace_rcu(&opp->node, &new_opp->node);
mutex_unlock(&dev_opp_list_lock);
kfree_rcu(opp, head);
/* Notify the change of the OPP availability */
if (availability_req)
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
new_opp);
else
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
new_opp);
return 0;
unlock:
mutex_unlock(&dev_opp_list_lock);
kfree(new_opp);
return r;
}
/**
* dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
*
* Enables a provided opp. If the operation is valid, this returns 0, else the
* corresponding error value. It is meant to be used by users to make an OPP
* available again after it was temporarily made unavailable with
* dev_pm_opp_disable.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
/**
* dev_pm_opp_disable() - Disable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to disable
*
* Disables a provided opp. If the operation is valid, this returns
* 0, else the corresponding error value. It is meant to be a temporary
* control by users to make this OPP not available until the circumstances are
* right to make it available again (with a call to dev_pm_opp_enable).
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
#ifdef CONFIG_CPU_FREQ
/**
* dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
* @dev: device for which we do this operation
* @table: Cpufreq table returned back to caller
*
* Generate a cpufreq table for a provided device - this assumes that the
* opp list is already initialized and ready for usage.
*
* This function allocates required memory for the cpufreq table. It is
* expected that the caller does the required maintenance such as freeing
* the table as required.
*
* Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
* if no memory available for the operation (table is not populated), returns 0
* if successful and table is populated.
*
* WARNING: It is important for the callers to ensure refreshing their copy of
* the table if dev_pm_opp_enable/disable or dev_pm_opp_add have been invoked
* in the interim.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* To simplify the logic, we pretend we are updater and hold relevant mutex here
* Callers should ensure that this function is *NOT* called under RCU protection
* or in contexts where mutex locking cannot be used.
*/
int dev_pm_opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
struct device_opp *dev_opp;
struct opp *opp;
struct cpufreq_frequency_table *freq_table;
int i = 0;
/* Pretend as if I am an updater */
mutex_lock(&dev_opp_list_lock);
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
mutex_unlock(&dev_opp_list_lock);
dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
return r;
}
freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
(dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
if (!freq_table) {
mutex_unlock(&dev_opp_list_lock);
dev_warn(dev, "%s: Unable to allocate frequency table\n",
__func__);
return -ENOMEM;
}
list_for_each_entry(opp, &dev_opp->opp_list, node) {
if (opp->available) {
freq_table[i].driver_data = i;
freq_table[i].frequency = opp->rate / 1000;
i++;
}
}
mutex_unlock(&dev_opp_list_lock);
freq_table[i].driver_data = i;
freq_table[i].frequency = CPUFREQ_TABLE_END;
*table = &freq_table[0];
return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
/**
* dev_pm_opp_free_cpufreq_table() - free the cpufreq table
* @dev: device for which we do this operation
* @table: table to free
*
* Free up the table allocated by dev_pm_opp_init_cpufreq_table
*/
void dev_pm_opp_free_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
if (!table)
return;
kfree(*table);
*table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
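/*
 * Illustrative sketch (not part of the original file): a cpufreq driver
 * building and later releasing its frequency table from the OPP list.
 * 'example_table' and the init/exit function names are hypothetical.
 */
#if 0
static struct cpufreq_frequency_table *example_table;

static int example_cpufreq_init(struct device *cpu_dev)
{
	return dev_pm_opp_init_cpufreq_table(cpu_dev, &example_table);
}

static void example_cpufreq_exit(struct device *cpu_dev)
{
	dev_pm_opp_free_cpufreq_table(cpu_dev, &example_table);
}
#endif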
#endif /* CONFIG_CPU_FREQ */
/**
* dev_pm_opp_get_notifier() - find notifier_head of the device with opp
* @dev: device pointer used to lookup device OPPs.
*/
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
struct device_opp *dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
return ERR_CAST(dev_opp); /* matching type */
return &dev_opp->head;
}
#ifdef CONFIG_OF
/**
* of_init_opp_table() - Initialize opp table from device tree
* @dev: device pointer used to lookup device OPPs.
*
* Register the initial OPP table with the OPP library for given device.
*/
int of_init_opp_table(struct device *dev)
{
const struct property *prop;
const __be32 *val;
int nr;
prop = of_find_property(dev->of_node, "operating-points", NULL);
if (!prop)
return -ENODEV;
if (!prop->value)
return -ENODATA;
/*
* Each OPP is a set of tuples consisting of frequency and
* voltage like <freq-kHz vol-uV>.
*/
nr = prop->length / sizeof(u32);
if (nr % 2) {
dev_err(dev, "%s: Invalid OPP list\n", __func__);
return -EINVAL;
}
val = prop->value;
while (nr) {
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
if (dev_pm_opp_add(dev, freq, volt)) {
dev_warn(dev, "%s: Failed to add OPP %ld\n",
__func__, freq);
continue;
}
nr -= 2;
}
return 0;
}
EXPORT_SYMBOL_GPL(of_init_opp_table);
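/*
 * Illustrative device tree fragment (not part of the original file)
 * matching the tuple format parsed above; the values are made up:
 *
 *	operating-points = <
 *		396000 950000
 *		792000 1100000
 *	>;
 *
 * i.e. <freq-kHz vol-uV> pairs, which of_init_opp_table() registers via
 * dev_pm_opp_add() (note the kHz -> Hz conversion in the loop above).
 */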
#endif
| gpl-2.0 |
vickylinuxer/at91sam9263-kernel | drivers/video/fbcmap.c | 1665 | 8344 | /*
* linux/drivers/video/fbcmap.c -- Colormap handling for frame buffer devices
*
* Created 15 Jun 1997 by Geert Uytterhoeven
*
* 2001 - Documented with DocBook
* - Brad Douglas <brad@neruo.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
static u16 red2[] __read_mostly = {
0x0000, 0xaaaa
};
static u16 green2[] __read_mostly = {
0x0000, 0xaaaa
};
static u16 blue2[] __read_mostly = {
0x0000, 0xaaaa
};
static u16 red4[] __read_mostly = {
0x0000, 0xaaaa, 0x5555, 0xffff
};
static u16 green4[] __read_mostly = {
0x0000, 0xaaaa, 0x5555, 0xffff
};
static u16 blue4[] __read_mostly = {
0x0000, 0xaaaa, 0x5555, 0xffff
};
static u16 red8[] __read_mostly = {
0x0000, 0x0000, 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa
};
static u16 green8[] __read_mostly = {
0x0000, 0x0000, 0xaaaa, 0xaaaa, 0x0000, 0x0000, 0x5555, 0xaaaa
};
static u16 blue8[] __read_mostly = {
0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa
};
static u16 red16[] __read_mostly = {
0x0000, 0x0000, 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa,
0x5555, 0x5555, 0x5555, 0x5555, 0xffff, 0xffff, 0xffff, 0xffff
};
static u16 green16[] __read_mostly = {
0x0000, 0x0000, 0xaaaa, 0xaaaa, 0x0000, 0x0000, 0x5555, 0xaaaa,
0x5555, 0x5555, 0xffff, 0xffff, 0x5555, 0x5555, 0xffff, 0xffff
};
static u16 blue16[] __read_mostly = {
0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa,
0x5555, 0xffff, 0x5555, 0xffff, 0x5555, 0xffff, 0x5555, 0xffff
};
static const struct fb_cmap default_2_colors = {
.len=2, .red=red2, .green=green2, .blue=blue2
};
static const struct fb_cmap default_8_colors = {
.len=8, .red=red8, .green=green8, .blue=blue8
};
static const struct fb_cmap default_4_colors = {
.len=4, .red=red4, .green=green4, .blue=blue4
};
static const struct fb_cmap default_16_colors = {
.len=16, .red=red16, .green=green16, .blue=blue16
};
/**
* fb_alloc_cmap - allocate a colormap
* @cmap: frame buffer colormap structure
* @len: length of @cmap
* @transp: boolean, 1 if there is transparency, 0 otherwise
*
* Allocates memory for a colormap @cmap. @len is the
* number of entries in the palette.
*
* Returns negative errno on error, or zero on success.
*
*/
int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp)
{
int size = len*sizeof(u16);
if (cmap->len != len) {
fb_dealloc_cmap(cmap);
if (!len)
return 0;
if (!(cmap->red = kmalloc(size, GFP_ATOMIC)))
goto fail;
if (!(cmap->green = kmalloc(size, GFP_ATOMIC)))
goto fail;
if (!(cmap->blue = kmalloc(size, GFP_ATOMIC)))
goto fail;
if (transp) {
if (!(cmap->transp = kmalloc(size, GFP_ATOMIC)))
goto fail;
} else
cmap->transp = NULL;
}
cmap->start = 0;
cmap->len = len;
fb_copy_cmap(fb_default_cmap(len), cmap);
return 0;
fail:
fb_dealloc_cmap(cmap);
return -ENOMEM;
}
/**
* fb_dealloc_cmap - deallocate a colormap
* @cmap: frame buffer colormap structure
*
* Deallocates a colormap that was previously allocated with
* fb_alloc_cmap().
*
*/
void fb_dealloc_cmap(struct fb_cmap *cmap)
{
kfree(cmap->red);
kfree(cmap->green);
kfree(cmap->blue);
kfree(cmap->transp);
cmap->red = cmap->green = cmap->blue = cmap->transp = NULL;
cmap->len = 0;
}
/**
* fb_copy_cmap - copy a colormap
* @from: frame buffer colormap structure
* @to: frame buffer colormap structure
*
* Copy contents of colormap from @from to @to.
*/
int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
{
int tooff = 0, fromoff = 0;
int size;
if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
size = to->len - tooff;
if (size > (int) (from->len - fromoff))
size = from->len - fromoff;
if (size <= 0)
return -EINVAL;
size *= sizeof(u16);
memcpy(to->red+tooff, from->red+fromoff, size);
memcpy(to->green+tooff, from->green+fromoff, size);
memcpy(to->blue+tooff, from->blue+fromoff, size);
if (from->transp && to->transp)
memcpy(to->transp+tooff, from->transp+fromoff, size);
return 0;
}
int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
{
int tooff = 0, fromoff = 0;
int size;
if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
size = to->len - tooff;
if (size > (int) (from->len - fromoff))
size = from->len - fromoff;
if (size <= 0)
return -EINVAL;
size *= sizeof(u16);
if (copy_to_user(to->red+tooff, from->red+fromoff, size))
return -EFAULT;
if (copy_to_user(to->green+tooff, from->green+fromoff, size))
return -EFAULT;
if (copy_to_user(to->blue+tooff, from->blue+fromoff, size))
return -EFAULT;
if (from->transp && to->transp)
if (copy_to_user(to->transp+tooff, from->transp+fromoff, size))
return -EFAULT;
return 0;
}
/**
* fb_set_cmap - set the colormap
* @cmap: frame buffer colormap structure
* @info: frame buffer info structure
*
* Sets the colormap @cmap for a screen of device @info.
*
* Returns negative errno on error, or zero on success.
*
*/
int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
{
int i, start, rc = 0;
u16 *red, *green, *blue, *transp;
u_int hred, hgreen, hblue, htransp = 0xffff;
red = cmap->red;
green = cmap->green;
blue = cmap->blue;
transp = cmap->transp;
start = cmap->start;
if (start < 0 || (!info->fbops->fb_setcolreg &&
!info->fbops->fb_setcmap))
return -EINVAL;
if (info->fbops->fb_setcmap) {
rc = info->fbops->fb_setcmap(cmap, info);
} else {
for (i = 0; i < cmap->len; i++) {
hred = *red++;
hgreen = *green++;
hblue = *blue++;
if (transp)
htransp = *transp++;
if (info->fbops->fb_setcolreg(start++,
hred, hgreen, hblue,
htransp, info))
break;
}
}
if (rc == 0)
fb_copy_cmap(cmap, &info->cmap);
return rc;
}
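/*
 * Illustrative sketch (not part of the original file): a driver pushing
 * a default palette into its framebuffer. 'info' is a hypothetical,
 * already-registered fb_info.
 */
#if 0
static int example_load_default_palette(struct fb_info *info)
{
	struct fb_cmap cmap;
	int rc;

	memset(&cmap, 0, sizeof(cmap));
	rc = fb_alloc_cmap(&cmap, 16, 0);	/* also copies the default map */
	if (rc)
		return rc;
	rc = fb_set_cmap(&cmap, info);
	fb_dealloc_cmap(&cmap);
	return rc;
}
#endif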
int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
{
int rc, size = cmap->len * sizeof(u16);
struct fb_cmap umap;
memset(&umap, 0, sizeof(struct fb_cmap));
rc = fb_alloc_cmap(&umap, cmap->len, cmap->transp != NULL);
if (rc)
return rc;
if (copy_from_user(umap.red, cmap->red, size) ||
copy_from_user(umap.green, cmap->green, size) ||
copy_from_user(umap.blue, cmap->blue, size) ||
(cmap->transp && copy_from_user(umap.transp, cmap->transp, size))) {
rc = -EFAULT;
goto out;
}
umap.start = cmap->start;
if (!lock_fb_info(info)) {
rc = -ENODEV;
goto out;
}
if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
!info->fbops->fb_setcmap)) {
rc = -EINVAL;
goto out1;
}
rc = fb_set_cmap(&umap, info);
out1:
unlock_fb_info(info);
out:
fb_dealloc_cmap(&umap);
return rc;
}
/**
* fb_default_cmap - get default colormap
* @len: size of palette for a depth
*
* Gets the default colormap for a specific screen depth. @len
* is the size of the palette for a particular screen depth.
*
* Returns pointer to a frame buffer colormap structure.
*
*/
const struct fb_cmap *fb_default_cmap(int len)
{
if (len <= 2)
return &default_2_colors;
if (len <= 4)
return &default_4_colors;
if (len <= 8)
return &default_8_colors;
return &default_16_colors;
}
/**
* fb_invert_cmaps - invert all defaults colormaps
*
* Invert all default colormaps.
*
*/
void fb_invert_cmaps(void)
{
u_int i;
for (i = 0; i < ARRAY_SIZE(red2); i++) {
red2[i] = ~red2[i];
green2[i] = ~green2[i];
blue2[i] = ~blue2[i];
}
for (i = 0; i < ARRAY_SIZE(red4); i++) {
red4[i] = ~red4[i];
green4[i] = ~green4[i];
blue4[i] = ~blue4[i];
}
for (i = 0; i < ARRAY_SIZE(red8); i++) {
red8[i] = ~red8[i];
green8[i] = ~green8[i];
blue8[i] = ~blue8[i];
}
for (i = 0; i < ARRAY_SIZE(red16); i++) {
red16[i] = ~red16[i];
green16[i] = ~green16[i];
blue16[i] = ~blue16[i];
}
}
/*
* Visible symbols for modules
*/
EXPORT_SYMBOL(fb_alloc_cmap);
EXPORT_SYMBOL(fb_dealloc_cmap);
EXPORT_SYMBOL(fb_copy_cmap);
EXPORT_SYMBOL(fb_set_cmap);
EXPORT_SYMBOL(fb_default_cmap);
EXPORT_SYMBOL(fb_invert_cmaps);
| gpl-2.0 |
surdupetru/p6-kernel | arch/powerpc/kernel/perf_event_fsl_emb.c | 2433 | 14598 | /*
* Performance event support - Freescale Embedded Performance Monitor
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
int n_events;
int disabled;
u8 pmcs_enabled;
struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static struct fsl_emb_pmu *ppmu;
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* If interrupts were soft-disabled when a PMU interrupt occurs, treat
* it as an NMI.
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
return !regs->softe;
#else
return 0;
#endif
}
static void perf_event_interrupt(struct pt_regs *regs);
/*
* Read one performance monitor counter (PMC).
*/
static unsigned long read_pmc(int idx)
{
unsigned long val;
switch (idx) {
case 0:
val = mfpmr(PMRN_PMC0);
break;
case 1:
val = mfpmr(PMRN_PMC1);
break;
case 2:
val = mfpmr(PMRN_PMC2);
break;
case 3:
val = mfpmr(PMRN_PMC3);
break;
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
}
return val;
}
/*
* Write one PMC.
*/
static void write_pmc(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMC0, val);
break;
case 1:
mtpmr(PMRN_PMC1, val);
break;
case 2:
mtpmr(PMRN_PMC2, val);
break;
case 3:
mtpmr(PMRN_PMC3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
isync();
}
/*
* Write one local control A register
*/
static void write_pmlca(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCA0, val);
break;
case 1:
mtpmr(PMRN_PMLCA1, val);
break;
case 2:
mtpmr(PMRN_PMLCA2, val);
break;
case 3:
mtpmr(PMRN_PMLCA3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
}
isync();
}
/*
* Write one local control B register
*/
static void write_pmlcb(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCB0, val);
break;
case 1:
mtpmr(PMRN_PMLCB1, val);
break;
case 2:
mtpmr(PMRN_PMLCB2, val);
break;
case 3:
mtpmr(PMRN_PMLCB3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
}
isync();
}
static void fsl_emb_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
return;
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
* Therefore we treat them like NMIs.
*/
do {
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
/* The counters are only 32 bits wide */
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
/*
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
/*
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
if (atomic_read(&num_events)) {
/*
* Set the 'freeze all counters' bit, and disable
* interrupts. The barrier is to make sure the
* mtpmr has been executed and the PMU has frozen
* the events before we return.
*/
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
}
local_irq_restore(flags);
}
/*
* Re-enable all events if disable == 0.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled)
goto out;
cpuhw->disabled = 0;
ppc_set_pmu_inuse(cpuhw->n_events != 0);
if (cpuhw->n_events > 0) {
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
}
out:
local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[])
{
int n = 0;
struct perf_event *event;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
n++;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
ctrs[n] = event;
n++;
}
}
return n;
}
/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int ret = -EAGAIN;
int num_counters = ppmu->n_counter;
u64 val;
int i;
perf_pmu_disable(event->pmu);
cpuhw = &get_cpu_var(cpu_hw_events);
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_counters = ppmu->n_restricted;
/*
* Allocate counters from top-down, so that restricted-capable
* counters are kept free as long as possible.
*/
for (i = num_counters - 1; i >= 0; i--) {
if (cpuhw->event[i])
continue;
break;
}
if (i < 0)
goto out;
event->hw.idx = i;
cpuhw->event[i] = event;
++cpuhw->n_events;
val = 0;
if (event->hw.sample_period) {
s64 left = local64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
if (!(flags & PERF_EF_START)) {
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
val = 0;
}
write_pmc(i, val);
perf_event_update_userpage(event);
write_pmlcb(i, event->hw.config >> 32);
write_pmlca(i, event->hw.config_base);
ret = 0;
out:
put_cpu_var(cpu_hw_events);
perf_pmu_enable(event->pmu);
return ret;
}
/* context locked on entry */
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int i = event->hw.idx;
perf_pmu_disable(event->pmu);
if (i < 0)
goto out;
fsl_emb_pmu_read(event);
cpuhw = &get_cpu_var(cpu_hw_events);
WARN_ON(event != cpuhw->event[event->hw.idx]);
write_pmlca(i, 0);
write_pmlcb(i, 0);
write_pmc(i, 0);
cpuhw->event[i] = NULL;
event->hw.idx = -1;
/*
* TODO: if at least one restricted event exists, and we
* just freed up a non-restricted-capable counter, and
* there is a restricted-capable counter occupied by
* a non-restricted event, migrate that event to the
* vacated counter.
*/
cpuhw->n_events--;
out:
perf_pmu_enable(event->pmu);
put_cpu_var(cpu_hw_events);
}
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (ef_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
local_irq_save(flags);
perf_pmu_disable(event->pmu);
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
write_pmc(event->hw.idx, left);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
unsigned long flags;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
if (event->hw.state & PERF_HES_STOPPED)
return;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
fsl_emb_pmu_read(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
write_pmc(event->hw.idx, 0);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
* Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
int ev;
if (!ppmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*ppmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*eventp = ev;
return 0;
}
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
int n;
int err;
int num_restricted;
int i;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
return -ENOENT;
}
event->hw.config = ppmu->xlate_event(ev);
if (!(event->hw.config & FSL_EMB_EVENT_VALID))
return -EINVAL;
/*
* If this is in a group, check if it can go on with all the
* other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
ppmu->n_counter - 1, events);
if (n < 0)
return -EINVAL;
}
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
num_restricted = 0;
for (i = 0; i < n; i++) {
if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_restricted++;
}
if (num_restricted >= ppmu->n_restricted)
return -EINVAL;
}
event->hw.idx = -1;
event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
(u32)((ev << 16) & PMLCA_EVENT_MASK);
if (event->attr.exclude_user)
event->hw.config_base |= PMLCA_FCU;
if (event->attr.exclude_kernel)
event->hw.config_base |= PMLCA_FCS;
if (event->attr.exclude_idle)
return -ENOTSUPP;
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
event->destroy = hw_perf_event_destroy;
return err;
}
static struct pmu fsl_emb_pmu = {
.pmu_enable = fsl_emb_pmu_enable,
.pmu_disable = fsl_emb_pmu_disable,
.event_init = fsl_emb_pmu_event_init,
.add = fsl_emb_pmu_add,
.del = fsl_emb_pmu_del,
.start = fsl_emb_pmu_start,
.stop = fsl_emb_pmu_stop,
.read = fsl_emb_pmu_read,
};
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
if (perf_event_overflow(event, nmi, &data, regs))
fsl_emb_pmu_stop(event, 0);
}
}
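/*
 * Illustrative sketch (not part of the driver): the counter start value
 * chosen above. A 32-bit counter programmed to 0x80000000 - left sets
 * bit 31 after `left` more events, which is exactly the (int)val < 0
 * overflow test used in perf_event_interrupt() below.
 */
static inline unsigned long example_pmc_start_value(s64 left)
{
	if (left > 0 && left < 0x80000000LL)
		return 0x80000000LL - left;
	return 0;	/* out of range: let the counter free-run from 0 */
}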
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < ppmu->n_counter; ++i) {
event = cpuhw->event[i];
val = read_pmc(i);
if ((int)val < 0) {
if (event) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs, nmi);
} else {
/*
* A disabled counter reads back negative;
* reset it just in case.
*/
write_pmc(i, 0);
}
}
}
/* PMM will keep counters frozen until we return from the interrupt. */
mtmsr(mfmsr() | MSR_PMM);
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
if (nmi)
nmi_exit();
else
irq_exit();
}
void hw_perf_event_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
memset(cpuhw, 0, sizeof(*cpuhw));
}
int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
| gpl-2.0 |
tarunkapadia93/android_kernel_xiaomi_cancro | drivers/md/persistent-data/dm-space-map-disk.c | 2945 | 7172 | /*
* Copyright (C) 2011 Red Hat, Inc.
*
* This file is released under the GPL.
*/
#include "dm-space-map-checker.h"
#include "dm-space-map-common.h"
#include "dm-space-map-disk.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "space map disk"
/*----------------------------------------------------------------*/
/*
* Space map interface.
*/
struct sm_disk {
struct dm_space_map sm;
struct ll_disk ll;
struct ll_disk old_ll;
dm_block_t begin;
dm_block_t nr_allocated_this_transaction;
};
static void sm_disk_destroy(struct dm_space_map *sm)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
kfree(smd);
}
static int sm_disk_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
return sm_ll_extend(&smd->ll, extra_blocks);
}
static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
*count = smd->old_ll.nr_blocks;
return 0;
}
static int sm_disk_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
*count = (smd->old_ll.nr_blocks - smd->old_ll.nr_allocated) - smd->nr_allocated_this_transaction;
return 0;
}
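/*
 * Illustrative sketch (not part of the driver): the arithmetic used in
 * sm_disk_get_nr_free() above. Blocks allocated in the open transaction
 * are subtracted from the free space of the last committed state, e.g.
 * 1000 blocks - 200 committed allocations - 50 new ones = 750 free.
 */
static inline dm_block_t example_nr_free(dm_block_t nr_blocks,
		dm_block_t committed_allocs, dm_block_t new_allocs)
{
	return (nr_blocks - committed_allocs) - new_allocs;
}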
static int sm_disk_get_count(struct dm_space_map *sm, dm_block_t b,
uint32_t *result)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
return sm_ll_lookup(&smd->ll, b, result);
}
static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
int *result)
{
int r;
uint32_t count;
r = sm_disk_get_count(sm, b, &count);
if (r)
return r;
	*result = count > 1;
	return 0;
}
static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
uint32_t count)
{
int r;
uint32_t old_count;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
r = sm_ll_insert(&smd->ll, b, count, &ev);
if (!r) {
switch (ev) {
case SM_NONE:
break;
case SM_ALLOC:
/*
* This _must_ be free in the prior transaction
* otherwise we've lost atomicity.
*/
smd->nr_allocated_this_transaction++;
break;
case SM_FREE:
/*
* It's only free if it's also free in the last
* transaction.
*/
r = sm_ll_lookup(&smd->old_ll, b, &old_count);
if (r)
return r;
if (!old_count)
smd->nr_allocated_this_transaction--;
break;
}
}
return r;
}
static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
{
int r;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
r = sm_ll_inc(&smd->ll, b, &ev);
if (!r && (ev == SM_ALLOC))
/*
* This _must_ be free in the prior transaction
* otherwise we've lost atomicity.
*/
smd->nr_allocated_this_transaction++;
return r;
}
static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
{
int r;
uint32_t old_count;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
r = sm_ll_dec(&smd->ll, b, &ev);
if (!r && (ev == SM_FREE)) {
/*
* It's only free if it's also free in the last
* transaction.
*/
r = sm_ll_lookup(&smd->old_ll, b, &old_count);
if (r)
return r;
if (!old_count)
smd->nr_allocated_this_transaction--;
}
return r;
}
static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
{
int r;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
/* FIXME: we should loop round a couple of times */
r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b);
if (r)
return r;
smd->begin = *b + 1;
r = sm_ll_inc(&smd->ll, *b, &ev);
if (!r) {
BUG_ON(ev != SM_ALLOC);
smd->nr_allocated_this_transaction++;
}
return r;
}
static int sm_disk_commit(struct dm_space_map *sm)
{
int r;
dm_block_t nr_free;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
r = sm_disk_get_nr_free(sm, &nr_free);
if (r)
return r;
r = sm_ll_commit(&smd->ll);
if (r)
return r;
memcpy(&smd->old_ll, &smd->ll, sizeof(smd->old_ll));
smd->begin = 0;
smd->nr_allocated_this_transaction = 0;
r = sm_disk_get_nr_free(sm, &nr_free);
if (r)
return r;
return 0;
}
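/*
 * Illustrative sketch (not part of the driver): the bookkeeping done by
 * sm_disk_commit() above once sm_ll_commit() has flushed the metadata.
 * 'll' accumulates changes for the open transaction while 'old_ll'
 * holds the last committed state; commit promotes one to the other and
 * resets the per-transaction counters.
 */
static inline void example_commit_bookkeeping(struct sm_disk *smd)
{
	smd->old_ll = smd->ll;		/* promote committed state */
	smd->begin = 0;			/* restart the allocation scan */
	smd->nr_allocated_this_transaction = 0;
}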
static int sm_disk_root_size(struct dm_space_map *sm, size_t *result)
{
*result = sizeof(struct disk_sm_root);
return 0;
}
static int sm_disk_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
struct disk_sm_root root_le;
root_le.nr_blocks = cpu_to_le64(smd->ll.nr_blocks);
root_le.nr_allocated = cpu_to_le64(smd->ll.nr_allocated);
root_le.bitmap_root = cpu_to_le64(smd->ll.bitmap_root);
root_le.ref_count_root = cpu_to_le64(smd->ll.ref_count_root);
if (max < sizeof(root_le))
return -ENOSPC;
memcpy(where_le, &root_le, sizeof(root_le));
return 0;
}
/*----------------------------------------------------------------*/
static struct dm_space_map ops = {
.destroy = sm_disk_destroy,
.extend = sm_disk_extend,
.get_nr_blocks = sm_disk_get_nr_blocks,
.get_nr_free = sm_disk_get_nr_free,
.get_count = sm_disk_get_count,
.count_is_more_than_one = sm_disk_count_is_more_than_one,
.set_count = sm_disk_set_count,
.inc_block = sm_disk_inc_block,
.dec_block = sm_disk_dec_block,
.new_block = sm_disk_new_block,
.commit = sm_disk_commit,
.root_size = sm_disk_root_size,
.copy_root = sm_disk_copy_root
};
static struct dm_space_map *dm_sm_disk_create_real(
struct dm_transaction_manager *tm,
dm_block_t nr_blocks)
{
int r;
struct sm_disk *smd;
smd = kmalloc(sizeof(*smd), GFP_KERNEL);
if (!smd)
return ERR_PTR(-ENOMEM);
smd->begin = 0;
smd->nr_allocated_this_transaction = 0;
memcpy(&smd->sm, &ops, sizeof(smd->sm));
r = sm_ll_new_disk(&smd->ll, tm);
if (r)
goto bad;
r = sm_ll_extend(&smd->ll, nr_blocks);
if (r)
goto bad;
r = sm_disk_commit(&smd->sm);
if (r)
goto bad;
return &smd->sm;
bad:
kfree(smd);
return ERR_PTR(r);
}
struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
dm_block_t nr_blocks)
{
struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
return dm_sm_checker_create_fresh(sm);
}
EXPORT_SYMBOL_GPL(dm_sm_disk_create);
static struct dm_space_map *dm_sm_disk_open_real(
struct dm_transaction_manager *tm,
void *root_le, size_t len)
{
int r;
struct sm_disk *smd;
smd = kmalloc(sizeof(*smd), GFP_KERNEL);
if (!smd)
return ERR_PTR(-ENOMEM);
smd->begin = 0;
smd->nr_allocated_this_transaction = 0;
memcpy(&smd->sm, &ops, sizeof(smd->sm));
r = sm_ll_open_disk(&smd->ll, tm, root_le, len);
if (r)
goto bad;
r = sm_disk_commit(&smd->sm);
if (r)
goto bad;
return &smd->sm;
bad:
kfree(smd);
return ERR_PTR(r);
}
struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm,
void *root_le, size_t len)
{
return dm_sm_checker_create(
dm_sm_disk_open_real(tm, root_le, len));
}
EXPORT_SYMBOL_GPL(dm_sm_disk_open);
/*----------------------------------------------------------------*/
| gpl-2.0 |
radxa/linux-rockchip | drivers/net/ixgbevf/vf.c | 2945 | 12570 | /*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "vf.h"
/**
* ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
* Starts the hardware by filling the bus info structure and media type, clears
* all on-chip counters, initializes receive address registers, multicast
* table, VLAN filter table, calls routine to set up link and flow control
* settings, and leaves transmit and receive units disabled and uninitialized
**/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
return 0;
}
/**
* ixgbevf_init_hw_vf - virtual function hardware initialization
* @hw: pointer to hardware structure
*
* Initialize the hardware by resetting the hardware and then starting
* the hardware
**/
static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
{
s32 status = hw->mac.ops.start_hw(hw);
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
return status;
}
/**
* ixgbevf_reset_hw_vf - Performs hardware reset
* @hw: pointer to hardware structure
*
* Resets the hardware by resetting the transmit and receive units, masks and
* clears all interrupts.
**/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 timeout = IXGBE_VF_INIT_TIMEOUT;
s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
u8 *addr = (u8 *)(&msgbuf[1]);
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
IXGBE_WRITE_FLUSH(hw);
/* we cannot reset while the RSTI / RSTD bits are asserted */
while (!mbx->ops.check_for_rst(hw) && timeout) {
timeout--;
udelay(5);
}
if (!timeout)
return IXGBE_ERR_RESET_FAILED;
/* mailbox timeout can now become active */
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
msgbuf[0] = IXGBE_VF_RESET;
mbx->ops.write_posted(hw, msgbuf, 1);
msleep(10);
/* set our "perm_addr" based on info provided by PF */
/* also set up the mc_filter_type which is piggybacked
* on the mac address in word 3 */
ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
if (ret_val)
return ret_val;
if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
return IXGBE_ERR_INVALID_MAC_ADDR;
memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
return 0;
}
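/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * the shape of the reset reply parsed above. Word 0 must echo
 * IXGBE_VF_RESET with the ACK bit set; words 1-2 carry the 6-byte
 * permanent MAC address and word IXGBE_VF_MC_TYPE_WORD the multicast
 * filter type.
 */
static inline s32 example_parse_reset_reply(u32 *msgbuf, u8 *mac,
		u32 *mc_filter_type)
{
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;
	memcpy(mac, &msgbuf[1], 6);
	*mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
	return 0;
}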
/**
* ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
* @hw: pointer to hardware structure
*
* Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
* disables transmit and receive units. The adapter_stopped flag is used by
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
u32 number_of_queues;
u32 reg_val;
u16 i;
/*
* Set the adapter_stopped flag so other driver functions stop touching
* the hardware
*/
hw->adapter_stopped = true;
/* Disable the receive unit by stopping each queue */
number_of_queues = hw->mac.max_rx_queues;
for (i = 0; i < number_of_queues; i++) {
reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
if (reg_val & IXGBE_RXDCTL_ENABLE) {
reg_val &= ~IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
}
}
IXGBE_WRITE_FLUSH(hw);
/* Clear the interrupt mask to stop interrupts from being generated */
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
/* Clear any pending interrupts */
IXGBE_READ_REG(hw, IXGBE_VTEICR);
/* Disable the transmit unit. Each queue must be disabled. */
number_of_queues = hw->mac.max_tx_queues;
for (i = 0; i < number_of_queues; i++) {
reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
if (reg_val & IXGBE_TXDCTL_ENABLE) {
reg_val &= ~IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
}
}
return 0;
}
/**
* ixgbevf_mta_vector - Determines bit-vector in multicast table to set
* @hw: pointer to hardware structure
* @mc_addr: the multicast address
*
* Extracts the 12 bits, from a multicast address, to determine which
* bit-vector to set in the multicast table. The hardware uses 12 bits, from
* incoming rx multicast addresses, to determine the bit-vector to check in
* the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
u32 vector = 0;
switch (hw->mac.mc_filter_type) {
case 0: /* use bits [47:36] of the address */
vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
break;
case 1: /* use bits [46:35] of the address */
vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
break;
case 2: /* use bits [45:34] of the address */
vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
break;
case 3: /* use bits [43:32] of the address */
vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
break;
default: /* Invalid mc_filter_type */
break;
}
/* the vector can only be 12 bits wide or the table boundary will be exceeded */
vector &= 0xFFF;
return vector;
}
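/*
 * Illustrative worked example (not part of the driver): with filter
 * type 0, the all-hosts multicast address 01:00:5e:00:00:01 hashes to
 * (0x00 >> 4) | (0x01 << 4) = 0x010.
 */
static inline u32 example_mta_vector_type0(const u8 *mc_addr)
{
	/* bits [47:36]: top nibble of byte 4 plus all of byte 5 */
	return ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)) & 0xFFF;
}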
/**
* ixgbevf_get_mac_addr_vf - Read device MAC address
* @hw: pointer to the HW structure
* @mac_addr: pointer to storage for retrieved MAC address
**/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
return 0;
}
static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
memset(msgbuf, 0, sizeof(msgbuf));
/*
* If index is one then this is the start of a new list and needs
* indication to the PF so it can do its own list management.
* If it is zero then that tells the PF to just clear all of
* this VF's macvlans and there is no new list.
*/
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
if (addr)
memcpy(msg_addr, addr, 6);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
if (!ret_val)
if (msgbuf[0] ==
(IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
ret_val = -ENOMEM;
return ret_val;
}
/**
* ixgbevf_set_rar_vf - set device MAC address
* @hw: pointer to hardware structure
* @index: Receive address register to write
* @addr: Address to put into receive address register
* @vmdq: Unused in this implementation
**/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
u32 vmdq)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
memcpy(msg_addr, addr, 6);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
(msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
return ret_val;
}
/**
* ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
* @netdev: pointer to net device structure
*
* Updates the Multicast Table Array.
**/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
struct net_device *netdev)
{
struct netdev_hw_addr *ha;
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
u32 cnt, i;
/* Each entry in the list uses one 16-bit word. We have 30
* 16-bit words available in our HW msg buffer (minus 1 for the
* msg type). That's 30 hash values if we pack them right. If
* there are more than 30 MC addresses to add then punt the
* extras for now and add code to handle more than 30 later.
* It would be unusual for a server to request that many multicast
* addresses except in large enterprise network environments.
*/
cnt = netdev_mc_count(netdev);
if (cnt > 30)
cnt = 30;
msgbuf[0] = IXGBE_VF_SET_MULTICAST;
msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
i = 0;
netdev_for_each_mc_addr(ha, netdev) {
if (i == cnt)
break;
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
return 0;
}
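/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * how the 12-bit hash vectors are packed above. Word 0 carries the
 * message type plus the entry count; the vectors occupy consecutive
 * 16-bit slots starting at word 1.
 */
static inline void example_pack_mc_vectors(u32 *msgbuf, u16 v0, u16 v1)
{
	u16 *slots = (u16 *)&msgbuf[1];

	slots[0] = v0 & 0xFFF;		/* vectors are at most 12 bits */
	slots[1] = v1 & 0xFFF;
}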
/**
* ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers
* @vlan_on: if true then set bit, else clear bit
**/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
msgbuf[0] = IXGBE_VF_SET_VLAN;
msgbuf[1] = vlan;
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
return mbx->ops.write_posted(hw, msgbuf, 2);
}
/**
* ixgbevf_setup_mac_link_vf - Setup MAC link settings
* @hw: pointer to hardware structure
* @speed: Unused in this implementation
* @autoneg: Unused in this implementation
* @autoneg_wait_to_complete: Unused in this implementation
*
* Do nothing and return success. VF drivers are not allowed to change
* global settings. Maintained for driver compatibility.
**/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete)
{
return 0;
}
/**
* ixgbevf_check_mac_link_vf - Get link/speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @link_up: true if link is up, false otherwise
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Reads the links register to determine if link is up and the current speed
**/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up,
bool autoneg_wait_to_complete)
{
u32 links_reg;
if (!(hw->mbx.ops.check_for_rst(hw))) {
*link_up = false;
*speed = 0;
return -1;
}
links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
if (links_reg & IXGBE_LINKS_UP)
*link_up = true;
else
*link_up = false;
if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
IXGBE_LINKS_SPEED_10G_82599)
*speed = IXGBE_LINK_SPEED_10GB_FULL;
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
return 0;
}
static struct ixgbe_mac_operations ixgbevf_mac_ops = {
.init_hw = ixgbevf_init_hw_vf,
.reset_hw = ixgbevf_reset_hw_vf,
.start_hw = ixgbevf_start_hw_vf,
.get_mac_addr = ixgbevf_get_mac_addr_vf,
.stop_adapter = ixgbevf_stop_hw_vf,
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
};
struct ixgbevf_info ixgbevf_82599_vf_info = {
.mac = ixgbe_mac_82599_vf,
.mac_ops = &ixgbevf_mac_ops,
};
struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};
| gpl-2.0 |
cyandream-devices/neak-n7100-jb | drivers/staging/bcm/hostmibs.c | 3201 | 8463 |
/*
* File Name: hostmibs.c
*
* Author: Beceem Communications Pvt. Ltd
*
* Abstract: This file contains the routines to copy the statistics used by
* the driver to the Host MIBS structure and hand them to the application.
*
*/
#include "headers.h"
INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs)
{
S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
S_PHS_RULE *pstPhsRule = NULL;
S_CLASSIFIER_TABLE *pstClassifierTable = NULL;
S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
PPHS_DEVICE_EXTENSION pDeviceExtension = (PPHS_DEVICE_EXTENSION)&Adapter->stBCMPhsContext;
UINT nClassifierIndex = 0, nPhsTableIndex = 0,nSfIndex = 0, uiIndex = 0;
if(pDeviceExtension == NULL)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, HOST_MIBS, DBG_LVL_ALL, "Invalid Device Extension\n");
return STATUS_FAILURE;
}
//Copy the classifier Table
for(nClassifierIndex=0; nClassifierIndex < MAX_CLASSIFIERS;
nClassifierIndex++)
{
if(Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
memcpy((PVOID)&pstHostMibs->astClassifierTable[nClassifierIndex],
(PVOID)&Adapter->astClassifierTable[nClassifierIndex],
sizeof(S_MIBS_CLASSIFIER_RULE));
}
//Copy the SF Table
for(nSfIndex=0; nSfIndex < NO_OF_QUEUES ; nSfIndex++)
{
if(Adapter->PackInfo[nSfIndex].bValid)
{
memcpy((PVOID)&pstHostMibs->astSFtable[nSfIndex],(PVOID)&Adapter->PackInfo[nSfIndex],sizeof(S_MIBS_SERVICEFLOW_TABLE));
}
else
{
//if the index is not valid, don't process this entry for the PHS table; go to the next one.
continue ;
}
//Retrieve the SFID Entry Index for requested Service Flow
if(PHS_INVALID_TABLE_INDEX == GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
Adapter->PackInfo[nSfIndex].usVCID_Value ,&pstServiceFlowEntry))
{
continue;
}
pstClassifierTable = pstServiceFlowEntry->pstClassifierTable;
for(uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++)
{
pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[uiIndex];
if(pstClassifierRule->bUsed)
{
pstPhsRule = pstClassifierRule->pstPhsRule;
pstHostMibs->astPhsRulesTable[nPhsTableIndex].ulSFID = Adapter->PackInfo[nSfIndex].ulSFID;
memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
&pstPhsRule->u8PHSI,
sizeof(S_PHS_RULE));
nPhsTableIndex++;
}
}
}
//copy other Host Statistics parameters
pstHostMibs->stHostInfo.GoodTransmits = Adapter->dev->stats.tx_packets;
pstHostMibs->stHostInfo.GoodReceives = Adapter->dev->stats.rx_packets;
pstHostMibs->stHostInfo.CurrNumFreeDesc =
atomic_read(&Adapter->CurrNumFreeTxDesc);
pstHostMibs->stHostInfo.BEBucketSize = Adapter->BEBucketSize;
pstHostMibs->stHostInfo.rtPSBucketSize = Adapter->rtPSBucketSize;
pstHostMibs->stHostInfo.TimerActive = Adapter->TimerActive;
pstHostMibs->stHostInfo.u32TotalDSD = Adapter->u32TotalDSD;
memcpy(pstHostMibs->stHostInfo.aTxPktSizeHist,Adapter->aTxPktSizeHist,sizeof(UINT32)*MIBS_MAX_HIST_ENTRIES);
memcpy(pstHostMibs->stHostInfo.aRxPktSizeHist,Adapter->aRxPktSizeHist,sizeof(UINT32)*MIBS_MAX_HIST_ENTRIES);
return STATUS_SUCCESS;
}
VOID GetDroppedAppCntrlPktMibs(S_MIBS_HOST_STATS_MIBS *pstHostMibs, const PPER_TARANG_DATA pTarang)
{
memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs),
&(pTarang->stDroppedAppCntrlMsgs),sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
}
VOID CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter,
CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex)
{
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfSfid = psfLocalSet->u32SFID;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxSustainedRate = psfLocalSet->u32MaxSustainedTrafficRate;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd = psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType = psfLocalSet->u8ServiceFlowSchedulingType;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime = ntohs(psfLocalSet->u16ARQBlockLifeTime);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout = ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout = ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid);
}
| gpl-2.0 |
Suninus/Xiaomi_Kernel_OpenSource | arch/arm/mach-orion5x/rd88f5182-setup.c | 4737 | 7768 | /*
* arch/arm/mach-orion5x/rd88f5182-setup.c
*
* Marvell Orion-NAS Reference Design Setup
*
* Maintainer: Ronen Shitrit <rshitrit@marvell.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/mtd/physmap.h>
#include <linux/mv643xx_eth.h>
#include <linux/ata_platform.h>
#include <linux/i2c.h>
#include <asm/mach-types.h>
#include <asm/leds.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
#include <mach/orion5x.h>
#include "common.h"
#include "mpp.h"
/*****************************************************************************
* RD-88F5182 Info
****************************************************************************/
/*
* 512K NOR flash Device bus boot chip select
*/
#define RD88F5182_NOR_BOOT_BASE 0xf4000000
#define RD88F5182_NOR_BOOT_SIZE SZ_512K
/*
* 16M NOR flash on Device bus chip select 1
*/
#define RD88F5182_NOR_BASE 0xfc000000
#define RD88F5182_NOR_SIZE SZ_16M
/*
* PCI
*/
#define RD88F5182_PCI_SLOT0_OFFS 7
#define RD88F5182_PCI_SLOT0_IRQ_A_PIN 7
#define RD88F5182_PCI_SLOT0_IRQ_B_PIN 6
/*
* GPIO Debug LED
*/
#define RD88F5182_GPIO_DBG_LED 0
/*****************************************************************************
* 16M NOR Flash on Device bus CS1
****************************************************************************/
static struct physmap_flash_data rd88f5182_nor_flash_data = {
.width = 1,
};
static struct resource rd88f5182_nor_flash_resource = {
.flags = IORESOURCE_MEM,
.start = RD88F5182_NOR_BASE,
.end = RD88F5182_NOR_BASE + RD88F5182_NOR_SIZE - 1,
};
static struct platform_device rd88f5182_nor_flash = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &rd88f5182_nor_flash_data,
},
.num_resources = 1,
.resource = &rd88f5182_nor_flash_resource,
};
#ifdef CONFIG_LEDS
/*****************************************************************************
* Use the GPIO debug LED as a CPU activity indication
****************************************************************************/
static void rd88f5182_dbgled_event(led_event_t evt)
{
int val;
if (evt == led_idle_end)
val = 1;
else if (evt == led_idle_start)
val = 0;
else
return;
gpio_set_value(RD88F5182_GPIO_DBG_LED, val);
}
static int __init rd88f5182_dbgled_init(void)
{
int pin;
if (machine_is_rd88f5182()) {
pin = RD88F5182_GPIO_DBG_LED;
if (gpio_request(pin, "DBGLED") == 0) {
if (gpio_direction_output(pin, 0) != 0) {
printk(KERN_ERR "rd88f5182_dbgled_init failed "
"to set output pin %d\n", pin);
gpio_free(pin);
return 0;
}
} else {
printk(KERN_ERR "rd88f5182_dbgled_init failed "
"to request gpio %d\n", pin);
return 0;
}
leds_event = rd88f5182_dbgled_event;
}
return 0;
}
__initcall(rd88f5182_dbgled_init);
#endif
/*****************************************************************************
* PCI
****************************************************************************/
void __init rd88f5182_pci_preinit(void)
{
int pin;
/*
* Configure PCI GPIO IRQ pins
*/
pin = RD88F5182_PCI_SLOT0_IRQ_A_PIN;
if (gpio_request(pin, "PCI IntA") == 0) {
if (gpio_direction_input(pin) == 0) {
irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "rd88f5182_pci_preinit failed to "
"set_irq_type pin %d\n", pin);
gpio_free(pin);
}
} else {
printk(KERN_ERR "rd88f5182_pci_preinit failed to request gpio %d\n", pin);
}
pin = RD88F5182_PCI_SLOT0_IRQ_B_PIN;
if (gpio_request(pin, "PCI IntB") == 0) {
if (gpio_direction_input(pin) == 0) {
irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "rd88f5182_pci_preinit failed to "
"set_irq_type pin %d\n", pin);
gpio_free(pin);
}
} else {
printk(KERN_ERR "rd88f5182_pci_preinit failed to gpio_request %d\n", pin);
}
}
static int __init rd88f5182_pci_map_irq(const struct pci_dev *dev, u8 slot,
u8 pin)
{
int irq;
/*
* Check for devices with hard-wired IRQs.
*/
irq = orion5x_pci_map_irq(dev, slot, pin);
if (irq != -1)
return irq;
/*
* PCI IRQs are connected via GPIOs
*/
switch (slot - RD88F5182_PCI_SLOT0_OFFS) {
case 0:
if (pin == 1)
return gpio_to_irq(RD88F5182_PCI_SLOT0_IRQ_A_PIN);
else
return gpio_to_irq(RD88F5182_PCI_SLOT0_IRQ_B_PIN);
default:
return -1;
}
}
static struct hw_pci rd88f5182_pci __initdata = {
.nr_controllers = 2,
.preinit = rd88f5182_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = orion5x_pci_sys_setup,
.scan = orion5x_pci_sys_scan_bus,
.map_irq = rd88f5182_pci_map_irq,
};
static int __init rd88f5182_pci_init(void)
{
if (machine_is_rd88f5182())
pci_common_init(&rd88f5182_pci);
return 0;
}
subsys_initcall(rd88f5182_pci_init);
/*****************************************************************************
* Ethernet
****************************************************************************/
static struct mv643xx_eth_platform_data rd88f5182_eth_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
/*****************************************************************************
* RTC DS1338 on I2C bus
****************************************************************************/
static struct i2c_board_info __initdata rd88f5182_i2c_rtc = {
I2C_BOARD_INFO("ds1338", 0x68),
};
/*****************************************************************************
* Sata
****************************************************************************/
static struct mv_sata_platform_data rd88f5182_sata_data = {
.n_ports = 2,
};
/*****************************************************************************
* General Setup
****************************************************************************/
static unsigned int rd88f5182_mpp_modes[] __initdata = {
MPP0_GPIO, /* Debug Led */
MPP1_GPIO, /* Reset Switch */
MPP2_UNUSED,
MPP3_GPIO, /* RTC Int */
MPP4_GPIO,
MPP5_GPIO,
MPP6_GPIO, /* PCI_intA */
MPP7_GPIO, /* PCI_intB */
MPP8_UNUSED,
MPP9_UNUSED,
MPP10_UNUSED,
MPP11_UNUSED,
MPP12_SATA_LED, /* SATA 0 presence */
MPP13_SATA_LED, /* SATA 1 presence */
MPP14_SATA_LED, /* SATA 0 active */
MPP15_SATA_LED, /* SATA 1 active */
MPP16_UNUSED,
MPP17_UNUSED,
MPP18_UNUSED,
MPP19_UNUSED,
0,
};
static void __init rd88f5182_init(void)
{
/*
* Setup basic Orion functions. Need to be called early.
*/
orion5x_init();
orion5x_mpp_conf(rd88f5182_mpp_modes);
/*
* MPP[20] PCI Clock to MV88F5182
* MPP[21] PCI Clock to mini PCI CON11
* MPP[22] USB 0 over current indication
* MPP[23] USB 1 over current indication
* MPP[24] USB 1 over current enable
* MPP[25] USB 0 over current enable
*/
/*
* Configure peripherals.
*/
orion5x_ehci0_init();
orion5x_ehci1_init();
orion5x_eth_init(&rd88f5182_eth_data);
orion5x_i2c_init();
orion5x_sata_init(&rd88f5182_sata_data);
orion5x_uart0_init();
orion5x_xor_init();
orion5x_setup_dev_boot_win(RD88F5182_NOR_BOOT_BASE,
RD88F5182_NOR_BOOT_SIZE);
orion5x_setup_dev1_win(RD88F5182_NOR_BASE, RD88F5182_NOR_SIZE);
platform_device_register(&rd88f5182_nor_flash);
i2c_register_board_info(0, &rd88f5182_i2c_rtc, 1);
}
MACHINE_START(RD88F5182, "Marvell Orion-NAS Reference Design")
/* Maintainer: Ronen Shitrit <rshitrit@marvell.com> */
.atag_offset = 0x100,
.init_machine = rd88f5182_init,
.map_io = orion5x_map_io,
.init_early = orion5x_init_early,
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.restart = orion5x_restart,
MACHINE_END
| gpl-2.0 |
smaeul/kernel_samsung_tuna | arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c | 4737 | 6688 | /*
* Copyright (C) 2010 Eric Benard - eric@eukrea.com
*
* Based on pcm970-baseboard.c which is :
* Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <video/platform_lcd.h>
#include <mach/hardware.h>
#include <mach/iomux-mx25.h>
#include <mach/common.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/mx25.h>
#include "devices-imx25.h"
static iomux_v3_cfg_t eukrea_mbimxsd_pads[] = {
/* LCD */
MX25_PAD_LD0__LD0,
MX25_PAD_LD1__LD1,
MX25_PAD_LD2__LD2,
MX25_PAD_LD3__LD3,
MX25_PAD_LD4__LD4,
MX25_PAD_LD5__LD5,
MX25_PAD_LD6__LD6,
MX25_PAD_LD7__LD7,
MX25_PAD_LD8__LD8,
MX25_PAD_LD9__LD9,
MX25_PAD_LD10__LD10,
MX25_PAD_LD11__LD11,
MX25_PAD_LD12__LD12,
MX25_PAD_LD13__LD13,
MX25_PAD_LD14__LD14,
MX25_PAD_LD15__LD15,
MX25_PAD_GPIO_E__LD16,
MX25_PAD_GPIO_F__LD17,
MX25_PAD_HSYNC__HSYNC,
MX25_PAD_VSYNC__VSYNC,
MX25_PAD_LSCLK__LSCLK,
MX25_PAD_OE_ACD__OE_ACD,
MX25_PAD_CONTRAST__CONTRAST,
/* LCD_PWR */
MX25_PAD_PWM__GPIO_1_26,
/* LED */
MX25_PAD_POWER_FAIL__GPIO_3_19,
/* SWITCH */
MX25_PAD_VSTBY_ACK__GPIO_3_18,
/* UART2 */
MX25_PAD_UART2_RTS__UART2_RTS,
MX25_PAD_UART2_CTS__UART2_CTS,
MX25_PAD_UART2_TXD__UART2_TXD,
MX25_PAD_UART2_RXD__UART2_RXD,
/* SD1 */
MX25_PAD_SD1_CMD__SD1_CMD,
MX25_PAD_SD1_CLK__SD1_CLK,
MX25_PAD_SD1_DATA0__SD1_DATA0,
MX25_PAD_SD1_DATA1__SD1_DATA1,
MX25_PAD_SD1_DATA2__SD1_DATA2,
MX25_PAD_SD1_DATA3__SD1_DATA3,
/* SD1 CD */
MX25_PAD_DE_B__GPIO_2_20,
/* I2S */
MX25_PAD_KPP_COL3__AUD5_TXFS,
MX25_PAD_KPP_COL2__AUD5_TXC,
MX25_PAD_KPP_COL1__AUD5_RXD,
MX25_PAD_KPP_COL0__AUD5_TXD,
/* CAN */
MX25_PAD_GPIO_D__CAN2_RX,
MX25_PAD_GPIO_C__CAN2_TX,
};
#define GPIO_LED1 83
#define GPIO_SWITCH1 82
#define GPIO_SD1CD 52
#define GPIO_LCDPWR 26
static struct imx_fb_videomode eukrea_mximxsd_modes[] = {
{
.mode = {
.name = "CMO-QVGA",
.refresh = 60,
.xres = 320,
.yres = 240,
.pixclock = KHZ2PICOS(6500),
.left_margin = 30,
.right_margin = 38,
.upper_margin = 20,
.lower_margin = 3,
.hsync_len = 15,
.vsync_len = 4,
},
.bpp = 16,
.pcr = 0xCAD08B80,
}, {
.mode = {
.name = "DVI-VGA",
.refresh = 60,
.xres = 640,
.yres = 480,
.pixclock = 32000,
.hsync_len = 7,
.left_margin = 100,
.right_margin = 100,
.vsync_len = 7,
.upper_margin = 7,
.lower_margin = 100,
},
.pcr = 0xFA208B80,
.bpp = 16,
}, {
.mode = {
.name = "DVI-SVGA",
.refresh = 60,
.xres = 800,
.yres = 600,
.pixclock = 25000,
.hsync_len = 7,
.left_margin = 75,
.right_margin = 75,
.vsync_len = 7,
.upper_margin = 7,
.lower_margin = 75,
},
.pcr = 0xFA208B80,
.bpp = 16,
},
};
static const struct imx_fb_platform_data eukrea_mximxsd_fb_pdata __initconst = {
.mode = eukrea_mximxsd_modes,
.num_modes = ARRAY_SIZE(eukrea_mximxsd_modes),
.pwmr = 0x00A903FF,
.lscr1 = 0x00120300,
.dmacr = 0x00040060,
};
static void eukrea_mbimxsd_lcd_power_set(struct plat_lcd_data *pd,
unsigned int power)
{
if (power)
gpio_direction_output(GPIO_LCDPWR, 1);
else
gpio_direction_output(GPIO_LCDPWR, 0);
}
static struct plat_lcd_data eukrea_mbimxsd_lcd_power_data = {
.set_power = eukrea_mbimxsd_lcd_power_set,
};
static struct platform_device eukrea_mbimxsd_lcd_powerdev = {
.name = "platform-lcd",
.dev.platform_data = &eukrea_mbimxsd_lcd_power_data,
};
static const struct gpio_led eukrea_mbimxsd_leds[] __initconst = {
{
.name = "led1",
.default_trigger = "heartbeat",
.active_low = 1,
.gpio = GPIO_LED1,
},
};
static const struct gpio_led_platform_data
eukrea_mbimxsd_led_info __initconst = {
.leds = eukrea_mbimxsd_leds,
.num_leds = ARRAY_SIZE(eukrea_mbimxsd_leds),
};
static struct gpio_keys_button eukrea_mbimxsd_gpio_buttons[] = {
{
.gpio = GPIO_SWITCH1,
.code = BTN_0,
.desc = "BP1",
.active_low = 1,
.wakeup = 1,
},
};
static const struct gpio_keys_platform_data
eukrea_mbimxsd_button_data __initconst = {
.buttons = eukrea_mbimxsd_gpio_buttons,
.nbuttons = ARRAY_SIZE(eukrea_mbimxsd_gpio_buttons),
};
static struct platform_device *platform_devices[] __initdata = {
&eukrea_mbimxsd_lcd_powerdev,
};
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
static struct i2c_board_info eukrea_mbimxsd_i2c_devices[] = {
{
I2C_BOARD_INFO("tlv320aic23", 0x1a),
},
};
static const
struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata __initconst = {
.flags = IMX_SSI_SYN | IMX_SSI_NET | IMX_SSI_USE_I2S_SLAVE,
};
static struct esdhc_platform_data sd1_pdata = {
.cd_gpio = GPIO_SD1CD,
.cd_type = ESDHC_CD_GPIO,
.wp_type = ESDHC_WP_NONE,
};
/*
* system init for baseboard usage. Will be called by cpuimx25 init.
*
* Add platform devices present on this baseboard and init
* them from CPU side as far as required to use them later on
*/
void __init eukrea_mbimxsd25_baseboard_init(void)
{
if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
ARRAY_SIZE(eukrea_mbimxsd_pads)))
printk(KERN_ERR "error setting mbimxsd pads !\n");
imx25_add_imx_uart1(&uart_pdata);
imx25_add_imx_fb(&eukrea_mximxsd_fb_pdata);
imx25_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata);
imx25_add_flexcan1(NULL);
imx25_add_sdhci_esdhc_imx(0, &sd1_pdata);
gpio_request(GPIO_LED1, "LED1");
gpio_direction_output(GPIO_LED1, 1);
gpio_free(GPIO_LED1);
gpio_request(GPIO_SWITCH1, "SWITCH1");
gpio_direction_input(GPIO_SWITCH1);
gpio_free(GPIO_SWITCH1);
gpio_request(GPIO_LCDPWR, "LCDPWR");
gpio_direction_output(GPIO_LCDPWR, 1);
	/* keep GPIO_LCDPWR requested: the platform-lcd callback drives it */
i2c_register_board_info(0, eukrea_mbimxsd_i2c_devices,
ARRAY_SIZE(eukrea_mbimxsd_i2c_devices));
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
gpio_led_register_device(-1, &eukrea_mbimxsd_led_info);
imx_add_gpio_keys(&eukrea_mbimxsd_button_data);
}
| gpl-2.0 |
MarshedOut/android_kernel_lge_msm8974 | drivers/video/riva/fbdev.c | 4993 | 59750 | /*
* linux/drivers/video/riva/fbdev.c - nVidia RIVA 128/TNT/TNT2 fb driver
*
* Maintained by Ani Joshi <ajoshi@shell.unixbox.com>
*
* Copyright 1999-2000 Jeff Garzik
*
* Contributors:
*
* Ani Joshi: Lots of debugging and cleanup work, really helped
* get the driver going
*
* Ferenc Bakonyi: Bug fixes, cleanup, modularization
*
* Jindrich Makovicka: Accel code help, hw cursor, mtrr
*
* Paul Richards: Bug fixes, updates
*
* Initial template from skeletonfb.c, created 28 Dec 1997 by Geert Uytterhoeven
* Includes riva_hw.c from nVidia, see copyright below.
* KGI code provided the basis for state storage, init, and mode switching.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* Known bugs and issues:
* restoring text mode fails
* doublescan modes are broken
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/backlight.h>
#include <linux/bitrev.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#ifdef CONFIG_PPC_OF
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/machdep.h>
#include <asm/backlight.h>
#endif
#include "rivafb.h"
#include "nvreg.h"
/* version number of this driver */
#define RIVAFB_VERSION "0.9.5b"
/* ------------------------------------------------------------------------- *
*
* various helpful macros and constants
*
* ------------------------------------------------------------------------- */
#ifdef CONFIG_FB_RIVA_DEBUG
#define NVTRACE printk
#else
#define NVTRACE if(0) printk
#endif
#define NVTRACE_ENTER(...) NVTRACE("%s START\n", __func__)
#define NVTRACE_LEAVE(...) NVTRACE("%s END\n", __func__)
#ifdef CONFIG_FB_RIVA_DEBUG
#define assert(expr) \
if(!(expr)) { \
printk( "Assertion failed! %s,%s,%s,line=%d\n",\
#expr,__FILE__,__func__,__LINE__); \
BUG(); \
}
#else
#define assert(expr)
#endif
#define PFX "rivafb: "
/* macro that allows you to set overflow bits */
#define SetBitField(value,from,to) SetBF(to,GetBF(value,from))
#define SetBit(n) (1<<(n))
#define Set8Bits(value) ((value)&0xff)
/* HW cursor parameters */
#define MAX_CURS 32
/* ------------------------------------------------------------------------- *
*
* prototypes
*
* ------------------------------------------------------------------------- */
static int rivafb_blank(int blank, struct fb_info *info);
/* ------------------------------------------------------------------------- *
*
* card identification
*
* ------------------------------------------------------------------------- */
static struct pci_device_id rivafb_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA_SGS, PCI_DEVICE_ID_NVIDIA_SGS_RIVA128,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TNT,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TNT2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_UTNT2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_VTNT2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_UVTNT2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_ITNT2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
// NF2/IGP version, GeForce 4 MX, NV18
{ PCI_VENDOR_ID_NVIDIA, 0x01f0,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_200,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_IGEFORCE2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3_1,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_DDC,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, rivafb_pci_tbl);
/* ------------------------------------------------------------------------- *
*
* global variables
*
* ------------------------------------------------------------------------- */
/* command line data, set in rivafb_setup() */
static int flatpanel __devinitdata = -1; /* Autodetect later */
static int forceCRTC __devinitdata = -1;
static bool noaccel __devinitdata = 0;
#ifdef CONFIG_MTRR
static bool nomtrr __devinitdata = 0;
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight __devinitdata = 1;
#else
static int backlight __devinitdata = 0;
#endif
static char *mode_option __devinitdata = NULL;
static bool strictmode = 0;
static struct fb_fix_screeninfo __devinitdata rivafb_fix = {
.type = FB_TYPE_PACKED_PIXELS,
.xpanstep = 1,
.ypanstep = 1,
};
static struct fb_var_screeninfo __devinitdata rivafb_default_var = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
.yres_virtual = 480,
.bits_per_pixel = 8,
.red = {0, 8, 0},
.green = {0, 8, 0},
.blue = {0, 8, 0},
.transp = {0, 0, 0},
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.pixclock = 39721,
.left_margin = 40,
.right_margin = 24,
.upper_margin = 32,
.lower_margin = 11,
.hsync_len = 96,
.vsync_len = 2,
.vmode = FB_VMODE_NONINTERLACED
};
/* from GGI */
static const struct riva_regs reg_template = {
{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* ATTR */
0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x41, 0x01, 0x0F, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* CRT */
0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, /* 0x10 */
0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20 */
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, /* 0x40 */
},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F, /* GRA */
0xFF},
{0x03, 0x01, 0x0F, 0x00, 0x0E}, /* SEQ */
0xEB /* MISC */
};
/*
* Backlight control
*/
#ifdef CONFIG_FB_RIVA_BACKLIGHT
/* We do not have any information about which values are allowed, thus
* we used safe values.
*/
#define MIN_LEVEL 0x158
#define MAX_LEVEL 0x534
#define LEVEL_STEP ((MAX_LEVEL - MIN_LEVEL) / FB_BACKLIGHT_MAX)
static int riva_bl_get_level_brightness(struct riva_par *par,
int level)
{
struct fb_info *info = pci_get_drvdata(par->pdev);
int nlevel;
/* Get and convert the value */
/* No locking on bl_curve since accessing a single value */
nlevel = MIN_LEVEL + info->bl_curve[level] * LEVEL_STEP;
if (nlevel < 0)
nlevel = 0;
else if (nlevel < MIN_LEVEL)
nlevel = MIN_LEVEL;
else if (nlevel > MAX_LEVEL)
nlevel = MAX_LEVEL;
return nlevel;
}
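/*
 * Illustrative sketch (not part of the driver): the linear mapping used
 * above. A bl_curve value of 0 maps to MIN_LEVEL and FB_BACKLIGHT_MAX
 * maps to MAX_LEVEL, with clamping at both ends of the range.
 */
static inline int example_bl_map(int curve_value)
{
	int nlevel = MIN_LEVEL + curve_value * LEVEL_STEP;

	if (nlevel < MIN_LEVEL)
		nlevel = MIN_LEVEL;
	else if (nlevel > MAX_LEVEL)
		nlevel = MAX_LEVEL;
	return nlevel;
}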
static int riva_bl_update_status(struct backlight_device *bd)
{
struct riva_par *par = bl_get_data(bd);
U032 tmp_pcrt, tmp_pmc;
int level;
if (bd->props.power != FB_BLANK_UNBLANK ||
bd->props.fb_blank != FB_BLANK_UNBLANK)
level = 0;
else
level = bd->props.brightness;
tmp_pmc = NV_RD32(par->riva.PMC, 0x10F0) & 0x0000FFFF;
tmp_pcrt = NV_RD32(par->riva.PCRTC0, 0x081C) & 0xFFFFFFFC;
if(level > 0) {
tmp_pcrt |= 0x1;
tmp_pmc |= (1 << 31); /* backlight bit */
tmp_pmc |= riva_bl_get_level_brightness(par, level) << 16; /* level */
}
NV_WR32(par->riva.PCRTC0, 0x081C, tmp_pcrt);
NV_WR32(par->riva.PMC, 0x10F0, tmp_pmc);
return 0;
}
static int riva_bl_get_brightness(struct backlight_device *bd)
{
return bd->props.brightness;
}
static const struct backlight_ops riva_bl_ops = {
.get_brightness = riva_bl_get_brightness,
.update_status = riva_bl_update_status,
};
static void riva_bl_init(struct riva_par *par)
{
struct backlight_properties props;
struct fb_info *info = pci_get_drvdata(par->pdev);
struct backlight_device *bd;
char name[12];
if (!par->FlatPanel)
return;
#ifdef CONFIG_PMAC_BACKLIGHT
if (!machine_is(powermac) ||
!pmac_has_backlight_type("mnca"))
return;
#endif
snprintf(name, sizeof(name), "rivabl%d", info->node);
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd = backlight_device_register(name, info->dev, par, &riva_bl_ops,
&props);
if (IS_ERR(bd)) {
info->bl_dev = NULL;
printk(KERN_WARNING "riva: Backlight registration failed\n");
goto error;
}
info->bl_dev = bd;
fb_bl_default_curve(info, 0,
MIN_LEVEL * FB_BACKLIGHT_MAX / MAX_LEVEL,
FB_BACKLIGHT_MAX);
bd->props.brightness = bd->props.max_brightness;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
printk("riva: Backlight initialized (%s)\n", name);
return;
error:
return;
}
static void riva_bl_exit(struct fb_info *info)
{
struct backlight_device *bd = info->bl_dev;
backlight_device_unregister(bd);
printk("riva: Backlight unloaded\n");
}
#else
static inline void riva_bl_init(struct riva_par *par) {}
static inline void riva_bl_exit(struct fb_info *info) {}
#endif /* CONFIG_FB_RIVA_BACKLIGHT */
/* ------------------------------------------------------------------------- *
*
* MMIO access macros
*
* ------------------------------------------------------------------------- */
static inline void CRTCout(struct riva_par *par, unsigned char index,
unsigned char val)
{
VGA_WR08(par->riva.PCIO, 0x3d4, index);
VGA_WR08(par->riva.PCIO, 0x3d5, val);
}
static inline unsigned char CRTCin(struct riva_par *par,
unsigned char index)
{
VGA_WR08(par->riva.PCIO, 0x3d4, index);
return (VGA_RD08(par->riva.PCIO, 0x3d5));
}
static inline void GRAout(struct riva_par *par, unsigned char index,
unsigned char val)
{
VGA_WR08(par->riva.PVIO, 0x3ce, index);
VGA_WR08(par->riva.PVIO, 0x3cf, val);
}
static inline unsigned char GRAin(struct riva_par *par,
unsigned char index)
{
VGA_WR08(par->riva.PVIO, 0x3ce, index);
return (VGA_RD08(par->riva.PVIO, 0x3cf));
}
static inline void SEQout(struct riva_par *par, unsigned char index,
unsigned char val)
{
VGA_WR08(par->riva.PVIO, 0x3c4, index);
VGA_WR08(par->riva.PVIO, 0x3c5, val);
}
static inline unsigned char SEQin(struct riva_par *par,
unsigned char index)
{
VGA_WR08(par->riva.PVIO, 0x3c4, index);
return (VGA_RD08(par->riva.PVIO, 0x3c5));
}
static inline void ATTRout(struct riva_par *par, unsigned char index,
unsigned char val)
{
VGA_WR08(par->riva.PCIO, 0x3c0, index);
VGA_WR08(par->riva.PCIO, 0x3c0, val);
}
static inline unsigned char ATTRin(struct riva_par *par,
unsigned char index)
{
VGA_WR08(par->riva.PCIO, 0x3c0, index);
return (VGA_RD08(par->riva.PCIO, 0x3c1));
}
static inline void MISCout(struct riva_par *par, unsigned char val)
{
VGA_WR08(par->riva.PVIO, 0x3c2, val);
}
static inline unsigned char MISCin(struct riva_par *par)
{
return (VGA_RD08(par->riva.PVIO, 0x3cc));
}
static inline void reverse_order(u32 *l)
{
u8 *a = (u8 *)l;
a[0] = bitrev8(a[0]);
a[1] = bitrev8(a[1]);
a[2] = bitrev8(a[2]);
a[3] = bitrev8(a[3]);
}
/* ------------------------------------------------------------------------- *
*
* cursor stuff
*
* ------------------------------------------------------------------------- */
/**
* rivafb_load_cursor_image - load cursor image to hardware
* @par: pointer to private data
* @data8: address of monochrome bitmap (1 = foreground color, 0 = background)
* @bg: background color (ARGB1555) - alpha bit determines opacity
* @fg: foreground color (ARGB1555)
* @w: width of cursor image in pixels
* @h: height of cursor image in scanlines
*
* DESCRIPTION:
* Loads a cursor image based on a monochrome source bitmap. Each image
* bit determines the color of one pixel: 0 for background, 1 for
* foreground. Only the affected region (as determined by the @w and @h
* parameters) will be updated.
*
* CALLED FROM:
* rivafb_cursor()
*/
static void rivafb_load_cursor_image(struct riva_par *par, u8 *data8,
u16 bg, u16 fg, u32 w, u32 h)
{
int i, j, k = 0;
u32 b, tmp;
u32 *data = (u32 *)data8;
bg = le16_to_cpu(bg);
fg = le16_to_cpu(fg);
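/*
* Each 32-bit word written below packs two ARGB1555 cursor pixels,
* so round the width up to an even number of pixels first.
*/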
w = (w + 1) & ~1;
for (i = 0; i < h; i++) {
b = *data++;
reverse_order(&b);
for (j = 0; j < w/2; j++) {
tmp = 0;
#if defined (__BIG_ENDIAN)
tmp = (b & (1 << 31)) ? fg << 16 : bg << 16;
b <<= 1;
tmp |= (b & (1 << 31)) ? fg : bg;
b <<= 1;
#else
tmp = (b & 1) ? fg : bg;
b >>= 1;
tmp |= (b & 1) ? fg << 16 : bg << 16;
b >>= 1;
#endif
writel(tmp, &par->riva.CURSOR[k++]);
}
k += (MAX_CURS - w)/2;
}
}
/* ------------------------------------------------------------------------- *
*
* general utility functions
*
* ------------------------------------------------------------------------- */
/**
* riva_wclut - set CLUT entry
* @chip: pointer to RIVA_HW_INST object
* @regnum: register number
* @red: red component
* @green: green component
* @blue: blue component
*
* DESCRIPTION:
* Sets color register @regnum.
*
* CALLED FROM:
* rivafb_setcolreg()
*/
static void riva_wclut(RIVA_HW_INST *chip,
unsigned char regnum, unsigned char red,
unsigned char green, unsigned char blue)
{
VGA_WR08(chip->PDIO, 0x3c8, regnum);
VGA_WR08(chip->PDIO, 0x3c9, red);
VGA_WR08(chip->PDIO, 0x3c9, green);
VGA_WR08(chip->PDIO, 0x3c9, blue);
}
/**
* riva_rclut - read from CLUT register
* @chip: pointer to RIVA_HW_INST object
* @regnum: register number
* @red: red component
* @green: green component
* @blue: blue component
*
* DESCRIPTION:
* Reads red, green, and blue from color register @regnum.
*
* CALLED FROM:
* rivafb_setcolreg()
*/
static void riva_rclut(RIVA_HW_INST *chip,
unsigned char regnum, unsigned char *red,
unsigned char *green, unsigned char *blue)
{
VGA_WR08(chip->PDIO, 0x3c7, regnum);
*red = VGA_RD08(chip->PDIO, 0x3c9);
*green = VGA_RD08(chip->PDIO, 0x3c9);
*blue = VGA_RD08(chip->PDIO, 0x3c9);
}
/**
* riva_save_state - saves current chip state
* @par: pointer to riva_par object containing info for current riva board
* @regs: pointer to riva_regs object
*
* DESCRIPTION:
* Saves current chip state to @regs.
*
* CALLED FROM:
* rivafb_probe()
*/
/* from GGI */
static void riva_save_state(struct riva_par *par, struct riva_regs *regs)
{
int i;
NVTRACE_ENTER();
par->riva.LockUnlock(&par->riva, 0);
par->riva.UnloadStateExt(&par->riva, &regs->ext);
regs->misc_output = MISCin(par);
for (i = 0; i < NUM_CRT_REGS; i++)
regs->crtc[i] = CRTCin(par, i);
for (i = 0; i < NUM_ATC_REGS; i++)
regs->attr[i] = ATTRin(par, i);
for (i = 0; i < NUM_GRC_REGS; i++)
regs->gra[i] = GRAin(par, i);
for (i = 0; i < NUM_SEQ_REGS; i++)
regs->seq[i] = SEQin(par, i);
NVTRACE_LEAVE();
}
/**
* riva_load_state - loads current chip state
* @par: pointer to riva_par object containing info for current riva board
* @regs: pointer to riva_regs object
*
* DESCRIPTION:
* Loads chip state from @regs.
*
* CALLED FROM:
* riva_load_video_mode()
* rivafb_probe()
* rivafb_remove()
*/
/* from GGI */
static void riva_load_state(struct riva_par *par, struct riva_regs *regs)
{
RIVA_HW_STATE *state = &regs->ext;
int i;
NVTRACE_ENTER();
CRTCout(par, 0x11, 0x00);
par->riva.LockUnlock(&par->riva, 0);
par->riva.LoadStateExt(&par->riva, state);
MISCout(par, regs->misc_output);
for (i = 0; i < NUM_CRT_REGS; i++) {
switch (i) {
case 0x19:
case 0x20 ... 0x40:
break;
default:
CRTCout(par, i, regs->crtc[i]);
}
}
for (i = 0; i < NUM_ATC_REGS; i++)
ATTRout(par, i, regs->attr[i]);
for (i = 0; i < NUM_GRC_REGS; i++)
GRAout(par, i, regs->gra[i]);
for (i = 0; i < NUM_SEQ_REGS; i++)
SEQout(par, i, regs->seq[i]);
NVTRACE_LEAVE();
}
/**
* riva_load_video_mode - calculate timings
* @info: pointer to fb_info object containing info for current riva board
*
* DESCRIPTION:
* Calculate the CRTC timings and then hand them off to riva_load_state().
*
* CALLED FROM:
* rivafb_set_par()
*/
static int riva_load_video_mode(struct fb_info *info)
{
int bpp, width, hDisplaySize, hDisplay, hStart,
hEnd, hTotal, height, vDisplay, vStart, vEnd, vTotal, dotClock;
int hBlankStart, hBlankEnd, vBlankStart, vBlankEnd;
int rc;
struct riva_par *par = info->par;
struct riva_regs newmode;
NVTRACE_ENTER();
/* time to calculate */
rivafb_blank(FB_BLANK_NORMAL, info);
bpp = info->var.bits_per_pixel;
if (bpp == 16 && info->var.green.length == 5)
bpp = 15;
width = info->var.xres_virtual;
hDisplaySize = info->var.xres;
hDisplay = (hDisplaySize / 8) - 1;
hStart = (hDisplaySize + info->var.right_margin) / 8 - 1;
hEnd = (hDisplaySize + info->var.right_margin +
info->var.hsync_len) / 8 - 1;
hTotal = (hDisplaySize + info->var.right_margin +
info->var.hsync_len + info->var.left_margin) / 8 - 5;
hBlankStart = hDisplay;
hBlankEnd = hTotal + 4;
height = info->var.yres_virtual;
vDisplay = info->var.yres - 1;
vStart = info->var.yres + info->var.lower_margin - 1;
vEnd = info->var.yres + info->var.lower_margin +
info->var.vsync_len - 1;
vTotal = info->var.yres + info->var.lower_margin +
info->var.vsync_len + info->var.upper_margin + 2;
vBlankStart = vDisplay;
vBlankEnd = vTotal + 1;
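/* var->pixclock is the pixel period in picoseconds, so this yields
* the dot clock in kHz. */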
dotClock = 1000000000 / info->var.pixclock;
memcpy(&newmode, &reg_template, sizeof(struct riva_regs));
if ((info->var.vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED)
vTotal |= 1;
if (par->FlatPanel) {
vStart = vTotal - 3;
vEnd = vTotal - 2;
vBlankStart = vStart;
hStart = hTotal - 3;
hEnd = hTotal - 2;
hBlankEnd = hTotal + 4;
}
newmode.crtc[0x0] = Set8Bits (hTotal);
newmode.crtc[0x1] = Set8Bits (hDisplay);
newmode.crtc[0x2] = Set8Bits (hBlankStart);
newmode.crtc[0x3] = SetBitField (hBlankEnd, 4: 0, 4:0) | SetBit (7);
newmode.crtc[0x4] = Set8Bits (hStart);
newmode.crtc[0x5] = SetBitField (hBlankEnd, 5: 5, 7:7)
| SetBitField (hEnd, 4: 0, 4:0);
newmode.crtc[0x6] = SetBitField (vTotal, 7: 0, 7:0);
newmode.crtc[0x7] = SetBitField (vTotal, 8: 8, 0:0)
| SetBitField (vDisplay, 8: 8, 1:1)
| SetBitField (vStart, 8: 8, 2:2)
| SetBitField (vBlankStart, 8: 8, 3:3)
| SetBit (4)
| SetBitField (vTotal, 9: 9, 5:5)
| SetBitField (vDisplay, 9: 9, 6:6)
| SetBitField (vStart, 9: 9, 7:7);
newmode.crtc[0x9] = SetBitField (vBlankStart, 9: 9, 5:5)
| SetBit (6);
newmode.crtc[0x10] = Set8Bits (vStart);
newmode.crtc[0x11] = SetBitField (vEnd, 3: 0, 3:0)
| SetBit (5);
newmode.crtc[0x12] = Set8Bits (vDisplay);
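/* CRTC offset register: line pitch in units of 8 bytes; the "+ 1"
* makes 15 bpp round up to 2 bytes per pixel. */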
newmode.crtc[0x13] = (width / 8) * ((bpp + 1) / 8);
newmode.crtc[0x15] = Set8Bits (vBlankStart);
newmode.crtc[0x16] = Set8Bits (vBlankEnd);
newmode.ext.screen = SetBitField(hBlankEnd,6:6,4:4)
| SetBitField(vBlankStart,10:10,3:3)
| SetBitField(vStart,10:10,2:2)
| SetBitField(vDisplay,10:10,1:1)
| SetBitField(vTotal,10:10,0:0);
newmode.ext.horiz = SetBitField(hTotal,8:8,0:0)
| SetBitField(hDisplay,8:8,1:1)
| SetBitField(hBlankStart,8:8,2:2)
| SetBitField(hStart,8:8,3:3);
newmode.ext.extra = SetBitField(vTotal,11:11,0:0)
| SetBitField(vDisplay,11:11,2:2)
| SetBitField(vStart,11:11,4:4)
| SetBitField(vBlankStart,11:11,6:6);
if ((info->var.vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
int tmp = (hTotal >> 1) & ~1;
newmode.ext.interlace = Set8Bits(tmp);
newmode.ext.horiz |= SetBitField(tmp, 8:8,4:4);
} else
newmode.ext.interlace = 0xff; /* interlace off */
if (par->riva.Architecture >= NV_ARCH_10)
par->riva.CURSOR = (U032 __iomem *)(info->screen_base + par->riva.CursorStart);
if (info->var.sync & FB_SYNC_HOR_HIGH_ACT)
newmode.misc_output &= ~0x40;
else
newmode.misc_output |= 0x40;
if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
newmode.misc_output &= ~0x80;
else
newmode.misc_output |= 0x80;
rc = CalcStateExt(&par->riva, &newmode.ext, bpp, width,
hDisplaySize, height, dotClock);
if (rc)
goto out;
newmode.ext.scale = NV_RD32(par->riva.PRAMDAC, 0x00000848) &
0xfff000ff;
if (par->FlatPanel == 1) {
newmode.ext.pixel |= (1 << 7);
newmode.ext.scale |= (1 << 8);
}
if (par->SecondCRTC) {
newmode.ext.head = NV_RD32(par->riva.PCRTC0, 0x00000860) &
~0x00001000;
newmode.ext.head2 = NV_RD32(par->riva.PCRTC0, 0x00002860) |
0x00001000;
newmode.ext.crtcOwner = 3;
newmode.ext.pllsel |= 0x20000800;
newmode.ext.vpll2 = newmode.ext.vpll;
} else if (par->riva.twoHeads) {
newmode.ext.head = NV_RD32(par->riva.PCRTC0, 0x00000860) |
0x00001000;
newmode.ext.head2 = NV_RD32(par->riva.PCRTC0, 0x00002860) &
~0x00001000;
newmode.ext.crtcOwner = 0;
newmode.ext.vpll2 = NV_RD32(par->riva.PRAMDAC0, 0x00000520);
}
if (par->FlatPanel == 1) {
newmode.ext.pixel |= (1 << 7);
newmode.ext.scale |= (1 << 8);
}
newmode.ext.cursorConfig = 0x02000100;
par->current_state = newmode;
riva_load_state(par, &par->current_state);
par->riva.LockUnlock(&par->riva, 0); /* important for HW cursor */
out:
rivafb_blank(FB_BLANK_UNBLANK, info);
NVTRACE_LEAVE();
return rc;
}
static void riva_update_var(struct fb_var_screeninfo *var,
const struct fb_videomode *modedb)
{
NVTRACE_ENTER();
var->xres = var->xres_virtual = modedb->xres;
var->yres = modedb->yres;
if (var->yres_virtual < var->yres)
var->yres_virtual = var->yres;
var->xoffset = var->yoffset = 0;
var->pixclock = modedb->pixclock;
var->left_margin = modedb->left_margin;
var->right_margin = modedb->right_margin;
var->upper_margin = modedb->upper_margin;
var->lower_margin = modedb->lower_margin;
var->hsync_len = modedb->hsync_len;
var->vsync_len = modedb->vsync_len;
var->sync = modedb->sync;
var->vmode = modedb->vmode;
NVTRACE_LEAVE();
}
/**
* rivafb_do_maximize - set up the virtual resolution
* @info: pointer to fb_info object containing info for current riva board
* @var: standard kernel fb changeable data
* @nom: numerator of the bytes-per-pixel ratio
* @den: denominator of the bytes-per-pixel ratio
*
* DESCRIPTION:
* Chooses or validates the virtual resolution so that the resulting
* framebuffer fits into the available video memory; a value of -1 in
* @var->xres_virtual or @var->yres_virtual requests the maximum.
*
* RETURNS:
* -EINVAL on failure, 0 on success
*
* CALLED FROM:
* rivafb_check_var()
*/
static int rivafb_do_maximize(struct fb_info *info,
struct fb_var_screeninfo *var,
int nom, int den)
{
static struct {
int xres, yres;
} modes[] = {
{1600, 1280},
{1280, 1024},
{1024, 768},
{800, 600},
{640, 480},
{-1, -1}
};
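/*
* nom/den express bytes per pixel as a ratio (e.g. 2/1 at 16 bpp),
* so xres * nom / den * yres below is the amount of video memory a
* candidate virtual resolution would need.
*/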
int i;
NVTRACE_ENTER();
/* use highest possible virtual resolution */
if (var->xres_virtual == -1 && var->yres_virtual == -1) {
printk(KERN_WARNING PFX
"using maximum available virtual resolution\n");
for (i = 0; modes[i].xres != -1; i++) {
if (modes[i].xres * nom / den * modes[i].yres <
info->fix.smem_len)
break;
}
if (modes[i].xres == -1) {
printk(KERN_ERR PFX
"could not find a virtual resolution that fits into video memory!!\n");
NVTRACE("EXIT - EINVAL error\n");
return -EINVAL;
}
var->xres_virtual = modes[i].xres;
var->yres_virtual = modes[i].yres;
printk(KERN_INFO PFX
"virtual resolution set to maximum of %dx%d\n",
var->xres_virtual, var->yres_virtual);
} else if (var->xres_virtual == -1) {
var->xres_virtual = (info->fix.smem_len * den /
(nom * var->yres_virtual)) & ~15;
printk(KERN_WARNING PFX
"setting virtual X resolution to %d\n", var->xres_virtual);
} else if (var->yres_virtual == -1) {
var->xres_virtual = (var->xres_virtual + 15) & ~15;
var->yres_virtual = info->fix.smem_len * den /
(nom * var->xres_virtual);
printk(KERN_WARNING PFX
"setting virtual Y resolution to %d\n", var->yres_virtual);
} else {
var->xres_virtual = (var->xres_virtual + 15) & ~15;
if (var->xres_virtual * nom / den * var->yres_virtual > info->fix.smem_len) {
printk(KERN_ERR PFX
"mode %dx%dx%d rejected...resolution too high to fit into video memory!\n",
var->xres, var->yres, var->bits_per_pixel);
NVTRACE("EXIT - EINVAL error\n");
return -EINVAL;
}
}
if (var->xres_virtual * nom / den >= 8192) {
printk(KERN_WARNING PFX
"virtual X resolution (%d) is too high, lowering to %d\n",
var->xres_virtual, 8192 * den / nom - 16);
var->xres_virtual = 8192 * den / nom - 16;
}
if (var->xres_virtual < var->xres) {
printk(KERN_ERR PFX
"virtual X resolution (%d) is smaller than real\n", var->xres_virtual);
return -EINVAL;
}
if (var->yres_virtual < var->yres) {
printk(KERN_ERR PFX
"virtual Y resolution (%d) is smaller than real\n", var->yres_virtual);
return -EINVAL;
}
if (var->yres_virtual > 0x7fff/nom)
var->yres_virtual = 0x7fff/nom;
if (var->xres_virtual > 0x7fff/nom)
var->xres_virtual = 0x7fff/nom;
NVTRACE_LEAVE();
return 0;
}
static void
riva_set_pattern(struct riva_par *par, int clr0, int clr1, int pat0, int pat1)
{
RIVA_FIFO_FREE(par->riva, Patt, 4);
NV_WR32(&par->riva.Patt->Color0, 0, clr0);
NV_WR32(&par->riva.Patt->Color1, 0, clr1);
NV_WR32(par->riva.Patt->Monochrome, 0, pat0);
NV_WR32(par->riva.Patt->Monochrome, 4, pat1);
}
/* acceleration routines */
static inline void wait_for_idle(struct riva_par *par)
{
while (par->riva.Busy(&par->riva));
}
/*
* Set ROP. Translate X rop into ROP3. Internal routine.
*/
static void
riva_set_rop_solid(struct riva_par *par, int rop)
{
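/* Program a solid all-ones pattern and colors so the ROP set below
* depends only on source and destination. */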
riva_set_pattern(par, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
RIVA_FIFO_FREE(par->riva, Rop, 1);
NV_WR32(&par->riva.Rop->Rop3, 0, rop);
}
static void riva_setup_accel(struct fb_info *info)
{
struct riva_par *par = info->par;
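/* Reset the clip rectangle to the whole virtual screen and leave the
* engine in plain source-copy mode (ROP 0xcc). */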
RIVA_FIFO_FREE(par->riva, Clip, 2);
NV_WR32(&par->riva.Clip->TopLeft, 0, 0x0);
NV_WR32(&par->riva.Clip->WidthHeight, 0,
(info->var.xres_virtual & 0xffff) |
(info->var.yres_virtual << 16));
riva_set_rop_solid(par, 0xcc);
wait_for_idle(par);
}
/**
* riva_get_cmap_len - query current color map length
* @var: standard kernel fb changeable data
*
* DESCRIPTION:
* Get current color map length.
*
* RETURNS:
* Length of color map
*
* CALLED FROM:
* rivafb_setcolreg()
*/
static int riva_get_cmap_len(const struct fb_var_screeninfo *var)
{
int rc = 256; /* reasonable default */
switch (var->green.length) {
case 8:
rc = 256; /* 256 entries (2^8), 8 bpp and RGB8888 */
break;
case 5:
rc = 32; /* 32 entries (2^5), 16 bpp, RGB555 */
break;
case 6:
rc = 64; /* 64 entries (2^6), 16 bpp, RGB565 */
break;
default:
/* should not occur */
break;
}
return rc;
}
/* ------------------------------------------------------------------------- *
*
* framebuffer operations
*
* ------------------------------------------------------------------------- */
static int rivafb_open(struct fb_info *info, int user)
{
struct riva_par *par = info->par;
NVTRACE_ENTER();
mutex_lock(&par->open_lock);
if (!par->ref_count) {
#ifdef CONFIG_X86
memset(&par->state, 0, sizeof(struct vgastate));
par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS;
/* save the DAC for Riva128 */
if (par->riva.Architecture == NV_ARCH_03)
par->state.flags |= VGA_SAVE_CMAP;
save_vga(&par->state);
#endif
/* vgaHWunlock() + riva unlock (0x7F) */
CRTCout(par, 0x11, 0xFF);
par->riva.LockUnlock(&par->riva, 0);
riva_save_state(par, &par->initial_state);
}
par->ref_count++;
mutex_unlock(&par->open_lock);
NVTRACE_LEAVE();
return 0;
}
static int rivafb_release(struct fb_info *info, int user)
{
struct riva_par *par = info->par;
NVTRACE_ENTER();
mutex_lock(&par->open_lock);
if (!par->ref_count) {
mutex_unlock(&par->open_lock);
return -EINVAL;
}
if (par->ref_count == 1) {
par->riva.LockUnlock(&par->riva, 0);
par->riva.LoadStateExt(&par->riva, &par->initial_state.ext);
riva_load_state(par, &par->initial_state);
#ifdef CONFIG_X86
restore_vga(&par->state);
#endif
par->riva.LockUnlock(&par->riva, 1);
}
par->ref_count--;
mutex_unlock(&par->open_lock);
NVTRACE_LEAVE();
return 0;
}
static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
const struct fb_videomode *mode;
struct riva_par *par = info->par;
int nom, den; /* translating from pixels->bytes */
int mode_valid = 0;
NVTRACE_ENTER();
switch (var->bits_per_pixel) {
case 1 ... 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
var->red.length = var->green.length = var->blue.length = 8;
var->bits_per_pixel = 8;
nom = den = 1;
break;
case 9 ... 15:
var->green.length = 5;
/* fall through */
case 16:
var->bits_per_pixel = 16;
/* The Riva128 supports RGB555 only */
if (par->riva.Architecture == NV_ARCH_03)
var->green.length = 5;
if (var->green.length == 5) {
/* 0rrrrrgg gggbbbbb */
var->red.offset = 10;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 5;
var->blue.length = 5;
} else {
/* rrrrrggg gggbbbbb */
var->red.offset = 11;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 6;
var->blue.length = 5;
}
nom = 2;
den = 1;
break;
case 17 ... 32:
var->red.length = var->green.length = var->blue.length = 8;
var->bits_per_pixel = 32;
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
nom = 4;
den = 1;
break;
default:
printk(KERN_ERR PFX
"mode %dx%dx%d rejected...color depth not supported.\n",
var->xres, var->yres, var->bits_per_pixel);
NVTRACE("EXIT, returning -EINVAL\n");
return -EINVAL;
}
if (!strictmode) {
if (!info->monspecs.vfmax || !info->monspecs.hfmax ||
!info->monspecs.dclkmax || !fb_validate_mode(var, info))
mode_valid = 1;
}
/* calculate modeline if supported by monitor */
if (!mode_valid && info->monspecs.gtf) {
if (!fb_get_mode(FB_MAXTIMINGS, 0, var, info))
mode_valid = 1;
}
if (!mode_valid) {
mode = fb_find_best_mode(var, &info->modelist);
if (mode) {
riva_update_var(var, mode);
mode_valid = 1;
}
}
if (!mode_valid && info->monspecs.modedb_len)
return -EINVAL;
if (var->xres_virtual < var->xres)
var->xres_virtual = var->xres;
if (var->yres_virtual <= var->yres)
var->yres_virtual = -1;
if (rivafb_do_maximize(info, var, nom, den) < 0)
return -EINVAL;
if (var->xoffset < 0)
var->xoffset = 0;
if (var->yoffset < 0)
var->yoffset = 0;
/* truncate xoffset and yoffset to maximum if too high */
if (var->xoffset > var->xres_virtual - var->xres)
var->xoffset = var->xres_virtual - var->xres - 1;
if (var->yoffset > var->yres_virtual - var->yres)
var->yoffset = var->yres_virtual - var->yres - 1;
var->red.msb_right =
var->green.msb_right =
var->blue.msb_right =
var->transp.offset = var->transp.length = var->transp.msb_right = 0;
NVTRACE_LEAVE();
return 0;
}
static int rivafb_set_par(struct fb_info *info)
{
struct riva_par *par = info->par;
int rc = 0;
NVTRACE_ENTER();
/* vgaHWunlock() + riva unlock (0x7F) */
CRTCout(par, 0x11, 0xFF);
par->riva.LockUnlock(&par->riva, 0);
rc = riva_load_video_mode(info);
if (rc)
goto out;
if (!(info->flags & FBINFO_HWACCEL_DISABLED))
riva_setup_accel(info);
par->cursor_reset = 1;
info->fix.line_length = (info->var.xres_virtual * (info->var.bits_per_pixel >> 3));
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
if (info->flags & FBINFO_HWACCEL_DISABLED)
info->pixmap.scan_align = 1;
else
info->pixmap.scan_align = 4;
out:
NVTRACE_LEAVE();
return rc;
}
/**
* rivafb_pan_display
* @var: standard kernel fb changeable data
* @info: pointer to fb_info object containing info for current riva board
*
* DESCRIPTION:
* Pan (or wrap, depending on the `vmode' field) the display using the
* `xoffset' and `yoffset' fields of the `var' structure.
* If the values don't fit, return -EINVAL.
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
static int rivafb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct riva_par *par = info->par;
unsigned int base;
NVTRACE_ENTER();
base = var->yoffset * info->fix.line_length + var->xoffset;
par->riva.SetStartAddress(&par->riva, base);
NVTRACE_LEAVE();
return 0;
}
static int rivafb_blank(int blank, struct fb_info *info)
{
struct riva_par *par = info->par;
unsigned char tmp, vesa;
tmp = SEQin(par, 0x01) & ~0x20; /* screen on/off */
vesa = CRTCin(par, 0x1a) & ~0xc0; /* sync on/off */
NVTRACE_ENTER();
if (blank)
tmp |= 0x20;
switch (blank) {
case FB_BLANK_UNBLANK:
case FB_BLANK_NORMAL:
break;
case FB_BLANK_VSYNC_SUSPEND:
vesa |= 0x80;
break;
case FB_BLANK_HSYNC_SUSPEND:
vesa |= 0x40;
break;
case FB_BLANK_POWERDOWN:
vesa |= 0xc0;
break;
}
SEQout(par, 0x01, tmp);
CRTCout(par, 0x1a, vesa);
NVTRACE_LEAVE();
return 0;
}
/**
* rivafb_setcolreg
* @regno: register index
* @red: red component
* @green: green component
* @blue: blue component
* @transp: transparency
* @info: pointer to fb_info object containing info for current riva board
*
* DESCRIPTION:
* Set a single color register. The values supplied have a 16 bit
* magnitude.
*
* RETURNS:
* Return != 0 for invalid regno.
*
* CALLED FROM:
* fbcmap.c:fb_set_cmap()
*/
static int rivafb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
struct riva_par *par = info->par;
RIVA_HW_INST *chip = &par->riva;
int i;
if (regno >= riva_get_cmap_len(&info->var))
return -EINVAL;
if (info->var.grayscale) {
/* gray = 0.30*R + 0.59*G + 0.11*B */
red = green = blue =
(red * 77 + green * 151 + blue * 28) >> 8;
}
if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
((u32 *) info->pseudo_palette)[regno] =
(regno << info->var.red.offset) |
(regno << info->var.green.offset) |
(regno << info->var.blue.offset);
/*
* The Riva128 2D engine requires color information in
* TrueColor format even if framebuffer is in DirectColor
*/
if (par->riva.Architecture == NV_ARCH_03) {
switch (info->var.bits_per_pixel) {
case 16:
par->palette[regno] = ((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11);
break;
case 32:
par->palette[regno] = ((red & 0xff00) << 8) |
((green & 0xff00)) |
((blue & 0xff00) >> 8);
break;
}
}
}
switch (info->var.bits_per_pixel) {
case 8:
/* "transparent" stuff is completely ignored. */
riva_wclut(chip, regno, red >> 8, green >> 8, blue >> 8);
break;
case 16:
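/*
* In DirectColor the DAC still has 256 slots: with 5-bit components
* each palette index covers 8 consecutive slots, while 6-bit green
* only covers 4, so green is reprogrammed at regno*4 with the red
* and blue read back from the existing entries.
*/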
if (info->var.green.length == 5) {
for (i = 0; i < 8; i++) {
riva_wclut(chip, regno*8+i, red >> 8,
green >> 8, blue >> 8);
}
} else {
u8 r, g, b;
if (regno < 32) {
for (i = 0; i < 8; i++) {
riva_wclut(chip, regno*8+i,
red >> 8, green >> 8,
blue >> 8);
}
}
riva_rclut(chip, regno*4, &r, &g, &b);
for (i = 0; i < 4; i++)
riva_wclut(chip, regno*4+i, r,
green >> 8, b);
}
break;
case 32:
riva_wclut(chip, regno, red >> 8, green >> 8, blue >> 8);
break;
default:
/* do nothing */
break;
}
return 0;
}
/**
* rivafb_fillrect - hardware accelerated color fill function
* @info: pointer to fb_info structure
* @rect: pointer to fb_fillrect structure
*
* DESCRIPTION:
* This function fills up a region of framebuffer memory with a solid
* color with a choice of two different ROP's, copy or invert.
*
* CALLED FROM:
* framebuffer hook
*/
static void rivafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct riva_par *par = info->par;
u_int color, rop = 0;
if ((info->flags & FBINFO_HWACCEL_DISABLED)) {
cfb_fillrect(info, rect);
return;
}
if (info->var.bits_per_pixel == 8)
color = rect->color;
else {
if (par->riva.Architecture != NV_ARCH_03)
color = ((u32 *)info->pseudo_palette)[rect->color];
else
color = par->palette[rect->color];
}
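/* ROP3 codes: 0xCC copies the source, 0x66 XORs it with the
* destination (the same encoding as the SRCCOPY/SRCINVERT raster ops). */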
switch (rect->rop) {
case ROP_XOR:
rop = 0x66;
break;
case ROP_COPY:
default:
rop = 0xCC;
break;
}
riva_set_rop_solid(par, rop);
RIVA_FIFO_FREE(par->riva, Bitmap, 1);
NV_WR32(&par->riva.Bitmap->Color1A, 0, color);
RIVA_FIFO_FREE(par->riva, Bitmap, 2);
NV_WR32(&par->riva.Bitmap->UnclippedRectangle[0].TopLeft, 0,
(rect->dx << 16) | rect->dy);
mb();
NV_WR32(&par->riva.Bitmap->UnclippedRectangle[0].WidthHeight, 0,
(rect->width << 16) | rect->height);
mb();
riva_set_rop_solid(par, 0xcc);
}
/**
* rivafb_copyarea - hardware accelerated blit function
* @info: pointer to fb_info structure
* @region: pointer to fb_copyarea structure
*
* DESCRIPTION:
* This copies an area of pixels from one location to another
*
* CALLED FROM:
* framebuffer hook
*/
static void rivafb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct riva_par *par = info->par;
if ((info->flags & FBINFO_HWACCEL_DISABLED)) {
cfb_copyarea(info, region);
return;
}
RIVA_FIFO_FREE(par->riva, Blt, 3);
NV_WR32(&par->riva.Blt->TopLeftSrc, 0,
(region->sy << 16) | region->sx);
NV_WR32(&par->riva.Blt->TopLeftDst, 0,
(region->dy << 16) | region->dx);
mb();
NV_WR32(&par->riva.Blt->WidthHeight, 0,
(region->height << 16) | region->width);
mb();
}
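/*
* Expand a 16-bit RGB565 color to 32 bits: each component is shifted
* up to the top of its 8-bit slot and the alpha byte is forced to
* 0xFF (opaque).
*/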
static inline void convert_bgcolor_16(u32 *col)
{
*col = ((*col & 0x0000F800) << 8)
| ((*col & 0x00007E0) << 5)
| ((*col & 0x0000001F) << 3)
| 0xFF000000;
mb();
}
/**
* rivafb_imageblit: hardware accelerated color expand function
* @info: pointer to fb_info structure
* @image: pointer to fb_image structure
*
* DESCRIPTION:
* If the source is a monochrome bitmap, the function fills a region
* of framebuffer memory with pixels whose color is determined by the
* bit setting of the bitmap: 1 - foreground, 0 - background.
*
* If the source is not a monochrome bitmap, color expansion is not done.
* In this case, it is channeled to a software function.
*
* CALLED FROM:
* framebuffer hook
*/
static void rivafb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct riva_par *par = info->par;
u32 fgx = 0, bgx = 0, width, tmp;
u8 *cdat = (u8 *) image->data;
volatile u32 __iomem *d;
int i, size;
if ((info->flags & FBINFO_HWACCEL_DISABLED) || image->depth != 1) {
cfb_imageblit(info, image);
return;
}
switch (info->var.bits_per_pixel) {
case 8:
fgx = image->fg_color;
bgx = image->bg_color;
break;
case 16:
case 32:
if (par->riva.Architecture != NV_ARCH_03) {
fgx = ((u32 *)info->pseudo_palette)[image->fg_color];
bgx = ((u32 *)info->pseudo_palette)[image->bg_color];
} else {
fgx = par->palette[image->fg_color];
bgx = par->palette[image->bg_color];
}
if (info->var.green.length == 6)
convert_bgcolor_16(&bgx);
break;
}
RIVA_FIFO_FREE(par->riva, Bitmap, 7);
NV_WR32(&par->riva.Bitmap->ClipE.TopLeft, 0,
(image->dy << 16) | (image->dx & 0xFFFF));
NV_WR32(&par->riva.Bitmap->ClipE.BottomRight, 0,
(((image->dy + image->height) << 16) |
((image->dx + image->width) & 0xffff)));
NV_WR32(&par->riva.Bitmap->Color0E, 0, bgx);
NV_WR32(&par->riva.Bitmap->Color1E, 0, fgx);
NV_WR32(&par->riva.Bitmap->WidthHeightInE, 0,
(image->height << 16) | ((image->width + 31) & ~31));
NV_WR32(&par->riva.Bitmap->WidthHeightOutE, 0,
(image->height << 16) | ((image->width + 31) & ~31));
NV_WR32(&par->riva.Bitmap->PointE, 0,
(image->dy << 16) | (image->dx & 0xFFFF));
d = &par->riva.Bitmap->MonochromeData01E;
width = (image->width + 31)/32;
size = width * image->height;
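/* width is in 32-bit words per scanline and size is the total word
* count; the words are pushed through the FIFO in bursts of at most
* 16 below. */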
while (size >= 16) {
RIVA_FIFO_FREE(par->riva, Bitmap, 16);
for (i = 0; i < 16; i++) {
tmp = *((u32 *)cdat);
cdat = (u8 *)((u32 *)cdat + 1);
reverse_order(&tmp);
NV_WR32(d, i*4, tmp);
}
size -= 16;
}
if (size) {
RIVA_FIFO_FREE(par->riva, Bitmap, size);
for (i = 0; i < size; i++) {
tmp = *((u32 *) cdat);
cdat = (u8 *)((u32 *)cdat + 1);
reverse_order(&tmp);
NV_WR32(d, i*4, tmp);
}
}
}
/**
* rivafb_cursor - hardware cursor function
* @info: pointer to info structure
* @cursor: pointer to fbcursor structure
*
* DESCRIPTION:
* A cursor function that supports displaying a cursor image via hardware.
* Within the kernel, copy and invert rops are supported. If exported
* to user space, only the copy rop will be supported.
*
* CALLED FROM
* framebuffer hook
*/
static int rivafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct riva_par *par = info->par;
u8 data[MAX_CURS * MAX_CURS/8];
int i, set = cursor->set;
u16 fg, bg;
if (cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
return -ENXIO;
par->riva.ShowHideCursor(&par->riva, 0);
if (par->cursor_reset) {
set = FB_CUR_SETALL;
par->cursor_reset = 0;
}
if (set & FB_CUR_SETSIZE)
memset_io(par->riva.CURSOR, 0, MAX_CURS * MAX_CURS * 2);
if (set & FB_CUR_SETPOS) {
u32 xx, yy, temp;
yy = cursor->image.dy - info->var.yoffset;
xx = cursor->image.dx - info->var.xoffset;
temp = xx & 0xFFFF;
temp |= yy << 16;
NV_WR32(par->riva.PRAMDAC, 0x0000300, temp);
}
if (set & (FB_CUR_SETSHAPE | FB_CUR_SETCMAP | FB_CUR_SETIMAGE)) {
u32 bg_idx = cursor->image.bg_color;
u32 fg_idx = cursor->image.fg_color;
u32 s_pitch = (cursor->image.width+7) >> 3;
u32 d_pitch = MAX_CURS/8;
u8 *dat = (u8 *) cursor->image.data;
u8 *msk = (u8 *) cursor->mask;
u8 *src;
src = kmalloc(s_pitch * cursor->image.height, GFP_ATOMIC);
if (src) {
switch (cursor->rop) {
case ROP_XOR:
for (i = 0; i < s_pitch * cursor->image.height; i++)
src[i] = dat[i] ^ msk[i];
break;
case ROP_COPY:
default:
for (i = 0; i < s_pitch * cursor->image.height; i++)
src[i] = dat[i] & msk[i];
break;
}
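/*
* Repack the s_pitch-wide source rows into the fixed MAX_CURS/8-wide
* layout the hardware cursor expects, then build opaque ARGB1555
* colors from the current colormap entries.
*/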
fb_pad_aligned_buffer(data, d_pitch, src, s_pitch,
cursor->image.height);
bg = ((info->cmap.red[bg_idx] & 0xf8) << 7) |
((info->cmap.green[bg_idx] & 0xf8) << 2) |
((info->cmap.blue[bg_idx] & 0xf8) >> 3) |
1 << 15;
fg = ((info->cmap.red[fg_idx] & 0xf8) << 7) |
((info->cmap.green[fg_idx] & 0xf8) << 2) |
((info->cmap.blue[fg_idx] & 0xf8) >> 3) |
1 << 15;
par->riva.LockUnlock(&par->riva, 0);
rivafb_load_cursor_image(par, data, bg, fg,
cursor->image.width,
cursor->image.height);
kfree(src);
}
}
if (cursor->enable)
par->riva.ShowHideCursor(&par->riva, 1);
return 0;
}
static int rivafb_sync(struct fb_info *info)
{
struct riva_par *par = info->par;
wait_for_idle(par);
return 0;
}
/* ------------------------------------------------------------------------- *
*
* initialization helper functions
*
* ------------------------------------------------------------------------- */
/* kernel interface */
static struct fb_ops riva_fb_ops = {
.owner = THIS_MODULE,
.fb_open = rivafb_open,
.fb_release = rivafb_release,
.fb_check_var = rivafb_check_var,
.fb_set_par = rivafb_set_par,
.fb_setcolreg = rivafb_setcolreg,
.fb_pan_display = rivafb_pan_display,
.fb_blank = rivafb_blank,
.fb_fillrect = rivafb_fillrect,
.fb_copyarea = rivafb_copyarea,
.fb_imageblit = rivafb_imageblit,
.fb_cursor = rivafb_cursor,
.fb_sync = rivafb_sync,
};
static int __devinit riva_set_fbinfo(struct fb_info *info)
{
unsigned int cmap_len;
struct riva_par *par = info->par;
NVTRACE_ENTER();
info->flags = FBINFO_DEFAULT
| FBINFO_HWACCEL_XPAN
| FBINFO_HWACCEL_YPAN
| FBINFO_HWACCEL_COPYAREA
| FBINFO_HWACCEL_FILLRECT
| FBINFO_HWACCEL_IMAGEBLIT;
/* Accel seems to not work properly on NV30 yet...*/
if ((par->riva.Architecture == NV_ARCH_30) || noaccel) {
printk(KERN_DEBUG PFX "disabling acceleration\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
}
info->var = rivafb_default_var;
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
info->pseudo_palette = par->pseudo_palette;
cmap_len = riva_get_cmap_len(&info->var);
fb_alloc_cmap(&info->cmap, cmap_len, 0);
info->pixmap.size = 8 * 1024;
info->pixmap.buf_align = 4;
info->pixmap.access_align = 32;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->var.yres_virtual = -1;
NVTRACE_LEAVE();
return (rivafb_check_var(&info->var, info));
}
#ifdef CONFIG_PPC_OF
static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
{
struct riva_par *par = info->par;
struct device_node *dp;
const unsigned char *pedid = NULL;
const unsigned char *disptype = NULL;
static char *propnames[] = {
"DFP,EDID", "LCD,EDID", "EDID", "EDID1", "EDID,B", "EDID,A", NULL };
int i;
NVTRACE_ENTER();
dp = pci_device_to_OF_node(pd);
for (; dp != NULL; dp = dp->child) {
disptype = of_get_property(dp, "display-type", NULL);
if (disptype == NULL)
continue;
if (strncmp(disptype, "LCD", 3) != 0)
continue;
for (i = 0; propnames[i] != NULL; ++i) {
pedid = of_get_property(dp, propnames[i], NULL);
if (pedid != NULL) {
par->EDID = (unsigned char *)pedid;
NVTRACE("LCD found.\n");
return 1;
}
}
}
NVTRACE_LEAVE();
return 0;
}
#endif /* CONFIG_PPC_OF */
#if defined(CONFIG_FB_RIVA_I2C) && !defined(CONFIG_PPC_OF)
static int __devinit riva_get_EDID_i2c(struct fb_info *info)
{
struct riva_par *par = info->par;
struct fb_var_screeninfo var;
int i;
NVTRACE_ENTER();
riva_create_i2c_busses(par);
for (i = 0; i < 3; i++) {
if (!par->chan[i].par)
continue;
riva_probe_i2c_connector(par, i, &par->EDID);
if (par->EDID && !fb_parse_edid(par->EDID, &var)) {
printk(PFX "Found EDID Block from BUS %i\n", i);
break;
}
}
NVTRACE_LEAVE();
return (par->EDID) ? 1 : 0;
}
#endif /* CONFIG_FB_RIVA_I2C */
static void __devinit riva_update_default_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct fb_monspecs *specs = &info->monspecs;
struct fb_videomode modedb;
NVTRACE_ENTER();
/* respect mode options */
if (mode_option) {
fb_find_mode(var, info, mode_option,
specs->modedb, specs->modedb_len,
NULL, 8);
} else if (specs->modedb != NULL) {
/* get first mode in database as fallback */
modedb = specs->modedb[0];
/* get preferred timing */
if (info->monspecs.misc & FB_MISC_1ST_DETAIL) {
int i;
for (i = 0; i < specs->modedb_len; i++) {
if (specs->modedb[i].flag & FB_MODE_IS_FIRST) {
modedb = specs->modedb[i];
break;
}
}
}
var->bits_per_pixel = 8;
riva_update_var(var, &modedb);
}
NVTRACE_LEAVE();
}
static void __devinit riva_get_EDID(struct fb_info *info, struct pci_dev *pdev)
{
NVTRACE_ENTER();
#ifdef CONFIG_PPC_OF
if (!riva_get_EDID_OF(info, pdev))
printk(PFX "could not retrieve EDID from OF\n");
#elif defined(CONFIG_FB_RIVA_I2C)
if (!riva_get_EDID_i2c(info))
printk(PFX "could not retrieve EDID from DDC/I2C\n");
#endif
NVTRACE_LEAVE();
}
static void __devinit riva_get_edidinfo(struct fb_info *info)
{
struct fb_var_screeninfo *var = &rivafb_default_var;
struct riva_par *par = info->par;
fb_edid_to_monspecs(par->EDID, &info->monspecs);
fb_videomode_to_modelist(info->monspecs.modedb, info->monspecs.modedb_len,
&info->modelist);
riva_update_default_var(var, info);
/* if user specified flatpanel, we respect that */
if (info->monspecs.input & FB_DISP_DDI)
par->FlatPanel = 1;
}
/* ------------------------------------------------------------------------- *
*
* PCI bus
*
* ------------------------------------------------------------------------- */
static u32 __devinit riva_get_arch(struct pci_dev *pd)
{
u32 arch = 0;
switch (pd->device & 0x0ff0) {
case 0x0100: /* GeForce 256 */
case 0x0110: /* GeForce2 MX */
case 0x0150: /* GeForce2 */
case 0x0170: /* GeForce4 MX */
case 0x0180: /* GeForce4 MX (8x AGP) */
case 0x01A0: /* nForce */
case 0x01F0: /* nForce2 */
arch = NV_ARCH_10;
break;
case 0x0200: /* GeForce3 */
case 0x0250: /* GeForce4 Ti */
case 0x0280: /* GeForce4 Ti (8x AGP) */
arch = NV_ARCH_20;
break;
case 0x0300: /* GeForceFX 5800 */
case 0x0310: /* GeForceFX 5600 */
case 0x0320: /* GeForceFX 5200 */
case 0x0330: /* GeForceFX 5900 */
case 0x0340: /* GeForceFX 5700 */
arch = NV_ARCH_30;
break;
case 0x0020: /* TNT, TNT2 */
arch = NV_ARCH_04;
break;
case 0x0010: /* Riva128 */
arch = NV_ARCH_03;
break;
default: /* unknown architecture */
break;
}
return arch;
}
static int __devinit rivafb_probe(struct pci_dev *pd,
const struct pci_device_id *ent)
{
struct riva_par *default_par;
struct fb_info *info;
int ret;
NVTRACE_ENTER();
assert(pd != NULL);
info = framebuffer_alloc(sizeof(struct riva_par), &pd->dev);
if (!info) {
printk (KERN_ERR PFX "could not allocate memory\n");
ret = -ENOMEM;
goto err_ret;
}
default_par = info->par;
default_par->pdev = pd;
info->pixmap.addr = kzalloc(8 * 1024, GFP_KERNEL);
if (info->pixmap.addr == NULL) {
ret = -ENOMEM;
goto err_framebuffer_release;
}
ret = pci_enable_device(pd);
if (ret < 0) {
printk(KERN_ERR PFX "cannot enable PCI device\n");
goto err_free_pixmap;
}
ret = pci_request_regions(pd, "rivafb");
if (ret < 0) {
printk(KERN_ERR PFX "cannot request PCI regions\n");
goto err_disable_device;
}
mutex_init(&default_par->open_lock);
default_par->riva.Architecture = riva_get_arch(pd);
default_par->Chipset = (pd->vendor << 16) | pd->device;
printk(KERN_INFO PFX "nVidia device/chipset %X\n",default_par->Chipset);
if(default_par->riva.Architecture == 0) {
printk(KERN_ERR PFX "unknown NV_ARCH\n");
ret=-ENODEV;
goto err_release_region;
}
if(default_par->riva.Architecture == NV_ARCH_10 ||
default_par->riva.Architecture == NV_ARCH_20 ||
default_par->riva.Architecture == NV_ARCH_30) {
sprintf(rivafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4);
} else {
sprintf(rivafb_fix.id, "NV%x", default_par->riva.Architecture);
}
default_par->FlatPanel = flatpanel;
if (flatpanel == 1)
printk(KERN_INFO PFX "flatpanel support enabled\n");
default_par->forceCRTC = forceCRTC;
rivafb_fix.mmio_len = pci_resource_len(pd, 0);
rivafb_fix.smem_len = pci_resource_len(pd, 1);
{
/* enable IO and mem if not already done */
unsigned short cmd;
pci_read_config_word(pd, PCI_COMMAND, &cmd);
cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
pci_write_config_word(pd, PCI_COMMAND, cmd);
}
rivafb_fix.mmio_start = pci_resource_start(pd, 0);
rivafb_fix.smem_start = pci_resource_start(pd, 1);
default_par->ctrl_base = ioremap(rivafb_fix.mmio_start,
rivafb_fix.mmio_len);
if (!default_par->ctrl_base) {
printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
ret = -EIO;
goto err_release_region;
}
switch (default_par->riva.Architecture) {
case NV_ARCH_03:
/* Riva128's PRAMIN is in the "framebuffer" space
* Since these cards were never made with more than 8 megabytes
* we can safely allocate this separately.
*/
default_par->riva.PRAMIN = ioremap(rivafb_fix.smem_start + 0x00C00000, 0x00008000);
if (!default_par->riva.PRAMIN) {
printk(KERN_ERR PFX "cannot ioremap PRAMIN region\n");
ret = -EIO;
goto err_iounmap_ctrl_base;
}
break;
case NV_ARCH_04:
case NV_ARCH_10:
case NV_ARCH_20:
case NV_ARCH_30:
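/* On NV4 and later, PCRTC0 and PRAMIN live inside the already
* mapped MMIO BAR, so no separate ioremap is needed. */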
default_par->riva.PCRTC0 =
(u32 __iomem *)(default_par->ctrl_base + 0x00600000);
default_par->riva.PRAMIN =
(u32 __iomem *)(default_par->ctrl_base + 0x00710000);
break;
}
riva_common_setup(default_par);
if (default_par->riva.Architecture == NV_ARCH_03) {
default_par->riva.PCRTC = default_par->riva.PCRTC0
= default_par->riva.PGRAPH;
}
rivafb_fix.smem_len = riva_get_memlen(default_par) * 1024;
default_par->dclk_max = riva_get_maxdclk(default_par) * 1000;
info->screen_base = ioremap(rivafb_fix.smem_start,
rivafb_fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR PFX "cannot ioremap FB base\n");
ret = -EIO;
goto err_iounmap_pramin;
}
#ifdef CONFIG_MTRR
if (!nomtrr) {
default_par->mtrr.vram = mtrr_add(rivafb_fix.smem_start,
rivafb_fix.smem_len,
MTRR_TYPE_WRCOMB, 1);
if (default_par->mtrr.vram < 0) {
printk(KERN_ERR PFX "unable to setup MTRR\n");
} else {
default_par->mtrr.vram_valid = 1;
/* let there be speed */
printk(KERN_INFO PFX "RIVA MTRR set to ON\n");
}
}
#endif /* CONFIG_MTRR */
info->fbops = &riva_fb_ops;
info->fix = rivafb_fix;
riva_get_EDID(info, pd);
riva_get_edidinfo(info);
ret = riva_set_fbinfo(info);
if (ret < 0) {
printk(KERN_ERR PFX "error setting initial video mode\n");
goto err_iounmap_screen_base;
}
fb_destroy_modedb(info->monspecs.modedb);
info->monspecs.modedb = NULL;
pci_set_drvdata(pd, info);
if (backlight)
riva_bl_init(info->par);
ret = register_framebuffer(info);
if (ret < 0) {
printk(KERN_ERR PFX
"error registering riva framebuffer\n");
goto err_iounmap_screen_base;
}
printk(KERN_INFO PFX
"PCI nVidia %s framebuffer ver %s (%dMB @ 0x%lX)\n",
info->fix.id,
RIVAFB_VERSION,
info->fix.smem_len / (1024 * 1024),
info->fix.smem_start);
NVTRACE_LEAVE();
return 0;
err_iounmap_screen_base:
#ifdef CONFIG_FB_RIVA_I2C
riva_delete_i2c_busses(info->par);
#endif
iounmap(info->screen_base);
err_iounmap_pramin:
if (default_par->riva.Architecture == NV_ARCH_03)
iounmap(default_par->riva.PRAMIN);
err_iounmap_ctrl_base:
iounmap(default_par->ctrl_base);
err_release_region:
pci_release_regions(pd);
err_disable_device:
err_free_pixmap:
kfree(info->pixmap.addr);
err_framebuffer_release:
framebuffer_release(info);
err_ret:
return ret;
}
static void __devexit rivafb_remove(struct pci_dev *pd)
{
struct fb_info *info = pci_get_drvdata(pd);
struct riva_par *par = info->par;
NVTRACE_ENTER();
#ifdef CONFIG_FB_RIVA_I2C
riva_delete_i2c_busses(par);
kfree(par->EDID);
#endif
unregister_framebuffer(info);
riva_bl_exit(info);
#ifdef CONFIG_MTRR
if (par->mtrr.vram_valid)
mtrr_del(par->mtrr.vram, info->fix.smem_start,
info->fix.smem_len);
#endif /* CONFIG_MTRR */
iounmap(par->ctrl_base);
iounmap(info->screen_base);
if (par->riva.Architecture == NV_ARCH_03)
iounmap(par->riva.PRAMIN);
pci_release_regions(pd);
kfree(info->pixmap.addr);
framebuffer_release(info);
pci_set_drvdata(pd, NULL);
NVTRACE_LEAVE();
}
/* ------------------------------------------------------------------------- *
*
* initialization
*
* ------------------------------------------------------------------------- */
#ifndef MODULE
static int __devinit rivafb_setup(char *options)
{
char *this_opt;
NVTRACE_ENTER();
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "forceCRTC", 9)) {
char *p;
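/* option has the form "forceCRTC:<0|1>": skip the name, then
* the separator, before reading the digit */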
p = this_opt + 9;
if (!*p || !*(++p)) continue;
forceCRTC = *p - '0';
if (forceCRTC < 0 || forceCRTC > 1)
forceCRTC = -1;
} else if (!strncmp(this_opt, "flatpanel", 9)) {
flatpanel = 1;
} else if (!strncmp(this_opt, "backlight:", 10)) {
backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = 1;
#endif
} else if (!strncmp(this_opt, "strictmode", 10)) {
strictmode = 1;
} else if (!strncmp(this_opt, "noaccel", 7)) {
noaccel = 1;
} else
mode_option = this_opt;
}
NVTRACE_LEAVE();
return 0;
}
#endif /* !MODULE */
static struct pci_driver rivafb_driver = {
.name = "rivafb",
.id_table = rivafb_pci_tbl,
.probe = rivafb_probe,
.remove = __devexit_p(rivafb_remove),
};
/* ------------------------------------------------------------------------- *
*
* modularization
*
* ------------------------------------------------------------------------- */
static int __devinit rivafb_init(void)
{
#ifndef MODULE
char *option = NULL;
if (fb_get_options("rivafb", &option))
return -ENODEV;
rivafb_setup(option);
#endif
return pci_register_driver(&rivafb_driver);
}
module_init(rivafb_init);
static void __exit rivafb_exit(void)
{
pci_unregister_driver(&rivafb_driver);
}
module_exit(rivafb_exit);
module_param(noaccel, bool, 0);
MODULE_PARM_DESC(noaccel, "bool: disable acceleration");
module_param(flatpanel, int, 0);
MODULE_PARM_DESC(flatpanel, "Enables experimental flat panel support for some chipsets. (0 or 1=enabled) (default=autodetect)");
module_param(forceCRTC, int, 0);
MODULE_PARM_DESC(forceCRTC, "Forces usage of a particular CRTC in case autodetection fails. (0 or 1) (default=autodetect)");
#ifdef CONFIG_MTRR
module_param(nomtrr, bool, 0);
MODULE_PARM_DESC(nomtrr, "Disables MTRR support (0 or 1=disabled) (default=0)");
#endif
module_param(strictmode, bool, 0);
MODULE_PARM_DESC(strictmode, "Only use video modes from EDID");
MODULE_AUTHOR("Ani Joshi, maintainer");
MODULE_DESCRIPTION("Framebuffer driver for nVidia Riva 128, TNT, TNT2, and the GeForce series");
MODULE_LICENSE("GPL");
| gpl-2.0 |
tako0910/android_kernel_htc_msm8960 | drivers/video/riva/fbdev.c | 4993 | 59750 | /*
* linux/drivers/video/riva/fbdev.c - nVidia RIVA 128/TNT/TNT2 fb driver
*
* Maintained by Ani Joshi <ajoshi@shell.unixbox.com>
*
* Copyright 1999-2000 Jeff Garzik
*
* Contributors:
*
* Ani Joshi: Lots of debugging and cleanup work, really helped
* get the driver going
*
* Ferenc Bakonyi: Bug fixes, cleanup, modularization
*
* Jindrich Makovicka: Accel code help, hw cursor, mtrr
*
* Paul Richards: Bug fixes, updates
*
* Initial template from skeletonfb.c, created 28 Dec 1997 by Geert Uytterhoeven
* Includes riva_hw.c from nVidia, see copyright below.
* KGI code provided the basis for state storage, init, and mode switching.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* Known bugs and issues:
* restoring text mode fails
* doublescan modes are broken
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/backlight.h>
#include <linux/bitrev.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#ifdef CONFIG_PPC_OF
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/machdep.h>
#include <asm/backlight.h>
#endif
#include "rivafb.h"
#include "nvreg.h"
/* version number of this driver */
#define RIVAFB_VERSION "0.9.5b"
/* ------------------------------------------------------------------------- *
*
* various helpful macros and constants
*
* ------------------------------------------------------------------------- */
#ifdef CONFIG_FB_RIVA_DEBUG
#define NVTRACE printk
#else
#define NVTRACE if(0) printk
#endif
#define NVTRACE_ENTER(...) NVTRACE("%s START\n", __func__)
#define NVTRACE_LEAVE(...) NVTRACE("%s END\n", __func__)
#ifdef CONFIG_FB_RIVA_DEBUG
#define assert(expr) \
if(!(expr)) { \
printk( "Assertion failed! %s,%s,%s,line=%d\n",\
#expr,__FILE__,__func__,__LINE__); \
BUG(); \
}
#else
#define assert(expr)
#endif
#define PFX "rivafb: "
/* macro that allows you to set overflow bits */
#define SetBitField(value,from,to) SetBF(to,GetBF(value,from))
#define SetBit(n) (1<<(n))
#define Set8Bits(value) ((value)&0xff)
/* HW cursor parameters */
#define MAX_CURS 32
/* ------------------------------------------------------------------------- *
*
* prototypes
*
* ------------------------------------------------------------------------- */
static int rivafb_blank(int blank, struct fb_info *info);
/* ------------------------------------------------------------------------- *
*
* card identification
*
* ------------------------------------------------------------------------- */
static struct pci_device_id rivafb_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA_SGS, PCI_DEVICE_ID_NVIDIA_SGS_RIVA128,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TNT,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TNT2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_UTNT2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_VTNT2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_UVTNT2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_ITNT2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
// NF2/IGP version, GeForce 4 MX, NV18
{ PCI_VENDOR_ID_NVIDIA, 0x01f0,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_200,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_IGEFORCE2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3_1,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_DDC,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, rivafb_pci_tbl);
/* ------------------------------------------------------------------------- *
*
* global variables
*
* ------------------------------------------------------------------------- */
/* command line data, set in rivafb_setup() */
static int flatpanel __devinitdata = -1; /* Autodetect later */
static int forceCRTC __devinitdata = -1;
static bool noaccel __devinitdata = 0;
#ifdef CONFIG_MTRR
static bool nomtrr __devinitdata = 0;
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight __devinitdata = 1;
#else
static int backlight __devinitdata = 0;
#endif
static char *mode_option __devinitdata = NULL;
static bool strictmode = 0;
static struct fb_fix_screeninfo __devinitdata rivafb_fix = {
.type = FB_TYPE_PACKED_PIXELS,
.xpanstep = 1,
.ypanstep = 1,
};
static struct fb_var_screeninfo __devinitdata rivafb_default_var = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
.yres_virtual = 480,
.bits_per_pixel = 8,
.red = {0, 8, 0},
.green = {0, 8, 0},
.blue = {0, 8, 0},
.transp = {0, 0, 0},
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.pixclock = 39721,
.left_margin = 40,
.right_margin = 24,
.upper_margin = 32,
.lower_margin = 11,
.hsync_len = 96,
.vsync_len = 2,
.vmode = FB_VMODE_NONINTERLACED
};
/* from GGI */
static const struct riva_regs reg_template = {
{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* ATTR */
0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x41, 0x01, 0x0F, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* CRT */
0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE3, /* 0x10 */
0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20 */
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, /* 0x40 */
},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F, /* GRA */
0xFF},
{0x03, 0x01, 0x0F, 0x00, 0x0E}, /* SEQ */
0xEB /* MISC */
};
/*
* Backlight control
*/
#ifdef CONFIG_FB_RIVA_BACKLIGHT
/* We do not have any information about which values are allowed, so
* we use conservative values.
*/
#define MIN_LEVEL 0x158
#define MAX_LEVEL 0x534
#define LEVEL_STEP ((MAX_LEVEL - MIN_LEVEL) / FB_BACKLIGHT_MAX)
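/* bl_curve entries run from 0 to FB_BACKLIGHT_MAX, so LEVEL_STEP maps
* that range linearly onto [MIN_LEVEL, MAX_LEVEL]. */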
static int riva_bl_get_level_brightness(struct riva_par *par,
int level)
{
struct fb_info *info = pci_get_drvdata(par->pdev);
int nlevel;
/* Get and convert the value */
/* No locking on bl_curve since accessing a single value */
nlevel = MIN_LEVEL + info->bl_curve[level] * LEVEL_STEP;
if (nlevel < 0)
nlevel = 0;
else if (nlevel < MIN_LEVEL)
nlevel = MIN_LEVEL;
else if (nlevel > MAX_LEVEL)
nlevel = MAX_LEVEL;
return nlevel;
}
static int riva_bl_update_status(struct backlight_device *bd)
{
struct riva_par *par = bl_get_data(bd);
U032 tmp_pcrt, tmp_pmc;
int level;
if (bd->props.power != FB_BLANK_UNBLANK ||
bd->props.fb_blank != FB_BLANK_UNBLANK)
level = 0;
else
level = bd->props.brightness;
tmp_pmc = NV_RD32(par->riva.PMC, 0x10F0) & 0x0000FFFF;
tmp_pcrt = NV_RD32(par->riva.PCRTC0, 0x081C) & 0xFFFFFFFC;
if (level > 0) {
tmp_pcrt |= 0x1;
tmp_pmc |= (1 << 31); /* backlight bit */
tmp_pmc |= riva_bl_get_level_brightness(par, level) << 16; /* level */
}
NV_WR32(par->riva.PCRTC0, 0x081C, tmp_pcrt);
NV_WR32(par->riva.PMC, 0x10F0, tmp_pmc);
return 0;
}
static int riva_bl_get_brightness(struct backlight_device *bd)
{
return bd->props.brightness;
}
static const struct backlight_ops riva_bl_ops = {
.get_brightness = riva_bl_get_brightness,
.update_status = riva_bl_update_status,
};
static void riva_bl_init(struct riva_par *par)
{
struct backlight_properties props;
struct fb_info *info = pci_get_drvdata(par->pdev);
struct backlight_device *bd;
char name[12];
if (!par->FlatPanel)
return;
#ifdef CONFIG_PMAC_BACKLIGHT
if (!machine_is(powermac) ||
!pmac_has_backlight_type("mnca"))
return;
#endif
snprintf(name, sizeof(name), "rivabl%d", info->node);
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd = backlight_device_register(name, info->dev, par, &riva_bl_ops,
&props);
if (IS_ERR(bd)) {
info->bl_dev = NULL;
printk(KERN_WARNING "riva: Backlight registration failed\n");
return;
}
info->bl_dev = bd;
fb_bl_default_curve(info, 0,
MIN_LEVEL * FB_BACKLIGHT_MAX / MAX_LEVEL,
FB_BACKLIGHT_MAX);
bd->props.brightness = bd->props.max_brightness;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
printk("riva: Backlight initialized (%s)\n", name);
return;
}
static void riva_bl_exit(struct fb_info *info)
{
struct backlight_device *bd = info->bl_dev;
backlight_device_unregister(bd);
printk("riva: Backlight unloaded\n");
}
#else
static inline void riva_bl_init(struct riva_par *par) {}
static inline void riva_bl_exit(struct fb_info *info) {}
#endif /* CONFIG_FB_RIVA_BACKLIGHT */
/* ------------------------------------------------------------------------- *
*
* MMIO access macros
*
* ------------------------------------------------------------------------- */
static inline void CRTCout(struct riva_par *par, unsigned char index,
unsigned char val)
{
VGA_WR08(par->riva.PCIO, 0x3d4, index);
VGA_WR08(par->riva.PCIO, 0x3d5, val);
}
static inline unsigned char CRTCin(struct riva_par *par,
unsigned char index)
{
VGA_WR08(par->riva.PCIO, 0x3d4, index);
return (VGA_RD08(par->riva.PCIO, 0x3d5));
}
static inline void GRAout(struct riva_par *par, unsigned char index,
unsigned char val)
{
VGA_WR08(par->riva.PVIO, 0x3ce, index);
VGA_WR08(par->riva.PVIO, 0x3cf, val);
}
static inline unsigned char GRAin(struct riva_par *par,
unsigned char index)
{
VGA_WR08(par->riva.PVIO, 0x3ce, index);
return (VGA_RD08(par->riva.PVIO, 0x3cf));
}
static inline void SEQout(struct riva_par *par, unsigned char index,
unsigned char val)
{
VGA_WR08(par->riva.PVIO, 0x3c4, index);
VGA_WR08(par->riva.PVIO, 0x3c5, val);
}
static inline unsigned char SEQin(struct riva_par *par,
unsigned char index)
{
VGA_WR08(par->riva.PVIO, 0x3c4, index);
return (VGA_RD08(par->riva.PVIO, 0x3c5));
}
static inline void ATTRout(struct riva_par *par, unsigned char index,
unsigned char val)
{
VGA_WR08(par->riva.PCIO, 0x3c0, index);
VGA_WR08(par->riva.PCIO, 0x3c0, val);
}
static inline unsigned char ATTRin(struct riva_par *par,
unsigned char index)
{
VGA_WR08(par->riva.PCIO, 0x3c0, index);
return (VGA_RD08(par->riva.PCIO, 0x3c1));
}
static inline void MISCout(struct riva_par *par, unsigned char val)
{
VGA_WR08(par->riva.PVIO, 0x3c2, val);
}
static inline unsigned char MISCin(struct riva_par *par)
{
return (VGA_RD08(par->riva.PVIO, 0x3cc));
}
static inline void reverse_order(u32 *l)
{
u8 *a = (u8 *)l;
a[0] = bitrev8(a[0]);
a[1] = bitrev8(a[1]);
a[2] = bitrev8(a[2]);
a[3] = bitrev8(a[3]);
}
/* ------------------------------------------------------------------------- *
*
* cursor stuff
*
* ------------------------------------------------------------------------- */
/**
* rivafb_load_cursor_image - load cursor image to hardware
* @par: pointer to private data
* @data8: address of monochrome bitmap (1 = foreground color, 0 = background)
* @bg: background color (ARGB1555) - alpha bit determines opacity
* @fg: foreground color (ARGB1555)
* @w: width of cursor image in pixels
* @h: height of cursor image in scanlines
*
* DESCRIPTION:
* Loads a cursor image based on a monochrome source bitmap. Each image
* bit determines the color of one pixel: 0 for background, 1 for
* foreground. Only the affected region (as determined by the @w and @h
* parameters) will be updated.
*
* CALLED FROM:
* rivafb_cursor()
*/
static void rivafb_load_cursor_image(struct riva_par *par, u8 *data8,
u16 bg, u16 fg, u32 w, u32 h)
{
int i, j, k = 0;
u32 b, tmp;
u32 *data = (u32 *)data8;
bg = le16_to_cpu(bg);
fg = le16_to_cpu(fg);
w = (w + 1) & ~1;
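/*
* Each 32-bit write below packs two ARGB1555 pixels, which is why the
* width was just rounded up to an even pixel count.
*/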
for (i = 0; i < h; i++) {
b = *data++;
reverse_order(&b);
for (j = 0; j < w/2; j++) {
tmp = 0;
#if defined (__BIG_ENDIAN)
tmp = (b & (1 << 31)) ? fg << 16 : bg << 16;
b <<= 1;
tmp |= (b & (1 << 31)) ? fg : bg;
b <<= 1;
#else
tmp = (b & 1) ? fg : bg;
b >>= 1;
tmp |= (b & 1) ? fg << 16 : bg << 16;
b >>= 1;
#endif
writel(tmp, &par->riva.CURSOR[k++]);
}
k += (MAX_CURS - w)/2;
}
}
/* ------------------------------------------------------------------------- *
*
* general utility functions
*
* ------------------------------------------------------------------------- */
/**
* riva_wclut - set CLUT entry
* @chip: pointer to RIVA_HW_INST object
* @regnum: register number
* @red: red component
* @green: green component
* @blue: blue component
*
* DESCRIPTION:
* Sets color register @regnum.
*
* CALLED FROM:
* rivafb_setcolreg()
*/
static void riva_wclut(RIVA_HW_INST *chip,
unsigned char regnum, unsigned char red,
unsigned char green, unsigned char blue)
{
VGA_WR08(chip->PDIO, 0x3c8, regnum);
VGA_WR08(chip->PDIO, 0x3c9, red);
VGA_WR08(chip->PDIO, 0x3c9, green);
VGA_WR08(chip->PDIO, 0x3c9, blue);
}
/**
* riva_rclut - read from CLUT register
* @chip: pointer to RIVA_HW_INST object
* @regnum: register number
* @red: red component
* @green: green component
* @blue: blue component
*
* DESCRIPTION:
* Reads red, green, and blue from color register @regnum.
*
* CALLED FROM:
* rivafb_setcolreg()
*/
static void riva_rclut(RIVA_HW_INST *chip,
unsigned char regnum, unsigned char *red,
unsigned char *green, unsigned char *blue)
{
VGA_WR08(chip->PDIO, 0x3c7, regnum);
*red = VGA_RD08(chip->PDIO, 0x3c9);
*green = VGA_RD08(chip->PDIO, 0x3c9);
*blue = VGA_RD08(chip->PDIO, 0x3c9);
}
/**
* riva_save_state - saves current chip state
* @par: pointer to riva_par object containing info for current riva board
* @regs: pointer to riva_regs object
*
* DESCRIPTION:
* Saves current chip state to @regs.
*
* CALLED FROM:
* rivafb_probe()
*/
/* from GGI */
static void riva_save_state(struct riva_par *par, struct riva_regs *regs)
{
int i;
NVTRACE_ENTER();
par->riva.LockUnlock(&par->riva, 0);
par->riva.UnloadStateExt(&par->riva, &regs->ext);
regs->misc_output = MISCin(par);
for (i = 0; i < NUM_CRT_REGS; i++)
regs->crtc[i] = CRTCin(par, i);
for (i = 0; i < NUM_ATC_REGS; i++)
regs->attr[i] = ATTRin(par, i);
for (i = 0; i < NUM_GRC_REGS; i++)
regs->gra[i] = GRAin(par, i);
for (i = 0; i < NUM_SEQ_REGS; i++)
regs->seq[i] = SEQin(par, i);
NVTRACE_LEAVE();
}
/**
* riva_load_state - loads current chip state
* @par: pointer to riva_par object containing info for current riva board
* @regs: pointer to riva_regs object
*
* DESCRIPTION:
* Loads chip state from @regs.
*
* CALLED FROM:
* riva_load_video_mode()
* rivafb_probe()
* rivafb_remove()
*/
/* from GGI */
static void riva_load_state(struct riva_par *par, struct riva_regs *regs)
{
RIVA_HW_STATE *state = &regs->ext;
int i;
NVTRACE_ENTER();
CRTCout(par, 0x11, 0x00);
par->riva.LockUnlock(&par->riva, 0);
par->riva.LoadStateExt(&par->riva, state);
MISCout(par, regs->misc_output);
for (i = 0; i < NUM_CRT_REGS; i++) {
switch (i) {
case 0x19:
case 0x20 ... 0x40:
break;
default:
CRTCout(par, i, regs->crtc[i]);
}
}
for (i = 0; i < NUM_ATC_REGS; i++)
ATTRout(par, i, regs->attr[i]);
for (i = 0; i < NUM_GRC_REGS; i++)
GRAout(par, i, regs->gra[i]);
for (i = 0; i < NUM_SEQ_REGS; i++)
SEQout(par, i, regs->seq[i]);
NVTRACE_LEAVE();
}
/**
* riva_load_video_mode - calculate timings
* @info: pointer to fb_info object containing info for current riva board
*
* DESCRIPTION:
* Calculate some timings and then send them off to riva_load_state().
*
* CALLED FROM:
* rivafb_set_par()
*/
static int riva_load_video_mode(struct fb_info *info)
{
int bpp, width, hDisplaySize, hDisplay, hStart,
hEnd, hTotal, height, vDisplay, vStart, vEnd, vTotal, dotClock;
int hBlankStart, hBlankEnd, vBlankStart, vBlankEnd;
int rc;
struct riva_par *par = info->par;
struct riva_regs newmode;
NVTRACE_ENTER();
/* time to calculate */
rivafb_blank(FB_BLANK_NORMAL, info);
bpp = info->var.bits_per_pixel;
if (bpp == 16 && info->var.green.length == 5)
bpp = 15;
width = info->var.xres_virtual;
hDisplaySize = info->var.xres;
hDisplay = (hDisplaySize / 8) - 1;
hStart = (hDisplaySize + info->var.right_margin) / 8 - 1;
hEnd = (hDisplaySize + info->var.right_margin +
info->var.hsync_len) / 8 - 1;
hTotal = (hDisplaySize + info->var.right_margin +
info->var.hsync_len + info->var.left_margin) / 8 - 5;
hBlankStart = hDisplay;
hBlankEnd = hTotal + 4;
height = info->var.yres_virtual;
vDisplay = info->var.yres - 1;
vStart = info->var.yres + info->var.lower_margin - 1;
vEnd = info->var.yres + info->var.lower_margin +
info->var.vsync_len - 1;
vTotal = info->var.yres + info->var.lower_margin +
info->var.vsync_len + info->var.upper_margin + 2;
vBlankStart = vDisplay;
vBlankEnd = vTotal + 1;
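/* var.pixclock is the pixel period in picoseconds, so this yields kHz */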
dotClock = 1000000000 / info->var.pixclock;
memcpy(&newmode, &reg_template, sizeof(struct riva_regs));
if ((info->var.vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED)
vTotal |= 1;
if (par->FlatPanel) {
vStart = vTotal - 3;
vEnd = vTotal - 2;
vBlankStart = vStart;
hStart = hTotal - 3;
hEnd = hTotal - 2;
hBlankEnd = hTotal + 4;
}
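/*
* SetBitField(v, a:b, c:d) copies bit range a..b of v into bits c..d of
* the result; the VGA CRTC scatters each wide timing value across
* several narrow register fields this way.
*/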
newmode.crtc[0x0] = Set8Bits (hTotal);
newmode.crtc[0x1] = Set8Bits (hDisplay);
newmode.crtc[0x2] = Set8Bits (hBlankStart);
newmode.crtc[0x3] = SetBitField (hBlankEnd, 4: 0, 4:0) | SetBit (7);
newmode.crtc[0x4] = Set8Bits (hStart);
newmode.crtc[0x5] = SetBitField (hBlankEnd, 5: 5, 7:7)
| SetBitField (hEnd, 4: 0, 4:0);
newmode.crtc[0x6] = SetBitField (vTotal, 7: 0, 7:0);
newmode.crtc[0x7] = SetBitField (vTotal, 8: 8, 0:0)
| SetBitField (vDisplay, 8: 8, 1:1)
| SetBitField (vStart, 8: 8, 2:2)
| SetBitField (vBlankStart, 8: 8, 3:3)
| SetBit (4)
| SetBitField (vTotal, 9: 9, 5:5)
| SetBitField (vDisplay, 9: 9, 6:6)
| SetBitField (vStart, 9: 9, 7:7);
newmode.crtc[0x9] = SetBitField (vBlankStart, 9: 9, 5:5)
| SetBit (6);
newmode.crtc[0x10] = Set8Bits (vStart);
newmode.crtc[0x11] = SetBitField (vEnd, 3: 0, 3:0)
| SetBit (5);
newmode.crtc[0x12] = Set8Bits (vDisplay);
newmode.crtc[0x13] = (width / 8) * ((bpp + 1) / 8);
newmode.crtc[0x15] = Set8Bits (vBlankStart);
newmode.crtc[0x16] = Set8Bits (vBlankEnd);
newmode.ext.screen = SetBitField(hBlankEnd,6:6,4:4)
| SetBitField(vBlankStart,10:10,3:3)
| SetBitField(vStart,10:10,2:2)
| SetBitField(vDisplay,10:10,1:1)
| SetBitField(vTotal,10:10,0:0);
newmode.ext.horiz = SetBitField(hTotal,8:8,0:0)
| SetBitField(hDisplay,8:8,1:1)
| SetBitField(hBlankStart,8:8,2:2)
| SetBitField(hStart,8:8,3:3);
newmode.ext.extra = SetBitField(vTotal,11:11,0:0)
| SetBitField(vDisplay,11:11,2:2)
| SetBitField(vStart,11:11,4:4)
| SetBitField(vBlankStart,11:11,6:6);
if ((info->var.vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
int tmp = (hTotal >> 1) & ~1;
newmode.ext.interlace = Set8Bits(tmp);
newmode.ext.horiz |= SetBitField(tmp, 8:8,4:4);
} else
newmode.ext.interlace = 0xff; /* interlace off */
if (par->riva.Architecture >= NV_ARCH_10)
par->riva.CURSOR = (U032 __iomem *)(info->screen_base + par->riva.CursorStart);
if (info->var.sync & FB_SYNC_HOR_HIGH_ACT)
newmode.misc_output &= ~0x40;
else
newmode.misc_output |= 0x40;
if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
newmode.misc_output &= ~0x80;
else
newmode.misc_output |= 0x80;
rc = CalcStateExt(&par->riva, &newmode.ext, bpp, width,
hDisplaySize, height, dotClock);
if (rc)
goto out;
newmode.ext.scale = NV_RD32(par->riva.PRAMDAC, 0x00000848) &
0xfff000ff;
if (par->FlatPanel == 1) {
newmode.ext.pixel |= (1 << 7);
newmode.ext.scale |= (1 << 8);
}
if (par->SecondCRTC) {
newmode.ext.head = NV_RD32(par->riva.PCRTC0, 0x00000860) &
~0x00001000;
newmode.ext.head2 = NV_RD32(par->riva.PCRTC0, 0x00002860) |
0x00001000;
newmode.ext.crtcOwner = 3;
newmode.ext.pllsel |= 0x20000800;
newmode.ext.vpll2 = newmode.ext.vpll;
} else if (par->riva.twoHeads) {
newmode.ext.head = NV_RD32(par->riva.PCRTC0, 0x00000860) |
0x00001000;
newmode.ext.head2 = NV_RD32(par->riva.PCRTC0, 0x00002860) &
~0x00001000;
newmode.ext.crtcOwner = 0;
newmode.ext.vpll2 = NV_RD32(par->riva.PRAMDAC0, 0x00000520);
}
if (par->FlatPanel == 1) {
newmode.ext.pixel |= (1 << 7);
newmode.ext.scale |= (1 << 8);
}
newmode.ext.cursorConfig = 0x02000100;
par->current_state = newmode;
riva_load_state(par, &par->current_state);
par->riva.LockUnlock(&par->riva, 0); /* important for HW cursor */
out:
rivafb_blank(FB_BLANK_UNBLANK, info);
NVTRACE_LEAVE();
return rc;
}
static void riva_update_var(struct fb_var_screeninfo *var,
const struct fb_videomode *modedb)
{
NVTRACE_ENTER();
var->xres = var->xres_virtual = modedb->xres;
var->yres = modedb->yres;
if (var->yres_virtual < var->yres)
var->yres_virtual = var->yres;
var->xoffset = var->yoffset = 0;
var->pixclock = modedb->pixclock;
var->left_margin = modedb->left_margin;
var->right_margin = modedb->right_margin;
var->upper_margin = modedb->upper_margin;
var->lower_margin = modedb->lower_margin;
var->hsync_len = modedb->hsync_len;
var->vsync_len = modedb->vsync_len;
var->sync = modedb->sync;
var->vmode = modedb->vmode;
NVTRACE_LEAVE();
}
/**
* rivafb_do_maximize - choose a virtual resolution that fits video memory
* @info: pointer to fb_info object containing info for current riva board
* @var: standard kernel fb changeable data
* @nom: pixels-to-bytes translation numerator (bytes per pixel)
* @den: pixels-to-bytes translation denominator
*
* DESCRIPTION:
* Selects or validates the virtual resolution in @var so that the
* resulting framebuffer fits into the available video memory.
*
* RETURNS:
* -EINVAL on failure, 0 on success
*
*
* CALLED FROM:
* rivafb_check_var()
*/
static int rivafb_do_maximize(struct fb_info *info,
struct fb_var_screeninfo *var,
int nom, int den)
{
static struct {
int xres, yres;
} modes[] = {
{1600, 1280},
{1280, 1024},
{1024, 768},
{800, 600},
{640, 480},
{-1, -1}
};
int i;
NVTRACE_ENTER();
/* use highest possible virtual resolution */
if (var->xres_virtual == -1 && var->yres_virtual == -1) {
printk(KERN_WARNING PFX
"using maximum available virtual resolution\n");
for (i = 0; modes[i].xres != -1; i++) {
if (modes[i].xres * nom / den * modes[i].yres <
info->fix.smem_len)
break;
}
if (modes[i].xres == -1) {
printk(KERN_ERR PFX
"could not find a virtual resolution that fits into video memory!!\n");
NVTRACE("EXIT - EINVAL error\n");
return -EINVAL;
}
var->xres_virtual = modes[i].xres;
var->yres_virtual = modes[i].yres;
printk(KERN_INFO PFX
"virtual resolution set to maximum of %dx%d\n",
var->xres_virtual, var->yres_virtual);
} else if (var->xres_virtual == -1) {
var->xres_virtual = (info->fix.smem_len * den /
(nom * var->yres_virtual)) & ~15;
printk(KERN_WARNING PFX
"setting virtual X resolution to %d\n", var->xres_virtual);
} else if (var->yres_virtual == -1) {
var->xres_virtual = (var->xres_virtual + 15) & ~15;
var->yres_virtual = info->fix.smem_len * den /
(nom * var->xres_virtual);
printk(KERN_WARNING PFX
"setting virtual Y resolution to %d\n", var->yres_virtual);
} else {
var->xres_virtual = (var->xres_virtual + 15) & ~15;
if (var->xres_virtual * nom / den * var->yres_virtual > info->fix.smem_len) {
printk(KERN_ERR PFX
"mode %dx%dx%d rejected...resolution too high to fit into video memory!\n",
var->xres, var->yres, var->bits_per_pixel);
NVTRACE("EXIT - EINVAL error\n");
return -EINVAL;
}
}
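/* xres_virtual * nom / den is the line length in bytes; cap it below 8192 */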
if (var->xres_virtual * nom / den >= 8192) {
printk(KERN_WARNING PFX
"virtual X resolution (%d) is too high, lowering to %d\n",
var->xres_virtual, 8192 * den / nom - 16);
var->xres_virtual = 8192 * den / nom - 16;
}
if (var->xres_virtual < var->xres) {
printk(KERN_ERR PFX
"virtual X resolution (%d) is smaller than real\n", var->xres_virtual);
return -EINVAL;
}
if (var->yres_virtual < var->yres) {
printk(KERN_ERR PFX
"virtual Y resolution (%d) is smaller than real\n", var->yres_virtual);
return -EINVAL;
}
if (var->yres_virtual > 0x7fff/nom)
var->yres_virtual = 0x7fff/nom;
if (var->xres_virtual > 0x7fff/nom)
var->xres_virtual = 0x7fff/nom;
NVTRACE_LEAVE();
return 0;
}
static void
riva_set_pattern(struct riva_par *par, int clr0, int clr1, int pat0, int pat1)
{
RIVA_FIFO_FREE(par->riva, Patt, 4);
NV_WR32(&par->riva.Patt->Color0, 0, clr0);
NV_WR32(&par->riva.Patt->Color1, 0, clr1);
NV_WR32(par->riva.Patt->Monochrome, 0, pat0);
NV_WR32(par->riva.Patt->Monochrome, 4, pat1);
}
/* acceleration routines */
static inline void wait_for_idle(struct riva_par *par)
{
while (par->riva.Busy(&par->riva));
}
/*
* Set ROP. Translate X rop into ROP3. Internal routine.
*/
static void
riva_set_rop_solid(struct riva_par *par, int rop)
{
riva_set_pattern(par, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
RIVA_FIFO_FREE(par->riva, Rop, 1);
NV_WR32(&par->riva.Rop->Rop3, 0, rop);
}
static void riva_setup_accel(struct fb_info *info)
{
struct riva_par *par = info->par;
RIVA_FIFO_FREE(par->riva, Clip, 2);
NV_WR32(&par->riva.Clip->TopLeft, 0, 0x0);
NV_WR32(&par->riva.Clip->WidthHeight, 0,
(info->var.xres_virtual & 0xffff) |
(info->var.yres_virtual << 16));
riva_set_rop_solid(par, 0xcc);
wait_for_idle(par);
}
/**
* riva_get_cmap_len - query current color map length
* @var: standard kernel fb changeable data
*
* DESCRIPTION:
* Get current color map length.
*
* RETURNS:
* Length of color map
*
* CALLED FROM:
* rivafb_setcolreg()
*/
static int riva_get_cmap_len(const struct fb_var_screeninfo *var)
{
int rc = 256; /* reasonable default */
switch (var->green.length) {
case 8:
rc = 256; /* 256 entries (2^8), 8 bpp and RGB8888 */
break;
case 5:
rc = 32; /* 32 entries (2^5), 16 bpp, RGB555 */
break;
case 6:
rc = 64; /* 64 entries (2^6), 16 bpp, RGB565 */
break;
default:
/* should not occur */
break;
}
return rc;
}
/* ------------------------------------------------------------------------- *
*
* framebuffer operations
*
* ------------------------------------------------------------------------- */
static int rivafb_open(struct fb_info *info, int user)
{
struct riva_par *par = info->par;
NVTRACE_ENTER();
mutex_lock(&par->open_lock);
if (!par->ref_count) {
#ifdef CONFIG_X86
memset(&par->state, 0, sizeof(struct vgastate));
par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS;
/* save the DAC for Riva128 */
if (par->riva.Architecture == NV_ARCH_03)
par->state.flags |= VGA_SAVE_CMAP;
save_vga(&par->state);
#endif
/* vgaHWunlock() + riva unlock (0x7F) */
CRTCout(par, 0x11, 0xFF);
par->riva.LockUnlock(&par->riva, 0);
riva_save_state(par, &par->initial_state);
}
par->ref_count++;
mutex_unlock(&par->open_lock);
NVTRACE_LEAVE();
return 0;
}
static int rivafb_release(struct fb_info *info, int user)
{
struct riva_par *par = info->par;
NVTRACE_ENTER();
mutex_lock(&par->open_lock);
if (!par->ref_count) {
mutex_unlock(&par->open_lock);
return -EINVAL;
}
if (par->ref_count == 1) {
par->riva.LockUnlock(&par->riva, 0);
par->riva.LoadStateExt(&par->riva, &par->initial_state.ext);
riva_load_state(par, &par->initial_state);
#ifdef CONFIG_X86
restore_vga(&par->state);
#endif
par->riva.LockUnlock(&par->riva, 1);
}
par->ref_count--;
mutex_unlock(&par->open_lock);
NVTRACE_LEAVE();
return 0;
}
static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
const struct fb_videomode *mode;
struct riva_par *par = info->par;
int nom, den; /* translating from pixels->bytes */
int mode_valid = 0;
NVTRACE_ENTER();
switch (var->bits_per_pixel) {
case 1 ... 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
var->red.length = var->green.length = var->blue.length = 8;
var->bits_per_pixel = 8;
nom = den = 1;
break;
case 9 ... 15:
var->green.length = 5;
/* fall through */
case 16:
var->bits_per_pixel = 16;
/* The Riva128 supports RGB555 only */
if (par->riva.Architecture == NV_ARCH_03)
var->green.length = 5;
if (var->green.length == 5) {
/* 0rrrrrgg gggbbbbb */
var->red.offset = 10;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 5;
var->blue.length = 5;
} else {
/* rrrrrggg gggbbbbb */
var->red.offset = 11;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 6;
var->blue.length = 5;
}
nom = 2;
den = 1;
break;
case 17 ... 32:
var->red.length = var->green.length = var->blue.length = 8;
var->bits_per_pixel = 32;
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
nom = 4;
den = 1;
break;
default:
printk(KERN_ERR PFX
"mode %dx%dx%d rejected...color depth not supported.\n",
var->xres, var->yres, var->bits_per_pixel);
NVTRACE("EXIT, returning -EINVAL\n");
return -EINVAL;
}
if (!strictmode) {
if (!info->monspecs.vfmax || !info->monspecs.hfmax ||
!info->monspecs.dclkmax || !fb_validate_mode(var, info))
mode_valid = 1;
}
/* calculate modeline if supported by monitor */
if (!mode_valid && info->monspecs.gtf) {
if (!fb_get_mode(FB_MAXTIMINGS, 0, var, info))
mode_valid = 1;
}
if (!mode_valid) {
mode = fb_find_best_mode(var, &info->modelist);
if (mode) {
riva_update_var(var, mode);
mode_valid = 1;
}
}
if (!mode_valid && info->monspecs.modedb_len)
return -EINVAL;
if (var->xres_virtual < var->xres)
var->xres_virtual = var->xres;
if (var->yres_virtual <= var->yres)
var->yres_virtual = -1;
if (rivafb_do_maximize(info, var, nom, den) < 0)
return -EINVAL;
if (var->xoffset < 0)
var->xoffset = 0;
if (var->yoffset < 0)
var->yoffset = 0;
/* truncate xoffset and yoffset to maximum if too high */
if (var->xoffset > var->xres_virtual - var->xres)
var->xoffset = var->xres_virtual - var->xres - 1;
if (var->yoffset > var->yres_virtual - var->yres)
var->yoffset = var->yres_virtual - var->yres - 1;
var->red.msb_right =
var->green.msb_right =
var->blue.msb_right =
var->transp.offset = var->transp.length = var->transp.msb_right = 0;
NVTRACE_LEAVE();
return 0;
}
static int rivafb_set_par(struct fb_info *info)
{
struct riva_par *par = info->par;
int rc = 0;
NVTRACE_ENTER();
/* vgaHWunlock() + riva unlock (0x7F) */
CRTCout(par, 0x11, 0xFF);
par->riva.LockUnlock(&par->riva, 0);
rc = riva_load_video_mode(info);
if (rc)
goto out;
if(!(info->flags & FBINFO_HWACCEL_DISABLED))
riva_setup_accel(info);
par->cursor_reset = 1;
info->fix.line_length = (info->var.xres_virtual * (info->var.bits_per_pixel >> 3));
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
if (info->flags & FBINFO_HWACCEL_DISABLED)
info->pixmap.scan_align = 1;
else
info->pixmap.scan_align = 4;
out:
NVTRACE_LEAVE();
return rc;
}
/**
* rivafb_pan_display
* @var: standard kernel fb changeable data
* @info: pointer to fb_info object containing info for current riva board
*
* DESCRIPTION:
* Pan (or wrap, depending on the `vmode' field) the display using the
* `xoffset' and `yoffset' fields of the `var' structure.
* If the values don't fit, return -EINVAL.
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
static int rivafb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct riva_par *par = info->par;
unsigned int base;
NVTRACE_ENTER();
base = var->yoffset * info->fix.line_length + var->xoffset;
par->riva.SetStartAddress(&par->riva, base);
NVTRACE_LEAVE();
return 0;
}
static int rivafb_blank(int blank, struct fb_info *info)
{
struct riva_par *par= info->par;
unsigned char tmp, vesa;
tmp = SEQin(par, 0x01) & ~0x20; /* screen on/off */
vesa = CRTCin(par, 0x1a) & ~0xc0; /* sync on/off */
NVTRACE_ENTER();
if (blank)
tmp |= 0x20;
switch (blank) {
case FB_BLANK_UNBLANK:
case FB_BLANK_NORMAL:
break;
case FB_BLANK_VSYNC_SUSPEND:
vesa |= 0x80;
break;
case FB_BLANK_HSYNC_SUSPEND:
vesa |= 0x40;
break;
case FB_BLANK_POWERDOWN:
vesa |= 0xc0;
break;
}
SEQout(par, 0x01, tmp);
CRTCout(par, 0x1a, vesa);
NVTRACE_LEAVE();
return 0;
}
/**
* rivafb_setcolreg
* @regno: register index
* @red: red component
* @green: green component
* @blue: blue component
* @transp: transparency
* @info: pointer to fb_info object containing info for current riva board
*
* DESCRIPTION:
* Set a single color register. The values supplied have a 16 bit
* magnitude.
*
* RETURNS:
* Return != 0 for invalid regno.
*
* CALLED FROM:
* fbcmap.c:fb_set_cmap()
*/
static int rivafb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
struct riva_par *par = info->par;
RIVA_HW_INST *chip = &par->riva;
int i;
if (regno >= riva_get_cmap_len(&info->var))
return -EINVAL;
if (info->var.grayscale) {
/* gray = 0.30*R + 0.59*G + 0.11*B */
red = green = blue =
(red * 77 + green * 151 + blue * 28) >> 8;
}
if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
((u32 *) info->pseudo_palette)[regno] =
(regno << info->var.red.offset) |
(regno << info->var.green.offset) |
(regno << info->var.blue.offset);
/*
* The Riva128 2D engine requires color information in
* TrueColor format even if framebuffer is in DirectColor
*/
if (par->riva.Architecture == NV_ARCH_03) {
switch (info->var.bits_per_pixel) {
case 16:
par->palette[regno] = ((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11);
break;
case 32:
par->palette[regno] = ((red & 0xff00) << 8) |
((green & 0xff00)) |
((blue & 0xff00) >> 8);
break;
}
}
}
switch (info->var.bits_per_pixel) {
case 8:
/* "transparent" stuff is completely ignored. */
riva_wclut(chip, regno, red >> 8, green >> 8, blue >> 8);
break;
case 16:
if (info->var.green.length == 5) {
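/*
* RGB555 has only 32 levels per component, so each level is replicated
* across 8 consecutive entries of the 256-entry DAC LUT.
*/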
for (i = 0; i < 8; i++) {
riva_wclut(chip, regno*8+i, red >> 8,
green >> 8, blue >> 8);
}
} else {
u8 r, g, b;
if (regno < 32) {
for (i = 0; i < 8; i++) {
riva_wclut(chip, regno*8+i,
red >> 8, green >> 8,
blue >> 8);
}
}
riva_rclut(chip, regno*4, &r, &g, &b);
for (i = 0; i < 4; i++)
riva_wclut(chip, regno*4+i, r,
green >> 8, b);
}
break;
case 32:
riva_wclut(chip, regno, red >> 8, green >> 8, blue >> 8);
break;
default:
/* do nothing */
break;
}
return 0;
}
/**
* rivafb_fillrect - hardware accelerated color fill function
* @info: pointer to fb_info structure
* @rect: pointer to fb_fillrect structure
*
* DESCRIPTION:
* This function fills up a region of framebuffer memory with a solid
* color with a choice of two different ROP's, copy or invert.
*
* CALLED FROM:
* framebuffer hook
*/
static void rivafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct riva_par *par = info->par;
u_int color, rop = 0;
if ((info->flags & FBINFO_HWACCEL_DISABLED)) {
cfb_fillrect(info, rect);
return;
}
if (info->var.bits_per_pixel == 8)
color = rect->color;
else {
if (par->riva.Architecture != NV_ARCH_03)
color = ((u32 *)info->pseudo_palette)[rect->color];
else
color = par->palette[rect->color];
}
switch (rect->rop) {
case ROP_XOR:
rop = 0x66;
break;
case ROP_COPY:
default:
rop = 0xCC;
break;
}
riva_set_rop_solid(par, rop);
RIVA_FIFO_FREE(par->riva, Bitmap, 1);
NV_WR32(&par->riva.Bitmap->Color1A, 0, color);
RIVA_FIFO_FREE(par->riva, Bitmap, 2);
NV_WR32(&par->riva.Bitmap->UnclippedRectangle[0].TopLeft, 0,
(rect->dx << 16) | rect->dy);
mb();
NV_WR32(&par->riva.Bitmap->UnclippedRectangle[0].WidthHeight, 0,
(rect->width << 16) | rect->height);
mb();
riva_set_rop_solid(par, 0xcc);
}
/**
* rivafb_copyarea - hardware accelerated blit function
* @info: pointer to fb_info structure
* @region: pointer to fb_copyarea structure
*
* DESCRIPTION:
* This copies an area of pixels from one location to another
*
* CALLED FROM:
* framebuffer hook
*/
static void rivafb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct riva_par *par = info->par;
if ((info->flags & FBINFO_HWACCEL_DISABLED)) {
cfb_copyarea(info, region);
return;
}
RIVA_FIFO_FREE(par->riva, Blt, 3);
NV_WR32(&par->riva.Blt->TopLeftSrc, 0,
(region->sy << 16) | region->sx);
NV_WR32(&par->riva.Blt->TopLeftDst, 0,
(region->dy << 16) | region->dx);
mb();
NV_WR32(&par->riva.Blt->WidthHeight, 0,
(region->height << 16) | region->width);
mb();
}
static inline void convert_bgcolor_16(u32 *col)
{
*col = ((*col & 0x0000F800) << 8)
| ((*col & 0x00007E0) << 5)
| ((*col & 0x0000001F) << 3)
| 0xFF000000;
mb();
}
/**
* rivafb_imageblit: hardware accelerated color expand function
* @info: pointer to fb_info structure
* @image: pointer to fb_image structure
*
* DESCRIPTION:
* If the source is a monochrome bitmap, the function fills up a region
* of framebuffer memory with pixels whose color is determined by the bit
* setting of the bitmap, 1 - foreground, 0 - background.
*
* If the source is not a monochrome bitmap, color expansion is not done.
* In this case, it is channeled to a software function.
*
* CALLED FROM:
* framebuffer hook
*/
static void rivafb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct riva_par *par = info->par;
u32 fgx = 0, bgx = 0, width, tmp;
u8 *cdat = (u8 *) image->data;
volatile u32 __iomem *d;
int i, size;
if ((info->flags & FBINFO_HWACCEL_DISABLED) || image->depth != 1) {
cfb_imageblit(info, image);
return;
}
switch (info->var.bits_per_pixel) {
case 8:
fgx = image->fg_color;
bgx = image->bg_color;
break;
case 16:
case 32:
if (par->riva.Architecture != NV_ARCH_03) {
fgx = ((u32 *)info->pseudo_palette)[image->fg_color];
bgx = ((u32 *)info->pseudo_palette)[image->bg_color];
} else {
fgx = par->palette[image->fg_color];
bgx = par->palette[image->bg_color];
}
if (info->var.green.length == 6)
convert_bgcolor_16(&bgx);
break;
}
RIVA_FIFO_FREE(par->riva, Bitmap, 7);
NV_WR32(&par->riva.Bitmap->ClipE.TopLeft, 0,
(image->dy << 16) | (image->dx & 0xFFFF));
NV_WR32(&par->riva.Bitmap->ClipE.BottomRight, 0,
(((image->dy + image->height) << 16) |
((image->dx + image->width) & 0xffff)));
NV_WR32(&par->riva.Bitmap->Color0E, 0, bgx);
NV_WR32(&par->riva.Bitmap->Color1E, 0, fgx);
NV_WR32(&par->riva.Bitmap->WidthHeightInE, 0,
(image->height << 16) | ((image->width + 31) & ~31));
NV_WR32(&par->riva.Bitmap->WidthHeightOutE, 0,
(image->height << 16) | ((image->width + 31) & ~31));
NV_WR32(&par->riva.Bitmap->PointE, 0,
(image->dy << 16) | (image->dx & 0xFFFF));
d = &par->riva.Bitmap->MonochromeData01E;
width = (image->width + 31)/32;
size = width * image->height;
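/* feed the expansion FIFO in bursts of 16 words, then drain the remainder */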
while (size >= 16) {
RIVA_FIFO_FREE(par->riva, Bitmap, 16);
for (i = 0; i < 16; i++) {
tmp = *((u32 *)cdat);
cdat = (u8 *)((u32 *)cdat + 1);
reverse_order(&tmp);
NV_WR32(d, i*4, tmp);
}
size -= 16;
}
if (size) {
RIVA_FIFO_FREE(par->riva, Bitmap, size);
for (i = 0; i < size; i++) {
tmp = *((u32 *) cdat);
cdat = (u8 *)((u32 *)cdat + 1);
reverse_order(&tmp);
NV_WR32(d, i*4, tmp);
}
}
}
/**
* rivafb_cursor - hardware cursor function
* @info: pointer to info structure
* @cursor: pointer to fbcursor structure
*
* DESCRIPTION:
* A cursor function that supports displaying a cursor image via hardware.
* Within the kernel, copy and invert rops are supported. If exported
* to user space, only the copy rop will be supported.
*
* CALLED FROM:
* framebuffer hook
*/
static int rivafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct riva_par *par = info->par;
u8 data[MAX_CURS * MAX_CURS/8];
int i, set = cursor->set;
u16 fg, bg;
if (cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
return -ENXIO;
par->riva.ShowHideCursor(&par->riva, 0);
if (par->cursor_reset) {
set = FB_CUR_SETALL;
par->cursor_reset = 0;
}
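/* the cursor buffer holds MAX_CURS x MAX_CURS ARGB1555 pixels, 2 bytes each */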
if (set & FB_CUR_SETSIZE)
memset_io(par->riva.CURSOR, 0, MAX_CURS * MAX_CURS * 2);
if (set & FB_CUR_SETPOS) {
u32 xx, yy, temp;
yy = cursor->image.dy - info->var.yoffset;
xx = cursor->image.dx - info->var.xoffset;
temp = xx & 0xFFFF;
temp |= yy << 16;
NV_WR32(par->riva.PRAMDAC, 0x0000300, temp);
}
if (set & (FB_CUR_SETSHAPE | FB_CUR_SETCMAP | FB_CUR_SETIMAGE)) {
u32 bg_idx = cursor->image.bg_color;
u32 fg_idx = cursor->image.fg_color;
u32 s_pitch = (cursor->image.width+7) >> 3;
u32 d_pitch = MAX_CURS/8;
u8 *dat = (u8 *) cursor->image.data;
u8 *msk = (u8 *) cursor->mask;
u8 *src;
src = kmalloc(s_pitch * cursor->image.height, GFP_ATOMIC);
if (src) {
switch (cursor->rop) {
case ROP_XOR:
for (i = 0; i < s_pitch * cursor->image.height; i++)
src[i] = dat[i] ^ msk[i];
break;
case ROP_COPY:
default:
for (i = 0; i < s_pitch * cursor->image.height; i++)
src[i] = dat[i] & msk[i];
break;
}
fb_pad_aligned_buffer(data, d_pitch, src, s_pitch,
cursor->image.height);
bg = ((info->cmap.red[bg_idx] & 0xf8) << 7) |
((info->cmap.green[bg_idx] & 0xf8) << 2) |
((info->cmap.blue[bg_idx] & 0xf8) >> 3) |
1 << 15;
fg = ((info->cmap.red[fg_idx] & 0xf8) << 7) |
((info->cmap.green[fg_idx] & 0xf8) << 2) |
((info->cmap.blue[fg_idx] & 0xf8) >> 3) |
1 << 15;
par->riva.LockUnlock(&par->riva, 0);
rivafb_load_cursor_image(par, data, bg, fg,
cursor->image.width,
cursor->image.height);
kfree(src);
}
}
if (cursor->enable)
par->riva.ShowHideCursor(&par->riva, 1);
return 0;
}
static int rivafb_sync(struct fb_info *info)
{
struct riva_par *par = info->par;
wait_for_idle(par);
return 0;
}
/* ------------------------------------------------------------------------- *
*
* initialization helper functions
*
* ------------------------------------------------------------------------- */
/* kernel interface */
static struct fb_ops riva_fb_ops = {
.owner = THIS_MODULE,
.fb_open = rivafb_open,
.fb_release = rivafb_release,
.fb_check_var = rivafb_check_var,
.fb_set_par = rivafb_set_par,
.fb_setcolreg = rivafb_setcolreg,
.fb_pan_display = rivafb_pan_display,
.fb_blank = rivafb_blank,
.fb_fillrect = rivafb_fillrect,
.fb_copyarea = rivafb_copyarea,
.fb_imageblit = rivafb_imageblit,
.fb_cursor = rivafb_cursor,
.fb_sync = rivafb_sync,
};
static int __devinit riva_set_fbinfo(struct fb_info *info)
{
unsigned int cmap_len;
struct riva_par *par = info->par;
NVTRACE_ENTER();
info->flags = FBINFO_DEFAULT
| FBINFO_HWACCEL_XPAN
| FBINFO_HWACCEL_YPAN
| FBINFO_HWACCEL_COPYAREA
| FBINFO_HWACCEL_FILLRECT
| FBINFO_HWACCEL_IMAGEBLIT;
/* Accel seems to not work properly on NV30 yet...*/
if ((par->riva.Architecture == NV_ARCH_30) || noaccel) {
printk(KERN_DEBUG PFX "disabling acceleration\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
}
info->var = rivafb_default_var;
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
info->pseudo_palette = par->pseudo_palette;
cmap_len = riva_get_cmap_len(&info->var);
fb_alloc_cmap(&info->cmap, cmap_len, 0);
info->pixmap.size = 8 * 1024;
info->pixmap.buf_align = 4;
info->pixmap.access_align = 32;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->var.yres_virtual = -1;
NVTRACE_LEAVE();
return (rivafb_check_var(&info->var, info));
}
#ifdef CONFIG_PPC_OF
static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
{
struct riva_par *par = info->par;
struct device_node *dp;
const unsigned char *pedid = NULL;
const unsigned char *disptype = NULL;
static char *propnames[] = {
"DFP,EDID", "LCD,EDID", "EDID", "EDID1", "EDID,B", "EDID,A", NULL };
int i;
NVTRACE_ENTER();
dp = pci_device_to_OF_node(pd);
for (; dp != NULL; dp = dp->child) {
disptype = of_get_property(dp, "display-type", NULL);
if (disptype == NULL)
continue;
if (strncmp(disptype, "LCD", 3) != 0)
continue;
for (i = 0; propnames[i] != NULL; ++i) {
pedid = of_get_property(dp, propnames[i], NULL);
if (pedid != NULL) {
par->EDID = (unsigned char *)pedid;
NVTRACE("LCD found.\n");
return 1;
}
}
}
NVTRACE_LEAVE();
return 0;
}
#endif /* CONFIG_PPC_OF */
#if defined(CONFIG_FB_RIVA_I2C) && !defined(CONFIG_PPC_OF)
static int __devinit riva_get_EDID_i2c(struct fb_info *info)
{
struct riva_par *par = info->par;
struct fb_var_screeninfo var;
int i;
NVTRACE_ENTER();
riva_create_i2c_busses(par);
for (i = 0; i < 3; i++) {
if (!par->chan[i].par)
continue;
riva_probe_i2c_connector(par, i, &par->EDID);
if (par->EDID && !fb_parse_edid(par->EDID, &var)) {
printk(PFX "Found EDID Block from BUS %i\n", i);
break;
}
}
NVTRACE_LEAVE();
return (par->EDID) ? 1 : 0;
}
#endif /* CONFIG_FB_RIVA_I2C */
static void __devinit riva_update_default_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct fb_monspecs *specs = &info->monspecs;
struct fb_videomode modedb;
NVTRACE_ENTER();
/* respect mode options */
if (mode_option) {
fb_find_mode(var, info, mode_option,
specs->modedb, specs->modedb_len,
NULL, 8);
} else if (specs->modedb != NULL) {
/* get first mode in database as fallback */
modedb = specs->modedb[0];
/* get preferred timing */
if (info->monspecs.misc & FB_MISC_1ST_DETAIL) {
int i;
for (i = 0; i < specs->modedb_len; i++) {
if (specs->modedb[i].flag & FB_MODE_IS_FIRST) {
modedb = specs->modedb[i];
break;
}
}
}
var->bits_per_pixel = 8;
riva_update_var(var, &modedb);
}
NVTRACE_LEAVE();
}
static void __devinit riva_get_EDID(struct fb_info *info, struct pci_dev *pdev)
{
NVTRACE_ENTER();
#ifdef CONFIG_PPC_OF
if (!riva_get_EDID_OF(info, pdev))
printk(PFX "could not retrieve EDID from OF\n");
#elif defined(CONFIG_FB_RIVA_I2C)
if (!riva_get_EDID_i2c(info))
printk(PFX "could not retrieve EDID from DDC/I2C\n");
#endif
NVTRACE_LEAVE();
}
static void __devinit riva_get_edidinfo(struct fb_info *info)
{
struct fb_var_screeninfo *var = &rivafb_default_var;
struct riva_par *par = info->par;
fb_edid_to_monspecs(par->EDID, &info->monspecs);
fb_videomode_to_modelist(info->monspecs.modedb, info->monspecs.modedb_len,
&info->modelist);
riva_update_default_var(var, info);
/* if user specified flatpanel, we respect that */
if (info->monspecs.input & FB_DISP_DDI)
par->FlatPanel = 1;
}
/* ------------------------------------------------------------------------- *
*
* PCI bus
*
* ------------------------------------------------------------------------- */
static u32 __devinit riva_get_arch(struct pci_dev *pd)
{
u32 arch = 0;
switch (pd->device & 0x0ff0) {
case 0x0100: /* GeForce 256 */
case 0x0110: /* GeForce2 MX */
case 0x0150: /* GeForce2 */
case 0x0170: /* GeForce4 MX */
case 0x0180: /* GeForce4 MX (8x AGP) */
case 0x01A0: /* nForce */
case 0x01F0: /* nForce2 */
arch = NV_ARCH_10;
break;
case 0x0200: /* GeForce3 */
case 0x0250: /* GeForce4 Ti */
case 0x0280: /* GeForce4 Ti (8x AGP) */
arch = NV_ARCH_20;
break;
case 0x0300: /* GeForceFX 5800 */
case 0x0310: /* GeForceFX 5600 */
case 0x0320: /* GeForceFX 5200 */
case 0x0330: /* GeForceFX 5900 */
case 0x0340: /* GeForceFX 5700 */
arch = NV_ARCH_30;
break;
case 0x0020: /* TNT, TNT2 */
arch = NV_ARCH_04;
break;
case 0x0010: /* Riva128 */
arch = NV_ARCH_03;
break;
default: /* unknown architecture */
break;
}
return arch;
}
static int __devinit rivafb_probe(struct pci_dev *pd,
const struct pci_device_id *ent)
{
struct riva_par *default_par;
struct fb_info *info;
int ret;
NVTRACE_ENTER();
assert(pd != NULL);
info = framebuffer_alloc(sizeof(struct riva_par), &pd->dev);
if (!info) {
printk (KERN_ERR PFX "could not allocate memory\n");
ret = -ENOMEM;
goto err_ret;
}
default_par = info->par;
default_par->pdev = pd;
info->pixmap.addr = kzalloc(8 * 1024, GFP_KERNEL);
if (info->pixmap.addr == NULL) {
ret = -ENOMEM;
goto err_framebuffer_release;
}
ret = pci_enable_device(pd);
if (ret < 0) {
printk(KERN_ERR PFX "cannot enable PCI device\n");
goto err_free_pixmap;
}
ret = pci_request_regions(pd, "rivafb");
if (ret < 0) {
printk(KERN_ERR PFX "cannot request PCI regions\n");
goto err_disable_device;
}
mutex_init(&default_par->open_lock);
default_par->riva.Architecture = riva_get_arch(pd);
default_par->Chipset = (pd->vendor << 16) | pd->device;
printk(KERN_INFO PFX "nVidia device/chipset %X\n",default_par->Chipset);
if(default_par->riva.Architecture == 0) {
printk(KERN_ERR PFX "unknown NV_ARCH\n");
ret=-ENODEV;
goto err_release_region;
}
if(default_par->riva.Architecture == NV_ARCH_10 ||
default_par->riva.Architecture == NV_ARCH_20 ||
default_par->riva.Architecture == NV_ARCH_30) {
sprintf(rivafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4);
} else {
sprintf(rivafb_fix.id, "NV%x", default_par->riva.Architecture);
}
default_par->FlatPanel = flatpanel;
if (flatpanel == 1)
printk(KERN_INFO PFX "flatpanel support enabled\n");
default_par->forceCRTC = forceCRTC;
rivafb_fix.mmio_len = pci_resource_len(pd, 0);
rivafb_fix.smem_len = pci_resource_len(pd, 1);
{
/* enable IO and mem if not already done */
unsigned short cmd;
pci_read_config_word(pd, PCI_COMMAND, &cmd);
cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
pci_write_config_word(pd, PCI_COMMAND, cmd);
}
rivafb_fix.mmio_start = pci_resource_start(pd, 0);
rivafb_fix.smem_start = pci_resource_start(pd, 1);
default_par->ctrl_base = ioremap(rivafb_fix.mmio_start,
rivafb_fix.mmio_len);
if (!default_par->ctrl_base) {
printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
ret = -EIO;
goto err_release_region;
}
switch (default_par->riva.Architecture) {
case NV_ARCH_03:
/* Riva128's PRAMIN is in the "framebuffer" space
* Since these cards were never made with more than 8 megabytes
* we can safely allocate this separately.
*/
default_par->riva.PRAMIN = ioremap(rivafb_fix.smem_start + 0x00C00000, 0x00008000);
if (!default_par->riva.PRAMIN) {
printk(KERN_ERR PFX "cannot ioremap PRAMIN region\n");
ret = -EIO;
goto err_iounmap_ctrl_base;
}
break;
case NV_ARCH_04:
case NV_ARCH_10:
case NV_ARCH_20:
case NV_ARCH_30:
default_par->riva.PCRTC0 =
(u32 __iomem *)(default_par->ctrl_base + 0x00600000);
default_par->riva.PRAMIN =
(u32 __iomem *)(default_par->ctrl_base + 0x00710000);
break;
}
riva_common_setup(default_par);
if (default_par->riva.Architecture == NV_ARCH_03) {
default_par->riva.PCRTC = default_par->riva.PCRTC0
= default_par->riva.PGRAPH;
}
rivafb_fix.smem_len = riva_get_memlen(default_par) * 1024;
default_par->dclk_max = riva_get_maxdclk(default_par) * 1000;
info->screen_base = ioremap(rivafb_fix.smem_start,
rivafb_fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR PFX "cannot ioremap FB base\n");
ret = -EIO;
goto err_iounmap_pramin;
}
#ifdef CONFIG_MTRR
if (!nomtrr) {
default_par->mtrr.vram = mtrr_add(rivafb_fix.smem_start,
rivafb_fix.smem_len,
MTRR_TYPE_WRCOMB, 1);
if (default_par->mtrr.vram < 0) {
printk(KERN_ERR PFX "unable to setup MTRR\n");
} else {
default_par->mtrr.vram_valid = 1;
/* let there be speed */
printk(KERN_INFO PFX "RIVA MTRR set to ON\n");
}
}
#endif /* CONFIG_MTRR */
info->fbops = &riva_fb_ops;
info->fix = rivafb_fix;
riva_get_EDID(info, pd);
riva_get_edidinfo(info);
ret=riva_set_fbinfo(info);
if (ret < 0) {
printk(KERN_ERR PFX "error setting initial video mode\n");
goto err_iounmap_screen_base;
}
fb_destroy_modedb(info->monspecs.modedb);
info->monspecs.modedb = NULL;
pci_set_drvdata(pd, info);
if (backlight)
riva_bl_init(info->par);
ret = register_framebuffer(info);
if (ret < 0) {
printk(KERN_ERR PFX
"error registering riva framebuffer\n");
goto err_iounmap_screen_base;
}
printk(KERN_INFO PFX
"PCI nVidia %s framebuffer ver %s (%dMB @ 0x%lX)\n",
info->fix.id,
RIVAFB_VERSION,
info->fix.smem_len / (1024 * 1024),
info->fix.smem_start);
NVTRACE_LEAVE();
return 0;
err_iounmap_screen_base:
#ifdef CONFIG_FB_RIVA_I2C
riva_delete_i2c_busses(info->par);
#endif
iounmap(info->screen_base);
err_iounmap_pramin:
if (default_par->riva.Architecture == NV_ARCH_03)
iounmap(default_par->riva.PRAMIN);
err_iounmap_ctrl_base:
iounmap(default_par->ctrl_base);
err_release_region:
pci_release_regions(pd);
err_disable_device:
err_free_pixmap:
kfree(info->pixmap.addr);
err_framebuffer_release:
framebuffer_release(info);
err_ret:
return ret;
}
static void __devexit rivafb_remove(struct pci_dev *pd)
{
struct fb_info *info = pci_get_drvdata(pd);
struct riva_par *par = info->par;
NVTRACE_ENTER();
#ifdef CONFIG_FB_RIVA_I2C
riva_delete_i2c_busses(par);
kfree(par->EDID);
#endif
unregister_framebuffer(info);
riva_bl_exit(info);
#ifdef CONFIG_MTRR
if (par->mtrr.vram_valid)
mtrr_del(par->mtrr.vram, info->fix.smem_start,
info->fix.smem_len);
#endif /* CONFIG_MTRR */
iounmap(par->ctrl_base);
iounmap(info->screen_base);
if (par->riva.Architecture == NV_ARCH_03)
iounmap(par->riva.PRAMIN);
pci_release_regions(pd);
kfree(info->pixmap.addr);
framebuffer_release(info);
pci_set_drvdata(pd, NULL);
NVTRACE_LEAVE();
}
/* ------------------------------------------------------------------------- *
*
* initialization
*
* ------------------------------------------------------------------------- */
#ifndef MODULE
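/*
* Parse comma-separated options from the kernel command line; a
* hypothetical example: video=rivafb:1024x768-16@75,noaccel,backlight:1.
* Any token not matched below is kept as the mode_option string.
*/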
static int __devinit rivafb_setup(char *options)
{
char *this_opt;
NVTRACE_ENTER();
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "forceCRTC", 9)) {
char *p;
p = this_opt + 9;
if (!*p || !*(++p)) continue;
forceCRTC = *p - '0';
if (forceCRTC < 0 || forceCRTC > 1)
forceCRTC = -1;
} else if (!strncmp(this_opt, "flatpanel", 9)) {
flatpanel = 1;
} else if (!strncmp(this_opt, "backlight:", 10)) {
backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = 1;
#endif
} else if (!strncmp(this_opt, "strictmode", 10)) {
strictmode = 1;
} else if (!strncmp(this_opt, "noaccel", 7)) {
noaccel = 1;
} else
mode_option = this_opt;
}
NVTRACE_LEAVE();
return 0;
}
#endif /* !MODULE */
static struct pci_driver rivafb_driver = {
.name = "rivafb",
.id_table = rivafb_pci_tbl,
.probe = rivafb_probe,
.remove = __devexit_p(rivafb_remove),
};
/* ------------------------------------------------------------------------- *
*
* modularization
*
* ------------------------------------------------------------------------- */
static int __devinit rivafb_init(void)
{
#ifndef MODULE
char *option = NULL;
if (fb_get_options("rivafb", &option))
return -ENODEV;
rivafb_setup(option);
#endif
return pci_register_driver(&rivafb_driver);
}
module_init(rivafb_init);
static void __exit rivafb_exit(void)
{
pci_unregister_driver(&rivafb_driver);
}
module_exit(rivafb_exit);
module_param(noaccel, bool, 0);
MODULE_PARM_DESC(noaccel, "bool: disable acceleration");
module_param(flatpanel, int, 0);
MODULE_PARM_DESC(flatpanel, "Enables experimental flat panel support for some chipsets. (0 or 1=enabled) (default=0)");
module_param(forceCRTC, int, 0);
MODULE_PARM_DESC(forceCRTC, "Forces usage of a particular CRTC in case autodetection fails. (0 or 1) (default=autodetect)");
#ifdef CONFIG_MTRR
module_param(nomtrr, bool, 0);
MODULE_PARM_DESC(nomtrr, "Disables MTRR support (0 or 1=disabled) (default=0)");
#endif
module_param(strictmode, bool, 0);
MODULE_PARM_DESC(strictmode, "Only use video modes from EDID");
MODULE_AUTHOR("Ani Joshi, maintainer");
MODULE_DESCRIPTION("Framebuffer driver for nVidia Riva 128, TNT, TNT2, and the GeForce series");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Renzo-Olivares/android_43_kernel_htc_monarudo | drivers/firmware/memmap.c | 9089 | 7301 | /*
* linux/drivers/firmware/memmap.c
* Copyright (C) 2008 SUSE LINUX Products GmbH
* by Bernhard Walle <bernhard.walle@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License v2.0 as published by
* the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/string.h>
#include <linux/firmware-map.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
/*
* Data types ------------------------------------------------------------------
*/
/*
* Firmware map entry. Because firmware memory maps are flat and not
* hierarchical, it's ok to organise them in a linked list. No parent
* information is necessary as for the resource tree.
*/
struct firmware_map_entry {
/*
* start and end must be u64 rather than resource_size_t, because e820
* resources can lie at addresses above 4G.
*/
u64 start; /* start of the memory range */
u64 end; /* end of the memory range (incl.) */
const char *type; /* type of the memory range */
struct list_head list; /* entry for the linked list */
struct kobject kobj; /* kobject for each entry */
};
/*
* Forward declarations --------------------------------------------------------
*/
static ssize_t memmap_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf);
static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
/*
* Static data -----------------------------------------------------------------
*/
struct memmap_attribute {
struct attribute attr;
ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
};
static struct memmap_attribute memmap_start_attr = __ATTR_RO(start);
static struct memmap_attribute memmap_end_attr = __ATTR_RO(end);
static struct memmap_attribute memmap_type_attr = __ATTR_RO(type);
/*
* These are default attributes that are added for every memmap entry.
*/
static struct attribute *def_attrs[] = {
&memmap_start_attr.attr,
&memmap_end_attr.attr,
&memmap_type_attr.attr,
NULL
};
static const struct sysfs_ops memmap_attr_ops = {
.show = memmap_attr_show,
};
static struct kobj_type memmap_ktype = {
.sysfs_ops = &memmap_attr_ops,
.default_attrs = def_attrs,
};
/*
* Registration functions ------------------------------------------------------
*/
/*
* Firmware memory map entries. No locking is needed because the
* firmware_map_add() and firmware_map_add_early() functions are called
* in firmware initialisation code in one single thread of execution.
*/
static LIST_HEAD(map_entries);
/**
* firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
* @start: Start of the memory range.
* @end: End of the memory range (inclusive).
* @type: Type of the memory range.
* @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
* entry.
*
* Common implementation of firmware_map_add() and firmware_map_add_early()
* which expects a pre-allocated struct firmware_map_entry.
**/
static int firmware_map_add_entry(u64 start, u64 end,
const char *type,
struct firmware_map_entry *entry)
{
BUG_ON(start > end);
entry->start = start;
entry->end = end;
entry->type = type;
INIT_LIST_HEAD(&entry->list);
kobject_init(&entry->kobj, &memmap_ktype);
list_add_tail(&entry->list, &map_entries);
return 0;
}
/*
* Add memmap entry on sysfs
*/
static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
{
static int map_entries_nr;
static struct kset *mmap_kset;
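/* the /sys/firmware/memmap kset is created lazily when the first entry is added */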
if (!mmap_kset) {
mmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
if (!mmap_kset)
return -ENOMEM;
}
entry->kobj.kset = mmap_kset;
if (kobject_add(&entry->kobj, NULL, "%d", map_entries_nr++))
kobject_put(&entry->kobj);
return 0;
}
/**
* firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
* memory hotplug.
* @start: Start of the memory range.
* @end: End of the memory range (inclusive).
* @type: Type of the memory range.
*
 * Adds a firmware mapping entry. This function is for memory hotplug; it is
 * similar to firmware_map_add_early(). The only difference is that
 * it creates the sysfs entry dynamically.
*
* Returns 0 on success, or -ENOMEM if no memory could be allocated.
**/
int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
firmware_map_add_entry(start, end, type, entry);
/* create the memmap entry */
add_sysfs_fw_map_entry(entry);
return 0;
}
/**
* firmware_map_add_early() - Adds a firmware mapping entry.
* @start: Start of the memory range.
* @end: End of the memory range (inclusive).
* @type: Type of the memory range.
*
* Adds a firmware mapping entry. This function uses the bootmem allocator
* for memory allocation.
*
* That function must be called before late_initcall.
*
* Returns 0 on success, or -ENOMEM if no memory could be allocated.
**/
int __init firmware_map_add_early(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
entry = alloc_bootmem(sizeof(struct firmware_map_entry));
if (WARN_ON(!entry))
return -ENOMEM;
return firmware_map_add_entry(start, end, type, entry);
}
/*
* Sysfs functions -------------------------------------------------------------
*/
static ssize_t start_show(struct firmware_map_entry *entry, char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)entry->start);
}
static ssize_t end_show(struct firmware_map_entry *entry, char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)entry->end);
}
static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
}
#define to_memmap_attr(_attr) container_of(_attr, struct memmap_attribute, attr)
#define to_memmap_entry(obj) container_of(obj, struct firmware_map_entry, kobj)
static ssize_t memmap_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct firmware_map_entry *entry = to_memmap_entry(kobj);
struct memmap_attribute *memmap_attr = to_memmap_attr(attr);
return memmap_attr->show(entry, buf);
}
/*
 * Initialises the sysfs machinery and adds the entries in the map_entries
 * list to sysfs. Note that firmware_map_add() and firmware_map_add_early()
 * must be called before late_initcall: this function itself runs as a
 * late_initcall(), so entries added after it has run never appear in
 * sysfs.
*/
static int __init memmap_init(void)
{
struct firmware_map_entry *entry;
list_for_each_entry(entry, &map_entries, list)
add_sysfs_fw_map_entry(entry);
return 0;
}
late_initcall(memmap_init);
| gpl-2.0 |
arpith20/linux | arch/blackfin/kernel/nmi.c | 9601 | 5793 | /*
* Blackfin nmi_watchdog Driver
*
* Originally based on bfin_wdt.c
* Copyright 2010-2010 Analog Devices Inc.
* Graff Yang <graf.yang@analog.com>
*
* Enter bugs at http://blackfin.uclinux.org/
*
* Licensed under the GPL-2 or later.
*/
#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/syscore_ops.h>
#include <linux/pm.h>
#include <linux/nmi.h>
#include <linux/smp.h>
#include <linux/timer.h>
#include <asm/blackfin.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/bfin_watchdog.h>
#define DRV_NAME "nmi-wdt"
#define NMI_WDT_TIMEOUT 5 /* 5 seconds */
#define NMI_CHECK_TIMEOUT (4 * HZ) /* 4 seconds in jiffies */
static int nmi_wdt_cpu = 1;
static unsigned int timeout = NMI_WDT_TIMEOUT;
static int nmi_active;
static unsigned short wdoga_ctl;
static unsigned int wdoga_cnt;
static struct corelock_slot saved_corelock;
static atomic_t nmi_touched[NR_CPUS];
static struct timer_list ntimer;
enum {
COREA_ENTER_NMI = 0,
COREA_EXIT_NMI,
COREB_EXIT_NMI,
NMI_EVENT_NR,
};
static unsigned long nmi_event __attribute__ ((__section__(".l2.bss")));
/* we are in nmi, non-atomic bit ops is safe */
static inline void set_nmi_event(int event)
{
__set_bit(event, &nmi_event);
}
static inline void wait_nmi_event(int event)
{
while (!test_bit(event, &nmi_event))
barrier();
__clear_bit(event, &nmi_event);
}
static inline void send_corea_nmi(void)
{
wdoga_ctl = bfin_read_WDOGA_CTL();
wdoga_cnt = bfin_read_WDOGA_CNT();
bfin_write_WDOGA_CTL(WDEN_DISABLE);
bfin_write_WDOGA_CNT(0);
bfin_write_WDOGA_CTL(WDEN_ENABLE | ICTL_NMI);
}
static inline void restore_corea_nmi(void)
{
bfin_write_WDOGA_CTL(WDEN_DISABLE);
bfin_write_WDOGA_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
bfin_write_WDOGA_CNT(wdoga_cnt);
bfin_write_WDOGA_CTL(wdoga_ctl);
}
static inline void save_corelock(void)
{
saved_corelock = corelock;
corelock.lock = 0;
}
static inline void restore_corelock(void)
{
corelock = saved_corelock;
}
static inline void nmi_wdt_keepalive(void)
{
bfin_write_WDOGB_STAT(0);
}
static inline void nmi_wdt_stop(void)
{
bfin_write_WDOGB_CTL(WDEN_DISABLE);
}
/* before calling this function, you must stop the WDT */
static inline void nmi_wdt_clear(void)
{
/* clear TRO bit, disable event generation */
bfin_write_WDOGB_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
}
static inline void nmi_wdt_start(void)
{
bfin_write_WDOGB_CTL(WDEN_ENABLE | ICTL_NMI);
}
static inline int nmi_wdt_running(void)
{
return ((bfin_read_WDOGB_CTL() & WDEN_MASK) != WDEN_DISABLE);
}
static inline int nmi_wdt_set_timeout(unsigned long t)
{
u32 cnt, max_t, sclk;
int run;
sclk = get_sclk();
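/* -1 wraps to UINT_MAX: the largest count the 32-bit watchdog register holds */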
max_t = -1 / sclk;
cnt = t * sclk;
if (t > max_t) {
pr_warning("NMI: timeout value is too large\n");
return -EINVAL;
}
run = nmi_wdt_running();
nmi_wdt_stop();
bfin_write_WDOGB_CNT(cnt);
if (run)
nmi_wdt_start();
timeout = t;
return 0;
}
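/*
* Return 1 only if every online CPU has touched the watchdog since the
* last check; each per-CPU flag is cleared as it is consumed.
*/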
int check_nmi_wdt_touched(void)
{
unsigned int this_cpu = smp_processor_id();
unsigned int cpu;
cpumask_t mask;
cpumask_copy(&mask, cpu_online_mask);
if (!atomic_read(&nmi_touched[this_cpu]))
return 0;
atomic_set(&nmi_touched[this_cpu], 0);
cpumask_clear_cpu(this_cpu, &mask);
for_each_cpu(cpu, &mask) {
invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]),
(unsigned long)(&nmi_touched[cpu]));
if (!atomic_read(&nmi_touched[cpu]))
return 0;
atomic_set(&nmi_touched[cpu], 0);
}
return 1;
}
static void nmi_wdt_timer(unsigned long data)
{
if (check_nmi_wdt_touched())
nmi_wdt_keepalive();
mod_timer(&ntimer, jiffies + NMI_CHECK_TIMEOUT);
}
static int __init init_nmi_wdt(void)
{
nmi_wdt_set_timeout(timeout);
nmi_wdt_start();
nmi_active = true;
init_timer(&ntimer);
ntimer.function = nmi_wdt_timer;
ntimer.expires = jiffies + NMI_CHECK_TIMEOUT;
add_timer(&ntimer);
pr_info("nmi_wdt: initialized: timeout=%d sec\n", timeout);
return 0;
}
device_initcall(init_nmi_wdt);
void touch_nmi_watchdog(void)
{
atomic_set(&nmi_touched[smp_processor_id()], 1);
}
/* Suspend/resume support */
#ifdef CONFIG_PM
static int nmi_wdt_suspend(void)
{
nmi_wdt_stop();
return 0;
}
static void nmi_wdt_resume(void)
{
if (nmi_active)
nmi_wdt_start();
}
static struct syscore_ops nmi_syscore_ops = {
.resume = nmi_wdt_resume,
.suspend = nmi_wdt_suspend,
};
static int __init init_nmi_wdt_syscore(void)
{
if (nmi_active)
register_syscore_ops(&nmi_syscore_ops);
return 0;
}
late_initcall(init_nmi_wdt_syscore);
#endif /* CONFIG_PM */
asmlinkage notrace void do_nmi(struct pt_regs *fp)
{
unsigned int cpu = smp_processor_id();
nmi_enter();
cpu_pda[cpu].__nmi_count += 1;
if (cpu == nmi_wdt_cpu) {
/* CoreB goes here first */
/* reload the WDOG_STAT */
nmi_wdt_keepalive();
/* clear nmi interrupt for CoreB */
nmi_wdt_stop();
nmi_wdt_clear();
/* trigger NMI interrupt of CoreA */
send_corea_nmi();
/* waiting for CoreA to enter NMI */
wait_nmi_event(COREA_ENTER_NMI);
/* recover WDOGA's settings */
restore_corea_nmi();
save_corelock();
/* corelock is saved/cleared, CoreA is dumping messages */
wait_nmi_event(COREA_EXIT_NMI);
} else {
/* OK, CoreA entered NMI */
set_nmi_event(COREA_ENTER_NMI);
}
pr_emerg("\nNMI Watchdog detected LOCKUP, dump for CPU %d\n", cpu);
dump_bfin_process(fp);
dump_bfin_mem(fp);
show_regs(fp);
dump_bfin_trace_buffer();
show_stack(current, (unsigned long *)fp);
if (cpu == nmi_wdt_cpu) {
pr_emerg("This fault is not recoverable, sorry!\n");
/* CoreA dump finished, restore the corelock */
restore_corelock();
set_nmi_event(COREB_EXIT_NMI);
} else {
/* CoreB dump finished, notify CoreA that we are done */
set_nmi_event(COREA_EXIT_NMI);
/* synchronize with CoreA */
wait_nmi_event(COREB_EXIT_NMI);
}
nmi_exit();
}
| gpl-2.0 |
sakindia123/kernel_3.4_samsung_exynos4 | drivers/misc/ibmasm/r_heartbeat.c | 9601 | 2611 |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <amax@us.ibm.com>
*
*/
#include <linux/sched.h>
#include "ibmasm.h"
#include "dot_command.h"
/*
* Reverse Heartbeat, i.e. heartbeats sent from the driver to the
* service processor.
* These heartbeats are initiated by user level programs.
*/
/* the reverse heartbeat dot command */
#pragma pack(1)
static struct {
struct dot_command_header header;
unsigned char command[3];
} rhb_dot_cmd = {
.header = {
.type = sp_read,
.command_size = 3,
.data_size = 0,
.status = 0
},
.command = { 4, 3, 6 }
};
#pragma pack()
void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb)
{
init_waitqueue_head(&rhb->wait);
rhb->stopped = 0;
}
/**
* ibmasm_start_reverse_heartbeat
* Loop, sending a reverse heartbeat dot command to the service
* processor, then sleeping. The loop comes to an end if the service
* processor fails to respond 3 times or we are interrupted.
*/
int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb)
{
struct command *cmd;
int times_failed = 0;
int result = 1;
cmd = ibmasm_new_command(sp, sizeof rhb_dot_cmd);
if (!cmd)
return -ENOMEM;
while (times_failed < 3) {
memcpy(cmd->buffer, (void *)&rhb_dot_cmd, sizeof rhb_dot_cmd);
cmd->status = IBMASM_CMD_PENDING;
ibmasm_exec_command(sp, cmd);
ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL);
if (cmd->status != IBMASM_CMD_COMPLETE)
times_failed++;
wait_event_interruptible_timeout(rhb->wait,
rhb->stopped,
REVERSE_HEARTBEAT_TIMEOUT * HZ);
if (signal_pending(current) || rhb->stopped) {
result = -EINTR;
break;
}
}
command_put(cmd);
rhb->stopped = 0;
return result;
}
void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb)
{
rhb->stopped = 1;
wake_up_interruptible(&rhb->wait);
}
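/*
 * Illustrative usage sketch (not part of the original file): the three
 * functions above are typically driven from process context on behalf
 * of a user-level program, with another thread calling the stop
 * function. The 'sp' pointer is assumed to come from the opened device.
 */
#if 0 /* example only */
static int reverse_heartbeat_example(struct service_processor *sp)
{
struct reverse_heartbeat rhb;
ibmasm_init_reverse_heartbeat(sp, &rhb);
/* blocks until 3 failures, a signal, or ibmasm_stop_reverse_heartbeat() */
return ibmasm_start_reverse_heartbeat(sp, &rhb);
}
#endif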
| gpl-2.0 |
kodos96/backport | drivers/scsi/pcmcia/nsp_message.c | 15489 | 2158 | /*==========================================================================
NinjaSCSI-3 message handler
By: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
This software may be used and distributed according to the terms of
the GNU General Public License.
*/
/* $Id: nsp_message.c,v 1.6 2003/07/26 14:21:09 elca Exp $ */
static void nsp_message_in(struct scsi_cmnd *SCpnt)
{
unsigned int base = SCpnt->device->host->io_port;
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
unsigned char data_reg, control_reg;
int ret, len;
/*
* XXX: NSP QUIRK
* The NSP raises interrupts only on SCSI phase changes, so we
* should poll the SCSI phase here to catch the next "msg in" if
* one exists (i.e. when no phase change occurs).
*/
ret = 16;
len = 0;
nsp_dbg(NSP_DEBUG_MSGINOCCUR, "msgin loop");
do {
/* read data */
data_reg = nsp_index_read(base, SCSIDATAIN);
/* assert ACK */
control_reg = nsp_index_read(base, SCSIBUSCTRL);
control_reg |= SCSI_ACK;
nsp_index_write(base, SCSIBUSCTRL, control_reg);
nsp_negate_signal(SCpnt, BUSMON_REQ, "msgin<REQ>");
data->MsgBuffer[len] = data_reg;
len++;
/* deassert ACK */
control_reg = nsp_index_read(base, SCSIBUSCTRL);
control_reg &= ~SCSI_ACK;
nsp_index_write(base, SCSIBUSCTRL, control_reg);
/* catch a next signal */
ret = nsp_expect_signal(SCpnt, BUSPHASE_MESSAGE_IN, BUSMON_REQ);
} while (ret > 0 && MSGBUF_SIZE > len);
data->MsgLen = len;
}
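/*
 * The loop above is the standard asynchronous SCSI REQ/ACK handshake,
 * one message byte per iteration: latch the data byte while REQ is
 * asserted, assert ACK, wait for the target to negate REQ, negate ACK,
 * then poll for the next REQ while still in the message-in phase.
 */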
static void nsp_message_out(struct scsi_cmnd *SCpnt)
{
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
int ret = 1;
int len = data->MsgLen;
/*
* XXX: NSP QUIRK
* The NSP raises interrupts only on SCSI phase changes, so we
* should poll the SCSI phase here to catch the next "msg out" if
* one exists (i.e. when no phase change occurs).
*/
nsp_dbg(NSP_DEBUG_MSGOUTOCCUR, "msgout loop");
do {
if (nsp_xfer(SCpnt, BUSPHASE_MESSAGE_OUT)) {
nsp_msg(KERN_DEBUG, "msgout: xfer short");
}
/* catch a next signal */
ret = nsp_expect_signal(SCpnt, BUSPHASE_MESSAGE_OUT, BUSMON_REQ);
} while (ret > 0 && len-- > 0);
}
/* end */
| gpl-2.0 |
optimsoc/gzll-gcc | gcc/testsuite/gfortran.dg/import2.f90 | 130 | 1309 | ! { dg-do compile }
! { dg-options "-std=f95" }
! { dg-shouldfail "Fortran 2003 feature with -std=f95" }
! Test that IMPORT is rejected with -std=f95
! PR fortran/29601
module testmod
implicit none
integer, parameter :: kind = 8
type modType
real :: rv
end type modType
interface
subroutine other(x,y)
import ! { dg-error "Fortran 2003: IMPORT statement" }
type(modType) :: y ! { dg-error "is being used before it is defined" }
real(kind) :: x ! { dg-error "has not been declared" }
end subroutine
end interface
end module testmod
program foo
integer, parameter :: dp = 8
type myType
sequence
integer :: i
end type myType
type myType3
sequence
integer :: i
end type myType3
interface
subroutine bar(x,y)
import ! { dg-error "Fortran 2003: IMPORT statement" }
type(myType) :: x ! { dg-error "is being used before it is defined" }
integer(dp) :: y ! { dg-error "has not been declared" }
end subroutine bar
subroutine test(x)
import :: myType3 ! { dg-error "Fortran 2003: IMPORT statement" }
import myType3 ! { dg-error "Fortran 2003: IMPORT statement" }
type(myType3) :: x ! { dg-error "is being used before it is defined" }
end subroutine test
end interface
end program foo
| gpl-2.0 |
nikybiasion/android_kernel_asus_me301t | drivers/staging/rtl8192e/r8192E_core.c | 386 | 147396 | /******************************************************************************
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
* Linux device driver for RTL8192E
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Jerry chuang <wlanfae@realtek.com>
*/
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <asm/uaccess.h>
#include "r8192E_hw.h"
#include "r8192E.h"
#include "r8190_rtl8256.h" /* RTL8225 Radio frontend */
#include "r8180_93cx6.h" /* Card EEPROM */
#include "r8192E_wx.h"
#include "r819xE_phy.h" //added by WB 4.30.2008
#include "r819xE_phyreg.h"
#include "r819xE_cmdpkt.h"
#include "r8192E_dm.h"
#ifdef CONFIG_PM
#include "r8192_pm.h"
#endif
#ifdef ENABLE_DOT11D
#include "ieee80211/dot11d.h"
#endif
//set here to enable your trace code. //WB
u32 rt_global_debug_component = COMP_ERR; //always keep err flags on
static DEFINE_PCI_DEVICE_TABLE(rtl8192_pci_id_tbl) = {
/* Realtek */
{ PCI_DEVICE(0x10ec, 0x8192) },
/* Corega */
{ PCI_DEVICE(0x07aa, 0x0044) },
{ PCI_DEVICE(0x07aa, 0x0047) },
{}
};
static char ifname[IFNAMSIZ] = "wlan%d";
static int hwwep = 1; //default use hw. set 0 to use software security
static int channels = 0x3fff;
MODULE_LICENSE("GPL");
MODULE_VERSION("V 1.1");
MODULE_DEVICE_TABLE(pci, rtl8192_pci_id_tbl);
//MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
MODULE_DESCRIPTION("Linux driver for Realtek RTL819x WiFi cards");
module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR);
module_param(hwwep,int, S_IRUGO|S_IWUSR);
module_param(channels,int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ifname," Net interface name, wlan%d=default");
MODULE_PARM_DESC(hwwep," Try to use hardware WEP support. Still broken and not available on all cards");
MODULE_PARM_DESC(channels," Channel bitmask for specific locales. NYI");
static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
static void __devexit rtl8192_pci_disconnect(struct pci_dev *pdev);
static struct pci_driver rtl8192_pci_driver = {
.name = RTL819xE_MODULE_NAME, /* Driver name */
.id_table = rtl8192_pci_id_tbl, /* PCI_ID table */
.probe = rtl8192_pci_probe, /* probe fn */
.remove = __devexit_p(rtl8192_pci_disconnect), /* remove fn */
#ifdef CONFIG_PM
.suspend = rtl8192E_suspend, /* PM suspend fn */
.resume = rtl8192E_resume, /* PM resume fn */
#else
.suspend = NULL, /* PM suspend fn */
.resume = NULL, /* PM resume fn */
#endif
};
static void rtl8192_start_beacon(struct ieee80211_device *ieee80211);
static void rtl8192_stop_beacon(struct ieee80211_device *ieee80211);
static void rtl819x_watchdog_wqcallback(struct work_struct *work);
static void rtl8192_irq_rx_tasklet(unsigned long arg);
static void rtl8192_irq_tx_tasklet(unsigned long arg);
static void rtl8192_prepare_beacon(unsigned long arg);
static irqreturn_t rtl8192_interrupt(int irq, void *param);
static void rtl819xE_tx_cmd(struct r8192_priv *priv, struct sk_buff *skb);
static void rtl8192_update_ratr_table(struct r8192_priv *priv);
static void rtl8192_restart(struct work_struct *work);
static void watch_dog_timer_callback(unsigned long data);
static int _rtl8192_up(struct r8192_priv *priv);
static void rtl8192_cancel_deferred_work(struct r8192_priv* priv);
static short rtl8192_tx(struct r8192_priv *priv, struct sk_buff* skb);
#ifdef ENABLE_DOT11D
typedef struct _CHANNEL_LIST
{
u8 Channel[32];
u8 Len;
}CHANNEL_LIST, *PCHANNEL_LIST;
static const CHANNEL_LIST ChannelPlan[] = {
{{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64,149,153,157,161,165},24}, //FCC
{{1,2,3,4,5,6,7,8,9,10,11},11}, //IC
{{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //ETSI
{{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //Spain. Change to ETSI.
{{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //France. Change to ETSI.
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64},22}, //MKK //MKK
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64},22},//MKK1
{{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //Israel.
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64},22}, // For 11a , TELEC
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64}, 22}, //MIC
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14} //For Global Domain. 1-11:active scan, 12-14 passive scan. //+YJ, 080626
};
static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv* priv)
{
int i, max_chan=-1, min_chan=-1;
struct ieee80211_device* ieee = priv->ieee80211;
switch (channel_plan)
{
case COUNTRY_CODE_FCC:
case COUNTRY_CODE_IC:
case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_SPAIN:
case COUNTRY_CODE_FRANCE:
case COUNTRY_CODE_MKK:
case COUNTRY_CODE_MKK1:
case COUNTRY_CODE_ISRAEL:
case COUNTRY_CODE_TELEC:
case COUNTRY_CODE_MIC:
{
Dot11d_Init(ieee);
ieee->bGlobalDomain = false;
//actually the 8225 & 8256 rf chips only support B,G,24N mode
min_chan = 1;
max_chan = 14;
if (ChannelPlan[channel_plan].Len != 0){
// Clear old channel map
memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
// Set new channel map
for (i=0;i<ChannelPlan[channel_plan].Len;i++)
{
if (ChannelPlan[channel_plan].Channel[i] < min_chan || ChannelPlan[channel_plan].Channel[i] > max_chan)
break;
GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
}
}
break;
}
case COUNTRY_CODE_GLOBAL_DOMAIN:
{
GET_DOT11D_INFO(ieee)->bEnabled = 0; //when this flag is enabled we follow the 11d country IE setting, otherwise the global domain setting
Dot11d_Reset(ieee);
ieee->bGlobalDomain = true;
break;
}
default:
break;
}
}
#endif
static inline bool rx_hal_is_cck_rate(prx_fwinfo_819x_pci pdrvinfo)
{
return (pdrvinfo->RxRate == DESC90_RATE1M ||
pdrvinfo->RxRate == DESC90_RATE2M ||
pdrvinfo->RxRate == DESC90_RATE5_5M ||
pdrvinfo->RxRate == DESC90_RATE11M) &&
!pdrvinfo->RxHT;
}
void CamResetAllEntry(struct r8192_priv* priv)
{
write_nic_dword(priv, RWCAM, BIT31|BIT30);
}
void write_cam(struct r8192_priv *priv, u8 addr, u32 data)
{
write_nic_dword(priv, WCAMI, data);
write_nic_dword(priv, RWCAM, BIT31|BIT16|(addr&0xff) );
}
u32 read_cam(struct r8192_priv *priv, u8 addr)
{
write_nic_dword(priv, RWCAM, 0x80000000|(addr&0xff) );
return read_nic_dword(priv, 0xa8);
}
u8 read_nic_byte(struct r8192_priv *priv, int x)
{
return 0xff & readb(priv->mem_start + x);
}
u32 read_nic_dword(struct r8192_priv *priv, int x)
{
return readl(priv->mem_start + x);
}
u16 read_nic_word(struct r8192_priv *priv, int x)
{
return readw(priv->mem_start + x);
}
void write_nic_byte(struct r8192_priv *priv, int x,u8 y)
{
writeb(y, priv->mem_start + x);
udelay(20);
}
void write_nic_dword(struct r8192_priv *priv, int x,u32 y)
{
writel(y, priv->mem_start + x);
udelay(20);
}
void write_nic_word(struct r8192_priv *priv, int x,u16 y)
{
writew(y, priv->mem_start + x);
udelay(20);
}
u8 rtl8192e_ap_sec_type(struct ieee80211_device *ieee)
{
static const u8 ccmp_ie[4] = {0x00,0x50,0xf2,0x04};
static const u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
int wpa_ie_len= ieee->wpa_ie_len;
struct ieee80211_crypt_data* crypt;
int encrypt;
crypt = ieee->crypt[ieee->tx_keyidx];
encrypt = (ieee->current_network.capability & WLAN_CAPABILITY_PRIVACY) ||
(ieee->host_encrypt && crypt && crypt->ops &&
(0 == strcmp(crypt->ops->name,"WEP")));
/* simply judge */
if(encrypt && (wpa_ie_len == 0)) {
/* wep encryption, no N mode setting */
return SEC_ALG_WEP;
} else if((wpa_ie_len != 0)) {
/* parse pairwise key type */
if (((ieee->wpa_ie[0] == 0xdd) && (!memcmp(&(ieee->wpa_ie[14]),ccmp_ie,4))) ||
((ieee->wpa_ie[0] == 0x30) && (!memcmp(&ieee->wpa_ie[10],ccmp_rsn_ie, 4))))
return SEC_ALG_CCMP;
else
return SEC_ALG_TKIP;
} else {
return SEC_ALG_NONE;
}
}
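/*
 * Offset notes for the IE checks above (standard 802.11 layouts):
 * a WPA vendor IE is 0xdd, len, OUI 00:50:f2, type 01, 2-byte version,
 * 4-byte group cipher suite, so the first pairwise cipher suite starts
 * at offset 14; an RSN IE is 0x30, len, 2-byte version, 4-byte group
 * cipher suite, so the first pairwise suite starts at offset 10.
 * A suite ending in 04 denotes CCMP in both cases.
 */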
void rtl8192e_SetHwReg(struct ieee80211_device *ieee80211, u8 variable, u8 *val)
{
struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
switch(variable)
{
case HW_VAR_BSSID:
write_nic_dword(priv, BSSIDR, ((u32*)(val))[0]);
write_nic_word(priv, BSSIDR+2, ((u16*)(val+2))[0]);
break;
case HW_VAR_MEDIA_STATUS:
{
RT_OP_MODE OpMode = *((RT_OP_MODE *)(val));
u8 btMsr = read_nic_byte(priv, MSR);
btMsr &= 0xfc;
switch(OpMode)
{
case RT_OP_MODE_INFRASTRUCTURE:
btMsr |= MSR_INFRA;
break;
case RT_OP_MODE_IBSS:
btMsr |= MSR_ADHOC;
break;
case RT_OP_MODE_AP:
btMsr |= MSR_AP;
break;
default:
btMsr |= MSR_NOLINK;
break;
}
write_nic_byte(priv, MSR, btMsr);
}
break;
case HW_VAR_CHECK_BSSID:
{
u32 RegRCR, Type;
Type = ((u8*)(val))[0];
RegRCR = read_nic_dword(priv, RCR);
priv->ReceiveConfig = RegRCR;
if (Type == true)
RegRCR |= (RCR_CBSSID);
else if (Type == false)
RegRCR &= (~RCR_CBSSID);
write_nic_dword(priv, RCR,RegRCR);
priv->ReceiveConfig = RegRCR;
}
break;
case HW_VAR_SLOT_TIME:
{
priv->slot_time = val[0];
write_nic_byte(priv, SLOT_TIME, val[0]);
}
break;
case HW_VAR_ACK_PREAMBLE:
{
u32 regTmp = 0;
priv->short_preamble = (bool)(*(u8*)val );
regTmp = priv->basic_rate;
if (priv->short_preamble)
regTmp |= BRSR_AckShortPmb;
write_nic_dword(priv, RRSR, regTmp);
}
break;
case HW_VAR_CPU_RST:
write_nic_dword(priv, CPU_GEN, ((u32*)(val))[0]);
break;
default:
break;
}
}
static struct proc_dir_entry *rtl8192_proc = NULL;
static int proc_get_stats_ap(char *page, char **start,
off_t offset, int count,
int *eof, void *data)
{
struct r8192_priv *priv = data;
struct ieee80211_device *ieee = priv->ieee80211;
struct ieee80211_network *target;
int len = 0;
list_for_each_entry(target, &ieee->network_list, list) {
len += snprintf(page + len, count - len,
"%s ", target->ssid);
if(target->wpa_ie_len>0 || target->rsn_ie_len>0){
len += snprintf(page + len, count - len,
"WPA\n");
}
else{
len += snprintf(page + len, count - len,
"non_WPA\n");
}
}
*eof = 1;
return len;
}
static int proc_get_registers(char *page, char **start,
off_t offset, int count,
int *eof, void *data)
{
struct r8192_priv *priv = data;
int len = 0;
int i,n;
int max=0xff;
/* This dump the current register page */
len += snprintf(page + len, count - len,
"\n####################page 0##################\n ");
for(n=0;n<=max;)
{
len += snprintf(page + len, count - len,
"\nD: %2x > ",n);
for(i=0;i<16 && n<=max;i++,n++)
len += snprintf(page + len, count - len,
"%2x ",read_nic_byte(priv,n));
}
len += snprintf(page + len, count - len,"\n");
len += snprintf(page + len, count - len,
"\n####################page 1##################\n ");
for(n=0;n<=max;)
{
len += snprintf(page + len, count - len,
"\nD: %2x > ",n);
for(i=0;i<16 && n<=max;i++,n++)
len += snprintf(page + len, count - len,
"%2x ",read_nic_byte(priv,0x100|n));
}
len += snprintf(page + len, count - len,
"\n####################page 3##################\n ");
for(n=0;n<=max;)
{
len += snprintf(page + len, count - len,
"\nD: %2x > ",n);
for(i=0;i<16 && n<=max;i++,n++)
len += snprintf(page + len, count - len,
"%2x ",read_nic_byte(priv,0x300|n));
}
*eof = 1;
return len;
}
static int proc_get_stats_tx(char *page, char **start,
off_t offset, int count,
int *eof, void *data)
{
struct r8192_priv *priv = data;
int len = 0;
len += snprintf(page + len, count - len,
"TX VI priority ok int: %lu\n"
"TX VO priority ok int: %lu\n"
"TX BE priority ok int: %lu\n"
"TX BK priority ok int: %lu\n"
"TX MANAGE priority ok int: %lu\n"
"TX BEACON priority ok int: %lu\n"
"TX BEACON priority error int: %lu\n"
"TX CMDPKT priority ok int: %lu\n"
"TX queue stopped?: %d\n"
"TX fifo overflow: %lu\n"
"TX total data packets %lu\n"
"TX total data bytes :%lu\n",
priv->stats.txviokint,
priv->stats.txvookint,
priv->stats.txbeokint,
priv->stats.txbkokint,
priv->stats.txmanageokint,
priv->stats.txbeaconokint,
priv->stats.txbeaconerr,
priv->stats.txcmdpktokint,
netif_queue_stopped(priv->ieee80211->dev),
priv->stats.txoverflow,
priv->ieee80211->stats.tx_packets,
priv->ieee80211->stats.tx_bytes);
*eof = 1;
return len;
}
static int proc_get_stats_rx(char *page, char **start,
off_t offset, int count,
int *eof, void *data)
{
struct r8192_priv *priv = data;
int len = 0;
len += snprintf(page + len, count - len,
"RX packets: %lu\n"
"RX desc err: %lu\n"
"RX rx overflow error: %lu\n",
priv->stats.rxint,
priv->stats.rxrdu,
priv->stats.rxoverflow);
*eof = 1;
return len;
}
static void rtl8192_proc_module_init(void)
{
RT_TRACE(COMP_INIT, "Initializing proc filesystem\n");
rtl8192_proc = proc_mkdir(RTL819xE_MODULE_NAME, init_net.proc_net);
}
static void rtl8192_proc_module_remove(void)
{
remove_proc_entry(RTL819xE_MODULE_NAME, init_net.proc_net);
}
static void rtl8192_proc_remove_one(struct r8192_priv *priv)
{
struct net_device *dev = priv->ieee80211->dev;
printk("dev name=======> %s\n",dev->name);
if (priv->dir_dev) {
remove_proc_entry("stats-tx", priv->dir_dev);
remove_proc_entry("stats-rx", priv->dir_dev);
remove_proc_entry("stats-ap", priv->dir_dev);
remove_proc_entry("registers", priv->dir_dev);
remove_proc_entry("wlan0", rtl8192_proc);
priv->dir_dev = NULL;
}
}
static void rtl8192_proc_init_one(struct r8192_priv *priv)
{
struct net_device *dev = priv->ieee80211->dev;
struct proc_dir_entry *e;
priv->dir_dev = proc_mkdir(dev->name, rtl8192_proc);
if (!priv->dir_dev) {
RT_TRACE(COMP_ERR, "Unable to initialize /proc/net/rtl8192/%s\n",
dev->name);
return;
}
e = create_proc_read_entry("stats-rx", S_IFREG | S_IRUGO,
priv->dir_dev, proc_get_stats_rx, priv);
if (!e) {
RT_TRACE(COMP_ERR,"Unable to initialize "
"/proc/net/rtl8192/%s/stats-rx\n",
dev->name);
}
e = create_proc_read_entry("stats-tx", S_IFREG | S_IRUGO,
priv->dir_dev, proc_get_stats_tx, priv);
if (!e) {
RT_TRACE(COMP_ERR, "Unable to initialize "
"/proc/net/rtl8192/%s/stats-tx\n",
dev->name);
}
e = create_proc_read_entry("stats-ap", S_IFREG | S_IRUGO,
priv->dir_dev, proc_get_stats_ap, priv);
if (!e) {
RT_TRACE(COMP_ERR, "Unable to initialize "
"/proc/net/rtl8192/%s/stats-ap\n",
dev->name);
}
e = create_proc_read_entry("registers", S_IFREG | S_IRUGO,
priv->dir_dev, proc_get_registers, priv);
if (!e) {
RT_TRACE(COMP_ERR, "Unable to initialize "
"/proc/net/rtl8192/%s/registers\n",
dev->name);
}
}
static short check_nic_enough_desc(struct ieee80211_device *ieee, int prio)
{
struct r8192_priv *priv = ieee80211_priv(ieee->dev);
struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
/* for now we reserve two free descriptor as a safety boundary
* between the tail and the head
*/
return (ring->entries - skb_queue_len(&ring->queue) >= 2);
}
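/*
 * Worked example for the check above: with 64 ring entries and 61 skbs
 * queued, entries - queue_len = 3 >= 2, so another frame is accepted;
 * at 63 queued the check fails and the caller must stop the queue,
 * always keeping two free descriptors between the tail and the head.
 */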
static void tx_timeout(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
schedule_work(&priv->reset_wq);
printk("TXTIMEOUT");
}
static void rtl8192_irq_enable(struct r8192_priv *priv)
{
u32 mask;
mask = IMR_ROK | IMR_VODOK | IMR_VIDOK | IMR_BEDOK | IMR_BKDOK |
IMR_HCCADOK | IMR_MGNTDOK | IMR_COMDOK | IMR_HIGHDOK |
IMR_BDOK | IMR_RXCMDOK | IMR_TIMEOUT0 | IMR_RDU | IMR_RXFOVW |
IMR_TXFOVW | IMR_BcnInt | IMR_TBDOK | IMR_TBDER;
write_nic_dword(priv, INTA_MASK, mask);
}
static void rtl8192_irq_disable(struct r8192_priv *priv)
{
write_nic_dword(priv, INTA_MASK, 0);
synchronize_irq(priv->irq);
}
static void rtl8192_update_msr(struct r8192_priv *priv)
{
u8 msr;
msr = read_nic_byte(priv, MSR);
msr &= ~ MSR_LINK_MASK;
/* do not change in link_state != WLAN_LINK_ASSOCIATED.
* msr must be updated if the state is ASSOCIATING.
* this is intentional and makes sense for ad-hoc and
* master (see the create BSS/IBSS func)
*/
if (priv->ieee80211->state == IEEE80211_LINKED){
if (priv->ieee80211->iw_mode == IW_MODE_INFRA)
msr |= (MSR_LINK_MANAGED<<MSR_LINK_SHIFT);
else if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
msr |= (MSR_LINK_ADHOC<<MSR_LINK_SHIFT);
else if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
msr |= (MSR_LINK_MASTER<<MSR_LINK_SHIFT);
}else
msr |= (MSR_LINK_NONE<<MSR_LINK_SHIFT);
write_nic_byte(priv, MSR, msr);
}
static void rtl8192_set_chan(struct ieee80211_device *ieee80211, short ch)
{
struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
priv->chan = ch;
/* need to implement rf set channel here WB */
if (priv->rf_set_chan)
priv->rf_set_chan(ieee80211, priv->chan);
}
static void rtl8192_rx_enable(struct r8192_priv *priv)
{
write_nic_dword(priv, RDQDA, priv->rx_ring_dma);
}
/* the TX_DESC_BASE setting is according to the following queue index
* BK_QUEUE ===> 0
* BE_QUEUE ===> 1
* VI_QUEUE ===> 2
* VO_QUEUE ===> 3
* HCCA_QUEUE ===> 4
* TXCMD_QUEUE ===> 5
* MGNT_QUEUE ===> 6
* HIGH_QUEUE ===> 7
* BEACON_QUEUE ===> 8
* */
static const u32 TX_DESC_BASE[] = {BKQDA, BEQDA, VIQDA, VOQDA, HCCAQDA, CQDA, MQDA, HQDA, BQDA};
static void rtl8192_tx_enable(struct r8192_priv *priv)
{
u32 i;
for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
write_nic_dword(priv, TX_DESC_BASE[i], priv->tx_ring[i].dma);
ieee80211_reset_queue(priv->ieee80211);
}
static void rtl8192_free_rx_ring(struct r8192_priv *priv)
{
int i;
for (i = 0; i < priv->rxringcount; i++) {
struct sk_buff *skb = priv->rx_buf[i];
if (!skb)
continue;
pci_unmap_single(priv->pdev,
*((dma_addr_t *)skb->cb),
priv->rxbuffersize, PCI_DMA_FROMDEVICE);
kfree_skb(skb);
}
pci_free_consistent(priv->pdev, sizeof(*priv->rx_ring) * priv->rxringcount,
priv->rx_ring, priv->rx_ring_dma);
priv->rx_ring = NULL;
}
static void rtl8192_free_tx_ring(struct r8192_priv *priv, unsigned int prio)
{
struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
while (skb_queue_len(&ring->queue)) {
tx_desc_819x_pci *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
pci_free_consistent(priv->pdev, sizeof(*ring->desc)*ring->entries,
ring->desc, ring->dma);
ring->desc = NULL;
}
void PHY_SetRtl8192eRfOff(struct r8192_priv *priv)
{
//disable RF-Chip A/B
rtl8192_setBBreg(priv, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0);
//analog to digital off, for power save
rtl8192_setBBreg(priv, rFPGA0_AnalogParameter4, 0x300, 0x0);
//digital to analog off, for power save
rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, 0x18, 0x0);
//rx antenna off
rtl8192_setBBreg(priv, rOFDM0_TRxPathEnable, 0xf, 0x0);
//rx antenna off
rtl8192_setBBreg(priv, rOFDM1_TRxPathEnable, 0xf, 0x0);
//analog to digital part2 off, for power save
rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, 0x60, 0x0);
rtl8192_setBBreg(priv, rFPGA0_AnalogParameter1, 0x4, 0x0);
// Analog parameter!!Change bias and Lbus control.
write_nic_byte(priv, ANAPAR_FOR_8192PciE, 0x07);
}
static void rtl8192_halt_adapter(struct r8192_priv *priv, bool reset)
{
int i;
u8 OpMode;
u32 ulRegRead;
OpMode = RT_OP_MODE_NO_LINK;
priv->ieee80211->SetHwRegHandler(priv->ieee80211, HW_VAR_MEDIA_STATUS, &OpMode);
if (!priv->ieee80211->bSupportRemoteWakeUp) {
/*
* disable tx/rx. In 8185 we write 0x10 (Reset bit),
* but here we make reference to WMAC and write 0x0
*/
write_nic_byte(priv, CMDR, 0);
}
mdelay(20);
if (!reset) {
mdelay(150);
priv->bHwRfOffAction = 2;
/*
* Call MgntActSet_RF_State instead to
* prevent RF config race condition.
*/
if (!priv->ieee80211->bSupportRemoteWakeUp) {
PHY_SetRtl8192eRfOff(priv);
ulRegRead = read_nic_dword(priv, CPU_GEN);
ulRegRead |= CPU_GEN_SYSTEM_RESET;
write_nic_dword(priv,CPU_GEN, ulRegRead);
} else {
/* for WOL */
write_nic_dword(priv, WFCRC0, 0xffffffff);
write_nic_dword(priv, WFCRC1, 0xffffffff);
write_nic_dword(priv, WFCRC2, 0xffffffff);
/* Write PMR register */
write_nic_byte(priv, PMR, 0x5);
/* Disable tx, enable rx */
write_nic_byte(priv, MacBlkCtrl, 0xa);
}
}
for(i = 0; i < MAX_QUEUE_SIZE; i++) {
skb_queue_purge(&priv->ieee80211->skb_waitQ [i]);
}
for(i = 0; i < MAX_QUEUE_SIZE; i++) {
skb_queue_purge(&priv->ieee80211->skb_aggQ [i]);
}
skb_queue_purge(&priv->skb_queue);
}
static void rtl8192_data_hard_stop(struct ieee80211_device *ieee80211)
{
}
static void rtl8192_data_hard_resume(struct ieee80211_device *ieee80211)
{
}
/*
* This function TXes data frames when the ieee80211 stack requests it.
* It also checks whether the ieee tx queue needs to be stopped, and
* stops it if so.
*/
static void rtl8192_hard_data_xmit(struct sk_buff *skb,
struct ieee80211_device *ieee80211, int rate)
{
struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
int ret;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
/* this path must not be used for command packets */
BUG_ON(queue_index == TXCMD_QUEUE);
if (priv->bHwRadioOff || (!priv->up))
{
kfree_skb(skb);
return;
}
skb_push(skb, priv->ieee80211->tx_headroom);
ret = rtl8192_tx(priv, skb);
if (ret != 0) {
kfree_skb(skb);
}
if (queue_index != MGNT_QUEUE) {
priv->ieee80211->stats.tx_bytes += (skb->len - priv->ieee80211->tx_headroom);
priv->ieee80211->stats.tx_packets++;
}
}
/*
* This is a rough attempt to TX a frame
* This is called by the ieee 80211 stack to TX management frames.
* If the ring is full, packets are dropped (for data frames the
* queue is stopped before this can happen).
*/
static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct ieee80211_device *ieee80211)
{
struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
int ret;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
if (queue_index != TXCMD_QUEUE) {
if (priv->bHwRadioOff || (!priv->up))
{
kfree_skb(skb);
return 0;
}
}
if (queue_index == TXCMD_QUEUE) {
rtl819xE_tx_cmd(priv, skb);
ret = 0;
return ret;
} else {
tcb_desc->RATRIndex = 7;
tcb_desc->bTxDisableRateFallBack = 1;
tcb_desc->bTxUseDriverAssingedRate = 1;
tcb_desc->bTxEnableFwCalcDur = 1;
skb_push(skb, ieee80211->tx_headroom);
ret = rtl8192_tx(priv, skb);
if (ret != 0) {
kfree_skb(skb);
}
}
return ret;
}
static void rtl8192_tx_isr(struct r8192_priv *priv, int prio)
{
struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
while (skb_queue_len(&ring->queue)) {
tx_desc_819x_pci *entry = &ring->desc[ring->idx];
struct sk_buff *skb;
/*
* the beacon packet uses only the first descriptor by default,
* and the OWN may not be cleared by the hardware
*/
if (prio != BEACON_QUEUE) {
if (entry->OWN)
return;
ring->idx = (ring->idx + 1) % ring->entries;
}
skb = __skb_dequeue(&ring->queue);
pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
}
if (prio != BEACON_QUEUE) {
/* try to deal with the pending packets */
tasklet_schedule(&priv->irq_tx_tasklet);
}
}
static void rtl8192_stop_beacon(struct ieee80211_device *ieee80211)
{
}
static void rtl8192_config_rate(struct r8192_priv *priv, u16* rate_config)
{
struct ieee80211_network *net;
u8 i=0, basic_rate = 0;
net = & priv->ieee80211->current_network;
for (i=0; i<net->rates_len; i++)
{
basic_rate = net->rates[i]&0x7f;
switch(basic_rate)
{
case MGN_1M: *rate_config |= RRSR_1M; break;
case MGN_2M: *rate_config |= RRSR_2M; break;
case MGN_5_5M: *rate_config |= RRSR_5_5M; break;
case MGN_11M: *rate_config |= RRSR_11M; break;
case MGN_6M: *rate_config |= RRSR_6M; break;
case MGN_9M: *rate_config |= RRSR_9M; break;
case MGN_12M: *rate_config |= RRSR_12M; break;
case MGN_18M: *rate_config |= RRSR_18M; break;
case MGN_24M: *rate_config |= RRSR_24M; break;
case MGN_36M: *rate_config |= RRSR_36M; break;
case MGN_48M: *rate_config |= RRSR_48M; break;
case MGN_54M: *rate_config |= RRSR_54M; break;
}
}
for (i=0; i<net->rates_ex_len; i++)
{
basic_rate = net->rates_ex[i]&0x7f;
switch(basic_rate)
{
case MGN_1M: *rate_config |= RRSR_1M; break;
case MGN_2M: *rate_config |= RRSR_2M; break;
case MGN_5_5M: *rate_config |= RRSR_5_5M; break;
case MGN_11M: *rate_config |= RRSR_11M; break;
case MGN_6M: *rate_config |= RRSR_6M; break;
case MGN_9M: *rate_config |= RRSR_9M; break;
case MGN_12M: *rate_config |= RRSR_12M; break;
case MGN_18M: *rate_config |= RRSR_18M; break;
case MGN_24M: *rate_config |= RRSR_24M; break;
case MGN_36M: *rate_config |= RRSR_36M; break;
case MGN_48M: *rate_config |= RRSR_48M; break;
case MGN_54M: *rate_config |= RRSR_54M; break;
}
}
}
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
static void rtl8192_update_cap(struct r8192_priv *priv, u16 cap)
{
u32 tmp = 0;
struct ieee80211_network *net = &priv->ieee80211->current_network;
priv->short_preamble = cap & WLAN_CAPABILITY_SHORT_PREAMBLE;
tmp = priv->basic_rate;
if (priv->short_preamble)
tmp |= BRSR_AckShortPmb;
write_nic_dword(priv, RRSR, tmp);
if (net->mode & (IEEE_G|IEEE_N_24G))
{
u8 slot_time = 0;
if ((cap & WLAN_CAPABILITY_SHORT_SLOT)&&(!priv->ieee80211->pHTInfo->bCurrentRT2RTLongSlotTime))
{//short slot time
slot_time = SHORT_SLOT_TIME;
}
else //long slot time
slot_time = NON_SHORT_SLOT_TIME;
priv->slot_time = slot_time;
write_nic_byte(priv, SLOT_TIME, slot_time);
}
}
static void rtl8192_net_update(struct r8192_priv *priv)
{
struct ieee80211_network *net;
u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
u16 rate_config = 0;
net = &priv->ieee80211->current_network;
/* update Basic rate: RR, BRSR */
rtl8192_config_rate(priv, &rate_config);
/*
* Select RRSR (in Legacy-OFDM and CCK)
* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M,
* 2M, and 1M from the Basic rate.
* We do not use other rates.
*/
priv->basic_rate = rate_config &= 0x15f;
/* BSSID */
write_nic_dword(priv, BSSIDR, ((u32 *)net->bssid)[0]);
write_nic_word(priv, BSSIDR+4, ((u16 *)net->bssid)[2]);
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
{
write_nic_word(priv, ATIMWND, 2);
write_nic_word(priv, BCN_DMATIME, 256);
write_nic_word(priv, BCN_INTERVAL, net->beacon_interval);
/*
* BIT15 of BCN_DRV_EARLY_INT will indicate
* whether software beacon or hw beacon is applied.
*/
write_nic_word(priv, BCN_DRV_EARLY_INT, 10);
write_nic_byte(priv, BCN_ERR_THRESH, 100);
BcnTimeCfg |= (BcnCW<<BCN_TCFG_CW_SHIFT);
/* TODO: BcnIFS may need to be changed on the ASIC */
BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
write_nic_word(priv, BCN_TCFG, BcnTimeCfg);
}
}
static void rtl819xE_tx_cmd(struct r8192_priv *priv, struct sk_buff *skb)
{
struct rtl8192_tx_ring *ring;
tx_desc_819x_pci *entry;
unsigned int idx;
dma_addr_t mapping;
cb_desc *tcb_desc;
unsigned long flags;
ring = &priv->tx_ring[TXCMD_QUEUE];
mapping = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
spin_lock_irqsave(&priv->irq_th_lock,flags);
idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
entry = &ring->desc[idx];
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
memset(entry,0,12);
entry->LINIP = tcb_desc->bLastIniPkt;
entry->FirstSeg = 1;//first segment
entry->LastSeg = 1; //last segment
if(tcb_desc->bCmdOrInit == DESC_PACKET_TYPE_INIT) {
entry->CmdInit = DESC_PACKET_TYPE_INIT;
} else {
entry->CmdInit = DESC_PACKET_TYPE_NORMAL;
entry->Offset = sizeof(TX_FWINFO_8190PCI) + 8;
entry->PktSize = (u16)(tcb_desc->pkt_size + entry->Offset);
entry->QueueSelect = QSLT_CMD;
entry->TxFWInfoSize = 0x08;
entry->RATid = (u8)DESC_PACKET_TYPE_INIT;
}
entry->TxBufferSize = skb->len;
entry->TxBuffAddr = cpu_to_le32(mapping);
entry->OWN = 1;
__skb_queue_tail(&ring->queue, skb);
spin_unlock_irqrestore(&priv->irq_th_lock,flags);
write_nic_byte(priv, TPPoll, TPPoll_CQ);
return;
}
/*
* Mapping Software/Hardware descriptor queue id to "Queue Select Field"
* in TxFwInfo data structure
*/
static u8 MapHwQueueToFirmwareQueue(u8 QueueID)
{
u8 QueueSelect = 0;
switch (QueueID) {
case BE_QUEUE:
QueueSelect = QSLT_BE;
break;
case BK_QUEUE:
QueueSelect = QSLT_BK;
break;
case VO_QUEUE:
QueueSelect = QSLT_VO;
break;
case VI_QUEUE:
QueueSelect = QSLT_VI;
break;
case MGNT_QUEUE:
QueueSelect = QSLT_MGNT;
break;
case BEACON_QUEUE:
QueueSelect = QSLT_BEACON;
break;
case TXCMD_QUEUE:
QueueSelect = QSLT_CMD;
break;
case HIGH_QUEUE:
default:
RT_TRACE(COMP_ERR, "Impossible Queue Selection: %d\n", QueueID);
break;
}
return QueueSelect;
}
static u8 MRateToHwRate8190Pci(u8 rate)
{
u8 ret = DESC90_RATE1M;
switch(rate) {
case MGN_1M: ret = DESC90_RATE1M; break;
case MGN_2M: ret = DESC90_RATE2M; break;
case MGN_5_5M: ret = DESC90_RATE5_5M; break;
case MGN_11M: ret = DESC90_RATE11M; break;
case MGN_6M: ret = DESC90_RATE6M; break;
case MGN_9M: ret = DESC90_RATE9M; break;
case MGN_12M: ret = DESC90_RATE12M; break;
case MGN_18M: ret = DESC90_RATE18M; break;
case MGN_24M: ret = DESC90_RATE24M; break;
case MGN_36M: ret = DESC90_RATE36M; break;
case MGN_48M: ret = DESC90_RATE48M; break;
case MGN_54M: ret = DESC90_RATE54M; break;
// HT rate since here
case MGN_MCS0: ret = DESC90_RATEMCS0; break;
case MGN_MCS1: ret = DESC90_RATEMCS1; break;
case MGN_MCS2: ret = DESC90_RATEMCS2; break;
case MGN_MCS3: ret = DESC90_RATEMCS3; break;
case MGN_MCS4: ret = DESC90_RATEMCS4; break;
case MGN_MCS5: ret = DESC90_RATEMCS5; break;
case MGN_MCS6: ret = DESC90_RATEMCS6; break;
case MGN_MCS7: ret = DESC90_RATEMCS7; break;
case MGN_MCS8: ret = DESC90_RATEMCS8; break;
case MGN_MCS9: ret = DESC90_RATEMCS9; break;
case MGN_MCS10: ret = DESC90_RATEMCS10; break;
case MGN_MCS11: ret = DESC90_RATEMCS11; break;
case MGN_MCS12: ret = DESC90_RATEMCS12; break;
case MGN_MCS13: ret = DESC90_RATEMCS13; break;
case MGN_MCS14: ret = DESC90_RATEMCS14; break;
case MGN_MCS15: ret = DESC90_RATEMCS15; break;
case (0x80|0x20): ret = DESC90_RATEMCS32; break;
default: break;
}
return ret;
}
static u8 QueryIsShort(u8 TxHT, u8 TxRate, cb_desc *tcb_desc)
{
u8 tmp_Short;
tmp_Short = (TxHT==1)?((tcb_desc->bUseShortGI)?1:0):((tcb_desc->bUseShortPreamble)?1:0);
if(TxHT==1 && TxRate != DESC90_RATEMCS15)
tmp_Short = 0;
return tmp_Short;
}
/*
* The tx procedure is as follows:
* skb->cb contains all the needed information:
* priority, morefrag, rate, and dev.
*/
static short rtl8192_tx(struct r8192_priv *priv, struct sk_buff* skb)
{
struct rtl8192_tx_ring *ring;
unsigned long flags;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tx_desc_819x_pci *pdesc = NULL;
TX_FWINFO_8190PCI *pTxFwInfo = NULL;
dma_addr_t mapping;
bool multi_addr = false, broad_addr = false, uni_addr = false;
u8 *pda_addr = NULL;
int idx;
if (priv->bdisable_nic) {
RT_TRACE(COMP_ERR, "Nic is disabled! Can't tx packet len=%d qidx=%d!!!\n",
skb->len, tcb_desc->queue_index);
return skb->len;
}
#ifdef ENABLE_LPS
priv->ieee80211->bAwakePktSent = true;
#endif
mapping = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
/* collect the tx packet statistics */
pda_addr = ((u8 *)skb->data) + sizeof(TX_FWINFO_8190PCI);
if (is_multicast_ether_addr(pda_addr))
multi_addr = true;
else if (is_broadcast_ether_addr(pda_addr))
broad_addr = true;
else
uni_addr = true;
if (uni_addr)
priv->stats.txbytesunicast += skb->len - sizeof(TX_FWINFO_8190PCI);
/* fill tx firmware */
pTxFwInfo = (PTX_FWINFO_8190PCI)skb->data;
memset(pTxFwInfo, 0, sizeof(TX_FWINFO_8190PCI));
pTxFwInfo->TxHT = (tcb_desc->data_rate&0x80) ? 1 : 0;
pTxFwInfo->TxRate = MRateToHwRate8190Pci((u8)tcb_desc->data_rate);
pTxFwInfo->EnableCPUDur = tcb_desc->bTxEnableFwCalcDur;
pTxFwInfo->Short = QueryIsShort(pTxFwInfo->TxHT, pTxFwInfo->TxRate, tcb_desc);
/* Aggregation related */
if (tcb_desc->bAMPDUEnable) {
pTxFwInfo->AllowAggregation = 1;
pTxFwInfo->RxMF = tcb_desc->ampdu_factor;
pTxFwInfo->RxAMD = tcb_desc->ampdu_density;
} else {
pTxFwInfo->AllowAggregation = 0;
pTxFwInfo->RxMF = 0;
pTxFwInfo->RxAMD = 0;
}
/* Protection mode related */
pTxFwInfo->RtsEnable = (tcb_desc->bRTSEnable) ? 1 : 0;
pTxFwInfo->CtsEnable = (tcb_desc->bCTSEnable) ? 1 : 0;
pTxFwInfo->RtsSTBC = (tcb_desc->bRTSSTBC) ? 1 : 0;
pTxFwInfo->RtsHT = (tcb_desc->rts_rate&0x80) ? 1 : 0;
pTxFwInfo->RtsRate = MRateToHwRate8190Pci((u8)tcb_desc->rts_rate);
pTxFwInfo->RtsBandwidth = 0;
pTxFwInfo->RtsSubcarrier = tcb_desc->RTSSC;
pTxFwInfo->RtsShort = (pTxFwInfo->RtsHT == 0) ? (tcb_desc->bRTSUseShortPreamble ? 1 : 0) : (tcb_desc->bRTSUseShortGI? 1 : 0);
/* Set Bandwidth and sub-channel settings. */
if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40) {
if (tcb_desc->bPacketBW) {
pTxFwInfo->TxBandwidth = 1;
/* use duplicated mode */
pTxFwInfo->TxSubCarrier = 0;
} else {
pTxFwInfo->TxBandwidth = 0;
pTxFwInfo->TxSubCarrier = priv->nCur40MhzPrimeSC;
}
} else {
pTxFwInfo->TxBandwidth = 0;
pTxFwInfo->TxSubCarrier = 0;
}
spin_lock_irqsave(&priv->irq_th_lock, flags);
ring = &priv->tx_ring[tcb_desc->queue_index];
if (tcb_desc->queue_index != BEACON_QUEUE)
idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
else
idx = 0;
pdesc = &ring->desc[idx];
if ((pdesc->OWN == 1) && (tcb_desc->queue_index != BEACON_QUEUE)) {
RT_TRACE(COMP_ERR, "No more TX desc@%d, ring->idx = %d,idx = %d,%x\n",
tcb_desc->queue_index, ring->idx, idx, skb->len);
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
return skb->len;
}
/* fill tx descriptor */
memset(pdesc, 0, 12);
/*DWORD 0*/
pdesc->LINIP = 0;
pdesc->CmdInit = 1;
pdesc->Offset = sizeof(TX_FWINFO_8190PCI) + 8; /* We must add 8!! */
pdesc->PktSize = (u16)skb->len-sizeof(TX_FWINFO_8190PCI);
/*DWORD 1*/
pdesc->SecCAMID = 0;
pdesc->RATid = tcb_desc->RATRIndex;
pdesc->NoEnc = 1;
pdesc->SecType = 0x0;
if (tcb_desc->bHwSec) {
switch (priv->ieee80211->pairwise_key_type) {
case KEY_TYPE_WEP40:
case KEY_TYPE_WEP104:
pdesc->SecType = 0x1;
pdesc->NoEnc = 0;
break;
case KEY_TYPE_TKIP:
pdesc->SecType = 0x2;
pdesc->NoEnc = 0;
break;
case KEY_TYPE_CCMP:
pdesc->SecType = 0x3;
pdesc->NoEnc = 0;
break;
case KEY_TYPE_NA:
pdesc->SecType = 0x0;
pdesc->NoEnc = 1;
break;
}
}
/* Set Packet ID */
pdesc->PktId = 0x0;
pdesc->QueueSelect = MapHwQueueToFirmwareQueue(tcb_desc->queue_index);
pdesc->TxFWInfoSize = sizeof(TX_FWINFO_8190PCI);
pdesc->DISFB = tcb_desc->bTxDisableRateFallBack;
pdesc->USERATE = tcb_desc->bTxUseDriverAssingedRate;
pdesc->FirstSeg = 1;
pdesc->LastSeg = 1;
pdesc->TxBufferSize = skb->len;
pdesc->TxBuffAddr = cpu_to_le32(mapping);
__skb_queue_tail(&ring->queue, skb);
pdesc->OWN = 1;
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
priv->ieee80211->dev->trans_start = jiffies;
write_nic_word(priv, TPPoll, 0x01<<tcb_desc->queue_index);
return 0;
}
static short rtl8192_alloc_rx_desc_ring(struct r8192_priv *priv)
{
rx_desc_819x_pci *entry = NULL;
int i;
priv->rx_ring = pci_alloc_consistent(priv->pdev,
sizeof(*priv->rx_ring) * priv->rxringcount, &priv->rx_ring_dma);
if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
RT_TRACE(COMP_ERR,"Cannot allocate RX ring\n");
return -ENOMEM;
}
memset(priv->rx_ring, 0, sizeof(*priv->rx_ring) * priv->rxringcount);
priv->rx_idx = 0;
for (i = 0; i < priv->rxringcount; i++) {
struct sk_buff *skb = dev_alloc_skb(priv->rxbuffersize);
dma_addr_t *mapping;
entry = &priv->rx_ring[i];
if (!skb)
return -ENOMEM;
priv->rx_buf[i] = skb;
mapping = (dma_addr_t *)skb->cb;
*mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
priv->rxbuffersize, PCI_DMA_FROMDEVICE);
entry->BufferAddress = cpu_to_le32(*mapping);
entry->Length = priv->rxbuffersize;
entry->OWN = 1;
}
entry->EOR = 1;
return 0;
}
static int rtl8192_alloc_tx_desc_ring(struct r8192_priv *priv,
unsigned int prio, unsigned int entries)
{
tx_desc_819x_pci *ring;
dma_addr_t dma;
int i;
ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
if (!ring || (unsigned long)ring & 0xFF) {
RT_TRACE(COMP_ERR, "Cannot allocate TX ring (prio = %d)\n", prio);
return -ENOMEM;
}
memset(ring, 0, sizeof(*ring)*entries);
priv->tx_ring[prio].desc = ring;
priv->tx_ring[prio].dma = dma;
priv->tx_ring[prio].idx = 0;
priv->tx_ring[prio].entries = entries;
skb_queue_head_init(&priv->tx_ring[prio].queue);
for (i = 0; i < entries; i++)
ring[i].NextDescAddress =
cpu_to_le32((u32)dma + ((i + 1) % entries) * sizeof(*ring));
return 0;
}
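/*
 * The NextDescAddress loop above links the descriptors into a circular
 * hardware ring: entry i points at entry (i + 1) % entries, so with 64
 * entries descriptor 63 wraps back to the DMA base address of
 * descriptor 0 and the hardware can walk the ring indefinitely.
 */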
static short rtl8192_pci_initdescring(struct r8192_priv *priv)
{
int ret;
int i;
ret = rtl8192_alloc_rx_desc_ring(priv);
if (ret)
return ret;
/* general process for other queue */
for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) {
ret = rtl8192_alloc_tx_desc_ring(priv, i, priv->txringcount);
if (ret)
goto err_free_rings;
}
return 0;
err_free_rings:
rtl8192_free_rx_ring(priv);
for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
if (priv->tx_ring[i].desc)
rtl8192_free_tx_ring(priv, i);
return 1;
}
static void rtl8192_pci_resetdescring(struct r8192_priv *priv)
{
int i;
/* force the rx_idx to the first one */
if(priv->rx_ring) {
rx_desc_819x_pci *entry = NULL;
for (i = 0; i < priv->rxringcount; i++) {
entry = &priv->rx_ring[i];
entry->OWN = 1;
}
priv->rx_idx = 0;
}
/* after reset, release previous pending packet, and force the
* tx idx to the first one */
for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) {
if (priv->tx_ring[i].desc) {
struct rtl8192_tx_ring *ring = &priv->tx_ring[i];
while (skb_queue_len(&ring->queue)) {
tx_desc_819x_pci *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
ring->idx = 0;
}
}
}
static void rtl8192_link_change(struct ieee80211_device *ieee)
{
struct r8192_priv *priv = ieee80211_priv(ieee->dev);
if (ieee->state == IEEE80211_LINKED)
{
rtl8192_net_update(priv);
rtl8192_update_ratr_table(priv);
//add this since in pure N mode, WEP encryption uses the software path, but there is no chance to set this as WEP will not set a group key in wext. WB.2008.07.08
if ((KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->pairwise_key_type))
EnableHWSecurityConfig8192(priv);
}
else
{
write_nic_byte(priv, 0x173, 0);
}
rtl8192_update_msr(priv);
// 2007/10/16 MH: the MAC will update TSF according to all received beacons,
// so we have to set the CBSSID bit when linked with any AP or STA.
if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
{
u32 reg = 0;
reg = read_nic_dword(priv, RCR);
if (priv->ieee80211->state == IEEE80211_LINKED)
priv->ReceiveConfig = reg |= RCR_CBSSID;
else
priv->ReceiveConfig = reg &= ~RCR_CBSSID;
write_nic_dword(priv, RCR, reg);
}
}
static const struct ieee80211_qos_parameters def_qos_parameters = {
{3,3,3,3},/* cw_min */
{7,7,7,7},/* cw_max */
{2,2,2,2},/* aifs */
{0,0,0,0},/* flags */
{0,0,0,0} /* tx_op_limit */
};
static void rtl8192_update_beacon(struct work_struct * work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, update_beacon_wq.work);
struct ieee80211_device* ieee = priv->ieee80211;
struct ieee80211_network* net = &ieee->current_network;
if (ieee->pHTInfo->bCurrentHTSupport)
HTUpdateSelfAndPeerSetting(ieee, net);
ieee->pHTInfo->bCurrentRT2RTLongSlotTime = net->bssht.bdRT2RTLongSlotTime;
rtl8192_update_cap(priv, net->capability);
}
/*
* background support to run QoS activate functionality
*/
static const int WDCAPARA_ADD[] = {EDCAPARA_BE,EDCAPARA_BK,EDCAPARA_VI,EDCAPARA_VO};
static void rtl8192_qos_activate(struct work_struct * work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, qos_activate);
struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
u8 mode = priv->ieee80211->current_network.mode;
u8 u1bAIFS;
u32 u4bAcParam;
int i;
mutex_lock(&priv->mutex);
if(priv->ieee80211->state != IEEE80211_LINKED)
goto success;
RT_TRACE(COMP_QOS,"qos active process with associate response received\n");
/* Better to set the slot time first */
/* Since we only support b/g mode at present, the slot time selection is 9/20 */
/* update the ac parameter to related registers */
for(i = 0; i < QOS_QUEUE_NUM; i++) {
//Mode G/A: slot time = 9; Mode B: 20
u1bAIFS = qos_parameters->aifs[i] * ((mode&(IEEE_G|IEEE_N_24G)) ?9:20) + aSifsTime;
u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[i]))<< AC_PARAM_TXOP_LIMIT_OFFSET)|
(((u32)(qos_parameters->cw_max[i]))<< AC_PARAM_ECW_MAX_OFFSET)|
(((u32)(qos_parameters->cw_min[i]))<< AC_PARAM_ECW_MIN_OFFSET)|
((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
write_nic_dword(priv, WDCAPARA_ADD[i], u4bAcParam);
}
success:
mutex_unlock(&priv->mutex);
}
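/*
 * Worked example for the packing above, assuming aSifsTime is 10us:
 * for an AC with aifs = 2 in G mode, u1bAIFS = 2 * 9 + 10 = 28us, and
 * u4bAcParam packs TXOP limit, ECWmax, ECWmin and AIFS into a single
 * dword written to that AC's EDCA parameter register.
 */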
static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv,
int active_network,
struct ieee80211_network *network)
{
int ret = 0;
u32 size = sizeof(struct ieee80211_qos_parameters);
if(priv->ieee80211->state !=IEEE80211_LINKED)
return ret;
if ((priv->ieee80211->iw_mode != IW_MODE_INFRA))
return ret;
if (network->flags & NETWORK_HAS_QOS_MASK) {
if (active_network &&
(network->flags & NETWORK_HAS_QOS_PARAMETERS))
network->qos_data.active = network->qos_data.supported;
if ((network->qos_data.active == 1) && (active_network == 1) &&
(network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
(network->qos_data.old_param_count !=
network->qos_data.param_count)) {
network->qos_data.old_param_count =
network->qos_data.param_count;
queue_work(priv->priv_wq, &priv->qos_activate);
RT_TRACE (COMP_QOS, "QoS parameters change call "
"qos_activate\n");
}
} else {
memcpy(&priv->ieee80211->current_network.qos_data.parameters,
&def_qos_parameters, size);
if ((network->qos_data.active == 1) && (active_network == 1)) {
queue_work(priv->priv_wq, &priv->qos_activate);
RT_TRACE(COMP_QOS, "QoS was disabled call qos_activate\n");
}
network->qos_data.active = 0;
network->qos_data.supported = 0;
}
return 0;
}
/* handle management frames: beacon and probe response */
static int rtl8192_handle_beacon(struct ieee80211_device *ieee,
struct ieee80211_beacon * beacon,
struct ieee80211_network * network)
{
struct r8192_priv *priv = ieee80211_priv(ieee->dev);
rtl8192_qos_handle_probe_response(priv,1,network);
queue_delayed_work(priv->priv_wq, &priv->update_beacon_wq, 0);
return 0;
}
/*
* Handle the beacon responses: if the QoS setting seen on the network
* differs from the associated setting, adjust the QoS setting.
*/
static int rtl8192_qos_association_resp(struct r8192_priv *priv,
struct ieee80211_network *network)
{
int ret = 0;
unsigned long flags;
u32 size = sizeof(struct ieee80211_qos_parameters);
int set_qos_param = 0;
if ((priv == NULL) || (network == NULL))
return ret;
if (priv->ieee80211->state != IEEE80211_LINKED)
return ret;
if ((priv->ieee80211->iw_mode != IW_MODE_INFRA))
return ret;
spin_lock_irqsave(&priv->ieee80211->lock, flags);
if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
memcpy(&priv->ieee80211->current_network.qos_data.parameters,
&network->qos_data.parameters,
sizeof(struct ieee80211_qos_parameters));
priv->ieee80211->current_network.qos_data.active = 1;
set_qos_param = 1;
/* update qos parameter for current network */
priv->ieee80211->current_network.qos_data.old_param_count =
priv->ieee80211->current_network.qos_data.param_count;
priv->ieee80211->current_network.qos_data.param_count =
network->qos_data.param_count;
} else {
memcpy(&priv->ieee80211->current_network.qos_data.parameters,
&def_qos_parameters, size);
priv->ieee80211->current_network.qos_data.active = 0;
priv->ieee80211->current_network.qos_data.supported = 0;
set_qos_param = 1;
}
spin_unlock_irqrestore(&priv->ieee80211->lock, flags);
RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n", __FUNCTION__,
network->flags, priv->ieee80211->current_network.qos_data.active);
if (set_qos_param == 1)
queue_work(priv->priv_wq, &priv->qos_activate);
return ret;
}
static int rtl8192_handle_assoc_response(struct ieee80211_device *ieee,
struct ieee80211_assoc_response_frame *resp,
struct ieee80211_network *network)
{
struct r8192_priv *priv = ieee80211_priv(ieee->dev);
rtl8192_qos_association_resp(priv, network);
return 0;
}
/* update the RATR table for MCS only. Basic rate is not implemented. */
static void rtl8192_update_ratr_table(struct r8192_priv* priv)
{
struct ieee80211_device* ieee = priv->ieee80211;
u8* pMcsRate = ieee->dot11HTOperationalRateSet;
u32 ratr_value = 0;
u8 rate_index = 0;
rtl8192_config_rate(priv, (u16*)(&ratr_value));
ratr_value |= (*(u16*)(pMcsRate)) << 12;
switch (ieee->mode)
{
case IEEE_A:
ratr_value &= 0x00000FF0;
break;
case IEEE_B:
ratr_value &= 0x0000000F;
break;
case IEEE_G:
ratr_value &= 0x00000FF7;
break;
case IEEE_N_24G:
case IEEE_N_5G:
if (ieee->pHTInfo->PeerMimoPs == 0) //MIMO_PS_STATIC
ratr_value &= 0x0007F007;
else{
if (priv->rf_type == RF_1T2R)
ratr_value &= 0x000FF007;
else
ratr_value &= 0x0F81F007;
}
break;
default:
break;
}
ratr_value &= 0x0FFFFFFF;
if(ieee->pHTInfo->bCurTxBW40MHz && ieee->pHTInfo->bCurShortGI40MHz){
ratr_value |= 0x80000000;
}else if(!ieee->pHTInfo->bCurTxBW40MHz && ieee->pHTInfo->bCurShortGI20MHz){
ratr_value |= 0x80000000;
}
write_nic_dword(priv, RATR0+rate_index*4, ratr_value);
write_nic_byte(priv, UFWP, 1);
}
static bool GetNmodeSupportBySecCfg8190Pci(struct ieee80211_device *ieee)
{
return !(ieee->rtllib_ap_sec_type &&
(ieee->rtllib_ap_sec_type(ieee)&(SEC_ALG_WEP|SEC_ALG_TKIP)));
}
static void rtl8192_refresh_supportrate(struct r8192_priv* priv)
{
struct ieee80211_device* ieee = priv->ieee80211;
//we do not consider setting the support rate for ABG mode; only the HT MCS rate is set here.
if (ieee->mode == WIRELESS_MODE_N_24G || ieee->mode == WIRELESS_MODE_N_5G)
{
memcpy(ieee->Regdot11HTOperationalRateSet, ieee->RegHTSuppRateSet, 16);
}
else
memset(ieee->Regdot11HTOperationalRateSet, 0, 16);
}
static u8 rtl8192_getSupportedWireleeMode(void)
{
return (WIRELESS_MODE_N_24G|WIRELESS_MODE_G|WIRELESS_MODE_B);
}
static void rtl8192_SetWirelessMode(struct ieee80211_device *ieee, u8 wireless_mode)
{
struct r8192_priv *priv = ieee80211_priv(ieee->dev);
u8 bSupportMode = rtl8192_getSupportedWireleeMode();
if ((wireless_mode == WIRELESS_MODE_AUTO) || ((wireless_mode&bSupportMode)==0))
{
if(bSupportMode & WIRELESS_MODE_N_24G)
{
wireless_mode = WIRELESS_MODE_N_24G;
}
else if(bSupportMode & WIRELESS_MODE_N_5G)
{
wireless_mode = WIRELESS_MODE_N_5G;
}
else if((bSupportMode & WIRELESS_MODE_A))
{
wireless_mode = WIRELESS_MODE_A;
}
else if((bSupportMode & WIRELESS_MODE_G))
{
wireless_mode = WIRELESS_MODE_G;
}
else if((bSupportMode & WIRELESS_MODE_B))
{
wireless_mode = WIRELESS_MODE_B;
}
else{
RT_TRACE(COMP_ERR, "%s(), No valid wireless mode supported, SupportedWirelessMode(%x)!!!\n", __FUNCTION__,bSupportMode);
wireless_mode = WIRELESS_MODE_B;
}
}
priv->ieee80211->mode = wireless_mode;
if ((wireless_mode == WIRELESS_MODE_N_24G) || (wireless_mode == WIRELESS_MODE_N_5G))
priv->ieee80211->pHTInfo->bEnableHT = 1;
else
priv->ieee80211->pHTInfo->bEnableHT = 0;
RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
rtl8192_refresh_supportrate(priv);
}
static bool GetHalfNmodeSupportByAPs819xPci(struct ieee80211_device* ieee)
{
return ieee->bHalfWirelessN24GMode;
}
static short rtl8192_is_tx_queue_empty(struct ieee80211_device *ieee)
{
int i=0;
struct r8192_priv *priv = ieee80211_priv(ieee->dev);
for (i=0; i<=MGNT_QUEUE; i++)
{
if ((i== TXCMD_QUEUE) || (i == HCCA_QUEUE) )
continue;
if (skb_queue_len(&(&priv->tx_ring[i])->queue) > 0){
printk("===>tx queue is not empty:%d, %d\n", i, skb_queue_len(&(&priv->tx_ring[i])->queue));
return 0;
}
}
return 1;
}
static void rtl8192_hw_sleep_down(struct r8192_priv *priv)
{
MgntActSet_RF_State(priv, eRfSleep, RF_CHANGE_BY_PS);
}
static void rtl8192_hw_wakeup(struct ieee80211_device *ieee)
{
struct r8192_priv *priv = ieee80211_priv(ieee->dev);
MgntActSet_RF_State(priv, eRfOn, RF_CHANGE_BY_PS);
}
static void rtl8192_hw_wakeup_wq (struct work_struct *work)
{
struct delayed_work *dwork = container_of(work,struct delayed_work,work);
struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_wakeup_wq);
rtl8192_hw_wakeup(ieee);
}
#define MIN_SLEEP_TIME 50
#define MAX_SLEEP_TIME 10000
static void rtl8192_hw_to_sleep(struct ieee80211_device *ieee, u32 th, u32 tl)
{
struct r8192_priv *priv = ieee80211_priv(ieee->dev);
u32 tmp;
u32 rb = jiffies;
// Writing 0 to the HW register disables the timer,
// which is not really what we want.
//
tl -= MSECS(8+16+7);
// If the interval in which we are requested to sleep is too
// short then give up and remain awake;
// when we sleep after sending a null frame, the timer would be too short to sleep.
//
if(((tl>=rb)&& (tl-rb) <= MSECS(MIN_SLEEP_TIME))
||((rb>tl)&& (rb-tl) < MSECS(MIN_SLEEP_TIME))) {
printk("too short to sleep::%x, %x, %lx\n",tl, rb, MSECS(MIN_SLEEP_TIME));
return;
}
if(((tl > rb) && ((tl-rb) > MSECS(MAX_SLEEP_TIME)))||
((tl < rb) && (tl>MSECS(69)) && ((rb-tl) > MSECS(MAX_SLEEP_TIME)))||
((tl<rb)&&(tl<MSECS(69))&&((tl+0xffffffff-rb)>MSECS(MAX_SLEEP_TIME)))) {
printk("========>too long to sleep:%x, %x, %lx\n", tl, rb, MSECS(MAX_SLEEP_TIME));
return;
}
tmp = (tl>rb)?(tl-rb):(rb-tl);
queue_delayed_work(priv->ieee80211->wq,
&priv->ieee80211->hw_wakeup_wq,tmp);
rtl8192_hw_sleep_down(priv);
}
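/*
 * Illustrative note (not part of the original file): the wraparound
 * checks above can usually be expressed with the jiffies helpers from
 * <linux/jiffies.h>, which handle 32-bit overflow internally:
 */
#if 0 /* example only */
if (time_before(tl, rb + MSECS(MIN_SLEEP_TIME)))
return; /* too short an interval, stay awake */
if (time_after(tl, rb + MSECS(MAX_SLEEP_TIME)))
return; /* implausibly long interval, stay awake */
#endif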
static void rtl8192_init_priv_variable(struct r8192_priv *priv)
{
u8 i;
PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
// By default, halt the NIC if RF is OFF.
pPSC->RegRfPsLevel |= RT_RF_OFF_LEVL_HALT_NIC;
pPSC->RegRfPsLevel |= RT_RF_OFF_LEVL_CLK_REQ;
pPSC->RegRfPsLevel |= RT_RF_OFF_LEVL_ASPM;
pPSC->RegRfPsLevel |= RT_RF_LPS_LEVEL_ASPM;
pPSC->bLeisurePs = true;
priv->ieee80211->RegMaxLPSAwakeIntvl = 5;
priv->bHwRadioOff = false;
priv->being_init_adapter = false;
priv->txringcount = 64;//32;
priv->rxbuffersize = 9100;//2048;//1024;
priv->rxringcount = MAX_RX_COUNT;//64;
priv->chan = 1; //set to channel 1
priv->RegWirelessMode = WIRELESS_MODE_AUTO;
priv->RegChannelPlan = 0xf;
priv->ieee80211->mode = WIRELESS_MODE_AUTO; //SET AUTO
priv->ieee80211->iw_mode = IW_MODE_INFRA;
priv->ieee80211->ieee_up=0;
priv->retry_rts = DEFAULT_RETRY_RTS;
priv->retry_data = DEFAULT_RETRY_DATA;
priv->ieee80211->rts = DEFAULT_RTS_THRESHOLD;
priv->ieee80211->rate = 110; //11 mbps
priv->ieee80211->short_slot = 1;
priv->promisc = (priv->ieee80211->dev->flags & IFF_PROMISC) ? 1:0;
priv->bcck_in_ch14 = false;
priv->CCKPresentAttentuation = 0;
priv->rfa_txpowertrackingindex = 0;
priv->rfc_txpowertrackingindex = 0;
priv->CckPwEnl = 6;
//added by amy for silent reset
priv->ResetProgress = RESET_TYPE_NORESET;
priv->bForcedSilentReset = 0;
priv->bDisableNormalResetCheck = false;
priv->force_reset = false;
//added by amy for power save
priv->RfOffReason = 0;
priv->bHwRfOffAction = 0;
priv->PowerSaveControl.bInactivePs = true;
priv->PowerSaveControl.bIPSModeBackup = false;
priv->ieee80211->current_network.beacon_interval = DEFAULT_BEACONINTERVAL;
priv->ieee80211->iw_mode = IW_MODE_INFRA;
priv->ieee80211->softmac_features = IEEE_SOFTMAC_SCAN |
IEEE_SOFTMAC_ASSOCIATE | IEEE_SOFTMAC_PROBERQ |
IEEE_SOFTMAC_PROBERS | IEEE_SOFTMAC_TX_QUEUE;/* |
IEEE_SOFTMAC_BEACONS;*///added by amy 080604 //| //IEEE_SOFTMAC_SINGLE_QUEUE;
priv->ieee80211->active_scan = 1;
priv->ieee80211->modulation = IEEE80211_CCK_MODULATION | IEEE80211_OFDM_MODULATION;
priv->ieee80211->host_encrypt = 1;
priv->ieee80211->host_decrypt = 1;
priv->ieee80211->start_send_beacons = rtl8192_start_beacon;
priv->ieee80211->stop_send_beacons = rtl8192_stop_beacon;
priv->ieee80211->softmac_hard_start_xmit = rtl8192_hard_start_xmit;
priv->ieee80211->set_chan = rtl8192_set_chan;
priv->ieee80211->link_change = rtl8192_link_change;
priv->ieee80211->softmac_data_hard_start_xmit = rtl8192_hard_data_xmit;
priv->ieee80211->data_hard_stop = rtl8192_data_hard_stop;
priv->ieee80211->data_hard_resume = rtl8192_data_hard_resume;
priv->ieee80211->init_wmmparam_flag = 0;
priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD;
priv->ieee80211->check_nic_enough_desc = check_nic_enough_desc;
priv->ieee80211->tx_headroom = sizeof(TX_FWINFO_8190PCI);
priv->ieee80211->qos_support = 1;
priv->ieee80211->SetBWModeHandler = rtl8192_SetBWMode;
priv->ieee80211->handle_assoc_response = rtl8192_handle_assoc_response;
priv->ieee80211->handle_beacon = rtl8192_handle_beacon;
priv->ieee80211->sta_wake_up = rtl8192_hw_wakeup;
priv->ieee80211->enter_sleep_state = rtl8192_hw_to_sleep;
priv->ieee80211->ps_is_queue_empty = rtl8192_is_tx_queue_empty;
priv->ieee80211->GetNmodeSupportBySecCfg = GetNmodeSupportBySecCfg8190Pci;
priv->ieee80211->SetWirelessMode = rtl8192_SetWirelessMode;
priv->ieee80211->GetHalfNmodeSupportByAPsHandler = GetHalfNmodeSupportByAPs819xPci;
priv->ieee80211->InitialGainHandler = InitialGain819xPci;
#ifdef ENABLE_IPS
priv->ieee80211->ieee80211_ips_leave_wq = ieee80211_ips_leave_wq;
priv->ieee80211->ieee80211_ips_leave = ieee80211_ips_leave;
#endif
#ifdef ENABLE_LPS
priv->ieee80211->LeisurePSLeave = LeisurePSLeave;
#endif
priv->ieee80211->SetHwRegHandler = rtl8192e_SetHwReg;
priv->ieee80211->rtllib_ap_sec_type = rtl8192e_ap_sec_type;
priv->ShortRetryLimit = 0x30;
priv->LongRetryLimit = 0x30;
priv->ReceiveConfig = RCR_ADD3 |
RCR_AMF | RCR_ADF | //accept management/data
RCR_AICV | //accept control frame for SW AP needs PS-poll, 2005.07.07, by rcnjko.
RCR_AB | RCR_AM | RCR_APM | //accept BC/MC/UC
RCR_AAP | ((u32)7<<RCR_MXDMA_OFFSET) |
((u32)7 << RCR_FIFO_OFFSET) | RCR_ONLYERLPKT;
priv->pFirmware = vzalloc(sizeof(rt_firmware));
/* rx related queue */
skb_queue_head_init(&priv->skb_queue);
/* Tx related queue */
for(i = 0; i < MAX_QUEUE_SIZE; i++) {
skb_queue_head_init(&priv->ieee80211->skb_waitQ [i]);
}
for(i = 0; i < MAX_QUEUE_SIZE; i++) {
skb_queue_head_init(&priv->ieee80211->skb_aggQ[i]);
}
priv->rf_set_chan = rtl8192_phy_SwChnl;
}
static void rtl8192_init_priv_lock(struct r8192_priv* priv)
{
spin_lock_init(&priv->irq_th_lock);
spin_lock_init(&priv->rf_ps_lock);
sema_init(&priv->wx_sem,1);
sema_init(&priv->rf_sem,1);
mutex_init(&priv->mutex);
}
/* init tasklet and wait_queue here */
#define DRV_NAME "wlan0"
static void rtl8192_init_priv_task(struct r8192_priv *priv)
{
priv->priv_wq = create_workqueue(DRV_NAME);
#ifdef ENABLE_IPS
INIT_WORK(&priv->ieee80211->ips_leave_wq, IPSLeave_wq);
#endif
INIT_WORK(&priv->reset_wq, rtl8192_restart);
INIT_DELAYED_WORK(&priv->watch_dog_wq, rtl819x_watchdog_wqcallback);
INIT_DELAYED_WORK(&priv->txpower_tracking_wq, dm_txpower_trackingcallback);
INIT_DELAYED_WORK(&priv->rfpath_check_wq, dm_rf_pathcheck_workitemcallback);
INIT_DELAYED_WORK(&priv->update_beacon_wq, rtl8192_update_beacon);
INIT_WORK(&priv->qos_activate, rtl8192_qos_activate);
INIT_DELAYED_WORK(&priv->ieee80211->hw_wakeup_wq, rtl8192_hw_wakeup_wq);
tasklet_init(&priv->irq_rx_tasklet, rtl8192_irq_rx_tasklet,
(unsigned long) priv);
tasklet_init(&priv->irq_tx_tasklet, rtl8192_irq_tx_tasklet,
(unsigned long) priv);
tasklet_init(&priv->irq_prepare_beacon_tasklet, rtl8192_prepare_beacon,
(unsigned long) priv);
}
static void rtl8192_get_eeprom_size(struct r8192_priv *priv)
{
u16 curCR = 0;
RT_TRACE(COMP_INIT, "===========>%s()\n", __FUNCTION__);
curCR = read_nic_dword(priv, EPROM_CMD);
RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD, curCR);
// Do we need to consider BIT5 as well?
priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EPROM_93c56 : EPROM_93c46;
RT_TRACE(COMP_INIT, "<===========%s(), epromtype:%d\n", __FUNCTION__, priv->epromtype);
}
/*
* Adapter->EEPROMAddressSize should be set before this function call.
* EEPROM address size can be got through GetEEPROMSize8185()
*/
static void rtl8192_read_eeprom_info(struct r8192_priv *priv)
{
struct net_device *dev = priv->ieee80211->dev;
u8 tempval;
u8 ICVer8192, ICVer8256;
u16 i,usValue, IC_Version;
u16 EEPROMId;
u8 bMac_Tmp_Addr[6] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x01};
RT_TRACE(COMP_INIT, "====> rtl8192_read_eeprom_info\n");
// TODO: I don't know if we need to apply EF function to EEPROM read function
//2 Read EEPROM ID to make sure autoload succeeded
EEPROMId = eprom_read(priv, 0);
if( EEPROMId != RTL8190_EEPROM_ID )
{
RT_TRACE(COMP_ERR, "EEPROM ID is invalid:%x, %x\n", EEPROMId, RTL8190_EEPROM_ID);
priv->AutoloadFailFlag=true;
}
else
{
priv->AutoloadFailFlag=false;
}
//
// Assign Chip Version ID
//
// Read IC Version && Channel Plan
if(!priv->AutoloadFailFlag)
{
// VID, PID
priv->eeprom_vid = eprom_read(priv, (EEPROM_VID >> 1));
priv->eeprom_did = eprom_read(priv, (EEPROM_DID >> 1));
usValue = eprom_read(priv, (u16)(EEPROM_Customer_ID>>1)) >> 8 ;
priv->eeprom_CustomerID = (u8)( usValue & 0xff);
usValue = eprom_read(priv, (EEPROM_ICVersion_ChannelPlan>>1));
priv->eeprom_ChannelPlan = usValue&0xff;
IC_Version = ((usValue&0xff00)>>8);
ICVer8192 = (IC_Version&0xf); //bit0~3; 1:A cut, 2:B cut, 3:C cut...
ICVer8256 = ((IC_Version&0xf0)>>4);//bit4~6, bit7 reserved for other RF chip; 1:A cut, 2:B cut, 3:C cut...
RT_TRACE(COMP_INIT, "ICVer8192 = 0x%x\n", ICVer8192);
RT_TRACE(COMP_INIT, "ICVer8256 = 0x%x\n", ICVer8256);
if(ICVer8192 == 0x2) //B-cut
{
if(ICVer8256 == 0x5) //E-cut
priv->card_8192_version= VERSION_8190_BE;
}
switch(priv->card_8192_version)
{
case VERSION_8190_BD:
case VERSION_8190_BE:
break;
default:
priv->card_8192_version = VERSION_8190_BD;
break;
}
RT_TRACE(COMP_INIT, "\nIC Version = 0x%x\n", priv->card_8192_version);
}
else
{
priv->card_8192_version = VERSION_8190_BD;
priv->eeprom_vid = 0;
priv->eeprom_did = 0;
priv->eeprom_CustomerID = 0;
priv->eeprom_ChannelPlan = 0;
RT_TRACE(COMP_INIT, "IC Version = 0x%x\n", 0xff);
}
RT_TRACE(COMP_INIT, "EEPROM VID = 0x%4x\n", priv->eeprom_vid);
RT_TRACE(COMP_INIT, "EEPROM DID = 0x%4x\n", priv->eeprom_did);
RT_TRACE(COMP_INIT,"EEPROM Customer ID: 0x%2x\n", priv->eeprom_CustomerID);
//2 Read Permanent MAC address
if(!priv->AutoloadFailFlag)
{
for(i = 0; i < 6; i += 2)
{
usValue = eprom_read(priv, (u16) ((EEPROM_NODE_ADDRESS_BYTE_0+i)>>1));
*(u16*)(&dev->dev_addr[i]) = usValue;
}
} else {
// When autoload fails, fall back to a fixed default MAC address.
// added by david woo, 2007/11/7
memcpy(dev->dev_addr, bMac_Tmp_Addr, 6);
}
RT_TRACE(COMP_INIT, "Permanent Address = %pM\n", dev->dev_addr);
//2 TX Power Check EEPROM Fail or not
if(priv->card_8192_version > VERSION_8190_BD) {
priv->bTXPowerDataReadFromEEPORM = true;
} else {
priv->bTXPowerDataReadFromEEPORM = false;
}
// 2007/11/15 MH 8190PCI Default=2T4R, 8192PCIE default=1T2R
priv->rf_type = RTL819X_DEFAULT_RF_TYPE;
if(priv->card_8192_version > VERSION_8190_BD)
{
// Read RF-indication and Tx Power gain index diff of legacy to HT OFDM rate.
if(!priv->AutoloadFailFlag)
{
tempval = (eprom_read(priv, (EEPROM_RFInd_PowerDiff>>1))) & 0xff;
priv->EEPROMLegacyHTTxPowerDiff = tempval & 0xf; // bit[3:0]
if (tempval&0x80) //RF-indication, bit[7]
priv->rf_type = RF_1T2R;
else
priv->rf_type = RF_2T4R;
}
else
{
priv->EEPROMLegacyHTTxPowerDiff = EEPROM_Default_LegacyHTTxPowerDiff;
}
RT_TRACE(COMP_INIT, "EEPROMLegacyHTTxPowerDiff = %d\n",
priv->EEPROMLegacyHTTxPowerDiff);
// Read ThermalMeter from EEPROM
if(!priv->AutoloadFailFlag)
{
priv->EEPROMThermalMeter = (u8)(((eprom_read(priv, (EEPROM_ThermalMeter>>1))) & 0xff00)>>8);
}
else
{
priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
}
RT_TRACE(COMP_INIT, "ThermalMeter = %d\n", priv->EEPROMThermalMeter);
//vivi, for tx power track
priv->TSSI_13dBm = priv->EEPROMThermalMeter *100;
if(priv->epromtype == EPROM_93c46)
{
// Read antenna tx power offset of B/C/D to A and CrystalCap from EEPROM
if(!priv->AutoloadFailFlag)
{
usValue = eprom_read(priv, (EEPROM_TxPwDiff_CrystalCap>>1));
priv->EEPROMAntPwDiff = (usValue&0x0fff);
priv->EEPROMCrystalCap = (u8)((usValue&0xf000)>>12);
}
else
{
priv->EEPROMAntPwDiff = EEPROM_Default_AntTxPowerDiff;
priv->EEPROMCrystalCap = EEPROM_Default_TxPwDiff_CrystalCap;
}
RT_TRACE(COMP_INIT, "EEPROMAntPwDiff = %d\n", priv->EEPROMAntPwDiff);
RT_TRACE(COMP_INIT, "EEPROMCrystalCap = %d\n", priv->EEPROMCrystalCap);
//
// Get per-channel Tx Power Level
//
for(i=0; i<14; i+=2)
{
if(!priv->AutoloadFailFlag)
{
usValue = eprom_read(priv, (u16) ((EEPROM_TxPwIndex_CCK+i)>>1) );
}
else
{
usValue = EEPROM_Default_TxPower;
}
*((u16*)(&priv->EEPROMTxPowerLevelCCK[i])) = usValue;
RT_TRACE(COMP_INIT,"CCK Tx Power Level, Index %d = 0x%02x\n", i, priv->EEPROMTxPowerLevelCCK[i]);
RT_TRACE(COMP_INIT, "CCK Tx Power Level, Index %d = 0x%02x\n", i+1, priv->EEPROMTxPowerLevelCCK[i+1]);
}
for(i=0; i<14; i+=2)
{
if(!priv->AutoloadFailFlag)
{
usValue = eprom_read(priv, (u16) ((EEPROM_TxPwIndex_OFDM_24G+i)>>1) );
}
else
{
usValue = EEPROM_Default_TxPower;
}
*((u16*)(&priv->EEPROMTxPowerLevelOFDM24G[i])) = usValue;
RT_TRACE(COMP_INIT, "OFDM 2.4G Tx Power Level, Index %d = 0x%02x\n", i, priv->EEPROMTxPowerLevelOFDM24G[i]);
RT_TRACE(COMP_INIT, "OFDM 2.4G Tx Power Level, Index %d = 0x%02x\n", i+1, priv->EEPROMTxPowerLevelOFDM24G[i+1]);
}
}
//
// Update HAL variables.
//
if(priv->epromtype == EPROM_93c46)
{
for(i=0; i<14; i++)
{
priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK[i];
priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[i];
}
priv->LegacyHTTxPowerDiff = priv->EEPROMLegacyHTTxPowerDiff;
// Antenna B gain offset to antenna A, bit0~3
priv->AntennaTxPwDiff[0] = (priv->EEPROMAntPwDiff & 0xf);
// Antenna C gain offset to antenna A, bit4~7
priv->AntennaTxPwDiff[1] = ((priv->EEPROMAntPwDiff & 0xf0)>>4);
// Antenna D gain offset to antenna A, bit8~11
priv->AntennaTxPwDiff[2] = ((priv->EEPROMAntPwDiff & 0xf00)>>8);
// CrystalCap, bit12~15
priv->CrystalCap = priv->EEPROMCrystalCap;
// ThermalMeter, bit0~3 for RFIC1, bit4~7 for RFIC2
priv->ThermalMeter[0] = (priv->EEPROMThermalMeter & 0xf);
priv->ThermalMeter[1] = ((priv->EEPROMThermalMeter & 0xf0)>>4);
}
else if(priv->epromtype == EPROM_93c56)
{
for(i=0; i<3; i++) // channel 1~3 use the same Tx Power Level.
{
priv->TxPowerLevelCCK_A[i] = priv->EEPROMRfACCKChnl1TxPwLevel[0];
priv->TxPowerLevelOFDM24G_A[i] = priv->EEPROMRfAOfdmChnlTxPwLevel[0];
priv->TxPowerLevelCCK_C[i] = priv->EEPROMRfCCCKChnl1TxPwLevel[0];
priv->TxPowerLevelOFDM24G_C[i] = priv->EEPROMRfCOfdmChnlTxPwLevel[0];
}
for(i=3; i<9; i++) // channel 4~9 use the same Tx Power Level
{
priv->TxPowerLevelCCK_A[i] = priv->EEPROMRfACCKChnl1TxPwLevel[1];
priv->TxPowerLevelOFDM24G_A[i] = priv->EEPROMRfAOfdmChnlTxPwLevel[1];
priv->TxPowerLevelCCK_C[i] = priv->EEPROMRfCCCKChnl1TxPwLevel[1];
priv->TxPowerLevelOFDM24G_C[i] = priv->EEPROMRfCOfdmChnlTxPwLevel[1];
}
for(i=9; i<14; i++) // channel 10~14 use the same Tx Power Level
{
priv->TxPowerLevelCCK_A[i] = priv->EEPROMRfACCKChnl1TxPwLevel[2];
priv->TxPowerLevelOFDM24G_A[i] = priv->EEPROMRfAOfdmChnlTxPwLevel[2];
priv->TxPowerLevelCCK_C[i] = priv->EEPROMRfCCCKChnl1TxPwLevel[2];
priv->TxPowerLevelOFDM24G_C[i] = priv->EEPROMRfCOfdmChnlTxPwLevel[2];
}
for(i=0; i<14; i++)
RT_TRACE(COMP_INIT, "priv->TxPowerLevelCCK_A[%d] = 0x%x\n", i, priv->TxPowerLevelCCK_A[i]);
for(i=0; i<14; i++)
RT_TRACE(COMP_INIT,"priv->TxPowerLevelOFDM24G_A[%d] = 0x%x\n", i, priv->TxPowerLevelOFDM24G_A[i]);
for(i=0; i<14; i++)
RT_TRACE(COMP_INIT, "priv->TxPowerLevelCCK_C[%d] = 0x%x\n", i, priv->TxPowerLevelCCK_C[i]);
for(i=0; i<14; i++)
RT_TRACE(COMP_INIT, "priv->TxPowerLevelOFDM24G_C[%d] = 0x%x\n", i, priv->TxPowerLevelOFDM24G_C[i]);
priv->LegacyHTTxPowerDiff = priv->EEPROMLegacyHTTxPowerDiff;
priv->AntennaTxPwDiff[0] = 0;
priv->AntennaTxPwDiff[1] = 0;
priv->AntennaTxPwDiff[2] = 0;
priv->CrystalCap = priv->EEPROMCrystalCap;
// ThermalMeter, bit0~3 for RFIC1, bit4~7 for RFIC2
priv->ThermalMeter[0] = (priv->EEPROMThermalMeter & 0xf);
priv->ThermalMeter[1] = ((priv->EEPROMThermalMeter & 0xf0)>>4);
}
}
if(priv->rf_type == RF_1T2R)
{
RT_TRACE(COMP_INIT, "1T2R config\n");
}
else if (priv->rf_type == RF_2T4R)
{
RT_TRACE(COMP_INIT, "2T4R config\n");
}
// 2008/01/16 MH We only learn the RF type inside this function, so we have
// to init the DIG RATR table again.
init_rate_adaptive(priv);
//1 Make a copy for following variables and we can change them if we want
if(priv->RegChannelPlan == 0xf)
{
priv->ChannelPlan = priv->eeprom_ChannelPlan;
}
else
{
priv->ChannelPlan = priv->RegChannelPlan;
}
//
// Used PID and DID to Set CustomerID
//
if( priv->eeprom_vid == 0x1186 && priv->eeprom_did == 0x3304 )
{
priv->CustomerID = RT_CID_DLINK;
}
switch(priv->eeprom_CustomerID)
{
case EEPROM_CID_DEFAULT:
priv->CustomerID = RT_CID_DEFAULT;
break;
case EEPROM_CID_CAMEO:
priv->CustomerID = RT_CID_819x_CAMEO;
break;
case EEPROM_CID_RUNTOP:
priv->CustomerID = RT_CID_819x_RUNTOP;
break;
case EEPROM_CID_NetCore:
priv->CustomerID = RT_CID_819x_Netcore;
break;
case EEPROM_CID_TOSHIBA: // Merge by Jacken, 2008/01/31
priv->CustomerID = RT_CID_TOSHIBA;
if(priv->eeprom_ChannelPlan&0x80)
priv->ChannelPlan = priv->eeprom_ChannelPlan&0x7f;
else
priv->ChannelPlan = 0x0;
RT_TRACE(COMP_INIT, "Toshiba ChannelPlan = 0x%x\n",
priv->ChannelPlan);
break;
case EEPROM_CID_Nettronix:
priv->CustomerID = RT_CID_Nettronix;
break;
case EEPROM_CID_Pronet:
priv->CustomerID = RT_CID_PRONET;
break;
case EEPROM_CID_DLINK:
priv->CustomerID = RT_CID_DLINK;
break;
case EEPROM_CID_WHQL:
break;
default:
// value from RegCustomerID
break;
}
//Avoid the channel plan array overflow, by Bruce, 2007-08-27.
if(priv->ChannelPlan > CHANNEL_PLAN_LEN - 1)
priv->ChannelPlan = 0; //FCC
if( priv->eeprom_vid == 0x1186 && priv->eeprom_did == 0x3304)
priv->ieee80211->bSupportRemoteWakeUp = true;
else
priv->ieee80211->bSupportRemoteWakeUp = false;
RT_TRACE(COMP_INIT, "RegChannelPlan(%d)\n", priv->RegChannelPlan);
RT_TRACE(COMP_INIT, "ChannelPlan = %d\n", priv->ChannelPlan);
RT_TRACE(COMP_TRACE, "<==== ReadAdapterInfo\n");
}
static short rtl8192_get_channel_map(struct r8192_priv *priv)
{
#ifdef ENABLE_DOT11D
if(priv->ChannelPlan> COUNTRY_CODE_GLOBAL_DOMAIN){
printk("rtl8180_init:Error channel plan! Set to default.\n");
priv->ChannelPlan= 0;
}
RT_TRACE(COMP_INIT, "Channel plan is %d\n",priv->ChannelPlan);
rtl819x_set_channel_map(priv->ChannelPlan, priv);
#else
int ch,i;
//Set Default Channel Plan
if(!channels){
DMESG("No channels, aborting");
return -1;
}
ch=channels;
priv->ChannelPlan= 0;//hikaru
// set channels 1..14 allowed in given locale
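// 'channels' is a bitmask (presumably a module parameter) in which bit 0
// corresponds to channel 1, bit 1 to channel 2, and so on; e.g. a value of
// 0x7ff would allow channels 1-11 only.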
for (i=1; i<=14; i++) {
(priv->ieee80211->channel_map)[i] = (u8)(ch & 0x01);
ch >>= 1;
}
#endif
return 0;
}
static short rtl8192_init(struct r8192_priv *priv)
{
struct net_device *dev = priv->ieee80211->dev;
memset(&(priv->stats),0,sizeof(struct Stats));
rtl8192_init_priv_variable(priv);
rtl8192_init_priv_lock(priv);
rtl8192_init_priv_task(priv);
rtl8192_get_eeprom_size(priv);
rtl8192_read_eeprom_info(priv);
rtl8192_get_channel_map(priv);
init_hal_dm(priv);
init_timer(&priv->watch_dog_timer);
priv->watch_dog_timer.data = (unsigned long)priv;
priv->watch_dog_timer.function = watch_dog_timer_callback;
if (request_irq(dev->irq, rtl8192_interrupt, IRQF_SHARED, dev->name, priv)) {
printk("Error allocating IRQ %d\n", dev->irq);
return -1;
} else {
priv->irq = dev->irq;
printk("IRQ %d\n", dev->irq);
}
if (rtl8192_pci_initdescring(priv) != 0){
printk("Endopoints initialization failed");
return -1;
}
return 0;
}
/*
 * Actually only sets the RRSR, RATR and BW_OPMODE registers; it does not
 * do all the hw config its name suggests. This part needs to be modified
 * according to the filtered rate set.
 */
static void rtl8192_hwconfig(struct r8192_priv *priv)
{
u32 regRATR = 0, regRRSR = 0;
u8 regBwOpMode = 0, regTmp = 0;
// Set RRSR, RATR, and BW_OPMODE registers
//
switch (priv->ieee80211->mode)
{
case WIRELESS_MODE_B:
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK;
regRRSR = RATE_ALL_CCK;
break;
case WIRELESS_MODE_A:
regBwOpMode = BW_OPMODE_5G |BW_OPMODE_20MHZ;
regRATR = RATE_ALL_OFDM_AG;
regRRSR = RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_G:
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_AUTO:
case WIRELESS_MODE_N_24G:
// CCK rates are supported by default; they are filtered out only when
// the associated AP does not support them.
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_N_5G:
regBwOpMode = BW_OPMODE_5G;
regRATR = RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
regRRSR = RATE_ALL_OFDM_AG;
break;
}
write_nic_byte(priv, BW_OPMODE, regBwOpMode);
{
u32 ratr_value = 0;
ratr_value = regRATR;
if (priv->rf_type == RF_1T2R)
{
ratr_value &= ~(RATE_ALL_OFDM_2SS);
}
write_nic_dword(priv, RATR0, ratr_value);
write_nic_byte(priv, UFWP, 1);
}
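// The byte read from offset 0x313 is folded into the top byte (bits 31:24)
// of RRSR, preserving whatever the hardware keeps there, while the lower
// 24 bits carry the basic response rate bitmap.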
regTmp = read_nic_byte(priv, 0x313);
regRRSR = ((regTmp) << 24) | (regRRSR & 0x00ffffff);
write_nic_dword(priv, RRSR, regRRSR);
//
// Set Retry Limit here
//
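// Both limits share the one RETRY_LIMIT word; judging by the shift macros,
// the short and long retry limits occupy separate fields of the register.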
write_nic_word(priv, RETRY_LIMIT,
priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT |
priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
// Set Contention Window here
// Set Tx AGC
// Set Tx Antenna including Feedback control
// Set Auto Rate fallback control
}
static RT_STATUS rtl8192_adapter_start(struct r8192_priv *priv)
{
struct net_device *dev = priv->ieee80211->dev;
u32 ulRegRead;
RT_STATUS rtStatus = RT_STATUS_SUCCESS;
u8 tmpvalue;
u8 ICVersion,SwitchingRegulatorOutput;
bool bfirmwareok = true;
u32 tmpRegA, tmpRegC, TempCCk;
int i =0;
RT_TRACE(COMP_INIT, "====>%s()\n", __FUNCTION__);
priv->being_init_adapter = true;
rtl8192_pci_resetdescring(priv);
// 2007/11/02 MH Before initializing RF, we cannot use the firmware to do RF R/W.
priv->Rf_Mode = RF_OP_By_SW_3wire;
//dPLL on
if(priv->ResetProgress == RESET_TYPE_NORESET)
{
write_nic_byte(priv, ANAPAR, 0x37);
// According to the designer's explanation, LBUS active never exceeds 10ms.
// Joseph increased the delay to prevent firmware download failures.
mdelay(500);
}
//PlatformSleepUs(10000);
// For any kind of InitializeAdapter process, we shall use system now!!
priv->pFirmware->firmware_status = FW_STATUS_0_INIT;
//
//3 //Config CPUReset Register
//3//
//3 Firmware Reset Or Not
ulRegRead = read_nic_dword(priv, CPU_GEN);
if(priv->pFirmware->firmware_status == FW_STATUS_0_INIT)
{ //called from MPInitialized. do nothing
ulRegRead |= CPU_GEN_SYSTEM_RESET;
}else if(priv->pFirmware->firmware_status == FW_STATUS_5_READY)
ulRegRead |= CPU_GEN_FIRMWARE_RESET; // Called from MPReset
else
RT_TRACE(COMP_ERR, "ERROR in %s(): undefined firmware state(%d)\n", __FUNCTION__, priv->pFirmware->firmware_status);
write_nic_dword(priv, CPU_GEN, ulRegRead);
//3//
//3 //Fix the issue of E-cut high temperature issue
//3//
// TODO: E cut only
ICVersion = read_nic_byte(priv, IC_VERRSION);
if(ICVersion >= 0x4) //E-cut only
{
// HW SD suggests that we should not write this register too often, so the
// driver should read it back first. This register is modified only on
// power-on reset.
SwitchingRegulatorOutput = read_nic_byte(priv, SWREGULATOR);
if(SwitchingRegulatorOutput != 0xb8)
{
write_nic_byte(priv, SWREGULATOR, 0xa8);
mdelay(1);
write_nic_byte(priv, SWREGULATOR, 0xb8);
}
}
//3//
//3// Initialize BB before MAC
//3//
RT_TRACE(COMP_INIT, "BB Config Start!\n");
rtStatus = rtl8192_BBConfig(priv);
if(rtStatus != RT_STATUS_SUCCESS)
{
RT_TRACE(COMP_ERR, "BB Config failed\n");
return rtStatus;
}
RT_TRACE(COMP_INIT,"BB Config Finished!\n");
//3//Set Loopback mode or Normal mode
//3//
//2006.12.13 by emily. Note! We should not merge these two CPU_GEN register
// writes because setting the System_Reset bit resets the MAC to its default
// transmission mode.
//Loopback mode or not
priv->LoopbackMode = RTL819X_NO_LOOPBACK;
if(priv->ResetProgress == RESET_TYPE_NORESET)
{
ulRegRead = read_nic_dword(priv, CPU_GEN);
if(priv->LoopbackMode == RTL819X_NO_LOOPBACK)
{
ulRegRead = ((ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) | CPU_GEN_NO_LOOPBACK_SET);
}
else if (priv->LoopbackMode == RTL819X_MAC_LOOPBACK )
{
ulRegRead |= CPU_CCK_LOOPBACK;
}
else
{
RT_TRACE(COMP_ERR,"Serious error: wrong loopback mode setting\n");
}
//2008.06.03, for WOL
//ulRegRead &= (~(CPU_GEN_GPIO_UART));
write_nic_dword(priv, CPU_GEN, ulRegRead);
// 2006.11.29. After resetting the CPU we should wait a while, otherwise register writes may fail. Emily
udelay(500);
}
//3Set Hardware(Do nothing now)
rtl8192_hwconfig(priv);
//2=======================================================
// Common Setting for all of the FPGA platform. (part 1)
//2=======================================================
// If there is changes, please make sure it applies to all of the FPGA version
//3 Turn on Tx/Rx
write_nic_byte(priv, CMDR, CR_RE|CR_TE);
//2Set Tx dma burst
write_nic_byte(priv, PCIF, ((MXDMA2_NoLimit<<MXDMA2_RX_SHIFT) |
(MXDMA2_NoLimit<<MXDMA2_TX_SHIFT) ));
//set IDR0 here
write_nic_dword(priv, MAC0, ((u32*)dev->dev_addr)[0]);
write_nic_word(priv, MAC4, ((u16*)(dev->dev_addr + 4))[0]);
//set RCR
write_nic_dword(priv, RCR, priv->ReceiveConfig);
//3 Initialize Number of Reserved Pages in Firmware Queue
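// RQPN1/RQPN2/RQPN3 pack the per-queue reserved page counts (BK/BE/VI/VO,
// MGNT, and BCN/PUB respectively) into bit fields at the *_SHIFT offsets;
// APPLIED_RESERVED_QUEUE_IN_FW presumably tells the firmware to apply this
// reservation.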
write_nic_dword(priv, RQPN1, NUM_OF_PAGE_IN_FW_QUEUE_BK << RSVD_FW_QUEUE_PAGE_BK_SHIFT |
NUM_OF_PAGE_IN_FW_QUEUE_BE << RSVD_FW_QUEUE_PAGE_BE_SHIFT |
NUM_OF_PAGE_IN_FW_QUEUE_VI << RSVD_FW_QUEUE_PAGE_VI_SHIFT |
NUM_OF_PAGE_IN_FW_QUEUE_VO <<RSVD_FW_QUEUE_PAGE_VO_SHIFT);
write_nic_dword(priv, RQPN2, NUM_OF_PAGE_IN_FW_QUEUE_MGNT << RSVD_FW_QUEUE_PAGE_MGNT_SHIFT);
write_nic_dword(priv, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW|
NUM_OF_PAGE_IN_FW_QUEUE_BCN<<RSVD_FW_QUEUE_PAGE_BCN_SHIFT|
NUM_OF_PAGE_IN_FW_QUEUE_PUB<<RSVD_FW_QUEUE_PAGE_PUB_SHIFT);
rtl8192_tx_enable(priv);
rtl8192_rx_enable(priv);
//3Set Response Rate Setting Register
// CCK rate is supported by default.
// CCK rate will be filtered out only when associated AP does not support it.
ulRegRead = (0xFFF00000 & read_nic_dword(priv, RRSR)) | RATE_ALL_OFDM_AG | RATE_ALL_CCK;
write_nic_dword(priv, RRSR, ulRegRead);
write_nic_dword(priv, RATR0+4*7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
//2Set AckTimeout
// TODO: this value is only for the FPGA version and needs to be changed. 2006.12.18, by Emily
write_nic_byte(priv, ACK_TIMEOUT, 0x30);
if(priv->ResetProgress == RESET_TYPE_NORESET)
rtl8192_SetWirelessMode(priv->ieee80211, priv->ieee80211->mode);
//-----------------------------------------------------------------------------
// Set up security related. 070106, by rcnjko:
// 1. Clear all H/W keys.
// 2. Enable H/W encryption/decryption.
//-----------------------------------------------------------------------------
CamResetAllEntry(priv);
{
u8 SECR_value = 0x0;
SECR_value |= SCR_TxEncEnable;
SECR_value |= SCR_RxDecEnable;
SECR_value |= SCR_NoSKMC;
write_nic_byte(priv, SECR, SECR_value);
}
//3Beacon related
write_nic_word(priv, ATIMWND, 2);
write_nic_word(priv, BCN_INTERVAL, 100);
for (i=0; i<QOS_QUEUE_NUM; i++)
write_nic_dword(priv, WDCAPARA_ADD[i], 0x005e4332);
//
// Switching regulator controller: This is set temporarily.
// It's not sure if this can be removed in the future.
// PJ advised to leave it by default.
//
write_nic_byte(priv, 0xbe, 0xc0);
//2=======================================================
// Set PHY related configuration defined in MAC register bank
//2=======================================================
rtl8192_phy_configmac(priv);
if (priv->card_8192_version > (u8) VERSION_8190_BD) {
rtl8192_phy_getTxPower(priv);
rtl8192_phy_setTxPower(priv, priv->chan);
}
//if D or C cut
tmpvalue = read_nic_byte(priv, IC_VERRSION);
priv->IC_Cut = tmpvalue;
RT_TRACE(COMP_INIT, "priv->IC_Cut = 0x%x\n", priv->IC_Cut);
if(priv->IC_Cut >= IC_VersionCut_D)
{
//pHalData->bDcut = TRUE;
if(priv->IC_Cut == IC_VersionCut_D)
RT_TRACE(COMP_INIT, "D-cut\n");
if(priv->IC_Cut == IC_VersionCut_E)
{
RT_TRACE(COMP_INIT, "E-cut\n");
// HW SD suggests that we should not write this register too often, so the
// driver should read it back first. This register is modified only on
// power-on reset.
}
}
else
{
//pHalData->bDcut = FALSE;
RT_TRACE(COMP_INIT, "Before C-cut\n");
}
//Firmware download
RT_TRACE(COMP_INIT, "Load Firmware!\n");
bfirmwareok = init_firmware(priv);
if(bfirmwareok != true) {
rtStatus = RT_STATUS_FAILURE;
return rtStatus;
}
RT_TRACE(COMP_INIT, "Load Firmware finished!\n");
//RF config
if(priv->ResetProgress == RESET_TYPE_NORESET)
{
RT_TRACE(COMP_INIT, "RF Config Started!\n");
rtStatus = rtl8192_phy_RFConfig(priv);
if(rtStatus != RT_STATUS_SUCCESS)
{
RT_TRACE(COMP_ERR, "RF Config failed\n");
return rtStatus;
}
RT_TRACE(COMP_INIT, "RF Config Finished!\n");
}
rtl8192_phy_updateInitGain(priv);
/*---- Set CCK and OFDM Block "ON"----*/
rtl8192_setBBreg(priv, rFPGA0_RFMOD, bCCKEn, 0x1);
rtl8192_setBBreg(priv, rFPGA0_RFMOD, bOFDMEn, 0x1);
//Enable Led
write_nic_byte(priv, 0x87, 0x0);
//2=======================================================
// RF Power Save
//2=======================================================
#ifdef ENABLE_IPS
{
if(priv->RfOffReason > RF_CHANGE_BY_PS)
{ // H/W or S/W RF OFF before sleep.
RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): Turn off RF for RfOffReason(%d)\n", __FUNCTION__,priv->RfOffReason);
MgntActSet_RF_State(priv, eRfOff, priv->RfOffReason);
}
else if(priv->RfOffReason >= RF_CHANGE_BY_IPS)
{ // H/W or S/W RF OFF before sleep.
RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): Turn off RF for RfOffReason(%d)\n", __FUNCTION__, priv->RfOffReason);
MgntActSet_RF_State(priv, eRfOff, priv->RfOffReason);
}
else
{
RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): RF-ON \n",__FUNCTION__);
priv->eRFPowerState = eRfOn;
priv->RfOffReason = 0;
}
}
#endif
// We can force firmware to do RF-R/W
if(priv->ieee80211->FwRWRF)
priv->Rf_Mode = RF_OP_By_FW;
else
priv->Rf_Mode = RF_OP_By_SW_3wire;
if(priv->ResetProgress == RESET_TYPE_NORESET)
{
dm_initialize_txpower_tracking(priv);
if(priv->IC_Cut >= IC_VersionCut_D)
{
tmpRegA = rtl8192_QueryBBReg(priv, rOFDM0_XATxIQImbalance, bMaskDWord);
tmpRegC = rtl8192_QueryBBReg(priv, rOFDM0_XCTxIQImbalance, bMaskDWord);
for(i = 0; i<TxBBGainTableLength; i++)
{
if(tmpRegA == priv->txbbgain_table[i].txbbgain_value)
{
priv->rfa_txpowertrackingindex= (u8)i;
priv->rfa_txpowertrackingindex_real= (u8)i;
priv->rfa_txpowertracking_default = priv->rfa_txpowertrackingindex;
break;
}
}
TempCCk = rtl8192_QueryBBReg(priv, rCCK0_TxFilter1, bMaskByte2);
for(i=0 ; i<CCKTxBBGainTableLength ; i++)
{
if(TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0])
{
priv->CCKPresentAttentuation_20Mdefault =(u8) i;
break;
}
}
priv->CCKPresentAttentuation_40Mdefault = 0;
priv->CCKPresentAttentuation_difference = 0;
priv->CCKPresentAttentuation = priv->CCKPresentAttentuation_20Mdefault;
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_initial = %d\n", priv->rfa_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real__initial = %d\n", priv->rfa_txpowertrackingindex_real);
RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation_difference_initial = %d\n", priv->CCKPresentAttentuation_difference);
RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation_initial = %d\n", priv->CCKPresentAttentuation);
priv->btxpower_tracking = FALSE;//TEMPLY DISABLE
}
}
rtl8192_irq_enable(priv);
priv->being_init_adapter = false;
return rtStatus;
}
static void rtl8192_prepare_beacon(unsigned long arg)
{
struct r8192_priv *priv = (struct r8192_priv*) arg;
struct sk_buff *skb;
cb_desc *tcb_desc;
skb = ieee80211_get_beacon(priv->ieee80211);
if (!skb)
return;
tcb_desc = (cb_desc *)(skb->cb + 8);
/* prepare misc info for the beacon xmit */
tcb_desc->queue_index = BEACON_QUEUE;
/* IBSS does not support HT yet, use 1M by default */
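/* data_rate is expressed in units of 0.5 Mbps, so 2 selects 1 Mbps. */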
tcb_desc->data_rate = 2;
tcb_desc->RATRIndex = 7;
tcb_desc->bTxDisableRateFallBack = 1;
tcb_desc->bTxUseDriverAssingedRate = 1;
skb_push(skb, priv->ieee80211->tx_headroom);
rtl8192_tx(priv, skb);
}
/*
* configure registers for beacon tx and enables it via
* rtl8192_beacon_tx_enable(). rtl8192_beacon_tx_disable() might
* be used to stop beacon transmission
*/
static void rtl8192_start_beacon(struct ieee80211_device *ieee80211)
{
struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
struct ieee80211_network *net = &priv->ieee80211->current_network;
u16 BcnTimeCfg = 0;
u16 BcnCW = 6;
u16 BcnIFS = 0xf;
DMESG("Enabling beacon TX");
rtl8192_irq_disable(priv);
//rtl8192_beacon_tx_enable(dev);
/* ATIM window */
write_nic_word(priv, ATIMWND, 2);
/* Beacon interval (in unit of TU) */
write_nic_word(priv, BCN_INTERVAL, net->beacon_interval);
/*
 * DrvErlyInt (in units of TU): time before TBTT to send an interrupt
 * notifying the driver to change the beacon content.
 */
write_nic_word(priv, BCN_DRV_EARLY_INT, 10);
/*
 * BcnDMATIM (in units of us): the time before TBTT at which to perform
 * beacon queue DMA.
 */
write_nic_word(priv, BCN_DMATIME, 256);
/*
 * Force beacon frame transmission even after receiving a beacon frame
 * from another ad hoc STA.
 */
write_nic_byte(priv, BCN_ERR_THRESH, 100);
/* Set CW and IFS */
BcnTimeCfg |= BcnCW<<BCN_TCFG_CW_SHIFT;
BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
write_nic_word(priv, BCN_TCFG, BcnTimeCfg);
/* enable the interrupt for ad-hoc process */
rtl8192_irq_enable(priv);
}
static bool HalRxCheckStuck8190Pci(struct r8192_priv *priv)
{
u16 RegRxCounter = read_nic_word(priv, 0x130);
bool bStuck = FALSE;
RT_TRACE(COMP_RESET,"%s(): RegRxCounter is %d,RxCounter is %d\n",__FUNCTION__,RegRxCounter,priv->RxCounter);
// If RSSI is low, check rx over a longer period because reception is poor;
// otherwise the driver may keep silent-resetting every 2 seconds.
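// The effective check interval therefore scales with signal quality: high
// RSSI checks on every watchdog pass, medium RSSI every 2nd pass, low RSSI
// every 4th, and very low RSSI every 8th (tracked via rx_chk_cnt below).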
priv->rx_chk_cnt++;
if(priv->undecorated_smoothed_pwdb >= (RateAdaptiveTH_High+5))
{
priv->rx_chk_cnt = 0; /* high rssi, check rx stuck right now. */
}
else if(priv->undecorated_smoothed_pwdb < (RateAdaptiveTH_High+5) &&
((priv->CurrentChannelBW!=HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb>=RateAdaptiveTH_Low_40M) ||
(priv->CurrentChannelBW==HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb>=RateAdaptiveTH_Low_20M)) )
{
if(priv->rx_chk_cnt < 2)
{
return bStuck;
}
else
{
priv->rx_chk_cnt = 0;
}
}
else if(((priv->CurrentChannelBW!=HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb<RateAdaptiveTH_Low_40M) ||
(priv->CurrentChannelBW==HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb<RateAdaptiveTH_Low_20M)) &&
priv->undecorated_smoothed_pwdb >= VeryLowRSSI)
{
if(priv->rx_chk_cnt < 4)
{
return bStuck;
}
else
{
priv->rx_chk_cnt = 0;
}
}
else
{
if(priv->rx_chk_cnt < 8)
{
return bStuck;
}
else
{
priv->rx_chk_cnt = 0;
}
}
if(priv->RxCounter==RegRxCounter)
bStuck = TRUE;
priv->RxCounter = RegRxCounter;
return bStuck;
}
static RESET_TYPE RxCheckStuck(struct r8192_priv *priv)
{
if(HalRxCheckStuck8190Pci(priv))
{
RT_TRACE(COMP_RESET, "RxStuck Condition\n");
return RESET_TYPE_SILENT;
}
return RESET_TYPE_NORESET;
}
static RESET_TYPE rtl819x_check_reset(struct r8192_priv *priv)
{
RESET_TYPE RxResetType = RESET_TYPE_NORESET;
RT_RF_POWER_STATE rfState;
rfState = priv->eRFPowerState;
if (rfState != eRfOff && (priv->ieee80211->iw_mode != IW_MODE_ADHOC)) {
/*
* If driver is in the status of firmware download failure,
* driver skips RF initialization and RF is in turned off state.
* Driver should check whether Rx stuck and do silent reset. And
* if driver is in firmware download failure status, driver
* should initialize RF in the following silent reset procedure
*
* Driver should not check RX stuck in IBSS mode because it is
* required to set Check BSSID in order to send beacons; however,
* if Check BSSID is set, the STA cannot hear any packets at all.
*/
RxResetType = RxCheckStuck(priv);
}
RT_TRACE(COMP_RESET, "%s(): RxResetType is %d\n", __FUNCTION__, RxResetType);
return RxResetType;
}
#ifdef ENABLE_IPS
static void InactivePsWorkItemCallback(struct r8192_priv *priv)
{
PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
RT_TRACE(COMP_POWER, "InactivePsWorkItemCallback() --------->\n");
//
// The flag "bSwRfProcessing" indicates the status of the IPS procedure and
// should be set only when the IPS workitem is really scheduled.
// The old code set this flag before scheduling the IPS workitem; when the
// previous IPS workitem had not ended yet, scheduling the current workitem
// failed, so bSwRfProcessing blocked the IPS procedure from switching the RF.
// By Bruce, 2007-12-25.
//
pPSC->bSwRfProcessing = TRUE;
RT_TRACE(COMP_RF, "InactivePsWorkItemCallback(): Set RF to %s.\n",
pPSC->eInactivePowerState == eRfOff?"OFF":"ON");
MgntActSet_RF_State(priv, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS);
//
// To solve the problem of CAM values being lost while RF is OFF, rewrite the CAM values after RF ON. By Bruce, 2007-09-20.
//
pPSC->bSwRfProcessing = FALSE;
RT_TRACE(COMP_POWER, "InactivePsWorkItemCallback() <---------\n");
}
#ifdef ENABLE_LPS
/* Change the current 802.11 power save mode. */
bool MgntActSet_802_11_PowerSaveMode(struct r8192_priv *priv, u8 rtPsMode)
{
// Currently, we do not change power save mode on IBSS mode.
if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
{
return false;
}
//
// <RJ_NOTE> If we let the HW fill in the PwrMgt bit for us, some APs will
// not respond to our mgnt frames with the PwrMgt bit set, e.g. we cannot
// associate with the AP. So I commented it out. 2005.02.16, by rcnjko.
//
// // Change device's power save mode.
// Adapter->HalFunc.SetPSModeHandler( Adapter, rtPsMode );
// Update power save mode configured.
//RT_TRACE(COMP_LPS,"%s(): set ieee->ps = %x\n",__FUNCTION__,rtPsMode);
if(!priv->ps_force) {
priv->ieee80211->ps = rtPsMode;
}
// Awake immediately
if(priv->ieee80211->sta_sleep != 0 && rtPsMode == IEEE80211_PS_DISABLED)
{
// Notify the AP we are awake.
rtl8192_hw_wakeup(priv->ieee80211);
priv->ieee80211->sta_sleep = 0;
spin_lock(&priv->ieee80211->mgmt_tx_lock);
printk("LPS leave: notify AP we are awaked ++++++++++ SendNullFunctionData\n");
ieee80211_sta_ps_send_null_frame(priv->ieee80211, 0);
spin_unlock(&priv->ieee80211->mgmt_tx_lock);
}
return true;
}
/* Enter the leisure power save mode. */
void LeisurePSEnter(struct ieee80211_device *ieee80211)
{
struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
if(!((priv->ieee80211->iw_mode == IW_MODE_INFRA) &&
(priv->ieee80211->state == IEEE80211_LINKED)) ||
(priv->ieee80211->iw_mode == IW_MODE_ADHOC) ||
(priv->ieee80211->iw_mode == IW_MODE_MASTER))
return;
if (pPSC->bLeisurePs)
{
// Enter LPS only after we have been idle for a while since connecting to the AP.
if(pPSC->LpsIdleCount >= RT_CHECK_FOR_HANG_PERIOD) // 4 Sec
{
if(priv->ieee80211->ps == IEEE80211_PS_DISABLED)
{
MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST);
}
}
else
pPSC->LpsIdleCount++;
}
}
/* Leave leisure power save mode. */
void LeisurePSLeave(struct ieee80211_device *ieee80211)
{
struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
if (pPSC->bLeisurePs)
{
if(priv->ieee80211->ps != IEEE80211_PS_DISABLED)
{
// move to lps_wakecomplete()
MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_DISABLED);
}
}
}
#endif
/* Enter the inactive power save mode. RF will be off */
void IPSEnter(struct r8192_priv *priv)
{
PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
RT_RF_POWER_STATE rtState;
if (pPSC->bInactivePs)
{
rtState = priv->eRFPowerState;
//
// Added by Bruce, 2007-12-25.
// Do not enter IPS in the following conditions:
// (1) RF is already OFF or Sleep
// (2) bSwRfProcessing (indicates the IPS procedure is still ongoing)
// (3) Connected (only the disconnected state can trigger IPS)
// (4) IBSS (sends beacons)
// (5) AP mode (sends beacons)
//
if (rtState == eRfOn && !pPSC->bSwRfProcessing
&& (priv->ieee80211->state != IEEE80211_LINKED) )
{
RT_TRACE(COMP_RF,"IPSEnter(): Turn off RF.\n");
pPSC->eInactivePowerState = eRfOff;
// queue_work(priv->priv_wq,&(pPSC->InactivePsWorkItem));
InactivePsWorkItemCallback(priv);
}
}
}
//
// Description:
// Leave the inactive power save mode, RF will be on.
// 2007.08.17, by shien chang.
//
void IPSLeave(struct r8192_priv *priv)
{
PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
RT_RF_POWER_STATE rtState;
if (pPSC->bInactivePs)
{
rtState = priv->eRFPowerState;
if (rtState != eRfOn && !pPSC->bSwRfProcessing && priv->RfOffReason <= RF_CHANGE_BY_IPS)
{
RT_TRACE(COMP_POWER, "IPSLeave(): Turn on RF.\n");
pPSC->eInactivePowerState = eRfOn;
InactivePsWorkItemCallback(priv);
}
}
}
void IPSLeave_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, ips_leave_wq);
struct net_device *dev = ieee->dev;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
down(&priv->ieee80211->ips_sem);
IPSLeave(priv);
up(&priv->ieee80211->ips_sem);
}
void ieee80211_ips_leave_wq(struct ieee80211_device *ieee80211)
{
struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
RT_RF_POWER_STATE rtState;
rtState = priv->eRFPowerState;
if (priv->PowerSaveControl.bInactivePs){
if(rtState == eRfOff){
if(priv->RfOffReason > RF_CHANGE_BY_IPS)
{
RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",__FUNCTION__);
return;
}
else{
printk("=========>%s(): IPSLeave\n",__FUNCTION__);
queue_work(priv->ieee80211->wq,&priv->ieee80211->ips_leave_wq);
}
}
}
}
//added by amy 090331 end
void ieee80211_ips_leave(struct ieee80211_device *ieee80211)
{
struct r8192_priv *priv = ieee80211_priv(ieee80211->dev);
down(&ieee80211->ips_sem);
IPSLeave(priv);
up(&ieee80211->ips_sem);
}
#endif
static void rtl819x_update_rxcounts(
struct r8192_priv *priv,
u32* TotalRxBcnNum,
u32* TotalRxDataNum
)
{
u16 SlotIndex;
u8 i;
*TotalRxBcnNum = 0;
*TotalRxDataNum = 0;
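// The link-detect slots form a circular buffer: each watchdog pass stores
// this period's beacon/data counts into the next slot (wrapping at SlotNum),
// and the totals are summed over the whole window.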
SlotIndex = (priv->ieee80211->LinkDetectInfo.SlotIndex++)%(priv->ieee80211->LinkDetectInfo.SlotNum);
priv->ieee80211->LinkDetectInfo.RxBcnNum[SlotIndex] = priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod;
priv->ieee80211->LinkDetectInfo.RxDataNum[SlotIndex] = priv->ieee80211->LinkDetectInfo.NumRecvDataInPeriod;
for( i=0; i<priv->ieee80211->LinkDetectInfo.SlotNum; i++ ){
*TotalRxBcnNum += priv->ieee80211->LinkDetectInfo.RxBcnNum[i];
*TotalRxDataNum += priv->ieee80211->LinkDetectInfo.RxDataNum[i];
}
}
static void rtl819x_watchdog_wqcallback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work,struct delayed_work,work);
struct r8192_priv *priv = container_of(dwork,struct r8192_priv,watch_dog_wq);
struct ieee80211_device* ieee = priv->ieee80211;
RESET_TYPE ResetType = RESET_TYPE_NORESET;
bool bBusyTraffic = false;
bool bEnterPS = false;
if (!priv->up || priv->bHwRadioOff)
return;
hal_dm_watchdog(priv);
#ifdef ENABLE_IPS
if(ieee->actscanning == false){
if((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state == IEEE80211_NOLINK) &&
(priv->eRFPowerState == eRfOn) && !ieee->is_set_key &&
(!ieee->proto_stoppping) && !ieee->wx_set_enc){
if (priv->PowerSaveControl.ReturnPoint == IPS_CALLBACK_NONE){
IPSEnter(priv);
}
}
}
#endif
{//to get busy traffic condition
if(ieee->state == IEEE80211_LINKED)
{
if( ieee->LinkDetectInfo.NumRxOkInPeriod> 100 ||
ieee->LinkDetectInfo.NumTxOkInPeriod> 100 ) {
bBusyTraffic = true;
}
#ifdef ENABLE_LPS
//added by amy for Leisure PS
if( ((ieee->LinkDetectInfo.NumRxUnicastOkInPeriod + ieee->LinkDetectInfo.NumTxOkInPeriod) > 8 ) ||
(ieee->LinkDetectInfo.NumRxUnicastOkInPeriod > 2) )
{
bEnterPS= false;
}
else
{
bEnterPS= true;
}
// LeisurePS only works in infra mode.
if(bEnterPS)
{
LeisurePSEnter(priv->ieee80211);
}
else
{
LeisurePSLeave(priv->ieee80211);
}
#endif
}
else
{
#ifdef ENABLE_LPS
LeisurePSLeave(priv->ieee80211);
#endif
}
ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
ieee->LinkDetectInfo.NumTxOkInPeriod = 0;
ieee->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
ieee->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
}
//added by amy for AP roaming
if(ieee->state == IEEE80211_LINKED && ieee->iw_mode == IW_MODE_INFRA)
{
u32 TotalRxBcnNum = 0;
u32 TotalRxDataNum = 0;
rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
if((TotalRxBcnNum+TotalRxDataNum) == 0)
{
if (priv->eRFPowerState == eRfOff)
RT_TRACE(COMP_ERR,"========>%s()\n",__FUNCTION__);
printk("===>%s(): AP is power off,connect another one\n",__FUNCTION__);
// Dot11d_Reset(dev);
ieee->state = IEEE80211_ASSOCIATING;
notify_wx_assoc_event(priv->ieee80211);
RemovePeerTS(priv->ieee80211,priv->ieee80211->current_network.bssid);
ieee->is_roaming = true;
ieee->is_set_key = false;
ieee->link_change(ieee);
queue_work(ieee->wq, &ieee->associate_procedure_wq);
}
}
ieee->LinkDetectInfo.NumRecvBcnInPeriod=0;
ieee->LinkDetectInfo.NumRecvDataInPeriod=0;
//check if reset the driver
if (priv->watchdog_check_reset_cnt++ >= 3 && !ieee->is_roaming &&
priv->watchdog_last_time != 1)
{
ResetType = rtl819x_check_reset(priv);
priv->watchdog_check_reset_cnt = 3;
}
if(!priv->bDisableNormalResetCheck && ResetType == RESET_TYPE_NORMAL)
{
priv->ResetProgress = RESET_TYPE_NORMAL;
RT_TRACE(COMP_RESET,"%s(): NOMAL RESET\n",__FUNCTION__);
return;
}
/* disable silent reset temporarily 2008.9.11 */
if (priv->force_reset || (!priv->bDisableNormalResetCheck && ResetType == RESET_TYPE_SILENT)) // This is controlled by an OID set in Pomelo
{
priv->watchdog_last_time = 1;
}
else
priv->watchdog_last_time = 0;
priv->force_reset = false;
priv->bForcedSilentReset = false;
priv->bResetInProgress = false;
RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
}
void watch_dog_timer_callback(unsigned long data)
{
struct r8192_priv *priv = (struct r8192_priv *) data;
queue_delayed_work(priv->priv_wq,&priv->watch_dog_wq,0);
mod_timer(&priv->watch_dog_timer, jiffies + MSECS(IEEE80211_WATCH_DOG_TIME));
}
static int _rtl8192_up(struct r8192_priv *priv)
{
RT_STATUS init_status = RT_STATUS_SUCCESS;
struct net_device *dev = priv->ieee80211->dev;
priv->up=1;
priv->ieee80211->ieee_up=1;
priv->bdisable_nic = false; //YJ,add,091111
RT_TRACE(COMP_INIT, "Bringing up iface\n");
init_status = rtl8192_adapter_start(priv);
if(init_status != RT_STATUS_SUCCESS)
{
RT_TRACE(COMP_ERR,"ERR!!! %s(): initialization is failed!\n",__FUNCTION__);
return -1;
}
RT_TRACE(COMP_INIT, "start adapter finished\n");
if (priv->eRFPowerState != eRfOn)
MgntActSet_RF_State(priv, eRfOn, priv->RfOffReason);
if(priv->ieee80211->state != IEEE80211_LINKED)
ieee80211_softmac_start_protocol(priv->ieee80211);
ieee80211_reset_queue(priv->ieee80211);
watch_dog_timer_callback((unsigned long) priv);
if(!netif_queue_stopped(dev))
netif_start_queue(dev);
else
netif_wake_queue(dev);
return 0;
}
static int rtl8192_open(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
down(&priv->wx_sem);
ret = rtl8192_up(dev);
up(&priv->wx_sem);
return ret;
}
int rtl8192_up(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
if (priv->up == 1) return -1;
return _rtl8192_up(priv);
}
static int rtl8192_close(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
down(&priv->wx_sem);
ret = rtl8192_down(dev);
up(&priv->wx_sem);
return ret;
}
int rtl8192_down(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
if (priv->up == 0) return -1;
#ifdef ENABLE_LPS
//LZM for PS-Poll AID issue. 090429
if(priv->ieee80211->state == IEEE80211_LINKED)
LeisurePSLeave(priv->ieee80211);
#endif
priv->up=0;
priv->ieee80211->ieee_up = 0;
RT_TRACE(COMP_DOWN, "==========>%s()\n", __FUNCTION__);
/* FIXME */
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
rtl8192_irq_disable(priv);
rtl8192_cancel_deferred_work(priv);
deinit_hal_dm(priv);
del_timer_sync(&priv->watch_dog_timer);
ieee80211_softmac_stop_protocol(priv->ieee80211,true);
rtl8192_halt_adapter(priv, false);
memset(&priv->ieee80211->current_network, 0 , offsetof(struct ieee80211_network, list));
RT_TRACE(COMP_DOWN, "<==========%s()\n", __FUNCTION__);
return 0;
}
void rtl8192_commit(struct r8192_priv *priv)
{
if (priv->up == 0) return ;
ieee80211_softmac_stop_protocol(priv->ieee80211,true);
rtl8192_irq_disable(priv);
rtl8192_halt_adapter(priv, true);
_rtl8192_up(priv);
}
static void rtl8192_restart(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, reset_wq);
down(&priv->wx_sem);
rtl8192_commit(priv);
up(&priv->wx_sem);
}
static void r8192_set_multicast(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
priv->promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
}
static int r8192_set_mac_adr(struct net_device *dev, void *mac)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct sockaddr *addr = mac;
down(&priv->wx_sem);
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
schedule_work(&priv->reset_wq);
up(&priv->wx_sem);
return 0;
}
static void r8192e_set_hw_key(struct r8192_priv *priv, struct ieee_param *ipw)
{
struct ieee80211_device *ieee = priv->ieee80211;
u8 broadcast_addr[6] = {0xff,0xff,0xff,0xff,0xff,0xff};
u32 key[4];
if (ipw->u.crypt.set_tx) {
if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
ieee->pairwise_key_type = KEY_TYPE_CCMP;
else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
ieee->pairwise_key_type = KEY_TYPE_TKIP;
else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
if (ipw->u.crypt.key_len == 13)
ieee->pairwise_key_type = KEY_TYPE_WEP104;
else if (ipw->u.crypt.key_len == 5)
ieee->pairwise_key_type = KEY_TYPE_WEP40;
} else
ieee->pairwise_key_type = KEY_TYPE_NA;
if (ieee->pairwise_key_type) {
memcpy(key, ipw->u.crypt.key, 16);
EnableHWSecurityConfig8192(priv);
/*
 * We fill both the index entry and the 4th entry for a pairwise
 * key. As in the IPW interface, only adhoc will get here, so we
 * need the index entry for its default key searching!
 */
setKey(priv, 4, ipw->u.crypt.idx,
ieee->pairwise_key_type,
(u8*)ieee->ap_mac_addr, 0, key);
/* LEAP WEP will never set this. */
if (ieee->auth_mode != 2)
setKey(priv, ipw->u.crypt.idx, ipw->u.crypt.idx,
ieee->pairwise_key_type,
(u8*)ieee->ap_mac_addr, 0, key);
}
if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) &&
ieee->pHTInfo->bCurrentHTSupport) {
write_nic_byte(priv, 0x173, 1); /* fix aes bug */
}
} else {
memcpy(key, ipw->u.crypt.key, 16);
if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
ieee->group_key_type= KEY_TYPE_CCMP;
else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
ieee->group_key_type = KEY_TYPE_TKIP;
else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
if (ipw->u.crypt.key_len == 13)
ieee->group_key_type = KEY_TYPE_WEP104;
else if (ipw->u.crypt.key_len == 5)
ieee->group_key_type = KEY_TYPE_WEP40;
} else
ieee->group_key_type = KEY_TYPE_NA;
if (ieee->group_key_type) {
setKey(priv, ipw->u.crypt.idx, ipw->u.crypt.idx,
ieee->group_key_type, broadcast_addr, 0, key);
}
}
}
/* based on ipw2200 driver */
static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct iwreq *wrq = (struct iwreq *)rq;
int ret=-1;
struct iw_point *p = &wrq->u.data;
struct ieee_param *ipw = NULL;//(struct ieee_param *)wrq->u.data.pointer;
down(&priv->wx_sem);
if (p->length < sizeof(struct ieee_param) || !p->pointer){
ret = -EINVAL;
goto out;
}
ipw = kmalloc(p->length, GFP_KERNEL);
if (ipw == NULL){
ret = -ENOMEM;
goto out;
}
if (copy_from_user(ipw, p->pointer, p->length)) {
kfree(ipw);
ret = -EFAULT;
goto out;
}
switch (cmd) {
case RTL_IOCTL_WPA_SUPPLICANT:
/* parse here for HW security */
if (ipw->cmd == IEEE_CMD_SET_ENCRYPTION)
r8192e_set_hw_key(priv, ipw);
ret = ieee80211_wpa_supplicant_ioctl(priv->ieee80211, &wrq->u.data);
break;
default:
ret = -EOPNOTSUPP;
break;
}
kfree(ipw);
out:
up(&priv->wx_sem);
return ret;
}
static u8 HwRateToMRate90(bool bIsHT, u8 rate)
{
u8 ret_rate = 0x02;
if(!bIsHT) {
switch(rate) {
case DESC90_RATE1M: ret_rate = MGN_1M; break;
case DESC90_RATE2M: ret_rate = MGN_2M; break;
case DESC90_RATE5_5M: ret_rate = MGN_5_5M; break;
case DESC90_RATE11M: ret_rate = MGN_11M; break;
case DESC90_RATE6M: ret_rate = MGN_6M; break;
case DESC90_RATE9M: ret_rate = MGN_9M; break;
case DESC90_RATE12M: ret_rate = MGN_12M; break;
case DESC90_RATE18M: ret_rate = MGN_18M; break;
case DESC90_RATE24M: ret_rate = MGN_24M; break;
case DESC90_RATE36M: ret_rate = MGN_36M; break;
case DESC90_RATE48M: ret_rate = MGN_48M; break;
case DESC90_RATE54M: ret_rate = MGN_54M; break;
default:
RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n", rate, bIsHT);
break;
}
} else {
switch(rate) {
case DESC90_RATEMCS0: ret_rate = MGN_MCS0; break;
case DESC90_RATEMCS1: ret_rate = MGN_MCS1; break;
case DESC90_RATEMCS2: ret_rate = MGN_MCS2; break;
case DESC90_RATEMCS3: ret_rate = MGN_MCS3; break;
case DESC90_RATEMCS4: ret_rate = MGN_MCS4; break;
case DESC90_RATEMCS5: ret_rate = MGN_MCS5; break;
case DESC90_RATEMCS6: ret_rate = MGN_MCS6; break;
case DESC90_RATEMCS7: ret_rate = MGN_MCS7; break;
case DESC90_RATEMCS8: ret_rate = MGN_MCS8; break;
case DESC90_RATEMCS9: ret_rate = MGN_MCS9; break;
case DESC90_RATEMCS10: ret_rate = MGN_MCS10; break;
case DESC90_RATEMCS11: ret_rate = MGN_MCS11; break;
case DESC90_RATEMCS12: ret_rate = MGN_MCS12; break;
case DESC90_RATEMCS13: ret_rate = MGN_MCS13; break;
case DESC90_RATEMCS14: ret_rate = MGN_MCS14; break;
case DESC90_RATEMCS15: ret_rate = MGN_MCS15; break;
case DESC90_RATEMCS32: ret_rate = (0x80|0x20); break;
default:
RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n",rate, bIsHT);
break;
}
}
return ret_rate;
}
/* Record the TSF time stamp when receiving a packet */
static void UpdateRxPktTimeStamp8190(struct r8192_priv *priv, struct ieee80211_rx_stats *stats)
{
if(stats->bIsAMPDU && !stats->bFirstMPDU) {
stats->mac_time[0] = priv->LastRxDescTSFLow;
stats->mac_time[1] = priv->LastRxDescTSFHigh;
} else {
priv->LastRxDescTSFLow = stats->mac_time[0];
priv->LastRxDescTSFHigh = stats->mac_time[1];
}
}
static long rtl819x_translate_todbm(u8 signal_strength_index)// 0-100 index.
{
long signal_power; // in dBm.
// Translate to dBm (x=0.5y-95).
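// e.g. an index of 60 maps to ((60 + 1) >> 1) - 95 = -65 dBm.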
signal_power = (long)((signal_strength_index + 1) >> 1);
signal_power -= 95;
return signal_power;
}
/* 2008/01/22 MH We can not declare the RSSI/EVM total value of the sliding
window as a local static. Otherwise, it may keep growing when we return from
S3/S4, because the value is kept in memory or on disk. We must declare the
value in the adapter so it is reinitialized on return from S3/S4. */
static void rtl8192_process_phyinfo(struct r8192_priv * priv, u8* buffer,struct ieee80211_rx_stats * pprevious_stats, struct ieee80211_rx_stats * pcurrent_stats)
{
bool bcheck = false;
u8 rfpath;
u32 nspatial_stream, tmp_val;
static u32 slide_rssi_index=0, slide_rssi_statistics=0;
static u32 slide_evm_index=0, slide_evm_statistics=0;
static u32 last_rssi=0, last_evm=0;
//cosa add for beacon rssi smoothing
static u32 slide_beacon_adc_pwdb_index=0, slide_beacon_adc_pwdb_statistics=0;
static u32 last_beacon_adc_pwdb=0;
struct ieee80211_hdr_3addr *hdr;
u16 sc ;
unsigned int frag,seq;
hdr = (struct ieee80211_hdr_3addr *)buffer;
sc = le16_to_cpu(hdr->seq_ctl);
frag = WLAN_GET_SEQ_FRAG(sc);
seq = WLAN_GET_SEQ_SEQ(sc);
//
// Check whether we should take the previous packet into account
//
if(!pprevious_stats->bIsAMPDU)
{
// if previous packet is not aggregated packet
bcheck = true;
}
if(slide_rssi_statistics++ >= PHY_RSSI_SLID_WIN_MAX)
{
slide_rssi_statistics = PHY_RSSI_SLID_WIN_MAX;
last_rssi = priv->stats.slide_signal_strength[slide_rssi_index];
priv->stats.slide_rssi_total -= last_rssi;
}
priv->stats.slide_rssi_total += pprevious_stats->SignalStrength;
priv->stats.slide_signal_strength[slide_rssi_index++] = pprevious_stats->SignalStrength;
if(slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX)
slide_rssi_index = 0;
// <1> Showed on UI for user, in dbm
tmp_val = priv->stats.slide_rssi_total/slide_rssi_statistics;
priv->stats.signal_strength = rtl819x_translate_todbm((u8)tmp_val);
pcurrent_stats->rssi = priv->stats.signal_strength;
//
// If the previous packet does not match the criteria, neglect it
//
if(!pprevious_stats->bPacketMatchBSSID)
{
if(!pprevious_stats->bToSelfBA)
return;
}
if(!bcheck)
return;
// <2> Showed on UI for engineering
// hardware does not provide rssi information for each rf path in CCK
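// The per-path RSSI below is smoothed with an exponential moving average,
// new = (old * (Rx_Smooth_Factor - 1) + sample) / Rx_Smooth_Factor, plus a
// +1 bump when the sample is rising so the integer division can still
// converge upward.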
if(!pprevious_stats->bIsCCK && pprevious_stats->bPacketToSelf)
{
for (rfpath = RF90_PATH_A; rfpath < RF90_PATH_C; rfpath++)
{
if (!rtl8192_phy_CheckIsLegalRFPath(priv, rfpath))
continue;
RT_TRACE(COMP_DBG, "pPreviousstats->RxMIMOSignalStrength[rfpath] = %d\n", pprevious_stats->RxMIMOSignalStrength[rfpath]);
//Fixed by Jacken 2008-03-20
if(priv->stats.rx_rssi_percentage[rfpath] == 0)
{
priv->stats.rx_rssi_percentage[rfpath] = pprevious_stats->RxMIMOSignalStrength[rfpath];
}
if(pprevious_stats->RxMIMOSignalStrength[rfpath] > priv->stats.rx_rssi_percentage[rfpath])
{
priv->stats.rx_rssi_percentage[rfpath] =
( (priv->stats.rx_rssi_percentage[rfpath]*(Rx_Smooth_Factor-1)) +
(pprevious_stats->RxMIMOSignalStrength[rfpath])) /(Rx_Smooth_Factor);
priv->stats.rx_rssi_percentage[rfpath] = priv->stats.rx_rssi_percentage[rfpath] + 1;
}
else
{
priv->stats.rx_rssi_percentage[rfpath] =
( (priv->stats.rx_rssi_percentage[rfpath]*(Rx_Smooth_Factor-1)) +
(pprevious_stats->RxMIMOSignalStrength[rfpath])) /(Rx_Smooth_Factor);
}
RT_TRACE(COMP_DBG, "priv->RxStats.RxRSSIPercentage[rfPath] = %d \n" , priv->stats.rx_rssi_percentage[rfpath]);
}
}
//
// Check PWDB.
//
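// Beacon PWDB is first averaged over a sliding window of
// PHY_Beacon_RSSI_SLID_WIN_MAX samples; the result then feeds the smoothed
// undecorated PWDB computed further below.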
//cosa add for beacon rssi smoothing by average.
if(pprevious_stats->bPacketBeacon)
{
/* record the beacon pwdb to the sliding window. */
if(slide_beacon_adc_pwdb_statistics++ >= PHY_Beacon_RSSI_SLID_WIN_MAX)
{
slide_beacon_adc_pwdb_statistics = PHY_Beacon_RSSI_SLID_WIN_MAX;
last_beacon_adc_pwdb = priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index];
priv->stats.Slide_Beacon_Total -= last_beacon_adc_pwdb;
}
priv->stats.Slide_Beacon_Total += pprevious_stats->RxPWDBAll;
priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index] = pprevious_stats->RxPWDBAll;
slide_beacon_adc_pwdb_index++;
if(slide_beacon_adc_pwdb_index >= PHY_Beacon_RSSI_SLID_WIN_MAX)
slide_beacon_adc_pwdb_index = 0;
pprevious_stats->RxPWDBAll = priv->stats.Slide_Beacon_Total/slide_beacon_adc_pwdb_statistics;
if(pprevious_stats->RxPWDBAll >= 3)
pprevious_stats->RxPWDBAll -= 3;
}
RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
pprevious_stats->bIsCCK? "CCK": "OFDM",
pprevious_stats->RxPWDBAll);
if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA)
{
if(priv->undecorated_smoothed_pwdb < 0) // initialize
{
priv->undecorated_smoothed_pwdb = pprevious_stats->RxPWDBAll;
}
if(pprevious_stats->RxPWDBAll > (u32)priv->undecorated_smoothed_pwdb)
{
priv->undecorated_smoothed_pwdb =
( ((priv->undecorated_smoothed_pwdb)*(Rx_Smooth_Factor-1)) +
(pprevious_stats->RxPWDBAll)) /(Rx_Smooth_Factor);
priv->undecorated_smoothed_pwdb = priv->undecorated_smoothed_pwdb + 1;
}
else
{
priv->undecorated_smoothed_pwdb =
( ((priv->undecorated_smoothed_pwdb)*(Rx_Smooth_Factor-1)) +
(pprevious_stats->RxPWDBAll)) /(Rx_Smooth_Factor);
}
}
//
// Check EVM
//
/* record the general EVM to the sliding window. */
if (pprevious_stats->SignalQuality != 0)
{
if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA){
if(slide_evm_statistics++ >= PHY_RSSI_SLID_WIN_MAX){
slide_evm_statistics = PHY_RSSI_SLID_WIN_MAX;
last_evm = priv->stats.slide_evm[slide_evm_index];
priv->stats.slide_evm_total -= last_evm;
}
priv->stats.slide_evm_total += pprevious_stats->SignalQuality;
priv->stats.slide_evm[slide_evm_index++] = pprevious_stats->SignalQuality;
if(slide_evm_index >= PHY_RSSI_SLID_WIN_MAX)
slide_evm_index = 0;
// <1> Showed on UI for user, in percentage.
tmp_val = priv->stats.slide_evm_total/slide_evm_statistics;
//cosa add 10/11/2007, Showed on UI for user in Windows Vista, for Link quality.
}
// <2> Showed on UI for engineering
if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA)
{
for(nspatial_stream = 0; nspatial_stream<2 ; nspatial_stream++) // 2 spatial stream
{
if(pprevious_stats->RxMIMOSignalQuality[nspatial_stream] != -1)
{
if(priv->stats.rx_evm_percentage[nspatial_stream] == 0) // initialize
{
priv->stats.rx_evm_percentage[nspatial_stream] = pprevious_stats->RxMIMOSignalQuality[nspatial_stream];
}
priv->stats.rx_evm_percentage[nspatial_stream] =
( (priv->stats.rx_evm_percentage[nspatial_stream]* (Rx_Smooth_Factor-1)) +
(pprevious_stats->RxMIMOSignalQuality[nspatial_stream]* 1)) / (Rx_Smooth_Factor);
}
}
}
}
}
static u8 rtl819x_query_rxpwrpercentage(
char antpower
)
{
if ((antpower <= -100) || (antpower >= 20))
{
return 0;
}
else if (antpower >= 0)
{
return 100;
}
else
{
return (100+antpower);
}
}
static u8
rtl819x_evm_dbtopercentage(
char value
)
{
char ret_val;
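/*
 * EVM in dB is clamped to [-33, 0] and mapped linearly onto a 0-100
 * percentage (3% per dB); e.g. -20 dB gives 60%. The -33 dB endpoint
 * yields 99, which is rounded up to 100 below.
 */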
ret_val = value;
if(ret_val >= 0)
ret_val = 0;
if(ret_val <= -33)
ret_val = -33;
ret_val = 0 - ret_val;
ret_val*=3;
if(ret_val == 99)
ret_val = 100;
return ret_val;
}
/* We want a good-looking value for signal strength/quality */
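// e.g. a raw value of 75 falls in the 61-100 band and maps to
// 90 + (75 - 60) / 4 = 93.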
static long rtl819x_signal_scale_mapping(long currsig)
{
long retsig;
// Step 1. Scale mapping.
if(currsig >= 61 && currsig <= 100)
{
retsig = 90 + ((currsig - 60) / 4);
}
else if(currsig >= 41 && currsig <= 60)
{
retsig = 78 + ((currsig - 40) / 2);
}
else if(currsig >= 31 && currsig <= 40)
{
retsig = 66 + (currsig - 30);
}
else if(currsig >= 21 && currsig <= 30)
{
retsig = 54 + (currsig - 20);
}
else if(currsig >= 5 && currsig <= 20)
{
retsig = 42 + (((currsig - 5) * 2) / 3);
}
else if(currsig == 4)
{
retsig = 36;
}
else if(currsig == 3)
{
retsig = 27;
}
else if(currsig == 2)
{
retsig = 18;
}
else if(currsig == 1)
{
retsig = 9;
}
else
{
retsig = currsig;
}
return retsig;
}
static void rtl8192_query_rxphystatus(
struct r8192_priv * priv,
struct ieee80211_rx_stats * pstats,
prx_desc_819x_pci pdesc,
prx_fwinfo_819x_pci pdrvinfo,
struct ieee80211_rx_stats * precord_stats,
bool bpacket_match_bssid,
bool bpacket_toself,
bool bPacketBeacon,
bool bToSelfBA
)
{
//PRT_RFD_STATUS pRtRfdStatus = &(pRfd->Status);
phy_sts_ofdm_819xpci_t* pofdm_buf;
phy_sts_cck_819xpci_t * pcck_buf;
phy_ofdm_rx_status_rxsc_sgien_exintfflag* prxsc;
u8 *prxpkt;
u8 i,max_spatial_stream, tmp_rxsnr, tmp_rxevm, rxsc_sgien_exflg;
char rx_pwr[4], rx_pwr_all=0;
//long rx_avg_pwr = 0;
char rx_snrX, rx_evmX;
u8 evm, pwdb_all;
u32 RSSI, total_rssi=0;//, total_evm=0;
// long signal_strength_index = 0;
u8 is_cck_rate=0;
u8 rf_rx_num = 0;
is_cck_rate = rx_hal_is_cck_rate(pdrvinfo);
// Record it for next packet processing
memset(precord_stats, 0, sizeof(struct ieee80211_rx_stats));
pstats->bPacketMatchBSSID = precord_stats->bPacketMatchBSSID = bpacket_match_bssid;
pstats->bPacketToSelf = precord_stats->bPacketToSelf = bpacket_toself;
pstats->bIsCCK = precord_stats->bIsCCK = is_cck_rate;//RX_HAL_IS_CCK_RATE(pDrvInfo);
pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon;
pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA;
/*2007.08.30 requested by SD3 Jerry */
if (priv->phy_check_reg824 == 0)
{
priv->phy_reg824_bit9 = rtl8192_QueryBBReg(priv, rFPGA0_XA_HSSIParameter2, 0x200);
priv->phy_check_reg824 = 1;
}
prxpkt = (u8*)pdrvinfo;
/* Move pointer to the 16th bytes. Phy status start address. */
prxpkt += sizeof(rx_fwinfo_819x_pci);
/* Initial the cck and ofdm buffer pointer */
pcck_buf = (phy_sts_cck_819xpci_t *)prxpkt;
pofdm_buf = (phy_sts_ofdm_819xpci_t *)prxpkt;
pstats->RxMIMOSignalQuality[0] = -1;
pstats->RxMIMOSignalQuality[1] = -1;
precord_stats->RxMIMOSignalQuality[0] = -1;
precord_stats->RxMIMOSignalQuality[1] = -1;
if(is_cck_rate)
{
//
// (1)Hardware does not provide RSSI for CCK
//
//
// (2)PWDB, Average PWDB calculated by hardware (for rate adaptive)
//
u8 report;//, cck_agc_rpt;
if (!priv->phy_reg824_bit9)
{
report = pcck_buf->cck_agc_rpt & 0xc0;
report = report>>6;
switch(report)
{
//Fixed by Jacken from Bryant 2008-03-20
//Original value is -38 , -26 , -14 , -2
//Fixed value is -35 , -23 , -11 , 6
case 0x3:
rx_pwr_all = -35 - (pcck_buf->cck_agc_rpt & 0x3e);
break;
case 0x2:
rx_pwr_all = -23 - (pcck_buf->cck_agc_rpt & 0x3e);
break;
case 0x1:
rx_pwr_all = -11 - (pcck_buf->cck_agc_rpt & 0x3e);
break;
case 0x0:
rx_pwr_all = 8 - (pcck_buf->cck_agc_rpt & 0x3e);
break;
}
}
else
{
report = pcck_buf->cck_agc_rpt & 0x60;
report = report>>5;
switch(report)
{
case 0x3:
rx_pwr_all = -35 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
break;
case 0x2:
rx_pwr_all = -23 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1);
break;
case 0x1:
rx_pwr_all = -11 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
break;
case 0x0:
rx_pwr_all = -8 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
break;
}
}
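/*
 * In both branches above the AGC report byte packs a 2-bit range selector
 * into its high bits and the gain into the low bits. An illustrative decode
 * (the report value is assumed, not from hardware): with phy_reg824_bit9
 * clear and cck_agc_rpt = 0x8a, report = (0x8a & 0xc0) >> 6 = 0x2 and
 * gain = 0x8a & 0x3e = 10, so rx_pwr_all = -23 - 10 = -33 dBm.
 */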
pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
pstats->RecvSignalPower = rx_pwr_all;
//
// (3) Get Signal Quality (EVM)
//
if(bpacket_match_bssid)
{
u8 sq;
if(pstats->RxPWDBAll > 40)
{
sq = 100;
}else
{
sq = pcck_buf->sq_rpt;
if(pcck_buf->sq_rpt > 64)
sq = 0;
else if (pcck_buf->sq_rpt < 20)
sq = 100;
else
sq = ((64-sq) * 100) / 44;
}
pstats->SignalQuality = precord_stats->SignalQuality = sq;
pstats->RxMIMOSignalQuality[0] = precord_stats->RxMIMOSignalQuality[0] = sq;
pstats->RxMIMOSignalQuality[1] = precord_stats->RxMIMOSignalQuality[1] = -1;
}
}
else
{
//
// (1)Get RSSI for HT rate
//
for(i=RF90_PATH_A; i<RF90_PATH_MAX; i++)
{
// 2008/01/30 MH we will judge RF RX path now.
if (priv->brfpath_rxenable[i])
rf_rx_num++;
//else
//continue;
//Fixed by Jacken from Bryant 2008-03-20
//Original value is 106
rx_pwr[i] = ((pofdm_buf->trsw_gain_X[i]&0x3F)*2) - 110;
//Get Rx snr value in DB
tmp_rxsnr = pofdm_buf->rxsnr_X[i];
rx_snrX = (char)(tmp_rxsnr);
rx_snrX /= 2;
/* Translate DBM to percentage. */
RSSI = rtl819x_query_rxpwrpercentage(rx_pwr[i]);
if (priv->brfpath_rxenable[i])
total_rssi += RSSI;
/* Record Signal Strength for next packet */
if(bpacket_match_bssid)
{
pstats->RxMIMOSignalStrength[i] =(u8) RSSI;
precord_stats->RxMIMOSignalStrength[i] =(u8) RSSI;
}
}
//
// (2)PWDB, Average PWDB calculated by hardware (for rate adaptive)
//
//Fixed by Jacken from Bryant 2008-03-20
//Original value is 106
rx_pwr_all = (((pofdm_buf->pwdb_all ) >> 1 )& 0x7f) -106;
pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
pstats->RxPower = precord_stats->RxPower = rx_pwr_all;
pstats->RecvSignalPower = rx_pwr_all;
//
// (3)EVM of HT rate
//
if(pdrvinfo->RxHT && pdrvinfo->RxRate>=DESC90_RATEMCS8 &&
pdrvinfo->RxRate<=DESC90_RATEMCS15)
max_spatial_stream = 2; //both spatial streams make sense
else
max_spatial_stream = 1; //only spatial stream 1 makes sense
for(i=0; i<max_spatial_stream; i++)
{
tmp_rxevm = pofdm_buf->rxevm_X[i];
rx_evmX = (char)(tmp_rxevm);
// Do not use a shift operation like "rx_evmX >>= 1", because the compiler of the free build environment
// fills the most significant bit with zero when shifting, which may change a negative
// value into a positive one; the dBm value (which is supposed to be negative) would then be wrong.
rx_evmX /= 2; //dbm
evm = rtl819x_evm_dbtopercentage(rx_evmX);
if(bpacket_match_bssid)
{
if(i==0) // Fill the value in the RFD; get the first spatial stream only
pstats->SignalQuality = precord_stats->SignalQuality = (u8)(evm & 0xff);
pstats->RxMIMOSignalQuality[i] = precord_stats->RxMIMOSignalQuality[i] = (u8)(evm & 0xff);
}
}
/* record rx statistics for debug */
rxsc_sgien_exflg = pofdm_buf->rxsc_sgien_exflg;
prxsc = (phy_ofdm_rx_status_rxsc_sgien_exintfflag *)&rxsc_sgien_exflg;
}
//UI BSS list signal strength (in percentage); make it good looking, from 0 to 100.
//It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp().
if(is_cck_rate)
{
pstats->SignalStrength = precord_stats->SignalStrength = (u8)(rtl819x_signal_scale_mapping((long)pwdb_all));//PWDB_ALL;
}
else
{
//pRfd->Status.SignalStrength = pRecordRfd->Status.SignalStrength = (u1Byte)(SignalScaleMapping(total_rssi/=RF90_PATH_MAX));//(u1Byte)(total_rssi/=RF90_PATH_MAX);
// We can judge RX path number now.
if (rf_rx_num != 0)
pstats->SignalStrength = precord_stats->SignalStrength = (u8)(rtl819x_signal_scale_mapping((long)(total_rssi/=rf_rx_num)));
}
}
static void
rtl8192_record_rxdesc_forlateruse(
struct ieee80211_rx_stats * psrc_stats,
struct ieee80211_rx_stats * ptarget_stats
)
{
ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
}
static void TranslateRxSignalStuff819xpci(struct r8192_priv *priv,
struct sk_buff *skb,
struct ieee80211_rx_stats * pstats,
prx_desc_819x_pci pdesc,
prx_fwinfo_819x_pci pdrvinfo)
{
// TODO: We must only check packets for the current MAC address. Not finished
bool bpacket_match_bssid, bpacket_toself;
bool bPacketBeacon=false, bToSelfBA=false;
struct ieee80211_hdr_3addr *hdr;
u16 fc,type;
// Get Signal Quality for only RX data queue (but not command queue)
u8* tmp_buf;
u8 *praddr;
/* Get MAC frame start address. */
tmp_buf = skb->data;
hdr = (struct ieee80211_hdr_3addr *)tmp_buf;
fc = le16_to_cpu(hdr->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
/* Check if the received packet is acceptable. */
bpacket_match_bssid = ((IEEE80211_FTYPE_CTL != type) &&
(!compare_ether_addr(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS)? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS )? hdr->addr2 : hdr->addr3))
&& (!pstats->bHwError) && (!pstats->bCRC)&& (!pstats->bICV));
bpacket_toself = bpacket_match_bssid & (!compare_ether_addr(praddr, priv->ieee80211->dev->dev_addr));
if(WLAN_FC_GET_FRAMETYPE(fc)== IEEE80211_STYPE_BEACON)
{
bPacketBeacon = true;
}
if(WLAN_FC_GET_FRAMETYPE(fc) == IEEE80211_STYPE_BLOCKACK)
{
if (!compare_ether_addr(praddr, priv->ieee80211->dev->dev_addr))
bToSelfBA = true;
}
//
// Process PHY information for previous packet (RSSI/PWDB/EVM)
//
// Because PHY information is contained only in the last MPDU of an A-MPDU, the
// driver must process the PHY information of the previous packet here
rtl8192_process_phyinfo(priv, tmp_buf, &priv->previous_stats, pstats);
rtl8192_query_rxphystatus(priv, pstats, pdesc, pdrvinfo, &priv->previous_stats, bpacket_match_bssid,
bpacket_toself ,bPacketBeacon, bToSelfBA);
rtl8192_record_rxdesc_forlateruse(pstats, &priv->previous_stats);
}
static void rtl8192_tx_resume(struct r8192_priv *priv)
{
struct ieee80211_device *ieee = priv->ieee80211;
struct sk_buff *skb;
int i;
for (i = BK_QUEUE; i < TXCMD_QUEUE; i++) {
while ((!skb_queue_empty(&ieee->skb_waitQ[i])) &&
(priv->ieee80211->check_nic_enough_desc(ieee, i) > 0)) {
/* 1. dequeue the packet from the wait queue */
skb = skb_dequeue(&ieee->skb_waitQ[i]);
/* 2. tx the packet directly */
ieee->softmac_data_hard_start_xmit(skb, ieee, 0);
}
}
}
static void rtl8192_irq_tx_tasklet(unsigned long arg)
{
struct r8192_priv *priv = (struct r8192_priv*) arg;
struct rtl8192_tx_ring *mgnt_ring = &priv->tx_ring[MGNT_QUEUE];
unsigned long flags;
/* check if we need to report that the management queue is drained */
spin_lock_irqsave(&priv->irq_th_lock, flags);
if (!skb_queue_len(&mgnt_ring->queue) &&
priv->ieee80211->ack_tx_to_ieee &&
rtl8192_is_tx_queue_empty(priv->ieee80211)) {
priv->ieee80211->ack_tx_to_ieee = 0;
ieee80211_ps_tx_ack(priv->ieee80211, 1);
}
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
rtl8192_tx_resume(priv);
}
/* Record the received data rate */
static void UpdateReceivedRateHistogramStatistics8190(
struct r8192_priv *priv,
struct ieee80211_rx_stats* pstats
)
{
u32 rcvType=1; //0: Total, 1:OK, 2:CRC, 3:ICV
u32 rateIndex;
u32 preamble_guardinterval; //1: short preamble/GI, 0: long preamble/GI
if(pstats->bCRC)
rcvType = 2;
else if(pstats->bICV)
rcvType = 3;
if(pstats->bShortPreamble)
preamble_guardinterval = 1;// short
else
preamble_guardinterval = 0;// long
switch(pstats->rate)
{
//
// CCK rate
//
case MGN_1M: rateIndex = 0; break;
case MGN_2M: rateIndex = 1; break;
case MGN_5_5M: rateIndex = 2; break;
case MGN_11M: rateIndex = 3; break;
//
// Legacy OFDM rate
//
case MGN_6M: rateIndex = 4; break;
case MGN_9M: rateIndex = 5; break;
case MGN_12M: rateIndex = 6; break;
case MGN_18M: rateIndex = 7; break;
case MGN_24M: rateIndex = 8; break;
case MGN_36M: rateIndex = 9; break;
case MGN_48M: rateIndex = 10; break;
case MGN_54M: rateIndex = 11; break;
//
// 11n High throughput rate
//
case MGN_MCS0: rateIndex = 12; break;
case MGN_MCS1: rateIndex = 13; break;
case MGN_MCS2: rateIndex = 14; break;
case MGN_MCS3: rateIndex = 15; break;
case MGN_MCS4: rateIndex = 16; break;
case MGN_MCS5: rateIndex = 17; break;
case MGN_MCS6: rateIndex = 18; break;
case MGN_MCS7: rateIndex = 19; break;
case MGN_MCS8: rateIndex = 20; break;
case MGN_MCS9: rateIndex = 21; break;
case MGN_MCS10: rateIndex = 22; break;
case MGN_MCS11: rateIndex = 23; break;
case MGN_MCS12: rateIndex = 24; break;
case MGN_MCS13: rateIndex = 25; break;
case MGN_MCS14: rateIndex = 26; break;
case MGN_MCS15: rateIndex = 27; break;
default: rateIndex = 28; break;
}
priv->stats.received_rate_histogram[0][rateIndex]++; //total
priv->stats.received_rate_histogram[rcvType][rateIndex]++;
}
static void rtl8192_rx(struct r8192_priv *priv)
{
struct ieee80211_hdr_1addr *ieee80211_hdr = NULL;
bool unicast_packet = false;
struct ieee80211_rx_stats stats = {
.signal = 0,
.noise = -98,
.rate = 0,
.freq = IEEE80211_24GHZ_BAND,
};
unsigned int count = priv->rxringcount;
prx_fwinfo_819x_pci pDrvInfo = NULL;
struct sk_buff *new_skb;
while (count--) {
rx_desc_819x_pci *pdesc = &priv->rx_ring[priv->rx_idx];//rx descriptor
struct sk_buff *skb = priv->rx_buf[priv->rx_idx];//rx pkt
if (pdesc->OWN)
/* wait for the data to be filled by hardware */
return;
stats.bICV = pdesc->ICV;
stats.bCRC = pdesc->CRC32;
stats.bHwError = pdesc->CRC32 | pdesc->ICV;
stats.Length = pdesc->Length;
if(stats.Length < 24)
stats.bHwError |= 1;
if(stats.bHwError) {
stats.bShift = false;
goto done;
}
pDrvInfo = NULL;
new_skb = dev_alloc_skb(priv->rxbuffersize);
if (unlikely(!new_skb))
goto done;
stats.RxDrvInfoSize = pdesc->RxDrvInfoSize;
stats.RxBufShift = ((pdesc->Shift)&0x03);
stats.Decrypted = !pdesc->SWDec;
pci_dma_sync_single_for_cpu(priv->pdev,
*((dma_addr_t *)skb->cb),
priv->rxbuffersize,
PCI_DMA_FROMDEVICE);
skb_put(skb, pdesc->Length);
pDrvInfo = (rx_fwinfo_819x_pci *)(skb->data + stats.RxBufShift);
skb_reserve(skb, stats.RxDrvInfoSize + stats.RxBufShift);
stats.rate = HwRateToMRate90((bool)pDrvInfo->RxHT, (u8)pDrvInfo->RxRate);
stats.bShortPreamble = pDrvInfo->SPLCP;
/* This is for debugging only and should be disabled in a released driver.
* 2007.1.11 by Emily
*/
UpdateReceivedRateHistogramStatistics8190(priv, &stats);
stats.bIsAMPDU = (pDrvInfo->PartAggr==1);
stats.bFirstMPDU = (pDrvInfo->PartAggr==1) && (pDrvInfo->FirstAGGR==1);
stats.TimeStampLow = pDrvInfo->TSFL;
stats.TimeStampHigh = read_nic_dword(priv, TSFR+4);
UpdateRxPktTimeStamp8190(priv, &stats);
//
// Get Total offset of MPDU Frame Body
//
if((stats.RxBufShift + stats.RxDrvInfoSize) > 0)
stats.bShift = 1;
/* ???? */
TranslateRxSignalStuff819xpci(priv, skb, &stats, pdesc, pDrvInfo);
/* Rx A-MPDU */
if(pDrvInfo->FirstAGGR==1 || pDrvInfo->PartAggr == 1)
RT_TRACE(COMP_RXDESC, "pDrvInfo->FirstAGGR = %d, pDrvInfo->PartAggr = %d\n",
pDrvInfo->FirstAGGR, pDrvInfo->PartAggr);
skb_trim(skb, skb->len - 4/*sCrcLng*/);
/* rx packets statistics */
ieee80211_hdr = (struct ieee80211_hdr_1addr *)skb->data;
unicast_packet = false;
if(is_broadcast_ether_addr(ieee80211_hdr->addr1)) {
//TODO
}else if(is_multicast_ether_addr(ieee80211_hdr->addr1)){
//TODO
}else {
/* unicast packet */
unicast_packet = true;
}
if(!ieee80211_rtl_rx(priv->ieee80211, skb, &stats)){
dev_kfree_skb_any(skb);
} else {
priv->stats.rxok++;
if(unicast_packet) {
priv->stats.rxbytesunicast += skb->len;
}
}
pci_unmap_single(priv->pdev, *((dma_addr_t *) skb->cb),
priv->rxbuffersize, PCI_DMA_FROMDEVICE);
skb = new_skb;
priv->rx_buf[priv->rx_idx] = skb;
*((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE);
done:
pdesc->BufferAddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
pdesc->OWN = 1;
pdesc->Length = priv->rxbuffersize;
if (priv->rx_idx == priv->rxringcount-1)
pdesc->EOR = 1;
priv->rx_idx = (priv->rx_idx + 1) % priv->rxringcount;
}
}
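/*
 * rtl8192_rx() above follows the classic OWN-bit descriptor ring protocol:
 * a slot is consumed only after the hardware clears OWN, is refilled with a
 * fresh buffer, and is then handed back by setting OWN (plus EOR on the last
 * slot). A minimal non-compiled sketch of that consumer loop, with
 * hypothetical types and helpers that are not this driver's API:
 */
#if 0
struct ring_slot { int own; void *buf; };

static void ring_consume(struct ring_slot *ring, int count, int *idx)
{
	while (!ring[*idx].own) {              /* hardware is done with this slot */
		deliver(ring[*idx].buf);       /* hand the packet up the stack */
		ring[*idx].buf = alloc_buf();  /* refill with a fresh buffer */
		ring[*idx].own = 1;            /* return the slot to the hardware */
		*idx = (*idx + 1) % count;     /* advance the ring index */
	}
}
#endif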
static void rtl8192_irq_rx_tasklet(unsigned long arg)
{
struct r8192_priv *priv = (struct r8192_priv*) arg;
rtl8192_rx(priv);
/* unmask RDU */
write_nic_dword(priv, INTA_MASK, read_nic_dword(priv, INTA_MASK) | IMR_RDU);
}
static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_open = rtl8192_open,
.ndo_stop = rtl8192_close,
.ndo_tx_timeout = tx_timeout,
.ndo_do_ioctl = rtl8192_ioctl,
.ndo_set_multicast_list = r8192_set_multicast,
.ndo_set_mac_address = r8192_set_mac_adr,
.ndo_start_xmit = ieee80211_rtl_xmit,
};
static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct net_device *dev = NULL;
struct r8192_priv *priv= NULL;
u8 unit = 0;
int ret = -ENODEV;
unsigned long pmem_start, pmem_len, pmem_flags;
u8 revisionid;
RT_TRACE(COMP_INIT,"Configuring chip resources\n");
if( pci_enable_device (pdev) ){
RT_TRACE(COMP_ERR,"Failed to enable PCI device");
return -EIO;
}
pci_set_master(pdev);
//pci_set_wmi(pdev);
pci_set_dma_mask(pdev, 0xffffff00ULL);
pci_set_consistent_dma_mask(pdev,0xffffff00ULL);
dev = alloc_ieee80211(sizeof(struct r8192_priv));
if (!dev) {
ret = -ENOMEM;
goto fail_free;
}
pci_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
priv = ieee80211_priv(dev);
priv->ieee80211 = netdev_priv(dev);
priv->pdev=pdev;
if((pdev->subsystem_vendor == PCI_VENDOR_ID_DLINK)&&(pdev->subsystem_device == 0x3304)){
priv->ieee80211->bSupportRemoteWakeUp = 1;
} else
{
priv->ieee80211->bSupportRemoteWakeUp = 0;
}
pmem_start = pci_resource_start(pdev, 1);
pmem_len = pci_resource_len(pdev, 1);
pmem_flags = pci_resource_flags (pdev, 1);
if (!(pmem_flags & IORESOURCE_MEM)) {
RT_TRACE(COMP_ERR, "region #1 not a MMIO resource, aborting\n");
goto fail;
}
//DMESG("Memory mapped space @ 0x%08lx ", pmem_start);
if( ! request_mem_region(pmem_start, pmem_len, RTL819xE_MODULE_NAME)) {
RT_TRACE(COMP_ERR,"request_mem_region failed!\n");
goto fail;
}
priv->mem_start = ioremap_nocache(pmem_start, pmem_len);
if (!priv->mem_start) {
RT_TRACE(COMP_ERR,"ioremap failed!\n");
goto fail1;
}
dev->mem_start = (unsigned long) priv->mem_start;
dev->mem_end = (unsigned long) (priv->mem_start +
pci_resource_len(pdev, 0));
/* We disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, 0x41, 0x00);
pci_read_config_byte(pdev, 0x08, &revisionid);
/* If the revisionid is 0x10, the device uses rtl8192se. */
if (pdev->device == 0x8192 && revisionid == 0x10)
goto fail1;
pci_read_config_byte(pdev, 0x05, &unit);
pci_write_config_byte(pdev, 0x05, unit & (~0x04));
dev->irq = pdev->irq;
priv->irq = 0;
dev->netdev_ops = &rtl8192_netdev_ops;
dev->wireless_handlers = &r8192_wx_handlers_def;
dev->type=ARPHRD_ETHER;
dev->watchdog_timeo = HZ*3;
if (dev_alloc_name(dev, ifname) < 0){
RT_TRACE(COMP_INIT, "Oops: devname already taken! Trying wlan%%d...\n");
strcpy(ifname, "wlan%d");
dev_alloc_name(dev, ifname);
}
RT_TRACE(COMP_INIT, "Driver probe completed1\n");
if (rtl8192_init(priv)!=0) {
RT_TRACE(COMP_ERR, "Initialization failed\n");
goto fail;
}
register_netdev(dev);
RT_TRACE(COMP_INIT, "dev name=======> %s\n",dev->name);
rtl8192_proc_init_one(priv);
RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
fail1:
if (priv->mem_start) {
iounmap(priv->mem_start);
release_mem_region( pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1) );
}
fail:
if(dev){
if (priv->irq) {
free_irq(priv->irq, priv);
priv->irq = 0;
}
free_ieee80211(dev);
}
fail_free:
pci_disable_device(pdev);
DMESG("wlan driver load failed\n");
pci_set_drvdata(pdev, NULL);
return ret;
}
/* Detach all the work and timer structures declared or initialized
* in the r8192_init function.
* */
static void rtl8192_cancel_deferred_work(struct r8192_priv* priv)
{
/* Call cancel_work_sync() instead of cancel_delayed_work() if and only if the kernel
* version is 2.6.20 or newer and the work structure is defined as struct work_struct.
* Otherwise calling cancel_delayed_work() is enough.
* FIXME (2.6.20 should be 2.6.22, and a plain work_struct should not be cancelled with cancel_delayed_work)
* */
cancel_delayed_work(&priv->watch_dog_wq);
cancel_delayed_work(&priv->update_beacon_wq);
cancel_delayed_work(&priv->ieee80211->hw_wakeup_wq);
cancel_delayed_work(&priv->gpio_change_rf_wq);
cancel_work_sync(&priv->reset_wq);
cancel_work_sync(&priv->qos_activate);
}
static void __devexit rtl8192_pci_disconnect(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct r8192_priv *priv ;
u32 i;
if (dev) {
unregister_netdev(dev);
priv = ieee80211_priv(dev);
rtl8192_proc_remove_one(priv);
rtl8192_down(dev);
if (priv->pFirmware)
{
vfree(priv->pFirmware);
priv->pFirmware = NULL;
}
destroy_workqueue(priv->priv_wq);
/* free tx/rx rings */
rtl8192_free_rx_ring(priv);
for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
rtl8192_free_tx_ring(priv, i);
if (priv->irq) {
printk("Freeing irq %d\n", priv->irq);
free_irq(priv->irq, priv);
priv->irq = 0;
}
if (priv->mem_start) {
iounmap(priv->mem_start);
release_mem_region( pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1) );
}
free_ieee80211(dev);
}
pci_disable_device(pdev);
RT_TRACE(COMP_DOWN, "wlan driver removed\n");
}
extern int ieee80211_rtl_init(void);
extern void ieee80211_rtl_exit(void);
static int __init rtl8192_pci_module_init(void)
{
int retval;
retval = ieee80211_rtl_init();
if (retval)
return retval;
printk(KERN_INFO "\nLinux kernel driver for RTL8192 based WLAN cards\n");
printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan\n");
RT_TRACE(COMP_INIT, "Initializing module\n");
rtl8192_proc_module_init();
if(0!=pci_register_driver(&rtl8192_pci_driver))
{
DMESG("No device found");
/*pci_unregister_driver (&rtl8192_pci_driver);*/
return -ENODEV;
}
return 0;
}
static void __exit rtl8192_pci_module_exit(void)
{
pci_unregister_driver(&rtl8192_pci_driver);
RT_TRACE(COMP_DOWN, "Exiting\n");
rtl8192_proc_module_remove();
ieee80211_rtl_exit();
}
static irqreturn_t rtl8192_interrupt(int irq, void *param)
{
struct r8192_priv *priv = param;
struct net_device *dev = priv->ieee80211->dev;
unsigned long flags;
u32 inta;
irqreturn_t ret = IRQ_HANDLED;
spin_lock_irqsave(&priv->irq_th_lock, flags);
/* ISR: 4bytes */
inta = read_nic_dword(priv, ISR); /* & priv->IntrMask; */
write_nic_dword(priv, ISR, inta); /* reset int situation */
if (!inta) {
/*
* most probably we can safely return IRQ_NONE,
* but for now it is better to avoid problems
*/
goto out_unlock;
}
if (inta == 0xffff) {
/* HW disappeared */
goto out_unlock;
}
if (!netif_running(dev))
goto out_unlock;
if (inta & IMR_TBDOK) {
RT_TRACE(COMP_INTR, "beacon ok interrupt!\n");
rtl8192_tx_isr(priv, BEACON_QUEUE);
priv->stats.txbeaconokint++;
}
if (inta & IMR_TBDER) {
RT_TRACE(COMP_INTR, "beacon ok interrupt!\n");
rtl8192_tx_isr(priv, BEACON_QUEUE);
priv->stats.txbeaconerr++;
}
if (inta & IMR_MGNTDOK ) {
RT_TRACE(COMP_INTR, "Manage ok interrupt!\n");
priv->stats.txmanageokint++;
rtl8192_tx_isr(priv, MGNT_QUEUE);
}
if (inta & IMR_COMDOK)
{
priv->stats.txcmdpktokint++;
rtl8192_tx_isr(priv, TXCMD_QUEUE);
}
if (inta & IMR_ROK) {
priv->stats.rxint++;
tasklet_schedule(&priv->irq_rx_tasklet);
}
if (inta & IMR_BcnInt) {
RT_TRACE(COMP_INTR, "prepare beacon for interrupt!\n");
tasklet_schedule(&priv->irq_prepare_beacon_tasklet);
}
if (inta & IMR_RDU) {
RT_TRACE(COMP_INTR, "rx descriptor unavailable!\n");
priv->stats.rxrdu++;
/* reset int situation */
write_nic_dword(priv, INTA_MASK, read_nic_dword(priv, INTA_MASK) & ~IMR_RDU);
tasklet_schedule(&priv->irq_rx_tasklet);
}
if (inta & IMR_RXFOVW) {
RT_TRACE(COMP_INTR, "rx overflow !\n");
priv->stats.rxoverflow++;
tasklet_schedule(&priv->irq_rx_tasklet);
}
if (inta & IMR_TXFOVW)
priv->stats.txoverflow++;
if (inta & IMR_BKDOK) {
RT_TRACE(COMP_INTR, "BK Tx OK interrupt!\n");
priv->stats.txbkokint++;
priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
rtl8192_tx_isr(priv, BK_QUEUE);
}
if (inta & IMR_BEDOK) {
RT_TRACE(COMP_INTR, "BE TX OK interrupt!\n");
priv->stats.txbeokint++;
priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
rtl8192_tx_isr(priv, BE_QUEUE);
}
if (inta & IMR_VIDOK) {
RT_TRACE(COMP_INTR, "VI TX OK interrupt!\n");
priv->stats.txviokint++;
priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
rtl8192_tx_isr(priv, VI_QUEUE);
}
if (inta & IMR_VODOK) {
priv->stats.txvookint++;
priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
rtl8192_tx_isr(priv, VO_QUEUE);
}
out_unlock:
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
return ret;
}
void EnableHWSecurityConfig8192(struct r8192_priv *priv)
{
u8 SECR_value = 0x0;
struct ieee80211_device* ieee = priv->ieee80211;
SECR_value = SCR_TxEncEnable | SCR_RxDecEnable;
if (((KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->pairwise_key_type)) && (priv->ieee80211->auth_mode != 2))
{
SECR_value |= SCR_RxUseDK;
SECR_value |= SCR_TxUseDK;
}
else if ((ieee->iw_mode == IW_MODE_ADHOC) && (ieee->pairwise_key_type & (KEY_TYPE_CCMP | KEY_TYPE_TKIP)))
{
SECR_value |= SCR_RxUseDK;
SECR_value |= SCR_TxUseDK;
}
//add HWSec active enable here.
//By default use hwsec. When the peer AP is in N-only mode and pairwise_key_type is not AES (which HT_IOT_ACT_PURE_N_MODE indicates), use software security. When the peer AP is in mixed b/g/n mode and pairwise_key_type is not AES, use g-mode hardware security. WB on 2008.7.4
ieee->hwsec_active = 1;
if ((ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE) || !hwwep)//!ieee->hwsec_support) //add hwsec_support flag to totally control hw_sec on/off
{
ieee->hwsec_active = 0;
SECR_value &= ~SCR_RxDecEnable;
}
RT_TRACE(COMP_SEC,"%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n", __FUNCTION__,
ieee->hwsec_active, ieee->pairwise_key_type, SECR_value);
{
write_nic_byte(priv, SECR, SECR_value);//SECR_value | SCR_UseDK );
}
}
#define TOTAL_CAM_ENTRY 32
//#define CAM_CONTENT_COUNT 8
void setKey(struct r8192_priv *priv, u8 EntryNo, u8 KeyIndex, u16 KeyType,
const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent)
{
u32 TargetCommand = 0;
u32 TargetContent = 0;
u16 usConfig = 0;
u8 i;
#ifdef ENABLE_IPS
RT_RF_POWER_STATE rtState;
rtState = priv->eRFPowerState;
if (priv->PowerSaveControl.bInactivePs){
if(rtState == eRfOff){
if(priv->RfOffReason > RF_CHANGE_BY_IPS)
{
RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",__FUNCTION__);
//up(&priv->wx_sem);
return ;
}
else{
down(&priv->ieee80211->ips_sem);
IPSLeave(priv);
up(&priv->ieee80211->ips_sem);
}
}
}
priv->ieee80211->is_set_key = true;
#endif
if (EntryNo >= TOTAL_CAM_ENTRY)
RT_TRACE(COMP_ERR, "cam entry exceeds in setKey()\n");
RT_TRACE(COMP_SEC, "====>to setKey(), priv:%p, EntryNo:%d, KeyIndex:%d, KeyType:%d, MacAddr%pM\n", priv, EntryNo, KeyIndex, KeyType, MacAddr);
if (DefaultKey)
usConfig |= BIT15 | (KeyType<<2);
else
usConfig |= BIT15 | (KeyType<<2) | KeyIndex;
// usConfig |= BIT15 | (KeyType<<2) | (DefaultKey<<5) | KeyIndex;
for(i=0 ; i<CAM_CONTENT_COUNT; i++){
TargetCommand = i+CAM_CONTENT_COUNT*EntryNo;
TargetCommand |= BIT31|BIT16;
if(i==0){//MAC|Config
TargetContent = (u32)(*(MacAddr+0)) << 16|
(u32)(*(MacAddr+1)) << 24|
(u32)usConfig;
write_nic_dword(priv, WCAMI, TargetContent);
write_nic_dword(priv, RWCAM, TargetCommand);
}
else if(i==1){//MAC
TargetContent = (u32)(*(MacAddr+2)) |
(u32)(*(MacAddr+3)) << 8|
(u32)(*(MacAddr+4)) << 16|
(u32)(*(MacAddr+5)) << 24;
write_nic_dword(priv, WCAMI, TargetContent);
write_nic_dword(priv, RWCAM, TargetCommand);
}
else { //Key Material
if(KeyContent != NULL)
{
write_nic_dword(priv, WCAMI, (u32)(*(KeyContent+i-2)) );
write_nic_dword(priv, RWCAM, TargetCommand);
}
}
}
RT_TRACE(COMP_SEC,"=========>after set key, usconfig:%x\n", usConfig);
}
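/*
 * The CAM config word written above is packed as BIT15 (valid) |
 * (KeyType << 2) | KeyIndex (the index is omitted for a default key).
 * For example, assuming KEY_TYPE_CCMP is 4 as in this driver family, a
 * pairwise CCMP key at KeyIndex 0 with DefaultKey = 0 gives
 * usConfig = 0x8000 | (4 << 2) | 0 = 0x8010. Each of the CAM_CONTENT_COUNT
 * dwords is then written through WCAMI with its command/address in RWCAM.
 */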
bool NicIFEnableNIC(struct r8192_priv *priv)
{
RT_STATUS init_status = RT_STATUS_SUCCESS;
PRT_POWER_SAVE_CONTROL pPSC = &priv->PowerSaveControl;
//YJ,add,091109
if (priv->up == 0){
RT_TRACE(COMP_ERR, "ERR!!! %s(): Driver is already down!\n",__FUNCTION__);
priv->bdisable_nic = false; //YJ,add,091111
return false;
}
// <1> Reset memory: descriptor, buffer,..
//NicIFResetMemory(Adapter);
// <2> Enable Adapter
//priv->bfirst_init = true;
init_status = rtl8192_adapter_start(priv);
if (init_status != RT_STATUS_SUCCESS) {
RT_TRACE(COMP_ERR,"ERR!!! %s(): initialization is failed!\n",__FUNCTION__);
priv->bdisable_nic = false; //YJ,add,091111
return false;
}
RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
//priv->bfirst_init = false;
// <3> Enable Interrupt
rtl8192_irq_enable(priv);
priv->bdisable_nic = false;
return (init_status == RT_STATUS_SUCCESS);
}
bool NicIFDisableNIC(struct r8192_priv *priv)
{
bool status = true;
u8 tmp_state = 0;
// <1> Disable Interrupt
priv->bdisable_nic = true; //YJ,move,091109
tmp_state = priv->ieee80211->state;
ieee80211_softmac_stop_protocol(priv->ieee80211, false);
priv->ieee80211->state = tmp_state;
rtl8192_cancel_deferred_work(priv);
rtl8192_irq_disable(priv);
// <2> Stop all timers
// <3> Disable Adapter
rtl8192_halt_adapter(priv, false);
// priv->bdisable_nic = true;
return status;
}
module_init(rtl8192_pci_module_init);
module_exit(rtl8192_pci_module_exit);
| gpl-2.0 |
VRToxin-AOSP/android_kernel_moto_shamu | arch/x86/kernel/cpu/mcheck/mce-severity.c | 2178 | 7319 | /*
* MCE grading rules.
* Copyright 2008, 2009 Intel Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*
* Author: Andi Kleen
*/
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <asm/mce.h>
#include "mce-internal.h"
/*
* Grade an mce by severity. In general the most severe ones are processed
* first. Since there are quite a lot of combinations, test the bits in a
* table-driven way. The rules are simply processed in order, first
* match wins.
*
* Note this is only used for machine check exceptions, the corrected
* errors use much simpler rules. The exceptions still check for the corrected
* errors, but only to leave them alone for the CMCI handler (except for
* panic situations)
*/
enum context { IN_KERNEL = 1, IN_USER = 2 };
enum ser { SER_REQUIRED = 1, NO_SER = 2 };
static struct severity {
u64 mask;
u64 result;
unsigned char sev;
unsigned char mcgmask;
unsigned char mcgres;
unsigned char ser;
unsigned char context;
unsigned char covered;
char *msg;
} severities[] = {
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
#define KERNEL .context = IN_KERNEL
#define USER .context = IN_USER
#define SER .ser = SER_REQUIRED
#define NOSER .ser = NO_SER
#define BITCLR(x) .mask = x, .result = 0
#define BITSET(x) .mask = x, .result = x
#define MCGMASK(x, y) .mcgmask = x, .mcgres = y
#define MASK(x, y) .mask = x, .result = y
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
MCESEV(
NO, "Invalid",
BITCLR(MCI_STATUS_VAL)
),
MCESEV(
NO, "Not enabled",
BITCLR(MCI_STATUS_EN)
),
MCESEV(
PANIC, "Processor context corrupt",
BITSET(MCI_STATUS_PCC)
),
/* When MCIP is not set something is very confused */
MCESEV(
PANIC, "MCIP not set in MCA handler",
MCGMASK(MCG_STATUS_MCIP, 0)
),
/* Neither restart nor error IP -- no chance to recover -> PANIC */
MCESEV(
PANIC, "Neither restart nor error IP",
MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0)
),
MCESEV(
PANIC, "In kernel and no restart IP",
KERNEL, MCGMASK(MCG_STATUS_RIPV, 0)
),
MCESEV(
KEEP, "Corrected error",
NOSER, BITCLR(MCI_STATUS_UC)
),
/* ignore OVER for UCNA */
MCESEV(
KEEP, "Uncorrected no action required",
SER, MASK(MCI_UC_SAR, MCI_STATUS_UC)
),
MCESEV(
PANIC, "Illegal combination (UCNA with AR=1)",
SER,
MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
),
MCESEV(
KEEP, "Non signalled machine check",
SER, BITCLR(MCI_STATUS_S)
),
MCESEV(
PANIC, "Action required with lost events",
SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR)
),
/* known AR MCACODs: */
#ifdef CONFIG_MEMORY_FAILURE
MCESEV(
KEEP, "HT thread notices Action required: data load error",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
MCGMASK(MCG_STATUS_EIPV, 0)
),
MCESEV(
AR, "Action required: data load error",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
USER
),
MCESEV(
KEEP, "HT thread notices Action required: instruction fetch error",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
MCGMASK(MCG_STATUS_EIPV, 0)
),
MCESEV(
AR, "Action required: instruction fetch error",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
USER
),
#endif
MCESEV(
PANIC, "Action required: unknown MCACOD",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
),
/* known AO MCACODs: */
MCESEV(
AO, "Action optional: memory scrubbing error",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD_SCRUBMSK, MCI_UC_S|MCACOD_SCRUB)
),
MCESEV(
AO, "Action optional: last level cache writeback error",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_S|MCACOD_L3WB)
),
MCESEV(
SOME, "Action optional: unknown MCACOD",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
),
MCESEV(
SOME, "Action optional with lost events",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S)
),
MCESEV(
PANIC, "Overflowed uncorrected",
BITSET(MCI_STATUS_OVER|MCI_STATUS_UC)
),
MCESEV(
UC, "Uncorrected",
BITSET(MCI_STATUS_UC)
),
MCESEV(
SOME, "No match",
BITSET(0)
) /* always matches. keep at end */
};
/*
* If mcgstatus indicated that ip/cs on the stack were
* no good, then "m->cs" will be zero and we will have
* to assume the worst case (IN_KERNEL) as we actually
* have no idea what we were executing when the machine
* check hit.
* If we do have a good "m->cs" (or a faked one in the
* case we were executing in VM86 mode) we can use it to
* distinguish an exception taken in user mode from one
* taken in the kernel.
*/
static int error_context(struct mce *m)
{
return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
}
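/*
 * The low two bits of CS are the privilege level of the interrupted context:
 * a typical x86-64 user CS of 0x33 gives (0x33 & 3) == 3 -> IN_USER, while a
 * kernel CS of 0x10 gives 0 -> IN_KERNEL.
 */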
int mce_severity(struct mce *m, int tolerant, char **msg)
{
enum context ctx = error_context(m);
struct severity *s;
for (s = severities;; s++) {
if ((m->status & s->mask) != s->result)
continue;
if ((m->mcgstatus & s->mcgmask) != s->mcgres)
continue;
if (s->ser == SER_REQUIRED && !mca_cfg.ser)
continue;
if (s->ser == NO_SER && mca_cfg.ser)
continue;
if (s->context && ctx != s->context)
continue;
if (msg)
*msg = s->msg;
s->covered = 1;
if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) {
if (panic_on_oops || tolerant < 1)
return MCE_PANIC_SEVERITY;
}
return s->sev;
}
}
#ifdef CONFIG_DEBUG_FS
static void *s_start(struct seq_file *f, loff_t *pos)
{
if (*pos >= ARRAY_SIZE(severities))
return NULL;
return &severities[*pos];
}
static void *s_next(struct seq_file *f, void *data, loff_t *pos)
{
if (++(*pos) >= ARRAY_SIZE(severities))
return NULL;
return &severities[*pos];
}
static void s_stop(struct seq_file *f, void *data)
{
}
static int s_show(struct seq_file *f, void *data)
{
struct severity *ser = data;
seq_printf(f, "%d\t%s\n", ser->covered, ser->msg);
return 0;
}
static const struct seq_operations severities_seq_ops = {
.start = s_start,
.next = s_next,
.stop = s_stop,
.show = s_show,
};
static int severities_coverage_open(struct inode *inode, struct file *file)
{
return seq_open(file, &severities_seq_ops);
}
static ssize_t severities_coverage_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
int i;
for (i = 0; i < ARRAY_SIZE(severities); i++)
severities[i].covered = 0;
return count;
}
static const struct file_operations severities_coverage_fops = {
.open = severities_coverage_open,
.release = seq_release,
.read = seq_read,
.write = severities_coverage_write,
.llseek = seq_lseek,
};
static int __init severities_debugfs_init(void)
{
struct dentry *dmce, *fsev;
dmce = mce_get_debugfs_dir();
if (!dmce)
goto err_out;
fsev = debugfs_create_file("severities-coverage", 0444, dmce, NULL,
&severities_coverage_fops);
if (!fsev)
goto err_out;
return 0;
err_out:
return -ENOMEM;
}
late_initcall(severities_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
| gpl-2.0 |
Motorhead1991/android_kernel_samsung_amazing | fs/ncpfs/inode.c | 2690 | 27648 | /*
* inode.c
*
* Copyright (C) 1995, 1996 by Volker Lendecke
* Modified for big endian by J.F. Chadima and David S. Miller
* Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
* Modified 1998 Wolfram Pienkoss for NLS
* Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
*
*/
#include <linux/module.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/namei.h>
#include <net/sock.h>
#include "ncp_fs.h"
#include "getopt.h"
#define NCP_DEFAULT_FILE_MODE 0600
#define NCP_DEFAULT_DIR_MODE 0700
#define NCP_DEFAULT_TIME_OUT 10
#define NCP_DEFAULT_RETRY_COUNT 20
static void ncp_evict_inode(struct inode *);
static void ncp_put_super(struct super_block *);
static int ncp_statfs(struct dentry *, struct kstatfs *);
static int ncp_show_options(struct seq_file *, struct vfsmount *);
static struct kmem_cache * ncp_inode_cachep;
static struct inode *ncp_alloc_inode(struct super_block *sb)
{
struct ncp_inode_info *ei;
ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
return &ei->vfs_inode;
}
static void ncp_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
}
static void ncp_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, ncp_i_callback);
}
static void init_once(void *foo)
{
struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
mutex_init(&ei->open_mutex);
inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
{
ncp_inode_cachep = kmem_cache_create("ncp_inode_cache",
sizeof(struct ncp_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
init_once);
if (ncp_inode_cachep == NULL)
return -ENOMEM;
return 0;
}
static void destroy_inodecache(void)
{
kmem_cache_destroy(ncp_inode_cachep);
}
static int ncp_remount(struct super_block *sb, int *flags, char* data)
{
*flags |= MS_NODIRATIME;
return 0;
}
static const struct super_operations ncp_sops =
{
.alloc_inode = ncp_alloc_inode,
.destroy_inode = ncp_destroy_inode,
.drop_inode = generic_delete_inode,
.evict_inode = ncp_evict_inode,
.put_super = ncp_put_super,
.statfs = ncp_statfs,
.remount_fs = ncp_remount,
.show_options = ncp_show_options,
};
/*
* Fill in the ncpfs-specific information in the inode.
*/
static void ncp_update_dirent(struct inode *inode, struct ncp_entry_info *nwinfo)
{
NCP_FINFO(inode)->DosDirNum = nwinfo->i.DosDirNum;
NCP_FINFO(inode)->dirEntNum = nwinfo->i.dirEntNum;
NCP_FINFO(inode)->volNumber = nwinfo->volume;
}
void ncp_update_inode(struct inode *inode, struct ncp_entry_info *nwinfo)
{
ncp_update_dirent(inode, nwinfo);
NCP_FINFO(inode)->nwattr = nwinfo->i.attributes;
NCP_FINFO(inode)->access = nwinfo->access;
memcpy(NCP_FINFO(inode)->file_handle, nwinfo->file_handle,
sizeof(nwinfo->file_handle));
DPRINTK("ncp_update_inode: updated %s, volnum=%d, dirent=%u\n",
nwinfo->i.entryName, NCP_FINFO(inode)->volNumber,
NCP_FINFO(inode)->dirEntNum);
}
static void ncp_update_dates(struct inode *inode, struct nw_info_struct *nwi)
{
/* NFS namespace mode overrides others if it's set. */
DPRINTK(KERN_DEBUG "ncp_update_dates_and_mode: (%s) nfs.mode=0%o\n",
nwi->entryName, nwi->nfs.mode);
if (nwi->nfs.mode) {
/* XXX Security? */
inode->i_mode = nwi->nfs.mode;
}
inode->i_blocks = (i_size_read(inode) + NCP_BLOCK_SIZE - 1) >> NCP_BLOCK_SHIFT;
inode->i_mtime.tv_sec = ncp_date_dos2unix(nwi->modifyTime, nwi->modifyDate);
inode->i_ctime.tv_sec = ncp_date_dos2unix(nwi->creationTime, nwi->creationDate);
inode->i_atime.tv_sec = ncp_date_dos2unix(0, nwi->lastAccessDate);
inode->i_atime.tv_nsec = 0;
inode->i_mtime.tv_nsec = 0;
inode->i_ctime.tv_nsec = 0;
}
static void ncp_update_attrs(struct inode *inode, struct ncp_entry_info *nwinfo)
{
struct nw_info_struct *nwi = &nwinfo->i;
struct ncp_server *server = NCP_SERVER(inode);
if (nwi->attributes & aDIR) {
inode->i_mode = server->m.dir_mode;
/* for directories dataStreamSize seems to be some
Object ID ??? */
i_size_write(inode, NCP_BLOCK_SIZE);
} else {
u32 size;
inode->i_mode = server->m.file_mode;
size = le32_to_cpu(nwi->dataStreamSize);
i_size_write(inode, size);
#ifdef CONFIG_NCPFS_EXTRAS
if ((server->m.flags & (NCP_MOUNT_EXTRAS|NCP_MOUNT_SYMLINKS))
&& (nwi->attributes & aSHARED)) {
switch (nwi->attributes & (aHIDDEN|aSYSTEM)) {
case aHIDDEN:
if (server->m.flags & NCP_MOUNT_SYMLINKS) {
if (/* (size >= NCP_MIN_SYMLINK_SIZE)
&& */ (size <= NCP_MAX_SYMLINK_SIZE)) {
inode->i_mode = (inode->i_mode & ~S_IFMT) | S_IFLNK;
NCP_FINFO(inode)->flags |= NCPI_KLUDGE_SYMLINK;
break;
}
}
/* FALLTHROUGH */
case 0:
if (server->m.flags & NCP_MOUNT_EXTRAS)
inode->i_mode |= S_IRUGO;
break;
case aSYSTEM:
if (server->m.flags & NCP_MOUNT_EXTRAS)
inode->i_mode |= (inode->i_mode >> 2) & S_IXUGO;
break;
/* case aSYSTEM|aHIDDEN: */
default:
/* reserved combination */
break;
}
}
#endif
}
if (nwi->attributes & aRONLY) inode->i_mode &= ~S_IWUGO;
}
void ncp_update_inode2(struct inode* inode, struct ncp_entry_info *nwinfo)
{
NCP_FINFO(inode)->flags = 0;
if (!atomic_read(&NCP_FINFO(inode)->opened)) {
NCP_FINFO(inode)->nwattr = nwinfo->i.attributes;
ncp_update_attrs(inode, nwinfo);
}
ncp_update_dates(inode, &nwinfo->i);
ncp_update_dirent(inode, nwinfo);
}
/*
* Fill in the inode based on the ncp_entry_info structure. Used only for brand new inodes.
*/
static void ncp_set_attr(struct inode *inode, struct ncp_entry_info *nwinfo)
{
struct ncp_server *server = NCP_SERVER(inode);
NCP_FINFO(inode)->flags = 0;
ncp_update_attrs(inode, nwinfo);
DDPRINTK("ncp_read_inode: inode->i_mode = %u\n", inode->i_mode);
inode->i_nlink = 1;
inode->i_uid = server->m.uid;
inode->i_gid = server->m.gid;
ncp_update_dates(inode, &nwinfo->i);
ncp_update_inode(inode, nwinfo);
}
#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
static const struct inode_operations ncp_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.setattr = ncp_notify_change,
};
#endif
/*
* Get a new inode.
*/
struct inode *
ncp_iget(struct super_block *sb, struct ncp_entry_info *info)
{
struct inode *inode;
if (info == NULL) {
printk(KERN_ERR "ncp_iget: info is NULL\n");
return NULL;
}
inode = new_inode(sb);
if (inode) {
atomic_set(&NCP_FINFO(inode)->opened, info->opened);
inode->i_mapping->backing_dev_info = sb->s_bdi;
inode->i_ino = info->ino;
ncp_set_attr(inode, info);
if (S_ISREG(inode->i_mode)) {
inode->i_op = &ncp_file_inode_operations;
inode->i_fop = &ncp_file_operations;
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &ncp_dir_inode_operations;
inode->i_fop = &ncp_dir_operations;
#ifdef CONFIG_NCPFS_NFS_NS
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
init_special_inode(inode, inode->i_mode,
new_decode_dev(info->i.nfs.rdev));
#endif
#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
} else if (S_ISLNK(inode->i_mode)) {
inode->i_op = &ncp_symlink_inode_operations;
inode->i_data.a_ops = &ncp_symlink_aops;
#endif
} else {
make_bad_inode(inode);
}
insert_inode_hash(inode);
} else
printk(KERN_ERR "ncp_iget: iget failed!\n");
return inode;
}
static void
ncp_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
if (S_ISDIR(inode->i_mode)) {
DDPRINTK("ncp_evict_inode: put directory %ld\n", inode->i_ino);
}
if (ncp_make_closed(inode) != 0) {
/* We can't do anything but complain. */
printk(KERN_ERR "ncp_evict_inode: could not close\n");
}
}
static void ncp_stop_tasks(struct ncp_server *server) {
struct sock* sk = server->ncp_sock->sk;
lock_sock(sk);
sk->sk_error_report = server->error_report;
sk->sk_data_ready = server->data_ready;
sk->sk_write_space = server->write_space;
release_sock(sk);
del_timer_sync(&server->timeout_tm);
flush_work_sync(&server->rcv.tq);
if (sk->sk_socket->type == SOCK_STREAM)
flush_work_sync(&server->tx.tq);
else
flush_work_sync(&server->timeout_tq);
}
static int ncp_show_options(struct seq_file *seq, struct vfsmount *mnt)
{
struct ncp_server *server = NCP_SBP(mnt->mnt_sb);
unsigned int tmp;
if (server->m.uid != 0)
seq_printf(seq, ",uid=%u", server->m.uid);
if (server->m.gid != 0)
seq_printf(seq, ",gid=%u", server->m.gid);
if (server->m.mounted_uid != 0)
seq_printf(seq, ",owner=%u", server->m.mounted_uid);
tmp = server->m.file_mode & S_IALLUGO;
if (tmp != NCP_DEFAULT_FILE_MODE)
seq_printf(seq, ",mode=0%o", tmp);
tmp = server->m.dir_mode & S_IALLUGO;
if (tmp != NCP_DEFAULT_DIR_MODE)
seq_printf(seq, ",dirmode=0%o", tmp);
if (server->m.time_out != NCP_DEFAULT_TIME_OUT * HZ / 100) {
tmp = server->m.time_out * 100 / HZ;
seq_printf(seq, ",timeout=%u", tmp);
}
if (server->m.retry_count != NCP_DEFAULT_RETRY_COUNT)
seq_printf(seq, ",retry=%u", server->m.retry_count);
if (server->m.flags != 0)
seq_printf(seq, ",flags=%lu", server->m.flags);
if (server->m.wdog_pid != NULL)
seq_printf(seq, ",wdogpid=%u", pid_vnr(server->m.wdog_pid));
return 0;
}
static const struct ncp_option ncp_opts[] = {
{ "uid", OPT_INT, 'u' },
{ "gid", OPT_INT, 'g' },
{ "owner", OPT_INT, 'o' },
{ "mode", OPT_INT, 'm' },
{ "dirmode", OPT_INT, 'd' },
{ "timeout", OPT_INT, 't' },
{ "retry", OPT_INT, 'r' },
{ "flags", OPT_INT, 'f' },
{ "wdogpid", OPT_INT, 'w' },
{ "ncpfd", OPT_INT, 'n' },
{ "infofd", OPT_INT, 'i' }, /* v5 */
{ "version", OPT_INT, 'v' },
{ NULL, 0, 0 } };
static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options) {
int optval;
char *optarg;
unsigned long optint;
int version = 0;
int ret;
data->flags = 0;
data->int_flags = 0;
data->mounted_uid = 0;
data->wdog_pid = NULL;
data->ncp_fd = ~0;
data->time_out = NCP_DEFAULT_TIME_OUT;
data->retry_count = NCP_DEFAULT_RETRY_COUNT;
data->uid = 0;
data->gid = 0;
data->file_mode = NCP_DEFAULT_FILE_MODE;
data->dir_mode = NCP_DEFAULT_DIR_MODE;
data->info_fd = -1;
data->mounted_vol[0] = 0;
while ((optval = ncp_getopt("ncpfs", &options, ncp_opts, NULL, &optarg, &optint)) != 0) {
ret = optval;
if (ret < 0)
goto err;
switch (optval) {
case 'u':
data->uid = optint;
break;
case 'g':
data->gid = optint;
break;
case 'o':
data->mounted_uid = optint;
break;
case 'm':
data->file_mode = optint;
break;
case 'd':
data->dir_mode = optint;
break;
case 't':
data->time_out = optint;
break;
case 'r':
data->retry_count = optint;
break;
case 'f':
data->flags = optint;
break;
case 'w':
data->wdog_pid = find_get_pid(optint);
break;
case 'n':
data->ncp_fd = optint;
break;
case 'i':
data->info_fd = optint;
break;
case 'v':
ret = -ECHRNG;
if (optint < NCP_MOUNT_VERSION_V4)
goto err;
if (optint > NCP_MOUNT_VERSION_V5)
goto err;
version = optint;
break;
}
}
return 0;
err:
put_pid(data->wdog_pid);
data->wdog_pid = NULL;
return ret;
}
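/*
 * ncp_parse_options() consumes a comma-separated string using the keywords
 * in ncp_opts[]; a mount helper might, for example, pass something like
 * (values illustrative):
 *
 *   "version=5,ncpfd=4,uid=1000,gid=100,mode=0600,dirmode=0700,timeout=10"
 *
 * Each keyword maps to one case in the switch above; an unknown keyword
 * makes ncp_getopt() return a negative value, which is passed back to the
 * caller through the err path.
 */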
static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
{
struct ncp_mount_data_kernel data;
struct ncp_server *server;
struct file *ncp_filp;
struct inode *root_inode;
struct inode *sock_inode;
struct socket *sock;
int error;
int default_bufsize;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
int options;
#endif
struct ncp_entry_info finfo;
memset(&data, 0, sizeof(data));
server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
if (!server)
return -ENOMEM;
sb->s_fs_info = server;
error = -EFAULT;
if (raw_data == NULL)
goto out;
switch (*(int*)raw_data) {
case NCP_MOUNT_VERSION:
{
struct ncp_mount_data* md = (struct ncp_mount_data*)raw_data;
data.flags = md->flags;
data.int_flags = NCP_IMOUNT_LOGGEDIN_POSSIBLE;
data.mounted_uid = md->mounted_uid;
data.wdog_pid = find_get_pid(md->wdog_pid);
data.ncp_fd = md->ncp_fd;
data.time_out = md->time_out;
data.retry_count = md->retry_count;
data.uid = md->uid;
data.gid = md->gid;
data.file_mode = md->file_mode;
data.dir_mode = md->dir_mode;
data.info_fd = -1;
memcpy(data.mounted_vol, md->mounted_vol,
NCP_VOLNAME_LEN+1);
}
break;
case NCP_MOUNT_VERSION_V4:
{
struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data;
data.flags = md->flags;
data.mounted_uid = md->mounted_uid;
data.wdog_pid = find_get_pid(md->wdog_pid);
data.ncp_fd = md->ncp_fd;
data.time_out = md->time_out;
data.retry_count = md->retry_count;
data.uid = md->uid;
data.gid = md->gid;
data.file_mode = md->file_mode;
data.dir_mode = md->dir_mode;
data.info_fd = -1;
}
break;
default:
error = -ECHRNG;
if (memcmp(raw_data, "vers", 4) == 0) {
error = ncp_parse_options(&data, raw_data);
}
if (error)
goto out;
break;
}
error = -EBADF;
ncp_filp = fget(data.ncp_fd);
if (!ncp_filp)
goto out;
error = -ENOTSOCK;
sock_inode = ncp_filp->f_path.dentry->d_inode;
if (!S_ISSOCK(sock_inode->i_mode))
goto out_fput;
sock = SOCKET_I(sock_inode);
if (!sock)
goto out_fput;
if (sock->type == SOCK_STREAM)
default_bufsize = 0xF000;
else
default_bufsize = 1024;
sb->s_flags |= MS_NODIRATIME; /* probably even noatime */
sb->s_maxbytes = 0xFFFFFFFFU;
sb->s_blocksize = 1024; /* Eh... Is this correct? */
sb->s_blocksize_bits = 10;
sb->s_magic = NCP_SUPER_MAGIC;
sb->s_op = &ncp_sops;
sb->s_d_op = &ncp_dentry_operations;
sb->s_bdi = &server->bdi;
server = NCP_SBP(sb);
memset(server, 0, sizeof(*server));
error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
if (error)
goto out_bdi;
server->ncp_filp = ncp_filp;
server->ncp_sock = sock;
if (data.info_fd != -1) {
struct socket *info_sock;
error = -EBADF;
server->info_filp = fget(data.info_fd);
if (!server->info_filp)
goto out_fput;
error = -ENOTSOCK;
sock_inode = server->info_filp->f_path.dentry->d_inode;
if (!S_ISSOCK(sock_inode->i_mode))
goto out_fput2;
info_sock = SOCKET_I(sock_inode);
if (!info_sock)
goto out_fput2;
error = -EBADFD;
if (info_sock->type != SOCK_STREAM)
goto out_fput2;
server->info_sock = info_sock;
}
/* server->lock = 0; */
mutex_init(&server->mutex);
server->packet = NULL;
/* server->buffer_size = 0; */
/* server->conn_status = 0; */
/* server->root_dentry = NULL; */
/* server->root_setuped = 0; */
mutex_init(&server->root_setup_lock);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
/* server->sign_wanted = 0; */
/* server->sign_active = 0; */
#endif
init_rwsem(&server->auth_rwsem);
server->auth.auth_type = NCP_AUTH_NONE;
/* server->auth.object_name_len = 0; */
/* server->auth.object_name = NULL; */
/* server->auth.object_type = 0; */
/* server->priv.len = 0; */
/* server->priv.data = NULL; */
server->m = data;
/* Although anything producing this is buggy, it happens
now because of PATH_MAX changes.. */
if (server->m.time_out < 1) {
server->m.time_out = 10;
printk(KERN_INFO "You need to recompile your ncpfs utils..\n");
}
server->m.time_out = server->m.time_out * HZ / 100;
server->m.file_mode = (server->m.file_mode & S_IRWXUGO) | S_IFREG;
server->m.dir_mode = (server->m.dir_mode & S_IRWXUGO) | S_IFDIR;
#ifdef CONFIG_NCPFS_NLS
/* load the default NLS charsets */
server->nls_vol = load_nls_default();
server->nls_io = load_nls_default();
#endif /* CONFIG_NCPFS_NLS */
atomic_set(&server->dentry_ttl, 0); /* no caching */
INIT_LIST_HEAD(&server->tx.requests);
mutex_init(&server->rcv.creq_mutex);
server->tx.creq = NULL;
server->rcv.creq = NULL;
init_timer(&server->timeout_tm);
#undef NCP_PACKET_SIZE
#define NCP_PACKET_SIZE 131072
error = -ENOMEM;
server->packet_size = NCP_PACKET_SIZE;
server->packet = vmalloc(NCP_PACKET_SIZE);
if (server->packet == NULL)
goto out_nls;
server->txbuf = vmalloc(NCP_PACKET_SIZE);
if (server->txbuf == NULL)
goto out_packet;
server->rxbuf = vmalloc(NCP_PACKET_SIZE);
if (server->rxbuf == NULL)
goto out_txbuf;
lock_sock(sock->sk);
server->data_ready = sock->sk->sk_data_ready;
server->write_space = sock->sk->sk_write_space;
server->error_report = sock->sk->sk_error_report;
sock->sk->sk_user_data = server;
sock->sk->sk_data_ready = ncp_tcp_data_ready;
sock->sk->sk_error_report = ncp_tcp_error_report;
if (sock->type == SOCK_STREAM) {
server->rcv.ptr = (unsigned char*)&server->rcv.buf;
server->rcv.len = 10;
server->rcv.state = 0;
INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
sock->sk->sk_write_space = ncp_tcp_write_space;
} else {
INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
server->timeout_tm.data = (unsigned long)server;
server->timeout_tm.function = ncpdgram_timeout_call;
}
release_sock(sock->sk);
ncp_lock_server(server);
error = ncp_connect(server);
ncp_unlock_server(server);
if (error < 0)
goto out_rxbuf;
DPRINTK("ncp_fill_super: NCP_SBP(sb) = %x\n", (int) NCP_SBP(sb));
error = -EMSGSIZE; /* -EREMOTESIDEINCOMPATIBLE */
#ifdef CONFIG_NCPFS_PACKET_SIGNING
if (ncp_negotiate_size_and_options(server, default_bufsize,
NCP_DEFAULT_OPTIONS, &(server->buffer_size), &options) == 0)
{
if (options != NCP_DEFAULT_OPTIONS)
{
if (ncp_negotiate_size_and_options(server,
default_bufsize,
options & 2,
&(server->buffer_size), &options) != 0)
{
goto out_disconnect;
}
}
ncp_lock_server(server);
if (options & 2)
server->sign_wanted = 1;
ncp_unlock_server(server);
}
else
#endif /* CONFIG_NCPFS_PACKET_SIGNING */
if (ncp_negotiate_buffersize(server, default_bufsize,
&(server->buffer_size)) != 0)
goto out_disconnect;
DPRINTK("ncpfs: bufsize = %d\n", server->buffer_size);
memset(&finfo, 0, sizeof(finfo));
finfo.i.attributes = aDIR;
finfo.i.dataStreamSize = 0; /* ignored */
finfo.i.dirEntNum = 0;
finfo.i.DosDirNum = 0;
#ifdef CONFIG_NCPFS_SMALLDOS
finfo.i.NSCreator = NW_NS_DOS;
#endif
finfo.volume = NCP_NUMBER_OF_VOLUMES;
/* set dates of mountpoint to Jan 1, 1986; 00:00 */
finfo.i.creationTime = finfo.i.modifyTime
= cpu_to_le16(0x0000);
finfo.i.creationDate = finfo.i.modifyDate
= finfo.i.lastAccessDate
= cpu_to_le16(0x0C21);
finfo.i.nameLen = 0;
finfo.i.entryName[0] = '\0';
finfo.opened = 0;
finfo.ino = 2; /* tradition */
server->name_space[finfo.volume] = NW_NS_DOS;
error = -ENOMEM;
root_inode = ncp_iget(sb, &finfo);
if (!root_inode)
goto out_disconnect;
DPRINTK("ncp_fill_super: root vol=%d\n", NCP_FINFO(root_inode)->volNumber);
sb->s_root = d_alloc_root(root_inode);
if (!sb->s_root)
goto out_no_root;
return 0;
out_no_root:
iput(root_inode);
out_disconnect:
ncp_lock_server(server);
ncp_disconnect(server);
ncp_unlock_server(server);
out_rxbuf:
ncp_stop_tasks(server);
vfree(server->rxbuf);
out_txbuf:
vfree(server->txbuf);
out_packet:
vfree(server->packet);
out_nls:
#ifdef CONFIG_NCPFS_NLS
unload_nls(server->nls_io);
unload_nls(server->nls_vol);
#endif
mutex_destroy(&server->rcv.creq_mutex);
mutex_destroy(&server->root_setup_lock);
mutex_destroy(&server->mutex);
out_fput2:
if (server->info_filp)
fput(server->info_filp);
out_fput:
bdi_destroy(&server->bdi);
out_bdi:
/* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
*
* The previously used put_filp(ncp_filp); was bogus, since
* it doesn't perform proper unlocking.
*/
fput(ncp_filp);
out:
put_pid(data.wdog_pid);
sb->s_fs_info = NULL;
kfree(server);
return error;
}
static void ncp_put_super(struct super_block *sb)
{
struct ncp_server *server = NCP_SBP(sb);
ncp_lock_server(server);
ncp_disconnect(server);
ncp_unlock_server(server);
ncp_stop_tasks(server);
#ifdef CONFIG_NCPFS_NLS
/* unload the NLS charsets */
unload_nls(server->nls_vol);
unload_nls(server->nls_io);
#endif /* CONFIG_NCPFS_NLS */
mutex_destroy(&server->rcv.creq_mutex);
mutex_destroy(&server->root_setup_lock);
mutex_destroy(&server->mutex);
if (server->info_filp)
fput(server->info_filp);
fput(server->ncp_filp);
kill_pid(server->m.wdog_pid, SIGTERM, 1);
put_pid(server->m.wdog_pid);
bdi_destroy(&server->bdi);
kfree(server->priv.data);
kfree(server->auth.object_name);
vfree(server->rxbuf);
vfree(server->txbuf);
vfree(server->packet);
sb->s_fs_info = NULL;
kfree(server);
}
static int ncp_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct dentry* d;
struct inode* i;
struct ncp_inode_info* ni;
struct ncp_server* s;
struct ncp_volume_info vi;
struct super_block *sb = dentry->d_sb;
int err;
__u8 dh;
d = sb->s_root;
if (!d) {
goto dflt;
}
i = d->d_inode;
if (!i) {
goto dflt;
}
ni = NCP_FINFO(i);
if (!ni) {
goto dflt;
}
s = NCP_SBP(sb);
if (!s) {
goto dflt;
}
if (!s->m.mounted_vol[0]) {
goto dflt;
}
err = ncp_dirhandle_alloc(s, ni->volNumber, ni->DosDirNum, &dh);
if (err) {
goto dflt;
}
err = ncp_get_directory_info(s, dh, &vi);
ncp_dirhandle_free(s, dh);
if (err) {
goto dflt;
}
buf->f_type = NCP_SUPER_MAGIC;
buf->f_bsize = vi.sectors_per_block * 512;
buf->f_blocks = vi.total_blocks;
buf->f_bfree = vi.free_blocks;
buf->f_bavail = vi.free_blocks;
buf->f_files = vi.total_dir_entries;
buf->f_ffree = vi.available_dir_entries;
buf->f_namelen = 12;
return 0;
/* We cannot say how much disk space is left on a mounted
NetWare Server, because free space is distributed over
volumes, and the current user might have disk quotas. So
free space is not that simple to determine. Our decision
here is to err conservatively. */
dflt:;
buf->f_type = NCP_SUPER_MAGIC;
buf->f_bsize = NCP_BLOCK_SIZE;
buf->f_blocks = 0;
buf->f_bfree = 0;
buf->f_bavail = 0;
buf->f_namelen = 12;
return 0;
}
int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
int result = 0;
__le32 info_mask;
struct nw_modify_dos_info info;
struct ncp_server *server;
result = -EIO;
server = NCP_SERVER(inode);
if (!server) /* How could this happen? */
goto out;
/* ageing the dentry to force validation */
ncp_age_dentry(server, dentry);
result = inode_change_ok(inode, attr);
if (result < 0)
goto out;
result = -EPERM;
if (((attr->ia_valid & ATTR_UID) &&
(attr->ia_uid != server->m.uid)))
goto out;
if (((attr->ia_valid & ATTR_GID) &&
(attr->ia_gid != server->m.gid)))
goto out;
if (((attr->ia_valid & ATTR_MODE) &&
(attr->ia_mode &
~(S_IFREG | S_IFDIR | S_IRWXUGO))))
goto out;
info_mask = 0;
memset(&info, 0, sizeof(info));
#if 1
if ((attr->ia_valid & ATTR_MODE) != 0)
{
umode_t newmode = attr->ia_mode;
info_mask |= DM_ATTRIBUTES;
if (S_ISDIR(inode->i_mode)) {
newmode &= server->m.dir_mode;
} else {
#ifdef CONFIG_NCPFS_EXTRAS
if (server->m.flags & NCP_MOUNT_EXTRAS) {
/* any non-default execute bit set */
if (newmode & ~server->m.file_mode & S_IXUGO)
info.attributes |= aSHARED | aSYSTEM;
/* read for group/world and not in default file_mode */
else if (newmode & ~server->m.file_mode & S_IRUGO)
info.attributes |= aSHARED;
} else
#endif
newmode &= server->m.file_mode;
}
if (newmode & S_IWUGO)
info.attributes &= ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
else
info.attributes |= (aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
#ifdef CONFIG_NCPFS_NFS_NS
if (ncp_is_nfs_extras(server, NCP_FINFO(inode)->volNumber)) {
result = ncp_modify_nfs_info(server,
NCP_FINFO(inode)->volNumber,
NCP_FINFO(inode)->dirEntNum,
attr->ia_mode, 0);
if (result != 0)
goto out;
info.attributes &= ~(aSHARED | aSYSTEM);
{
/* mark partial success */
struct iattr tmpattr;
tmpattr.ia_valid = ATTR_MODE;
tmpattr.ia_mode = attr->ia_mode;
setattr_copy(inode, &tmpattr);
mark_inode_dirty(inode);
}
}
#endif
}
#endif
/* Do SIZE before attributes, otherwise mtime together with size does not work...
*/
if ((attr->ia_valid & ATTR_SIZE) != 0) {
int written;
DPRINTK("ncpfs: trying to change size to %ld\n",
attr->ia_size);
if ((result = ncp_make_open(inode, O_WRONLY)) < 0) {
result = -EACCES;
goto out;
}
ncp_write_kernel(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle,
attr->ia_size, 0, "", &written);
/* According to ndir, the changes only take effect after
closing the file */
ncp_inode_close(inode);
result = ncp_make_closed(inode);
if (result)
goto out;
if (attr->ia_size != i_size_read(inode)) {
result = vmtruncate(inode, attr->ia_size);
if (result)
goto out;
mark_inode_dirty(inode);
}
}
if ((attr->ia_valid & ATTR_CTIME) != 0) {
info_mask |= (DM_CREATE_TIME | DM_CREATE_DATE);
ncp_date_unix2dos(attr->ia_ctime.tv_sec,
&info.creationTime, &info.creationDate);
}
if ((attr->ia_valid & ATTR_MTIME) != 0) {
info_mask |= (DM_MODIFY_TIME | DM_MODIFY_DATE);
ncp_date_unix2dos(attr->ia_mtime.tv_sec,
&info.modifyTime, &info.modifyDate);
}
if ((attr->ia_valid & ATTR_ATIME) != 0) {
__le16 dummy;
info_mask |= (DM_LAST_ACCESS_DATE);
ncp_date_unix2dos(attr->ia_atime.tv_sec,
&dummy, &info.lastAccessDate);
}
if (info_mask != 0) {
result = ncp_modify_file_or_subdir_dos_info(NCP_SERVER(inode),
inode, info_mask, &info);
if (result != 0) {
if (info_mask == (DM_CREATE_TIME | DM_CREATE_DATE)) {
/* NetWare seems not to allow this. I
do not know why. So, just tell the
user everything went fine. This is
a terrible hack, but I do not know
how to do this correctly. */
result = 0;
} else
goto out;
}
#ifdef CONFIG_NCPFS_STRONG
if ((!result) && (info_mask & DM_ATTRIBUTES))
NCP_FINFO(inode)->nwattr = info.attributes;
#endif
}
if (result)
goto out;
setattr_copy(inode, attr);
mark_inode_dirty(inode);
out:
if (result > 0)
result = -EACCES;
return result;
}
static struct dentry *ncp_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_nodev(fs_type, flags, data, ncp_fill_super);
}
static struct file_system_type ncp_fs_type = {
.owner = THIS_MODULE,
.name = "ncpfs",
.mount = ncp_mount,
.kill_sb = kill_anon_super,
.fs_flags = FS_BINARY_MOUNTDATA,
};
static int __init init_ncp_fs(void)
{
int err;
DPRINTK("ncpfs: init_ncp_fs called\n");
err = init_inodecache();
if (err)
goto out1;
err = register_filesystem(&ncp_fs_type);
if (err)
goto out;
return 0;
out:
destroy_inodecache();
out1:
return err;
}
static void __exit exit_ncp_fs(void)
{
DPRINTK("ncpfs: exit_ncp_fs called\n");
unregister_filesystem(&ncp_fs_type);
destroy_inodecache();
}
module_init(init_ncp_fs)
module_exit(exit_ncp_fs)
MODULE_LICENSE("GPL");
| gpl-2.0 |
merbanan/linuxtv | drivers/media/pci/mantis/mantis_vp1033.c | 2946 | 4604 | /*
Mantis VP-1033 driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "stv0299.h"
#include "mantis_common.h"
#include "mantis_ioc.h"
#include "mantis_dvb.h"
#include "mantis_vp1033.h"
#include "mantis_reg.h"
u8 lgtdqcs001f_inittab[] = {
0x01, 0x15,
0x02, 0x30,
0x03, 0x00,
0x04, 0x2a,
0x05, 0x85,
0x06, 0x02,
0x07, 0x00,
0x08, 0x00,
0x0c, 0x01,
0x0d, 0x81,
0x0e, 0x44,
0x0f, 0x94,
0x10, 0x3c,
0x11, 0x84,
0x12, 0xb9,
0x13, 0xb5,
0x14, 0x4f,
0x15, 0xc9,
0x16, 0x80,
0x17, 0x36,
0x18, 0xfb,
0x19, 0xcf,
0x1a, 0xbc,
0x1c, 0x2b,
0x1d, 0x27,
0x1e, 0x00,
0x1f, 0x0b,
0x20, 0xa1,
0x21, 0x60,
0x22, 0x00,
0x23, 0x00,
0x28, 0x00,
0x29, 0x28,
0x2a, 0x14,
0x2b, 0x0f,
0x2c, 0x09,
0x2d, 0x05,
0x31, 0x1f,
0x32, 0x19,
0x33, 0xfc,
0x34, 0x13,
0xff, 0xff,
};
#define MANTIS_MODEL_NAME "VP-1033"
#define MANTIS_DEV_TYPE "DVB-S/DSS"
static int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mantis_pci *mantis = fe->dvb->priv;
struct i2c_adapter *adapter = &mantis->adapter;
u8 buf[4];
u32 div;
struct i2c_msg msg = {.addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf)};
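/*
 * Divider programming sketch (our reading of the code; the byte layout
 * is assumed from similar single-chip tuner drivers, not taken from a
 * datasheet): DVB-S frequencies arrive in kHz, so dividing by 250
 * yields a 250 kHz tuning step.  buf[0]/buf[1] hold the 15-bit
 * divider, buf[2]/buf[3] are control bytes, and bit 2 of buf[3]
 * selects the band for frequencies below 1531 MHz.
 */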
div = p->frequency / 250;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x83;
buf[3] = 0xc0;
if (p->frequency < 1531000)
buf[3] |= 0x04;
else
buf[3] &= ~0x04;
if (i2c_transfer(adapter, &msg, 1) < 0) {
dprintk(MANTIS_ERROR, 1, "Write: I2C Transfer failed");
return -EIO;
}
msleep_interruptible(100);
return 0;
}
static int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
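/*
 * The aclk/bclk pairs below configure the STV0299 derived-clock
 * registers (0x13/0x14) per symbol-rate band; the values mirror other
 * stv0299-based cards (register meaning assumed, not verified against
 * a datasheet).
 */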
if (srate < 1500000) {
aclk = 0xb7;
bclk = 0x47;
} else if (srate < 3000000) {
aclk = 0xb7;
bclk = 0x4b;
} else if (srate < 7000000) {
aclk = 0xb7;
bclk = 0x4f;
} else if (srate < 14000000) {
aclk = 0xb7;
bclk = 0x53;
} else if (srate < 30000000) {
aclk = 0xb6;
bclk = 0x53;
} else if (srate < 45000000) {
aclk = 0xb4;
bclk = 0x51;
}
stv0299_writereg(fe, 0x13, aclk);
stv0299_writereg(fe, 0x14, bclk);
stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
stv0299_writereg(fe, 0x21, ratio & 0xf0);
return 0;
}
struct stv0299_config lgtdqcs001f_config = {
.demod_address = 0x68,
.inittab = lgtdqcs001f_inittab,
.mclk = 88000000UL,
.invert = 0,
.skip_reinit = 0,
.volt13_op0_op1 = STV0299_VOLT13_OP0,
.min_delay_ms = 100,
.set_symbol_rate = lgtdqcs001f_set_symbol_rate,
};
static int vp1033_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
{
struct i2c_adapter *adapter = &mantis->adapter;
int err = 0;
err = mantis_frontend_power(mantis, POWER_ON);
if (err == 0) {
mantis_frontend_soft_reset(mantis);
msleep(250);
dprintk(MANTIS_ERROR, 1, "Probing for STV0299 (DVB-S)");
fe = dvb_attach(stv0299_attach, &lgtdqcs001f_config, adapter);
if (fe) {
fe->ops.tuner_ops.set_params = lgtdqcs001f_tuner_set;
dprintk(MANTIS_ERROR, 1, "found STV0299 DVB-S frontend @ 0x%02x",
lgtdqcs001f_config.demod_address);
dprintk(MANTIS_ERROR, 1, "Mantis DVB-S STV0299 frontend attach success");
} else {
return -ENODEV;
}
} else {
dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
adapter->name,
err);
return -EIO;
}
mantis->fe = fe;
dprintk(MANTIS_ERROR, 1, "Done!");
return 0;
}
struct mantis_hwconfig vp1033_config = {
.model_name = MANTIS_MODEL_NAME,
.dev_type = MANTIS_DEV_TYPE,
.ts_size = MANTIS_TS_204,
.baud_rate = MANTIS_BAUD_9600,
.parity = MANTIS_PARITY_NONE,
.bytes = 0,
.frontend_init = vp1033_frontend_init,
.power = GPIF_A12,
.reset = GPIF_A13,
};
| gpl-2.0 |
Team-Blackout/Acer-A200-JB | net/wanrouter/wanproc.c | 2946 | 9368 | /*****************************************************************************
* wanproc.c WAN Router Module. /proc filesystem interface.
*
* This module is completely hardware-independent and provides
* access to the router using the Linux /proc filesystem.
*
* Author: Gideon Hack
*
* Copyright: (c) 1995-1999 Sangoma Technologies Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
* Jun 02, 1999 Gideon Hack Updates for Linux 2.2.X kernels.
* Jun 29, 1997 Alan Cox Merged with 1.0.3 vendor code
* Jan 29, 1997 Gene Kozin v1.0.1. Implemented /proc read routines
* Jan 30, 1997 Alan Cox Hacked around for 2.1
* Dec 13, 1996 Gene Kozin Initial version (based on Sangoma's WANPIPE)
*****************************************************************************/
#include <linux/init.h> /* __initfunc et al. */
#include <linux/stddef.h> /* offsetof(), etc. */
#include <linux/errno.h> /* return codes */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/wanrouter.h> /* WAN router API definitions */
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <asm/io.h>
#define PROC_STATS_FORMAT "%30s: %12lu\n"
/****** Defines and Macros **************************************************/
#define PROT_DECODE(prot) ((prot == WANCONFIG_FR) ? " FR" :\
(prot == WANCONFIG_X25) ? " X25" : \
(prot == WANCONFIG_PPP) ? " PPP" : \
(prot == WANCONFIG_CHDLC) ? " CHDLC": \
(prot == WANCONFIG_MPPP) ? " MPPP" : \
" Unknown" )
/****** Function Prototypes *************************************************/
#ifdef CONFIG_PROC_FS
/* Miscellaneous */
/*
* Structures for interfacing with the /proc filesystem.
* Router creates its own directory /proc/net/router with the following
* entries:
* config device configuration
* status global device statistics
* <device> entry for each WAN device
*/
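/*
 * Illustrative userspace view (entry names as registered below; the
 * output format is defined by config_show()/status_show()):
 *
 *	$ cat /proc/net/router/config
 *	$ cat /proc/net/router/status
 *	$ cat /proc/net/router/<device>    # per-device statistics
 */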
/*
* Generic /proc/net/router/<file> file and inode operations
*/
/*
* /proc/net/router
*/
static DEFINE_MUTEX(config_mutex);
static struct proc_dir_entry *proc_router;
/* Strings */
/*
* Interface functions
*/
/****** Proc filesystem entry points ****************************************/
/*
* Iterator
*/
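/*
 * Standard seq_file contract: r_start() takes config_mutex and returns
 * SEQ_START_TOKEN at position 0 so the ->show() callbacks can emit a
 * header line, r_next() walks wanrouter_router_devlist, and r_stop()
 * drops the mutex.
 */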
static void *r_start(struct seq_file *m, loff_t *pos)
__acquires(config_mutex)
{
struct wan_device *wandev;
loff_t l = *pos;
mutex_lock(&config_mutex);
if (!l--)
return SEQ_START_TOKEN;
for (wandev = wanrouter_router_devlist; l-- && wandev;
wandev = wandev->next)
;
return wandev;
}
static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
struct wan_device *wandev = v;
(*pos)++;
return (v == SEQ_START_TOKEN) ? wanrouter_router_devlist : wandev->next;
}
static void r_stop(struct seq_file *m, void *v)
__releases(config_mutex)
{
mutex_unlock(&config_mutex);
}
static int config_show(struct seq_file *m, void *v)
{
struct wan_device *p = v;
if (v == SEQ_START_TOKEN) {
seq_puts(m, "Device name | port |IRQ|DMA| mem.addr |"
"mem.size|option1|option2|option3|option4\n");
return 0;
}
if (!p->state)
return 0;
seq_printf(m, "%-15s|0x%-4X|%3u|%3u| 0x%-8lX |0x%-6X|%7u|%7u|%7u|%7u\n",
p->name, p->ioport, p->irq, p->dma, p->maddr, p->msize,
p->hw_opt[0], p->hw_opt[1], p->hw_opt[2], p->hw_opt[3]);
return 0;
}
static int status_show(struct seq_file *m, void *v)
{
struct wan_device *p = v;
if (v == SEQ_START_TOKEN) {
seq_puts(m, "Device name |protocol|station|interface|"
"clocking|baud rate| MTU |ndev|link state\n");
return 0;
}
if (!p->state)
return 0;
seq_printf(m, "%-15s|%-8s| %-7s| %-9s|%-8s|%9u|%5u|%3u |",
p->name,
PROT_DECODE(p->config_id),
p->config_id == WANCONFIG_FR ?
(p->station ? "Node" : "CPE") :
(p->config_id == WANCONFIG_X25 ?
(p->station ? "DCE" : "DTE") :
("N/A")),
p->interface ? "V.35" : "RS-232",
p->clocking ? "internal" : "external",
p->bps,
p->mtu,
p->ndev);
switch (p->state) {
case WAN_UNCONFIGURED:
seq_printf(m, "%-12s\n", "unconfigured");
break;
case WAN_DISCONNECTED:
seq_printf(m, "%-12s\n", "disconnected");
break;
case WAN_CONNECTING:
seq_printf(m, "%-12s\n", "connecting");
break;
case WAN_CONNECTED:
seq_printf(m, "%-12s\n", "connected");
break;
default:
seq_printf(m, "%-12s\n", "invalid");
break;
}
return 0;
}
static const struct seq_operations config_op = {
.start = r_start,
.next = r_next,
.stop = r_stop,
.show = config_show,
};
static const struct seq_operations status_op = {
.start = r_start,
.next = r_next,
.stop = r_stop,
.show = status_show,
};
static int config_open(struct inode *inode, struct file *file)
{
return seq_open(file, &config_op);
}
static int status_open(struct inode *inode, struct file *file)
{
return seq_open(file, &status_op);
}
static const struct file_operations config_fops = {
.owner = THIS_MODULE,
.open = config_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations status_fops = {
.owner = THIS_MODULE,
.open = status_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int wandev_show(struct seq_file *m, void *v)
{
struct wan_device *wandev = m->private;
if (wandev->magic != ROUTER_MAGIC)
return 0;
if (!wandev->state) {
seq_puts(m, "device is not configured!\n");
return 0;
}
/* Update device statistics */
if (wandev->update) {
int err = wandev->update(wandev);
if (err == -EAGAIN) {
seq_puts(m, "Device is busy!\n");
return 0;
}
if (err) {
seq_puts(m, "Device is not configured!\n");
return 0;
}
}
seq_printf(m, PROC_STATS_FORMAT,
"total packets received", wandev->stats.rx_packets);
seq_printf(m, PROC_STATS_FORMAT,
"total packets transmitted", wandev->stats.tx_packets);
seq_printf(m, PROC_STATS_FORMAT,
"total bytes received", wandev->stats.rx_bytes);
seq_printf(m, PROC_STATS_FORMAT,
"total bytes transmitted", wandev->stats.tx_bytes);
seq_printf(m, PROC_STATS_FORMAT,
"bad packets received", wandev->stats.rx_errors);
seq_printf(m, PROC_STATS_FORMAT,
"packet transmit problems", wandev->stats.tx_errors);
seq_printf(m, PROC_STATS_FORMAT,
"received frames dropped", wandev->stats.rx_dropped);
seq_printf(m, PROC_STATS_FORMAT,
"transmit frames dropped", wandev->stats.tx_dropped);
seq_printf(m, PROC_STATS_FORMAT,
"multicast packets received", wandev->stats.multicast);
seq_printf(m, PROC_STATS_FORMAT,
"transmit collisions", wandev->stats.collisions);
seq_printf(m, PROC_STATS_FORMAT,
"receive length errors", wandev->stats.rx_length_errors);
seq_printf(m, PROC_STATS_FORMAT,
"receiver overrun errors", wandev->stats.rx_over_errors);
seq_printf(m, PROC_STATS_FORMAT,
"CRC errors", wandev->stats.rx_crc_errors);
seq_printf(m, PROC_STATS_FORMAT,
"frame format errors (aborts)", wandev->stats.rx_frame_errors);
seq_printf(m, PROC_STATS_FORMAT,
"receiver fifo overrun", wandev->stats.rx_fifo_errors);
seq_printf(m, PROC_STATS_FORMAT,
"receiver missed packet", wandev->stats.rx_missed_errors);
seq_printf(m, PROC_STATS_FORMAT,
"aborted frames transmitted", wandev->stats.tx_aborted_errors);
return 0;
}
static int wandev_open(struct inode *inode, struct file *file)
{
return single_open(file, wandev_show, PDE(inode)->data);
}
static const struct file_operations wandev_fops = {
.owner = THIS_MODULE,
.open = wandev_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.unlocked_ioctl = wanrouter_ioctl,
};
/*
* Initialize router proc interface.
*/
int __init wanrouter_proc_init(void)
{
struct proc_dir_entry *p;
proc_router = proc_mkdir(ROUTER_NAME, init_net.proc_net);
if (!proc_router)
goto fail;
p = proc_create("config", S_IRUGO, proc_router, &config_fops);
if (!p)
goto fail_config;
p = proc_create("status", S_IRUGO, proc_router, &status_fops);
if (!p)
goto fail_stat;
return 0;
fail_stat:
remove_proc_entry("config", proc_router);
fail_config:
remove_proc_entry(ROUTER_NAME, init_net.proc_net);
fail:
return -ENOMEM;
}
/*
* Clean up router proc interface.
*/
void wanrouter_proc_cleanup(void)
{
remove_proc_entry("config", proc_router);
remove_proc_entry("status", proc_router);
remove_proc_entry(ROUTER_NAME, init_net.proc_net);
}
/*
* Add directory entry for WAN device.
*/
int wanrouter_proc_add(struct wan_device* wandev)
{
if (wandev->magic != ROUTER_MAGIC)
return -EINVAL;
wandev->dent = proc_create(wandev->name, S_IRUGO,
proc_router, &wandev_fops);
if (!wandev->dent)
return -ENOMEM;
wandev->dent->data = wandev;
return 0;
}
/*
* Delete directory entry for WAN device.
*/
int wanrouter_proc_delete(struct wan_device* wandev)
{
if (wandev->magic != ROUTER_MAGIC)
return -EINVAL;
remove_proc_entry(wandev->name, proc_router);
return 0;
}
#else
/*
* No /proc - output stubs
*/
int __init wanrouter_proc_init(void)
{
return 0;
}
void wanrouter_proc_cleanup(void)
{
}
int wanrouter_proc_add(struct wan_device *wandev)
{
return 0;
}
int wanrouter_proc_delete(struct wan_device *wandev)
{
return 0;
}
#endif
/*
* End
*/
| gpl-2.0 |
poparteu/linux-kernel-hal | drivers/clk/spear/clk-aux-synth.c | 3714 | 4939 | /*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*
* Auxiliary Synthesizer clock implementation
*/
#define pr_fmt(fmt) "clk-aux-synth: " fmt
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include "clk.h"
/*
* DOC: Auxiliary Synthesizer clock
*
* Aux synth gives rate for different values of eq, x and y
*
* Fout from synthesizer can be given from two equations:
* Fout1 = (Fin * X/Y)/2 EQ1
* Fout2 = Fin * X/Y EQ2
*/
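/*
 * Worked example (illustrative numbers, not from a datasheet): with
 * Fin = 24 MHz, X = 25 and Y = 16, EQ2 gives 24 MHz * 25/16 = 37.5 MHz
 * while EQ1 gives half of that, 18.75 MHz.
 */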
#define to_clk_aux(_hw) container_of(_hw, struct clk_aux, hw)
static struct aux_clk_masks default_aux_masks = {
.eq_sel_mask = AUX_EQ_SEL_MASK,
.eq_sel_shift = AUX_EQ_SEL_SHIFT,
.eq1_mask = AUX_EQ1_SEL,
.eq2_mask = AUX_EQ2_SEL,
.xscale_sel_mask = AUX_XSCALE_MASK,
.xscale_sel_shift = AUX_XSCALE_SHIFT,
.yscale_sel_mask = AUX_YSCALE_MASK,
.yscale_sel_shift = AUX_YSCALE_SHIFT,
.enable_bit = AUX_SYNT_ENB,
};
static unsigned long aux_calc_rate(struct clk_hw *hw, unsigned long prate,
int index)
{
struct clk_aux *aux = to_clk_aux(hw);
struct aux_rate_tbl *rtbl = aux->rtbl;
u8 eq = rtbl[index].eq ? 1 : 2;
return (((prate / 10000) * rtbl[index].xscale) /
(rtbl[index].yscale * eq)) * 10000;
}
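/*
 * Note on the arithmetic above: pre-dividing prate by 10000 keeps
 * prate * xscale within 32 bits, at the cost of rounding the result to
 * 10 kHz granularity (our reading of the code; not documented
 * upstream).
 */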
static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate,
unsigned long *prate)
{
struct clk_aux *aux = to_clk_aux(hw);
int unused;
return clk_round_rate_index(hw, drate, *prate, aux_calc_rate,
aux->rtbl_cnt, &unused);
}
static unsigned long clk_aux_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_aux *aux = to_clk_aux(hw);
unsigned int num = 1, den = 1, val, eqn;
unsigned long flags = 0;
if (aux->lock)
spin_lock_irqsave(aux->lock, flags);
val = readl_relaxed(aux->reg);
if (aux->lock)
spin_unlock_irqrestore(aux->lock, flags);
eqn = (val >> aux->masks->eq_sel_shift) & aux->masks->eq_sel_mask;
if (eqn == aux->masks->eq1_mask)
den = 2;
/* calculate numerator */
num = (val >> aux->masks->xscale_sel_shift) &
aux->masks->xscale_sel_mask;
/* calculate denominator */
den *= (val >> aux->masks->yscale_sel_shift) &
aux->masks->yscale_sel_mask;
if (!den)
return 0;
return (((parent_rate / 10000) * num) / den) * 10000;
}
/* Configures new clock rate of aux */
static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
unsigned long prate)
{
struct clk_aux *aux = to_clk_aux(hw);
struct aux_rate_tbl *rtbl = aux->rtbl;
unsigned long val, flags = 0;
int i;
clk_round_rate_index(hw, drate, prate, aux_calc_rate, aux->rtbl_cnt,
&i);
if (aux->lock)
spin_lock_irqsave(aux->lock, flags);
val = readl_relaxed(aux->reg) &
~(aux->masks->eq_sel_mask << aux->masks->eq_sel_shift);
val |= (rtbl[i].eq & aux->masks->eq_sel_mask) <<
aux->masks->eq_sel_shift;
val &= ~(aux->masks->xscale_sel_mask << aux->masks->xscale_sel_shift);
val |= (rtbl[i].xscale & aux->masks->xscale_sel_mask) <<
aux->masks->xscale_sel_shift;
val &= ~(aux->masks->yscale_sel_mask << aux->masks->yscale_sel_shift);
val |= (rtbl[i].yscale & aux->masks->yscale_sel_mask) <<
aux->masks->yscale_sel_shift;
writel_relaxed(val, aux->reg);
if (aux->lock)
spin_unlock_irqrestore(aux->lock, flags);
return 0;
}
static struct clk_ops clk_aux_ops = {
.recalc_rate = clk_aux_recalc_rate,
.round_rate = clk_aux_round_rate,
.set_rate = clk_aux_set_rate,
};
struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
const char *parent_name, unsigned long flags, void __iomem *reg,
struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk)
{
struct clk_aux *aux;
struct clk_init_data init;
struct clk *clk;
if (!aux_name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
pr_err("Invalid arguments passed");
return ERR_PTR(-EINVAL);
}
aux = kzalloc(sizeof(*aux), GFP_KERNEL);
if (!aux) {
pr_err("could not allocate aux clk\n");
return ERR_PTR(-ENOMEM);
}
/* struct clk_aux assignments */
if (!masks)
aux->masks = &default_aux_masks;
else
aux->masks = masks;
aux->reg = reg;
aux->rtbl = rtbl;
aux->rtbl_cnt = rtbl_cnt;
aux->lock = lock;
aux->hw.init = &init;
init.name = aux_name;
init.ops = &clk_aux_ops;
init.flags = flags;
init.parent_names = &parent_name;
init.num_parents = 1;
clk = clk_register(NULL, &aux->hw);
if (IS_ERR_OR_NULL(clk))
goto free_aux;
if (gate_name) {
struct clk *tgate_clk;
tgate_clk = clk_register_gate(NULL, gate_name, aux_name,
CLK_SET_RATE_PARENT, reg,
aux->masks->enable_bit, 0, lock);
if (IS_ERR_OR_NULL(tgate_clk))
goto free_aux;
if (gate_clk)
*gate_clk = tgate_clk;
}
return clk;
free_aux:
kfree(aux);
pr_err("clk register failed\n");
return NULL;
}
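/*
 * Illustrative usage (hypothetical names, not taken from the SPEAr
 * platform code):
 *
 *	static struct aux_rate_tbl uart_rtbl[] = {
 *		{ .xscale = 5, .yscale = 12, .eq = 0 },
 *	};
 *	struct clk *gate, *clk;
 *
 *	clk = clk_register_aux("uart_syn", "uart_syn_gate", "vco1div2",
 *			0, base + UART_CLK_SYNT, NULL, uart_rtbl,
 *			ARRAY_SIZE(uart_rtbl), &_lock, &gate);
 */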
| gpl-2.0 |
lollipop-og/android_kernel_geehrc | arch/arm/mach-imx/eukrea_mbimxsd-baseboard.c | 4738 | 4063 | /*
* Copyright (C) 2010 Eric Benard - eric@eukrea.com
*
* Based on pcm970-baseboard.c which is :
* Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-mx51.h>
#include "devices-imx51.h"
static iomux_v3_cfg_t eukrea_mbimxsd_pads[] = {
/* LED */
MX51_PAD_NANDF_D10__GPIO3_30,
/* SWITCH */
NEW_PAD_CTRL(MX51_PAD_NANDF_D9__GPIO3_31, PAD_CTL_PUS_22K_UP |
PAD_CTL_PKE | PAD_CTL_SRE_FAST |
PAD_CTL_DSE_HIGH | PAD_CTL_PUE | PAD_CTL_HYS),
/* UART2 */
MX51_PAD_UART2_RXD__UART2_RXD,
MX51_PAD_UART2_TXD__UART2_TXD,
/* UART 3 */
MX51_PAD_UART3_RXD__UART3_RXD,
MX51_PAD_UART3_TXD__UART3_TXD,
MX51_PAD_KEY_COL4__UART3_RTS,
MX51_PAD_KEY_COL5__UART3_CTS,
/* SD */
MX51_PAD_SD1_CMD__SD1_CMD,
MX51_PAD_SD1_CLK__SD1_CLK,
MX51_PAD_SD1_DATA0__SD1_DATA0,
MX51_PAD_SD1_DATA1__SD1_DATA1,
MX51_PAD_SD1_DATA2__SD1_DATA2,
MX51_PAD_SD1_DATA3__SD1_DATA3,
/* SD1 CD */
NEW_PAD_CTRL(MX51_PAD_GPIO1_0__SD1_CD, PAD_CTL_PUS_22K_UP |
PAD_CTL_PKE | PAD_CTL_SRE_FAST |
PAD_CTL_DSE_HIGH | PAD_CTL_PUE | PAD_CTL_HYS),
};
#define GPIO_LED1 IMX_GPIO_NR(3, 30)
#define GPIO_SWITCH1 IMX_GPIO_NR(3, 31)
static const struct gpio_led eukrea_mbimxsd_leds[] __initconst = {
{
.name = "led1",
.default_trigger = "heartbeat",
.active_low = 1,
.gpio = GPIO_LED1,
},
};
static const struct gpio_led_platform_data
eukrea_mbimxsd_led_info __initconst = {
.leds = eukrea_mbimxsd_leds,
.num_leds = ARRAY_SIZE(eukrea_mbimxsd_leds),
};
static struct gpio_keys_button eukrea_mbimxsd_gpio_buttons[] = {
{
.gpio = GPIO_SWITCH1,
.code = BTN_0,
.desc = "BP1",
.active_low = 1,
.wakeup = 1,
},
};
static const struct gpio_keys_platform_data
eukrea_mbimxsd_button_data __initconst = {
.buttons = eukrea_mbimxsd_gpio_buttons,
.nbuttons = ARRAY_SIZE(eukrea_mbimxsd_gpio_buttons),
};
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
static struct i2c_board_info eukrea_mbimxsd_i2c_devices[] = {
{
I2C_BOARD_INFO("tlv320aic23", 0x1a),
},
};
/*
* system init for baseboard usage. Will be called by cpuimx51sd init.
*
* Add the platform devices present on this baseboard and initialize
* them from the CPU side as far as required to use them later on.
*/
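/*
 * The request/direction/free sequence used for the LED and switch
 * GPIOs below sets a sane initial state and then releases the lines so
 * that the leds-gpio and gpio-keys drivers registered afterwards can
 * claim them (our reading of the intent).
 */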
void __init eukrea_mbimxsd51_baseboard_init(void)
{
if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
ARRAY_SIZE(eukrea_mbimxsd_pads)))
printk(KERN_ERR "error setting mbimxsd pads !\n");
imx51_add_imx_uart(1, NULL);
imx51_add_imx_uart(2, &uart_pdata);
imx51_add_sdhci_esdhc_imx(0, NULL);
gpio_request(GPIO_LED1, "LED1");
gpio_direction_output(GPIO_LED1, 1);
gpio_free(GPIO_LED1);
gpio_request(GPIO_SWITCH1, "SWITCH1");
gpio_direction_input(GPIO_SWITCH1);
gpio_free(GPIO_SWITCH1);
i2c_register_board_info(0, eukrea_mbimxsd_i2c_devices,
ARRAY_SIZE(eukrea_mbimxsd_i2c_devices));
gpio_led_register_device(-1, &eukrea_mbimxsd_led_info);
imx_add_gpio_keys(&eukrea_mbimxsd_button_data);
}
| gpl-2.0 |
kbc-developers/android_kernel_samsung_hlte | arch/arm/mach-s3c24xx/common-s3c2443.c | 4738 | 15853 | /*
* Common code for SoCs starting with the S3C2443
*
* Copyright (c) 2007, 2010 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <mach/regs-s3c2443-clock.h>
#include <plat/clock.h>
#include <plat/clock-clksrc.h>
#include <plat/cpu.h>
#include <plat/cpu-freq.h>
static int s3c2443_gate(void __iomem *reg, struct clk *clk, int enable)
{
u32 ctrlbit = clk->ctrlbit;
u32 con = __raw_readl(reg);
if (enable)
con |= ctrlbit;
else
con &= ~ctrlbit;
__raw_writel(con, reg);
return 0;
}
int s3c2443_clkcon_enable_h(struct clk *clk, int enable)
{
return s3c2443_gate(S3C2443_HCLKCON, clk, enable);
}
int s3c2443_clkcon_enable_p(struct clk *clk, int enable)
{
return s3c2443_gate(S3C2443_PCLKCON, clk, enable);
}
int s3c2443_clkcon_enable_s(struct clk *clk, int enable)
{
return s3c2443_gate(S3C2443_SCLKCON, clk, enable);
}
/* mpllref is a direct descendant of clk_xtal by default, but it is not
* elided as the EPLL can be either sourced by the XTAL or EXTCLK and as
* such directly equating the two source clocks is impossible.
*/
static struct clk clk_mpllref = {
.name = "mpllref",
.parent = &clk_xtal,
};
static struct clk *clk_epllref_sources[] = {
[0] = &clk_mpllref,
[1] = &clk_mpllref,
[2] = &clk_xtal,
[3] = &clk_ext,
};
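/* Indices 0 and 1 of the 2-bit source-select field both map to
 * mpllref above; the duplicated entries mirror that hardware encoding
 * (an assumption drawn from the table itself, not from a datasheet).
 */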
struct clksrc_clk clk_epllref = {
.clk = {
.name = "epllref",
},
.sources = &(struct clksrc_sources) {
.sources = clk_epllref_sources,
.nr_sources = ARRAY_SIZE(clk_epllref_sources),
},
.reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 7 },
};
/* esysclk
*
* this is sourced from either the EPLL or the EPLLref clock
*/
static struct clk *clk_sysclk_sources[] = {
[0] = &clk_epllref.clk,
[1] = &clk_epll,
};
struct clksrc_clk clk_esysclk = {
.clk = {
.name = "esysclk",
.parent = &clk_epll,
},
.sources = &(struct clksrc_sources) {
.sources = clk_sysclk_sources,
.nr_sources = ARRAY_SIZE(clk_sysclk_sources),
},
.reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 6 },
};
static unsigned long s3c2443_getrate_mdivclk(struct clk *clk)
{
unsigned long parent_rate = clk_get_rate(clk->parent);
unsigned long div = __raw_readl(S3C2443_CLKDIV0);
div &= S3C2443_CLKDIV0_EXTDIV_MASK;
div >>= (S3C2443_CLKDIV0_EXTDIV_SHIFT-1); /* x2 */
return parent_rate / (div + 1);
}
static struct clk clk_mdivclk = {
.name = "mdivclk",
.parent = &clk_mpllref,
.ops = &(struct clk_ops) {
.get_rate = s3c2443_getrate_mdivclk,
},
};
static struct clk *clk_msysclk_sources[] = {
[0] = &clk_mpllref,
[1] = &clk_mpll,
[2] = &clk_mdivclk,
[3] = &clk_mpllref,
};
struct clksrc_clk clk_msysclk = {
.clk = {
.name = "msysclk",
.parent = &clk_xtal,
},
.sources = &(struct clksrc_sources) {
.sources = clk_msysclk_sources,
.nr_sources = ARRAY_SIZE(clk_msysclk_sources),
},
.reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 3 },
};
/* prediv
*
* this divides the msysclk down to pass to h/p/etc.
*/
static unsigned long s3c2443_prediv_getrate(struct clk *clk)
{
unsigned long rate = clk_get_rate(clk->parent);
unsigned long clkdiv0 = __raw_readl(S3C2443_CLKDIV0);
clkdiv0 &= S3C2443_CLKDIV0_PREDIV_MASK;
clkdiv0 >>= S3C2443_CLKDIV0_PREDIV_SHIFT;
return rate / (clkdiv0 + 1);
}
static struct clk clk_prediv = {
.name = "prediv",
.parent = &clk_msysclk.clk,
.ops = &(struct clk_ops) {
.get_rate = s3c2443_prediv_getrate,
},
};
/* hclk divider
*
* divides the prediv and provides the hclk.
*/
static unsigned long s3c2443_hclkdiv_getrate(struct clk *clk)
{
unsigned long rate = clk_get_rate(clk->parent);
unsigned long clkdiv0 = __raw_readl(S3C2443_CLKDIV0);
clkdiv0 &= S3C2443_CLKDIV0_HCLKDIV_MASK;
return rate / (clkdiv0 + 1);
}
static struct clk_ops clk_h_ops = {
.get_rate = s3c2443_hclkdiv_getrate,
};
/* pclk divider
*
* divides the hclk and provides the pclk.
*/
static unsigned long s3c2443_pclkdiv_getrate(struct clk *clk)
{
unsigned long rate = clk_get_rate(clk->parent);
unsigned long clkdiv0 = __raw_readl(S3C2443_CLKDIV0);
clkdiv0 = ((clkdiv0 & S3C2443_CLKDIV0_HALF_PCLK) ? 1 : 0);
return rate / (clkdiv0 + 1);
}
static struct clk_ops clk_p_ops = {
.get_rate = s3c2443_pclkdiv_getrate,
};
/* armdiv
*
* this clock is sourced from msysclk and can have a number of
* divider values applied to it to then be fed into armclk.
*/
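/*
 * Worked example (illustrative): with msysclk at 800 MHz and a divider
 * table of {1, 2, 3, 4}, a cpufreq request for 266666000 Hz matches
 * div = 3 (800 MHz / 3 ~ 266.67 MHz); the "/ 1000 * 1000" in the match
 * loops below exists precisely because cpufreq tables store 266 MHz as
 * 266666000 rather than 266666666.
 */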
static unsigned int *armdiv;
static int nr_armdiv;
static int armdivmask;
static unsigned long s3c2443_armclk_roundrate(struct clk *clk,
unsigned long rate)
{
unsigned long parent = clk_get_rate(clk->parent);
unsigned long calc;
unsigned best = 256; /* bigger than any value */
unsigned div;
int ptr;
if (!nr_armdiv)
return -EINVAL;
for (ptr = 0; ptr < nr_armdiv; ptr++) {
div = armdiv[ptr];
if (div) {
/* cpufreq provides 266MHz as 266666000 not 266666666 */
calc = (parent / div / 1000) * 1000;
if (calc <= rate && div < best)
best = div;
}
}
return parent / best;
}
static unsigned long s3c2443_armclk_getrate(struct clk *clk)
{
unsigned long rate = clk_get_rate(clk->parent);
unsigned long clkcon0;
int val;
if (!nr_armdiv || !armdivmask)
return -EINVAL;
clkcon0 = __raw_readl(S3C2443_CLKDIV0);
clkcon0 &= armdivmask;
val = clkcon0 >> S3C2443_CLKDIV0_ARMDIV_SHIFT;
return rate / armdiv[val];
}
static int s3c2443_armclk_setrate(struct clk *clk, unsigned long rate)
{
unsigned long parent = clk_get_rate(clk->parent);
unsigned long calc;
unsigned div;
unsigned best = 256; /* bigger than any value */
int ptr;
int val = -1;
if (!nr_armdiv || !armdivmask)
return -EINVAL;
for (ptr = 0; ptr < nr_armdiv; ptr++) {
div = armdiv[ptr];
if (div) {
/* cpufreq provides 266MHz as 266666000 not 266666666 */
calc = (parent / div / 1000) * 1000;
if (calc <= rate && div < best) {
best = div;
val = ptr;
}
}
}
if (val >= 0) {
unsigned long clkcon0;
clkcon0 = __raw_readl(S3C2443_CLKDIV0);
clkcon0 &= ~armdivmask;
clkcon0 |= val << S3C2443_CLKDIV0_ARMDIV_SHIFT;
__raw_writel(clkcon0, S3C2443_CLKDIV0);
}
return (val == -1) ? -EINVAL : 0;
}
static struct clk clk_armdiv = {
.name = "armdiv",
.parent = &clk_msysclk.clk,
.ops = &(struct clk_ops) {
.round_rate = s3c2443_armclk_roundrate,
.get_rate = s3c2443_armclk_getrate,
.set_rate = s3c2443_armclk_setrate,
},
};
/* armclk
*
* this is the clock fed into the ARM core itself, from armdiv or from hclk.
*/
static struct clk *clk_arm_sources[] = {
[0] = &clk_armdiv,
[1] = &clk_h,
};
static struct clksrc_clk clk_arm = {
.clk = {
.name = "armclk",
},
.sources = &(struct clksrc_sources) {
.sources = clk_arm_sources,
.nr_sources = ARRAY_SIZE(clk_arm_sources),
},
.reg_src = { .reg = S3C2443_CLKDIV0, .size = 1, .shift = 13 },
};
/* usbhost
*
* usb host bus-clock, usually 48MHz to provide USB bus clock timing
*/
static struct clksrc_clk clk_usb_bus_host = {
.clk = {
.name = "usb-bus-host-parent",
.parent = &clk_esysclk.clk,
.ctrlbit = S3C2443_SCLKCON_USBHOST,
.enable = s3c2443_clkcon_enable_s,
},
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 4 },
};
/* common clksrc clocks */
static struct clksrc_clk clksrc_clks[] = {
{
/* camera interface bus-clock, divided down from esysclk */
.clk = {
.name = "camif-upll", /* same as 2440 name */
.parent = &clk_esysclk.clk,
.ctrlbit = S3C2443_SCLKCON_CAMCLK,
.enable = s3c2443_clkcon_enable_s,
},
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 26 },
}, {
.clk = {
.name = "display-if",
.parent = &clk_esysclk.clk,
.ctrlbit = S3C2443_SCLKCON_DISPCLK,
.enable = s3c2443_clkcon_enable_s,
},
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 8, .shift = 16 },
},
};
static struct clksrc_clk clk_esys_uart = {
/* UART baud-rate clock sourced from esysclk via a divisor */
.clk = {
.name = "uartclk",
.parent = &clk_esysclk.clk,
},
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 8 },
};
static struct clk clk_i2s_ext = {
.name = "i2s-ext",
};
/* i2s_eplldiv
*
* This clock is the output from the I2S divisor of ESYSCLK, and is separate
* from the mux that comes after it (cannot merge into one single clock)
*/
static struct clksrc_clk clk_i2s_eplldiv = {
.clk = {
.name = "i2s-eplldiv",
.parent = &clk_esysclk.clk,
},
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 12, },
};
/* i2s-ref
*
* i2s bus reference clock, selectable from external, esysclk or epllref
*
* Note, this used to be two clocks, but was compressed into one.
*/
static struct clk *clk_i2s_srclist[] = {
[0] = &clk_i2s_eplldiv.clk,
[1] = &clk_i2s_ext,
[2] = &clk_epllref.clk,
[3] = &clk_epllref.clk,
};
static struct clksrc_clk clk_i2s = {
.clk = {
.name = "i2s-if",
.ctrlbit = S3C2443_SCLKCON_I2SCLK,
.enable = s3c2443_clkcon_enable_s,
},
.sources = &(struct clksrc_sources) {
.sources = clk_i2s_srclist,
.nr_sources = ARRAY_SIZE(clk_i2s_srclist),
},
.reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 14 },
};
static struct clk init_clocks_off[] = {
{
.name = "iis",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_IIS,
}, {
.name = "hsspi",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_HSSPI,
}, {
.name = "adc",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_ADC,
}, {
.name = "i2c",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_IIC,
}
};
static struct clk init_clocks[] = {
{
.name = "dma",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA0,
}, {
.name = "dma",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA1,
}, {
.name = "dma",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA2,
}, {
.name = "dma",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA3,
}, {
.name = "dma",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA4,
}, {
.name = "dma",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA5,
}, {
.name = "gpio",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_GPIO,
}, {
.name = "usb-host",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_USBH,
}, {
.name = "usb-device",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_USBD,
}, {
.name = "lcd",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_LCDC,
}, {
.name = "timers",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_PWMT,
}, {
.name = "cfc",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_CFC,
}, {
.name = "ssmc",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_SSMC,
}, {
.name = "uart",
.devname = "s3c2440-uart.0",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_UART0,
}, {
.name = "uart",
.devname = "s3c2440-uart.1",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_UART1,
}, {
.name = "uart",
.devname = "s3c2440-uart.2",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_UART2,
}, {
.name = "uart",
.devname = "s3c2440-uart.3",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_UART3,
}, {
.name = "rtc",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_RTC,
}, {
.name = "watchdog",
.parent = &clk_p,
.ctrlbit = S3C2443_PCLKCON_WDT,
}, {
.name = "ac97",
.parent = &clk_p,
.ctrlbit = S3C2443_PCLKCON_AC97,
}, {
.name = "nand",
.parent = &clk_h,
}, {
.name = "usb-bus-host",
.parent = &clk_usb_bus_host.clk,
}
};
static struct clk hsmmc1_clk = {
.name = "hsmmc",
.devname = "s3c-sdhci.1",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_HSMMC,
};
/* EPLLCON compatible enough to get on/off information */
void __init_or_cpufreq s3c2443_common_setup_clocks(pll_fn get_mpll)
{
unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
unsigned long mpllcon = __raw_readl(S3C2443_MPLLCON);
struct clk *xtal_clk;
unsigned long xtal;
unsigned long pll;
int ptr;
xtal_clk = clk_get(NULL, "xtal");
xtal = clk_get_rate(xtal_clk);
clk_put(xtal_clk);
pll = get_mpll(mpllcon, xtal);
clk_msysclk.clk.rate = pll;
clk_mpll.rate = pll;
printk("CPU: MPLL %s %ld.%03ld MHz, cpu %ld.%03ld MHz, mem %ld.%03ld MHz, pclk %ld.%03ld MHz\n",
(mpllcon & S3C2443_PLLCON_OFF) ? "off" : "on",
print_mhz(pll), print_mhz(clk_get_rate(&clk_armdiv)),
print_mhz(clk_get_rate(&clk_h)),
print_mhz(clk_get_rate(&clk_p)));
for (ptr = 0; ptr < ARRAY_SIZE(clksrc_clks); ptr++)
s3c_set_clksrc(&clksrc_clks[ptr], true);
/* ensure usb bus clock is within correct rate of 48MHz */
if (clk_get_rate(&clk_usb_bus_host.clk) != (48 * 1000 * 1000)) {
printk(KERN_INFO "Warning: USB host bus not at 48MHz\n");
clk_set_rate(&clk_usb_bus_host.clk, 48*1000*1000);
}
printk("CPU: EPLL %s %ld.%03ld MHz, usb-bus %ld.%03ld MHz\n",
(epllcon & S3C2443_PLLCON_OFF) ? "off" : "on",
print_mhz(clk_get_rate(&clk_epll)),
print_mhz(clk_get_rate(&clk_usb_bus)));
}
static struct clk *clks[] __initdata = {
&clk_prediv,
&clk_mpllref,
&clk_mdivclk,
&clk_ext,
&clk_epll,
&clk_usb_bus,
&clk_armdiv,
&hsmmc1_clk,
};
static struct clksrc_clk *clksrcs[] __initdata = {
&clk_i2s_eplldiv,
&clk_i2s,
&clk_usb_bus_host,
&clk_epllref,
&clk_esysclk,
&clk_msysclk,
&clk_arm,
};
static struct clk_lookup s3c2443_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_esys_uart.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &hsmmc1_clk),
};
void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll,
unsigned int *divs, int nr_divs,
int divmask)
{
int ptr;
armdiv = divs;
nr_armdiv = nr_divs;
armdivmask = divmask;
/* s3c2443 parents h clock from prediv */
clk_h.parent = &clk_prediv;
clk_h.ops = &clk_h_ops;
/* and p clock from h clock */
clk_p.parent = &clk_h;
clk_p.ops = &clk_p_ops;
clk_usb_bus.parent = &clk_usb_bus_host.clk;
clk_epll.parent = &clk_epllref.clk;
s3c24xx_register_baseclocks(xtal);
s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
s3c_register_clksrc(clksrcs[ptr], 1);
s3c_register_clksrc(clksrc_clks, ARRAY_SIZE(clksrc_clks));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
/* See s3c2443/etc notes on disabling clocks at init time */
s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
clkdev_add_table(s3c2443_clk_lookup, ARRAY_SIZE(s3c2443_clk_lookup));
s3c2443_common_setup_clocks(get_mpll);
}
| gpl-2.0 |