repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
niranjan94/eclipse_kernel_htc_pico | arch/m68k/sun3/intersil.c | 4778 | 1746 | /*
* arch/m68k/sun3/intersil.c
*
* basic routines for accessing the intersil clock within the sun3 machines
*
* started 11/12/1999 Sam Creasey
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/rtc.h>
#include <asm/errno.h>
#include <asm/system.h>
#include <asm/rtc.h>
#include <asm/intersil.h>
/* bits to set for start/run of the intersil */
#define STOP_VAL (INTERSIL_STOP | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE)
#define START_VAL (INTERSIL_RUN | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE)
/* does this need to be implemented? */
/* does this need to be implemented? */
unsigned long sun3_gettimeoffset(void)
{
	/* Stub: always report a fixed offset of one unit. */
	unsigned long offset = 1;

	return offset;
}
/* get/set hwclock */
/*
 * sun3_hwclk - get or set the intersil hardware clock
 * @set: non-zero to program the clock from @t, zero to read it into @t
 * @t:   rtc_time buffer to read from / write into
 *
 * The clock is stopped around the register accesses and restarted
 * afterwards, with local interrupts disabled for the whole sequence so
 * the chip cannot tick mid-transfer.  Always returns 0.
 */
int sun3_hwclk(int set, struct rtc_time *t)
{
	volatile struct intersil_dt *todintersil;
	unsigned long flags;

	/* The date/time counter block lives at a fixed offset inside the
	 * intersil register window. */
	todintersil = (struct intersil_dt *) &intersil_clock->counter;

	local_irq_save(flags);

	/* Halt the clock while the counters are accessed. */
	intersil_clock->cmd_reg = STOP_VAL;

	/* set or read the clock */
	if(set) {
		todintersil->csec = 0;
		todintersil->hour = t->tm_hour;
		todintersil->minute = t->tm_min;
		todintersil->second = t->tm_sec;
		todintersil->month = t->tm_mon;
		todintersil->day = t->tm_mday;
		/* tm_year is years since 1900; the chip register appears to
		 * hold years since 1968, hence the -68/+68 below —
		 * NOTE(review): confirm the chip's epoch before changing. */
		todintersil->year = t->tm_year - 68;
		todintersil->weekday = t->tm_wday;
	} else {
		/* read clock */
		/* NOTE(review): tm_sec is first loaded from csec and then
		 * immediately overwritten from 'second' below.  The csec
		 * access is a volatile read and may be kept deliberately
		 * for a hardware side effect — confirm before removing. */
		t->tm_sec = todintersil->csec;
		t->tm_hour = todintersil->hour;
		t->tm_min = todintersil->minute;
		t->tm_sec = todintersil->second;
		t->tm_mon = todintersil->month;
		t->tm_mday = todintersil->day;
		t->tm_year = todintersil->year + 68;
		t->tm_wday = todintersil->weekday;
	}

	/* Restart the clock. */
	intersil_clock->cmd_reg = START_VAL;

	local_irq_restore(flags);

	return 0;
}
| gpl-2.0 |
CyanogenMod/android_kernel_sony_tianchi | drivers/clocksource/i8253.c | 5034 | 4934 | /*
* i8253 PIT clocksource
*/
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/timex.h>
#include <linux/module.h>
#include <linux/i8253.h>
#include <linux/smp.h>
/*
* Protects access to I/O ports
*
* 0040-0043 : timer0, i8253 / i8254
* 0061-0061 : NMI Control Register which contains two speaker control bits.
*/
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
#ifdef CONFIG_CLKSRC_I8253
/*
* Since the PIT overflows every tick, its not very useful
* to just read by itself. So use jiffies to emulate a free
* running counter:
*/
/*
 * i8253_read - clocksource read callback for the PIT
 * @cs: clocksource being read (unused)
 *
 * The PIT channel-0 counter overflows every tick, so a free-running
 * cycle value is synthesized from jiffies plus the current downcount.
 * Takes i8253_lock with IRQs disabled; old_count/old_jifs carry state
 * between calls to keep the result monotonic within one jiffy.
 */
static cycle_t i8253_read(struct clocksource *cs)
{
	static int old_count;	/* last latched count */
	static u32 old_jifs;	/* jiffies at the time of the last read */
	unsigned long flags;
	int count;
	u32 jifs;

	raw_spin_lock_irqsave(&i8253_lock, flags);
	/*
	 * Although our caller may have the read side of xtime_lock,
	 * this is now a seqlock, and we are cheating in this routine
	 * by having side effects on state that we cannot undo if
	 * there is a collision on the seqlock and our caller has to
	 * retry. (Namely, old_jifs and old_count.) So we must treat
	 * jiffies as volatile despite the lock. We read jiffies
	 * before latching the timer count to guarantee that although
	 * the jiffies value might be older than the count (that is,
	 * the counter may underflow between the last point where
	 * jiffies was incremented and the point where we latch the
	 * count), it cannot be newer.
	 */
	jifs = jiffies;
	outb_p(0x00, PIT_MODE);	/* latch the count ASAP */
	count = inb_p(PIT_CH0);	/* read the latched count */
	count |= inb_p(PIT_CH0) << 8;

	/* VIA686a test code... reset the latch if count > max + 1 */
	if (count > PIT_LATCH) {
		outb_p(0x34, PIT_MODE);
		outb_p(PIT_LATCH & 0xff, PIT_CH0);
		outb_p(PIT_LATCH >> 8, PIT_CH0);
		count = PIT_LATCH - 1;
	}

	/*
	 * It's possible for count to appear to go the wrong way for a
	 * couple of reasons:
	 *
	 *  1. The timer counter underflows, but we haven't handled the
	 *     resulting interrupt and incremented jiffies yet.
	 *  2. Hardware problem with the timer, not giving us continuous time,
	 *     the counter does small "jumps" upwards on some Pentium systems,
	 *     (see c't 95/10 page 335 for Neptun bug.)
	 *
	 * Previous attempts to handle these cases intelligently were
	 * buggy, so we just do the simple thing now.
	 */
	if (count > old_count && jifs == old_jifs)
		count = old_count;

	old_count = count;
	old_jifs = jifs;

	raw_spin_unlock_irqrestore(&i8253_lock, flags);

	/* The PIT counts down; convert to an up-counting cycle value. */
	count = (PIT_LATCH - 1) - count;

	return (cycle_t)(jifs * PIT_LATCH) + count;
}
/* PIT clocksource descriptor: low rating (110) so that any better
 * clocksource (TSC, HPET, ...) is preferred when available. */
static struct clocksource i8253_cs = {
	.name		= "pit",
	.rating		= 110,
	.read		= i8253_read,
	.mask		= CLOCKSOURCE_MASK(32),
};
/*
 * clocksource_i8253_init - register the PIT clocksource
 *
 * Registers i8253_cs at the fixed PIT input frequency.  Returns the
 * clocksource core's result (0 on success, negative error otherwise).
 */
int __init clocksource_i8253_init(void)
{
	return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE);
}
#endif
#ifdef CONFIG_CLKEVT_I8253
/*
* Initialize the PIT timer.
*
* This is also called after resume to bring the PIT into operation again.
*/
/*
 * Initialize the PIT timer.
 *
 * This is also called after resume to bring the PIT into operation again.
 */
static void init_pit_timer(enum clock_event_mode mode,
			   struct clock_event_device *evt)
{
	raw_spin_lock(&i8253_lock);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* binary, mode 2, LSB/MSB, ch 0 */
		outb_p(0x34, PIT_MODE);
		outb_p(PIT_LATCH & 0xff , PIT_CH0);	/* LSB */
		outb_p(PIT_LATCH >> 8 , PIT_CH0);	/* MSB */
		break;

	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		/* Only quiesce the channel if it was actually running;
		 * programming mode 0 with a zero count stops the tick. */
		if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
		    evt->mode == CLOCK_EVT_MODE_ONESHOT) {
			outb_p(0x30, PIT_MODE);
			outb_p(0, PIT_CH0);
			outb_p(0, PIT_CH0);
		}
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		/* One shot setup; the count is loaded later by
		 * pit_next_event(). */
		outb_p(0x38, PIT_MODE);
		break;

	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here */
		break;
	}
	raw_spin_unlock(&i8253_lock);
}
/*
* Program the next event in oneshot mode
*
* Delta is given in PIT ticks
*/
/*
 * Program the next event in oneshot mode
 *
 * Delta is given in PIT ticks
 */
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
	raw_spin_lock(&i8253_lock);
	/* Loading LSB then MSB arms the previously selected one-shot mode. */
	outb_p(delta & 0xff , PIT_CH0);	/* LSB */
	outb_p(delta >> 8 , PIT_CH0);	/* MSB */
	raw_spin_unlock(&i8253_lock);

	return 0;
}
/*
* On UP the PIT can serve all of the possible timer functions. On SMP systems
* it can be solely used for the global tick.
*/
/*
 * On UP the PIT can serve all of the possible timer functions. On SMP systems
 * it can be solely used for the global tick.
 *
 * ONESHOT support is added at init time by clockevent_i8253_init() when
 * requested; cpumask is filled in there as well.
 */
struct clock_event_device i8253_clockevent = {
	.name		= "pit",
	.features	= CLOCK_EVT_FEAT_PERIODIC,
	.set_mode	= init_pit_timer,
	.set_next_event	= pit_next_event,
};
/*
* Initialize the conversion factor and the min/max deltas of the clock event
* structure and register the clock event source with the framework.
*/
/*
 * clockevent_i8253_init - register the PIT clock event device
 * @oneshot: also advertise one-shot capability
 *
 * Initializes the conversion factor and the min/max deltas of the clock
 * event structure and registers the clock event source with the framework.
 */
void __init clockevent_i8253_init(bool oneshot)
{
	if (oneshot)
		i8253_clockevent.features |= CLOCK_EVT_FEAT_ONESHOT;
	/*
	 * Start pit with the boot cpu mask. x86 might make it global
	 * when it is used as broadcast device later.
	 */
	i8253_clockevent.cpumask = cpumask_of(smp_processor_id());

	/* 0xF..0x7FFF: the valid 16-bit PIT delta range passed to the
	 * clockevents core for min/max delta computation. */
	clockevents_config_and_register(&i8253_clockevent, PIT_TICK_RATE,
					0xF, 0x7FFF);
}
#endif
| gpl-2.0 |
libertyjin/arm-linux-kernel | arch/mips/powertv/time.c | 10666 | 1134 | /*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
* Portions copyright (C) 2009 Cisco Systems, Inc.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Setting up the clock on the MIPS boards.
*/
#include <linux/init.h>
#include <asm/mach-powertv/interrupts.h>
#include <asm/time.h>
#include "powertv-clock.h"
/*
 * get_c0_compare_int - IRQ number used by the CP0 compare/count timer
 *
 * Returns the platform's MIPS timer interrupt line (irq_mips_timer,
 * declared in <asm/mach-powertv/interrupts.h>).
 */
unsigned int __cpuinit get_c0_compare_int(void)
{
	return irq_mips_timer;
}
/*
 * plat_time_init - platform time-init hook called by the MIPS core
 *
 * Only registers the PowerTV clocksource (see powertv-clock.h).
 */
void __init plat_time_init(void)
{
	powertv_clocksource_init();
}
| gpl-2.0 |
bestmjh47/kernel_msm | arch/mn10300/mm/cache-inv-icache.c | 12202 | 3665 | /* Invalidate icache when dcache doesn't need invalidation as it's in
* write-through mode
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include "cache-smp.h"
/**
* flush_icache_page_range - Flush dcache and invalidate icache for part of a
* single page
* @start: The starting virtual address of the page part.
* @end: The ending virtual address of the page part.
*
* Invalidate the icache for part of a single page, as determined by the
* virtual addresses given. The page must be in the paged area. The dcache is
* not flushed as the cache must be in write-through mode to get here.
*/
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables; bail out silently at any level that is not present,
	 * since an unmapped range cannot be cached */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	/* take a snapshot of the PTE and unmap it again before use */
	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* invalidate the icache coverage on that region: locally by
	 * physical address, then on the other CPUs by virtual range */
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
}
/**
* flush_icache_range - Globally flush dcache and invalidate icache for region
* @start: The starting virtual address of the region.
* @end: The ending virtual address of the region.
*
* This is used by the kernel to globally flush some code it has just written
* from the dcache back to RAM and then to globally invalidate the icache over
* that region so that that code can be run on all CPUs in the system.
*/
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long start_page, end_page;
	unsigned long flags;

	flags = smp_lock_cache();

	/* split off the kernel direct-mapped window first */
	if (end > 0x80000000UL) {
		/* addresses above 0xa0000000 do not go through the cache */
		if (end > 0xa0000000UL) {
			end = 0xa0000000UL;
			if (start >= end)
				goto done;
		}

		/* kernel addresses between 0x80000000 and 0x9fffffff do not
		 * require page tables, so we just map such addresses
		 * directly */
		start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
		mn10300_icache_inv_range(start_page, end);
		smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
		if (start_page == start)
			goto done;
		/* continue below with the remaining paged part */
		end = start_page;
	}

	start_page = start & PAGE_MASK;
	end_page = (end - 1) & PAGE_MASK;

	if (start_page == end_page) {
		/* the first and last bytes are on the same page */
		flush_icache_page_range(start, end);
	} else if (start_page + 1 == end_page) {
		/* split over two virtually contiguous pages */
		flush_icache_page_range(start, end_page);
		flush_icache_page_range(end_page, end);
	} else {
		/* more than 2 pages; just flush the entire cache */
		mn10300_local_icache_inv();
		smp_cache_call(SMP_ICACHE_INV, 0, 0);
	}

done:
	smp_unlock_cache(flags);
}
EXPORT_SYMBOL(flush_icache_range);
| gpl-2.0 |
MiCode/mi2_kernel | drivers/video/i810/i810_gtf.c | 15530 | 9148 | /*-*- linux-c -*-
* linux/drivers/video/i810_main.h -- Intel 810 Non-discrete Video Timings
* (VESA GTF)
*
* Copyright (C) 2001 Antonino Daplas<adaplas@pol.net>
* All Rights Reserved
*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/kernel.h>
#include "i810_regs.h"
#include "i810.h"
#include "i810_main.h"
/*
* FIFO and Watermark tables - based almost wholly on i810_wmark.c in
* XFree86 v4.03 by Precision Insight. Slightly modified for integer
* operation, instead of float
*/
/* One watermark table entry: register value to use at a given dot clock. */
struct wm_info {
	u32 freq;	/* pixel clock in MHz */
	u32 wm;		/* watermark/FIFO register value for that clock */
};
/* Watermark tables, one per (RAMBUS frequency, pixel depth) pair;
 * i810_get_watermark() picks the entry whose frequency is nearest the
 * requested dot clock.  Values taken from i810_wmark.c (XFree86 4.03). */

/* 100 MHz memory, 8 bpp */
static struct wm_info i810_wm_8_100[] = {
	{ 15, 0x0070c000 }, { 19, 0x0070c000 }, { 25, 0x22003000 },
	{ 28, 0x22003000 }, { 31, 0x22003000 }, { 36, 0x22007000 },
	{ 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22008000 },
	{ 50, 0x22008000 }, { 56, 0x22008000 }, { 65, 0x22008000 },
	{ 75, 0x22008000 }, { 78, 0x22008000 }, { 80, 0x22008000 },
	{ 94, 0x22008000 }, { 96, 0x22107000 }, { 99, 0x22107000 },
	{ 108, 0x22107000 }, { 121, 0x22107000 }, { 128, 0x22107000 },
	{ 132, 0x22109000 }, { 135, 0x22109000 }, { 157, 0x2210b000 },
	{ 162, 0x2210b000 }, { 175, 0x2210b000 }, { 189, 0x2220e000 },
	{ 195, 0x2220e000 }, { 202, 0x2220e000 }, { 204, 0x2220e000 },
	{ 218, 0x2220f000 }, { 229, 0x22210000 }, { 234, 0x22210000 },
};

/* 100 MHz memory, 16 bpp */
static struct wm_info i810_wm_16_100[] = {
	{ 15, 0x0070c000 }, { 19, 0x0020c000 }, { 25, 0x22006000 },
	{ 28, 0x22006000 }, { 31, 0x22007000 }, { 36, 0x22007000 },
	{ 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22009000 },
	{ 50, 0x22009000 }, { 56, 0x22108000 }, { 65, 0x2210e000 },
	{ 75, 0x2210e000 }, { 78, 0x2210e000 }, { 80, 0x22210000 },
	{ 94, 0x22210000 }, { 96, 0x22210000 }, { 99, 0x22210000 },
	{ 108, 0x22210000 }, { 121, 0x22210000 }, { 128, 0x22210000 },
	{ 132, 0x22314000 }, { 135, 0x22314000 }, { 157, 0x22415000 },
	{ 162, 0x22416000 }, { 175, 0x22416000 }, { 189, 0x22416000 },
	{ 195, 0x22416000 }, { 202, 0x22416000 }, { 204, 0x22416000 },
	{ 218, 0x22416000 }, { 229, 0x22416000 },
};

/* 100 MHz memory, 24/32 bpp */
static struct wm_info i810_wm_24_100[] = {
	{ 15, 0x0020c000 }, { 19, 0x0040c000 }, { 25, 0x22009000 },
	{ 28, 0x22009000 }, { 31, 0x2200a000 }, { 36, 0x2210c000 },
	{ 40, 0x2210c000 }, { 45, 0x2210c000 }, { 49, 0x22111000 },
	{ 50, 0x22111000 }, { 56, 0x22111000 }, { 65, 0x22214000 },
	{ 75, 0x22214000 }, { 78, 0x22215000 }, { 80, 0x22216000 },
	{ 94, 0x22218000 }, { 96, 0x22418000 }, { 99, 0x22418000 },
	{ 108, 0x22418000 }, { 121, 0x22418000 }, { 128, 0x22419000 },
	{ 132, 0x22519000 }, { 135, 0x4441d000 }, { 157, 0x44419000 },
	{ 162, 0x44419000 }, { 175, 0x44419000 }, { 189, 0x44419000 },
	{ 195, 0x44419000 }, { 202, 0x44419000 }, { 204, 0x44419000 },
};

/* 133 MHz memory, 8 bpp */
static struct wm_info i810_wm_8_133[] = {
	{ 15, 0x0070c000 }, { 19, 0x0070c000 }, { 25, 0x22003000 },
	{ 28, 0x22003000 }, { 31, 0x22003000 }, { 36, 0x22007000 },
	{ 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22008000 },
	{ 50, 0x22008000 }, { 56, 0x22008000 }, { 65, 0x22008000 },
	{ 75, 0x22008000 }, { 78, 0x22008000 }, { 80, 0x22008000 },
	{ 94, 0x22008000 }, { 96, 0x22107000 }, { 99, 0x22107000 },
	{ 108, 0x22107000 }, { 121, 0x22107000 }, { 128, 0x22107000 },
	{ 132, 0x22109000 }, { 135, 0x22109000 }, { 157, 0x2210b000 },
	{ 162, 0x2210b000 }, { 175, 0x2210b000 }, { 189, 0x2220e000 },
	{ 195, 0x2220e000 }, { 202, 0x2220e000 }, { 204, 0x2220e000 },
	{ 218, 0x2220f000 }, { 229, 0x22210000 }, { 234, 0x22210000 },
};

/* 133 MHz memory, 16 bpp */
static struct wm_info i810_wm_16_133[] = {
	{ 15, 0x0020c000 }, { 19, 0x0020c000 }, { 25, 0x22006000 },
	{ 28, 0x22006000 }, { 31, 0x22007000 }, { 36, 0x22007000 },
	{ 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22009000 },
	{ 50, 0x22009000 }, { 56, 0x22108000 }, { 65, 0x2210e000 },
	{ 75, 0x2210e000 }, { 78, 0x2210e000 }, { 80, 0x22210000 },
	{ 94, 0x22210000 }, { 96, 0x22210000 }, { 99, 0x22210000 },
	{ 108, 0x22210000 }, { 121, 0x22210000 }, { 128, 0x22210000 },
	{ 132, 0x22314000 }, { 135, 0x22314000 }, { 157, 0x22415000 },
	{ 162, 0x22416000 }, { 175, 0x22416000 }, { 189, 0x22416000 },
	{ 195, 0x22416000 }, { 202, 0x22416000 }, { 204, 0x22416000 },
	{ 218, 0x22416000 }, { 229, 0x22416000 },
};

/* 133 MHz memory, 24/32 bpp */
static struct wm_info i810_wm_24_133[] = {
	{ 15, 0x0020c000 }, { 19, 0x00408000 }, { 25, 0x22009000 },
	{ 28, 0x22009000 }, { 31, 0x2200a000 }, { 36, 0x2210c000 },
	{ 40, 0x2210c000 }, { 45, 0x2210c000 }, { 49, 0x22111000 },
	{ 50, 0x22111000 }, { 56, 0x22111000 }, { 65, 0x22214000 },
	{ 75, 0x22214000 }, { 78, 0x22215000 }, { 80, 0x22216000 },
	{ 94, 0x22218000 }, { 96, 0x22418000 }, { 99, 0x22418000 },
	{ 108, 0x22418000 }, { 121, 0x22418000 }, { 128, 0x22419000 },
	{ 132, 0x22519000 }, { 135, 0x4441d000 }, { 157, 0x44419000 },
	{ 162, 0x44419000 }, { 175, 0x44419000 }, { 189, 0x44419000 },
	{ 195, 0x44419000 }, { 202, 0x44419000 }, { 204, 0x44419000 },
};
/* No-op on non-discrete (GTF) timings: any xres is acceptable as-is. */
void round_off_xres(u32 *xres) { }
/* No-op on non-discrete (GTF) timings: any yres is acceptable as-is. */
void round_off_yres(u32 *xres, u32 *yres) { }
/**
* i810fb_encode_registers - encode @var to hardware register values
* @var: pointer to var structure
* @par: pointer to hardware par structure
*
* DESCRIPTION:
* Timing values in @var will be converted to appropriate
* register values of @par.
*/
/**
 * i810fb_encode_registers - encode @var to hardware register values
 * @var: pointer to var structure
 * @par: pointer to hardware par structure
 *
 * DESCRIPTION:
 *	Timing values in @var will be converted to appropriate
 *	register values of @par (standard VGA CRTC registers cr00..cr39,
 *	sync-polarity MSR, interlace and overlay fields).
 */
void i810fb_encode_registers(const struct fb_var_screeninfo *var,
			     struct i810fb_par *par, u32 xres, u32 yres)
{
	int n, blank_s, blank_e;
	u8 __iomem *mmio = par->mmio_start_virtual;
	u8 msr = 0;

	/* Horizontal */
	/* htotal: character clocks (pixels/8), minus the VGA bias of 5 */
	n = ((xres + var->right_margin + var->hsync_len +
	      var->left_margin) >> 3) - 5;
	par->regs.cr00 = (u8) n;
	par->regs.cr35 = (u8) ((n >> 8) & 1);	/* bit 8 overflow */

	/* xres */
	par->regs.cr01 = (u8) ((xres >> 3) - 1);

	/* hblank: end at htotal; start clamped so the blank window is at
	 * most 128 character clocks and never starts before active video */
	blank_e = (xres + var->right_margin + var->hsync_len +
		   var->left_margin) >> 3;
	blank_e--;
	blank_s = blank_e - 127;
	if (blank_s < (xres >> 3))
		blank_s = xres >> 3;
	par->regs.cr02 = (u8) blank_s;
	par->regs.cr03 = (u8) (blank_e & 0x1F);		/* bits 0-4 */
	par->regs.cr05 = (u8) ((blank_e & (1 << 5)) << 2);	/* bit 5 */
	par->regs.cr39 = (u8) ((blank_e >> 6) & 1);	/* bit 6 overflow */

	/* hsync */
	par->regs.cr04 = (u8) ((xres + var->right_margin) >> 3);
	par->regs.cr05 |= (u8) (((xres + var->right_margin +
				  var->hsync_len) >> 3) & 0x1F);

	/* Vertical */
	/* vtotal: scanlines minus the VGA bias of 2 */
	n = yres + var->lower_margin + var->vsync_len + var->upper_margin - 2;
	par->regs.cr06 = (u8) (n & 0xFF);
	par->regs.cr30 = (u8) ((n >> 8) & 0x0F);

	/* vsync */
	n = yres + var->lower_margin;
	par->regs.cr10 = (u8) (n & 0xFF);
	par->regs.cr32 = (u8) ((n >> 8) & 0x0F);
	/* cr11: keep the upper control bits currently in the register and
	 * only replace the 4-bit vsync-end field */
	par->regs.cr11 = i810_readb(CR11, mmio) & ~0x0F;
	par->regs.cr11 |= (u8) ((yres + var->lower_margin +
				 var->vsync_len) & 0x0F);

	/* yres */
	n = yres - 1;
	par->regs.cr12 = (u8) (n & 0xFF);
	par->regs.cr31 = (u8) ((n >> 8) & 0x0F);

	/* vblank: same clamping scheme as hblank, in scanlines */
	blank_e = yres + var->lower_margin + var->vsync_len +
		  var->upper_margin;
	blank_e--;
	blank_s = blank_e - 127;
	if (blank_s < yres)
		blank_s = yres;
	par->regs.cr15 = (u8) (blank_s & 0xFF);
	par->regs.cr33 = (u8) ((blank_s >> 8) & 0x0F);
	par->regs.cr16 = (u8) (blank_e & 0xFF);
	par->regs.cr09 = 0;

	/* sync polarity: fbdev's *_HIGH_ACT flags map to cleared MSR bits */
	if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
		msr |= 1 << 6;
	if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
		msr |= 1 << 7;
	par->regs.msr = msr;

	/* interlace */
	if (var->vmode & FB_VMODE_INTERLACED)
		par->interlace = (1 << 7) | ((u8) (var->yres >> 4));
	else
		par->interlace = 0;

	if (var->vmode & FB_VMODE_DOUBLE)
		par->regs.cr09 |= 1 << 7;	/* scanline doubling */

	/* overlay: packed htotal-32 (low word) and xres-32 (high word) */
	par->ovract = ((var->xres + var->right_margin + var->hsync_len +
			var->left_margin - 32) | ((var->xres - 32) << 16));
}
/* No-op on non-discrete (GTF) timings: @var already carries full timings. */
void i810fb_fill_var_timings(struct fb_var_screeninfo *var) { }
/**
* i810_get_watermark - gets watermark
* @var: pointer to fb_var_screeninfo
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
* Gets the required watermark based on
* pixelclock and RAMBUS frequency.
*
* RETURNS:
* watermark
*/
/**
 * i810_get_watermark - gets watermark
 * @var: pointer to fb_var_screeninfo
 * @par: pointer to i810fb_par structure
 *
 * DESCRIPTION:
 *	Picks the watermark table matching the RAMBUS frequency and pixel
 *	depth, then returns the watermark whose tabulated pixel clock is
 *	nearest the requested one.
 *
 * RETURNS:
 *	watermark (0 if the depth has no table)
 */
u32 i810_get_watermark(const struct fb_var_screeninfo *var,
		       struct i810fb_par *par)
{
	struct wm_info *table = NULL;
	u32 nr = 0, i, mhz, best_wm = 0, best_diff = ~0, d;

	/* Select the table for the memory frequency / depth combination. */
	if (par->mem_freq == 100) {
		switch (var->bits_per_pixel) {
		case 8:
			table = i810_wm_8_100;
			nr = ARRAY_SIZE(i810_wm_8_100);
			break;
		case 16:
			table = i810_wm_16_100;
			nr = ARRAY_SIZE(i810_wm_16_100);
			break;
		case 24:
		case 32:
			table = i810_wm_24_100;
			nr = ARRAY_SIZE(i810_wm_24_100);
		}
	} else {
		switch (var->bits_per_pixel) {
		case 8:
			table = i810_wm_8_133;
			nr = ARRAY_SIZE(i810_wm_8_133);
			break;
		case 16:
			table = i810_wm_16_133;
			nr = ARRAY_SIZE(i810_wm_16_133);
			break;
		case 24:
		case 32:
			table = i810_wm_24_133;
			nr = ARRAY_SIZE(i810_wm_24_133);
		}
	}

	/* Dot clock in MHz (fbdev pixclock is a period in picoseconds). */
	mhz = 1000000 / var->pixclock;

	/* Linear scan for the entry with the smallest frequency distance. */
	for (i = 0; i < nr; i++) {
		d = (mhz <= table[i].freq) ? table[i].freq - mhz
					   : mhz - table[i].freq;
		if (d < best_diff) {
			best_diff = d;
			best_wm = table[i].wm;
		}
	}

	return best_wm;
}
| gpl-2.0 |
xbmc/android | lib/ffmpeg/libavcodec/aandcttab.c | 171 | 1913 | /*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* AAN (Arai Agui Aakajima) (I)DCT tables
*/
#include <stdint.h>
/* Forward AAN DCT post-scaling factors, one per coefficient position. */
const uint16_t ff_aanscales[64] = {
	/* precomputed values scaled up by 14 bits */
	16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
	22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
	21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
	19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
	16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
	12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
	 8867, 12299, 11585, 10426,  8867,  6967,  4799,  2446,
	 4520,  6270,  5906,  5315,  4520,  3552,  2446,  1247
};
/* Inverse AAN (I)DCT scaling factors, one per coefficient position. */
const uint16_t ff_inv_aanscales[64] = {
	 4096,  2953,  3135,  3483,  4096,  5213,  7568, 14846,
	 2953,  2129,  2260,  2511,  2953,  3759,  5457, 10703,
	 3135,  2260,  2399,  2666,  3135,  3990,  5793, 11363,
	 3483,  2511,  2666,  2962,  3483,  4433,  6436, 12625,
	 4096,  2953,  3135,  3483,  4096,  5213,  7568, 14846,
	 5213,  3759,  3990,  4433,  5213,  6635,  9633, 18895,
	 7568,  5457,  5793,  6436,  7568,  9633, 13985, 27432,
	14846, 10703, 11363, 12625, 14846, 18895, 27432, 53809,
};
| gpl-2.0 |
emagii/linux-2.6-imx | net/nfc/netlink.c | 171 | 12235 | /*
* Copyright (C) 2011 Instituto Nokia de Tecnologia
*
* Authors:
* Lauro Ramos Venancio <lauro.venancio@openbossa.org>
* Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <net/genetlink.h>
#include <linux/nfc.h>
#include <linux/slab.h>
#include "nfc.h"
/* Multicast group on which NFC events (device add/remove, targets
 * found) are broadcast to userspace listeners. */
static struct genl_multicast_group nfc_genl_event_mcgrp = {
	.name = NFC_GENL_MCAST_EVENT_NAME,
};

/* The nfc generic netlink family; id is assigned at registration. */
struct genl_family nfc_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = NFC_GENL_NAME,
	.version = NFC_GENL_VERSION,
	.maxattr = NFC_ATTR_MAX,
};

/* Attribute validation policy shared by all nfc commands. */
static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
	[NFC_ATTR_DEVICE_INDEX] = { .type = NLA_U32 },
	[NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
				.len = NFC_DEVICE_NAME_MAXSIZE },
	[NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
};
/*
 * nfc_genl_send_target - append one target description to a dump message
 * @msg:    message being assembled
 * @target: target to describe
 * @cb:     dump callback (supplies pid/seq and consistency tracking)
 * @flags:  netlink message flags (e.g. NLM_F_MULTI)
 *
 * Returns the message length on success, -EMSGSIZE when @msg is full.
 * The NLA_PUT_* macros branch to the nla_put_failure label when the
 * attribute does not fit, where the partial header is cancelled.
 */
static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
					struct netlink_callback *cb, int flags)
{
	void *hdr;

	nfc_dbg("entry");

	hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
				&nfc_genl_family, flags, NFC_CMD_GET_TARGET);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr, &nfc_genl_family);

	NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx);
	NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS,
				target->supported_protocols);
	NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
	NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);

	return genlmsg_end(msg, hdr);

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
/*
 * __get_device_from_cb - resolve the nfc device addressed by a dump request
 * @cb: netlink dump callback holding the original request
 *
 * Re-parses the request's attributes to extract NFC_ATTR_DEVICE_INDEX.
 * Returns a referenced device (caller must nfc_put_device()) or an
 * ERR_PTR on parse failure, missing attribute or unknown index.
 */
static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb)
{
	struct nfc_dev *dev;
	int rc;
	u32 idx;

	rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize,
						nfc_genl_family.attrbuf,
						nfc_genl_family.maxattr,
						nfc_genl_policy);
	if (rc < 0)
		return ERR_PTR(rc);

	if (!nfc_genl_family.attrbuf[NFC_ATTR_DEVICE_INDEX])
		return ERR_PTR(-EINVAL);

	idx = nla_get_u32(nfc_genl_family.attrbuf[NFC_ATTR_DEVICE_INDEX]);

	dev = nfc_get_device(idx);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return dev;
}
/*
 * nfc_genl_dump_targets - dumpit handler for NFC_CMD_GET_TARGET
 *
 * cb->args[0] carries the resume index, cb->args[1] the referenced
 * device (dropped later in nfc_genl_dump_targets_done()).  Targets are
 * walked under targets_lock; cb->seq tracks the generation counter so
 * the core can detect concurrent modification.
 */
static int nfc_genl_dump_targets(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	int i = cb->args[0];
	struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
	int rc;

	nfc_dbg("entry");

	/* first invocation: look up and pin the device */
	if (!dev) {
		dev = __get_device_from_cb(cb);
		if (IS_ERR(dev))
			return PTR_ERR(dev);

		cb->args[1] = (long) dev;
	}

	spin_lock_bh(&dev->targets_lock);

	cb->seq = dev->targets_generation;

	while (i < dev->n_targets) {
		rc = nfc_genl_send_target(skb, &dev->targets[i], cb,
								NLM_F_MULTI);
		if (rc < 0)
			break;	/* skb full; resume from i next time */

		i++;
	}

	spin_unlock_bh(&dev->targets_lock);

	cb->args[0] = i;

	return skb->len;
}
/* Dump-completion hook: release the device pinned by dump_targets. */
static int nfc_genl_dump_targets_done(struct netlink_callback *cb)
{
	struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];

	nfc_dbg("entry");

	if (dev)
		nfc_put_device(dev);

	return 0;
}
/*
 * nfc_genl_targets_found - broadcast a TARGETS_FOUND event for @dev
 *
 * Also clears the recorded poll requester pid.  Called from atomic
 * context (GFP_ATOMIC allocations).  Returns 0 on success or a
 * negative errno.
 */
int nfc_genl_targets_found(struct nfc_dev *dev)
{
	struct sk_buff *msg;
	void *hdr;

	nfc_dbg("entry");

	dev->genl_data.poll_req_pid = 0;

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
				NFC_EVENT_TARGETS_FOUND);
	if (!hdr)
		goto free_msg;

	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);

	genlmsg_end(msg, hdr);

	return genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);

nla_put_failure:
	genlmsg_cancel(msg, hdr);
free_msg:
	nlmsg_free(msg);
	return -EMSGSIZE;
}
/*
 * nfc_genl_device_added - broadcast a DEVICE_ADDED event for @dev
 *
 * Carries the device name, index and supported protocols.  The
 * multicast result is intentionally ignored (best effort).  Returns 0
 * on success or a negative errno.
 */
int nfc_genl_device_added(struct nfc_dev *dev)
{
	struct sk_buff *msg;
	void *hdr;

	nfc_dbg("entry");

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
				NFC_EVENT_DEVICE_ADDED);
	if (!hdr)
		goto free_msg;

	NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
	NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);

	genlmsg_end(msg, hdr);

	genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
free_msg:
	nlmsg_free(msg);
	return -EMSGSIZE;
}
/*
 * nfc_genl_device_removed - broadcast a DEVICE_REMOVED event for @dev
 *
 * Only the device index is sent.  Returns 0 on success or a negative
 * errno.
 */
int nfc_genl_device_removed(struct nfc_dev *dev)
{
	struct sk_buff *msg;
	void *hdr;

	nfc_dbg("entry");

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
				NFC_EVENT_DEVICE_REMOVED);
	if (!hdr)
		goto free_msg;

	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);

	genlmsg_end(msg, hdr);

	genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
free_msg:
	nlmsg_free(msg);
	return -EMSGSIZE;
}
/*
 * nfc_genl_send_device - append one device description to a message
 * @msg:   message being assembled
 * @dev:   device to describe
 * @pid:   destination pid
 * @seq:   message sequence number
 * @cb:    dump callback, or NULL when replying to a direct GET
 * @flags: netlink message flags
 *
 * Returns the message length on success, -EMSGSIZE when @msg is full.
 */
static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
						u32 pid, u32 seq,
						struct netlink_callback *cb,
						int flags)
{
	void *hdr;

	nfc_dbg("entry");

	hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
							NFC_CMD_GET_DEVICE);
	if (!hdr)
		return -EMSGSIZE;

	/* consistency tracking only applies to dumps */
	if (cb)
		genl_dump_check_consistent(cb, hdr, &nfc_genl_family);

	NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
	NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);

	return genlmsg_end(msg, hdr);

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
/*
 * nfc_genl_dump_devices - dumpit handler for NFC_CMD_GET_DEVICE
 *
 * cb->args[0] holds a heap-allocated class_dev_iter (freed in
 * nfc_genl_dump_devices_done()), cb->args[1] the device to resume from.
 * Devices are walked under nfc_devlist_mutex; cb->seq tracks the
 * device-list generation for consistency checking.
 */
static int nfc_genl_dump_devices(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
	struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
	bool first_call = false;

	nfc_dbg("entry");

	if (!iter) {
		first_call = true;
		iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;
		cb->args[0] = (long) iter;
	}

	mutex_lock(&nfc_devlist_mutex);

	cb->seq = nfc_devlist_generation;

	if (first_call) {
		nfc_device_iter_init(iter);
		dev = nfc_device_iter_next(iter);
	}

	while (dev) {
		int rc;

		rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid,
							cb->nlh->nlmsg_seq,
							cb, NLM_F_MULTI);
		if (rc < 0)
			break;	/* skb full; resume from this dev next time */

		dev = nfc_device_iter_next(iter);
	}

	mutex_unlock(&nfc_devlist_mutex);

	cb->args[1] = (long) dev;

	return skb->len;
}
/* Dump-completion hook: tear down and free the device iterator. */
static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
{
	struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];

	nfc_dbg("entry");

	nfc_device_iter_exit(iter);
	kfree(iter);

	return 0;
}
/*
 * nfc_genl_get_device - doit handler for NFC_CMD_GET_DEVICE
 *
 * Looks up the device named by NFC_ATTR_DEVICE_INDEX and replies with
 * its description.  Returns 0 on success or a negative errno.
 */
static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	struct nfc_dev *dev;
	u32 idx;
	int rc = -ENOBUFS;

	nfc_dbg("entry");

	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
		return -EINVAL;

	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);

	dev = nfc_get_device(idx);
	if (!dev)
		return -ENODEV;

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg) {
		rc = -ENOMEM;
		goto out_putdev;
	}

	rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq,
								NULL, 0);
	if (rc < 0)
		goto out_free;

	nfc_put_device(dev);

	/* genlmsg_reply consumes msg */
	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
out_putdev:
	nfc_put_device(dev);
	return rc;
}
/*
 * nfc_genl_dev_up - doit handler for NFC_CMD_DEV_UP
 *
 * Powers up the device named by NFC_ATTR_DEVICE_INDEX.  Returns 0 on
 * success or a negative errno.
 */
static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info)
{
	struct nfc_dev *dev;
	int rc;
	u32 idx;

	nfc_dbg("entry");

	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
		return -EINVAL;

	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);

	dev = nfc_get_device(idx);
	if (!dev)
		return -ENODEV;

	rc = nfc_dev_up(dev);

	nfc_put_device(dev);
	return rc;
}
/*
 * nfc_genl_dev_down - doit handler for NFC_CMD_DEV_DOWN
 *
 * Powers down the device named by NFC_ATTR_DEVICE_INDEX.  Returns 0 on
 * success or a negative errno.
 */
static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info)
{
	struct nfc_dev *dev;
	int rc;
	u32 idx;

	nfc_dbg("entry");

	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
		return -EINVAL;

	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);

	dev = nfc_get_device(idx);
	if (!dev)
		return -ENODEV;

	rc = nfc_dev_down(dev);

	nfc_put_device(dev);
	return rc;
}
/*
 * nfc_genl_start_poll - doit handler for NFC_CMD_START_POLL
 *
 * Starts target polling on the device for the requested protocols and,
 * on success, records the requester's pid so that only that socket may
 * stop the poll (see nfc_genl_stop_poll() and the NETLINK_URELEASE
 * notifier).  Returns 0 on success or a negative errno.
 */
static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
{
	struct nfc_dev *dev;
	int rc;
	u32 idx;
	u32 protocols;

	nfc_dbg("entry");

	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
		!info->attrs[NFC_ATTR_PROTOCOLS])
		return -EINVAL;

	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
	protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]);

	dev = nfc_get_device(idx);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->genl_data.genl_data_mutex);

	rc = nfc_start_poll(dev, protocols);
	if (!rc)
		dev->genl_data.poll_req_pid = info->snd_pid;

	mutex_unlock(&dev->genl_data.genl_data_mutex);

	nfc_put_device(dev);
	return rc;
}
/*
 * nfc_genl_stop_poll - doit handler for NFC_CMD_STOP_POLL
 *
 * Stops polling on the device, but only for the socket that started it
 * (matching poll_req_pid); other requesters get -EBUSY.  Returns 0 on
 * success or a negative errno.
 */
static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
{
	struct nfc_dev *dev;
	int rc;
	u32 idx;

	nfc_dbg("entry");

	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
		return -EINVAL;

	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);

	dev = nfc_get_device(idx);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->genl_data.genl_data_mutex);

	if (dev->genl_data.poll_req_pid != info->snd_pid) {
		rc = -EBUSY;
		goto out;
	}

	rc = nfc_stop_poll(dev);
	dev->genl_data.poll_req_pid = 0;

out:
	mutex_unlock(&dev->genl_data.genl_data_mutex);
	nfc_put_device(dev);
	return rc;
}
/* Command table for the nfc family; all commands share nfc_genl_policy. */
static struct genl_ops nfc_genl_ops[] = {
	{
		.cmd = NFC_CMD_GET_DEVICE,
		.doit = nfc_genl_get_device,
		.dumpit = nfc_genl_dump_devices,
		.done = nfc_genl_dump_devices_done,
		.policy = nfc_genl_policy,
	},
	{
		.cmd = NFC_CMD_DEV_UP,
		.doit = nfc_genl_dev_up,
		.policy = nfc_genl_policy,
	},
	{
		.cmd = NFC_CMD_DEV_DOWN,
		.doit = nfc_genl_dev_down,
		.policy = nfc_genl_policy,
	},
	{
		.cmd = NFC_CMD_START_POLL,
		.doit = nfc_genl_start_poll,
		.policy = nfc_genl_policy,
	},
	{
		.cmd = NFC_CMD_STOP_POLL,
		.doit = nfc_genl_stop_poll,
		.policy = nfc_genl_policy,
	},
	{
		.cmd = NFC_CMD_GET_TARGET,
		.dumpit = nfc_genl_dump_targets,
		.done = nfc_genl_dump_targets_done,
		.policy = nfc_genl_policy,
	},
};
/*
 * nfc_genl_rcv_nl_event - netlink notifier callback
 *
 * On NETLINK_URELEASE of a generic-netlink socket, stop any poll that
 * the dying socket had started (matched by pid) so devices are not
 * left polling forever.  Always returns NOTIFY_DONE.
 */
static int nfc_genl_rcv_nl_event(struct notifier_block *this,
						unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;
	struct class_dev_iter iter;
	struct nfc_dev *dev;

	if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
		goto out;

	nfc_dbg("NETLINK_URELEASE event from id %d", n->pid);

	nfc_device_iter_init(&iter);
	dev = nfc_device_iter_next(&iter);

	while (dev) {
		mutex_lock(&dev->genl_data.genl_data_mutex);
		if (dev->genl_data.poll_req_pid == n->pid) {
			nfc_stop_poll(dev);
			dev->genl_data.poll_req_pid = 0;
		}
		mutex_unlock(&dev->genl_data.genl_data_mutex);
		dev = nfc_device_iter_next(&iter);
	}

	nfc_device_iter_exit(&iter);

out:
	return NOTIFY_DONE;
}
/* Initialize per-device genl state: no poll requester yet, fresh mutex. */
void nfc_genl_data_init(struct nfc_genl_data *genl_data)
{
	genl_data->poll_req_pid = 0;
	mutex_init(&genl_data->genl_data_mutex);
}

/* Tear down the per-device genl state mutex. */
void nfc_genl_data_exit(struct nfc_genl_data *genl_data)
{
	mutex_destroy(&genl_data->genl_data_mutex);
}

/* Watches netlink socket releases so stale polls can be stopped. */
static struct notifier_block nl_notifier = {
	.notifier_call  = nfc_genl_rcv_nl_event,
};
/**
 * nfc_genl_init() - Initialize netlink interface
 *
 * Registers the nfc generic netlink family, its event multicast group,
 * and the socket-release notifier.
 *
 * Return: 0 on success, negative errno on failure.  On failure nothing
 * stays registered.
 */
int __init nfc_genl_init(void)
{
	int rc;

	rc = genl_register_family_with_ops(&nfc_genl_family, nfc_genl_ops,
					   ARRAY_SIZE(nfc_genl_ops));
	if (rc)
		return rc;

	rc = genl_register_mc_group(&nfc_genl_family, &nfc_genl_event_mcgrp);
	if (rc) {
		/*
		 * Fix: don't leave a half-initialized interface behind —
		 * previously the family stayed registered and the notifier
		 * was installed even when the mc group registration failed.
		 */
		genl_unregister_family(&nfc_genl_family);
		return rc;
	}

	netlink_register_notifier(&nl_notifier);

	return 0;
}
/**
 * nfc_genl_exit() - Deinitialize netlink interface
 *
 * This exit function unregisters the nfc netlink family.
 */
void nfc_genl_exit(void)
{
	/* Drop the NETLINK_URELEASE hook before the family goes away. */
	netlink_unregister_notifier(&nl_notifier);

	genl_unregister_family(&nfc_genl_family);
}
| gpl-2.0 |
SamYaple/bcache-dev | arch/x86/boot/compressed/mkpiggy.c | 427 | 2763 | /* ----------------------------------------------------------------------- *
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
* H. Peter Anvin <hpa@linux.intel.com>
*
* ----------------------------------------------------------------------- */
/*
* Compute the desired load offset from a compressed program; outputs
* a small assembly wrapper with the appropriate symbols defined.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <tools/le_byteshift.h>
/*
 * Read the 4-byte little-endian trailer of the compressed kernel image
 * (the uncompressed length), compute a safe decompression offset, and
 * print an assembly wrapper defining the z_* symbols plus the embedded
 * payload.  Returns 0 on success, 1 on any usage or I/O error.
 */
int main(int argc, char *argv[])
{
	uint32_t olen;		/* uncompressed (output) length */
	long ilen;		/* compressed (input) length */
	unsigned long offs;
	FILE *f = NULL;
	int retval = 1;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s compressed_file\n", argv[0]);
		goto bail;
	}

	/* Get the information for the compressed kernel image first */

	f = fopen(argv[1], "r");
	if (!f) {
		perror(argv[1]);
		goto bail;
	}

	if (fseek(f, -4L, SEEK_END)) {
		perror(argv[1]);
		/*
		 * Fix: bail out on seek failure.  Previously execution fell
		 * through and fread() ran from an undefined file position.
		 */
		goto bail;
	}

	if (fread(&olen, sizeof(olen), 1, f) != 1) {
		perror(argv[1]);
		goto bail;
	}

	/* ftell() is now just past the trailer, i.e. the input length. */
	ilen = ftell(f);
	olen = get_unaligned_le32(&olen);

	/*
	 * Now we have the input (compressed) and output (uncompressed)
	 * sizes, compute the necessary decompression offset...
	 */
	offs = (olen > ilen) ? olen - ilen : 0;
	offs += olen >> 12;	/* Add 8 bytes for each 32K block */
	offs += 64*1024 + 128;	/* Add 64K + 128 bytes slack */
	offs = (offs+4095) & ~4095; /* Round to a 4K boundary */

	printf(".section \".rodata..compressed\",\"a\",@progbits\n");
	printf(".globl z_input_len\n");
	printf("z_input_len = %lu\n", ilen);
	printf(".globl z_output_len\n");
	printf("z_output_len = %lu\n", (unsigned long)olen);
	printf(".globl z_extract_offset\n");
	printf("z_extract_offset = 0x%lx\n", offs);
	/* z_extract_offset_negative allows simplification of head_32.S */
	printf(".globl z_extract_offset_negative\n");
	printf("z_extract_offset_negative = -0x%lx\n", offs);

	printf(".globl input_data, input_data_end\n");
	printf("input_data:\n");
	printf(".incbin \"%s\"\n", argv[1]);
	printf("input_data_end:\n");

	retval = 0;
bail:
	if (f)
		fclose(f);
	return retval;
}
| gpl-2.0 |
jyizheng/net-next-nuse-old | drivers/pwm/pwm-pca9685.c | 427 | 7283 | /*
* Driver for PCA9685 16-channel 12-bit PWM LED controller
*
* Copyright (C) 2013 Steffen Trumtrar <s.trumtrar@pengutronix.de>
*
* based on the pwm-twl-led.c driver
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define PCA9685_MODE1 0x00
#define PCA9685_MODE2 0x01
#define PCA9685_SUBADDR1 0x02
#define PCA9685_SUBADDR2 0x03
#define PCA9685_SUBADDR3 0x04
#define PCA9685_ALLCALLADDR 0x05
#define PCA9685_LEDX_ON_L 0x06
#define PCA9685_LEDX_ON_H 0x07
#define PCA9685_LEDX_OFF_L 0x08
#define PCA9685_LEDX_OFF_H 0x09
#define PCA9685_ALL_LED_ON_L 0xFA
#define PCA9685_ALL_LED_ON_H 0xFB
#define PCA9685_ALL_LED_OFF_L 0xFC
#define PCA9685_ALL_LED_OFF_H 0xFD
#define PCA9685_PRESCALE 0xFE
#define PCA9685_NUMREGS 0xFF
#define PCA9685_MAXCHAN 0x10
#define LED_FULL (1 << 4)
#define MODE1_SLEEP (1 << 4)
#define MODE2_INVRT (1 << 4)
#define MODE2_OUTDRV (1 << 2)
#define LED_N_ON_H(N) (PCA9685_LEDX_ON_H + (4 * (N)))
#define LED_N_ON_L(N) (PCA9685_LEDX_ON_L + (4 * (N)))
#define LED_N_OFF_H(N) (PCA9685_LEDX_OFF_H + (4 * (N)))
#define LED_N_OFF_L(N) (PCA9685_LEDX_OFF_L + (4 * (N)))
/* Driver state for one PCA9685 chip. */
struct pca9685 {
	struct pwm_chip chip;		/* embedded PWM chip, see to_pca() */
	struct regmap *regmap;		/* I2C register access */
	int active_cnt;			/* requested channels; gates chip sleep */
};

/* Recover the driver state from the embedded pwm_chip. */
static inline struct pca9685 *to_pca(struct pwm_chip *chip)
{
	return container_of(chip, struct pca9685, chip);
}
/*
 * Program the duty cycle of one channel (or of all channels via the
 * ALL_LED pseudo-channel, hwpwm >= PCA9685_MAXCHAN).  Zero duty forces
 * full-off, duty == period forces full-on, anything in between is
 * converted to a 12-bit OFF count.
 */
static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
			      int duty_ns, int period_ns)
{
	struct pca9685 *pca = to_pca(chip);
	int use_all = pwm->hwpwm >= PCA9685_MAXCHAN;
	unsigned long long off_cnt;

	if (duty_ns < 1) {
		/* Full off: set the LED_FULL bit in the OFF_H register. */
		regmap_write(pca->regmap,
			     use_all ? PCA9685_ALL_LED_OFF_H
				     : LED_N_OFF_H(pwm->hwpwm),
			     LED_FULL);
		return 0;
	}

	if (duty_ns == period_ns) {
		/* Full on: set the LED_FULL bit in the ON_H register. */
		regmap_write(pca->regmap,
			     use_all ? PCA9685_ALL_LED_ON_H
				     : LED_N_ON_H(pwm->hwpwm),
			     LED_FULL);
		return 0;
	}

	/* Scale the duty cycle into the 4096-step counter range. */
	off_cnt = 4096 * (unsigned long long)duty_ns;
	off_cnt = DIV_ROUND_UP_ULL(off_cnt, period_ns);

	regmap_write(pca->regmap,
		     use_all ? PCA9685_ALL_LED_OFF_L : LED_N_OFF_L(pwm->hwpwm),
		     (int)off_cnt & 0xff);
	regmap_write(pca->regmap,
		     use_all ? PCA9685_ALL_LED_OFF_H : LED_N_OFF_H(pwm->hwpwm),
		     ((int)off_cnt >> 8) & 0xf);

	return 0;
}
/*
 * Enable a channel: zero the ON counter (the PWM subsystem has no
 * pre-delay concept) and clear the full-off bit, which overrides every
 * other setting while set.
 */
static int pca9685_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct pca9685 *pca = to_pca(chip);
	int use_all = pwm->hwpwm >= PCA9685_MAXCHAN;

	/* ON-timeout = 0, low byte then high byte. */
	regmap_write(pca->regmap,
		     use_all ? PCA9685_ALL_LED_ON_L : LED_N_ON_L(pwm->hwpwm),
		     0);
	regmap_write(pca->regmap,
		     use_all ? PCA9685_ALL_LED_ON_H : LED_N_ON_H(pwm->hwpwm),
		     0);

	/* Clear the full-off bit; it has precedence over the others. */
	regmap_update_bits(pca->regmap,
			   use_all ? PCA9685_ALL_LED_OFF_H
				   : LED_N_OFF_H(pwm->hwpwm),
			   LED_FULL, 0x0);

	return 0;
}
/*
 * Disable a channel by asserting the full-off bit, then clear the OFF
 * counter's low byte so a later enable starts from a known state.
 */
static void pca9685_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct pca9685 *pca = to_pca(chip);
	int use_all = pwm->hwpwm >= PCA9685_MAXCHAN;

	regmap_write(pca->regmap,
		     use_all ? PCA9685_ALL_LED_OFF_H : LED_N_OFF_H(pwm->hwpwm),
		     LED_FULL);

	/* Clear the LED_OFF counter. */
	regmap_write(pca->regmap,
		     use_all ? PCA9685_ALL_LED_OFF_L : LED_N_OFF_L(pwm->hwpwm),
		     0x0);
}
/* First requested channel wakes the chip (clears MODE1 SLEEP). */
static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct pca9685 *pca = to_pca(chip);

	if (pca->active_cnt++ == 0)
		return regmap_update_bits(pca->regmap, PCA9685_MODE1,
					  MODE1_SLEEP, 0x0);

	return 0;
}

/* Last freed channel puts the chip back to sleep (sets MODE1 SLEEP). */
static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct pca9685 *pca = to_pca(chip);

	if (--pca->active_cnt == 0)
		regmap_update_bits(pca->regmap, PCA9685_MODE1, MODE1_SLEEP,
				   MODE1_SLEEP);
}
static const struct pwm_ops pca9685_pwm_ops = {
.enable = pca9685_pwm_enable,
.disable = pca9685_pwm_disable,
.config = pca9685_pwm_config,
.request = pca9685_pwm_request,
.free = pca9685_pwm_free,
.owner = THIS_MODULE,
};
static const struct regmap_config pca9685_regmap_i2c_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = PCA9685_NUMREGS,
.cache_type = REGCACHE_NONE,
};
/*
 * I2C probe: map the chip's registers, apply the DT "invert" and
 * "open-drain" options to MODE2, clear any stuck full-off bits, and
 * register a PWM chip with 17 channels (16 outputs plus the ALL_LED
 * pseudo-channel).
 */
static int pca9685_pwm_probe(struct i2c_client *client,
				const struct i2c_device_id *id)
{
	struct device_node *np = client->dev.of_node;
	struct pca9685 *pca;
	int ret;
	int mode2;

	pca = devm_kzalloc(&client->dev, sizeof(*pca), GFP_KERNEL);
	if (!pca)
		return -ENOMEM;

	pca->regmap = devm_regmap_init_i2c(client, &pca9685_regmap_i2c_config);
	if (IS_ERR(pca->regmap)) {
		ret = PTR_ERR(pca->regmap);
		dev_err(&client->dev, "Failed to initialize register map: %d\n",
			ret);
		return ret;
	}

	i2c_set_clientdata(client, pca);

	/* Read-modify-write MODE2 according to the optional DT flags. */
	regmap_read(pca->regmap, PCA9685_MODE2, &mode2);

	if (of_property_read_bool(np, "invert"))
		mode2 |= MODE2_INVRT;
	else
		mode2 &= ~MODE2_INVRT;

	if (of_property_read_bool(np, "open-drain"))
		mode2 &= ~MODE2_OUTDRV;
	else
		mode2 |= MODE2_OUTDRV;

	regmap_write(pca->regmap, PCA9685_MODE2, mode2);

	/* clear all "full off" bits */
	regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_L, 0);
	regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_H, 0);

	pca->chip.ops = &pca9685_pwm_ops;
	/* add an extra channel for ALL_LED */
	pca->chip.npwm = PCA9685_MAXCHAN + 1;

	pca->chip.dev = &client->dev;
	pca->chip.base = -1;
	pca->chip.can_sleep = true;	/* register access goes over I2C */

	return pwmchip_add(&pca->chip);
}
/* Put the chip to sleep and unregister the PWM chip. */
static int pca9685_pwm_remove(struct i2c_client *client)
{
	struct pca9685 *pca = i2c_get_clientdata(client);

	regmap_update_bits(pca->regmap, PCA9685_MODE1, MODE1_SLEEP,
			   MODE1_SLEEP);

	return pwmchip_remove(&pca->chip);
}
static const struct i2c_device_id pca9685_id[] = {
{ "pca9685", 0 },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(i2c, pca9685_id);
static const struct of_device_id pca9685_dt_ids[] = {
{ .compatible = "nxp,pca9685-pwm", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pca9685_dt_ids);
static struct i2c_driver pca9685_i2c_driver = {
.driver = {
.name = "pca9685-pwm",
.owner = THIS_MODULE,
.of_match_table = pca9685_dt_ids,
},
.probe = pca9685_pwm_probe,
.remove = pca9685_pwm_remove,
.id_table = pca9685_id,
};
module_i2c_driver(pca9685_i2c_driver);
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("PWM driver for PCA9685");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Quintin-Z/rt-thread | components/external/libz/uncompr.c | 1707 | 2087 | /* uncompr.c -- decompress a memory buffer
* Copyright (C) 1995-2003 Jean-loup Gailly.
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/* @(#) $Id$ */
#define ZLIB_INTERNAL
#include "zlib.h"
/* ===========================================================================
Decompresses the source buffer into the destination buffer. sourceLen is
the byte length of the source buffer. Upon entry, destLen is the total
size of the destination buffer, which must be large enough to hold the
entire uncompressed data. (The size of the uncompressed data must have
been saved previously by the compressor and transmitted to the decompressor
by some mechanism outside the scope of this compression library.)
   Upon exit, destLen is the actual size of the uncompressed data.
This function can be used to decompress a whole file at once if the
input file is mmap'ed.
uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
enough memory, Z_BUF_ERROR if there was not enough room in the output
buffer, or Z_DATA_ERROR if the input data was corrupted.
*/
/*
 * Decompress the zlib stream in source[0..sourceLen) into dest.
 *
 * On entry *destLen holds the capacity of dest; on success it is
 * replaced with the number of bytes actually produced (total_out).
 *
 * Returns Z_OK on success, Z_BUF_ERROR if a size does not fit in a uInt
 * on a 16-bit build, Z_DATA_ERROR for corrupt/truncated input or a
 * missing preset dictionary, or any other error code from inflate.
 */
int ZEXPORT uncompress (dest, destLen, source, sourceLen)
    Bytef *dest;
    uLongf *destLen;
    const Bytef *source;
    uLong sourceLen;
{
    z_stream stream;
    int err;

    stream.next_in = (Bytef*)source;
    stream.avail_in = (uInt)sourceLen;
    /* Check for source > 64K on 16-bit machine: */
    if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR;

    stream.next_out = dest;
    stream.avail_out = (uInt)*destLen;
    if ((uLong)stream.avail_out != *destLen) return Z_BUF_ERROR;

    /* Use zlib's default allocator. */
    stream.zalloc = (alloc_func)0;
    stream.zfree = (free_func)0;

    err = inflateInit(&stream);
    if (err != Z_OK) return err;

    /* Single-shot decompress: anything short of Z_STREAM_END is an error. */
    err = inflate(&stream, Z_FINISH);
    if (err != Z_STREAM_END) {
        inflateEnd(&stream);
        /* Map truncated input / missing dictionary to Z_DATA_ERROR. */
        if (err == Z_NEED_DICT || (err == Z_BUF_ERROR && stream.avail_in == 0))
            return Z_DATA_ERROR;
        return err;
    }
    *destLen = stream.total_out;

    err = inflateEnd(&stream);
    return err;
}
| gpl-2.0 |
jamison904/android_kernel_samsung_trlte | sound/soc/codecs/dmic.c | 2219 | 2501 | /*
* dmic.c -- SoC audio for Generic Digital MICs
*
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
static struct snd_soc_dai_driver dmic_dai = {
.name = "dmic-hifi",
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 8,
.rates = SNDRV_PCM_RATE_CONTINUOUS,
.formats = SNDRV_PCM_FMTBIT_S32_LE
| SNDRV_PCM_FMTBIT_S24_LE
| SNDRV_PCM_FMTBIT_S16_LE,
},
};
static const struct snd_soc_dapm_widget dmic_dapm_widgets[] = {
SND_SOC_DAPM_AIF_OUT("DMIC AIF", "Capture", 0,
SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_INPUT("DMic"),
};
static const struct snd_soc_dapm_route intercon[] = {
{"DMIC AIF", NULL, "DMic"},
};
/*
 * Codec probe: register the DMIC DAPM widgets (an AIF capture output fed
 * by the "DMic" input pin) and wire them together.
 *
 * NOTE(review): the snd_soc_dapm_* return values are ignored, so a
 * registration failure would go unnoticed — confirm whether that is
 * acceptable here.
 */
static int dmic_probe(struct snd_soc_codec *codec)
{
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	snd_soc_dapm_new_controls(dapm, dmic_dapm_widgets,
			ARRAY_SIZE(dmic_dapm_widgets));
	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
	snd_soc_dapm_new_widgets(dapm);

	return 0;
}
/* Codec driver description: all setup happens in the probe callback. */
static struct snd_soc_codec_driver soc_dmic = {
	.probe	= dmic_probe,
};

/* Platform glue: register the codec with the ASoC core. */
static int dmic_dev_probe(struct platform_device *pdev)
{
	return snd_soc_register_codec(&pdev->dev,
			&soc_dmic, &dmic_dai, 1);
}

/* Platform glue: unregister the codec on device removal. */
static int dmic_dev_remove(struct platform_device *pdev)
{
	snd_soc_unregister_codec(&pdev->dev);
	return 0;
}
MODULE_ALIAS("platform:dmic-codec");
static struct platform_driver dmic_driver = {
.driver = {
.name = "dmic-codec",
.owner = THIS_MODULE,
},
.probe = dmic_dev_probe,
.remove = dmic_dev_remove,
};
module_platform_driver(dmic_driver);
MODULE_DESCRIPTION("Generic DMIC driver");
MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Myself5/android_kernel_sony_msm | sound/soc/codecs/wm8741.c | 2475 | 15914 | /*
* wm8741.c -- WM8741 ALSA SoC Audio driver
*
* Copyright 2010-1 Wolfson Microelectronics plc
*
* Author: Ian Lartey <ian@opensource.wolfsonmicro.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "wm8741.h"
#define WM8741_NUM_SUPPLIES 2
static const char *wm8741_supply_names[WM8741_NUM_SUPPLIES] = {
"AVDD",
"DVDD",
};
#define WM8741_NUM_RATES 6
/* codec private data */
struct wm8741_priv {
struct regmap *regmap;
struct regulator_bulk_data supplies[WM8741_NUM_SUPPLIES];
unsigned int sysclk;
struct snd_pcm_hw_constraint_list *sysclk_constraints;
};
static const struct reg_default wm8741_reg_defaults[] = {
{ 0, 0x0000 }, /* R0 - DACLLSB Attenuation */
{ 1, 0x0000 }, /* R1 - DACLMSB Attenuation */
{ 2, 0x0000 }, /* R2 - DACRLSB Attenuation */
{ 3, 0x0000 }, /* R3 - DACRMSB Attenuation */
{ 4, 0x0000 }, /* R4 - Volume Control */
{ 5, 0x000A }, /* R5 - Format Control */
{ 6, 0x0000 }, /* R6 - Filter Control */
{ 7, 0x0000 }, /* R7 - Mode Control 1 */
{ 8, 0x0002 }, /* R8 - Mode Control 2 */
{ 32, 0x0002 }, /* R32 - ADDITONAL_CONTROL_1 */
};
/*
 * regmap readable_reg callback: whitelist the WM8741 registers that may
 * be read back (attenuation, volume, format, filter, mode and the
 * additional control register).  Everything else is write-only.
 */
static bool wm8741_readable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case WM8741_DACLLSB_ATTENUATION:
	case WM8741_DACLMSB_ATTENUATION:
	case WM8741_DACRLSB_ATTENUATION:
	case WM8741_DACRMSB_ATTENUATION:
	case WM8741_VOLUME_CONTROL:
	case WM8741_FORMAT_CONTROL:
	case WM8741_FILTER_CONTROL:
	case WM8741_MODE_CONTROL_1:
	case WM8741_MODE_CONTROL_2:
	case WM8741_ADDITIONAL_CONTROL_1:
		return true;
	default:
		return false;
	}
}
static int wm8741_reset(struct snd_soc_codec *codec)
{
return snd_soc_write(codec, WM8741_RESET, 0);
}
static const DECLARE_TLV_DB_SCALE(dac_tlv_fine, -12700, 13, 0);
static const DECLARE_TLV_DB_SCALE(dac_tlv, -12700, 400, 0);
static const struct snd_kcontrol_new wm8741_snd_controls[] = {
SOC_DOUBLE_R_TLV("Fine Playback Volume", WM8741_DACLLSB_ATTENUATION,
WM8741_DACRLSB_ATTENUATION, 1, 255, 1, dac_tlv_fine),
SOC_DOUBLE_R_TLV("Playback Volume", WM8741_DACLMSB_ATTENUATION,
WM8741_DACRMSB_ATTENUATION, 0, 511, 1, dac_tlv),
};
static const struct snd_soc_dapm_widget wm8741_dapm_widgets[] = {
SND_SOC_DAPM_DAC("DACL", "Playback", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("DACR", "Playback", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_OUTPUT("VOUTLP"),
SND_SOC_DAPM_OUTPUT("VOUTLN"),
SND_SOC_DAPM_OUTPUT("VOUTRP"),
SND_SOC_DAPM_OUTPUT("VOUTRN"),
};
static const struct snd_soc_dapm_route wm8741_dapm_routes[] = {
{ "VOUTLP", NULL, "DACL" },
{ "VOUTLN", NULL, "DACL" },
{ "VOUTRP", NULL, "DACR" },
{ "VOUTRN", NULL, "DACR" },
};
static struct {
int value;
int ratio;
} lrclk_ratios[WM8741_NUM_RATES] = {
{ 1, 128 },
{ 2, 192 },
{ 3, 256 },
{ 4, 384 },
{ 5, 512 },
{ 6, 768 },
};
static unsigned int rates_11289[] = {
44100, 88235,
};
static struct snd_pcm_hw_constraint_list constraints_11289 = {
.count = ARRAY_SIZE(rates_11289),
.list = rates_11289,
};
static unsigned int rates_12288[] = {
32000, 48000, 96000,
};
static struct snd_pcm_hw_constraint_list constraints_12288 = {
.count = ARRAY_SIZE(rates_12288),
.list = rates_12288,
};
static unsigned int rates_16384[] = {
32000,
};
static struct snd_pcm_hw_constraint_list constraints_16384 = {
.count = ARRAY_SIZE(rates_16384),
.list = rates_16384,
};
static unsigned int rates_16934[] = {
44100, 88235,
};
static struct snd_pcm_hw_constraint_list constraints_16934 = {
.count = ARRAY_SIZE(rates_16934),
.list = rates_16934,
};
static unsigned int rates_18432[] = {
48000, 96000,
};
static struct snd_pcm_hw_constraint_list constraints_18432 = {
.count = ARRAY_SIZE(rates_18432),
.list = rates_18432,
};
/*
 * Sample rates usable from a 22.5792 / 33.8688 MHz MCLK.
 * Fix: the 176.4 kHz entry was mistyped as 1764000 (1.764 MHz), which
 * would never match a real stream rate.
 */
static unsigned int rates_22579[] = {
	44100, 88235, 176400
};
static struct snd_pcm_hw_constraint_list constraints_22579 = {
.count = ARRAY_SIZE(rates_22579),
.list = rates_22579,
};
static unsigned int rates_24576[] = {
32000, 48000, 96000, 192000
};
static struct snd_pcm_hw_constraint_list constraints_24576 = {
.count = ARRAY_SIZE(rates_24576),
.list = rates_24576,
};
/*
 * Sample rates usable from a 36.864 MHz MCLK.
 * Fix: the 192 kHz entry was mistyped as 19200 (19.2 kHz), which is not
 * an MCLK/ratio-reachable rate for this clock.
 */
static unsigned int rates_36864[] = {
	48000, 96000, 192000
};
static struct snd_pcm_hw_constraint_list constraints_36864 = {
.count = ARRAY_SIZE(rates_36864),
.list = rates_36864,
};
/*
 * DAI startup: restrict the runtime's sample-rate list to the set the
 * configured MCLK can support.  set_sysclk() must have been called
 * first, otherwise no constraint table exists and we fail.
 */
static int wm8741_startup(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);

	/* The set of sample rates that can be supported depends on the
	 * MCLK supplied to the CODEC - enforce this.
	 */
	if (!wm8741->sysclk) {
		dev_err(codec->dev,
			"No MCLK configured, call set_sysclk() on init\n");
		return -EINVAL;
	}

	snd_pcm_hw_constraint_list(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_RATE,
				   wm8741->sysclk_constraints);

	return 0;
}
/*
 * hw_params: verify the MCLK/LRCLK ratio is one of the supported values
 * and program the word-length bits of FORMAT_CONTROL for the stream's
 * sample format.
 */
static int wm8741_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);
	/* Mask 0x1FC keeps everything except the two word-length bits. */
	u16 iface = snd_soc_read(codec, WM8741_FORMAT_CONTROL) & 0x1FC;
	int i;

	/* Find a supported LRCLK ratio */
	for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) {
		if (wm8741->sysclk / params_rate(params) ==
		    lrclk_ratios[i].ratio)
			break;
	}

	/* Should never happen, should be handled by constraints */
	if (i == ARRAY_SIZE(lrclk_ratios)) {
		dev_err(codec->dev, "MCLK/fs ratio %d unsupported\n",
			wm8741->sysclk / params_rate(params));
		return -EINVAL;
	}

	/* bit size */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		iface |= 0x0001;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		iface |= 0x0002;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		iface |= 0x0003;
		break;
	default:
		dev_dbg(codec->dev, "wm8741_hw_params: Unsupported bit size param = %d",
			params_format(params));
		return -EINVAL;
	}

	dev_dbg(codec->dev, "wm8741_hw_params: bit size param = %d",
		params_format(params));

	snd_soc_write(codec, WM8741_FORMAT_CONTROL, iface);

	return 0;
}
/*
 * Record the MCLK frequency and select the matching sample-rate
 * constraint table.  Frequencies not in the supported set are rejected
 * with -EINVAL.
 */
static int wm8741_set_dai_sysclk(struct snd_soc_dai *codec_dai,
		int clk_id, unsigned int freq, int dir)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);
	struct snd_pcm_hw_constraint_list *list;

	dev_dbg(codec->dev, "wm8741_set_dai_sysclk info: freq=%dHz\n", freq);

	/* Map the MCLK frequency to its rate-constraint table. */
	switch (freq) {
	case 11289600:
		list = &constraints_11289;
		break;
	case 12288000:
		list = &constraints_12288;
		break;
	case 16384000:
		list = &constraints_16384;
		break;
	case 16934400:
		list = &constraints_16934;
		break;
	case 18432000:
		list = &constraints_18432;
		break;
	case 22579200:
	case 33868800:
		list = &constraints_22579;
		break;
	case 24576000:
		list = &constraints_24576;
		break;
	case 36864000:
		list = &constraints_36864;
		break;
	default:
		return -EINVAL;
	}

	wm8741->sysclk_constraints = list;
	wm8741->sysclk = freq;
	return 0;
}
/*
 * Configure the DAI format bits of FORMAT_CONTROL: audio interface
 * format (I2S/right-J/left-J/DSP A/DSP B) and BCLK/LRCLK inversion.
 * Only slave mode (codec clocked externally, CBS_CFS) is accepted.
 */
static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
		unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	/* Mask 0x1C3 clears the format and inversion fields, keeps the rest. */
	u16 iface = snd_soc_read(codec, WM8741_FORMAT_CONTROL) & 0x1C3;

	/* check master/slave audio interface */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		break;
	default:
		return -EINVAL;
	}

	/* interface format */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		iface |= 0x0008;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		iface |= 0x0004;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		iface |= 0x000C;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		iface |= 0x001C;
		break;
	default:
		return -EINVAL;
	}

	/* clock inversion */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_IB_IF:
		iface |= 0x0010;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		iface |= 0x0020;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		iface |= 0x0030;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(codec->dev, "wm8741_set_dai_fmt:    Format=%x, Clock Inv=%x\n",
				fmt & SND_SOC_DAIFMT_FORMAT_MASK,
				((fmt & SND_SOC_DAIFMT_INV_MASK)));

	snd_soc_write(codec, WM8741_FORMAT_CONTROL, iface);
	return 0;
}
#define WM8741_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | \
SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | \
SNDRV_PCM_RATE_192000)
#define WM8741_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dai_ops wm8741_dai_ops = {
.startup = wm8741_startup,
.hw_params = wm8741_hw_params,
.set_sysclk = wm8741_set_dai_sysclk,
.set_fmt = wm8741_set_dai_fmt,
};
static struct snd_soc_dai_driver wm8741_dai = {
.name = "wm8741",
.playback = {
.stream_name = "Playback",
.channels_min = 2, /* Mono modes not yet supported */
.channels_max = 2,
.rates = WM8741_RATES,
.formats = WM8741_FORMATS,
},
.ops = &wm8741_dai_ops,
};
#ifdef CONFIG_PM
static int wm8741_resume(struct snd_soc_codec *codec)
{
snd_soc_cache_sync(codec);
return 0;
}
#else
#define wm8741_suspend NULL
#define wm8741_resume NULL
#endif
/*
 * Codec probe: power up the AVDD/DVDD supplies, attach the regmap-backed
 * cache I/O, reset the chip, and enable volume-update latching on all
 * four attenuation registers so left/right updates take effect together.
 *
 * Uses goto-based unwind: supplies are disabled again on any failure
 * after they were enabled.
 */
static int wm8741_probe(struct snd_soc_codec *codec)
{
	struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);
	int ret = 0;

	ret = regulator_bulk_enable(ARRAY_SIZE(wm8741->supplies),
				    wm8741->supplies);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
		goto err_get;
	}

	ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
		goto err_enable;
	}

	ret = wm8741_reset(codec);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to issue reset\n");
		goto err_enable;
	}

	/* Change some default settings - latch VU */
	snd_soc_update_bits(codec, WM8741_DACLLSB_ATTENUATION,
			    WM8741_UPDATELL, WM8741_UPDATELL);
	snd_soc_update_bits(codec, WM8741_DACLMSB_ATTENUATION,
			    WM8741_UPDATELM, WM8741_UPDATELM);
	snd_soc_update_bits(codec, WM8741_DACRLSB_ATTENUATION,
			    WM8741_UPDATERL, WM8741_UPDATERL);
	snd_soc_update_bits(codec, WM8741_DACRMSB_ATTENUATION,
			    WM8741_UPDATERM, WM8741_UPDATERM);

	dev_dbg(codec->dev, "Successful registration\n");
	return ret;

err_enable:
	regulator_bulk_disable(ARRAY_SIZE(wm8741->supplies), wm8741->supplies);
err_get:
	return ret;
}
static int wm8741_remove(struct snd_soc_codec *codec)
{
struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);
regulator_bulk_disable(ARRAY_SIZE(wm8741->supplies), wm8741->supplies);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8741 = {
.probe = wm8741_probe,
.remove = wm8741_remove,
.resume = wm8741_resume,
.controls = wm8741_snd_controls,
.num_controls = ARRAY_SIZE(wm8741_snd_controls),
.dapm_widgets = wm8741_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8741_dapm_widgets),
.dapm_routes = wm8741_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(wm8741_dapm_routes),
};
static const struct of_device_id wm8741_of_match[] = {
{ .compatible = "wlf,wm8741", },
{ }
};
MODULE_DEVICE_TABLE(of, wm8741_of_match);
static const struct regmap_config wm8741_regmap = {
.reg_bits = 7,
.val_bits = 9,
.max_register = WM8741_MAX_REGISTER,
.reg_defaults = wm8741_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8741_reg_defaults),
.cache_type = REGCACHE_RBTREE,
.readable_reg = wm8741_readable,
};
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
static int wm8741_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8741_priv *wm8741;
int ret, i;
wm8741 = devm_kzalloc(&i2c->dev, sizeof(struct wm8741_priv),
GFP_KERNEL);
if (wm8741 == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(wm8741->supplies); i++)
wm8741->supplies[i].supply = wm8741_supply_names[i];
ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm8741->supplies),
wm8741->supplies);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
return ret;
}
wm8741->regmap = devm_regmap_init_i2c(i2c, &wm8741_regmap);
if (IS_ERR(wm8741->regmap)) {
ret = PTR_ERR(wm8741->regmap);
dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret);
return ret;
}
i2c_set_clientdata(i2c, wm8741);
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8741, &wm8741_dai, 1);
return ret;
}
static int wm8741_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
}
static const struct i2c_device_id wm8741_i2c_id[] = {
{ "wm8741", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8741_i2c_id);
static struct i2c_driver wm8741_i2c_driver = {
.driver = {
.name = "wm8741",
.owner = THIS_MODULE,
.of_match_table = wm8741_of_match,
},
.probe = wm8741_i2c_probe,
.remove = wm8741_i2c_remove,
.id_table = wm8741_i2c_id,
};
#endif
#if defined(CONFIG_SPI_MASTER)
static int wm8741_spi_probe(struct spi_device *spi)
{
struct wm8741_priv *wm8741;
int ret, i;
wm8741 = devm_kzalloc(&spi->dev, sizeof(struct wm8741_priv),
GFP_KERNEL);
if (wm8741 == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(wm8741->supplies); i++)
wm8741->supplies[i].supply = wm8741_supply_names[i];
ret = devm_regulator_bulk_get(&spi->dev, ARRAY_SIZE(wm8741->supplies),
wm8741->supplies);
if (ret != 0) {
dev_err(&spi->dev, "Failed to request supplies: %d\n", ret);
return ret;
}
wm8741->regmap = devm_regmap_init_spi(spi, &wm8741_regmap);
if (IS_ERR(wm8741->regmap)) {
ret = PTR_ERR(wm8741->regmap);
dev_err(&spi->dev, "Failed to init regmap: %d\n", ret);
return ret;
}
spi_set_drvdata(spi, wm8741);
ret = snd_soc_register_codec(&spi->dev,
&soc_codec_dev_wm8741, &wm8741_dai, 1);
return ret;
}
static int wm8741_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
}
static struct spi_driver wm8741_spi_driver = {
.driver = {
.name = "wm8741",
.owner = THIS_MODULE,
.of_match_table = wm8741_of_match,
},
.probe = wm8741_spi_probe,
.remove = wm8741_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
static int __init wm8741_modinit(void)
{
int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
ret = i2c_add_driver(&wm8741_i2c_driver);
if (ret != 0)
pr_err("Failed to register WM8741 I2C driver: %d\n", ret);
#endif
#if defined(CONFIG_SPI_MASTER)
ret = spi_register_driver(&wm8741_spi_driver);
if (ret != 0) {
printk(KERN_ERR "Failed to register wm8741 SPI driver: %d\n",
ret);
}
#endif
return ret;
}
module_init(wm8741_modinit);
static void __exit wm8741_exit(void)
{
#if defined(CONFIG_SPI_MASTER)
spi_unregister_driver(&wm8741_spi_driver);
#endif
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
i2c_del_driver(&wm8741_i2c_driver);
#endif
}
module_exit(wm8741_exit);
MODULE_DESCRIPTION("ASoC WM8741 driver");
MODULE_AUTHOR("Ian Lartey <ian@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
thicklizard/AwesomeSauce1_1 | arch/arm/mach-pxa/am200epd.c | 2987 | 9659 | /*
* am200epd.c -- Platform device for AM200 EPD kit
*
* Copyright (C) 2008, Jaya Kumar
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
*
* This work was made possible by help and equipment support from E-Ink
* Corporation. http://support.eink.com/community
*
* This driver is written to be used with the Metronome display controller.
* on the AM200 EPD prototype kit/development kit with an E-Ink 800x600
* Vizplex EPD on a Gumstix board using the Lyre interface board.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <mach/pxa25x.h>
#include <mach/gumstix.h>
#include <mach/pxafb.h>
#include "generic.h"
#include <video/metronomefb.h>
static unsigned int panel_type = 6;
static struct platform_device *am200_device;
static struct metronome_board am200_board;
static struct pxafb_mode_info am200_fb_mode_9inch7 = {
.pixclock = 40000,
.xres = 1200,
.yres = 842,
.bpp = 16,
.hsync_len = 2,
.left_margin = 2,
.right_margin = 2,
.vsync_len = 1,
.upper_margin = 2,
.lower_margin = 25,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
static struct pxafb_mode_info am200_fb_mode_8inch = {
.pixclock = 40000,
.xres = 1088,
.yres = 791,
.bpp = 16,
.hsync_len = 28,
.left_margin = 8,
.right_margin = 30,
.vsync_len = 8,
.upper_margin = 10,
.lower_margin = 8,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
static struct pxafb_mode_info am200_fb_mode_6inch = {
.pixclock = 40189,
.xres = 832,
.yres = 622,
.bpp = 16,
.hsync_len = 28,
.left_margin = 34,
.right_margin = 34,
.vsync_len = 25,
.upper_margin = 0,
.lower_margin = 2,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
static struct pxafb_mach_info am200_fb_info = {
.modes = &am200_fb_mode_6inch,
.num_modes = 1,
.lcd_conn = LCD_TYPE_COLOR_TFT | LCD_PCLK_EDGE_FALL |
LCD_AC_BIAS_FREQ(24),
};
/* register offsets for gpio control */
#define LED_GPIO_PIN 51
#define STDBY_GPIO_PIN 48
#define RST_GPIO_PIN 49
#define RDY_GPIO_PIN 32
#define ERR_GPIO_PIN 17
#define PCBPWR_GPIO_PIN 16
static int gpios[] = { LED_GPIO_PIN , STDBY_GPIO_PIN , RST_GPIO_PIN,
RDY_GPIO_PIN, ERR_GPIO_PIN, PCBPWR_GPIO_PIN };
static char *gpio_names[] = { "LED" , "STDBY" , "RST", "RDY", "ERR", "PCBPWR" };
/*
 * Request every GPIO in the gpios[] table and set its direction.
 * On a request failure, release the GPIOs claimed so far and return
 * the error.  The metronomefb par argument is unused here.
 */
static int am200_init_gpio_regs(struct metronomefb_par *par)
{
	int idx = 0;
	int rc;

	while (idx < ARRAY_SIZE(gpios)) {
		rc = gpio_request(gpios[idx], gpio_names[idx]);
		if (rc) {
			dev_err(&am200_device->dev, "failed requesting "
				"gpio %s, err=%d\n", gpio_names[idx], rc);
			goto fail_undo;
		}
		idx++;
	}

	/* drive the control lines low, leave the status lines as inputs */
	gpio_direction_output(LED_GPIO_PIN, 0);
	gpio_direction_output(STDBY_GPIO_PIN, 0);
	gpio_direction_output(RST_GPIO_PIN, 0);
	gpio_direction_input(RDY_GPIO_PIN);
	gpio_direction_input(ERR_GPIO_PIN);
	gpio_direction_output(PCBPWR_GPIO_PIN, 0);

	return 0;

fail_undo:
	/* free only the entries successfully requested before the failure */
	while (--idx >= 0)
		gpio_free(gpios[idx]);
	return rc;
}
static void am200_cleanup(struct metronomefb_par *par)
{
int i;
free_irq(IRQ_GPIO(RDY_GPIO_PIN), par);
for (i = 0; i < ARRAY_SIZE(gpios); i++)
gpio_free(gpios[i]);
}
static int am200_share_video_mem(struct fb_info *info)
{
/* rough check if this is our desired fb and not something else */
if ((info->var.xres != am200_fb_info.modes->xres)
|| (info->var.yres != am200_fb_info.modes->yres))
return 0;
/* we've now been notified that we have our new fb */
am200_board.metromem = info->screen_base;
am200_board.host_fbinfo = info;
/* try to refcount host drv since we are the consumer after this */
if (!try_module_get(info->fbops->owner))
return -ENODEV;
return 0;
}
/*
 * Called when a framebuffer unregisters.  Drop the module reference
 * taken in am200_share_video_mem(), but only if this is the fb we
 * actually latched onto.
 */
static int am200_unshare_video_mem(struct fb_info *info)
{
	dev_dbg(&am200_device->dev, "ENTER %s\n", __func__);

	if (info == am200_board.host_fbinfo)
		module_put(am200_board.host_fbinfo->fbops->owner);

	return 0;
}
/*
 * Framebuffer-event notifier: dispatch register/unregister events to
 * the share/unshare handlers; all other events are ignored.
 */
static int am200_fb_notifier_callback(struct notifier_block *self,
				 unsigned long event, void *data)
{
	struct fb_info *info = ((struct fb_event *)data)->info;
	int ret = 0;

	dev_dbg(&am200_device->dev, "ENTER %s\n", __func__);

	switch (event) {
	case FB_EVENT_FB_REGISTERED:
		ret = am200_share_video_mem(info);
		break;
	case FB_EVENT_FB_UNREGISTERED:
		ret = am200_unshare_video_mem(info);
		break;
	}

	return ret;
}
static struct notifier_block am200_fb_notif = {
.notifier_call = am200_fb_notifier_callback,
};
/* this gets called as part of our init. these steps must be done now so
 * that we can use pxa_set_fb_info.  It selects the panel mode from the
 * panel_type module parameter, computes how much extra framebuffer memory
 * metronomefb needs (command + waveform + padding + image), and then
 * inflates the advertised yres so pxafb allocates that much. */
static void __init am200_presetup_fb(void)
{
	int fw;		/* frame width in bytes (one scanline) */
	int fh;		/* frame height in lines */
	int padding_size;
	int totalsize;

	switch (panel_type) {
	case 6:
		am200_fb_info.modes = &am200_fb_mode_6inch;
		break;
	case 8:
		am200_fb_info.modes = &am200_fb_mode_8inch;
		break;
	case 97:
		am200_fb_info.modes = &am200_fb_mode_9inch7;
		break;
	default:
		/* unknown module parameter value: warn and fall back to 6" */
		dev_err(&am200_device->dev, "invalid panel_type selection,"
						" setting to 6\n");
		am200_fb_info.modes = &am200_fb_mode_6inch;
		break;
	}

	/* the frame buffer is divided as follows:
	command | CRC | padding
	16kb waveform data | CRC | padding
	image data | CRC
	*/

	fw = am200_fb_info.modes->xres;
	fh = am200_fb_info.modes->yres;

	/* waveform must be 16k + 2 for checksum */
	am200_board.wfm_size = roundup(16*1024 + 2, fw);

	padding_size = PAGE_SIZE + (4 * fw);

	/* total is 1 cmd , 1 wfm, padding and image */
	totalsize = fw + am200_board.wfm_size + padding_size + (fw*fh);

	/* save this off because we're manipulating fw after this and
	 * we'll need it when we're ready to setup the framebuffer */
	am200_board.fw = fw;
	am200_board.fh = fh;

	/* the reason we do this adjustment is because we want to acquire
	 * more framebuffer memory without imposing custom awareness on the
	 * underlying pxafb driver */
	am200_fb_info.modes->yres = DIV_ROUND_UP(totalsize, fw);

	/* we divide since we told the LCD controller we're 16bpp */
	am200_fb_info.modes->xres /= 2;

	pxa_set_fb_info(NULL, &am200_fb_info);
}
/* this gets called by metronomefb as part of its init, in our case, we
 * have already completed initial framebuffer init in presetup_fb so we
 * can just setup the fb access pointers.  Layout within the shared
 * memory is: [command block (fw bytes)] [waveform (wfm_size)] [image
 * (fw*fh)] [image checksum]. */
static int am200_setup_fb(struct metronomefb_par *par)
{
	int fw;		/* scanline width saved by am200_presetup_fb() */
	int fh;		/* panel height saved by am200_presetup_fb() */

	fw = am200_board.fw;
	fh = am200_board.fh;

	/* metromem was set up by the notifier in share_video_mem so now
	 * we can use its value to calculate the other entries */
	par->metromem_cmd = (struct metromem_cmd *) am200_board.metromem;
	par->metromem_wfm = am200_board.metromem + fw;
	par->metromem_img = par->metromem_wfm + am200_board.wfm_size;
	par->metromem_img_csum = (u16 *) (par->metromem_img + (fw * fh));
	/* DMA address comes straight from the host fb's physical start */
	par->metromem_dma = am200_board.host_fbinfo->fix.smem_start;

	return 0;
}
static int am200_get_panel_type(void)
{
return panel_type;
}
static irqreturn_t am200_handle_irq(int irq, void *dev_id)
{
struct metronomefb_par *par = dev_id;
wake_up_interruptible(&par->waitq);
return IRQ_HANDLED;
}
/*
 * Hook the falling edge of the RDY GPIO so am200_handle_irq() can wake
 * metronomefb waiters.  Returns the request_irq() result.
 */
static int am200_setup_irq(struct fb_info *info)
{
	int rc = request_irq(IRQ_GPIO(RDY_GPIO_PIN), am200_handle_irq,
				IRQF_DISABLED|IRQF_TRIGGER_FALLING,
				"AM200", info->par);

	if (rc)
		dev_err(&am200_device->dev, "request_irq failed: %d\n", rc);

	return rc;
}
static void am200_set_rst(struct metronomefb_par *par, int state)
{
gpio_set_value(RST_GPIO_PIN, state);
}
static void am200_set_stdby(struct metronomefb_par *par, int state)
{
gpio_set_value(STDBY_GPIO_PIN, state);
}
static int am200_wait_event(struct metronomefb_par *par)
{
return wait_event_timeout(par->waitq, gpio_get_value(RDY_GPIO_PIN), HZ);
}
static int am200_wait_event_intr(struct metronomefb_par *par)
{
return wait_event_interruptible_timeout(par->waitq,
gpio_get_value(RDY_GPIO_PIN), HZ);
}
static struct metronome_board am200_board = {
.owner = THIS_MODULE,
.setup_irq = am200_setup_irq,
.setup_io = am200_init_gpio_regs,
.setup_fb = am200_setup_fb,
.set_rst = am200_set_rst,
.set_stdby = am200_set_stdby,
.met_wait_event = am200_wait_event,
.met_wait_event_intr = am200_wait_event_intr,
.get_panel_type = am200_get_panel_type,
.cleanup = am200_cleanup,
};
static unsigned long am200_pin_config[] __initdata = {
GPIO51_GPIO,
GPIO49_GPIO,
GPIO48_GPIO,
GPIO32_GPIO,
GPIO17_GPIO,
GPIO16_GPIO,
};
/*
 * am200_init() - board-level bring-up for the AM200 EPD kit.
 *
 * Registers the fb notifier (so we catch the pxafb registration),
 * configures the pin mux, loads metronomefb, creates and adds the
 * platform device, then sizes and registers the framebuffer.
 *
 * Fixes: the return value of platform_device_add_data() (which can
 * fail with -ENOMEM) was silently ignored, and the fb notifier was
 * left registered when platform_device_alloc() failed.  Both error
 * paths now unwind fully.
 */
int __init am200_init(void)
{
	int ret;

	/* before anything else, we request notification for any fb
	 * creation events */
	fb_register_client(&am200_fb_notif);

	pxa2xx_mfp_config(ARRAY_AND_SIZE(am200_pin_config));

	/* request our platform independent driver */
	request_module("metronomefb");

	am200_device = platform_device_alloc("metronomefb", -1);
	if (!am200_device) {
		ret = -ENOMEM;
		goto err_unregister;
	}

	/* the am200_board that will be seen by metronomefb is a copy */
	ret = platform_device_add_data(am200_device, &am200_board,
					sizeof(am200_board));
	if (ret)
		goto err_put;

	/* this _add binds metronomefb to am200. metronomefb refcounts am200 */
	ret = platform_device_add(am200_device);
	if (ret)
		goto err_put;

	am200_presetup_fb();

	return 0;

err_put:
	platform_device_put(am200_device);
err_unregister:
	fb_unregister_client(&am200_fb_notif);
	return ret;
}
module_param(panel_type, uint, 0);
MODULE_PARM_DESC(panel_type, "Select the panel type: 6, 8, 97");
MODULE_DESCRIPTION("board driver for am200 metronome epd kit");
MODULE_AUTHOR("Jaya Kumar");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ChronoMonochrome/Chrono_Kernel | drivers/hid/hid-quanta.c | 3243 | 6462 | /*
* HID driver for Quanta Optical Touch dual-touch panels
*
* Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
*
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
MODULE_DESCRIPTION("Quanta dual-touch panel");
MODULE_LICENSE("GPL");
#include "hid-ids.h"
struct quanta_data {
__u16 x, y;
__u8 id;
bool valid; /* valid finger data, or just placeholder? */
bool first; /* is this the first finger in this frame? */
bool activity_now; /* at least one active finger in this frame? */
bool activity; /* at least one active finger previously? */
};
/*
 * Map HID usages to input events for the Quanta panel.
 *
 * Return values follow the hid-core input_mapping contract: 1 = we
 * mapped the usage ourselves, -1 = ignore the usage entirely,
 * 0 = let generic hidinput handle it.
 */
static int quanta_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	switch (usage->hid & HID_USAGE_PAGE) {

	case HID_UP_GENDESK:
		switch (usage->hid) {
		case HID_GD_X:
			hid_map_usage(hi, usage, bit, max,
					EV_ABS, ABS_MT_POSITION_X);
			/* touchscreen emulation */
			input_set_abs_params(hi->input, ABS_X,
						field->logical_minimum,
						field->logical_maximum, 0, 0);
			return 1;
		case HID_GD_Y:
			hid_map_usage(hi, usage, bit, max,
					EV_ABS, ABS_MT_POSITION_Y);
			/* touchscreen emulation */
			input_set_abs_params(hi->input, ABS_Y,
						field->logical_minimum,
						field->logical_maximum, 0, 0);
			return 1;
		}
		return 0;

	case HID_UP_DIGITIZER:
		switch (usage->hid) {
		/* digitizer usages we consume in quanta_event() or have no
		 * use for; suppress their generic mapping */
		case HID_DG_CONFIDENCE:
		case HID_DG_TIPSWITCH:
		case HID_DG_INPUTMODE:
		case HID_DG_DEVICEINDEX:
		case HID_DG_CONTACTCOUNT:
		case HID_DG_CONTACTMAX:
		case HID_DG_TIPPRESSURE:
		case HID_DG_WIDTH:
		case HID_DG_HEIGHT:
			return -1;
		case HID_DG_INRANGE:
			/* touchscreen emulation */
			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
			return 1;

		case HID_DG_CONTACTID:
			hid_map_usage(hi, usage, bit, max,
					EV_ABS, ABS_MT_TRACKING_ID);
			return 1;

		}
		return 0;

	case 0xff000000:
		/* ignore vendor-specific features */
		return -1;
	}

	return 0;
}
/*
 * Post-mapping hook: for key and absolute-axis usages, clear the
 * capability bit that hid-core just set, since this driver emits
 * those events itself from quanta_event().
 */
static int quanta_input_mapped(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	int ours = (usage->type == EV_KEY) || (usage->type == EV_ABS);

	if (ours)
		clear_bit(usage->code, *bit);

	return 0;
}
/*
 * this function is called when a whole finger has been parsed,
 * so that it can decide what to send to the input layer.
 *
 * It emits one MT contact (tracking id, x, y + mt_sync) per valid
 * finger, and maintains the single-touch emulation state machine:
 * BTN_TOUCH press on the first active finger of a frame, BTN_TOUCH
 * release when a frame ends with no valid fingers after previous
 * activity.
 */
static void quanta_filter_event(struct quanta_data *td, struct input_dev *input)
{
	/* toggle: fingers arrive in pairs, so 'first' alternates between
	 * the first and second finger of the current frame */
	td->first = !td->first; /* touchscreen emulation */

	if (!td->valid) {
		/*
		 * touchscreen emulation: if no finger in this frame is valid
		 * and there previously was finger activity, this is a release
		 */
		if (!td->first && !td->activity_now && td->activity) {
			input_event(input, EV_KEY, BTN_TOUCH, 0);
			td->activity = false;
		}
		return;
	}

	input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
	input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
	input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);

	input_mt_sync(input);
	/* consume this finger; next HID_DG_INRANGE sets it again */
	td->valid = false;

	/* touchscreen emulation: if first active finger in this frame... */
	if (!td->activity_now) {
		/* if there was no previous activity, emit touch event */
		if (!td->activity) {
			input_event(input, EV_KEY, BTN_TOUCH, 1);
			td->activity = true;
		}
		td->activity_now = true;

		/* and in any case this is our preferred finger */
		input_event(input, EV_ABS, ABS_X, td->x);
		input_event(input, EV_ABS, ABS_Y, td->y);
	}
}
static int quanta_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct quanta_data *td = hid_get_drvdata(hid);
if (hid->claimed & HID_CLAIMED_INPUT) {
struct input_dev *input = field->hidinput->input;
switch (usage->hid) {
case HID_DG_INRANGE:
td->valid = !!value;
break;
case HID_GD_X:
td->x = value;
break;
case HID_GD_Y:
td->y = value;
quanta_filter_event(td, input);
break;
case HID_DG_CONTACTID:
td->id = value;
break;
case HID_DG_CONTACTCOUNT:
/* touch emulation: this is the last field in a frame */
td->first = false;
td->activity_now = false;
break;
case HID_DG_CONFIDENCE:
case HID_DG_TIPSWITCH:
/* avoid interference from generic hidinput handling */
break;
default:
/* fallback to the generic hidinput handling */
return 0;
}
}
/* we have handled the hidinput part, now remains hiddev */
if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
hid->hiddev_hid_event(hid, field, usage, value);
return 1;
}
static int quanta_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
struct quanta_data *td;
td = kmalloc(sizeof(struct quanta_data), GFP_KERNEL);
if (!td) {
hid_err(hdev, "cannot allocate Quanta Touch data\n");
return -ENOMEM;
}
td->valid = false;
td->activity = false;
td->activity_now = false;
td->first = false;
hid_set_drvdata(hdev, td);
ret = hid_parse(hdev);
if (!ret)
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret)
kfree(td);
return ret;
}
/*
 * quanta_remove() - stop the HID device and release the per-device
 * state allocated by quanta_probe().
 */
static void quanta_remove(struct hid_device *hdev)
{
	struct quanta_data *td = hid_get_drvdata(hdev);

	hid_hw_stop(hdev);
	kfree(td);
	hid_set_drvdata(hdev, NULL);
}
static const struct hid_device_id quanta_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
{ }
};
MODULE_DEVICE_TABLE(hid, quanta_devices);
static const struct hid_usage_id quanta_grabbed_usages[] = {
{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
};
static struct hid_driver quanta_driver = {
.name = "quanta-touch",
.id_table = quanta_devices,
.probe = quanta_probe,
.remove = quanta_remove,
.input_mapping = quanta_input_mapping,
.input_mapped = quanta_input_mapped,
.usage_table = quanta_grabbed_usages,
.event = quanta_event,
};
static int __init quanta_init(void)
{
return hid_register_driver(&quanta_driver);
}
static void __exit quanta_exit(void)
{
hid_unregister_driver(&quanta_driver);
}
module_init(quanta_init);
module_exit(quanta_exit);
| gpl-2.0 |
mythos234/AndromedaCANCRO-KK | arch/arm/perfmon/per-process-perf.c | 3499 | 31060 | /* Copyright (c) 2010, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
per-process_perf
DESCRIPTION
Capture the processor performances registers when the process context
switches. The /proc file system is used to control and access the results
of the performance counters.
Each time a process is context switched, the performance counters for
the Snoop Control Unit and the standard ARM counters are set according
to the values stored for that process.
The events to capture per process are set in the /proc/ppPerf/settings
directory.
EXTERNALIZED FUNCTIONS
INITIALIZATION AND SEQUENCING REQUIREMENTS
Detail how to initialize and use this service. The sequencing aspect
is only needed if the order of operations is important.
*/
/*
INCLUDE FILES FOR MODULE
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/time.h>
#include "linux/proc_fs.h"
#include "linux/kernel_stat.h"
#include <asm/thread_notify.h>
#include "asm/uaccess.h"
#include "cp15_registers.h"
#include "l2_cp15_registers.h"
#include <asm/perftypes.h>
#include "per-axi.h"
#include "perf.h"
#define DEBUG_SWAPIO
#ifdef DEBUG_SWAPIO
#define MR_SIZE 1024
#define PM_PP_ERR -1
struct mark_data_s {
long c;
long cpu;
unsigned long pid_old;
unsigned long pid_new;
};
struct mark_data_s markRay[MR_SIZE] __attribute__((aligned(16)));
int mrcnt;
DEFINE_SPINLOCK(_mark_lock);
/*
 * MARKPIDS() - record a context-switch trace entry in the markRay ring.
 *
 * Fix: the original took _mark_lock only around the mrcnt increment and
 * then wrote to markRay[mrcnt] after unlocking; a second CPU could
 * advance mrcnt in between, so two CPUs ended up scribbling on the same
 * slot.  The slot index is now captured while the lock is still held.
 */
static inline void MARKPIDS(char a, int opid, int npid)
{
	int cpu = smp_processor_id();
	int slot;

	if (opid == 0)
		return;

	/* reserve a ring slot atomically */
	spin_lock(&_mark_lock);
	if (++mrcnt >= MR_SIZE)
		mrcnt = 0;
	slot = mrcnt;
	spin_unlock(&_mark_lock);

	markRay[slot].pid_old = opid;
	markRay[slot].pid_new = npid;
	markRay[slot].cpu = cpu;
	markRay[slot].c = a;
}
static inline void MARK(char a) { MARKPIDS(a, 0xFFFF, 0xFFFF); }
static inline void MARKPID(char a, int pid) { MARKPIDS(a, pid, 0xFFFF); }
#else
#define MARK(a)
#define MARKPID(a, b)
#define MARKPIDS(a, b, c)
#endif /* DEBUG_SWAPIO */
/*
DEFINITIONS AND DECLARATIONS FOR MODULE
This section contains definitions for constants, macros, types, variables
and other items needed by this module.
*/
/*
Constant / Define Declarations
*/
#define PERF_MON_PROCESS_NUM 0x400
#define PERF_MON_PROCESS_MASK (PERF_MON_PROCESS_NUM-1)
#define PP_MAX_PROC_ENTRIES 32
/*
* The entry is locked and is not to be replaced.
*/
#define PERF_ENTRY_LOCKED (1<<0)
#define PERF_NOT_FIRST_TIME (1<<1)
#define PERF_EXITED (1<<2)
#define PERF_AUTOLOCK (1<<3)
#define IS_LOCKED(p) (p->flags & PERF_ENTRY_LOCKED)
#define PERF_NUM_MONITORS 4
#define L1_EVENTS_0 0
#define L1_EVENTS_1 1
#define L2_EVENTS_0 2
#define L2_EVENTS_1 3
#define PM_CYCLE_OVERFLOW_MASK 0x80000000
#define L2_PM_CYCLE_OVERFLOW_MASK 0x80000000
#define PM_START_ALL() do {\
if (pm_global) \
pmStartAll();\
} while (0);
#define PM_STOP_ALL() do {\
if (pm_global)\
pmStopAll();\
} while (0);
#define PM_RESET_ALL() do {\
if (pm_global)\
pmResetAll();\
} while (0);
/*
* Accessors for SMP based variables.
*/
#define _SWAPS(p) ((p)->cnts[smp_processor_id()].swaps)
#define _CYCLES(p) ((p)->cnts[smp_processor_id()].cycles)
#define _COUNTS(p, i) ((p)->cnts[smp_processor_id()].counts[i])
#define _L2COUNTS(p, i) ((p)->cnts[smp_processor_id()].l2_counts[i])
#define _L2CYCLES(p) ((p)->cnts[smp_processor_id()].l2_cycles)
/*
Type Declarations
*/
/*
* Counts are on a per core basis.
*/
struct pm_counters_s {
unsigned long long cycles;
unsigned long long l2_cycles;
unsigned long long counts[PERF_NUM_MONITORS];
unsigned long long l2_counts[PERF_NUM_MONITORS];
unsigned long swaps;
};
struct per_process_perf_mon_type{
struct pm_counters_s cnts[NR_CPUS];
unsigned long control;
unsigned long index[PERF_NUM_MONITORS];
unsigned long l2_index[PERF_NUM_MONITORS];
unsigned long pid;
struct proc_dir_entry *proc;
struct proc_dir_entry *l2_proc;
unsigned short flags;
unsigned short running_cpu;
char *pidName;
unsigned long lpm0evtyper;
unsigned long lpm1evtyper;
unsigned long lpm2evtyper;
unsigned long l2lpmevtyper;
unsigned long vlpmevtyper;
unsigned long l2pmevtyper0;
unsigned long l2pmevtyper1;
unsigned long l2pmevtyper2;
unsigned long l2pmevtyper3;
unsigned long l2pmevtyper4;
};
unsigned long last_in_pid[NR_CPUS];
unsigned long fake_swap_out[NR_CPUS] = {0};
/*
Local Object Definitions
*/
struct per_process_perf_mon_type perf_mons[PERF_MON_PROCESS_NUM];
struct proc_dir_entry *proc_dir;
struct proc_dir_entry *settings_dir;
struct proc_dir_entry *values_dir;
struct proc_dir_entry *axi_dir;
struct proc_dir_entry *l2_dir;
struct proc_dir_entry *axi_settings_dir;
struct proc_dir_entry *axi_results_dir;
struct proc_dir_entry *l2_results_dir;
unsigned long pp_enabled;
unsigned long pp_settings_valid = -1;
unsigned long pp_auto_lock;
unsigned long pp_set_pid;
signed long pp_clear_pid = -1;
unsigned long per_proc_event[PERF_NUM_MONITORS];
unsigned long l2_per_proc_event[PERF_NUM_MONITORS];
unsigned long dbg_flags;
unsigned long pp_lpm0evtyper;
unsigned long pp_lpm1evtyper;
unsigned long pp_lpm2evtyper;
unsigned long pp_l2lpmevtyper;
unsigned long pp_vlpmevtyper;
unsigned long pm_stop_for_interrupts;
unsigned long pm_global; /* track all, not process based */
unsigned long pm_global_enable;
unsigned long pm_remove_pid;
unsigned long pp_l2pmevtyper0;
unsigned long pp_l2pmevtyper1;
unsigned long pp_l2pmevtyper2;
unsigned long pp_l2pmevtyper3;
unsigned long pp_l2pmevtyper4;
unsigned long pp_proc_entry_index;
char *per_process_proc_names[PP_MAX_PROC_ENTRIES];
unsigned int axi_swaps;
#define MAX_AXI_SWAPS 10
int first_switch = 1;
/*
Forward Declarations
*/
/*
Function Definitions
*/
/*
  FUNCTION  per_process_find

  DESCRIPTION
    Find the per process information based on the process id (pid) passed.
    This is a simple mask based on the number of entries stored in the
    static array.  Note the table is direct-mapped: two pids whose low
    bits collide share (and overwrite) the same slot, and the caller is
    expected to compare p->pid against the requested pid.

  DEPENDENCIES

  RETURN VALUE
    Pointer to the per process data (never NULL)

  SIDE EFFECTS
*/
struct per_process_perf_mon_type *per_process_find(unsigned long pid)
{
	return &perf_mons[pid & PERF_MON_PROCESS_MASK];
}
/*
FUNCTION per_process_get_name
DESCRIPTION
Retreive the name of the performance counter based on the table and
index passed. We have two different sets of performance counters so
different table need to be used.
DEPENDENCIES
RETURN VALUE
Pointer to char string with the name of the event or "BAD"
Never returns NULL or a bad pointer.
SIDE EFFECTS
*/
char *per_process_get_name(unsigned long index)
{
return pm_find_event_name(index);
}
/*
  FUNCTION  per_process_results_read

  DESCRIPTION
    /proc read handler for a pid's results file.  Sums the per-CPU
    counter sets into one total and prints the formatted results for
    the process: event names, counts, cycles and swap count.

  DEPENDENCIES

  RETURN VALUE
    Number of characters written to the page buffer.

  SIDE EFFECTS
*/
int per_process_results_read(char *page, char **start, off_t off, int count,
	int *eof, void *data)
{
	struct per_process_perf_mon_type *p =
		(struct per_process_perf_mon_type *)data;
	struct pm_counters_s cnts;
	int i, j;

	/*
	 * Total across all CPUS
	 */
	memset(&cnts, 0, sizeof(cnts));
	for (i = 0; i < num_possible_cpus(); i++) {
		cnts.swaps += p->cnts[i].swaps;
		cnts.cycles += p->cnts[i].cycles;
		for (j = 0; j < PERF_NUM_MONITORS; j++)
			cnts.counts[j] += p->cnts[i].counts[j];
	}

	/*
	 * Display as single results of the totals calculated above.
	 * Do we want to display or have option to display individula cores?
	 */
	return sprintf(page, "pid:%lu one:%s:%llu two:%s:%llu three:%s:%llu \
four:%s:%llu cycles:%llu swaps:%lu\n",
	p->pid,
	per_process_get_name(p->index[0]), cnts.counts[0],
	per_process_get_name(p->index[1]), cnts.counts[1],
	per_process_get_name(p->index[2]), cnts.counts[2],
	per_process_get_name(p->index[3]), cnts.counts[3],
	cnts.cycles, cnts.swaps);
}
int per_process_l2_results_read(char *page, char **start, off_t off, int count,
int *eof, void *data)
{
struct per_process_perf_mon_type *p =
(struct per_process_perf_mon_type *)data;
struct pm_counters_s cnts;
int i, j;
/*
* Total across all CPUS
*/
memset(&cnts, 0, sizeof(cnts));
for (i = 0; i < num_possible_cpus(); i++) {
cnts.l2_cycles += p->cnts[i].l2_cycles;
for (j = 0; j < PERF_NUM_MONITORS; j++)
cnts.l2_counts[j] += p->cnts[i].l2_counts[j];
}
/*
* Display as single results of the totals calculated above.
* Do we want to display or have option to display individula cores?
*/
return sprintf(page, "pid:%lu l2_one:%s:%llu l2_two:%s:%llu \
l2_three:%s:%llu \
l2_four:%s:%llu l2_cycles:%llu\n",
p->pid,
per_process_get_name(p->l2_index[0]), cnts.l2_counts[0],
per_process_get_name(p->l2_index[1]), cnts.l2_counts[1],
per_process_get_name(p->l2_index[2]), cnts.l2_counts[2],
per_process_get_name(p->l2_index[3]), cnts.l2_counts[3],
cnts.l2_cycles);
}
/*
  FUNCTION  per_process_results_write

  DESCRIPTION
    Allow some control over the results.  If the user forgets to autolock
    or wants to unlock the results so they will be deleted, then this is
    where it is processed.

    For example, to unlock process 23
       echo "lock" > 23

  FIXES
    The temporary kernel buffer was never freed (leaked on every write)
    and was never NUL-terminated, so strcmp() compared against an
    uninitialized trailing byte.  Both are corrected below; the visible
    command semantics are unchanged.

  DEPENDENCIES

  RETURN VALUE
    Number of characters used (all of them!)

  SIDE EFFECTS
*/
int per_process_results_write(struct file *file, const char *buff,
	unsigned long cnt, void *data)
{
	char *newbuf;
	struct per_process_perf_mon_type *p =
		(struct per_process_perf_mon_type *)data;

	if (p == 0)
		return cnt;

	/*
	 * Alloc the user data in kernel space, copy user to kernel, and
	 * NUL-terminate so the strcmp() calls below are well defined.
	 */
	newbuf = kmalloc(cnt + 1, GFP_KERNEL);
	if (0 == newbuf)
		return cnt;
	if (copy_from_user(newbuf, buff, cnt) != 0) {
		printk(KERN_INFO "%s copy_from_user failed\n", __func__);
		kfree(newbuf);
		return cnt;
	}
	newbuf[cnt] = '\0';

	if (0 == strcmp("lock", newbuf))
		p->flags |= PERF_ENTRY_LOCKED;
	else if (0 == strcmp("unlock", newbuf))
		p->flags &= ~PERF_ENTRY_LOCKED;
	else if (0 == strcmp("auto", newbuf))
		p->flags |= PERF_AUTOLOCK;
	else if (0 == strcmp("autoun", newbuf))
		p->flags &= ~PERF_AUTOLOCK;

	kfree(newbuf);
	return cnt;
}
/*
  FUNCTION  per_process_create_results_proc

  DESCRIPTION
    Ensure the /proc results entry for this pid exists and points at the
    read/write handlers.  Allocates (once) a name buffer for the pid,
    creates the proc entry on first use, or just renames an existing
    entry when the slot is being reused for a new pid.

  DEPENDENCIES

  RETURN VALUE

  SIDE EFFECTS
    May allocate p->pidName and create a proc entry under values_dir.
*/
void per_process_create_results_proc(struct per_process_perf_mon_type *p)
{
	if (p->pidName == 0)
		p->pidName = kmalloc(12, GFP_KERNEL);
	if (p->pidName == 0)
		return;
	sprintf(p->pidName, "%ld", p->pid);

	if (p->proc != 0) {
		/* slot reuse: just point the existing entry at the new name */
		p->proc->name = p->pidName;
	} else {
		p->proc = create_proc_entry(p->pidName, 0777, values_dir);
		if (p->proc == 0)
			return;
	}

	p->proc->read_proc = per_process_results_read;
	p->proc->write_proc = per_process_results_write;
	p->proc->data = (void *)p;
}
/*
 * L2 twin of per_process_create_results_proc(): ensure the pid's entry
 * under l2_results_dir exists and wire it to the L2 read handler (the
 * write handler is shared with the L1 entry).
 */
void per_process_create_l2_results_proc(struct per_process_perf_mon_type *p)
{
	if (p->pidName == 0)
		p->pidName = kmalloc(12, GFP_KERNEL);
	if (p->pidName == 0)
		return;
	sprintf(p->pidName, "%ld", p->pid);

	if (p->l2_proc != 0) {
		/* slot reuse: just point the existing entry at the new name */
		p->l2_proc->name = p->pidName;
	} else {
		p->l2_proc = create_proc_entry(p->pidName, 0777,
			l2_results_dir);
		if (p->l2_proc == 0)
			return;
	}

	p->l2_proc->read_proc = per_process_l2_results_read;
	p->l2_proc->write_proc = per_process_results_write;
	p->l2_proc->data = (void *)p;
}
/*
  FUNCTION  per_process_swap_out

  DESCRIPTION
    Store the counters from the process that is about to swap out.  We
    take the old counts and add them to the current counts in the perf
    registers, widening each 32-bit hardware counter by 0xFFFFFFFF when
    its overflow bit is set.  Before the new process is swapped in, the
    counters are reset (by the caller).

  DEPENDENCIES

  RETURN VALUE

  SIDE EFFECTS
    Updates the per-CPU counter set of *data via the _SWAPS/_CYCLES/
    _COUNTS accessors, or defers by flagging fake_swap_out[] when
    called on the wrong core.
*/
/* NOTE(review): this typedef appears unused in this file chunk --
 * confirm before removing. */
typedef void (*vfun)(void *);

void per_process_swap_out(struct per_process_perf_mon_type *data)
{
	int i;
	unsigned long overflow;
#ifdef CONFIG_ARCH_MSM8X60
	unsigned long l2_overflow;
#endif
	struct per_process_perf_mon_type *p = data;

	MARKPIDS('O', p->pid, 0);
	/* latch the overflow status registers before touching counters */
	RCP15_PMOVSR(overflow);
#ifdef CONFIG_ARCH_MSM8X60
	RCP15_L2PMOVSR(l2_overflow);
#endif
	if (!pp_enabled)
		return;

	/*
	 * The kernel for some reason (2.6.32.9) starts a process context on
	 * one core and ends on another.  So the swap in and swap out can be
	 * on different cores.  If this happens, we need to stop the
	 * counters and collect the data on the core that started the counters
	 * ....otherwise we receive invalid data.  So we mark the the core with
	 * the process as deferred.  The next time a process is swapped on
	 * the core that the process was running on, the counters will be
	 * updated.
	 */
	if ((smp_processor_id() != p->running_cpu) && (p->pid != 0)) {
		fake_swap_out[p->running_cpu] = 1;
		return;
	}

	_SWAPS(p)++;
	_CYCLES(p) += pm_get_cycle_count();
	/* widen a wrapped 32-bit cycle counter */
	if (overflow & PM_CYCLE_OVERFLOW_MASK)
		_CYCLES(p) += 0xFFFFFFFF;

	for (i = 0; i < PERF_NUM_MONITORS; i++) {
		_COUNTS(p, i) += pm_get_count(i);
		/* each monitor has its own overflow bit */
		if (overflow & (1 << i))
			_COUNTS(p, i) += 0xFFFFFFFF;
	}
#ifdef CONFIG_ARCH_MSM8X60
	_L2CYCLES(p) += l2_pm_get_cycle_count();
	if (l2_overflow & L2_PM_CYCLE_OVERFLOW_MASK)
		_L2CYCLES(p) += 0xFFFFFFFF;

	for (i = 0; i < PERF_NUM_MONITORS; i++) {
		_L2COUNTS(p, i) += l2_pm_get_count(i);
		if (l2_overflow & (1 << i))
			_L2COUNTS(p, i) += 0xFFFFFFFF;
	}
#endif
}
/*
FUNCTION per_process_remove_manual
DESCRIPTION
Remove an entry from the results directory if the flags allow this.
When not enbled or the entry is locked, the values/results will
not be removed.
DEPENDENCIES
RETURN VALUE
SIDE EFFECTS
*/
void per_process_remove_manual(unsigned long pid)
{
struct per_process_perf_mon_type *p = per_process_find(pid);
/*
* Check all of the flags to see if we can remove this one
* Then mark as not used
*/
if (0 == p)
return;
p->pid = (0xFFFFFFFF);
/*
* Remove the proc entry.
*/
if (p->proc)
remove_proc_entry(p->pidName, values_dir);
if (p->l2_proc)
remove_proc_entry(p->pidName, l2_results_dir);
kfree(p->pidName);
/*
* Clear them out...and ensure the pid is invalid
*/
memset(p, 0, sizeof *p);
p->pid = 0xFFFFFFFF;
pm_remove_pid = -1;
}
/*
* Remove called when a process exits...
*/
void _per_process_remove(unsigned long pid) {}
/*
FUNCTION per_process_initialize
DESCRIPTION
Initialize performance collection information for a new process.
DEPENDENCIES
RETURN VALUE
SIDE EFFECTS
May create a new proc entry
*/
void per_process_initialize(struct per_process_perf_mon_type *p,
unsigned long pid)
{
int i;
/*
* See if this is the pid we are interested in...
*/
if (pp_settings_valid == -1)
return;
if ((pp_set_pid != pid) && (pp_set_pid != 0))
return;
/*
* Clear out the statistics table then insert this pid
* We want to keep the proc entry and the name
*/
p->pid = pid;
/*
* Create a proc entry for this pid, then get the current event types and
* store in data struct so when the process is switched in we can track
* it.
*/
if (p->proc == 0) {
per_process_create_results_proc(p);
#ifdef CONFIG_ARCH_MSM8X60
per_process_create_l2_results_proc(p);
#endif
}
_CYCLES(p) = 0;
_L2CYCLES(p) = 0;
_SWAPS(p) = 0;
/*
* Set the per process data struct, but not the monitors until later...
* Init only happens with the user sets the SetPID variable to this pid
* so we can load new values.
*/
for (i = 0; i < PERF_NUM_MONITORS; i++) {
p->index[i] = per_proc_event[i];
#ifdef CONFIG_ARCH_MSM8X60
p->l2_index[i] = l2_per_proc_event[i];
#endif
_COUNTS(p, i) = 0;
_L2COUNTS(p, i) = 0;
}
p->lpm0evtyper = pp_lpm0evtyper;
p->lpm1evtyper = pp_lpm1evtyper;
p->lpm2evtyper = pp_lpm2evtyper;
p->l2lpmevtyper = pp_l2lpmevtyper;
p->vlpmevtyper = pp_vlpmevtyper;
#ifdef CONFIG_ARCH_MSM8X60
p->l2pmevtyper0 = pp_l2pmevtyper0;
p->l2pmevtyper1 = pp_l2pmevtyper1;
p->l2pmevtyper2 = pp_l2pmevtyper2;
p->l2pmevtyper3 = pp_l2pmevtyper3;
p->l2pmevtyper4 = pp_l2pmevtyper4;
#endif
/*
* Reset pid and settings value
*/
pp_set_pid = -1;
pp_settings_valid = -1;
}
/*
FUNCTION per_process_swap_in
DESCRIPTION
Called when a context switch is about to start this PID.
We check to see if this process has an entry or not and create one
if not locked...
DEPENDENCIES
RETURN VALUE
SIDE EFFECTS
*/
void per_process_swap_in(struct per_process_perf_mon_type *p_new,
unsigned long pid)
{
int i;
MARKPIDS('I', p_new->pid, 0);
/*
* If the set proc variable == the current pid then init a new
* entry...
*/
if (pp_set_pid == pid)
per_process_initialize(p_new, pid);
p_new->running_cpu = smp_processor_id();
last_in_pid[smp_processor_id()] = pid;
/*
* setup the monitors for this process.
*/
for (i = 0; i < PERF_NUM_MONITORS; i++) {
pm_set_event(i, p_new->index[i]);
#ifdef CONFIG_ARCH_MSM8X60
l2_pm_set_event(i, p_new->l2_index[i]);
#endif
}
pm_set_local_iu(p_new->lpm0evtyper);
pm_set_local_xu(p_new->lpm1evtyper);
pm_set_local_su(p_new->lpm2evtyper);
pm_set_local_l2(p_new->l2lpmevtyper);
#ifdef CONFIG_ARCH_MSM8X60
pm_set_local_bu(p_new->l2pmevtyper0);
pm_set_local_cb(p_new->l2pmevtyper1);
pm_set_local_mp(p_new->l2pmevtyper2);
pm_set_local_sp(p_new->l2pmevtyper3);
pm_set_local_scu(p_new->l2pmevtyper4);
#endif
}
/*
FUNCTION perProcessSwitch
DESCRIPTION
Called during context switch. Updates the counts on the process about to
be swapped out and brings in the counters for the process about to be
swapped in.
All is dependant on the enabled and lock flags.
DEPENDENCIES
RETURN VALUE
SIDE EFFECTS
*/
DEFINE_SPINLOCK(pm_lock);
void _per_process_switch(unsigned long old_pid, unsigned long new_pid)
{
struct per_process_perf_mon_type *p_old, *p_new;
if (pm_global_enable == 0)
return;
spin_lock(&pm_lock);
pm_stop_all();
#ifdef CONFIG_ARCH_MSM8X60
l2_pm_stop_all();
#endif
/*
* We detected that the process was swapped in on one core and out on
* a different core. This does not allow us to stop and stop counters
* properly so we need to defer processing. This checks to see if there
* is any defered processing necessary. And does it... */
if (fake_swap_out[smp_processor_id()] != 0) {
fake_swap_out[smp_processor_id()] = 0;
p_old = per_process_find(last_in_pid[smp_processor_id()]);
last_in_pid[smp_processor_id()] = 0;
if (p_old != 0)
per_process_swap_out(p_old);
}
/*
* Clear the data collected so far for this process?
*/
if (pp_clear_pid != -1) {
struct per_process_perf_mon_type *p_clear =
per_process_find(pp_clear_pid);
if (p_clear) {
memset(p_clear->cnts, 0,
sizeof(struct pm_counters_s)*num_possible_cpus());
printk(KERN_INFO "Clear Per Processor Stats for \
PID:%ld\n", pp_clear_pid);
pp_clear_pid = -1;
}
}
/*
* Always collect for 0, it collects for all.
*/
if (pp_enabled) {
if (first_switch == 1) {
per_process_initialize(&perf_mons[0], 0);
first_switch = 0;
}
if (pm_global) {
per_process_swap_out(&perf_mons[0]);
per_process_swap_in(&perf_mons[0], 0);
} else {
p_old = per_process_find(old_pid);
p_new = per_process_find(new_pid);
/*
* save the old counts to the old data struct, if the
* returned ptr is NULL or the process id passed is not
* the same as the process id in the data struct then
* don't update the data.
*/
if ((p_old) && (p_old->pid == old_pid) &&
(p_old->pid != 0)) {
per_process_swap_out(p_old);
}
/*
* Setup the counters for the new process
*/
if (pp_set_pid == new_pid)
per_process_initialize(p_new, new_pid);
if ((p_new->pid == new_pid) && (new_pid != 0))
per_process_swap_in(p_new, new_pid);
}
pm_reset_all();
#ifdef CONFIG_ARCH_MSM8X60
l2_pm_reset_all();
#endif
#ifdef CONFIG_ARCH_QSD8X50
axi_swaps++;
if (axi_swaps%pm_axi_info.refresh == 0) {
if (pm_axi_info.clear == 1) {
pm_axi_clear_cnts();
pm_axi_info.clear = 0;
}
if (pm_axi_info.enable == 0)
pm_axi_disable();
else
pm_axi_update_cnts();
axi_swaps = 0;
}
#endif
}
pm_start_all();
#ifdef CONFIG_ARCH_MSM8X60
l2_pm_start_all();
#endif
spin_unlock(&pm_lock);
}
/*
FUNCTION perf_mon_interrupt_in
DESCRIPTION
  Called when an interrupt is being processed.  If the
  pm_stop_for_interrupts flag is non zero then we disable the counting
  of performance monitors for the duration of the interrupt.
DEPENDENCIES
RETURN VALUE
SIDE EFFECTS
*/
/* Interrupt nesting depth; counting resumes when it drops back to zero. */
static int pm_interrupt_nesting_count;
/* Cycle counter snapshots at entry/exit, used as a stopped-counter check. */
static unsigned long pm_cycle_in, pm_cycle_out;
void _perf_mon_interrupt_in(void)
{
	if (pm_global_enable == 0)
		return;
	if (pm_stop_for_interrupts == 0)
		return;
	pm_interrupt_nesting_count++; /* NOTE(review): not actually atomic */
	pm_stop_all();
	pm_cycle_in = pm_get_cycle_count();
}
/*
FUNCTION perf_mon_interrupt_out
DESCRIPTION
  Re-enable performance monitor counting when the nesting count drops to
  zero, provided the counting had been stopped on interrupt entry.
DEPENDENCIES
RETURN VALUE
SIDE EFFECTS
*/
void _perf_mon_interrupt_out(void)
{
	if (pm_global_enable == 0)
		return;
	if (pm_stop_for_interrupts == 0)
		return;
	--pm_interrupt_nesting_count; /* NOTE(review): not atomic -- racy? */
	if (pm_interrupt_nesting_count <= 0) {
		pm_cycle_out = pm_get_cycle_count();
		/* Counters were stopped at interrupt entry, so the cycle
		 * count should not have advanced while we were nested. */
		if (pm_cycle_in != pm_cycle_out)
			printk(KERN_INFO "pmIn!=pmOut in:%lx out:%lx\n",
				pm_cycle_in, pm_cycle_out);
		if (pp_enabled) {
			pm_start_all();
#ifdef CONFIG_ARCH_MSM8X60
			l2_pm_start_all();
#endif
		}
		/* Clamp in case of unbalanced in/out calls. */
		pm_interrupt_nesting_count = 0;
	}
}
/*
 * Switch between global collection mode (all activity counted into the
 * catch-all entry, pid 0) and per-process collection.  g != 0 selects
 * global mode.
 */
void per_process_do_global(unsigned long g)
{
	pm_global = g;
	if (pm_global == 1) {
		/* Stop and clear everything, then restart counting into
		 * the catch-all entry. */
		pm_stop_all();
#ifdef CONFIG_ARCH_MSM8X60
		l2_pm_stop_all();
#endif
		pm_reset_all();
#ifdef CONFIG_ARCH_MSM8X60
		l2_pm_reset_all();
#endif
		pp_set_pid = 0;
		per_process_swap_in(&perf_mons[0], 0);
		pm_start_all();
#ifdef CONFIG_ARCH_MSM8X60
		l2_pm_start_all();
#endif
	} else {
		/* Leaving global mode: just stop; the next context switch
		 * resumes per-process counting. */
		pm_stop_all();
#ifdef CONFIG_ARCH_MSM8X60
		l2_pm_stop_all();
#endif
	}
}
/*
FUNCTION per_process_write
DESCRIPTION
Generic routine to handle any of the settings /proc directory writes.
DEPENDENCIES
RETURN VALUE
SIDE EFFECTS
*/
int per_process_write(struct file *file, const char *buff,
unsigned long cnt, void *data, const char *fmt)
{
char *newbuf;
unsigned long *d = (unsigned long *)data;
/*
* Alloc the user data in kernel space. and then copy user to kernel
*/
newbuf = kmalloc(cnt + 1, GFP_KERNEL);
if (0 == newbuf)
return PM_PP_ERR;
if (copy_from_user(newbuf, buff, cnt) != 0) {
printk(KERN_INFO "%s copy_from_user failed\n", __func__);
return cnt;
}
sscanf(newbuf, fmt, d);
kfree(newbuf);
/*
* If this is a remove command then do it now...
*/
if (d == &pm_remove_pid)
per_process_remove_manual(*d);
if (d == &pm_global)
per_process_do_global(*d);
return cnt;
}
/* /proc write handler for settings entries holding decimal values. */
int per_process_write_dec(struct file *file, const char *buff,
	unsigned long cnt, void *data)
{
	return per_process_write(file, buff, cnt, data, "%ld");
}
/* /proc write handler for settings entries holding hexadecimal values. */
int per_process_write_hex(struct file *file, const char *buff,
	unsigned long cnt, void *data)
{
	return per_process_write(file, buff, cnt, data, "%lx");
}
/*
FUNCTION per_process_read
DESCRIPTION
  Generic read handler for the /proc settings directory (hex output).
DEPENDENCIES
RETURN VALUE
  Number of characters written into the page.
SIDE EFFECTS
*/
int per_process_read(char *page, char **start, off_t off, int count,
	int *eof, void *data)
{
	unsigned long *d = (unsigned long *)data;
	return sprintf(page, "%lx", *d);
}
/* As per_process_read(), but formats the value in decimal. */
int per_process_read_decimal(char *page, char **start, off_t off, int count,
	int *eof, void *data)
{
	unsigned long *d = (unsigned long *)data;
	return sprintf(page, "%ld", *d);
}
/*
FUNCTION per_process_proc_entry
DESCRIPTION
  Create a generic entry in the /proc settings directory wired to the
  hex or decimal read/write handlers, and remember its name so it can
  be removed on module unload.
DEPENDENCIES
RETURN VALUE
SIDE EFFECTS
*/
void per_process_proc_entry(char *name, unsigned long *var,
	struct proc_dir_entry *d, int hex)
{
	struct proc_dir_entry *pe;
	/*
	 * Check capacity BEFORE creating the entry.  The old code created
	 * the proc entry first and then merely warned that it would leak
	 * on unload; refusing up front means every created entry is
	 * guaranteed to be tracked and removable.
	 */
	if (pp_proc_entry_index >= PP_MAX_PROC_ENTRIES) {
		printk(KERN_INFO "PERF: proc entry table full, \"%s\" not created\n",
			name);
		return;
	}
	pe = create_proc_entry(name, 0777, d);
	if (0 == pe)
		return;
	if (hex) {
		pe->read_proc = per_process_read;
		pe->write_proc = per_process_write_hex;
	} else {
		pe->read_proc = per_process_read_decimal;
		pe->write_proc = per_process_write_dec;
	}
	pe->data = (void *)var;
	per_process_proc_names[pp_proc_entry_index++] = name;
}
/*
 * Thread-switch notifier: drives the per-process PMU swap in/out on
 * every context switch.  Keeps the previously scheduled pid in a static
 * so the first ever switch (no valid "old" pid) is skipped.
 */
static int perfmon_notifier(struct notifier_block *self, unsigned long cmd,
	void *v)
{
	static int old_pid = -1;
	struct thread_info *thread = v;
	int current_pid;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return old_pid;
	current_pid = thread->task->pid;
	/* No valid previous pid on the very first invocation. */
	if (old_pid != -1)
		_per_process_switch(old_pid, current_pid);
	old_pid = current_pid;
	return old_pid;
}
static struct notifier_block perfmon_notifier_block = {
	.notifier_call = perfmon_notifier,
};
/*
FUNCTION per_process_perf_init
DESCRIPTION
  Initialize the per-process performance monitor hardware and build the
  /proc/ppPerf tree (settings/, results/, axi/, l2/), register the
  context-switch notifier and publish the module entry points.
DEPENDENCIES
RETURN VALUE
  Always 0.
SIDE EFFECTS
*/
int per_process_perf_init(void)
{
#ifdef CONFIG_ARCH_MSM8X60
	/* NOTE(review): hard-codes CPUs 0 and 1 -- assumes a dual-core
	 * part with both cores online; confirm for other configurations. */
	smp_call_function_single(0, (void *)pm_initialize, (void *)NULL, 1);
	smp_call_function_single(1, (void *)pm_initialize, (void *)NULL, 1);
	l2_pm_initialize();
#else
	pm_initialize();
#endif
	pm_axi_init();
	pm_axi_clear_cnts();
	/* /proc/ppPerf and its settings/results subdirectories.
	 * NOTE(review): proc_mkdir() return values are not checked. */
	proc_dir = proc_mkdir("ppPerf", NULL);
	values_dir = proc_mkdir("results", proc_dir);
	settings_dir = proc_mkdir("settings", proc_dir);
	/* Control knobs exposed under settings/. */
	per_process_proc_entry("enable", &pp_enabled, settings_dir, 1);
	per_process_proc_entry("valid", &pp_settings_valid, settings_dir, 1);
	per_process_proc_entry("setPID", &pp_set_pid, settings_dir, 0);
	per_process_proc_entry("clearPID", &pp_clear_pid, settings_dir, 0);
	per_process_proc_entry("event0", &per_proc_event[0], settings_dir, 1);
	per_process_proc_entry("event1", &per_proc_event[1], settings_dir, 1);
	per_process_proc_entry("event2", &per_proc_event[2], settings_dir, 1);
	per_process_proc_entry("event3", &per_proc_event[3], settings_dir, 1);
	per_process_proc_entry("l2_event0", &l2_per_proc_event[0], settings_dir,
		1);
	per_process_proc_entry("l2_event1", &l2_per_proc_event[1], settings_dir,
		1);
	per_process_proc_entry("l2_event2", &l2_per_proc_event[2], settings_dir,
		1);
	per_process_proc_entry("l2_event3", &l2_per_proc_event[3], settings_dir,
		1);
	per_process_proc_entry("debug", &dbg_flags, settings_dir, 1);
	per_process_proc_entry("autolock", &pp_auto_lock, settings_dir, 1);
	per_process_proc_entry("lpm0evtyper", &pp_lpm0evtyper, settings_dir, 1);
	per_process_proc_entry("lpm1evtyper", &pp_lpm1evtyper, settings_dir, 1);
	per_process_proc_entry("lpm2evtyper", &pp_lpm2evtyper, settings_dir, 1);
	per_process_proc_entry("l2lpmevtyper", &pp_l2lpmevtyper, settings_dir,
		1);
	per_process_proc_entry("vlpmevtyper", &pp_vlpmevtyper, settings_dir, 1);
	per_process_proc_entry("l2pmevtyper0", &pp_l2pmevtyper0, settings_dir,
		1);
	per_process_proc_entry("l2pmevtyper1", &pp_l2pmevtyper1, settings_dir,
		1);
	per_process_proc_entry("l2pmevtyper2", &pp_l2pmevtyper2, settings_dir,
		1);
	per_process_proc_entry("l2pmevtyper3", &pp_l2pmevtyper3, settings_dir,
		1);
	per_process_proc_entry("l2pmevtyper4", &pp_l2pmevtyper4, settings_dir,
		1);
	per_process_proc_entry("stopForInterrupts", &pm_stop_for_interrupts,
		settings_dir, 1);
	per_process_proc_entry("global", &pm_global, settings_dir, 1);
	per_process_proc_entry("globalEnable", &pm_global_enable, settings_dir,
		1);
	per_process_proc_entry("removePID", &pm_remove_pid, settings_dir, 0);
	/* AXI bus monitor sub-tree. */
	axi_dir = proc_mkdir("axi", proc_dir);
	axi_settings_dir = proc_mkdir("settings", axi_dir);
	axi_results_dir = proc_mkdir("results", axi_dir);
	pm_axi_set_proc_entry("axi_enable", &pm_axi_info.enable,
		axi_settings_dir, 1);
	pm_axi_set_proc_entry("axi_clear", &pm_axi_info.clear, axi_settings_dir,
		0);
	pm_axi_set_proc_entry("axi_valid", &pm_axi_info.valid, axi_settings_dir,
		1);
	pm_axi_set_proc_entry("axi_sel_reg0", &pm_axi_info.sel_reg0,
		axi_settings_dir, 1);
	pm_axi_set_proc_entry("axi_sel_reg1", &pm_axi_info.sel_reg1,
		axi_settings_dir, 1);
	pm_axi_set_proc_entry("axi_ten_sel", &pm_axi_info.ten_sel_reg,
		axi_settings_dir, 1);
	pm_axi_set_proc_entry("axi_refresh", &pm_axi_info.refresh,
		axi_settings_dir, 1);
	pm_axi_get_cnt_proc_entry("axi_cnts", &axi_cnts, axi_results_dir, 0);
	/* L2 cache monitor results sub-tree. */
	l2_dir = proc_mkdir("l2", proc_dir);
	l2_results_dir = proc_mkdir("results", l2_dir);
	memset(perf_mons, 0, sizeof(perf_mons));
	per_process_create_results_proc(&perf_mons[0]);
	per_process_create_l2_results_proc(&perf_mons[0]);
	thread_register_notifier(&perfmon_notifier_block);
	/*
	 * Set the function pointers so the module can be activated.
	 */
	pp_interrupt_out_ptr = _perf_mon_interrupt_out;
	pp_interrupt_in_ptr = _perf_mon_interrupt_in;
	pp_process_remove_ptr = _per_process_remove;
	pp_loaded = 1;
	pm_axi_info.refresh = 1;
#ifdef CONFIG_ARCH_MSM8X60
	smp_call_function_single(0, (void *)pm_reset_all, (void *)NULL, 1);
	smp_call_function_single(1, (void *)pm_reset_all, (void *)NULL, 1);
	smp_call_function_single(0, (void *)l2_pm_reset_all, (void *)NULL, 1);
	smp_call_function_single(1, (void *)l2_pm_reset_all, (void *)NULL, 1);
#else
	pm_reset_all();
#endif
	return 0;
}
/*
FUNCTION per_process_perf_exit
DESCRIPTION
  Module exit function: clean up and remove proc entries.
DEPENDENCIES
RETURN VALUE
SIDE EFFECTS
  No more per process
*/
void per_process_perf_exit(void)
{
	unsigned long i;
	/*
	 * Set the function pointers to 0 so the functions will no longer
	 * be invoked
	 */
	pp_loaded = 0;
	pp_interrupt_out_ptr = 0;
	pp_interrupt_in_ptr = 0;
	pp_process_remove_ptr = 0;
	/*
	 * Remove the per-process results entries
	 */
	for (i = 0; i < PERF_MON_PROCESS_NUM; i++)
		per_process_remove_manual(perf_mons[i].pid);
	/*
	 * Remove the proc entries in the settings dir (names recorded by
	 * per_process_proc_entry)
	 */
	i = 0;
	for (i = 0; i < pp_proc_entry_index; i++)
		remove_proc_entry(per_process_proc_names[i], settings_dir);
	/*remove proc axi files*/
	remove_proc_entry("axi_enable", axi_settings_dir);
	remove_proc_entry("axi_valid", axi_settings_dir);
	remove_proc_entry("axi_refresh", axi_settings_dir);
	remove_proc_entry("axi_clear", axi_settings_dir);
	remove_proc_entry("axi_sel_reg0", axi_settings_dir);
	remove_proc_entry("axi_sel_reg1", axi_settings_dir);
	remove_proc_entry("axi_ten_sel", axi_settings_dir);
	remove_proc_entry("axi_cnts", axi_results_dir);
	/*
	 * Remove the directories (children first, then parents)
	 */
	remove_proc_entry("results", l2_dir);
	remove_proc_entry("l2", proc_dir);
	remove_proc_entry("results", proc_dir);
	remove_proc_entry("settings", proc_dir);
	remove_proc_entry("results", axi_dir);
	remove_proc_entry("settings", axi_dir);
	remove_proc_entry("axi", proc_dir);
	remove_proc_entry("ppPerf", NULL);
	pm_free_irq();
#ifdef CONFIG_ARCH_MSM8X60
	l2_pm_free_irq();
#endif
	thread_unregister_notifier(&perfmon_notifier_block);
#ifdef CONFIG_ARCH_MSM8X60
	/* NOTE(review): hard-codes CPUs 0 and 1, as in init. */
	smp_call_function_single(0, (void *)pm_deinitialize, (void *)NULL, 1);
	smp_call_function_single(1, (void *)pm_deinitialize, (void *)NULL, 1);
	l2_pm_deinitialize();
#else
	pm_deinitialize();
#endif
}
| gpl-2.0 |
mseskir/android_kernel_vestel_g55 | drivers/base/power/domain_governor.c | 4267 | 4564 | /*
* drivers/base/power/domain_governor.c - Governors for device PM domains.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
*
* This file is released under the GPLv2.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
#ifdef CONFIG_PM_RUNTIME
/**
* default_stop_ok - Default PM domain governor routine for stopping devices.
* @dev: Device to check.
*/
bool default_stop_ok(struct device *dev)
{
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
dev_dbg(dev, "%s()\n", __func__);
if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
return true;
return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
&& td->break_even_ns < dev->power.max_time_suspended_ns;
}
/**
 * default_power_down_ok - Default generic PM domain power off governor routine.
 * @pd: PM domain to check.
 *
 * Returns true when removing power from the domain is worthwhile given
 * the off/on latencies, the subdomains' remaining off-time budgets and
 * every member device's maximum allowed suspend time.  Also updates
 * genpd->max_off_time_ns as a side effect.
 *
 * This routine must be executed under the PM domain's lock.
 */
static bool default_power_down_ok(struct dev_pm_domain *pd)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct gpd_link *link;
	struct pm_domain_data *pdd;
	s64 min_dev_off_time_ns;
	s64 off_on_time_ns;
	ktime_t time_now = ktime_get();

	off_on_time_ns = genpd->power_off_latency_ns +
		genpd->power_on_latency_ns;
	/*
	 * It doesn't make sense to remove power from the domain if saving
	 * the state of all devices in it and the power off/power on operations
	 * take too much time.
	 *
	 * All devices in this domain have been stopped already at this point.
	 */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		if (pdd->dev->driver)
			off_on_time_ns +=
				to_gpd_data(pdd)->td.save_state_latency_ns;
	}
	/*
	 * Check if subdomains can be off for enough time.
	 *
	 * All subdomains have been powered off already at this point.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		struct generic_pm_domain *sd = link->slave;
		s64 sd_max_off_ns = sd->max_off_time_ns;

		/* Negative budget means "no constraint" for this subdomain. */
		if (sd_max_off_ns < 0)
			continue;
		/* Deduct the time the subdomain has already spent off. */
		sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
			sd->power_off_time));
		/*
		 * Check if the subdomain is allowed to be off long enough for
		 * the current domain to turn off and on (that's how much time
		 * it will have to wait worst case).
		 */
		if (sd_max_off_ns <= off_on_time_ns)
			return false;
	}
	/*
	 * Check if the devices in the domain can be off enough time.
	 */
	min_dev_off_time_ns = -1;
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		struct gpd_timing_data *td;
		struct device *dev = pdd->dev;
		s64 dev_off_time_ns;

		if (!dev->driver || dev->power.max_time_suspended_ns < 0)
			continue;
		td = &to_gpd_data(pdd)->td;
		/* Remaining budget after restart latencies and the time
		 * the device has already been suspended. */
		dev_off_time_ns = dev->power.max_time_suspended_ns -
			(td->start_latency_ns + td->restore_state_latency_ns +
				ktime_to_ns(ktime_sub(time_now,
					dev->power.suspend_time)));
		if (dev_off_time_ns <= off_on_time_ns)
			return false;
		if (min_dev_off_time_ns > dev_off_time_ns
		    || min_dev_off_time_ns < 0)
			min_dev_off_time_ns = dev_off_time_ns;
	}
	if (min_dev_off_time_ns < 0) {
		/*
		 * There are no latency constraints, so the domain can spend
		 * arbitrary time in the "off" state.
		 */
		genpd->max_off_time_ns = -1;
		return true;
	}
	/*
	 * The difference between the computed minimum delta and the time needed
	 * to turn the domain on is the maximum theoretical time this domain can
	 * spend in the "off" state.
	 */
	min_dev_off_time_ns -= genpd->power_on_latency_ns;
	/*
	 * If the difference between the computed minimum delta and the time
	 * needed to turn the domain off and back on is smaller than the
	 * domain's power break even time, removing power from the domain is not
	 * worth it.
	 */
	if (genpd->break_even_ns >
	    min_dev_off_time_ns - genpd->power_off_latency_ns)
		return false;

	genpd->max_off_time_ns = min_dev_off_time_ns;
	return true;
}
/* Governor callback that never allows the domain to be powered down. */
static bool always_on_power_down_ok(struct dev_pm_domain *domain)
{
	return false;
}
#else /* !CONFIG_PM_RUNTIME */

/* Without runtime PM, devices are never stopped by the governor. */
bool default_stop_ok(struct device *dev)
{
	return false;
}

/* NULL callbacks: the core treats missing callbacks as "don't care". */
#define default_power_down_ok	NULL
#define always_on_power_down_ok	NULL

#endif /* !CONFIG_PM_RUNTIME */
/*
 * Default QoS-aware governor: allows stop/power-down only when the
 * latency budgets computed above permit it.
 */
struct dev_power_governor simple_qos_governor = {
	.stop_ok = default_stop_ok,
	.power_down_ok = default_power_down_ok,
};

/**
 * pm_genpd_gov_always_on - A governor implementing an always-on policy
 */
struct dev_power_governor pm_domain_always_on_gov = {
	.power_down_ok = always_on_power_down_ok,
	.stop_ok = default_stop_ok,
};
mifl/android_kernel_pantech_p9090 | drivers/pci/irq.c | 4779 | 1804 | /*
* PCI IRQ failure handing code
*
* Copyright (c) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pci.h>
/*
 * Log a standardized "misrouted IRQ" report, naming the parent bridge,
 * and WARN so the stack trace makes it into bug reports.
 */
static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
{
	/* NOTE(review): assumes dev.parent is itself a PCI device
	 * (a bridge); to_pci_dev() on a non-PCI parent would be bogus. */
	struct pci_dev *parent = to_pci_dev(pdev->dev.parent);

	dev_printk(KERN_ERR, &pdev->dev,
		"Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
		dev_name(&parent->dev), parent->vendor, parent->device);
	dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason);
	dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n");
	WARN_ON(1);
}
/**
 * pci_lost_interrupt - reports a lost PCI interrupt
 * @pdev: device whose interrupt is lost
 *
 * The primary function of this routine is to report a lost interrupt
 * in a standard way which users can recognise (instead of blaming the
 * driver).
 *
 * Returns:
 *  a suggestion for fixing it (although the driver is not required to
 *  act on this).
 */
enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev)
{
	/* Message-signalled interrupts take precedence: suggest falling
	 * back to legacy INTx routing. */
	if (pdev->msix_enabled) {
		pci_note_irq_problem(pdev, "MSIX routing failure");
		return PCI_LOST_IRQ_DISABLE_MSIX;
	}
	if (pdev->msi_enabled) {
		pci_note_irq_problem(pdev, "MSI routing failure");
		return PCI_LOST_IRQ_DISABLE_MSI;
	}
#ifdef CONFIG_ACPI
	if (!acpi_disabled && !acpi_noirq) {
		pci_note_irq_problem(pdev, "Potential ACPI misrouting please reboot with acpi=noirq");
		/* currently no way to fix acpi on the fly */
		return PCI_LOST_IRQ_DISABLE_ACPI;
	}
#endif
	pci_note_irq_problem(pdev, "unknown cause (not MSI or ACPI)");
	return PCI_LOST_IRQ_NO_INFORMATION;
}
EXPORT_SYMBOL(pci_lost_interrupt);
Jackeagle/android_kernel_sm_g800h_kk | net/irda/irttp.c | 5035 | 51777 | /*********************************************************************
*
* Filename: irttp.c
* Version: 1.2
* Description: Tiny Transport Protocol (TTP) implementation
* Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:31 1997
* Modified at: Wed Jan 5 11:31:27 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
* Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither Dag Brattli nor University of Tromsø admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
********************************************************************/
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <net/irda/irda.h>
#include <net/irda/irlap.h>
#include <net/irda/irlmp.h>
#include <net/irda/parameters.h>
#include <net/irda/irttp.h>
static struct irttp_cb *irttp;
static void __irttp_close_tsap(struct tsap_cb *self);
static int irttp_data_indication(void *instance, void *sap,
struct sk_buff *skb);
static int irttp_udata_indication(void *instance, void *sap,
struct sk_buff *skb);
static void irttp_disconnect_indication(void *instance, void *sap,
LM_REASON reason, struct sk_buff *);
static void irttp_connect_indication(void *instance, void *sap,
struct qos_info *qos, __u32 max_sdu_size,
__u8 header_size, struct sk_buff *skb);
static void irttp_connect_confirm(void *instance, void *sap,
struct qos_info *qos, __u32 max_sdu_size,
__u8 header_size, struct sk_buff *skb);
static void irttp_run_tx_queue(struct tsap_cb *self);
static void irttp_run_rx_queue(struct tsap_cb *self);
static void irttp_flush_queues(struct tsap_cb *self);
static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
static void irttp_todo_expired(unsigned long data);
static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
int get);
static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow);
static void irttp_status_indication(void *instance,
LINK_STATUS link, LOCK_STATUS lock);
/* Information for parsing parameters in IrTTP */
static pi_minor_info_t pi_minor_call_table[] = {
{ NULL, 0 }, /* 0x00 */
{ irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
};
static pi_major_info_t pi_major_call_table[] = {{ pi_minor_call_table, 2 }};
static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
/************************ GLOBAL PROCEDURES ************************/
/*
 * Function irttp_init (void)
 *
 *    Initialize the IrTTP layer. Called by module initialization code.
 *    Allocates the global control block and its TSAP hashbin.
 */
int __init irttp_init(void)
{
	irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL);
	if (!irttp)
		return -ENOMEM;

	irttp->magic = TTP_MAGIC;

	irttp->tsaps = hashbin_new(HB_LOCK);
	if (irttp->tsaps != NULL)
		return 0;

	/* Hashbin allocation failed: undo and bail out. */
	IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n",
		   __func__);
	kfree(irttp);
	return -ENOMEM;
}
/*
 * Function irttp_cleanup (void)
 *
 *    Called by module destruction/cleanup code
 *
 */
void irttp_cleanup(void)
{
	/* Check for main structure */
	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);

	/*
	 * Delete hashbin and close all TSAP instances in it
	 */
	hashbin_delete(irttp->tsaps, (FREE_FUNC) __irttp_close_tsap);

	irttp->magic = 0;

	/* De-allocate main structure */
	kfree(irttp);
	/* Reset the global so a stale pointer is never dereferenced. */
	irttp = NULL;
}
/*
 * Function irttp_start_todo_timer (self, timeout)
 *
 *    (Re)arm the todo timer to fire @timeout jiffies from now.
 *
 * Made it more effient and unsensitive to race conditions - Jean II
 */
static inline void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
{
	/* Set new value for timer */
	mod_timer(&self->todo_timer, jiffies + timeout);
}
/*
 * Function irttp_todo_expired (data)
 *
 *    Todo timer has expired!  Makes progress on the rx/tx queues and
 *    handles deferred disconnect/close requests.
 *
 * One of the restriction of the timer is that it is run only on the timer
 * interrupt which run every 10ms. This mean that even if you set the timer
 * with a delay of 0, it may take up to 10ms before it's run.
 * So, to minimise latency and keep cache fresh, we try to avoid using
 * it as much as possible.
 * Note : we can't use tasklets, because they can't be asynchronously
 * killed (need user context), and we can't guarantee that here...
 * Jean II
 */
static void irttp_todo_expired(unsigned long data)
{
	struct tsap_cb *self = (struct tsap_cb *) data;

	/* Check that we still exist */
	if (!self || self->magic != TTP_TSAP_MAGIC)
		return;

	IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);

	/* Try to make some progress, especially on Tx side - Jean II */
	irttp_run_rx_queue(self);
	irttp_run_tx_queue(self);

	/* Check if time for disconnect */
	if (test_bit(0, &self->disconnect_pend)) {
		/* Check if it's possible to disconnect yet */
		if (skb_queue_empty(&self->tx_queue)) {
			/* Make sure disconnect is not pending anymore */
			clear_bit(0, &self->disconnect_pend);	/* FALSE */

			/* Note : self->disconnect_skb may be NULL */
			irttp_disconnect_request(self, self->disconnect_skb,
				P_NORMAL);
			self->disconnect_skb = NULL;
		} else {
			/* Try again later */
			irttp_start_todo_timer(self, HZ/10);

			/* No reason to try and close now */
			return;
		}
	}

	/* Check if it's closing time */
	if (self->close_pend)
		/* Finish cleanup */
		irttp_close_tsap(self);
}
/*
 * Function irttp_flush_queues (self)
 *
 *    Drop every frame still queued on this TSAP: frames waiting to be
 *    transmitted, received-but-undelivered frames, and fragments of a
 *    partially reassembled SDU.
 */
static void irttp_flush_queues(struct tsap_cb *self)
{
	struct sk_buff_head *queues[3];
	struct sk_buff *skb;
	int i;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	queues[0] = &self->tx_queue;		/* frames waiting to be sent */
	queues[1] = &self->rx_queue;		/* received frames */
	queues[2] = &self->rx_fragments;	/* partial reassembly */

	for (i = 0; i < 3; i++)
		while ((skb = skb_dequeue(queues[i])) != NULL)
			dev_kfree_skb(skb);
}
/*
 * Function irttp_reassemble (self)
 *
 *    Makes a new (continuous) skb of all the fragments in the fragment
 *    queue.  Returns NULL on allocation failure.
 *
 */
static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
{
	struct sk_buff *skb, *frag;
	int n = 0;  /* Fragment index */

	IRDA_ASSERT(self != NULL, return NULL;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);

	IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __func__,
		self->rx_sdu_size);

	skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
	if (!skb)
		return NULL;

	/*
	 * Need to reserve space for TTP header in case this skb needs to
	 * be requeued in case delivery failes
	 */
	skb_reserve(skb, TTP_HEADER);
	skb_put(skb, self->rx_sdu_size);

	/*
	 * Copy all fragments to a new buffer
	 */
	while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) {
		skb_copy_to_linear_data_offset(skb, n, frag->data, frag->len);
		n += frag->len;

		dev_kfree_skb(frag);
	}

	IRDA_DEBUG(2,
		"%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
		__func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
	/* Note : irttp_run_rx_queue() calculate self->rx_sdu_size
	 * by summing the size of all fragments, so we should always
	 * have n == self->rx_sdu_size, except in cases where we
	 * droped the last fragment (when self->rx_sdu_size exceed
	 * self->rx_max_sdu_size), where n < self->rx_sdu_size.
	 * Jean II */
	IRDA_ASSERT(n <= self->rx_sdu_size, n = self->rx_sdu_size;);

	/* Set the new length */
	skb_trim(skb, n);

	self->rx_sdu_size = 0;

	return skb;
}
/*
 * Function irttp_fragment_skb (skb)
 *
 *    Fragments a frame and queues all the fragments for transmission
 *
 */
static inline void irttp_fragment_skb(struct tsap_cb *self,
	struct sk_buff *skb)
{
	struct sk_buff *frag;
	__u8 *frame;

	IRDA_DEBUG(2, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/*
	 * Split frame into a number of segments
	 */
	while (skb->len > self->max_seg_size) {
		IRDA_DEBUG(2, "%s(), fragmenting ...\n", __func__);

		/* Make new segment */
		frag = alloc_skb(self->max_seg_size+self->max_header_size,
			GFP_ATOMIC);
		/* NOTE(review): on allocation failure the remainder of the
		 * original skb is silently dropped (not queued). */
		if (!frag)
			return;

		skb_reserve(frag, self->max_header_size);

		/* Copy data from the original skb into this fragment. */
		skb_copy_from_linear_data(skb, skb_put(frag, self->max_seg_size),
			self->max_seg_size);

		/* Insert TTP header, with the more bit set */
		frame = skb_push(frag, TTP_HEADER);
		frame[0] = TTP_MORE;

		/* Hide the copied data from the original skb */
		skb_pull(skb, self->max_seg_size);

		/* Queue fragment */
		skb_queue_tail(&self->tx_queue, frag);
	}
	/* Queue what is left of the original skb */
	IRDA_DEBUG(2, "%s(), queuing last segment\n", __func__);

	frame = skb_push(skb, TTP_HEADER);
	frame[0] = 0x00; /* Clear more bit */

	/* Queue fragment */
	skb_queue_tail(&self->tx_queue, skb);
}
/*
 * Function irttp_param_max_sdu_size (self, param)
 *
 *    Handle the MaxSduSize parameter in the connect frames, this function
 *    will be called both when this parameter needs to be inserted into
 *    (@get != 0), and extracted from (@get == 0), the connect frames
 */
static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
	int get)
{
	struct tsap_cb *self;

	self = instance;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	if (get)
		param->pv.i = self->tx_max_sdu_size;
	else
		self->tx_max_sdu_size = param->pv.i;

	IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __func__, param->pv.i);

	return 0;
}
/*************************** CLIENT CALLS ***************************/
/************************** LMP CALLBACKS **************************/
/* Everything is happily mixed up. Waiting for next clean up - Jean II */

/*
 * Initialization, that has to be done on new tsap
 * instance allocation and on duplication
 */
static void irttp_init_tsap(struct tsap_cb *tsap)
{
	spin_lock_init(&tsap->lock);
	init_timer(&tsap->todo_timer);

	/* The three per-TSAP skb queues (see irttp_flush_queues). */
	skb_queue_head_init(&tsap->rx_queue);
	skb_queue_head_init(&tsap->tx_queue);
	skb_queue_head_init(&tsap->rx_fragments);
}
/*
 * Function irttp_open_tsap (stsap, notify)
 *
 *    Create TSAP connection endpoint.  Returns the new TSAP control
 *    block, or NULL on invalid selector or allocation failure.
 */
struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
{
	struct tsap_cb *self;
	struct lsap_cb *lsap;
	notify_t ttp_notify;

	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return NULL;);

	/* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to
	 * use only 0x01-0x6F. Of course, we can use LSAP_ANY as well.
	 * JeanII */
	if ((stsap_sel != LSAP_ANY) &&
	    ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
		IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__);
		return NULL;
	}

	self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
	if (self == NULL) {
		IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __func__);
		return NULL;
	}

	/* Initialize internal objects */
	irttp_init_tsap(self);

	/* Initialise todo timer */
	self->todo_timer.data     = (unsigned long) self;
	self->todo_timer.function = &irttp_todo_expired;

	/* Initialize callbacks for IrLMP to use */
	irda_notify_init(&ttp_notify);
	ttp_notify.connect_confirm = irttp_connect_confirm;
	ttp_notify.connect_indication = irttp_connect_indication;
	ttp_notify.disconnect_indication = irttp_disconnect_indication;
	ttp_notify.data_indication = irttp_data_indication;
	ttp_notify.udata_indication = irttp_udata_indication;
	ttp_notify.flow_indication = irttp_flow_indication;
	if (notify->status_indication != NULL)
		ttp_notify.status_indication = irttp_status_indication;
	ttp_notify.instance = self;
	strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);

	self->magic = TTP_TSAP_MAGIC;
	self->connected = FALSE;

	/*
	 * Create LSAP at IrLMP layer
	 */
	lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
	if (lsap == NULL) {
		IRDA_WARNING("%s: unable to allocate LSAP!!\n", __func__);
		/* Fix: the freshly allocated tsap_cb was leaked on this
		 * path.  Tear it down (flushes queues, kills the timer,
		 * frees the structure). */
		__irttp_close_tsap(self);
		return NULL;
	}

	/*
	 * If user specified LSAP_ANY as source TSAP selector, then IrLMP
	 * will replace it with whatever source selector which is free, so
	 * the stsap_sel we have might not be valid anymore
	 */
	self->stsap_sel = lsap->slsap_sel;
	IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);

	self->notify = *notify;
	self->lsap = lsap;

	hashbin_insert(irttp->tsaps, (irda_queue_t *) self, (long) self, NULL);

	if (credit > TTP_RX_MAX_CREDIT)
		self->initial_credit = TTP_RX_MAX_CREDIT;
	else
		self->initial_credit = credit;

	return self;
}
EXPORT_SYMBOL(irttp_open_tsap);
/*
 * Function irttp_close (handle)
 *
 *    Remove an instance of a TSAP. This function should only deal with the
 *    deallocation of the TSAP, and resetting of the TSAPs values;
 *
 */
static void __irttp_close_tsap(struct tsap_cb *self)
{
	/* First make sure we're connected. */
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	irttp_flush_queues(self);

	del_timer(&self->todo_timer);

	/* This one won't be cleaned up if we are disconnect_pend + close_pend
	 * and we receive a disconnect_indication */
	if (self->disconnect_skb)
		dev_kfree_skb(self->disconnect_skb);

	self->connected = FALSE;
	/* Invalidate the magic so stale pointers are caught by asserts. */
	self->magic = ~TTP_TSAP_MAGIC;

	kfree(self);
}
/*
* Function irttp_close (self)
*
* Remove TSAP from list of all TSAPs and then deallocate all resources
* associated with this TSAP
*
* Note : because we *free* the tsap structure, it is the responsibility
* of the caller to make sure we are called only once and to deal with
* possible race conditions. - Jean II
*/
int irttp_close_tsap(struct tsap_cb *self)
{
	struct tsap_cb *tsap;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	/* Make sure tsap has been disconnected */
	if (self->connected) {
		/* Check if disconnect is not pending */
		if (!test_bit(0, &self->disconnect_pend)) {
			IRDA_WARNING("%s: TSAP still connected!\n",
				     __func__);
			irttp_disconnect_request(self, NULL, P_NORMAL);
		}
		/* Defer the actual close: the todo timer re-enters this
		 * function once the disconnect has completed */
		self->close_pend = TRUE;
		irttp_start_todo_timer(self, HZ/10);

		return 0; /* Will be back! */
	}

	/* Unlink from the global TSAP list; must find our own entry */
	tsap = hashbin_remove(irttp->tsaps, (long) self, NULL);

	IRDA_ASSERT(tsap == self, return -1;);

	/* Close corresponding LSAP */
	if (self->lsap) {
		irlmp_close_lsap(self->lsap);
		self->lsap = NULL;
	}

	/* Free queues, timer, held skbs and the control block itself */
	__irttp_close_tsap(self);

	return 0;
}
EXPORT_SYMBOL(irttp_close_tsap);
/*
* Function irttp_udata_request (self, skb)
*
* Send unreliable data on this TSAP
*
*/
int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
{
	int rc = 0;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	IRDA_DEBUG(4, "%s()\n", __func__);

	/* Nothing to transmit for empty packets - just consume them */
	if (skb->len == 0)
		goto drop;

	if (!self->connected) {
		IRDA_WARNING("%s(), Not connected\n", __func__);
		rc = -ENOTCONN;
		goto drop;
	}

	/* Unreliable data is never fragmented, so the whole frame must
	 * fit into a single IrLAP frame */
	if (skb->len > self->max_seg_size) {
		IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__);
		rc = -EMSGSIZE;
		goto drop;
	}

	irlmp_udata_request(self->lsap, skb);
	self->stats.tx_packets++;

	return 0;

drop:
	dev_kfree_skb(skb);
	return rc;
}
EXPORT_SYMBOL(irttp_udata_request);
/*
 * Function irttp_data_request (handle, skb)
 *
 *    Queue frame for transmission. If SAR is enabled, fragment the frame
 *    and queue the fragments for transmission
 */
int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
{
	__u8 *frame;
	int ret;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
		   skb_queue_len(&self->tx_queue));

	/* Take shortcut on zero byte packets */
	if (skb->len == 0) {
		ret = 0;
		goto err;
	}

	/* Check that nothing bad happens */
	if (!self->connected) {
		IRDA_WARNING("%s: Not connected\n", __func__);
		ret = -ENOTCONN;
		goto err;
	}

	/*
	 * Check if SAR is disabled, and the frame is larger than what fits
	 * inside an IrLAP frame
	 */
	if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
		IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n",
			   __func__);
		ret = -EMSGSIZE;
		goto err;
	}

	/*
	 * Check if SAR is enabled, and the frame is larger than the
	 * TxMaxSduSize
	 */
	if ((self->tx_max_sdu_size != 0) &&
	    (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
	    (skb->len > self->tx_max_sdu_size))
	{
		IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
			   __func__);
		ret = -EMSGSIZE;
		goto err;
	}

	/*
	 * Check if transmit queue is full
	 */
	if (skb_queue_len(&self->tx_queue) >= TTP_TX_MAX_QUEUE) {
		/*
		 * Give it a chance to empty itself
		 */
		irttp_run_tx_queue(self);

		/* Drop packet. This error code should trigger the caller
		 * to resend the data in the client code - Jean II */
		ret = -ENOBUFS;
		goto err;
	}

	/* Queue frame, or queue frame segments */
	if ((self->tx_max_sdu_size == 0) || (skb->len < self->max_seg_size)) {
		/* Queue frame as-is; TTP header byte carries the MORE bit
		 * (0x80) and delta credit (low 7 bits, filled in later by
		 * irttp_run_tx_queue()) */
		IRDA_ASSERT(skb_headroom(skb) >= TTP_HEADER, return -1;);
		frame = skb_push(skb, TTP_HEADER);
		frame[0] = 0x00; /* Clear more bit */

		skb_queue_tail(&self->tx_queue, skb);
	} else {
		/*
		 * Fragment the frame, this function will also queue the
		 * fragments, we don't care about the fact the transmit
		 * queue may be overfilled by all the segments for a little
		 * while
		 */
		irttp_fragment_skb(self, skb);
	}

	/* Check if we can accept more data from client */
	if ((!self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) > TTP_TX_HIGH_THRESHOLD)) {
		/* Tx queue filling up, so stop client. */
		if (self->notify.flow_indication) {
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_STOP);
		}
		/* self->tx_sdu_busy is the state of the client.
		 * Update state after notifying client to avoid
		 * race condition with irttp_flow_indication().
		 * If the queue empty itself after our test but before
		 * we set the flag, we will fix ourselves below in
		 * irttp_run_tx_queue().
		 * Jean II */
		self->tx_sdu_busy = TRUE;
	}

	/* Try to make some progress */
	irttp_run_tx_queue(self);

	return 0;

err:
	/* Caller loses ownership of skb on every path, success or error */
	dev_kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(irttp_data_request);
/*
* Function irttp_run_tx_queue (self)
*
* Transmit packets queued for transmission (if possible)
*
*/
static void irttp_run_tx_queue(struct tsap_cb *self)
{
	struct sk_buff *skb;
	unsigned long flags;
	int n;

	IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n",
		   __func__,
		   self->send_credit, skb_queue_len(&self->tx_queue));

	/* Get exclusive access to the tx queue, otherwise don't touch it */
	if (irda_lock(&self->tx_queue_lock) == FALSE)
		return;

	/* Try to send out frames as long as we have credits
	 * and as long as LAP is not full. If LAP is full, it will
	 * poll us through irttp_flow_indication() - Jean II */
	while ((self->send_credit > 0) &&
	       (!irlmp_lap_tx_queue_full(self->lsap)) &&
	       (skb = skb_dequeue(&self->tx_queue)))
	{
		/*
		 * Since we can transmit and receive frames concurrently,
		 * the code below is a critical region and we must assure that
		 * nobody messes with the credits while we update them.
		 */
		spin_lock_irqsave(&self->lock, flags);

		n = self->avail_credit;
		self->avail_credit = 0;

		/* Only room for 127 credits in frame */
		if (n > 127) {
			self->avail_credit = n-127;
			n = 127;
		}
		self->remote_credit += n;
		self->send_credit--;

		spin_unlock_irqrestore(&self->lock, flags);

		/*
		 * More bit must be set by the data_request() or fragment()
		 * functions. Here we only piggy-back the delta credit into
		 * the low 7 bits of the TTP header byte.
		 */
		skb->data[0] |= (n & 0x7f);

		/* Detach from socket.
		 * The current skb has a reference to the socket that sent
		 * it (skb->sk). When we pass it to IrLMP, the skb will be
		 * stored in in IrLAP (self->wx_list). When we are within
		 * IrLAP, we lose the notion of socket, so we should not
		 * have a reference to a socket. So, we drop it here.
		 *
		 * Why does it matter ?
		 * When the skb is freed (kfree_skb), if it is associated
		 * with a socket, it release buffer space on the socket
		 * (through sock_wfree() and sock_def_write_space()).
		 * If the socket no longer exist, we may crash. Hard.
		 * When we close a socket, we make sure that associated packets
		 * in IrTTP are freed. However, we have no way to cancel
		 * the packet that we have passed to IrLAP. So, if a packet
		 * remains in IrLAP (retry on the link or else) after we
		 * close the socket, we are dead !
		 * Jean II */
		if (skb->sk != NULL) {
			/* IrSOCK application, IrOBEX, ... */
			skb_orphan(skb);
		}
		/* IrCOMM over IrTTP, IrLAN, ... */

		/* Pass the skb to IrLMP - done */
		irlmp_data_request(self->lsap, skb);
		self->stats.tx_packets++;
	}

	/* Check if we can accept more frames from client.
	 * We don't want to wait until the todo timer to do that, and we
	 * can't use tasklets (grr...), so we are obliged to give control
	 * to client. That's ok, this test will be true not too often
	 * (max once per LAP window) and we are called from places
	 * where we can spend a bit of time doing stuff. - Jean II */
	if ((self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
	    (!self->close_pend))
	{
		if (self->notify.flow_indication)
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_START);

		/* self->tx_sdu_busy is the state of the client.
		 * We don't really have a race here, but it's always safer
		 * to update our state after the client - Jean II */
		self->tx_sdu_busy = FALSE;
	}

	/* Reset lock */
	self->tx_queue_lock = 0;
}
/*
* Function irttp_give_credit (self)
*
* Send a dataless flowdata TTP-PDU and give available credit to peer
* TSAP
*/
static inline void irttp_give_credit(struct tsap_cb *self)
{
	struct sk_buff *tx_skb = NULL;
	unsigned long flags;
	int n;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n",
		   __func__,
		   self->send_credit, self->avail_credit, self->remote_credit);

	/* Give credit to peer */
	tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC);
	if (!tx_skb)
		return;

	/* Reserve space for LMP, and LAP header */
	skb_reserve(tx_skb, LMP_MAX_HEADER);

	/*
	 * Since we can transmit and receive frames concurrently,
	 * the code below is a critical region and we must assure that
	 * nobody messes with the credits while we update them.
	 */
	spin_lock_irqsave(&self->lock, flags);

	n = self->avail_credit;
	self->avail_credit = 0;

	/* Only space for 127 credits in frame */
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}
	self->remote_credit += n;

	spin_unlock_irqrestore(&self->lock, flags);

	/* Dataless flowdata PDU: single TTP header byte, MORE bit clear,
	 * low 7 bits carry the delta credit */
	skb_put(tx_skb, 1);
	tx_skb->data[0] = (__u8) (n & 0x7f);

	irlmp_data_request(self->lsap, tx_skb);
	self->stats.tx_packets++;
}
/*
* Function irttp_udata_indication (instance, sap, skb)
*
* Received some unit-data (unreliable)
*
*/
static int irttp_udata_indication(void *instance, void *sap,
				  struct sk_buff *skb)
{
	struct tsap_cb *self = instance;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	self->stats.rx_packets++;

	/* Hand the frame straight to the layer above, if anyone is
	 * listening. A zero return means the client took ownership. */
	if (self->notify.udata_indication &&
	    !self->notify.udata_indication(self->notify.instance, self, skb))
		return 0;

	/* No handler registered, or the handler refused the frame */
	dev_kfree_skb(skb);
	return 0;
}
/*
* Function irttp_data_indication (instance, sap, skb)
*
* Receive segment from IrLMP.
*
*/
static int irttp_data_indication(void *instance, void *sap,
				 struct sk_buff *skb)
{
	struct tsap_cb *self;
	unsigned long flags;
	int n;

	self = instance;

	/* First byte is the TTP header: bit 7 = MORE, bits 0-6 = credits.
	 * NOTE(review): skb->data[0] is read without an explicit length
	 * check here - presumably IrLMP never delivers an empty skb;
	 * confirm against the caller. */
	n = skb->data[0] & 0x7f;	/* Extract the credits */

	self->stats.rx_packets++;

	/* Deal with inbound credit
	 * Since we can transmit and receive frames concurrently,
	 * the code below is a critical region and we must assure that
	 * nobody messes with the credits while we update them.
	 */
	spin_lock_irqsave(&self->lock, flags);
	self->send_credit += n;
	if (skb->len > 1)
		self->remote_credit--;
	spin_unlock_irqrestore(&self->lock, flags);

	/*
	 * Data or dataless packet? Dataless frames contains only the
	 * TTP_HEADER.
	 */
	if (skb->len > 1) {
		/*
		 * We don't remove the TTP header, since we must preserve the
		 * more bit, so the defragment routine knows what to do
		 */
		skb_queue_tail(&self->rx_queue, skb);
	} else {
		/* Dataless flowdata TTP-PDU */
		dev_kfree_skb(skb);
	}

	/* Push data to the higher layer.
	 * We do it synchronously because running the todo timer for each
	 * receive packet would be too much overhead and latency.
	 * By passing control to the higher layer, we run the risk that
	 * it may take time or grab a lock. Most often, the higher layer
	 * will only put packet in a queue.
	 * Anyway, packets are only dripping through the IrDA, so we can
	 * have time before the next packet.
	 * Further, we are run from NET_BH, so the worse that can happen is
	 * us missing the optimal time to send back the PF bit in LAP.
	 * Jean II */
	irttp_run_rx_queue(self);

	/* We now give credits to peer in irttp_run_rx_queue().
	 * We need to send credit *NOW*, otherwise we are going
	 * to miss the next Tx window. The todo timer may take
	 * a while before it's run... - Jean II */

	/*
	 * If the peer device has given us some credits and we didn't have
	 * anyone from before, then we need to schedule the tx queue.
	 * We need to do that because our Tx have stopped (so we may not
	 * get any LAP flow indication) and the user may be stopped as
	 * well. - Jean II
	 */
	if (self->send_credit == n) {
		/* Restart pushing stuff to LAP */
		irttp_run_tx_queue(self);
		/* Note : we don't want to schedule the todo timer
		 * because it has horrible latency. No tasklets
		 * because the tasklet API is broken. - Jean II */
	}

	return 0;
}
/*
* Function irttp_status_indication (self, reason)
*
* Status_indication, just pass to the higher layer...
*
*/
static void irttp_status_indication(void *instance,
				    LINK_STATUS link, LOCK_STATUS lock)
{
	struct tsap_cb *self = instance;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* The client may already have closed this TSAP and gone away;
	 * in that case there is nobody left to tell */
	if (self->close_pend)
		return;

	/* Forward the link status to the service user, if a handler
	 * was registered at open time */
	if (self->notify.status_indication != NULL)
		self->notify.status_indication(self->notify.instance,
					       link, lock);
	else
		IRDA_DEBUG(2, "%s(), no handler\n", __func__);
}
/*
* Function irttp_flow_indication (self, reason)
*
* Flow_indication : IrLAP tells us to send more data.
*
*/
static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
{
	struct tsap_cb *self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);

	/* LAP polls us whenever a slot frees up in its Tx window; feed it
	 * as much as we can so the window stays full, while keeping the
	 * amount of work per call small so LAP is not kept waiting.
	 * In practice LAP calls once per free slot, so usually a single
	 * packet goes out per call - which conveniently lets the
	 * scheduler round-robin between clients. - Jean II */
	irttp_run_tx_queue(self);

	/* Note on re-entry into the higher layer:
	 * irttp_run_tx_queue() may call the client synchronously via
	 * notify.flow_indication() when its queue starts to drain.
	 * Tasklets are not safe here and timers are far too slow, so the
	 * synchronous callback is the lesser evil - it fires at most once
	 * per LAP window, typically around the third packet, while LAP is
	 * still busy with mtt and the first frame. - Jean II */

	/* A disconnect that was deferred behind queued data may now be
	 * able to proceed */
	if (self->disconnect_pend)
		irttp_start_todo_timer(self, 0);
}
/*
* Function irttp_flow_request (self, command)
*
* This function could be used by the upper layers to tell IrTTP to stop
* delivering frames if the receive queues are starting to get full, or
* to tell IrTTP to start delivering frames again.
*/
void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
{
	IRDA_DEBUG(1, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	if (flow == FLOW_STOP) {
		/* Client can't keep up - hold received frames back */
		IRDA_DEBUG(1, "%s(), flow stop\n", __func__);
		self->rx_sdu_busy = TRUE;
	} else if (flow == FLOW_START) {
		IRDA_DEBUG(1, "%s(), flow start\n", __func__);
		self->rx_sdu_busy = FALSE;

		/* Client say he can accept more data, try to free our
		 * queues ASAP - Jean II */
		irttp_run_rx_queue(self);
	} else {
		IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __func__);
	}
}
EXPORT_SYMBOL(irttp_flow_request);
/*
* Function irttp_connect_request (self, dtsap_sel, daddr, qos)
*
* Try to connect to remote destination TSAP selector
*
*/
int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
			  __u32 saddr, __u32 daddr,
			  struct qos_info *qos, __u32 max_sdu_size,
			  struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	__u8 n;

	IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __func__, max_sdu_size);

	IRDA_ASSERT(self != NULL, return -EBADR;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);

	if (self->connected) {
		if(userdata)
			dev_kfree_skb(userdata);
		return -EISCONN;
	}

	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX_CONTROL and LAP header */
		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		tx_skb = userdata;
		/*
		 * Check that the client has reserved enough space for
		 * headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			    { dev_kfree_skb(userdata); return -1; } );
	}

	/* Initialize connection parameters */
	self->connected = FALSE;
	self->avail_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;
	self->dtsap_sel = dtsap_sel;

	n = self->initial_credit;

	self->remote_credit = 0;
	self->send_credit = 0;

	/*
	 * Give away max 127 credits for now; the surplus stays in
	 * avail_credit and is advertised later, piggy-backed on data
	 */
	if (n > 127) {
		self->avail_credit=n-127;
		n = 127;
	}
	self->remote_credit = n;

	/* SAR enabled? */
	if (max_sdu_size > 0) {
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			    { dev_kfree_skb(tx_skb); return -1; } );

		/* Insert SAR parameters: 0x80 parameter bit + credit,
		 * then a PI/PL/PV-encoded MaxSduSize parameter */
		frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;
		frame[1] = 0x04; /* Length */
		frame[2] = 0x01; /* MaxSduSize */
		frame[3] = 0x02; /* Value length */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Insert plain TTP header */
		frame = skb_push(tx_skb, TTP_HEADER);

		/* Insert initial credit in frame */
		frame[0] = n & 0x7f;
	}

	/* Connect with IrLMP. No QoS parameters for now */
	return irlmp_connect_request(self->lsap, dtsap_sel, saddr, daddr, qos,
				     tx_skb);
}
EXPORT_SYMBOL(irttp_connect_request);
/*
* Function irttp_connect_confirm (handle, qos, skb)
*
* Service user confirms TSAP connection with peer.
*
*/
static void irttp_connect_confirm(void *instance, void *sap,
				  struct qos_info *qos, __u32 max_seg_size,
				  __u8 max_header_size, struct sk_buff *skb)
{
	struct tsap_cb *self;
	int parameters;
	int ret;
	__u8 plen;
	__u8 n;

	IRDA_DEBUG(4, "%s()\n", __func__);

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/* TTP consumes one byte of each LMP segment for its own header */
	self->max_seg_size = max_seg_size - TTP_HEADER;
	self->max_header_size = max_header_size + TTP_HEADER;

	/*
	 * Check if we have got some QoS parameters back! This should be the
	 * negotiated QoS for the link.
	 */
	if (qos) {
		IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %02x\n",
		       qos->baud_rate.bits);
		IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %d bps.\n",
		       qos->baud_rate.value);
	}

	/* Low 7 bits of the TTP header = peer's initial credit grant */
	n = skb->data[0] & 0x7f;

	IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __func__, n);

	self->send_credit = n;
	self->tx_max_sdu_size = 0;
	self->connected = TRUE;

	/* Bit 7 set means a parameter list (e.g. MaxSduSize) follows */
	parameters = skb->data[0] & 0x80;

	IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
	skb_pull(skb, TTP_HEADER);

	if (parameters) {
		plen = skb->data[0];

		ret = irda_param_extract_all(self, skb->data+1,
					     IRDA_MIN(skb->len-1, plen),
					     &param_info);

		/* Any errors in the parameter list? */
		if (ret < 0) {
			IRDA_WARNING("%s: error extracting parameters\n",
				     __func__);
			dev_kfree_skb(skb);

			/* Do not accept this connection attempt */
			return;
		}
		/* Remove parameters */
		skb_pull(skb, IRDA_MIN(skb->len, plen+1));
	}

	IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __func__,
	      self->send_credit, self->avail_credit, self->remote_credit);

	IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __func__,
		   self->tx_max_sdu_size);

	if (self->notify.connect_confirm) {
		self->notify.connect_confirm(self->notify.instance, self, qos,
					     self->tx_max_sdu_size,
					     self->max_header_size, skb);
	} else
		dev_kfree_skb(skb);
}
/*
* Function irttp_connect_indication (handle, skb)
*
* Some other device is connecting to this TSAP
*
*/
static void irttp_connect_indication(void *instance, void *sap,
				     struct qos_info *qos, __u32 max_seg_size, __u8 max_header_size,
				     struct sk_buff *skb)
{
	struct tsap_cb *self;
	struct lsap_cb *lsap;
	int parameters;
	int ret;
	__u8 plen;
	__u8 n;

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	lsap = sap;

	/* TTP consumes one byte of each LMP segment for its own header */
	self->max_seg_size = max_seg_size - TTP_HEADER;
	self->max_header_size = max_header_size+TTP_HEADER;

	IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);

	/* Need to update dtsap_sel if its equal to LSAP_ANY */
	self->dtsap_sel = lsap->dlsap_sel;

	/* Low 7 bits of the TTP header = peer's initial credit grant */
	n = skb->data[0] & 0x7f;

	self->send_credit = n;
	self->tx_max_sdu_size = 0;

	/* Bit 7 set means a parameter list (e.g. MaxSduSize) follows */
	parameters = skb->data[0] & 0x80;

	IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
	skb_pull(skb, TTP_HEADER);

	if (parameters) {
		plen = skb->data[0];

		ret = irda_param_extract_all(self, skb->data+1,
					     IRDA_MIN(skb->len-1, plen),
					     &param_info);

		/* Any errors in the parameter list? */
		if (ret < 0) {
			IRDA_WARNING("%s: error extracting parameters\n",
				     __func__);
			dev_kfree_skb(skb);

			/* Do not accept this connection attempt */
			return;
		}

		/* Remove parameters */
		skb_pull(skb, IRDA_MIN(skb->len, plen+1));
	}

	if (self->notify.connect_indication) {
		self->notify.connect_indication(self->notify.instance, self,
						qos, self->tx_max_sdu_size,
						self->max_header_size, skb);
	} else
		dev_kfree_skb(skb);
}
/*
* Function irttp_connect_response (handle, userdata)
*
* Service user is accepting the connection, just pass it down to
* IrLMP!
*
*/
int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
			   struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	int ret;
	__u8 n;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __func__,
		   self->stsap_sel);

	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX_CONTROL and LAP header */
		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		tx_skb = userdata;
		/*
		 * Check that the client has reserved enough space for
		 * headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			    { dev_kfree_skb(userdata); return -1; } );
	}

	/* Reset receive-side state for the new connection */
	self->avail_credit = 0;
	self->remote_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;

	n = self->initial_credit;

	/* Frame has only space for max 127 credits (7 bits) */
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}
	self->remote_credit = n;
	self->connected = TRUE;

	/* SAR enabled? */
	if (max_sdu_size > 0) {
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			    { dev_kfree_skb(tx_skb); return -1; } );

		/* Insert TTP header with SAR parameters */
		frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;
		frame[1] = 0x04; /* Length */

		/* irda_param_insert(self, IRTTP_MAX_SDU_SIZE, frame+1, */
		/*		  TTP_SAR_HEADER, &param_info) */

		frame[2] = 0x01; /* MaxSduSize */
		frame[3] = 0x02; /* Value length */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Insert TTP header */
		frame = skb_push(tx_skb, TTP_HEADER);

		frame[0] = n & 0x7f;
	}

	ret = irlmp_connect_response(self->lsap, tx_skb);

	return ret;
}
EXPORT_SYMBOL(irttp_connect_response);
/*
* Function irttp_dup (self, instance)
*
* Duplicate TSAP, can be used by servers to confirm a connection on a
* new TSAP so it can keep listening on the old one.
*/
struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
{
	struct tsap_cb *new;
	unsigned long flags;

	IRDA_DEBUG(1, "%s()\n", __func__);

	/* Protect our access to the old tsap instance */
	spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);

	/* Find the old instance */
	if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
		IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __func__);
		spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
		return NULL;
	}

	/* Allocate a new instance (shallow copy of the original) */
	new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
	if (!new) {
		IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
		spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
		return NULL;
	}
	/* The copied lock must not be shared with the original */
	spin_lock_init(&new->lock);

	/* We don't need the old instance any more */
	spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);

	/* Try to dup the LSAP (may fail if we were too slow) */
	new->lsap = irlmp_dup(orig->lsap, new);
	if (!new->lsap) {
		IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
		kfree(new);
		return NULL;
	}

	/* Not everything should be copied */
	new->notify.instance = instance;

	/* Initialize internal objects */
	irttp_init_tsap(new);

	/* This is locked */
	hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL);

	return new;
}
EXPORT_SYMBOL(irttp_dup);
/*
* Function irttp_disconnect_request (self)
*
* Close this connection please! If priority is high, the queued data
* segments, if any, will be deallocated first
*
*/
int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
			     int priority)
{
	int ret;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	/* Already disconnected? */
	if (!self->connected) {
		IRDA_DEBUG(4, "%s(), already disconnected!\n", __func__);
		if (userdata)
			dev_kfree_skb(userdata);
		return -1;
	}

	/* Disconnect already pending ?
	 * We need to use an atomic operation to prevent reentry. This
	 * function may be called from various context, like user, timer
	 * for following a disconnect_indication() (i.e. net_bh).
	 * Jean II */
	if(test_and_set_bit(0, &self->disconnect_pend)) {
		IRDA_DEBUG(0, "%s(), disconnect already pending\n",
			   __func__);
		if (userdata)
			dev_kfree_skb(userdata);

		/* Try to make some progress */
		irttp_run_tx_queue(self);
		return -1;
	}

	/*
	 * Check if there is still data segments in the transmit queue
	 */
	if (!skb_queue_empty(&self->tx_queue)) {
		if (priority == P_HIGH) {
			/*
			 * No need to send the queued data, if we are
			 * disconnecting right now since the data will
			 * not have any usable connection to be sent on
			 */
			IRDA_DEBUG(1, "%s(): High priority!!()\n", __func__);
			irttp_flush_queues(self);
		} else if (priority == P_NORMAL) {
			/*
			 * Must delay disconnect until after all data segments
			 * have been sent and the tx_queue is empty
			 */
			/* We'll reuse this one later for the disconnect */
			self->disconnect_skb = userdata; /* May be NULL */

			irttp_run_tx_queue(self);

			/* The todo timer re-enters this function once the
			 * queue has drained; disconnect_pend stays set so
			 * concurrent callers are rejected above */
			irttp_start_todo_timer(self, HZ/10);
			return -1;
		}
	}
	/* Note : we don't need to check if self->rx_queue is full and the
	 * state of self->rx_sdu_busy because the disconnect response will
	 * be sent at the LMP level (so even if the peer has its Tx queue
	 * full of data). - Jean II */

	IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __func__);
	self->connected = FALSE;

	if (!userdata) {
		struct sk_buff *tx_skb;
		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/*
		 * Reserve space for MUX and LAP header
		 */
		skb_reserve(tx_skb, LMP_MAX_HEADER);

		userdata = tx_skb;
	}
	ret = irlmp_disconnect_request(self->lsap, userdata);

	/* The disconnect is no longer pending */
	clear_bit(0, &self->disconnect_pend);	/* FALSE */

	return ret;
}
EXPORT_SYMBOL(irttp_disconnect_request);
/*
* Function irttp_disconnect_indication (self, reason)
*
* Disconnect indication, TSAP disconnected by peer?
*
*/
static void irttp_disconnect_indication(void *instance, void *sap,
					LM_REASON reason, struct sk_buff *skb)
{
	struct tsap_cb *self;

	IRDA_DEBUG(4, "%s()\n", __func__);

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* Prevent higher layer to send more data */
	self->connected = FALSE;

	/* Check if client has already tried to close the TSAP */
	if (self->close_pend) {
		/* In this case, the higher layer is probably gone. Don't
		 * bother it and clean up the remains - Jean II */
		if (skb)
			dev_kfree_skb(skb);
		irttp_close_tsap(self);
		return;
	}

	/* If we are here, we assume that is the higher layer is still
	 * waiting for the disconnect notification and able to process it,
	 * even if he tried to disconnect. Otherwise, it would have already
	 * attempted to close the tsap and self->close_pend would be TRUE.
	 * Jean II */

	/* No need to notify the client if has already tried to disconnect.
	 * Ownership of skb passes to the client's handler when present. */
	if(self->notify.disconnect_indication)
		self->notify.disconnect_indication(self->notify.instance, self,
						   reason, skb);
	else
		if (skb)
			dev_kfree_skb(skb);
}
/*
 * Function irttp_do_data_indication (self, skb)
 *
 *    Try to deliver the reassembled skb to the layer above, and requeue it
 *    if that should for some reason fail. We mark the rx sdu as busy to
 *    apply back pressure if necessary.
 */
static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
{
	int err;

	/* Check if client has already closed the TSAP and gone away */
	if (self->close_pend) {
		dev_kfree_skb(skb);
		return;
	}

	err = self->notify.data_indication(self->notify.instance, self, skb);

	/* Usually the layer above will notify that it's input queue is
	 * starting to get filled by using the flow request, but this may
	 * be difficult, so it can instead just refuse to eat it and just
	 * give an error back
	 */
	if (err) {
		IRDA_DEBUG(0, "%s() requeueing skb!\n", __func__);

		/* Make sure we take a break: stop dequeuing until the
		 * client restarts us via irttp_flow_request(FLOW_START) */
		self->rx_sdu_busy = TRUE;

		/* Need to push the header in again */
		skb_push(skb, TTP_HEADER);
		skb->data[0] = 0x00; /* Make sure MORE bit is cleared */

		/* Put skb back on queue */
		skb_queue_head(&self->rx_queue, skb);
	}
}
/*
* Function irttp_run_rx_queue (self)
*
* Check if we have any frames to be transmitted, or if we have any
* available credit to give away.
*/
static void irttp_run_rx_queue(struct tsap_cb *self)
{
	struct sk_buff *skb;
	int more = 0;

	IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __func__,
		   self->send_credit, self->avail_credit, self->remote_credit);

	/* Get exclusive access to the rx queue, otherwise don't touch it */
	if (irda_lock(&self->rx_queue_lock) == FALSE)
		return;

	/*
	 * Reassemble all frames in receive queue and deliver them
	 */
	while (!self->rx_sdu_busy && (skb = skb_dequeue(&self->rx_queue))) {
		/* This bit will tell us if it's the last fragment or not */
		more = skb->data[0] & 0x80;

		/* Remove TTP header */
		skb_pull(skb, TTP_HEADER);

		/* Add the length of the remaining data */
		self->rx_sdu_size += skb->len;

		/*
		 * If SAR is disabled, or user has requested no reassembly
		 * of received fragments then we just deliver them
		 * immediately. This can be requested by clients that
		 * implements byte streams without any message boundaries
		 */
		if (self->rx_max_sdu_size == TTP_SAR_DISABLE) {
			irttp_do_data_indication(self, skb);
			self->rx_sdu_size = 0;

			continue;
		}

		/* Check if this is a fragment, and not the last fragment */
		if (more) {
			/*
			 * Queue the fragment if we still are within the
			 * limits of the maximum size of the rx_sdu
			 */
			if (self->rx_sdu_size <= self->rx_max_sdu_size) {
				IRDA_DEBUG(4, "%s(), queueing frag\n",
					   __func__);
				skb_queue_tail(&self->rx_fragments, skb);
			} else {
				/* Free the part of the SDU that is too big */
				dev_kfree_skb(skb);
			}
			continue;
		}
		/*
		 * This is the last fragment, so time to reassemble!
		 */
		if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
		    (self->rx_max_sdu_size == TTP_SAR_UNBOUND))
		{
			/*
			 * A little optimizing. Only queue the fragment if
			 * there are other fragments. Since if this is the
			 * last and only fragment, there is no need to
			 * reassemble :-)
			 */
			if (!skb_queue_empty(&self->rx_fragments)) {
				skb_queue_tail(&self->rx_fragments,
					       skb);

				skb = irttp_reassemble_skb(self);
			}

			/* Now we can deliver the reassembled skb */
			irttp_do_data_indication(self, skb);
		} else {
			IRDA_DEBUG(1, "%s(), Truncated frame\n", __func__);

			/* Free the part of the SDU that is too big */
			dev_kfree_skb(skb);

			/* Deliver only the valid but truncated part of SDU */
			skb = irttp_reassemble_skb(self);

			irttp_do_data_indication(self, skb);
		}
		self->rx_sdu_size = 0;
	}

	/*
	 * It's not trivial to keep track of how many credits are available
	 * by incrementing at each packet, because delivery may fail
	 * (irttp_do_data_indication() may requeue the frame) and because
	 * we need to take care of fragmentation.
	 * We want the other side to send up to initial_credit packets.
	 * We have some frames in our queues, and we have already allowed it
	 * to send remote_credit.
	 * No need to spinlock, write is atomic and self correcting...
	 * Jean II
	 */
	self->avail_credit = (self->initial_credit -
			      (self->remote_credit +
			       skb_queue_len(&self->rx_queue) +
			       skb_queue_len(&self->rx_fragments)));

	/* Do we have too much credits to send to peer ? */
	if ((self->remote_credit <= TTP_RX_MIN_CREDIT) &&
	    (self->avail_credit > 0)) {
		/* Send explicit credit frame */
		irttp_give_credit(self);

		/* Note : do *NOT* check if tx_queue is non-empty, that
		 * will produce deadlocks. I repeat : send a credit frame
		 * even if we have something to send in our Tx queue.
		 * If we have credits, it means that our Tx queue is blocked.
		 *
		 * Let's suppose the peer can't keep up with our Tx. He will
		 * flow control us by not sending us any credits, and we
		 * will stop Tx and start accumulating credits here.
		 * Up to the point where the peer will stop its Tx queue,
		 * for lack of credits.
		 * Let's assume the peer application is single threaded.
		 * It will block on Tx and never consume any Rx buffer.
		 * Deadlock. Guaranteed. - Jean II
		 */
	}

	/* Reset lock */
	self->rx_queue_lock = 0;
}
#ifdef CONFIG_PROC_FS
/* Cursor state for the IrTTP /proc sequence file: index of the TSAP
 * currently being shown. */
struct irttp_iter_state {
	int id;		/* position of the current TSAP in the hashbin walk */
};
/*
 * Seq-file start: take the TSAP list lock (released in irttp_seq_stop)
 * and walk the hashbin until the entry at position *pos is reached.
 * Returns that tsap_cb, or NULL when *pos is past the end of the list.
 */
static void *irttp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct irttp_iter_state *iter = seq->private;
	struct tsap_cb *tsap;

	/* Serialize against TSAP creation/removal; dropped in stop() */
	spin_lock_irq(&irttp->tsaps->hb_spinlock);

	iter->id = 0;
	tsap = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
	while (tsap != NULL && iter->id != *pos) {
		++iter->id;
		tsap = (struct tsap_cb *) hashbin_get_next(irttp->tsaps);
	}

	return tsap;
}
/*
 * Seq-file next: advance the hashbin cursor by one TSAP, keeping both
 * the seq-file position and our own index counter in sync.
 */
static void *irttp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct irttp_iter_state *iter = seq->private;

	(*pos)++;
	iter->id++;

	return (void *) hashbin_get_next(irttp->tsaps);
}
/* Seq-file stop: drop the TSAP list lock taken in irttp_seq_start(). */
static void irttp_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_irq(&irttp->tsaps->hb_spinlock);
}
/* Print the state of one TSAP (selector, credits, queue lengths,
 * SDU sizes and owner name) to the seq_file. Called under the list
 * lock held by irttp_seq_start(). */
static int irttp_seq_show(struct seq_file *seq, void *v)
{
	const struct irttp_iter_state *iter = seq->private;
	const struct tsap_cb *self = v;

	seq_printf(seq, "TSAP %d, ", iter->id);
	seq_printf(seq, "stsap_sel: %02x, ",
		   self->stsap_sel);
	seq_printf(seq, "dtsap_sel: %02x\n",
		   self->dtsap_sel);
	seq_printf(seq, " connected: %s, ",
		   self->connected? "TRUE":"FALSE");
	seq_printf(seq, "avail credit: %d, ",
		   self->avail_credit);
	seq_printf(seq, "remote credit: %d, ",
		   self->remote_credit);
	seq_printf(seq, "send credit: %d\n",
		   self->send_credit);
	seq_printf(seq, " tx packets: %lu, ",
		   self->stats.tx_packets);
	seq_printf(seq, "rx packets: %lu, ",
		   self->stats.rx_packets);
	seq_printf(seq, "tx_queue len: %u ",
		   skb_queue_len(&self->tx_queue));
	seq_printf(seq, "rx_queue len: %u\n",
		   skb_queue_len(&self->rx_queue));
	seq_printf(seq, " tx_sdu_busy: %s, ",
		   self->tx_sdu_busy? "TRUE":"FALSE");
	seq_printf(seq, "rx_sdu_busy: %s\n",
		   self->rx_sdu_busy? "TRUE":"FALSE");
	seq_printf(seq, " max_seg_size: %u, ",
		   self->max_seg_size);
	seq_printf(seq, "tx_max_sdu_size: %u, ",
		   self->tx_max_sdu_size);
	seq_printf(seq, "rx_max_sdu_size: %u\n",
		   self->rx_max_sdu_size);
	seq_printf(seq, " Used by (%s)\n\n",
		   self->notify.name);
	return 0;
}
/* seq_file iterator operations backing the IrTTP /proc entry */
static const struct seq_operations irttp_seq_ops = {
	.start  = irttp_seq_start,
	.next   = irttp_seq_next,
	.stop   = irttp_seq_stop,
	.show   = irttp_seq_show,
};
/* Open handler: allocate the per-reader iterator state alongside the
 * seq_file. Freed by seq_release_private() on close. */
static int irttp_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &irttp_seq_ops,
			sizeof(struct irttp_iter_state));
}
/* File operations for the IrTTP /proc entry. */
const struct file_operations irttp_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= irttp_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* PROC_FS */
| gpl-2.0 |
xsynergy510x/android_kernel_samsung_jf | drivers/net/wireless/libertas_tf/main.c | 5035 | 19558 | /*
* Copyright (C) 2008, cozybit Inc.
* Copyright (C) 2003-2006, Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include "libertas_tf.h"
#define DRIVER_RELEASE_VERSION "004.p0"
/* thinfirm version: 5.132.X.pX
 * Firmware releases outside [MIN, MAX] are rejected in lbtf_op_start() */
#define LBTF_FW_VER_MIN		0x05840300
#define LBTF_FW_VER_MAX		0x0584ffff

/* Length of the 802.11 QoS Control field, in bytes */
#define QOS_CONTROL_LEN		2
/* Module parameters */

/* Debug message bitmask, writable at runtime via the
 * libertas_tf_debug module parameter */
unsigned int lbtf_debug;
EXPORT_SYMBOL_GPL(lbtf_debug);
module_param_named(libertas_tf_debug, lbtf_debug, int, 0644);

/* Driver version string; tagged "-dbg" on debug builds */
static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION
#ifdef DEBUG
	"-dbg"
#endif
	"";

/* Workqueue shared by the deferred command and transmit workers */
struct workqueue_struct *lbtf_wq;
/* 2.4 GHz channel list; hw_value is the 802.11b/g channel number */
static const struct ieee80211_channel lbtf_channels[] = {
	{ .center_freq = 2412, .hw_value = 1 },
	{ .center_freq = 2417, .hw_value = 2 },
	{ .center_freq = 2422, .hw_value = 3 },
	{ .center_freq = 2427, .hw_value = 4 },
	{ .center_freq = 2432, .hw_value = 5 },
	{ .center_freq = 2437, .hw_value = 6 },
	{ .center_freq = 2442, .hw_value = 7 },
	{ .center_freq = 2447, .hw_value = 8 },
	{ .center_freq = 2452, .hw_value = 9 },
	{ .center_freq = 2457, .hw_value = 10 },
	{ .center_freq = 2462, .hw_value = 11 },
	{ .center_freq = 2467, .hw_value = 12 },
	{ .center_freq = 2472, .hw_value = 13 },
	{ .center_freq = 2484, .hw_value = 14 },
};
/* This table contains the hardware specific values for the modulation rates.
 * Bitrates are in units of 100 kbit/s.
 * NOTE(review): hw_value 4 is absent — the firmware rate index appears to
 * skip it; lbtf_rx() compensates when mapping rx_rate back to rate_idx. */
static const struct ieee80211_rate lbtf_rates[] = {
	{ .bitrate = 10,
	  .hw_value = 0, },
	{ .bitrate = 20,
	  .hw_value = 1,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = 2,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = 3,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = 5,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = 6,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = 7,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = 8,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = 9,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = 10,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = 11,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = 12,
	  .flags = 0 },
};
/*
 * Deferred command worker: dispatches a received command response,
 * retries or fails a timed-out command, and launches the next pending
 * command once the firmware is ready.
 */
static void lbtf_cmd_work(struct work_struct *work)
{
	struct lbtf_private *priv = container_of(work, struct lbtf_private,
		 cmd_work);

	lbtf_deb_enter(LBTF_DEB_CMD);

	spin_lock_irq(&priv->driver_lock);
	/* command response? */
	if (priv->cmd_response_rxed) {
		priv->cmd_response_rxed = 0;
		/* process the response outside the lock, then re-take it
		 * for the timeout handling below */
		spin_unlock_irq(&priv->driver_lock);
		lbtf_process_rx_command(priv);
		spin_lock_irq(&priv->driver_lock);
	}

	if (priv->cmd_timed_out && priv->cur_cmd) {
		struct cmd_ctrl_node *cmdnode = priv->cur_cmd;

		/* Fail the command after 10 timeouts, otherwise requeue */
		if (++priv->nr_retries > 10) {
			lbtf_complete_command(priv, cmdnode,
				-ETIMEDOUT);
			priv->nr_retries = 0;
		} else {
			priv->cur_cmd = NULL;

			/* Stick it back at the _top_ of the pending
			 * queue for immediate resubmission */
			list_add(&cmdnode->list, &priv->cmdpendingq);
		}
	}
	priv->cmd_timed_out = 0;
	spin_unlock_irq(&priv->driver_lock);

	if (!priv->fw_ready) {
		lbtf_deb_leave_args(LBTF_DEB_CMD, "fw not ready");
		return;
	}

	/* Execute the next command */
	if (!priv->cur_cmd)
		lbtf_execute_next_command(priv);

	lbtf_deb_leave(LBTF_DEB_CMD);
}
/**
 * lbtf_setup_firmware: initialize firmware.
 *
 * @priv A pointer to struct lbtf_private structure
 *
 * Reads the MAC address from the hardware, then pushes the current MAC
 * and radio control settings down to the firmware.
 *
 * Returns: 0 on success, -1 on failure.
 */
static int lbtf_setup_firmware(struct lbtf_private *priv)
{
	int ret = 0;

	lbtf_deb_enter(LBTF_DEB_FW);

	/*
	 * Read priv address from HW
	 */
	memset(priv->current_addr, 0xff, ETH_ALEN);
	if (lbtf_update_hw_spec(priv)) {
		ret = -1;
	} else {
		lbtf_set_mac_control(priv);
		lbtf_set_radio_control(priv);
	}

	lbtf_deb_leave_args(LBTF_DEB_FW, "ret: %d", ret);
	return ret;
}
/**
 * This function handles the timeout of command sending.
 * It will re-send the same command again (the actual retry/fail
 * decision is made by lbtf_cmd_work(), which this function schedules).
 */
static void command_timer_fn(unsigned long data)
{
	struct lbtf_private *priv = (struct lbtf_private *)data;
	unsigned long flags;
	lbtf_deb_enter(LBTF_DEB_CMD);

	spin_lock_irqsave(&priv->driver_lock, flags);

	if (!priv->cur_cmd) {
		/* Spurious expiry: no command in flight */
		printk(KERN_DEBUG "libertastf: command timer expired; "
		       "no pending command\n");
		goto out;
	}

	printk(KERN_DEBUG "libertas: command %x timed out\n",
	       le16_to_cpu(priv->cur_cmd->cmdbuf->command));

	/* Flag the timeout and let the command worker handle it */
	priv->cmd_timed_out = 1;
	queue_work(lbtf_wq, &priv->cmd_work);
out:
	spin_unlock_irqrestore(&priv->driver_lock, flags);
	lbtf_deb_leave(LBTF_DEB_CMD);
}
/* Initialize driver-private state: locks, command timer, command lists
 * and command buffers. Returns 0 on success, -1 if the command buffers
 * cannot be allocated. */
static int lbtf_init_adapter(struct lbtf_private *priv)
{
	lbtf_deb_enter(LBTF_DEB_MAIN);
	memset(priv->current_addr, 0xff, ETH_ALEN);
	mutex_init(&priv->lock);

	priv->vif = NULL;
	setup_timer(&priv->command_timer, command_timer_fn,
		(unsigned long)priv);

	INIT_LIST_HEAD(&priv->cmdfreeq);
	INIT_LIST_HEAD(&priv->cmdpendingq);

	spin_lock_init(&priv->driver_lock);

	/* Allocate the command buffers */
	if (lbtf_allocate_cmd_buffer(priv))
		return -1;

	lbtf_deb_leave(LBTF_DEB_MAIN);
	return 0;
}
/* Counterpart of lbtf_init_adapter(): release the command buffers and
 * stop the command timeout timer. */
static void lbtf_free_adapter(struct lbtf_private *priv)
{
	lbtf_deb_enter(LBTF_DEB_MAIN);
	lbtf_free_cmd_buffer(priv);
	del_timer(&priv->command_timer);
	lbtf_deb_leave(LBTF_DEB_MAIN);
}
/* mac80211 .tx callback: stash the frame and defer the real transmit
 * to lbtf_tx_work(); only one frame is in flight at a time. */
static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct lbtf_private *priv = hw->priv;

	priv->skb_to_tx = skb;
	queue_work(lbtf_wq, &priv->tx_work);
	/*
	 * queue will be restarted when we receive transmission feedback if
	 * there are no buffered multicast frames to send
	 */
	ieee80211_stop_queues(priv->hw);
}
/*
 * Deferred transmit worker: prepends a Marvell txpd descriptor and
 * hands one frame to the interface driver. In AP mode, PS-buffered
 * broadcast frames take priority over the frame queued by mac80211.
 */
static void lbtf_tx_work(struct work_struct *work)
{
	struct lbtf_private *priv = container_of(work, struct lbtf_private,
		 tx_work);
	unsigned int len;
	struct ieee80211_tx_info *info;
	struct txpd *txpd;
	struct sk_buff *skb = NULL;
	int err;

	lbtf_deb_enter(LBTF_DEB_MACOPS | LBTF_DEB_TX);

	/* Select the frame to send: buffered broadcast first, then the
	 * frame handed over by lbtf_op_tx(), else nothing to do */
	if ((priv->vif->type == NL80211_IFTYPE_AP) &&
	    (!skb_queue_empty(&priv->bc_ps_buf)))
		skb = skb_dequeue(&priv->bc_ps_buf);
	else if (priv->skb_to_tx) {
		skb = priv->skb_to_tx;
		priv->skb_to_tx = NULL;
	} else {
		lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
		return;
	}

	len = skb->len;
	info = IEEE80211_SKB_CB(skb);
	txpd = (struct txpd *) skb_push(skb, sizeof(struct txpd));

	if (priv->surpriseremoved) {
		/* Device is gone: silently drop the frame */
		dev_kfree_skb_any(skb);
		lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
		return;
	}

	memset(txpd, 0, sizeof(struct txpd));
	/* Activate per-packet rate selection */
	txpd->tx_control |= cpu_to_le32(MRVL_PER_PACKET_RATE |
			     ieee80211_get_tx_rate(priv->hw, info)->hw_value);

	/* copy destination address from 802.11 header */
	memcpy(txpd->tx_dest_addr_high, skb->data + sizeof(struct txpd) + 4,
		ETH_ALEN);
	txpd->tx_packet_length = cpu_to_le16(len);
	txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
	lbtf_deb_hex(LBTF_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100));

	/* Only one frame may be in flight; feedback clears tx_skb */
	BUG_ON(priv->tx_skb);
	spin_lock_irq(&priv->driver_lock);
	priv->tx_skb = skb;
	err = priv->hw_host_to_card(priv, MVMS_DAT, skb->data, skb->len);
	spin_unlock_irq(&priv->driver_lock);
	if (err) {
		dev_kfree_skb_any(skb);
		priv->tx_skb = NULL;
		pr_err("TX error: %d", err);
	}
	lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
}
/* mac80211 .start callback: upload firmware if not yet done, set the
 * initial MAC/radio state, and verify the firmware version lies in
 * the supported [LBTF_FW_VER_MIN, LBTF_FW_VER_MAX] window. On any
 * failure the device is reset. Returns 0 on success. */
static int lbtf_op_start(struct ieee80211_hw *hw)
{
	struct lbtf_private *priv = hw->priv;
	void *card = priv->card;
	int ret = -1;

	lbtf_deb_enter(LBTF_DEB_MACOPS);

	if (!priv->fw_ready)
		/* Upload firmware */
		if (priv->hw_prog_firmware(card))
			goto err_prog_firmware;

	/* poke the firmware */
	priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
	priv->radioon = RADIO_ON;
	priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
	ret = lbtf_setup_firmware(priv);
	if (ret)
		goto err_prog_firmware;

	/* Reject firmware outside the supported version window */
	if ((priv->fwrelease < LBTF_FW_VER_MIN) ||
	    (priv->fwrelease > LBTF_FW_VER_MAX)) {
		ret = -1;
		goto err_prog_firmware;
	}

	printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
	lbtf_deb_leave(LBTF_DEB_MACOPS);
	return 0;

err_prog_firmware:
	priv->hw_reset_device(card);
	lbtf_deb_leave_args(LBTF_DEB_MACOPS, "error programing fw; ret=%d", ret);
	return ret;
}
/* mac80211 .stop callback: fail all pending commands, cancel the
 * deferred workers, drop buffered PS frames and turn the radio off. */
static void lbtf_op_stop(struct ieee80211_hw *hw)
{
	struct lbtf_private *priv = hw->priv;
	unsigned long flags;
	struct sk_buff *skb;

	struct cmd_ctrl_node *cmdnode;

	lbtf_deb_enter(LBTF_DEB_MACOPS);

	/* Flush pending command nodes */
	spin_lock_irqsave(&priv->driver_lock, flags);
	list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
		cmdnode->result = -ENOENT;
		cmdnode->cmdwaitqwoken = 1;
		wake_up_interruptible(&cmdnode->cmdwait_q);
	}

	spin_unlock_irqrestore(&priv->driver_lock, flags);
	cancel_work_sync(&priv->cmd_work);
	cancel_work_sync(&priv->tx_work);
	/* Drop any broadcast frames buffered for power-save delivery */
	while ((skb = skb_dequeue(&priv->bc_ps_buf)))
		dev_kfree_skb_any(skb);
	priv->radioon = RADIO_OFF;
	lbtf_set_radio_control(priv);

	lbtf_deb_leave(LBTF_DEB_MACOPS);
}
/* mac80211 .add_interface callback: a single virtual interface is
 * supported (AP, mesh point or station); a second add, or an
 * unsupported type, returns -EOPNOTSUPP. */
static int lbtf_op_add_interface(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif)
{
	struct lbtf_private *priv = hw->priv;
	lbtf_deb_enter(LBTF_DEB_MACOPS);
	if (priv->vif != NULL)
		return -EOPNOTSUPP;

	priv->vif = vif;
	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_AP:
		lbtf_set_mode(priv, LBTF_AP_MODE);
		break;
	case NL80211_IFTYPE_STATION:
		lbtf_set_mode(priv, LBTF_STA_MODE);
		break;
	default:
		priv->vif = NULL;
		return -EOPNOTSUPP;
	}
	lbtf_set_mac_address(priv, (u8 *) vif->addr);
	lbtf_deb_leave(LBTF_DEB_MACOPS);
	return 0;
}
/* mac80211 .remove_interface callback: stop beaconing (AP/mesh), put
 * the firmware back into passive mode and clear the BSSID. */
static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif)
{
	struct lbtf_private *priv = hw->priv;
	lbtf_deb_enter(LBTF_DEB_MACOPS);

	if (priv->vif->type == NL80211_IFTYPE_AP ||
	    priv->vif->type == NL80211_IFTYPE_MESH_POINT)
		lbtf_beacon_ctrl(priv, 0, 0);
	lbtf_set_mode(priv, LBTF_PASSIVE_MODE);
	lbtf_set_bssid(priv, 0, NULL);
	priv->vif = NULL;
	lbtf_deb_leave(LBTF_DEB_MACOPS);
}
/* mac80211 .config callback: retune the hardware only when the
 * configured channel actually differs from the cached frequency. */
static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct lbtf_private *priv = hw->priv;
	struct ieee80211_channel *chan = hw->conf.channel;

	lbtf_deb_enter(LBTF_DEB_MACOPS);

	if (priv->cur_freq != chan->center_freq) {
		priv->cur_freq = chan->center_freq;
		lbtf_set_channel(priv, chan->hw_value);
	}

	lbtf_deb_leave(LBTF_DEB_MACOPS);
	return 0;
}
/* mac80211 .prepare_multicast callback: copy up to
 * MRVDRV_MAX_MULTICAST_LIST_SIZE addresses into the private list and
 * return the count, which configure_filter() later receives as
 * 'multicast'. An oversized (or empty) list is not copied; the count
 * alone lets configure_filter() fall back to all-multicast. */
static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw,
				     struct netdev_hw_addr_list *mc_list)
{
	struct lbtf_private *priv = hw->priv;
	int i;
	struct netdev_hw_addr *ha;
	int mc_count = netdev_hw_addr_list_count(mc_list);

	if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE)
		return mc_count;

	priv->nr_of_multicastmacaddr = mc_count;
	i = 0;
	netdev_hw_addr_list_for_each(ha, mc_list)
		memcpy(&priv->multicastlist[i++], ha->addr, ETH_ALEN);

	return mc_count;
}
/* RX filter flags this driver can honour */
#define SUPPORTED_FIF_FLAGS  (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)
/* mac80211 .configure_filter callback: translate mac80211 filter flags
 * into the firmware's mac_control bits; a command is only sent when
 * the resulting control word actually changed. */
static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
			unsigned int changed_flags,
			unsigned int *new_flags,
			u64 multicast)
{
	struct lbtf_private *priv = hw->priv;
	int old_mac_control = priv->mac_control;

	lbtf_deb_enter(LBTF_DEB_MACOPS);

	changed_flags &= SUPPORTED_FIF_FLAGS;
	*new_flags &= SUPPORTED_FIF_FLAGS;

	if (!changed_flags) {
		lbtf_deb_leave(LBTF_DEB_MACOPS);
		return;
	}

	if (*new_flags & (FIF_PROMISC_IN_BSS))
		priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
	else
		priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
	if (*new_flags & (FIF_ALLMULTI) ||
	    multicast > MRVDRV_MAX_MULTICAST_LIST_SIZE) {
		/* Too many addresses for an exact list: accept all mcast */
		priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
		priv->mac_control &= ~CMD_ACT_MAC_MULTICAST_ENABLE;
	} else if (multicast) {
		/* Use the exact list filled in by prepare_multicast() */
		priv->mac_control |= CMD_ACT_MAC_MULTICAST_ENABLE;
		priv->mac_control &= ~CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
		lbtf_cmd_set_mac_multicast_addr(priv);
	} else {
		priv->mac_control &= ~(CMD_ACT_MAC_MULTICAST_ENABLE |
				       CMD_ACT_MAC_ALL_MULTICAST_ENABLE);
		if (priv->nr_of_multicastmacaddr) {
			/* Clear the stale list in firmware */
			priv->nr_of_multicastmacaddr = 0;
			lbtf_cmd_set_mac_multicast_addr(priv);
		}
	}

	if (priv->mac_control != old_mac_control)
		lbtf_set_mac_control(priv);

	lbtf_deb_leave(LBTF_DEB_MACOPS);
}
/* mac80211 .bss_info_changed callback: push beacon templates, BSSID
 * and ERP preamble changes down to the firmware. */
static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *bss_conf,
			u32 changes)
{
	struct lbtf_private *priv = hw->priv;
	struct sk_buff *beacon;
	lbtf_deb_enter(LBTF_DEB_MACOPS);

	if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_INT)) {
		switch (priv->vif->type) {
		case NL80211_IFTYPE_AP:
		case NL80211_IFTYPE_MESH_POINT:
			/* Install a fresh beacon template and enable
			 * beaconing at the configured interval */
			beacon = ieee80211_beacon_get(hw, vif);
			if (beacon) {
				lbtf_beacon_set(priv, beacon);
				kfree_skb(beacon);
				lbtf_beacon_ctrl(priv, 1,
						 bss_conf->beacon_int);
			}
			break;
		default:
			break;
		}
	}

	if (changes & BSS_CHANGED_BSSID) {
		/* An all-zero BSSID deactivates the association */
		bool activate = !is_zero_ether_addr(bss_conf->bssid);
		lbtf_set_bssid(priv, activate, bss_conf->bssid);
	}

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		if (bss_conf->use_short_preamble)
			priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
		else
			priv->preamble = CMD_TYPE_LONG_PREAMBLE;
		lbtf_set_radio_control(priv);
	}

	lbtf_deb_leave(LBTF_DEB_MACOPS);
}
/* mac80211 .get_survey callback: a single survey record exists (the
 * current channel), reporting the last noise figure seen on receive. */
static int lbtf_op_get_survey(struct ieee80211_hw *hw, int idx,
			      struct survey_info *survey)
{
	struct lbtf_private *priv = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;

	if (idx)
		return -ENOENT;

	survey->channel = conf->channel;
	survey->filled = SURVEY_INFO_NOISE_DBM;
	survey->noise = priv->noise;

	return 0;
}
/* mac80211 callbacks implemented by this driver */
static const struct ieee80211_ops lbtf_ops = {
	.tx			= lbtf_op_tx,
	.start			= lbtf_op_start,
	.stop			= lbtf_op_stop,
	.add_interface		= lbtf_op_add_interface,
	.remove_interface	= lbtf_op_remove_interface,
	.config			= lbtf_op_config,
	.prepare_multicast	= lbtf_op_prepare_multicast,
	.configure_filter	= lbtf_op_configure_filter,
	.bss_info_changed	= lbtf_op_bss_info_changed,
	.get_survey		= lbtf_op_get_survey,
};
/*
 * lbtf_rx: hand a received frame from the interface driver to mac80211.
 *
 * Strips the Marvell rxpd descriptor, fills in the rx status (FCS
 * result, channel, signal, rate), shifts certain QoS data frames by
 * two bytes, and delivers the skb via ieee80211_rx_irqsafe().
 * Always returns 0.
 */
int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
{
	struct ieee80211_rx_status stats;
	struct rxpd *prxpd;
	int need_padding;
	unsigned int flags;
	struct ieee80211_hdr *hdr;

	lbtf_deb_enter(LBTF_DEB_RX);

	prxpd = (struct rxpd *) skb->data;

	memset(&stats, 0, sizeof(stats));
	if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
		stats.flag |= RX_FLAG_FAILED_FCS_CRC;
	stats.freq = priv->cur_freq;
	stats.band = IEEE80211_BAND_2GHZ;
	stats.signal = prxpd->snr;
	priv->noise = prxpd->nf;
	/* Marvell rate index has a hole at value 4 */
	if (prxpd->rx_rate > 4)
		--prxpd->rx_rate;
	stats.rate_idx = prxpd->rx_rate;
	skb_pull(skb, sizeof(struct rxpd));

	hdr = (struct ieee80211_hdr *)skb->data;
	flags = le32_to_cpu(*(__le32 *)(skb->data + 4));

	/* Shift by 2 for QoS data frames without A4 and without an
	 * A-MSDU (need_padding is the XOR of those three tests) —
	 * presumably to realign the payload; TODO confirm */
	need_padding = ieee80211_is_data_qos(hdr->frame_control);
	need_padding ^= ieee80211_has_a4(hdr->frame_control);
	need_padding ^= ieee80211_is_data_qos(hdr->frame_control) &&
			(*ieee80211_get_qos_ctl(hdr) &
			 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	if (need_padding) {
		memmove(skb->data + 2, skb->data, skb->len);
		skb_reserve(skb, 2);
	}

	memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));

	lbtf_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n",
	       skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
	lbtf_deb_hex(LBTF_DEB_RX, "RX Data", skb->data,
	             min_t(unsigned int, skb->len, 100));

	ieee80211_rx_irqsafe(priv->hw, skb);

	lbtf_deb_leave(LBTF_DEB_RX);
	return 0;
}
EXPORT_SYMBOL_GPL(lbtf_rx);
/**
 * lbtf_add_card: Add and initialize the card, no fw upload yet.
 *
 * @card A pointer to card
 * @dmdev The device to associate with the new ieee80211_hw
 *
 * Allocates the ieee80211_hw / lbtf_private pair, initializes driver
 * state, advertises the static channel/rate tables and registers with
 * mac80211.
 *
 * Returns: pointer to struct lbtf_priv, or NULL on failure.
 */
struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
{
	struct ieee80211_hw *hw;
	struct lbtf_private *priv = NULL;

	lbtf_deb_enter(LBTF_DEB_MAIN);

	hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops);
	if (!hw)
		goto done;

	priv = hw->priv;
	if (lbtf_init_adapter(priv))
		goto err_init_adapter;

	priv->hw = hw;
	priv->card = card;
	priv->tx_skb = NULL;

	hw->queues = 1;
	hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
	hw->extra_tx_headroom = sizeof(struct txpd);
	/* Copy the static tables into per-device storage */
	memcpy(priv->channels, lbtf_channels, sizeof(lbtf_channels));
	memcpy(priv->rates, lbtf_rates, sizeof(lbtf_rates));
	priv->band.n_bitrates = ARRAY_SIZE(lbtf_rates);
	priv->band.bitrates = priv->rates;
	priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
	priv->band.channels = priv->channels;
	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC);
	skb_queue_head_init(&priv->bc_ps_buf);

	SET_IEEE80211_DEV(hw, dmdev);

	INIT_WORK(&priv->cmd_work, lbtf_cmd_work);
	INIT_WORK(&priv->tx_work, lbtf_tx_work);
	if (ieee80211_register_hw(hw))
		goto err_init_adapter;

	goto done;

err_init_adapter:
	lbtf_free_adapter(priv);
	ieee80211_free_hw(hw);
	priv = NULL;

done:
	lbtf_deb_leave_args(LBTF_DEB_MAIN, "priv %p", priv);
	return priv;
}
EXPORT_SYMBOL_GPL(lbtf_add_card);
/* Tear down a card added with lbtf_add_card(): mark it surprise-removed
 * (makes in-flight workers drop frames), free driver state and
 * unregister from mac80211. Always returns 0. */
int lbtf_remove_card(struct lbtf_private *priv)
{
	struct ieee80211_hw *hw = priv->hw;

	lbtf_deb_enter(LBTF_DEB_MAIN);

	priv->surpriseremoved = 1;
	del_timer(&priv->command_timer);
	lbtf_free_adapter(priv);
	priv->hw = NULL;
	ieee80211_unregister_hw(hw);
	ieee80211_free_hw(hw);

	lbtf_deb_leave(LBTF_DEB_MAIN);
	return 0;
}
EXPORT_SYMBOL_GPL(lbtf_remove_card);
/* Called by the interface driver when the hardware reports the fate of
 * the frame in flight: report status to mac80211 and either restart
 * the queues or schedule the next buffered frame. */
void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb);

	ieee80211_tx_info_clear_status(info);
	/*
	 * Commented out, otherwise we never go beyond 1Mbit/s using mac80211
	 * default pid rc algorithm.
	 *
	 * info->status.retry_count = MRVL_DEFAULT_RETRIES - retrycnt;
	 */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !fail)
		info->flags |= IEEE80211_TX_STAT_ACK;
	/* Remove the txpd descriptor prepended by lbtf_tx_work() */
	skb_pull(priv->tx_skb, sizeof(struct txpd));
	ieee80211_tx_status_irqsafe(priv->hw, priv->tx_skb);
	priv->tx_skb = NULL;
	if (!priv->skb_to_tx && skb_queue_empty(&priv->bc_ps_buf))
		ieee80211_wake_queues(priv->hw);
	else
		/* More frames pending: keep queues stopped, send the next */
		queue_work(lbtf_wq, &priv->tx_work);
}
EXPORT_SYMBOL_GPL(lbtf_send_tx_feedback);
/* Called by the interface driver after a beacon went out (AP mode
 * only): queue broadcast frames buffered for power-saving stations and
 * install the next beacon template. */
void lbtf_bcn_sent(struct lbtf_private *priv)
{
	struct sk_buff *skb = NULL;

	if (priv->vif->type != NL80211_IFTYPE_AP)
		return;

	if (skb_queue_empty(&priv->bc_ps_buf)) {
		bool tx_buff_bc = false;

		/* Pull all PS-buffered broadcast frames from mac80211 */
		while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
			skb_queue_tail(&priv->bc_ps_buf, skb);
			tx_buff_bc = true;
		}
		if (tx_buff_bc) {
			ieee80211_stop_queues(priv->hw);
			queue_work(lbtf_wq, &priv->tx_work);
		}
	}

	skb = ieee80211_beacon_get(priv->hw, priv->vif);
	if (skb) {
		lbtf_beacon_set(priv, skb);
		kfree_skb(skb);
	}
}
EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
/* Module load: create the workqueue shared by the command and transmit
 * workers. Returns 0 on success, -ENOMEM on failure. */
static int __init lbtf_init_module(void)
{
	lbtf_deb_enter(LBTF_DEB_MAIN);

	lbtf_wq = create_workqueue("libertastf");
	if (!lbtf_wq) {
		printk(KERN_ERR "libertastf: couldn't create workqueue\n");
		return -ENOMEM;
	}

	lbtf_deb_leave(LBTF_DEB_MAIN);
	return 0;
}
/* Module unload: tear down the shared workqueue. */
static void __exit lbtf_exit_module(void)
{
	lbtf_deb_enter(LBTF_DEB_MAIN);
	destroy_workqueue(lbtf_wq);
	lbtf_deb_leave(LBTF_DEB_MAIN);
}

module_init(lbtf_init_module);
module_exit(lbtf_exit_module);

MODULE_DESCRIPTION("Libertas WLAN Thinfirm Driver Library");
MODULE_AUTHOR("Cozybit Inc.");
MODULE_LICENSE("GPL");
| gpl-2.0 |
MSM8226-Samsung/android_kernel_samsung_kmini3g_old | net/irda/irttp.c | 5035 | 51777 | /*********************************************************************
*
* Filename: irttp.c
* Version: 1.2
* Description: Tiny Transport Protocol (TTP) implementation
* Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:31 1997
* Modified at: Wed Jan 5 11:31:27 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
* Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither Dag Brattli nor University of Tromsø admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
********************************************************************/
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <net/irda/irda.h>
#include <net/irda/irlap.h>
#include <net/irda/irlmp.h>
#include <net/irda/parameters.h>
#include <net/irda/irttp.h>
/* The single global IrTTP instance, allocated in irttp_init() */
static struct irttp_cb *irttp;

/* Forward declarations: internal helpers and IrLMP callbacks */
static void __irttp_close_tsap(struct tsap_cb *self);

static int irttp_data_indication(void *instance, void *sap,
				 struct sk_buff *skb);
static int irttp_udata_indication(void *instance, void *sap,
				  struct sk_buff *skb);
static void irttp_disconnect_indication(void *instance, void *sap,
					LM_REASON reason, struct sk_buff *);
static void irttp_connect_indication(void *instance, void *sap,
				     struct qos_info *qos, __u32 max_sdu_size,
				     __u8 header_size, struct sk_buff *skb);
static void irttp_connect_confirm(void *instance, void *sap,
				  struct qos_info *qos, __u32 max_sdu_size,
				  __u8 header_size, struct sk_buff *skb);
static void irttp_run_tx_queue(struct tsap_cb *self);
static void irttp_run_rx_queue(struct tsap_cb *self);

static void irttp_flush_queues(struct tsap_cb *self);
static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
static void irttp_todo_expired(unsigned long data);
static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
				    int get);

static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow);
static void irttp_status_indication(void *instance,
				    LINK_STATUS link, LOCK_STATUS lock);

/* Information for parsing parameters in IrTTP */
static pi_minor_info_t pi_minor_call_table[] = {
	{ NULL, 0 },                                             /* 0x00 */
	{ irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
};
static pi_major_info_t pi_major_call_table[] = {{ pi_minor_call_table, 2 }};
static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
/************************ GLOBAL PROCEDURES ************************/
/*
* Function irttp_init (void)
*
* Initialize the IrTTP layer. Called by module initialization code
*
*/
/*
 * Allocate and set up the global IrTTP instance and its TSAP hashbin.
 * Returns 0 on success, -ENOMEM when either allocation fails.
 */
int __init irttp_init(void)
{
	int err = -ENOMEM;

	irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL);
	if (!irttp)
		goto out;

	irttp->magic = TTP_MAGIC;

	irttp->tsaps = hashbin_new(HB_LOCK);
	if (!irttp->tsaps) {
		IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n",
			   __func__);
		kfree(irttp);
		goto out;
	}

	err = 0;
out:
	return err;
}
/*
* Function irttp_cleanup (void)
*
* Called by module destruction/cleanup code
*
*/
/* Module teardown: close every TSAP still in the hashbin, then free
 * the global instance. */
void irttp_cleanup(void)
{
	/* Check for main structure */
	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);

	/*
	 *  Delete hashbin and close all TSAP instances in it
	 */
	hashbin_delete(irttp->tsaps, (FREE_FUNC) __irttp_close_tsap);

	/* Poison the magic so stale pointers are caught by the asserts */
	irttp->magic = 0;

	/* De-allocate main structure */
	kfree(irttp);
	irttp = NULL;
}
/*************************** SUBROUTINES ***************************/
/*
* Function irttp_start_todo_timer (self, timeout)
*
* Start todo timer.
*
* Made it more effient and unsensitive to race conditions - Jean II
*/
/* Arm (or re-arm) the TSAP's todo timer to fire @timeout jiffies from
 * now; mod_timer() copes with an already-pending timer. */
static inline void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
{
	/* Set new value for timer */
	mod_timer(&self->todo_timer, jiffies + timeout);
}
/*
* Function irttp_todo_expired (data)
*
* Todo timer has expired!
*
* One of the restriction of the timer is that it is run only on the timer
* interrupt which run every 10ms. This mean that even if you set the timer
* with a delay of 0, it may take up to 10ms before it's run.
* So, to minimise latency and keep cache fresh, we try to avoid using
* it as much as possible.
* Note : we can't use tasklets, because they can't be asynchronously
* killed (need user context), and we can't guarantee that here...
* Jean II
*/
/*
 * Todo timer handler: make progress on the rx/tx queues, then complete
 * any disconnect that was deferred until the tx queue drained, and
 * finally any pending close of the TSAP itself.
 */
static void irttp_todo_expired(unsigned long data)
{
	struct tsap_cb *self = (struct tsap_cb *) data;

	/* Check that we still exist */
	if (!self || self->magic != TTP_TSAP_MAGIC)
		return;

	IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);

	/* Try to make some progress, especially on Tx side - Jean II */
	irttp_run_rx_queue(self);
	irttp_run_tx_queue(self);

	/* Check if time for disconnect */
	if (test_bit(0, &self->disconnect_pend)) {
		/* Check if it's possible to disconnect yet */
		if (skb_queue_empty(&self->tx_queue)) {
			/* Make sure disconnect is not pending anymore */
			clear_bit(0, &self->disconnect_pend);	/* FALSE */

			/* Note : self->disconnect_skb may be NULL */
			irttp_disconnect_request(self, self->disconnect_skb,
						 P_NORMAL);
			self->disconnect_skb = NULL;
		} else {
			/* Try again later */
			irttp_start_todo_timer(self, HZ/10);

			/* No reason to try and close now */
			return;
		}
	}

	/* Check if it's closing time */
	if (self->close_pend)
		/* Finish cleanup */
		irttp_close_tsap(self);
}
/*
* Function irttp_flush_queues (self)
*
* Flushes (removes all frames) in transitt-buffer (tx_list)
*/
/* Free every skb held in @queue. */
static void irttp_drain_queue(struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(queue)) != NULL)
		dev_kfree_skb(skb);
}

/*
 * Function irttp_flush_queues (self)
 *
 *     Drop every frame held by this TSAP: frames waiting to be sent,
 *     fully received frames, and fragments awaiting reassembly.
 */
static void irttp_flush_queues(struct tsap_cb *self)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	irttp_drain_queue(&self->tx_queue);
	irttp_drain_queue(&self->rx_queue);
	irttp_drain_queue(&self->rx_fragments);
}
/*
* Function irttp_reassemble (self)
*
* Makes a new (continuous) skb of all the fragments in the fragment
* queue
*
*/
/* Build one contiguous skb of rx_sdu_size bytes from all queued
 * fragments, consuming (and freeing) the fragment queue. Returns the
 * new skb, or NULL if allocation fails. */
static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
{
	struct sk_buff *skb, *frag;
	int n = 0;  /* Fragment index */

	IRDA_ASSERT(self != NULL, return NULL;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);

	IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __func__,
		   self->rx_sdu_size);

	skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
	if (!skb)
		return NULL;

	/*
	 * Need to reserve space for TTP header in case this skb needs to
	 * be requeued in case delivery failes
	 */
	skb_reserve(skb, TTP_HEADER);
	skb_put(skb, self->rx_sdu_size);

	/*
	 *  Copy all fragments to a new buffer
	 */
	while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) {
		skb_copy_to_linear_data_offset(skb, n, frag->data, frag->len);
		n += frag->len;

		dev_kfree_skb(frag);
	}

	IRDA_DEBUG(2,
		   "%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
		   __func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
	/* Note : irttp_run_rx_queue() calculate self->rx_sdu_size
	 * by summing the size of all fragments, so we should always
	 * have n == self->rx_sdu_size, except in cases where we
	 * droped the last fragment (when self->rx_sdu_size exceed
	 * self->rx_max_sdu_size), where n < self->rx_sdu_size.
	 * Jean II */
	IRDA_ASSERT(n <= self->rx_sdu_size, n = self->rx_sdu_size;);

	/* Set the new length */
	skb_trim(skb, n);

	self->rx_sdu_size = 0;

	return skb;
}
/*
* Function irttp_fragment_skb (skb)
*
* Fragments a frame and queues all the fragments for transmission
*
*/
/* Split @skb into max_seg_size fragments, prepend a TTP header to each
 * (TTP_MORE set on all but the last) and queue them all on tx_queue.
 * The original skb becomes the final fragment. If a fragment cannot be
 * allocated, the remainder of the SDU is silently dropped. */
static inline void irttp_fragment_skb(struct tsap_cb *self,
				      struct sk_buff *skb)
{
	struct sk_buff *frag;
	__u8 *frame;

	IRDA_DEBUG(2, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/*
	 *  Split frame into a number of segments
	 */
	while (skb->len > self->max_seg_size) {
		IRDA_DEBUG(2, "%s(), fragmenting ...\n", __func__);

		/* Make new segment */
		frag = alloc_skb(self->max_seg_size+self->max_header_size,
				 GFP_ATOMIC);
		if (!frag)
			return;

		skb_reserve(frag, self->max_header_size);

		/* Copy data from the original skb into this fragment. */
		skb_copy_from_linear_data(skb, skb_put(frag, self->max_seg_size),
			      self->max_seg_size);

		/* Insert TTP header, with the more bit set */
		frame = skb_push(frag, TTP_HEADER);
		frame[0] = TTP_MORE;

		/* Hide the copied data from the original skb */
		skb_pull(skb, self->max_seg_size);

		/* Queue fragment */
		skb_queue_tail(&self->tx_queue, frag);
	}
	/* Queue what is left of the original skb */
	IRDA_DEBUG(2, "%s(), queuing last segment\n", __func__);

	frame = skb_push(skb, TTP_HEADER);
	frame[0] = 0x00; /* Clear more bit */

	/* Queue fragment */
	skb_queue_tail(&self->tx_queue, skb);
}
/*
* Function irttp_param_max_sdu_size (self, param)
*
* Handle the MaxSduSize parameter in the connect frames, this function
* will be called both when this parameter needs to be inserted into, and
* extracted from the connect frames
*/
/*
 * Parameter-engine callback for the MaxSduSize parameter of connect
 * frames: with @get set the current tx_max_sdu_size is written into
 * @param for insertion into an outgoing frame, otherwise the value
 * extracted from an incoming frame is stored. Returns 0, or -1 on a
 * bad instance pointer.
 */
static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
				    int get)
{
	struct tsap_cb *self = instance;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	if (get) {
		/* Outgoing frame: publish our value */
		param->pv.i = self->tx_max_sdu_size;
	} else {
		/* Incoming frame: record the peer's value */
		self->tx_max_sdu_size = param->pv.i;
	}

	IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __func__, param->pv.i);

	return 0;
}
/*************************** CLIENT CALLS ***************************/
/************************** LMP CALLBACKS **************************/
/* Everything is happily mixed up. Waiting for next clean up - Jean II */
/*
* Initialization, that has to be done on new tsap
* instance allocation and on duplication
*/
static void irttp_init_tsap(struct tsap_cb *tsap)
{
	spin_lock_init(&tsap->lock);
	init_timer(&tsap->todo_timer);

	/* Per-TSAP skb queues: received SDUs, frames waiting to be
	 * sent, and fragments of the SDU being reassembled */
	skb_queue_head_init(&tsap->rx_queue);
	skb_queue_head_init(&tsap->tx_queue);
	skb_queue_head_init(&tsap->rx_fragments);
}
/*
 * Function irttp_open_tsap (stsap_sel, credit, notify)
 *
 *    Create TSAP connection endpoint.
 *
 *    Returns the new tsap_cb (inserted in irttp->tsaps) or NULL on
 *    failure. The caller owns the returned tsap and must release it
 *    with irttp_close_tsap().
 */
struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
{
	struct tsap_cb *self;
	struct lsap_cb *lsap;
	notify_t ttp_notify;

	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return NULL;);

	/* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to
	 * use only 0x01-0x6F. Of course, we can use LSAP_ANY as well.
	 * JeanII */
	if((stsap_sel != LSAP_ANY) &&
	   ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
		IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__);
		return NULL;
	}

	self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
	if (self == NULL) {
		IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __func__);
		return NULL;
	}

	/* Initialize internal objects */
	irttp_init_tsap(self);

	/* Initialise todo timer */
	self->todo_timer.data     = (unsigned long) self;
	self->todo_timer.function = &irttp_todo_expired;

	/* Initialize callbacks for IrLMP to use */
	irda_notify_init(&ttp_notify);
	ttp_notify.connect_confirm = irttp_connect_confirm;
	ttp_notify.connect_indication = irttp_connect_indication;
	ttp_notify.disconnect_indication = irttp_disconnect_indication;
	ttp_notify.data_indication = irttp_data_indication;
	ttp_notify.udata_indication = irttp_udata_indication;
	ttp_notify.flow_indication = irttp_flow_indication;
	/* Only relay status if the client actually wants it */
	if(notify->status_indication != NULL)
		ttp_notify.status_indication = irttp_status_indication;
	ttp_notify.instance = self;
	strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);

	self->magic = TTP_TSAP_MAGIC;
	self->connected = FALSE;

	/*
	 *  Create LSAP at IrLMP layer
	 */
	lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
	if (lsap == NULL) {
		IRDA_WARNING("%s: unable to allocate LSAP!!\n", __func__);
		/* Fix: don't leak the tsap_cb allocated above. The magic
		 * is already set, so __irttp_close_tsap() can tear the
		 * half-built instance down and free it. */
		__irttp_close_tsap(self);
		return NULL;
	}

	/*
	 *  If user specified LSAP_ANY as source TSAP selector, then IrLMP
	 *  will replace it with whatever source selector which is free, so
	 *  the stsap_sel we have might not be valid anymore
	 */
	self->stsap_sel = lsap->slsap_sel;
	IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);

	self->notify = *notify;
	self->lsap = lsap;

	hashbin_insert(irttp->tsaps, (irda_queue_t *) self, (long) self, NULL);

	/* TTP frames carry at most TTP_RX_MAX_CREDIT initial credits */
	if (credit > TTP_RX_MAX_CREDIT)
		self->initial_credit = TTP_RX_MAX_CREDIT;
	else
		self->initial_credit = credit;

	return self;
}
EXPORT_SYMBOL(irttp_open_tsap);
/*
 * Function irttp_close (handle)
 *
 *    Remove an instance of a TSAP. This function should only deal with the
 *    deallocation of the TSAP, and resetting of the TSAPs values;
 *
 *    Frees self at the end - the caller must guarantee no one else still
 *    holds a reference to this tsap_cb.
 */
static void __irttp_close_tsap(struct tsap_cb *self)
{
	/* First make sure we're connected. */
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* Drop every skb still sitting in the rx/tx/fragment queues */
	irttp_flush_queues(self);

	del_timer(&self->todo_timer);

	/* This one won't be cleaned up if we are disconnect_pend + close_pend
	 * and we receive a disconnect_indication */
	if (self->disconnect_skb)
		dev_kfree_skb(self->disconnect_skb);

	self->connected = FALSE;
	/* Poison the magic so any stale pointer fails the asserts above */
	self->magic = ~TTP_TSAP_MAGIC;

	kfree(self);
}
/*
 * Function irttp_close (self)
 *
 *    Remove TSAP from list of all TSAPs and then deallocate all resources
 *    associated with this TSAP
 *
 * Note : because we *free* the tsap structure, it is the responsibility
 * of the caller to make sure we are called only once and to deal with
 * possible race conditions. - Jean II
 *
 * Returns 0; if the tsap is still connected the actual teardown is
 * deferred to the todo timer and happens later.
 */
int irttp_close_tsap(struct tsap_cb *self)
{
	struct tsap_cb *tsap;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	/* Make sure tsap has been disconnected */
	if (self->connected) {
		/* Check if disconnect is not pending */
		if (!test_bit(0, &self->disconnect_pend)) {
			IRDA_WARNING("%s: TSAP still connected!\n",
				     __func__);
			irttp_disconnect_request(self, NULL, P_NORMAL);
		}
		self->close_pend = TRUE;
		/* Let the todo timer retry the close once the disconnect
		 * has gone through */
		irttp_start_todo_timer(self, HZ/10);

		return 0; /* Will be back! */
	}

	tsap = hashbin_remove(irttp->tsaps, (long) self, NULL);

	IRDA_ASSERT(tsap == self, return -1;);

	/* Close corresponding LSAP */
	if (self->lsap) {
		irlmp_close_lsap(self->lsap);
		self->lsap = NULL;
	}

	/* Frees self - do not touch it after this call */
	__irttp_close_tsap(self);

	return 0;
}
EXPORT_SYMBOL(irttp_close_tsap);
/*
 * Function irttp_udata_request (self, skb)
 *
 *    Send unreliable data on this TSAP.
 *
 *    Returns 0 on success or a negative errno; in every error case the
 *    skb is consumed (freed) here.
 */
int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
{
	int ret = 0;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	IRDA_DEBUG(4, "%s()\n", __func__);

	/* Nothing to send for an empty packet */
	if (skb->len == 0)
		goto drop;

	/* Refuse to transmit on a dead connection */
	if (!self->connected) {
		IRDA_WARNING("%s(), Not connected\n", __func__);
		ret = -ENOTCONN;
		goto drop;
	}

	/* Unreliable data is never fragmented: it must fit in one frame */
	if (skb->len > self->max_seg_size) {
		IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__);
		ret = -EMSGSIZE;
		goto drop;
	}

	irlmp_udata_request(self->lsap, skb);
	self->stats.tx_packets++;

	return 0;

drop:
	dev_kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(irttp_udata_request);
/*
 * Function irttp_data_request (handle, skb)
 *
 *    Queue frame for transmission. If SAR is enabled, fragement the frame
 *    and queue the fragments for transmission
 *
 *    Returns 0 on success or a negative errno; on error the skb has been
 *    freed here and the caller must not reuse it.
 */
int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
{
	__u8 *frame;
	int ret;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
		   skb_queue_len(&self->tx_queue));

	/* Take shortcut on zero byte packets */
	if (skb->len == 0) {
		ret = 0;
		goto err;
	}

	/* Check that nothing bad happens */
	if (!self->connected) {
		IRDA_WARNING("%s: Not connected\n", __func__);
		ret = -ENOTCONN;
		goto err;
	}

	/*
	 *  Check if SAR is disabled, and the frame is larger than what fits
	 *  inside an IrLAP frame
	 */
	if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
		IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n",
			   __func__);
		ret = -EMSGSIZE;
		goto err;
	}

	/*
	 *  Check if SAR is enabled, and the frame is larger than the
	 *  TxMaxSduSize
	 */
	if ((self->tx_max_sdu_size != 0) &&
	    (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
	    (skb->len > self->tx_max_sdu_size))
	{
		IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
			   __func__);
		ret = -EMSGSIZE;
		goto err;
	}
	/*
	 *  Check if transmit queue is full
	 */
	if (skb_queue_len(&self->tx_queue) >= TTP_TX_MAX_QUEUE) {
		/*
		 *  Give it a chance to empty itself
		 */
		irttp_run_tx_queue(self);

		/* Drop packet. This error code should trigger the caller
		 * to resend the data in the client code - Jean II */
		ret = -ENOBUFS;
		goto err;
	}

	/* Queue frame, or queue frame segments */
	if ((self->tx_max_sdu_size == 0) || (skb->len < self->max_seg_size)) {
		/* Queue frame */
		IRDA_ASSERT(skb_headroom(skb) >= TTP_HEADER, return -1;);
		frame = skb_push(skb, TTP_HEADER);
		frame[0] = 0x00; /* Clear more bit */

		skb_queue_tail(&self->tx_queue, skb);
	} else {
		/*
		 *  Fragment the frame, this function will also queue the
		 *  fragments, we don't care about the fact the transmit
		 *  queue may be overfilled by all the segments for a little
		 *  while
		 */
		irttp_fragment_skb(self, skb);
	}

	/* Check if we can accept more data from client */
	if ((!self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) > TTP_TX_HIGH_THRESHOLD)) {
		/* Tx queue filling up, so stop client. */
		if (self->notify.flow_indication) {
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_STOP);
		}
		/* self->tx_sdu_busy is the state of the client.
		 * Update state after notifying client to avoid
		 * race condition with irttp_flow_indication().
		 * If the queue empty itself after our test but before
		 * we set the flag, we will fix ourselves below in
		 * irttp_run_tx_queue().
		 * Jean II */
		self->tx_sdu_busy = TRUE;
	}

	/* Try to make some progress */
	irttp_run_tx_queue(self);

	return 0;

err:
	dev_kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(irttp_data_request);
/*
 * Function irttp_run_tx_queue (self)
 *
 *    Transmit packets queued for transmission (if possible)
 *
 *    Re-entrancy is prevented by irda_lock() on tx_queue_lock (a single
 *    runner at a time); the credit counters themselves are protected by
 *    self->lock because the rx path updates them concurrently.
 */
static void irttp_run_tx_queue(struct tsap_cb *self)
{
	struct sk_buff *skb;
	unsigned long flags;
	int n;

	IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n",
		   __func__,
		   self->send_credit, skb_queue_len(&self->tx_queue));

	/* Get exclusive access to the tx queue, otherwise don't touch it */
	if (irda_lock(&self->tx_queue_lock) == FALSE)
		return;

	/* Try to send out frames as long as we have credits
	 * and as long as LAP is not full. If LAP is full, it will
	 * poll us through irttp_flow_indication() - Jean II */
	while ((self->send_credit > 0) &&
	       (!irlmp_lap_tx_queue_full(self->lsap)) &&
	       (skb = skb_dequeue(&self->tx_queue)))
	{
		/*
		 *  Since we can transmit and receive frames concurrently,
		 *  the code below is a critical region and we must assure that
		 *  nobody messes with the credits while we update them.
		 */
		spin_lock_irqsave(&self->lock, flags);

		n = self->avail_credit;
		self->avail_credit = 0;

		/* Only room for 127 credits in frame */
		if (n > 127) {
			self->avail_credit = n-127;
			n = 127;
		}
		self->remote_credit += n;
		self->send_credit--;

		spin_unlock_irqrestore(&self->lock, flags);

		/*
		 *  More bit must be set by the data_request() or fragment()
		 *  functions
		 */
		skb->data[0] |= (n & 0x7f);

		/* Detach from socket.
		 * The current skb has a reference to the socket that sent
		 * it (skb->sk). When we pass it to IrLMP, the skb will be
		 * stored in in IrLAP (self->wx_list). When we are within
		 * IrLAP, we lose the notion of socket, so we should not
		 * have a reference to a socket. So, we drop it here.
		 *
		 * Why does it matter ?
		 * When the skb is freed (kfree_skb), if it is associated
		 * with a socket, it release buffer space on the socket
		 * (through sock_wfree() and sock_def_write_space()).
		 * If the socket no longer exist, we may crash. Hard.
		 * When we close a socket, we make sure that associated packets
		 * in IrTTP are freed. However, we have no way to cancel
		 * the packet that we have passed to IrLAP. So, if a packet
		 * remains in IrLAP (retry on the link or else) after we
		 * close the socket, we are dead !
		 * Jean II */
		if (skb->sk != NULL) {
			/* IrSOCK application, IrOBEX, ... */
			skb_orphan(skb);
		}
		/* IrCOMM over IrTTP, IrLAN, ... */

		/* Pass the skb to IrLMP - done */
		irlmp_data_request(self->lsap, skb);
		self->stats.tx_packets++;
	}

	/* Check if we can accept more frames from client.
	 * We don't want to wait until the todo timer to do that, and we
	 * can't use tasklets (grr...), so we are obliged to give control
	 * to client. That's ok, this test will be true not too often
	 * (max once per LAP window) and we are called from places
	 * where we can spend a bit of time doing stuff. - Jean II */
	if ((self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
	    (!self->close_pend))
	{
		if (self->notify.flow_indication)
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_START);

		/* self->tx_sdu_busy is the state of the client.
		 * We don't really have a race here, but it's always safer
		 * to update our state after the client - Jean II */
		self->tx_sdu_busy = FALSE;
	}

	/* Reset lock */
	self->tx_queue_lock = 0;
}
/*
 * Function irttp_give_credit (self)
 *
 *    Send a dataless flowdata TTP-PDU and give available credit to peer
 *    TSAP
 */
static inline void irttp_give_credit(struct tsap_cb *self)
{
	struct sk_buff *tx_skb;
	unsigned long flags;
	int n;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n",
		   __func__,
		   self->send_credit, self->avail_credit, self->remote_credit);

	/* The PDU carries nothing but the single credit byte */
	tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC);
	if (tx_skb == NULL)
		return;

	/* Reserve space for LMP, and LAP header */
	skb_reserve(tx_skb, LMP_MAX_HEADER);

	/* Credits are shared with the rx path - update them atomically */
	spin_lock_irqsave(&self->lock, flags);

	/* The frame's credit field is 7 bits wide: hand out at most 127
	 * now and keep the remainder for a later PDU */
	n = self->avail_credit;
	if (n > 127)
		n = 127;
	self->avail_credit -= n;
	self->remote_credit += n;

	spin_unlock_irqrestore(&self->lock, flags);

	skb_put(tx_skb, 1);
	tx_skb->data[0] = (__u8) (n & 0x7f);

	irlmp_data_request(self->lsap, tx_skb);
	self->stats.tx_packets++;
}
/*
 * Function irttp_udata_indication (instance, sap, skb)
 *
 *    Received some unit-data (unreliable)
 */
static int irttp_udata_indication(void *instance, void *sap,
				  struct sk_buff *skb)
{
	struct tsap_cb *self = instance;
	int err;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	self->stats.rx_packets++;

	/* Hand the skb to the layer above; on success ownership passes */
	if (self->notify.udata_indication) {
		err = self->notify.udata_indication(self->notify.instance,
						    self, skb);
		/* Same comment as in irttp_do_data_indication() */
		if (err == 0)
			return 0;
	}

	/* No handler, or the handler refused the skb: drop it here */
	dev_kfree_skb(skb);

	return 0;
}
/*
 * Function irttp_data_indication (instance, sap, skb)
 *
 *    Receive segment from IrLMP.
 *
 *    Extracts the inbound credits from the TTP header, queues data
 *    segments for reassembly/delivery, and kicks both the rx and
 *    (if we just regained credit) the tx queue.
 */
static int irttp_data_indication(void *instance, void *sap,
				 struct sk_buff *skb)
{
	struct tsap_cb *self;
	unsigned long flags;
	int n;

	self = instance;

	n = skb->data[0] & 0x7f;     /* Extract the credits */

	self->stats.rx_packets++;

	/* Deal with inbound credit
	 * Since we can transmit and receive frames concurrently,
	 * the code below is a critical region and we must assure that
	 * nobody messes with the credits while we update them.
	 */
	spin_lock_irqsave(&self->lock, flags);
	self->send_credit += n;
	/* Data-bearing frames consume one of the credits we granted */
	if (skb->len > 1)
		self->remote_credit--;
	spin_unlock_irqrestore(&self->lock, flags);

	/*
	 *  Data or dataless packet? Dataless frames contains only the
	 *  TTP_HEADER.
	 */
	if (skb->len > 1) {
		/*
		 *  We don't remove the TTP header, since we must preserve the
		 *  more bit, so the defragment routing knows what to do
		 */
		skb_queue_tail(&self->rx_queue, skb);
	} else {
		/* Dataless flowdata TTP-PDU */
		dev_kfree_skb(skb);
	}

	/* Push data to the higher layer.
	 * We do it synchronously because running the todo timer for each
	 * receive packet would be too much overhead and latency.
	 * By passing control to the higher layer, we run the risk that
	 * it may take time or grab a lock. Most often, the higher layer
	 * will only put packet in a queue.
	 * Anyway, packets are only dripping through the IrDA, so we can
	 * have time before the next packet.
	 * Further, we are run from NET_BH, so the worse that can happen is
	 * us missing the optimal time to send back the PF bit in LAP.
	 * Jean II */
	irttp_run_rx_queue(self);

	/* We now give credits to peer in irttp_run_rx_queue().
	 * We need to send credit *NOW*, otherwise we are going
	 * to miss the next Tx window. The todo timer may take
	 * a while before it's run... - Jean II */

	/*
	 * If the peer device has given us some credits and we didn't have
	 * anyone from before, then we need to shedule the tx queue.
	 * We need to do that because our Tx have stopped (so we may not
	 * get any LAP flow indication) and the user may be stopped as
	 * well. - Jean II
	 */
	if (self->send_credit == n) {
		/* Restart pushing stuff to LAP */
		irttp_run_tx_queue(self);
		/* Note : we don't want to schedule the todo timer
		 * because it has horrible latency. No tasklets
		 * because the tasklet API is broken. - Jean II */
	}

	return 0;
}
/*
 * Function irttp_status_indication (self, reason)
 *
 *    Status_indication, just pass to the higher layer...
 */
static void irttp_status_indication(void *instance,
				    LINK_STATUS link, LOCK_STATUS lock)
{
	struct tsap_cb *self = instance;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* The client may already have closed this TSAP and gone away */
	if (self->close_pend)
		return;

	/* Only forward if the service user asked for it */
	if (self->notify.status_indication == NULL) {
		IRDA_DEBUG(2, "%s(), no handler\n", __func__);
		return;
	}

	self->notify.status_indication(self->notify.instance, link, lock);
}
/*
 * Function irttp_flow_indication (self, reason)
 *
 *    Flow_indication : IrLAP tells us to send more data.
 */
static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
{
	struct tsap_cb *self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);

	/* LAP polls us each time a slot frees up in its Tx window, so we
	 * push one more packet down to keep the window as full as we can,
	 * without doing so much work here that LAP hangs waiting for us.
	 * Note: irttp_run_tx_queue() may call back into the client via
	 * notify.flow_indication() when our queue starts to empty; that
	 * happens at most once per LAP window, and LAP is still busy with
	 * mtt and the first packet, so it is acceptable to do it inline
	 * rather than from a tasklet or timer. */
	irttp_run_tx_queue(self);

	/* A pending disconnect may have been waiting for the queue to
	 * drain - give it a chance right away */
	if(self->disconnect_pend)
		irttp_start_todo_timer(self, 0);
}
/*
 * Function irttp_flow_request (self, command)
 *
 *    This function could be used by the upper layers to tell IrTTP to stop
 *    delivering frames if the receive queues are starting to get full, or
 *    to tell IrTTP to start delivering frames again.
 */
void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
{
	IRDA_DEBUG(1, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	if (flow == FLOW_STOP) {
		IRDA_DEBUG(1, "%s(), flow stop\n", __func__);
		self->rx_sdu_busy = TRUE;
	} else if (flow == FLOW_START) {
		IRDA_DEBUG(1, "%s(), flow start\n", __func__);
		self->rx_sdu_busy = FALSE;

		/* Client say he can accept more data, try to free our
		 * queues ASAP - Jean II */
		irttp_run_rx_queue(self);
	} else {
		IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __func__);
	}
}
EXPORT_SYMBOL(irttp_flow_request);
/*
 * Function irttp_connect_request (self, dtsap_sel, daddr, qos)
 *
 *    Try to connect to remote destination TSAP selector
 *
 *    max_sdu_size > 0 enables SAR and advertises our MaxSduSize in the
 *    connect frame. userdata, if supplied, must have TTP_MAX_HEADER of
 *    headroom and is consumed by this call (freed on error paths).
 */
int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
			  __u32 saddr, __u32 daddr,
			  struct qos_info *qos, __u32 max_sdu_size,
			  struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	__u8 n;

	IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __func__, max_sdu_size);

	IRDA_ASSERT(self != NULL, return -EBADR;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);

	if (self->connected) {
		if(userdata)
			dev_kfree_skb(userdata);
		return -EISCONN;
	}

	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX_CONTROL and LAP header */
		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		tx_skb = userdata;
		/*
		 *  Check that the client has reserved enough space for
		 *  headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			{ dev_kfree_skb(userdata); return -1; } );
	}

	/* Initialize connection parameters */
	self->connected = FALSE;
	self->avail_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;
	self->dtsap_sel = dtsap_sel;

	n = self->initial_credit;

	self->remote_credit = 0;
	self->send_credit = 0;

	/*
	 *  Give away max 127 credits for now
	 */
	if (n > 127) {
		self->avail_credit=n-127;
		n = 127;
	}

	self->remote_credit = n;

	/* SAR enabled? */
	if (max_sdu_size > 0) {
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			{ dev_kfree_skb(tx_skb); return -1; } );

		/* Insert SAR parameters */
		frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;
		frame[1] = 0x04; /* Length */
		frame[2] = 0x01; /* MaxSduSize */
		frame[3] = 0x02; /* Value length */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Insert plain TTP header */
		frame = skb_push(tx_skb, TTP_HEADER);

		/* Insert initial credit in frame */
		frame[0] = n & 0x7f;
	}

	/* Connect with IrLMP. No QoS parameters for now */
	return irlmp_connect_request(self->lsap, dtsap_sel, saddr, daddr, qos,
				     tx_skb);
}
EXPORT_SYMBOL(irttp_connect_request);
/*
 * Function irttp_connect_confirm (handle, qos, skb)
 *
 *    Service user confirms TSAP connection with peer.
 *
 *    Extracts the peer's initial credit and (optionally) the MaxSduSize
 *    parameter from the connect frame, then notifies the client.
 */
static void irttp_connect_confirm(void *instance, void *sap,
				  struct qos_info *qos, __u32 max_seg_size,
				  __u8 max_header_size, struct sk_buff *skb)
{
	struct tsap_cb *self;
	int parameters;
	int ret;
	__u8 plen;
	__u8 n;

	IRDA_DEBUG(4, "%s()\n", __func__);

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/* Account for our own TTP header in the negotiated sizes */
	self->max_seg_size = max_seg_size - TTP_HEADER;
	self->max_header_size = max_header_size + TTP_HEADER;

	/*
	 *  Check if we have got some QoS parameters back! This should be the
	 *  negotiated QoS for the link.
	 */
	if (qos) {
		IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %02x\n",
		       qos->baud_rate.bits);

		IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %d bps.\n",
		       qos->baud_rate.value);
	}

	/* Low 7 bits of the first byte carry the peer's initial credit */
	n = skb->data[0] & 0x7f;

	IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __func__, n);

	self->send_credit = n;
	self->tx_max_sdu_size = 0;
	self->connected = TRUE;

	/* High bit flags the presence of a parameter list */
	parameters = skb->data[0] & 0x80;

	IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
	skb_pull(skb, TTP_HEADER);

	if (parameters) {
		plen = skb->data[0];

		ret = irda_param_extract_all(self, skb->data+1,
					     IRDA_MIN(skb->len-1, plen),
					     &param_info);

		/* Any errors in the parameter list? */
		if (ret < 0) {
			IRDA_WARNING("%s: error extracting parameters\n",
				     __func__);
			dev_kfree_skb(skb);

			/* Do not accept this connection attempt */
			return;
		}
		/* Remove parameters */
		skb_pull(skb, IRDA_MIN(skb->len, plen+1));
	}

	IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __func__,
	      self->send_credit, self->avail_credit, self->remote_credit);

	IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __func__,
		   self->tx_max_sdu_size);

	if (self->notify.connect_confirm) {
		self->notify.connect_confirm(self->notify.instance, self, qos,
					     self->tx_max_sdu_size,
					     self->max_header_size, skb);
	} else
		dev_kfree_skb(skb);
}
/*
 * Function irttp_connect_indication (handle, skb)
 *
 *    Some other device is connecting to this TSAP
 *
 *    Mirrors irttp_connect_confirm(): parse initial credit and optional
 *    MaxSduSize parameter, then notify the client of the incoming
 *    connection.
 */
static void irttp_connect_indication(void *instance, void *sap,
		struct qos_info *qos, __u32 max_seg_size, __u8 max_header_size,
		struct sk_buff *skb)
{
	struct tsap_cb *self;
	struct lsap_cb *lsap;
	int parameters;
	int ret;
	__u8 plen;
	__u8 n;

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	lsap = sap;

	/* Account for our own TTP header in the negotiated sizes */
	self->max_seg_size = max_seg_size - TTP_HEADER;
	self->max_header_size = max_header_size+TTP_HEADER;

	IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);

	/* Need to update dtsap_sel if its equal to LSAP_ANY */
	self->dtsap_sel = lsap->dlsap_sel;

	/* Low 7 bits carry the peer's initial credit */
	n = skb->data[0] & 0x7f;

	self->send_credit = n;
	self->tx_max_sdu_size = 0;

	/* High bit flags the presence of a parameter list */
	parameters = skb->data[0] & 0x80;

	IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
	skb_pull(skb, TTP_HEADER);

	if (parameters) {
		plen = skb->data[0];

		ret = irda_param_extract_all(self, skb->data+1,
					     IRDA_MIN(skb->len-1, plen),
					     &param_info);

		/* Any errors in the parameter list? */
		if (ret < 0) {
			IRDA_WARNING("%s: error extracting parameters\n",
				     __func__);
			dev_kfree_skb(skb);

			/* Do not accept this connection attempt */
			return;
		}

		/* Remove parameters */
		skb_pull(skb, IRDA_MIN(skb->len, plen+1));
	}

	if (self->notify.connect_indication) {
		self->notify.connect_indication(self->notify.instance, self,
						qos, self->tx_max_sdu_size,
						self->max_header_size, skb);
	} else
		dev_kfree_skb(skb);
}
/*
 * Function irttp_connect_response (handle, userdata)
 *
 *    Service user is accepting the connection, just pass it down to
 *    IrLMP!
 *
 *    max_sdu_size > 0 enables SAR on our receive side and advertises it
 *    in the response frame. userdata, if supplied, must have
 *    TTP_MAX_HEADER of headroom and is consumed here.
 */
int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
			   struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	int ret;
	__u8 n;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __func__,
		   self->stsap_sel);

	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX_CONTROL and LAP header */
		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		tx_skb = userdata;
		/*
		 *  Check that the client has reserved enough space for
		 *  headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			{ dev_kfree_skb(userdata); return -1; } );
	}

	self->avail_credit = 0;
	self->remote_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;

	n = self->initial_credit;

	/* Frame has only space for max 127 credits (7 bits) */
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}

	self->remote_credit = n;
	self->connected = TRUE;

	/* SAR enabled? */
	if (max_sdu_size > 0) {
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			{ dev_kfree_skb(tx_skb); return -1; } );

		/* Insert TTP header with SAR parameters */
		frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;
		frame[1] = 0x04; /* Length */

		/* irda_param_insert(self, IRTTP_MAX_SDU_SIZE, frame+1,  */
/*				  TTP_SAR_HEADER, &param_info) */

		frame[2] = 0x01; /* MaxSduSize */
		frame[3] = 0x02; /* Value length */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Insert TTP header */
		frame = skb_push(tx_skb, TTP_HEADER);

		frame[0] = n & 0x7f;
	}

	ret = irlmp_connect_response(self->lsap, tx_skb);

	return ret;
}
EXPORT_SYMBOL(irttp_connect_response);
/*
 * Function irttp_dup (self, instance)
 *
 *    Duplicate TSAP, can be used by servers to confirm a connection on a
 *    new TSAP so it can keep listening on the old one.
 *
 *    Returns the new tsap_cb (with notify.instance set to 'instance') or
 *    NULL if the original can't be found or allocation/dup fails.
 */
struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
{
	struct tsap_cb *new;
	unsigned long flags;

	IRDA_DEBUG(1, "%s()\n", __func__);

	/* Protect our access to the old tsap instance */
	spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);

	/* Find the old instance */
	if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
		IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __func__);
		spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
		return NULL;
	}

	/* Allocate a new instance */
	new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
	if (!new) {
		IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
		spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
		return NULL;
	}
	/* The copy shares orig's lock; give the new instance its own */
	spin_lock_init(&new->lock);

	/* We don't need the old instance any more */
	spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);

	/* Try to dup the LSAP (may fail if we were too slow) */
	new->lsap = irlmp_dup(orig->lsap, new);
	if (!new->lsap) {
		IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
		kfree(new);
		return NULL;
	}

	/* Not everything should be copied */
	new->notify.instance = instance;

	/* Initialize internal objects: fresh queues and timer instead of
	 * the stale pointers kmemdup copied from orig */
	irttp_init_tsap(new);

	/* This is locked */
	hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL);

	return new;
}
EXPORT_SYMBOL(irttp_dup);
/*
 * Function irttp_disconnect_request (self)
 *
 *    Close this connection please! If priority is high, the queued data
 *    segments, if any, will be deallocated first
 *
 *    Returns 0 on success, -1 if already disconnected / disconnect
 *    pending / deferred, or -ENOMEM. userdata, if given, is consumed.
 */
int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
			     int priority)
{
	int ret;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	/* Already disconnected? */
	if (!self->connected) {
		IRDA_DEBUG(4, "%s(), already disconnected!\n", __func__);
		if (userdata)
			dev_kfree_skb(userdata);
		return -1;
	}

	/* Disconnect already pending ?
	 * We need to use an atomic operation to prevent reentry. This
	 * function may be called from various context, like user, timer
	 * for following a disconnect_indication() (i.e. net_bh).
	 * Jean II */
	if(test_and_set_bit(0, &self->disconnect_pend)) {
		IRDA_DEBUG(0, "%s(), disconnect already pending\n",
			   __func__);
		if (userdata)
			dev_kfree_skb(userdata);

		/* Try to make some progress */
		irttp_run_tx_queue(self);
		return -1;
	}

	/*
	 *  Check if there is still data segments in the transmit queue
	 */
	if (!skb_queue_empty(&self->tx_queue)) {
		if (priority == P_HIGH) {
			/*
			 *  No need to send the queued data, if we are
			 *  disconnecting right now since the data will
			 *  not have any usable connection to be sent on
			 */
			IRDA_DEBUG(1, "%s(): High priority!!()\n", __func__);
			irttp_flush_queues(self);
		} else if (priority == P_NORMAL) {
			/*
			 *  Must delay disconnect until after all data segments
			 *  have been sent and the tx_queue is empty
			 */
			/* We'll reuse this one later for the disconnect */
			self->disconnect_skb = userdata;  /* May be NULL */

			irttp_run_tx_queue(self);

			/* The todo timer will retry the disconnect once
			 * the queue has drained */
			irttp_start_todo_timer(self, HZ/10);
			return -1;
		}
	}
	/* Note : we don't need to check if self->rx_queue is full and the
	 * state of self->rx_sdu_busy because the disconnect response will
	 * be sent at the LMP level (so even if the peer has its Tx queue
	 * full of data). - Jean II */

	IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __func__);
	self->connected = FALSE;

	if (!userdata) {
		struct sk_buff *tx_skb;

		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/*
		 *  Reserve space for MUX and LAP header
		 */
		skb_reserve(tx_skb, LMP_MAX_HEADER);

		userdata = tx_skb;
	}
	ret = irlmp_disconnect_request(self->lsap, userdata);

	/* The disconnect is no longer pending */
	clear_bit(0, &self->disconnect_pend);	/* FALSE */

	return ret;
}
EXPORT_SYMBOL(irttp_disconnect_request);
/*
 * Function irttp_disconnect_indication (self, reason)
 *
 *    Disconnect indication, TSAP disconnected by peer?
 *
 *    If the client already asked for a close, finish the teardown here;
 *    otherwise forward the indication (and the optional skb) to it.
 */
static void irttp_disconnect_indication(void *instance, void *sap,
					LM_REASON reason, struct sk_buff *skb)
{
	struct tsap_cb *self;

	IRDA_DEBUG(4, "%s()\n", __func__);

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* Prevent higher layer to send more data */
	self->connected = FALSE;

	/* Check if client has already tried to close the TSAP */
	if (self->close_pend) {
		/* In this case, the higher layer is probably gone. Don't
		 * bother it and clean up the remains - Jean II */
		if (skb)
			dev_kfree_skb(skb);
		irttp_close_tsap(self);
		return;
	}

	/* If we are here, we assume that is the higher layer is still
	 * waiting for the disconnect notification and able to process it,
	 * even if he tried to disconnect. Otherwise, it would have already
	 * attempted to close the tsap and self->close_pend would be TRUE.
	 * Jean II */

	/* No need to notify the client if has already tried to disconnect */
	if(self->notify.disconnect_indication)
		self->notify.disconnect_indication(self->notify.instance, self,
						   reason, skb);
	else
		if (skb)
			dev_kfree_skb(skb);
}
/*
 * Function irttp_do_data_indication (self, skb)
 *
 *    Try to deliver reassembled skb to layer above, and requeue it if that
 *    for some reason should fail. We mark rx sdu as busy to apply back
 *    pressure is necessary.
 */
static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
{
	int err;

	/* Client is gone (close pending) - nobody to deliver to */
	if (self->close_pend) {
		dev_kfree_skb(skb);
		return;
	}

	err = self->notify.data_indication(self->notify.instance, self, skb);

	/* The layer above usually throttles us via a flow request when its
	 * input queue fills, but it may instead just refuse the skb by
	 * returning an error - requeue it in that case */
	if (err == 0)
		return;

	IRDA_DEBUG(0, "%s() requeueing skb!\n", __func__);

	/* Make sure we take a break */
	self->rx_sdu_busy = TRUE;

	/* Re-add the TTP header stripped before delivery */
	skb_push(skb, TTP_HEADER);
	skb->data[0] = 0x00; /* Make sure MORE bit is cleared */

	/* Put skb back on queue */
	skb_queue_head(&self->rx_queue, skb);
}
/*
 * Function irttp_run_rx_queue (self)
 *
 *     Check if we have any frames to be transmitted, or if we have any
 *     available credit to give away.
 *
 * Drains self->rx_queue, reassembling SAR fragments into SDUs and
 * delivering them via irttp_do_data_indication(), then recomputes how
 * much receive credit can be advertised to the peer.
 */
static void irttp_run_rx_queue(struct tsap_cb *self)
{
	struct sk_buff *skb;
	int more = 0;

	IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __func__,
		   self->send_credit, self->avail_credit, self->remote_credit);

	/* Get exclusive access to the rx queue, otherwise don't touch it */
	if (irda_lock(&self->rx_queue_lock) == FALSE)
		return;

	/*
	 * Reassemble all frames in receive queue and deliver them
	 * (stops early if delivery marks rx_sdu_busy)
	 */
	while (!self->rx_sdu_busy && (skb = skb_dequeue(&self->rx_queue))) {
		/* This bit will tell us if it's the last fragment or not */
		more = skb->data[0] & 0x80;

		/* Remove TTP header */
		skb_pull(skb, TTP_HEADER);

		/* Add the length of the remaining data */
		self->rx_sdu_size += skb->len;

		/*
		 * If SAR is disabled, or user has requested no reassembly
		 * of received fragments then we just deliver them
		 * immediately. This can be requested by clients that
		 * implements byte streams without any message boundaries
		 */
		if (self->rx_max_sdu_size == TTP_SAR_DISABLE) {
			irttp_do_data_indication(self, skb);
			self->rx_sdu_size = 0;

			continue;
		}

		/* Check if this is a fragment, and not the last fragment */
		if (more) {
			/*
			 * Queue the fragment if we still are within the
			 * limits of the maximum size of the rx_sdu
			 */
			if (self->rx_sdu_size <= self->rx_max_sdu_size) {
				IRDA_DEBUG(4, "%s(), queueing frag\n",
					   __func__);
				skb_queue_tail(&self->rx_fragments, skb);
			} else {
				/* Free the part of the SDU that is too big */
				dev_kfree_skb(skb);
			}
			continue;
		}
		/*
		 * This is the last fragment, so time to reassemble!
		 */
		if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
		    (self->rx_max_sdu_size == TTP_SAR_UNBOUND))
		{
			/*
			 * A little optimizing. Only queue the fragment if
			 * there are other fragments. Since if this is the
			 * last and only fragment, there is no need to
			 * reassemble :-)
			 */
			if (!skb_queue_empty(&self->rx_fragments)) {
				skb_queue_tail(&self->rx_fragments,
					       skb);

				skb = irttp_reassemble_skb(self);
			}

			/* Now we can deliver the reassembled skb */
			irttp_do_data_indication(self, skb);
		} else {
			IRDA_DEBUG(1, "%s(), Truncated frame\n", __func__);

			/* Free the part of the SDU that is too big */
			dev_kfree_skb(skb);

			/* Deliver only the valid but truncated part of SDU */
			skb = irttp_reassemble_skb(self);

			irttp_do_data_indication(self, skb);
		}
		self->rx_sdu_size = 0;
	}

	/*
	 * It's not trivial to keep track of how many credits are available
	 * by incrementing at each packet, because delivery may fail
	 * (irttp_do_data_indication() may requeue the frame) and because
	 * we need to take care of fragmentation.
	 * We want the other side to send up to initial_credit packets.
	 * We have some frames in our queues, and we have already allowed it
	 * to send remote_credit.
	 * No need to spinlock, write is atomic and self correcting...
	 * Jean II
	 */
	self->avail_credit = (self->initial_credit -
			      (self->remote_credit +
			       skb_queue_len(&self->rx_queue) +
			       skb_queue_len(&self->rx_fragments)));

	/* Do we have too much credits to send to peer ? */
	if ((self->remote_credit <= TTP_RX_MIN_CREDIT) &&
	    (self->avail_credit > 0)) {
		/* Send explicit credit frame */
		irttp_give_credit(self);
		/* Note : do *NOT* check if tx_queue is non-empty, that
		 * will produce deadlocks. I repeat : send a credit frame
		 * even if we have something to send in our Tx queue.
		 * If we have credits, it means that our Tx queue is blocked.
		 *
		 * Let's suppose the peer can't keep up with our Tx. He will
		 * flow control us by not sending us any credits, and we
		 * will stop Tx and start accumulating credits here.
		 * Up to the point where the peer will stop its Tx queue,
		 * for lack of credits.
		 * Let's assume the peer application is single threaded.
		 * It will block on Tx and never consume any Rx buffer.
		 * Deadlock. Guaranteed. - Jean II
		 */
	}

	/* Reset lock */
	self->rx_queue_lock = 0;
}
#ifdef CONFIG_PROC_FS
/* seq_file iterator cursor: index of the TSAP currently being shown. */
struct irttp_iter_state {
	int id;
};
/*
 * seq_file .start: take the tsap-list lock (held until irttp_seq_stop)
 * and walk the hashbin to the entry at position *pos.
 * Returns the matching tsap_cb, or NULL when *pos is past the end.
 */
static void *irttp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct irttp_iter_state *iter = seq->private;
	struct tsap_cb *self;

	/* Protect our access to the tsap list */
	spin_lock_irq(&irttp->tsaps->hb_spinlock);

	iter->id = 0;

	/* hashbin keeps its own internal cursor, so iteration is a plain
	 * get_first/get_next walk counted against *pos */
	for (self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
	     self != NULL;
	     self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps)) {
		if (iter->id == *pos)
			break;
		++iter->id;
	}

	return self;
}
/* seq_file .next: advance both the seq position and our cursor, then
 * continue the hashbin walk started in irttp_seq_start(). */
static void *irttp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct irttp_iter_state *iter = seq->private;

	++*pos;
	++iter->id;
	return (void *) hashbin_get_next(irttp->tsaps);
}
/* seq_file .stop: release the lock taken in irttp_seq_start(). */
static void irttp_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_irq(&irttp->tsaps->hb_spinlock);
}
/*
 * seq_file .show: print one TSAP's addressing, credit counters,
 * queue depths, flow-control flags and SAR parameters.
 */
static int irttp_seq_show(struct seq_file *seq, void *v)
{
	const struct irttp_iter_state *iter = seq->private;
	const struct tsap_cb *self = v;

	seq_printf(seq, "TSAP %d, ", iter->id);
	seq_printf(seq, "stsap_sel: %02x, ",
		   self->stsap_sel);
	seq_printf(seq, "dtsap_sel: %02x\n",
		   self->dtsap_sel);
	seq_printf(seq, " connected: %s, ",
		   self->connected? "TRUE":"FALSE");
	seq_printf(seq, "avail credit: %d, ",
		   self->avail_credit);
	seq_printf(seq, "remote credit: %d, ",
		   self->remote_credit);
	seq_printf(seq, "send credit: %d\n",
		   self->send_credit);
	seq_printf(seq, " tx packets: %lu, ",
		   self->stats.tx_packets);
	seq_printf(seq, "rx packets: %lu, ",
		   self->stats.rx_packets);
	seq_printf(seq, "tx_queue len: %u ",
		   skb_queue_len(&self->tx_queue));
	seq_printf(seq, "rx_queue len: %u\n",
		   skb_queue_len(&self->rx_queue));
	seq_printf(seq, " tx_sdu_busy: %s, ",
		   self->tx_sdu_busy? "TRUE":"FALSE");
	seq_printf(seq, "rx_sdu_busy: %s\n",
		   self->rx_sdu_busy? "TRUE":"FALSE");
	seq_printf(seq, " max_seg_size: %u, ",
		   self->max_seg_size);
	seq_printf(seq, "tx_max_sdu_size: %u, ",
		   self->tx_max_sdu_size);
	seq_printf(seq, "rx_max_sdu_size: %u\n",
		   self->rx_max_sdu_size);

	seq_printf(seq, " Used by (%s)\n\n",
		   self->notify.name);
	return 0;
}
/* Iterator callbacks for the /proc irttp listing. */
static const struct seq_operations irttp_seq_ops = {
	.start  = irttp_seq_start,
	.next   = irttp_seq_next,
	.stop   = irttp_seq_stop,
	.show   = irttp_seq_show,
};

/* Allocate per-open iterator state along with the seq_file. */
static int irttp_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &irttp_seq_ops,
			sizeof(struct irttp_iter_state));
}

/* File operations exported to the procfs registration code. */
const struct file_operations irttp_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = irttp_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
};
#endif /* PROC_FS */
| gpl-2.0 |
AndroPlus-org/kernel | arch/arm/plat-pxa/dma.c | 7339 | 10008 | /*
* linux/arch/arm/plat-pxa/dma.c
*
* PXA DMA registration and IRQ dispatching
*
* Author: Nicolas Pitre
* Created: Nov 15, 2001
* Copyright: MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#define DMA_DEBUG_NAME "pxa_dma"
#define DMA_MAX_REQUESTERS 64
/* Per-channel bookkeeping; a channel is "taken" iff ->name is non-NULL. */
struct dma_channel {
	char *name;			/* owner label, set by pxa_request_dma() */
	pxa_dma_prio prio;		/* priority class this channel belongs to */
	void (*irq_handler)(int, void *); /* client callback, runs in IRQ context */
	void *data;			/* opaque cookie handed back to the callback */
	spinlock_t lock;		/* serializes debugfs descriptor-chain walks */
};

static struct dma_channel *dma_channels;	/* array of num_dma_channels entries */
static int num_dma_channels;
/*
* Debug fs
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
static struct dentry *dbgfs_root, *dbgfs_state, **dbgfs_chan;
/*
 * debugfs "requesters" file: list every DRCMR requester currently mapped
 * to this channel.  The channel number travels in s->private (stored as
 * a casted integer when the file was created).
 */
static int dbg_show_requester_chan(struct seq_file *s, void *p)
{
	int pos = 0;
	int chan = (int)s->private;	/* int smuggled through the private pointer */
	int i;
	u32 drcmr;

	pos += seq_printf(s, "DMA channel %d requesters list :\n", chan);
	for (i = 0; i < DMA_MAX_REQUESTERS; i++) {
		drcmr = DRCMR(i);
		if ((drcmr & DRCMR_CHLNUM) == chan)
			pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
					  !!(drcmr & DRCMR_MAPVLD));
	}
	return pos;
}
/*
 * Decode the burst-size field of a DCMD value (bits 17:16) into the
 * burst length in bytes: 0 (reserved/none), 8, 16 or 32.
 */
static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	switch ((dcmd >> 16) & 0x3) {
	case 1:
		return 8;	/* 4 << 1 */
	case 2:
		return 16;	/* 4 << 2 */
	case 3:
		return 32;	/* 4 << 3 */
	default:
		return 0;	/* field == 0: no burst encoded */
	}
}
/* True if the physical address lies in a page frame the kernel knows
 * about — guards phys_to_virt() when walking descriptor chains. */
static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}
#define DCSR_STR(flag) (dcsr & DCSR_##flag ? #flag" " : "")
#define DCMD_STR(flag) (dcmd & DCMD_##flag ? #flag" " : "")
/*
 * debugfs "descriptors" file: follow the channel's descriptor chain
 * starting at DDADR and decode each pxa_dma_desc, up to max_show
 * entries.  The walk stops early at DDADR_STOP or any address that
 * does not map to valid physical memory.  Runs with the channel lock
 * held so the chain cannot change underneath us.
 */
static int dbg_show_descriptors(struct seq_file *s, void *p)
{
	int pos = 0;
	int chan = (int)s->private;	/* channel number from file creation */
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc;
	struct pxa_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&dma_channels[chan].lock, flags);
	phys_desc = DDADR(chan);

	pos += seq_printf(s, "DMA channel %d descriptors :\n", chan);
	/* index 0 is the in-flight descriptor whose address we can't recover */
	pos += seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;	/* 0,1,2,4 bytes */

		pos += seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
				  i, phys_desc, desc);
		pos += seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		pos += seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		pos += seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		pos += seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d"
				  " width=%d len=%d)\n",
				  dcmd,
				  DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
				  DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
				  DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
				  DCMD_STR(ENDIAN), burst, width,
				  dcmd & DCMD_LENGTH);
		phys_desc = desc->ddadr;	/* follow the chain */
	}
	if (i == max_show)
		pos += seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
				  i, phys_desc);
	else
		pos += seq_printf(s, "[%03d] Desc at %08lx is %s\n",
				  i, phys_desc, phys_desc == DDADR_STOP ?
				  "DDADR_STOP" : "invalid");

	spin_unlock_irqrestore(&dma_channels[chan].lock, flags);

	return pos;
}
/*
 * debugfs per-channel "state" file: dump priority, alignment flag and a
 * symbolic decode of the live DCSR/DCMD/DSADR/DTADR/DDADR registers.
 */
static int dbg_show_chan_state(struct seq_file *s, void *p)
{
	int pos = 0;
	int chan = (int)s->private;	/* channel number from file creation */
	u32 dcsr, dcmd;
	int burst, width;
	static char *str_prio[] = { "high", "normal", "low" };

	dcsr = DCSR(chan);
	dcmd = DCMD(chan);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;	/* 0,1,2,4 bytes */

	pos += seq_printf(s, "DMA channel %d\n", chan);
	pos += seq_printf(s, "\tPriority : %s\n",
			  str_prio[dma_channels[chan].prio]);
	pos += seq_printf(s, "\tUnaligned transfer bit: %s\n",
			  DALGN & (1 << chan) ? "yes" : "no");
	pos += seq_printf(s, "\tDCSR  = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
			  dcsr, DCSR_STR(RUN), DCSR_STR(NODESC),
			  DCSR_STR(STOPIRQEN), DCSR_STR(EORIRQEN),
			  DCSR_STR(EORJMPEN), DCSR_STR(EORSTOPEN),
			  DCSR_STR(SETCMPST), DCSR_STR(CLRCMPST),
			  DCSR_STR(CMPST), DCSR_STR(EORINTR), DCSR_STR(REQPEND),
			  DCSR_STR(STOPSTATE), DCSR_STR(ENDINTR),
			  DCSR_STR(STARTINTR), DCSR_STR(BUSERR));

	pos += seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d"
			  " len=%d)\n",
			  dcmd,
			  DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
			  DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
			  DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
			  DCMD_STR(ENDIAN), burst, width, dcmd & DCMD_LENGTH);
	pos += seq_printf(s, "\tDSADR = %08x\n", DSADR(chan));
	pos += seq_printf(s, "\tDTADR = %08x\n", DTADR(chan));
	pos += seq_printf(s, "\tDDADR = %08x\n", DDADR(chan));

	return pos;
}
/* Top-level debugfs "state" file: global DMA engine information. */
static int dbg_show_state(struct seq_file *s, void *p)
{
	int written = 0;

	/* basic device status */
	written += seq_printf(s, "DMA engine status\n");
	written += seq_printf(s, "\tChannel number: %d\n", num_dma_channels);

	return written;
}
/*
 * Boilerplate generator: for each `name`, emit dbg_open_<name>() plus a
 * matching dbg_fops_<name> file_operations wired to dbg_show_<name>()
 * through the single_open() seq_file helper.  The inode's i_private
 * (the casted channel number) is forwarded to the show routine.
 */
#define DBGFS_FUNC_DECL(name) \
static int dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, dbg_show_##name, inode->i_private); \
} \
static const struct file_operations dbg_fops_##name = { \
	.owner		= THIS_MODULE, \
	.open		= dbg_open_##name, \
	.llseek		= seq_lseek, \
	.read		= seq_read, \
	.release	= single_release, \
}

DBGFS_FUNC_DECL(state);
DBGFS_FUNC_DECL(chan_state);
DBGFS_FUNC_DECL(descriptors);
DBGFS_FUNC_DECL(requester_chan);
/*
 * Create the debugfs subtree for one channel:
 *   <chandir>/<ch>/{state,descriptors,requesters}
 * Returns the channel directory dentry, or NULL on failure (in which
 * case everything partially created is removed).
 */
static struct dentry *pxa_dma_dbg_alloc_chan(int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
	struct dentry *chan_reqs = NULL;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)ch;	/* channel number stashed in i_private */

	/* each step only runs if the previous one succeeded */
	if (chan)
		chan_state = debugfs_create_file("state", 0400, chan, dt,
						 &dbg_fops_chan_state);
	if (chan_state)
		chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
						 &dbg_fops_descriptors);
	if (chan_descr)
		chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
						&dbg_fops_requester_chan);
	if (!chan_reqs)
		goto err_state;

	return chan;

err_state:
	debugfs_remove_recursive(chan);
	return NULL;
}
/*
 * Build the debugfs hierarchy:
 *   pxa_dma/state
 *   pxa_dma/channels/<n>/{state,descriptors,requesters}
 * On any failure the partial tree is removed and a single error is
 * logged; the driver itself keeps working without debugfs.
 */
static void pxa_dma_init_debugfs(void)
{
	int i;
	struct dentry *chandir;

	dbgfs_root = debugfs_create_dir(DMA_DEBUG_NAME, NULL);
	if (IS_ERR(dbgfs_root) || !dbgfs_root)
		goto err_root;

	dbgfs_state = debugfs_create_file("state", 0400, dbgfs_root, NULL,
					  &dbg_fops_state);
	if (!dbgfs_state)
		goto err_state;

	/*
	 * Fix: size the array by the element type, sizeof(*dbgfs_chan)
	 * (a struct dentry *).  The previous sizeof(*dbgfs_state) was a
	 * whole struct dentry, grossly over-allocating.
	 */
	dbgfs_chan = kmalloc(sizeof(*dbgfs_chan) * num_dma_channels,
			     GFP_KERNEL);
	if (!dbgfs_chan)
		goto err_alloc;

	chandir = debugfs_create_dir("channels", dbgfs_root);
	if (!chandir)
		goto err_chandir;

	for (i = 0; i < num_dma_channels; i++) {
		dbgfs_chan[i] = pxa_dma_dbg_alloc_chan(i, chandir);
		if (!dbgfs_chan[i])
			goto err_chans;
	}

	return;
err_chans:
err_chandir:
	kfree(dbgfs_chan);
err_alloc:
err_state:
	/* removes the root and everything created below it so far */
	debugfs_remove_recursive(dbgfs_root);
err_root:
	pr_err("pxa_dma: debugfs is not available\n");
}
/* Tear down the whole pxa_dma debugfs tree on module exit. */
static void __exit pxa_dma_cleanup_debugfs(void)
{
	debugfs_remove_recursive(dbgfs_root);
}
#else
static inline void pxa_dma_init_debugfs(void) {}
static inline void pxa_dma_cleanup_debugfs(void) {}
#endif
/**
 * pxa_request_dma - claim a free DMA channel
 * @name:        owner label (also marks the channel as in use)
 * @prio:        preferred priority class; lower classes are tried as fallback
 * @irq_handler: called from IRQ context when the channel interrupts
 * @data:        opaque cookie passed back to @irq_handler
 *
 * Returns the channel number on success, -EINVAL on bad arguments or
 * -ENODEV when no channel is free.  Runs with interrupts disabled so the
 * scan-and-claim of the channel table is atomic on this CPU.
 */
int pxa_request_dma (char *name, pxa_dma_prio prio,
			void (*irq_handler)(int, void *),
			void *data)
{
	unsigned long flags;
	int i, found = 0;

	/* basic sanity checks */
	if (!name || !irq_handler)
		return -EINVAL;

	local_irq_save(flags);

	do {
		/* try grabbing a DMA channel with the requested priority */
		for (i = 0; i < num_dma_channels; i++) {
			if ((dma_channels[i].prio == prio) &&
			    !dma_channels[i].name) {
				found = 1;
				break;
			}
		}
		/* if requested prio group is full, try a higher priority */
	} while (!found && prio--);

	if (found) {
		/* ack any stale interrupt status before handing it out */
		DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
		dma_channels[i].name = name;
		dma_channels[i].irq_handler = irq_handler;
		dma_channels[i].data = data;
	} else {
		printk (KERN_WARNING "No more available DMA channels for %s\n", name);
		i = -ENODEV;
	}

	local_irq_restore(flags);
	return i;
}
EXPORT_SYMBOL(pxa_request_dma);
/**
 * pxa_free_dma - release a channel obtained with pxa_request_dma()
 * @dma_ch: channel number previously returned by pxa_request_dma()
 *
 * Clears pending interrupt status and marks the channel free again.
 * Complains (but returns safely) on a double free.
 */
void pxa_free_dma (int dma_ch)
{
	unsigned long flags;

	if (!dma_channels[dma_ch].name) {
		printk (KERN_CRIT
			"%s: trying to free channel %d which is already freed\n",
			__func__, dma_ch);
		return;
	}

	local_irq_save(flags);
	/* ack any pending status so the next owner starts clean */
	DCSR(dma_ch) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
	dma_channels[dma_ch].name = NULL;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(pxa_free_dma);
/*
 * Shared interrupt handler for all DMA channels.  DINT has one bit per
 * interrupting channel; dispatch each set bit to the owner's callback,
 * or ack-and-squelch channels that interrupt without an owner.
 */
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	int i, dint = DINT;
	struct dma_channel *channel;

	while (dint) {
		i = __ffs(dint);	/* lowest pending channel */
		dint &= (dint - 1);	/* clear that bit */
		channel = &dma_channels[i];
		if (channel->name && channel->irq_handler) {
			channel->irq_handler(i, channel->data);
		} else {
			/*
			 * IRQ for an unregistered DMA channel:
			 * let's clear the interrupts and disable it.
			 */
			printk (KERN_WARNING "spurious IRQ for DMA channel %d\n", i);
			DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
		}
	}
	return IRQ_HANDLED;
}
/**
 * pxa_init_dma - set up the DMA channel table and shared interrupt
 * @irq:    the (single) DMA interrupt line
 * @num_ch: number of hardware channels on this SoC
 *
 * Returns 0 on success or a negative errno.  Also creates the debugfs
 * tree when CONFIG_DEBUG_FS is enabled.
 * NOTE(review): IRQF_DISABLED is deprecated/removed in later kernels —
 * confirm against the target kernel version before changing.
 */
int __init pxa_init_dma(int irq, int num_ch)
{
	int i, ret;

	dma_channels = kzalloc(sizeof(struct dma_channel) * num_ch, GFP_KERNEL);
	if (dma_channels == NULL)
		return -ENOMEM;

	/* dma channel priorities on pxa2xx processors:
	 * ch 0 - 3,  16 - 19  <--> (0) DMA_PRIO_HIGH
	 * ch 4 - 7,  20 - 23  <--> (1) DMA_PRIO_MEDIUM
	 * ch 8 - 15, 24 - 31  <--> (2) DMA_PRIO_LOW
	 */
	for (i = 0; i < num_ch; i++) {
		DCSR(i) = 0;
		/* (i & 0xf) >> 2 maps the channel index into the table above */
		dma_channels[i].prio = min((i & 0xf) >> 2, DMA_PRIO_LOW);
		spin_lock_init(&dma_channels[i].lock);
	}

	ret = request_irq(irq, dma_irq_handler, IRQF_DISABLED, "DMA", NULL);
	if (ret) {
		printk (KERN_CRIT "Wow!  Can't register IRQ for DMA\n");
		kfree(dma_channels);
		return ret;
	}
	num_dma_channels = num_ch;

	pxa_dma_init_debugfs();

	return 0;
}
| gpl-2.0 |
MassStash/htc_m8_kernel_sense_6.0 | Documentation/ia64/aliasing-test.c | 8107 | 6132 | /*
* Exercise /dev/mem mmap cases that have been troublesome in the past
*
* (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <dirent.h>
#include <fcntl.h>
#include <fnmatch.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/pci.h>
int sum;	/* accumulates mapped words so the reads can't be optimized away */

/*
 * mmap [offset, offset+length) of `path` and, if `touch` is set, read
 * every word of the mapping.
 *
 * Returns 0 on success, 1 when the range is not mappable (an acceptable
 * outcome for these tests), or -1 on any other error.
 *
 * Fix: the file descriptor was leaked on the mmap- and munmap-failure
 * paths; it is now closed on every exit path.
 */
static int map_mem(char *path, off_t offset, size_t length, int touch)
{
	int fd, rc;
	void *addr;
	int *c;

	fd = open(path, O_RDWR);
	if (fd == -1) {
		perror(path);
		return -1;
	}

	/* for PCI resource files, request a memory (not I/O port) mapping */
	if (fnmatch("/proc/bus/pci/*", path, 0) == 0) {
		rc = ioctl(fd, PCIIOC_MMAP_IS_MEM);
		if (rc == -1)
			perror("PCIIOC_MMAP_IS_MEM ioctl");
	}

	addr = mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset);
	if (addr == MAP_FAILED) {
		close(fd);	/* was leaked here */
		return 1;
	}

	if (touch) {
		c = (int *) addr;
		while (c < (int *) (addr + length))
			sum += *c++;
	}

	rc = munmap(addr, length);
	if (rc == -1) {
		perror("munmap");
		close(fd);	/* don't leak the descriptor on error either */
		return -1;
	}

	close(fd);
	return 0;
}
/*
 * Recursively walk `path`, calling map_mem() on every entry whose name
 * matches `file` (fnmatch pattern) and recursing into subdirectories.
 *
 * Returns the OR of the map_mem() results (0/1), or a negative value as
 * soon as any mapping fails hard.
 *
 * Fix: the original returned straight out of the loop on error, leaking
 * `path2` plus the entire scandir() namelist; all allocations are now
 * released on every path.
 */
static int scan_tree(char *path, char *file, off_t offset, size_t length, int touch)
{
	struct dirent **namelist;
	char *name, *path2;
	int i, n, r, rc, result = 0;
	struct stat buf;

	n = scandir(path, &namelist, 0, alphasort);
	if (n < 0) {
		perror("scandir");
		return -1;
	}

	for (i = 0; i < n; i++) {
		if (result < 0)
			break;	/* hard failure already seen; just clean up */

		name = namelist[i]->d_name;

		/* skip "." and ".." */
		if (fnmatch(".", name, 0) == 0 || fnmatch("..", name, 0) == 0)
			continue;

		path2 = malloc(strlen(path) + strlen(name) + 3);
		if (path2 == NULL) {
			perror("malloc");
			result = -1;
			break;
		}
		strcpy(path2, path);
		strcat(path2, "/");
		strcat(path2, name);

		rc = 0;
		if (fnmatch(file, name, 0) == 0) {
			rc = map_mem(path2, offset, length, touch);
			if (rc == 0)
				fprintf(stderr, "PASS: %s 0x%lx-0x%lx is %s\n", path2, offset, offset + length, touch ? "readable" : "mappable");
			else if (rc > 0)
				fprintf(stderr, "PASS: %s 0x%lx-0x%lx not mappable\n", path2, offset, offset + length);
			else
				fprintf(stderr, "FAIL: %s 0x%lx-0x%lx not accessible\n", path2, offset, offset + length);
		} else {
			r = lstat(path2, &buf);
			if (r == 0 && S_ISDIR(buf.st_mode))
				rc = scan_tree(path2, file, offset, length, touch);
		}

		if (rc < 0)
			result = rc;	/* propagate the error code */
		else
			result |= rc;
		free(path2);
	}

	/* release every entry scandir() allocated, visited or not */
	for (i = 0; i < n; i++)
		free(namelist[i]);
	free(namelist);
	return result;
}
char buf[1024];	/* scratch read buffer, shared — this program is single-threaded */

/*
 * Enable a PCI ROM through its sysfs "rom" attribute (by writing "1"),
 * then read the whole ROM contents.
 *
 * Returns the number of bytes read, or -1 on error.
 *
 * Fix: the file descriptor was leaked when the enabling write failed;
 * it is now closed on that path too.
 */
static int read_rom(char *path)
{
	int fd, rc;
	size_t size = 0;

	fd = open(path, O_RDWR);
	if (fd == -1) {
		perror(path);
		return -1;
	}

	rc = write(fd, "1", 2);
	if (rc <= 0) {
		perror("write");
		close(fd);	/* was leaked here */
		return -1;
	}

	do {
		rc = read(fd, buf, sizeof(buf));
		if (rc > 0)
			size += rc;
	} while (rc > 0);

	close(fd);
	return size;
}
/*
 * Recursively walk `path` looking for entries matching `file` (fnmatch
 * pattern, e.g. "rom") and attempt to read each one via read_rom().
 * An unreadable ROM still counts as PASS — the point of the exercise is
 * that the access must not machine-check.
 *
 * NOTE(review): like the original scan_tree(), the early `return rc`
 * paths leak `path2` and the remaining scandir() namelist entries.
 * Harmless in a short-lived test program, but worth fixing if reused.
 */
static int scan_rom(char *path, char *file)
{
	struct dirent **namelist;
	char *name, *path2;
	int i, n, r, rc = 0, result = 0;
	struct stat buf;

	n = scandir(path, &namelist, 0, alphasort);
	if (n < 0) {
		perror("scandir");
		return -1;
	}

	for (i = 0; i < n; i++) {
		name = namelist[i]->d_name;

		/* skip "." and ".." */
		if (fnmatch(".", name, 0) == 0)
			goto skip;
		if (fnmatch("..", name, 0) == 0)
			goto skip;

		path2 = malloc(strlen(path) + strlen(name) + 3);
		strcpy(path2, path);
		strcat(path2, "/");
		strcat(path2, name);

		if (fnmatch(file, name, 0) == 0) {
			rc = read_rom(path2);

			/*
			 * It's OK if the ROM is unreadable.  Maybe there
			 * is no ROM, or some other error occurred.  The
			 * important thing is that no MCA happened.
			 */
			if (rc > 0)
				fprintf(stderr, "PASS: %s read %d bytes\n", path2, rc);
			else {
				fprintf(stderr, "PASS: %s not readable\n", path2);
				return rc;
			}
		} else {
			r = lstat(path2, &buf);
			if (r == 0 && S_ISDIR(buf.st_mode)) {
				rc = scan_rom(path2, file);
				if (rc < 0)
					return rc;
			}
		}

		result |= rc;
		free(path2);

skip:
		free(namelist[i]);
	}
	free(namelist);
	return result;
}
/*
 * Exercise the legacy PC memory ranges through every interface that
 * exposes them (/dev/mem, sysfs legacy_mem, sysfs rom, /proc/bus/pci),
 * printing PASS/FAIL lines to stderr.
 *
 * NOTE(review): the exit status is only the result of the last /dev/mem
 * map (where rc > 0, "not mappable", is itself a PASS), and the scan_*
 * return values are discarded — the console output, not the exit code,
 * is the real report.  Confirm before scripting around this binary.
 */
int main(void)
{
	int rc;

	if (map_mem("/dev/mem", 0, 0xA0000, 1) == 0)
		fprintf(stderr, "PASS: /dev/mem 0x0-0xa0000 is readable\n");
	else
		fprintf(stderr, "FAIL: /dev/mem 0x0-0xa0000 not accessible\n");

	/*
	 * It's not safe to blindly read the VGA frame buffer.  If you know
	 * how to poke the card the right way, it should respond, but it's
	 * not safe in general.  Many machines, e.g., Intel chipsets, cover
	 * up a non-responding card by just returning -1, but others will
	 * report the failure as a machine check.
	 */
	if (map_mem("/dev/mem", 0xA0000, 0x20000, 0) == 0)
		fprintf(stderr, "PASS: /dev/mem 0xa0000-0xc0000 is mappable\n");
	else
		fprintf(stderr, "FAIL: /dev/mem 0xa0000-0xc0000 not accessible\n");

	if (map_mem("/dev/mem", 0xC0000, 0x40000, 1) == 0)
		fprintf(stderr, "PASS: /dev/mem 0xc0000-0x100000 is readable\n");
	else
		fprintf(stderr, "FAIL: /dev/mem 0xc0000-0x100000 not accessible\n");

	/*
	 * Often you can map all the individual pieces above (0-0xA0000,
	 * 0xA0000-0xC0000, and 0xC0000-0x100000), but can't map the whole
	 * thing at once.  This is because the individual pieces use different
	 * attributes, and there's no single attribute supported over the
	 * whole region.
	 */
	rc = map_mem("/dev/mem", 0, 1024*1024, 0);
	if (rc == 0)
		fprintf(stderr, "PASS: /dev/mem 0x0-0x100000 is mappable\n");
	else if (rc > 0)
		fprintf(stderr, "PASS: /dev/mem 0x0-0x100000 not mappable\n");
	else
		fprintf(stderr, "FAIL: /dev/mem 0x0-0x100000 not accessible\n");

	/* the same ranges again via the sysfs legacy_mem files */
	scan_tree("/sys/class/pci_bus", "legacy_mem", 0, 0xA0000, 1);
	scan_tree("/sys/class/pci_bus", "legacy_mem", 0xA0000, 0x20000, 0);
	scan_tree("/sys/class/pci_bus", "legacy_mem", 0xC0000, 0x40000, 1);
	scan_tree("/sys/class/pci_bus", "legacy_mem", 0, 1024*1024, 0);

	scan_rom("/sys/devices", "rom");

	/* and once more via the /proc/bus/pci per-device files */
	scan_tree("/proc/bus/pci", "??.?", 0, 0xA0000, 1);
	scan_tree("/proc/bus/pci", "??.?", 0xA0000, 0x20000, 0);
	scan_tree("/proc/bus/pci", "??.?", 0xC0000, 0x40000, 1);
	scan_tree("/proc/bus/pci", "??.?", 0, 1024*1024, 0);

	return rc;
}
| gpl-2.0 |
Epirex/android_kernel_samsung_golden | arch/parisc/math-emu/fmpyfadd.c | 12203 | 80274 | /*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/fmpyfadd.c $Revision: 1.1 $
*
* Purpose:
* Double Floating-point Multiply Fused Add
* Double Floating-point Multiply Negate Fused Add
* Single Floating-point Multiply Fused Add
* Single Floating-point Multiply Negate Fused Add
*
* External Interfaces:
* dbl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
* dbl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
* sgl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
* sgl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
*
* Internal Interfaces:
*
* Theory:
* <<please update with a overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
/*
* Double Floating-point Multiply Fused Add
*/
int
dbl_fmpyfadd(
dbl_floating_point *src1ptr,
dbl_floating_point *src2ptr,
dbl_floating_point *src3ptr,
unsigned int *status,
dbl_floating_point *dstptr)
{
unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2, opnd3p1, opnd3p2;
register unsigned int tmpresp1, tmpresp2, tmpresp3, tmpresp4;
unsigned int rightp1, rightp2, rightp3, rightp4;
unsigned int resultp1, resultp2 = 0, resultp3 = 0, resultp4 = 0;
register int mpy_exponent, add_exponent, count;
boolean inexact = FALSE, is_tiny = FALSE;
unsigned int signlessleft1, signlessright1, save;
register int result_exponent, diff_exponent;
int sign_save, jumpsize;
Dbl_copyfromptr(src1ptr,opnd1p1,opnd1p2);
Dbl_copyfromptr(src2ptr,opnd2p1,opnd2p2);
Dbl_copyfromptr(src3ptr,opnd3p1,opnd3p2);
/*
* set sign bit of result of multiply
*/
if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
Dbl_setnegativezerop1(resultp1);
else Dbl_setzerop1(resultp1);
/*
* Generate multiply exponent
*/
mpy_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) - DBL_BIAS;
/*
* check first operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd1p1)) {
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
if (Dbl_isnotnan(opnd2p1,opnd2p2) &&
Dbl_isnotnan(opnd3p1,opnd3p2)) {
if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
/*
* invalid since operands are infinity
* and zero
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
(Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd1p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd1p1);
}
/*
* is second operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* is third operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd2p1)) {
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
if (Dbl_isnotnan(opnd3p1,opnd3p2)) {
if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
/*
* invalid since multiply operands are
* zero & infinity
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(opnd2p1,opnd2p2);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
(Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
}
/*
* is third operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check third operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd3p1)) {
if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
/* return infinity */
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
} else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate multiply mantissa
*/
if (Dbl_isnotzero_exponent(opnd1p1)) {
/* set hidden bit */
Dbl_clear_signexponent_set_hidden(opnd1p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Dbl_or_signs(opnd3p1,resultp1);
} else {
Dbl_and_signs(opnd3p1,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Dbl_iszero_exponent(opnd3p1) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Dbl_signextendedsign(opnd3p1);
result_exponent = 0;
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
Dbl_set_sign(opnd3p1,/*using*/sign_save);
Dbl_setwrapped_exponent(opnd3p1,result_exponent,
unfl);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, adjust exponent */
Dbl_clear_signexponent(opnd1p1);
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_normalize(opnd1p1,opnd1p2,mpy_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Dbl_isnotzero_exponent(opnd2p1)) {
Dbl_clear_signexponent_set_hidden(opnd2p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Dbl_or_signs(opnd3p1,resultp1);
} else {
Dbl_and_signs(opnd3p1,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Dbl_iszero_exponent(opnd3p1) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Dbl_signextendedsign(opnd3p1);
result_exponent = 0;
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
Dbl_set_sign(opnd3p1,/*using*/sign_save);
Dbl_setwrapped_exponent(opnd3p1,result_exponent,
unfl);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized; want to normalize */
Dbl_clear_signexponent(opnd2p1);
Dbl_leftshiftby1(opnd2p1,opnd2p2);
Dbl_normalize(opnd2p1,opnd2p2,mpy_exponent);
}
/* Multiply the first two source mantissas together */
/*
* The intermediate result will be kept in tmpres,
* which needs enough room for 106 bits of mantissa,
* so lets call it a Double extended.
*/
Dblext_setzero(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
/*
* Four bits at a time are inspected in each loop, and a
* simple shift and add multiply algorithm is used.
*/
for (count = DBL_P-1; count >= 0; count -= 4) {
Dblext_rightshiftby4(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
if (Dbit28p2(opnd1p2)) {
/* Fourword_add should be an ADD followed by 3 ADDC's */
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<3 | opnd2p2>>29, opnd2p2<<3, 0, 0);
}
if (Dbit29p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<2 | opnd2p2>>30, opnd2p2<<2, 0, 0);
}
if (Dbit30p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<1 | opnd2p2>>31, opnd2p2<<1, 0, 0);
}
if (Dbit31p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1, opnd2p2, 0, 0);
}
Dbl_rightshiftby4(opnd1p1,opnd1p2);
}
if (Is_dexthiddenoverflow(tmpresp1)) {
/* result mantissa >= 2 (mantissa overflow) */
mpy_exponent++;
Dblext_rightshiftby1(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
}
/*
* Restore the sign of the mpy result which was saved in resultp1.
* The exponent will continue to be kept in mpy_exponent.
*/
Dblext_set_sign(tmpresp1,Dbl_sign(resultp1));
/*
* No rounding is required, since the result of the multiply
* is exact in the extended format.
*/
/*
* Now we are ready to perform the add portion of the operation.
*
* The exponents need to be kept as integers for now, since the
* multiply result might not fit into the exponent field. We
* can't overflow or underflow because of this yet, since the
* add could bring the final result back into range.
*/
add_exponent = Dbl_exponent(opnd3p1);
/*
* Check for denormalized or zero add operand.
*/
if (add_exponent == 0) {
/* check for zero */
if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
/* right is zero */
/* Left can't be zero and must be result.
*
* The final result is now in tmpres and mpy_exponent,
* and needs to be rounded and squeezed back into
* double precision format from double extended.
*/
result_exponent = mpy_exponent;
Dblext_copy(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);/*save sign*/
goto round;
}
/*
* Neither are zeroes.
* Adjust exponent and normalize add operand.
*/
sign_save = Dbl_signextendedsign(opnd3p1); /* save sign */
Dbl_clear_signexponent(opnd3p1);
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,add_exponent);
Dbl_set_sign(opnd3p1,sign_save); /* restore sign */
} else {
Dbl_clear_exponent_set_hidden(opnd3p1);
}
/*
* Copy opnd3 to the double extended variable called right.
*/
Dbl_copyto_dblext(opnd3p1,opnd3p2,rightp1,rightp2,rightp3,rightp4);
/*
* A zero "save" helps discover equal operands (for later),
* and is used in swapping operands (if needed).
*/
Dblext_xortointp1(tmpresp1,rightp1,/*to*/save);
/*
* Compare magnitude of operands.
*/
Dblext_copytoint_exponentmantissap1(tmpresp1,signlessleft1);
Dblext_copytoint_exponentmantissap1(rightp1,signlessright1);
if (mpy_exponent < add_exponent || mpy_exponent == add_exponent &&
Dblext_ismagnitudeless(tmpresp2,rightp2,signlessleft1,signlessright1)){
/*
* Set the left operand to the larger one by XOR swap.
* First finish the first word "save".
*/
Dblext_xorfromintp1(save,rightp1,/*to*/rightp1);
Dblext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
Dblext_swap_lower(tmpresp2,tmpresp3,tmpresp4,
rightp2,rightp3,rightp4);
/* also setup exponents used in rest of routine */
diff_exponent = add_exponent - mpy_exponent;
result_exponent = add_exponent;
} else {
/* also setup exponents used in rest of routine */
diff_exponent = mpy_exponent - add_exponent;
result_exponent = mpy_exponent;
}
/* Invariant: left is not smaller than right. */
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for
* this infrequent case.
*/
if (diff_exponent > DBLEXT_THRESHOLD) {
diff_exponent = DBLEXT_THRESHOLD;
}
/* Align right operand by shifting it to the right */
Dblext_clear_sign(rightp1);
Dblext_right_align(rightp1,rightp2,rightp3,rightp4,
/*shifted by*/diff_exponent);
/* Treat sum and difference of the operands separately. */
if ((int)save < 0) {
/*
* Difference of the two operands. Overflow can occur if the
* multiply overflowed. A borrow can occur out of the hidden
* bit and force a post normalization phase.
*/
Dblext_subtract(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
rightp1,rightp2,rightp3,rightp4,
resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);
if (Dbl_iszero_hidden(resultp1)) {
/* Handle normalization */
/* A straightforward algorithm would now shift the
* result and extension left until the hidden bit
* becomes one. Not all of the extension bits need
* participate in the shift. Only the two most
* significant bits (round and guard) are needed.
* If only a single shift is needed then the guard
* bit becomes a significant low order bit and the
* extension must participate in the rounding.
* If more than a single shift is needed, then all
* bits to the right of the guard bit are zeros,
* and the guard bit may or may not be zero. */
Dblext_leftshiftby1(resultp1,resultp2,resultp3,
resultp4);
/* Need to check for a zero result. The sign and
* exponent fields have already been zeroed. The more
* efficient test of the full object can be used.
*/
if(Dblext_iszero(resultp1,resultp2,resultp3,resultp4)){
/* Must have been "x-x" or "x+(-x)". */
if (Is_rounding_mode(ROUNDMINUS))
Dbl_setone_sign(resultp1);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if (Dbl_isone_hidden(resultp1)) {
/* No further normalization is needed */
goto round;
}
/* Discover first one bit to determine shift amount.
* Use a modified binary search. We have already
* shifted the result one position right and still
* not found a one so the remainder of the extension
* must be zero and simplifies rounding. */
/* Scan bytes */
while (Dbl_iszero_hiddenhigh7mantissa(resultp1)) {
Dblext_leftshiftby8(resultp1,resultp2,resultp3,resultp4);
result_exponent -= 8;
}
/* Now narrow it down to the nibble */
if (Dbl_iszero_hiddenhigh3mantissa(resultp1)) {
/* The lower nibble contains the
* normalizing one */
Dblext_leftshiftby4(resultp1,resultp2,resultp3,resultp4);
result_exponent -= 4;
}
/* Select case where first bit is set (already
* normalized) otherwise select the proper shift. */
jumpsize = Dbl_hiddenhigh3mantissa(resultp1);
if (jumpsize <= 7) switch(jumpsize) {
case 1:
Dblext_leftshiftby3(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 3;
break;
case 2:
case 3:
Dblext_leftshiftby2(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 2;
break;
case 4:
case 5:
case 6:
case 7:
Dblext_leftshiftby1(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 1;
break;
}
} /* end if (hidden...)... */
/* Fall through and round */
} /* end if (save < 0)... */
else {
/* Add magnitudes */
Dblext_addition(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
rightp1,rightp2,rightp3,rightp4,
/*to*/resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);
if (Dbl_isone_hiddenoverflow(resultp1)) {
/* Prenormalization required. */
Dblext_arithrightshiftby1(resultp1,resultp2,resultp3,
resultp4);
result_exponent++;
} /* end if hiddenoverflow... */
} /* end else ...add magnitudes... */
/* Round the result. If the extension and lower two words are
* all zeros, then the result is exact. Otherwise round in the
* correct direction. Underflow is possible. If a postnormalization
* is necessary, then the mantissa is all zeros so no shift is needed.
*/
round:
if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
Dblext_denormalize(resultp1,resultp2,resultp3,resultp4,
result_exponent,is_tiny);
}
Dbl_set_sign(resultp1,/*using*/sign_save);
if (Dblext_isnotzero_mantissap3(resultp3) ||
Dblext_isnotzero_mantissap4(resultp4)) {
inexact = TRUE;
switch(Rounding_mode()) {
case ROUNDNEAREST: /* The default. */
if (Dblext_isone_highp3(resultp3)) {
/* at least 1/2 ulp */
if (Dblext_isnotzero_low31p3(resultp3) ||
Dblext_isnotzero_mantissap4(resultp4) ||
Dblext_isone_lowp2(resultp2)) {
/* either exactly half way and odd or
* more than 1/2ulp */
Dbl_increment(resultp1,resultp2);
}
}
break;
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
/* Round up positive results */
Dbl_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
/* Round down negative results */
Dbl_increment(resultp1,resultp2);
}
case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if (Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
}
if (result_exponent >= DBL_INFINITY_EXPONENT) {
/* trap if OVERFLOWTRAP enabled */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_OVERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return (OPC_2E_OVERFLOWEXCEPTION);
}
inexact = TRUE;
Set_overflowflag();
/* set result to infinity or largest number */
Dbl_setoverflow(resultp1,resultp2);
} else if (result_exponent <= 0) { /* underflow case */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_UNDERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(OPC_2E_UNDERFLOWEXCEPTION);
}
else if (inexact && is_tiny) Set_underflowflag();
}
else Dbl_set_exponent(resultp1,result_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
/*
* Double Floating-point Multiply Negate Fused Add
*/
/*
 * dbl_fmpynfadd - IEEE double-precision fused multiply-NEGATE-add.
 *
 * Computes *dstptr = -(*src1ptr * *src2ptr) + *src3ptr with a single
 * rounding at the end: the full 106-bit product is carried exactly in a
 * four-word "double extended" temporary (tmpresp1..4), the addend is
 * aligned against it, and only the final packed result is rounded.
 *
 * Parameters:
 *   src1ptr, src2ptr - multiply operands
 *   src3ptr          - addend
 *   status           - FP status word (flags set via Set_*flag macros)
 *   dstptr           - destination for the packed double result
 *
 * Returns NOEXCEPTION on success, or an OPC_2E_* exception code
 * (possibly OR'ed with OPC_2E_INEXACTEXCEPTION) when the corresponding
 * trap is enabled.
 */
dbl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
dbl_floating_point *src1ptr, *src2ptr, *src3ptr, *dstptr;
unsigned int *status;
{
unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2, opnd3p1, opnd3p2;
register unsigned int tmpresp1, tmpresp2, tmpresp3, tmpresp4;
unsigned int rightp1, rightp2, rightp3, rightp4;
unsigned int resultp1, resultp2 = 0, resultp3 = 0, resultp4 = 0;
register int mpy_exponent, add_exponent, count;
boolean inexact = FALSE, is_tiny = FALSE;
unsigned int signlessleft1, signlessright1, save;
register int result_exponent, diff_exponent;
int sign_save, jumpsize;
Dbl_copyfromptr(src1ptr,opnd1p1,opnd1p2);
Dbl_copyfromptr(src2ptr,opnd2p1,opnd2p2);
Dbl_copyfromptr(src3ptr,opnd3p1,opnd3p2);
/*
 * set sign bit of result of multiply
 *
 * NOTE: the sense is inverted relative to dbl_fmpyfadd (differing
 * operand signs yield a POSITIVE product sign here) -- this is what
 * implements the "negate" of multiply-negate-add.
 */
if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
	Dbl_setzerop1(resultp1);
else
	Dbl_setnegativezerop1(resultp1);
/*
 * Generate multiply exponent
 */
mpy_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) - DBL_BIAS;
/*
 * check first operand for NaN's or infinity
 */
if (Dbl_isinfinity_exponent(opnd1p1)) {
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
if (Dbl_isnotnan(opnd2p1,opnd2p2) &&
Dbl_isnotnan(opnd3p1,opnd3p2)) {
if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
/*
 * invalid since operands are infinity
 * and zero
 */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
 * Check third operand for infinity with a
 * sign opposite of the multiply result
 */
if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
(Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
/*
 * invalid since attempting a magnitude
 * subtraction of infinities
 */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
 * return infinity
 */
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
 * is NaN; signaling or quiet?
 */
if (Dbl_isone_signaling(opnd1p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd1p1);
}
/*
 * is second operand a signaling NaN?
 */
else if (Dbl_is_signalingnan(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
 * is third operand a signaling NaN?
 */
else if (Dbl_is_signalingnan(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/*
 * return quiet NaN
 */
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
}
/*
 * check second operand for NaN's or infinity
 */
if (Dbl_isinfinity_exponent(opnd2p1)) {
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
if (Dbl_isnotnan(opnd3p1,opnd3p2)) {
if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
/*
 * invalid since multiply operands are
 * zero & infinity
 */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(opnd2p1,opnd2p2);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
 * Check third operand for infinity with a
 * sign opposite of the multiply result
 */
if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
(Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
/*
 * invalid since attempting a magnitude
 * subtraction of infinities
 */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
 * return infinity
 */
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
 * is NaN; signaling or quiet?
 */
if (Dbl_isone_signaling(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
}
/*
 * is third operand a signaling NaN?
 */
else if (Dbl_is_signalingnan(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/*
 * return quiet NaN
 */
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
}
/*
 * check third operand for NaN's or infinity
 */
if (Dbl_isinfinity_exponent(opnd3p1)) {
if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
/* return infinity */
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
} else {
/*
 * is NaN; signaling or quiet?
 */
if (Dbl_isone_signaling(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
}
/*
 * return quiet NaN
 */
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
}
/*
 * Generate multiply mantissa
 */
if (Dbl_isnotzero_exponent(opnd1p1)) {
/* set hidden bit */
Dbl_clear_signexponent_set_hidden(opnd1p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
/*
 * Perform the add opnd3 with zero here.
 */
if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Dbl_or_signs(opnd3p1,resultp1);
} else {
Dbl_and_signs(opnd3p1,resultp1);
}
}
/*
 * Now let's check for trapped underflow case.
 */
else if (Dbl_iszero_exponent(opnd3p1) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Dbl_signextendedsign(opnd3p1);
result_exponent = 0;
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
Dbl_set_sign(opnd3p1,/*using*/sign_save);
Dbl_setwrapped_exponent(opnd3p1,result_exponent,
unfl);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, adjust exponent */
Dbl_clear_signexponent(opnd1p1);
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_normalize(opnd1p1,opnd1p2,mpy_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Dbl_isnotzero_exponent(opnd2p1)) {
Dbl_clear_signexponent_set_hidden(opnd2p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
/*
 * Perform the add opnd3 with zero here.
 */
if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Dbl_or_signs(opnd3p1,resultp1);
} else {
Dbl_and_signs(opnd3p1,resultp1);
}
}
/*
 * Now let's check for trapped underflow case.
 */
else if (Dbl_iszero_exponent(opnd3p1) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Dbl_signextendedsign(opnd3p1);
result_exponent = 0;
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
Dbl_set_sign(opnd3p1,/*using*/sign_save);
Dbl_setwrapped_exponent(opnd3p1,result_exponent,
unfl);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized; want to normalize */
Dbl_clear_signexponent(opnd2p1);
Dbl_leftshiftby1(opnd2p1,opnd2p2);
Dbl_normalize(opnd2p1,opnd2p2,mpy_exponent);
}
/* Multiply the first two source mantissas together */
/*
 * The intermediate result will be kept in tmpres,
 * which needs enough room for 106 bits of mantissa,
 * so lets call it a Double extended.
 */
Dblext_setzero(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
/*
 * Four bits at a time are inspected in each loop, and a
 * simple shift and add multiply algorithm is used.
 */
for (count = DBL_P-1; count >= 0; count -= 4) {
Dblext_rightshiftby4(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
if (Dbit28p2(opnd1p2)) {
/* Fourword_add should be an ADD followed by 3 ADDC's */
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<3 | opnd2p2>>29, opnd2p2<<3, 0, 0);
}
if (Dbit29p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<2 | opnd2p2>>30, opnd2p2<<2, 0, 0);
}
if (Dbit30p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<1 | opnd2p2>>31, opnd2p2<<1, 0, 0);
}
if (Dbit31p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1, opnd2p2, 0, 0);
}
Dbl_rightshiftby4(opnd1p1,opnd1p2);
}
if (Is_dexthiddenoverflow(tmpresp1)) {
/* result mantissa >= 2 (mantissa overflow) */
mpy_exponent++;
Dblext_rightshiftby1(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
}
/*
 * Restore the sign of the mpy result which was saved in resultp1.
 * The exponent will continue to be kept in mpy_exponent.
 */
Dblext_set_sign(tmpresp1,Dbl_sign(resultp1));
/*
 * No rounding is required, since the result of the multiply
 * is exact in the extended format.
 */
/*
 * Now we are ready to perform the add portion of the operation.
 *
 * The exponents need to be kept as integers for now, since the
 * multiply result might not fit into the exponent field. We
 * can't overflow or underflow because of this yet, since the
 * add could bring the final result back into range.
 */
add_exponent = Dbl_exponent(opnd3p1);
/*
 * Check for denormalized or zero add operand.
 */
if (add_exponent == 0) {
/* check for zero */
if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
/* right is zero */
/* Left can't be zero and must be result.
 *
 * The final result is now in tmpres and mpy_exponent,
 * and needs to be rounded and squeezed back into
 * double precision format from double extended.
 */
result_exponent = mpy_exponent;
Dblext_copy(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);/*save sign*/
goto round;
}
/*
 * Neither are zeroes.
 * Adjust exponent and normalize add operand.
 */
sign_save = Dbl_signextendedsign(opnd3p1); /* save sign */
Dbl_clear_signexponent(opnd3p1);
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,add_exponent);
Dbl_set_sign(opnd3p1,sign_save); /* restore sign */
} else {
Dbl_clear_exponent_set_hidden(opnd3p1);
}
/*
 * Copy opnd3 to the double extended variable called right.
 */
Dbl_copyto_dblext(opnd3p1,opnd3p2,rightp1,rightp2,rightp3,rightp4);
/*
 * A zero "save" helps discover equal operands (for later),
 * and is used in swapping operands (if needed).
 */
Dblext_xortointp1(tmpresp1,rightp1,/*to*/save);
/*
 * Compare magnitude of operands.
 */
Dblext_copytoint_exponentmantissap1(tmpresp1,signlessleft1);
Dblext_copytoint_exponentmantissap1(rightp1,signlessright1);
if (mpy_exponent < add_exponent || mpy_exponent == add_exponent &&
Dblext_ismagnitudeless(tmpresp2,rightp2,signlessleft1,signlessright1)){
/*
 * Set the left operand to the larger one by XOR swap.
 * First finish the first word "save".
 */
Dblext_xorfromintp1(save,rightp1,/*to*/rightp1);
Dblext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
Dblext_swap_lower(tmpresp2,tmpresp3,tmpresp4,
rightp2,rightp3,rightp4);
/* also setup exponents used in rest of routine */
diff_exponent = add_exponent - mpy_exponent;
result_exponent = add_exponent;
} else {
/* also setup exponents used in rest of routine */
diff_exponent = mpy_exponent - add_exponent;
result_exponent = mpy_exponent;
}
/* Invariant: left is not smaller than right. */
/*
 * Special case alignment of operands that would force alignment
 * beyond the extent of the extension. A further optimization
 * could special case this but only reduces the path length for
 * this infrequent case.
 */
if (diff_exponent > DBLEXT_THRESHOLD) {
diff_exponent = DBLEXT_THRESHOLD;
}
/* Align right operand by shifting it to the right */
Dblext_clear_sign(rightp1);
Dblext_right_align(rightp1,rightp2,rightp3,rightp4,
/*shifted by*/diff_exponent);
/* Treat sum and difference of the operands separately. */
/*
 * "save" holds the XOR of the two first words, so its sign bit is
 * set exactly when the operand signs differ (effective subtraction).
 */
if ((int)save < 0) {
/*
 * Difference of the two operands. Overflow can occur if the
 * multiply overflowed. A borrow can occur out of the hidden
 * bit and force a post normalization phase.
 */
Dblext_subtract(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
rightp1,rightp2,rightp3,rightp4,
resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);
if (Dbl_iszero_hidden(resultp1)) {
/* Handle normalization */
/* A straightforward algorithm would now shift the
 * result and extension left until the hidden bit
 * becomes one. Not all of the extension bits need
 * participate in the shift. Only the two most
 * significant bits (round and guard) are needed.
 * If only a single shift is needed then the guard
 * bit becomes a significant low order bit and the
 * extension must participate in the rounding.
 * If more than a single shift is needed, then all
 * bits to the right of the guard bit are zeros,
 * and the guard bit may or may not be zero. */
Dblext_leftshiftby1(resultp1,resultp2,resultp3,
resultp4);
/* Need to check for a zero result. The sign and
 * exponent fields have already been zeroed. The more
 * efficient test of the full object can be used.
 */
if (Dblext_iszero(resultp1,resultp2,resultp3,resultp4)) {
/* Must have been "x-x" or "x+(-x)". */
if (Is_rounding_mode(ROUNDMINUS))
Dbl_setone_sign(resultp1);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if (Dbl_isone_hidden(resultp1)) {
/* No further normalization is needed */
goto round;
}
/* Discover first one bit to determine shift amount.
 * Use a modified binary search. We have already
 * shifted the result one position right and still
 * not found a one so the remainder of the extension
 * must be zero and simplifies rounding. */
/* Scan bytes */
while (Dbl_iszero_hiddenhigh7mantissa(resultp1)) {
Dblext_leftshiftby8(resultp1,resultp2,resultp3,resultp4);
result_exponent -= 8;
}
/* Now narrow it down to the nibble */
if (Dbl_iszero_hiddenhigh3mantissa(resultp1)) {
/* The lower nibble contains the
 * normalizing one */
Dblext_leftshiftby4(resultp1,resultp2,resultp3,resultp4);
result_exponent -= 4;
}
/* Select case where first bit is set (already
 * normalized) otherwise select the proper shift. */
jumpsize = Dbl_hiddenhigh3mantissa(resultp1);
if (jumpsize <= 7) switch(jumpsize) {
case 1:
Dblext_leftshiftby3(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 3;
break;
case 2:
case 3:
Dblext_leftshiftby2(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 2;
break;
case 4:
case 5:
case 6:
case 7:
Dblext_leftshiftby1(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 1;
break;
}
} /* end if (hidden...)... */
/* Fall through and round */
} /* end if (save < 0)... */
else {
/* Add magnitudes */
Dblext_addition(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
rightp1,rightp2,rightp3,rightp4,
/*to*/resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);
if (Dbl_isone_hiddenoverflow(resultp1)) {
/* Prenormalization required. */
Dblext_arithrightshiftby1(resultp1,resultp2,resultp3,
resultp4);
result_exponent++;
} /* end if hiddenoverflow... */
} /* end else ...add magnitudes... */
/* Round the result. If the extension and lower two words are
 * all zeros, then the result is exact. Otherwise round in the
 * correct direction. Underflow is possible. If a postnormalization
 * is necessary, then the mantissa is all zeros so no shift is needed.
 */
round:
if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
Dblext_denormalize(resultp1,resultp2,resultp3,resultp4,
result_exponent,is_tiny);
}
Dbl_set_sign(resultp1,/*using*/sign_save);
if (Dblext_isnotzero_mantissap3(resultp3) ||
Dblext_isnotzero_mantissap4(resultp4)) {
inexact = TRUE;
switch(Rounding_mode()) {
case ROUNDNEAREST: /* The default. */
if (Dblext_isone_highp3(resultp3)) {
/* at least 1/2 ulp */
if (Dblext_isnotzero_low31p3(resultp3) ||
Dblext_isnotzero_mantissap4(resultp4) ||
Dblext_isone_lowp2(resultp2)) {
/* either exactly half way and odd or
 * more than 1/2ulp */
Dbl_increment(resultp1,resultp2);
}
}
break;
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
/* Round up positive results */
Dbl_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
/* Round down negative results */
Dbl_increment(resultp1,resultp2);
}
/* fall through: ROUNDZERO truncation is a no-op */
case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if (Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
}
if (result_exponent >= DBL_INFINITY_EXPONENT) {
/* Overflow */
if (Is_overflowtrap_enabled()) {
/*
 * Adjust bias of result
 */
Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_OVERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return (OPC_2E_OVERFLOWEXCEPTION);
}
inexact = TRUE;
Set_overflowflag();
/* set result to infinity or largest number per rounding mode */
Dbl_setoverflow(resultp1,resultp2);
} else if (result_exponent <= 0) { /* underflow case */
if (Is_underflowtrap_enabled()) {
/*
 * Adjust bias of result
 */
Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_UNDERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(OPC_2E_UNDERFLOWEXCEPTION);
}
else if (inexact && is_tiny) Set_underflowflag();
}
else Dbl_set_exponent(resultp1,result_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
/*
* Single Floating-point Multiply Fused Add
*/
sgl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
sgl_floating_point *src1ptr, *src2ptr, *src3ptr, *dstptr;
unsigned int *status;
{
unsigned int opnd1, opnd2, opnd3;
register unsigned int tmpresp1, tmpresp2;
unsigned int rightp1, rightp2;
unsigned int resultp1, resultp2 = 0;
register int mpy_exponent, add_exponent, count;
boolean inexact = FALSE, is_tiny = FALSE;
unsigned int signlessleft1, signlessright1, save;
register int result_exponent, diff_exponent;
int sign_save, jumpsize;
Sgl_copyfromptr(src1ptr,opnd1);
Sgl_copyfromptr(src2ptr,opnd2);
Sgl_copyfromptr(src3ptr,opnd3);
/*
* set sign bit of result of multiply
*/
if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2))
Sgl_setnegativezero(resultp1);
else Sgl_setzero(resultp1);
/*
* Generate multiply exponent
*/
mpy_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;
/*
* check first operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd1)) {
if (Sgl_iszero_mantissa(opnd1)) {
if (Sgl_isnotnan(opnd2) && Sgl_isnotnan(opnd3)) {
if (Sgl_iszero_exponentmantissa(opnd2)) {
/*
* invalid since operands are infinity
* and zero
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Sgl_isinfinity(opnd3) &&
(Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Sgl_setinfinity_exponentmantissa(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd1);
}
/*
* is second operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
/*
* is third operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd1,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd2)) {
if (Sgl_iszero_mantissa(opnd2)) {
if (Sgl_isnotnan(opnd3)) {
if (Sgl_iszero_exponentmantissa(opnd1)) {
/*
* invalid since multiply operands are
* zero & infinity
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(opnd2);
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Sgl_isinfinity(opnd3) &&
(Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Sgl_setinfinity_exponentmantissa(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
}
/*
* is third operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check third operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd3)) {
if (Sgl_iszero_mantissa(opnd3)) {
/* return infinity */
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
} else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate multiply mantissa
*/
if (Sgl_isnotzero_exponent(opnd1)) {
/* set hidden bit */
Sgl_clear_signexponent_set_hidden(opnd1);
}
else {
/* check for zero */
if (Sgl_iszero_mantissa(opnd1)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Sgl_iszero_exponentmantissa(opnd3)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Sgl_or_signs(opnd3,resultp1);
} else {
Sgl_and_signs(opnd3,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Sgl_iszero_exponent(opnd3) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Sgl_signextendedsign(opnd3);
result_exponent = 0;
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,result_exponent);
Sgl_set_sign(opnd3,/*using*/sign_save);
Sgl_setwrapped_exponent(opnd3,result_exponent,
unfl);
Sgl_copytoptr(opnd3,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, adjust exponent */
Sgl_clear_signexponent(opnd1);
Sgl_leftshiftby1(opnd1);
Sgl_normalize(opnd1,mpy_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Sgl_isnotzero_exponent(opnd2)) {
Sgl_clear_signexponent_set_hidden(opnd2);
}
else {
/* check for zero */
if (Sgl_iszero_mantissa(opnd2)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Sgl_iszero_exponentmantissa(opnd3)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Sgl_or_signs(opnd3,resultp1);
} else {
Sgl_and_signs(opnd3,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Sgl_iszero_exponent(opnd3) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Sgl_signextendedsign(opnd3);
result_exponent = 0;
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,result_exponent);
Sgl_set_sign(opnd3,/*using*/sign_save);
Sgl_setwrapped_exponent(opnd3,result_exponent,
unfl);
Sgl_copytoptr(opnd3,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/* is denormalized; want to normalize */
Sgl_clear_signexponent(opnd2);
Sgl_leftshiftby1(opnd2);
Sgl_normalize(opnd2,mpy_exponent);
}
/* Multiply the first two source mantissas together */
/*
* The intermediate result will be kept in tmpres,
* which needs enough room for 106 bits of mantissa,
* so lets call it a Double extended.
*/
Sglext_setzero(tmpresp1,tmpresp2);
/*
* Four bits at a time are inspected in each loop, and a
* simple shift and add multiply algorithm is used.
*/
for (count = SGL_P-1; count >= 0; count -= 4) {
Sglext_rightshiftby4(tmpresp1,tmpresp2);
if (Sbit28(opnd1)) {
/* Twoword_add should be an ADD followed by 2 ADDC's */
Twoword_add(tmpresp1, tmpresp2, opnd2<<3, 0);
}
if (Sbit29(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2<<2, 0);
}
if (Sbit30(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2<<1, 0);
}
if (Sbit31(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2, 0);
}
Sgl_rightshiftby4(opnd1);
}
if (Is_sexthiddenoverflow(tmpresp1)) {
/* result mantissa >= 2 (mantissa overflow) */
mpy_exponent++;
Sglext_rightshiftby4(tmpresp1,tmpresp2);
} else {
Sglext_rightshiftby3(tmpresp1,tmpresp2);
}
/*
* Restore the sign of the mpy result which was saved in resultp1.
* The exponent will continue to be kept in mpy_exponent.
*/
Sglext_set_sign(tmpresp1,Sgl_sign(resultp1));
/*
* No rounding is required, since the result of the multiply
* is exact in the extended format.
*/
/*
* Now we are ready to perform the add portion of the operation.
*
* The exponents need to be kept as integers for now, since the
* multiply result might not fit into the exponent field. We
* can't overflow or underflow because of this yet, since the
* add could bring the final result back into range.
*/
add_exponent = Sgl_exponent(opnd3);
/*
* Check for denormalized or zero add operand.
*/
if (add_exponent == 0) {
/* check for zero */
if (Sgl_iszero_mantissa(opnd3)) {
/* right is zero */
/* Left can't be zero and must be result.
*
* The final result is now in tmpres and mpy_exponent,
* and needs to be rounded and squeezed back into
* double precision format from double extended.
*/
result_exponent = mpy_exponent;
Sglext_copy(tmpresp1,tmpresp2,resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);/*save sign*/
goto round;
}
/*
* Neither are zeroes.
* Adjust exponent and normalize add operand.
*/
sign_save = Sgl_signextendedsign(opnd3); /* save sign */
Sgl_clear_signexponent(opnd3);
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,add_exponent);
Sgl_set_sign(opnd3,sign_save); /* restore sign */
} else {
Sgl_clear_exponent_set_hidden(opnd3);
}
/*
* Copy opnd3 to the double extended variable called right.
*/
Sgl_copyto_sglext(opnd3,rightp1,rightp2);
/*
* A zero "save" helps discover equal operands (for later),
* and is used in swapping operands (if needed).
*/
Sglext_xortointp1(tmpresp1,rightp1,/*to*/save);
/*
* Compare magnitude of operands.
*/
Sglext_copytoint_exponentmantissa(tmpresp1,signlessleft1);
Sglext_copytoint_exponentmantissa(rightp1,signlessright1);
if (mpy_exponent < add_exponent || mpy_exponent == add_exponent &&
Sglext_ismagnitudeless(signlessleft1,signlessright1)) {
/*
* Set the left operand to the larger one by XOR swap.
* First finish the first word "save".
*/
Sglext_xorfromintp1(save,rightp1,/*to*/rightp1);
Sglext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
Sglext_swap_lower(tmpresp2,rightp2);
/* also setup exponents used in rest of routine */
diff_exponent = add_exponent - mpy_exponent;
result_exponent = add_exponent;
} else {
/* also setup exponents used in rest of routine */
diff_exponent = mpy_exponent - add_exponent;
result_exponent = mpy_exponent;
}
/* Invariant: left is not smaller than right. */
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for
* this infrequent case.
*/
if (diff_exponent > SGLEXT_THRESHOLD) {
diff_exponent = SGLEXT_THRESHOLD;
}
/* Align right operand by shifting it to the right */
Sglext_clear_sign(rightp1);
Sglext_right_align(rightp1,rightp2,/*shifted by*/diff_exponent);
/* Treat sum and difference of the operands separately. */
if ((int)save < 0) {
/*
* Difference of the two operands. Overflow can occur if the
* multiply overflowed. A borrow can occur out of the hidden
* bit and force a post normalization phase.
*/
Sglext_subtract(tmpresp1,tmpresp2, rightp1,rightp2,
resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);
if (Sgl_iszero_hidden(resultp1)) {
/* Handle normalization */
/* A straightforward algorithm would now shift the
* result and extension left until the hidden bit
* becomes one. Not all of the extension bits need
* participate in the shift. Only the two most
* significant bits (round and guard) are needed.
* If only a single shift is needed then the guard
* bit becomes a significant low order bit and the
* extension must participate in the rounding.
* If more than a single shift is needed, then all
* bits to the right of the guard bit are zeros,
* and the guard bit may or may not be zero. */
Sglext_leftshiftby1(resultp1,resultp2);
/* Need to check for a zero result. The sign and
* exponent fields have already been zeroed. The more
* efficient test of the full object can be used.
*/
if (Sglext_iszero(resultp1,resultp2)) {
/* Must have been "x-x" or "x+(-x)". */
if (Is_rounding_mode(ROUNDMINUS))
Sgl_setone_sign(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if (Sgl_isone_hidden(resultp1)) {
/* No further normalization is needed */
goto round;
}
/* Discover first one bit to determine shift amount.
* Use a modified binary search. We have already
* shifted the result one position right and still
* not found a one so the remainder of the extension
* must be zero and simplifies rounding. */
/* Scan bytes */
while (Sgl_iszero_hiddenhigh7mantissa(resultp1)) {
Sglext_leftshiftby8(resultp1,resultp2);
result_exponent -= 8;
}
/* Now narrow it down to the nibble */
if (Sgl_iszero_hiddenhigh3mantissa(resultp1)) {
/* The lower nibble contains the
* normalizing one */
Sglext_leftshiftby4(resultp1,resultp2);
result_exponent -= 4;
}
/* Select case where first bit is set (already
* normalized) otherwise select the proper shift. */
jumpsize = Sgl_hiddenhigh3mantissa(resultp1);
if (jumpsize <= 7) switch(jumpsize) {
case 1:
Sglext_leftshiftby3(resultp1,resultp2);
result_exponent -= 3;
break;
case 2:
case 3:
Sglext_leftshiftby2(resultp1,resultp2);
result_exponent -= 2;
break;
case 4:
case 5:
case 6:
case 7:
Sglext_leftshiftby1(resultp1,resultp2);
result_exponent -= 1;
break;
}
} /* end if (hidden...)... */
/* Fall through and round */
} /* end if (save < 0)... */
else {
/* Add magnitudes */
Sglext_addition(tmpresp1,tmpresp2,
rightp1,rightp2, /*to*/resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);
if (Sgl_isone_hiddenoverflow(resultp1)) {
/* Prenormalization required. */
Sglext_arithrightshiftby1(resultp1,resultp2);
result_exponent++;
} /* end if hiddenoverflow... */
} /* end else ...add magnitudes... */
/* Round the result. If the extension and lower two words are
* all zeros, then the result is exact. Otherwise round in the
* correct direction. Underflow is possible. If a postnormalization
* is necessary, then the mantissa is all zeros so no shift is needed.
*/
round:
if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
Sglext_denormalize(resultp1,resultp2,result_exponent,is_tiny);
}
Sgl_set_sign(resultp1,/*using*/sign_save);
if (Sglext_isnotzero_mantissap2(resultp2)) {
inexact = TRUE;
switch(Rounding_mode()) {
case ROUNDNEAREST: /* The default. */
if (Sglext_isone_highp2(resultp2)) {
/* at least 1/2 ulp */
if (Sglext_isnotzero_low31p2(resultp2) ||
Sglext_isone_lowp1(resultp1)) {
/* either exactly half way and odd or
* more than 1/2ulp */
Sgl_increment(resultp1);
}
}
break;
case ROUNDPLUS:
if (Sgl_iszero_sign(resultp1)) {
/* Round up positive results */
Sgl_increment(resultp1);
}
break;
case ROUNDMINUS:
if (Sgl_isone_sign(resultp1)) {
/* Round down negative results */
Sgl_increment(resultp1);
}
case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if (Sgl_isone_hiddenoverflow(resultp1)) result_exponent++;
}
if (result_exponent >= SGL_INFINITY_EXPONENT) {
/* Overflow */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(resultp1,result_exponent,ovfl);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_OVERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return (OPC_2E_OVERFLOWEXCEPTION);
}
inexact = TRUE;
Set_overflowflag();
Sgl_setoverflow(resultp1);
} else if (result_exponent <= 0) { /* underflow case */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(resultp1,result_exponent,unfl);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_UNDERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(OPC_2E_UNDERFLOWEXCEPTION);
}
else if (inexact && is_tiny) Set_underflowflag();
}
else Sgl_set_exponent(resultp1,result_exponent);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
/*
 * Single Floating-point Multiply Negate Fused Add
 *
 * Computes -(src1 * src2) + src3 in single precision with a single
 * rounding at the end: the product is formed exactly in a two-word
 * extended mantissa (tmpresp1/tmpresp2), src3 is added, and the sum
 * is rounded once according to the current rounding mode.  The
 * negation is implemented in the sign setup below: the product sign
 * is set POSITIVE when the operand signs differ (the opposite of an
 * ordinary multiply).
 *
 * The result is stored through dstptr.  Returns NOEXCEPTION or an
 * OPC_2E_* exception code (invalid / overflow / underflow / inexact)
 * for the caller's trap handling.
 */
sgl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
sgl_floating_point *src1ptr, *src2ptr, *src3ptr, *dstptr;
/* status: FP status word -- presumably read/updated by the
 * Is_*trap_enabled()/Set_*flag() macros; confirm in math-emu headers. */
unsigned int *status;
{
unsigned int opnd1, opnd2, opnd3;
register unsigned int tmpresp1, tmpresp2;
unsigned int rightp1, rightp2;
unsigned int resultp1, resultp2 = 0;
register int mpy_exponent, add_exponent, count;
boolean inexact = FALSE, is_tiny = FALSE;
unsigned int signlessleft1, signlessright1, save;
register int result_exponent, diff_exponent;
int sign_save, jumpsize;
Sgl_copyfromptr(src1ptr,opnd1);
Sgl_copyfromptr(src2ptr,opnd2);
Sgl_copyfromptr(src3ptr,opnd3);
/*
 * set sign bit of result of multiply: note this is inverted with
 * respect to a plain multiply (positive when signs differ), which
 * realizes the "negate" part of the operation.
 */
if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2))
Sgl_setzero(resultp1);
else
Sgl_setnegativezero(resultp1);
/*
* Generate multiply exponent
*/
mpy_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;
/*
* check first operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd1)) {
if (Sgl_iszero_mantissa(opnd1)) {
if (Sgl_isnotnan(opnd2) && Sgl_isnotnan(opnd3)) {
if (Sgl_iszero_exponentmantissa(opnd2)) {
/*
* invalid since operands are infinity
* and zero
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Sgl_isinfinity(opnd3) &&
(Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Sgl_setinfinity_exponentmantissa(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd1);
}
/*
* is second operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
/*
* is third operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd1,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd2)) {
if (Sgl_iszero_mantissa(opnd2)) {
if (Sgl_isnotnan(opnd3)) {
if (Sgl_iszero_exponentmantissa(opnd1)) {
/*
* invalid since multiply operands are
* zero & infinity
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(opnd2);
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Sgl_isinfinity(opnd3) &&
(Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Sgl_setinfinity_exponentmantissa(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
}
/*
* is third operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check third operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd3)) {
if (Sgl_iszero_mantissa(opnd3)) {
/* return infinity */
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
} else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate multiply mantissa
*/
if (Sgl_isnotzero_exponent(opnd1)) {
/* set hidden bit */
Sgl_clear_signexponent_set_hidden(opnd1);
}
else {
/* check for zero */
if (Sgl_iszero_mantissa(opnd1)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Sgl_iszero_exponentmantissa(opnd3)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Sgl_or_signs(opnd3,resultp1);
} else {
Sgl_and_signs(opnd3,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Sgl_iszero_exponent(opnd3) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Sgl_signextendedsign(opnd3);
result_exponent = 0;
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,result_exponent);
Sgl_set_sign(opnd3,/*using*/sign_save);
Sgl_setwrapped_exponent(opnd3,result_exponent,
unfl);
Sgl_copytoptr(opnd3,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, adjust exponent */
Sgl_clear_signexponent(opnd1);
Sgl_leftshiftby1(opnd1);
Sgl_normalize(opnd1,mpy_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Sgl_isnotzero_exponent(opnd2)) {
Sgl_clear_signexponent_set_hidden(opnd2);
}
else {
/* check for zero */
if (Sgl_iszero_mantissa(opnd2)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Sgl_iszero_exponentmantissa(opnd3)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Sgl_or_signs(opnd3,resultp1);
} else {
Sgl_and_signs(opnd3,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Sgl_iszero_exponent(opnd3) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Sgl_signextendedsign(opnd3);
result_exponent = 0;
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,result_exponent);
Sgl_set_sign(opnd3,/*using*/sign_save);
Sgl_setwrapped_exponent(opnd3,result_exponent,
unfl);
Sgl_copytoptr(opnd3,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/* is denormalized; want to normalize */
Sgl_clear_signexponent(opnd2);
Sgl_leftshiftby1(opnd2);
Sgl_normalize(opnd2,mpy_exponent);
}
/* Multiply the first two source mantissas together */
/*
* The intermediate result will be kept in tmpres,
* which needs enough room for 106 bits of mantissa,
* so lets call it a Double extended.
*/
Sglext_setzero(tmpresp1,tmpresp2);
/*
* Four bits at a time are inspected in each loop, and a
* simple shift and add multiply algorithm is used.
*/
for (count = SGL_P-1; count >= 0; count -= 4) {
Sglext_rightshiftby4(tmpresp1,tmpresp2);
if (Sbit28(opnd1)) {
/* Twoword_add should be an ADD followed by 2 ADDC's */
Twoword_add(tmpresp1, tmpresp2, opnd2<<3, 0);
}
if (Sbit29(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2<<2, 0);
}
if (Sbit30(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2<<1, 0);
}
if (Sbit31(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2, 0);
}
Sgl_rightshiftby4(opnd1);
}
if (Is_sexthiddenoverflow(tmpresp1)) {
/* result mantissa >= 2 (mantissa overflow) */
mpy_exponent++;
Sglext_rightshiftby4(tmpresp1,tmpresp2);
} else {
Sglext_rightshiftby3(tmpresp1,tmpresp2);
}
/*
* Restore the sign of the mpy result which was saved in resultp1.
* The exponent will continue to be kept in mpy_exponent.
*/
Sglext_set_sign(tmpresp1,Sgl_sign(resultp1));
/*
* No rounding is required, since the result of the multiply
* is exact in the extended format.
*/
/*
* Now we are ready to perform the add portion of the operation.
*
* The exponents need to be kept as integers for now, since the
* multiply result might not fit into the exponent field. We
* can't overflow or underflow because of this yet, since the
* add could bring the final result back into range.
*/
add_exponent = Sgl_exponent(opnd3);
/*
* Check for denormalized or zero add operand.
*/
if (add_exponent == 0) {
/* check for zero */
if (Sgl_iszero_mantissa(opnd3)) {
/* right is zero */
/* Left can't be zero and must be result.
*
* The final result is now in tmpres and mpy_exponent,
* and needs to be rounded and squeezed back into
* double precision format from double extended.
*/
result_exponent = mpy_exponent;
Sglext_copy(tmpresp1,tmpresp2,resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);/*save sign*/
goto round;
}
/*
* Neither are zeroes.
* Adjust exponent and normalize add operand.
*/
sign_save = Sgl_signextendedsign(opnd3); /* save sign */
Sgl_clear_signexponent(opnd3);
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,add_exponent);
Sgl_set_sign(opnd3,sign_save); /* restore sign */
} else {
Sgl_clear_exponent_set_hidden(opnd3);
}
/*
* Copy opnd3 to the double extended variable called right.
*/
Sgl_copyto_sglext(opnd3,rightp1,rightp2);
/*
* A zero "save" helps discover equal operands (for later),
* and is used in swapping operands (if needed).
*/
Sglext_xortointp1(tmpresp1,rightp1,/*to*/save);
/*
* Compare magnitude of operands.
*/
Sglext_copytoint_exponentmantissa(tmpresp1,signlessleft1);
Sglext_copytoint_exponentmantissa(rightp1,signlessright1);
/* note: && binds tighter than ||, so the magnitude test only
 * applies when the exponents are equal */
if (mpy_exponent < add_exponent || mpy_exponent == add_exponent &&
Sglext_ismagnitudeless(signlessleft1,signlessright1)) {
/*
* Set the left operand to the larger one by XOR swap.
* First finish the first word "save".
*/
Sglext_xorfromintp1(save,rightp1,/*to*/rightp1);
Sglext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
Sglext_swap_lower(tmpresp2,rightp2);
/* also setup exponents used in rest of routine */
diff_exponent = add_exponent - mpy_exponent;
result_exponent = add_exponent;
} else {
/* also setup exponents used in rest of routine */
diff_exponent = mpy_exponent - add_exponent;
result_exponent = mpy_exponent;
}
/* Invariant: left is not smaller than right. */
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for
* this infrequent case.
*/
if (diff_exponent > SGLEXT_THRESHOLD) {
diff_exponent = SGLEXT_THRESHOLD;
}
/* Align right operand by shifting it to the right */
Sglext_clear_sign(rightp1);
Sglext_right_align(rightp1,rightp2,/*shifted by*/diff_exponent);
/* Treat sum and difference of the operands separately. */
/* "save" holds left XOR right, so its sign bit is set exactly
 * when the operand signs differ (a magnitude subtraction). */
if ((int)save < 0) {
/*
* Difference of the two operands. Overflow can occur if the
* multiply overflowed. A borrow can occur out of the hidden
* bit and force a post normalization phase.
*/
Sglext_subtract(tmpresp1,tmpresp2, rightp1,rightp2,
resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);
if (Sgl_iszero_hidden(resultp1)) {
/* Handle normalization */
/* A straightforward algorithm would now shift the
* result and extension left until the hidden bit
* becomes one. Not all of the extension bits need
* participate in the shift. Only the two most
* significant bits (round and guard) are needed.
* If only a single shift is needed then the guard
* bit becomes a significant low order bit and the
* extension must participate in the rounding.
* If more than a single shift is needed, then all
* bits to the right of the guard bit are zeros,
* and the guard bit may or may not be zero. */
Sglext_leftshiftby1(resultp1,resultp2);
/* Need to check for a zero result. The sign and
* exponent fields have already been zeroed. The more
* efficient test of the full object can be used.
*/
if (Sglext_iszero(resultp1,resultp2)) {
/* Must have been "x-x" or "x+(-x)". */
if (Is_rounding_mode(ROUNDMINUS))
Sgl_setone_sign(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if (Sgl_isone_hidden(resultp1)) {
/* No further normalization is needed */
goto round;
}
/* Discover first one bit to determine shift amount.
* Use a modified binary search. We have already
* shifted the result one position right and still
* not found a one so the remainder of the extension
* must be zero and simplifies rounding. */
/* Scan bytes */
while (Sgl_iszero_hiddenhigh7mantissa(resultp1)) {
Sglext_leftshiftby8(resultp1,resultp2);
result_exponent -= 8;
}
/* Now narrow it down to the nibble */
if (Sgl_iszero_hiddenhigh3mantissa(resultp1)) {
/* The lower nibble contains the
* normalizing one */
Sglext_leftshiftby4(resultp1,resultp2);
result_exponent -= 4;
}
/* Select case where first bit is set (already
* normalized) otherwise select the proper shift. */
jumpsize = Sgl_hiddenhigh3mantissa(resultp1);
if (jumpsize <= 7) switch(jumpsize) {
case 1:
Sglext_leftshiftby3(resultp1,resultp2);
result_exponent -= 3;
break;
case 2:
case 3:
Sglext_leftshiftby2(resultp1,resultp2);
result_exponent -= 2;
break;
case 4:
case 5:
case 6:
case 7:
Sglext_leftshiftby1(resultp1,resultp2);
result_exponent -= 1;
break;
}
} /* end if (hidden...)... */
/* Fall through and round */
} /* end if (save < 0)... */
else {
/* Add magnitudes */
Sglext_addition(tmpresp1,tmpresp2,
rightp1,rightp2, /*to*/resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);
if (Sgl_isone_hiddenoverflow(resultp1)) {
/* Prenormalization required. */
Sglext_arithrightshiftby1(resultp1,resultp2);
result_exponent++;
} /* end if hiddenoverflow... */
} /* end else ...add magnitudes... */
/* Round the result. If the extension and lower two words are
* all zeros, then the result is exact. Otherwise round in the
* correct direction. Underflow is possible. If a postnormalization
* is necessary, then the mantissa is all zeros so no shift is needed.
*/
round:
if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
Sglext_denormalize(resultp1,resultp2,result_exponent,is_tiny);
}
Sgl_set_sign(resultp1,/*using*/sign_save);
if (Sglext_isnotzero_mantissap2(resultp2)) {
inexact = TRUE;
switch(Rounding_mode()) {
case ROUNDNEAREST: /* The default. */
if (Sglext_isone_highp2(resultp2)) {
/* at least 1/2 ulp */
if (Sglext_isnotzero_low31p2(resultp2) ||
Sglext_isone_lowp1(resultp1)) {
/* either exactly half way and odd or
* more than 1/2ulp */
Sgl_increment(resultp1);
}
}
break;
case ROUNDPLUS:
if (Sgl_iszero_sign(resultp1)) {
/* Round up positive results */
Sgl_increment(resultp1);
}
break;
case ROUNDMINUS:
if (Sgl_isone_sign(resultp1)) {
/* Round down negative results */
Sgl_increment(resultp1);
}
/* no break: falling into ROUNDZERO is harmless since it
 * performs no action */
case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if (Sgl_isone_hiddenoverflow(resultp1)) result_exponent++;
}
if (result_exponent >= SGL_INFINITY_EXPONENT) {
/* Overflow */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(resultp1,result_exponent,ovfl);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_OVERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return (OPC_2E_OVERFLOWEXCEPTION);
}
inexact = TRUE;
Set_overflowflag();
Sgl_setoverflow(resultp1);
} else if (result_exponent <= 0) { /* underflow case */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(resultp1,result_exponent,unfl);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_UNDERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(OPC_2E_UNDERFLOWEXCEPTION);
}
else if (inexact && is_tiny) Set_underflowflag();
}
else Sgl_set_exponent(resultp1,result_exponent);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
| gpl-2.0 |
kim6515516/khypervisor_native_linux_3.8_for_rtsm | drivers/video/fbdev/omap2/dss/dpi.c | 172 | 15939 | /*
* linux/drivers/video/omap2/dss/dpi.c
*
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define DSS_SUBSYS_NAME "DPI"
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
#include <linux/of.h>
#include <video/omapdss.h>
#include "dss.h"
#include "dss_features.h"
/* File-scope state for the (single) DPI output. */
static struct {
struct platform_device *pdev;
struct regulator *vdds_dsi_reg;
/* DSI PLL device driving the pixel clock, or NULL when the pixel
 * clock is derived from the DSS fclk (see dpi_set_mode) */
struct platform_device *dsidev;
struct mutex lock;
struct omap_video_timings timings;
struct dss_lcd_mgr_config mgr_config;
int data_lines;
struct omap_dss_device output;
bool port_initialized;
} dpi;
static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
{
/*
* XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
* would also be used for DISPC fclk. Meaning, when the DPI output is
* disabled, DISPC clock will be disabled, and TV out will stop.
*/
switch (omapdss_get_version()) {
case OMAPDSS_VER_OMAP24xx:
case OMAPDSS_VER_OMAP34xx_ES1:
case OMAPDSS_VER_OMAP34xx_ES3:
case OMAPDSS_VER_OMAP3630:
case OMAPDSS_VER_AM35xx:
case OMAPDSS_VER_AM43xx:
return NULL;
case OMAPDSS_VER_OMAP4430_ES1:
case OMAPDSS_VER_OMAP4430_ES2:
case OMAPDSS_VER_OMAP4:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
return dsi_get_dsidev_from_id(0);
case OMAP_DSS_CHANNEL_LCD2:
return dsi_get_dsidev_from_id(1);
default:
return NULL;
}
case OMAPDSS_VER_OMAP5:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
return dsi_get_dsidev_from_id(0);
case OMAP_DSS_CHANNEL_LCD3:
return dsi_get_dsidev_from_id(1);
default:
return NULL;
}
default:
return NULL;
}
}
/*
 * Map a DISPC output channel to the DSI PLL HSDIV clock source used
 * when DPI is clocked from a DSI PLL rather than the DSS fclk.
 */
static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
{
	switch (channel) {
	case OMAP_DSS_CHANNEL_LCD:
		return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC;
	case OMAP_DSS_CHANNEL_LCD2:
	case OMAP_DSS_CHANNEL_LCD3:
		/* LCD2 and LCD3 are both fed from the second DSI PLL */
		return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
	default:
		/* this shouldn't happen */
		WARN_ON(1);
		return OMAP_DSS_CLK_SRC_FCK;
	}
}
/*
 * Context threaded through the clock-rate search callbacks below:
 * the inputs bound the acceptable pixel clock, the outputs receive
 * the PLL/divider settings of the first accepted candidate.
 */
struct dpi_clk_calc_ctx {
struct platform_device *dsidev;
/* inputs */
unsigned long pck_min, pck_max;
/* outputs */
struct dsi_clock_info dsi_cinfo;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
/*
 * dispc_div_calc() callback: accept or reject a DISPC lck/pck divider
 * candidate, recording an accepted one into the calc context.
 */
static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
		unsigned long pck, void *data)
{
	struct dpi_clk_calc_ctx *ctx = data;

	/*
	 * Odd dividers give us uneven duty cycle, causing problem when level
	 * shifted. So skip all odd dividers when the pixel clock is on the
	 * higher side.
	 */
	if (ctx->pck_min >= 100000000 &&
	    ((lckd > 1 && lckd % 2 != 0) || (pckd > 1 && pckd % 2 != 0)))
		return false;

	/* remember this divider combination as the result */
	ctx->dispc_cinfo.lck_div = lckd;
	ctx->dispc_cinfo.pck_div = pckd;
	ctx->dispc_cinfo.lck = lck;
	ctx->dispc_cinfo.pck = pck;

	return true;
}
/*
 * dsi_hsdiv_calc() callback: record an HSDIV divider candidate and
 * continue the search down into the DISPC dividers.
 */
static bool dpi_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
		void *data)
{
	struct dpi_clk_calc_ctx *ctx = data;

	/*
	 * Odd dividers give us uneven duty cycle, causing problem when level
	 * shifted. So skip all odd dividers when the pixel clock is on the
	 * higher side.
	 */
	if (ctx->pck_min >= 100000000) {
		if (regm_dispc > 1 && regm_dispc % 2 != 0)
			return false;
	}

	ctx->dsi_cinfo.regm_dispc = regm_dispc;
	ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;

	return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
			dpi_calc_dispc_cb, ctx);
}
/*
 * dsi_pll_calc() callback: store a DSI PLL configuration candidate,
 * then search for an HSDIV setting that reaches the wanted pck.
 */
static bool dpi_calc_pll_cb(int regn, int regm, unsigned long fint,
		unsigned long pll,
		void *data)
{
	struct dpi_clk_calc_ctx *ctx = data;

	ctx->dsi_cinfo.clkin4ddr = pll;
	ctx->dsi_cinfo.fint = fint;
	ctx->dsi_cinfo.regm = regm;
	ctx->dsi_cinfo.regn = regn;

	return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->pck_min,
			dpi_calc_hsdiv_cb, ctx);
}
/*
 * dss_div_calc() callback: note the candidate DSS fclk, then search
 * the DISPC dividers for a matching pixel clock.
 */
static bool dpi_calc_dss_cb(unsigned long fck, void *data)
{
	struct dpi_clk_calc_ctx *ctx = data;

	ctx->fck = fck;

	return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
			dpi_calc_dispc_cb, ctx);
}
/*
 * Search for a DSI PLL + HSDIV + DISPC divider chain producing a pixel
 * clock within 1 kHz of the requested rate.  On success the settings
 * are left in *ctx.
 */
static bool dpi_dsi_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
{
	unsigned long clkin = dsi_get_pll_clkin(dpi.dsidev);

	memset(ctx, 0, sizeof(*ctx));
	ctx->dsidev = dpi.dsidev;
	ctx->pck_min = pck - 1000;
	ctx->pck_max = pck + 1000;
	ctx->dsi_cinfo.clkin = clkin;

	/* pll_min/pll_max of 0/0 — presumably "no constraint"; see
	 * dsi_pll_calc() for the exact semantics */
	return dsi_pll_calc(dpi.dsidev, clkin, 0, 0,
			dpi_calc_pll_cb, ctx);
}
/*
 * Search for a DSS fclk + DISPC divider combination close to @pck.
 * The DSS fclk offers few rates, so the acceptable pixel-clock window is
 * widened cubically (1000 * i^3 Hz) on each retry, up to ~15 MHz.
 */
static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
{
	int i;

	/*
	 * DSS fck gives us very few possibilities, so finding a good pixel
	 * clock may not be possible. We try multiple times to find the clock,
	 * each time widening the pixel clock range we look for, up to
	 * +/- ~15MHz.
	 */
	for (i = 0; i < 25; ++i) {
		bool ok;

		memset(ctx, 0, sizeof(*ctx));
		/* Guard keeps the unsigned subtraction from wrapping. */
		if (pck > 1000 * i * i * i)
			ctx->pck_min = max(pck - 1000 * i * i * i, 0lu);
		else
			ctx->pck_min = 0;
		ctx->pck_max = pck + 1000 * i * i * i;

		ok = dss_div_calc(pck, ctx->pck_min, dpi_calc_dss_cb, ctx);
		if (ok)
			return ok;
	}

	return false;
}
/*
 * Program the DSI PLL as the pixel clock source for @channel.
 * On success returns 0 and reports the achieved fclk and DISPC dividers
 * through @fck, @lck_div and @pck_div.
 */
static int dpi_set_dsi_clk(enum omap_channel channel,
		unsigned long pck_req, unsigned long *fck, int *lck_div,
		int *pck_div)
{
	struct dpi_clk_calc_ctx ctx;
	int r;
	bool ok;

	ok = dpi_dsi_clk_calc(pck_req, &ctx);
	if (!ok)
		return -EINVAL;

	r = dsi_pll_set_clock_div(dpi.dsidev, &ctx.dsi_cinfo);
	if (r)
		return r;

	/* Switch the LCD manager clock over to the DSI PLL HSDIV output. */
	dss_select_lcd_clk_source(channel,
			dpi_get_alt_clk_src(channel));

	dpi.mgr_config.clock_info = ctx.dispc_cinfo;

	*fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
	*lck_div = ctx.dispc_cinfo.lck_div;
	*pck_div = ctx.dispc_cinfo.pck_div;

	return 0;
}
/*
 * Program the plain DSS fclk as pixel clock source (no DSI PLL available).
 * Reports the achieved fclk and DISPC dividers through the out-parameters.
 */
static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck,
		int *lck_div, int *pck_div)
{
	struct dpi_clk_calc_ctx ctx;
	int r;
	bool ok;

	ok = dpi_dss_clk_calc(pck_req, &ctx);
	if (!ok)
		return -EINVAL;

	r = dss_set_fck_rate(ctx.fck);
	if (r)
		return r;

	dpi.mgr_config.clock_info = ctx.dispc_cinfo;

	*fck = ctx.fck;
	*lck_div = ctx.dispc_cinfo.lck_div;
	*pck_div = ctx.dispc_cinfo.pck_div;

	return 0;
}
/*
 * Apply dpi.timings to @mgr: pick a clock path (DSI PLL if present, plain
 * DSS fclk otherwise), update the timings with the actually-achieved pixel
 * clock, and push them to the manager.
 */
static int dpi_set_mode(struct omap_overlay_manager *mgr)
{
	struct omap_video_timings *t = &dpi.timings;
	int lck_div = 0, pck_div = 0;
	unsigned long fck = 0;
	unsigned long pck;
	int r = 0;

	if (dpi.dsidev)
		r = dpi_set_dsi_clk(mgr->id, t->pixelclock, &fck,
				&lck_div, &pck_div);
	else
		r = dpi_set_dispc_clk(t->pixelclock, &fck,
				&lck_div, &pck_div);
	if (r)
		return r;

	/* Clock tree may not hit the request exactly; record what we got. */
	pck = fck / lck_div / pck_div;

	if (pck != t->pixelclock) {
		/* NOTE(review): %d used for pixelclock -- verify it matches
		 * the field's declared type. */
		DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
			t->pixelclock, pck);

		t->pixelclock = pck;
	}

	dss_mgr_set_timings(mgr, t);

	return 0;
}
/* Push the DPI-specific LCD manager configuration (parallel bypass mode,
 * no stall/handcheck, current data-line width) to @mgr. */
static void dpi_config_lcd_manager(struct omap_overlay_manager *mgr)
{
	dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;

	dpi.mgr_config.stallmode = false;
	dpi.mgr_config.fifohandcheck = false;

	dpi.mgr_config.video_port_width = dpi.data_lines;

	dpi.mgr_config.lcden_sig_polarity = 0;

	dss_mgr_set_lcd_config(mgr, &dpi.mgr_config);
}
/*
 * Power up and start the DPI output: VDDS_DSI regulator (on SoCs that
 * route DPI pads through it), DISPC, optionally the DSI PLL as pixel
 * clock source, then program the mode and enable the overlay manager.
 * Unwinds everything in reverse order on failure.
 */
static int dpi_display_enable(struct omap_dss_device *dssdev)
{
	struct omap_dss_device *out = &dpi.output;
	int r;

	mutex_lock(&dpi.lock);

	if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) && !dpi.vdds_dsi_reg) {
		DSSERR("no VDSS_DSI regulator\n");
		r = -ENODEV;
		goto err_no_reg;
	}

	/*
	 * Fix: 'out' is the address of the static dpi.output and can never
	 * be NULL, so the former 'out == NULL' test was dead code. Only the
	 * manager link needs validating.
	 */
	if (out->manager == NULL) {
		DSSERR("failed to enable display: no output/manager\n");
		r = -ENODEV;
		goto err_no_out_mgr;
	}

	if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) {
		r = regulator_enable(dpi.vdds_dsi_reg);
		if (r)
			goto err_reg_enable;
	}

	r = dispc_runtime_get();
	if (r)
		goto err_get_dispc;

	r = dss_dpi_select_source(out->manager->id);
	if (r)
		goto err_src_sel;

	if (dpi.dsidev) {
		r = dsi_runtime_get(dpi.dsidev);
		if (r)
			goto err_get_dsi;

		r = dsi_pll_init(dpi.dsidev, 0, 1);
		if (r)
			goto err_dsi_pll_init;
	}

	r = dpi_set_mode(out->manager);
	if (r)
		goto err_set_mode;

	dpi_config_lcd_manager(out->manager);

	/* NOTE(review): 2 ms delay before enabling the manager -- presumably
	 * to let the clocks settle; confirm against the TRM. */
	mdelay(2);

	r = dss_mgr_enable(out->manager);
	if (r)
		goto err_mgr_enable;

	mutex_unlock(&dpi.lock);

	return 0;

err_mgr_enable:
err_set_mode:
	if (dpi.dsidev)
		dsi_pll_uninit(dpi.dsidev, true);
err_dsi_pll_init:
	if (dpi.dsidev)
		dsi_runtime_put(dpi.dsidev);
err_get_dsi:
err_src_sel:
	dispc_runtime_put();
err_get_dispc:
	if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
		regulator_disable(dpi.vdds_dsi_reg);
err_reg_enable:
err_no_out_mgr:
err_no_reg:
	mutex_unlock(&dpi.lock);
	return r;
}
/*
 * Stop the DPI output, undoing dpi_display_enable() in reverse: disable
 * the manager, release the DSI PLL (if used) back to the plain fclk,
 * drop the DISPC runtime reference and the regulator.
 */
static void dpi_display_disable(struct omap_dss_device *dssdev)
{
	struct omap_overlay_manager *mgr = dpi.output.manager;

	mutex_lock(&dpi.lock);

	dss_mgr_disable(mgr);

	if (dpi.dsidev) {
		/* Revert the LCD clock source before shutting down the PLL. */
		dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
		dsi_pll_uninit(dpi.dsidev, true);
		dsi_runtime_put(dpi.dsidev);
	}

	dispc_runtime_put();

	if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
		regulator_disable(dpi.vdds_dsi_reg);

	mutex_unlock(&dpi.lock);
}
/* Cache new video timings; they take effect on the next enable/set_mode. */
static void dpi_set_timings(struct omap_dss_device *dssdev,
		struct omap_video_timings *timings)
{
	DSSDBG("dpi_set_timings\n");

	mutex_lock(&dpi.lock);

	dpi.timings = *timings;

	mutex_unlock(&dpi.lock);
}
/* Copy the currently cached video timings into @timings. */
static void dpi_get_timings(struct omap_dss_device *dssdev,
		struct omap_video_timings *timings)
{
	mutex_lock(&dpi.lock);

	*timings = dpi.timings;

	mutex_unlock(&dpi.lock);
}
/*
 * Validate @timings against the manager and the achievable clock tree.
 * Runs the same clock search as dpi_set_mode() but without programming
 * hardware; rewrites timings->pixelclock to the achievable rate.
 * Returns 0 if usable, -EINVAL otherwise.
 */
static int dpi_check_timings(struct omap_dss_device *dssdev,
			struct omap_video_timings *timings)
{
	struct omap_overlay_manager *mgr = dpi.output.manager;
	int lck_div, pck_div;
	unsigned long fck;
	unsigned long pck;
	struct dpi_clk_calc_ctx ctx;
	bool ok;

	/* mgr may still be NULL before connect; skip the manager check then. */
	if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
		return -EINVAL;

	if (timings->pixelclock == 0)
		return -EINVAL;

	if (dpi.dsidev) {
		ok = dpi_dsi_clk_calc(timings->pixelclock, &ctx);
		if (!ok)
			return -EINVAL;

		fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
	} else {
		ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
		if (!ok)
			return -EINVAL;

		fck = ctx.fck;
	}

	lck_div = ctx.dispc_cinfo.lck_div;
	pck_div = ctx.dispc_cinfo.pck_div;

	pck = fck / lck_div / pck_div;

	/* Report the rate the hardware can actually produce. */
	timings->pixelclock = pck;

	return 0;
}
/* Record the parallel interface width (12/16/18/24 style data lines per
 * the DPI spec -- exact valid values enforced elsewhere). */
static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
{
	mutex_lock(&dpi.lock);

	dpi.data_lines = data_lines;

	mutex_unlock(&dpi.lock);
}
/*
 * Probe the DSI PLL once to check it is usable as a DPI clock source.
 * Returns 0 if the PLL initialised cleanly, a negative errno otherwise.
 */
static int dpi_verify_dsi_pll(struct platform_device *dsidev)
{
	int r;

	r = dsi_runtime_get(dsidev);
	if (r)
		return r;

	r = dsi_pll_init(dsidev, 0, 1);
	if (r)
		goto out_put;

	/* PLL came up fine -- tear it down again, this was only a probe. */
	dsi_pll_uninit(dsidev, true);
out_put:
	dsi_runtime_put(dsidev);
	return r;
}
/*
 * Acquire the VDDS_DSI regulator on SoCs where the DPI pads are powered
 * by it. No-op if the feature is absent or the regulator is already held.
 * Returns 0 on success or when nothing is needed, negative errno on error.
 */
static int dpi_init_regulator(void)
{
	struct regulator *vdds_dsi;

	if (!dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
		return 0;

	if (dpi.vdds_dsi_reg)
		return 0;

	vdds_dsi = devm_regulator_get(&dpi.pdev->dev, "vdds_dsi");
	if (IS_ERR(vdds_dsi)) {
		/* Probe deferral is expected; only log real failures. */
		if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
			DSSERR("can't get VDDS_DSI regulator\n");
		return PTR_ERR(vdds_dsi);
	}

	dpi.vdds_dsi_reg = vdds_dsi;

	return 0;
}
/*
 * Resolve and verify a DSI PLL for the DPI channel, caching it in
 * dpi.dsidev. Best-effort: DPI falls back to the plain DSS fclk when no
 * operational PLL is found.
 */
static void dpi_init_pll(void)
{
	struct platform_device *dsidev;

	if (dpi.dsidev)
		return;

	dsidev = dpi_get_dsidev(dpi.output.dispc_channel);
	if (!dsidev)
		return;

	if (dpi_verify_dsi_pll(dsidev)) {
		DSSWARN("DSI PLL not operational\n");
		return;
	}

	dpi.dsidev = dsidev;
}
/*
* Return a hardcoded channel for the DPI output. This should work for
* current use cases, but this can be later expanded to either resolve
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
static enum omap_channel dpi_get_channel(void)
{
	switch (omapdss_get_version()) {
	/* OMAP2/3-era and AM3xxx/AM4xxx parts route DPI through LCD1. */
	case OMAPDSS_VER_OMAP24xx:
	case OMAPDSS_VER_OMAP34xx_ES1:
	case OMAPDSS_VER_OMAP34xx_ES3:
	case OMAPDSS_VER_OMAP3630:
	case OMAPDSS_VER_AM35xx:
	case OMAPDSS_VER_AM43xx:
		return OMAP_DSS_CHANNEL_LCD;

	case OMAPDSS_VER_OMAP4430_ES1:
	case OMAPDSS_VER_OMAP4430_ES2:
	case OMAPDSS_VER_OMAP4:
		return OMAP_DSS_CHANNEL_LCD2;

	case OMAPDSS_VER_OMAP5:
		return OMAP_DSS_CHANNEL_LCD3;

	default:
		/* Unknown SoC: LCD1 is the safest default. */
		DSSWARN("unsupported DSS version\n");
		return OMAP_DSS_CHANNEL_LCD;
	}
}
/*
 * Wire @dst (a panel/encoder) to the DPI output: acquire the regulator
 * and PLL, attach the overlay manager, then bind the devices. Unwinds
 * the manager connection if the final device binding fails.
 */
static int dpi_connect(struct omap_dss_device *dssdev,
		struct omap_dss_device *dst)
{
	struct omap_overlay_manager *mgr;
	int r;

	r = dpi_init_regulator();
	if (r)
		return r;

	/* Best-effort; DPI works without a DSI PLL. */
	dpi_init_pll();

	mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
	if (!mgr)
		return -ENODEV;

	r = dss_mgr_connect(mgr, dssdev);
	if (r)
		return r;

	r = omapdss_output_set_device(dssdev, dst);
	if (r) {
		DSSERR("failed to connect output to new device: %s\n",
				dst->name);
		dss_mgr_disconnect(mgr, dssdev);
		return r;
	}

	return 0;
}
/* Undo dpi_connect(): unbind @dst and detach the overlay manager.
 * Warns and bails if @dst is not the device actually connected. */
static void dpi_disconnect(struct omap_dss_device *dssdev,
		struct omap_dss_device *dst)
{
	WARN_ON(dst != dssdev->dst);

	if (dst != dssdev->dst)
		return;

	omapdss_output_unset_device(dssdev);

	if (dssdev->manager)
		dss_mgr_disconnect(dssdev->manager, dssdev);
}
/* DPI entry points exposed to panel/encoder drivers via out->ops.dpi. */
static const struct omapdss_dpi_ops dpi_ops = {
	.connect = dpi_connect,
	.disconnect = dpi_disconnect,

	.enable = dpi_display_enable,
	.disable = dpi_display_disable,

	.check_timings = dpi_check_timings,
	.set_timings = dpi_set_timings,
	.get_timings = dpi_get_timings,

	.set_data_lines = dpi_set_data_lines,
};
/* Describe and register the single DPI output ("dpi.0") with omapdss. */
static void dpi_init_output(struct platform_device *pdev)
{
	struct omap_dss_device *out = &dpi.output;

	out->dev = &pdev->dev;
	out->id = OMAP_DSS_OUTPUT_DPI;
	out->output_type = OMAP_DISPLAY_TYPE_DPI;
	out->name = "dpi.0";
	out->dispc_channel = dpi_get_channel();
	out->ops.dpi = &dpi_ops;
	out->owner = THIS_MODULE;

	omapdss_register_output(out);
}
/* Unregister the DPI output registered by dpi_init_output(). */
static void __exit dpi_uninit_output(struct platform_device *pdev)
{
	struct omap_dss_device *out = &dpi.output;

	omapdss_unregister_output(out);
}
/* Platform-driver probe (legacy, non-device-tree path). */
static int omap_dpi_probe(struct platform_device *pdev)
{
	dpi.pdev = pdev;

	mutex_init(&dpi.lock);

	dpi_init_output(pdev);

	return 0;
}
/* Platform-driver remove: tear down the registered output. */
static int __exit omap_dpi_remove(struct platform_device *pdev)
{
	dpi_uninit_output(pdev);

	return 0;
}
/* Legacy platform driver for the DPI output (non-DT boards). */
static struct platform_driver omap_dpi_driver = {
	.probe		= omap_dpi_probe,
	.remove         = __exit_p(omap_dpi_remove),
	.driver         = {
		.name   = "omapdss_dpi",
		.owner  = THIS_MODULE,
	},
};
/* Register the legacy DPI platform driver (called from dss core init). */
int __init dpi_init_platform_driver(void)
{
	return platform_driver_register(&omap_dpi_driver);
}
/* Unregister the legacy DPI platform driver. */
void __exit dpi_uninit_platform_driver(void)
{
	platform_driver_unregister(&omap_dpi_driver);
}
/*
 * Device-tree init path: parse the DPI endpoint under @port, read its
 * "data-lines" property and register the output. Returns 0 when there is
 * no endpoint (nothing to do) or on success, negative errno on parse error.
 */
int __init dpi_init_port(struct platform_device *pdev, struct device_node *port)
{
	struct device_node *ep;
	u32 datalines;
	int r;

	ep = omapdss_of_get_next_endpoint(port, NULL);
	if (!ep)
		return 0;

	r = of_property_read_u32(ep, "data-lines", &datalines);
	if (r) {
		DSSERR("failed to parse datalines\n");
		goto err_datalines;
	}

	dpi.data_lines = datalines;

	of_node_put(ep);

	dpi.pdev = pdev;

	mutex_init(&dpi.lock);

	dpi_init_output(pdev);

	/* Remembered so dpi_uninit_port() knows whether to tear down. */
	dpi.port_initialized = true;

	return 0;

err_datalines:
	of_node_put(ep);

	return r;
}
/* Device-tree teardown; no-op if dpi_init_port() never completed. */
void __exit dpi_uninit_port(void)
{
	if (!dpi.port_initialized)
		return;

	dpi_uninit_output(dpi.pdev);
}
| gpl-2.0 |
delapuente/codeaurora_kernel_msm | drivers/media/platform/msm/camera_v2/isp/msm_isp32.c | 172 | 33833 | /* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <mach/iommu.h>
#include "msm_isp32.h"
#include "msm_isp_util.h"
#include "msm_isp_axi_util.h"
#include "msm_isp_stats_util.h"
#include "msm_isp.h"
#include "msm.h"
#include "msm_camera_io_util.h"
#define VFE32_BURST_LEN 2
#define VFE32_UB_SIZE 1024
/* Unified buffer is split equally among write masters; see cfg_axi_ub(). */
#define VFE32_EQUAL_SLICE_UB 204
/* Each AXI write master owns a 0x18-byte register block from 0x4C. */
#define VFE32_WM_BASE(idx) (0x4C + 0x18 * idx)
/* RDI0 config lives at 0x6FC; RDI1/RDI2 follow from 0x734. */
#define VFE32_RDI_BASE(idx) (idx ? 0x734 + 0x4 * (idx - 1) : 0x06FC)
/* XBAR packs four 8-bit WM lanes per 32-bit register starting at 0x40. */
#define VFE32_XBAR_BASE(idx) (0x40 + 0x4 * (idx / 4))
#define VFE32_XBAR_SHIFT(idx) ((idx % 4) * 8)
/* Pick the ping (+0x4) or pong (+0x8) address register for a WM from
 * the hardware ping-pong status bit for that WM. */
#define VFE32_PING_PONG_BASE(wm, ping_pong) \
	(VFE32_WM_BASE(wm) + 0x4 * (1 + (~(ping_pong >> wm) & 0x1)))

#define VFE32_NUM_STATS_TYPE 7
/* Stats ping-pong status bits start above the 7 image WM bits. */
#define VFE32_STATS_PING_PONG_OFFSET 7
#define VFE32_STATS_BASE(idx) (0xF4 + 0xC * idx)
#define VFE32_STATS_PING_PONG_BASE(idx, ping_pong) \
	(VFE32_STATS_BASE(idx) + 0x4 * \
	(~(ping_pong >> (idx + VFE32_STATS_PING_PONG_OFFSET)) & 0x1))

#define VFE32_CLK_IDX 0
/* Clock tables tried in order by msm_vfe32_init_hardware(); the one that
 * enables successfully selects vfe_clk_idx (1 or 2). */
static struct msm_cam_clk_info msm_vfe32_1_clk_info[] = {
	/*vfe32 clock info for B-family: 8610 */
	{"vfe_clk_src", 266670000},
	{"vfe_clk", -1},
	{"vfe_ahb_clk", -1},
	{"csi_vfe_clk", -1},
	{"bus_clk", -1},
};

static struct msm_cam_clk_info msm_vfe32_2_clk_info[] = {
	/*vfe32 clock info for A-family: 8960 */
	{"vfe_clk", 266667000},
	{"vfe_pclk", -1},
	{"csi_vfe_clk", -1},
};
static int msm_vfe32_init_hardware(struct vfe_device *vfe_dev)
{
int rc = -1;
vfe_dev->vfe_clk_idx = 0;
rc = msm_isp_init_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
if (rc < 0) {
pr_err("%s: Bandwidth registration Failed!\n", __func__);
goto bus_scale_register_failed;
}
if (vfe_dev->fs_vfe) {
rc = regulator_enable(vfe_dev->fs_vfe);
if (rc) {
pr_err("%s: Regulator enable failed\n", __func__);
goto fs_failed;
}
}
rc = msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe32_1_clk_info,
vfe_dev->vfe_clk, ARRAY_SIZE(msm_vfe32_1_clk_info), 1);
if (rc < 0) {
rc = msm_cam_clk_enable(&vfe_dev->pdev->dev,
msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
ARRAY_SIZE(msm_vfe32_2_clk_info), 1);
if (rc < 0)
goto clk_enable_failed;
else
vfe_dev->vfe_clk_idx = 2;
} else
vfe_dev->vfe_clk_idx = 1;
vfe_dev->vfe_base = ioremap(vfe_dev->vfe_mem->start,
resource_size(vfe_dev->vfe_mem));
if (!vfe_dev->vfe_base) {
rc = -ENOMEM;
pr_err("%s: vfe ioremap failed\n", __func__);
goto vfe_remap_failed;
}
rc = request_irq(vfe_dev->vfe_irq->start, msm_isp_process_irq,
IRQF_TRIGGER_RISING, "vfe", vfe_dev);
if (rc < 0) {
pr_err("%s: irq request failed\n", __func__);
goto irq_req_failed;
}
return rc;
irq_req_failed:
iounmap(vfe_dev->vfe_base);
vfe_remap_failed:
if (vfe_dev->vfe_clk_idx == 1)
msm_cam_clk_enable(&vfe_dev->pdev->dev,
msm_vfe32_1_clk_info, vfe_dev->vfe_clk,
ARRAY_SIZE(msm_vfe32_1_clk_info), 0);
if (vfe_dev->vfe_clk_idx == 2)
msm_cam_clk_enable(&vfe_dev->pdev->dev,
msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
ARRAY_SIZE(msm_vfe32_2_clk_info), 0);
clk_enable_failed:
regulator_disable(vfe_dev->fs_vfe);
fs_failed:
msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
bus_scale_register_failed:
return rc;
}
/*
 * Power down the VFE32, reversing msm_vfe32_init_hardware(): free the
 * IRQ, kill the tasklet, unmap registers, disable whichever clock table
 * was enabled, drop the regulator and the bandwidth vote.
 */
static void msm_vfe32_release_hardware(struct vfe_device *vfe_dev)
{
	free_irq(vfe_dev->vfe_irq->start, vfe_dev);
	tasklet_kill(&vfe_dev->vfe_tasklet);
	iounmap(vfe_dev->vfe_base);
	if (vfe_dev->vfe_clk_idx == 1)
		msm_cam_clk_enable(&vfe_dev->pdev->dev,
			msm_vfe32_1_clk_info, vfe_dev->vfe_clk,
			ARRAY_SIZE(msm_vfe32_1_clk_info), 0);
	if (vfe_dev->vfe_clk_idx == 2)
		msm_cam_clk_enable(&vfe_dev->pdev->dev,
			msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
			ARRAY_SIZE(msm_vfe32_2_clk_info), 0);
	/* Fix: init_hardware() only enables the regulator when fs_vfe is
	 * non-NULL; mirror that condition here to avoid disabling a
	 * regulator that was never enabled. */
	if (vfe_dev->fs_vfe)
		regulator_disable(vfe_dev->fs_vfe);
	msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
}
/* One-time register defaults after power-up/reset. Write order follows
 * the original bring-up sequence; do not reorder MMIO writes. */
static void msm_vfe32_init_hardware_reg(struct vfe_device *vfe_dev)
{
	/* CGC_OVERRIDE */
	msm_camera_io_w(0x07FFFFFF, vfe_dev->vfe_base + 0xC);
	/* BUS_CFG */
	msm_camera_io_w(0x00000001, vfe_dev->vfe_base + 0x3C);
	/* IRQ mask 0 (0x1C), then clear both status registers (0x24/0x28). */
	msm_camera_io_w(0x01000025, vfe_dev->vfe_base + 0x1C);
	msm_camera_io_w_mb(0x1CFFFFFF, vfe_dev->vfe_base + 0x20);
	msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
	msm_camera_io_w_mb(0x1FFFFFFF, vfe_dev->vfe_base + 0x28);
}
static void msm_vfe32_process_reset_irq(struct vfe_device *vfe_dev,
	uint32_t irq_status0, uint32_t irq_status1)
{
	/* IRQ_STATUS_1 bit 23 is the reset-ack; wake the reset waiter. */
	if (!(irq_status1 & BIT(23)))
		return;

	complete(&vfe_dev->reset_complete);
}
/* Intentionally empty: VFE32 halt completion is handled synchronously in
 * msm_vfe32_axi_halt(); this hook exists only to satisfy the ops table. */
static void msm_vfe32_process_halt_irq(struct vfe_device *vfe_dev,
	uint32_t irq_status0, uint32_t irq_status1)
{
}
static void msm_vfe32_process_eof_irq(struct vfe_device *vfe_dev,
	uint32_t irq_status0, uint32_t irq_status1)
{
	/* IRQ_STATUS_0 bit 1 is end-of-frame; forward it to the ISP core. */
	if (!(irq_status0 & BIT(1)))
		return;

	msm_isp_eof_notify(vfe_dev);
}
/*
 * Handle CAMIF interrupts (IRQ_STATUS_0 bits [4:0]). On SOF (bit 0), and
 * only when the PIX interface carries raw streams exclusively, notify the
 * SOF and service pending stream/framedrop updates.
 */
static void msm_vfe32_process_camif_irq(struct vfe_device *vfe_dev,
	uint32_t irq_status0, uint32_t irq_status1,
	struct msm_isp_timestamp *ts)
{
	if (!(irq_status0 & 0x1F))
		return;

	if (irq_status0 & BIT(0)) {
		ISP_DBG("%s: SOF IRQ\n", __func__);
		/* Raw-only capture: pix-stream SOF handling happens in
		 * process_reg_update() instead. */
		if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
			&& vfe_dev->axi_data.src_info[VFE_PIX_0].
			pix_stream_count == 0) {
			msm_isp_sof_notify(vfe_dev, VFE_PIX_0, ts);
			if (vfe_dev->axi_data.stream_update)
				msm_isp_axi_stream_update(vfe_dev);
			msm_isp_update_framedrop_reg(vfe_dev);
		}
	}
}
/*
 * Decode and log the latched VFE violation status. One bit per pipeline
 * module (bits 0..24); table-driven replacement for 25 copy-pasted
 * branches. Console output is identical to the original, including the
 * historical "supress" spelling.
 */
static void msm_vfe32_process_violation_status(struct vfe_device *vfe_dev)
{
	/* Index == violation bit position. */
	static const char *const violation_name[] = {
		"black", "rolloff", "demux", "demosaic", "crop",
		"scale", "wb", "clf", "matrix", "rgb lut", "la",
		"chroma enhance", "chroma supress mce", "skin enhance",
		"asf", "scale y", "scale cbcr", "chroma subsample",
		"framedrop enc y", "framedrop enc cbcr",
		"framedrop view y", "framedrop view cbcr",
		"realign buf y", "realign buf cb", "realign buf cr",
	};
	uint32_t violation_status = vfe_dev->error_info.violation_status;
	int i;

	if (!violation_status)
		return;

	for (i = 0; i < ARRAY_SIZE(violation_name); i++)
		if (violation_status & BIT(i))
			pr_err("%s: %s violation\n", __func__,
				violation_name[i]);
}
/*
 * Decode and log the latched error mask (IRQ_STATUS_1 semantics): CAMIF
 * errors, stats overwrites, realign-buffer overflows, pipeline violations
 * (delegated to msm_vfe32_process_violation_status), and per-master bus
 * overflows.
 */
static void msm_vfe32_process_error_status(struct vfe_device *vfe_dev)
{
	uint32_t error_status1 = vfe_dev->error_info.error_mask1;
	if (error_status1 & BIT(0))
		pr_err("%s: camif error status: 0x%x\n",
			__func__, vfe_dev->error_info.camif_status);
	if (error_status1 & BIT(1))
		pr_err("%s: stats bhist overwrite\n", __func__);
	if (error_status1 & BIT(2))
		pr_err("%s: stats cs overwrite\n", __func__);
	if (error_status1 & BIT(3))
		pr_err("%s: stats ihist overwrite\n", __func__);
	if (error_status1 & BIT(4))
		pr_err("%s: realign buf y overflow\n", __func__);
	if (error_status1 & BIT(5))
		pr_err("%s: realign buf cb overflow\n", __func__);
	if (error_status1 & BIT(6))
		pr_err("%s: realign buf cr overflow\n", __func__);
	if (error_status1 & BIT(7)) {
		pr_err("%s: violation\n", __func__);
		/* Details were latched in violation_status by read_irq_status. */
		msm_vfe32_process_violation_status(vfe_dev);
	}
	if (error_status1 & BIT(8))
		pr_err("%s: image master 0 bus overflow\n", __func__);
	if (error_status1 & BIT(9))
		pr_err("%s: image master 1 bus overflow\n", __func__);
	if (error_status1 & BIT(10))
		pr_err("%s: image master 2 bus overflow\n", __func__);
	if (error_status1 & BIT(11))
		pr_err("%s: image master 3 bus overflow\n", __func__);
	if (error_status1 & BIT(12))
		pr_err("%s: image master 4 bus overflow\n", __func__);
	if (error_status1 & BIT(13))
		pr_err("%s: image master 5 bus overflow\n", __func__);
	if (error_status1 & BIT(14))
		pr_err("%s: image master 6 bus overflow\n", __func__);
	if (error_status1 & BIT(15))
		pr_err("%s: status ae/bg bus overflow\n", __func__);
	if (error_status1 & BIT(16))
		pr_err("%s: status af/bf bus overflow\n", __func__);
	if (error_status1 & BIT(17))
		pr_err("%s: status awb bus overflow\n", __func__);
	if (error_status1 & BIT(18))
		pr_err("%s: status rs bus overflow\n", __func__);
	if (error_status1 & BIT(19))
		pr_err("%s: status cs bus overflow\n", __func__);
	if (error_status1 & BIT(20))
		pr_err("%s: status ihist bus overflow\n", __func__);
	if (error_status1 & BIT(21))
		pr_err("%s: status skin bhist bus overflow\n", __func__);
	if (error_status1 & BIT(22))
		pr_err("%s: axi error\n", __func__);
}
/*
 * Read both IRQ status registers, acknowledge them, and latch auxiliary
 * error state (CAMIF status, violation status) for later decoding.
 * Read-then-ack order is significant; do not reorder the MMIO accesses.
 */
static void msm_vfe32_read_irq_status(struct vfe_device *vfe_dev,
	uint32_t *irq_status0, uint32_t *irq_status1)
{
	*irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
	*irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
	/* Ack exactly the bits we observed, then latch via IRQ_CMD (0x18). */
	msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x24);
	msm_camera_io_w_mb(*irq_status1, vfe_dev->vfe_base + 0x28);
	msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x18);

	if (*irq_status1 & BIT(0))
		vfe_dev->error_info.camif_status =
		msm_camera_io_r(vfe_dev->vfe_base + 0x204);

	if (*irq_status1 & BIT(7))
		vfe_dev->error_info.violation_status |=
		msm_camera_io_r(vfe_dev->vfe_base + 0x7B4);
}
/*
 * Handle reg-update interrupts: PIX SOF (status0 bit 5) and RDI0/1/2 SOF
 * (status1 bits 26..28). Services pending stream/stats updates, refreshes
 * framedrop state, and re-arms the reg-update command.
 */
static void msm_vfe32_process_reg_update(struct vfe_device *vfe_dev,
	uint32_t irq_status0, uint32_t irq_status1,
	struct msm_isp_timestamp *ts)
{
	/* Fast exit when none of the reg-update bits are set. */
	if (!(irq_status0 & 0x20) && !(irq_status1 & 0x1C000000))
		return;

	if (irq_status0 & BIT(5))
		msm_isp_sof_notify(vfe_dev, VFE_PIX_0, ts);
	if (irq_status1 & BIT(26))
		msm_isp_sof_notify(vfe_dev, VFE_RAW_0, ts);
	if (irq_status1 & BIT(27))
		msm_isp_sof_notify(vfe_dev, VFE_RAW_1, ts);
	if (irq_status1 & BIT(28))
		msm_isp_sof_notify(vfe_dev, VFE_RAW_2, ts);

	if (vfe_dev->axi_data.stream_update)
		msm_isp_axi_stream_update(vfe_dev);
	if (atomic_read(&vfe_dev->stats_data.stats_update))
		msm_isp_stats_stream_update(vfe_dev);
	msm_isp_update_framedrop_reg(vfe_dev);
	msm_isp_update_error_frame_count(vfe_dev);

	/* Re-arm so double-buffered register writes latch next frame. */
	vfe_dev->hw_info->vfe_ops.core_ops.
		reg_update(vfe_dev);
	return;
}
/* Issue REG_UPDATE_CMD (0x260) for all four interfaces so shadowed
 * register writes take effect at the next frame boundary. */
static void msm_vfe32_reg_update(
	struct vfe_device *vfe_dev)
{
	msm_camera_io_w_mb(0xF, vfe_dev->vfe_base + 0x260);
}
/*
 * Trigger a global VFE reset (GLOBAL_RESET_CMD, 0x4) and wait up to 50 ms
 * for the reset-ack interrupt (see msm_vfe32_process_reset_irq).
 * Returns >0 on completion, 0 on timeout, -ERESTARTSYS if interrupted.
 */
static long msm_vfe32_reset_hardware(struct vfe_device *vfe_dev)
{
	init_completion(&vfe_dev->reset_complete);
	msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
	return wait_for_completion_interruptible_timeout(
	   &vfe_dev->reset_complete, msecs_to_jiffies(50));
}
/*
 * Reload the ping-pong address registers for the write masters selected
 * by @reload_mask. The B-family part needs the IRQ state cleared and the
 * bus command register primed first; write order is significant.
 */
static void msm_vfe32_axi_reload_wm(
	struct vfe_device *vfe_dev, uint32_t reload_mask)
{
	if (!vfe_dev->pdev->dev.of_node) {
		/*vfe32 A-family: 8960*/
		msm_camera_io_w_mb(reload_mask, vfe_dev->vfe_base + 0x38);
	} else {
		/*vfe32 B-family: 8610*/
		msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x24);
		msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x28);
		msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x20);
		msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x18);
		/* NOTE(review): magic bus-cmd value from the original
		 * bring-up; meaning not documented here. */
		msm_camera_io_w(0x9AAAAAAA , vfe_dev->vfe_base + 0x600);
		msm_camera_io_w(reload_mask, vfe_dev->vfe_base + 0x38);
	}
}
static void msm_vfe32_axi_enable_wm(struct vfe_device *vfe_dev,
	uint8_t wm_idx, uint8_t enable)
{
	uint32_t reg = VFE32_WM_BASE(wm_idx);
	uint32_t val = msm_camera_io_r(vfe_dev->vfe_base + reg);

	/* Bit 0 of the WM config register gates the write master. */
	val = enable ? (val | 0x1) : (val & ~0x1);

	msm_camera_io_w_mb(val, vfe_dev->vfe_base + reg);
}
/*
 * Program the composite-done mask for this stream's composite group
 * (8 bits per group in reg 0x34) and enable the matching composite IRQ
 * (bits 21+ of IRQ mask 0x1C).
 */
static void msm_vfe32_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
	uint32_t comp_mask, comp_mask_index =
		stream_info->comp_mask_index;
	uint32_t irq_mask;

	comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
	comp_mask &= ~(0x7F << (comp_mask_index * 8));
	comp_mask |= (axi_data->composite_info[comp_mask_index].
		stream_composite_mask << (comp_mask_index * 8));
	msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x34);

	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
	irq_mask |= BIT(comp_mask_index + 21);
	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
}
/* Undo msm_vfe32_axi_cfg_comp_mask(): clear the stream's composite group
 * bits and disable its composite-done IRQ. */
static void msm_vfe32_axi_clear_comp_mask(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
	uint32_t irq_mask;

	comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
	comp_mask &= ~(0x7F << (comp_mask_index * 8));
	msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x34);

	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
	irq_mask &= ~BIT(comp_mask_index + 21);
	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
}
static void msm_vfe32_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	uint32_t mask;

	/* Per-WM done interrupts start at bit 6 of IRQ mask 0 (0x1C). */
	mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
	mask |= BIT(stream_info->wm[0] + 6);
	msm_camera_io_w(mask, vfe_dev->vfe_base + 0x1C);
}
static void msm_vfe32_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	uint32_t mask;

	/* Disable this stream's write-master done interrupt (bit wm+6). */
	mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
	mask &= ~BIT(stream_info->wm[0] + 6);
	msm_camera_io_w(mask, vfe_dev->vfe_base + 0x1C);
}
/*
 * Program the per-path framedrop pattern/period registers from the stream
 * state. A still-running init-frame-drop or an exhausted burst count
 * forces pattern/period to 0 (drop everything). Ends with a reg-update
 * so the shadowed values latch at the next frame.
 */
static void msm_vfe32_cfg_framedrop(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	uint32_t framedrop_pattern = 0, framedrop_period = 0;

	if (stream_info->runtime_init_frame_drop == 0) {
		framedrop_pattern = stream_info->framedrop_pattern;
		framedrop_period = stream_info->framedrop_period;
	}

	if (stream_info->stream_type == BURST_STREAM &&
		stream_info->runtime_burst_frame_count == 0) {
		framedrop_pattern = 0;
		framedrop_period = 0;
	}

	/* Y and CbCr paths get identical pattern/period values. */
	if (stream_info->stream_src == PIX_ENCODER) {
		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x504);
		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x508);
		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x50C);
		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x510);
	} else if (stream_info->stream_src == PIX_VIEWFINDER) {
		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x514);
		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x518);
		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x51C);
		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x520);
	}

	msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x260);
}
static void msm_vfe32_clear_framedrop(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	/* Zero the framedrop pattern registers for this stream's path. */
	switch (stream_info->stream_src) {
	case PIX_ENCODER:
		msm_camera_io_w(0, vfe_dev->vfe_base + 0x50C);
		msm_camera_io_w(0, vfe_dev->vfe_base + 0x510);
		break;
	case PIX_VIEWFINDER:
		msm_camera_io_w(0, vfe_dev->vfe_base + 0x51C);
		msm_camera_io_w(0, vfe_dev->vfe_base + 0x520);
		break;
	default:
		/* RDI/raw paths have no framedrop pattern registers. */
		break;
	}
}
/*
 * Program the raw bit-depth field of the IO_FORMAT register (0x6F8) for
 * CAMIF_RAW or IDEAL_RAW streams. Other sources have no io_format config
 * and are rejected.
 */
static void msm_vfe32_cfg_io_format(struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info)
{
	int bpp, bpp_reg = 0;
	uint32_t io_format_reg;
	/* 8/10/12-bit raw maps to field values 0/1/2. */
	bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);

	switch (bpp) {
	case 8:
		bpp_reg = 0;
		break;
	case 10:
		bpp_reg = 1 << 0;
		break;
	case 12:
		bpp_reg = 1 << 1;
		break;
	}
	io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x6F8);
	switch (stream_info->stream_src) {
	case CAMIF_RAW:
		io_format_reg &= 0xFFFFCFFF;
		io_format_reg |= bpp_reg << 12;
		break;
	case IDEAL_RAW:
		/* NOTE(review): mask 0xFFFFFFC8 clears bits 0-2 and 4-5 but
		 * keeps bit 3, while the field is written at shift 4 --
		 * verify this mask against the register spec. */
		io_format_reg &= 0xFFFFFFC8;
		io_format_reg |= bpp_reg << 4;
		break;
	case PIX_ENCODER:
	case PIX_VIEWFINDER:
	case RDI_INTF_0:
	case RDI_INTF_1:
	case RDI_INTF_2:
	default:
		/* Non-raw sources: nothing to program here. */
		pr_err("%s: Invalid stream source\n", __func__);
		return;
	}
	msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x6F8);
}
/*
 * Program the CAMIF window: input mux / pixel pattern (0x14), frame
 * dimensions (0x1EC) and the active pixel/line crop window (0x1F0/0x1F4),
 * plus the CAMIF input select bits in 0x6FC.
 */
static void msm_vfe32_cfg_camif(struct vfe_device *vfe_dev,
	struct msm_vfe_pix_cfg *pix_cfg)
{
	uint16_t first_pixel, last_pixel, first_line, last_line;
	struct msm_vfe_camif_cfg *camif_cfg = &pix_cfg->camif_cfg;
	uint32_t val;

	first_pixel = camif_cfg->first_pixel;
	last_pixel = camif_cfg->last_pixel;
	first_line = camif_cfg->first_line;
	last_line = camif_cfg->last_line;

	msm_camera_io_w(pix_cfg->input_mux << 16 | pix_cfg->pixel_pattern,
					vfe_dev->vfe_base + 0x14);

	msm_camera_io_w(camif_cfg->lines_per_frame << 16 |
					camif_cfg->pixels_per_line,
					vfe_dev->vfe_base + 0x1EC);

	msm_camera_io_w(first_pixel << 16 | last_pixel,
					vfe_dev->vfe_base + 0x1F0);

	msm_camera_io_w(first_line << 16 | last_line,
					vfe_dev->vfe_base + 0x1F4);

	/* Only touch the 2-bit input-select field; preserve the rest. */
	val = msm_camera_io_r(vfe_dev->vfe_base + 0x6FC);
	val &= 0xFFFFFFFC;
	val |= camif_cfg->camif_input;
	msm_camera_io_w(val, vfe_dev->vfe_base + 0x6FC);
}
/*
 * Enable or disable CAMIF capture. ENABLE_CAMIF sets the bus/vfe enable
 * bits (depending on raw vs pix streams) and issues the start command;
 * DISABLE_CAMIF stops at the frame boundary; DISABLE_CAMIF_IMMEDIATELY
 * aborts capture and fully resets/reinitialises the VFE.
 */
static void msm_vfe32_update_camif_state(
	struct vfe_device *vfe_dev,
	enum msm_isp_camif_update_state update_state)
{
	uint32_t val;
	bool bus_en, vfe_en;
	if (update_state == NO_UPDATE)
		return;

	val = msm_camera_io_r(vfe_dev->vfe_base + 0x1E4);
	if (update_state == ENABLE_CAMIF) {
		bus_en =
		((vfe_dev->axi_data.src_info[
			VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
		vfe_en =
		((vfe_dev->axi_data.src_info[
			VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
		val &= 0xFFFFFF3F;
		val = val | bus_en << 7 | vfe_en << 6;
		msm_camera_io_w(val, vfe_dev->vfe_base + 0x1E4);
		/* CAMIF_COMMAND: start capture. */
		msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1E0);
		vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
	} else if (update_state == DISABLE_CAMIF) {
		msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x1E0);
		vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
	} else if (update_state == DISABLE_CAMIF_IMMEDIATELY) {
		/* Abort, then halt the AXI bridge and reset the core. */
		msm_camera_io_w_mb(0x6, vfe_dev->vfe_base + 0x1E0);
		vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev);
		vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev);
		vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
		vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
	}
}
/*
 * Configure one RDI (raw data interface). The frame-based flags for all
 * RDIs live in RDI0's register (bits 16..18); the per-RDI register then
 * gets its stream select, CID and enable bits.
 */
static void msm_vfe32_cfg_rdi_reg(struct vfe_device *vfe_dev,
	struct msm_vfe_rdi_cfg *rdi_cfg, enum msm_vfe_input_src input_src)
{
	uint8_t rdi = input_src - VFE_RAW_0;
	uint32_t rdi_reg_cfg;
	rdi_reg_cfg = msm_camera_io_r(
		vfe_dev->vfe_base + VFE32_RDI_BASE(0));
	rdi_reg_cfg &= ~(BIT(16 + rdi));
	rdi_reg_cfg |= rdi_cfg->frame_based << (16 + rdi);
	msm_camera_io_w(rdi_reg_cfg,
		vfe_dev->vfe_base + VFE32_RDI_BASE(0));

	rdi_reg_cfg = msm_camera_io_r(
		vfe_dev->vfe_base + VFE32_RDI_BASE(rdi));
	rdi_reg_cfg &= 0x70003;
	/* Stream select (rdi*3) in [31:28], CID in [8:4], bit 2 = enable. */
	rdi_reg_cfg |= (rdi * 3) << 28 | rdi_cfg->cid << 4 | 0x4;
	msm_camera_io_w(
		rdi_reg_cfg, vfe_dev->vfe_base + VFE32_RDI_BASE(rdi));
}
/*
 * Program a write master's image-size and buffer-config registers for one
 * plane. Line-based streams get WR_IMAGE_SIZE + WR_BUFFER_CFG; frame-based
 * streams set the frame-mode bit in the WM config and only WR_BUFFER_CFG.
 */
static void msm_vfe32_axi_cfg_wm_reg(
	struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info,
	uint8_t plane_idx)
{
	uint32_t val;
	uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);

	if (!stream_info->frame_based) {
		/*WR_IMAGE_SIZE*/
		val =
			((msm_isp_cal_word_per_line(
			stream_info->output_format,
			stream_info->plane_cfg[plane_idx].
			output_width)+1)/2 - 1) << 16 |
			(stream_info->plane_cfg[plane_idx].
			output_height - 1);
		msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);

		/*WR_BUFFER_CFG*/
		val =
			msm_isp_cal_word_per_line(
			stream_info->output_format,
			stream_info->plane_cfg[plane_idx].
			output_stride) << 16 |
			(stream_info->plane_cfg[plane_idx].
			output_height - 1) << 4 | VFE32_BURST_LEN;
		msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
	} else {
		/* Frame-based: bit 1 of the WM config selects frame mode. */
		msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
		val =
			msm_isp_cal_word_per_line(
			stream_info->output_format,
			stream_info->plane_cfg[plane_idx].
			output_width) << 16 |
			(stream_info->plane_cfg[plane_idx].
			output_height - 1) << 4 | VFE32_BURST_LEN;
		msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
	}
	return;
}
static void msm_vfe32_axi_clear_wm_reg(
	struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
	uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);

	/* Zero WR_IMAGE_SIZE (+0x10) and WR_BUFFER_CFG (+0x14). */
	msm_camera_io_w(0, vfe_dev->vfe_base + wm_base + 0x10);
	msm_camera_io_w(0, vfe_dev->vfe_base + wm_base + 0x14);
}
/*
 * Route a pipeline source into this write master via the XBAR. Each WM
 * owns one byte lane of an XBAR register; the byte encodes source select,
 * pair-stream enable/swap and viewfinder enable.
 */
static void msm_vfe32_axi_cfg_wm_xbar_reg(
	struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
	struct msm_vfe_axi_plane_cfg *plane_cfg =
		&stream_info->plane_cfg[plane_idx];
	uint8_t wm = stream_info->wm[plane_idx];
	uint32_t xbar_cfg = 0;
	uint32_t xbar_reg_cfg = 0;

	switch (stream_info->stream_src) {
	case PIX_ENCODER:
	case PIX_VIEWFINDER: {
		if (plane_cfg->output_plane_format != CRCB_PLANE &&
			plane_cfg->output_plane_format != CBCR_PLANE) {
			/*SINGLE_STREAM_SEL*/
			xbar_cfg |= plane_cfg->output_plane_format << 5;
		} else {
			switch (stream_info->output_format) {
			case V4L2_PIX_FMT_NV12:
			case V4L2_PIX_FMT_NV16:
				xbar_cfg |= 0x3 << 3; /*PAIR_STREAM_SWAP_CTRL*/
				break;
			}
			xbar_cfg |= BIT(1); /*PAIR_STREAM_EN*/
		}
		if (stream_info->stream_src == PIX_VIEWFINDER)
			xbar_cfg |= 0x1; /*VIEW_STREAM_EN*/
		break;
	}
	/* Raw/RDI sources use fixed selector values. */
	case CAMIF_RAW:
		xbar_cfg = 0x60;
		break;
	case IDEAL_RAW:
		xbar_cfg = 0x80;
		break;
	case RDI_INTF_0:
		xbar_cfg = 0xA0;
		break;
	case RDI_INTF_1:
		xbar_cfg = 0xC0;
		break;
	case RDI_INTF_2:
		xbar_cfg = 0xE0;
		break;
	default:
		pr_err("%s: Invalid stream src\n", __func__);
	}
	/* Read-modify-write only this WM's byte lane. */
	xbar_reg_cfg = msm_camera_io_r(vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
	xbar_reg_cfg &= ~(0xFF << VFE32_XBAR_SHIFT(wm));
	xbar_reg_cfg |= (xbar_cfg << VFE32_XBAR_SHIFT(wm));
	msm_camera_io_w(xbar_reg_cfg, vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
	return;
}
static void msm_vfe32_axi_clear_wm_xbar_reg(
	struct vfe_device *vfe_dev,
	struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
	uint8_t wm = stream_info->wm[plane_idx];
	uint32_t reg = VFE32_XBAR_BASE(wm);
	uint32_t cfg;

	/* Clear this write master's byte lane in the XBAR register. */
	cfg = msm_camera_io_r(vfe_dev->vfe_base + reg);
	cfg &= ~(0xFF << VFE32_XBAR_SHIFT(wm));
	msm_camera_io_w(cfg, vfe_dev->vfe_base + reg);
}
/*
 * msm_vfe32_cfg_axi_ub - carve the AXI unpacker buffer (UB) into equal
 * slices, one per write master.
 *
 * Each WM's UB register (WM base + 0xC) takes the slice offset in the
 * upper half-word and (slice size - 1) in the lower half-word.
 */
static void msm_vfe32_cfg_axi_ub(struct vfe_device *vfe_dev)
{
int i;
uint32_t ub_offset = 0;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
msm_camera_io_w(ub_offset << 16 | (VFE32_EQUAL_SLICE_UB - 1),
vfe_dev->vfe_base + VFE32_WM_BASE(i) + 0xC);
ub_offset += VFE32_EQUAL_SLICE_UB;
}
}
/*
 * msm_vfe32_update_ping_pong_addr - program the next buffer address for a
 * write master. pingpong_status selects whether the ping or the pong
 * address register is written (resolved inside VFE32_PING_PONG_BASE).
 */
static void msm_vfe32_update_ping_pong_addr(struct vfe_device *vfe_dev,
uint8_t wm_idx, uint32_t pingpong_status, unsigned long paddr)
{
msm_camera_io_w(paddr, vfe_dev->vfe_base +
VFE32_PING_PONG_BASE(wm_idx, pingpong_status));
}
/*
 * msm_vfe32_axi_halt - request an AXI bridge halt and mask its IRQ.
 *
 * Writes the halt request (0x1D8), polls the status register (0x1DC)
 * until bit 0 reports halt-done, clears the request, and then masks
 * bit 24 in IRQ mask 0 (0x20) to disable the AXI interrupt.
 *
 * NOTE(review): the poll loop is unbounded -- if the hardware never
 * asserts halt-done this spins forever in the caller's context;
 * consider a bounded retry with a timeout.
 */
static long msm_vfe32_axi_halt(struct vfe_device *vfe_dev)
{
uint32_t halt_mask;
uint32_t axi_busy_flag = true;
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1D8);
while (axi_busy_flag) {
if (msm_camera_io_r(
vfe_dev->vfe_base + 0x1DC) & 0x1)
axi_busy_flag = false;
}
msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x1D8);
halt_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x20);
halt_mask &= 0xFEFFFFFF;
/* Disable AXI IRQ */
msm_camera_io_w_mb(halt_mask, vfe_dev->vfe_base + 0x20);
return 0;
}
/*
 * msm_vfe32_get_wm_mask - extract the 7 write-master done bits
 * (irq_status0 bits [12:6]); irq_status1 is unused on this core.
 */
static uint32_t msm_vfe32_get_wm_mask(
	uint32_t irq_status0, uint32_t irq_status1)
{
	return (irq_status0 & 0x1FC0) >> 6;
}
/*
 * msm_vfe32_get_comp_mask - extract the 3 composite-done bits
 * (irq_status0 bits [23:21]); irq_status1 is unused on this core.
 */
static uint32_t msm_vfe32_get_comp_mask(
	uint32_t irq_status0, uint32_t irq_status1)
{
	return (irq_status0 & (0x7u << 21)) >> 21;
}
/* Read the hardware ping/pong status register (offset 0x180). */
static uint32_t msm_vfe32_get_pingpong_status(struct vfe_device *vfe_dev)
{
return msm_camera_io_r(vfe_dev->vfe_base + 0x180);
}
/*
 * msm_vfe32_get_stats_idx - map a stats type to its hardware WM slot.
 *
 * Several stats types share a slot (AEC/BG -> 0, AF/BF -> 1,
 * SKIN/BHIST -> 6) because only one of each pair can be active at a
 * time on this core.
 *
 * Returns the slot index, or -EINVAL for an unsupported type.
 */
static int msm_vfe32_get_stats_idx(enum msm_isp_stats_type stats_type)
{
switch (stats_type) {
case MSM_ISP_STATS_AEC:
case MSM_ISP_STATS_BG:
return 0;
case MSM_ISP_STATS_AF:
case MSM_ISP_STATS_BF:
return 1;
case MSM_ISP_STATS_AWB:
return 2;
case MSM_ISP_STATS_RS:
return 3;
case MSM_ISP_STATS_CS:
return 4;
case MSM_ISP_STATS_IHIST:
return 5;
case MSM_ISP_STATS_SKIN:
case MSM_ISP_STATS_BHIST:
return 6;
default:
pr_err("%s: Invalid stats type\n", __func__);
return -EINVAL;
}
}
/*
 * Intentional no-op: VFE32 has no stats composite masking
 * (num_stats_comp_mask is 0 in msm_vfe32_stats_hw_info below).
 */
static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
uint32_t stats_mask, uint8_t enable)
{
return;
}
/*
 * Enable the per-stream stats IRQ: stats slot N maps to bit (N + 13)
 * of IRQ mask register 0 (offset 0x1C).
 */
static void msm_vfe32_stats_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
uint32_t irq_mask;
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
irq_mask |= BIT(STATS_IDX(stream_info->stream_handle) + 13);
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
return;
}
/*
 * Disable the per-stream stats IRQ: inverse of
 * msm_vfe32_stats_cfg_wm_irq_mask (clears bit (slot + 13) of 0x1C).
 */
static void msm_vfe32_stats_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
uint32_t irq_mask;
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
irq_mask &= ~(BIT(STATS_IDX(stream_info->stream_handle) + 13));
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
return;
}
/* Intentional no-op: VFE3.x stats WMs need no per-stream register setup. */
static void msm_vfe32_stats_cfg_wm_reg(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
/*Nothing to configure for VFE3.x*/
return;
}
/* Intentional no-op: counterpart of msm_vfe32_stats_cfg_wm_reg. */
static void msm_vfe32_stats_clear_wm_reg(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
/*Nothing to configure for VFE3.x*/
return;
}
/*
 * msm_vfe32_stats_cfg_ub - allocate UB space for the stats write masters.
 *
 * Slices are carved downward from the top of the UB (VFE32_UB_SIZE),
 * leaving the bottom region for the image WMs (see msm_vfe32_cfg_axi_ub).
 * Each stats WM register (stats base + 0x8) takes offset << 16 | (size-1).
 * Table order follows the stats slot indices (BG..BHIST).
 */
static void msm_vfe32_stats_cfg_ub(struct vfe_device *vfe_dev)
{
int i;
uint32_t ub_offset = VFE32_UB_SIZE;
uint32_t ub_size[VFE32_NUM_STATS_TYPE] = {
64, /*MSM_ISP_STATS_BG*/
64, /*MSM_ISP_STATS_BF*/
16, /*MSM_ISP_STATS_AWB*/
8, /*MSM_ISP_STATS_RS*/
16, /*MSM_ISP_STATS_CS*/
16, /*MSM_ISP_STATS_IHIST*/
16, /*MSM_ISP_STATS_BHIST*/
};
for (i = 0; i < VFE32_NUM_STATS_TYPE; i++) {
ub_offset -= ub_size[i];
msm_camera_io_w(ub_offset << 16 | (ub_size[i] - 1),
vfe_dev->vfe_base + VFE32_STATS_BASE(i) + 0x8);
}
return;
}
/*
 * msm_vfe32_stats_enable_module - gate stats engines in the module-cfg
 * register (offset 0x10).
 *
 * Stats slots 0..4 map to module-cfg bits 5..9, IHIST (slot 5) to
 * bit 16, and SKIN/BHIST (slot 6) to bit 19. The accumulated mask is
 * set or cleared depending on @enable.
 */
static void msm_vfe32_stats_enable_module(struct vfe_device *vfe_dev,
uint32_t stats_mask, uint8_t enable)
{
int i;
uint32_t module_cfg, module_cfg_mask = 0;
for (i = 0; i < VFE32_NUM_STATS_TYPE; i++) {
if ((stats_mask >> i) & 0x1) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
case 4:
module_cfg_mask |= 1 << (5 + i);
break;
case 5:
module_cfg_mask |= 1 << 16;
break;
case 6:
module_cfg_mask |= 1 << 19;
break;
default:
pr_err("%s: Invalid stats mask\n", __func__);
return;
}
}
}
module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x10);
if (enable)
module_cfg |= module_cfg_mask;
else
module_cfg &= ~module_cfg_mask;
msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x10);
}
/*
 * Program the next stats buffer address; pingpong_status selects the
 * ping or pong register (resolved inside VFE32_STATS_PING_PONG_BASE).
 */
static void msm_vfe32_stats_update_ping_pong_addr(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
unsigned long paddr)
{
int stats_idx = STATS_IDX(stream_info->stream_handle);
msm_camera_io_w(paddr, vfe_dev->vfe_base +
VFE32_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
}
/*
 * msm_vfe32_stats_get_wm_mask - extract the 7 stats WM done bits
 * (irq_status0 bits [19:13]); irq_status1 is unused on this core.
 */
static uint32_t msm_vfe32_stats_get_wm_mask(uint32_t irq_status0,
	uint32_t irq_status1)
{
	return (irq_status0 & (0x7Fu << 13)) >> 13;
}
/*
 * msm_vfe32_stats_get_comp_mask - extract the single stats composite
 * done bit (irq_status0 bit 24); irq_status1 is unused on this core.
 */
static uint32_t msm_vfe32_stats_get_comp_mask(uint32_t irq_status0,
	uint32_t irq_status1)
{
	return !!(irq_status0 & (1u << 24));
}
/* Stats frames are stamped with the pixel-interface frame counter. */
static uint32_t msm_vfe32_stats_get_frame_id(struct vfe_device *vfe_dev)
{
return vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
}
/*
 * msm_vfe32_get_platform_data - collect the platform resources this VFE
 * needs: register block, IRQ, "vdd" regulator, and two IOMMU contexts.
 *
 * Returns 0 on success or -ENODEV if any resource is missing. On
 * failure no acquired reference is leaked (the regulator obtained with
 * regulator_get() is released if a later lookup fails).
 */
static int msm_vfe32_get_platform_data(struct vfe_device *vfe_dev)
{
	int rc = 0;

	vfe_dev->vfe_mem = platform_get_resource_byname(vfe_dev->pdev,
		IORESOURCE_MEM, "vfe");
	if (!vfe_dev->vfe_mem) {
		pr_err("%s: no mem resource?\n", __func__);
		rc = -ENODEV;
		goto vfe_no_resource;
	}

	vfe_dev->vfe_irq = platform_get_resource_byname(vfe_dev->pdev,
		IORESOURCE_IRQ, "vfe");
	if (!vfe_dev->vfe_irq) {
		pr_err("%s: no irq resource?\n", __func__);
		rc = -ENODEV;
		goto vfe_no_resource;
	}

	vfe_dev->fs_vfe = regulator_get(&vfe_dev->pdev->dev, "vdd");
	if (IS_ERR(vfe_dev->fs_vfe)) {
		pr_err("%s: Regulator get failed %ld\n", __func__,
			PTR_ERR(vfe_dev->fs_vfe));
		vfe_dev->fs_vfe = NULL;
		rc = -ENODEV;
		goto vfe_no_resource;
	}

	/*
	 * Legacy (board-file) targets publish the two context banks
	 * individually; DT targets expose a single "vfe0" context.
	 */
	if (!vfe_dev->pdev->dev.of_node)
		vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe_imgwr");
	else
		vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe0");
	if (!vfe_dev->iommu_ctx[0]) {
		pr_err("%s: no iommux ctx resource?\n", __func__);
		rc = -ENODEV;
		goto vfe_put_regulator;
	}

	if (!vfe_dev->pdev->dev.of_node)
		vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe_misc");
	else
		vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe0");
	if (!vfe_dev->iommu_ctx[1]) {
		pr_err("%s: no iommux ctx resource?\n", __func__);
		rc = -ENODEV;
		goto vfe_put_regulator;
	}

	return rc;

vfe_put_regulator:
	/* Bug fix: the regulator reference was previously leaked here. */
	regulator_put(vfe_dev->fs_vfe);
	vfe_dev->fs_vfe = NULL;
vfe_no_resource:
	return rc;
}
/*
 * msm_vfe32_get_error_mask - report which IRQ status bits are error
 * conditions: none on status0, the low 23 bits on status1.
 */
static void msm_vfe32_get_error_mask(uint32_t *error_mask0,
	uint32_t *error_mask1)
{
	*error_mask0 = 0;
	*error_mask1 = 0x007FFFFF;
}
/* AXI capabilities of the VFE32 core: 4 image WMs, 3 composite masks,
 * 3 raw-dump (RDI) interfaces each with one master. */
struct msm_vfe_axi_hardware_info msm_vfe32_axi_hw_info = {
.num_wm = 4,
.num_comp_mask = 3,
.num_rdi = 3,
.num_rdi_master = 3,
};
/* Stats capabilities: the mask lists all supported stats types, several
 * of which share hardware slots (see msm_vfe32_get_stats_idx); no stats
 * composite IRQ masking on this core. */
static struct msm_vfe_stats_hardware_info msm_vfe32_stats_hw_info = {
.stats_capability_mask =
1 << MSM_ISP_STATS_AEC | 1 << MSM_ISP_STATS_BG |
1 << MSM_ISP_STATS_AF | 1 << MSM_ISP_STATS_BF |
1 << MSM_ISP_STATS_AWB | 1 << MSM_ISP_STATS_IHIST |
1 << MSM_ISP_STATS_RS | 1 << MSM_ISP_STATS_CS |
1 << MSM_ISP_STATS_SKIN | 1 << MSM_ISP_STATS_BHIST,
.stats_ping_pong_offset = VFE32_STATS_PING_PONG_OFFSET,
.num_stats_type = VFE32_NUM_STATS_TYPE,
.num_stats_comp_mask = 0,
};
/* V4L2 subdevice plumbing: ioctl/event handling is delegated to the
 * common msm_isp layer. */
static struct v4l2_subdev_core_ops msm_vfe32_subdev_core_ops = {
.ioctl = msm_isp_ioctl,
.subscribe_event = msm_isp_subscribe_event,
.unsubscribe_event = msm_isp_unsubscribe_event,
};
static struct v4l2_subdev_ops msm_vfe32_subdev_ops = {
.core = &msm_vfe32_subdev_core_ops,
};
static struct v4l2_subdev_internal_ops msm_vfe32_internal_ops = {
.open = msm_isp_open_node,
.close = msm_isp_close_node,
};
/*
 * vfe32_hw_info - the complete hardware description/ops table for the
 * VFE32 core, consumed by the common msm_isp framework. IRQ, AXI, core
 * and stats operations are bound to the VFE32-specific implementations
 * in this file; shared handlers (AXI/stats IRQ dispatch) come from the
 * generic msm_isp layer.
 */
struct msm_vfe_hardware_info vfe32_hw_info = {
.num_iommu_ctx = 2,
.vfe_clk_idx = VFE32_CLK_IDX,
.vfe_ops = {
.irq_ops = {
.read_irq_status = msm_vfe32_read_irq_status,
.process_camif_irq = msm_vfe32_process_camif_irq,
.process_reset_irq = msm_vfe32_process_reset_irq,
.process_halt_irq = msm_vfe32_process_halt_irq,
.process_eof_irq = msm_vfe32_process_eof_irq,
.process_reg_update = msm_vfe32_process_reg_update,
.process_axi_irq = msm_isp_process_axi_irq,
.process_stats_irq = msm_isp_process_stats_irq,
},
.axi_ops = {
.reload_wm = msm_vfe32_axi_reload_wm,
.enable_wm = msm_vfe32_axi_enable_wm,
.cfg_io_format = msm_vfe32_cfg_io_format,
.cfg_comp_mask = msm_vfe32_axi_cfg_comp_mask,
.clear_comp_mask = msm_vfe32_axi_clear_comp_mask,
.cfg_wm_irq_mask = msm_vfe32_axi_cfg_wm_irq_mask,
.clear_wm_irq_mask = msm_vfe32_axi_clear_wm_irq_mask,
.cfg_framedrop = msm_vfe32_cfg_framedrop,
.clear_framedrop = msm_vfe32_clear_framedrop,
.cfg_wm_reg = msm_vfe32_axi_cfg_wm_reg,
.clear_wm_reg = msm_vfe32_axi_clear_wm_reg,
.cfg_wm_xbar_reg = msm_vfe32_axi_cfg_wm_xbar_reg,
.clear_wm_xbar_reg = msm_vfe32_axi_clear_wm_xbar_reg,
.cfg_ub = msm_vfe32_cfg_axi_ub,
.update_ping_pong_addr =
msm_vfe32_update_ping_pong_addr,
.get_comp_mask = msm_vfe32_get_comp_mask,
.get_wm_mask = msm_vfe32_get_wm_mask,
.get_pingpong_status = msm_vfe32_get_pingpong_status,
.halt = msm_vfe32_axi_halt,
},
.core_ops = {
.reg_update = msm_vfe32_reg_update,
.cfg_camif = msm_vfe32_cfg_camif,
.update_camif_state = msm_vfe32_update_camif_state,
.cfg_rdi_reg = msm_vfe32_cfg_rdi_reg,
.reset_hw = msm_vfe32_reset_hardware,
.init_hw = msm_vfe32_init_hardware,
.init_hw_reg = msm_vfe32_init_hardware_reg,
.release_hw = msm_vfe32_release_hardware,
.get_platform_data = msm_vfe32_get_platform_data,
.get_error_mask = msm_vfe32_get_error_mask,
.process_error_status = msm_vfe32_process_error_status,
},
.stats_ops = {
.get_stats_idx = msm_vfe32_get_stats_idx,
.cfg_comp_mask = msm_vfe32_stats_cfg_comp_mask,
.cfg_wm_irq_mask = msm_vfe32_stats_cfg_wm_irq_mask,
.clear_wm_irq_mask = msm_vfe32_stats_clear_wm_irq_mask,
.cfg_wm_reg = msm_vfe32_stats_cfg_wm_reg,
.clear_wm_reg = msm_vfe32_stats_clear_wm_reg,
.cfg_ub = msm_vfe32_stats_cfg_ub,
.enable_module = msm_vfe32_stats_enable_module,
.update_ping_pong_addr =
msm_vfe32_stats_update_ping_pong_addr,
.get_comp_mask = msm_vfe32_stats_get_comp_mask,
.get_wm_mask = msm_vfe32_stats_get_wm_mask,
.get_frame_id = msm_vfe32_stats_get_frame_id,
.get_pingpong_status = msm_vfe32_get_pingpong_status,
},
},
.dmi_reg_offset = 0x5A0,
.axi_hw_info = &msm_vfe32_axi_hw_info,
.stats_hw_info = &msm_vfe32_stats_hw_info,
.subdev_ops = &msm_vfe32_subdev_ops,
.subdev_internal_ops = &msm_vfe32_internal_ops,
};
EXPORT_SYMBOL(vfe32_hw_info);
/*
* Cryptographic API.
*
* s390 implementation of the DES Cipher Algorithm.
*
* Copyright IBM Corp. 2003,2007
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include "crypt_s390.h"
#define DES3_192_KEY_SIZE (3 * DES_KEY_SIZE)
/* Per-tfm context: raw key plus IV scratch space handed to the CPACF
 * KMC instruction (which maintains the chaining value in-place). */
struct crypt_s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES_KEY_SIZE];
};
/* Same layout for triple-DES with a 192-bit (3 x 64-bit) key. */
struct crypt_s390_des3_192_ctx {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES3_192_KEY_SIZE];
};
/*
 * des_setkey - store a single-DES key.
 *
 * Weak keys are rejected only when the caller asked for weak-key
 * checking (CRYPTO_TFM_REQ_WEAK_KEY); des_ekey() is used purely as the
 * weakness test -- the actual key schedule is computed by the CPACF
 * hardware, so the raw key bytes are just copied into the context.
 */
static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
u32 tmp[DES_EXPKEY_WORDS];
/* check for weak keys */
if (!des_ekey(tmp, key) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
memcpy(dctx->key, key, keylen);
return 0;
}
/* Encrypt one 8-byte block via the CPACF KM instruction (DEA mode). */
static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
}
/* Decrypt one 8-byte block via the CPACF KM instruction (DEA mode). */
static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
}
/* Plain single-block "des" cipher registration. */
static struct crypto_alg des_alg = {
.cra_name = "des",
.cra_driver_name = "des-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(des_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = DES_KEY_SIZE,
.cia_max_keysize = DES_KEY_SIZE,
.cia_setkey = des_setkey,
.cia_encrypt = des_encrypt,
.cia_decrypt = des_decrypt,
}
}
};
/*
 * ecb_desall_crypt - common ECB walker shared by DES and 3DES.
 *
 * Walks the scatterlist one mapped span at a time, handing only whole
 * DES blocks to the KM instruction; any sub-block remainder is returned
 * to the blkcipher walk machinery for the next iteration.
 */
static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
void *param, struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes;
while ((nbytes = walk->nbytes)) {
/* only use complete blocks */
unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
/* KM returns the number of bytes processed on success. */
BUG_ON((ret < 0) || (ret != n));
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
}
return ret;
}
/*
 * cbc_desall_crypt - common CBC walker shared by DES and 3DES.
 *
 * The IV is copied into @param before the first KMC call; the CPACF
 * KMC instruction updates the chaining value in @param as it goes, and
 * the final value is copied back to walk->iv afterwards.
 */
static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
void *param, struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
if (!nbytes)
goto out;
memcpy(param, walk->iv, DES_BLOCK_SIZE);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, param, out, in, n);
/* KMC returns the number of bytes processed on success. */
BUG_ON((ret < 0) || (ret != n));
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
memcpy(walk->iv, param, DES_BLOCK_SIZE);
out:
return ret;
}
/* ECB-DES encrypt: thin wrapper setting up the walk for the shared helper. */
static int ecb_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, sctx->key, &walk);
}
/* ECB-DES decrypt: same as above with the decrypt function code. */
static int ecb_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_DEA_DECRYPT, sctx->key, &walk);
}
/* "ecb(des)" blkcipher registration. */
static struct crypto_alg ecb_des_alg = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = des_setkey,
.encrypt = ecb_des_encrypt,
.decrypt = ecb_des_decrypt,
}
}
};
/* CBC-DES encrypt: passes the per-tfm IV buffer as the KMC parameter. */
static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, sctx->iv, &walk);
}
/* CBC-DES decrypt counterpart. */
static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, sctx->iv, &walk);
}
/* "cbc(des)" blkcipher registration. */
static struct crypto_alg cbc_des_alg = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des_setkey,
.encrypt = cbc_des_encrypt,
.decrypt = cbc_des_decrypt,
}
}
};
/*
* RFC2451:
*
* For DES-EDE3, there is no known need to reject weak or
* complementation keys. Any weakness is obviated by the use of
* multiple keys.
*
* However, if the first two or last two independent 64-bit keys are
* equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
* same as DES. Implementers MUST reject keys that exhibit this
* property.
*
*/
/*
 * des3_192_setkey - store a triple-DES (3 x 64-bit) key.
 *
 * Per RFC 2451, keys with k1 == k2 or k2 == k3 degenerate to single
 * DES and are rejected -- but, matching the single-DES path, only when
 * the caller requested weak-key checking. The raw key is copied; the
 * schedule is computed by the CPACF hardware.
 */
static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
DES_KEY_SIZE)) &&
(*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
memcpy(dctx->key, key, keylen);
return 0;
}
/* Encrypt one block via KM in TDEA-192 mode. The (void *) cast drops
 * const to match the crypt_s390_km() prototype; the source is not
 * modified by the instruction. */
static void des3_192_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
DES_BLOCK_SIZE);
}
/* Decrypt counterpart of des3_192_encrypt. */
static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
DES_BLOCK_SIZE);
}
/* Plain single-block "des3_ede" cipher registration. */
static struct crypto_alg des3_192_alg = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = DES3_192_KEY_SIZE,
.cia_max_keysize = DES3_192_KEY_SIZE,
.cia_setkey = des3_192_setkey,
.cia_encrypt = des3_192_encrypt,
.cia_decrypt = des3_192_decrypt,
}
}
};
/* ECB-3DES encrypt: wrapper over the shared ECB walker. */
static int ecb_des3_192_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, sctx->key, &walk);
}
/* ECB-3DES decrypt counterpart. */
static int ecb_des3_192_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, sctx->key, &walk);
}
/* "ecb(des3_ede)" blkcipher registration. */
static struct crypto_alg ecb_des3_192_alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(
ecb_des3_192_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_192_KEY_SIZE,
.max_keysize = DES3_192_KEY_SIZE,
.setkey = des3_192_setkey,
.encrypt = ecb_des3_192_encrypt,
.decrypt = ecb_des3_192_decrypt,
}
}
};
/* CBC-3DES encrypt: wrapper over the shared CBC walker. */
static int cbc_des3_192_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, sctx->iv, &walk);
}
/* CBC-3DES decrypt counterpart. */
static int cbc_des3_192_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, sctx->iv, &walk);
}
/* "cbc(des3_ede)" blkcipher registration. */
static struct crypto_alg cbc_des3_192_alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(
cbc_des3_192_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_192_KEY_SIZE,
.max_keysize = DES3_192_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des3_192_setkey,
.encrypt = cbc_des3_192_encrypt,
.decrypt = cbc_des3_192_decrypt,
}
}
};
/*
 * des_s390_init - register every (3)DES variant with the crypto API.
 *
 * Bails out with -EOPNOTSUPP if the CPU lacks the DEA or TDEA-192 KM
 * functions. A failed registration unwinds all earlier registrations in
 * reverse order via the fall-through label chain below.
 */
static int des_s390_init(void)
{
int ret;
if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
!crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
return -EOPNOTSUPP;
ret = crypto_register_alg(&des_alg);
if (ret)
goto des_err;
ret = crypto_register_alg(&ecb_des_alg);
if (ret)
goto ecb_des_err;
ret = crypto_register_alg(&cbc_des_alg);
if (ret)
goto cbc_des_err;
ret = crypto_register_alg(&des3_192_alg);
if (ret)
goto des3_192_err;
ret = crypto_register_alg(&ecb_des3_192_alg);
if (ret)
goto ecb_des3_192_err;
ret = crypto_register_alg(&cbc_des3_192_alg);
if (ret)
goto cbc_des3_192_err;
out:
return ret;
/* Error unwind: each label undoes the registration preceding the one
 * that failed, then falls through to undo the rest. */
cbc_des3_192_err:
crypto_unregister_alg(&ecb_des3_192_alg);
ecb_des3_192_err:
crypto_unregister_alg(&des3_192_alg);
des3_192_err:
crypto_unregister_alg(&cbc_des_alg);
cbc_des_err:
crypto_unregister_alg(&ecb_des_alg);
ecb_des_err:
crypto_unregister_alg(&des_alg);
des_err:
goto out;
}
/* Module unload: unregister all algorithms in reverse registration order. */
static void __exit des_s390_exit(void)
{
crypto_unregister_alg(&cbc_des3_192_alg);
crypto_unregister_alg(&ecb_des3_192_alg);
crypto_unregister_alg(&des3_192_alg);
crypto_unregister_alg(&cbc_des_alg);
crypto_unregister_alg(&ecb_des_alg);
crypto_unregister_alg(&des_alg);
}
module_init(des_s390_init);
module_exit(des_s390_exit);
MODULE_ALIAS("des");
MODULE_ALIAS("des3_ede");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
/*
* Copyright (C)2003,2004 USAGI/WIDE Project
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors Mitsuru KANDA <mk@linux-ipv6.org>
* YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
*
* Based on net/ipv4/xfrm4_tunnel.c
*
*/
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/mutex.h>
#include <net/netns/generic.h>
#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
#define XFRM6_TUNNEL_SPI_MIN 1
#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
/* Per-network-namespace state: two hash tables over the allocated SPI
 * entries (keyed by source address and by SPI) plus the last SPI handed
 * out, used as the starting point for the next allocation. */
struct xfrm6_tunnel_net {
struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
u32 spi;
};
static int xfrm6_tunnel_net_id __read_mostly;
/* Resolve this module's per-netns private area. */
static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
{
return net_generic(net, xfrm6_tunnel_net_id);
}
/*
 * xfrm_tunnel_spi things are for allocating unique id ("spi")
 * per xfrm_address_t.
 */
struct xfrm6_tunnel_spi {
struct hlist_node list_byaddr; /* linkage in spi_byaddr[] */
struct hlist_node list_byspi; /* linkage in spi_byspi[] */
xfrm_address_t addr; /* tunnel source address this SPI stands for */
u32 spi; /* the allocated identifier */
atomic_t refcnt; /* one ref per xfrm6_tunnel_alloc_spi() caller */
struct rcu_head rcu_head; /* deferred free; lookups run under RCU */
};
/* Serializes allocation/free; lookups are lockless (RCU). */
static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
/* Hash an IPv6 source address into the spi_byaddr table: fold the
 * 32-bit address hash down to 8 bits, then mask to the table size. */
static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
{
unsigned int h;
h = ipv6_addr_hash((const struct in6_addr *)addr);
h ^= h >> 16;
h ^= h >> 8;
h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;
return h;
}
/* Hash an SPI into the spi_byspi table. */
static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}
/*
 * Find the SPI entry for @saddr, or NULL. Caller must hold either the
 * spi lock or an RCU read-side critical section.
 */
static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
hlist_for_each_entry_rcu(x6spi,
&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
list_byaddr) {
if (xfrm6_addr_equal(&x6spi->addr, saddr))
return x6spi;
}
return NULL;
}
/*
 * Lockless lookup of the SPI assigned to @saddr; returns the SPI in
 * network byte order, or 0 if no entry exists.
 */
__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
rcu_read_lock_bh();
x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
spi = x6spi ? x6spi->spi : 0;
rcu_read_unlock_bh();
return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
/*
 * Check whether @spi is free. Returns its spi_byspi bucket index when
 * available, or -1 if the SPI is already in use. Caller holds the lock.
 */
static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
int index = xfrm6_tunnel_spi_hash_byspi(spi);
hlist_for_each_entry(x6spi,
&xfrm6_tn->spi_byspi[index],
list_byspi) {
if (x6spi->spi == spi)
return -1;
}
return index;
}
/*
 * Allocate a fresh SPI for @saddr and insert it into both hash tables.
 *
 * Scans the [MIN, MAX] range starting just past the last allocation,
 * wrapping around once; returns 0 if every SPI is taken or the entry
 * allocation fails. Caller holds xfrm6_tunnel_spi_lock.
 */
static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
u32 spi;
struct xfrm6_tunnel_spi *x6spi;
int index;
if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN ||
xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX)
xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN;
else
xfrm6_tn->spi++;
/* First pass: from the cursor up to the maximum... */
for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
index = __xfrm6_tunnel_spi_check(net, spi);
if (index >= 0)
goto alloc_spi;
}
/* ...then wrap around and try the values below the cursor. */
for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
index = __xfrm6_tunnel_spi_check(net, spi);
if (index >= 0)
goto alloc_spi;
}
spi = 0;
goto out;
alloc_spi:
xfrm6_tn->spi = spi;
x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
if (!x6spi)
goto out;
memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
x6spi->spi = spi;
atomic_set(&x6spi->refcnt, 1);
hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]);
index = xfrm6_tunnel_spi_hash_byaddr(saddr);
hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]);
out:
return spi;
}
/*
 * Get (or create) the SPI for @saddr, taking a reference. Existing
 * entries are ref-counted rather than duplicated. Returns the SPI in
 * network byte order, or 0 on allocation failure/exhaustion.
 */
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
spin_lock_bh(&xfrm6_tunnel_spi_lock);
x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
if (x6spi) {
atomic_inc(&x6spi->refcnt);
spi = x6spi->spi;
} else
spi = __xfrm6_tunnel_alloc_spi(net, saddr);
spin_unlock_bh(&xfrm6_tunnel_spi_lock);
return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);
/* RCU callback: free an SPI entry after all readers are done with it. */
static void x6spi_destroy_rcu(struct rcu_head *head)
{
kmem_cache_free(xfrm6_tunnel_spi_kmem,
container_of(head, struct xfrm6_tunnel_spi, rcu_head));
}
/*
 * Drop one reference on the SPI entry for @saddr; on the last reference
 * unlink it from both hash tables and schedule the RCU-deferred free.
 */
static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
struct hlist_node *n;
spin_lock_bh(&xfrm6_tunnel_spi_lock);
hlist_for_each_entry_safe(x6spi, n,
&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
list_byaddr)
{
if (xfrm6_addr_equal(&x6spi->addr, saddr)) {
if (atomic_dec_and_test(&x6spi->refcnt)) {
hlist_del_rcu(&x6spi->list_byaddr);
hlist_del_rcu(&x6spi->list_byspi);
call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu);
break;
}
}
}
spin_unlock_bh(&xfrm6_tunnel_spi_lock);
}
/* Output: expose the outer IPv6 header already built by tunnel mode
 * (push the skb data pointer back to the network header). */
static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
skb_push(skb, -skb_network_offset(skb));
return 0;
}
/* Input: return the inner protocol byte saved by the IPv6 receive path. */
static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
return skb_network_header(skb)[IP6CB(skb)->nhoff];
}
/* Receive hook: recover the SPI previously allocated for the outer
 * source address and hand the packet to the generic xfrm receive path. */
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
const struct ipv6hdr *iph = ipv6_hdr(skb);
__be32 spi;
spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
}
/*
 * ICMPv6 error handler for the tunnel. Currently only enumerates the
 * possible type/code combinations and deliberately takes no action for
 * any of them; the switch is kept as documentation of the cases a
 * future implementation would need to handle.
 */
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
/* xfrm6_tunnel native err handling */
switch (type) {
case ICMPV6_DEST_UNREACH:
switch (code) {
case ICMPV6_NOROUTE:
case ICMPV6_ADM_PROHIBITED:
case ICMPV6_NOT_NEIGHBOUR:
case ICMPV6_ADDR_UNREACH:
case ICMPV6_PORT_UNREACH:
default:
break;
}
break;
case ICMPV6_PKT_TOOBIG:
break;
case ICMPV6_TIME_EXCEED:
switch (code) {
case ICMPV6_EXC_HOPLIMIT:
break;
case ICMPV6_EXC_FRAGTIME:
default:
break;
}
break;
case ICMPV6_PARAMPROB:
switch (code) {
case ICMPV6_HDR_FIELD: break;
case ICMPV6_UNK_NEXTHDR: break;
case ICMPV6_UNK_OPTION: break;
}
break;
default:
break;
}
return 0;
}
/* State setup: only tunnel mode without encapsulation is supported;
 * reserve room for the outer IPv6 header. */
static int xfrm6_tunnel_init_state(struct xfrm_state *x)
{
if (x->props.mode != XFRM_MODE_TUNNEL)
return -EINVAL;
if (x->encap)
return -EINVAL;
x->props.header_len = sizeof(struct ipv6hdr);
return 0;
}
/* State teardown: release the SPI reference taken for this state's
 * source address. */
static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
struct net *net = xs_net(x);
xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr);
}
/* xfrm type for IPv6-in-IPv6 tunnel states. */
static const struct xfrm_type xfrm6_tunnel_type = {
.description = "IP6IP6",
.owner = THIS_MODULE,
.proto = IPPROTO_IPV6,
.init_state = xfrm6_tunnel_init_state,
.destructor = xfrm6_tunnel_destroy,
.input = xfrm6_tunnel_input,
.output = xfrm6_tunnel_output,
};
/* Tunnel handler registered for the IPv6 family. */
static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = {
.handler = xfrm6_tunnel_rcv,
.err_handler = xfrm6_tunnel_err,
.priority = 2,
};
/* Same handlers registered for the IPv4 family (IPv4-in-IPv6). */
static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = {
.handler = xfrm6_tunnel_rcv,
.err_handler = xfrm6_tunnel_err,
.priority = 2,
};
/* Per-namespace init: empty both SPI hash tables and reset the cursor. */
static int __net_init xfrm6_tunnel_net_init(struct net *net)
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
unsigned int i;
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]);
for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]);
xfrm6_tn->spi = 0;
return 0;
}
/* Per-namespace exit: nothing to tear down; entries are freed when the
 * owning xfrm states are destroyed. */
static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
{
}
static struct pernet_operations xfrm6_tunnel_net_ops = {
.init = xfrm6_tunnel_net_init,
.exit = xfrm6_tunnel_net_exit,
.id = &xfrm6_tunnel_net_id,
.size = sizeof(struct xfrm6_tunnel_net),
};
/*
 * Module init: create the SPI entry slab, register the pernet subsystem,
 * the IP6IP6 xfrm type, and the tunnel handlers for both address
 * families. Failures unwind in reverse order via the label chain.
 */
static int __init xfrm6_tunnel_init(void)
{
int rv;
xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
sizeof(struct xfrm6_tunnel_spi),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!xfrm6_tunnel_spi_kmem)
return -ENOMEM;
rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
if (rv < 0)
goto out_pernet;
rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
if (rv < 0)
goto out_type;
rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
if (rv < 0)
goto out_xfrm6;
rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
if (rv < 0)
goto out_xfrm46;
return 0;
out_xfrm46:
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
out_xfrm6:
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
out_type:
unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
out_pernet:
kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
return rv;
}
/* Module exit: undo xfrm6_tunnel_init() in reverse order. */
static void __exit xfrm6_tunnel_fini(void)
{
xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
}
module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);
/* New Hydra driver using generic 8390 core */
/* Based on old hydra driver by Topi Kanerva (topi@susanna.oulu.fi) */
/* This file is subject to the terms and conditions of the GNU General */
/* Public License. See the file COPYING in the main directory of the */
/* Linux distribution for more details. */
/* Peter De Schrijver (p2@mind.be) */
/* Oldenburg 2000 */
/* The Amiganet is a Zorro-II board made by Hydra Systems. It contains a */
/* NS8390 NIC (network interface controller) clone, 16 or 64K on-board RAM */
/* and 10BASE-2 (thin coax) and AUI connectors. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <linux/zorro.h>
#define EI_SHIFT(x) (ei_local->reg_offset[x])
#define ei_inb(port) in_8(port)
#define ei_outb(val,port) out_8(port,val)
#define ei_inb_p(port) in_8(port)
#define ei_outb_p(val,port) out_8(port,val)
static const char version[] =
"8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
#include "lib8390.c"
#define NE_EN0_DCFG (0x0e*2)
#define NESM_START_PG 0x0 /* First page of TX buffer */
#define NESM_STOP_PG 0x40 /* Last page +1 of RX ring */
#define HYDRA_NIC_BASE 0xffe1
#define HYDRA_ADDRPROM 0xffc0
#define HYDRA_VERSION "v3.0alpha"
#define WORDSWAP(a) ((((a)>>8)&0xff) | ((a)<<8))
static int hydra_init_one(struct zorro_dev *z,
const struct zorro_device_id *ent);
static int hydra_init(struct zorro_dev *z);
static int hydra_open(struct net_device *dev);
static int hydra_close(struct net_device *dev);
static void hydra_reset_8390(struct net_device *dev);
static void hydra_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
static void hydra_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
static void hydra_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void hydra_remove_one(struct zorro_dev *z);
/* Zorro IDs this driver binds to: the Hydra Systems AmigaNet board. */
static struct zorro_device_id hydra_zorro_tbl[] = {
	{ ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl);

/* Zorro bus glue: probe/remove entry points for matching boards. */
static struct zorro_driver hydra_driver = {
	.name		= "hydra",
	.id_table	= hydra_zorro_tbl,
	.probe		= hydra_init_one,
	.remove		= hydra_remove_one,
};
/*
 * Probe one matching Zorro board: reserve its 64K memory window, then
 * hand off to hydra_init() for NIC setup.
 *
 * Returns 0 on success or a negative errno.  Fix: the error code from
 * hydra_init() is now propagated instead of being collapsed to -EBUSY,
 * so e.g. an allocation failure is correctly reported as -ENOMEM.
 */
static int hydra_init_one(struct zorro_dev *z,
			  const struct zorro_device_id *ent)
{
	int err;

	if (!request_mem_region(z->resource.start, 0x10000, "Hydra"))
		return -EBUSY;
	err = hydra_init(z);
	if (err) {
		/* Undo the reservation before reporting the real error. */
		release_mem_region(z->resource.start, 0x10000);
		return err;
	}
	return 0;
}
/* net_device callbacks: open/close are local; the data path, stats and
 * multicast handling come straight from the generic 8390 core
 * (lib8390.c, included above with the __ei_ prefix). */
static const struct net_device_ops hydra_netdev_ops = {
	.ndo_open		= hydra_open,
	.ndo_stop		= hydra_close,
	.ndo_start_xmit		= __ei_start_xmit,
	.ndo_tx_timeout		= __ei_tx_timeout,
	.ndo_get_stats		= __ei_get_stats,
	.ndo_set_rx_mode	= __ei_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= __ei_poll,
#endif
};
/*
 * Set up the NIC behind one Hydra board: read the MAC address from the
 * board's address PROM, put the 8390 into word mode, install the shared
 * Amiga PORTS interrupt and register the net_device.
 * Returns 0 or a negative errno.
 */
static int hydra_init(struct zorro_dev *z)
{
	struct net_device *dev;
	unsigned long board = ZTWO_VADDR(z->resource.start);
	unsigned long ioaddr = board+HYDRA_NIC_BASE;
	const char name[] = "NE2000";
	int start_page, stop_page;
	int j;
	int err;

	/* 8390 register spacing: one register every two bytes. */
	static u32 hydra_offsets[16] = {
		0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
		0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	};

	dev = ____alloc_ei_netdev(0);
	if (!dev)
		return -ENOMEM;

	/* MAC bytes sit on even addresses in the PROM (2*j stride). */
	for (j = 0; j < ETH_ALEN; j++)
		dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));

	/* We must set the 8390 for word mode. */
	z_writeb(0x4b, ioaddr + NE_EN0_DCFG);
	start_page = NESM_START_PG;
	stop_page = NESM_STOP_PG;

	dev->base_addr = ioaddr;
	dev->irq = IRQ_AMIGA_PORTS;

	/* Install the Interrupt handler (shared PORTS line). */
	if (request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, "Hydra Ethernet",
			dev)) {
		free_netdev(dev);
		return -EAGAIN;
	}

	ei_status.name = name;
	ei_status.tx_start_page = start_page;
	ei_status.stop_page = stop_page;
	ei_status.word16 = 1;
	ei_status.bigendian = 1;

	ei_status.rx_start_page = start_page + TX_PAGES;

	/* Board-specific hooks used by the generic 8390 core. */
	ei_status.reset_8390 = hydra_reset_8390;
	ei_status.block_input = hydra_block_input;
	ei_status.block_output = hydra_block_output;
	ei_status.get_8390_hdr = hydra_get_8390_hdr;
	ei_status.reg_offset = hydra_offsets;

	dev->netdev_ops = &hydra_netdev_ops;
	__NS8390_init(dev, 0);

	err = register_netdev(dev);
	if (err) {
		free_irq(IRQ_AMIGA_PORTS, dev);
		free_netdev(dev);
		return err;
	}

	zorro_set_drvdata(z, dev);

	pr_info("%s: Hydra at %pR, address %pM (hydra.c " HYDRA_VERSION ")\n",
		dev->name, &z->resource, dev->dev_addr);

	return 0;
}
/* Open: delegate to the generic 8390 core; always reports success. */
static int hydra_open(struct net_device *dev)
{
	__ei_open(dev);
	return 0;
}

static int hydra_close(struct net_device *dev)
{
	if (ei_debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
	__ei_close(dev);
	return 0;
}

/* The board provides no hardware reset facility; just log the attempt. */
static void hydra_reset_8390(struct net_device *dev)
{
	printk(KERN_INFO "Hydra hw reset not there\n");
}

/*
 * Fetch the 4-byte packet header for @ring_page out of board RAM.
 * Each 16-bit half is byte-swapped via WORDSWAP; presumably this
 * corrects the Zorro-bus vs. NIC byte-order mismatch (the board runs
 * big-endian, see ei_status.bigendian in hydra_init()).
 */
static void hydra_get_8390_hdr(struct net_device *dev,
			       struct e8390_pkt_hdr *hdr, int ring_page)
{
	int nic_base = dev->base_addr;
	short *ptrs;
	unsigned long hdr_start = (nic_base-HYDRA_NIC_BASE) +
				  ((ring_page - NESM_START_PG)<<8);
	ptrs = (short *)hdr;

	*(ptrs++) = z_readw(hdr_start);
	*((short *)hdr) = WORDSWAP(*((short *)hdr));
	hdr_start += 2;
	*(ptrs++) = z_readw(hdr_start);
	*((short *)hdr+1) = WORDSWAP(*((short *)hdr+1));
}

/*
 * Copy one received packet out of board RAM into the skb, handling
 * wrap-around at the end of the receive ring (NESM_STOP_PG).
 */
static void hydra_block_input(struct net_device *dev, int count,
			      struct sk_buff *skb, int ring_offset)
{
	unsigned long nic_base = dev->base_addr;
	unsigned long mem_base = nic_base - HYDRA_NIC_BASE;
	unsigned long xfer_start = mem_base + ring_offset - (NESM_START_PG<<8);

	if (count&1)
		count++;	/* transfers are 16-bit wide */

	if (xfer_start+count > mem_base + (NESM_STOP_PG<<8)) {
		/* Ring wraps: copy the tail, then the remainder from the
		 * start of the ring buffer. */
		int semi_count = (mem_base + (NESM_STOP_PG<<8)) - xfer_start;

		z_memcpy_fromio(skb->data,xfer_start,semi_count);
		count -= semi_count;
		z_memcpy_fromio(skb->data+semi_count, mem_base, count);
	} else
		z_memcpy_fromio(skb->data, xfer_start,count);
}

/* Copy an outgoing frame into the TX area of board RAM (word-padded). */
static void hydra_block_output(struct net_device *dev, int count,
			       const unsigned char *buf, int start_page)
{
	unsigned long nic_base = dev->base_addr;
	unsigned long mem_base = nic_base - HYDRA_NIC_BASE;

	if (count&1)
		count++;

	z_memcpy_toio(mem_base+((start_page - NESM_START_PG)<<8), buf, count);
}

/* Undo hydra_init_one(): unregister, free the IRQ and the memory window. */
static void hydra_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	free_irq(IRQ_AMIGA_PORTS, dev);
	release_mem_region(ZTWO_PADDR(dev->base_addr)-HYDRA_NIC_BASE, 0x10000);
	free_netdev(dev);
}

static int __init hydra_init_module(void)
{
	return zorro_register_driver(&hydra_driver);
}

static void __exit hydra_cleanup_module(void)
{
	zorro_unregister_driver(&hydra_driver);
}

module_init(hydra_init_module);
module_exit(hydra_cleanup_module);
MODULE_LICENSE("GPL");
| gpl-2.0 |
PureNexusProject/android_kernel_asus_fugu | drivers/input/touchscreen/ucb1400_ts.c | 2476 | 12535 | /*
* Philips UCB1400 touchscreen driver
*
* Author: Nicolas Pitre
* Created: September 25, 2006
* Copyright: MontaVista Software, Inc.
*
* Spliting done by: Marek Vasut <marek.vasut@gmail.com>
* If something doesn't work and it worked before spliting, e-mail me,
* dont bother Nicolas please ;-)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This code is heavily based on ucb1x00-*.c copyrighted by Russell King
* covering the UCB1100, UCB1200 and UCB1300.. Support for the UCB1400 has
* been made separate from ucb1x00-core/ucb1x00-ts on Russell's request.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/input.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/ucb1400.h>
#define UCB1400_TS_POLL_PERIOD	10	/* ms */

static bool adcsync;		/* sync ADC reads with the ADCSYNC pin */
static int ts_delay = 55;	/* us: settle time before a position read */
static int ts_delay_pressure;	/* us: settle time before a pressure read */

/* Switch to interrupt mode. */
static void ucb1400_ts_mode_int(struct ucb1400_ts *ucb)
{
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
			UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
			UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
			UCB_TS_CR_MODE_INT);
}

/*
 * Switch to pressure mode, and read pressure.  We don't need to wait
 * here, since both plates are being driven.
 */
static unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
{
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
			UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
			UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
			UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);

	udelay(ts_delay_pressure);

	return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPY, adcsync);
}

/*
 * Switch to X position mode and measure Y plate.  We switch the plate
 * configuration in pressure mode, then switch to position mode.  This
 * gives a faster response time.  Even so, we need to wait about 55us
 * for things to stabilise.
 *
 * NOTE(review): the pressure-mode write is issued twice on purpose in
 * the original driver; do not "simplify" this away without hardware
 * testing.
 */
static unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
{
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
			UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
			UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
			UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
			UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
			UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
			UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA);

	udelay(ts_delay);

	return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPY, adcsync);
}

/*
 * Switch to Y position mode and measure X plate.  We switch the plate
 * configuration in pressure mode, then switch to position mode.  This
 * gives a faster response time.  Even so, we need to wait about 55us
 * for things to stabilise.
 */
static int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
{
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
			UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
			UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
			UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
			UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
			UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
			UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA);

	udelay(ts_delay);

	return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPX, adcsync);
}

/*
 * Switch to X plate resistance mode.  Set MX to ground, PX to
 * supply.  Measure current.
 */
static unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
{
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
			UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
			UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
	return ucb1400_adc_read(ucb->ac97, 0, adcsync);
}

/*
 * Switch to Y plate resistance mode.  Set MY to ground, PY to
 * supply.  Measure current.
 */
static unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
{
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
			UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
			UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
	return ucb1400_adc_read(ucb->ac97, 0, adcsync);
}
/*
 * Sample the touchscreen control register; a nonzero result is treated
 * as "pen up" by the polling loop in ucb1400_irq().
 */
static int ucb1400_ts_pen_up(struct ucb1400_ts *ucb)
{
	unsigned short val = ucb1400_reg_read(ucb->ac97, UCB_TS_CR);

	return val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW);
}

/* Clear any latched TSPX event, then enable falling-edge TSPX IRQs. */
static void ucb1400_ts_irq_enable(struct ucb1400_ts *ucb)
{
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, UCB_IE_TSPX);
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
	ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_TSPX);
}

/* Mask the falling-edge interrupt sources. */
static void ucb1400_ts_irq_disable(struct ucb1400_ts *ucb)
{
	ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0);
}

/* Push one touch sample (position, pressure, pen-down) to the input core. */
static void ucb1400_ts_report_event(struct input_dev *idev, u16 pressure, u16 x, u16 y)
{
	input_report_abs(idev, ABS_X, x);
	input_report_abs(idev, ABS_Y, y);
	input_report_abs(idev, ABS_PRESSURE, pressure);
	input_report_key(idev, BTN_TOUCH, 1);
	input_sync(idev);
}

/* Report pen-up: zero pressure, touch key released. */
static void ucb1400_ts_event_release(struct input_dev *idev)
{
	input_report_abs(idev, ABS_PRESSURE, 0);
	input_report_key(idev, BTN_TOUCH, 0);
	input_sync(idev);
}

/*
 * Acknowledge all pending UCB interrupt sources.  A TSPX event also
 * masks further touch IRQs until the poll loop has finished; any other
 * source at this point is unexpected and only logged.
 */
static void ucb1400_clear_pending_irq(struct ucb1400_ts *ucb)
{
	unsigned int isr;

	isr = ucb1400_reg_read(ucb->ac97, UCB_IE_STATUS);
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, isr);
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);

	if (isr & UCB_IE_TSPX)
		ucb1400_ts_irq_disable(ucb);
	else
		dev_dbg(&ucb->ts_idev->dev,
			"ucb1400: unexpected IE_STATUS = %#x\n", isr);
}
/*
 * A restriction with interrupts exists when using the ucb1400, as
 * the codec read/write routines may sleep while waiting for codec
 * access completion and uses semaphores for access control to the
 * AC97 bus.  Therefore the driver is forced to use threaded interrupt
 * handler.
 */
static irqreturn_t ucb1400_irq(int irqnr, void *devid)
{
	struct ucb1400_ts *ucb = devid;
	unsigned int x, y, p;
	bool penup;

	if (unlikely(irqnr != ucb->irq))
		return IRQ_NONE;

	ucb1400_clear_pending_irq(ucb);

	/* Start with a small delay before checking pendown state */
	msleep(UCB1400_TS_POLL_PERIOD);

	/* Poll while the pen stays down (and nobody asked us to stop),
	 * reporting one position/pressure sample per poll period. */
	while (!ucb->stopped && !(penup = ucb1400_ts_pen_up(ucb))) {

		ucb1400_adc_enable(ucb->ac97);
		x = ucb1400_ts_read_xpos(ucb);
		y = ucb1400_ts_read_ypos(ucb);
		p = ucb1400_ts_read_pressure(ucb);
		ucb1400_adc_disable(ucb->ac97);

		ucb1400_ts_report_event(ucb->ts_idev, p, x, y);

		/* Sleep for one poll period, but wake immediately on stop. */
		wait_event_timeout(ucb->ts_wait, ucb->stopped,
				   msecs_to_jiffies(UCB1400_TS_POLL_PERIOD));
	}

	ucb1400_ts_event_release(ucb->ts_idev);

	if (!ucb->stopped) {
		/* Switch back to interrupt mode. */
		ucb1400_ts_mode_int(ucb);
		ucb1400_ts_irq_enable(ucb);
	}

	return IRQ_HANDLED;
}
/* Quiesce the touchscreen: stop the IRQ-thread poll loop, mask the host
 * and chip interrupts and power down the touchscreen controller. */
static void ucb1400_ts_stop(struct ucb1400_ts *ucb)
{
	/* Signal IRQ thread to stop polling and disable the handler. */
	ucb->stopped = true;
	mb();	/* publish 'stopped' before waking the poll loop */
	wake_up(&ucb->ts_wait);
	disable_irq(ucb->irq);

	ucb1400_ts_irq_disable(ucb);
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
}

/* Must be called with ts->lock held */
static void ucb1400_ts_start(struct ucb1400_ts *ucb)
{
	/* Tell IRQ thread that it may poll the device. */
	ucb->stopped = false;
	mb();	/* publish 'stopped' before re-enabling interrupts */
	ucb1400_ts_mode_int(ucb);
	ucb1400_ts_irq_enable(ucb);
	enable_irq(ucb->irq);
}
/* input_dev open: the first user starts the touchscreen. */
static int ucb1400_ts_open(struct input_dev *idev)
{
	struct ucb1400_ts *ucb = input_get_drvdata(idev);

	ucb1400_ts_start(ucb);

	return 0;
}

/* input_dev close: the last user stops it again. */
static void ucb1400_ts_close(struct input_dev *idev)
{
	struct ucb1400_ts *ucb = input_get_drvdata(idev);

	ucb1400_ts_stop(ucb);
}

#ifndef NO_IRQ
#define NO_IRQ	0
#endif
/*
 * Try to probe our interrupt, rather than relying on lots of
 * hard-coded machine dependencies.
 *
 * Forces one ADC conversion with its interrupt unmasked and lets the
 * generic probe_irq_on()/probe_irq_off() machinery identify which host
 * IRQ line fired.  On success ucb->irq is filled in and 0 is returned;
 * otherwise -ENODEV.
 */
static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
				 struct platform_device *pdev)
{
	unsigned long mask, timeout;

	mask = probe_irq_on();

	/* Enable the ADC interrupt. */
	ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, UCB_IE_ADC);
	ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_ADC);
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff);
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);

	/* Cause an ADC interrupt. */
	ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA);
	ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

	/* Wait for the conversion to complete (bounded busy-wait, 0.5s). */
	timeout = jiffies + HZ/2;
	while (!(ucb1400_reg_read(ucb->ac97, UCB_ADC_DATA) &
				  UCB_ADC_DAT_VALID)) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			dev_err(&pdev->dev, "timed out in IRQ probe\n");
			probe_irq_off(mask);
			return -ENODEV;
		}
	}
	ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, 0);

	/* Disable and clear interrupt. */
	ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, 0);
	ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0);
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff);
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);

	/* Read triggered interrupt. */
	ucb->irq = probe_irq_off(mask);
	if (ucb->irq < 0 || ucb->irq == NO_IRQ)
		return -ENODEV;

	return 0;
}
/*
 * Platform probe: allocate the input device, find the IRQ (probing for
 * it when the platform did not supply one), measure the plate
 * resistances to size the ABS_X/ABS_Y ranges, then register the IRQ
 * handler and the input device.
 */
static int ucb1400_ts_probe(struct platform_device *pdev)
{
	struct ucb1400_ts *ucb = pdev->dev.platform_data;
	int error, x_res, y_res;
	u16 fcsr;

	ucb->ts_idev = input_allocate_device();
	if (!ucb->ts_idev) {
		error = -ENOMEM;
		goto err;
	}

	/* Only in case the IRQ line wasn't supplied, try detecting it */
	if (ucb->irq < 0) {
		error = ucb1400_ts_detect_irq(ucb, pdev);
		if (error) {
			dev_err(&pdev->dev, "IRQ probe failed\n");
			goto err_free_devs;
		}
	}

	init_waitqueue_head(&ucb->ts_wait);

	input_set_drvdata(ucb->ts_idev, ucb);

	ucb->ts_idev->dev.parent	= &pdev->dev;
	ucb->ts_idev->name		= "UCB1400 touchscreen interface";
	ucb->ts_idev->id.vendor		= ucb1400_reg_read(ucb->ac97,
							   AC97_VENDOR_ID1);
	ucb->ts_idev->id.product	= ucb->id;
	ucb->ts_idev->open		= ucb1400_ts_open;
	ucb->ts_idev->close		= ucb1400_ts_close;
	ucb->ts_idev->evbit[0]		= BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
	ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

	/*
	 * Enable ADC filter to prevent horrible jitter on Colibri.
	 * This also further reduces jitter on boards where ADCSYNC
	 * pin is connected.
	 */
	fcsr = ucb1400_reg_read(ucb->ac97, UCB_FCSR);
	ucb1400_reg_write(ucb->ac97, UCB_FCSR, fcsr | UCB_FCSR_AVE);

	/* One-off plate resistance measurement used to scale the axes. */
	ucb1400_adc_enable(ucb->ac97);
	x_res = ucb1400_ts_read_xres(ucb);
	y_res = ucb1400_ts_read_yres(ucb);
	ucb1400_adc_disable(ucb->ac97);
	dev_dbg(&pdev->dev, "x/y = %d/%d\n", x_res, y_res);

	input_set_abs_params(ucb->ts_idev, ABS_X, 0, x_res, 0, 0);
	input_set_abs_params(ucb->ts_idev, ABS_Y, 0, y_res, 0, 0);
	input_set_abs_params(ucb->ts_idev, ABS_PRESSURE, 0, 0, 0, 0);

	/* Keep the part quiescent until the input device is opened. */
	ucb1400_ts_stop(ucb);

	error = request_threaded_irq(ucb->irq, NULL, ucb1400_irq,
				     IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				     "UCB1400", ucb);
	if (error) {
		dev_err(&pdev->dev,
			"unable to grab irq%d: %d\n", ucb->irq, error);
		goto err_free_devs;
	}

	error = input_register_device(ucb->ts_idev);
	if (error)
		goto err_free_irq;

	return 0;

err_free_irq:
	free_irq(ucb->irq, ucb);
err_free_devs:
	input_free_device(ucb->ts_idev);
err:
	return error;
}
/* Platform remove: release the IRQ and unregister the input device
 * (unregistering also frees it). */
static int ucb1400_ts_remove(struct platform_device *pdev)
{
	struct ucb1400_ts *ucb = pdev->dev.platform_data;

	free_irq(ucb->irq, ucb);
	input_unregister_device(ucb->ts_idev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: quiesce the touchscreen if the input device is open.
 *
 * BUGFIX: this previously called ucb1400_ts_start(), i.e. it *enabled*
 * the part on suspend (and resume stopped it).  Suspend must stop the
 * device; resume restarts it.
 */
static int ucb1400_ts_suspend(struct device *dev)
{
	struct ucb1400_ts *ucb = dev->platform_data;
	struct input_dev *idev = ucb->ts_idev;

	mutex_lock(&idev->mutex);

	/* Only touch the hardware if someone actually has it open. */
	if (idev->users)
		ucb1400_ts_stop(ucb);

	mutex_unlock(&idev->mutex);
	return 0;
}
/*
 * System resume: restart the touchscreen if the input device is open.
 *
 * BUGFIX: this previously called ucb1400_ts_stop(), leaving the device
 * dead after resume.  Resume must restart what suspend stopped.
 */
static int ucb1400_ts_resume(struct device *dev)
{
	struct ucb1400_ts *ucb = dev->platform_data;
	struct input_dev *idev = ucb->ts_idev;

	mutex_lock(&idev->mutex);

	/* Only touch the hardware if someone actually has it open. */
	if (idev->users)
		ucb1400_ts_start(ucb);

	mutex_unlock(&idev->mutex);
	return 0;
}
#endif
/* Standard suspend/resume hooks (only built when CONFIG_PM_SLEEP). */
static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops,
			 ucb1400_ts_suspend, ucb1400_ts_resume);

static struct platform_driver ucb1400_ts_driver = {
	.probe	= ucb1400_ts_probe,
	.remove	= ucb1400_ts_remove,
	.driver	= {
		.name	= "ucb1400_ts",
		.owner	= THIS_MODULE,
		.pm	= &ucb1400_ts_pm_ops,
	},
};
module_platform_driver(ucb1400_ts_driver);

/* Module parameters: read-only at runtime (mode 0444), set at load time. */
module_param(adcsync, bool, 0444);
MODULE_PARM_DESC(adcsync, "Synchronize touch readings with ADCSYNC pin.");

module_param(ts_delay, int, 0444);
MODULE_PARM_DESC(ts_delay, "Delay between panel setup and"
			   " position read. Default = 55us.");

module_param(ts_delay_pressure, int, 0444);
MODULE_PARM_DESC(ts_delay_pressure,
		 "delay between panel setup and pressure read."
		 " Default = 0us.");

MODULE_DESCRIPTION("Philips UCB1400 touchscreen driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/lge-kernel-star | arch/um/drivers/chan_kern.c | 2732 | 13210 | /*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
* Licensed under the GPL
*/
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include "chan_kern.h"
#include "os.h"
#ifdef CONFIG_NOCONFIG_CHAN
/*
 * Stub channel backend used when a channel type is configured out of
 * the UML build: every operation logs an error and fails.
 */
static void *not_configged_init(char *str, int device,
				const struct chan_opts *opts)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return NULL;
}

static int not_configged_open(int input, int output, int primary, void *data,
			      char **dev_out)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return -ENODEV;
}

static void not_configged_close(int fd, void *data)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
}

static int not_configged_read(int fd, char *c_out, void *data)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return -EIO;
}

static int not_configged_write(int fd, const char *buf, int len, void *data)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return -EIO;
}

static int not_configged_console_write(int fd, const char *buf, int len)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return -EIO;
}

static int not_configged_window_size(int fd, void *data, unsigned short *rows,
				     unsigned short *cols)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
	return -ENODEV;
}

static void not_configged_free(void *data)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
}

static const struct chan_ops not_configged_ops = {
	.init		= not_configged_init,
	.open		= not_configged_open,
	.close		= not_configged_close,
	.read		= not_configged_read,
	.write		= not_configged_write,
	.console_write	= not_configged_console_write,
	.window_size	= not_configged_window_size,
	.free		= not_configged_free,
	.winch		= 0,	/* no window-change support */
};
#endif /* CONFIG_NOCONFIG_CHAN */
/*
 * Push one input character to the tty, honouring XON/XOFF software
 * flow control when it is enabled and the tty is not in raw mode.
 */
static void tty_receive_char(struct tty_struct *tty, char ch)
{
	if (tty == NULL)
		return;

	if (I_IXON(tty) && !I_IXOFF(tty) && !tty->raw) {
		if (ch == STOP_CHAR(tty)) {
			stop_tty(tty);
			return;
		}
		else if (ch == START_CHAR(tty)) {
			start_tty(tty);
			return;
		}
	}

	tty_insert_flip_char(tty, ch, TTY_NORMAL);
}
/*
 * Open a single channel via its backend ops and switch the resulting
 * fd to non-blocking mode.  Idempotent: already-open channels return 0.
 * Returns 0 or a negative errno.
 */
static int open_one_chan(struct chan *chan)
{
	int fd, err;

	if (chan->opened)
		return 0;
	if (chan->ops->open == NULL)
		fd = 0;		/* backend has no open hook: use fd 0 */
	else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
				     chan->data, &chan->dev);
	if (fd < 0)
		return fd;

	err = os_set_fd_block(fd, 0);
	if (err) {
		(*chan->ops->close)(fd, chan->data);
		return err;
	}

	chan->fd = fd;

	chan->opened = 1;
	return 0;
}
static int open_chan(struct list_head *chans)
{
struct list_head *ele;
struct chan *chan;
int ret, err = 0;
list_for_each(ele, chans) {
chan = list_entry(ele, struct chan, list);
ret = open_one_chan(chan);
if (chan->primary)
err = ret;
}
return err;
}
void chan_enable_winch(struct list_head *chans, struct tty_struct *tty)
{
struct list_head *ele;
struct chan *chan;
list_for_each(ele, chans) {
chan = list_entry(ele, struct chan, list);
if (chan->primary && chan->output && chan->ops->winch) {
register_winch(chan->fd, tty);
return;
}
}
}
/*
 * Open every channel of a line and wire up its read/write IRQs.  A
 * failure on the primary channel aborts and closes everything opened
 * so far; failures on secondary channels are skipped.
 * Returns 0 or a negative errno.
 */
int enable_chan(struct line *line)
{
	struct list_head *ele;
	struct chan *chan;
	int err;

	list_for_each(ele, &line->chan_list) {
		chan = list_entry(ele, struct chan, list);
		err = open_one_chan(chan);
		if (err) {
			if (chan->primary)
				goto out_close;

			continue;
		}

		if (chan->enabled)
			continue;	/* IRQ already set up */
		err = line_setup_irq(chan->fd, chan->input, chan->output, line,
				     chan);
		if (err)
			goto out_close;

		chan->enabled = 1;
	}

	return 0;

 out_close:
	close_chan(&line->chan_list, 0);
	return err;
}
/* Items are added in IRQ context, when free_irq can't be called, and
 * removed in process context, when it can.
 * This handles interrupt sources which disappear, and which need to
 * be permanently disabled.  This is discovered in IRQ context, but
 * the freeing of the IRQ must be done later.
 */
static DEFINE_SPINLOCK(irqs_to_free_lock);
static LIST_HEAD(irqs_to_free);

/*
 * Process-context half of the deferred-free scheme above: splice the
 * pending list out under the lock, then free each channel's IRQs
 * without holding it.
 */
void free_irqs(void)
{
	struct chan *chan;
	LIST_HEAD(list);
	struct list_head *ele;
	unsigned long flags;

	spin_lock_irqsave(&irqs_to_free_lock, flags);
	list_splice_init(&irqs_to_free, &list);
	spin_unlock_irqrestore(&irqs_to_free_lock, flags);

	list_for_each(ele, &list) {
		chan = list_entry(ele, struct chan, free_list);

		if (chan->input && chan->enabled)
			free_irq(chan->line->driver->read_irq, chan);
		if (chan->output && chan->enabled)
			free_irq(chan->line->driver->write_irq, chan);
		chan->enabled = 0;
	}
}
/*
 * Close one channel.  With delay_free_irq set the IRQs cannot be freed
 * here (we may be running in IRQ context), so the channel is queued
 * for free_irqs(); otherwise they are freed immediately.
 */
static void close_one_chan(struct chan *chan, int delay_free_irq)
{
	unsigned long flags;

	if (!chan->opened)
		return;

	if (delay_free_irq) {
		spin_lock_irqsave(&irqs_to_free_lock, flags);
		list_add(&chan->free_list, &irqs_to_free);
		spin_unlock_irqrestore(&irqs_to_free_lock, flags);
	}
	else {
		if (chan->input && chan->enabled)
			free_irq(chan->line->driver->read_irq, chan);
		if (chan->output && chan->enabled)
			free_irq(chan->line->driver->write_irq, chan);
		chan->enabled = 0;
	}
	if (chan->ops->close != NULL)
		(*chan->ops->close)(chan->fd, chan->data);

	chan->opened = 0;
	chan->fd = -1;
}

void close_chan(struct list_head *chans, int delay_free_irq)
{
	struct chan *chan;

	/* Close in reverse order as open in case more than one of them
	 * refers to the same device and they save and restore that device's
	 * state.  Then, the first one opened will have the original state,
	 * so it must be the last closed.
	 */
	list_for_each_entry_reverse(chan, chans, list) {
		close_one_chan(chan, delay_free_irq);
	}
}
void deactivate_chan(struct list_head *chans, int irq)
{
struct list_head *ele;
struct chan *chan;
list_for_each(ele, chans) {
chan = list_entry(ele, struct chan, list);
if (chan->enabled && chan->input)
deactivate_fd(chan->fd, irq);
}
}
void reactivate_chan(struct list_head *chans, int irq)
{
struct list_head *ele;
struct chan *chan;
list_for_each(ele, chans) {
chan = list_entry(ele, struct chan, list);
if (chan->enabled && chan->input)
reactivate_fd(chan->fd, irq);
}
}
/*
 * Write len bytes to every output channel.  The return value is that
 * of the primary channel; on a short or would-block write there, the
 * write IRQ is re-armed so the remainder can be flushed later.
 */
int write_chan(struct list_head *chans, const char *buf, int len,
	       int write_irq)
{
	struct list_head *ele;
	struct chan *chan = NULL;
	int n, ret = 0;

	if (len == 0)
		return 0;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		if (!chan->output || (chan->ops->write == NULL))
			continue;

		n = chan->ops->write(chan->fd, buf, len, chan->data);
		if (chan->primary) {
			ret = n;
			if ((ret == -EAGAIN) || ((ret >= 0) && (ret < len)))
				reactivate_fd(chan->fd, write_irq);
		}
	}
	return ret;
}
/*
 * Like write_chan(), but via each backend's console_write hook and
 * with no IRQ re-arming (console output is best-effort).
 */
int console_write_chan(struct list_head *chans, const char *buf, int len)
{
	struct list_head *ele;
	struct chan *chan;
	int n, ret = 0;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		if (!chan->output || (chan->ops->console_write == NULL))
			continue;

		n = chan->ops->console_write(chan->fd, buf, len);
		if (chan->primary)
			ret = n;	/* only the primary's result counts */
	}
	return ret;
}
/* Open a console line's channels and announce the console device. */
int console_open_chan(struct line *line, struct console *co)
{
	int err;

	err = open_chan(&line->chan_list);
	if (err)
		return err;

	printk(KERN_INFO "Console initialized on /dev/%s%d\n", co->name,
	       co->index);
	return 0;
}
/*
 * Query the window size of the primary channel, if its backend
 * supports the operation.  Returns 0 (leaving the outputs untouched)
 * when there is no primary channel or no window_size hook.
 */
int chan_window_size(struct list_head *chans, unsigned short *rows_out,
		     unsigned short *cols_out)
{
	struct list_head *ele;
	struct chan *chan;

	list_for_each(ele, chans) {
		chan = list_entry(ele, struct chan, list);
		if (chan->primary) {
			if (chan->ops->window_size == NULL)
				return 0;
			return chan->ops->window_size(chan->fd, chan->data,
						      rows_out, cols_out);
		}
	}
	return 0;
}
/* Unlink, close and free a single channel, including its backend data. */
static void free_one_chan(struct chan *chan, int delay_free_irq)
{
	list_del(&chan->list);

	close_one_chan(chan, delay_free_irq);

	if (chan->ops->free != NULL)
		(*chan->ops->free)(chan->data);

	if (chan->primary && chan->output)
		ignore_sigio_fd(chan->fd);
	kfree(chan);
}

/* Free every channel on the list (safe iteration: entries are removed). */
static void free_chan(struct list_head *chans, int delay_free_irq)
{
	struct list_head *ele, *next;
	struct chan *chan;

	list_for_each_safe(ele, next, chans) {
		chan = list_entry(ele, struct chan, list);
		free_one_chan(chan, delay_free_irq);
	}
}
/*
 * Emit one channel's configuration ("none", "type" or "type:dev") into
 * str.  Returns the number of bytes produced.  Note: CONFIG_CHUNK is a
 * macro that updates str/size/n in place.
 */
static int one_chan_config_string(struct chan *chan, char *str, int size,
				  char **error_out)
{
	int n = 0;

	if (chan == NULL) {
		CONFIG_CHUNK(str, size, n, "none", 1);
		return n;
	}

	CONFIG_CHUNK(str, size, n, chan->ops->type, 0);

	if (chan->dev == NULL) {
		CONFIG_CHUNK(str, size, n, "", 1);
		return n;
	}

	CONFIG_CHUNK(str, size, n, ":", 0);
	CONFIG_CHUNK(str, size, n, chan->dev, 0);

	return n;
}

/*
 * Emit an "in,out" channel pair; collapses to a single entry when both
 * directions use the same channel.
 */
static int chan_pair_config_string(struct chan *in, struct chan *out,
				   char *str, int size, char **error_out)
{
	int n;

	n = one_chan_config_string(in, str, size, error_out);
	str += n;
	size -= n;

	if (in == out) {
		CONFIG_CHUNK(str, size, n, "", 1);
		return n;
	}

	CONFIG_CHUNK(str, size, n, ",", 1);
	n = one_chan_config_string(out, str, size, error_out);
	str += n;
	size -= n;
	CONFIG_CHUNK(str, size, n, "", 1);

	return n;
}
int chan_config_string(struct list_head *chans, char *str, int size,
char **error_out)
{
struct list_head *ele;
struct chan *chan, *in = NULL, *out = NULL;
list_for_each(ele, chans) {
chan = list_entry(ele, struct chan, list);
if (!chan->primary)
continue;
if (chan->input)
in = chan;
if (chan->output)
out = chan;
}
return chan_pair_config_string(in, out, str, size, error_out);
}
struct chan_type {
char *key;
const struct chan_ops *ops;
};
static const struct chan_type chan_table[] = {
{ "fd", &fd_ops },
#ifdef CONFIG_NULL_CHAN
{ "null", &null_ops },
#else
{ "null", ¬_configged_ops },
#endif
#ifdef CONFIG_PORT_CHAN
{ "port", &port_ops },
#else
{ "port", ¬_configged_ops },
#endif
#ifdef CONFIG_PTY_CHAN
{ "pty", &pty_ops },
{ "pts", &pts_ops },
#else
{ "pty", ¬_configged_ops },
{ "pts", ¬_configged_ops },
#endif
#ifdef CONFIG_TTY_CHAN
{ "tty", &tty_ops },
#else
{ "tty", ¬_configged_ops },
#endif
#ifdef CONFIG_XTERM_CHAN
{ "xterm", &xterm_ops },
#else
{ "xterm", ¬_configged_ops },
#endif
};
/*
 * Parse one channel description ("<type><args>") into a freshly
 * allocated struct chan.  The allocation is GFP_ATOMIC because this
 * can run from contexts that must not sleep.  On failure, *error_out
 * names the reason and NULL is returned.
 */
static struct chan *parse_chan(struct line *line, char *str, int device,
			       const struct chan_opts *opts, char **error_out)
{
	const struct chan_type *entry;
	const struct chan_ops *ops;
	struct chan *chan;
	void *data;
	int i;

	ops = NULL;
	data = NULL;
	/* Match on key prefix; the remainder of str is backend arguments. */
	for(i = 0; i < ARRAY_SIZE(chan_table); i++) {
		entry = &chan_table[i];
		if (!strncmp(str, entry->key, strlen(entry->key))) {
			ops = entry->ops;
			str += strlen(entry->key);
			break;
		}
	}
	if (ops == NULL) {
		*error_out = "No match for configured backends";
		return NULL;
	}

	data = (*ops->init)(str, device, opts);
	if (data == NULL) {
		*error_out = "Configuration failed";
		return NULL;
	}

	chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
	if (chan == NULL) {
		*error_out = "Memory allocation failed";
		return NULL;
	}
	/* Direction flags (input/output) are set later by the caller. */
	*chan = ((struct chan) { .list		= LIST_HEAD_INIT(chan->list),
				 .free_list	=
					LIST_HEAD_INIT(chan->free_list),
				 .line		= line,
				 .primary	= 1,
				 .input		= 0,
				 .output	= 0,
				 .opened	= 0,
				 .enabled	= 0,
				 .fd		= -1,
				 .ops		= ops,
				 .data		= data });
	return chan;
}
/*
 * Parse a line configuration that is either "chan" (one bidirectional
 * channel) or "in,out" (separate input and output channels), replacing
 * any channels previously attached to the line.  Returns 0 on success
 * or -1 with *error_out set.
 */
int parse_chan_pair(char *str, struct line *line, int device,
		    const struct chan_opts *opts, char **error_out)
{
	struct list_head *chans = &line->chan_list;
	struct chan *new, *chan;
	char *in, *out;

	/* Throw away any existing channel configuration first. */
	if (!list_empty(chans)) {
		chan = list_entry(chans->next, struct chan, list);
		free_chan(chans, 0);
		INIT_LIST_HEAD(chans);
	}

	out = strchr(str, ',');
	if (out != NULL) {
		/* "in,out": split in place and parse each half. */
		in = str;
		*out = '\0';
		out++;
		new = parse_chan(line, in, device, opts, error_out);
		if (new == NULL)
			return -1;

		new->input = 1;
		list_add(&new->list, chans);

		new = parse_chan(line, out, device, opts, error_out);
		if (new == NULL)
			return -1;

		list_add(&new->list, chans);
		new->output = 1;
	}
	else {
		/* Single channel used for both directions. */
		new = parse_chan(line, str, device, opts, error_out);
		if (new == NULL)
			return -1;

		list_add(&new->list, chans);
		new->input = 1;
		new->output = 1;
	}
	return 0;
}
/*
 * Read-IRQ handler body: drain every input channel into the tty flip
 * buffer, one character at a time.  If the flip buffer fills up, the
 * remainder is retried from the delayed work 'task'.  EOF (-EIO) on
 * the primary channel hangs up the tty and closes the whole line;
 * on a secondary channel only that channel is closed.
 */
void chan_interrupt(struct list_head *chans, struct delayed_work *task,
		    struct tty_struct *tty, int irq)
{
	struct list_head *ele, *next;
	struct chan *chan;
	int err;
	char c;

	/* _safe: close_one_chan() below may unlink the current entry. */
	list_for_each_safe(ele, next, chans) {
		chan = list_entry(ele, struct chan, list);
		if (!chan->input || (chan->ops->read == NULL))
			continue;
		do {
			if (tty && !tty_buffer_request_room(tty, 1)) {
				schedule_delayed_work(task, 1);
				goto out;
			}
			err = chan->ops->read(chan->fd, &c, chan->data);
			if (err > 0)
				tty_receive_char(tty, c);
		} while (err > 0);

		if (err == 0)
			reactivate_fd(chan->fd, irq);	/* would-block: re-arm */
		if (err == -EIO) {
			if (chan->primary) {
				if (tty != NULL)
					tty_hangup(tty);
				close_chan(chans, 1);
				return;
			}
			else close_one_chan(chan, 1);
		}
	}
 out:
	if (tty)
		tty_flip_buffer_push(tty);
}
| gpl-2.0 |
Split-Screen/android_kernel_htc_flounder | drivers/net/wimax/i2400m/usb.c | 2732 | 24435 | /*
* Intel Wireless WiMAX Connection 2400m
* Linux driver model glue for USB device, reset & fw upload
*
*
* Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* Yanir Lubetkin <yanirx.lubetkin@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* See i2400m-usb.h for a general description of this driver.
*
* This file implements driver model glue, and hook ups for the
* generic driver to implement the bus-specific functions (device
* communication setup/tear down, firmware upload and resetting).
*
* ROADMAP
*
* i2400mu_probe()
* alloc_netdev()...
* i2400mu_netdev_setup()
* i2400mu_init()
* i2400m_netdev_setup()
* i2400m_setup()...
*
* i2400mu_disconnect
* i2400m_release()
* free_netdev()
*
* i2400mu_suspend()
* i2400m_cmd_enter_powersave()
* i2400mu_notification_release()
*
* i2400mu_resume()
* i2400mu_notification_setup()
*
* i2400mu_bus_dev_start() Called by i2400m_dev_start() [who is
* i2400mu_tx_setup() called by i2400m_setup()]
* i2400mu_rx_setup()
* i2400mu_notification_setup()
*
* i2400mu_bus_dev_stop() Called by i2400m_dev_stop() [who is
* i2400mu_notification_release() called by i2400m_release()]
* i2400mu_rx_release()
* i2400mu_tx_release()
*
* i2400mu_bus_reset() Called by i2400m_reset
* __i2400mu_reset()
* __i2400mu_send_barker()
* usb_reset_device()
*/
#include "i2400m-usb.h"
#include <linux/wimax/i2400m.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/module.h>
#define D_SUBMODULE usb
#include "usb-debug-levels.h"
/*
 * Module parameter "debug": space-separated NAME:VALUE pairs parsed at
 * module init (see i2400mu_driver_init()) to set the initial debug
 * level of each submodule (usb, fw, notif, rx, tx).
 */
static char i2400mu_debug_params[128];
module_param_string(debug, i2400mu_debug_params, sizeof(i2400mu_debug_params),
		    0644);
MODULE_PARM_DESC(debug,
		 "String of space-separated NAME:VALUE pairs, where NAMEs "
		 "are the different debug submodules and VALUE are the "
		 "initial debug value to set.");
/*
 * Our firmware file names, tried in array order until one loads
 * (hooked into i2400m->bus_fw_names in i2400mu_probe()).
 *
 * The 5x50 family falls back from the v1.5 to the v1.4 firmware
 * format; the list is NULL-terminated.
 */
static const char *i2400mu_bus_fw_names_5x50[] = {
#define I2400MU_FW_FILE_NAME_v1_5 "i2400m-fw-usb-1.5.sbcf"
	I2400MU_FW_FILE_NAME_v1_5,
#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf"
	I2400MU_FW_FILE_NAME_v1_4,
	NULL,		/* sentinel */
};

/* The i6050 family only ships a v1.5-format firmware. */
static const char *i2400mu_bus_fw_names_6050[] = {
#define I6050U_FW_FILE_NAME_v1_5 "i6050-fw-usb-1.5.sbcf"
	I6050U_FW_FILE_NAME_v1_5,
	NULL,		/* sentinel */
};
/*
 * Bring up the USB-specific machinery: TX, RX and the notification
 * endpoint, in that order; on failure, already-started pieces are torn
 * down in reverse order.
 *
 * Hooked into i2400m->bus_dev_start; called by the generic driver's
 * i2400m_dev_start().
 *
 * Returns: 0 if ok, < 0 errno code on error.
 */
static
int i2400mu_bus_dev_start(struct i2400m *i2400m)
{
	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
	struct device *dev = &i2400mu->usb_iface->dev;
	int rc;

	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
	rc = i2400mu_tx_setup(i2400mu);
	if (rc < 0)
		goto err_tx_setup;
	rc = i2400mu_rx_setup(i2400mu);
	if (rc < 0)
		goto err_rx_setup;
	rc = i2400mu_notification_setup(i2400mu);
	if (rc < 0)
		goto err_notif_setup;
	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, rc);
	return rc;

err_notif_setup:
	i2400mu_rx_release(i2400mu);
err_rx_setup:
	i2400mu_tx_release(i2400mu);
err_tx_setup:
	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
	return rc;
}
/*
 * Undo i2400mu_bus_dev_start(): release the notification endpoint
 * first, then the RX and TX machinery (reverse order of setup).
 *
 * Hooked into i2400m->bus_dev_stop; called by the generic driver's
 * i2400m_dev_stop().
 */
static
void i2400mu_bus_dev_stop(struct i2400m *i2400m)
{
	struct i2400mu *mu = container_of(i2400m, struct i2400mu, i2400m);
	struct device *dev = &mu->usb_iface->dev;

	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
	i2400mu_notification_release(mu);
	i2400mu_rx_release(mu);
	i2400mu_tx_release(mu);
	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
/*
 * Sends a barker buffer to the device
 *
 * This helper will allocate a kmalloced buffer and use it to transmit
 * (then free it). Reason for this is that other arches cannot use
 * stack/vmalloc/text areas for DMA transfers.
 *
 * Error recovery here is simpler: anything is considered a hard error
 * and will move the reset code to use a last-resort bus-based reset.
 *
 * @i2400mu:     device descriptor
 * @barker:      barker pattern to send (copied into the DMA-able buffer)
 * @barker_size: size in bytes of @barker
 * @endpoint:    endpoint index to send the barker through
 *
 * Returns: 0 if ok, < 0 errno code on error (-ESHUTDOWN when the
 *     device is gone or the driver is being removed).
 */
static
int __i2400mu_send_barker(struct i2400mu *i2400mu,
			  const __le32 *barker,
			  size_t barker_size,
			  unsigned endpoint)
{
	struct usb_endpoint_descriptor *epd = NULL;
	int pipe, actual_len, ret;
	struct device *dev = &i2400mu->usb_iface->dev;
	void *buffer;
	int do_autopm = 1;

	/* Wake the interface up; if that fails, still try to send, but
	 * remember not to drop a reference we never took. */
	ret = usb_autopm_get_interface(i2400mu->usb_iface);
	if (ret < 0) {
		dev_err(dev, "RESET: can't get autopm: %d\n", ret);
		do_autopm = 0;
	}
	ret = -ENOMEM;
	buffer = kmalloc(barker_size, GFP_KERNEL);	/* DMA-able copy */
	if (buffer == NULL)
		goto error_kzalloc;
	epd = usb_get_epd(i2400mu->usb_iface, endpoint);
	pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
	memcpy(buffer, barker, barker_size);
retry:
	ret = usb_bulk_msg(i2400mu->usb_dev, pipe, buffer, barker_size,
			   &actual_len, 200);	/* 200 ms timeout */
	switch (ret) {
	case 0:
		if (actual_len != barker_size) {	/* Too short? drop it */
			dev_err(dev, "E: %s: short write (%d B vs %zu "
				"expected)\n",
				__func__, actual_len, barker_size);
			ret = -EIO;
		}
		break;
	case -EPIPE:
		/*
		 * Stall -- maybe the device is choking with our
		 * requests. Clear it and give it some time. If they
		 * happen too often, it might be another symptom, so we
		 * reset.
		 *
		 * No error handling for usb_clear_halt(); if it
		 * works, the retry works; if it fails, this switch
		 * does the error handling for us.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "E: %s: too many stalls in "
				"URB; resetting device\n", __func__);
			usb_queue_reset_device(i2400mu->usb_iface);
			/* fallthrough: report the device as shut down */
		} else {
			usb_clear_halt(i2400mu->usb_dev, pipe);
			msleep(10);	/* give the device some time */
			goto retry;
		}
	case -EINVAL:			/* while removing driver */
	case -ENODEV:			/* dev disconnect ... */
	case -ENOENT:			/* just ignore it */
	case -ESHUTDOWN:		/* and exit */
	case -ECONNRESET:
		ret = -ESHUTDOWN;
		break;
	default:			/* Some error? */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "E: %s: maximum errors in URB "
				"exceeded; resetting device\n",
				__func__);
			usb_queue_reset_device(i2400mu->usb_iface);
		} else {
			dev_warn(dev, "W: %s: cannot send URB: %d\n",
				 __func__, ret);
			goto retry;
		}
	}
	kfree(buffer);
error_kzalloc:
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	return ret;
}
/*
 * Reset a device at different levels (warm, cold or bus)
 *
 * @i2400m: device descriptor
 * @rt: warm, cold or bus reset (I2400M_RT_WARM/COLD/BUS)
 *
 * Warm and cold resets get a USB reset if they fail.
 *
 * Warm reset:
 *
 * The device will be fully reset internally, but won't be
 * disconnected from the USB bus (so no reenumeration will
 * happen). Firmware upload will be necessary.
 *
 * The device will send a reboot barker in the notification endpoint
 * that will trigger the driver to reinitialize the state
 * automatically from notif.c:i2400m_notification_grok() into
 * i2400m_dev_bootstrap_delayed().
 *
 * Cold and bus (USB) reset:
 *
 * The device will be fully reset internally, disconnected from the
 * USB bus and a reenumeration will happen. Firmware upload will be
 * necessary. Thus, we don't do any locking or struct
 * reinitialization, as we are going to be fully disconnected and
 * reenumerated.
 *
 * Note we need to return -ENODEV if a warm reset was requested and we
 * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
 * and wimax_dev->op_reset.
 *
 * WARNING: no driver state saved/fixed
 */
static
int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
{
	int result;
	struct i2400mu *i2400mu =
		container_of(i2400m, struct i2400mu, i2400m);
	struct device *dev = i2400m_dev(i2400m);
	/* Barkers are the 4x-repeated magic words the bootrom expects */
	static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
	};
	static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
	};

	d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt);
	if (rt == I2400M_RT_WARM)
		result = __i2400mu_send_barker(
			i2400mu, i2400m_WARM_BOOT_BARKER,
			sizeof(i2400m_WARM_BOOT_BARKER),
			i2400mu->endpoint_cfg.bulk_out);
	else if (rt == I2400M_RT_COLD)
		result = __i2400mu_send_barker(
			i2400mu, i2400m_COLD_BOOT_BARKER,
			sizeof(i2400m_COLD_BOOT_BARKER),
			i2400mu->endpoint_cfg.reset_cold);
	else if (rt == I2400M_RT_BUS) {
		result = usb_reset_device(i2400mu->usb_dev);
		switch (result) {
		case 0:
		case -EINVAL:	/* device is gone */
		case -ENODEV:
		case -ENOENT:
		case -ESHUTDOWN:
			result = 0;
			break;	/* We assume the device is disconnected */
		default:
			dev_err(dev, "USB reset failed (%d), giving up!\n",
				result);
		}
	} else {
		result = -EINVAL;	/* shut gcc up in certain arches */
		BUG();
	}
	if (result < 0
	    && result != -EINVAL	/* device is gone */
	    && rt != I2400M_RT_BUS) {
		/*
		 * Things failed -- resort to lower level reset, that
		 * we queue in another context; the reason for this is
		 * that the pre and post reset functionality requires
		 * the i2400m->init_mutex; RT_WARM and RT_COLD can
		 * come from areas where i2400m->init_mutex is taken.
		 */
		dev_err(dev, "%s reset failed (%d); trying USB reset\n",
			rt == I2400M_RT_WARM ? "warm" : "cold", result);
		usb_queue_reset_device(i2400mu->usb_iface);
		result = -ENODEV;
	}
	d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result);
	return result;
}
/*
 * Ethtool get_drvinfo hook: report driver name, the firmware file in
 * use (empty string if none loaded yet) and the device's USB path.
 */
static void i2400mu_get_drvinfo(struct net_device *net_dev,
				struct ethtool_drvinfo *info)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version,
		i2400m->fw_name ? i2400m->fw_name : "",
		sizeof(info->fw_version));
	usb_make_path(i2400mu->usb_dev, info->bus_info,
		      sizeof(info->bus_info));
}
/* Ethtool hooks: driver info above plus the generic link-state helper. */
static const struct ethtool_ops i2400mu_ethtool_ops = {
	.get_drvinfo = i2400mu_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
/*
 * Setup callback passed to alloc_netdev(): initialize the bus-specific
 * private area, run the generic i2400m netdev setup and hook our
 * ethtool operations.
 */
static
void i2400mu_netdev_setup(struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);

	i2400mu_init(container_of(i2400m, struct i2400mu, i2400m));
	i2400m_netdev_setup(net_dev);
	net_dev->ethtool_ops = &i2400mu_ethtool_ops;
}
/*
 * Debug levels control; see debug.h
 *
 * One entry per submodule; the levels are adjustable at runtime via
 * the debugfs files registered in i2400mu_debugfs_add() and the
 * "debug" module parameter parsed at init.
 */
struct d_level D_LEVEL[] = {
	D_SUBMODULE_DEFINE(usb),
	D_SUBMODULE_DEFINE(fw),
	D_SUBMODULE_DEFINE(notif),
	D_SUBMODULE_DEFINE(rx),
	D_SUBMODULE_DEFINE(tx),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);

/*
 * Register one debug-level control file under @parent; on failure
 * sets the caller's 'result' variable and jumps to its 'error' label
 * (only usable inside i2400mu_debugfs_add()).
 */
#define __debugfs_register(prefix, name, parent)			\
do {									\
	result = d_level_register_debugfs(prefix, name, parent);	\
	if (result < 0)							\
		goto error;						\
} while (0)
/*
 * Create the USB-specific debugfs hierarchy: an "i2400m-usb" directory
 * under the wimax device's debugfs dir, with one debug-level knob per
 * submodule plus the RX buffer tuning controls.
 *
 * Returns: 0 if ok (a kernel without debugfs support, -ENODEV, is not
 *     treated as an error); < 0 errno code otherwise, after removing
 *     any partially created entries.
 */
static
int i2400mu_debugfs_add(struct i2400mu *i2400mu)
{
	int result;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct dentry *dentry = i2400mu->i2400m.wimax_dev.debugfs_dentry;
	struct dentry *fd;

	dentry = debugfs_create_dir("i2400m-usb", dentry);
	result = PTR_ERR(dentry);
	if (IS_ERR(dentry)) {
		if (result == -ENODEV)
			result = 0;	/* No debugfs support */
		goto error;
	}
	i2400mu->debugfs_dentry = dentry;

	/* Per-submodule debug level controls (see D_LEVEL) */
	__debugfs_register("dl_", usb, dentry);
	__debugfs_register("dl_", fw, dentry);
	__debugfs_register("dl_", notif, dentry);
	__debugfs_register("dl_", rx, dentry);
	__debugfs_register("dl_", tx, dentry);

	/* Don't touch these if you don't know what you are doing */
	fd = debugfs_create_u8("rx_size_auto_shrink", 0600, dentry,
			       &i2400mu->rx_size_auto_shrink);
	result = PTR_ERR(fd);
	if (IS_ERR(fd) && result != -ENODEV) {
		dev_err(dev, "Can't create debugfs entry "
			"rx_size_auto_shrink: %d\n", result);
		goto error;
	}

	fd = debugfs_create_size_t("rx_size", 0600, dentry,
				   &i2400mu->rx_size);
	result = PTR_ERR(fd);
	if (IS_ERR(fd) && result != -ENODEV) {
		dev_err(dev, "Can't create debugfs entry "
			"rx_size: %d\n", result);
		goto error;
	}
	return 0;

error:
	debugfs_remove_recursive(i2400mu->debugfs_dentry);
	return result;
}
/* Device type exposed through sysfs for udev/userspace to identify us. */
static struct device_type i2400mu_type = {
	.name	= "wimax",
};
/*
 * Probe a i2400m interface and register it
 *
 * @iface:   USB interface to link to
 * @id:      USB class/subclass/protocol id
 * @returns: 0 if ok, < 0 errno code on error.
 *
 * Alloc a net device, initialize the bus-specific details and then
 * calls the bus-generic initialization routine. That will register
 * the wimax and netdev devices, upload the firmware [using
 * _bus_bm_*()], call _bus_dev_start() to finalize the setup of the
 * communication with the device and then will start to talk to it to
 * finish setting it up.
 */
static
int i2400mu_probe(struct usb_interface *iface,
		  const struct usb_device_id *id)
{
	int result;
	struct net_device *net_dev;
	struct device *dev = &iface->dev;
	struct i2400m *i2400m;
	struct i2400mu *i2400mu;
	struct usb_device *usb_dev = interface_to_usbdev(iface);

	/* Warn (but carry on) if the device enumerated below USB 2.0
	 * high speed. */
	if (usb_dev->speed != USB_SPEED_HIGH)
		dev_err(dev, "device not connected as high speed\n");

	/* Allocate instance [calls i2400m_netdev_setup() on it]. */
	result = -ENOMEM;
	net_dev = alloc_netdev(sizeof(*i2400mu), "wmx%d",
			       i2400mu_netdev_setup);
	if (net_dev == NULL) {
		dev_err(dev, "no memory for network device instance\n");
		goto error_alloc_netdev;
	}
	SET_NETDEV_DEV(net_dev, dev);
	SET_NETDEV_DEVTYPE(net_dev, &i2400mu_type);
	i2400m = net_dev_to_i2400m(net_dev);
	i2400mu = container_of(i2400m, struct i2400mu, i2400m);
	i2400m->wimax_dev.net_dev = net_dev;
	i2400mu->usb_dev = usb_get_dev(usb_dev);
	i2400mu->usb_iface = iface;
	usb_set_intfdata(iface, i2400mu);

	i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
	/*
	 * Room required in the Tx queue for USB message to accommodate
	 * a smallest payload while allocating header space is 16 bytes.
	 * Adding this room for the new tx message increases the
	 * possibilities of including any payload with size <= 16 bytes.
	 */
	i2400m->bus_tx_room_min = I2400MU_BLK_SIZE;
	i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
	/* Hook the USB-specific operations into the generic driver */
	i2400m->bus_setup = NULL;
	i2400m->bus_dev_start = i2400mu_bus_dev_start;
	i2400m->bus_dev_stop = i2400mu_bus_dev_stop;
	i2400m->bus_release = NULL;
	i2400m->bus_tx_kick = i2400mu_bus_tx_kick;
	i2400m->bus_reset = i2400mu_bus_reset;
	i2400m->bus_bm_retries = I2400M_USB_BOOT_RETRIES;
	i2400m->bus_bm_cmd_send = i2400mu_bus_bm_cmd_send;
	i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
	i2400m->bus_bm_mac_addr_impaired = 0;

	/* Flag the i6050 family; they use different firmware and a
	 * different endpoint layout than the 5x50 parts. */
	switch (id->idProduct) {
	case USB_DEVICE_ID_I6050:
	case USB_DEVICE_ID_I6050_2:
	case USB_DEVICE_ID_I6150:
	case USB_DEVICE_ID_I6150_2:
	case USB_DEVICE_ID_I6150_3:
	case USB_DEVICE_ID_I6250:
		i2400mu->i6050 = 1;
		break;
	default:
		break;
	}
	if (i2400mu->i6050) {
		i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
		i2400mu->endpoint_cfg.bulk_out = 0;
		i2400mu->endpoint_cfg.notification = 3;
		i2400mu->endpoint_cfg.reset_cold = 2;
		i2400mu->endpoint_cfg.bulk_in = 1;
	} else {
		i2400m->bus_fw_names = i2400mu_bus_fw_names_5x50;
		i2400mu->endpoint_cfg.bulk_out = 0;
		i2400mu->endpoint_cfg.notification = 1;
		i2400mu->endpoint_cfg.reset_cold = 2;
		i2400mu->endpoint_cfg.bulk_in = 3;
	}
#ifdef CONFIG_PM
	iface->needs_remote_wakeup = 1;		/* autosuspend (15s delay) */
	device_init_wakeup(dev, 1);
	pm_runtime_set_autosuspend_delay(&usb_dev->dev, 15000);
	usb_enable_autosuspend(usb_dev);
#endif

	result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
	if (result < 0) {
		dev_err(dev, "cannot setup device: %d\n", result);
		goto error_setup;
	}
	result = i2400mu_debugfs_add(i2400mu);
	if (result < 0) {
		dev_err(dev, "Can't register i2400mu's debugfs: %d\n", result);
		goto error_debugfs_add;
	}
	return 0;

error_debugfs_add:
	i2400m_release(i2400m);
error_setup:
	usb_set_intfdata(iface, NULL);
	usb_put_dev(i2400mu->usb_dev);
	free_netdev(net_dev);
error_alloc_netdev:
	return result;
}
/*
 * Disconnect an i2400m from the system
 *
 * By the time this runs, i2400m_stop() has already been called, so all
 * the RX and TX contexts are down. Release the generic driver state,
 * drop the USB references and free the network device.
 */
static
void i2400mu_disconnect(struct usb_interface *iface)
{
	struct i2400mu *mu = usb_get_intfdata(iface);
	struct i2400m *i2400m = &mu->i2400m;
	struct net_device *ndev = i2400m->wimax_dev.net_dev;
	struct device *dev = &iface->dev;

	d_fnstart(3, dev, "(iface %p i2400m %p)\n", iface, i2400m);

	debugfs_remove_recursive(mu->debugfs_dentry);
	i2400m_release(i2400m);
	usb_set_intfdata(iface, NULL);
	usb_put_dev(mu->usb_dev);
	free_netdev(ndev);
	d_fnend(3, dev, "(iface %p i2400m %p) = void\n", iface, i2400m);
}
/*
 * Get the device ready for USB port or system standby and hibernation
 *
 * USB port and system standby are handled the same.
 *
 * When the system hibernates, the USB device is powered down and then
 * up, so we don't really have to do much here, as it will be seen as
 * a reconnect. Still for simplicity we consider this case the same as
 * suspend, so that the device has a chance to do notify the base
 * station (if connected).
 *
 * So at the end, the three cases require common handling.
 *
 * If at the time of this call the device's firmware is not loaded,
 * nothing has to be done. Note we can be "loose" about not reading
 * i2400m->updown under i2400m->init_mutex. If it happens to change
 * immediately, other parts of the call flow will fail and effectively
 * catch it.
 *
 * If the firmware is loaded, we need to:
 *
 *   - tell the device to go into host interface power save mode, wait
 *     for it to ack
 *
 *     This is quite more interesting than it is; we need to execute a
 *     command, but this time, we don't want the code in usb-{tx,rx}.c
 *     to call the usb_autopm_get/put_interface() barriers as it'd
 *     deadlock, so we need to decrement i2400mu->do_autopm, that acts
 *     as a poor man's semaphore. Ugly, but it works.
 *
 *     As well, the device might refuse going to sleep for whichever
 *     reason. In this case we just fail. For system suspend/hibernate,
 *     we *can't* fail. We check PMSG_IS_AUTO to see if the
 *     suspend call comes from the USB stack or from the system and act
 *     in consequence.
 *
 *   - stop the notification endpoint polling
 */
static
int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
{
	int result = 0;
	struct device *dev = &iface->dev;
	struct i2400mu *i2400mu = usb_get_intfdata(iface);
	unsigned is_autosuspend = 0;
	struct i2400m *i2400m = &i2400mu->i2400m;

#ifdef CONFIG_PM
	/* PMSG_IS_AUTO distinguishes USB-stack autosuspend from
	 * system-initiated suspend */
	if (PMSG_IS_AUTO(pm_msg))
		is_autosuspend = 1;
#endif

	d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
	rmb();		/* see i2400m->updown's documentation */
	if (i2400m->updown == 0)
		goto no_firmware;
	if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) {
		/* ugh -- the device is connected and this suspend
		 * request is an autosuspend one (not a system standby
		 * / hibernate).
		 *
		 * The only way the device can go to standby is if the
		 * link with the base station is in IDLE mode; that
		 * were the case, we'd be in status
		 * I2400M_SS_CONNECTED_IDLE. But we are not.
		 *
		 * If we *tell* him to go power save now, it'll reset
		 * as a precautionary measure, so if this is an
		 * autosuspend thing, say no and it'll come back
		 * later, when the link is IDLE
		 */
		result = -EBADF;
		d_printf(1, dev, "fw up, link up, not-idle, autosuspend: "
			 "not entering powersave\n");
		goto error_not_now;
	}
	d_printf(1, dev, "fw up: entering powersave\n");
	/* poor man's semaphore: see the block comment above */
	atomic_dec(&i2400mu->do_autopm);
	result = i2400m_cmd_enter_powersave(i2400m);
	atomic_inc(&i2400mu->do_autopm);
	if (result < 0 && !is_autosuspend) {
		/* System suspend, can't fail */
		dev_err(dev, "failed to suspend, will reset on resume\n");
		result = 0;
	}
	if (result < 0)
		goto error_enter_powersave;
	i2400mu_notification_release(i2400mu);
	d_printf(1, dev, "powersave requested\n");
error_enter_powersave:
error_not_now:
no_firmware:
	d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n",
		iface, pm_msg.event, result);
	return result;
}
/*
 * Resume from USB port or system standby: if the firmware was up when
 * we suspended, restart polling of the notification endpoint;
 * otherwise there is nothing to do. Always succeeds.
 */
static
int i2400mu_resume(struct usb_interface *iface)
{
	struct device *dev = &iface->dev;
	struct i2400mu *mu = usb_get_intfdata(iface);
	struct i2400m *i2400m = &mu->i2400m;
	int ret = 0;

	d_fnstart(3, dev, "(iface %p)\n", iface);
	rmb();	/* see i2400m->updown's documentation */
	if (i2400m->updown == 0) {
		d_printf(1, dev, "fw was down, no resume needed\n");
	} else {
		d_printf(1, dev, "fw was up, resuming\n");
		i2400mu_notification_setup(mu);
		/* USB has flow control, so we don't need to give it time to
		 * come back; otherwise, we'd use something like a get-state
		 * command... */
	}
	d_fnend(3, dev, "(iface %p) = %d\n", iface, ret);
	return ret;
}
/*
 * Resume after the USB core reset the device: treat it as a full
 * device reset and let the generic driver re-bootstrap it.
 */
static
int i2400mu_reset_resume(struct usb_interface *iface)
{
	struct i2400mu *mu = usb_get_intfdata(iface);
	struct device *dev = &iface->dev;
	int rc;

	d_fnstart(3, dev, "(iface %p)\n", iface);
	rc = i2400m_dev_reset_handle(&mu->i2400m, "device reset on resume");
	d_fnend(3, dev, "(iface %p) = %d\n", iface, rc);
	/* USB core only cares about success/failure, not the errno */
	return rc < 0 ? rc : 0;
}
/*
 * Another driver or user space is triggering a reset on the device
 * which contains the interface passed as an argument. Cease IO and
 * save any device state you need to restore.
 *
 * If you need to allocate memory here, use GFP_NOIO or GFP_ATOMIC, if
 * you are in atomic context.
 */
static
int i2400mu_pre_reset(struct usb_interface *iface)
{
	struct i2400mu *i2400mu = usb_get_intfdata(iface);
	/* Delegate to the bus-generic handler */
	return i2400m_pre_reset(&i2400mu->i2400m);
}

/*
 * The reset has completed.  Restore any saved device state and begin
 * using the device again.
 *
 * If you need to allocate memory here, use GFP_NOIO or GFP_ATOMIC, if
 * you are in atomic context.
 */
static
int i2400mu_post_reset(struct usb_interface *iface)
{
	struct i2400mu *i2400mu = usb_get_intfdata(iface);
	/* Delegate to the bus-generic handler */
	return i2400m_post_reset(&i2400mu->i2400m);
}
/*
 * Devices this driver handles: the published I6050/I6150/I6250 IDs
 * plus a handful of raw Intel (8086) product IDs for 5x50/6x50
 * variants.
 *
 * Fix: 8086:1403 was listed twice; the USB core only ever matches the
 * first occurrence, so the redundant duplicate entry was dropped.
 */
static
struct usb_device_id i2400mu_id_table[] = {
	{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
	{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
	{ USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) },
	{ USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) },
	{ USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) },
	{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
	{ USB_DEVICE(0x8086, 0x0181) },
	{ USB_DEVICE(0x8086, 0x1403) },
	{ USB_DEVICE(0x8086, 0x1405) },
	{ USB_DEVICE(0x8086, 0x0180) },
	{ USB_DEVICE(0x8086, 0x0182) },
	{ USB_DEVICE(0x8086, 0x1406) },
	{ },	/* terminator */
};
MODULE_DEVICE_TABLE(usb, i2400mu_id_table);
/*
 * USB driver glue; autosuspend is supported (see i2400mu_suspend() for
 * how autosuspend requests are distinguished from system suspend).
 */
static
struct usb_driver i2400mu_driver = {
	.name = KBUILD_MODNAME,
	.suspend = i2400mu_suspend,
	.resume = i2400mu_resume,
	.reset_resume = i2400mu_reset_resume,
	.probe = i2400mu_probe,
	.disconnect = i2400mu_disconnect,
	.pre_reset = i2400mu_pre_reset,
	.post_reset = i2400mu_post_reset,
	.id_table = i2400mu_id_table,
	.supports_autosuspend = 1,
};
/* Module init: apply the "debug" parameter, then register with the
 * USB core. */
static
int __init i2400mu_driver_init(void)
{
	d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400mu_debug_params,
		       "i2400m_usb.debug");
	return usb_register(&i2400mu_driver);
}
module_init(i2400mu_driver_init);


/* Module exit: unregister from the USB core (triggers disconnect for
 * any bound devices). */
static
void __exit i2400mu_driver_exit(void)
{
	usb_deregister(&i2400mu_driver);
}
module_exit(i2400mu_driver_exit);
MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M "
		   "(5x50 & 6050)");
MODULE_LICENSE("GPL");
/* Declare the firmware images so userspace tooling can bundle them */
MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_5);
MODULE_FIRMWARE(I6050U_FW_FILE_NAME_v1_5);
| gpl-2.0 |
alexfeinman/nv-tegra-21.1 | arch/arm/mach-omap2/omap_twl.c | 2732 | 9072 | /**
* OMAP and TWL PMIC specific intializations.
*
* Copyright (C) 2010 Texas Instruments Incorporated.
* Thara Gopinath
* Copyright (C) 2009 Texas Instruments Incorporated.
* Nishanth Menon
* Copyright (C) 2009 Nokia Corporation
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/i2c/twl.h>
#include "soc.h"
#include "voltage.h"
#include "pm.h"
/* OMAP3: TWL4030 SmartReflex I2C slave address and voltage registers */
#define OMAP3_SRI2C_SLAVE_ADDR		0x12
#define OMAP3_VDD_MPU_SR_CONTROL_REG	0x00
#define OMAP3_VDD_CORE_SR_CONTROL_REG	0x01
#define OMAP3_VP_CONFIG_ERROROFFSET	0x00
#define OMAP3_VP_VSTEPMIN_VSTEPMIN	0x1
#define OMAP3_VP_VSTEPMAX_VSTEPMAX	0x04
#define OMAP3_VP_VLIMITTO_TIMEOUT_US	200

/* OMAP4: TWL6030 SmartReflex I2C slave address and per-domain
 * voltage/command registers */
#define OMAP4_SRI2C_SLAVE_ADDR		0x12
#define OMAP4_VDD_MPU_SR_VOLT_REG	0x55
#define OMAP4_VDD_MPU_SR_CMD_REG	0x56
#define OMAP4_VDD_IVA_SR_VOLT_REG	0x5B
#define OMAP4_VDD_IVA_SR_CMD_REG	0x5C
#define OMAP4_VDD_CORE_SR_VOLT_REG	0x61
#define OMAP4_VDD_CORE_SR_CMD_REG	0x62

#define OMAP4_VP_CONFIG_ERROROFFSET	0x00
#define OMAP4_VP_VSTEPMIN_VSTEPMIN	0x01
#define OMAP4_VP_VSTEPMAX_VSTEPMAX	0x04
#define OMAP4_VP_VLIMITTO_TIMEOUT_US	200

/* Cached TWL6030 SMPS_OFFSET efuse value; read once on first
 * vsel/uV conversion (see twl6030_vsel_to_uv()) */
static bool is_offset_valid;
static u8 smps_offset;
/*
 * Flag to ensure Smartreflex bit in TWL
 * being cleared in board file is not overwritten.
 */
static bool __initdata twl_sr_enable_autoinit;

#define TWL4030_DCDC_GLOBAL_CFG	0x06
#define REG_SMPS_OFFSET		0xE0
#define SMARTREFLEX_ENABLE	BIT(3)
/*
 * twl4030_vsel_to_uv() - convert a TWL4030 SMPS vsel code to microvolts
 *
 * The regulator starts at 600 mV and steps in 12.5 mV increments, so
 * uV = 600000 + vsel * 12500 -- the same value the previous nested
 * expression (((vsel * 125) + 6000)) * 100 produced.
 */
static unsigned long twl4030_vsel_to_uv(const u8 vsel)
{
	return 600000 + (unsigned long)vsel * 12500;
}
/*
 * twl4030_uv_to_vsel() - convert microvolts to a TWL4030 SMPS vsel code
 *
 * Inverse of twl4030_vsel_to_uv(): 600 mV base, 12.5 mV steps, rounded
 * up so the selected voltage never undershoots the request.
 *
 * NOTE(review): uv < 600000 would underflow the unsigned subtraction;
 * callers are presumed to stay within the vddmin/vddmax bounds of the
 * PMIC tables below -- confirm before reusing elsewhere.
 */
static u8 twl4030_uv_to_vsel(unsigned long uv)
{
	return DIV_ROUND_UP(uv - 600000, 12500);
}
/*
 * twl6030_vsel_to_uv() - convert a TWL6030 SMPS vsel code to microvolts
 */
static unsigned long twl6030_vsel_to_uv(const u8 vsel)
{
	/*
	 * In TWL6030 depending on the value of SMPS_OFFSET
	 * efuse register the voltage range supported in
	 * standard mode can be either between 0.6V - 1.3V or
	 * 0.7V - 1.4V. In TWL6030 ES1.0 SMPS_OFFSET efuse
	 * is programmed to all 0's where as starting from
	 * TWL6030 ES1.1 the efuse is programmed to 1
	 */
	if (!is_offset_valid) {
		/* Read and cache the efuse only once */
		twl_i2c_read_u8(TWL6030_MODULE_ID0, &smps_offset,
				REG_SMPS_OFFSET);
		is_offset_valid = true;
	}

	if (!vsel)
		return 0;
	/*
	 * There is no specific formula for voltage to vsel
	 * conversion above 1.3V. There are special hardcoded
	 * values for voltages above 1.3V. Currently we are
	 * hardcoding only for 1.35 V which is used for 1GHz OPP for
	 * OMAP4430.
	 */
	if (vsel == 0x3A)
		return 1350000;

	/* 12.66 mV steps; the base depends on the efuse offset bit */
	if (smps_offset & 0x8)
		return ((((vsel - 1) * 1266) + 70900)) * 10;
	else
		return ((((vsel - 1) * 1266) + 60770)) * 10;
}
/*
 * twl6030_uv_to_vsel() - convert microvolts to a TWL6030 SMPS vsel code
 *
 * Inverse of twl6030_vsel_to_uv(); out-of-range requests above the
 * 1.35 V special case are clamped to vsel 0x3A with an error message.
 */
static u8 twl6030_uv_to_vsel(unsigned long uv)
{
	/*
	 * In TWL6030 depending on the value of SMPS_OFFSET
	 * efuse register the voltage range supported in
	 * standard mode can be either between 0.6V - 1.3V or
	 * 0.7V - 1.4V. In TWL6030 ES1.0 SMPS_OFFSET efuse
	 * is programmed to all 0's where as starting from
	 * TWL6030 ES1.1 the efuse is programmed to 1
	 */
	if (!is_offset_valid) {
		/* Read and cache the efuse only once */
		twl_i2c_read_u8(TWL6030_MODULE_ID0, &smps_offset,
				REG_SMPS_OFFSET);
		is_offset_valid = true;
	}

	if (!uv)
		return 0x00;
	/*
	 * There is no specific formula for voltage to vsel
	 * conversion above 1.3V. There are special hardcoded
	 * values for voltages above 1.3V. Currently we are
	 * hardcoding only for 1.35 V which is used for 1GHz OPP for
	 * OMAP4430.
	 */
	if (uv > twl6030_vsel_to_uv(0x39)) {
		if (uv == 1350000)
			return 0x3A;
		pr_err("%s:OUT OF RANGE! non mapped vsel for %ld Vs max %ld\n",
		       __func__, uv, twl6030_vsel_to_uv(0x39));
		return 0x3A;
	}

	/* 12.66 mV steps; base depends on the efuse offset bit */
	if (smps_offset & 0x8)
		return DIV_ROUND_UP(uv - 709000, 12660) + 1;
	else
		return DIV_ROUND_UP(uv - 607700, 12660) + 1;
}
/* OMAP3 voltage-domain PMIC descriptors (TWL4030): MPU/IVA domain */
static struct omap_voltdm_pmic omap3_mpu_pmic = {
	.slew_rate		= 4000,
	.step_size		= 12500,
	.vp_erroroffset		= OMAP3_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin		= OMAP3_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax		= OMAP3_VP_VSTEPMAX_VSTEPMAX,
	.vddmin			= 600000,
	.vddmax			= 1450000,
	.vp_timeout_us		= OMAP3_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr		= OMAP3_SRI2C_SLAVE_ADDR,
	.volt_reg_addr		= OMAP3_VDD_MPU_SR_CONTROL_REG,
	.i2c_high_speed		= true,
	.vsel_to_uv		= twl4030_vsel_to_uv,
	.uv_to_vsel		= twl4030_uv_to_vsel,
};

/* OMAP3 CORE domain: same electrical parameters, different SR register */
static struct omap_voltdm_pmic omap3_core_pmic = {
	.slew_rate		= 4000,
	.step_size		= 12500,
	.vp_erroroffset		= OMAP3_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin		= OMAP3_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax		= OMAP3_VP_VSTEPMAX_VSTEPMAX,
	.vddmin			= 600000,
	.vddmax			= 1450000,
	.vp_timeout_us		= OMAP3_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr		= OMAP3_SRI2C_SLAVE_ADDR,
	.volt_reg_addr		= OMAP3_VDD_CORE_SR_CONTROL_REG,
	.i2c_high_speed		= true,
	.vsel_to_uv		= twl4030_vsel_to_uv,
	.uv_to_vsel		= twl4030_uv_to_vsel,
};

/* OMAP4 voltage-domain PMIC descriptors (TWL6030): MPU domain */
static struct omap_voltdm_pmic omap4_mpu_pmic = {
	.slew_rate		= 4000,
	.step_size		= 12660,
	.vp_erroroffset		= OMAP4_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin		= OMAP4_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax		= OMAP4_VP_VSTEPMAX_VSTEPMAX,
	.vddmin			= 0,
	.vddmax			= 2100000,
	.vp_timeout_us		= OMAP4_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr		= OMAP4_SRI2C_SLAVE_ADDR,
	.volt_reg_addr		= OMAP4_VDD_MPU_SR_VOLT_REG,
	.cmd_reg_addr		= OMAP4_VDD_MPU_SR_CMD_REG,
	.i2c_high_speed		= true,
	.i2c_pad_load		= 3,
	.vsel_to_uv		= twl6030_vsel_to_uv,
	.uv_to_vsel		= twl6030_uv_to_vsel,
};

/* OMAP4 IVA domain */
static struct omap_voltdm_pmic omap4_iva_pmic = {
	.slew_rate		= 4000,
	.step_size		= 12660,
	.vp_erroroffset		= OMAP4_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin		= OMAP4_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax		= OMAP4_VP_VSTEPMAX_VSTEPMAX,
	.vddmin			= 0,
	.vddmax			= 2100000,
	.vp_timeout_us		= OMAP4_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr		= OMAP4_SRI2C_SLAVE_ADDR,
	.volt_reg_addr		= OMAP4_VDD_IVA_SR_VOLT_REG,
	.cmd_reg_addr		= OMAP4_VDD_IVA_SR_CMD_REG,
	.i2c_high_speed		= true,
	.i2c_pad_load		= 3,
	.vsel_to_uv		= twl6030_vsel_to_uv,
	.uv_to_vsel		= twl6030_uv_to_vsel,
};

/* OMAP4 CORE domain */
static struct omap_voltdm_pmic omap4_core_pmic = {
	.slew_rate		= 4000,
	.step_size		= 12660,
	.vp_erroroffset		= OMAP4_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin		= OMAP4_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax		= OMAP4_VP_VSTEPMAX_VSTEPMAX,
	.vddmin			= 0,
	.vddmax			= 2100000,
	.vp_timeout_us		= OMAP4_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr		= OMAP4_SRI2C_SLAVE_ADDR,
	.volt_reg_addr		= OMAP4_VDD_CORE_SR_VOLT_REG,
	.cmd_reg_addr		= OMAP4_VDD_CORE_SR_CMD_REG,
	.i2c_high_speed		= true,
	.i2c_pad_load		= 3,
	.vsel_to_uv		= twl6030_vsel_to_uv,
	.uv_to_vsel		= twl6030_uv_to_vsel,
};
/*
 * Register the TWL6030 PMIC descriptors with the OMAP4 voltage
 * domains (mpu, iva, core).
 *
 * Returns 0 on success, -ENODEV if not running on an OMAP4.
 */
int __init omap4_twl_init(void)
{
	static const struct {
		const char *vdd;
		struct omap_voltdm_pmic *pmic;
	} pmics[] = {
		{ "mpu",  &omap4_mpu_pmic },
		{ "iva",  &omap4_iva_pmic },
		{ "core", &omap4_core_pmic },
	};
	int i;

	if (!cpu_is_omap44xx())
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(pmics); i++)
		omap_voltage_register_pmic(voltdm_lookup(pmics[i].vdd),
					   pmics[i].pmic);

	return 0;
}
/*
 * Register the TWL4030 PMIC descriptors with the OMAP3 voltage
 * domains (mpu_iva, core), enabling the TWL's SmartReflex bit first
 * unless a board file has already configured it.
 *
 * Returns 0 on success, -ENODEV if not running on an OMAP3.
 */
int __init omap3_twl_init(void)
{
	struct voltagedomain *voltdm;

	if (!cpu_is_omap34xx())
		return -ENODEV;

	/*
	 * The smartreflex bit on twl4030 specifies if the setting of voltage
	 * is done over the I2C_SR path. Since this setting is independent of
	 * the actual usage of smartreflex AVS module, we enable TWL SR bit
	 * by default irrespective of whether smartreflex AVS module is enabled
	 * on the OMAP side or not. This is because without this bit enabled,
	 * the voltage scaling through vp forceupdate/bypass mechanism of
	 * voltage scaling will not function on TWL over I2C_SR.
	 */
	if (!twl_sr_enable_autoinit)
		omap3_twl_set_sr_bit(true);

	voltdm = voltdm_lookup("mpu_iva");
	omap_voltage_register_pmic(voltdm, &omap3_mpu_pmic);
	voltdm = voltdm_lookup("core");
	omap_voltage_register_pmic(voltdm, &omap3_core_pmic);

	return 0;
}
/**
 * omap3_twl_set_sr_bit() - Set/Clear SR bit on TWL
 * @enable: enable SR mode in twl or not
 *
 * If 'enable' is true, enables Smartreflex bit on TWL 4030 to make sure
 * voltage scaling through OMAP SR works. Else, the smartreflex bit
 * on twl4030 is cleared as there are platforms which use OMAP3 and T2 but
 * use Synchronized Scaling Hardware Strategy (ENABLE_VMODE=1) and Direct
 * Strategy Software Scaling Mode (ENABLE_VMODE=0), for setting the voltages,
 * in those scenarios this bit is to be cleared (enable = false).
 *
 * Returns 0 on success, error is returned if I2C read/write fails.
 */
int __init omap3_twl_set_sr_bit(bool enable)
{
	u8 cfg;
	int ret;

	if (twl_sr_enable_autoinit)
		pr_warning("%s: unexpected multiple calls\n", __func__);

	/* Read-modify-write the DCDC global configuration register */
	ret = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &cfg,
			      TWL4030_DCDC_GLOBAL_CFG);
	if (!ret) {
		if (enable)
			cfg |= SMARTREFLEX_ENABLE;
		else
			cfg &= ~SMARTREFLEX_ENABLE;
		ret = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, cfg,
				       TWL4030_DCDC_GLOBAL_CFG);
	}
	if (ret) {
		pr_err("%s: Error access to TWL4030 (%d)\n", __func__, ret);
		return ret;
	}

	twl_sr_enable_autoinit = true;
	return 0;
}
| gpl-2.0 |
RC-MODULE/linux-3.10.x | drivers/staging/speakup/speakup_dectlk.c | 2988 | 8885 | /*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
 * specifically written as a driver for the speakup screen-review
 * package; it is not a general device driver.
*/
#include <linux/unistd.h>
#include <linux/proc_fs.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include "speakup.h"
#include "spk_priv.h"
#include "serialio.h"
#define DRV_VERSION "2.20"
#define SYNTH_CLEAR 0x03
#define PROCSPEECH 0x0b
/* Flow-control state: presumably set/cleared by read_buff_add() on
 * XOFF/XON from the synth -- confirm against the rest of this file. */
static int xoff;

/* Non-zero while the device has flow-controlled us ("buffer full"). */
static inline int synth_full(void)
{
	return xoff;
}

static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
static void read_buff_add(u_char c);
static unsigned char get_index(void);

static int in_escape;		/* inside a DECtalk "[...]" command sequence */
static int is_flushing;		/* a flush is in flight */

/*
 * Fix: the lock was declared as a bare "static spinlock_t" and never
 * passed to spin_lock_init(), which is an invalid (uninitialized) lock
 * under CONFIG_DEBUG_SPINLOCK/lockdep.  DEFINE_SPINLOCK statically
 * initializes it.
 */
static DEFINE_SPINLOCK(flush_lock);
static DECLARE_WAIT_QUEUE_HEAD(flush);
/*
 * Tunable synthesizer settings exposed via /sys/accessibility/speakup.
 * Each entry pairs a speakup variable id with the DECtalk command string
 * used to program it (%d/%c are filled in with the current value).
 */
static struct var_t vars[] = {
	{ CAPS_START, .u.s = {"[:dv ap 160] " } },	/* raise pitch for capitals */
	{ CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },	/* restore pitch */
	{ RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
	{ PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
	{ VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
	{ PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
	{ VOICE, .u.n = {"[:n%c] ", 0, 0, 9, 0, 0, "phfdburwkv" } },
	{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
	V_LAST_VAR
};
/*
 * These attributes will appear in /sys/accessibility/speakup/dectlk.
 * USER_RW entries are ordinary user tunables; the ROOT_W timing knobs
 * are writable by root only.
 */
static struct kobj_attribute caps_start_attribute =
	__ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
	__ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
	__ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
	__ATTR(punct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
	__ATTR(rate, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
	__ATTR(voice, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
	__ATTR(vol, USER_RW, spk_var_show, spk_var_store);

static struct kobj_attribute delay_time_attribute =
	__ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
	__ATTR(direct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
	__ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
	__ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
	__ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);

/*
 * Create a group of attributes so that we can create and destroy them all
 * at once.
 */
static struct attribute *synth_attrs[] = {
	&caps_start_attribute.attr,
	&caps_stop_attribute.attr,
	&pitch_attribute.attr,
	&punct_attribute.attr,
	&rate_attribute.attr,
	&voice_attribute.attr,
	&vol_attribute.attr,
	&delay_time_attribute.attr,
	&direct_attribute.attr,
	&full_time_attribute.attr,
	&jiffy_delta_attribute.attr,
	&trigger_time_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};
/* Per-voice defaults for pitch (ap) and volume (g5), indexed by VOICE. */
static int ap_defaults[] = {122, 89, 155, 110, 208, 240, 200, 106, 306};
static int g5_defaults[] = {86, 81, 86, 84, 81, 80, 83, 83, 73};

/* Speakup synthesizer description for the DECtalk Express. */
static struct spk_synth synth_dectlk = {
	.name = "dectlk",
	.version = DRV_VERSION,
	.long_name = "Dectalk Express",
	.init = "[:error sp :name paul :rate 180 :tsr off] ",
	.procspeech = PROCSPEECH,
	.clear = SYNTH_CLEAR,
	.delay = 500,
	.trigger = 50,
	.jiffies = 50,
	.full = 40000,
	.startup = SYNTH_START,
	.checkval = SYNTH_CHECK,
	.vars = vars,
	.default_pitch = ap_defaults,
	.default_vol = g5_defaults,
	.probe = spk_serial_synth_probe,
	.release = spk_serial_release,
	.synth_immediate = spk_synth_immediate,
	.catch_up = do_catch_up,
	.flush = synth_flush,
	.is_alive = spk_synth_is_alive_restart,
	.synth_adjust = NULL,
	.read_buff_add = read_buff_add,
	.get_index = get_index,
	.indexing = {
		.command = "[:in re %d ] ",
		.lowindex = 1,
		.highindex = 8,
		.currindex = 1,
	},
	.attributes = {
		.attrs = synth_attrs,
		.name = "dectlk",
	},
};
/*
 * If *ch is an ASCII digit, convert it in place to its numeric value
 * (0-9) and return 1; otherwise leave it untouched and return 0.
 */
static int is_indnum(u_char *ch)
{
	if (*ch < '0' || *ch > '9')
		return 0;
	*ch -= '0';
	return 1;
}
/* Index marker most recently parsed from the synth's output stream. */
static u_char lastind;

/*
 * Report the last index received from the synthesizer, clearing it so
 * each index is only ever reported once.
 */
static unsigned char get_index(void)
{
	u_char tmp = lastind;

	lastind = 0;
	return tmp;
}
/*
 * Consume one byte received from the synthesizer.  Handles:
 *   0x01 (ctrl-a): flush acknowledgement - wakes up do_catch_up();
 *   0x13/0x11 (XOFF/XON): flow control, sets/clears the "full" flag;
 *   digits: accumulated into a decimal index number;
 *   other printable chars: terminate a pending index (saved in lastind).
 */
static void read_buff_add(u_char c)
{
	static int ind = -1;	/* index being accumulated, -1 = none */

	if (c == 0x01) {	/* synth finished flushing */
		unsigned long flags;
		spin_lock_irqsave(&flush_lock, flags);
		is_flushing = 0;
		wake_up_interruptible(&flush);
		spin_unlock_irqrestore(&flush_lock, flags);
	} else if (c == 0x13) {		/* XOFF: device buffer full */
		xoff = 1;
	} else if (c == 0x11) {		/* XON: device ready again */
		xoff = 0;
	} else if (is_indnum(&c)) {	/* build up a multi-digit index */
		if (ind == -1)
			ind = c;
		else
			ind = ind * 10 + c;
	} else if ((c > 31) && (c < 127)) {	/* printable: index complete */
		if (ind != -1)
			lastind = (u_char)ind;
		ind = -1;
	}
}
/*
 * Main output loop, run from the speakup kthread: drain the speakup
 * buffer to the serial port.  Honours XON/XOFF (synth_full()), waits up
 * to 4 seconds total for a pending flush acknowledgement, and sends
 * PROCSPEECH at punctuation/timer boundaries so the synth starts
 * speaking buffered text.
 */
static void do_catch_up(struct spk_synth *synth)
{
	int synth_full_val = 0;
	static u_char ch;
	static u_char last = '\0';
	unsigned long flags;
	unsigned long jiff_max;
	unsigned long timeout = msecs_to_jiffies(4000);
	DEFINE_WAIT(wait);
	struct var_t *jiffy_delta;
	struct var_t *delay_time;
	int jiffy_delta_val;
	int delay_time_val;

	jiffy_delta = spk_get_var(JIFFY);
	delay_time = spk_get_var(DELAY);

	spk_lock(flags);
	jiffy_delta_val = jiffy_delta->u.n.value;
	spk_unlock(flags);
	jiff_max = jiffies + jiffy_delta_val;

	while (!kthread_should_stop()) {
		/* if no ctl-a in 4, send data anyway */
		spin_lock_irqsave(&flush_lock, flags);
		while (is_flushing && timeout) {
			prepare_to_wait(&flush, &wait, TASK_INTERRUPTIBLE);
			spin_unlock_irqrestore(&flush_lock, flags);
			timeout = schedule_timeout(timeout);
			spin_lock_irqsave(&flush_lock, flags);
		}
		finish_wait(&flush, &wait);
		is_flushing = 0;
		spin_unlock_irqrestore(&flush_lock, flags);

		spk_lock(flags);
		if (speakup_info.flushing) {
			speakup_info.flushing = 0;
			spk_unlock(flags);
			synth->flush(synth);
			continue;
		}
		if (synth_buffer_empty()) {
			spk_unlock(flags);
			break;
		}
		ch = synth_buffer_peek();
		set_current_state(TASK_INTERRUPTIBLE);
		delay_time_val = delay_time->u.n.value;
		synth_full_val = synth_full();
		spk_unlock(flags);
		if (ch == '\n')
			ch = 0x0D;
		if (synth_full_val || !spk_serial_out(ch)) {
			/* device full or write failed: back off and retry */
			schedule_timeout(msecs_to_jiffies(delay_time_val));
			continue;
		}
		set_current_state(TASK_RUNNING);
		spk_lock(flags);
		synth_buffer_getc();
		spk_unlock(flags);
		if (ch == '[')
			in_escape = 1;
		else if (ch == ']')
			in_escape = 0;
		else if (ch <= SPACE) {
			/*
			 * Guard against last == '\0' (its initial value):
			 * strchr() matches the string's NUL terminator, so
			 * the unguarded test wrongly reported a sentence
			 * break before the very first character.
			 */
			if (!in_escape && last && strchr(",.!?;:", last))
				spk_serial_out(PROCSPEECH);
			if (jiffies >= jiff_max) {
				if (!in_escape)
					spk_serial_out(PROCSPEECH);
				spk_lock(flags);
				jiffy_delta_val = jiffy_delta->u.n.value;
				delay_time_val = delay_time->u.n.value;
				spk_unlock(flags);
				schedule_timeout(msecs_to_jiffies
						 (delay_time_val));
				jiff_max = jiffies + jiffy_delta_val;
			}
		}
		last = ch;
	}
	if (!in_escape)
		spk_serial_out(PROCSPEECH);
}
/*
 * Abort pending speech: close any open command sequence so the synth
 * doesn't error out, mark a flush in progress (cleared again by the
 * ctrl-a acknowledgement handled in read_buff_add()), then send the
 * clear code.
 */
static void synth_flush(struct spk_synth *synth)
{
	if (in_escape) {
		/* if in command output ']' so we don't get an error */
		spk_serial_out(']');
	}
	in_escape = 0;
	/*
	 * NOTE(review): is_flushing is written here without flush_lock;
	 * presumably serialized with do_catch_up() by the caller - confirm.
	 */
	is_flushing = 1;
	spk_serial_out(SYNTH_CLEAR);
}
/* Module parameters: serial port number and autostart flag. */
module_param_named(ser, synth_dectlk.ser, int, S_IRUGO);
module_param_named(start, synth_dectlk.startup, short, S_IRUGO);

MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");

/* Register/unregister this synth driver with the speakup core. */
static int __init dectlk_init(void)
{
	return synth_add(&synth_dectlk);
}

static void __exit dectlk_exit(void)
{
	synth_remove(&synth_dectlk);
}

module_init(dectlk_init);
module_exit(dectlk_exit);
MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DECtalk Express synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
GuneetAtwal/Blaze.Kernel-MT6589 | lib/debugobjects.c | 4780 | 26845 | /*
* Generic infrastructure for lifetime debugging of objects.
*
* Started by Thomas Gleixner
*
* Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
*
* For licencing details see kernel-base/COPYING
*/
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
/* Hash table geometry and free-object pool sizing. */
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

/* Object addresses are hashed at page ("chunk") granularity. */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: list of tracked objects plus its own lock. */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static boot-time pool, used until the slab allocator is available. */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

/* Pool accounting; all protected by pool_lock. */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

/* Statistics exported via debugfs. */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Set only while the self test runs, to suppress its warnings. */
static struct debug_obj_descr	*descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

/* Boot-time switches: "debug_objects" / "no_debug_objects". */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

/* Human readable state names, used in warnings. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
/*
 * Refill the free-object pool up to ODEBUG_POOL_MIN_LEVEL from the slab
 * cache.  Uses GFP_ATOMIC/__GFP_NORETRY because this may run in any
 * context; silently stops on allocation failure.  Returns the current
 * number of free pool objects.
 */
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	/* Before obj_cache exists only the static boot pool is usable. */
	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}
/*
 * Lookup an object in the hash bucket.  Walks the bucket's hlist for a
 * tracked object whose address matches @addr, recording the longest
 * chain seen for the debugfs statistics.  Returns NULL when @addr is
 * not tracked.  Caller must hold the bucket lock.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		/* Take the first free object and link it into the bucket. */
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Track usage high-water marks for the debugfs stats. */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
/*
 * workqueue function to free objects.  Trims the pool back down to
 * ODEBUG_POOL_SIZE, releasing surplus objects to the slab cache.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up() && !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}
/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.  Empty every hash bucket and return the tracking objects
 * to the pool (tracking itself is already disabled by the caller).
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole bucket under its lock. */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket: all
 * objects on the same ODEBUG_CHUNK_SIZE chunk share one bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	return &obj_hash[hash_long(addr >> ODEBUG_CHUNK_SHIFT,
				   ODEBUG_HASH_BITS)];
}
/*
 * Emit a warning (rate limited to 5) about an object state violation.
 * Warnings are suppressed for the selftest descriptor but still counted
 * in debug_objects_warnings.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output: invoke the object type's fixup callback, if any, and
 * account for performed fixups.  Returns non-zero when the callback
 * reports that it repaired the situation.
 */
static int
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	int fixed;

	if (!fixup)
		return 0;

	fixed = fixup(addr, state);
	debug_objects_fixups += fixed;
	return fixed;
}
/*
 * Warn (rate limited to 5) when an object's actual location (on or off
 * the task stack) disagrees with how it was annotated at init time.
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}
/*
 * Core of debug_object_init{,_on_stack}(): mark @addr initialized.
 * Allocates a tracking object on first sight (disabling the facility
 * on OOM), checks the stack annotation, and performs the INIT state
 * transition, calling fixup_init when the object was still active.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Opportunistically top up the free pool. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* init of an active object: let the type fixup repair it */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (debug_objects_enabled)
		__debug_object_init(addr, descr, 0);
}
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (debug_objects_enabled)
		__debug_object_init(addr, descr, 1);
}
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Transitions a tracked object to ACTIVE.  An untracked address is
 * assumed to be a statically initialized object and is handed to the
 * type's fixup_activate callback to decide.
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	/* Stand-in used only for printing when @addr is not tracked. */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* double activation: let the fixup try to repair */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			       ODEBUG_STATE_NOTAVAILABLE))
		debug_print_object(&o, "activate");
}
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Moves a tracked object to INACTIVE.  Complains when the object is
 * destroyed, untracked, or still has a non-zero extra usage state.
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: report it via a stand-in. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Marks a tracked object DESTROYED so that later init/activate attempts
 * are flagged.  An active object is handed to fixup_destroy first.
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* destroying an active object: let the fixup handle it */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Drops the tracking object back into the pool.  Freeing an object that
 * is still active is reported and passed to fixup_free.
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Used by callers that require @addr to be an initialized object.  An
 * untracked address is passed to fixup_assert_init, which decides
 * whether it is a legitimate statically initialized object.
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static.  Let the type specific
		 * code decide what to do.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 *
 * Advances the object's type-private "astate" from @expect to @next
 * under the bucket lock; any mismatch (or an untracked / non-active
 * object) is reported.
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Untracked object: report it via a stand-in. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
#ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan [address, address + size) for tracked objects.  Active objects
 * get a warning and their fixup_free callback (the bucket scan restarts
 * because the lock was dropped); everything else is untracked and
 * returned to the pool.  Called from the memory free paths.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	/* Number of whole chunks (== hash buckets) covering the range. */
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
/*
 * Check a memory range that is about to be freed for tracked objects.
 * No-op while the object debugging infrastructure is disabled.
 */
void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (!debug_objects_enabled)
		return;

	__debug_check_no_obj_freed(address, size);
}
#endif
#ifdef CONFIG_DEBUG_FS
/* debugfs: dump the tracker statistics (debug_objects/stats). */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	/* single_open() wires debug_stats_show() into the seq_file. */
	return single_open(filp, debug_stats_show, NULL);
}

/* File operations for the read-only debugfs stats file. */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Create <debugfs>/debug_objects/stats at initcall time. */
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	/* Remove the directory again if the file could not be created. */
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);
#else
static inline void debug_objects_init_debugfs(void) { }
#endif
#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* flag consulted by fixup_activate() */
	unsigned long	dummy2[3];
};

/* Forward declaration so the fixup callbacks can reference it. */
static __initdata struct debug_obj_descr descr_type_test;
/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/* deactivate first, then redo the init */
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/* statically initialized objects announce themselves */
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			return 0;
		}
		return 1;

	case ODEBUG_STATE_ACTIVE:
		/* double activation: deactivate, then activate again */
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}
/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/* deactivate first, then redo the destroy */
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/* deactivate first, then redo the free */
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}
/*
 * Verify that the selftest object is in @state and that the global
 * fixup/warning counters match the expected values.  On any mismatch
 * the facility is disabled and -EINVAL is returned.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Descriptor used only by the self test. */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The object the self test exercises. */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Run the tracker through its full state machine, checking the expected
 * state and fixup/warning counts after every step (any failure disables
 * the facility via check_results()).  The counters are restored at the
 * end so the selftest does not pollute the debugfs statistics.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Normal lifecycle: init -> activate -> deactivate -> destroy. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Double activation must warn and be fixed up. */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	/* Operations on a destroyed object warn but don't change state. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activation of an untracked, statically initialized object. */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory that contains an active object must be caught. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore counters so the selftest doesn't skew the stats. */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the poll list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
/*
 * Convert the statically allocated objects to dynamic ones:
 * pre-allocate one dynamic object per static pool slot, swap the pool
 * over with interrupts disabled, then replace every object already
 * tracked in the hash with a dynamic copy.
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	/* Allocation failed: release the partially built list. */
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
* Called after the kmem_caches are functional to setup a dedicated
* cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
* prevents that the debug code is called on kmem_cache_free() for the
* debug tracker objects to avoid recursive calls.
*/
void __init debug_objects_mem_init(void)
{
    if (!debug_objects_enabled)
        return;

    /*
     * SLAB_DEBUG_OBJECTS prevents the debug code from being re-entered
     * when the tracker objects themselves are kmem_cache_free()d.
     */
    obj_cache = kmem_cache_create("debug_objects_cache",
                                  sizeof (struct debug_obj), 0,
                                  SLAB_DEBUG_OBJECTS, NULL);

    /* No cache, or no dynamic objects: disable tracking entirely. */
    if (!obj_cache || debug_objects_replace_static_objects()) {
        debug_objects_enabled = 0;
        if (obj_cache)
            kmem_cache_destroy(obj_cache);
        printk(KERN_WARNING "ODEBUG: out of memory.\n");
    } else
        debug_objects_selftest();
}
| gpl-2.0 |
RolanDroid/lge_MonsterKernel-JB-Stock | drivers/media/dvb/dvb-usb/au6610.c | 5036 | 5910 | /*
* DVB USB Linux driver for Alcor Micro AU6610 DVB-T USB2.0.
*
* Copyright (C) 2006 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "au6610.h"
#include "zl10353.h"
#include "qt1010.h"
/* debug */
static int dvb_usb_au6610_debug;
module_param_named(debug, dvb_usb_au6610_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/*
 * Issue one AU6610 vendor control transfer.
 * @operation: AU6610_REQ_* request code.
 * @addr: 7-bit I2C address (shifted into the USB "value" field).
 * @wbuf/@wlen: 1 or 2 bytes packed into the USB "index" field; any other
 *              length is rejected with -EINVAL.
 * @rbuf: for read requests, receives the single result byte in rbuf[0].
 * Returns the usb_control_msg() result (negative errno on failure).
 */
static int au6610_usb_msg(struct dvb_usb_device *d, u8 operation, u8 addr,
                          u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
    int ret;
    u16 index;
    u8 *usb_buf;

    /*
     * allocate enough for all known requests,
     * read returns 5 and write 6 bytes
     */
    usb_buf = kmalloc(6, GFP_KERNEL);
    if (!usb_buf)
        return -ENOMEM;

    switch (wlen) {
    case 1:
        index = wbuf[0] << 8;
        break;
    case 2:
        index = wbuf[0] << 8;
        index += wbuf[1];
        break;
    default:
        warn("wlen = %x, aborting.", wlen);
        ret = -EINVAL;
        goto error;
    }

    ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), operation,
                          USB_TYPE_VENDOR|USB_DIR_IN, addr << 1, index,
                          usb_buf, 6, AU6610_USB_TIMEOUT);
    if (ret < 0)
        goto error;

    switch (operation) {
    case AU6610_REQ_I2C_READ:
    case AU6610_REQ_USB_READ:
        /* requested value is always 5th byte in buffer */
        rbuf[0] = usb_buf[4];
    }
error:
    kfree(usb_buf);
    return ret;
}
/*
 * Map an I2C transaction onto an AU6610 vendor request: a missing or
 * zero-length read buffer means a pure write, otherwise a write/read.
 */
static int au6610_i2c_msg(struct dvb_usb_device *d, u8 addr,
                          u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
    u8 req = (rbuf == NULL || rlen == 0) ? AU6610_REQ_I2C_WRITE
                                         : AU6610_REQ_I2C_READ;

    return au6610_usb_msg(d, req, addr, wbuf, wlen, rbuf, rlen);
}
/* I2C */
/*
 * i2c_algorithm ->master_xfer: handle up to two messages, fusing a
 * write followed by a read into a single combined USB transaction.
 * Returns the number of messages completed (i2c convention), or
 * -EINVAL / -EAGAIN on bad input or mutex interruption.
 */
static int au6610_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                           int num)
{
    struct dvb_usb_device *d = i2c_get_adapdata(adap);
    int i;

    /* Hardware supports at most a write + read pair. */
    if (num > 2)
        return -EINVAL;

    if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
        return -EAGAIN;

    for (i = 0; i < num; i++) {
        /* write/read request */
        if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
            if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf,
                               msg[i].len, msg[i+1].buf,
                               msg[i+1].len) < 0)
                break;
            i++;    /* the read message was consumed as well */
        } else if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf,
                                  msg[i].len, NULL, 0) < 0)
            break;
    }

    mutex_unlock(&d->i2c_mutex);
    return i;
}
/* Advertise plain I2C capability only (no SMBus emulation). */
static u32 au6610_i2c_func(struct i2c_adapter *adapter)
{
    return I2C_FUNC_I2C;
}
/* I2C algorithm backing the adapter registered by dvb-usb core. */
static struct i2c_algorithm au6610_i2c_algo = {
    .master_xfer   = au6610_i2c_xfer,
    .functionality = au6610_i2c_func,
};
/* Callbacks for DVB USB */
/* ZL10353 demodulator wiring: tuner sits behind the demod's I2C gate. */
static struct zl10353_config au6610_zl10353_config = {
    .demod_address = 0x0f,
    .no_tuner = 1,
    .parallel_ts = 1,
};
/* Attach the ZL10353 demod as the adapter's first (only) frontend. */
static int au6610_zl10353_frontend_attach(struct dvb_usb_adapter *adap)
{
    adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &au6610_zl10353_config,
                                     &adap->dev->i2c_adap);
    if (adap->fe_adap[0].fe == NULL)
        return -ENODEV;

    return 0;
}
/* QT1010 tuner I2C address on this board. */
static struct qt1010_config au6610_qt1010_config = {
    .i2c_address = 0x62
};
/* Bind the QT1010 tuner to the first frontend; NULL means no device. */
static int au6610_qt1010_tuner_attach(struct dvb_usb_adapter *adap)
{
    if (dvb_attach(qt1010_attach, adap->fe_adap[0].fe,
                   &adap->dev->i2c_adap, &au6610_qt1010_config) == NULL)
        return -ENODEV;
    return 0;
}
/* DVB USB Driver stuff */
static struct dvb_usb_device_properties au6610_properties;
/*
 * USB probe: register the dvb-usb device, then switch the interface to
 * the alternate setting that carries the isochronous TS endpoint.
 */
static int au6610_probe(struct usb_interface *intf,
                        const struct usb_device_id *id)
{
    struct dvb_usb_device *d;
    struct usb_host_interface *alt;
    int ret;

    /* The required altsetting must exist before we commit to the device. */
    if (intf->num_altsetting < AU6610_ALTSETTING_COUNT)
        return -ENODEV;

    ret = dvb_usb_device_init(intf, &au6610_properties, THIS_MODULE, &d,
                              adapter_nr);
    if (ret == 0) {
        alt = usb_altnum_to_altsetting(intf, AU6610_ALTSETTING);

        if (alt == NULL) {
            deb_info("%s: no alt found!\n", __func__);
            return -ENODEV;
        }
        ret = usb_set_interface(d->udev, alt->desc.bInterfaceNumber,
                                alt->desc.bAlternateSetting);
    }

    return ret;
}
static struct usb_device_id au6610_table [] = {
{ USB_DEVICE(USB_VID_ALCOR_MICRO, USB_PID_SIGMATEK_DVB_110) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, au6610_table);
static struct dvb_usb_device_properties au6610_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
.size_of_priv = 0,
.num_adapters = 1,
.adapter = {
{
.num_frontends = 1,
.fe = {{
.frontend_attach = au6610_zl10353_frontend_attach,
.tuner_attach = au6610_qt1010_tuner_attach,
.stream = {
.type = USB_ISOC,
.count = 5,
.endpoint = 0x82,
.u = {
.isoc = {
.framesperurb = 40,
.framesize = 942,
.interval = 1,
}
}
},
}},
}
},
.i2c_algo = &au6610_i2c_algo,
.num_device_descs = 1,
.devices = {
{
.name = "Sigmatek DVB-110 DVB-T USB2.0",
.cold_ids = {NULL},
.warm_ids = {&au6610_table[0], NULL},
},
}
};
static struct usb_driver au6610_driver = {
.name = "dvb_usb_au6610",
.probe = au6610_probe,
.disconnect = dvb_usb_device_exit,
.id_table = au6610_table,
};
module_usb_driver(au6610_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver for Alcor Micro AU6610 DVB-T USB2.0");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
| gpl-2.0 |
willizambrano01/Evolution_victara | arch/m68k/platform/68360/ints.c | 7340 | 4302 | /*
* linux/arch/$(ARCH)/platform/$(PLATFORM)/ints.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* Copyright (c) 2000 Michael Leslie <mleslie@lineo.com>
* Copyright (c) 1996 Roman Zippel
* Copyright (c) 1999 D. Jeff Dionne <jeff@uclinux.org>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/m68360.h>
/* from quicc/commproc.c: */
extern QUICC *pquicc;
extern void cpm_interrupt_init(void);
#define INTERNAL_IRQS (96)
/* assembler routines */
asmlinkage void system_call(void);
asmlinkage void buserr(void);
asmlinkage void trap(void);
asmlinkage void bad_interrupt(void);
asmlinkage void inthandler(void);
/* Enable an interrupt by setting its bit in the CPM interrupt mask. */
static void intc_irq_unmask(struct irq_data *d)
{
    pquicc->intr_cimr |= (1 << d->irq);
}

/* Disable an interrupt by clearing its bit in the CPM interrupt mask. */
static void intc_irq_mask(struct irq_data *d)
{
    pquicc->intr_cimr &= ~(1 << d->irq);
}

/* Acknowledge: write-one-to-clear into the in-service register. */
static void intc_irq_ack(struct irq_data *d)
{
    pquicc->intr_cisr = (1 << d->irq);
}
/* irq_chip for the MC68360 CPM interrupt controller. */
static struct irq_chip intc_irq_chip = {
    .name       = "M68K-INTC",
    .irq_mask   = intc_irq_mask,
    .irq_unmask = intc_irq_unmask,
    .irq_ack    = intc_irq_ack,
};
/*
* This function should be called during kernel startup to initialize
* the vector table.
*/
/*
 * This function should be called during kernel startup to initialize
 * the vector table.  Exception vectors 2..15 and the syscall trap are
 * installed first, then the CPM interrupt vectors relative to the
 * configured vector base, and finally all CPM interrupts are masked.
 */
void __init trap_init(void)
{
    int vba = (CPM_VECTOR_BASE<<4);

    /* set up the vectors */
    _ramvec[2] = buserr;
    _ramvec[3] = trap;
    _ramvec[4] = trap;
    _ramvec[5] = trap;
    _ramvec[6] = trap;
    _ramvec[7] = trap;
    _ramvec[8] = trap;
    _ramvec[9] = trap;
    _ramvec[10] = trap;
    _ramvec[11] = trap;
    _ramvec[12] = trap;
    _ramvec[13] = trap;
    _ramvec[14] = trap;
    _ramvec[15] = trap;

    _ramvec[32] = system_call;
    _ramvec[33] = trap;

    cpm_interrupt_init();

    /* set up CICR for vector base address and irq level */
    /* irl = 4, hp = 1f - see MC68360UM p 7-377 */
    pquicc->intr_cicr = 0x00e49f00 | vba;

    /* CPM interrupt vectors: (p 7-376) */
    _ramvec[vba+CPMVEC_ERROR] = bad_interrupt; /* Error */
    _ramvec[vba+CPMVEC_PIO_PC11] = inthandler; /* pio - pc11 */
    _ramvec[vba+CPMVEC_PIO_PC10] = inthandler; /* pio - pc10 */
    _ramvec[vba+CPMVEC_SMC2] = inthandler; /* smc2/pip */
    _ramvec[vba+CPMVEC_SMC1] = inthandler; /* smc1 */
    _ramvec[vba+CPMVEC_SPI] = inthandler; /* spi */
    _ramvec[vba+CPMVEC_PIO_PC9] = inthandler; /* pio - pc9 */
    _ramvec[vba+CPMVEC_TIMER4] = inthandler; /* timer 4 */
    _ramvec[vba+CPMVEC_RESERVED1] = inthandler; /* reserved */
    _ramvec[vba+CPMVEC_PIO_PC8] = inthandler; /* pio - pc8 */
    _ramvec[vba+CPMVEC_PIO_PC7] = inthandler; /* pio - pc7 */
    _ramvec[vba+CPMVEC_PIO_PC6] = inthandler; /* pio - pc6 */
    _ramvec[vba+CPMVEC_TIMER3] = inthandler; /* timer 3 */
    _ramvec[vba+CPMVEC_PIO_PC5] = inthandler; /* pio - pc5 */
    _ramvec[vba+CPMVEC_PIO_PC4] = inthandler; /* pio - pc4 */
    _ramvec[vba+CPMVEC_RESERVED2] = inthandler; /* reserved */
    _ramvec[vba+CPMVEC_RISCTIMER] = inthandler; /* timer table */
    _ramvec[vba+CPMVEC_TIMER2] = inthandler; /* timer 2 */
    _ramvec[vba+CPMVEC_RESERVED3] = inthandler; /* reserved */
    _ramvec[vba+CPMVEC_IDMA2] = inthandler; /* idma 2 */
    _ramvec[vba+CPMVEC_IDMA1] = inthandler; /* idma 1 */
    _ramvec[vba+CPMVEC_SDMA_CB_ERR] = inthandler; /* sdma channel bus error */
    _ramvec[vba+CPMVEC_PIO_PC3] = inthandler; /* pio - pc3 */
    _ramvec[vba+CPMVEC_PIO_PC2] = inthandler; /* pio - pc2 */
    /* _ramvec[vba+CPMVEC_TIMER1] = cpm_isr_timer1; */ /* timer 1 */
    _ramvec[vba+CPMVEC_TIMER1] = inthandler; /* timer 1 */
    _ramvec[vba+CPMVEC_PIO_PC1] = inthandler; /* pio - pc1 */
    _ramvec[vba+CPMVEC_SCC4] = inthandler; /* scc 4 */
    _ramvec[vba+CPMVEC_SCC3] = inthandler; /* scc 3 */
    _ramvec[vba+CPMVEC_SCC2] = inthandler; /* scc 2 */
    _ramvec[vba+CPMVEC_SCC1] = inthandler; /* scc 1 */
    _ramvec[vba+CPMVEC_PIO_PC0] = inthandler; /* pio - pc0 */

    /* turn off all CPM interrupts */
    pquicc->intr_cimr = 0x00000000;
}
/* Hook every IRQ up to the CPM controller with level-triggered handling. */
void init_IRQ(void)
{
    int irq;

    for (irq = 0; irq < NR_IRQS; irq++) {
        irq_set_chip(irq, &intc_irq_chip);
        irq_set_handler(irq, handle_level_irq);
    }
}
| gpl-2.0 |
juldiadia/kernel_stock_g3815 | drivers/staging/vt6656/michael.c | 8620 | 4387 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: michael.cpp
*
* Purpose: The implementation of LIST data structure.
*
* Author: Kyle Hsu
*
* Date: Sep 4, 2002
*
* Functions:
* s_dwGetUINT32 - Convert from BYTE[] to DWORD in a portable way
* s_vPutUINT32 - Convert from DWORD to BYTE[] in a portable way
* s_vClear - Reset the state to the empty message.
* s_vSetKey - Set the key.
* MIC_vInit - Set the key.
* s_vAppendByte - Append the byte to our word-sized buffer.
* MIC_vAppend - call s_vAppendByte.
* MIC_vGetMIC - Append the minimum padding and call s_vAppendByte.
*
* Revision History:
*
*/
#include "tmacro.h"
#include "michael.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
/*
* static DWORD s_dwGetUINT32(BYTE * p); Get DWORD from
* 4 bytes LSByte first
* static void s_vPutUINT32(BYTE* p, DWORD val); Put DWORD into
* 4 bytes LSByte first
*/
static void s_vClear(void); /* Clear the internal message,
* resets the object to the
* state just after construction. */
static void s_vSetKey(DWORD dwK0, DWORD dwK1);
static void s_vAppendByte(BYTE b); /* Add a single byte to the internal
* message */
/*--------------------- Export Variables --------------------------*/
static DWORD L, R; /* Current state */
static DWORD K0, K1; /* Key */
static DWORD M; /* Message accumulator (single word) */
static unsigned int nBytesInM; /* # bytes in M */
/*--------------------- Export Functions --------------------------*/
/*
static DWORD s_dwGetUINT32 (BYTE * p)
// Convert from BYTE[] to DWORD in a portable way
{
DWORD res = 0;
unsigned int i;
for (i = 0; i < 4; i++)
res |= (*p++) << (8*i);
return res;
}
static void s_vPutUINT32(BYTE *p, DWORD val)
// Convert from DWORD to BYTE[] in a portable way
{
unsigned int i;
for (i = 0; i < 4; i++) {
*p++ = (BYTE) (val & 0xff);
val >>= 8;
}
}
*/
static void s_vClear(void)
{
    /*
     * Return to the "empty message" state: no buffered bytes, and the
     * running (L, R) pair reloaded from the key words.
     */
    M = 0;
    nBytesInM = 0;
    L = K0;
    R = K1;
}
static void s_vSetKey(DWORD dwK0, DWORD dwK1)
{
    /* Store the two key words, then restart from an empty message. */
    K0 = dwK0;
    K1 = dwK1;

    s_vClear();
}
static void s_vAppendByte(BYTE b)
{
    /* Append the byte to our word-sized buffer */
    M |= b << (8*nBytesInM);
    nBytesInM++;
    /* Process the word if it is full. */
    if (nBytesInM >= 4) {
        /* Michael block function: XOR in the word, then the fixed
         * sequence of rotate/byte-swap/add rounds.  The statement
         * order is part of the algorithm and must not change. */
        L ^= M;
        R ^= ROL32(L, 17);
        L += R;
        /* Swap the bytes within each 16-bit half of L (XSWAP). */
        R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8);
        L += R;
        R ^= ROL32(L, 3);
        L += R;
        R ^= ROR32(L, 2);
        L += R;
        /* Clear the buffer */
        M = 0;
        nBytesInM = 0;
    }
}
/* Public entry point: install the MIC key and reset the state. */
void MIC_vInit(DWORD dwK0, DWORD dwK1)
{
    s_vSetKey(dwK0, dwK1);
}
void MIC_vUnInit(void)
{
    /* Wipe the key material ... */
    K1 = 0;
    K0 = 0;

    /*
     * ... and the working state too.  s_vClear() reloads (L, R) from
     * the now-zeroed (K0, K1), which is exactly what we want here.
     */
    s_vClear();
}
/* Feed nBytes of message data into the MIC, one byte at a time. */
void MIC_vAppend(PBYTE src, unsigned int nBytes)
{
    for (; nBytes != 0; nBytes--)
        s_vAppendByte(*src++);
}
/*
 * Finalize: append the standard Michael padding (0x5a then zeroes),
 * pad out to a word boundary, emit the (L, R) result and reset.
 */
void MIC_vGetMIC(PDWORD pdwL, PDWORD pdwR)
{
    int i;

    s_vAppendByte(0x5a);
    for (i = 0; i < 4; i++)
        s_vAppendByte(0);

    /* Zero-fill until the internal buffer flushes (word aligned). */
    while (nBytesInM != 0)
        s_vAppendByte(0);

    /* The block function has already folded everything into (L, R). */
    *pdwL = L;
    *pdwR = R;

    s_vClear();
}
| gpl-2.0 |
yangkkokk/linux | fs/configfs/symlink.c | 8620 | 7556 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* symlink.c - operations for configfs symlinks.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/configfs.h>
#include "configfs_internal.h"
/* Protects attachments of new symlinks */
DEFINE_MUTEX(configfs_symlink_mutex);
/* Number of ancestors between @item and the configfs root, inclusive
 * of @item itself. */
static int item_depth(struct config_item *item)
{
    struct config_item *cur = item->ci_parent;
    int levels = 1;

    while (cur && !configfs_is_root(cur)) {
        levels++;
        cur = cur->ci_parent;
    }
    return levels;
}
/* Byte count needed for @item's path: one '/' + name per component,
 * plus the trailing NUL. */
static int item_path_length(struct config_item *item)
{
    struct config_item *cur = item;
    int len = 1;    /* terminating NUL */

    for (;;) {
        len += strlen(config_item_name(cur)) + 1;  /* '/' + name */
        cur = cur->ci_parent;
        if (!cur || configfs_is_root(cur))
            break;
    }
    return len;
}
/*
 * Write @item's path (relative to the configfs root) into @buffer,
 * building it right-to-left so the deepest component lands at the end.
 * @length is the size computed by item_path_length(); the byte at
 * buffer[length-1] is the NUL terminator, which the caller is assumed
 * to have zeroed already (callers use a zeroed page).
 */
static void fill_item_path(struct config_item * item, char * buffer, int length)
{
    struct config_item * p;

    --length;   /* index of the NUL; components are written before it */
    for (p = item; p && !configfs_is_root(p); p = p->ci_parent) {
        int cur = strlen(config_item_name(p));

        /* back up enough to print this bus id with '/' */
        length -= cur;
        strncpy(buffer + length,config_item_name(p),cur);
        *(buffer + --length) = '/';
    }
}
/*
 * Allocate a configfs_symlink for @item, register it on the target's
 * link list and create the dentry under @parent_item.  Fails with
 * -ENOENT if the target is not fully attached yet or is being dropped,
 * -ENOMEM on allocation failure.  On any failure the item reference
 * and the symlink struct are released again.
 */
static int create_link(struct config_item *parent_item,
                       struct config_item *item,
                       struct dentry *dentry)
{
    struct configfs_dirent *target_sd = item->ci_dentry->d_fsdata;
    struct configfs_symlink *sl;
    int ret;

    ret = -ENOENT;
    /* Target must be fully attached before it can be linked to. */
    if (!configfs_dirent_is_ready(target_sd))
        goto out;
    ret = -ENOMEM;
    sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
    if (sl) {
        sl->sl_target = config_item_get(item);
        spin_lock(&configfs_dirent_lock);
        /* Re-check under the lock: target may be going away. */
        if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
            spin_unlock(&configfs_dirent_lock);
            config_item_put(item);
            kfree(sl);
            return -ENOENT;
        }
        list_add(&sl->sl_list, &target_sd->s_links);
        spin_unlock(&configfs_dirent_lock);
        ret = configfs_create_link(sl, parent_item->ci_dentry,
                                   dentry);
        if (ret) {
            /* Undo the link registration and drop our references. */
            spin_lock(&configfs_dirent_lock);
            list_del_init(&sl->sl_list);
            spin_unlock(&configfs_dirent_lock);
            config_item_put(item);
            kfree(sl);
        }
    }

out:
    return ret;
}
/*
 * Resolve @symname to a config_item.  On success (return 0) *target
 * holds a reference and @path stays held; the caller must drop both.
 * Returns -EPERM if the target lives on a different superblock than
 * @sb, -ENOENT if the dentry carries no config_item, or the
 * kern_path() error.
 */
static int get_target(const char *symname, struct path *path,
                      struct config_item **target, struct super_block *sb)
{
    int ret;

    ret = kern_path(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, path);
    if (!ret) {
        if (path->dentry->d_sb == sb) {
            *target = configfs_get_config_item(path->dentry);
            if (!*target) {
                ret = -ENOENT;
                path_put(path);
            }
        } else {
            /* Symlinks may not cross out of this configfs instance. */
            ret = -EPERM;
            path_put(path);
        }
    }

    return ret;
}
/*
 * ->symlink() for configfs: only allowed when the parent item's type
 * provides an allow_link() hook.  The actual link creation is
 * serialized by configfs_symlink_mutex; on failure after allow_link()
 * succeeded, drop_link() (if present) is called to undo it.
 */
int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
    int ret;
    struct path path;
    struct configfs_dirent *sd;
    struct config_item *parent_item;
    struct config_item *target_item = NULL;
    struct config_item_type *type;

    sd = dentry->d_parent->d_fsdata;
    /*
     * Fake invisibility if dir belongs to a group/default groups hierarchy
     * being attached
     */
    ret = -ENOENT;
    if (!configfs_dirent_is_ready(sd))
        goto out;

    parent_item = configfs_get_config_item(dentry->d_parent);
    type = parent_item->ci_type;

    /* Subsystems opt in to symlinks by implementing allow_link(). */
    ret = -EPERM;
    if (!type || !type->ct_item_ops ||
        !type->ct_item_ops->allow_link)
        goto out_put;

    ret = get_target(symname, &path, &target_item, dentry->d_sb);
    if (ret)
        goto out_put;

    ret = type->ct_item_ops->allow_link(parent_item, target_item);
    if (!ret) {
        mutex_lock(&configfs_symlink_mutex);
        ret = create_link(parent_item, target_item, dentry);
        mutex_unlock(&configfs_symlink_mutex);
        /* allow_link() succeeded but the link failed: notify the owner. */
        if (ret && type->ct_item_ops->drop_link)
            type->ct_item_ops->drop_link(parent_item,
                                         target_item);
    }

    config_item_put(target_item);
    path_put(&path);
out_put:
    config_item_put(parent_item);
out:
    return ret;
}
/*
 * ->unlink() for configfs: only symlink dirents may be removed this
 * way (-EPERM otherwise).  Tears down the dentry, notifies the owning
 * subsystem via drop_link(), unhooks the symlink from the target's
 * link list and drops all references.
 */
int configfs_unlink(struct inode *dir, struct dentry *dentry)
{
    struct configfs_dirent *sd = dentry->d_fsdata;
    struct configfs_symlink *sl;
    struct config_item *parent_item;
    struct config_item_type *type;
    int ret;

    ret = -EPERM;  /* What lack-of-symlink returns */
    if (!(sd->s_type & CONFIGFS_ITEM_LINK))
        goto out;

    sl = sd->s_element;

    parent_item = configfs_get_config_item(dentry->d_parent);
    type = parent_item->ci_type;

    spin_lock(&configfs_dirent_lock);
    list_del_init(&sd->s_sibling);
    spin_unlock(&configfs_dirent_lock);
    configfs_drop_dentry(sd, dentry->d_parent);
    dput(dentry);
    configfs_put(sd);

    /*
     * drop_link() must be called before
     * list_del_init(&sl->sl_list), so that the order of
     * drop_link(this, target) and drop_item(target) is preserved.
     */
    if (type && type->ct_item_ops &&
        type->ct_item_ops->drop_link)
        type->ct_item_ops->drop_link(parent_item,
                                     sl->sl_target);

    spin_lock(&configfs_dirent_lock);
    list_del_init(&sl->sl_list);
    spin_unlock(&configfs_dirent_lock);

    /* Put reference from create_link() */
    config_item_put(sl->sl_target);
    kfree(sl);

    config_item_put(parent_item);

    ret = 0;

out:
    return ret;
}
/*
 * Build the relative path from @item to @target into @path: one "../"
 * per level of @item's depth, followed by @target's absolute path
 * within configfs.  Returns -ENAMETOOLONG if it would exceed PATH_MAX.
 */
static int configfs_get_target_path(struct config_item * item, struct config_item * target,
                                    char *path)
{
    char * s;
    int depth, size;

    depth = item_depth(item);
    /* depth * 3 covers the "../" prefixes; -1 drops one '/' overlap. */
    size = item_path_length(target) + depth * 3 - 1;
    if (size > PATH_MAX)
        return -ENAMETOOLONG;

    pr_debug("%s: depth = %d, size = %d\n", __func__, depth, size);

    for (s = path; depth--; s += 3)
        strcpy(s,"../");

    fill_item_path(target, path, size);
    pr_debug("%s: path = '%s'\n", __func__, path);

    return 0;
}
/*
 * Fill @path with the symlink body for @dentry: the relative path from
 * the link's parent item to its target.  Serialized against renames by
 * configfs_rename_sem.
 */
static int configfs_getlink(struct dentry *dentry, char * path)
{
    struct config_item *item, *target_item;
    int error = 0;

    item = configfs_get_config_item(dentry->d_parent);
    if (!item)
        return -EINVAL;

    target_item = configfs_get_config_item(dentry);
    if (!target_item) {
        config_item_put(item);
        return -EINVAL;
    }

    down_read(&configfs_rename_sem);
    error = configfs_get_target_path(item, target_item, path);
    up_read(&configfs_rename_sem);

    config_item_put(item);
    config_item_put(target_item);
    return error;
}
/*
 * ->follow_link: render the link target into a freshly zeroed page and
 * hand it to the VFS via nd_set_link().  The page is returned as the
 * cookie so configfs_put_link() can free it; on failure the error is
 * stashed via nd_set_link(ERR_PTR(...)) and no cookie is returned.
 */
static void *configfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
    int error = -ENOMEM;
    unsigned long page = get_zeroed_page(GFP_KERNEL);

    if (page) {
        error = configfs_getlink(dentry, (char *)page);
        if (!error) {
            nd_set_link(nd, (char *)page);
            return (void *)page;
        }
    }

    nd_set_link(nd, ERR_PTR(error));
    return NULL;
}
/* ->put_link: release the page handed out by configfs_follow_link(). */
static void configfs_put_link(struct dentry *dentry, struct nameidata *nd,
                              void *cookie)
{
    if (!cookie)
        return;

    free_page((unsigned long)cookie);
}
/* inode_operations for configfs symlink inodes. */
const struct inode_operations configfs_symlink_inode_operations = {
    .follow_link = configfs_follow_link,
    .readlink = generic_readlink,
    .put_link = configfs_put_link,
    .setattr = configfs_setattr,
};
| gpl-2.0 |
UnORoms/SebastianFM-kernel | arch/x86/boot/tty.c | 12972 | 2430 | /* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2.
*
* ----------------------------------------------------------------------- */
/*
* Very simple screen and serial I/O
*/
#include "boot.h"
int early_serial_base;
#define XMTRDY 0x20
#define TXR 0 /* Transmit register (WRITE) */
#define LSR 5 /* Line Status */
/*
* These functions are in .inittext so they can be used to signal
* error during initialization.
*/
/* Write one byte to the early serial port, spinning (bounded) until
 * the transmitter is ready.  A stuck UART gives up after ~64k polls. */
static void __attribute__((section(".inittext"))) serial_putchar(int ch)
{
    unsigned timeout = 0xffff;

    while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
        cpu_relax();

    outb(ch, early_serial_base + TXR);
}
/* Write one character to the screen via BIOS int 10h, AH=0Eh (teletype). */
static void __attribute__((section(".inittext"))) bios_putchar(int ch)
{
    struct biosregs ireg;

    initregs(&ireg);
    ireg.bx = 0x0007;   /* page 0, attribute 7 (light grey) */
    ireg.cx = 0x0001;   /* one repetition */
    ireg.ah = 0x0e;     /* teletype output */
    ireg.al = ch;
    intcall(0x10, &ireg, NULL);
}
/* Emit a character to the screen and, if configured, the serial port.
 * '\n' is expanded to "\r\n" for terminal compatibility. */
void __attribute__((section(".inittext"))) putchar(int ch)
{
    if (ch == '\n')
        putchar('\r');  /* \n -> \r\n */

    bios_putchar(ch);

    if (early_serial_base != 0)
        serial_putchar(ch);
}
/* Print a NUL-terminated string through putchar() so the \r\n and
 * serial mirroring logic applies to every character. */
void __attribute__((section(".inittext"))) puts(const char *str)
{
    for (; *str; str++)
        putchar(*str);
}
/*
* Read the CMOS clock through the BIOS, and return the
* seconds in BCD.
*/
/* Read the RTC seconds via BIOS int 1Ah, AH=02h; returned in BCD (DH). */
static u8 gettime(void)
{
    struct biosregs ireg, oreg;

    initregs(&ireg);
    ireg.ah = 0x02;
    intcall(0x1a, &ireg, &oreg);

    return oreg.dh;
}
/*
* Read from the keyboard
*/
/* Blocking keyboard read via BIOS int 16h, AH=00h; returns the ASCII
 * code from AL (scan code in AH is discarded). */
int getchar(void)
{
    struct biosregs ireg, oreg;

    initregs(&ireg);
    /* ireg.ah = 0x00; */
    intcall(0x16, &ireg, &oreg);

    return oreg.al;
}
/* Non-blocking keystroke check via BIOS int 16h, AH=01h: ZF clear
 * means a key is waiting in the buffer. */
static int kbd_pending(void)
{
    struct biosregs ireg, oreg;

    initregs(&ireg);
    ireg.ah = 0x01;
    intcall(0x16, &ireg, &oreg);

    return !(oreg.eflags & X86_EFLAGS_ZF);
}
/* Drain any buffered keystrokes so a later prompt starts clean. */
void kbd_flush(void)
{
    while (kbd_pending())
        getchar();
}
/*
 * Wait up to ~30 seconds for a keystroke; returns the character, or 0
 * on timeout.  Elapsed time is measured by counting transitions of the
 * BIOS RTC seconds value (BCD, from gettime()), so each tick is one
 * wall-clock second.
 */
int getchar_timeout(void)
{
    int cnt = 30;
    int t0, t1;

    t0 = gettime();

    while (cnt) {
        if (kbd_pending())
            return getchar();

        t1 = gettime();
        if (t0 != t1) {
            cnt--;
            t0 = t1;
        }
    }

    return 0;   /* Timeout! */
}
| gpl-2.0 |
TeamEOS/kernel_moto_wingray | drivers/staging/ath6kl/os/linux/netbuf.c | 173 | 5347 | //------------------------------------------------------------------------------
// Copyright (c) 2004-2010 Atheros Communications Inc.
// All rights reserved.
//
//
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
//
//
// Author(s): ="Atheros"
//------------------------------------------------------------------------------
#include <a_config.h>
#include "athdefs.h"
#include "a_types.h"
#include "a_osapi.h"
#include "htc_packet.h"
#define AR6000_DATA_OFFSET 64
/* Thin wrappers mapping the ath6kl netbuf queue API onto sk_buff lists. */

/* Append a packet at the tail of the queue (FIFO order). */
void a_netbuf_enqueue(A_NETBUF_QUEUE_T *q, void *pkt)
{
    skb_queue_tail((struct sk_buff_head *) q, (struct sk_buff *) pkt);
}

/* Push a packet onto the head of the queue (requeue case). */
void a_netbuf_prequeue(A_NETBUF_QUEUE_T *q, void *pkt)
{
    skb_queue_head((struct sk_buff_head *) q, (struct sk_buff *) pkt);
}

/* Remove and return the packet at the head, or NULL if empty. */
void *a_netbuf_dequeue(A_NETBUF_QUEUE_T *q)
{
    return((void *) skb_dequeue((struct sk_buff_head *) q));
}

/* Number of packets currently queued. */
int a_netbuf_queue_size(A_NETBUF_QUEUE_T *q)
{
    return(skb_queue_len((struct sk_buff_head *) q));
}

/* Non-zero if the queue holds no packets. */
int a_netbuf_queue_empty(A_NETBUF_QUEUE_T *q)
{
    return(skb_queue_empty((struct sk_buff_head *) q));
}

/* Initialize an empty queue before first use. */
void a_netbuf_queue_init(A_NETBUF_QUEUE_T *q)
{
    skb_queue_head_init((struct sk_buff_head *) q);
}
void *
a_netbuf_alloc(int size)
{
struct sk_buff *skb;
size += 2 * (A_GET_CACHE_LINE_BYTES()); /* add some cacheline space at front and back of buffer */
skb = dev_alloc_skb(AR6000_DATA_OFFSET + sizeof(struct htc_packet) + size);
skb_reserve(skb, AR6000_DATA_OFFSET + sizeof(struct htc_packet) + A_GET_CACHE_LINE_BYTES());
return ((void *)skb);
}
/*
 * Allocate an SKB w.o. any encapsulation requirement.
 * Returns the raw skb as an opaque pointer (NULL on failure).
 */
void *
a_netbuf_alloc_raw(int size)
{
    struct sk_buff *skb;

    skb = dev_alloc_skb(size);

    return ((void *)skb);
}

/* Release a netbuf previously obtained from a_netbuf_alloc[_raw](). */
void
a_netbuf_free(void *bufPtr)
{
    struct sk_buff *skb = (struct sk_buff *)bufPtr;

    dev_kfree_skb(skb);
}
/* Length of the data currently held in the netbuf. */
u32 a_netbuf_to_len(void *bufPtr)
{
    return (((struct sk_buff *)bufPtr)->len);
}

/* Pointer to the start of the netbuf's data area. */
void *
a_netbuf_to_data(void *bufPtr)
{
    return (((struct sk_buff *)bufPtr)->data);
}
/*
 * Add len # of bytes to the beginning of the network buffer
 * pointed to by bufPtr
 */
int
a_netbuf_push(void *bufPtr, s32 len)
{
    skb_push((struct sk_buff *)bufPtr, len);

    return 0;
}

/*
 * Add len # of bytes to the beginning of the network buffer
 * pointed to by bufPtr and also fill with data
 */
int
a_netbuf_push_data(void *bufPtr, char *srcPtr, s32 len)
{
    skb_push((struct sk_buff *) bufPtr, len);
    memcpy(((struct sk_buff *)bufPtr)->data, srcPtr, len);

    return 0;
}

/*
 * Add len # of bytes to the end of the network buffer
 * pointed to by bufPtr
 */
int
a_netbuf_put(void *bufPtr, s32 len)
{
    skb_put((struct sk_buff *)bufPtr, len);

    return 0;
}

/*
 * Add len # of bytes to the end of the network buffer
 * pointed to by bufPtr and also fill with data
 */
int
a_netbuf_put_data(void *bufPtr, char *srcPtr, s32 len)
{
    /* Record the current end of data before extending the buffer. */
    char *start = (char*)(((struct sk_buff *)bufPtr)->data +
                  ((struct sk_buff *)bufPtr)->len);
    skb_put((struct sk_buff *)bufPtr, len);
    memcpy(start, srcPtr, len);

    return 0;
}
/*
 * Trim the network buffer pointed to by bufPtr to len # of bytes
 */
int
a_netbuf_setlen(void *bufPtr, s32 len)
{
    skb_trim((struct sk_buff *)bufPtr, len);

    return 0;
}

/*
 * Chop of len # of bytes from the end of the buffer.
 */
int
a_netbuf_trim(void *bufPtr, s32 len)
{
    skb_trim((struct sk_buff *)bufPtr, ((struct sk_buff *)bufPtr)->len - len);

    return 0;
}

/*
 * Chop of len # of bytes from the end of the buffer and return the data.
 */
int
a_netbuf_trim_data(void *bufPtr, char *dstPtr, s32 len)
{
    /* Copy out the tail bytes before trimming them off. */
    char *start = (char*)(((struct sk_buff *)bufPtr)->data +
                  (((struct sk_buff *)bufPtr)->len - len));
    memcpy(dstPtr, start, len);
    skb_trim((struct sk_buff *)bufPtr, ((struct sk_buff *)bufPtr)->len - len);

    return 0;
}
/*
 * Returns the number of bytes available to a a_netbuf_push()
 */
s32
a_netbuf_headroom(void *bufPtr)
{
    return (skb_headroom((struct sk_buff *)bufPtr));
}

/*
 * Removes specified number of bytes from the beginning of the buffer
 */
int
a_netbuf_pull(void *bufPtr, s32 len)
{
    skb_pull((struct sk_buff *)bufPtr, len);

    return 0;
}

/*
 * Removes specified number of bytes from the beginning of the buffer
 * and return the data
 */
int
a_netbuf_pull_data(void *bufPtr, char *dstPtr, s32 len)
{
    /* Copy the head bytes out first, then advance past them. */
    memcpy(dstPtr, ((struct sk_buff *)bufPtr)->data, len);
    skb_pull((struct sk_buff *)bufPtr, len);

    return 0;
}
#ifdef EXPORT_HCI_BRIDGE_INTERFACE
EXPORT_SYMBOL(a_netbuf_to_data);
EXPORT_SYMBOL(a_netbuf_put);
EXPORT_SYMBOL(a_netbuf_pull);
EXPORT_SYMBOL(a_netbuf_alloc);
EXPORT_SYMBOL(a_netbuf_free);
#endif
| gpl-2.0 |
kyupltd/linux | drivers/char/xilinx_hwicap/xilinx_hwicap.c | 429 | 22568 | /*****************************************************************************
*
* Author: Xilinx, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE.
*
* (c) Copyright 2002 Xilinx Inc., Systems Engineering Group
* (c) Copyright 2004 Xilinx Inc., Systems Engineering Group
* (c) Copyright 2007-2008 Xilinx Inc.
* All rights reserved.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*****************************************************************************/
/*
* This is the code behind /dev/icap* -- it allows a user-space
* application to use the Xilinx ICAP subsystem.
*
* The following operations are possible:
*
* open open the port and initialize for access.
* release release port
* write Write a bitstream to the configuration processor.
* read Read a data stream from the configuration processor.
*
* After being opened, the port is initialized and accessed to avoid a
* corrupted first read which may occur with some hardware. The port
* is left in a desynched state, requiring that a synch sequence be
* transmitted before any valid configuration data. A user will have
* exclusive access to the device while it remains open, and the state
* of the ICAP cannot be guaranteed after the device is closed. Note
* that a complete reset of the core and the state of the ICAP cannot
* be performed on many versions of the cores, hence users of this
* device should avoid making inconsistent accesses to the device. In
* particular, accessing the read interface, without first generating
* a write containing a readback packet can leave the ICAP in an
* inaccessible state.
*
* Note that in order to use the read interface, it is first necessary
* to write a request packet to the write interface. i.e., it is not
* possible to simply readback the bitstream (or any configuration
* bits) from a device without specifically requesting them first.
* The code to craft such packets is intended to be part of the
* user-space application code that uses this device. The simplest
* way to use this interface is simply:
*
* cp foo.bit /dev/icap0
*
* Note that unless foo.bit is an appropriately constructed partial
* bitstream, this has a high likelihood of overwriting the design
* currently programmed in the FPGA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/sysctl.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#ifdef CONFIG_OF
/* For open firmware. */
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif
#define DRIVER_NAME "icap"

#define HWICAP_REGS   (0x10000)

#define XHWICAP_MAJOR 259
#define XHWICAP_MINOR 0
#define HWICAP_DEVICES 1

/* Serializes hwicap_open() against concurrent opens of the same cdev. */
static DEFINE_MUTEX(hwicap_mutex);

/* An array, which is set to true when the device is registered. */
static bool probed_devices[HWICAP_DEVICES];
/* Protects probed_devices[] during setup/remove. */
static struct mutex icap_sem;

/* sysfs class under which the icap%d device nodes are created. */
static struct class *icap_class;
/* Register index meaning "not present on this FPGA family". */
#define UNIMPLEMENTED 0xFFFF

/*
 * Configuration-register index maps, one per supported FPGA family.
 * Each field is the register's index in that family's configuration
 * logic, or UNIMPLEMENTED when the family lacks the register.  The
 * table to use is selected by the "family" string in the probe paths.
 */

/* Virtex-II Pro ("virtex2p") */
static const struct config_registers v2_config_registers = {
	.CRC = 0,
	.FAR = 1,
	.FDRI = 2,
	.FDRO = 3,
	.CMD = 4,
	.CTL = 5,
	.MASK = 6,
	.STAT = 7,
	.LOUT = 8,
	.COR = 9,
	.MFWR = 10,
	.FLR = 11,
	.KEY = 12,
	.CBC = 13,
	.IDCODE = 14,
	.AXSS = UNIMPLEMENTED,
	.C0R_1 = UNIMPLEMENTED,
	.CSOB = UNIMPLEMENTED,
	.WBSTAR = UNIMPLEMENTED,
	.TIMER = UNIMPLEMENTED,
	.BOOTSTS = UNIMPLEMENTED,
	.CTL_1 = UNIMPLEMENTED,
};

/* Virtex-4 ("virtex4") — also the default when no family is given */
static const struct config_registers v4_config_registers = {
	.CRC = 0,
	.FAR = 1,
	.FDRI = 2,
	.FDRO = 3,
	.CMD = 4,
	.CTL = 5,
	.MASK = 6,
	.STAT = 7,
	.LOUT = 8,
	.COR = 9,
	.MFWR = 10,
	.FLR = UNIMPLEMENTED,
	.KEY = UNIMPLEMENTED,
	.CBC = 11,
	.IDCODE = 12,
	.AXSS = 13,
	.C0R_1 = UNIMPLEMENTED,
	.CSOB = UNIMPLEMENTED,
	.WBSTAR = UNIMPLEMENTED,
	.TIMER = UNIMPLEMENTED,
	.BOOTSTS = UNIMPLEMENTED,
	.CTL_1 = UNIMPLEMENTED,
};

/* Virtex-5 ("virtex5") */
static const struct config_registers v5_config_registers = {
	.CRC = 0,
	.FAR = 1,
	.FDRI = 2,
	.FDRO = 3,
	.CMD = 4,
	.CTL = 5,
	.MASK = 6,
	.STAT = 7,
	.LOUT = 8,
	.COR = 9,
	.MFWR = 10,
	.FLR = UNIMPLEMENTED,
	.KEY = UNIMPLEMENTED,
	.CBC = 11,
	.IDCODE = 12,
	.AXSS = 13,
	.C0R_1 = 14,
	.CSOB = 15,
	.WBSTAR = 16,
	.TIMER = 17,
	.BOOTSTS = 18,
	.CTL_1 = 19,
};

/* Virtex-6 ("virtex6") */
static const struct config_registers v6_config_registers = {
	.CRC = 0,
	.FAR = 1,
	.FDRI = 2,
	.FDRO = 3,
	.CMD = 4,
	.CTL = 5,
	.MASK = 6,
	.STAT = 7,
	.LOUT = 8,
	.COR = 9,
	.MFWR = 10,
	.FLR = UNIMPLEMENTED,
	.KEY = UNIMPLEMENTED,
	.CBC = 11,
	.IDCODE = 12,
	.AXSS = 13,
	.C0R_1 = 14,
	.CSOB = 15,
	.WBSTAR = 16,
	.TIMER = 17,
	.BOOTSTS = 22,
	.CTL_1 = 24,
};
/**
 * hwicap_command_desync - Send a DESYNC command to the ICAP port.
 * @drvdata: a pointer to the drvdata.
 *
 * This command desynchronizes the ICAP.  After this command, a
 * bitstream containing a NULL packet, followed by a SYNCH packet is
 * required before the ICAP will recognize commands.
 */
static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
{
	/*
	 * The DESYNC sequence: a type-1 one-word write to the CMD
	 * register carrying the DESYNCH command, padded with two NOOPs.
	 */
	u32 packet[] = {
		hwicap_type_1_write(drvdata->config_regs->CMD) | 1,
		XHI_CMD_DESYNCH,
		XHI_NOOP_PACKET,
		XHI_NOOP_PACKET,
	};

	/*
	 * Hand the words to the FIFO and initiate the transfer of the
	 * data present in the FIFO to the ICAP device.
	 */
	return drvdata->config->set_configuration(drvdata, packet,
						  ARRAY_SIZE(packet));
}
/**
 * hwicap_get_configuration_register - Query a configuration register.
 * @drvdata: a pointer to the drvdata.
 * @reg: a constant which represents the configuration
 *		register value to be returned.
 *		Examples: XHI_IDCODE, XHI_FLR.
 * @reg_data: returns the value of the register.
 *
 * Sends a query packet to the ICAP and then receives the response.
 * The icap is left in Synched state.
 */
static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
		u32 reg, u32 *reg_data)
{
	int status;
	u32 buffer[6];
	u32 index = 0;

	/*
	 * Create the data to be written to the ICAP: dummy and NOOP
	 * padding around a SYNC word, to bring the port into a known
	 * synchronized state before issuing the read request.
	 */
	buffer[index++] = XHI_DUMMY_PACKET;
	buffer[index++] = XHI_NOOP_PACKET;
	buffer[index++] = XHI_SYNC_PACKET;
	buffer[index++] = XHI_NOOP_PACKET;
	buffer[index++] = XHI_NOOP_PACKET;

	/*
	 * Write the data to the FIFO and initiate the transfer of data present
	 * in the FIFO to the ICAP device.
	 */
	status = drvdata->config->set_configuration(drvdata,
			&buffer[0], index);
	if (status)
		return status;

	/* If the syncword was not found, then we need to start over. */
	status = drvdata->config->get_status(drvdata);
	if ((status & XHI_SR_DALIGN_MASK) != XHI_SR_DALIGN_MASK)
		return -EIO;

	/* Now issue a one-word type-1 read of the requested register. */
	index = 0;
	buffer[index++] = hwicap_type_1_read(reg) | 1;
	buffer[index++] = XHI_NOOP_PACKET;
	buffer[index++] = XHI_NOOP_PACKET;

	/*
	 * Write the data to the FIFO and initiate the transfer of data present
	 * in the FIFO to the ICAP device.
	 */
	status = drvdata->config->set_configuration(drvdata,
			&buffer[0], index);
	if (status)
		return status;

	/*
	 * Read the configuration register
	 */
	status = drvdata->config->get_configuration(drvdata, reg_data, 1);
	if (status)
		return status;

	return 0;
}
/*
 * Bring the ICAP into a known state when the device is opened:
 * reset the core, desynchronize, read the IDCODE once (which also
 * performs the "throw-away first read" mentioned in the file header),
 * and desynchronize again.  Returns 0 or a negative errno.
 */
static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
{
	int status;
	u32 idcode;

	dev_dbg(drvdata->dev, "initializing\n");

	/* Abort any current transaction, to make sure we have the
	 * ICAP in a good state. */
	dev_dbg(drvdata->dev, "Reset...\n");
	drvdata->config->reset(drvdata);

	dev_dbg(drvdata->dev, "Desync...\n");
	status = hwicap_command_desync(drvdata);
	if (status)
		return status;

	/* Attempt to read the IDCODE from ICAP.  This
	 * may not be returned correctly, due to the design of the
	 * hardware.
	 */
	dev_dbg(drvdata->dev, "Reading IDCODE...\n");
	status = hwicap_get_configuration_register(
			drvdata, drvdata->config_regs->IDCODE, &idcode);
	dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode);
	if (status)
		return status;

	dev_dbg(drvdata->dev, "Desync...\n");
	status = hwicap_command_desync(drvdata);
	if (status)
		return status;

	return 0;
}
/*
 * read() handler: return configuration data previously requested via a
 * readback packet written through hwicap_write().
 *
 * The ICAP transfers whole 32-bit words only, so when userspace asks
 * for a byte count that is not a multiple of 4, the tail bytes of the
 * last word are kept in drvdata->read_buffer and handed out on the
 * next read() call.  NOTE(review): the memmove below implies
 * read_buffer holds at most one word (4 bytes) — declared in
 * xilinx_hwicap.h; confirm there.
 */
static ssize_t
hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct hwicap_drvdata *drvdata = file->private_data;
	ssize_t bytes_to_read = 0;
	u32 *kbuf;
	u32 words;
	u32 bytes_remaining;	/* leftover bytes saved for the next read */
	int status;

	status = mutex_lock_interruptible(&drvdata->sem);
	if (status)
		return status;

	if (drvdata->read_buffer_in_use) {
		/* If there are leftover bytes in the buffer, just */
		/* return them and don't try to read more from the */
		/* ICAP device. */
		bytes_to_read =
			(count < drvdata->read_buffer_in_use) ? count :
			drvdata->read_buffer_in_use;

		/* Return the data currently in the read buffer. */
		if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) {
			status = -EFAULT;
			goto error;
		}
		drvdata->read_buffer_in_use -= bytes_to_read;
		/* Shift any remaining saved bytes to the front. */
		memmove(drvdata->read_buffer,
				drvdata->read_buffer + bytes_to_read,
				4 - bytes_to_read);
	} else {
		/* Get new data from the ICAP, and return was was requested. */
		kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!kbuf) {
			status = -ENOMEM;
			goto error;
		}

		/* The ICAP device is only able to read complete */
		/* words.  If a number of bytes that do not correspond */
		/* to complete words is requested, then we read enough */
		/* words to get the required number of bytes, and then */
		/* save the remaining bytes for the next read. */

		/* Determine the number of words to read, rounding up */
		/* if necessary. */
		words = ((count + 3) >> 2);
		bytes_to_read = words << 2;

		/* Never read more than one page per call. */
		if (bytes_to_read > PAGE_SIZE)
			bytes_to_read = PAGE_SIZE;

		/* Ensure we only read a complete number of words. */
		bytes_remaining = bytes_to_read & 3;
		bytes_to_read &= ~3;
		words = bytes_to_read >> 2;

		status = drvdata->config->get_configuration(drvdata,
				kbuf, words);

		/* If we didn't read correctly, then bail out. */
		if (status) {
			free_page((unsigned long)kbuf);
			goto error;
		}

		/* If we fail to return the data to the user, then bail out. */
		if (copy_to_user(buf, kbuf, bytes_to_read)) {
			free_page((unsigned long)kbuf);
			status = -EFAULT;
			goto error;
		}
		/* Stash the unrequested tail bytes for the next call. */
		memcpy(drvdata->read_buffer,
				kbuf,
				bytes_remaining);
		drvdata->read_buffer_in_use = bytes_remaining;
		free_page((unsigned long)kbuf);
	}
	status = bytes_to_read;
 error:
	mutex_unlock(&drvdata->sem);
	return status;
}
/*
 * write() handler: stream a bitstream to the configuration processor.
 *
 * Data is forwarded to the ICAP in whole 32-bit words.  Up to 3 trailing
 * bytes that do not fill a word are kept in drvdata->write_buffer and
 * prepended to the next write() (or zero-padded and flushed on release).
 * Returns the number of bytes consumed or a negative errno.
 */
static ssize_t
hwicap_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct hwicap_drvdata *drvdata = file->private_data;
	ssize_t written = 0;
	ssize_t left = count;
	u32 *kbuf;		/* one-page bounce buffer */
	ssize_t len;
	ssize_t status;

	status = mutex_lock_interruptible(&drvdata->sem);
	if (status)
		return status;

	/* Account for bytes left over from the previous write. */
	left += drvdata->write_buffer_in_use;

	/* Only write multiples of 4 bytes. */
	if (left < 4) {
		status = 0;
		goto error;
	}

	kbuf = (u32 *) __get_free_page(GFP_KERNEL);
	if (!kbuf) {
		status = -ENOMEM;
		goto error;
	}

	while (left > 3) {
		/* only write multiples of 4 bytes, so there might */
		/* be as many as 3 bytes left (at the end). */
		len = left;

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
		len &= ~3;

		if (drvdata->write_buffer_in_use) {
			/* Prepend the saved partial word, then fill the
			 * rest of this chunk from userspace. */
			memcpy(kbuf, drvdata->write_buffer,
					drvdata->write_buffer_in_use);
			if (copy_from_user(
			    (((char *)kbuf) + drvdata->write_buffer_in_use),
			    buf + written,
			    len - (drvdata->write_buffer_in_use))) {
				free_page((unsigned long)kbuf);
				status = -EFAULT;
				goto error;
			}
		} else {
			if (copy_from_user(kbuf, buf + written, len)) {
				free_page((unsigned long)kbuf);
				status = -EFAULT;
				goto error;
			}
		}

		/* Push the assembled words into the ICAP. */
		status = drvdata->config->set_configuration(drvdata,
				kbuf, len >> 2);

		if (status) {
			free_page((unsigned long)kbuf);
			status = -EFAULT;
			goto error;
		}
		if (drvdata->write_buffer_in_use) {
			/* The saved bytes were not read from userspace;
			 * exclude them from the progress counters. */
			len -= drvdata->write_buffer_in_use;
			left -= drvdata->write_buffer_in_use;
			drvdata->write_buffer_in_use = 0;
		}
		written += len;
		left -= len;
	}

	/* Save a trailing partial word (1-3 bytes) for the next write. */
	if ((left > 0) && (left < 4)) {
		if (!copy_from_user(drvdata->write_buffer,
						buf + written, left)) {
			drvdata->write_buffer_in_use = left;
			written += left;
			left = 0;
		}
	}

	free_page((unsigned long)kbuf);
	status = written;
 error:
	mutex_unlock(&drvdata->sem);
	return status;
}
/*
 * open() handler: enforce single-user exclusive access and initialize
 * the ICAP (see hwicap_initialize_hwicap).  hwicap_mutex serializes
 * concurrent opens around the cdev lookup and the is_open check.
 */
static int hwicap_open(struct inode *inode, struct file *file)
{
	struct hwicap_drvdata *drvdata;
	int status;

	mutex_lock(&hwicap_mutex);
	drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev);

	status = mutex_lock_interruptible(&drvdata->sem);
	if (status)
		goto out;

	if (drvdata->is_open) {
		/* Only one opener at a time. */
		status = -EBUSY;
		goto error;
	}

	status = hwicap_initialize_hwicap(drvdata);
	if (status) {
		dev_err(drvdata->dev, "Failed to open file");
		goto error;
	}

	file->private_data = drvdata;
	drvdata->write_buffer_in_use = 0;
	drvdata->read_buffer_in_use = 0;
	drvdata->is_open = 1;

 error:
	mutex_unlock(&drvdata->sem);
 out:
	mutex_unlock(&hwicap_mutex);
	return status;
}
/*
 * release() handler: flush any buffered partial word (zero-padded to a
 * full configuration word), desynchronize the ICAP, and mark the device
 * closed.  Returns the first error encountered, if any.
 */
static int hwicap_release(struct inode *inode, struct file *file)
{
	struct hwicap_drvdata *drvdata = file->private_data;
	int i;
	int status = 0;

	mutex_lock(&drvdata->sem);

	if (drvdata->write_buffer_in_use) {
		/* Flush write buffer: pad the 1-3 leftover bytes with
		 * zeroes and send them as one complete word. */
		for (i = drvdata->write_buffer_in_use; i < 4; i++)
			drvdata->write_buffer[i] = 0;

		status = drvdata->config->set_configuration(drvdata,
				(u32 *) drvdata->write_buffer, 1);
		if (status)
			goto error;
	}

	/* Leave the port desynched, per the contract documented at the
	 * top of this file.  (The previous `if (status) goto error;`
	 * here jumped to the immediately-following label and was dead
	 * code — removed.) */
	status = hwicap_command_desync(drvdata);

 error:
	drvdata->is_open = 0;
	mutex_unlock(&drvdata->sem);
	return status;
}
/* File operations for the /dev/icap* character device.  The ICAP is a
 * stream device, so seeking is a no-op. */
static const struct file_operations hwicap_fops = {
	.owner = THIS_MODULE,
	.write = hwicap_write,
	.read = hwicap_read,
	.open = hwicap_open,
	.release = hwicap_release,
	.llseek = noop_llseek,
};
/**
 * hwicap_setup - Common probe helper: register one hwicap device.
 * @dev: the device being probed
 * @id: requested device index, or a negative value to auto-select
 * @regs_res: memory resource describing the register window
 * @config: ICAP access callbacks (buffer- or FIFO-based core)
 * @config_regs: register index map for the FPGA family
 *
 * Claims a device slot, maps the registers, and creates the character
 * device node.  Returns 0 on success or a negative errno.
 */
static int hwicap_setup(struct device *dev, int id,
		const struct resource *regs_res,
		const struct hwicap_driver_config *config,
		const struct config_registers *config_regs)
{
	dev_t devt;
	struct hwicap_drvdata *drvdata = NULL;
	int retval = 0;

	dev_info(dev, "Xilinx icap port driver\n");

	mutex_lock(&icap_sem);

	if (id < 0) {
		/* Auto-select the first unused slot. */
		for (id = 0; id < HWICAP_DEVICES; id++)
			if (!probed_devices[id])
				break;
	}
	if (id < 0 || id >= HWICAP_DEVICES) {
		mutex_unlock(&icap_sem);
		dev_err(dev, "%s%i too large\n", DRIVER_NAME, id);
		return -EINVAL;
	}
	if (probed_devices[id]) {
		mutex_unlock(&icap_sem);
		dev_err(dev, "cannot assign to %s%i; it is already in use\n",
			DRIVER_NAME, id);
		return -EBUSY;
	}

	probed_devices[id] = 1;
	mutex_unlock(&icap_sem);

	devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id);

	drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
	if (!drvdata) {
		dev_err(dev, "Couldn't allocate device private record\n");
		retval = -ENOMEM;
		goto failed0;
	}
	dev_set_drvdata(dev, (void *)drvdata);

	if (!regs_res) {
		dev_err(dev, "Couldn't get registers resource\n");
		retval = -EFAULT;
		goto failed1;
	}

	drvdata->mem_start = regs_res->start;
	drvdata->mem_end = regs_res->end;
	drvdata->mem_size = resource_size(regs_res);

	if (!request_mem_region(drvdata->mem_start,
					drvdata->mem_size, DRIVER_NAME)) {
		dev_err(dev, "Couldn't lock memory region at %Lx\n",
			(unsigned long long) regs_res->start);
		retval = -EBUSY;
		goto failed1;
	}

	drvdata->devt = devt;
	drvdata->dev = dev;
	drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size);
	if (!drvdata->base_address) {
		dev_err(dev, "ioremap() failed\n");
		/* Fix: retval was previously left at 0 here, making the
		 * probe report success with a half-initialized device. */
		retval = -ENOMEM;
		goto failed2;
	}

	drvdata->config = config;
	drvdata->config_regs = config_regs;

	mutex_init(&drvdata->sem);
	drvdata->is_open = 0;

	dev_info(dev, "ioremap %llx to %p with size %llx\n",
		 (unsigned long long) drvdata->mem_start,
		 drvdata->base_address,
		 (unsigned long long) drvdata->mem_size);

	cdev_init(&drvdata->cdev, &hwicap_fops);
	drvdata->cdev.owner = THIS_MODULE;
	retval = cdev_add(&drvdata->cdev, devt, 1);
	if (retval) {
		dev_err(dev, "cdev_add() failed\n");
		goto failed3;
	}

	device_create(icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id);
	return 0;		/* success */

 failed3:
	iounmap(drvdata->base_address);

 failed2:
	release_mem_region(regs_res->start, drvdata->mem_size);

 failed1:
	kfree(drvdata);

 failed0:
	/* Release the claimed slot so a later probe can retry. */
	mutex_lock(&icap_sem);
	probed_devices[id] = 0;
	mutex_unlock(&icap_sem);

	return retval;
}
/* Access callbacks for register/buffer-based cores
 * (matched by "xlnx,opb-hwicap-1.00.b" — see hwicap_of_match). */
static struct hwicap_driver_config buffer_icap_config = {
	.get_configuration = buffer_icap_get_configuration,
	.set_configuration = buffer_icap_set_configuration,
	.get_status = buffer_icap_get_status,
	.reset = buffer_icap_reset,
};

/* Access callbacks for FIFO-based cores
 * (matched by "xlnx,xps-hwicap-1.00.a" — see hwicap_of_match). */
static struct hwicap_driver_config fifo_icap_config = {
	.get_configuration = fifo_icap_get_configuration,
	.set_configuration = fifo_icap_set_configuration,
	.get_status = fifo_icap_get_status,
	.reset = fifo_icap_reset,
};
/*
 * Tear down one registered hwicap device: undo everything that
 * hwicap_setup() did and free the device slot.
 */
static int hwicap_remove(struct device *dev)
{
	struct hwicap_drvdata *drvdata;

	drvdata = dev_get_drvdata(dev);

	if (!drvdata)
		return 0;

	device_destroy(icap_class, drvdata->devt);
	cdev_del(&drvdata->cdev);
	iounmap(drvdata->base_address);
	release_mem_region(drvdata->mem_start, drvdata->mem_size);
	kfree(drvdata);

	/* Mark the slot free again for future probes. */
	mutex_lock(&icap_sem);
	probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0;
	mutex_unlock(&icap_sem);
	return 0;		/* success */
}
#ifdef CONFIG_OF
/*
 * Device-tree probe path: extract the register window, the optional
 * "port-number" id and the "xlnx,family" string, then hand off to
 * hwicap_setup() with the matching register map.
 */
static int hwicap_of_probe(struct platform_device *op,
				     const struct hwicap_driver_config *config)
{
	struct resource res;
	const unsigned int *id;
	const char *family;
	int rc;
	const struct config_registers *regs;

	rc = of_address_to_resource(op->dev.of_node, 0, &res);
	if (rc) {
		dev_err(&op->dev, "invalid address\n");
		return rc;
	}

	id = of_get_property(op->dev.of_node, "port-number", NULL);

	/* It's most likely that we're using V4, if the family is not
	   specified */
	regs = &v4_config_registers;
	family = of_get_property(op->dev.of_node, "xlnx,family", NULL);

	if (family) {
		if (!strcmp(family, "virtex2p")) {
			regs = &v2_config_registers;
		} else if (!strcmp(family, "virtex4")) {
			regs = &v4_config_registers;
		} else if (!strcmp(family, "virtex5")) {
			regs = &v5_config_registers;
		} else if (!strcmp(family, "virtex6")) {
			regs = &v6_config_registers;
		}
	}
	/* With no "port-number" property, let hwicap_setup() pick an id. */
	return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
			regs);
}
#else
/* Without CONFIG_OF there is no device-tree probe path. */
static inline int hwicap_of_probe(struct platform_device *op,
				  const struct hwicap_driver_config *config)
{
	return -EINVAL;
}
#endif /* CONFIG_OF */
static const struct of_device_id hwicap_of_match[];
/*
 * Platform-bus probe: prefer the device-tree path when the device
 * matched hwicap_of_match; otherwise use the platform MEM resource
 * with an optional family name passed via platform_data, and assume
 * the buffer-based access method.
 */
static int hwicap_drv_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct resource *res;
	const struct config_registers *regs;
	const char *family;

	match = of_match_device(hwicap_of_match, &pdev->dev);
	if (match)
		return hwicap_of_probe(pdev, match->data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* It's most likely that we're using V4, if the family is not
	   specified */
	regs = &v4_config_registers;
	family = pdev->dev.platform_data;

	if (family) {
		if (!strcmp(family, "virtex2p")) {
			regs = &v2_config_registers;
		} else if (!strcmp(family, "virtex4")) {
			regs = &v4_config_registers;
		} else if (!strcmp(family, "virtex5")) {
			regs = &v5_config_registers;
		} else if (!strcmp(family, "virtex6")) {
			regs = &v6_config_registers;
		}
	}

	return hwicap_setup(&pdev->dev, pdev->id, res,
			&buffer_icap_config, regs);
}
/* Platform-bus remove: delegate to the common teardown helper. */
static int hwicap_drv_remove(struct platform_device *pdev)
{
	return hwicap_remove(&pdev->dev);
}
#ifdef CONFIG_OF
/* Match table for device tree binding */
static const struct of_device_id hwicap_of_match[] = {
	/* register/buffer-based core */
	{ .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
	/* FIFO-based core */
	{ .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config},
	{},
};
MODULE_DEVICE_TABLE(of, hwicap_of_match);
#else
#define hwicap_of_match NULL
#endif
/* Platform driver glue; of_match_table enables device-tree binding
 * when CONFIG_OF is set (otherwise it is defined as NULL above). */
static struct platform_driver hwicap_platform_driver = {
	.probe = hwicap_drv_probe,
	.remove = hwicap_drv_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = DRIVER_NAME,
		.of_match_table = hwicap_of_match,
	},
};
/*
 * Module init: create the sysfs class, reserve the char-device region,
 * and register the platform driver.  All three steps are unwound on
 * failure.  (Previously the class_create() result was never checked —
 * it returns an ERR_PTR on failure — and the class leaked when
 * register_chrdev_region() failed.)
 */
static int __init hwicap_module_init(void)
{
	dev_t devt;
	int retval;

	icap_class = class_create(THIS_MODULE, "xilinx_config");
	if (IS_ERR(icap_class))
		return PTR_ERR(icap_class);
	mutex_init(&icap_sem);

	devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
	retval = register_chrdev_region(devt,
					HWICAP_DEVICES,
					DRIVER_NAME);
	if (retval < 0)
		goto failed_class;

	retval = platform_driver_register(&hwicap_platform_driver);
	if (retval)
		goto failed;

	return 0;

 failed:
	unregister_chrdev_region(devt, HWICAP_DEVICES);

 failed_class:
	class_destroy(icap_class);

	return retval;
}
/*
 * Module exit: unregister the driver first — hwicap_remove() runs
 * during unregistration and calls device_destroy(icap_class, ...), so
 * the class must still be alive at that point.  (Previously the class
 * was destroyed before the driver was unregistered.)
 */
static void __exit hwicap_module_cleanup(void)
{
	dev_t devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);

	platform_driver_unregister(&hwicap_platform_driver);

	class_destroy(icap_class);

	unregister_chrdev_region(devt, HWICAP_DEVICES);
}
module_init(hwicap_module_init);
module_exit(hwicap_module_cleanup);

/* Module metadata. */
MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group");
MODULE_DESCRIPTION("Xilinx ICAP Port Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
prarit/staging-next-unisys | tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c | 685 | 1613 | #include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include "utils.h"
/* This must match the huge page & THP size */
#define SIZE (16 * 1024 * 1024)
/*
 * One iteration of the hugetlb-vs-THP reproducer: probe the fixed
 * address with a huge-page mapping, then create and touch a normal
 * anonymous mapping there.  Returns 0 on success, 1 if the normal
 * mapping could not be created.
 */
static int test_body(void)
{
	void *want = (void *)0xa0000000;
	char *map;

	map = mmap(want, SIZE, PROT_READ | PROT_WRITE,
		   MAP_HUGETLB | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (map != MAP_FAILED) {
		/*
		 * Typically the mmap will fail because no huge pages are
		 * allocated on the system. But if there are huge pages
		 * allocated the mmap will succeed. That's fine too, we just
		 * munmap here before continuing.
		 */
		munmap(want, SIZE);
	}

	map = mmap(want, SIZE, PROT_READ | PROT_WRITE,
		   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (map == MAP_FAILED) {
		printf("Mapping failed @ %p\n", want);
		perror("mmap");
		return 1;
	}

	/*
	 * Either a user or kernel access is sufficient to trigger the bug.
	 * A kernel access is easier to spot & debug, as it will trigger the
	 * softlockup or RCU stall detectors, and when the system is kicked
	 * into xmon we get a backtrace in the kernel.
	 *
	 * A good option is:
	 *    getcwd(p, SIZE);
	 *
	 * For the purposes of this testcase it's preferable to spin in
	 * userspace, so the harness can kill us if we get stuck. That way we
	 * see a test failure rather than a dead system.
	 */
	*map = 0xf;

	munmap(want, SIZE);

	return 0;
}
/* Repeat the reproducer many times; fail fast on the first error. */
static int test_main(void)
{
	int iter;

	/* 10,000 because it's a "bunch", and completes reasonably quickly */
	for (iter = 0; iter < 10000; iter++) {
		if (test_body())
			return 1;
	}

	return 0;
}
int main(void)
{
	/* NOTE(review): test_harness() comes from utils.h; presumably it
	 * runs test_main under the harness and reports the result under
	 * the "hugetlb_vs_thp" name — confirm against the selftest utils. */
	return test_harness(test_main, "hugetlb_vs_thp");
}
| gpl-2.0 |
BobZhome/android_kernel_gelato | drivers/net/wireless/prism54/islpci_eth.c | 941 | 15768 | /*
* Copyright (C) 2002 Intersil Americas Inc.
* Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <asm/byteorder.h>
#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_eth.h"
#include "islpci_mgt.h"
#include "oid_mgt.h"
/******************************************************************************
Network Interface functions
******************************************************************************/
/*
 * Reclaim completed TX entries: walk the low-priority TX data queue from
 * the driver's free pointer up to the device's current-fragment pointer,
 * unmapping DMA and freeing the skb for each completed frame.  Runs in
 * IRQ context (uses dev_kfree_skb_irq).
 */
void
islpci_eth_cleanup_transmit(islpci_private *priv,
			    isl38xx_control_block *control_block)
{
	struct sk_buff *skb;
	u32 index;

	/* compare the control block read pointer with the free pointer */
	while (priv->free_data_tx !=
	       le32_to_cpu(control_block->
			   device_curr_frag[ISL38XX_CB_TX_DATA_LQ])) {
		/* read the index of the first fragment to be freed */
		index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;

		/* check for holes in the arrays caused by multi fragment frames
		 * searching for the last fragment of a frame */
		if (priv->pci_map_tx_address[index]) {
			/* entry is the last fragment of a frame
			 * free the skb structure and unmap pci memory */
			skb = priv->data_low_tx[index];

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING,
			      "cleanup skb %p skb->data %p skb->len %u truesize %u\n ",
			      skb, skb->data, skb->len, skb->truesize);
#endif

			pci_unmap_single(priv->pdev,
					 priv->pci_map_tx_address[index],
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			skb = NULL;
		}
		/* increment the free data low queue pointer */
		priv->free_data_tx++;
	}
}
/*
 * ndo_start_xmit handler: place one frame on the low-priority TX data
 * queue of the device's control block.
 *
 * The device requires the frame buffer to start on a 4-byte boundary;
 * in WDS mode an extra 6-byte address field is also inserted in front
 * of the payload.  If the existing skb cannot be adjusted in place, a
 * new aligned skb is allocated and the data copied over.
 */
netdev_tx_t
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb = priv->control_block;
	u32 index;
	dma_addr_t pci_map_address;
	int frame_size;
	isl38xx_fragment *fragment;
	int offset;
	struct sk_buff *newskb;
	int newskb_offset;
	unsigned long flags;
	/* NOTE(review): wds_mac is never assigned in this function, yet it
	 * is copied into the frame below when init_wds is set — looks like
	 * the WDS destination address was meant to be filled in here.
	 * Confirm before relying on the WDS path. */
	unsigned char wds_mac[6];
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif

	/* lock the driver code */
	spin_lock_irqsave(&priv->slock, flags);

	/* check whether the destination queue has enough fragments for the frame */
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
	if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
		printk(KERN_ERR "%s: transmit device queue full when awake\n",
		       ndev->name);
		netif_stop_queue(ndev);

		/* trigger the device */
		isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
				  ISL38XX_DEV_INT_REG);
		udelay(ISL38XX_WRITEIO_DELAY);
		goto drop_free;
	}
	/* Check alignment and WDS frame formatting. The start of the packet should
	 * be aligned on a 4-byte boundary. If WDS is enabled add another 6 bytes
	 * and add WDS address information.
	 * (Bitwise OR is intentional: take this path when the buffer is
	 * misaligned OR WDS is enabled.) */
	if (likely(((long) skb->data & 0x03) | init_wds)) {
		/* get the number of bytes to add and re-align */
		offset = (4 - (long) skb->data) & 0x03;
		offset += init_wds ? 6 : 0;

		/* check whether the current skb can be used  */
		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
			unsigned char *src = skb->data;

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
			      init_wds);
#endif

			/* align the buffer on 4-byte boundary */
			skb_reserve(skb, (4 - (long) skb->data) & 0x03);
			if (init_wds) {
				/* wds requires an additional address field of 6 bytes */
				skb_put(skb, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
				memmove(skb->data + 6, src, skb->len);
				skb_copy_to_linear_data(skb, wds_mac, 6);
			} else {
				memmove(skb->data, src, skb->len);
			}

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
			      src, skb->len);
#endif
		} else {
			/* Cannot adjust in place: allocate a fresh, aligned
			 * skb and copy the payload into it. */
			newskb =
			    dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
			if (unlikely(newskb == NULL)) {
				printk(KERN_ERR "%s: Cannot allocate skb\n",
				       ndev->name);
				goto drop_free;
			}
			newskb_offset = (4 - (long) newskb->data) & 0x03;

			/* Check if newskb->data is aligned */
			if (newskb_offset)
				skb_reserve(newskb, newskb_offset);

			skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
			if (init_wds) {
				skb_copy_from_linear_data(skb,
							  newskb->data + 6,
							  skb->len);
				skb_copy_to_linear_data(newskb, wds_mac, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
			} else
				skb_copy_from_linear_data(skb, newskb->data,
							  skb->len);

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
			      newskb->data, skb->data, skb->len, init_wds);
#endif

			newskb->dev = skb->dev;
			dev_kfree_skb_irq(skb);
			skb = newskb;
		}
	}
	/* display the buffer contents for debugging */
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* map the skb buffer to pci memory for DMA operation */
	pci_map_address = pci_map_single(priv->pdev,
					 (void *) skb->data, skb->len,
					 PCI_DMA_TODEVICE);
	/* NOTE(review): comparing against 0 is not the canonical way to
	 * detect a failed mapping; pci_dma_mapping_error() is — confirm
	 * against the DMA-mapping API before changing. */
	if (unlikely(pci_map_address == 0)) {
		printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
		       ndev->name);
		goto drop_free;
	}
	/* Place the fragment in the control block structure. */
	index = curr_frag % ISL38XX_CB_TX_QSIZE;
	fragment = &cb->tx_data_low[index];

	priv->pci_map_tx_address[index] = pci_map_address;
	/* store the skb address for future freeing  */
	priv->data_low_tx[index] = skb;
	/* set the proper fragment start address and size information */
	frame_size = skb->len;
	fragment->size = cpu_to_le16(frame_size);
	fragment->flags = cpu_to_le16(0);	/* set to 1 if more fragments */
	fragment->address = cpu_to_le32(pci_map_address);
	curr_frag++;

	/* The fragment address in the control block must have been
	 * written before announcing the frame buffer to device. */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);

	if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
	    > ISL38XX_CB_TX_QSIZE) {
		/* stop sends from upper layers */
		netif_stop_queue(ndev);

		/* set the full flag for the transmission queue */
		priv->data_low_tx_full = 1;
	}

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* trigger the device */
	islpci_trigger(priv);

	/* unlock the driver code */
	spin_unlock_irqrestore(&priv->slock, flags);

	return NETDEV_TX_OK;

      drop_free:
	ndev->stats.tx_dropped++;
	spin_unlock_irqrestore(&priv->slock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/*
 * Prepare a monitor-mode frame in *skb for delivery to the stack:
 * strip the firmware's rfmon header and, for PRISM-type interfaces,
 * prepend an AVS capture header built from it.  Returns 0 to deliver
 * the frame, -1 to drop it.  May replace *skb with an expanded copy
 * when there is not enough headroom for the AVS header.
 */
static inline int
islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
{
	/* The card reports full 802.11 packets but with a 20 bytes
	 * header and without the FCS. But there is a bit that
	 * indicates if the packet is corrupted :-) */
	struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;

	if (hdr->flags & 0x01)
		/* This one is bad. Drop it ! */
		return -1;
	if (priv->ndev->type == ARPHRD_IEEE80211_PRISM) {
		struct avs_80211_1_header *avs;
		/* extract the relevant data from the header */
		u32 clock = le32_to_cpu(hdr->clock);
		u8 rate = hdr->rate;
		u16 freq = le16_to_cpu(hdr->freq);
		u8 rssi = hdr->rssi;

		skb_pull(*skb, sizeof (struct rfmon_header));

		if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) {
			/* Not enough headroom for the AVS header: copy the
			 * frame into a larger skb and free the original. */
			struct sk_buff *newskb = skb_copy_expand(*skb,
								 sizeof (struct
									 avs_80211_1_header),
								 0, GFP_ATOMIC);
			if (newskb) {
				dev_kfree_skb_irq(*skb);
				*skb = newskb;
			} else
				return -1;
			/* This behavior is not very subtle... */
		}

		/* make room for the new header and fill it. */
		avs =
		    (struct avs_80211_1_header *) skb_push(*skb,
							   sizeof (struct
								   avs_80211_1_header));

		avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
		avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
		avs->mactime = cpu_to_be64(clock);
		avs->hosttime = cpu_to_be64(jiffies);
		avs->phytype = cpu_to_be32(6);	/*OFDM: 6 for (g), 8 for (a) */
		avs->channel = cpu_to_be32(channel_of_freq(freq));
		avs->datarate = cpu_to_be32(rate * 5);
		avs->antenna = cpu_to_be32(0);	/*unknown */
		avs->priority = cpu_to_be32(0);	/*unknown */
		avs->ssi_type = cpu_to_be32(3);	/*2: dBm, 3: raw RSSI */
		avs->ssi_signal = cpu_to_be32(rssi & 0x7f);
		avs->ssi_noise = cpu_to_be32(priv->local_iwstatistics.qual.noise);	/*better than 'undefined', I assume */
		avs->preamble = cpu_to_be32(0);	/*unknown */
		avs->encoding = cpu_to_be32(0);	/*unknown */

	} else
		skb_pull(*skb, sizeof (struct rfmon_header));

	(*skb)->protocol = htons(ETH_P_802_2);
	skb_reset_mac_header(*skb);
	(*skb)->pkt_type = PACKET_OTHERHOST;

	return 0;
}
/*
 * Process one received frame from the low-priority RX data queue, hand
 * it to the network stack, and refill the RX queue with fresh skbs.
 * Runs in IRQ context.  Always returns 0.
 */
int
islpci_eth_receive(islpci_private *priv)
{
	struct net_device *ndev = priv->ndev;
	isl38xx_control_block *control_block = priv->control_block;
	struct sk_buff *skb;
	u16 size;
	u32 index, offset;
	unsigned char *src;
	int discard = 0;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
#endif

	/* the device has written an Ethernet frame in the data area
	 * of the sk_buff without updating the structure, do it now */
	index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE;
	size = le16_to_cpu(control_block->rx_data_low[index].size);
	skb = priv->data_low_rx[index];
	/* misalignment (0-3 bytes) between the DMA address the device
	 * used and the skb data pointer */
	offset = ((unsigned long)
		  le32_to_cpu(control_block->rx_data_low[index].address) -
		  (unsigned long) skb->data) & 3;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING,
	      "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ",
	      control_block->rx_data_low[priv->free_data_rx].address, skb->data,
	      skb->len, offset, skb->truesize);
#endif

	/* delete the streaming DMA mapping before processing the skb */
	pci_unmap_single(priv->pdev,
			 priv->pci_map_rx_address[index],
			 MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);

	/* update the skb structure and align the buffer */
	skb_put(skb, size);
	if (offset) {
		/* shift the buffer allocation offset bytes to get the right frame.
		 * NOTE(review): this pulls/puts a fixed 2 bytes regardless of
		 * the computed offset value — verify against the hardware's
		 * buffer-alignment scheme. */
		skb_pull(skb, 2);
		skb_put(skb, 2);
	}
#if VERBOSE > SHOW_ERROR_MESSAGES
	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* check whether WDS is enabled and whether the data frame is a WDS frame */

	if (init_wds) {
		/* WDS enabled, check for the wds address on the first 6 bytes of the buffer */
		src = skb->data + 6;
		memmove(skb->data, src, skb->len - 6);
		skb_trim(skb, skb->len - 6);
	}
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
	DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);

	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif
	/* take care of monitor mode and spy monitoring. */
	if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) {
		skb->dev = ndev;
		discard = islpci_monitor_rx(priv, &skb);
	} else {
		if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
			/* The packet has a rx_annex. Read it for spy monitoring, Then
			 * remove it, while keeping the 2 leading MAC addr.
			 */
			struct iw_quality wstats;
			struct rx_annex_header *annex =
			    (struct rx_annex_header *) skb->data;
			wstats.level = annex->rfmon.rssi;
			/* The noise value can be a bit outdated if nobody's
			 * reading wireless stats... */
			wstats.noise = priv->local_iwstatistics.qual.noise;
			wstats.qual = wstats.level - wstats.noise;
			wstats.updated = 0x07;
			/* Update spy records */
			wireless_spy_update(ndev, annex->addr2, &wstats);

			skb_copy_from_linear_data(skb,
						  (skb->data +
						   sizeof(struct rfmon_header)),
						  2 * ETH_ALEN);
			skb_pull(skb, sizeof (struct rfmon_header));
		}
		skb->protocol = eth_type_trans(skb, ndev);
	}
	skb->ip_summed = CHECKSUM_NONE;
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += size;

	/* deliver the skb to the network layer */
#ifdef ISLPCI_ETH_DEBUG
	printk
	    ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
	     skb->data[0], skb->data[1], skb->data[2], skb->data[3],
	     skb->data[4], skb->data[5]);
#endif
	if (unlikely(discard)) {
		dev_kfree_skb_irq(skb);
		skb = NULL;
	} else
		netif_rx(skb);

	/* increment the read index for the rx data low queue */
	priv->free_data_rx++;

	/* add one or more sk_buff structures: refill the RX queue until
	 * the driver pointer is a full queue ahead of the free pointer */
	while (index =
	       le32_to_cpu(control_block->
			   driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]),
	       index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) {
		/* allocate an sk_buff for received data frames storage
		 * include any required alignment operations */
		skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
		if (unlikely(skb == NULL)) {
			/* error allocating an sk_buff structure elements */
			DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
			break;
		}
		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
		/* store the new skb structure pointer */
		index = index % ISL38XX_CB_RX_QSIZE;
		priv->data_low_rx[index] = skb;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING,
		      "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ",
		      skb, skb->data, skb->len, index, skb->truesize);
#endif

		/* set the streaming DMA mapping for proper PCI bus operation */
		priv->pci_map_rx_address[index] =
		    pci_map_single(priv->pdev, (void *) skb->data,
				   MAX_FRAGMENT_SIZE_RX + 2,
				   PCI_DMA_FROMDEVICE);
		if (unlikely(!priv->pci_map_rx_address[index])) {
			/* error mapping the buffer to device accessible memory address */
			DEBUG(SHOW_ERROR_MESSAGES,
			      "Error mapping DMA address\n");

			/* free the skbuf structure before aborting */
			dev_kfree_skb_irq((struct sk_buff *) skb);
			skb = NULL;
			break;
		}
		/* update the fragment address */
		control_block->rx_data_low[index].address =
			cpu_to_le32((u32)priv->pci_map_rx_address[index]);
		/* the address must be visible before the pointer update */
		wmb();

		/* increment the driver read pointer */
		le32_add_cpu(&control_block->
			     driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
	}

	/* trigger the device */
	islpci_trigger(priv);

	return 0;
}
/*
 * Workqueue handler (priv->reset_task): resets the device after a TX
 * timeout and then lets the stack transmit again.  Runs in process
 * context; scheduled from islpci_eth_tx_timeout().
 */
void
islpci_do_reset_and_wake(struct work_struct *work)
{
islpci_private *priv = container_of(work, islpci_private, reset_task);
islpci_reset(priv, 1);
/* Clear the pending flag so a later tx_timeout may schedule another
 * reset; smp_wmb() orders the flag clear before the queue wake-up as
 * seen by other CPUs. */
priv->reset_task_pending = 0;
smp_wmb();
netif_wake_queue(priv->ndev);
}
/*
 * net_device tx_timeout hook: invoked by the network core when the
 * transmit queue has been stuck longer than the watchdog timeout.
 * Counts the error and schedules an asynchronous device reset via
 * priv->reset_task (handled by islpci_do_reset_and_wake()); if a reset
 * is already pending, only logs that fact.
 */
void
islpci_eth_tx_timeout(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);

	/* increment the transmit error counter */
	ndev->stats.tx_errors++;

	if (!priv->reset_task_pending) {
		/* fix: terminate the message with '\n' so it is not merged
		 * with the next printk line in the kernel log */
		printk(KERN_WARNING
		       "%s: tx_timeout, scheduling reset\n", ndev->name);
		netif_stop_queue(ndev);
		priv->reset_task_pending = 1;
		schedule_work(&priv->reset_task);
	} else {
		printk(KERN_WARNING
		       "%s: tx_timeout, waiting for reset\n", ndev->name);
	}
}
| gpl-2.0 |
ProjectOpenCannibal/GingerKernel-LGLS670-old | drivers/media/video/adv7170.c | 941 | 9669 | /*
* adv7170 - adv7170, adv7171 video encoder driver version 0.0.1
*
* Copyright (C) 2002 Maxim Yevtyushkin <max@linuxmedialabs.com>
*
* Based on adv7176 driver by:
*
* Copyright (C) 1998 Dave Perks <dperks@ibm.net>
* Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net>
* Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
* - some corrections for Pinnacle Systems Inc. DC10plus card.
*
* Changes by Ronald Bultje <rbultje@ronald.bitfreak.net>
* - moved over to linux>=2.4.x i2c protocol (1/1/2003)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Analog Devices ADV7170 video encoder driver");
MODULE_AUTHOR("Maxim Yevtyushkin");
MODULE_LICENSE("GPL");
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/* ----------------------------------------------------------------------- */
/* Per-chip driver state for the ADV7170/ADV7171 video encoder. */
struct adv7170 {
struct v4l2_subdev sd;	/* embedded V4L2 subdevice */
unsigned char reg[128];	/* shadow copy of values written to chip registers */
v4l2_std_id norm;	/* currently programmed TV standard */
int input;	/* selected input: 0 = "pass_through", 1 = "play_back" */
};
/* Convert an embedded v4l2_subdev pointer back to its adv7170 container. */
static inline struct adv7170 *to_adv7170(struct v4l2_subdev *sd)
{
return container_of(sd, struct adv7170, sd);
}
static char *inputs[] = { "pass_through", "play_back" };
/* ----------------------------------------------------------------------- */
/* Write @value to chip register @reg and mirror it in the local
 * register shadow.  Returns the SMBus transfer result. */
static inline int adv7170_write(struct v4l2_subdev *sd, u8 reg, u8 value)
{
	struct adv7170 *enc = to_adv7170(sd);
	struct i2c_client *c = v4l2_get_subdevdata(sd);

	enc->reg[reg] = value;
	return i2c_smbus_write_byte_data(c, reg, value);
}
/* Read one chip register over SMBus; returns the value or a negative
 * errno from the I2C layer. */
static inline int adv7170_read(struct v4l2_subdev *sd, u8 reg)
{
	return i2c_smbus_read_byte_data(v4l2_get_subdevdata(sd), reg);
}
/*
 * Write a sequence of (register, value) byte pairs to the chip.
 *
 * @data: flat array of reg/value pairs (e.g. init_NTSC / init_PAL)
 * @len:  total number of bytes in @data (consumed two at a time)
 *
 * If the adapter supports raw I2C, consecutive registers are batched
 * into one message using the chip's register auto-increment; otherwise
 * each pair is written with a plain SMBus byte write.  The register
 * shadow (encoder->reg[]) is updated in both paths.  Returns the result
 * of the last transfer (negative errno on failure).
 */
static int adv7170_write_block(struct v4l2_subdev *sd,
const u8 *data, unsigned int len)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct adv7170 *encoder = to_adv7170(sd);
int ret = -1;
u8 reg;
/* the adv7170 has an autoincrement function, use it if
 * the adapter understands raw I2C */
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
/* do raw I2C, not smbus compatible */
u8 block_data[32];
int block_len;
while (len >= 2) {
block_len = 0;
/* first message byte is the start register */
block_data[block_len++] = reg = data[0];
do {
/* append values while the pairs target consecutive
 * registers and the 32-byte buffer has room */
block_data[block_len++] =
encoder->reg[reg++] = data[1];
len -= 2;
data += 2;
} while (len >= 2 && data[0] == reg && block_len < 32);
ret = i2c_master_send(client, block_data, block_len);
if (ret < 0)
break;
}
} else {
/* do some slow I2C emulation kind of thing */
while (len >= 2) {
reg = *data++;
ret = adv7170_write(sd, reg, *data++);
if (ret < 0)
break;
len -= 2;
}
}
return ret;
}
/* ----------------------------------------------------------------------- */
#define TR0MODE 0x4c
#define TR0RST 0x80
#define TR1CAPT 0x00
#define TR1PLAY 0x00
/* Register initialisation sequence for NTSC output: (register, value)
 * pairs consumed by adv7170_write_block(). */
static const unsigned char init_NTSC[] = {
0x00, 0x10, /* MR0 */
0x01, 0x20, /* MR1 */
0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */
0x03, 0x80, /* MR3 */
0x04, 0x30, /* MR4 */
0x05, 0x00, /* Reserved */
0x06, 0x00, /* Reserved */
0x07, TR0MODE, /* TM0 */
0x08, TR1CAPT, /* TM1 */
0x09, 0x16, /* Fsc0 */
0x0a, 0x7c, /* Fsc1 */
0x0b, 0xf0, /* Fsc2 */
0x0c, 0x21, /* Fsc3 */
0x0d, 0x00, /* Subcarrier Phase */
0x0e, 0x00, /* Closed Capt. Ext 0 */
0x0f, 0x00, /* Closed Capt. Ext 1 */
0x10, 0x00, /* Closed Capt. 0 */
0x11, 0x00, /* Closed Capt. 1 */
0x12, 0x00, /* Pedestal Ctl 0 */
0x13, 0x00, /* Pedestal Ctl 1 */
0x14, 0x00, /* Pedestal Ctl 2 */
0x15, 0x00, /* Pedestal Ctl 3 */
0x16, 0x00, /* CGMS_WSS_0 */
0x17, 0x00, /* CGMS_WSS_1 */
0x18, 0x00, /* CGMS_WSS_2 */
0x19, 0x00, /* Teletext Ctl */
};
/* Register initialisation sequence for PAL output; differs from NTSC
 * only in MR0 and the subcarrier frequency (Fsc0-Fsc3) values. */
static const unsigned char init_PAL[] = {
0x00, 0x71, /* MR0 */
0x01, 0x20, /* MR1 */
0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */
0x03, 0x80, /* MR3 */
0x04, 0x30, /* MR4 */
0x05, 0x00, /* Reserved */
0x06, 0x00, /* Reserved */
0x07, TR0MODE, /* TM0 */
0x08, TR1CAPT, /* TM1 */
0x09, 0xcb, /* Fsc0 */
0x0a, 0x8a, /* Fsc1 */
0x0b, 0x09, /* Fsc2 */
0x0c, 0x2a, /* Fsc3 */
0x0d, 0x00, /* Subcarrier Phase */
0x0e, 0x00, /* Closed Capt. Ext 0 */
0x0f, 0x00, /* Closed Capt. Ext 1 */
0x10, 0x00, /* Closed Capt. 0 */
0x11, 0x00, /* Closed Capt. 1 */
0x12, 0x00, /* Pedestal Ctl 0 */
0x13, 0x00, /* Pedestal Ctl 1 */
0x14, 0x00, /* Pedestal Ctl 2 */
0x15, 0x00, /* Pedestal Ctl 3 */
0x16, 0x00, /* CGMS_WSS_0 */
0x17, 0x00, /* CGMS_WSS_1 */
0x18, 0x00, /* CGMS_WSS_2 */
0x19, 0x00, /* Teletext Ctl */
};
/*
 * Program the encoder for a new output standard (NTSC or PAL).
 *
 * Loads the full init table for the requested standard, re-enables
 * genlock when the pass-through input (0) is selected, and pulses the
 * timing reset bit (TR0RST) to latch the configuration.
 * Returns 0 on success or -EINVAL for unsupported standards.
 *
 * Refactor: the NTSC and PAL branches were identical apart from the
 * init table, so the common programming sequence is now shared.
 */
static int adv7170_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct adv7170 *encoder = to_adv7170(sd);
	const unsigned char *init;
	int size;

	v4l2_dbg(1, debug, sd, "set norm %llx\n", (unsigned long long)std);

	if (std & V4L2_STD_NTSC) {
		init = init_NTSC;
		size = sizeof(init_NTSC);
	} else if (std & V4L2_STD_PAL) {
		init = init_PAL;
		size = sizeof(init_PAL);
	} else {
		v4l2_dbg(1, debug, sd, "illegal norm: %llx\n",
				(unsigned long long)std);
		return -EINVAL;
	}

	adv7170_write_block(sd, init, size);
	if (encoder->input == 0)
		adv7170_write(sd, 0x02, 0x0e);	/* Enable genlock */
	adv7170_write(sd, 0x07, TR0MODE | TR0RST);
	adv7170_write(sd, 0x07, TR0MODE);

	v4l2_dbg(1, debug, sd, "switched to %llx\n", (unsigned long long)std);
	encoder->norm = std;
	return 0;
}
/*
 * Select the encoder video input: 0 = decoder (pass-through),
 * 1 = ZR36060 (play-back).  Any other value returns -EINVAL; the
 * "color bar" case mentioned below is not implemented.  @output and
 * @config are ignored.
 */
static int adv7170_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
struct adv7170 *encoder = to_adv7170(sd);
/* RJ: input = 0: input is from decoder
input = 1: input is from ZR36060
input = 2: color bar */
/* NOTE(review): logged before @input is validated, so an out-of-range
 * value is reported as "ZR36060" here and then rejected below. */
v4l2_dbg(1, debug, sd, "set input from %s\n",
input == 0 ? "decoder" : "ZR36060");
switch (input) {
case 0:
adv7170_write(sd, 0x01, 0x20);
adv7170_write(sd, 0x08, TR1CAPT); /* TR1 */
adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */
adv7170_write(sd, 0x07, TR0MODE | TR0RST);
adv7170_write(sd, 0x07, TR0MODE);
/* udelay(10); */
break;
case 1:
adv7170_write(sd, 0x01, 0x00);
adv7170_write(sd, 0x08, TR1PLAY); /* TR1 */
adv7170_write(sd, 0x02, 0x08);
adv7170_write(sd, 0x07, TR0MODE | TR0RST);
adv7170_write(sd, 0x07, TR0MODE);
/* udelay(10); */
break;
default:
v4l2_dbg(1, debug, sd, "illegal input: %d\n", input);
return -EINVAL;
}
v4l2_dbg(1, debug, sd, "switched to %s\n", inputs[input]);
encoder->input = input;
return 0;
}
/* V4L2 chip-ident query: always reports this device as an ADV7170. */
static int adv7170_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
	return v4l2_chip_ident_i2c_client(v4l2_get_subdevdata(sd), chip,
					  V4L2_IDENT_ADV7170, 0);
}
/* ----------------------------------------------------------------------- */
/* Core ops: only chip identification is implemented. */
static const struct v4l2_subdev_core_ops adv7170_core_ops = {
.g_chip_ident = adv7170_g_chip_ident,
};
/* Video ops: output-standard selection and input routing. */
static const struct v4l2_subdev_video_ops adv7170_video_ops = {
.s_std_output = adv7170_s_std_output,
.s_routing = adv7170_s_routing,
};
/* Subdev ops table registered in adv7170_probe(). */
static const struct v4l2_subdev_ops adv7170_ops = {
.core = &adv7170_core_ops,
.video = &adv7170_video_ops,
};
/* ----------------------------------------------------------------------- */
/*
 * I2C probe: verify adapter capabilities, allocate per-chip state,
 * register the V4L2 subdevice, and program the NTSC defaults.
 *
 * A failure while programming the chip is only logged (the subdevice
 * stays registered and probe still returns 0).  Returns -ENODEV when
 * the adapter lacks SMBus byte-data support, -ENOMEM on allocation
 * failure.  The allocated state is freed in adv7170_remove().
 */
static int adv7170_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adv7170 *encoder;
struct v4l2_subdev *sd;
int i;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
encoder = kzalloc(sizeof(struct adv7170), GFP_KERNEL);
if (encoder == NULL)
return -ENOMEM;
sd = &encoder->sd;
v4l2_i2c_subdev_init(sd, client, &adv7170_ops);
/* power-on software defaults: NTSC, pass-through input */
encoder->norm = V4L2_STD_NTSC;
encoder->input = 0;
i = adv7170_write_block(sd, init_NTSC, sizeof(init_NTSC));
if (i >= 0) {
/* pulse the timing reset, then read register 0x12 for the revision */
i = adv7170_write(sd, 0x07, TR0MODE | TR0RST);
i = adv7170_write(sd, 0x07, TR0MODE);
i = adv7170_read(sd, 0x12);
v4l2_dbg(1, debug, sd, "revision %d\n", i & 1);
}
if (i < 0)
v4l2_dbg(1, debug, sd, "init error 0x%x\n", i);
return 0;
}
/* I2C remove: unregister the subdevice and free the state allocated in
 * adv7170_probe(). */
static int adv7170_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
kfree(to_adv7170(sd));
return 0;
}
/* ----------------------------------------------------------------------- */
/* Supported device names; the ADV7171 is handled by the same code. */
static const struct i2c_device_id adv7170_id[] = {
{ "adv7170", 0 },
{ "adv7171", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7170_id);
/* Driver glue consumed by v4l2-i2c-drv to generate the i2c_driver. */
static struct v4l2_i2c_driver_data v4l2_i2c_data = {
.name = "adv7170",
.probe = adv7170_probe,
.remove = adv7170_remove,
.id_table = adv7170_id,
};
| gpl-2.0 |
friedrich420/N910G-AEL-Kernel-Lollipop-Sources | arch/x86/kernel/process_32.c | 1197 | 9496 | /*
* Copyright (C) 1995 Linus Torvalds
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*/
/*
* This file handles the architecture-dependent parts of process handling..
*/
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <asm/pgtable.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif
#include <linux/err.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
/*
* Return saved PC of a blocked thread.
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
/* thread.sp points at the frame built at context switch; the saved EIP
 * is taken as the 4th word.  NOTE(review): this offset must match the
 * switch frame layout in asm/switch_to.h — confirm there. */
return ((unsigned long *)tsk->thread.sp)[3];
}
/*
 * Dump CPU register state (used for oops / debug output).
 * @all: when non-zero, additionally dump control (CR0-CR4) and debug
 *       (DR0-DR3, DR6, DR7) registers.
 * For user-mode frames sp/ss/gs come from @regs; for in-kernel frames
 * they are read live from the CPU, since the hardware did not push them.
 */
void __show_regs(struct pt_regs *regs, int all)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
unsigned long d0, d1, d2, d3, d6, d7;
unsigned long sp;
unsigned short ss, gs;
if (user_mode_vm(regs)) {
sp = regs->sp;
ss = regs->ss & 0xffff;
gs = get_user_gs(regs);
} else {
sp = kernel_stack_pointer(regs);
savesegment(ss, ss);
savesegment(gs, gs);
}
printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
(u16)regs->cs, regs->ip, regs->flags,
smp_processor_id());
print_symbol("EIP is at %s\n", regs->ip);
printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
regs->ax, regs->bx, regs->cx, regs->dx);
printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
regs->si, regs->di, regs->bp, sp);
printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
(u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);
/* the control/debug register dump below is optional */
if (!all)
return;
cr0 = read_cr0();
cr2 = read_cr2();
cr3 = read_cr3();
cr4 = read_cr4_safe();
printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
cr0, cr2, cr3, cr4);
get_debugreg(d0, 0);
get_debugreg(d1, 1);
get_debugreg(d2, 2);
get_debugreg(d3, 3);
printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
d0, d1, d2, d3);
get_debugreg(d6, 6);
get_debugreg(d7, 7);
printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
d6, d7);
}
/*
 * Release architecture-specific resources of an exiting task.  The
 * task's mm must already be gone (enforced by the BUG_ON).
 */
void release_thread(struct task_struct *dead_task)
{
BUG_ON(dead_task->mm);
release_vm86_irqs(dead_task);
}
/*
 * Arch-specific part of fork/clone: build the child's kernel stack
 * frame and thread_struct so it can be scheduled.
 *
 * @clone_flags: clone(2) flags; CLONE_SETTLS is handled here
 * @sp:  new user stack pointer, or the function to run for kernel threads
 * @arg: argument for a kernel-thread function
 * @p:   the freshly copied child task
 *
 * Kernel threads (PF_KTHREAD) get zeroed pt_regs and start in
 * ret_from_kernel_thread; user threads inherit the parent's registers
 * (ax forced to 0 so fork() returns 0 in the child) and start in
 * ret_from_fork.  Returns 0 on success or -ENOMEM.
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct *p)
{
struct pt_regs *childregs = task_pt_regs(p);
struct task_struct *tsk;
int err;
/* the saved kernel sp sits just below the child's pt_regs */
p->thread.sp = (unsigned long) childregs;
p->thread.sp0 = (unsigned long) (childregs+1);
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
p->thread.ip = (unsigned long) ret_from_kernel_thread;
task_user_gs(p) = __KERNEL_STACK_CANARY;
childregs->ds = __USER_DS;
childregs->es = __USER_DS;
childregs->fs = __KERNEL_PERCPU;
childregs->bx = sp; /* function */
childregs->bp = arg;
childregs->orig_ax = -1;
childregs->cs = __KERNEL_CS | get_kernel_rpl();
childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
p->fpu_counter = 0;
p->thread.io_bitmap_ptr = NULL;
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
return 0;
}
/* user thread: start from a copy of the parent's register frame */
*childregs = *current_pt_regs();
childregs->ax = 0;
if (sp)
childregs->sp = sp;
p->thread.ip = (unsigned long) ret_from_fork;
task_user_gs(p) = get_user_gs(current_pt_regs());
p->fpu_counter = 0;
p->thread.io_bitmap_ptr = NULL;
tsk = current;
err = -ENOMEM;
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
/* duplicate the parent's ioperm bitmap, if it has one */
if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
IO_BITMAP_BYTES, GFP_KERNEL);
if (!p->thread.io_bitmap_ptr) {
p->thread.io_bitmap_max = 0;
return -ENOMEM;
}
set_tsk_thread_flag(p, TIF_IO_BITMAP);
}
err = 0;
/*
 * Set a new TLS for the child thread?
 */
if (clone_flags & CLONE_SETTLS)
err = do_set_thread_area(p, -1,
(struct user_desc __user *)childregs->si, 0);
/* undo the bitmap copy if the TLS setup failed */
if (err && p->thread.io_bitmap_ptr) {
kfree(p->thread.io_bitmap_ptr);
p->thread.io_bitmap_max = 0;
}
return err;
}
/*
 * Prepare @regs so the task enters user mode at @new_ip with stack
 * @new_sp (called at exec time).  Loads the flat user code/data
 * segments, clears fs/gs, and enables interrupts in EFLAGS.
 */
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
set_user_gs(regs, 0);
regs->fs = 0;
regs->ds = __USER_DS;
regs->es = __USER_DS;
regs->ss = __USER_DS;
regs->cs = __USER_CS;
regs->ip = new_ip;
regs->sp = new_sp;
regs->flags = X86_EFLAGS_IF;
/*
 * force it to the iret return path by making it look as if there was
 * some work pending.
 */
set_thread_flag(TIF_NOTIFY_RESUME);
}
EXPORT_SYMBOL_GPL(start_thread);
/*
* switch_to(x,y) should switch tasks from x to y.
*
* We fsave/fwait so that an exception goes off at the right time
* (as a call from the fsave or fwait in effect) rather than to
* the wrong process. Lazy FP saving no longer makes any sense
* with modern CPU's, and this simplifies a lot of things (SMP
* and UP become the same).
*
* NOTE! We used to use the x86 hardware context switching. The
* reason for not using it any more becomes apparent when you
* try to recover gracefully from saved state that is no longer
* valid (stale segment register values in particular). With the
* hardware task-switch, there is no way to fix up bad state in
* a reasonable manner.
*
* The fact that Intel documents the hardware task-switching to
* be slow is a fairly red herring - this code is not noticeably
* faster. However, there _is_ some room for improvement here,
* so the performance issues may eventually be a valid point.
* More important, however, is the fact that this allows us much
* more flexibility.
*
* The return value (in %ax) will be the "prev" task after
* the task-switch, and shows up in ret_from_fork in entry.S,
* for example.
*/
/*
 * C half of the context switch (the asm half lives in switch_to()).
 * Saves per-thread state of @prev_p, loads that of @next_p, and returns
 * @prev_p so the asm stub can pass it on.  The FPU switch is bracketed
 * by switch_fpu_prepare()/switch_fpu_finish() around the segment and
 * TLS reloads.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
fpu_switch_t fpu;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
fpu = switch_fpu_prepare(prev_p, next_p, cpu);
/*
 * Reload esp0.
 */
load_sp0(tss, next);
/*
 * Save away %gs. No need to save %fs, as it was saved on the
 * stack on entry. No need to save %es and %ds, as those are
 * always kernel segments while inside the kernel. Doing this
 * before setting the new TLS descriptors avoids the situation
 * where we temporarily have non-reloadable segments in %fs
 * and %gs. This could be an issue if the NMI handler ever
 * used %fs or %gs (it does not today), or if the kernel is
 * running inside of a hypervisor layer.
 */
lazy_save_gs(prev->gs);
/*
 * Load the per-thread Thread-Local Storage descriptor.
 */
load_TLS(next, cpu);
/*
 * Restore IOPL if needed. In normal use, the flags restore
 * in the switch assembly will handle this. But if the kernel
 * is running virtualized at a non-zero CPL, the popf will
 * not restore flags, so it must be done in a separate step.
 */
if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
set_iopl_mask(next->iopl);
/*
 * Now maybe handle debug registers and/or IO bitmaps
 */
if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
__switch_to_xtra(prev_p, next_p, tss);
/*
 * Leave lazy mode, flushing any hypercalls made here.
 * This must be done before restoring TLS segments so
 * the GDT and LDT are properly updated, and must be
 * done before math_state_restore, so the TS bit is up
 * to date.
 */
arch_end_context_switch(next_p);
/*
 * Restore %gs if needed (which is common)
 */
if (prev->gs | next->gs)
lazy_load_gs(next->gs);
switch_fpu_finish(next_p, fpu);
/* publish the new current task for this CPU */
this_cpu_write(current_task, next_p);
return prev_p;
}
/* Highest valid offsets of a saved return address / frame pointer
 * within the task's stack page. */
#define top_esp (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
/*
 * Determine where a sleeping task is blocked: follow up to 16 saved
 * frame pointers starting from the task's saved sp and return the
 * first return address outside the scheduler.  Returns 0 for the
 * current/running task or when the stack looks inconsistent.
 * NOTE(review): inherently racy if @p wakes up concurrently; the
 * bounds checks below keep the walk inside the stack page.
 */
unsigned long get_wchan(struct task_struct *p)
{
unsigned long bp, sp, ip;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)task_stack_page(p);
sp = p->thread.sp;
if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
return 0;
/* include/asm-i386/system.h:switch_to() pushes bp last. */
bp = *(unsigned long *) sp;
do {
if (bp < stack_page || bp > top_ebp+stack_page)
return 0;
/* return address sits just above the saved frame pointer */
ip = *(unsigned long *) (bp+4);
if (!in_sched_functions(ip))
return ip;
bp = *(unsigned long *) bp;
} while (count++ < 16);
return 0;
}
| gpl-2.0 |
TEAM-RAZOR-DEVICES/kernel_cyanogen_tomato | drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c | 2221 | 4293 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
/* Bit-bang helper: drive the SCL line of an NV50 I2C pad.  Bit 0 of the
 * cached pad state mirrors SCL; the whole state word is written back. */
void
nv50_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
{
	struct nv50_i2c_port *port = (void *)base;
	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;

	if (state)
		port->state |= 0x01;
	else
		port->state &= 0xfe;

	nv_wr32(priv, port->addr, port->state);
}
/* Bit-bang helper: drive the SDA line (bit 1 of the cached pad state). */
void
nv50_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
{
	struct nv50_i2c_port *port = (void *)base;
	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;

	if (state)
		port->state |= 0x02;
	else
		port->state &= 0xfd;

	nv_wr32(priv, port->addr, port->state);
}
int
nv50_i2c_sense_scl(struct nouveau_i2c_port *base)
{
struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
struct nv50_i2c_port *port = (void *)base;
return !!(nv_rd32(priv, port->addr) & 0x00000001);
}
int
nv50_i2c_sense_sda(struct nouveau_i2c_port *base)
{
struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
struct nv50_i2c_port *port = (void *)base;
return !!(nv_rd32(priv, port->addr) & 0x00000002);
}
/* Bit-bang callbacks shared by all NV50 GPIO-driven I2C ports. */
static const struct nouveau_i2c_func
nv50_i2c_func = {
.drive_scl = nv50_i2c_drive_scl,
.drive_sda = nv50_i2c_drive_sda,
.sense_scl = nv50_i2c_sense_scl,
.sense_sda = nv50_i2c_sense_sda,
};
/* MMIO offsets of the I2C pad registers, indexed by the DCB "drive"
 * number; non-static so other chipset files can reuse the table. */
const u32 nv50_i2c_addr[] = {
0x00e138, 0x00e150, 0x00e168, 0x00e180,
0x00e254, 0x00e274, 0x00e764, 0x00e780,
0x00e79c, 0x00e7b8
};
const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);
/*
 * Constructor for one DCB-described bit-bang I2C port.  Validates the
 * DCB "drive" index against the pad register table and records the
 * port's MMIO address and default line state.  On error the caller
 * destroys the partially constructed object via *pobject.
 */
static int
nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 index,
struct nouveau_object **pobject)
{
struct dcb_i2c_entry *info = data;
struct nv50_i2c_port *port;
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
&nouveau_i2c_bit_algo, &port);
*pobject = nv_object(port);
if (ret)
return ret;
if (info->drive >= nv50_i2c_addr_nr)
return -EINVAL;
port->base.func = &nv50_i2c_func;
/* default pad state: bits 0/1 release SCL/SDA (see drive_scl/sda);
 * the meaning of bit 2 is not visible here — presumably an enable */
port->state = 0x00000007;
port->addr = nv50_i2c_addr[info->drive];
return 0;
}
/* Object init hook: rewrite the cached pad state to the hardware before
 * running the generic port init. */
int
nv50_i2c_port_init(struct nouveau_object *object)
{
struct nv50_i2c_priv *priv = (void *)object->engine;
struct nv50_i2c_port *port = (void *)object;
nv_wr32(priv, port->addr, port->state);
return nouveau_i2c_port_init(&port->base);
}
/* Port classes provided by this engine: only DCB "NVIO bit-bang" ports,
 * using the generic dtor/fini with the NV50-specific ctor/init above. */
static struct nouveau_oclass
nv50_i2c_sclass[] = {
{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_i2c_port_ctor,
.dtor = _nouveau_i2c_port_dtor,
.init = nv50_i2c_port_init,
.fini = _nouveau_i2c_port_fini,
},
},
{}
};
/*
 * Engine constructor: create the base i2c engine with the NV50 port
 * classes.  *pobject is always published so the caller can clean up a
 * partially constructed object on failure.
 */
static int
nv50_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nv50_i2c_priv *priv;
	int ret;

	ret = nouveau_i2c_create(parent, engine, oclass, nv50_i2c_sclass, &priv);
	*pobject = nv_object(priv);
	return ret;
}
/* Subdev class for the NV50 I2C engine; only the constructor is
 * chipset-specific, the rest uses the generic i2c helpers. */
struct nouveau_oclass
nv50_i2c_oclass = {
.handle = NV_SUBDEV(I2C, 0x50),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_i2c_ctor,
.dtor = _nouveau_i2c_dtor,
.init = _nouveau_i2c_init,
.fini = _nouveau_i2c_fini,
},
};
| gpl-2.0 |
erikcas/android_kernel_sony_msm | lib/cpumask.c | 2477 | 4481 | #include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/bootmem.h>
/* Return the first cpu set in *srcp, or NR_CPUS when the mask is empty. */
int __first_cpu(const cpumask_t *srcp)
{
return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
}
EXPORT_SYMBOL(__first_cpu);
/* Return the next cpu set in *srcp after @n, or NR_CPUS when none. */
int __next_cpu(int n, const cpumask_t *srcp)
{
return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
}
EXPORT_SYMBOL(__next_cpu);
#if NR_CPUS > 64
/* Like __next_cpu() but bounded by the runtime cpu count nr_cpu_ids;
 * only built separately when the compile-time mask is large. */
int __next_cpu_nr(int n, const cpumask_t *srcp)
{
return min_t(int, nr_cpu_ids,
find_next_bit(srcp->bits, nr_cpu_ids, n+1));
}
EXPORT_SYMBOL(__next_cpu_nr);
#endif
/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	/* walk the set bits of src1p, stopping at the first also in src2p */
	for (n = cpumask_next(n, src1p);
	     n < nr_cpu_ids;
	     n = cpumask_next(n, src1p)) {
		if (cpumask_test_cpu(n, src2p))
			break;
	}
	return n;
}
EXPORT_SYMBOL(cpumask_next_and);
/**
 * cpumask_any_but - return a "random" in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
unsigned int i;
cpumask_check(cpu);
/* first set bit that is not @cpu; falls through with i >= nr_cpu_ids
 * when the mask contains at most @cpu itself */
for_each_cpu(i, mask)
if (i != cpu)
break;
return i;
}
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
* alloc_cpumask_var_node - allocate a struct cpumask on a given node
* @mask: pointer to cpumask_var_t where the cpumask is returned
* @flags: GFP_ flags
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
* a nop returning a constant 1 (in <linux/cpumask.h>)
* Returns TRUE if memory allocation succeeded, FALSE otherwise.
*
* In addition, mask will be NULL if this fails. Note that gcc is
* usually smart enough to know that mask can never be NULL if
* CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
* too.
*/
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
*mask = kmalloc_node(cpumask_size(), flags, node);
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
if (!*mask) {
printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
dump_stack();
}
#endif
/* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
if (*mask) {
unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
unsigned int tail;
/* zero the bits between nr_cpumask_bits and NR_CPUS so legacy
 * NR_CPUS-wide iterators never see uninitialised tail bits */
tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
memset(ptr + cpumask_size() - tail, 0, tail);
}
return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
/* Node-aware allocator that additionally zeroes the mask. */
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);
/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);
/* Node-agnostic allocator that additionally zeroes the mask. */
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
*mask = alloc_bootmem(cpumask_size());
}
/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);
/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
free_bootmem(__pa(mask), cpumask_size());
}
#endif
| gpl-2.0 |
Mr-AW/Kernel_TeLo_LP_LenovoA6000 | kernel/trace/trace_probe.c | 2477 | 20761 | /*
* Common code for probe-based Dynamic events.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* This code was copied from kernel/trace/trace_kprobe.c written by
* Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
*
* Updates to make this generic:
* Copyright (C) IBM Corporation, 2010-2011
* Author: Srikar Dronamraju
*/
#include "trace_probe.h"
/* Argument names a probe definition may not use: they would collide
 * with the common trace-event fields or the fixed probe fields. */
const char *reserved_field_names[] = {
"common_type",
"common_flags",
"common_preempt_count",
"common_pid",
"common_tgid",
FIELD_STRING_IP,
FIELD_STRING_RETIP,
FIELD_STRING_FUNC,
};
/* Printing function type */
#define PRINT_TYPE_FUNC_NAME(type) print_type_##type
#define PRINT_TYPE_FMT_NAME(type) print_type_format_##type
/* Printing in basic type function template */
#define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \
static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \
const char *name, \
void *data, void *ent)\
{ \
return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\
} \
static const char PRINT_TYPE_FMT_NAME(type)[] = fmt;
DEFINE_BASIC_PRINT_TYPE_FUNC(u8, "%x", unsigned int)
DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "%x", unsigned int)
DEFINE_BASIC_PRINT_TYPE_FUNC(u32, "%lx", unsigned long)
DEFINE_BASIC_PRINT_TYPE_FUNC(u64, "%llx", unsigned long long)
DEFINE_BASIC_PRINT_TYPE_FUNC(s8, "%d", int)
DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int)
DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long)
DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long)
/* Resolve a data_loc word to the data it references, relative to the
 * data_loc word itself (offset extracted by get_rloc_offs()). */
static inline void *get_rloc_data(u32 *dl)
{
return (u8 *)dl + get_rloc_offs(*dl);
}
/* For data_loc conversion */
/* Same, but the stored offset is relative to the trace entry @ent. */
static inline void *get_loc_data(u32 *dl, void *ent)
{
return (u8 *)ent + get_rloc_offs(*dl);
}
/* For defining macros, define string/string_size types */
typedef u32 string;
typedef u32 string_size;
/* Print type function for string type */
static __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s,
const char *name,
void *data, void *ent)
{
int len = *(u32 *)data >> 16;
if (!len)
return trace_seq_printf(s, " %s=(fault)", name);
else
return trace_seq_printf(s, " %s=\"%s\"", name,
(const char *)get_loc_data(data, ent));
}
static const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
/* Naming helper for fetch functions: fetch_<method>_<type> */
#define FETCH_FUNC_NAME(method, type) fetch_##method##_##type
/*
 * Define macro for basic types - we don't need to define s* types, because
 * we have to care only about bitwidth at recording time.
 */
#define DEFINE_BASIC_FETCH_FUNCS(method) \
DEFINE_FETCH_##method(u8) \
DEFINE_FETCH_##method(u16) \
DEFINE_FETCH_##method(u32) \
DEFINE_FETCH_##method(u64)
/* True iff @fn is one of @method's fetch functions (and not NULL) */
#define CHECK_FETCH_FUNCS(method, fn) \
(((FETCH_FUNC_NAME(method, u8) == fn) || \
(FETCH_FUNC_NAME(method, u16) == fn) || \
(FETCH_FUNC_NAME(method, u32) == fn) || \
(FETCH_FUNC_NAME(method, u64) == fn) || \
(FETCH_FUNC_NAME(method, string) == fn) || \
(FETCH_FUNC_NAME(method, string_size) == fn)) \
&& (fn != NULL))
/* Data fetch function templates */
/* Fetch from a CPU register; @offset is the register offset in pt_regs */
#define DEFINE_FETCH_reg(type) \
static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \
void *offset, void *dest) \
{ \
*(type *)dest = (type)regs_get_register(regs, \
(unsigned int)((unsigned long)offset)); \
}
DEFINE_BASIC_FETCH_FUNCS(reg)
/* No string on the register */
#define fetch_reg_string NULL
#define fetch_reg_string_size NULL
/* Fetch the @offset-th word from the kernel stack */
#define DEFINE_FETCH_stack(type) \
static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
void *offset, void *dest) \
{ \
*(type *)dest = (type)regs_get_kernel_stack_nth(regs, \
(unsigned int)((unsigned long)offset)); \
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string NULL
#define fetch_stack_string_size NULL
/* Fetch the function return value (for return probes) */
#define DEFINE_FETCH_retval(type) \
static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\
void *dummy, void *dest) \
{ \
*(type *)dest = (type)regs_return_value(regs); \
}
DEFINE_BASIC_FETCH_FUNCS(retval)
/* No string on the retval */
#define fetch_retval_string NULL
#define fetch_retval_string_size NULL
/* Fetch from a kernel address; stores 0 if the read faults */
#define DEFINE_FETCH_memory(type) \
static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
void *addr, void *dest) \
{ \
type retval; \
if (probe_kernel_address(addr, retval)) \
*(type *)dest = 0; \
else \
*(type *)dest = retval; \
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
						      void *addr, void *dest)
{
	long ret;
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	u8 *src = addr;
	mm_segment_t old_fs = get_fs();

	if (!maxlen)
		return;

	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	set_fs(KERNEL_DS);
	pagefault_disable();

	/* Copy byte-by-byte until NUL, fault, or maxlen is reached */
	do
		ret = __copy_from_user_inatomic(dst++, src++, 1);
	while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);

	/* Force termination on the last byte actually written */
	dst[-1] = '\0';
	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		/* Record copied length (incl. NUL) in the data_loc word */
		*(u32 *)dest = make_data_rloc(src - (u8 *)addr,
					      get_rloc_offs(*(u32 *)dest));
	}
}
/* Return the length of string -- including null terminal byte */
static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
							   void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	/* Walk bytes (with faults disabled) until NUL or MAX_STRING_SIZE */
	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
/* Memory fetching by symbol */
struct symbol_cache {
	char *symbol;		/* symbol name (owned, kstrdup'd) */
	long offset;		/* byte offset added to the resolved address */
	unsigned long addr;	/* cached resolved address (0 if unresolved) */
};

/* Re-resolve the symbol via kallsyms and refresh the cached address */
static unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

/* Release a symbol cache and its owned name string */
static void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

/*
 * Allocate a symbol cache for @sym(+@offset) and resolve it once.
 * Returns NULL on empty name or allocation failure.
 */
static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;

	update_symbol_cache(sc);

	return sc;
}

/* Fetch via a symbol_cache: read memory at the cached address, or 0 */
#define DEFINE_FETCH_symbol(type) \
static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\
void *data, void *dest) \
{ \
struct symbol_cache *sc = data; \
if (sc->addr) \
fetch_memory_##type(regs, (void *)sc->addr, dest); \
else \
*(type *)dest = 0; \
}
DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)
/* Dereference memory access function */
struct deref_fetch_param {
	struct fetch_param orig;	/* inner fetch producing the base address */
	long offset;			/* offset added before dereferencing */
};

/* Fetch the inner value as an address, add offset, then read memory there */
#define DEFINE_FETCH_deref(type) \
static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\
void *data, void *dest) \
{ \
struct deref_fetch_param *dprm = data; \
unsigned long addr; \
call_fetch(&dprm->orig, regs, &addr); \
if (addr) { \
addr += dprm->offset; \
fetch_memory_##type(regs, (void *)addr, dest); \
} else \
*(type *)dest = 0; \
}
DEFINE_BASIC_FETCH_FUNCS(deref)
DEFINE_FETCH_deref(string)
DEFINE_FETCH_deref(string_size)

/* Recursively refresh any symbol caches inside a deref chain */
static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data)
{
	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
		update_deref_fetch_param(data->orig.data);
	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
		update_symbol_cache(data->orig.data);
}

/* Recursively free a deref chain and its inner symbol caches */
static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
{
	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
		free_deref_fetch_param(data->orig.data);
	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
		free_symbol_cache(data->orig.data);
	kfree(data);
}
/* Bitfield fetch function */
struct bitfield_fetch_param {
	struct fetch_param orig;	/* inner fetch producing the raw word */
	unsigned char hi_shift;		/* left shift discarding high bits */
	unsigned char low_shift;	/* right shift aligning the field to bit 0 */
};

/* Fetch the raw value, then mask out the bitfield via shift left/right */
#define DEFINE_FETCH_bitfield(type) \
static __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs,\
void *data, void *dest) \
{ \
struct bitfield_fetch_param *bprm = data; \
type buf = 0; \
call_fetch(&bprm->orig, regs, &buf); \
if (buf) { \
buf <<= bprm->hi_shift; \
buf >>= bprm->low_shift; \
} \
*(type *)dest = buf; \
}
DEFINE_BASIC_FETCH_FUNCS(bitfield)
#define fetch_bitfield_string NULL
#define fetch_bitfield_string_size NULL

static __kprobes void
update_bitfield_fetch_param(struct bitfield_fetch_param *data)
{
	/*
	 * Don't check the bitfield itself, because this must be the
	 * last fetch function.
	 */
	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
		update_deref_fetch_param(data->orig.data);
	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
		update_symbol_cache(data->orig.data);
}

static __kprobes void
free_bitfield_fetch_param(struct bitfield_fetch_param *data)
{
	/*
	 * Don't check the bitfield itself, because this must be the
	 * last fetch function.
	 */
	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
		free_deref_fetch_param(data->orig.data);
	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
		free_symbol_cache(data->orig.data);

	kfree(data);
}
/* Default (unsigned long) fetch type: u32 or u64 depending on BITS_PER_LONG */
#define __DEFAULT_FETCH_TYPE(t) u##t
#define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
#define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG)
#define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE)

/* Fill one slot of the per-type fetch method array */
#define ASSIGN_FETCH_FUNC(method, type) \
[FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type)

/* Build one fetch_type table entry: printer, format, and all fetch methods */
#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \
{.name = _name, \
.size = _size, \
.is_signed = sign, \
.print = PRINT_TYPE_FUNC_NAME(ptype), \
.fmt = PRINT_TYPE_FMT_NAME(ptype), \
.fmttype = _fmttype, \
.fetch = { \
ASSIGN_FETCH_FUNC(reg, ftype), \
ASSIGN_FETCH_FUNC(stack, ftype), \
ASSIGN_FETCH_FUNC(retval, ftype), \
ASSIGN_FETCH_FUNC(memory, ftype), \
ASSIGN_FETCH_FUNC(symbol, ftype), \
ASSIGN_FETCH_FUNC(deref, ftype), \
ASSIGN_FETCH_FUNC(bitfield, ftype), \
} \
}
#define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \
__ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype)

/* Indices of the special entries in fetch_type_table */
#define FETCH_TYPE_STRING 0
#define FETCH_TYPE_STRSIZE 1

/* Fetch type information table */
static const struct fetch_type fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8, u8, 0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8, u8, 1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
};
/*
 * Look up a fetch_type table entry by type-name string.
 * NULL @type selects the default (machine word) type.  Bitfield specs
 * ("b<width>@<offset>/<container-size>") are mapped to the unsigned
 * type matching the container size.  Returns NULL if unknown.
 */
static const struct fetch_type *find_fetch_type(const char *type)
{
	int i;

	if (!type)
		type = DEFAULT_FETCH_TYPE_STR;

	/* Special case: bitfield */
	if (*type == 'b') {
		unsigned long bs;

		type = strchr(type, '/');
		if (!type)
			goto fail;

		type++;
		if (kstrtoul(type, 0, &bs))
			goto fail;

		switch (bs) {
		case 8:
			return find_fetch_type("u8");
		case 16:
			return find_fetch_type("u16");
		case 32:
			return find_fetch_type("u32");
		case 64:
			return find_fetch_type("u64");
		default:
			goto fail;
		}
	}

	for (i = 0; i < ARRAY_SIZE(fetch_type_table); i++)
		if (strcmp(type, fetch_type_table[i].name) == 0)
			return &fetch_type_table[i];

fail:
	return NULL;
}
/* Special function : only accept unsigned long */
static __kprobes void fetch_stack_address(struct pt_regs *regs,
					  void *dummy, void *dest)
{
	*(unsigned long *)dest = kernel_stack_pointer(regs);
}

/*
 * For a string-typed argument, return the string_size fetch function
 * matching the method (reg/stack/memory/...) of @orig_fn; NULL for
 * non-string types.
 */
static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
					    fetch_func_t orig_fn)
{
	int i;

	if (type != &fetch_type_table[FETCH_TYPE_STRING])
		return NULL;	/* Only string type needs size function */

	for (i = 0; i < FETCH_MTD_END; i++)
		if (type->fetch[i] == orig_fn)
			return fetch_type_table[FETCH_TYPE_STRSIZE].fetch[i];

	WARN_ON(1);	/* This should not happen */

	return NULL;
}
/*
 * Split symbol and offset.  "sym+off" is split in place: the '+' is
 * replaced by NUL and *offset receives the parsed value (0 if no '+').
 * Returns 0 on success or a negative errno.
 */
int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
{
	char *tmp;
	int ret;

	if (!offset)
		return -EINVAL;

	tmp = strchr(symbol, '+');
	if (tmp) {
		/* skip sign because kstrtoul doesn't accept '+' */
		ret = kstrtoul(tmp + 1, 0, offset);
		if (ret)
			return ret;

		*tmp = '\0';
	} else
		*offset = 0;

	return 0;
}
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))

/*
 * Parse a '$'-prefixed special variable argument into *f:
 *   $retval  - function return value (valid only for return probes)
 *   $stack   - kernel stack address (default type only)
 *   $stackN  - N-th word on the kernel stack (N <= PARAM_MAX_STACK)
 * Returns 0 on success or -EINVAL on an unknown/invalid variable.
 *
 * Fix: the original text contained the mojibake "¶m" where the C
 * source reads "&param" (HTML-entity corruption of '&para;' + 'm'),
 * which would not compile; restored the address-of expression.
 */
static int parse_probe_vars(char *arg, const struct fetch_type *t,
			    struct fetch_param *f, bool is_return)
{
	int ret = 0;
	unsigned long param;

	if (strcmp(arg, "retval") == 0) {
		if (is_return)
			f->fn = t->fetch[FETCH_MTD_retval];
		else
			ret = -EINVAL;
	} else if (strncmp(arg, "stack", 5) == 0) {
		if (arg[5] == '\0') {
			/* Plain "$stack" is only valid for the default type */
			if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR) == 0)
				f->fn = fetch_stack_address;
			else
				ret = -EINVAL;
		} else if (isdigit(arg[5])) {
			ret = kstrtoul(arg + 5, 10, &param);
			if (ret || param > PARAM_MAX_STACK)
				ret = -EINVAL;
			else {
				f->fn = t->fetch[FETCH_MTD_stack];
				f->data = (void *)param;
			}
		} else
			ret = -EINVAL;
	} else
		ret = -EINVAL;

	return ret;
}
/*
 * Recursive argument parser: decode one fetch argument string into *f.
 *   $var        - special variables (see parse_probe_vars())
 *   %reg        - named CPU register
 *   @addr       - absolute memory address; @sym+off - symbol address
 *   +|-off(arg) - dereference of a recursively parsed inner argument
 * Returns 0 on success or a negative errno.
 *
 * Fix: the original text contained the mojibake "¶m" where the C
 * source reads "&param" (HTML-entity corruption), which would not
 * compile; restored the address-of expression.  Also marked the
 * intentional '+' -> '-' switch fallthrough explicitly.
 */
static int parse_probe_arg(char *arg, const struct fetch_type *t,
			   struct fetch_param *f, bool is_return, bool is_kprobe)
{
	unsigned long param;
	long offset;
	char *tmp;
	int ret;

	ret = 0;

	/* Until uprobe_events supports only reg arguments */
	if (!is_kprobe && arg[0] != '%')
		return -EINVAL;

	switch (arg[0]) {
	case '$':
		ret = parse_probe_vars(arg + 1, t, f, is_return);
		break;

	case '%':	/* named register */
		ret = regs_query_register_offset(arg + 1);
		if (ret >= 0) {
			f->fn = t->fetch[FETCH_MTD_reg];
			f->data = (void *)(unsigned long)ret;
			ret = 0;
		}
		break;

	case '@':	/* memory or symbol */
		if (isdigit(arg[1])) {
			ret = kstrtoul(arg + 1, 0, &param);
			if (ret)
				break;

			f->fn = t->fetch[FETCH_MTD_memory];
			f->data = (void *)param;
		} else {
			ret = traceprobe_split_symbol_offset(arg + 1, &offset);
			if (ret)
				break;

			f->data = alloc_symbol_cache(arg + 1, offset);
			if (f->data)
				f->fn = t->fetch[FETCH_MTD_symbol];
		}
		break;

	case '+':	/* deref memory */
		arg++;	/* Skip '+', because kstrtol() rejects it. */
		/* fall through */
	case '-':
		tmp = strchr(arg, '(');
		if (!tmp)
			break;

		*tmp = '\0';
		ret = kstrtol(arg, 0, &offset);
		if (ret)
			break;

		arg = tmp + 1;
		tmp = strrchr(arg, ')');
		if (tmp) {
			struct deref_fetch_param *dprm;
			const struct fetch_type *t2;

			t2 = find_fetch_type(NULL);
			*tmp = '\0';
			dprm = kzalloc(sizeof(struct deref_fetch_param),
				       GFP_KERNEL);
			if (!dprm)
				return -ENOMEM;

			dprm->offset = offset;
			ret = parse_probe_arg(arg, t2, &dprm->orig, is_return,
					      is_kprobe);
			if (ret)
				kfree(dprm);
			else {
				f->fn = t->fetch[FETCH_MTD_deref];
				f->data = (void *)dprm;
			}
		}
		break;
	}
	if (!ret && !f->fn) {	/* Parsed, but do not find fetch method */
		pr_info("%s type has no corresponding fetch method.\n",
			t->name);
		ret = -EINVAL;
	}

	return ret;
}
/* Convert a byte count to a bit count */
#define BYTES_TO_BITS(nb) ((BITS_PER_LONG * (nb)) / sizeof(long))

/*
 * Bitfield type needs to be parsed into a fetch function.
 * @bf has the form "b<width>@<bit-offset>/<container-size>"; a
 * bitfield_fetch_param is wrapped around the already-parsed fetch in
 * *f.  Returns 0 on success (or if @bf is not a bitfield spec),
 * negative errno on bad syntax or out-of-range field.
 */
static int __parse_bitfield_probe_arg(const char *bf,
				      const struct fetch_type *t,
				      struct fetch_param *f)
{
	struct bitfield_fetch_param *bprm;
	unsigned long bw, bo;
	char *tail;

	if (*bf != 'b')
		return 0;

	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		return -ENOMEM;

	/* Wrap the existing fetch; it becomes the bitfield's inner fetch */
	bprm->orig = *f;
	f->fn = t->fetch[FETCH_MTD_bitfield];
	f->data = (void *)bprm;
	bw = simple_strtoul(bf + 1, &tail, 0);	/* Use simple one */

	if (bw == 0 || *tail != '@')
		return -EINVAL;

	bf = tail + 1;
	bo = simple_strtoul(bf, &tail, 0);

	if (tail == bf || *tail != '/')
		return -EINVAL;

	/* Shift amounts that isolate [bo, bo+bw) within the container */
	bprm->hi_shift = BYTES_TO_BITS(t->size) - (bw + bo);
	bprm->low_shift = bprm->hi_shift + bo;

	return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0;
}
/*
 * String length checking wrapper: top-level parser for one probe
 * argument "arg[:type]".  Fills in @parg (fetch fn, type, offset in the
 * record buffer) and advances *size by the argument's stored size.
 * Returns 0 on success or a negative errno.
 */
int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
		struct probe_arg *parg, bool is_return, bool is_kprobe)
{
	const char *t;
	int ret;

	if (strlen(arg) > MAX_ARGSTR_LEN) {
		pr_info("Argument is too long.: %s\n", arg);
		return -ENOSPC;
	}
	/* Keep the raw text for later re-display in the events file */
	parg->comm = kstrdup(arg, GFP_KERNEL);
	if (!parg->comm) {
		pr_info("Failed to allocate memory for command '%s'.\n", arg);
		return -ENOMEM;
	}
	/* Split the optional ":type" suffix (cuts @arg at the colon) */
	t = strchr(parg->comm, ':');
	if (t) {
		arg[t - parg->comm] = '\0';
		t++;
	}
	parg->type = find_fetch_type(t);
	if (!parg->type) {
		pr_info("Unsupported type: %s\n", t);
		return -EINVAL;
	}
	parg->offset = *size;
	*size += parg->type->size;
	ret = parse_probe_arg(arg, parg->type, &parg->fetch, is_return, is_kprobe);

	/* Bitfield specs wrap the fetch parsed above */
	if (ret >= 0 && t != NULL)
		ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch);

	if (ret >= 0) {
		/* String args also need a companion size-fetch function */
		parg->fetch_size.fn = get_fetch_size_function(parg->type,
							      parg->fetch.fn);
		parg->fetch_size.data = parg->fetch.data;
	}

	return ret;
}
/* Return 1 if name is reserved or already used by another argument */
int traceprobe_conflict_field_name(const char *name,
				   struct probe_arg *args, int narg)
{
	int i;

	/* Reject names the trace event core already generates */
	for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
		if (strcmp(reserved_field_names[i], name) == 0)
			return 1;

	/* Reject duplicates among the first @narg parsed arguments */
	for (i = 0; i < narg; i++)
		if (strcmp(args[i].name, name) == 0)
			return 1;

	return 0;
}
/* Refresh cached symbol addresses reachable from one probe argument */
void traceprobe_update_arg(struct probe_arg *arg)
{
	if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
		update_bitfield_fetch_param(arg->fetch.data);
	else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
		update_deref_fetch_param(arg->fetch.data);
	else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
		update_symbol_cache(arg->fetch.data);
}

/* Free one probe argument: its fetch-method data, name, and raw text */
void traceprobe_free_probe_arg(struct probe_arg *arg)
{
	if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
		free_bitfield_fetch_param(arg->fetch.data);
	else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
		free_deref_fetch_param(arg->fetch.data);
	else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
		free_symbol_cache(arg->fetch.data);

	kfree(arg->name);
	kfree(arg->comm);
}
/*
 * Split one command line into argv[] and hand it to @createfn.
 * An empty line is a no-op (returns 0).  Returns @createfn's result,
 * or -ENOMEM if the split fails.
 */
int traceprobe_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}
#define WRITE_BUFSIZE 4096

/*
 * Write handler shared by the kprobe/uprobe "events" files: copy user
 * data in WRITE_BUFSIZE chunks, split it into newline-terminated
 * commands, strip '#' comments, and run each line through @createfn.
 * Returns the number of bytes consumed, or a negative errno.
 */
ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		tmp = strchr(kbuf, '\n');

		if (tmp) {
			/* Process only up to (and including) the newline */
			*tmp = '\0';
			size = tmp - kbuf + 1;
		} else if (done + size < count) {
			/* No newline and more data remains: line too long */
			pr_warning("Line length is too long: "
				   "Should be less than %d.", WRITE_BUFSIZE);
			ret = -EINVAL;
			goto out;
		}
		done += size;
		/* Remove comments */
		tmp = strchr(kbuf, '#');

		if (tmp)
			*tmp = '\0';

		ret = traceprobe_command(kbuf, createfn);
		if (ret)
			goto out;
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
| gpl-2.0 |
mrimp/N910TUVU1ANIH_kernel | drivers/pci/hotplug/pciehp_ctrl.c | 4013 | 14877 | /*
* PCI Express Hot Plug Controller Driver
*
* Copyright (C) 1995,2001 Compaq Computer Corporation
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001 IBM Corp.
* Copyright (C) 2003-2004 Intel Corporation
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
static void interrupt_event_handler(struct work_struct *work);
/*
 * Queue a hotplug event for deferred handling on the slot's workqueue.
 * Called from interrupt context, hence GFP_ATOMIC.
 * Returns 0 on success or -ENOMEM.
 */
static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
{
	struct event_info *info;

	info = kmalloc(sizeof(*info), GFP_ATOMIC);
	if (!info)
		return -ENOMEM;

	info->event_type = event_type;
	info->p_slot = p_slot;
	INIT_WORK(&info->work, interrupt_event_handler);

	queue_work(p_slot->wq, &info->work);

	return 0;
}
/* ISR entry point: attention button pressed - queue INT_BUTTON_PRESS */
u8 pciehp_handle_attention_button(struct slot *p_slot)
{
	u32 event_type;
	struct controller *ctrl = p_slot->ctrl;

	/* Attention Button Change */
	ctrl_dbg(ctrl, "Attention button interrupt received\n");

	/*
	 * Button pressed - See if need to TAKE ACTION!!!
	 */
	ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
	event_type = INT_BUTTON_PRESS;

	queue_interrupt_event(p_slot, event_type);

	return 0;
}

/* ISR entry point: MRL (latch) state changed - queue open/close event */
u8 pciehp_handle_switch_change(struct slot *p_slot)
{
	u8 getstatus;
	u32 event_type;
	struct controller *ctrl = p_slot->ctrl;

	/* Switch Change */
	ctrl_dbg(ctrl, "Switch interrupt received\n");

	pciehp_get_latch_status(p_slot, &getstatus);
	if (getstatus) {
		/*
		 * Switch opened
		 */
		ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
		event_type = INT_SWITCH_OPEN;
	} else {
		/*
		 * Switch closed
		 */
		ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
		event_type = INT_SWITCH_CLOSE;
	}

	queue_interrupt_event(p_slot, event_type);

	return 1;
}

/* ISR entry point: card presence changed - queue presence on/off event */
u8 pciehp_handle_presence_change(struct slot *p_slot)
{
	u32 event_type;
	u8 presence_save;
	struct controller *ctrl = p_slot->ctrl;

	/* Presence Change */
	ctrl_dbg(ctrl, "Presence/Notify input change\n");

	/* Switch is open, assume a presence change
	 * Save the presence state
	 */
	pciehp_get_adapter_status(p_slot, &presence_save);
	if (presence_save) {
		/*
		 * Card Present
		 */
		ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot));
		event_type = INT_PRESENCE_ON;
	} else {
		/*
		 * Not Present
		 */
		ctrl_info(ctrl, "Card not present on Slot(%s)\n",
			  slot_name(p_slot));
		event_type = INT_PRESENCE_OFF;
	}

	queue_interrupt_event(p_slot, event_type);

	return 1;
}

/* ISR entry point: power fault detected - queue INT_POWER_FAULT */
u8 pciehp_handle_power_fault(struct slot *p_slot)
{
	u32 event_type;
	struct controller *ctrl = p_slot->ctrl;

	/* power fault */
	ctrl_dbg(ctrl, "Power fault interrupt received\n");
	ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
	event_type = INT_POWER_FAULT;
	ctrl_info(ctrl, "Power fault bit %x set\n", 0);
	queue_interrupt_event(p_slot, event_type);

	return 1;
}
/* The following routines constitute the bulk of the
   hotplug controller logic
 */

/*
 * Error path for a failed board add: power the slot off (if the
 * controller supports power control), turn off the green LED, and
 * light the amber attention LED.
 */
static void set_slot_off(struct controller *ctrl, struct slot * pslot)
{
	/* turn off slot, turn on Amber LED, turn off Green LED if supported*/
	if (POWER_CTRL(ctrl)) {
		if (pciehp_power_off_slot(pslot)) {
			ctrl_err(ctrl,
				 "Issue of Slot Power Off command failed\n");
			return;
		}
		/*
		 * After turning power off, we must wait for at least 1 second
		 * before taking any action that relies on power having been
		 * removed from the slot/adapter.
		 */
		msleep(1000);
	}

	if (PWR_LED(ctrl))
		pciehp_green_led_off(pslot);

	if (ATTN_LED(ctrl)) {
		if (pciehp_set_attention_status(pslot, 1)) {
			ctrl_err(ctrl,
				 "Issue of Set Attention Led command failed\n");
			return;
		}
	}
}
/**
 * board_added - Called after a board has been added to the system.
 * @p_slot: &slot where board is added
 *
 * Turns power on for the board.
 * Configures board.
 *
 * Returns 0 on success; on any failure the slot is powered back off
 * via set_slot_off() and a negative errno is returned.
 */
static int board_added(struct slot *p_slot)
{
	int retval = 0;
	struct controller *ctrl = p_slot->ctrl;
	struct pci_bus *parent = ctrl->pcie->port->subordinate;

	if (POWER_CTRL(ctrl)) {
		/* Power on slot */
		retval = pciehp_power_on_slot(p_slot);
		if (retval)
			return retval;
	}

	/* Blink green while the add is in progress */
	if (PWR_LED(ctrl))
		pciehp_green_led_blink(p_slot);

	/* Check link training status */
	retval = pciehp_check_link_status(ctrl);
	if (retval) {
		ctrl_err(ctrl, "Failed to check link status\n");
		goto err_exit;
	}

	/* Check for a power fault */
	if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
		ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
		retval = -EIO;
		goto err_exit;
	}

	/* Enumerate the new device(s) behind the port */
	retval = pciehp_configure_device(p_slot);
	if (retval) {
		ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
			 pci_domain_nr(parent), parent->number);
		goto err_exit;
	}

	if (PWR_LED(ctrl))
		pciehp_green_led_on(p_slot);

	return 0;

err_exit:
	set_slot_off(ctrl, p_slot);
	return retval;
}
/**
 * remove_board - Turns off slot and LEDs
 * @p_slot: slot where board is being removed
 *
 * Unconfigures the device(s), powers the slot off (when power control
 * is supported), and turns off the green LED.  Returns 0 on success
 * or a negative errno.
 */
static int remove_board(struct slot *p_slot)
{
	int retval = 0;
	struct controller *ctrl = p_slot->ctrl;

	retval = pciehp_unconfigure_device(p_slot);
	if (retval)
		return retval;

	if (POWER_CTRL(ctrl)) {
		/* power off slot */
		retval = pciehp_power_off_slot(p_slot);
		if (retval) {
			ctrl_err(ctrl,
				 "Issue of Slot Disable command failed\n");
			return retval;
		}
		/*
		 * After turning power off, we must wait for at least 1 second
		 * before taking any action that relies on power having been
		 * removed from the slot/adapter.
		 */
		msleep(1000);
	}

	/* turn off Green LED */
	if (PWR_LED(ctrl))
		pciehp_green_led_off(p_slot);

	return 0;
}
/* Work item carrying a slot for a deferred power on/off operation */
struct power_work_info {
	struct slot *p_slot;
	struct work_struct work;
};

/**
 * pciehp_power_thread - handle pushbutton events
 * @work: &struct work_struct describing work to be done
 *
 * Scheduled procedure to handle blocking stuff for the pushbuttons.
 * Handles all pending events and exits.
 *
 * Note: the slot lock is dropped around the blocking enable/disable
 * call and re-taken to reset the state machine to STATIC_STATE.
 */
static void pciehp_power_thread(struct work_struct *work)
{
	struct power_work_info *info =
		container_of(work, struct power_work_info, work);
	struct slot *p_slot = info->p_slot;

	mutex_lock(&p_slot->lock);
	switch (p_slot->state) {
	case POWEROFF_STATE:
		mutex_unlock(&p_slot->lock);
		ctrl_dbg(p_slot->ctrl,
			 "Disabling domain:bus:device=%04x:%02x:00\n",
			 pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
			 p_slot->ctrl->pcie->port->subordinate->number);
		pciehp_disable_slot(p_slot);
		mutex_lock(&p_slot->lock);
		p_slot->state = STATIC_STATE;
		break;
	case POWERON_STATE:
		mutex_unlock(&p_slot->lock);
		/* On enable failure, stop the blinking green LED */
		if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl))
			pciehp_green_led_off(p_slot);
		mutex_lock(&p_slot->lock);
		p_slot->state = STATIC_STATE;
		break;
	default:
		break;
	}
	mutex_unlock(&p_slot->lock);

	kfree(info);
}
/*
 * Delayed-work callback fired 5s after an attention-button press:
 * commit the pending BLINKINGON/BLINKINGOFF transition by queueing the
 * actual power-on/off work.  Any other state means the operation was
 * cancelled, so the work item is dropped.
 */
void pciehp_queue_pushbutton_work(struct work_struct *work)
{
	struct slot *p_slot = container_of(work, struct slot, work.work);
	struct power_work_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
			 __func__);
		return;
	}
	info->p_slot = p_slot;
	INIT_WORK(&info->work, pciehp_power_thread);

	mutex_lock(&p_slot->lock);
	switch (p_slot->state) {
	case BLINKINGOFF_STATE:
		p_slot->state = POWEROFF_STATE;
		break;
	case BLINKINGON_STATE:
		p_slot->state = POWERON_STATE;
		break;
	default:
		kfree(info);
		goto out;
	}
	queue_work(p_slot->wq, &info->work);
 out:
	mutex_unlock(&p_slot->lock);
}
/*
 * Note: This function must be called with slot->lock held
 *
 * Attention-button state machine: a first press arms a 5-second
 * delayed power toggle (BLINKING* state); a second press within the
 * window cancels it; presses during an in-flight power operation are
 * ignored.
 */
static void handle_button_press_event(struct slot *p_slot)
{
	struct controller *ctrl = p_slot->ctrl;
	u8 getstatus;

	switch (p_slot->state) {
	case STATIC_STATE:
		/* Direction of the toggle depends on current power state */
		pciehp_get_power_status(p_slot, &getstatus);
		if (getstatus) {
			p_slot->state = BLINKINGOFF_STATE;
			ctrl_info(ctrl,
				  "PCI slot #%s - powering off due to button "
				  "press.\n", slot_name(p_slot));
		} else {
			p_slot->state = BLINKINGON_STATE;
			ctrl_info(ctrl,
				  "PCI slot #%s - powering on due to button "
				  "press.\n", slot_name(p_slot));
		}
		/* blink green LED and turn off amber */
		if (PWR_LED(ctrl))
			pciehp_green_led_blink(p_slot);
		if (ATTN_LED(ctrl))
			pciehp_set_attention_status(p_slot, 0);

		queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
		break;
	case BLINKINGOFF_STATE:
	case BLINKINGON_STATE:
		/*
		 * Cancel if we are still blinking; this means that we
		 * press the attention again before the 5 sec. limit
		 * expires to cancel hot-add or hot-remove
		 */
		ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
		cancel_delayed_work(&p_slot->work);
		/* Restore the green LED to match the unchanged power state */
		if (p_slot->state == BLINKINGOFF_STATE) {
			if (PWR_LED(ctrl))
				pciehp_green_led_on(p_slot);
		} else {
			if (PWR_LED(ctrl))
				pciehp_green_led_off(p_slot);
		}
		if (ATTN_LED(ctrl))
			pciehp_set_attention_status(p_slot, 0);
		ctrl_info(ctrl, "PCI slot #%s - action canceled "
			  "due to button press\n", slot_name(p_slot));
		p_slot->state = STATIC_STATE;
		break;
	case POWEROFF_STATE:
	case POWERON_STATE:
		/*
		 * Ignore if the slot is on power-on or power-off state;
		 * this means that the previous attention button action
		 * to hot-add or hot-remove is undergoing
		 */
		ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
		break;
	default:
		ctrl_warn(ctrl, "Not a valid state\n");
		break;
	}
}
/*
 * Note: This function must be called with slot->lock held
 *
 * Surprise add/removal: queue an immediate power transition that
 * matches the new presence state (no 5-second button grace period).
 */
static void handle_surprise_event(struct slot *p_slot)
{
	u8 getstatus;
	struct power_work_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
			 __func__);
		return;
	}
	info->p_slot = p_slot;
	INIT_WORK(&info->work, pciehp_power_thread);

	pciehp_get_adapter_status(p_slot, &getstatus);
	if (!getstatus)
		p_slot->state = POWEROFF_STATE;
	else
		p_slot->state = POWERON_STATE;

	queue_work(p_slot->wq, &info->work);
}

/*
 * Workqueue handler for events queued by queue_interrupt_event():
 * dispatches button presses, power faults, and presence changes with
 * the slot lock held.
 */
static void interrupt_event_handler(struct work_struct *work)
{
	struct event_info *info = container_of(work, struct event_info, work);
	struct slot *p_slot = info->p_slot;
	struct controller *ctrl = p_slot->ctrl;

	mutex_lock(&p_slot->lock);
	switch (info->event_type) {
	case INT_BUTTON_PRESS:
		handle_button_press_event(p_slot);
		break;
	case INT_POWER_FAULT:
		if (!POWER_CTRL(ctrl))
			break;
		/* Signal the fault: amber on, green off */
		if (ATTN_LED(ctrl))
			pciehp_set_attention_status(p_slot, 1);
		if (PWR_LED(ctrl))
			pciehp_green_led_off(p_slot);
		break;
	case INT_PRESENCE_ON:
	case INT_PRESENCE_OFF:
		/* Presence events only matter with surprise-removal support */
		if (!HP_SUPR_RM(ctrl))
			break;
		ctrl_dbg(ctrl, "Surprise Removal\n");
		handle_surprise_event(p_slot);
		break;
	default:
		break;
	}
	mutex_unlock(&p_slot->lock);

	kfree(info);
}
/*
 * Enable (power up and configure) a slot after validating that a card
 * is present, the latch (if any) is closed, and power is not already
 * on.  Returns 0 on success, -ENODEV/-EINVAL on precondition failure,
 * or board_added()'s error.
 */
int pciehp_enable_slot(struct slot *p_slot)
{
	u8 getstatus = 0;
	int rc;
	struct controller *ctrl = p_slot->ctrl;

	/* Check to see if (latch closed, card present, power off) */
	rc = pciehp_get_adapter_status(p_slot, &getstatus);
	if (rc || !getstatus) {
		ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
		return -ENODEV;
	}
	if (MRL_SENS(p_slot->ctrl)) {
		rc = pciehp_get_latch_status(p_slot, &getstatus);
		if (rc || getstatus) {
			ctrl_info(ctrl, "Latch open on slot(%s)\n",
				  slot_name(p_slot));
			return -ENODEV;
		}
	}

	if (POWER_CTRL(p_slot->ctrl)) {
		rc = pciehp_get_power_status(p_slot, &getstatus);
		if (rc || getstatus) {
			ctrl_info(ctrl, "Already enabled on slot(%s)\n",
				  slot_name(p_slot));
			return -EINVAL;
		}
	}

	pciehp_get_latch_status(p_slot, &getstatus);

	rc = board_added(p_slot);
	if (rc) {
		pciehp_get_latch_status(p_slot, &getstatus);
	}

	return rc;
}

/*
 * Disable (unconfigure and power down) a slot after validating that a
 * card is present (unless surprise removal is supported), the latch is
 * closed, and power is on.  Returns 0 on success or a negative errno.
 */
int pciehp_disable_slot(struct slot *p_slot)
{
	u8 getstatus = 0;
	int ret = 0;
	struct controller *ctrl = p_slot->ctrl;

	if (!p_slot->ctrl)
		return 1;

	if (!HP_SUPR_RM(p_slot->ctrl)) {
		ret = pciehp_get_adapter_status(p_slot, &getstatus);
		if (ret || !getstatus) {
			ctrl_info(ctrl, "No adapter on slot(%s)\n",
				  slot_name(p_slot));
			return -ENODEV;
		}
	}

	if (MRL_SENS(p_slot->ctrl)) {
		ret = pciehp_get_latch_status(p_slot, &getstatus);
		if (ret || getstatus) {
			ctrl_info(ctrl, "Latch open on slot(%s)\n",
				  slot_name(p_slot));
			return -ENODEV;
		}
	}

	if (POWER_CTRL(p_slot->ctrl)) {
		ret = pciehp_get_power_status(p_slot, &getstatus);
		if (ret || !getstatus) {
			ctrl_info(ctrl, "Already disabled on slot(%s)\n",
				  slot_name(p_slot));
			return -EINVAL;
		}
	}

	return remove_board(p_slot);
}
/*
 * sysfs "power on" entry point: cancels a pending button-initiated
 * power-on (BLINKINGON) if any, then performs the enable synchronously.
 * Returns 0 on success or a negative errno.
 */
int pciehp_sysfs_enable_slot(struct slot *p_slot)
{
	int retval = -ENODEV;
	struct controller *ctrl = p_slot->ctrl;

	mutex_lock(&p_slot->lock);
	switch (p_slot->state) {
	case BLINKINGON_STATE:
		cancel_delayed_work(&p_slot->work);
		/* fall through */
	case STATIC_STATE:
		p_slot->state = POWERON_STATE;
		mutex_unlock(&p_slot->lock);
		retval = pciehp_enable_slot(p_slot);
		mutex_lock(&p_slot->lock);
		p_slot->state = STATIC_STATE;
		break;
	case POWERON_STATE:
		ctrl_info(ctrl, "Slot %s is already in powering on state\n",
			  slot_name(p_slot));
		break;
	case BLINKINGOFF_STATE:
	case POWEROFF_STATE:
		ctrl_info(ctrl, "Already enabled on slot %s\n",
			  slot_name(p_slot));
		break;
	default:
		ctrl_err(ctrl, "Not a valid state on slot %s\n",
			 slot_name(p_slot));
		break;
	}
	mutex_unlock(&p_slot->lock);

	return retval;
}

/*
 * sysfs "power off" entry point: mirror image of
 * pciehp_sysfs_enable_slot() for the disable direction.
 */
int pciehp_sysfs_disable_slot(struct slot *p_slot)
{
	int retval = -ENODEV;
	struct controller *ctrl = p_slot->ctrl;

	mutex_lock(&p_slot->lock);
	switch (p_slot->state) {
	case BLINKINGOFF_STATE:
		cancel_delayed_work(&p_slot->work);
		/* fall through */
	case STATIC_STATE:
		p_slot->state = POWEROFF_STATE;
		mutex_unlock(&p_slot->lock);
		retval = pciehp_disable_slot(p_slot);
		mutex_lock(&p_slot->lock);
		p_slot->state = STATIC_STATE;
		break;
	case POWEROFF_STATE:
		ctrl_info(ctrl, "Slot %s is already in powering off state\n",
			  slot_name(p_slot));
		break;
	case BLINKINGON_STATE:
	case POWERON_STATE:
		ctrl_info(ctrl, "Already disabled on slot %s\n",
			  slot_name(p_slot));
		break;
	default:
		ctrl_err(ctrl, "Not a valid state on slot %s\n",
			 slot_name(p_slot));
		break;
	}
	mutex_unlock(&p_slot->lock);

	return retval;
}
| gpl-2.0 |
TheTypoMaster/android_kernel_samsung_smdk4412 | arch/sparc/kernel/setup_64.c | 4525 | 13000 | /*
* linux/arch/sparc64/kernel/setup.c
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
#include <asm/elf.h>
#include <asm/mdesc.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif
#include "entry.h"
#include "kernel.h"
/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
 * operations in asm/ns87303.h
 */
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);	/* exported for drivers that poke the SUPER I/O */
/* Console screen parameters reported to the VT layer. */
struct screen_info screen_info = {
	0, 0,			/* orig-x, orig-y */
	0,			/* unused */
	0,			/* orig-video-page */
	0,			/* orig-video-mode */
	128,			/* orig-video-cols */
	0, 0, 0,		/* unused, ega_bx, unused */
	54,			/* orig-video-lines */
	0,			/* orig-video-isVGA */
	16			/* orig-video-points */
};
/* Minimal write hook that funnels early console output to the PROM. */
static void
prom_console_write(struct console *con, const char *s, unsigned n)
{
	prom_write(s, n);
}
/* Exported for mm/init.c:paging_init.  Zero means "use the PROM-reported
 * memory size"; overridden by the "mem=" option in boot_flags_init(). */
unsigned long cmdline_memory_size = 0;
/* Early boot console backed by prom_console_write(); CON_BOOT makes it
 * go away once a real console registers, unless "-p" is given. */
static struct console prom_early_console = {
	.name =	"earlyprom",
	.write = prom_console_write,
	.flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
	.index = -1,
};
/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
static void __init process_switch(char c)
{
	switch (c) {
	case 'd':
	case 's':
		/* Accepted and ignored here. */
		break;
	case 'h':
		/* Halt in the PROM before the kernel proper starts. */
		prom_printf("boot_flags_init: Halt!\n");
		prom_halt();
		break;
	case 'p':
		/* Clear CON_BOOT so the early PROM console is not
		 * auto-unregistered when a real console comes up. */
		prom_early_console.flags &= ~CON_BOOT;
		break;
	case 'P':
		/* Force UltraSPARC-III P-Cache on. */
		if (tlb_type != cheetah) {
			printk("BOOT: Ignoring P-Cache force option.\n");
			break;
		}
		cheetah_pcache_forced_on = 1;
		add_taint(TAINT_MACHINE_CHECK);
		cheetah_enable_pcache();
		break;
	default:
		printk("Unknown boot switch (-%c)\n", c);
		break;
	}
}
/*
 * boot_flags_init() - scan the boot command line for single-character
 * switches and the "mem=" memory-size override.
 * @commands: NUL-terminated boot command line.  Only the traversal
 *            pointer advances; the string itself is not modified.
 *
 * "-x" style arguments are handed to process_switch() one character at
 * a time.  "mem=XXX[kKmMgG]" sets cmdline_memory_size, overriding the
 * PROM-reported memory size.  The 'G'/'g' suffix is accepted in
 * addition to the traditional K/M, matching the suffixes understood by
 * the generic "mem=" option.
 */
static void __init boot_flags_init(char *commands)
{
	while (*commands) {
		/* Move to the start of the next "argument". */
		while (*commands == ' ')
			commands++;
		/* Process any command switches, otherwise skip it. */
		if (*commands == '\0')
			break;
		if (*commands == '-') {
			commands++;
			while (*commands && *commands != ' ')
				process_switch(*commands++);
			continue;
		}
		if (!strncmp(commands, "mem=", 4)) {
			/*
			 * "mem=XXX[kKmMgG]" overrides the PROM-reported
			 * memory size.
			 */
			cmdline_memory_size = simple_strtoul(commands + 4,
							     &commands, 0);
			switch (*commands) {
			case 'K':
			case 'k':
				cmdline_memory_size <<= 10;
				commands++;
				break;
			case 'M':
			case 'm':
				cmdline_memory_size <<= 20;
				commands++;
				break;
			case 'G':
			case 'g':
				cmdline_memory_size <<= 30;
				commands++;
				break;
			}
		}
		/* Skip the rest of this argument. */
		while (*commands && *commands != ' ')
			commands++;
	}
}
/* Root device / ramdisk words; defined elsewhere (presumably filled in
 * by the boot path, e.g. head.S) -- TODO confirm where they are set. */
extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
/* Layout of ram_flags, decoded in setup_arch() below. */
#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000
extern int root_mountflags;
/* Command line handed back to the PROM on reboot. */
char reboot_command[COMMAND_LINE_SIZE];
/* Dummy register frame installed for the initial swapper task. */
static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
/*
 * per_cpu_patch() - rewrite the 4-instruction cpuid patch sites to the
 * variant matching the CPU this kernel actually booted on.
 *
 * Each entry in the __cpuid_patch table carries alternative code for
 * starfire, cheetah (safari vs. jbus) and sun4v; the chosen sequence is
 * copied over the site word by word, with a wmb() and an I-cache flush
 * after every store so the new instructions become visible.
 */
void __init per_cpu_patch(void)
{
	struct cpuid_patch_entry *p;
	unsigned long ver;
	int is_jbus;
	/* Plain spitfire (not starfire) needs no patching. */
	if (tlb_type == spitfire && !this_is_starfire)
		return;
	is_jbus = 0;
	if (tlb_type != hypervisor) {
		/* Distinguish JBUS parts (Jalapeno/Serrano) by the
		 * implementation field of %ver. */
		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
			   (ver >> 32UL) == __SERRANO_ID);
	}
	p = &__cpuid_patch;
	while (p < &__cpuid_patch_end) {
		unsigned long addr = p->addr;
		unsigned int *insns;
		switch (tlb_type) {
		case spitfire:
			insns = &p->starfire[0];
			break;
		case cheetah:
		case cheetah_plus:
			if (is_jbus)
				insns = &p->cheetah_jbus[0];
			else
				insns = &p->cheetah_safari[0];
			break;
		case hypervisor:
			insns = &p->sun4v[0];
			break;
		default:
			prom_printf("Unknown cpu type, halting.\n");
			prom_halt();
		}
		/* Store, order with wmb(), then flush each word so the
		 * patched instruction stream is observed coherently. */
		*(unsigned int *) (addr + 0) = insns[0];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 0));
		*(unsigned int *) (addr + 4) = insns[1];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 4));
		*(unsigned int *) (addr + 8) = insns[2];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 8));
		*(unsigned int *) (addr + 12) = insns[3];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 12));
		p++;
	}
}
/*
 * Patch every single-instruction sun4v site in [start, end): store the
 * replacement word, order it with wmb(), then flush the I-cache line.
 */
void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
			     struct sun4v_1insn_patch_entry *end)
{
	struct sun4v_1insn_patch_entry *p;

	for (p = start; p < end; p++) {
		unsigned long where = p->addr;

		*(unsigned int *) (where + 0) = p->insn;
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (where + 0));
	}
}
/*
 * Patch every two-instruction sun4v site in [start, end).  Each word is
 * stored, ordered with wmb() and I-cache flushed before the next one.
 */
void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			     struct sun4v_2insn_patch_entry *end)
{
	struct sun4v_2insn_patch_entry *p;

	for (p = start; p < end; p++) {
		unsigned long where = p->addr;

		*(unsigned int *) (where + 0) = p->insns[0];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (where + 0));
		*(unsigned int *) (where + 4) = p->insns[1];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (where + 4));
	}
}
/*
 * sun4v_patch() - apply all sun4v instruction patch ranges and then
 * initialize the hypervisor API.  A no-op on non-hypervisor systems.
 */
void __init sun4v_patch(void)
{
	extern void sun4v_hvapi_init(void);
	if (tlb_type != hypervisor)
		return;
	sun4v_patch_1insn_range(&__sun4v_1insn_patch,
				&__sun4v_1insn_patch_end);
	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
				&__sun4v_2insn_patch_end);
	sun4v_hvapi_init();
}
/*
 * popc_patch() - patch the 3- and 6-instruction population-count call
 * sites with the hardware sequences, flushing the I-cache word by word.
 * Only called when the cpu advertises AV_SPARC_POPC (see
 * init_sparc64_elf_hwcap()).
 */
static void __init popc_patch(void)
{
	struct popc_3insn_patch_entry *p3;
	struct popc_6insn_patch_entry *p6;
	p3 = &__popc_3insn_patch;
	while (p3 < &__popc_3insn_patch_end) {
		unsigned long i, addr = p3->addr;
		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr + (i * 4)) = p3->insns[i];
			wmb();
			__asm__ __volatile__("flush %0"
					     : : "r" (addr + (i * 4)));
		}
		p3++;
	}
	p6 = &__popc_6insn_patch;
	while (p6 < &__popc_6insn_patch_end) {
		unsigned long i, addr = p6->addr;
		for (i = 0; i < 6; i++) {
			*(unsigned int *) (addr + (i * 4)) = p6->insns[i];
			wmb();
			__asm__ __volatile__("flush %0"
					     : : "r" (addr + (i * 4)));
		}
		p6++;
	}
}
#ifdef CONFIG_SMP
/* Fatal: the firmware-assigned boot cpu id does not fit in NR_CPUS. */
void __init boot_cpu_id_too_large(int cpu)
{
	prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
		    cpu, NR_CPUS);
	prom_halt();
}
#endif
/* On Ultra, we support all of the v8 capabilities. */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
				   HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
				   HWCAP_SPARC_V9);
EXPORT_SYMBOL(sparc64_elf_hwcap);
/* Capability names, indexed by bit position in sparc64_elf_hwcap. */
static const char *hwcaps[] = {
	"flush", "stbar", "swap", "muldiv", "v9",
	"ultra3", "blkinit", "n2",
	/* These strings are as they appear in the machine description
	 * 'hwcap-list' property for cpu nodes.
	 */
	"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
	"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
	"ima", "cspare",
};
/*
 * cpucap_info() - emit the "cpucaps" line for /proc/cpuinfo: the names
 * of all set bits in sparc64_elf_hwcap, comma separated.
 */
void cpucap_info(struct seq_file *m)
{
	unsigned long caps = sparc64_elf_hwcap;
	const char *sep = "";
	int i;

	seq_puts(m, "cpucaps\t\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		if (!(caps & (1UL << i)))
			continue;
		seq_printf(m, "%s%s", sep, hwcaps[i]);
		sep = ",";
	}
	seq_putc(m, '\n');
}
/*
 * report_hwcaps() - log the capability names in @caps, at most eight
 * per "CPU CAPS: [...]" printk line.
 */
static void __init report_hwcaps(unsigned long caps)
{
	int bit, shown = 0;

	printk(KERN_INFO "CPU CAPS: [");
	for (bit = 0; bit < ARRAY_SIZE(hwcaps); bit++) {
		if (!(caps & (1UL << bit)))
			continue;
		printk(KERN_CONT "%s%s", shown ? "," : "", hwcaps[bit]);
		/* Wrap to a fresh line after every eighth name. */
		if (++shown == 8) {
			printk(KERN_CONT "]\n");
			printk(KERN_INFO "CPU CAPS: [");
			shown = 0;
		}
	}
	printk(KERN_CONT "]\n");
}
/*
 * mdesc_cpu_hwcap_list() - build a hwcap bitmask from the machine
 * description 'hwcap-list' property of the first "cpu" node.
 *
 * The property is a sequence of NUL-terminated strings; each string
 * matching an hwcaps[] entry sets the corresponding bit.  Returns 0
 * when no machine description, cpu node or property is available.
 */
static unsigned long __init mdesc_cpu_hwcap_list(void)
{
	struct mdesc_handle *hp;
	unsigned long caps = 0;
	const char *prop;
	int len;
	u64 pn;
	hp = mdesc_grab();
	if (!hp)
		return 0;
	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
	if (pn == MDESC_NODE_NULL)
		goto out;
	prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
	if (!prop)
		goto out;
	while (len) {
		int i, plen;
		for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
			unsigned long bit = 1UL << i;
			if (!strcmp(prop, hwcaps[i])) {
				caps |= bit;
				break;
			}
		}
		/* Advance past this string and its terminating NUL. */
		plen = strlen(prop) + 1;
		prop += plen;
		len -= plen;
	}
out:
	mdesc_release(hp);
	return caps;
}
/* This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports.
 */
static void __init init_sparc64_elf_hwcap(void)
{
	unsigned long cap = sparc64_elf_hwcap;
	unsigned long mdesc_caps;
	/* Chip-family capabilities known without a machine description. */
	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cap |= HWCAP_SPARC_ULTRA3;
	else if (tlb_type == hypervisor) {
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
			cap |= HWCAP_SPARC_BLKINIT;
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
			cap |= HWCAP_SPARC_N2;
	}
	/* These bits are set unconditionally for every sparc64 cpu. */
	cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);
	/* Prefer the machine description's hwcap-list; fall back to the
	 * hard-coded per-chip tables when it is absent or empty. */
	mdesc_caps = mdesc_cpu_hwcap_list();
	if (!mdesc_caps) {
		if (tlb_type == spitfire)
			cap |= AV_SPARC_VIS;
		if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
		if (tlb_type == cheetah_plus) {
			unsigned long impl, ver;
			/* Only Panther (among cheetah_plus) has popc. */
			__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
			impl = ((ver >> 32) & 0xffff);
			if (impl == PANTHER_IMPL)
				cap |= AV_SPARC_POPC;
		}
		if (tlb_type == hypervisor) {
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
				cap |= AV_SPARC_ASI_BLK_INIT;
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
				cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
					AV_SPARC_ASI_BLK_INIT |
					AV_SPARC_POPC);
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
				cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
					AV_SPARC_FMAF);
		}
	}
	sparc64_elf_hwcap = cap | mdesc_caps;
	report_hwcaps(sparc64_elf_hwcap);
	/* Use the hardware popc instruction now that we know it exists. */
	if (sparc64_elf_hwcap & AV_SPARC_POPC)
		popc_patch();
}
/*
 * setup_arch() - sparc64 architecture-specific boot setup: command line
 * parsing, early console, root device / ramdisk decoding, optional IP
 * autoconfiguration from OBP, trap block and paging initialization.
 */
void __init setup_arch(char **cmdline_p)
{
	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strcpy(boot_command_line, *cmdline_p);
	parse_early_param();
	boot_flags_init(*cmdline_p);
	/* Use the PROM for console output until a real console driver
	 * takes over (skipped if an early framebuffer is found). */
#ifdef CONFIG_EARLYFB
	if (btext_find_display())
#endif
		register_console(&prom_early_console);
	if (tlb_type == hypervisor)
		printk("ARCH: SUN4V\n");
	else
		printk("ARCH: SUN4U\n");
#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif
	idprom_init();
	if (!root_flags)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
	/* Decode the packed ramdisk word (see RAMDISK_* above). */
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif
	task_thread_info(&init_task)->kregs = &fake_swapper_regs;
#ifdef CONFIG_IP_PNP
	/* Seed IP autoconfiguration from OBP /chosen properties when the
	 * user did not configure addresses manually. */
	if (!ic_set_manually) {
		phandle chosen = prom_finddevice("/chosen");
		u32 cl, sv, gw;
		cl = prom_getintdefault (chosen, "client-ip", 0);
		sv = prom_getintdefault (chosen, "server-ip", 0);
		gw = prom_getintdefault (chosen, "gateway-ip", 0);
		if (cl && sv) {
			ic_myaddr = cl;
			ic_servaddr = sv;
			if (gw)
				ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
			/* Addresses are known; no BOOTP/RARP needed. */
			ic_proto_enabled = 0;
#endif
		}
	}
#endif
	/* Get boot processor trap_block[] setup. */
	init_cur_cpu_trap(current_thread_info());
	paging_init();
	init_sparc64_elf_hwcap();
}
extern int stop_a_enabled;
/* Drop into the PROM command prompt (Stop-A / BREAK), unless disabled
 * via the stop_a_enabled flag below. */
void sun_do_break(void)
{
	if (!stop_a_enabled)
		return;
	prom_printf("\n");
	/* Spill user register windows to memory before the PROM runs. */
	flush_user_windows();
	prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);
/* Non-zero (default) allows Stop-A to enter the PROM. */
int stop_a_enabled = 1;
EXPORT_SYMBOL(stop_a_enabled);
| gpl-2.0 |
shankarathi07/linux_lg_lollipop | arch/mips/jazz/setup.c | 4781 | 4577 | /*
* Setup pointers to hardware-dependent routines.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1997, 1998, 2001, 07, 08 by Ralf Baechle
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2007 by Thomas Bogendoerfer
*/
#include <linux/eisa.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
#include <asm/reboot.h>
#include <asm/pgtable.h>
#include <asm/tlbmisc.h>
extern asmlinkage void jazz_handle_int(void);
extern void jazz_machine_restart(char *command);
/* Legacy PC-style I/O regions claimed at boot (marked BUSY) so drivers
 * cannot accidentally grab them. */
static struct resource jazz_io_resources[] = {
	{
		.start = 0x00,
		.end = 0x1f,
		.name = "dma1",
		.flags = IORESOURCE_BUSY
	}, {
		.start = 0x40,
		.end = 0x5f,
		.name = "timer",
		.flags = IORESOURCE_BUSY
	}, {
		.start = 0x80,
		.end = 0x8f,
		.name = "dma page reg",
		.flags = IORESOURCE_BUSY
	}, {
		.start = 0xc0,
		.end = 0xdf,
		.name = "dma2",
		.flags = IORESOURCE_BUSY
	}
};
/*
 * plat_mem_setup() - Jazz platform setup: install fixed (wired) TLB
 * mappings for on-board I/O, set the I/O port base, reserve the legacy
 * PC I/O regions and register the restart hook and default console.
 * The wired-entry constants are Jazz chipset specific.
 */
void __init plat_mem_setup(void)
{
	int i;
	/* Map 0xe0000000 -> 0x0:800005C0, 0xe0010000 -> 0x1:30000580 */
	add_wired_entry(0x02000017, 0x03c00017, 0xe0000000, PM_64K);
	/* Map 0xe2000000 -> 0x0:900005C0, 0xe3010000 -> 0x0:910005C0 */
	add_wired_entry(0x02400017, 0x02440017, 0xe2000000, PM_16M);
	/* Map 0xe4000000 -> 0x0:600005C0, 0xe4100000 -> 400005C0 */
	add_wired_entry(0x01800017, 0x01000017, 0xe4000000, PM_4M);
	set_io_port_base(JAZZ_PORT_BASE);
#ifdef CONFIG_EISA
	EISA_bus = 1;
#endif
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(jazz_io_resources); i++)
		request_resource(&ioport_resource, jazz_io_resources + i);
	/* The RTC is outside the port address space */
	_machine_restart = jazz_machine_restart;
#ifdef CONFIG_VT
	screen_info = (struct screen_info) {
		.orig_video_cols	= 160,
		.orig_video_lines	= 64,
		.orig_video_points	= 16,
	};
#endif
	/* Default to the first serial port at 9600 baud. */
	add_preferred_console("ttyS", 0, "9600");
}
#ifdef CONFIG_OLIVETTI_M700
#define UART_CLK 1843200
#else
/* Some Jazz machines seem to have an 8MHz crystal clock but I don't know
   exactly which ones ... XXX */
#define UART_CLK (8000000 / 16)	/* ( 3072000 / 16) */
#endif
/* Describe one memory-mapped 8250 UART for the platform data below. */
#define MEMPORT(_base, _irq)				\
	{						\
		.mapbase	= (_base),		\
		.membase	= (void *)(_base),	\
		.irq		= (_irq),		\
		.uartclk	= UART_CLK,		\
		.iotype		= UPIO_MEM,		\
		.flags		= UPF_BOOT_AUTOCONF,	\
	}
/* On-board memory-mapped 16550 UARTs (built with MEMPORT above). */
static struct plat_serial8250_port jazz_serial_data[] = {
	MEMPORT(JAZZ_SERIAL1_BASE, JAZZ_SERIAL1_IRQ),
	MEMPORT(JAZZ_SERIAL2_BASE, JAZZ_SERIAL2_IRQ),
	{ },	/* terminator */
};
static struct platform_device jazz_serial8250_device = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev			= {
		.platform_data	= jazz_serial_data,
	},
};
/* ESP SCSI controller: register block, DMA control register and IRQ. */
static struct resource jazz_esp_rsrc[] = {
	{
		.start = JAZZ_SCSI_BASE,
		.end = JAZZ_SCSI_BASE + 31,
		.flags = IORESOURCE_MEM
	},
	{
		.start = JAZZ_SCSI_DMA,
		.end = JAZZ_SCSI_DMA,
		.flags = IORESOURCE_MEM
	},
	{
		.start = JAZZ_SCSI_IRQ,
		.end = JAZZ_SCSI_IRQ,
		.flags = IORESOURCE_IRQ
	}
};
static struct platform_device jazz_esp_pdev = {
	.name		= "jazz_esp",
	.num_resources	= ARRAY_SIZE(jazz_esp_rsrc),
	.resource	= jazz_esp_rsrc
};
/* SONIC Ethernet controller registers and IRQ. */
static struct resource jazz_sonic_rsrc[] = {
	{
		.start = JAZZ_ETHERNET_BASE,
		.end = JAZZ_ETHERNET_BASE + 0xff,
		.flags = IORESOURCE_MEM
	},
	{
		.start = JAZZ_ETHERNET_IRQ,
		.end = JAZZ_ETHERNET_IRQ,
		.flags = IORESOURCE_IRQ
	}
};
static struct platform_device jazz_sonic_pdev = {
	.name		= "jazzsonic",
	.num_resources	= ARRAY_SIZE(jazz_sonic_rsrc),
	.resource	= jazz_sonic_rsrc
};
/* CMOS RTC at the classic PC ports 0x70/0x71, IRQ 8. */
static struct resource jazz_cmos_rsrc[] = {
	{
		.start = 0x70,
		.end = 0x71,
		.flags = IORESOURCE_IO
	},
	{
		.start = 8,
		.end = 8,
		.flags = IORESOURCE_IRQ
	}
};
static struct platform_device jazz_cmos_pdev = {
	.name		= "rtc_cmos",
	.num_resources	= ARRAY_SIZE(jazz_cmos_rsrc),
	.resource	= jazz_cmos_rsrc
};
/* PC speaker; no resources needed. */
static struct platform_device pcspeaker_pdev = {
	.name	= "pcspkr",
	.id	= -1,
};
/* Register all on-board Jazz platform devices in declaration order. */
static int __init jazz_setup_devinit(void)
{
	struct platform_device *jazz_devs[] = {
		&jazz_serial8250_device,
		&jazz_esp_pdev,
		&jazz_sonic_pdev,
		&jazz_cmos_pdev,
		&pcspeaker_pdev,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(jazz_devs); i++)
		platform_device_register(jazz_devs[i]);
	return 0;
}
device_initcall(jazz_setup_devinit);
| gpl-2.0 |
zarboz/jet_442 | arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c | 4781 | 17604 | /***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Functions for RGMII/GMII/MII initialization, configuration,
* and monitoring.
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-mdio.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-dbg-defs.h>
void __cvmx_interrupt_gmxx_enable(int interface);
void __cvmx_interrupt_asxx_enable(int block);
/**
* Probe RGMII ports and determine the number present
*
* @interface: Interface to probe
*
* Returns Number of RGMII/GMII/MII ports (0-4).
*/
int __cvmx_helper_rgmii_probe(int interface)
{
	union cvmx_gmxx_inf_mode mode;
	int is_38xx_58xx;
	int is_30xx_31xx_50xx;

	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
	is_38xx_58xx = OCTEON_IS_MODEL(OCTEON_CN38XX) ||
		       OCTEON_IS_MODEL(OCTEON_CN58XX);
	is_30xx_31xx_50xx = OCTEON_IS_MODEL(OCTEON_CN31XX) ||
			    OCTEON_IS_MODEL(OCTEON_CN30XX) ||
			    OCTEON_IS_MODEL(OCTEON_CN50XX);

	if (mode.s.type) {
		/* On CN38XX/CN58XX a set "type" means SPI, not RGMII. */
		if (is_38xx_58xx) {
			cvmx_dprintf("ERROR: RGMII initialize called in "
				     "SPI interface\n");
			return 0;
		}
		/*
		 * On these chips "type" says we're in GMII/MII mode,
		 * which limits us to 2 ports.
		 */
		if (is_30xx_31xx_50xx)
			return 2;
		cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
			     __func__);
		return 0;
	}

	if (is_38xx_58xx)
		return 4;
	if (is_30xx_31xx_50xx)
		return 3;
	cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n", __func__);
	return 0;
}
/**
* Put an RGMII interface in loopback mode. Internal packets sent
* out will be received back again on the same port. Externally
* received packets will echo back out.
*
* @port: IPD port number to loop.
*/
void cvmx_helper_rgmii_internal_loopback(int port)
{
	/* Decode the IPD port number into (interface, index). */
	int interface = (port >> 4) & 1;
	int index = port & 0xf;
	uint64_t tmp;
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	/* Force 1Gbps full duplex; there is no real link partner. */
	gmx_cfg.u64 = 0;
	gmx_cfg.s.duplex = 1;
	gmx_cfg.s.slottime = 1;
	gmx_cfg.s.speed = 1;
	cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
	cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
	cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	/* Turn on internal loopback and TX/RX for this port index. */
	tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
	tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
	tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
	/* Finally enable the port itself. */
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
}
/**
* Workaround ASX setup errata with CN38XX pass1
*
* @interface: Interface to setup
* @port: Port to setup (0..3)
* @cpu_clock_hz:
* Chip frequency in Hertz
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_errata_asx_pass1(int interface, int port,
int cpu_clock_hz)
{
/* Set hi water mark as per errata GMX-4 */
if (cpu_clock_hz >= 325000000 && cpu_clock_hz < 375000000)
cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 12);
else if (cpu_clock_hz >= 375000000 && cpu_clock_hz < 437000000)
cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 11);
else if (cpu_clock_hz >= 437000000 && cpu_clock_hz < 550000000)
cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 10);
else if (cpu_clock_hz >= 550000000 && cpu_clock_hz < 687000000)
cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 9);
else
cvmx_dprintf("Illegal clock frequency (%d). "
"CVMX_ASXX_TX_HI_WATERX not set\n", cpu_clock_hz);
return 0;
}
/**
* Configure all of the ASX, GMX, and PKO regsiters required
* to get RGMII to function on the supplied interface.
*
* @interface: PKO Interface to configure (0 or 1)
*
* Returns Zero on success
*/
int __cvmx_helper_rgmii_enable(int interface)
{
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int port;
	struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
	union cvmx_gmxx_inf_mode mode;
	union cvmx_asxx_tx_prt_en asx_tx;
	union cvmx_asxx_rx_prt_en asx_rx;
	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
	/* Nothing to do if the interface is disabled. */
	if (mode.s.en == 0)
		return -1;
	if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
	     OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
		/* Ignore SPI interfaces */
		return -1;
	/* Configure the ASX registers needed to use the RGMII ports */
	asx_tx.u64 = 0;
	asx_tx.s.prt_en = cvmx_build_mask(num_ports);
	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
	asx_rx.u64 = 0;
	asx_rx.s.prt_en = cvmx_build_mask(num_ports);
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
	/* Configure the GMX registers needed to use the RGMII ports */
	for (port = 0; port < num_ports; port++) {
		/* Setting of CVMX_GMXX_TXX_THRESH has been moved to
		   __cvmx_helper_setup_gmx() */
		if (cvmx_octeon_is_pass1())
			__cvmx_helper_errata_asx_pass1(interface, port,
						       sys_info_ptr->
						       cpu_clock_hz);
		else {
			/*
			 * Configure more flexible RGMII preamble
			 * checking. Pass 1 doesn't support this
			 * feature.
			 */
			union cvmx_gmxx_rxx_frm_ctl frm_ctl;
			frm_ctl.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
					  (port, interface));
			/* New field, so must be compile time */
			frm_ctl.s.pre_free = 1;
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
				       frm_ctl.u64);
		}
		/*
		 * Each pause frame transmitted will ask for about 10M
		 * bit times before resume.  If buffer space comes
		 * available before that time has expired, an XON
		 * pause frame (0 time) will be transmitted to restart
		 * the flow.
		 */
		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
			       20000);
		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
			       (port, interface), 19000);
		/* Per-model clock delay values.  NOTE(review): constants
		 * come from the BSP; confirm against the hardware manual
		 * before changing. */
		if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
				       16);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
				       16);
		} else {
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
				       24);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
				       24);
		}
	}
	__cvmx_helper_setup_gmx(interface, num_ports);
	/* enable the ports now */
	for (port = 0; port < num_ports; port++) {
		union cvmx_gmxx_prtx_cfg gmx_cfg;
		cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port
					  (interface, port));
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
		gmx_cfg.s.en = 1;
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
			       gmx_cfg.u64);
	}
	__cvmx_interrupt_asxx_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);
	return 0;
}
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	union cvmx_asxx_prt_loop asxx_prt_loop;
	cvmx_helper_link_info_t result;

	asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
	if (!(asxx_prt_loop.s.int_loop & (1 << index)))
		return __cvmx_helper_board_link_get(ipd_port);

	/* Force 1Gbps full duplex on internal loopback */
	result.u64 = 0;
	result.s.full_duplex = 1;
	result.s.link_up = 1;
	result.s.speed = 1000;
	return result;
}
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_rgmii_link_set(int ipd_port,
				 cvmx_helper_link_info_t link_info)
{
	int result = 0;
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	union cvmx_gmxx_prtx_cfg original_gmx_cfg;
	union cvmx_gmxx_prtx_cfg new_gmx_cfg;
	union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
	union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
	int i;
	/* Ignore speed sets in the simulator */
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
		return 0;
	/* Read the current settings so we know the current enable state */
	original_gmx_cfg.u64 =
	    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	new_gmx_cfg = original_gmx_cfg;
	/* Disable the lowest level RX */
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
		       cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) &
		       ~(1 << index));
	memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
	/* Disable all queues so that TX should become idle */
	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
		pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
		pko_mem_queue_qos.s.pid = ipd_port;
		pko_mem_queue_qos.s.qid = queue;
		/* Save the original QoS mask; it is restored below. */
		pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
		pko_mem_queue_qos.s.qos_mask = 0;
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
	}
	/* Disable backpressure */
	gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
	gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
	gmx_tx_ovr_bp.s.bp &= ~(1 << index);
	gmx_tx_ovr_bp.s.en |= 1 << index;
	cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
	/* Read back to make sure the write has posted. */
	cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
	/*
	 * Poll the GMX state machine waiting for it to become
	 * idle. Preferably we should only change speed when it is
	 * idle. If it doesn't become idle we will still do the speed
	 * change, but there is a slight chance that GMX will
	 * lockup.
	 */
	cvmx_write_csr(CVMX_NPI_DBG_SELECT,
		       interface * 0x800 + index * 0x100 + 0x880);
	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7,
			      ==, 0, 10000);
	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf,
			      ==, 0, 10000);
	/* Disable the port before we make any changes */
	new_gmx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
	cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	/* Set full/half duplex */
	if (cvmx_octeon_is_pass1())
		/* Half duplex is broken for 38XX Pass 1 */
		new_gmx_cfg.s.duplex = 1;
	else if (!link_info.s.link_up)
		/* Force full duplex on down links */
		new_gmx_cfg.s.duplex = 1;
	else
		new_gmx_cfg.s.duplex = link_info.s.full_duplex;
	/* Set the link speed. Anything unknown is set to 1Gbps */
	if (link_info.s.speed == 10) {
		new_gmx_cfg.s.slottime = 0;
		new_gmx_cfg.s.speed = 0;
	} else if (link_info.s.speed == 100) {
		new_gmx_cfg.s.slottime = 0;
		new_gmx_cfg.s.speed = 0;
	} else {
		new_gmx_cfg.s.slottime = 1;
		new_gmx_cfg.s.speed = 1;
	}
	/* Adjust the clocks */
	if (link_info.s.speed == 10) {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
	} else if (link_info.s.speed == 100) {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
	} else {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) {
			union cvmx_gmxx_inf_mode mode;
			mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
	/*
	 * Port	 .en  .type  .p0mii  Configuration
	 * ----	 ---  -----  ------  -----------------------------------------
	 *  X	  0	X      X     All links are disabled.
	 *  0	  1	X      0     Port 0 is RGMII
	 *  0	  1	X      1     Port 0 is MII
	 *  1	  1	0      X     Ports 1 and 2 are configured as RGMII ports.
	 *  1	  1	1      X     Port 1: GMII/MII; Port 2: disabled. GMII or
	 *			     MII port is selected by GMX_PRT1_CFG[SPEED].
	 */
			/* In MII mode, CLK_CNT = 1. */
			if (((index == 0) && (mode.s.p0mii == 1))
			    || ((index != 0) && (mode.s.type == 1))) {
				cvmx_write_csr(CVMX_GMXX_TXX_CLK
					       (index, interface), 1);
			}
		}
	}
	/* Do a read to make sure all setup stuff is complete */
	cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	/* Save the new GMX setting without enabling the port */
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
	/* Enable the lowest level RX */
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
		       cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1 <<
									index));
	/* Re-enable the TX path */
	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS,
			       pko_mem_queue_qos_save[i].u64);
	}
	/* Restore backpressure */
	cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
	/* Restore the GMX enable state. Port config is complete */
	new_gmx_cfg.s.en = original_gmx_cfg.s.en;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
	return result;
}
/**
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
* causes packets received from the wire to sent out again.
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
* Non zero if you want internal loopback
* @enable_external:
* Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
					   int enable_external)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	int original_enable;
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	union cvmx_asxx_prt_loop asxx_prt_loop;
	/* Read the current enable state and save it */
	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	original_enable = gmx_cfg.s.en;
	/* Force port to be disabled */
	gmx_cfg.s.en = 0;
	if (enable_internal) {
		/* Force speed if we're doing internal loopback */
		gmx_cfg.s.duplex = 1;
		gmx_cfg.s.slottime = 1;
		gmx_cfg.s.speed = 1;
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	}
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	/* Set the loopback bits */
	asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
	if (enable_internal)
		asxx_prt_loop.s.int_loop |= 1 << index;
	else
		asxx_prt_loop.s.int_loop &= ~(1 << index);
	if (enable_external)
		asxx_prt_loop.s.ext_loop |= 1 << index;
	else
		asxx_prt_loop.s.ext_loop &= ~(1 << index);
	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
	/* Force enables in internal loopback */
	if (enable_internal) {
		uint64_t tmp;
		tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
		cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface),
			       (1 << index) | tmp);
		tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
		cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
			       (1 << index) | tmp);
		/* Leave the port enabled on exit for internal loopback. */
		original_enable = 1;
	}
	/* Restore the enable state */
	gmx_cfg.s.en = original_enable;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	return 0;
}
| gpl-2.0 |
bju2000/android_kernel_samsung_hlte | arch/mips/sgi-ip22/ip22-mc.c | 4781 | 6874 | /*
* ip22-mc.c: Routines for manipulating SGI Memory Controller.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu) - Indigo2 changes
* Copyright (C) 2003 Ladislav Michl (ladis@linux-mips.org)
* Copyright (C) 2004 Peter Fuerst (pf@net.alphadv.de) - IP28
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/sgialib.h>
#include <asm/sgi/mc.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
struct sgimc_regs *sgimc;
EXPORT_SYMBOL(sgimc);
/*
 * get_bank_addr - decode a bank's base address from its memory config word.
 *
 * MC chips at revision >= 5 use a coarser base-address granularity, hence
 * the larger shift.
 */
static inline unsigned long get_bank_addr(unsigned int memconfig)
{
	unsigned int rev = sgimc->systemid & SGIMC_SYSID_MASKREV;

	return (memconfig & SGIMC_MCONFIG_BASEADDR) << (rev >= 5 ? 24 : 22);
}
/*
 * get_bank_size - decode a bank's size in bytes from its memory config word.
 *
 * The size field is (rows + 0x100) scaled by a revision-dependent unit.
 */
static inline unsigned long get_bank_size(unsigned int memconfig)
{
	unsigned int rev = sgimc->systemid & SGIMC_SYSID_MASKREV;

	return ((memconfig & SGIMC_MCONFIG_RMASK) + 0x0100) <<
	       (rev >= 5 ? 16 : 14);
}
/*
 * get_bank_config - fetch the 16-bit config word for memory bank 0-3.
 *
 * Banks 0/1 live in mconfig0, banks 2/3 in mconfig1.  Within each 32-bit
 * register the even-numbered bank occupies the upper 16 bits and the
 * odd-numbered bank the lower 16 bits.
 */
static inline unsigned int get_bank_config(int bank)
{
	unsigned int res = bank > 1 ? sgimc->mconfig1 : sgimc->mconfig0;

	return bank % 2 ? res & 0xffff : res >> 16;
}
/* A contiguous physical memory range detected during bank probing. */
struct mem {
	unsigned long addr;	/* physical base address */
	unsigned long size;	/* length in bytes */
};
/*
* Detect installed memory, do some sanity checks and notify kernel about it
*/
/*
 * Detect installed memory, do some sanity checks and notify kernel about it
 *
 * Reads the per-bank config registers, collects the valid banks, sorts
 * them by base address, coalesces them into the two MC address spaces
 * (SEG0/SEG1) and registers the result as bootmem RAM regions.
 */
static void __init probe_memory(void)
{
	int i, j, found, cnt = 0;
	struct mem bank[4];
	struct mem space[2] = {{SGIMC_SEG0_BADDR, 0}, {SGIMC_SEG1_BADDR, 0}};

	printk(KERN_INFO "MC: Probing memory configuration:\n");
	/* Collect only banks whose "valid" bit is set. */
	for (i = 0; i < ARRAY_SIZE(bank); i++) {
		unsigned int tmp = get_bank_config(i);
		if (!(tmp & SGIMC_MCONFIG_BVALID))
			continue;
		bank[cnt].size = get_bank_size(tmp);
		bank[cnt].addr = get_bank_addr(tmp);
		printk(KERN_INFO " bank%d: %3ldM @ %08lx\n",
			i, bank[cnt].size / 1024 / 1024, bank[cnt].addr);
		cnt++;
	}
	/* And you thought bubble sort is dead algorithm... */
	/* (fine here: at most 4 entries, sorted ascending by address) */
	do {
		unsigned long addr, size;
		found = 0;
		for (i = 1; i < cnt; i++)
			if (bank[i-1].addr > bank[i].addr) {
				addr = bank[i].addr;
				size = bank[i].size;
				bank[i].addr = bank[i-1].addr;
				bank[i].size = bank[i-1].size;
				bank[i-1].addr = addr;
				bank[i-1].size = size;
				found = 1;
			}
	} while (found);
	/* Figure out how are memory banks mapped into spaces */
	/* Each bank must start exactly where a space currently ends. */
	for (i = 0; i < cnt; i++) {
		found = 0;
		for (j = 0; j < ARRAY_SIZE(space) && !found; j++)
			if (space[j].addr + space[j].size == bank[i].addr) {
				space[j].size += bank[i].size;
				found = 1;
			}
		/* There is either hole or overlapping memory */
		if (!found)
			printk(KERN_CRIT "MC: Memory configuration mismatch "
				"(%08lx), expect Bus Error soon\n",
				bank[i].addr);
	}
	for (i = 0; i < ARRAY_SIZE(space); i++)
		if (space[i].size)
			add_memory_region(space[i].addr, space[i].size,
				BOOT_MEM_RAM);
}
/*
 * sgimc_init - map and initialize the SGI memory controller (MC).
 *
 * Brings the MC into a known state (watchdog off, error status cleared,
 * parity checking enabled, write-buffer depth and RPSS divider programmed),
 * configures the GIO64 arbiter for the detected board variant, and finally
 * probes and registers the installed memory banks.
 *
 * Must run after sgihpc_init() (see Step 5 note) and before interrupts
 * are enabled.
 */
void __init sgimc_init(void)
{
	u32 tmp;

	/* ioremap can't fail */
	sgimc = (struct sgimc_regs *)
		ioremap(SGIMC_BASE, sizeof(struct sgimc_regs));
	printk(KERN_INFO "MC: SGI memory controller Revision %d\n",
	       (int) sgimc->systemid & SGIMC_SYSID_MASKREV);
	/* Place the MC into a known state. This must be done before
	 * interrupts are first enabled etc.
	 */
	/* Step 0: Make sure we turn off the watchdog in case it's
	 * still running (which might be the case after a
	 * soft reboot).
	 */
	tmp = sgimc->cpuctrl0;
	tmp &= ~SGIMC_CCTRL0_WDOG;
	sgimc->cpuctrl0 = tmp;
	/* Step 1: The CPU/GIO error status registers will not latch
	 * up a new error status until the register has been
	 * cleared by the cpu. These status registers are
	 * cleared by writing any value to them.
	 */
	sgimc->cstat = sgimc->gstat = 0;
	/* Step 2: Enable all parity checking in cpu control register
	 * zero.
	 */
	/* don't touch parity settings for IP28 */
	tmp = sgimc->cpuctrl0;
#ifndef CONFIG_SGI_IP28
	tmp |= SGIMC_CCTRL0_EPERRGIO | SGIMC_CCTRL0_EPERRMEM;
#endif
	tmp |= SGIMC_CCTRL0_R4KNOCHKPARR;
	sgimc->cpuctrl0 = tmp;
	/* Step 3: Setup the MC write buffer depth, this is controlled
	 * in cpu control register 1 in the lower 4 bits.
	 */
	tmp = sgimc->cpuctrl1;
	tmp &= ~0xf;
	tmp |= 0xd;
	sgimc->cpuctrl1 = tmp;
	/* Step 4: Initialize the RPSS divider register to run as fast
	 * as it can correctly operate. The register is laid
	 * out as follows:
	 *
	 * ----------------------------------------
	 * | RESERVED | INCREMENT | DIVIDER |
	 * ----------------------------------------
	 * 31 16 15 8 7 0
	 *
	 * DIVIDER determines how often a 'tick' happens,
	 * INCREMENT determines by how the RPSS increment
	 * registers value increases at each 'tick'. Thus,
	 * for IP22 we get INCREMENT=1, DIVIDER=1 == 0x101
	 */
	sgimc->divider = 0x101;
	/* Step 5: Initialize GIO64 arbitrator configuration register.
	 *
	 * NOTE: HPC init code in sgihpc_init() must run before us because
	 * we need to know Guiness vs. FullHouse and the board
	 * revision on this machine. You have been warned.
	 */
	/* First the basic invariants across all GIO64 implementations. */
	tmp = sgimc->giopar & SGIMC_GIOPAR_GFX64; /* keep gfx 64bit settings */
	tmp |= SGIMC_GIOPAR_HPC64; /* All 1st HPC's interface at 64bits */
	tmp |= SGIMC_GIOPAR_ONEBUS; /* Only one physical GIO bus exists */
	if (ip22_is_fullhouse()) {
		/* Fullhouse specific settings. */
		if (SGIOC_SYSID_BOARDREV(sgioc->sysid) < 2) {
			tmp |= SGIMC_GIOPAR_HPC264; /* 2nd HPC at 64bits */
			tmp |= SGIMC_GIOPAR_PLINEEXP0; /* exp0 pipelines */
			tmp |= SGIMC_GIOPAR_MASTEREXP1; /* exp1 masters */
			tmp |= SGIMC_GIOPAR_RTIMEEXP0; /* exp0 is realtime */
		} else {
			tmp |= SGIMC_GIOPAR_HPC264; /* 2nd HPC 64bits */
			tmp |= SGIMC_GIOPAR_PLINEEXP0; /* exp[01] pipelined */
			tmp |= SGIMC_GIOPAR_PLINEEXP1;
			tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA masters */
		}
	} else {
		/* Guiness specific settings. */
		tmp |= SGIMC_GIOPAR_EISA64; /* MC talks to EISA at 64bits */
		tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA bus can act as master */
	}
	sgimc->giopar = tmp; /* poof */
	probe_memory();
}
/* Nothing to do: memory regions are registered by sgimc_init() above. */
void __init prom_meminit(void) {}
/*
 * prom_free_prom_memory - release resources used by the ARCS firmware.
 *
 * On IP22 there is nothing to do.  On IP28 this is the point where the
 * memory system is switched from the slow (ARCS-compatible) mode to
 * normal mode, since the firmware is no longer needed from here on.
 */
void __init prom_free_prom_memory(void)
{
#ifdef CONFIG_SGI_IP28
	u32 mconfig1;
	unsigned long flags;

	/*
	 * because ARCS accesses memory uncached we wait until ARCS
	 * isn't needed any longer, before we switch from slow to
	 * normal mode
	 *
	 * Fix: the original code took an *uninitialized* function-local
	 * spinlock_t here.  A stack-local lock serializes nothing (no
	 * other context can ever see it) and locking an uninitialized
	 * spinlock is undefined with CONFIG_DEBUG_SPINLOCK/lockdep.
	 * All that is actually required is to keep interrupts off
	 * around the register sequence.
	 */
	local_irq_save(flags);
	mconfig1 = sgimc->mconfig1;
	/* map ECC register */
	sgimc->mconfig1 = (mconfig1 & 0xffff0000) | 0x2060;
	iob();
	/* switch to normal mode */
	*(unsigned long *)PHYS_TO_XKSEG_UNCACHED(0x60000000) = 0;
	iob();
	/* reduce WR_COL */
	sgimc->cmacc = (sgimc->cmacc & ~0xf) | 4;
	iob();
	/* restore old config */
	sgimc->mconfig1 = mconfig1;
	iob();
	local_irq_restore(flags);
#endif
}
| gpl-2.0 |
RomaVis/eeenote-kernel | arch/arm/mach-omap2/clockdomains2430_data.c | 5037 | 4973 | /*
* OMAP2xxx clockdomains
*
* Copyright (C) 2008-2009 Texas Instruments, Inc.
* Copyright (C) 2008-2010 Nokia Corporation
*
* Paul Walmsley, Jouni Högander
*
* This file contains clockdomains and clockdomain wakeup dependencies
* for OMAP2xxx chips. Some notes:
*
* A useful validation rule for struct clockdomain: Any clockdomain
* referenced by a wkdep_srcs must have a dep_bit assigned. So
* wkdep_srcs are really just software-controllable dependencies.
* Non-software-controllable dependencies do exist, but they are not
* encoded below (yet).
*
* 24xx does not support programmable sleep dependencies (SLEEPDEP)
*
* The overly-specific dep_bit names are due to a bit name collision
* with CM_FCLKEN_{DSP,IVA2}. The DSP/IVA2 PM_WKDEP and CM_SLEEPDEP shift
* value are the same for all powerdomains: 2
*
* XXX should dep_bit be a mask, so we can test to see if it is 0 as a
* sanity check?
* XXX encode hardware fixed wakeup dependencies -- esp. for 3430 CORE
*/
/*
* To-Do List
* -> Port the Sleep/Wakeup dependencies for the domains
* from the Power domain framework
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include "clockdomain.h"
#include "prm2xxx_3xxx.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-24xx.h"
#include "prm-regbits-24xx.h"
/*
* Clockdomain dependencies for wkdeps
*
* XXX Hardware dependencies (e.g., dependencies that cannot be
* changed in software) are not included here yet, but should be.
*/
/* Wakeup dependency source arrays */
/* 2430-specific possible wakeup dependencies */
/* 2430 PM_WKDEP_CORE: DSP, GFX, MPU, WKUP, MDM */
static struct clkdm_dep core_2430_wkdeps[] = {
	{ .clkdm_name = "dsp_clkdm" },
	{ .clkdm_name = "gfx_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ .clkdm_name = "mdm_clkdm" },
	{ NULL },	/* NULL .clkdm_name terminates the list */
};
/* 2430 PM_WKDEP_MPU: CORE, DSP, WKUP, MDM */
static struct clkdm_dep mpu_2430_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "dsp_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ .clkdm_name = "mdm_clkdm" },
	{ NULL },
};
/* 2430 PM_WKDEP_MDM: CORE, MPU, WKUP */
static struct clkdm_dep mdm_2430_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};
/*
 * 2430-only clockdomains
 */
static struct clockdomain mpu_2430_clkdm = {
	.name = "mpu_clkdm",
	.pwrdm = { .name = "mpu_pwrdm" },
	.flags = CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs = mpu_2430_wkdeps,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_MPU_MASK,
};
/* Another case of bit name collisions between several registers: EN_MDM */
static struct clockdomain mdm_clkdm = {
	.name = "mdm_clkdm",
	.pwrdm = { .name = "mdm_pwrdm" },
	.flags = CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit = OMAP2430_PM_WKDEP_MPU_EN_MDM_SHIFT,
	.wkdep_srcs = mdm_2430_wkdeps,
	.clktrctrl_mask = OMAP2430_AUTOSTATE_MDM_MASK,
};
static struct clockdomain dsp_2430_clkdm = {
	.name = "dsp_clkdm",
	.pwrdm = { .name = "dsp_pwrdm" },
	.flags = CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit = OMAP24XX_PM_WKDEP_MPU_EN_DSP_SHIFT,
	/* dsp_24xx_wkdeps is shared with the 2420 tables (defined elsewhere) */
	.wkdep_srcs = dsp_24xx_wkdeps,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSP_MASK,
};
static struct clockdomain gfx_2430_clkdm = {
	.name = "gfx_clkdm",
	.pwrdm = { .name = "gfx_pwrdm" },
	.flags = CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs = gfx_24xx_wkdeps,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_GFX_MASK,
};
/*
 * XXX add usecounting for clkdm dependencies, otherwise the presence
 * of a single dep bit for core_l3_24xx_clkdm and core_l4_24xx_clkdm
 * could cause trouble
 */
static struct clockdomain core_l3_2430_clkdm = {
	.name = "core_l3_clkdm",
	.pwrdm = { .name = "core_pwrdm" },
	.flags = CLKDM_CAN_HWSUP,
	.dep_bit = OMAP24XX_EN_CORE_SHIFT,
	.wkdep_srcs = core_2430_wkdeps,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L3_MASK,
};
/*
 * XXX add usecounting for clkdm dependencies, otherwise the presence
 * of a single dep bit for core_l3_24xx_clkdm and core_l4_24xx_clkdm
 * could cause trouble
 */
static struct clockdomain core_l4_2430_clkdm = {
	.name = "core_l4_clkdm",
	.pwrdm = { .name = "core_pwrdm" },
	.flags = CLKDM_CAN_HWSUP,
	.dep_bit = OMAP24XX_EN_CORE_SHIFT,
	.wkdep_srcs = core_2430_wkdeps,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L4_MASK,
};
static struct clockdomain dss_2430_clkdm = {
	.name = "dss_clkdm",
	.pwrdm = { .name = "core_pwrdm" },
	.flags = CLKDM_CAN_HWSUP,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSS_MASK,
};
/* NULL-terminated list handed to clkdm_register_clkdms() at init time. */
static struct clockdomain *clockdomains_omap243x[] __initdata = {
	&wkup_common_clkdm,
	&cm_common_clkdm,
	&prm_common_clkdm,
	&mpu_2430_clkdm,
	&mdm_clkdm,
	&dsp_2430_clkdm,
	&gfx_2430_clkdm,
	&core_l3_2430_clkdm,
	&core_l4_2430_clkdm,
	&dss_2430_clkdm,
	NULL,
};
/*
 * omap243x_clockdomains_init - register the OMAP2430 clockdomains.
 *
 * Safe to call unconditionally from a multi-OMAP kernel: bails out
 * early when not running on an OMAP243x.
 */
void __init omap243x_clockdomains_init(void)
{
	if (!cpu_is_omap243x())
		return;

	clkdm_register_platform_funcs(&omap2_clkdm_operations);
	clkdm_register_clkdms(clockdomains_omap243x);
	clkdm_complete_init();
}
| gpl-2.0 |
IllusionRom-deprecated/android_kernel_samsung_aries | arch/microblaze/lib/muldi3.c | 7597 | 1635 | #include <linux/module.h>
#include "libgcc.h"
#define DWtype long long
#define UWtype unsigned long
#define UHWtype unsigned short
#define W_TYPE_SIZE 32
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
/* If we still don't have umul_ppmm, define it using plain C. */
#if !defined(umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
do { \
UWtype __x0, __x1, __x2, __x3; \
UHWtype __ul, __vl, __uh, __vh; \
\
__ul = __ll_lowpart(u); \
__uh = __ll_highpart(u); \
__vl = __ll_lowpart(v); \
__vh = __ll_highpart(v); \
\
__x0 = (UWtype) __ul * __vl; \
__x1 = (UWtype) __ul * __vh; \
__x2 = (UWtype) __uh * __vl; \
__x3 = (UWtype) __uh * __vh; \
\
__x1 += __ll_highpart(__x0); /* this can't give carry */\
__x1 += __x2; /* but this indeed can */ \
if (__x1 < __x2) /* did we get it? */ \
__x3 += __ll_B; /* yes, add it in the proper pos */ \
\
(w1) = __x3 + __ll_highpart(__x1); \
(w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
} while (0)
#endif
#if !defined(__umulsidi3)
#define __umulsidi3(u, v) ({ \
DWunion __w; \
umul_ppmm(__w.s.high, __w.s.low, u, v); \
__w.ll; \
})
#endif
/*
 * __muldi3 - signed 64 x 64 -> 64 bit multiply (libgcc helper).
 *
 * The low 32x32 product is computed in full via __umulsidi3().  The two
 * cross products only influence the upper word (mod 2^32), and the
 * high*high product lies entirely outside the 64-bit result, so the
 * cross terms are simply folded into the high half.
 */
DWtype __muldi3(DWtype u, DWtype v)
{
	const DWunion a = {.ll = u};
	const DWunion b = {.ll = v};
	DWunion prod = {.ll = __umulsidi3(a.s.low, b.s.low)};
	const UWtype cross = (UWtype) a.s.low * (UWtype) b.s.high
			   + (UWtype) a.s.high * (UWtype) b.s.low;

	prod.s.high += cross;
	return prod.ll;
}
EXPORT_SYMBOL(__muldi3);
| gpl-2.0 |
akuster/linux-yocto-3.14 | security/keys/encrypted-keys/ecryptfs_format.c | 10413 | 2628 | /*
* ecryptfs_format.c: helper functions for the encrypted key type
*
* Copyright (C) 2006 International Business Machines Corp.
* Copyright (C) 2010 Politecnico di Torino, Italy
* TORSEC group -- http://security.polito.it
*
* Authors:
* Michael A. Halcrow <mahalcro@us.ibm.com>
* Tyler Hicks <tyhicks@ou.edu>
* Roberto Sassu <roberto.sassu@polito.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*/
#include <linux/module.h>
#include "ecryptfs_format.h"
/*
 * ecryptfs_get_auth_tok_key - return the session-key-encryption key buffer
 * embedded in a password-type auth token.
 *
 * The returned pointer aliases @auth_tok's own storage; no copy is made
 * and no ownership is transferred.
 */
u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok)
{
	return auth_tok->token.password.session_key_encryption_key;
}
EXPORT_SYMBOL(ecryptfs_get_auth_tok_key);
/*
* ecryptfs_get_versions()
*
* Source code taken from the software 'ecryptfs-utils' version 83.
*
*/
void ecryptfs_get_versions(int *major, int *minor, int *file_version)
{
*major = ECRYPTFS_VERSION_MAJOR;
*minor = ECRYPTFS_VERSION_MINOR;
if (file_version)
*file_version = ECRYPTFS_SUPPORTED_FILE_VERSION;
}
EXPORT_SYMBOL(ecryptfs_get_versions);
/*
 * ecryptfs_fill_auth_tok - fill the ecryptfs_auth_tok structure
 *
 * Fill the ecryptfs_auth_tok structure with required ecryptfs data.
 * The source code is inspired to the original function generate_payload()
 * shipped with the software 'ecryptfs-utils' version 83.
 *
 * Always returns 0.
 */
int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok,
			   const char *key_desc)
{
	int major, minor;

	ecryptfs_get_versions(&major, &minor, NULL);
	/* Pack major into the high byte, minor into the low byte. */
	auth_tok->version = (((uint16_t)(major << 8) & 0xFF00)
			     | ((uint16_t)minor & 0x00FF));
	auth_tok->token_type = ECRYPTFS_PASSWORD;
	/*
	 * NOTE(review): strncpy() leaves .signature unterminated when
	 * key_desc is >= ECRYPTFS_PASSWORD_SIG_SIZE characters;
	 * presumably the field is fixed-width with a trailing NUL slot
	 * in the struct - confirm against the struct definition.
	 */
	strncpy((char *)auth_tok->token.password.signature, key_desc,
		ECRYPTFS_PASSWORD_SIG_SIZE);
	auth_tok->token.password.session_key_encryption_key_bytes =
		ECRYPTFS_MAX_KEY_BYTES;
	/*
	 * Removed auth_tok->token.password.salt and
	 * auth_tok->token.password.session_key_encryption_key
	 * initialization from the original code
	 */
	/* TODO: Make the hash parameterizable via policy */
	auth_tok->token.password.flags |=
		ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET;
	/* The kernel code will encrypt the session key. */
	auth_tok->session_key.encrypted_key[0] = 0;
	auth_tok->session_key.encrypted_key_size = 0;
	/* Default; subject to change by kernel eCryptfs */
	auth_tok->token.password.hash_algo = PGP_DIGEST_ALGO_SHA512;
	auth_tok->token.password.flags &= ~(ECRYPTFS_PERSISTENT_PASSWORD);
	return 0;
}
EXPORT_SYMBOL(ecryptfs_fill_auth_tok);
MODULE_LICENSE("GPL");
| gpl-2.0 |
wm8120/Linux-eMMC-journaling | mm/page-writeback.c | 174 | 71538 | /*
* mm/page-writeback.c
*
* Copyright (C) 2002, Linus Torvalds.
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* Contains functions related to writing back dirty pages at the
* address_space level.
*
* 10Apr2002 Andrew Morton
* Initial version
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <trace/events/writeback.h>
/*
* Sleep at most 200ms at a time in balance_dirty_pages().
*/
#define MAX_PAUSE max(HZ/5, 1)
/*
* Try to keep balance_dirty_pages() call intervals higher than this many pages
* by raising pause time to max_pause when falls below it.
*/
#define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
/*
* Estimate write bandwidth at 200ms intervals.
*/
#define BANDWIDTH_INTERVAL max(HZ/5, 1)
#define RATELIMIT_CALC_SHIFT 10
/*
* After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
* will look to see if it needs to force writeback or throttling.
*/
static long ratelimit_pages = 32;
/* The following parameters are exported via /proc/sys/vm */
/*
* Start background writeback (via writeback threads) at this percentage
*/
int dirty_background_ratio = 10;
/*
* dirty_background_bytes starts at 0 (disabled) so that it is a function of
* dirty_background_ratio * the amount of dirtyable memory
*/
unsigned long dirty_background_bytes;
/*
* free highmem will not be subtracted from the total free memory
* for calculating free ratios if vm_highmem_is_dirtyable is true
*/
int vm_highmem_is_dirtyable;
/*
* The generator of dirty data starts writeback at this percentage
*/
int vm_dirty_ratio = 20;
/*
* vm_dirty_bytes starts at 0 (disabled) so that it is a function of
* vm_dirty_ratio * the amount of dirtyable memory
*/
unsigned long vm_dirty_bytes;
/*
* The interval between `kupdate'-style writebacks
*/
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
EXPORT_SYMBOL_GPL(dirty_writeback_interval);
/*
* The longest time for which data is allowed to remain dirty
*/
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
/*
* Flag that makes the machine dump writes/reads and block dirtyings.
*/
int block_dump;
/*
* Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
* a full sync is triggered after this time elapses without any disk activity.
*/
int laptop_mode;
EXPORT_SYMBOL(laptop_mode);
/* End of sysctl-exported parameters */
unsigned long global_dirty_limit;
/*
* Scale the writeback cache size proportional to the relative writeout speeds.
*
* We do this by keeping a floating proportion between BDIs, based on page
* writeback completions [end_page_writeback()]. Those devices that write out
* pages fastest will get the larger share, while the slower will get a smaller
* share.
*
* We use page writeout completions because we are interested in getting rid of
* dirty pages. Having them written out is the primary goal.
*
* We introduce a concept of time, a period over which we measure these events,
* because demand can/will vary over time. The length of this period itself is
* measured in page writeback completions.
*
*/
static struct fprop_global writeout_completions;
static void writeout_period(unsigned long t);
/* Timer for aging of writeout_completions */
static struct timer_list writeout_period_timer =
TIMER_DEFERRED_INITIALIZER(writeout_period, 0, 0);
static unsigned long writeout_period_time = 0;
/*
* Length of period for aging writeout fractions of bdis. This is an
* arbitrarily chosen number. The longer the period, the slower fractions will
* reflect changes in current writeout rate.
*/
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
/*
* Work out the current dirty-memory clamping and background writeout
* thresholds.
*
* The main aim here is to lower them aggressively if there is a lot of mapped
* memory around. To avoid stressing page reclaim with lots of unreclaimable
* pages. It is better to clamp down on writers than to start swapping, and
* performing lots of scanning.
*
* We only allow 1/2 of the currently-unmapped memory to be dirtied.
*
* We don't permit the clamping level to fall below 5% - that is getting rather
* excessive.
*
* We make sure that the background writeout level is below the adjusted
* clamping level.
*/
/*
* In a memory zone, there is a certain amount of pages we consider
* available for the page cache, which is essentially the number of
* free and reclaimable pages, minus some zone reserves to protect
* lowmem and the ability to uphold the zone's watermarks without
* requiring writeback.
*
* This number of dirtyable pages is the base value of which the
* user-configurable dirty ratio is the effictive number of pages that
* are allowed to be actually dirtied. Per individual zone, or
* globally by using the sum of dirtyable pages over all zones.
*
* Because the user is allowed to specify the dirty limit globally as
* absolute number of bytes, calculating the per-zone dirty limit can
* require translating the configured limit into a percentage of
* global dirtyable memory first.
*/
/*
 * highmem_dirtyable_memory - dirtyable pages residing in highmem.
 * @total: total dirtyable memory, used as an upper clamp on the result.
 *
 * Sums free plus reclaimable pages of every node's ZONE_HIGHMEM, minus
 * each zone's dirty balance reserve.  Always 0 on !CONFIG_HIGHMEM.
 */
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
		x += zone_page_state(z, NR_FREE_PAGES) +
			zone_reclaimable_pages(z) - z->dirty_balance_reserve;
	}
	/*
	 * Unreclaimable memory (kernel memory or anonymous memory
	 * without swap) can bring down the dirtyable pages below
	 * the zone's dirty balance reserve and the above calculation
	 * will underflow. However we still want to add in nodes
	 * which are below threshold (negative values) to get a more
	 * accurate calculation but make sure that the total never
	 * underflows.
	 */
	if ((long)x < 0)
		x = 0;
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}
/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Returns the global number of pages potentially available for dirty
 * page cache. This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
	/* Keep the lowmem/watermark reserve out of the dirtyable pool. */
	x -= min(x, dirty_balance_reserve);
	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);
	/* Subtract min_free_kbytes */
	x -= min_t(unsigned long, x, min_free_kbytes >> (PAGE_SHIFT - 10));
	return x + 1;	/* Ensure that we never return 0 */
}
/*
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio or vm.dirty_background_bytes
 * - vm.dirty_ratio or vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	unsigned long background;
	unsigned long dirty;
	unsigned long uninitialized_var(available_memory);
	struct task_struct *tsk;

	/* Only compute dirtyable memory when a ratio-based limit needs it. */
	if (!vm_dirty_bytes || !dirty_background_bytes)
		available_memory = global_dirtyable_memory();
	/* A byte-based setting takes precedence over the ratio. */
	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else
		dirty = (vm_dirty_ratio * available_memory) / 100;
	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;
	/* Background writeback must kick in before the throttle threshold. */
	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
	trace_global_dirty_state(background, dirty);
}
/**
 * zone_dirtyable_memory - number of dirtyable pages in a zone
 * @zone: the zone
 *
 * Returns the zone's number of pages potentially available for dirty
 * page cache. This is the base value for the per-zone dirty limits.
 */
static unsigned long zone_dirtyable_memory(struct zone *zone)
{
	/*
	 * The effective global number of dirtyable pages may exclude
	 * highmem as a big-picture measure to keep the ratio between
	 * dirty memory and lowmem reasonable.
	 *
	 * But this function is purely about the individual zone and a
	 * highmem zone can hold its share of dirty pages, so we don't
	 * care about vm_highmem_is_dirtyable here.
	 */
	unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
		zone_reclaimable_pages(zone);

	/* don't allow this to underflow */
	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
	return nr_pages;
}
/**
 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
 * @zone: the zone
 *
 * Returns the maximum number of dirty pages allowed in a zone, based
 * on the zone's dirtyable memory.
 */
static unsigned long zone_dirty_limit(struct zone *zone)
{
	unsigned long zone_memory = zone_dirtyable_memory(zone);
	struct task_struct *tsk = current;
	unsigned long dirty;

	/* Scale the global byte limit by this zone's share of dirtyable
	 * memory; otherwise apply the ratio directly to the zone. */
	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			zone_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * zone_memory / 100;
	/* Same 1/4 boost as in global_dirty_limits(). */
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
		dirty += dirty / 4;
	return dirty;
}
/**
 * zone_dirty_ok - tells whether a zone is within its dirty limits
 * @zone: the zone to check
 *
 * Returns %true when the dirty pages in @zone are within the zone's
 * dirty limit, %false if the limit is exceeded.
 */
bool zone_dirty_ok(struct zone *zone)
{
	unsigned long limit = zone_dirty_limit(zone);

	/* Dirty, NFS-unstable and under-writeback pages all count. */
	return zone_page_state(zone, NR_FILE_DIRTY) +
	       zone_page_state(zone, NR_UNSTABLE_NFS) +
	       zone_page_state(zone, NR_WRITEBACK) <= limit;
}
/*
 * sysctl handler for vm.dirty_background_ratio.  A successful write of
 * the ratio disables the bytes-based background limit.
 */
int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}
/*
 * sysctl handler for vm.dirty_background_bytes.  A successful write of
 * the byte count disables the ratio-based background limit.
 */
int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);

	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}
/*
 * sysctl handler for vm.dirty_ratio.  When the ratio actually changes,
 * retune the per-task ratelimit and disable the bytes-based knob.
 */
int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write || vm_dirty_ratio == old_ratio)
		return ret;

	writeback_set_ratelimit();
	vm_dirty_bytes = 0;
	return ret;
}
/*
 * sysctl handler for vm.dirty_bytes.  When the byte count actually
 * changes, retune the per-task ratelimit and disable the ratio knob.
 */
int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write || vm_dirty_bytes == old_bytes)
		return ret;

	writeback_set_ratelimit();
	vm_dirty_ratio = 0;
	return ret;
}
/*
 * Compute the next writeout-period expiry after @cur_time, skipping the
 * value 0 which is reserved to mean "period timer not running".
 */
static unsigned long wp_next_time(unsigned long cur_time)
{
	unsigned long next = cur_time + VM_COMPLETIONS_PERIOD_LEN;

	/* 0 has a special meaning... */
	return next ? next : 1;
}
/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__inc_bdi_stat(bdi, BDI_WRITTEN);
	__fprop_inc_percpu_max(&writeout_completions, &bdi->completions,
			       bdi->max_prop_frac);
	/*
	 * First event after period switching was turned off?
	 *
	 * Fix: this read "!unlikely(writeout_period_time)", which hints
	 * that writeout_period_time is usually zero - the opposite of
	 * reality once writeback is running.  The branch hint belongs on
	 * the whole negated condition: re-arming the timer is the rare
	 * case.  Truth value is unchanged.
	 */
	if (unlikely(!writeout_period_time)) {
		/*
		 * We can race with other __bdi_writeout_inc calls here but
		 * it does not cause any harm since the resulting time when
		 * timer will fire and what is in writeout_period_time will be
		 * roughly the same.
		 */
		writeout_period_time = wp_next_time(jiffies);
		mod_timer(&writeout_period_timer, writeout_period_time);
	}
}
/*
 * bdi_writeout_inc - account one completed writeout against @bdi.
 *
 * Interrupt-safe wrapper around __bdi_writeout_inc(): disables local
 * interrupts so the percpu accounting cannot be torn by an IRQ.
 */
void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);
/*
 * Obtain an accurate fraction of the BDI's portion.
 *
 * On return, *numerator / *denominator is this bdi's share of recent
 * writeout completions, per the flexible-proportions machinery.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	fprop_fraction_percpu(&writeout_completions, &bdi->completions,
				numerator, denominator);
}
/*
 * On idle system, we can be called long after we scheduled because we use
 * deferred timers so count with missed periods.
 */
static void writeout_period(unsigned long t)
{
	/* How many whole periods elapsed since the timer was due. */
	int miss_periods = (jiffies - writeout_period_time) /
				 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&writeout_completions, miss_periods + 1)) {
		/* Still non-zero fractions: keep the aging timer running. */
		writeout_period_time = wp_next_time(writeout_period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&writeout_period_timer, writeout_period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		writeout_period_time = 0;
	}
}
/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

/*
 * bdi_set_min_ratio - reserve @min_ratio percent of the dirty limit for @bdi.
 *
 * Returns 0 on success, -EINVAL if the new minimum exceeds the bdi's
 * max_ratio or would push the global sum of minimums to 100% or beyond.
 */
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		/*
		 * min_ratio is turned into the delta from the current
		 * value.  When lowering the ratio the unsigned
		 * subtraction wraps, and the wrapped additions below
		 * still yield the correct (smaller) sums - well-defined
		 * for unsigned arithmetic, but subtle.
		 */
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
/*
 * bdi_set_max_ratio - cap @bdi's share of the dirty limit at @max_ratio
 * percent.  Fails with -EINVAL if the cap is above 100% or below the
 * bdi's reserved minimum share.
 */
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (max_ratio < bdi->min_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);
/*
 * Tasks dirty pages without any throttling until this many pages are
 * dirty: the midpoint between the background and hard dirty thresholds.
 */
static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	unsigned long span = thresh + bg_thresh;

	return span / 2;
}
/*
 * The effective hard limit never drops below the globally tracked
 * dirty limit.
 */
static unsigned long hard_dirty_limit(unsigned long thresh)
{
	return thresh >= global_dirty_limit ? thresh : global_dirty_limit;
}
/**
* bdi_dirty_limit - @bdi's share of dirty throttling threshold
* @bdi: the backing_dev_info to query
* @dirty: global dirty limit in pages
*
* Returns @bdi's dirty limit in pages. The term "dirty" in the context of
* dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
*
* Note that balance_dirty_pages() will only seriously take it as a hard limit
* when sleeping max_pause per page is not enough to keep the dirty pages under
* control. For example, when the device is completely stalled due to some error
* conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
* In the other normal situations, it acts more gently by throttling the tasks
* more (rather than completely block them) when the bdi dirty pages go high.
*
* It allocates high/low dirty limits to fast/slow devices, in order to prevent
* - starving fast devices
* - piling up dirty pages (that will take long time to sync) on slow devices
*
* The bdi's share of dirty limit will be adapting to its throughput and
* bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
*/
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
{
	u64 limit;
	unsigned long hard_cap;
	long numerator, denominator;

	/*
	 * This bdi's recent fraction of the total writeout, as tracked by
	 * the fprop code.
	 */
	bdi_writeout_fraction(bdi, &numerator, &denominator);

	/*
	 * Share of the dirty budget that is not reserved by min_ratio
	 * holders, scaled by this bdi's writeout fraction ...
	 */
	limit = (dirty * (100 - bdi_min_ratio)) / 100;
	limit *= numerator;
	do_div(limit, denominator);

	/* ... plus this bdi's own guaranteed minimum share. */
	limit += (dirty * bdi->min_ratio) / 100;

	/* Finally, clamp to the configured per-bdi maximum. */
	hard_cap = dirty * bdi->max_ratio / 100;
	if (limit > hard_cap)
		limit = hard_cap;

	return limit;
}
/*
* Dirty position control.
*
* (o) global/bdi setpoints
*
* We want the dirty pages be balanced around the global/bdi setpoints.
* When the number of dirty pages is higher/lower than the setpoint, the
* dirty position control ratio (and hence task dirty ratelimit) will be
* decreased/increased to bring the dirty pages back to the setpoint.
*
* pos_ratio = 1 << RATELIMIT_CALC_SHIFT
*
* if (dirty < setpoint) scale up pos_ratio
* if (dirty > setpoint) scale down pos_ratio
*
* if (bdi_dirty < bdi_setpoint) scale up pos_ratio
* if (bdi_dirty > bdi_setpoint) scale down pos_ratio
*
* task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
*
* (o) global control line
*
* ^ pos_ratio
* |
* | |<===== global dirty control scope ======>|
* 2.0 .............*
* | .*
* | . *
* | . *
* | . *
* | . *
* | . *
* 1.0 ................................*
* | . . *
* | . . *
* | . . *
* | . . *
* | . . *
* 0 +------------.------------------.----------------------*------------->
* freerun^ setpoint^ limit^ dirty pages
*
* (o) bdi control line
*
* ^ pos_ratio
* |
* | *
* | *
* | *
* | *
* | * |<=========== span ============>|
* 1.0 .......................*
* | . *
* | . *
* | . *
* | . *
* | . *
* | . *
* | . *
* | . *
* | . *
* | . *
* | . *
* 1/4 ...............................................* * * * * * * * * * * *
* | . .
* | . .
* | . .
* 0 +----------------------.-------------------------------.------------->
* bdi_setpoint^ x_intercept^
*
* The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
* be smoothly throttled down to normal if it starts high in situations like
* - start writing to a slow SD card and a fast disk at the same time. The SD
* card's bdi_dirty may rush to many times higher than bdi_setpoint.
* - the bdi dirty thresh drops quickly due to change of JBOD workload
*/
/*
 * Compute the position control ratio (RATELIMIT_CALC_SHIFT fixed point)
 * for the given global and per-bdi dirty state.  1.0 means "on target";
 * values above/below scale the task ratelimit up/down.  See the long
 * comment above for the shape of the global and bdi control lines.
 */
static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
					unsigned long thresh,
					unsigned long bg_thresh,
					unsigned long dirty,
					unsigned long bdi_thresh,
					unsigned long bdi_dirty)
{
	unsigned long write_bw = bdi->avg_write_bandwidth;
	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
	unsigned long limit = hard_dirty_limit(thresh);
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long bdi_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	/* At or above the hard limit: throttle completely. */
	if (unlikely(dirty >= limit))
		return 0;

	/*
	 * global setpoint
	 *
	 *                           setpoint - dirty 3
	 *        f(dirty) := 1.0 + (----------------)
	 *                           limit - setpoint
	 *
	 * it's a 3rd order polynomial that subjects to
	 *
	 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
	 * (2) f(setpoint) = 1.0 => the balance point
	 * (3) f(limit)    = 0   => the hard limit
	 * (4) df/dx      <= 0	 => negative feedback control
	 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
	 *     => fast response on large errors; small oscillation near setpoint
	 */
	setpoint = (freerun + limit) / 2;
	x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		    limit - setpoint + 1);
	pos_ratio = x;
	/* Evaluate 1.0 + x^3 in fixed point, one multiply at a time. */
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	/*
	 * We have computed basic pos_ratio above based on global situation. If
	 * the bdi is over/under its share of dirty pages, we want to scale
	 * pos_ratio further down/up. That is done by the following mechanism.
	 */

	/*
	 * bdi setpoint
	 *
	 *        f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
	 *
	 *                        x_intercept - bdi_dirty
	 *                     := --------------------------
	 *                        x_intercept - bdi_setpoint
	 *
	 * The main bdi control line is a linear function that subjects to
	 *
	 * (1) f(bdi_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single bdi case)
	 *     or equally: x_intercept = bdi_setpoint + 8 * write_bw
	 *
	 * For single bdi case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
	 * for various filesystems, where (2) can yield in a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its
	 * own size, so move the slope over accordingly and choose a slope that
	 * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh.
	 */
	if (unlikely(bdi_thresh > thresh))
		bdi_thresh = thresh;
	/*
	 * It's very possible that bdi_thresh is close to 0 not because the
	 * device is slow, but that it has remained inactive for long time.
	 * Honour such devices a reasonable good (hopefully IO efficient)
	 * threshold, so that the occasional writes won't be blocked and active
	 * writes can rampup the threshold quickly.
	 */
	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
	/*
	 * scale global setpoint to bdi's:
	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
	 * (x carries the ratio in 16-bit fixed point; +1 avoids div by 0)
	 */
	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
	bdi_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in single bdi case as indicated by
	 * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case.
	 *
	 *        bdi_thresh                    thresh - bdi_thresh
	 * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
	 *          thresh                            thresh
	 */
	span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = bdi_setpoint + span;

	if (bdi_dirty < x_intercept - span / 4) {
		/* On the linear part of the bdi control line. */
		pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
				    x_intercept - bdi_setpoint + 1);
	} else
		/* Past the knee: flatten out at 1/4 (see diagram above). */
		pos_ratio /= 4;

	/*
	 * bdi reserve area, safeguard against dirty pool underrun and disk idle
	 * It may push the desired control point of global dirty pages higher
	 * than setpoint.
	 */
	x_intercept = bdi_thresh / 2;
	if (bdi_dirty < x_intercept) {
		if (bdi_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
		else
			/* Very few bdi dirty pages: boost by a fixed 8x. */
			pos_ratio *= 8;
	}

	return pos_ratio;
}
/*
 * Update bdi->write_bandwidth and its smoothed companion
 * bdi->avg_write_bandwidth from the pages written during @elapsed
 * jiffies.  Uses an exponentially weighted average over a ~3s period.
 */
static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
				       unsigned long elapsed,
				       unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = bdi->avg_write_bandwidth;
	unsigned long old = bdi->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 */
	bw = written - bdi->written_stamp;
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		/* Sample spans more than a full period: take it verbatim. */
		do_div(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)bdi->write_bandwidth * (period - elapsed);
	/* period is a power of two, so the divide is a cheap shift. */
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes:
	 * only move avg towards bw when old lies between them.
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	bdi->write_bandwidth = bw;
	bdi->avg_write_bandwidth = avg;
}
/*
* The global dirtyable memory and dirty threshold could be suddenly knocked
* down by a large amount (eg. on the startup of KVM in a swapless system).
* This may throw the system into deep dirty exceeded state and throttle
* heavy/light dirtiers alike. To retain good responsiveness, maintain
* global_dirty_limit for tracking slowly down to the knocked down dirty
* threshold.
*/
/*
 * Track global_dirty_limit towards @thresh: rise in a single step, but
 * decay gradually and never drop below the current dirty page count
 * (see the comment above for why).
 */
static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
{
	unsigned long limit = global_dirty_limit;

	/* Follow increases immediately. */
	if (limit < thresh) {
		global_dirty_limit = thresh;
		return;
	}

	/*
	 * Follow decreases slowly.  Target the higher of thresh and dirty,
	 * because thresh may drop below dirty; global_dirty_limit must stay
	 * above the dirty pages.
	 */
	thresh = max(thresh, dirty);
	if (limit > thresh)
		global_dirty_limit = limit - ((limit - thresh) >> 5);
}
/*
 * Refresh global_dirty_limit at most once every BANDWIDTH_INTERVAL,
 * no matter how many CPUs call in concurrently.
 */
static void global_update_bandwidth(unsigned long thresh,
				    unsigned long dirty,
				    unsigned long now)
{
	static DEFINE_SPINLOCK(dirty_lock);
	static unsigned long update_time;

	/*
	 * check locklessly first to optimize away locking for the most time
	 */
	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dirty_lock);
	/* Re-check under the lock: another CPU may have just updated. */
	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(thresh, dirty);
		update_time = now;
	}
	spin_unlock(&dirty_lock);
}
/*
* Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
*
* Normal bdi tasks will be curbed at or below it in long term.
* Obviously it should be around (write_bw / N) when there are N dd tasks.
*/
/*
 * Re-estimate bdi->dirty_ratelimit from the observed dirty rate over the
 * past @elapsed jiffies, filtered through the position ratio.  Updates
 * bdi->dirty_ratelimit and bdi->balanced_dirty_ratelimit.
 */
static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
				       unsigned long thresh,
				       unsigned long bg_thresh,
				       unsigned long dirty,
				       unsigned long bdi_thresh,
				       unsigned long bdi_dirty,
				       unsigned long dirtied,
				       unsigned long elapsed)
{
	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
	unsigned long limit = hard_dirty_limit(thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = bdi->avg_write_bandwidth;
	unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long pos_ratio;
	unsigned long step;
	unsigned long x;

	/*
	 * The dirty rate will match the writeout rate in long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
	dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;

	pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
				       bdi_thresh, bdi_dirty);
	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate.  Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will stuck in that state! Because each dd will
	 * be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * put (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meet the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could safely do this and return immediately:
	 *
	 *	bdi->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what the users ultimately
	 * feel and care are stable dirty rate and small position error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit. Which
	 * keeps jumping around randomly and can even leap far away at times
	 * due to the small 200ms estimation period of dirty_rate (we want to
	 * keep that period small to reduce time lags).
	 */
	step = 0;
	if (dirty < setpoint) {
		/* Below setpoint: only allow the ratelimit to rise. */
		x = min(bdi->balanced_dirty_ratelimit,
			 min(balanced_dirty_ratelimit, task_ratelimit));
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		/* Above setpoint: only allow it to fall. */
		x = max(bdi->balanced_dirty_ratelimit,
			 max(balanced_dirty_ratelimit, task_ratelimit));
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	step >>= dirty_ratelimit / (2 * step + 1);
	/*
	 * Limit the tracking speed to avoid overshooting.
	 */
	step = (step + 7) / 8;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	/* Never let the ratelimit reach zero. */
	bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
	bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
}
/*
 * Recompute the bdi's write bandwidth and dirty ratelimit estimates,
 * then snapshot the dirtied/written counters and timestamp for the next
 * interval.  Caller holds bdi->wb.list_lock.
 */
void __bdi_update_bandwidth(struct backing_dev_info *bdi,
			    unsigned long thresh,
			    unsigned long bg_thresh,
			    unsigned long dirty,
			    unsigned long bdi_thresh,
			    unsigned long bdi_dirty,
			    unsigned long start_time)
{
	unsigned long now = jiffies;
	unsigned long elapsed = now - bdi->bw_time_stamp;
	unsigned long dirtied;
	unsigned long written;

	/*
	 * rate-limit, only update once every 200ms.
	 */
	if (elapsed < BANDWIDTH_INTERVAL)
		return;

	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);

	/*
	 * Skip quiet periods when disk bandwidth is under-utilized
	 * (at least 1s idle time between two flusher runs); in that case
	 * only the counter snapshot below is taken.
	 */
	if (elapsed <= HZ || !time_before(bdi->bw_time_stamp, start_time)) {
		if (thresh) {
			global_update_bandwidth(thresh, dirty, now);
			bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh,
						   dirty, bdi_thresh,
						   bdi_dirty, dirtied,
						   elapsed);
		}
		bdi_update_write_bandwidth(bdi, elapsed, written);
	}

	/* Snapshot counters and timestamp for the next interval. */
	bdi->dirtied_stamp = dirtied;
	bdi->written_stamp = written;
	bdi->bw_time_stamp = now;
}
/*
 * Locked wrapper around __bdi_update_bandwidth() with a cheap lockless
 * pre-check so callers pay nothing inside the 200ms rate limit window.
 */
static void bdi_update_bandwidth(struct backing_dev_info *bdi,
				 unsigned long thresh,
				 unsigned long bg_thresh,
				 unsigned long dirty,
				 unsigned long bdi_thresh,
				 unsigned long bdi_dirty,
				 unsigned long start_time)
{
	if (time_is_before_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL)) {
		spin_lock(&bdi->wb.list_lock);
		__bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
				       bdi_thresh, bdi_dirty, start_time);
		spin_unlock(&bdi->wb.list_lock);
	}
}
/*
* After a task dirtied this many pages, balance_dirty_pages_ratelimited()
* will look to see if it needs to start dirty throttling.
*
* If dirty_poll_interval is too low, big NUMA machines will call the expensive
* global_page_state() too often. So scale it near-sqrt to the safety margin
* (the number of pages we may dirty without exceeding the dirty limits).
*/
/*
 * Pages a task may dirty before re-checking the limits: roughly the
 * square root of the remaining safety margin, minimum 1.
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (dirty >= thresh)
		return 1;

	return 1UL << (ilog2(thresh - dirty) >> 1);
}
/*
 * Upper bound on a single throttle pause, in jiffies, capped at
 * MAX_PAUSE and scaled down on small/slow dirty pools so the disk
 * never goes idle while we sleep.
 */
static long bdi_max_pause(struct backing_dev_info *bdi,
			  unsigned long bdi_dirty)
{
	long bw = bdi->avg_write_bandwidth;
	long pause;

	/*
	 * Limit pause time for small memory systems. If sleeping for too long
	 * time, a small pool of dirty/writeback pages may go empty and disk go
	 * idle.
	 *
	 * 8 serves as the safety ratio.
	 */
	pause = 1 + bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));

	return min_t(long, pause, MAX_PAUSE);
}
/*
 * Compute the minimum throttle pause and, via *nr_dirtied_pause, the
 * number of pages a task may dirty before pausing again.  Returns the
 * minimum pause in jiffies (normally half the target pause).
 */
static long bdi_min_pause(struct backing_dev_info *bdi,
			  long max_pause,
			  unsigned long task_ratelimit,
			  unsigned long dirty_ratelimit,
			  int *nr_dirtied_pause)
{
	long hi = ilog2(bdi->avg_write_bandwidth);
	long lo = ilog2(bdi->dirty_ratelimit);
	long t;		/* target pause */
	long pause;	/* estimated next pause */
	int pages;	/* target nr_dirtied_pause */

	/* target for 10ms pause on 1-dd case */
	t = max(1, HZ / 100);

	/*
	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
	 * overheads.
	 *
	 * (N * 10ms) on 2^N concurrent tasks.
	 */
	if (hi > lo)
		t += (hi - lo) * (10 * HZ) / 1024;

	/*
	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
	 * on the much more stable dirty_ratelimit. However the next pause time
	 * will be computed based on task_ratelimit and the two rate limits may
	 * depart considerably at some time. Especially if task_ratelimit goes
	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
	 * result task_ratelimit won't be executed faithfully, which could
	 * eventually bring down dirty_ratelimit.
	 *
	 * We apply two rules to fix it up:
	 * 1) try to estimate the next pause time and if necessary, use a lower
	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
	 * 2) limit the target pause time to max_pause/2, so that the normal
	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
	 */
	t = min(t, 1 + max_pause / 2);
	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);

	/*
	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
	 * When the 16 consecutive reads are often interrupted by some dirty
	 * throttling pause during the async writes, cfq will go into idles
	 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until reaches DIRTY_POLL_THRESH=32 pages.
	 */
	if (pages < DIRTY_POLL_THRESH) {
		t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
		if (pages > DIRTY_POLL_THRESH) {
			pages = DIRTY_POLL_THRESH;
			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
		}
	}

	/* Rule (1): trim pages so the estimated pause stays <= max_pause. */
	pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
	}

	*nr_dirtied_pause = pages;
	/*
	 * The minimal pause time will normally be half the target pause time.
	 */
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}
/*
* balance_dirty_pages() must be called by processes which are generating dirty
* data. It looks at the number of dirty pages in the machine and will force
* the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
* If we're over `background_thresh' then the writeback threads are woken to
* perform some writeout.
*/
static void balance_dirty_pages(struct address_space *mapping,
				unsigned long pages_dirtied)
{
	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
	unsigned long bdi_reclaimable;
	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
	unsigned long bdi_dirty;
	unsigned long freerun;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	long period;
	long pause;
	long max_pause;
	long min_pause;
	int nr_dirtied_pause;
	bool dirty_exceeded = false;
	unsigned long task_ratelimit;
	unsigned long dirty_ratelimit;
	unsigned long pos_ratio;
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	unsigned long start_time = jiffies;

	for (;;) {
		unsigned long now = jiffies;

		/*
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);

		global_dirty_limits(&background_thresh, &dirty_thresh);

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		freerun = dirty_freerun_ceiling(dirty_thresh,
						background_thresh);
		if (nr_dirty <= freerun) {
			/* Under the freerun ceiling: reset throttling state
			 * and let the task proceed unthrottled. */
			current->dirty_paused_when = now;
			current->nr_dirtied = 0;
			current->nr_dirtied_pause =
				dirty_poll_interval(nr_dirty, dirty_thresh);
			break;
		}

		if (unlikely(!writeback_in_progress(bdi)))
			bdi_start_background_writeback(bdi);

		/*
		 * bdi_thresh is not treated as some limiting factor as
		 * dirty_thresh, due to reasons
		 * - in JBOD setup, bdi_thresh can fluctuate a lot
		 * - in a system with HDD and USB key, the USB key may somehow
		 *   go into state (bdi_dirty >> bdi_thresh) either because
		 *   bdi_dirty starts high, or because bdi_thresh drops low.
		 *   In this case we don't want to hard throttle the USB key
		 *   dirtiers for 100 seconds until bdi_dirty drops under
		 *   bdi_thresh. Instead the auxiliary bdi control line in
		 *   bdi_position_ratio() will let the dirtier task progress
		 *   at some rate <= (write_bw / 2) for bringing down bdi_dirty.
		 */
		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
			bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_dirty = bdi_reclaimable +
				    bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else {
			bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_dirty = bdi_reclaimable +
				    bdi_stat(bdi, BDI_WRITEBACK);
		}

		dirty_exceeded = (bdi_dirty > bdi_thresh) &&
				 (nr_dirty > dirty_thresh);
		if (dirty_exceeded && !bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		bdi_update_bandwidth(bdi, dirty_thresh, background_thresh,
				     nr_dirty, bdi_thresh, bdi_dirty,
				     start_time);

		/* Derive this task's throttle rate from the bdi's base
		 * ratelimit scaled by the current position ratio. */
		dirty_ratelimit = bdi->dirty_ratelimit;
		pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
					       background_thresh, nr_dirty,
					       bdi_thresh, bdi_dirty);
		task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
							RATELIMIT_CALC_SHIFT;
		max_pause = bdi_max_pause(bdi, bdi_dirty);
		min_pause = bdi_min_pause(bdi, max_pause,
					  task_ratelimit, dirty_ratelimit,
					  &nr_dirtied_pause);

		if (unlikely(task_ratelimit == 0)) {
			period = max_pause;
			pause = max_pause;
			goto pause;
		}
		period = HZ * pages_dirtied / task_ratelimit;
		pause = period;
		if (current->dirty_paused_when)
			pause -= now - current->dirty_paused_when;
		/*
		 * For less than 1s think time (ext3/4 may block the dirtier
		 * for up to 800ms from time to time on 1-HDD; so does xfs,
		 * however at much less frequency), try to compensate it in
		 * future periods by updating the virtual time; otherwise just
		 * do a reset, as it may be a light dirtier.
		 */
		if (pause < min_pause) {
			trace_balance_dirty_pages(bdi,
						  dirty_thresh,
						  background_thresh,
						  nr_dirty,
						  bdi_thresh,
						  bdi_dirty,
						  dirty_ratelimit,
						  task_ratelimit,
						  pages_dirtied,
						  period,
						  min(pause, 0L),
						  start_time);
			if (pause < -HZ) {
				current->dirty_paused_when = now;
				current->nr_dirtied = 0;
			} else if (period) {
				current->dirty_paused_when += period;
				current->nr_dirtied = 0;
			} else if (current->nr_dirtied_pause <= pages_dirtied)
				current->nr_dirtied_pause += pages_dirtied;
			break;
		}
		if (unlikely(pause > max_pause)) {
			/* for occasional dropped task_ratelimit */
			now += min(pause - max_pause, max_pause);
			pause = max_pause;
		}

pause:
		trace_balance_dirty_pages(bdi,
					  dirty_thresh,
					  background_thresh,
					  nr_dirty,
					  bdi_thresh,
					  bdi_dirty,
					  dirty_ratelimit,
					  task_ratelimit,
					  pages_dirtied,
					  period,
					  pause,
					  start_time);
		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);

		current->dirty_paused_when = now + pause;
		current->nr_dirtied = 0;
		current->nr_dirtied_pause = nr_dirtied_pause;

		/*
		 * This is typically equal to (nr_dirty < dirty_thresh) and can
		 * also keep "1000+ dd on a slow USB stick" under control.
		 */
		if (task_ratelimit)
			break;

		/*
		 * In the case of an unresponding NFS server and the NFS dirty
		 * pages exceeds dirty_thresh, give the other good bdi's a pipe
		 * to go through, so that tasks on them still remain responsive.
		 *
		 * In theory 1 page is enough to keep the comsumer-producer
		 * pipe going: the flusher cleans 1 page => the task dirties 1
		 * more page. However bdi_dirty has accounting errors.  So use
		 * the larger and more IO friendly bdi_stat_error.
		 */
		if (bdi_dirty <= bdi_stat_error(bdi))
			break;

		if (fatal_signal_pending(current))
			break;
	}

	if (!dirty_exceeded && bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if (laptop_mode)
		return;

	if (nr_reclaimable > background_thresh)
		bdi_start_background_writeback(bdi);
}
/*
 * Dirty @page and, if it was newly dirtied (or @page_mkwrite says the
 * page is being made writable), run the dirty balancing for its mapping.
 */
void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	int newly_dirtied = set_page_dirty(page);

	if (newly_dirtied || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}
/* Per-CPU count of recently dirtied pages, reset when any task on this
 * CPU enters balance_dirty_pages() (see balance_dirty_pages_ratelimited). */
static DEFINE_PER_CPU(int, bdp_ratelimits);
/*
* Normal tasks are throttled by
* loop {
* dirty tsk->nr_dirtied_pause pages;
* take a snap in balance_dirty_pages();
* }
* However there is a worst case. If every task exit immediately when dirtied
* (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
* called to throttle the page dirties. The solution is to save the not yet
* throttled page dirties in dirty_throttle_leaks on task exit and charge them
* randomly into the running tasks. This works well for the above worst case,
* as the new task will pick up and accumulate the old task's leaked dirty
* count and eventually get throttled.
*/
/* Per-CPU pool of dirty-page charges left behind by exited tasks; see
 * the comment above for how they are redistributed to running tasks. */
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
/**
* balance_dirty_pages_ratelimited - balance dirty memory state
* @mapping: address_space which was dirtied
*
* Processes which are dirtying memory should call in here once for each page
* which was newly dirtied. The function will periodically check the system's
* dirty state and will initiate writeback if needed.
*
* On really big machines, get_writeback_state is expensive, so try to avoid
* calling it too often (ratelimiting). But once we're over the dirty memory
* limit we decrease the ratelimiting by a lot, to prevent individual processes
* from overshooting the limit by (ratelimit_pages) each.
*/
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ratelimit;
	int *p;

	/* bdis that don't account dirty pages are never throttled. */
	if (!bdi_cap_account_dirty(bdi))
		return;

	ratelimit = current->nr_dirtied_pause;
	if (bdi->dirty_exceeded)
		/* Over the limit: check at least every 8 pages (32KB). */
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));

	preempt_disable();
	/*
	 * This prevents one CPU to accumulate too many dirtied pages without
	 * calling into balance_dirty_pages(), which can happen when there are
	 * 1000+ tasks, all of them start dirtying pages at exactly the same
	 * time, hence all honoured too large initial task->nr_dirtied_pause.
	 */
	p =  &__get_cpu_var(bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		/* Force this task into balance_dirty_pages() below. */
		*p = 0;
		ratelimit = 0;
	}
	/*
	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
	 * the dirty throttling and livelock other long-run dirtiers.
	 */
	p = &__get_cpu_var(dirty_throttle_leaks);
	if (*p > 0 && current->nr_dirtied < ratelimit) {
		unsigned long nr_pages_dirtied;
		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
		*p -= nr_pages_dirtied;
		current->nr_dirtied += nr_pages_dirtied;
	}
	preempt_enable();

	if (unlikely(current->nr_dirtied >= ratelimit))
		balance_dirty_pages(mapping, current->nr_dirtied);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
/*
 * Block the caller while the system-wide writeback + unstable-NFS page
 * count exceeds the (slightly boosted) dirty threshold.  Callers that
 * cannot make IO progress (lacking __GFP_FS or __GFP_IO) wait only one
 * round to avoid deadlocking on their own locks.
 */
void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long limit;

	for (;;) {
		global_dirty_limits(&background_thresh, &dirty_thresh);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		limit = hard_dirty_limit(dirty_thresh);
		limit += limit / 10;	/* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
		    global_page_state(NR_WRITEBACK) <= limit)
			break;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if (!(gfp_mask & __GFP_FS) || !(gfp_mask & __GFP_IO))
			break;
	}
}
/*
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
*/
/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs.
 *
 * Propagate proc_dointvec()'s return value instead of discarding it and
 * returning 0 unconditionally: parse errors (e.g. -EINVAL on malformed
 * input) must reach the writer rather than being reported as success.
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	return proc_dointvec(table, write, buffer, length, ppos);
}
#ifdef CONFIG_BLOCK
/*
 * Laptop-mode writeback timer: when it fires, flush all dirty and
 * unstable pages through the queue's bdi (not just down to the dirty
 * threshold), provided the bdi has dirty IO pending.
 */
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages;

	/*
	 * We want to write everything out, not just down to the dirty
	 * threshold.
	 */
	nr_pages = global_page_state(NR_FILE_DIRTY) +
		   global_page_state(NR_UNSTABLE_NFS);

	if (!bdi_has_dirty_io(&q->backing_dev_info))
		return;

	bdi_start_writeback(&q->backing_dev_info, nr_pages,
			    WB_REASON_LAPTOP_TIMER);
}
/*
* We've spun up the disk and we're in laptop mode: schedule writeback
* of all dirty data a few seconds from now. If the flush is already scheduled
* then push it back - the user is still using the disk.
*/
/*
 * The disk just did IO in laptop mode: (re)arm the writeback timer for
 * laptop_mode jiffies from now, pushing any pending flush back while
 * the user keeps the disk busy.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
	unsigned long expires = jiffies + laptop_mode;

	mod_timer(&info->laptop_mode_wb_timer, expires);
}
/*
* We're in laptop mode and we've just synced. The sync's writes will have
* caused another writeback to be scheduled by laptop_io_completion.
* Nothing needs to be written back anymore, so we unschedule the writeback.
*/
/*
 * A sync just completed in laptop mode, so nothing is left to write
 * back: cancel the pending laptop-mode writeback timer on every bdi.
 */
void laptop_sync_completion(void)
{
	struct backing_dev_info *entry;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bdi_list, bdi_list)
		del_timer(&entry->laptop_mode_wb_timer);

	rcu_read_unlock();
}
#endif
/*
* If ratelimit_pages is too high then we can get into dirty-data overload
* if a large number of processes all perform writes at the same time.
* If it is too low then SMP machines will call the (expensive)
* get_writeback_state too often.
*
* Here we set ratelimit_pages to a level which ensures that when all CPUs are
* dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
* thresholds.
*/
/*
 * Recompute ratelimit_pages from the current dirty threshold so that
 * all CPUs dirtying in parallel stay within ~3% (1/32) of the limit,
 * with a floor of 16 pages.  Also refreshes global_dirty_limit.
 */
void writeback_set_ratelimit(void)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long pages;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	global_dirty_limit = dirty_thresh;

	pages = dirty_thresh / (num_online_cpus() * 32);
	ratelimit_pages = pages < 16 ? 16 : pages;
}
/*
 * CPU hotplug callback: re-derive ratelimit_pages when the set of
 * online CPUs changes (a CPU came online or died).
 */
static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long action,
		  void *hcpu)
{
	unsigned long event = action & ~CPU_TASKS_FROZEN;

	if (event != CPU_ONLINE && event != CPU_DEAD)
		return NOTIFY_DONE;

	writeback_set_ratelimit();
	return NOTIFY_OK;
}
/* Hotplug notifier that keeps ratelimit_pages in sync with CPU count. */
static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};
/*
* Called early on to tune the page writeback dirty limits.
*
* We used to scale dirty pages according to how total memory
* related to pages that could be allocated for buffers (by
* comparing nr_free_buffer_pages() to vm_total_pages.
*
* However, that was when we used "dirty_ratio" to scale with
* all memory, and we don't do that any more. "dirty_ratio"
* is now applied to total non-HIGHPAGE memory (by subtracting
* totalhigh_pages from vm_total_pages), and as such we can't
* get into the old insane situation any more where we had
* large amounts of dirty pages compared to a small amount of
* non-HIGHMEM memory.
*
* But we might still want to scale the dirty_ratio by how
* much memory the box has..
*/
void __init page_writeback_init(void)
{
	/* compute the initial per-CPU dirty ratelimit ... */
	writeback_set_ratelimit();
	/* ... and keep it updated across CPU hotplug events */
	register_cpu_notifier(&ratelimit_nb);

	/* fractional-proportion state used for per-bdi writeout accounting */
	fprop_global_init(&writeout_completions);
}
/**
* tag_pages_for_writeback - tag pages to be written by write_cache_pages
* @mapping: address space structure to write
* @start: starting page index
* @end: ending page index (inclusive)
*
* This function scans the page range from @start to @end (inclusive) and tags
* all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
* that write_cache_pages (or whoever calls this function) will then use
* TOWRITE tag to identify pages eligible for writeback. This mechanism is
* used to avoid livelocking of writeback by a process steadily creating new
* dirty pages in the file (thus it is important for this function to be quick
* so that it can tag pages faster than a dirtying process can create them).
*/
/*
* We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
*/
/*
 * Walk [start, end] in batches, converting DIRTY radix-tree tags to
 * TOWRITE tags under tree_lock. Batching (WRITEBACK_TAG_BATCH entries
 * per lock hold) keeps tree_lock latency bounded; cond_resched() between
 * batches keeps the walk preemptible.
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
	unsigned long nr_tagged;

	for (;;) {
		spin_lock_irq(&mapping->tree_lock);
		nr_tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		WARN_ON_ONCE(nr_tagged > WRITEBACK_TAG_BATCH);
		cond_resched();
		/* 'start' wraps to 0 once end == ~0UL has been reached */
		if (nr_tagged < WRITEBACK_TAG_BATCH || !start)
			break;
	}
}
EXPORT_SYMBOL(tag_pages_for_writeback);
/**
* write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
* @mapping: address space structure to write
* @wbc: subtract the number of written pages from *@wbc->nr_to_write
* @writepage: function called for each page
* @data: data passed to writepage function
*
* If a page is already under I/O, write_cache_pages() skips it, even
* if it's dirty. This is desirable behaviour for memory-cleaning writeback,
* but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
* and msync() need to guarantee that all the data which was dirty at the time
* the call was made get new I/O started against them. If wbc->sync_mode is
* WB_SYNC_ALL then we were called for data integrity and we must wait for
* existing IO to complete.
*
* To avoid livelocks (when other process dirties new pages), we first tag
* pages which should be written back with TOWRITE tag and only then start
* writing them. For data-integrity sync we have to be careful so that we do
* not miss some pages (e.g., because some other process has cleared TOWRITE
* tag we set). The rule we follow is that TOWRITE tag can be cleared only
* by the process clearing the DIRTY tag (and submitting the page for IO).
*/
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	/*
	 * Integrity sync walks the TOWRITE snapshot created below so that
	 * pages dirtied mid-walk cannot livelock us.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			trace_wbc_writepage(wbc, mapping->backing_dev_info);
			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file. This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			/*
			 * We stop writing back only if we are not doing
			 * integrity sync. In case of integrity sync we have to
			 * keep going until we have written all the pages
			 * we tagged for writeback prior to entering this loop.
			 */
			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	/* remember where to resume on the next cyclic pass */
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);
/*
* Function used by generic_writepages to call the real writepage
* function and set the mapping flags on error
*/
/*
 * write_cache_pages() callback: invoke the mapping's real ->writepage
 * and record any failure in the mapping's error flags.
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int status;

	status = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, status);
	return status;
}
/**
* generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
* @mapping: address space structure to write
* @wbc: subtract the number of written pages from *@wbc->nr_to_write
*
* This is a library function, which implements the writepages()
* address_space_operation.
*/
/*
 * Library implementation of the writepages() address_space operation:
 * drive write_cache_pages() over ->writepage under a block plug.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	struct blk_plug plug;
	int err = 0;

	/* chardevs and other special files have no ->writepage */
	if (mapping->a_ops->writepage) {
		blk_start_plug(&plug);
		err = write_cache_pages(mapping, wbc, __writepage, mapping);
		blk_finish_plug(&plug);
	}
	return err;
}
EXPORT_SYMBOL(generic_writepages);
/*
 * Dispatch writeback for a mapping: prefer the filesystem's own
 * ->writepages implementation, falling back to generic_writepages().
 */
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		return mapping->a_ops->writepages(mapping, wbc);
	return generic_writepages(mapping, wbc);
}
/**
* write_one_page - write out a single page and optionally wait on I/O
* @page: the page to write
* @wait: if true, wait on writeout
*
* The page must be locked by the caller and will be unlocked upon return.
*
* write_one_page() returns a negative error code if I/O failed.
*/
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	/* drain any writeback already in flight before resubmitting */
	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		/* hold a reference across ->writepage, which unlocks the page */
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		/* page was clean: nothing to submit, just honour the unlock */
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
/*
* For address_spaces which do not use buffers nor write back.
*/
/*
 * Minimal set_page_dirty for mappings with neither buffers nor writeback:
 * just set PG_dirty. Returns 1 only if this call made the transition.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (PageDirty(page))
		return 0;
	return !TestSetPageDirty(page);
}
/*
* Helper function for set_page_dirty family.
* NOTE: This relies on being atomic wrt interrupts.
*/
/*
 * Charge a freshly-dirtied page to the zone, bdi and task counters.
 * NOTE: relies on being atomic wrt interrupts (uses the __-prefixed
 * non-irq-safe stat helpers).
 */
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
	trace_writeback_dirty_page(page, mapping);

	if (!mapping_cap_account_dirty(mapping))
		return;

	__inc_zone_page_state(page, NR_FILE_DIRTY);
	__inc_zone_page_state(page, NR_DIRTIED);
	__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
	task_io_account_write(PAGE_CACHE_SIZE);
	current->nr_dirtied++;
	this_cpu_inc(bdp_ratelimits);
}
EXPORT_SYMBOL(account_page_dirtied);
/*
* Helper function for set_page_writeback family.
* NOTE: Unlike account_page_dirtied this does not rely on being atomic
* wrt interrupts.
*/
void account_page_writeback(struct page *page)
{
	/* bump the zone's count of pages currently under writeback */
	inc_zone_page_state(page, NR_WRITEBACK);
}
EXPORT_SYMBOL(account_page_writeback);
/*
* For address_spaces which do not use buffers. Just tag the page as dirty in
* its radix tree.
*
* This is also used when a single buffer is being dirtied: we want to set the
* page dirty in that case, but not all the buffers. This is a "bottom-up"
* dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
*
* Most callers have locked the page, which pins the address_space in memory.
* But zap_pte_range() does not lock the page, however in that case the
* mapping is pinned by the vma's ->vm_file reference.
*
* We take care to handle the case where the page was truncated from the
* mapping by re-checking page_mapping() inside tree_lock.
*/
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		/* no mapping (e.g. anonymous page): flag alone suffices */
		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		/* re-check under tree_lock in case truncate raced with us */
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	/* page was already dirty: nothing to account */
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
/*
* Call this whenever redirtying a page, to de-account the dirty counters
* (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
* counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to
* systematic errors in balanced_dirty_ratelimit and the dirty pages position
* control.
*/
/*
 * Undo the "newly dirtied" accounting for a page that is being
 * redirtied instead of written, keeping the dirtied/written counters
 * balanced for the dirty-position control logic.
 */
void account_page_redirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (!mapping || !mapping_cap_account_dirty(mapping))
		return;

	current->nr_dirtied--;
	dec_zone_page_state(page, NR_DIRTIED);
	dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
}
EXPORT_SYMBOL(account_page_redirty);
/*
* When a writepage implementation decides that it doesn't want to write this
* page for some reason, it should redirty the locked page via
* redirty_page_for_writepage() and it should then unlock the page and return 0
*/
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	/* record the skip, back out the dirtied accounting, re-dirty */
	wbc->pages_skipped++;
	account_page_redirty(page);
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);
/*
* Dirty a page.
*
* For pages with a mapping this should be done under the page lock
* for the benefit of asynchronous memory errors who prefer a consistent
* dirty state. This rule can be broken in some special cases,
* but should be better not to.
*
* If the mapping doesn't provide a set_page_dirty a_op, then
* just fall through and assume that it wants buffer_heads.
*/
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
		 * readahead/lru_deactivate_page could remain
		 * PG_readahead/PG_reclaim due to race with end_page_writeback
		 * About readahead, if the page is written, the flags would be
		 * reset. So no problem.
		 * About lru_deactivate_page, if the page is redirty, the flag
		 * will be reset. So no problem. but if the page is used by readahead
		 * it will confuse readahead and make it restart the size rampup
		 * process. But it's a trivial problem.
		 */
		ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
		/* no a_op supplied: assume the mapping wants buffer_heads */
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	/* no mapping (anonymous/swapper page): just set the flag */
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);
/*
* set_page_dirty() is racy if the caller has no reference against
* page->mapping->host, and if the page is unlocked. This is because another
* CPU could truncate the page off the mapping and then free the mapping.
*
* Usually, the page _is_ locked, or the caller is a user-space process which
* holds a reference on the inode by having an open file.
*
* In other cases, the page should be locked before running set_page_dirty().
*/
/*
 * Locked variant of set_page_dirty() for callers that hold no reference
 * keeping the mapping alive; the page lock provides the needed pin.
 */
int set_page_dirty_lock(struct page *page)
{
	int newly_dirty;

	lock_page(page);
	newly_dirty = set_page_dirty(page);
	unlock_page(page);

	return newly_dirty;
}
EXPORT_SYMBOL(set_page_dirty_lock);
/*
* Clear a page's dirty flag, while caring for dirty memory accounting.
* Returns true if the page was previously dirty.
*
* This is for preparing to put the page under writeout. We leave the page
* tagged as dirty in the radix tree so that a concurrent write-for-sync
* can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage
* implementation will run either set_page_writeback() or set_page_dirty(),
* at which stage we bring the page's dirty flag and radix-tree dirty tag
* back into sync.
*
* This incoherency between the page's dirty flag and radix-tree tag is
* unfortunate, but it only exists while the page is locked.
*/
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point.  We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion. See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	/* no mapping or no dirty accounting: just clear the bit */
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);
/*
 * Clear PG_writeback and the matching radix-tree tag, updating bdi and
 * zone statistics. Returns the previous writeback state of the page.
 */
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		/* tree_lock keeps the page flag and the tag in sync */
		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret) {
		dec_zone_page_state(page, NR_WRITEBACK);
		inc_zone_page_state(page, NR_WRITTEN);
	}
	return ret;
}
/*
 * Set PG_writeback and the matching radix-tree tag, clearing the DIRTY
 * and TOWRITE tags for this entry. Returns the previous writeback state.
 */
int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		/* the page is going under IO: it is no longer "dirty" */
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		radix_tree_tag_clear(&mapping->page_tree,
				     page_index(page),
				     PAGECACHE_TAG_TOWRITE);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		account_page_writeback(page);
	return ret;

}
EXPORT_SYMBOL(test_set_page_writeback);
/*
* Return true if any of the pages in the mapping are marked with the
* passed tag.
*/
int mapping_tagged(struct address_space *mapping, int tag)
{
	/* lockless: only inspects the root node's summary tag bits */
	return radix_tree_tagged(&mapping->page_tree, tag);
}
EXPORT_SYMBOL(mapping_tagged);
/**
* wait_for_stable_page() - wait for writeback to finish, if necessary.
* @page: The page to wait on.
*
* This function determines if the given page is related to a backing device
* that requires page contents to be held stable during writeback. If so, then
* it will wait for any pending writeback to complete.
*/
void wait_for_stable_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	/*
	 * NOTE(review): dereferences 'mapping' unconditionally — this
	 * assumes callers only pass pagecache pages whose mapping is
	 * still valid (page locked). Confirm at call sites.
	 */
	struct backing_dev_info *bdi = mapping->backing_dev_info;

	if (!bdi_cap_stable_pages_required(bdi))
		return;

	wait_on_page_writeback(page);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);
| gpl-2.0 |
onealtom/MYD-C335X-Linux-Kernel | arch/arm/mach-at91/board-1arm.c | 174 | 2746 | /*
* linux/arch/arm/mach-at91/board-1arm.c
*
* Copyright (C) 2005 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <mach/hardware.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/board.h>
#include <mach/cpu.h>
#include "generic.h"
/* Early SoC setup for the Ajeco 1ARM board: clock, UART and console wiring. */
static void __init onearm_init_early(void)
{
	/* Set cpu type: PQFP */
	at91rm9200_set_type(ARCH_REVISON_9200_PQFP);

	/* Initialize processor: 18.432 MHz crystal */
	at91_initialize(18432000);

	/* DBGU on ttyS0. (Rx & Tx only) */
	at91_register_uart(0, 0, 0);

	/* USART0 on ttyS1 (Rx, Tx, CTS, RTS) */
	at91_register_uart(AT91RM9200_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS);

	/* USART1 on ttyS2 (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
	at91_register_uart(AT91RM9200_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS
			   | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
			   | ATMEL_UART_RI);

	/* set serial console to ttyS0 (ie, DBGU) */
	at91_set_serial_console(0);
}
/* Ethernet: RMII PHY, interrupt wired to PC4 */
static struct at91_eth_data __initdata onearm_eth_data = {
	.phy_irq_pin	= AT91_PIN_PC4,
	.is_rmii	= 1,
};

/* USB host: a single downstream port */
static struct at91_usbh_data __initdata onearm_usbh_data = {
	.ports		= 1,
};

/* USB device: VBUS sense on PC2, D+ pull-up control on PC3 */
static struct at91_udc_data __initdata onearm_udc_data = {
	.vbus_pin	= AT91_PIN_PC2,
	.pullup_pin	= AT91_PIN_PC3,
};
/* Register the board's on-chip peripherals with the platform bus. */
static void __init onearm_board_init(void)
{
	/* Serial */
	at91_add_device_serial();
	/* Ethernet */
	at91_add_device_eth(&onearm_eth_data);
	/* USB Host */
	at91_add_device_usbh(&onearm_usbh_data);
	/* USB Device */
	at91_add_device_udc(&onearm_udc_data);
}
/* Machine descriptor tying the board hooks into the ARM boot sequence. */
MACHINE_START(ONEARM, "Ajeco 1ARM single board computer")
	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
	.timer		= &at91rm9200_timer,
	.map_io		= at91_map_io,
	.init_early	= onearm_init_early,
	.init_irq	= at91_init_irq_default,
	.init_machine	= onearm_board_init,
MACHINE_END
| gpl-2.0 |
NEKTech-Labs/NEKTech-Linux | drivers/net/ethernet/broadcom/sb1250-mac.c | 174 | 66792 | /*
* Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
* Copyright (c) 2006, 2007 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*
* This driver is designed for the Broadcom SiByte SOC built-in
* Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp.
*
* Updated to the driver model and the PHY abstraction layer
* by Maciej W. Rozycki.
*/
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
/* Operational parameters that usually are not changed. */

#define CONFIG_SBMAC_COALESCE

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");

/* A few user-configurable values which may be modified when a driver
   module is loaded. */

/* 1 normal messages, 0 quiet .. 7 verbose. */
static int debug = 1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug messages");

#ifdef CONFIG_SBMAC_COALESCE
/* Interrupt coalescing thresholds: packet count and timeout, per direction. */
static int int_pktcnt_tx = 255;
module_param(int_pktcnt_tx, int, S_IRUGO);
MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");

static int int_timeout_tx = 255;
module_param(int_timeout_tx, int, S_IRUGO);
MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");

static int int_pktcnt_rx = 64;
module_param(int_pktcnt_rx, int, S_IRUGO);
MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");

static int int_timeout_rx = 64;
module_param(int_timeout_rx, int, S_IRUGO);
MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#endif
#include <asm/sibyte/board.h>
#include <asm/sibyte/sb1250.h>
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#define R_MAC_DMA_OODPKTLOST_RX R_MAC_DMA_OODPKTLOST
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#else
#error invalid SiByte MAC configuration
#endif
#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/sb1250_mac.h>
#include <asm/sibyte/sb1250_dma.h>
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
#define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2))
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#define UNIT_INT(n) (K_INT_MAC_0 + (n))
#else
#error invalid SiByte MAC configuration
#endif
#ifdef K_INT_PHY
#define SBMAC_PHY_INT K_INT_PHY
#else
#define SBMAC_PHY_INT PHY_POLL
#endif
/**********************************************************************
* Simple types
********************************************************************* */
/* Link speed values, keyed to the ethtool SPEED_* constants. */
enum sbmac_speed {
	sbmac_speed_none = 0,
	sbmac_speed_10 = SPEED_10,
	sbmac_speed_100 = SPEED_100,
	sbmac_speed_1000 = SPEED_1000,
};

/* Duplex settings, keyed to the ethtool DUPLEX_* constants. */
enum sbmac_duplex {
	sbmac_duplex_none = -1,
	sbmac_duplex_half = DUPLEX_HALF,
	sbmac_duplex_full = DUPLEX_FULL,
};

/* Flow-control modes supported by the MAC. */
enum sbmac_fc {
	sbmac_fc_none,
	sbmac_fc_disabled,
	sbmac_fc_frame,
	sbmac_fc_collision,
	sbmac_fc_carrier,
};

/* Life-cycle state of a MAC channel. */
enum sbmac_state {
	sbmac_state_uninit,
	sbmac_state_off,
	sbmac_state_on,
	sbmac_state_broken,
};
/**********************************************************************
* Macros
********************************************************************* */
/* Advance ring pointer 'f' of DMA controller 'd', wrapping at table end. */
#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
			  (d)->sbdma_dscrtable : (d)->f+1)

/* Round a byte count up to a whole number of cache blocks. */
#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)

/* Descriptor-ring sizes and frame parameters. */
#define SBMAC_MAX_TXDESCR	256
#define SBMAC_MAX_RXDESCR	256

#define ETHER_ADDR_LEN		6
#define ENET_PACKET_SIZE	1518
/*#define ENET_PACKET_SIZE	9216 */
/**********************************************************************
* DMA Descriptor structure
********************************************************************* */
/* One hardware DMA descriptor: two 64-bit control/address words. */
struct sbdmadscr {
	uint64_t dscr_a;
	uint64_t dscr_b;
};
/**********************************************************************
* DMA Controller structure
********************************************************************* */
/*
 * Per-direction DMA channel state: hardware register addresses plus the
 * software-maintained descriptor ring and its bookkeeping pointers.
 */
struct sbmacdma {

	/*
	 * This stuff is used to identify the channel and the registers
	 * associated with it.
	 */

	struct sbmac_softc	*sbdma_eth;	/* back pointer to associated
						   MAC */
	int			sbdma_channel;	/* channel number */
	int			sbdma_txdir;	/* direction (1=transmit) */
	int			sbdma_maxdescr;	/* total # of descriptors
						   in ring */
#ifdef CONFIG_SBMAC_COALESCE
	int			sbdma_int_pktcnt;
						/* # descriptors rx/tx
						   before interrupt */
	int			sbdma_int_timeout;
						/* # usec rx/tx interrupt */
#endif
	void __iomem		*sbdma_config0;	/* DMA config register 0 */
	void __iomem		*sbdma_config1;	/* DMA config register 1 */
	void __iomem		*sbdma_dscrbase;
						/* descriptor base address */
	void __iomem		*sbdma_dscrcnt;	/* descriptor count register */
	void __iomem		*sbdma_curdscr;	/* current descriptor
						   address */
	void __iomem		*sbdma_oodpktlost;
						/* pkt drop (rx only) */

	/*
	 * This stuff is for maintenance of the ring
	 */

	void			*sbdma_dscrtable_unaligned;
	struct sbdmadscr	*sbdma_dscrtable;
						/* base of descriptor table */
	struct sbdmadscr	*sbdma_dscrtable_end;
						/* end of descriptor table */
	struct sk_buff		**sbdma_ctxtable;
						/* context table, one
						   per descr */
	dma_addr_t		sbdma_dscrtable_phys;
						/* and also the phys addr */
	struct sbdmadscr	*sbdma_addptr;	/* next dscr for sw to add */
	struct sbdmadscr	*sbdma_remptr;	/* next dscr for sw
						   to remove */
};
/**********************************************************************
* Ethernet softc structure
********************************************************************* */
/*
 * Per-interface driver state: the netdev/PHY glue plus the MAC's
 * register addresses, link parameters and the two DMA channels.
 */
struct sbmac_softc {

	/*
	 * Linux-specific things
	 */

	struct net_device	*sbm_dev;	/* pointer to linux device */
	struct napi_struct	napi;
	struct phy_device	*phy_dev;	/* the associated PHY device */
	struct mii_bus		*mii_bus;	/* the MII bus */
	int			phy_irq[PHY_MAX_ADDR];
	spinlock_t		sbm_lock;	/* spin lock */
	int			sbm_devflags;	/* current device flags */

	/*
	 * Controller-specific things
	 */

	void __iomem		*sbm_base;	/* MAC's base address */
	enum sbmac_state	sbm_state;	/* current state */

	void __iomem		*sbm_macenable;	/* MAC Enable Register */
	void __iomem		*sbm_maccfg;	/* MAC Config Register */
	void __iomem		*sbm_fifocfg;	/* FIFO Config Register */
	void __iomem		*sbm_framecfg;	/* Frame Config Register */
	void __iomem		*sbm_rxfilter;	/* Receive Filter Register */
	void __iomem		*sbm_isr;	/* Interrupt Status Register */
	void __iomem		*sbm_imr;	/* Interrupt Mask Register */
	void __iomem		*sbm_mdio;	/* MDIO Register */

	enum sbmac_speed	sbm_speed;	/* current speed */
	enum sbmac_duplex	sbm_duplex;	/* current duplex */
	enum sbmac_fc		sbm_fc;		/* cur. flow control setting */
	int			sbm_pause;	/* current pause setting */
	int			sbm_link;	/* current link state */

	unsigned char		sbm_hwaddr[ETHER_ADDR_LEN];

	struct sbmacdma		sbm_txdma;	/* only channel 0 for now */
	struct sbmacdma		sbm_rxdma;
	int			rx_hw_checksum;
	int			sbe_idx;
};
/**********************************************************************
* Externs
********************************************************************* */
/**********************************************************************
* Prototypes
********************************************************************* */
/* DMA ring management */
static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
			  int txrx, int maxdescr);
static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
			       struct sk_buff *m);
static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
static void sbdma_emptyring(struct sbmacdma *d);
static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
			    int work_to_do, int poll);
static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
			     int poll);
/* MAC channel control */
static int sbmac_initctx(struct sbmac_softc *s);
static void sbmac_channel_start(struct sbmac_softc *s);
static void sbmac_channel_stop(struct sbmac_softc *s);
static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *,
						enum sbmac_state);
static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff);
static uint64_t sbmac_addr2reg(unsigned char *ptr);
static irqreturn_t sbmac_intr(int irq, void *dev_instance);
static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
static void sbmac_setmulti(struct sbmac_softc *sc);
static int sbmac_init(struct platform_device *pldev, long long base);
static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed);
static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
			    enum sbmac_fc fc);
/* net_device / NAPI entry points */
static int sbmac_open(struct net_device *dev);
static void sbmac_tx_timeout (struct net_device *dev);
static void sbmac_set_rx_mode(struct net_device *dev);
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sbmac_close(struct net_device *dev);
static int sbmac_poll(struct napi_struct *napi, int budget);
/* Bit-banged MII/PHY access */
static void sbmac_mii_poll(struct net_device *dev);
static int sbmac_mii_probe(struct net_device *dev);

static void sbmac_mii_sync(void __iomem *sbm_mdio);
static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
			       int bitcnt);
static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx);
static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
			   u16 val);


/**********************************************************************
 *  Globals
 ********************************************************************* */

/* names used for the platform driver and the MDIO bus */
static char sbmac_string[] = "sb1250-mac";
static char sbmac_mdio_string[] = "sb1250-mac-mdio";
/**********************************************************************
* MDIO constants
********************************************************************* */
#define MII_COMMAND_START 0x01
#define MII_COMMAND_READ 0x02
#define MII_COMMAND_WRITE 0x01
#define MII_COMMAND_ACK 0x02
#define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */
#define ENABLE 1
#define DISABLE 0
/**********************************************************************
 *  SBMAC_MII_SYNC(sbm_mdio)
 *
 *  Send the MII preamble: hold the data line high while issuing
 *  32 clock pulses, which readies the PHY to accept a new command.
 *
 *  Input parameters:
 *  	   sbm_mdio - address of the MAC's MDIO register
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_mii_sync(void __iomem *sbm_mdio)
{
	uint64_t outbits;
	int genc;
	int pulses = 32;

	/* Preserve the GENC bit across our register writes. */
	genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

	outbits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
	__raw_writeq(outbits | genc, sbm_mdio);

	while (pulses-- > 0) {
		__raw_writeq(outbits | M_MAC_MDC | genc, sbm_mdio);
		__raw_writeq(outbits | genc, sbm_mdio);
	}
}
/**********************************************************************
 *  SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt)
 *
 *  Clock bits out to the MII, MSB first.  The bits to be sent are
 *  right-justified in the 'data' parameter.
 *
 *  Input parameters:
 *  	   sbm_mdio - address of the MAC's MDIO register
 *  	   data     - data to send
 *  	   bitcnt   - number of bits to send
 ********************************************************************* */

static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
			       int bitcnt)
{
	uint64_t out = M_MAC_MDIO_DIR_OUTPUT;
	unsigned int mask;
	int genc;

	/* Preserve the GENC bit across our register writes. */
	genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
	__raw_writeq(out | genc, sbm_mdio);

	/* Walk the mask down from the top bit, one clock per bit. */
	for (mask = 1U << (bitcnt - 1); mask != 0; mask >>= 1) {
		if (data & mask)
			out |= M_MAC_MDIO_OUT;
		else
			out &= ~M_MAC_MDIO_OUT;

		__raw_writeq(out | genc, sbm_mdio);
		__raw_writeq(out | M_MAC_MDC | genc, sbm_mdio);
		__raw_writeq(out | genc, sbm_mdio);
	}
}
/**********************************************************************
 *  SBMAC_MII_READ(bus, phyaddr, regidx)
 *  Read a PHY register.
 *
 *  Input parameters:
 *  	   bus     - MDIO bus handle
 *  	   phyaddr - PHY's address
 *  	   regidx  - index of register to read
 *
 *  Return value:
 *  	   value read, or 0xffff if an error occurred.
 ********************************************************************* */

static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
{
	struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
	void __iomem *sbm_mdio = sc->sbm_mdio;
	int idx;
	int error;
	int regval;
	int mac_mdio_genc;

	/*
	 * Synchronize ourselves so that the PHY knows the next
	 * thing coming down is a command
	 */
	sbmac_mii_sync(sbm_mdio);

	/*
	 * Send the data to the PHY.  The sequence is
	 *	a "start" command (2 bits)
	 *	a "read" command (2 bits)
	 *	the PHY addr (5 bits)
	 *	the register index (5 bits)
	 */
	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2);
	sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
	sbmac_mii_senddata(sbm_mdio, regidx, 5);

	/* Preserve the GENC bit across the direction change below. */
	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

	/*
	 * Switch the port around without a clock transition.
	 */
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);

	/*
	 * Send out a clock pulse to signal we want the status
	 */
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
		     sbm_mdio);
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);

	/*
	 * If an error occurred, the PHY will signal '1' back
	 */
	error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN;

	/*
	 * Issue an 'idle' clock pulse, but keep the direction
	 * the same.
	 */
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
		     sbm_mdio);
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);

	/* Clock in the 16 data bits, MSB first.  On error we still run
	   the clock but never set any result bits. */
	regval = 0;

	for (idx = 0; idx < 16; idx++) {
		regval <<= 1;

		if (error == 0) {
			if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN)
				regval |= 1;
		}

		__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
			     sbm_mdio);
		__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
	}

	/* Switch back to output */
	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);

	if (error == 0)
		return regval;
	return 0xffff;
}
/**********************************************************************
 *  SBMAC_MII_WRITE(bus, phyaddr, regidx, regval)
 *
 *  Write a value to a PHY register.
 *
 *  Input parameters:
 *  	   bus     - MDIO bus handle
 *  	   phyaddr - PHY to use
 *  	   regidx  - register within the PHY
 *  	   regval  - data to write to register
 *
 *  Return value:
 *  	   0 for success
 ********************************************************************* */

static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
			   u16 regval)
{
	struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
	void __iomem *mdio = sc->sbm_mdio;
	int genc;

	/*
	 * Preamble, then the write frame: start, write opcode, PHY
	 * address, register index, turnaround, and the 16 data bits.
	 */
	sbmac_mii_sync(mdio);

	sbmac_mii_senddata(mdio, MII_COMMAND_START, 2);
	sbmac_mii_senddata(mdio, MII_COMMAND_WRITE, 2);
	sbmac_mii_senddata(mdio, phyaddr, 5);
	sbmac_mii_senddata(mdio, regidx, 5);
	sbmac_mii_senddata(mdio, MII_COMMAND_ACK, 2);
	sbmac_mii_senddata(mdio, regval, 16);

	/* Leave the port driving output, preserving the GENC bit. */
	genc = __raw_readq(mdio) & M_MAC_GENC;
	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | genc, mdio);

	return 0;
}
/**********************************************************************
 *  SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
 *
 *  Initialize a DMA channel context.  Since there are potentially
 *  eight DMA channels per MAC, it's nice to do this in a standard
 *  way.
 *
 *  Input parameters:
 *  	   d - struct sbmacdma (DMA channel context)
 *  	   s - struct sbmac_softc (pointer to a MAC)
 *  	   chan - channel number (0..1 right now)
 *  	   txrx - Identifies DMA_TX or DMA_RX for channel direction
 *  	   maxdescr - number of descriptors
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
			  int txrx, int maxdescr)
{
#ifdef CONFIG_SBMAC_COALESCE
	int int_pktcnt, int_timeout;
#endif

	/*
	 * Save away interesting stuff in the structure
	 */
	d->sbdma_eth = s;
	d->sbdma_channel = chan;
	d->sbdma_txdir = txrx;

#if 0
	/* RMON clearing */
	s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
#endif

	/* Zero the MAC's RMON statistics counters. */
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR);

	/*
	 * initialize register pointers
	 */
	d->sbdma_config0 =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
	d->sbdma_config1 =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
	d->sbdma_dscrbase =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
	d->sbdma_dscrcnt =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
	d->sbdma_curdscr =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
	/* Out-of-descriptor packet-lost counter exists only on RX channels. */
	if (d->sbdma_txdir)
		d->sbdma_oodpktlost = NULL;
	else
		d->sbdma_oodpktlost =
			s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX);

	/*
	 * Allocate memory for the ring.  One extra descriptor's worth is
	 * allocated so the table can be realigned below.
	 *
	 * NOTE(review): the kcalloc() results are not checked here; a
	 * failed allocation would fault later when the ring is used —
	 * TODO confirm whether callers guarantee this cannot happen.
	 */
	d->sbdma_maxdescr = maxdescr;

	d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1,
					       sizeof(*d->sbdma_dscrtable),
					       GFP_KERNEL);

	/*
	 * The descriptor table must be aligned to at least 16 bytes or the
	 * MAC will corrupt it.
	 */
	d->sbdma_dscrtable = (struct sbdmadscr *)
		ALIGN((unsigned long)d->sbdma_dscrtable_unaligned,
		      sizeof(*d->sbdma_dscrtable));

	d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;

	d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);

	/*
	 * And context table
	 */
	d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr,
				    sizeof(*d->sbdma_ctxtable), GFP_KERNEL);

#ifdef CONFIG_SBMAC_COALESCE
	/*
	 * Setup Rx/Tx DMA coalescing defaults.  Module parameters
	 * override the built-in defaults when non-zero.
	 */
	int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx;
	if ( int_pktcnt ) {
		d->sbdma_int_pktcnt = int_pktcnt;
	} else {
		d->sbdma_int_pktcnt = 1;
	}

	int_timeout = (txrx == DMA_TX) ? int_timeout_tx : int_timeout_rx;
	if ( int_timeout ) {
		d->sbdma_int_timeout = int_timeout;
	} else {
		d->sbdma_int_timeout = 0;
	}
#endif
}
/**********************************************************************
 *  SBDMA_CHANNEL_START(d)
 *
 *  Initialize the hardware registers for a DMA channel.
 *
 *  Input parameters:
 *  	   d - DMA channel to init (context must be previously init'd)
 *  	   rxtx - DMA_RX or DMA_TX depending on what type of channel
 *  	          (currently unused by the body)
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_channel_start(struct sbmacdma *d, int rxtx)
{
	/*
	 * Turn on the DMA channel
	 */

#ifdef CONFIG_SBMAC_COALESCE
	/* Program interrupt coalescing: timeout in config1, EOP
	   interrupt enable + packet count threshold in config0. */
	__raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
		     0, d->sbdma_config1);
	__raw_writeq(M_DMA_EOP_INT_EN |
		     V_DMA_RINGSZ(d->sbdma_maxdescr) |
		     V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
		     0, d->sbdma_config0);
#else
	__raw_writeq(0, d->sbdma_config1);
	__raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
		     0, d->sbdma_config0);
#endif

	/* Point the hardware at the (physical) descriptor ring. */
	__raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);

	/*
	 * Initialize ring pointers: ring starts out empty.
	 */
	d->sbdma_addptr = d->sbdma_dscrtable;
	d->sbdma_remptr = d->sbdma_dscrtable;
}
/**********************************************************************
 *  SBDMA_CHANNEL_STOP(d)
 *
 *  Shut down the hardware registers for a DMA channel.
 *
 *  Input parameters:
 *  	   d - DMA channel to stop (context must be previously init'd)
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_channel_stop(struct sbmacdma *d)
{
	/*
	 * Turn off the DMA channel
	 */
	__raw_writeq(0, d->sbdma_config1);

	__raw_writeq(0, d->sbdma_dscrbase);

	__raw_writeq(0, d->sbdma_config0);

	/*
	 * Zero ring pointers
	 */
	d->sbdma_addptr = NULL;
	d->sbdma_remptr = NULL;
}
static inline void sbdma_align_skb(struct sk_buff *skb,
unsigned int power2, unsigned int offset)
{
unsigned char *addr = skb->data;
unsigned char *newaddr = PTR_ALIGN(addr, power2);
skb_reserve(skb, newaddr - addr + offset);
}
/**********************************************************************
 *  SBDMA_ADD_RCVBUFFER(d,sb)
 *
 *  Add a buffer to the specified DMA channel.   For receive channels,
 *  this queues a buffer for inbound packets.
 *
 *  Input parameters:
 *  	   sc - softc structure
 *  	   d - DMA channel descriptor
 * 	   sb - sk_buff to add, or NULL if we should allocate one
 *
 *  Return value:
 *  	   0 if buffer added successfully
 *  	   -ENOSPC if the ring is full
 *  	   -ENOBUFS if an sk_buff could not be allocated
 ********************************************************************* */

static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
			       struct sk_buff *sb)
{
	struct net_device *dev = sc->sbm_dev;
	struct sbdmadscr *dsc;
	struct sbdmadscr *nextdsc;
	struct sk_buff *sb_new = NULL;
	int pktsize = ENET_PACKET_SIZE;

	/* get pointer to our current place in the ring */

	dsc = d->sbdma_addptr;
	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);

	/*
	 * figure out if the ring is full - if the next descriptor
	 * is the same as the one that we're going to remove from
	 * the ring, the ring is full
	 */

	if (nextdsc == d->sbdma_remptr) {
		return -ENOSPC;
	}

	/*
	 * Allocate a sk_buff if we don't already have one.
	 * If we do have an sk_buff, reset it so that it's empty.
	 *
	 * Note: sk_buffs don't seem to be guaranteed to have any sort
	 * of alignment when they are allocated.  Therefore, allocate enough
	 * extra space to make sure that:
	 *
	 *    1. the data does not start in the middle of a cache line.
	 *    2. The data does not end in the middle of a cache line
	 *    3. The buffer can be aligned such that the IP addresses are
	 *       naturally aligned.
	 *
	 *  Remember, the SOCs MAC writes whole cache lines at a time,
	 *  without reading the old contents first.  So, if the sk_buff's
	 *  data portion starts in the middle of a cache line, the SOC
	 *  DMA will trash the beginning (and ending) portions.
	 */

	if (sb == NULL) {
		sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
					       SMP_CACHE_BYTES * 2 +
					       NET_IP_ALIGN);
		if (sb_new == NULL) {
			pr_info("%s: sk_buff allocation failed\n",
			       d->sbdma_eth->sbm_dev->name);
			return -ENOBUFS;
		}

		sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
	}
	else {
		sb_new = sb;
		/*
		 * nothing special to reinit buffer, it's already aligned
		 * and sb->data already points to a good place.
		 */
	}

	/*
	 * fill in the descriptor
	 */

#ifdef CONFIG_SBMAC_COALESCE
	/*
	 * Do not interrupt per DMA transfer.
	 */
	dsc->dscr_a = virt_to_phys(sb_new->data) |
		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
#else
	dsc->dscr_a = virt_to_phys(sb_new->data) |
		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
		M_DMA_DSCRA_INTERRUPT;
#endif

	/* receiving: no options */
	dsc->dscr_b = 0;

	/*
	 * fill in the context: remember the sk_buff so the RX path can
	 * retrieve it when the descriptor completes
	 */

	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;

	/*
	 * point at next packet
	 */

	d->sbdma_addptr = nextdsc;

	/*
	 * Give the buffer to the DMA engine.
	 */

	__raw_writeq(1, d->sbdma_dscrcnt);

	return 0;					/* we did it */
}
/**********************************************************************
 *  SBDMA_ADD_TXBUFFER(d,sb)
 *
 *  Add a transmit buffer to the specified DMA channel, causing a
 *  transmit to start.
 *
 *  Input parameters:
 *  	   d - DMA channel descriptor
 * 	   sb - sk_buff to add
 *
 *  Return value:
 *  	   0 transmit queued successfully
 *  	   -ENOSPC if the ring is full
 ********************************************************************* */

static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb)
{
	struct sbdmadscr *dsc;
	struct sbdmadscr *nextdsc;
	uint64_t phys;
	uint64_t ncb;
	int length;

	/* get pointer to our current place in the ring */

	dsc = d->sbdma_addptr;
	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);

	/*
	 * figure out if the ring is full - if the next descriptor
	 * is the same as the one that we're going to remove from
	 * the ring, the ring is full
	 */

	if (nextdsc == d->sbdma_remptr) {
		return -ENOSPC;
	}

	/*
	 * Under Linux, it's not necessary to copy/coalesce buffers
	 * like it is on NetBSD.  We think they're all contiguous,
	 * but that may not be true for GBE.
	 */

	length = sb->len;

	/*
	 * fill in the descriptor.  Note that the number of cache
	 * blocks in the descriptor is the number of blocks
	 * *spanned*, so we need to add in the offset (if any)
	 * while doing the calculation.
	 */

	phys = virt_to_phys(sb->data);
	ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));

	dsc->dscr_a = phys |
		V_DMA_DSCRA_A_SIZE(ncb) |
#ifndef CONFIG_SBMAC_COALESCE
		M_DMA_DSCRA_INTERRUPT |
#endif
		M_DMA_ETHTX_SOP;

	/* transmitting: set outbound options and length */

	dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
		V_DMA_DSCRB_PKT_SIZE(length);

	/*
	 * fill in the context: the sk_buff is freed from here by the
	 * TX-completion path (sbdma_tx_process)
	 */

	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;

	/*
	 * point at next packet
	 */

	d->sbdma_addptr = nextdsc;

	/*
	 * Give the buffer to the DMA engine.
	 */

	__raw_writeq(1, d->sbdma_dscrcnt);

	return 0;					/* we did it */
}
/**********************************************************************
 *  SBDMA_EMPTYRING(d)
 *
 *  Free every sk_buff still attached to the specified DMA channel's
 *  context table and clear the corresponding slots.
 *
 *  Input parameters:
 *  	   d  - DMA channel
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_emptyring(struct sbmacdma *d)
{
	int i;

	for (i = 0; i < d->sbdma_maxdescr; i++) {
		struct sk_buff *skb = d->sbdma_ctxtable[i];

		if (skb != NULL) {
			dev_kfree_skb(skb);
			d->sbdma_ctxtable[i] = NULL;
		}
	}
}
/**********************************************************************
 *  SBDMA_FILLRING(d)
 *
 *  Fill the specified DMA channel (must be a receive channel)
 *  with freshly allocated sk_buffs.  One slot is left unused so
 *  the full and empty ring states remain distinguishable.
 *
 *  Input parameters:
 *  	   sc - softc structure
 *  	   d - DMA channel
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
{
	int i = 0;

	while (i++ < SBMAC_MAX_RXDESCR - 1) {
		if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
			break;
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry point: mask the MAC's interrupts, invoke the normal
 * interrupt handler by hand, then re-program the same interrupt mask
 * that sbmac_channel_start() uses.
 */
static void sbmac_netpoll(struct net_device *netdev)
{
	struct sbmac_softc *sc = netdev_priv(netdev);
	int irq = sc->sbm_dev->irq;

	/* Mask all MAC interrupts while we poll. */
	__raw_writeq(0, sc->sbm_imr);

	sbmac_intr(irq, netdev);

#ifdef CONFIG_SBMAC_COALESCE
	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
	((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
	sc->sbm_imr);
#else
	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
	(M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
#endif
}
#endif
/**********************************************************************
 *  SBDMA_RX_PROCESS(sc,d,work_to_do,poll)
 *
 *  Process "completed" receive buffers on the specified DMA channel.
 *
 *  Input parameters:
 *  	   sc - softc structure
 *  	   d - DMA channel context
 *  	   work_to_do - no. of packets to process before enabling interrupt
 *  	                again (for NAPI)
 *  	   poll - 1: using polling (for NAPI)
 *
 *  Return value:
 *  	   number of packets handed to the network stack
 ********************************************************************* */

static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
			    int work_to_do, int poll)
{
	struct net_device *dev = sc->sbm_dev;
	int curidx;
	int hwidx;
	struct sbdmadscr *dsc;
	struct sk_buff *sb;
	int len;
	int work_done = 0;
	int dropped = 0;

	prefetch(d);

again:
	/* Check if the HW dropped any frames */
	dev->stats.rx_fifo_errors
	    += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
	__raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);

	while (work_to_do-- > 0) {
		/*
		 * figure out where we are (as an index) and where
		 * the hardware is (also as an index)
		 *
		 * This could be done faster if (for example) the
		 * descriptor table was page-aligned and contiguous in
		 * both virtual and physical memory -- you could then
		 * just compare the low-order bits of the virtual address
		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
		 */
		dsc = d->sbdma_remptr;
		curidx = dsc - d->sbdma_dscrtable;

		prefetch(dsc);
		prefetch(&d->sbdma_ctxtable[curidx]);

		hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
			 d->sbdma_dscrtable_phys) /
			sizeof(*d->sbdma_dscrtable);

		/*
		 * If they're the same, that means we've processed all
		 * of the descriptors up to (but not including) the one that
		 * the hardware is working on right now.
		 */
		if (curidx == hwidx)
			goto done;

		/*
		 * Otherwise, get the packet's sk_buff ptr back
		 */
		sb = d->sbdma_ctxtable[curidx];
		d->sbdma_ctxtable[curidx] = NULL;

		/* Packet length, minus the 4-byte FCS. */
		len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;

		/*
		 * Check packet status.  If good, process it.
		 * If not, silently drop it and put it back on the
		 * receive ring.
		 */
		if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {
			/*
			 * Add a new buffer to replace the old one.  If we fail
			 * to allocate a buffer, we're going to drop this
			 * packet and put it right back on the receive ring.
			 */
			if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
				     -ENOBUFS)) {
				dev->stats.rx_dropped++;
				/* Re-add old buffer */
				sbdma_add_rcvbuffer(sc, d, sb);
				/* No point in continuing at the moment */
				printk(KERN_ERR "dropped packet (1)\n");
				d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
				goto done;
			} else {
				/*
				 * Set length into the packet
				 */
				skb_put(sb,len);

				/*
				 * Buffer has been replaced on the
				 * receive ring.  Pass the buffer to
				 * the kernel
				 */
				sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
				/* Check hw IPv4/TCP checksum if supported */
				if (sc->rx_hw_checksum == ENABLE) {
					if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
					    !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
						sb->ip_summed = CHECKSUM_UNNECESSARY;
						/* don't need to set sb->csum */
					} else {
						skb_checksum_none_assert(sb);
					}
				}
				prefetch(sb->data);
				prefetch((const void *)(((char *)sb->data)+32));
				if (poll)
					dropped = netif_receive_skb(sb);
				else
					dropped = netif_rx(sb);

				if (dropped == NET_RX_DROP) {
					dev->stats.rx_dropped++;
					d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
					goto done;
				}
				else {
					dev->stats.rx_bytes += len;
					dev->stats.rx_packets++;
				}
			}
		} else {
			/*
			 * Packet was mangled somehow.  Just drop it and
			 * put it back on the receive ring.
			 */
			dev->stats.rx_errors++;
			sbdma_add_rcvbuffer(sc, d, sb);
		}


		/*
		 * .. and advance to the next buffer.
		 */

		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
		work_done++;
	}
	if (!poll) {
		work_to_do = 32;
		goto again; /* collect fifo drop statistics again */
	}
done:
	return work_done;
}
/**********************************************************************
 *  SBDMA_TX_PROCESS(sc,d)
 *
 *  Process "completed" transmit buffers on the specified DMA channel.
 *  This is normally called within the interrupt service routine.
 *  Note that this isn't really ideal for priority channels, since
 *  it processes all of the packets on a given channel before
 *  returning.
 *
 *  Input parameters:
 *      sc - softc structure
 *  	   d - DMA channel context
 *  	   poll - 1: using polling (for NAPI); currently unused by the
 *  	          body
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
			     int poll)
{
	struct net_device *dev = sc->sbm_dev;
	int curidx;
	int hwidx;
	struct sbdmadscr *dsc;
	struct sk_buff *sb;
	unsigned long flags;
	int packets_handled = 0;

	spin_lock_irqsave(&(sc->sbm_lock), flags);

	/* Nothing to do if the ring is empty. */
	if (d->sbdma_remptr == d->sbdma_addptr)
		goto end_unlock;

	/* Snapshot the hardware's current descriptor index once; the
	   loop below drains everything up to (not including) it. */
	hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
		 d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable);

	for (;;) {
		/*
		 * figure out where we are (as an index) and where
		 * the hardware is (also as an index)
		 *
		 * This could be done faster if (for example) the
		 * descriptor table was page-aligned and contiguous in
		 * both virtual and physical memory -- you could then
		 * just compare the low-order bits of the virtual address
		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
		 */

		curidx = d->sbdma_remptr - d->sbdma_dscrtable;

		/*
		 * If they're the same, that means we've processed all
		 * of the descriptors up to (but not including) the one that
		 * the hardware is working on right now.
		 */

		if (curidx == hwidx)
			break;

		/*
		 * Otherwise, get the packet's sk_buff ptr back
		 */

		dsc = &(d->sbdma_dscrtable[curidx]);
		sb = d->sbdma_ctxtable[curidx];
		d->sbdma_ctxtable[curidx] = NULL;

		/*
		 * Stats
		 */

		dev->stats.tx_bytes += sb->len;
		dev->stats.tx_packets++;

		/*
		 * for transmits, we just free buffers.
		 */

		dev_kfree_skb_irq(sb);

		/*
		 * .. and advance to the next buffer.
		 */

		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);

		packets_handled++;

	}

	/*
	 * Decide if we should wake up the protocol or not.
	 * Other drivers seem to do this when we reach a low
	 * watermark on the transmit queue.
	 */

	if (packets_handled)
		netif_wake_queue(d->sbdma_eth->sbm_dev);

end_unlock:
	spin_unlock_irqrestore(&(sc->sbm_lock), flags);

}
/**********************************************************************
 *  SBMAC_INITCTX(s)
 *
 *  Initialize an Ethernet context structure - this is called
 *  once per MAC on the 1250.  Memory is allocated here, so don't
 *  call it again from inside the ioctl routines that bring the
 *  interface up/down
 *
 *  Input parameters:
 *  	   s - sbmac context structure
 *
 *  Return value:
 *  	   0
 ********************************************************************* */

static int sbmac_initctx(struct sbmac_softc *s)
{
	/* Cache the I/O addresses of the MAC's control registers. */
	s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
	s->sbm_maccfg    = s->sbm_base + R_MAC_CFG;
	s->sbm_fifocfg   = s->sbm_base + R_MAC_THRSH_CFG;
	s->sbm_framecfg  = s->sbm_base + R_MAC_FRAMECFG;
	s->sbm_rxfilter  = s->sbm_base + R_MAC_ADFILTER_CFG;
	s->sbm_isr       = s->sbm_base + R_MAC_STATUS;
	s->sbm_imr       = s->sbm_base + R_MAC_INT_MASK;
	s->sbm_mdio      = s->sbm_base + R_MAC_MDIO;

	/*
	 * Set up one TX and one RX DMA channel.  This allocates the
	 * descriptor and context tables, so it must run exactly once
	 * per MAC.
	 */
	sbdma_initctx(&s->sbm_txdma, s, 0, DMA_TX, SBMAC_MAX_TXDESCR);
	sbdma_initctx(&s->sbm_rxdma, s, 0, DMA_RX, SBMAC_MAX_RXDESCR);

	/* The MAC starts out switched off. */
	s->sbm_state = sbmac_state_off;

	return 0;
}
/*
 * Free the descriptor and context tables allocated by sbdma_initctx().
 * kfree(NULL) is a no-op, so no NULL guards are needed; the pointers
 * are cleared afterwards to guard against reuse/double free.
 */
static void sbdma_uninitctx(struct sbmacdma *d)
{
	kfree(d->sbdma_dscrtable_unaligned);
	d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;

	kfree(d->sbdma_ctxtable);
	d->sbdma_ctxtable = NULL;
}
/* Tear down both DMA channel contexts for this MAC. */
static void sbmac_uninitctx(struct sbmac_softc *sc)
{
	sbdma_uninitctx(&sc->sbm_txdma);
	sbdma_uninitctx(&sc->sbm_rxdma);
}
/**********************************************************************
 *  SBMAC_CHANNEL_START(s)
 *
 *  Start packet processing on this MAC.
 *
 *  Input parameters:
 *  	   s - sbmac structure
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_channel_start(struct sbmac_softc *s)
{
	uint64_t reg;
	void __iomem *port;
	uint64_t cfg,fifo,framecfg;
	int idx, th_value;

	/*
	 * Don't do this if running
	 */

	if (s->sbm_state == sbmac_state_on)
		return;

	/*
	 * Bring the controller out of reset, but leave it off.
	 */

	__raw_writeq(0, s->sbm_macenable);

	/*
	 * Ignore all received packets
	 */

	__raw_writeq(0, s->sbm_rxfilter);

	/*
	 * Calculate values for various control registers.
	 */

	cfg = M_MAC_RETRY_EN |
		M_MAC_TX_HOLD_SOP_EN |
		V_MAC_TX_PAUSE_CNT_16K |
		M_MAC_AP_STAT_EN |
		M_MAC_FAST_SYNC |
		M_MAC_SS_EN |
		0;

	/*
	 * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars
	 * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above
	 * Use a larger RD_THRSH for gigabit
	 */
	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
		th_value = 28;
	else
		th_value = 64;

	fifo = V_MAC_TX_WR_THRSH(4) |	/* Must be '4' or '8' */
		((s->sbm_speed == sbmac_speed_1000)
		 ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
		V_MAC_TX_RL_THRSH(4) |
		V_MAC_RX_PL_THRSH(4) |
		V_MAC_RX_RD_THRSH(4) |	/* Must be '4' */
		V_MAC_RX_RL_THRSH(8) |
		0;

	framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
		V_MAC_MAX_FRAMESZ_DEFAULT |
		V_MAC_BACKOFF_SEL(1);

	/*
	 * Clear out the hash address map
	 */

	port = s->sbm_base + R_MAC_HASH_BASE;
	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}

	/*
	 * Clear out the exact-match table
	 */

	port = s->sbm_base + R_MAC_ADDR_BASE;
	for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}

	/*
	 * Clear out the DMA Channel mapping table registers
	 */

	port = s->sbm_base + R_MAC_CHUP0_BASE;
	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}


	port = s->sbm_base + R_MAC_CHLO0_BASE;
	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
		__raw_writeq(0, port);
		port += sizeof(uint64_t);
	}

	/*
	 * Program the hardware address.  It goes into the hardware-address
	 * register as well as the first filter register.
	 */

	reg = sbmac_addr2reg(s->sbm_hwaddr);

	port = s->sbm_base + R_MAC_ADDR_BASE;
	__raw_writeq(reg, port);
	port = s->sbm_base + R_MAC_ETHERNET_ADDR;

#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
	/*
	 * Pass1 SOCs do not receive packets addressed to the
	 * destination address in the R_MAC_ETHERNET_ADDR register.
	 * Set the value to zero.
	 */
	__raw_writeq(0, port);
#else
	__raw_writeq(reg, port);
#endif

	/*
	 * Set the receive filter for no packets, and write values
	 * to the various config registers
	 */

	__raw_writeq(0, s->sbm_rxfilter);
	__raw_writeq(0, s->sbm_imr);
	__raw_writeq(framecfg, s->sbm_framecfg);
	__raw_writeq(fifo, s->sbm_fifocfg);
	__raw_writeq(cfg, s->sbm_maccfg);

	/*
	 * Initialize DMA channels (rings should be ok now)
	 */

	sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
	sbdma_channel_start(&(s->sbm_txdma), DMA_TX);

	/*
	 * Configure the speed, duplex, and flow control
	 */

	sbmac_set_speed(s,s->sbm_speed);
	sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);

	/*
	 * Fill the receive ring
	 */

	sbdma_fillring(s, &(s->sbm_rxdma));

	/*
	 * Turn on the rest of the bits in the enable register
	 */

#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
	__raw_writeq(M_MAC_RXDMA_EN0 |
		       M_MAC_TXDMA_EN0, s->sbm_macenable);
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
	__raw_writeq(M_MAC_RXDMA_EN0 |
		       M_MAC_TXDMA_EN0 |
		       M_MAC_RX_ENABLE |
		       M_MAC_TX_ENABLE, s->sbm_macenable);
#else
#error invalid SiByte MAC configuration
#endif

	/* Unmask the TX/RX channel-0 interrupts. */
#ifdef CONFIG_SBMAC_COALESCE
	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
		       ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
#else
	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
		       (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
#endif

	/*
	 * Enable receiving unicasts and broadcasts
	 */

	__raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);

	/*
	 * we're running now.
	 */

	s->sbm_state = sbmac_state_on;

	/*
	 * Program multicast addresses
	 */

	sbmac_setmulti(s);

	/*
	 * If channel was in promiscuous mode before, turn that on
	 */

	if (s->sbm_devflags & IFF_PROMISC) {
		sbmac_promiscuous_mode(s,1);
	}

}
/**********************************************************************
 *  SBMAC_CHANNEL_STOP(s)
 *
 *  Stop packet processing on this MAC.
 *
 *  Input parameters:
 *  	   s - sbmac structure
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_channel_stop(struct sbmac_softc *s)
{
	/* don't do this if already stopped */

	if (s->sbm_state == sbmac_state_off)
		return;

	/* don't accept any packets, disable all interrupts */

	__raw_writeq(0, s->sbm_rxfilter);
	__raw_writeq(0, s->sbm_imr);

	/* Turn off ticker */

	/* XXX */

	/* turn off receiver and transmitter */

	__raw_writeq(0, s->sbm_macenable);

	/* We're stopped now. */

	s->sbm_state = sbmac_state_off;

	/*
	 * Stop DMA channels (rings should be ok now)
	 */

	sbdma_channel_stop(&(s->sbm_rxdma));
	sbdma_channel_stop(&(s->sbm_txdma));

	/* Empty the receive and transmit rings */

	sbdma_emptyring(&(s->sbm_rxdma));
	sbdma_emptyring(&(s->sbm_txdma));

}
/**********************************************************************
* SBMAC_SET_CHANNEL_STATE(state)
*
* Set the channel's state ON or OFF
*
* Input parameters:
* state - new state
*
* Return value:
* old state
********************************************************************* */
static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc,
enum sbmac_state state)
{
enum sbmac_state oldstate = sc->sbm_state;
/*
* If same as previous state, return
*/
if (state == oldstate) {
return oldstate;
}
/*
* If new state is ON, turn channel on
*/
if (state == sbmac_state_on) {
sbmac_channel_start(sc);
}
else {
sbmac_channel_stop(sc);
}
/*
* Return previous state
*/
return oldstate;
}
/**********************************************************************
 *  SBMAC_PROMISCUOUS_MODE(sc,onoff)
 *
 *  Turn promiscuous mode on or off by toggling the "all packets"
 *  bit in the receive filter register.
 *
 *  Input parameters:
 *  	   sc - softc
 *      onoff - 1 to turn on, 0 to turn off
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
{
	uint64_t rxfilt;

	/* Nothing to do unless the channel is running. */
	if (sc->sbm_state != sbmac_state_on)
		return;

	rxfilt = __raw_readq(sc->sbm_rxfilter);
	if (onoff)
		rxfilt |= M_MAC_ALLPKT_EN;
	else
		rxfilt &= ~M_MAC_ALLPKT_EN;
	__raw_writeq(rxfilt, sc->sbm_rxfilter);
}
/**********************************************************************
* SBMAC_SETIPHDR_OFFSET(sc,onoff)
*
* Set the iphdr offset as 15 assuming ethernet encapsulation
*
* Input parameters:
* sc - softc
*
* Return value:
* nothing
********************************************************************* */
/*
 * Program the IP-header offset used by the Rx hardware checksum engine
 * (hard-coded to 15 for Ethernet encapsulation) and record whether this
 * SOC revision supports Rx hardware checksumming at all.
 *
 * Fix: the original wrote `reg &= ~M_MAC_IPHDR_OFFSET |
 * V_MAC_IPHDR_OFFSET(15);`, which — because of operator precedence —
 * only MASKS reg with (~M | V(15)) and never actually sets the offset
 * field.  Clear the field, then OR in the new value.
 */
static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
{
	uint64_t reg;

	reg = __raw_readq(sc->sbm_rxfilter);
	reg &= ~M_MAC_IPHDR_OFFSET;	/* clear the old offset field */
	reg |= V_MAC_IPHDR_OFFSET(15);	/* then program offset = 15 */
	__raw_writeq(reg, sc->sbm_rxfilter);

	/* BCM1250 pass1 didn't have hardware checksum.  Everything
	   later does. */
	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) {
		sc->rx_hw_checksum = DISABLE;
	} else {
		sc->rx_hw_checksum = ENABLE;
	}
}
/**********************************************************************
* SBMAC_ADDR2REG(ptr)
*
* Convert six bytes into the 64-bit register value that
* we typically write into the SBMAC's address/mcast registers
*
* Input parameters:
* ptr - pointer to 6 bytes
*
* Return value:
* register value
********************************************************************* */
/*
 * Pack a 6-byte Ethernet address into the 64-bit value expected by the
 * SBMAC address/multicast registers: byte 0 of the address ends up in
 * the least-significant byte of the register.
 */
static uint64_t sbmac_addr2reg(unsigned char *ptr)
{
	uint64_t reg = 0;
	int i;

	/* Accumulate from the last byte down so ptr[0] lands in bits 7:0. */
	for (i = 5; i >= 0; i--) {
		reg <<= 8;
		reg |= (uint64_t) ptr[i];
	}

	return reg;
}
/**********************************************************************
* SBMAC_SET_SPEED(s,speed)
*
* Configure LAN speed for the specified MAC.
* Warning: must be called when MAC is off!
*
* Input parameters:
* s - sbmac structure
* speed - speed to set MAC to (see enum sbmac_speed)
*
* Return value:
* 1 if successful
* 0 indicates invalid parameters
********************************************************************* */
static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed)
{
uint64_t cfg;
uint64_t framecfg;
/*
* Save new current values
*/
s->sbm_speed = speed;
if (s->sbm_state == sbmac_state_on)
return 0; /* save for next restart */
/*
* Read current register values
*/
cfg = __raw_readq(s->sbm_maccfg);
framecfg = __raw_readq(s->sbm_framecfg);
/*
* Mask out the stuff we want to change
*/
cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
M_MAC_SLOT_SIZE);
/*
* Now add in the new bits
*/
switch (speed) {
case sbmac_speed_10:
framecfg |= V_MAC_IFG_RX_10 |
V_MAC_IFG_TX_10 |
K_MAC_IFG_THRSH_10 |
V_MAC_SLOT_SIZE_10;
cfg |= V_MAC_SPEED_SEL_10MBPS;
break;
case sbmac_speed_100:
framecfg |= V_MAC_IFG_RX_100 |
V_MAC_IFG_TX_100 |
V_MAC_IFG_THRSH_100 |
V_MAC_SLOT_SIZE_100;
cfg |= V_MAC_SPEED_SEL_100MBPS ;
break;
case sbmac_speed_1000:
framecfg |= V_MAC_IFG_RX_1000 |
V_MAC_IFG_TX_1000 |
V_MAC_IFG_THRSH_1000 |
V_MAC_SLOT_SIZE_1000;
cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
break;
default:
return 0;
}
/*
* Send the bits back to the hardware
*/
__raw_writeq(framecfg, s->sbm_framecfg);
__raw_writeq(cfg, s->sbm_maccfg);
return 1;
}
/**********************************************************************
* SBMAC_SET_DUPLEX(s,duplex,fc)
*
* Set Ethernet duplex and flow control options for this MAC
* Warning: must be called when MAC is off!
*
* Input parameters:
* s - sbmac structure
* duplex - duplex setting (see enum sbmac_duplex)
* fc - flow control setting (see enum sbmac_fc)
*
* Return value:
* 1 if ok
* 0 if an invalid parameter combination was specified
********************************************************************* */
/*
 * Set Ethernet duplex and flow-control options for this MAC.
 * May only take effect while the MAC is stopped: if the channel is
 * currently on, the values are recorded for the next restart and 0 is
 * returned.  Returns 1 on success, 0 for an invalid duplex/flow-control
 * combination (registers untouched in that case).
 */
static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
			    enum sbmac_fc fc)
{
	uint64_t cfg;

	/* Remember the requested settings for later restarts. */
	s->sbm_duplex = duplex;
	s->sbm_fc = fc;

	if (s->sbm_state == sbmac_state_on)
		return 0;	/* save for next restart */

	cfg = __raw_readq(s->sbm_maccfg);

	/* Clear the duplex/flow-control fields we're about to rewrite. */
	cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);

	switch (duplex) {
	case sbmac_duplex_half:
		switch (fc) {
		case sbmac_fc_disabled:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
			break;
		case sbmac_fc_collision:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
			break;
		case sbmac_fc_carrier:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
			break;
		case sbmac_fc_frame:		/* not valid in half duplex */
		default:			/* invalid selection */
			return 0;
		}
		break;
	case sbmac_duplex_full:
		switch (fc) {
		case sbmac_fc_disabled:
			cfg |= V_MAC_FC_CMD_DISABLED;
			break;
		case sbmac_fc_frame:
			cfg |= V_MAC_FC_CMD_ENABLED;
			break;
		case sbmac_fc_collision:	/* not valid in full duplex */
		case sbmac_fc_carrier:		/* not valid in full duplex */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Program the new configuration. */
	__raw_writeq(cfg, s->sbm_maccfg);

	return 1;
}
/**********************************************************************
* SBMAC_INTR()
*
* Interrupt handler for MAC interrupts
*
* Input parameters:
* MAC structure
*
* Return value:
* nothing
********************************************************************* */
/*
 * MAC interrupt handler (shared IRQ).  Reaps finished transmits
 * directly and defers receive processing to NAPI when possible.
 * Returns IRQ_HANDLED if the MAC had any interrupt pending, IRQ_NONE
 * otherwise.
 */
static irqreturn_t sbmac_intr(int irq,void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct sbmac_softc *sc = netdev_priv(dev);
	uint64_t isr;
	int handled = 0;

	/*
	 * Reading the ISR clears its bits in hardware (except the counter
	 * address field, which we mask off here).
	 */
	isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;

	if (isr == 0)
		return IRQ_RETVAL(0);	/* not ours (shared line) */

	handled = 1;

	/* Reap completed transmits on channel 0. */
	if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0))
		sbdma_tx_process(sc,&(sc->sbm_txdma), 0);

	if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
		if (napi_schedule_prep(&sc->napi)) {
			/* Mask all MAC interrupts; sbmac_poll() re-enables
			 * them when it completes. */
			__raw_writeq(0, sc->sbm_imr);
			__napi_schedule(&sc->napi);
			/* Depend on the exit from poll to reenable intr */
		}
		else {
			/* A poll is already scheduled/running: process a
			 * bounded batch inline; may leave some packets
			 * behind. */
			sbdma_rx_process(sc,&(sc->sbm_rxdma),
					 SBMAC_MAX_RXDESCR * 2, 0);
		}
	}
	return IRQ_RETVAL(handled);
}
/**********************************************************************
* SBMAC_START_TX(skb,dev)
*
* Start output on the specified interface. Basically, we
* queue as many buffers as we can until the ring fills up, or
* we run off the end of the queue, whichever comes first.
*
* Input parameters:
*
*
* Return value:
* nothing
********************************************************************* */
/*
 * ndo_start_xmit: queue one skb on the transmit DMA ring.  If the ring
 * is full, stop the queue and report NETDEV_TX_BUSY so the stack
 * requeues the skb.
 */
static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	unsigned long flags;
	int ret = NETDEV_TX_OK;

	/* Serialize against the interrupt path. */
	spin_lock_irqsave(&sc->sbm_lock, flags);

	if (sbdma_add_txbuffer(&sc->sbm_txdma, skb)) {
		/* Ring full: the stack keeps the skb and retries later. */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
	}

	spin_unlock_irqrestore(&sc->sbm_lock, flags);

	return ret;
}
/**********************************************************************
* SBMAC_SETMULTI(sc)
*
* Reprogram the multicast table into the hardware, given
* the list of multicasts associated with the interface
* structure.
*
* Input parameters:
* sc - softc
*
* Return value:
* nothing
********************************************************************* */
/*
 * Reprogram the hardware multicast filters from the device's current
 * multicast list.  Called from sbmac_set_rx_mode() on every filter
 * change.
 */
static void sbmac_setmulti(struct sbmac_softc *sc)
{
	uint64_t reg;
	void __iomem *port;
	int idx;
	struct netdev_hw_addr *ha;
	struct net_device *dev = sc->sbm_dev;

	/*
	 * Wipe the whole table first: every exact-match slot except
	 * slot 0 (which holds our station address) and the entire hash
	 * table.
	 */
	for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
		__raw_writeq(0, port);
	}
	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
		port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
		__raw_writeq(0, port);
	}

	/* Start from "no multicasts accepted". */
	reg = __raw_readq(sc->sbm_rxfilter);
	reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
	__raw_writeq(reg, sc->sbm_rxfilter);

	if (dev->flags & IFF_ALLMULTI) {
		/*
		 * Accept ALL multicasts by inverting the sense of the
		 * (cleared) multicast-enable bit.
		 */
		reg = __raw_readq(sc->sbm_rxfilter);
		reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
		__raw_writeq(reg, sc->sbm_rxfilter);
		return;
	}

	/*
	 * Program new multicast entries.  Only the perfect (exact-match)
	 * filter is used; addresses beyond the table size are dropped —
	 * the hash filter would be needed to handle overflow.
	 */
	idx = 1; /* slot 0 is the station address */
	netdev_for_each_mc_addr(ha, dev) {
		if (idx == MAC_ADDR_COUNT)
			break;
		reg = sbmac_addr2reg(ha->addr);
		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
		__raw_writeq(reg, port);
		idx++;
	}

	/*
	 * Enable multicast matching if we programmed at least one
	 * address.
	 */
	if (idx > 1) {
		reg = __raw_readq(sc->sbm_rxfilter);
		reg |= M_MAC_MCAST_EN;
		__raw_writeq(reg, sc->sbm_rxfilter);
	}
}
/*
 * ndo_change_mtu: validate and apply a new MTU.
 *
 * Fix: the original only checked the upper bound, so nonsense values
 * like 0 or 1 were accepted.  Reject anything below 68 (the minimum
 * IPv4 MTU) as well as anything larger than the MAC's maximum packet
 * size.  Returns 0 on success, -EINVAL for an out-of-range MTU.
 */
static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > ENET_PACKET_SIZE)
		return -EINVAL;
	_dev->mtu = new_mtu;
	pr_info("changing the mtu to %d\n", new_mtu);
	return 0;
}
/* net_device callbacks for this driver; see the individual handlers. */
static const struct net_device_ops sbmac_netdev_ops = {
	.ndo_open		= sbmac_open,
	.ndo_stop		= sbmac_close,
	.ndo_start_xmit		= sbmac_start_tx,
	.ndo_set_rx_mode	= sbmac_set_rx_mode,
	.ndo_tx_timeout		= sbmac_tx_timeout,
	.ndo_do_ioctl		= sbmac_mii_ioctl,
	.ndo_change_mtu		= sb1250_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sbmac_netpoll,
#endif
};
/**********************************************************************
* SBMAC_INIT(dev)
*
* Attach routine - init hardware and hook ourselves into linux
*
* Input parameters:
* dev - net_device structure
*
* Return value:
* status
********************************************************************* */
/*
 * Attach routine: read the firmware-programmed MAC address, initialize
 * the driver context and descriptor rings, register the MDIO bus, and
 * register the net device.  Returns 0 or a negative errno; partially
 * acquired resources are released in reverse order on failure.
 *
 * Fix: the original called dev_set_drvdata(&pldev->dev, sc->mii_bus)
 * after sbmac_probe() had already stored the net_device there, which
 * broke sbmac_remove() — it reads drvdata back as a
 * struct net_device *.  The drvdata set by probe is now left intact
 * (the mii_bus is reachable via netdev_priv(dev)->mii_bus).
 */
static int sbmac_init(struct platform_device *pldev, long long base)
{
	struct net_device *dev = dev_get_drvdata(&pldev->dev);
	int idx = pldev->id;
	struct sbmac_softc *sc = netdev_priv(dev);
	unsigned char *eaddr;
	uint64_t ea_reg;
	int i;
	int err;

	sc->sbm_dev = dev;
	sc->sbe_idx = idx;

	eaddr = sc->sbm_hwaddr;

	/*
	 * Read the ethernet address the firmware left programmed in the
	 * MAC's address register (stored little-endian), then clear the
	 * register.
	 */
	ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
	__raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
	for (i = 0; i < 6; i++) {
		eaddr[i] = (uint8_t) (ea_reg & 0xFF);
		ea_reg >>= 8;
	}

	for (i = 0; i < 6; i++) {
		dev->dev_addr[i] = eaddr[i];
	}

	/*
	 * Initialize context (get pointers to registers and stuff), then
	 * allocate the memory for the descriptor tables.
	 */
	sbmac_initctx(sc);

	/*
	 * Set up Linux device callins
	 */
	spin_lock_init(&(sc->sbm_lock));
	dev->netdev_ops = &sbmac_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &sc->napi, sbmac_poll, 16);

	dev->irq = UNIT_INT(idx);

	/* This is needed for PASS2 for Rx H/W checksum feature */
	sbmac_set_iphdr_offset(sc);

	sc->mii_bus = mdiobus_alloc();
	if (sc->mii_bus == NULL) {
		err = -ENOMEM;
		goto uninit_ctx;
	}

	sc->mii_bus->name = sbmac_mdio_string;
	snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
	sc->mii_bus->priv = sc;
	sc->mii_bus->read = sbmac_mii_read;
	sc->mii_bus->write = sbmac_mii_write;
	sc->mii_bus->irq = sc->phy_irq;
	for (i = 0; i < PHY_MAX_ADDR; ++i)
		sc->mii_bus->irq[i] = SBMAC_PHY_INT;

	sc->mii_bus->parent = &pldev->dev;

	/*
	 * Probe PHY address
	 */
	err = mdiobus_register(sc->mii_bus);
	if (err) {
		printk(KERN_ERR "%s: unable to register MDIO bus\n",
		       dev->name);
		goto free_mdio;
	}

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR "%s.%d: unable to register netdev\n",
		       sbmac_string, idx);
		goto unreg_mdio;
	}

	pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);

	if (sc->rx_hw_checksum == ENABLE)
		pr_info("%s: enabling TCP rcv checksum\n", dev->name);

	/*
	 * Display Ethernet address (this is called during the config
	 * process so we need to finish off the config message that
	 * was being displayed)
	 */
	pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
	       dev->name, base, eaddr);

	return 0;
unreg_mdio:
	mdiobus_unregister(sc->mii_bus);
	dev_set_drvdata(&pldev->dev, NULL);	/* dev is freed by caller */
free_mdio:
	mdiobus_free(sc->mii_bus);
uninit_ctx:
	sbmac_uninitctx(sc);
	return err;
}
/*
 * ndo_open: install the interrupt handler, attach the PHY, turn the
 * channel on, and start the queue, PHY state machine and NAPI.
 * Returns 0 or a negative errno.
 */
static int sbmac_open(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	int err;

	if (debug > 1)
		pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq);

	/*
	 * map/route interrupt (clear status first, in case something
	 * weird is pending; we haven't initialized the mac registers
	 * yet)
	 */
	__raw_readq(sc->sbm_isr);
	err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev);
	if (err) {
		printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name,
		       dev->irq);
		goto out_err;
	}

	/* Link parameters are unknown until the PHY reports in. */
	sc->sbm_speed = sbmac_speed_none;
	sc->sbm_duplex = sbmac_duplex_none;
	sc->sbm_fc = sbmac_fc_none;
	sc->sbm_pause = -1;
	sc->sbm_link = 0;

	/*
	 * Attach to the PHY
	 */
	err = sbmac_mii_probe(dev);
	if (err)
		goto out_unregister;

	/*
	 * Turn on the channel
	 */
	sbmac_set_channel_state(sc,sbmac_state_on);

	netif_start_queue(dev);

	sbmac_set_rx_mode(dev);

	phy_start(sc->phy_dev);

	napi_enable(&sc->napi);

	return 0;

out_unregister:
	/* Only the IRQ has been acquired at this point. */
	free_irq(dev->irq, dev);

out_err:
	return err;
}
/*
 * Find the first PHY present on our MDIO bus and connect to it in GMII
 * mode, with sbmac_mii_poll() as the link-change callback.  Trims the
 * advertised feature set to what this MAC supports.  Returns 0 or a
 * negative errno (-ENXIO when no PHY was found).
 */
static int sbmac_mii_probe(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	struct phy_device *phy_dev;
	int i;

	/* Use the first PHY the bus scan discovered. */
	for (i = 0; i < PHY_MAX_ADDR; i++) {
		phy_dev = sc->mii_bus->phy_map[i];
		if (phy_dev)
			break;
	}
	if (!phy_dev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENXIO;
	}

	phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0,
			      PHY_INTERFACE_MODE_GMII);
	if (IS_ERR(phy_dev)) {
		printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
		return PTR_ERR(phy_dev);
	}

	/* Remove any features not supported by the controller */
	phy_dev->supported &= SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_1000baseT_Half |
			      SUPPORTED_1000baseT_Full |
			      SUPPORTED_Autoneg |
			      SUPPORTED_MII |
			      SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause;
	phy_dev->advertising = phy_dev->supported;

	pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		dev->name, phy_dev->drv->name,
		dev_name(&phy_dev->dev), phy_dev->irq);

	sc->phy_dev = phy_dev;

	return 0;
}
/*
 * phylib link-change callback: mirror the PHY's link/speed/duplex/pause
 * state into the softc and restart the channel when any setting that
 * affects the MAC configuration changed while the channel is running.
 */
static void sbmac_mii_poll(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	struct phy_device *phy_dev = sc->phy_dev;
	unsigned long flags;
	enum sbmac_fc fc;
	int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg;

	/* Diff the PHY-reported state against our cached copy. */
	link_chg = (sc->sbm_link != phy_dev->link);
	speed_chg = (sc->sbm_speed != phy_dev->speed);
	duplex_chg = (sc->sbm_duplex != phy_dev->duplex);
	pause_chg = (sc->sbm_pause != phy_dev->pause);

	if (!link_chg && !speed_chg && !duplex_chg && !pause_chg)
		return;					/* Hmmm... nothing changed */

	if (!phy_dev->link) {
		/* Link went down: reset cached parameters to "unknown". */
		if (link_chg) {
			sc->sbm_link = phy_dev->link;
			sc->sbm_speed = sbmac_speed_none;
			sc->sbm_duplex = sbmac_duplex_none;
			sc->sbm_fc = sbmac_fc_disabled;
			sc->sbm_pause = -1;
			pr_info("%s: link unavailable\n", dev->name);
		}
		return;
	}

	/* Derive the MAC flow-control mode from duplex + pause. */
	if (phy_dev->duplex == DUPLEX_FULL) {
		if (phy_dev->pause)
			fc = sbmac_fc_frame;
		else
			fc = sbmac_fc_disabled;
	} else
		fc = sbmac_fc_collision;
	fc_chg = (sc->sbm_fc != fc);

	pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed,
		phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H');

	spin_lock_irqsave(&sc->sbm_lock, flags);

	/* Cache the new state under the lock. */
	sc->sbm_speed = phy_dev->speed;
	sc->sbm_duplex = phy_dev->duplex;
	sc->sbm_fc = fc;
	sc->sbm_pause = phy_dev->pause;
	sc->sbm_link = phy_dev->link;

	if ((speed_chg || duplex_chg || fc_chg) &&
	    sc->sbm_state != sbmac_state_off) {
		/*
		 * something changed, restart the channel
		 */
		if (debug > 1)
			pr_debug("%s: restarting channel "
				 "because PHY state changed\n", dev->name);
		sbmac_channel_stop(sc);
		sbmac_channel_start(sc);
	}

	spin_unlock_irqrestore(&sc->sbm_lock, flags);
}
/*
 * ndo_tx_timeout: the watchdog fired.  Reset the transmit-start stamp
 * (so the watchdog doesn't immediately refire), bump the error counter
 * and log a warning.  No hardware reset is attempted here.
 */
static void sbmac_tx_timeout (struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&sc->sbm_lock, flags);

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;

	spin_unlock_irqrestore(&sc->sbm_lock, flags);

	printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
}
/*
 * ndo_set_rx_mode: push the device's receive-filter flags into the
 * hardware.  Promiscuous mode is updated only when the IFF_PROMISC
 * flag actually toggled; the multicast table is reprogrammed on every
 * call.
 */
static void sbmac_set_rx_mode(struct net_device *dev)
{
	unsigned long flags;
	struct sbmac_softc *sc = netdev_priv(dev);

	spin_lock_irqsave(&sc->sbm_lock, flags);
	if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC)
		sbmac_promiscuous_mode(sc, !!(dev->flags & IFF_PROMISC));
	spin_unlock_irqrestore(&sc->sbm_lock, flags);

	/* Always rewrite the multicast filters. */
	sbmac_setmulti(sc);
}
/*
 * ndo_do_ioctl: forward MII ioctls to the attached PHY.  Fails with
 * -EINVAL when the interface is down or no PHY is attached.
 */
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct sbmac_softc *sc = netdev_priv(dev);

	if (netif_running(dev) && sc->phy_dev)
		return phy_mii_ioctl(sc->phy_dev, rq, cmd);

	return -EINVAL;
}
/*
 * ndo_stop: disable NAPI and the PHY state machine, turn the channel
 * off, stop the queue, detach the PHY, release the IRQ and drain both
 * DMA rings.  Always returns 0.
 */
static int sbmac_close(struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);

	napi_disable(&sc->napi);

	phy_stop(sc->phy_dev);

	sbmac_set_channel_state(sc, sbmac_state_off);

	netif_stop_queue(dev);

	if (debug > 1)
		pr_debug("%s: Shutting down ethercard\n", dev->name);

	phy_disconnect(sc->phy_dev);
	sc->phy_dev = NULL;
	free_irq(dev->irq, dev);

	/* Release any skbs still sitting on the rings. */
	sbdma_emptyring(&(sc->sbm_txdma));
	sbdma_emptyring(&(sc->sbm_rxdma));

	return 0;
}
/*
 * NAPI poll: process up to `budget' received packets and reap finished
 * transmits.  When less than the full budget was used, complete NAPI
 * and unmask the MAC interrupts that sbmac_intr() disabled (the mask
 * value depends on whether interrupt coalescing is compiled in).
 * Returns the number of rx packets processed.
 */
static int sbmac_poll(struct napi_struct *napi, int budget)
{
	struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
	int work_done;

	work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
	sbdma_tx_process(sc, &(sc->sbm_txdma), 1);

	if (work_done < budget) {
		napi_complete(napi);

		/* Re-enable rx/tx interrupts on channel 0. */
#ifdef CONFIG_SBMAC_COALESCE
		__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
			     ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
			     sc->sbm_imr);
#else
		__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
			     (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
#endif
	}

	return work_done;
}
/*
 * Platform-device probe: map the MAC's registers, skip MACs the
 * firmware left without an ethernet address, allocate the net_device
 * and hand off to sbmac_init().  Returns 0 (including the "MAC unused"
 * case) or a negative errno.
 */
static int __devinit sbmac_probe(struct platform_device *pldev)
{
	struct net_device *dev;
	struct sbmac_softc *sc;
	void __iomem *sbm_base;
	struct resource *res;
	u64 sbmac_orig_hwaddr;
	int err;

	res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
	BUG_ON(!res);
	sbm_base = ioremap_nocache(res->start, resource_size(res));
	if (!sbm_base) {
		printk(KERN_ERR "%s: unable to map device registers\n",
		       dev_name(&pldev->dev));
		err = -ENOMEM;
		goto out_out;
	}

	/*
	 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
	 * value for us by the firmware if we're going to use this MAC.
	 * If we find a zero, skip this MAC.
	 */
	sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
	pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev),
		 sbmac_orig_hwaddr ? "" : "not ", (long long)res->start);
	if (sbmac_orig_hwaddr == 0) {
		err = 0;		/* unused MAC is not an error */
		goto out_unmap;
	}

	/*
	 * Okay, cool. Initialize this MAC.
	 */
	dev = alloc_etherdev(sizeof(struct sbmac_softc));
	if (!dev) {
		printk(KERN_ERR "%s: unable to allocate etherdev\n",
		       dev_name(&pldev->dev));
		err = -ENOMEM;
		goto out_unmap;
	}

	dev_set_drvdata(&pldev->dev, dev);
	SET_NETDEV_DEV(dev, &pldev->dev);

	sc = netdev_priv(dev);
	sc->sbm_base = sbm_base;

	err = sbmac_init(pldev, res->start);
	if (err)
		goto out_kfree;

	return 0;

out_kfree:
	free_netdev(dev);
	/* Restore the hwaddr register sbmac_init() cleared. */
	__raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR);

out_unmap:
	iounmap(sbm_base);

out_out:
	return err;
}
/*
 * Platform-device remove: tear everything down in reverse order of
 * sbmac_probe()/sbmac_init().
 *
 * NOTE(review): sbmac_init() repoints the platform drvdata at the
 * mii_bus after probe stored the net_device there, so this
 * dev_get_drvdata() would actually return the bus, not the netdev —
 * verify and fix the drvdata usage.
 */
static int __exit sbmac_remove(struct platform_device *pldev)
{
	struct net_device *dev = dev_get_drvdata(&pldev->dev);
	struct sbmac_softc *sc = netdev_priv(dev);

	unregister_netdev(dev);
	sbmac_uninitctx(sc);
	mdiobus_unregister(sc->mii_bus);
	mdiobus_free(sc->mii_bus);
	iounmap(sc->sbm_base);
	free_netdev(dev);

	return 0;
}
/* Platform driver glue; matched by name against the platform device. */
static struct platform_driver sbmac_driver = {
	.probe = sbmac_probe,
	.remove = __exit_p(sbmac_remove),
	.driver = {
		.name = sbmac_string,
		.owner  = THIS_MODULE,
	},
};
/* Module entry point: register the platform driver. */
static int __init sbmac_init_module(void)
{
	return platform_driver_register(&sbmac_driver);
}
/* Module exit point: unregister the platform driver. */
static void __exit sbmac_cleanup_module(void)
{
	platform_driver_unregister(&sbmac_driver);
}
module_init(sbmac_init_module);
module_exit(sbmac_cleanup_module);
| gpl-2.0 |
vgstef/MuseScore | thirdparty/freetype/src/base/ftapi.c | 174 | 4042 | /***************************************************************************/
/* */
/* ftapi.c */
/* */
/* The FreeType compatibility functions (body). */
/* */
/* Copyright 2002-2015 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
/* modified, and distributed under the terms of the FreeType project */
/* license, LICENSE.TXT. By continuing to use, modify, or distribute */
/* this file you indicate that you have read the license and */
/* understand and accept it fully. */
/* */
/***************************************************************************/
#include <ft2build.h>
#include FT_LIST_H
#include FT_OUTLINE_H
#include FT_INTERNAL_OBJECTS_H
#include FT_INTERNAL_DEBUG_H
#include FT_INTERNAL_STREAM_H
#include FT_TRUETYPE_TABLES_H
#include FT_OUTLINE_H
/*************************************************************************/
/*************************************************************************/
/*************************************************************************/
/**** ****/
/**** ****/
/**** C O M P A T I B I L I T Y ****/
/**** ****/
/**** ****/
/*************************************************************************/
/*************************************************************************/
/*************************************************************************/
/* backwards compatibility API */
  /* Deprecated compatibility wrapper: forwards to FT_Stream_OpenMemory(). */
  /* The `library' argument is unused.                                     */
  FT_BASE_DEF( void )
  FT_New_Memory_Stream( FT_Library  library,
                        FT_Byte*    base,
                        FT_ULong    size,
                        FT_Stream   stream )
  {
    FT_UNUSED( library );

    FT_Stream_OpenMemory( stream, base, size );
  }
  /* Deprecated compatibility wrapper: forwards to FT_Stream_Seek(). */
  FT_BASE_DEF( FT_Error )
  FT_Seek_Stream( FT_Stream  stream,
                  FT_ULong   pos )
  {
    return FT_Stream_Seek( stream, pos );
  }
  /* Deprecated compatibility wrapper: forwards to FT_Stream_Skip(). */
  FT_BASE_DEF( FT_Error )
  FT_Skip_Stream( FT_Stream  stream,
                  FT_Long    distance )
  {
    return FT_Stream_Skip( stream, distance );
  }
  /* Deprecated compatibility wrapper: forwards to FT_Stream_Read(). */
  FT_BASE_DEF( FT_Error )
  FT_Read_Stream( FT_Stream  stream,
                  FT_Byte*   buffer,
                  FT_ULong   count )
  {
    return FT_Stream_Read( stream, buffer, count );
  }
  /* Deprecated compatibility wrapper: forwards to FT_Stream_ReadAt(). */
  FT_BASE_DEF( FT_Error )
  FT_Read_Stream_At( FT_Stream  stream,
                     FT_ULong   pos,
                     FT_Byte*   buffer,
                     FT_ULong   count )
  {
    return FT_Stream_ReadAt( stream, pos, buffer, count );
  }
  /* Deprecated compatibility wrapper: forwards to FT_Stream_ExtractFrame(). */
  FT_BASE_DEF( FT_Error )
  FT_Extract_Frame( FT_Stream  stream,
                    FT_ULong   count,
                    FT_Byte**  pbytes )
  {
    return FT_Stream_ExtractFrame( stream, count, pbytes );
  }
  /* Deprecated compatibility wrapper: forwards to FT_Stream_ReleaseFrame(). */
  FT_BASE_DEF( void )
  FT_Release_Frame( FT_Stream  stream,
                    FT_Byte**  pbytes )
  {
    FT_Stream_ReleaseFrame( stream, pbytes );
  }
  /* Deprecated compatibility wrapper: forwards to FT_Stream_EnterFrame(). */
  FT_BASE_DEF( FT_Error )
  FT_Access_Frame( FT_Stream  stream,
                   FT_ULong   count )
  {
    return FT_Stream_EnterFrame( stream, count );
  }
  /* Deprecated compatibility wrapper: forwards to FT_Stream_ExitFrame(). */
  FT_BASE_DEF( void )
  FT_Forget_Frame( FT_Stream  stream )
  {
    FT_Stream_ExitFrame( stream );
  }
/* END */
| gpl-2.0 |
iamroot12D/linux | fs/cifs/link.c | 174 | 18251 | /*
* fs/cifs/link.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "cifs_unicode.h"
#ifdef CONFIG_CIFS_SMB2
#include "smb2proto.h"
#endif
/*
* M-F Symlink Functions - Begin
*/
#define CIFS_MF_SYMLINK_LEN_OFFSET (4+1)
#define CIFS_MF_SYMLINK_MD5_OFFSET (CIFS_MF_SYMLINK_LEN_OFFSET+(4+1))
#define CIFS_MF_SYMLINK_LINK_OFFSET (CIFS_MF_SYMLINK_MD5_OFFSET+(32+1))
#define CIFS_MF_SYMLINK_LINK_MAXLEN (1024)
#define CIFS_MF_SYMLINK_FILE_SIZE \
(CIFS_MF_SYMLINK_LINK_OFFSET + CIFS_MF_SYMLINK_LINK_MAXLEN)
#define CIFS_MF_SYMLINK_LEN_FORMAT "XSym\n%04u\n"
#define CIFS_MF_SYMLINK_MD5_FORMAT \
"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n"
#define CIFS_MF_SYMLINK_MD5_ARGS(md5_hash) \
md5_hash[0], md5_hash[1], md5_hash[2], md5_hash[3], \
md5_hash[4], md5_hash[5], md5_hash[6], md5_hash[7], \
md5_hash[8], md5_hash[9], md5_hash[10], md5_hash[11],\
md5_hash[12], md5_hash[13], md5_hash[14], md5_hash[15]
/*
 * symlink_hash - compute the MD5 digest of a symlink target string
 * @link_len:	length of @link_str in bytes
 * @link_str:	the symlink target
 * @md5_hash:	out buffer receiving the 16-byte digest
 *
 * Uses the kernel crypto shash API (alloc, init, update, final).
 * Returns 0 on success or a negative errno.
 */
static int
symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
{
	int rc;
	unsigned int size;
	struct crypto_shash *md5;
	struct sdesc *sdescmd5;

	md5 = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(md5)) {
		rc = PTR_ERR(md5);
		cifs_dbg(VFS, "%s: Crypto md5 allocation error %d\n",
			 __func__, rc);
		return rc;
	}
	/* The shash descriptor must be sized for this particular tfm. */
	size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
	sdescmd5 = kmalloc(size, GFP_KERNEL);
	if (!sdescmd5) {
		rc = -ENOMEM;
		goto symlink_hash_err;	/* kfree(NULL) below is a no-op */
	}
	sdescmd5->shash.tfm = md5;
	sdescmd5->shash.flags = 0x0;

	rc = crypto_shash_init(&sdescmd5->shash);
	if (rc) {
		cifs_dbg(VFS, "%s: Could not init md5 shash\n", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_update(&sdescmd5->shash, link_str, link_len);
	if (rc) {
		cifs_dbg(VFS, "%s: Could not update with link_str\n", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_final(&sdescmd5->shash, md5_hash);
	if (rc)
		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);

symlink_hash_err:
	crypto_free_shash(md5);
	kfree(sdescmd5);

	return rc;
}
static int
parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
char **_link_str)
{
int rc;
unsigned int link_len;
const char *md5_str1;
const char *link_str;
u8 md5_hash[16];
char md5_str2[34];
if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
return -EINVAL;
md5_str1 = (const char *)&buf[CIFS_MF_SYMLINK_MD5_OFFSET];
link_str = (const char *)&buf[CIFS_MF_SYMLINK_LINK_OFFSET];
rc = sscanf(buf, CIFS_MF_SYMLINK_LEN_FORMAT, &link_len);
if (rc != 1)
return -EINVAL;
rc = symlink_hash(link_len, link_str, md5_hash);
if (rc) {
cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
return rc;
}
snprintf(md5_str2, sizeof(md5_str2),
CIFS_MF_SYMLINK_MD5_FORMAT,
CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
if (strncmp(md5_str1, md5_str2, 17) != 0)
return -EINVAL;
if (_link_str) {
*_link_str = kstrndup(link_str, link_len, GFP_KERNEL);
if (!*_link_str)
return -ENOMEM;
}
*_link_len = link_len;
return 0;
}
/*
 * format_mf_symlink - build a Minshall-French symlink file image
 * @buf:	destination buffer (must be CIFS_MF_SYMLINK_FILE_SIZE bytes)
 * @buf_len:	size of @buf
 * @link_str:	NUL-terminated link target
 *
 * Writes the "XSym" header, the target length, the target's MD5 digest,
 * the target itself, a trailing newline and space padding up to the
 * fixed file size.  Returns 0, -EINVAL for a wrong buffer size,
 * -ENAMETOOLONG for an oversized target, or a hashing errno.
 */
static int
format_mf_symlink(u8 *buf, unsigned int buf_len, const char *link_str)
{
	int rc;
	unsigned int link_len;
	unsigned int ofs;
	u8 md5_hash[16];

	if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
		return -EINVAL;

	link_len = strlen(link_str);
	if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
		return -ENAMETOOLONG;

	rc = symlink_hash(link_len, link_str, md5_hash);
	if (rc) {
		cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
		return rc;
	}

	/* Header: magic + length line + digest line. */
	snprintf(buf, buf_len,
		 CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT,
		 link_len,
		 CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));

	/* Target string, then '\n', then space padding to the full size. */
	ofs = CIFS_MF_SYMLINK_LINK_OFFSET;
	memcpy(buf + ofs, link_str, link_len);
	ofs += link_len;
	if (ofs < CIFS_MF_SYMLINK_FILE_SIZE)
		buf[ofs++] = '\n';
	if (ofs < CIFS_MF_SYMLINK_FILE_SIZE)
		memset(buf + ofs, ' ', CIFS_MF_SYMLINK_FILE_SIZE - ofs);

	return 0;
}
/*
 * An M-F symlink is stored on the server as a regular file of exactly
 * CIFS_MF_SYMLINK_FILE_SIZE bytes; anything else can't be one.
 */
bool
couldbe_mf_symlink(const struct cifs_fattr *fattr)
{
	return S_ISREG(fattr->cf_mode) &&
	       fattr->cf_eof == CIFS_MF_SYMLINK_FILE_SIZE;
}
/*
 * Build an M-F symlink blob for @toName and store it in the file
 * @fromName via the protocol-specific create_mf_symlink operation.
 * Returns 0 on success, -EOPNOTSUPP when the server ops lack the hook,
 * -EIO on a short write, or another negative errno.
 */
static int
create_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *fromName,
		  const char *toName)
{
	int rc;
	u8 *buf;
	unsigned int bytes_written = 0;

	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = format_mf_symlink(buf, CIFS_MF_SYMLINK_FILE_SIZE, toName);
	if (rc)
		goto out;

	if (tcon->ses->server->ops->create_mf_symlink)
		rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon,
					cifs_sb, fromName, buf, &bytes_written);
	else
		rc = -EOPNOTSUPP;

	if (rc)
		goto out;

	/* A partial write leaves a malformed symlink file behind. */
	if (bytes_written != CIFS_MF_SYMLINK_FILE_SIZE)
		rc = -EIO;
out:
	kfree(buf);
	return rc;
}
/*
 * Read the file at @path via the protocol-specific query_mf_symlink
 * operation and, if it parses as an M-F symlink, return the target in
 * *@symlinkinfo (kmalloc'ed; caller frees).  Returns 0, -ENOSYS when
 * the server ops lack the hook, -EINVAL if the file is not a symlink,
 * or another negative errno.
 */
static int
query_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon,
		 struct cifs_sb_info *cifs_sb, const unsigned char *path,
		 char **symlinkinfo)
{
	int rc;
	u8 *buf = NULL;
	unsigned int link_len = 0;
	unsigned int bytes_read = 0;

	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (tcon->ses->server->ops->query_mf_symlink)
		rc = tcon->ses->server->ops->query_mf_symlink(xid, tcon,
					      cifs_sb, path, buf, &bytes_read);
	else
		rc = -ENOSYS;

	if (rc)
		goto out;

	if (bytes_read == 0) { /* not a symlink */
		rc = -EINVAL;
		goto out;
	}

	rc = parse_mf_symlink(buf, bytes_read, &link_len, symlinkinfo);
out:
	kfree(buf);
	return rc;
}
/*
 * If the file described by @fattr could be (and parses as) an M-F
 * symlink, rewrite the fattr in place so it presents as a symlink:
 * size becomes the target length, the mode S_IFMT bits become S_IFLNK
 * and the dtype DT_LNK.  A file that is not a symlink is left alone
 * and 0 is returned; errors from reading/parsing are propagated.
 */
int
check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
		 struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
		 const unsigned char *path)
{
	int rc;
	u8 *buf = NULL;
	unsigned int link_len = 0;
	unsigned int bytes_read = 0;

	if (!couldbe_mf_symlink(fattr))
		/* it's not a symlink */
		return 0;

	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (tcon->ses->server->ops->query_mf_symlink)
		rc = tcon->ses->server->ops->query_mf_symlink(xid, tcon,
					      cifs_sb, path, buf, &bytes_read);
	else
		rc = -ENOSYS;

	if (rc)
		goto out;

	if (bytes_read == 0) /* not a symlink */
		goto out;

	rc = parse_mf_symlink(buf, bytes_read, &link_len, NULL);
	if (rc == -EINVAL) {
		/* it's not a symlink */
		rc = 0;
		goto out;
	}

	if (rc != 0)
		goto out;

	/* it is a symlink */
	fattr->cf_eof = link_len;
	fattr->cf_mode &= ~S_IFMT;
	fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
	fattr->cf_dtype = DT_LNK;
out:
	kfree(buf);
	return rc;
}
/*
* SMB 1.0 Protocol specific functions
*/
/*
 * SMB1 backend for query_mf_symlink: open @path read-only, check that
 * it has exactly the M-F symlink file size, and read its contents into
 * @pbuf (*@pbytes_read receives the byte count).  Returns 0 or a
 * negative errno (-ENOENT when the size rules it out as a symlink).
 */
int
cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
		      struct cifs_sb_info *cifs_sb, const unsigned char *path,
		      char *pbuf, unsigned int *pbytes_read)
{
	int rc;
	int oplock = 0;
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	FILE_ALL_INFO file_info;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = GENERIC_READ;
	oparms.create_options = CREATE_NOT_DIR;
	oparms.disposition = FILE_OPEN;
	oparms.path = path;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = CIFS_open(xid, &oparms, &oplock, &file_info);
	if (rc)
		return rc;

	if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
		rc = -ENOENT;
		/* it's not a symlink */
		goto out;
	}

	io_parms.netfid = fid.netfid;
	io_parms.pid = current->tgid;
	io_parms.tcon = tcon;
	io_parms.offset = 0;
	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;

	rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
out:
	CIFSSMBClose(xid, tcon, fid.netfid);
	return rc;
}
/*
 * SMB1 backend for create_mf_symlink: create @path and write the
 * preformatted M-F symlink blob from @pbuf into it (*@pbytes_written
 * receives the byte count).  Returns 0 or a negative errno.
 */
int
cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
		       struct cifs_sb_info *cifs_sb, const unsigned char *path,
		       char *pbuf, unsigned int *pbytes_written)
{
	int rc;
	int oplock = 0;
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	struct cifs_io_parms io_parms;
	int create_options = CREATE_NOT_DIR;

	/* Mounts with backup intent use the backup privilege on open. */
	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = GENERIC_WRITE;
	oparms.create_options = create_options;
	oparms.disposition = FILE_CREATE;
	oparms.path = path;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = CIFS_open(xid, &oparms, &oplock, NULL);
	if (rc)
		return rc;

	io_parms.netfid = fid.netfid;
	io_parms.pid = current->tgid;
	io_parms.tcon = tcon;
	io_parms.offset = 0;
	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;

	rc = CIFSSMBWrite(xid, &io_parms, pbytes_written, pbuf, NULL, 0);
	CIFSSMBClose(xid, tcon, fid.netfid);
	return rc;
}
/*
* SMB 2.1/SMB3 Protocol specific functions
*/
#ifdef CONFIG_CIFS_SMB2
/*
 * SMB2.1/SMB3 backend for query_mf_symlink: open @path, verify it has
 * exactly the M-F symlink file size, and read its contents into @pbuf
 * (*@pbytes_read receives the byte count).  Returns 0 or a negative
 * errno (-ENOENT when the size rules it out as a symlink).
 */
int
smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
		      struct cifs_sb_info *cifs_sb, const unsigned char *path,
		      char *pbuf, unsigned int *pbytes_read)
{
	int rc;
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__le16 *utf16_path;
	__u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct smb2_file_all_info *pfile_info = NULL;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = GENERIC_READ;
	oparms.create_options = CREATE_NOT_DIR;
	if (backup_cred(cifs_sb))
		oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
	oparms.disposition = FILE_OPEN;
	oparms.fid = &fid;
	oparms.reconnect = false;

	/* SMB2+ paths are sent as UTF-16. */
	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (utf16_path == NULL)
		return -ENOMEM;

	pfile_info = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
			     GFP_KERNEL);

	if (pfile_info == NULL) {
		kfree(utf16_path);
		return  -ENOMEM;
	}

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, pfile_info, NULL);
	if (rc)
		goto qmf_out_open_fail;

	if (pfile_info->EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
		/* it's not a symlink */
		rc = -ENOENT; /* Is there a better rc to return? */
		goto qmf_out;
	}

	io_parms.netfid = fid.netfid;
	io_parms.pid = current->tgid;
	io_parms.tcon = tcon;
	io_parms.offset = 0;
	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
	io_parms.persistent_fid = fid.persistent_fid;
	io_parms.volatile_fid = fid.volatile_fid;
	rc = SMB2_read(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
qmf_out:
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
qmf_out_open_fail:
	kfree(utf16_path);
	kfree(pfile_info);
	return rc;
}
/*
 * smb3_create_mf_symlink - write a Minshall+French symlink file over SMB2/3
 * @xid: transaction id for this operation
 * @tcon: tree connection to the share
 * @cifs_sb: superblock info (backup-intent flag, name mapping)
 * @path: remote path of the symlink file to create
 * @pbuf: preformatted M-F symlink file contents to write
 * @pbytes_written: out: number of bytes actually written
 *
 * Creates @path (FILE_CREATE disposition), writes exactly
 * CIFS_MF_SYMLINK_FILE_SIZE bytes and closes the handle.  A short write
 * is converted to -EIO since a truncated M-F symlink file could not be
 * recognized later.  Returns 0 on success or a negative errno.
 */
int
smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
		       struct cifs_sb_info *cifs_sb, const unsigned char *path,
		       char *pbuf, unsigned int *pbytes_written)
{
	int rc;
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	struct cifs_io_parms io_parms;
	int create_options = CREATE_NOT_DIR;
	__le16 *utf16_path;
	__u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
	struct kvec iov[2];

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = GENERIC_WRITE;
	oparms.create_options = create_options;
	oparms.disposition = FILE_CREATE;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
	if (rc) {
		kfree(utf16_path);
		return rc;
	}

	io_parms.netfid = fid.netfid;
	io_parms.pid = current->tgid;
	io_parms.tcon = tcon;
	io_parms.offset = 0;
	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
	io_parms.persistent_fid = fid.persistent_fid;
	io_parms.volatile_fid = fid.volatile_fid;

	/* iov[0] is reserved for smb header */
	iov[1].iov_base = pbuf;
	iov[1].iov_len = CIFS_MF_SYMLINK_FILE_SIZE;

	rc = SMB2_write(xid, &io_parms, pbytes_written, iov, 1);

	/* Make sure we wrote all of the symlink data */
	if ((rc == 0) && (*pbytes_written != CIFS_MF_SYMLINK_FILE_SIZE))
		rc = -EIO;

	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);

	kfree(utf16_path);
	return rc;
}
#endif /* CONFIG_CIFS_SMB2 */
/*
* M-F Symlink Functions - End
*/
/*
 * cifs_hardlink - create a hard link on the server
 * @old_file: dentry of the existing (source) file
 * @inode: inode of the directory that will contain the new link
 * @direntry: dentry for the new link name
 *
 * Uses the UNIX-extensions hardlink call when the tcon supports it,
 * otherwise the protocol-specific create_hardlink operation.  On success
 * the source inode's link count is bumped locally (the server may not be
 * consulted again while an oplock is held) and its cached attribute time
 * is zeroed to force revalidation.  Returns 0 on success or a negative
 * errno.
 */
int
cifs_hardlink(struct dentry *old_file, struct inode *inode,
	      struct dentry *direntry)
{
	int rc = -EACCES;
	unsigned int xid;
	char *from_name = NULL;
	char *to_name = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cifsInode;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	xid = get_xid();

	from_name = build_path_from_dentry(old_file);
	to_name = build_path_from_dentry(direntry);
	if ((from_name == NULL) || (to_name == NULL)) {
		rc = -ENOMEM;
		goto cifs_hl_exit;
	}

	if (tcon->unix_ext)
		rc = CIFSUnixCreateHardLink(xid, tcon, from_name, to_name,
					    cifs_sb->local_nls,
					    cifs_remap(cifs_sb));
	else {
		server = tcon->ses->server;
		if (!server->ops->create_hardlink) {
			rc = -ENOSYS;
			goto cifs_hl_exit;
		}
		rc = server->ops->create_hardlink(xid, tcon, from_name, to_name,
						  cifs_sb);
		/* Map generic server failures to "operation unsupported". */
		if ((rc == -EIO) || (rc == -EINVAL))
			rc = -EOPNOTSUPP;
	}

	d_drop(direntry);	/* force new lookup from server of target */

	/*
	 * if source file is cached (oplocked) revalidate will not go to server
	 * until the file is closed or oplock broken so update nlinks locally
	 */
	if (d_really_is_positive(old_file)) {
		cifsInode = CIFS_I(d_inode(old_file));
		if (rc == 0) {
			spin_lock(&d_inode(old_file)->i_lock);
			inc_nlink(d_inode(old_file));
			spin_unlock(&d_inode(old_file)->i_lock);
			/*
			 * parent dir timestamps will update from srv within a
			 * second, would it really be worth it to set the parent
			 * dir cifs inode time to zero to force revalidate
			 * (faster) for it too?
			 */
		}
		/*
		 * if not oplocked will force revalidate to get info on source
		 * file from srv.  Note Samba server prior to 4.2 has bug -
		 * not updating src file ctime on hardlinks but Windows servers
		 * handle it properly
		 */
		cifsInode->time = 0;

		/*
		 * Will update parent dir timestamps from srv within a second.
		 * Would it really be worth it to set the parent dir (cifs
		 * inode) time field to zero to force revalidate on parent
		 * directory faster ie
		 *
		 * CIFS_I(inode)->time = 0;
		 */
	}

cifs_hl_exit:
	kfree(from_name);
	kfree(to_name);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
/*
 * cifs_follow_link - resolve the target of a CIFS symlink for the VFS
 * @direntry: dentry of the symlink being followed
 * @nd: nameidata in which the resolved target is stored
 *
 * Tries Minshall+French symlink files first when the mount enabled them
 * (CIFS_MOUNT_MF_SYMLINKS), then falls back to the protocol's
 * query_symlink operation.  The resolved path -- or an ERR_PTR on
 * failure -- is handed to the VFS via nd_set_link().  Always returns
 * NULL (no cookie for put_link).
 */
void *
cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
{
	struct inode *inode = d_inode(direntry);
	int rc = -ENOMEM;
	unsigned int xid;
	char *full_path = NULL;
	char *target_path = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = NULL;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;

	xid = get_xid();

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		tlink = NULL;
		goto out;
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(direntry);
	if (!full_path)
		goto out;

	cifs_dbg(FYI, "Full path: %s inode = 0x%p\n", full_path, inode);

	rc = -EACCES;
	/*
	 * First try Minshall+French Symlinks, if configured
	 * and fallback to UNIX Extensions Symlinks.
	 */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		rc = query_mf_symlink(xid, tcon, cifs_sb, full_path,
				      &target_path);

	if (rc != 0 && server->ops->query_symlink)
		rc = server->ops->query_symlink(xid, tcon, full_path,
						&target_path, cifs_sb);

	kfree(full_path);
out:
	if (rc != 0) {
		kfree(target_path);
		target_path = ERR_PTR(rc);
	}

	free_xid(xid);
	if (tlink)
		cifs_put_tlink(tlink);
	nd_set_link(nd, target_path);
	return NULL;
}
/*
 * cifs_symlink - create a symlink on the server
 * @inode: inode of the parent directory
 * @direntry: dentry for the new symlink
 * @symname: target path the symlink should point to
 *
 * Creates the link either as a Minshall+French symlink file (when the
 * mount enabled CIFS_MOUNT_MF_SYMLINKS) or via the UNIX-extensions
 * symlink call.  On success the new inode info is fetched and the dentry
 * instantiated.  Returns 0 on success, -EOPNOTSUPP when neither creation
 * method applies, or another negative errno.
 */
int
cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
{
	int rc = -EOPNOTSUPP;
	unsigned int xid;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink;
	struct cifs_tcon *pTcon;
	char *full_path = NULL;
	struct inode *newinode = NULL;

	xid = get_xid();

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto symlink_exit;
	}
	pTcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto symlink_exit;
	}

	cifs_dbg(FYI, "Full path: %s\n", full_path);
	cifs_dbg(FYI, "symname is %s\n", symname);

	/* BB what if DFS and this volume is on different share? BB */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
	else if (pTcon->unix_ext)
		rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
					   cifs_sb->local_nls,
					   cifs_remap(cifs_sb));
	/* else
	   rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,
					cifs_sb_target->local_nls); */

	if (rc == 0) {
		if (pTcon->unix_ext)
			rc = cifs_get_inode_info_unix(&newinode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&newinode, full_path, NULL,
						 inode->i_sb, xid, NULL);

		if (rc != 0) {
			cifs_dbg(FYI, "Create symlink ok, getinodeinfo fail rc = %d\n",
				 rc);
		} else {
			d_instantiate(direntry, newinode);
		}
	}
symlink_exit:
	kfree(full_path);
	cifs_put_tlink(tlink);
	free_xid(xid);
	return rc;
}
| gpl-2.0 |
rassillon/android_kernel_samsung_grandneove3g | drivers/dma/ste_dma40.c | 430 | 95963 | /*
* Copyright (C) Ericsson AB 2007-2008
* Copyright (C) ST-Ericsson SA 2008-2010
* Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
* Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
*/
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>
#include "dmaengine.h"
#include "ste_dma40_ll.h"
#define D40_NAME "dma40"
#define D40_PHY_CHAN -1
/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan) (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500
/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY 100
/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
/* Attempts before giving up to trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256
/* Bit markings for allocation map */
#define D40_ALLOC_FREE (1 << 31)
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0
#define MAX(a, b) (((a) < (b)) ? (b) : (a))
/**
* enum 40_command - The different commands and/or statuses.
*
* @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
* @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN.
* @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
* @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
*/
enum d40_command {
D40_DMA_STOP = 0,
D40_DMA_RUN = 1,
D40_DMA_SUSPEND_REQ = 2,
D40_DMA_SUSPENDED = 3
};
/*
* enum d40_events - The different Event Enables for the event lines.
*
* @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
* @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
* @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line.
* @D40_ROUND_EVENTLINE: Status check for event line.
*/
enum d40_events {
D40_DEACTIVATE_EVENTLINE = 0,
D40_ACTIVATE_EVENTLINE = 1,
D40_SUSPEND_REQ_EVENTLINE = 2,
D40_ROUND_EVENTLINE = 3
};
/*
* These are the registers that has to be saved and later restored
* when the DMA hw is powered off.
* TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
*/
static u32 d40_backup_regs[] = {
D40_DREG_LCPA,
D40_DREG_LCLA,
D40_DREG_PRMSE,
D40_DREG_PRMSO,
D40_DREG_PRMOE,
D40_DREG_PRMOO,
};
#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
/*
* since 9540 and 8540 has the same HW revision
* use v4a for 9540 or ealier
* use v4b for 8540 or later
* HW revision:
* DB8500ed has revision 0
* DB8500v1 has revision 2
* DB8500v2 has revision 3
* AP9540v1 has revision 4
* DB8540v1 has revision 4
* TODO: Check if all these registers have to be saved/restored on dma40 v4a
*/
static u32 d40_backup_regs_v4a[] = {
D40_DREG_PSEG1,
D40_DREG_PSEG2,
D40_DREG_PSEG3,
D40_DREG_PSEG4,
D40_DREG_PCEG1,
D40_DREG_PCEG2,
D40_DREG_PCEG3,
D40_DREG_PCEG4,
D40_DREG_RSEG1,
D40_DREG_RSEG2,
D40_DREG_RSEG3,
D40_DREG_RSEG4,
D40_DREG_RCEG1,
D40_DREG_RCEG2,
D40_DREG_RCEG3,
D40_DREG_RCEG4,
};
#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
static u32 d40_backup_regs_v4b[] = {
D40_DREG_CPSEG1,
D40_DREG_CPSEG2,
D40_DREG_CPSEG3,
D40_DREG_CPSEG4,
D40_DREG_CPSEG5,
D40_DREG_CPCEG1,
D40_DREG_CPCEG2,
D40_DREG_CPCEG3,
D40_DREG_CPCEG4,
D40_DREG_CPCEG5,
D40_DREG_CRSEG1,
D40_DREG_CRSEG2,
D40_DREG_CRSEG3,
D40_DREG_CRSEG4,
D40_DREG_CRSEG5,
D40_DREG_CRCEG1,
D40_DREG_CRCEG2,
D40_DREG_CRCEG3,
D40_DREG_CRCEG4,
D40_DREG_CRCEG5,
};
#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
static u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SSCFG,
D40_CHAN_REG_SSELT,
D40_CHAN_REG_SSPTR,
D40_CHAN_REG_SSLNK,
D40_CHAN_REG_SDCFG,
D40_CHAN_REG_SDELT,
D40_CHAN_REG_SDPTR,
D40_CHAN_REG_SDLNK,
};
/**
* struct d40_interrupt_lookup - lookup table for interrupt handler
*
* @src: Interrupt mask register.
* @clr: Interrupt clear register.
* @is_error: true if this is an error interrupt.
* @offset: start delta in the lookup_log_chans in d40_base. If equals to
* D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
*/
struct d40_interrupt_lookup {
u32 src;
u32 clr;
bool is_error;
int offset;
};
static struct d40_interrupt_lookup il_v4a[] = {
{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
};
static struct d40_interrupt_lookup il_v4b[] = {
{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
{D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
{D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
};
/**
* struct d40_reg_val - simple lookup struct
*
* @reg: The register.
* @val: The value that belongs to the register in reg.
*/
struct d40_reg_val {
unsigned int reg;
unsigned int val;
};
static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
/* Clock every part of the DMA block from start */
{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
/* Interrupts on all logical channels */
{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};
static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
/* Clock every part of the DMA block from start */
{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
/* Interrupts on all logical channels */
{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};
/**
* struct d40_lli_pool - Structure for keeping LLIs in memory
*
* @base: Pointer to memory area when the pre_alloc_lli's are not large
* enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
* pre_alloc_lli is used.
* @dma_addr: DMA address, if mapped
* @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
* @pre_alloc_lli: Pre allocated area for the most common case of transfers,
* one buffer to one buffer.
*/
struct d40_lli_pool {
void *base;
int size;
dma_addr_t dma_addr;
/* Space for dst and src, plus an extra for padding */
u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
/**
* struct d40_desc - A descriptor is one DMA job.
*
* @lli_phy: LLI settings for physical channel. Both src and dst=
* points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
* lli_len equals one.
* @lli_log: Same as above but for logical channels.
* @lli_pool: The pool with two entries pre-allocated.
* @lli_len: Number of llis of current descriptor.
* @lli_current: Number of transferred llis.
* @lcla_alloc: Number of LCLA entries allocated.
* @txd: DMA engine struct. Used for among other things for communication
* during a transfer.
* @node: List entry.
* @is_in_client_list: true if the client owns this descriptor.
* @cyclic: true if this is a cyclic job
*
* This descriptor is used for both logical and physical transfers.
*/
struct d40_desc {
/* LLI physical */
struct d40_phy_lli_bidir lli_phy;
/* LLI logical */
struct d40_log_lli_bidir lli_log;
struct d40_lli_pool lli_pool;
int lli_len;
int lli_current;
int lcla_alloc;
struct dma_async_tx_descriptor txd;
struct list_head node;
bool is_in_client_list;
bool cyclic;
};
/**
* struct d40_lcla_pool - LCLA pool settings and data.
*
* @base: The virtual address of LCLA. 18 bit aligned.
* @base_unaligned: The orignal kmalloc pointer, if kmalloc is used.
* This pointer is only there for clean-up on error.
* @pages: The number of pages needed for all physical channels.
* Only used later for clean-up on error
* @lock: Lock to protect the content in this struct.
* @alloc_map: big map over which LCLA entry is own by which job.
*/
struct d40_lcla_pool {
void *base;
dma_addr_t dma_addr;
void *base_unaligned;
int pages;
spinlock_t lock;
struct d40_desc **alloc_map;
};
/**
* struct d40_phy_res - struct for handling eventlines mapped to physical
* channels.
*
* @lock: A lock protection this entity.
* @reserved: True if used by secure world or otherwise.
* @num: The physical channel number of this entity.
* @allocated_src: Bit mapped to show which src event line's are mapped to
* this physical channel. Can also be free or physically allocated.
* @allocated_dst: Same as for src but is dst.
* allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
* event line number.
* @use_soft_lli: To mark if the linked lists of channel are managed by SW.
*/
struct d40_phy_res {
spinlock_t lock;
bool reserved;
int num;
u32 allocated_src;
u32 allocated_dst;
bool use_soft_lli;
};
struct d40_base;
/**
* struct d40_chan - Struct that describes a channel.
*
* @lock: A spinlock to protect this struct.
* @log_num: The logical number, if any of this channel.
* @pending_tx: The number of pending transfers. Used between interrupt handler
* and tasklet.
* @busy: Set to true when transfer is ongoing on this channel.
* @phy_chan: Pointer to physical channel which this instance runs on. If this
* point is NULL, then the channel is not allocated.
* @chan: DMA engine handle.
* @tasklet: Tasklet that gets scheduled from interrupt context to complete a
* transfer and call client callback.
* @client: Cliented owned descriptor list.
* @pending_queue: Submitted jobs, to be issued by issue_pending()
* @active: Active descriptor.
* @done: Completed jobs
* @queue: Queued jobs.
* @prepare_queue: Prepared jobs.
* @dma_cfg: The client configuration of this dma channel.
* @configured: whether the dma_cfg configuration is valid
* @base: Pointer to the device instance struct.
* @src_def_cfg: Default cfg register setting for src.
* @dst_def_cfg: Default cfg register setting for dst.
* @log_def: Default logical channel settings.
* @lcpa: Pointer to dst and src lcpa settings.
* @runtime_addr: runtime configured address.
* @runtime_direction: runtime configured direction.
*
* This struct can either "be" a logical or a physical channel.
*/
struct d40_chan {
spinlock_t lock;
int log_num;
int pending_tx;
bool busy;
struct d40_phy_res *phy_chan;
struct dma_chan chan;
struct tasklet_struct tasklet;
struct list_head client;
struct list_head pending_queue;
struct list_head active;
struct list_head done;
struct list_head queue;
struct list_head prepare_queue;
struct stedma40_chan_cfg dma_cfg;
bool configured;
struct d40_base *base;
/* Default register configurations */
u32 src_def_cfg;
u32 dst_def_cfg;
struct d40_def_lcsp log_def;
struct d40_log_lli_full *lcpa;
/* Runtime reconfiguration */
dma_addr_t runtime_addr;
enum dma_transfer_direction runtime_direction;
};
/**
* struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
* controller
*
* @backup: the pointer to the registers address array for backup
* @backup_size: the size of the registers address array for backup
* @realtime_en: the realtime enable register
* @realtime_clear: the realtime clear register
* @high_prio_en: the high priority enable register
* @high_prio_clear: the high priority clear register
* @interrupt_en: the interrupt enable register
* @interrupt_clear: the interrupt clear register
* @il: the pointer to struct d40_interrupt_lookup
* @il_size: the size of d40_interrupt_lookup array
* @init_reg: the pointer to the struct d40_reg_val
* @init_reg_size: the size of d40_reg_val array
*/
struct d40_gen_dmac {
u32 *backup;
u32 backup_size;
u32 realtime_en;
u32 realtime_clear;
u32 high_prio_en;
u32 high_prio_clear;
u32 interrupt_en;
u32 interrupt_clear;
struct d40_interrupt_lookup *il;
u32 il_size;
struct d40_reg_val *init_reg;
u32 init_reg_size;
};
/**
* struct d40_base - The big global struct, one for each probe'd instance.
*
* @interrupt_lock: Lock used to make sure one interrupt is handle a time.
* @execmd_lock: Lock for execute command usage since several channels share
* the same physical register.
* @dev: The device structure.
* @virtbase: The virtual base address of the DMA's register.
* @rev: silicon revision detected.
* @clk: Pointer to the DMA clock structure.
* @phy_start: Physical memory start of the DMA registers.
* @phy_size: Size of the DMA register map.
* @irq: The IRQ number.
* @num_phy_chans: The number of physical channels. Read from HW. This
* is the number of available channels for this driver, not counting "Secure
* mode" allocated physical channels.
* @num_log_chans: The number of logical channels. Calculated from
* num_phy_chans.
* @dma_both: dma_device channels that can do both memcpy and slave transfers.
* @dma_slave: dma_device channels that can do only do slave transfers.
* @dma_memcpy: dma_device channels that can do only do memcpy transfers.
* @phy_chans: Room for all possible physical channels in system.
* @log_chans: Room for all possible logical channels in system.
* @lookup_log_chans: Used to map interrupt number to logical channel. Points
* to log_chans entries.
* @lookup_phy_chans: Used to map interrupt number to physical channel. Points
* to phy_chans entries.
* @plat_data: Pointer to provided platform_data which is the driver
* configuration.
* @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
* @phy_res: Vector containing all physical channels.
* @lcla_pool: lcla pool settings and data.
* @lcpa_base: The virtual mapped address of LCPA.
* @phy_lcpa: The physical address of the LCPA.
* @lcpa_size: The size of the LCPA area.
* @desc_slab: cache for descriptors.
* @reg_val_backup: Here the values of some hardware registers are stored
* before the DMA is powered off. They are restored when the power is back on.
* @reg_val_backup_v4: Backup of registers that only exits on dma40 v3 and
* later
* @reg_val_backup_chan: Backup data for standard channel parameter registers.
* @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
* @initialized: true if the dma has been initialized
* @gen_dmac: the struct for generic registers values to represent u8500/8540
* DMA controller
*/
struct d40_base {
spinlock_t interrupt_lock;
spinlock_t execmd_lock;
struct device *dev;
void __iomem *virtbase;
u8 rev:4;
struct clk *clk;
phys_addr_t phy_start;
resource_size_t phy_size;
int irq;
int num_phy_chans;
int num_log_chans;
struct device_dma_parameters dma_parms;
struct dma_device dma_both;
struct dma_device dma_slave;
struct dma_device dma_memcpy;
struct d40_chan *phy_chans;
struct d40_chan *log_chans;
struct d40_chan **lookup_log_chans;
struct d40_chan **lookup_phy_chans;
struct stedma40_platform_data *plat_data;
struct regulator *lcpa_regulator;
/* Physical half channels */
struct d40_phy_res *phy_res;
struct d40_lcla_pool lcla_pool;
void *lcpa_base;
dma_addr_t phy_lcpa;
resource_size_t lcpa_size;
struct kmem_cache *desc_slab;
u32 reg_val_backup[BACKUP_REGS_SZ];
u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)];
u32 *reg_val_backup_chan;
u16 gcc_pwr_off_mask;
bool initialized;
struct d40_gen_dmac gen_dmac;
};
static struct device *chan2dev(struct d40_chan *d40c)
{
return &d40c->chan.dev->device;
}
/* True when this channel instance runs directly on a physical channel. */
static bool chan_is_physical(struct d40_chan *chan)
{
	bool is_phy = (chan->log_num == D40_PHY_CHAN);

	return is_phy;
}
static bool chan_is_logical(struct d40_chan *chan)
{
return !chan_is_physical(chan);
}
/* Base MMIO address of this channel's standard channel parameter regs. */
static void __iomem *chan_base(struct d40_chan *chan)
{
	void __iomem *regs = chan->base->virtbase + D40_DREG_PCBASE;

	return regs + chan->phy_chan->num * D40_DREG_PCDELTA;
}
#define d40_err(dev, format, arg...) \
dev_err(dev, "[%s] " format, __func__, ## arg)
#define chan_err(d40c, format, arg...) \
d40_err(chan2dev(d40c), format, ## arg)
/*
 * d40_pool_lli_alloc - allocate LLI storage for a descriptor
 * @d40c: channel the descriptor belongs to (decides logical vs physical)
 * @d40d: descriptor whose lli_pool/lli_log/lli_phy fields are set up
 * @lli_len: number of LLI entries needed for each of src and dst
 *
 * For the common single-link case the small pre-allocated area embedded
 * in the descriptor is used; otherwise a buffer for 2 * lli_len entries
 * (src and dst halves) is kmalloc'd with extra room for alignment.
 * Physical-channel LLIs are additionally DMA-mapped.  Returns 0 on
 * success, -ENOMEM on allocation or mapping failure.
 */
static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		/* One src + one dst link fits in the embedded scratch area. */
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		/* GFP_NOWAIT: may be called from the dmaengine prep path. */
		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Free a descriptor's LLI storage: unmap the DMA mapping if one exists
 * (physical channels only), free the kmalloc'd pool (kfree(NULL) is a
 * no-op when the embedded area was used) and clear all LLI pointers.
 */
static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}
/*
 * Allocate one LCLA entry index (src and dst share the index) for @d40d
 * on the channel's physical channel.  Index 0 is never handed out since
 * the HW treats a zero link as the end-of-list marker.  Returns the
 * allocated index, or -EINVAL when all entries for this event group are
 * in use.  Protected by the lcla pool spinlock.
 */
static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	/*
	 * Allocate both src and dst at the same time, therefore the half
	 * start on 1 since 0 can't be used since zero is used as end marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (!d40c->base->lcla_pool.alloc_map[idx]) {
			d40c->base->lcla_pool.alloc_map[idx] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
/*
 * Release every LCLA entry owned by @d40d.  Physical channels never own
 * LCLA entries, so 0 is returned immediately for them.  Returns 0 once
 * the descriptor's allocation count reaches zero, -EINVAL if entries
 * remain after scanning the whole event group.  Protected by the lcla
 * pool spinlock.
 */
static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
			d40c->base->lcla_pool.alloc_map[idx] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
/* Unlink a descriptor from whatever list it is currently on. */
static void d40_desc_remove(struct d40_desc *d40d)
{
	struct list_head *entry = &d40d->node;

	list_del(entry);
}
/*
 * Get a descriptor for a new job: prefer recycling an acked descriptor
 * from the client list, otherwise allocate a fresh zeroed one from the
 * slab.  Returns NULL when nothing can be recycled and the slab
 * allocation fails (GFP_NOWAIT: callable from the prep path).
 */
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				/* Wipe stale state before reuse. */
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}
/* Release a descriptor: its LLI pool, any LCLA entries, then the object. */
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}
/* Append a submitted descriptor to the channel's active list. */
static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	struct list_head *active_list = &d40c->active;

	list_add_tail(&desc->node, active_list);
}
/*
 * Program a physical channel's standard channel parameter registers from
 * the first src/dst LLI pair of the descriptor.
 */
static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	/* Source side: config, element, pointer and link registers. */
	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	/* Destination side. */
	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}
/* Move a completed descriptor onto the channel's done list. */
static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
	struct list_head *done_list = &d40c->done;

	list_add_tail(&desc->node, done_list);
}
/*
 * d40_log_lli_to_lcxa - load a logical channel's links into LCPA/LCLA
 * @chan: the logical channel
 * @desc: descriptor whose remaining links (from desc->lli_current on)
 *	  are to be loaded
 *
 * The first link is written to the channel's LCPA slot; subsequent links
 * are chained through LCLA entries allocated here.  Cyclic jobs link the
 * chain back to the first LCLA entry.  If LCLA entries run out the
 * function stops early and desc->lli_current records how far it got, so
 * later calls can continue loading the rest.
 */
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we did't get
	 * enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		/*
		 * If the channel is expected to use only soft_lli don't
		 * allocate a lcla. This is to avoid a HW issue that exists
		 * in some controller during a peripheral to memory transfer
		 * that uses linked lists.
		 */
		if (!(chan->phy_chan->use_soft_lli &&
			chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
			curr_lcla = d40_lcla_alloc_one(chan, desc);

		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
		/* Each LCLA entry is 2 d40_log_lli's (src+dst) of 8 bytes. */
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		/*
		 * Cache maintenance is not needed if lcla is
		 * mapped in esram
		 */
		if (!use_esram_lcla) {
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
		}
		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	/* Remember the progress so a later call can resume from here. */
	desc->lli_current = lli_current;
}
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	/* Logical channels go through the LCPA/LCLA link writer. */
	if (!chan_is_physical(d40c)) {
		d40_log_lli_to_lcxa(d40c, d40d);
		return;
	}

	/* Physical channels load the complete LLI chain in one go. */
	d40_phy_lli_load(d40c, d40d);
	d40d->lli_current = d40d->lli_len;
}
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	/* Oldest descriptor on the active list, or NULL if none. */
	return list_empty(&d40c->active) ? NULL :
		list_first_entry(&d40c->active, struct d40_desc, node);
}
/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	/* Unlink from whatever list the descriptor currently sits on. */
	d40_desc_remove(desc);
	/* Once queued for transfer it is no longer client-owned. */
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}
static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	/* Oldest descriptor on the pending queue, or NULL if none. */
	return list_empty(&d40c->pending_queue) ? NULL :
		list_first_entry(&d40c->pending_queue, struct d40_desc, node);
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	/* Oldest descriptor on the transfer queue, or NULL if none. */
	return list_empty(&d40c->queue) ? NULL :
		list_first_entry(&d40c->queue, struct d40_desc, node);
}
static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	/* Oldest completed-but-not-yet-reported descriptor, if any. */
	if (!list_empty(&d40c->done))
		desc = list_first_entry(&d40c->done, struct d40_desc, node);

	return desc;
}
static int d40_psize_2_burst_size(bool is_log, int psize)
{
	/*
	 * The single-element packet size encoding maps to burst size 1;
	 * every other encoding expands as 2 << psize.
	 */
	int one_elt = is_log ? STEDMA40_PSIZE_LOG_1 : STEDMA40_PSIZE_PHY_1;

	if (psize == one_elt)
		return 1;

	return 2 << psize;
}
/*
 * The DMAC cannot move more than STEDMA40_MAX_SEG_SIZE elements in a
 * single link. Work out how many LLIs a transfer of 'size' bytes needs
 * given the two half-channel data widths.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	u32 wide = max(data_width1, data_width2);
	u32 narrow = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << narrow, 1 << wide);
	int dmalen;

	/* Keep each segment aligned for the widest side. */
	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= 1 << wide;

	if (!IS_ALIGNED(size, 1 << wide))
		return -EINVAL;

	if (size <= seg_max)
		return 1;

	/* Round up: a partial trailing segment needs its own LLI. */
	dmalen = size / seg_max;
	if (size % seg_max)
		dmalen++;

	return dmalen;
}
static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int total = 0;
	int i;

	/* Sum the LLI count of every sg entry; propagate the first error. */
	for_each_sg(sgl, sg, sg_len, i) {
		int n = d40_size_2_dmalen(sg_dma_len(sg),
					  data_width1, data_width2);
		if (n < 0)
			return n;
		total += n;
	}

	return total;
}
#ifdef CONFIG_PM
/*
 * Copy 'num' registers between the controller and the 'backup' array.
 * 'regaddr' holds the register offsets from 'baseaddr'; 'save' selects
 * the direction (true = hardware -> backup, false = backup -> hardware).
 */
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
			 u32 *regaddr, int num, bool save)
{
	int i;

	for (i = 0; i < num; i++) {
		void __iomem *addr = baseaddr + regaddr[i];

		if (save)
			backup[i] = readl_relaxed(addr);
		else
			writel_relaxed(backup[i], addr);
	}
}
/*
 * Save (save == true) or restore (save == false) every DMAC register that
 * must survive a power-state transition.
 */
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
	int i;

	/* Save/Restore channel specific registers */
	for (i = 0; i < base->num_phy_chans; i++) {
		void __iomem *addr;
		int idx;

		/* Reserved channels are not owned by this driver; skip. */
		if (base->phy_res[i].reserved)
			continue;

		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
		idx = i * ARRAY_SIZE(d40_backup_regs_chan);

		dma40_backup(addr, &base->reg_val_backup_chan[idx],
			     d40_backup_regs_chan,
			     ARRAY_SIZE(d40_backup_regs_chan),
			     save);
	}

	/* Save/Restore global registers */
	dma40_backup(base->virtbase, base->reg_val_backup,
		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
		     save);

	/* Save/Restore registers only existing on dma40 v3 and later */
	if (base->gen_dmac.backup)
		dma40_backup(base->virtbase, base->reg_val_backup_v4,
			     base->gen_dmac.backup,
			     base->gen_dmac.backup_size,
			     save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
	/* No power management configured: nothing to save or restore. */
}
#endif
/*
 * Issue a channel command (run/stop/suspend) to a physical channel and,
 * for suspend requests, poll until the hardware acknowledges. Serialized
 * through base->execmd_lock.
 */
static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	/* A STOP must go through the SUSPEND_REQ state first. */
	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	/* Even channels are controlled via ACTIVE, odd ones via ACTIVO. */
	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		/* Already suspended or stopped: nothing to request. */
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	/* Write the command into this channel's 2-bit field only. */
	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {
		/* Poll for the suspend/stop acknowledge. */
		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to suspend the chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
			dump_stack();
			ret = -EBUSY;
		}
	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
static void d40_term_all(struct d40_chan *d40c)
{
struct d40_desc *d40d;
struct d40_desc *_d;
/* Release completed descriptors */
while ((d40d = d40_first_done(d40c))) {
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
}
/* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) {
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
}
/* Release queued descriptors waiting for transfer */
while ((d40d = d40_first_queued(d40c))) {
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
}
/* Release pending descriptors */
while ((d40d = d40_first_pending(d40c))) {
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
}
/* Release client owned descriptors */
if (!list_empty(&d40c->client))
list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
}
/* Release descriptors in prepare queue */
if (!list_empty(&d40c->prepare_queue))
list_for_each_entry_safe(d40d, _d,
&d40c->prepare_queue, node) {
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
}
d40c->pending_tx = 0;
}
/*
 * Program one event line (given by 'event' / 'reg') to the requested
 * state: deactivate, request suspend (with polling), or activate (with a
 * hardware-bug retry workaround).
 */
static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:
		/* Write only this event's 2-bit field; leave the rest set. */
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			D40_EVENTLINE_POS(event);

		/* Already inactive or suspend already requested: done. */
		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		/* Poll until the event line reports inactive. */
		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				D40_EVENTLINE_POS(event);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to stop the event_line chl %d (log: %d)"
				 "status %x\n", d40c->phy_chan->num,
				 d40c->log_num, status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
		/*
		 * The hardware sometimes doesn't register the enable when src and dst
		 * event lines are active on the same logical channel. Retry to ensure
		 * it does. Usually only one retry is sufficient.
		 */
		tries = 100;
		while (--tries) {
			writel((D40_ACTIVATE_EVENTLINE <<
				D40_EVENTLINE_POS(event)) |
			       ~D40_EVENTLINE_MASK(event), addr);

			if (readl(addr) & D40_EVENTLINE_MASK(event))
				break;
		}

		/* tries == 99 means the very first write took effect. */
		if (tries != 99)
			dev_dbg(chan2dev(d40c),
				"[%s] workaround enable S%cLNK (%d tries)\n",
				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
				100 - tries);

		WARN_ON(!tries);
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;
	}
}
static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	u32 event;

	/* Source event line: used when the source is a peripheral. */
	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM ||
	    d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);
	}

	/* Destination event line: every direction except periph-to-mem
	 * (memcpy included, since dst event lines drive logical memcpy). */
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
	}
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 events;

	/* Non-zero if any src or dst event line is still linked. */
	events = readl(chanbase + D40_CHAN_REG_SSLNK);
	events |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return events;
}
/*
 * Issue a channel command to a logical channel by manipulating its event
 * lines; the underlying physical channel is only started/stopped when
 * required. Serialized through the physical channel's lock.
 */
static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	/* Even channels are controlled via ACTIVE, odd ones via ACTIVO. */
	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		/* A running channel gets a suspend request on its event
		 * line; an idle one is deactivated directly. */
		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		/* Only stop the physical channel once no other logical
		 * channel still has events linked on it. */
		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	/* Dispatch to the logical or physical channel implementation. */
	return chan_is_logical(d40c) ?
		__d40_execute_command_log(d40c, command) :
		__d40_execute_command_phy(d40c, command);
}
/* Translate the configured mode option into its PRMO register encoding. */
static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};
	const unsigned int *map =
		chan_is_physical(d40c) ? phy_map : log_map;

	return map[d40c->dma_cfg.mode_opt];
}
/* Write the channel's static mode/option configuration to the hardware. */
static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		/* The logical channel index is derived from the physical
		 * channel number. */
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			& D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}
/* Number of bytes left to transfer on the channel's current job. */
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c)) {
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	} else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);

		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			>> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	/* Element count scaled to bytes by the destination data width. */
	return num_elt << d40c->dma_cfg.dst_info.data_width;
}
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
bool is_link;
if (chan_is_logical(d40c))
is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
else
is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
& D40_SREG_LNK_PHYS_LNK_MASK;
return is_link;
}
/* Suspend a busy channel; no-op when nothing is in flight. */
static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	/* Keep the controller powered while issuing the command. */
	pm_runtime_get_sync(d40c->base->dev);
	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
/* Resume a previously paused channel; no-op when it is not busy. */
static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	/* Keep the controller powered while issuing the command. */
	pm_runtime_get_sync(d40c->base->dev);

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
/*
 * dmaengine tx_submit hook: assign a cookie and move the descriptor to
 * the channel's pending queue under the channel lock.
 */
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}
/* Kick (or resume) the channel hardware. */
static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}
/*
 * Move the oldest queued descriptor to the active list and start it on
 * the hardware. Returns the started descriptor, or NULL if the queue was
 * empty or the start command failed. Caller holds the channel lock.
 */
static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			/* First job on an idle channel: power up. */
			pm_runtime_get_sync(d40c->base->dev);
		}

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}
/*
 * Terminal-count handler, called from interrupt context with the channel
 * lock held: advance or complete the channel's active descriptor and
 * schedule the completion tasklet.
 */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload
		 * it, and only when the list is completed. We need to check
		 * for done because the interrupt will hit for every link, and
		 * not just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			/* Wrap around so the cyclic job restarts. */
			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		/* More links to load for this descriptor: continue it. */
		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		/* Descriptor done; start the next one or go idle. */
		if (d40_queue_start(d40c) == NULL) {
			d40c->busy = false;
			pm_runtime_mark_last_busy(d40c->base->dev);
			pm_runtime_put_autosuspend(d40c->base->dev);
		}

		d40_desc_remove(d40d);
		d40_desc_done(d40c, d40d);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}
/*
 * Completion tasklet: report one finished (or cyclic) descriptor to the
 * client and reschedule itself while work remains.
 */
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first entry from the done list */
	d40d = d40_first_done(d40c);
	if (d40d == NULL) {
		/* Check if we have reached here for cyclic job */
		d40d = d40_first_active_get(d40c);
		if (d40d == NULL || !d40d->cyclic)
			goto err;
	}

	/* Cyclic descriptors stay active and never complete a cookie. */
	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		/* Acked descriptors are freed; un-acked ones are parked on
		 * the client list until the client acks them. */
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	/* Invoke the client callback outside the channel lock. */
	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue manouver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
/*
 * Top-level interrupt handler: scan all interrupt status registers and
 * dispatch each set bit to the owning channel.
 */
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	int i;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;
	/* NOTE(review): variable-length array on the stack; il_size is a
	 * fixed, small per-controller value — confirm bound if reused. */
	u32 regs[base->gen_dmac.il_size];
	struct d40_interrupt_lookup *il = base->gen_dmac.il;
	u32 il_size = base->gen_dmac.il_size;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < il_size; i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {
		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * il_size, chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * il_size)
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];

		if (!d40c) {
			/*
			 * No error because this can happen if something else
			 * in the system is using the channel.
			 */
			continue;
		}

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
/*
 * Sanity-check a client-supplied channel configuration. Every violation
 * is logged; returns 0 when valid, -EINVAL otherwise.
 */
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	/* A TX device needs either a platform address or a runtime one. */
	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid TX channel address (%d)\n",
			 conf->dst_dev_type);
		res = -EINVAL;
	}

	/* Same for the RX side. */
	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid RX channel address (%d)\n",
			 conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		chan_err(d40c, "Invalid dst\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		chan_err(d40c, "Invalid src\n");
		res = -EINVAL;
	}

	/* Logical channels are driven by event lines; mem-to-mem has none. */
	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		chan_err(d40c, "No event line\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		chan_err(d40c, "Invalid event group\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}
/*
 * Try to claim a physical channel (or one event line of it, for logical
 * channels) under the channel's lock. Returns true on success and sets
 * *first_user when the claim is the first allocation on this channel.
 */
static bool d40_alloc_mask_set(struct d40_phy_res *phy,
			       bool is_src, int log_event_line, bool is_log,
			       bool *first_user)
{
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	*first_user = ((phy->allocated_src | phy->allocated_dst)
			== D40_ALLOC_FREE);

	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		/* A full physical allocation blocks all event lines. */
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		/* Claim the event-line bit if not already taken. */
		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}
/*
 * Release a physical channel (log_event_line == 0) or one logical event
 * line of it. Returns true when the channel is completely free afterwards.
 */
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical allocation: release both halves. */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		/* Last event line gone: drop back to fully free. */
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);
	return is_free;
}
/*
 * Allocate a physical channel, or an event line on one for logical
 * channels, according to the channel's configured direction and mode.
 * On success the channel is entered into the relevant lookup table.
 */
static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	int num_phy_chans;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;
	num_phy_chans = d40c->base->num_phy_chans;

	/* Pick the device type and logical channel number from the
	 * transfer direction. */
	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			if (d40c->dma_cfg.use_fixed_channel) {
				i = d40c->dma_cfg.phy_channel;
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
					goto found_phy;
			} else {
				for (i = 0; i < num_phy_chans; i++) {
					if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
						goto found_phy;
				}
			}
		} else
			/* Peripheral transfers must use a channel in the
			 * pair belonging to the device's event group. */
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;

		if (d40c->dma_cfg.use_fixed_channel) {
			i = d40c->dma_cfg.phy_channel;

			/* The fixed channel must belong to the event group
			 * pair for this device. */
			if ((i != phy_num) && (i != phy_num + 1)) {
				dev_err(chan2dev(d40c),
					"invalid fixed phy channel %d\n", i);
				return -EINVAL;
			}

			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
					       is_log, first_phy_user))
				goto found_log;

			dev_err(chan2dev(d40c),
				"could not allocate fixed phy channel %d\n", i);
			return -EINVAL;
		}

		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		} else {
			/* Destinations scan the pair in reverse order. */
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	/* Publish the channel in the appropriate lookup table. */
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
static int d40_config_memcpy(struct d40_chan *d40c)
{
dma_cap_mask_t cap = d40c->chan.device->cap_mask;
if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
memcpy[d40c->chan.chan_id];
} else if (dma_has_cap(DMA_MEMCPY, cap) &&
dma_has_cap(DMA_SLAVE, cap)) {
d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
} else {
chan_err(d40c, "No memcpy\n");
return -EINVAL;
}
return 0;
}
/*
 * Stop the channel, free all of its descriptors and release its physical
 * channel / event-line allocation.
 */
static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	/* Pick the event line matching the configured direction. */
	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	/* Keep the controller powered while stopping the channel. */
	pm_runtime_get_sync(d40c->base->dev);
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "stop failed\n");
		goto out;
	}

	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);

	if (chan_is_logical(d40c))
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
	else
		d40c->base->lookup_phy_chans[phy->num] = NULL;

	/* Drop the extra power reference a busy channel is holding. */
	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->busy = false;
	d40c->phy_chan = NULL;
	d40c->configured = false;
out:

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	return res;
}
/* Report whether the channel (physical or logical) is currently paused. */
static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		/* Physical channels: read the 2-bit state field. */
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	/* Logical channels: inspect the relevant event-line state. */
	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;

}
/* Bytes left on the channel's current transfer, read under the lock. */
static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;
	u32 left;

	spin_lock_irqsave(&d40c->lock, flags);
	left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return left;
}
/*
 * Fill in the logical-channel LLI chains (source and destination) for
 * the given sg lists. Returns 0 on success or a negative errno.
 *
 * Fix: the return value of the source-side conversion was previously
 * overwritten by the destination-side call, silently discarding a
 * source-side error. Check it before continuing.
 */
static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	/* Build the source half of the LLI chain. */
	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);
	if (ret < 0)
		return ret;

	/* Build the destination half. */
	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}
/*
 * Fill in the physical-channel LLI chains (source and destination) for
 * the given sg lists and flush them to the device. Returns 0 on success
 * or a negative errno.
 *
 * Fix: the return value of the source-side conversion was previously
 * overwritten by the destination-side call, silently discarding a
 * source-side error. Check it before continuing.
 */
static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	/* Cyclic jobs need a terminal-count interrupt per period. */
	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	/* Build the source half of the LLI chain. */
	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);
	if (ret < 0)
		return ret;

	/* Build the destination half. */
	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);
	if (ret < 0)
		return ret;

	/* Make the LLI chain visible to the DMA controller. */
	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return 0;
}
/*
 * Allocate and initialise a descriptor for an sg transfer: compute the
 * required LLI count, allocate the LLI pool and set up the dmaengine tx
 * descriptor. Returns NULL on any failure.
 */
static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	/* d40_sg_2_dmalen() returns -EINVAL on misaligned sizes. */
	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}
/* Resolve the peripheral-side bus address for a slave transfer. */
static dma_addr_t
d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
{
	struct stedma40_platform_data *plat = chan->base->plat_data;
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;

	/* An address set via runtime config always takes precedence. */
	if (chan->runtime_addr)
		return chan->runtime_addr;

	switch (direction) {
	case DMA_DEV_TO_MEM:
		return plat->dev_rx[cfg->src_dev_type];
	case DMA_MEM_TO_DEV:
		return plat->dev_tx[cfg->dst_dev_type];
	default:
		return 0;
	}
}
/*
 * Common preparation path for all sg-based transfers (slave, memcpy,
 * cyclic): build a descriptor, fill its LLIs and park it on the prepare
 * queue until it is submitted.
 */
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_transfer_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr = 0;
	dma_addr_t dst_dev_addr = 0;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto err;

	/* A circular sg list (last entry links back to the first)
	 * indicates a cyclic transfer. */
	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
		desc->cyclic = true;

	if (direction != DMA_TRANS_NONE) {
		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);

		if (direction == DMA_DEV_TO_MEM)
			src_dev_addr = dev_addr;
		else if (direction == DMA_MEM_TO_DEV)
			dst_dev_addr = dev_addr;
	}

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto err;
	}

	/*
	 * add descriptor to the prepare queue in order to be able
	 * to free them later in terminate_all
	 */
	list_add_tail(&desc->node, &chan->prepare_queue);

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;

err:
	if (desc)
		d40_desc_free(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}
/*
 * stedma40_filter - dma_request_channel() filter callback.
 * @chan: candidate channel
 * @data: optional struct stedma40_chan_cfg; NULL requests memcpy setup
 *
 * Validates @data against the channel (or applies the default memcpy
 * configuration when @data is NULL). On success the configuration is
 * stored and the channel is marked configured.
 * Returns true when the channel is acceptable.
 */
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = data;
	int ret;

	if (!cfg) {
		ret = d40_config_memcpy(d40c);
	} else {
		ret = d40_validate_conf(d40c, cfg);
		if (ret == 0)
			d40c->dma_cfg = *cfg;
	}

	if (ret == 0)
		d40c->configured = true;

	return !ret;
}
EXPORT_SYMBOL(stedma40_filter);
/*
 * Program the realtime and high-priority event-group registers for one
 * event line (source or destination side) of a channel, according to
 * the channel's dma_cfg.realtime / dma_cfg.high_priority flags.
 */
static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 rtreg;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = 1 << event;
	u32 prioreg;
	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;

	/* Pick the set- or clear-register depending on the desired state. */
	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;

	/*
	 * Due to a hardware bug, in some cases a logical channel triggered by
	 * a high priority destination event line can generate extra packet
	 * transactions.
	 *
	 * The workaround is to not set the high priority level for the
	 * destination event lines that trigger logical channels.
	 */
	if (!src && chan_is_logical(d40c))
		highprio = false;

	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	/* One register per event group; each group occupies 4 bytes. */
	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}
static void d40_set_prio_realtime(struct d40_chan *d40c)
{
if (d40c->base->rev < 3)
return;
if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
(d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
(d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
}
/* DMA ENGINE functions */
/*
 * dmaengine hook: allocate the physical/logical hardware resources for
 * a channel and program its default configuration registers.
 * Returns 0 on success or a negative errno.
 */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;
	spin_lock_irqsave(&d40c->lock, flags);

	dma_cookie_init(chan);

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			chan_err(d40c, "Failed to configure memcpy channel\n");
			goto fail;
		}
	}

	err = d40_allocate_channel(d40c, &is_free_phy);
	if (err) {
		chan_err(d40c, "Failed to allocate channel\n");
		d40c->configured = false;
		/*
		 * NOTE(review): jumping to "fail" here runs
		 * pm_runtime_put_autosuspend() without the matching
		 * pm_runtime_get_sync() below having been taken —
		 * looks like an unbalanced PM usage count; confirm.
		 */
		goto fail;
	}

	pm_runtime_get_sync(d40c->base->dev);
	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, chan_is_logical(d40c));

	d40_set_prio_realtime(d40c);

	if (chan_is_logical(d40c)) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		/* Each logical channel owns a slot in the LCPA area. */
		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
		 chan_is_logical(d40c) ? "logical" : "physical",
		 d40c->phy_chan->num,
		 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");


	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
/* dmaengine hook: release the hardware resources held by a channel. */
static void d40_free_chan_resources(struct dma_chan *chan)
{
	unsigned long irq_flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);

	if (!d40c->phy_chan) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, irq_flags);

	if (d40_free_dma(d40c))
		chan_err(d40c, "Failed to free channel\n");

	spin_unlock_irqrestore(&d40c->lock, irq_flags);
}
/* dmaengine hook: prepare a single contiguous memory-to-memory copy. */
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist src_sg, dst_sg;

	sg_init_table(&src_sg, 1);
	sg_init_table(&dst_sg, 1);

	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = size;

	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = size;

	/* DMA_NONE: no device-address lookup for mem-to-mem transfers. */
	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}
/*
 * dmaengine hook: prepare a scatter/gather memcpy. Source and
 * destination lists must pair up entry-for-entry.
 */
static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
		   struct scatterlist *dst_sg, unsigned int dst_nents,
		   struct scatterlist *src_sg, unsigned int src_nents,
		   unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE,
			   dma_flags);
}
/* dmaengine hook: prepare a slave (device <-> memory) sg transfer. */
static struct dma_async_tx_descriptor *
d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction direction,
		  unsigned long dma_flags, void *context)
{
	if (!is_slave_direction(direction))
		return NULL;

	/* The same list serves both sides; @direction selects its role. */
	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}
/*
 * dmaengine hook: prepare a cyclic (circular-buffer) transfer.
 * Builds a temporary scatterlist of @buf_len / @period_len entries and
 * chains its tail back onto its head so that d40_prep_sg() detects the
 * self-reference and marks the descriptor cyclic.
 * Returns the descriptor or NULL on failure.
 */
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		     size_t buf_len, size_t period_len,
		     enum dma_transfer_direction direction, unsigned long flags,
		     void *context)
{
	unsigned int periods = buf_len / period_len;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
	/* BUGFIX: GFP_NOWAIT allocations can fail; don't dereference NULL. */
	if (!sg)
		return NULL;

	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = dma_addr;
		sg_dma_len(&sg[i]) = period_len;
		dma_addr += period_len;
	}

	/* Chain the extra tail entry back to the head (cyclic marker). */
	sg[periods].offset = 0;
	sg_dma_len(&sg[periods]) = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);

	kfree(sg);

	return txd;
}
/*
 * dmaengine hook: report the completion status of a cookie. Adds the
 * hardware residue for not-yet-complete transfers and reports
 * DMA_PAUSED when the channel is paused.
 */
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	enum dma_status ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		/*
		 * NOTE(review): -EINVAL is not a member of enum dma_status;
		 * callers comparing against DMA_* constants get an
		 * out-of-range value. Consider DMA_ERROR — verify callers.
		 */
		return -EINVAL;
	}

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, stedma40_residue(chan));

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;

	return ret;
}
/*
 * dmaengine hook: move all descriptors from the pending queue onto the
 * run queue and start the hardware if it is currently idle.
 */
static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);

	/* Busy means that queued jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}
/*
 * Stop the channel and discard every descriptor on all of its queues.
 * Backs the DMA_TERMINATE_ALL control command.
 */
static void d40_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int ret;

	spin_lock_irqsave(&d40c->lock, flags);

	pm_runtime_get_sync(d40c->base->dev);
	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (ret)
		chan_err(d40c, "Failed to stop channel\n");

	d40_term_all(d40c);
	/* Drop the reference taken by pm_runtime_get_sync() above. */
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	if (d40c->busy) {
		/*
		 * NOTE(review): this extra put presumably balances a PM
		 * reference held while the channel was busy (taken outside
		 * this function) — confirm against the queue-start path.
		 */
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}
	d40c->busy = false;

	spin_unlock_irqrestore(&d40c->lock, flags);
}
/*
 * Translate a generic dmaengine bus width + max burst into the DMA40
 * half-channel parameters (data width and packet size) stored in @info.
 * Returns 0, or -EINVAL for an unsupported bus width.
 */
static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
			    struct stedma40_half_channel_info *info,
			    enum dma_slave_buswidth width,
			    u32 maxburst)
{
	enum stedma40_periph_data_width addr_width;
	int psize;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			width);
		return -EINVAL;
	}

	/* Map the burst length to the largest packet size it covers. */
	if (chan_is_logical(d40c))
		psize = maxburst >= 16 ? STEDMA40_PSIZE_LOG_16 :
			maxburst >= 8  ? STEDMA40_PSIZE_LOG_8 :
			maxburst >= 4  ? STEDMA40_PSIZE_LOG_4 :
					 STEDMA40_PSIZE_LOG_1;
	else
		psize = maxburst >= 16 ? STEDMA40_PSIZE_PHY_16 :
			maxburst >= 8  ? STEDMA40_PSIZE_PHY_8 :
			maxburst >= 4  ? STEDMA40_PSIZE_PHY_4 :
					 STEDMA40_PSIZE_PHY_1;

	info->data_width = addr_width;
	info->psize = psize;
	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	return 0;
}
/* Runtime reconfiguration extension */
/*
 * Runtime reconfiguration of a channel from a dma_slave_config
 * (DMA_SLAVE_CONFIG). Resolves the device-side address, fills in the
 * src/dst half-channel info (bus width, burst) and recomputes the
 * channel configuration registers.
 * Returns 0 or a negative errno on invalid configuration.
 */
static int d40_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	dma_addr_t config_addr;
	u32 src_maxburst, dst_maxburst;
	int ret;

	src_addr_width = config->src_addr_width;
	src_maxburst = config->src_maxburst;
	dst_addr_width = config->dst_addr_width;
	dst_maxburst = config->dst_maxburst;

	if (config->direction == DMA_DEV_TO_MEM) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		/* Configure the memory side */
		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			dst_addr_width = src_addr_width;
		if (dst_maxburst == 0)
			dst_maxburst = src_maxburst;

	} else if (config->direction == DMA_MEM_TO_DEV) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		/* Configure the memory side */
		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			src_addr_width = dst_addr_width;
		if (src_maxburst == 0)
			src_maxburst = dst_maxburst;
	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return -EINVAL;
	}

	/* Both sides must move the same number of bytes per burst. */
	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
		dev_err(d40c->base->dev,
			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
			src_maxburst,
			src_addr_width,
			dst_maxburst,
			dst_addr_width);
		return -EINVAL;
	}

	/* Bursts are capped at 16 elements; rescale the other side. */
	if (src_maxburst > 16) {
		src_maxburst = 16;
		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
	} else if (dst_maxburst > 16) {
		dst_maxburst = 16;
		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
	}

	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
					  src_addr_width,
					  src_maxburst);
	if (ret)
		return ret;

	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
					  dst_addr_width,
					  dst_maxburst);
	if (ret)
		return ret;

	/* Fill in register values */
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg,
			    &d40c->dst_def_cfg, false);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d/%d, "
		"maxburst %d/%d elements, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
		src_addr_width, dst_addr_width,
		src_maxburst, dst_maxburst);

	return 0;
}
/*
 * dmaengine hook: dispatch the generic control commands to their
 * channel operations. Unknown commands yield -ENXIO.
 */
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	if (cmd == DMA_TERMINATE_ALL) {
		d40_terminate_all(chan);
		return 0;
	}
	if (cmd == DMA_PAUSE)
		return d40_pause(d40c);
	if (cmd == DMA_RESUME)
		return d40_resume(d40c);
	if (cmd == DMA_SLAVE_CONFIG)
		return d40_set_runtime_config(chan,
					      (struct dma_slave_config *) arg);

	/* Other commands are unimplemented */
	return -ENXIO;
}
/* Initialization functions */
/*
 * Initialize @num_chans channel structures starting at @chans[@offset]
 * and attach them to the given dma_device's channel list.
 */
static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	struct d40_chan *d40c = &chans[offset];
	struct d40_chan *end = &chans[offset + num_chans];

	INIT_LIST_HEAD(&dma->channels);

	for (; d40c < end; d40c++) {
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		/* No logical channel assigned yet. */
		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->done);
		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->pending_queue);
		INIT_LIST_HEAD(&d40c->client);
		INIT_LIST_HEAD(&d40c->prepare_queue);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node, &dma->channels);
	}
}
/* Wire the dmaengine callbacks into @dev based on its capability mask. */
static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
	/* Hooks common to every capability mask. */
	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
	dev->device_free_chan_resources = d40_free_chan_resources;
	dev->device_issue_pending = d40_issue_pending;
	dev->device_tx_status = d40_tx_status;
	dev->device_control = d40_control;
	dev->dev = base->dev;

	/* Capability-dependent prep hooks. */
	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
		dev->device_prep_slave_sg = d40_prep_slave_sg;

	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
		dev->device_prep_dma_memcpy = d40_prep_memcpy;
		/*
		 * This controller can only access address at even
		 * 32bit boundaries, i.e. 2^2
		 */
		dev->copy_align = 2;
	}

	if (dma_has_cap(DMA_SG, dev->cap_mask))
		dev->device_prep_dma_sg = d40_prep_memcpy_sg;

	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
}
/*
 * Register the three dmaengine devices exposed by this controller:
 *  - dma_slave:  logical channels for slave (peripheral) transfers
 *  - dma_memcpy: logical channels dedicated to memcpy
 *  - dma_both:   physical channels capable of both
 * Returns 0 or a negative errno; on failure, any already-registered
 * devices are unregistered again.
 */
static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err ;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

	d40_ops_init(base, &base->dma_slave);

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		d40_err(base->dev, "Failed to register slave channels\n");
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	d40_ops_init(base, &base->dma_memcpy);

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
	/*
	 * BUGFIX: DMA_CYCLIC was previously set on dma_slave's mask
	 * (again) instead of dma_both's, so the combined-capability
	 * device never advertised cyclic support.
	 */
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);

	d40_ops_init(base, &base->dma_both);
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
/* Suspend resume functionality */
#ifdef CONFIG_PM
/* System suspend: power down the LCPA regulator, if one is in use. */
static int dma40_pm_suspend(struct device *dev)
{
	struct d40_base *base =
		platform_get_drvdata(to_platform_device(dev));

	if (!base->lcpa_regulator)
		return 0;

	return regulator_disable(base->lcpa_regulator);
}
/*
 * Runtime suspend: save the controller registers and gate the event
 * group clocks (except on hardware rev 1, where gating is unsafe).
 */
static int dma40_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	d40_save_restore_registers(base, true);

	/* Don't disable/enable clocks for v1 due to HW bugs */
	if (base->rev != 1)
		writel_relaxed(base->gcc_pwr_off_mask,
			       base->virtbase + D40_DREG_GCC);

	return 0;
}
/*
 * Runtime resume: restore registers saved at suspend (once the
 * controller has completed initialization) and ungate all clocks.
 */
static int dma40_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	if (base->initialized)
		d40_save_restore_registers(base, false);

	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
		       base->virtbase + D40_DREG_GCC);
	return 0;
}
/* System resume: re-enable the LCPA regulator, if one is in use. */
static int dma40_resume(struct device *dev)
{
	struct d40_base *base =
		platform_get_drvdata(to_platform_device(dev));

	if (!base->lcpa_regulator)
		return 0;

	return regulator_enable(base->lcpa_regulator);
}
/* System and runtime PM callback table. */
static const struct dev_pm_ops dma40_pm_ops = {
	.suspend		= dma40_pm_suspend,
	.runtime_suspend	= dma40_runtime_suspend,
	.runtime_resume		= dma40_runtime_resume,
	.resume			= dma40_resume,
};
#define DMA40_PM_OPS	(&dma40_pm_ops)
#else
/* PM disabled: register no callbacks. */
#define DMA40_PM_OPS	NULL
#endif
/* Initialization functions. */
/*
 * Scan the PRSME/PRSMO security-mode registers to find which physical
 * channels are usable, mark secure-only and platform-disabled channels
 * as permanently occupied, note soft-LLI channels, and compute the
 * clock gating mask for reserved channels.
 * Returns the number of physical channels available to the driver.
 */
static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;
	int gcc = D40_DREG_GCC_ENA;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		/* Two mode bits per channel, even/odd split over the regs. */
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
			base->phy_res[i].reserved = true;
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_SRC);
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_DST);
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			base->phy_res[i].reserved = false;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		base->phy_res[chan].reserved = true;
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_SRC);
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_DST);
		num_phy_chans_avail--;
	}

	/* Mark soft_lli channels */
	for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
		int chan = base->plat_data->soft_lli_chans[i];

		base->phy_res[chan].use_soft_lli = true;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	/*
	 * To keep things simple, Enable all clocks initially.
	 * The clocks will get managed later post channel allocation.
	 * The clocks for the event lines on which reserved channels exists
	 * are not managed here.
	 */
	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
	base->gcc_pwr_off_mask = gcc;

	return num_phy_chans_avail;
}
/*
 * Probe-time hardware detection: map the controller, verify its AMBA
 * PrimeCell ID, read out the hardware revision and channel counts, and
 * allocate the driver-private d40_base with all per-channel lookup and
 * bookkeeping arrays.
 * Returns the initialized base, or NULL on failure (everything acquired
 * up to that point is released again).
 *
 * BUGFIX: the failure path used to call iounmap(virtbase) twice, both
 * before and after release_mem_region() — the second (duplicate) unmap
 * has been removed.
 */
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int clk_ret = -EINVAL;
	int i;
	u32 pid;
	u32 cid;
	u8 rev;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		d40_err(&pdev->dev, "No matching clock found\n");
		goto failure;
	}

	clk_ret = clk_prepare_enable(clk);
	if (clk_ret) {
		d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
		goto failure;
	}

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* This is just a regular AMBA PrimeCell ID actually */
	for (pid = 0, i = 0; i < 4; i++)
		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
			& 255) << (i * 8);
	for (cid = 0, i = 0; i < 4; i++)
		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
			& 255) << (i * 8);

	if (cid != AMBA_CID) {
		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
		goto failure;
	}
	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			AMBA_MANF_BITS(pid),
			AMBA_VENDOR_ST);
		goto failure;
	}

	/*
	 * HW revision:
	 * DB8500ed has revision 0
	 * ? has revision 1
	 * DB8500v1 has revision 2
	 * DB8500v2 has revision 3
	 * AP9540v1 has revision 4
	 * DB8540v1 has revision 4
	 */
	rev = AMBA_REV_BITS(pid);

	plat_data = pdev->dev.platform_data;

	/* The number of physical channels on this HW */
	if (plat_data->num_of_phy_chans)
		num_phy_chans = plat_data->num_of_phy_chans;
	else
		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
		 rev, res->start, num_phy_chans);

	if (rev < 2) {
		d40_err(&pdev->dev, "hardware revision: %d is not supported",
			rev);
		goto failure;
	}

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	/* Single allocation: the base struct plus every channel struct. */
	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		d40_err(&pdev->dev, "Out of memory\n");
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	/* 14 physical channels selects the v4b register layout. */
	if (base->plat_data->num_of_phy_chans == 14) {
		base->gen_dmac.backup = d40_backup_regs_v4b;
		base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
		base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
		base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
		base->gen_dmac.il = il_v4b;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
		base->gen_dmac.init_reg = dma_init_reg_v4b;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
	} else {
		if (base->rev >= 3) {
			base->gen_dmac.backup = d40_backup_regs_v4a;
			base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
		}
		base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
		base->gen_dmac.realtime_en = D40_DREG_RSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
		base->gen_dmac.il = il_v4a;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
		base->gen_dmac.init_reg = dma_init_reg_v4a;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
	}

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels are event lines for all
		 * src devices and dst devices
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
					    sizeof(d40_backup_regs_chan),
					    GFP_KERNEL);
	if (!base->reg_val_backup_chan)
		goto failure;

	base->lcla_pool.alloc_map =
		kzalloc(num_phy_chans * sizeof(struct d40_desc *)
			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!clk_ret)
		clk_disable_unprepare(clk);
	if (!IS_ERR(clk))
		clk_put(clk);
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));
	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->reg_val_backup_chan);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
/*
 * Program the controller's initial register state: apply the
 * variant-specific init table, put every usable channel into physical
 * mode, and enable/clear interrupts for the non-reserved channels.
 */
static void __init d40_hw_init(struct d40_base *base)
{

	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;
	struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
	u32 reg_size = base->gen_dmac.init_reg_size;

	for (i = 0; i < reg_size; i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		/* Reserved (e.g. secure) channels are left untouched. */
		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;

	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);

	/* These are __initdata and cannot be accessed after init */
	base->gen_dmac.init_reg = NULL;
	base->gen_dmac.init_reg_size = 0;
}
/*
 * Allocate the LCLA (logical channel link address) area, which the
 * hardware requires to be 256 KiB (18-bit) aligned. Tries page
 * allocations until an aligned one is found; falls back to a single
 * over-sized kmalloc that is aligned manually. The area is then DMA
 * mapped and its physical address programmed into the controller.
 * Returns 0 or a negative errno.
 */
static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned,
	 * To fulfill this hardware requirement without wasting 256 kb
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculating how many pages that are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	/* Release the unaligned attempts; page_list[i] (if found) is kept. */
	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success with finding the correct
		 * alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
/*
 * Platform-driver probe: detect and map the hardware, set up the LCPA
 * and LCLA memory areas, install the interrupt handler, enable runtime
 * PM, register the dmaengine devices and finally program the hardware.
 * On any failure, everything acquired so far is torn down.
 * Returns 0 or a negative errno.
 */
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}
	/* If lcla has to be located in ESRAM we don't need to allocate */
	if (base->plat_data->use_esram_lcla) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							"lcla_esram");
		if (!res) {
			ret = -ENOENT;
			d40_err(&pdev->dev,
				"No \"lcla_esram\" memory resource\n");
			goto failure;
		}
		base->lcla_pool.base = ioremap(res->start,
						resource_size(res));
		if (!base->lcla_pool.base) {
			ret = -ENOMEM;
			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
			goto failure;
		}
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	} else {
		ret = d40_lcla_allocate(base);
		if (ret) {
			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
			goto failure;
		}
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	pm_runtime_irq_safe(base->dev);
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_enable(base->dev);
	pm_runtime_resume(base->dev);

	if (base->plat_data->use_esram_lcla) {

		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
		if (IS_ERR(base->lcpa_regulator)) {
			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
			base->lcpa_regulator = NULL;
			goto failure;
		}

		ret = regulator_enable(base->lcpa_regulator);
		if (ret) {
			d40_err(&pdev->dev,
				"Failed to enable lcpa_regulator\n");
			regulator_put(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}
	}

	/* From here on, runtime suspend may restore saved registers. */
	base->initialized = true;
	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	base->dev->dma_parms = &base->dma_parms;
	err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
	if (err) {
		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
		goto failure;
	}

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
			iounmap(base->lcla_pool.base);
			base->lcla_pool.base = NULL;
		}

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		/* Page-allocated LCLA only; kmalloc fallback is kfree'd. */
		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable_unprepare(base->clk);
			clk_put(base->clk);
		}

		if (base->lcpa_regulator) {
			regulator_disable(base->lcpa_regulator);
			regulator_put(base->lcpa_regulator);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}
/*
 * Platform driver shell for the DMA40 engine.  There is deliberately no
 * .probe member here: registration goes through platform_driver_probe()
 * below with d40_probe passed explicitly, since the device is known at
 * boot and cannot be hotplugged.
 */
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
		.pm = DMA40_PM_OPS,
	},
};
/*
 * Probe-once registration: binds d40_probe to the (single, non-hotplug)
 * DMA40 platform device.  Returns 0 on success or a negative errno.
 */
static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);
| gpl-2.0 |
scottellis/linux-beagleboard | drivers/video/fbdev/pvr2fb.c | 942 | 31500 | /*
* drivers/video/pvr2fb.c
*
* Frame buffer and fbcon support for the NEC PowerVR2 found within the Sega
* Dreamcast.
*
* Copyright (c) 2001 M. R. Brown <mrbrown@0xd6.org>
* Copyright (c) 2001 - 2008 Paul Mundt <lethal@linux-sh.org>
*
* This driver is mostly based on the excellent amifb and vfb sources. It uses
* an odd scheme for converting hardware values to/from framebuffer values,
* here are some hacked-up formulas:
*
* The Dreamcast has screen offsets from each side of its four borders and
* the start offsets of the display window. I used these values to calculate
* 'pseudo' values (think of them as placeholders) for the fb video mode, so
* that when it came time to convert these values back into their hardware
* values, I could just add mode- specific offsets to get the correct mode
* settings:
*
* left_margin = diwstart_h - borderstart_h;
* right_margin = borderstop_h - (diwstart_h + xres);
* upper_margin = diwstart_v - borderstart_v;
* lower_margin = borderstop_v - (diwstart_h + yres);
*
* hsync_len = borderstart_h + (hsync_total - borderstop_h);
* vsync_len = borderstart_v + (vsync_total - borderstop_v);
*
* Then, when it's time to convert back to hardware settings, the only
* constants are the borderstart_* offsets, all other values are derived from
* the fb video mode:
*
* // PAL
* borderstart_h = 116;
* borderstart_v = 44;
* ...
* borderstop_h = borderstart_h + hsync_total - hsync_len;
* ...
* diwstart_v = borderstart_v - upper_margin;
*
* However, in the current implementation, the borderstart values haven't had
* the benefit of being fully researched, so some modes may be broken.
*/
#undef DEBUG
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#ifdef CONFIG_SH_DREAMCAST
#include <asm/machvec.h>
#include <mach-dreamcast/mach/sysasic.h>
#endif
#ifdef CONFIG_PVR2_DMA
#include <linux/pagemap.h>
#include <mach/dma.h>
#include <asm/dma.h>
#endif
#ifdef CONFIG_SH_STORE_QUEUES
#include <linux/uaccess.h>
#include <cpu/sq.h>
#endif
#ifndef PCI_DEVICE_ID_NEC_NEON250
# define PCI_DEVICE_ID_NEC_NEON250 0x0067
#endif
/* 2D video registers */
#define DISP_BASE par->mmio_base
#define DISP_BRDRCOLR (DISP_BASE + 0x40)
#define DISP_DIWMODE (DISP_BASE + 0x44)
#define DISP_DIWADDRL (DISP_BASE + 0x50)
#define DISP_DIWADDRS (DISP_BASE + 0x54)
#define DISP_DIWSIZE (DISP_BASE + 0x5c)
#define DISP_SYNCCONF (DISP_BASE + 0xd0)
#define DISP_BRDRHORZ (DISP_BASE + 0xd4)
#define DISP_SYNCSIZE (DISP_BASE + 0xd8)
#define DISP_BRDRVERT (DISP_BASE + 0xdc)
#define DISP_DIWCONF (DISP_BASE + 0xe8)
#define DISP_DIWHSTRT (DISP_BASE + 0xec)
#define DISP_DIWVSTRT (DISP_BASE + 0xf0)
#define DISP_PIXDEPTH (DISP_BASE + 0x108)
/* Pixel clocks, one for TV output, doubled for VGA output */
#define TV_CLK 74239
#define VGA_CLK 37119
/* This is for 60Hz - the VTOTAL is doubled for interlaced modes */
#define PAL_HTOTAL 863
#define PAL_VTOTAL 312
#define NTSC_HTOTAL 857
#define NTSC_VTOTAL 262
/* Supported cable types */
enum { CT_VGA, CT_NONE, CT_RGB, CT_COMPOSITE };
/* Supported video output types */
enum { VO_PAL, VO_NTSC, VO_VGA };
/* Supported palette types */
enum { PAL_ARGB1555, PAL_RGB565, PAL_ARGB4444, PAL_ARGB8888 };
/* Generic name<->value mapping used for the "cable:"/"output:" options. */
struct pvr2_params { unsigned int val; char *name; };

/* Recognized cable types, looked up by name or by CT_* value. */
static struct pvr2_params cables[] = {
	{ CT_VGA, "VGA" }, { CT_RGB, "RGB" }, { CT_COMPOSITE, "COMPOSITE" },
};

/* Recognized video output formats, looked up by name or by VO_* value. */
static struct pvr2_params outputs[] = {
	{ VO_PAL, "PAL" }, { VO_NTSC, "NTSC" }, { VO_VGA, "VGA" },
};
/*
* This describes the current video mode
*/
/*
 * Hardware state derived from the current fb_var_screeninfo by
 * pvr2fb_set_par().  All timing fields are in the hardware's units
 * (clocks per line / lines per field), ready to be programmed into the
 * DISP_* registers by pvr2_init_display().
 */
static struct pvr2fb_par {
	unsigned int hsync_total;	/* Clocks/line */
	unsigned int vsync_total;	/* Lines/field */
	unsigned int borderstart_h;
	unsigned int borderstop_h;
	unsigned int borderstart_v;
	unsigned int borderstop_v;
	unsigned int diwstart_h;	/* Horizontal offset of the display field */
	unsigned int diwstart_v;	/* Vertical offset of the display field, for
					   interlaced modes, this is the long field */
	unsigned long disp_start;	/* Address of image within VRAM */
	unsigned char is_interlaced;	/* Is the display interlaced? */
	unsigned char is_doublescan;	/* Are scanlines output twice? (doublescan) */
	unsigned char is_lowres;	/* Is horizontal pixel-doubling enabled? */
	unsigned long mmio_base;	/* MMIO base */
	u32 palette[16];		/* Pseudo-palette for the console layer */
} *currentpar;				/* Points at fb_info->par after init */
/* The single framebuffer instance this driver manages. */
static struct fb_info *fb_info;

/* Fixed screen parameters; the board init fills in the memory windows. */
static struct fb_fix_screeninfo pvr2_fix = {
	.id =		"NEC PowerVR2",
	.type =		FB_TYPE_PACKED_PIXELS,
	.visual =	FB_VISUAL_TRUECOLOR,
	.ypanstep =	1,
	.ywrapstep =	1,
	.accel =	FB_ACCEL_NONE,
};

/* Fallback video mode (640x480 RGB565) if fb_find_mode() comes up empty. */
static struct fb_var_screeninfo pvr2_var = {
	.xres =		640,
	.yres =		480,
	.xres_virtual =	640,
	.yres_virtual =	480,
	.bits_per_pixel = 16,
	.red =		{ 11, 5, 0 },
	.green =	{ 5, 6, 0 },
	.blue =		{ 0, 5, 0 },
	.activate =	FB_ACTIVATE_NOW,
	.height =	-1,
	.width =	-1,
	.vmode =	FB_VMODE_NONINTERLACED,
};

/* Current cable/output selection; may be overridden on the command line. */
static int cable_type = CT_VGA;
static int video_output = VO_VGA;

/* "nopan"/"nowrap" command line options disable y-panning/y-wrapping. */
static int nopan = 0;
static int nowrap = 1;

/*
 * We do all updating, blanking, etc. during the vertical retrace period
 */
static unsigned int do_vmode_full = 0;	/* Change the video mode */
static unsigned int do_vmode_pan = 0;	/* Update the video mode */
static short do_blank = 0;		/* (Un)Blank the screen */

static unsigned int is_blanked = 0;	/* Is the screen blanked? */

#ifdef CONFIG_SH_STORE_QUEUES
/* Store-queue mapping of the video memory (set up in common init). */
static unsigned long pvr2fb_map;
#endif

#ifdef CONFIG_PVR2_DMA
/* Cascade and PVR2 DMA channel numbers used by pvr2fb_write(). */
static unsigned int shdma = PVR2_CASCADE_CHAN;
static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS;
#endif
static int pvr2fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue,
unsigned int transp, struct fb_info *info);
static int pvr2fb_blank(int blank, struct fb_info *info);
static unsigned long get_line_length(int xres_virtual, int bpp);
static void set_color_bitfields(struct fb_var_screeninfo *var);
static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
static int pvr2fb_set_par(struct fb_info *info);
static void pvr2_update_display(struct fb_info *info);
static void pvr2_init_display(struct fb_info *info);
static void pvr2_do_blank(void);
static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id);
static int pvr2_init_cable(void);
static int pvr2_get_param(const struct pvr2_params *p, const char *s,
int val, int size);
#ifdef CONFIG_PVR2_DMA
static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
size_t count, loff_t *ppos);
#endif
/*
 * Frame buffer operations.  Drawing is unaccelerated (cfb_* helpers);
 * the only hardware path is the optional DMA-backed write method.
 */
static struct fb_ops pvr2fb_ops = {
	.owner		= THIS_MODULE,
	.fb_setcolreg	= pvr2fb_setcolreg,
	.fb_blank	= pvr2fb_blank,
	.fb_check_var	= pvr2fb_check_var,
	.fb_set_par	= pvr2fb_set_par,
#ifdef CONFIG_PVR2_DMA
	.fb_write	= pvr2fb_write,
#endif
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
};
/*
 * Built-in mode database.  Only the TV_CLK/VGA_CLK pixel clocks appear
 * here since pvr2fb_check_var() rejects every other clock value.
 */
static struct fb_videomode pvr2_modedb[] = {
    /*
     * Broadcast video modes (PAL and NTSC). I'm unfamiliar with
     * PAL-M and PAL-N, but from what I've read both modes parallel PAL and
     * NTSC, so it shouldn't be a problem (I hope).
     */

    {
	/* 640x480 @ 60Hz interlaced (NTSC) */
	"ntsc_640x480i", 60, 640, 480, TV_CLK, 38, 33, 0, 18, 146, 26,
	FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
    }, {
	/* 640x240 @ 60Hz (NTSC) */
	/* XXX: Broken! Don't use... */
	"ntsc_640x240", 60, 640, 240, TV_CLK, 38, 33, 0, 0, 146, 22,
	FB_SYNC_BROADCAST, FB_VMODE_YWRAP
    }, {
	/* 640x480 @ 60hz (VGA) */
	"vga_640x480", 60, 640, 480, VGA_CLK, 38, 33, 0, 18, 146, 26,
	0, FB_VMODE_YWRAP
    },
};

#define NUM_TOTAL_MODES  ARRAY_SIZE(pvr2_modedb)

/* Indices into pvr2_modedb[] for each output type's default mode. */
#define DEFMODE_NTSC	0
#define DEFMODE_PAL	0
#define DEFMODE_VGA	2

/* Default mode index; switched to DEFMODE_VGA when VGA output is chosen. */
static int defmode = DEFMODE_NTSC;

/* Mode string from the command line; NULL means "640x480@60". */
static char *mode_option = NULL;
static inline void pvr2fb_set_pal_type(unsigned int type)
{
struct pvr2fb_par *par = (struct pvr2fb_par *)fb_info->par;
fb_writel(type, par->mmio_base + 0x108);
}
/*
 * Write one 32-bit entry into the hardware palette RAM, which starts
 * at mmio_base + 0x1000 with one word per entry.
 */
static inline void pvr2fb_set_pal_entry(struct pvr2fb_par *par,
					unsigned int regno,
					unsigned int val)
{
	unsigned long entry_addr = par->mmio_base + 0x1000 + (regno << 2);

	fb_writel(val, entry_addr);
}
/*
 * fb_blank hook.  The actual register change is deferred to the VBL
 * interrupt handler; a zero (unblank) request is stored as -1 so the
 * handler can tell it apart from "nothing pending".
 */
static int pvr2fb_blank(int blank, struct fb_info *info)
{
	if (blank)
		do_blank = blank;
	else
		do_blank = -1;

	return 0;
}
/*
 * Bytes per scanline for the given virtual width and depth, with the
 * line rounded up to a 32-bit boundary.
 */
static inline unsigned long get_line_length(int xres_virtual, int bpp)
{
	unsigned long line_bits = (unsigned long)(xres_virtual * bpp);

	line_bits = (line_bits + 31) & ~31UL;	/* align to 32 bits */
	return line_bits >> 3;			/* bits -> bytes */
}
/*
 * Fill in the RGB(A) bitfield layout for the requested depth and, for
 * the depths with hardware palette support (16 and 32 bpp), program the
 * matching palette entry format.
 */
static void set_color_bitfields(struct fb_var_screeninfo *var)
{
	switch (var->bits_per_pixel) {
	case 16: /* RGB 565 */
		pvr2fb_set_pal_type(PAL_RGB565);
		var->red.offset = 11;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 6;
		var->blue.offset = 0;
		var->blue.length = 5;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case 24: /* RGB 888 */
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case 32: /* ARGB 8888 */
		pvr2fb_set_pal_type(PAL_ARGB8888);
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 24;
		var->transp.length = 8;
		break;
	}
}
/*
 * Set a single palette / pseudo-palette entry.
 *
 * Fix: the bounds check used "regno > info->cmap.len", which let the
 * out-of-range index regno == cmap.len through; valid entries are
 * 0 .. cmap.len - 1, so the check is now ">=".
 */
static int pvr2fb_setcolreg(unsigned int regno, unsigned int red,
			    unsigned int green, unsigned int blue,
			    unsigned int transp, struct fb_info *info)
{
	struct pvr2fb_par *par = (struct pvr2fb_par *)info->par;
	unsigned int tmp;

	if (regno >= info->cmap.len)
		return 1;

	/*
	 * We only support the hardware palette for 16 and 32bpp. It's also
	 * expected that the palette format has been set by the time we get
	 * here, so we don't waste time setting it again.
	 */
	switch (info->var.bits_per_pixel) {
	case 16: /* RGB 565 */
		tmp =  (red   & 0xf800)       |
		      ((green & 0xfc00) >> 5) |
		      ((blue  & 0xf800) >> 11);

		pvr2fb_set_pal_entry(par, regno, tmp);
		break;
	case 24: /* RGB 888 */
		/* No hardware palette at this depth (see comment above). */
		red >>= 8; green >>= 8; blue >>= 8;
		tmp = (red << 16) | (green << 8) | blue;
		break;
	case 32: /* ARGB 8888 */
		red >>= 8; green >>= 8; blue >>= 8;
		tmp = (transp << 24) | (red << 16) | (green << 8) | blue;

		pvr2fb_set_pal_entry(par, regno, tmp);
		break;
	default:
		pr_debug("Invalid bit depth %d?!?\n", info->var.bits_per_pixel);
		return 1;
	}

	/* Mirror the first 16 entries into the console pseudo-palette. */
	if (regno < 16)
		((u32 *)(info->pseudo_palette))[regno] = tmp;

	return 0;
}
/*
 * Commit info->var to the driver's private hardware state.
 *
 * Fixes vs. the previous revision:
 *  - disp_start applied the pan offset times line_length twice
 *    ("(line_length * yoffset) * line_length"); the byte offset of row
 *    yoffset is simply line_length * yoffset.
 *  - is_interlaced/is_doublescan/is_lowres were only ever set, never
 *    cleared, so they stayed latched when switching away from an
 *    interlaced/doublescan/low-res mode.  They are now recomputed from
 *    scratch on every call.
 */
static int pvr2fb_set_par(struct fb_info *info)
{
	struct pvr2fb_par *par = (struct pvr2fb_par *)info->par;
	struct fb_var_screeninfo *var = &info->var;
	unsigned long line_length;
	unsigned int vtotal;

	/*
	 * XXX: It's possible that a user could use a VGA box, change the cable
	 * type in hardware (i.e. switch from VGA<->composite), then change
	 * modes (i.e. switching to another VT). If that happens we should
	 * automagically change the output format to cope, but currently I
	 * don't have a VGA box to make sure this works properly.
	 */
	cable_type = pvr2_init_cable();
	if (cable_type == CT_VGA && video_output != VO_VGA)
		video_output = VO_VGA;

	var->vmode &= FB_VMODE_MASK;
	par->is_interlaced = ((var->vmode & FB_VMODE_INTERLACED) &&
			      video_output != VO_VGA) ? 1 : 0;
	/*
	 * XXX: Need to be more creative with this (i.e. allow doublescan for
	 * PAL/NTSC output).
	 */
	par->is_doublescan = ((var->vmode & FB_VMODE_DOUBLE) &&
			      video_output == VO_VGA) ? 1 : 0;

	par->hsync_total = var->left_margin + var->xres + var->right_margin +
			   var->hsync_len;
	par->vsync_total = var->upper_margin + var->yres + var->lower_margin +
			   var->vsync_len;

	if (var->sync & FB_SYNC_BROADCAST) {
		vtotal = par->vsync_total;
		if (par->is_interlaced)
			vtotal /= 2;
		if (vtotal > (PAL_VTOTAL + NTSC_VTOTAL)/2) {
			/* XXX: Check for start values here... */
			/* XXX: Check hardware for PAL-compatibility */
			par->borderstart_h = 116;
			par->borderstart_v = 44;
		} else {
			/* NTSC video output */
			par->borderstart_h = 126;
			par->borderstart_v = 18;
		}
	} else {
		/* VGA mode */
		/* XXX: What else needs to be checked? */
		/*
		 * XXX: We have a little freedom in VGA modes, what ranges
		 * should be here (i.e. hsync/vsync totals, etc.)?
		 */
		par->borderstart_h = 126;
		par->borderstart_v = 40;
	}

	/* Calculate the remaining offsets */
	par->diwstart_h = par->borderstart_h + var->left_margin;
	par->diwstart_v = par->borderstart_v + var->upper_margin;
	par->borderstop_h = par->diwstart_h + var->xres +
			    var->right_margin;
	par->borderstop_v = par->diwstart_v + var->yres +
			    var->lower_margin;

	if (!par->is_interlaced)
		par->borderstop_v /= 2;

	par->is_lowres = (info->var.xres < 640) ? 1 : 0;

	line_length = get_line_length(var->xres_virtual, var->bits_per_pixel);
	par->disp_start = info->fix.smem_start + line_length * var->yoffset;
	info->fix.line_length = line_length;

	return 0;
}
/*
 * Validate and constrain a proposed video mode.
 *
 * Fixes vs. the previous revision:
 *  - vsync_len now follows the formula documented at the top of this
 *    file ("vsync_len = borderstart_v + (vsync_total - borderstop_v)"),
 *    matching the hsync_len computation; it previously added and
 *    subtracted borderstop_v, degenerating into vsync_total.
 *  - removed the always-false "< 0" comparisons on xoffset/yoffset,
 *    which are unsigned fields.
 */
static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct pvr2fb_par *par = (struct pvr2fb_par *)info->par;
	unsigned int vtotal, hsync_total;
	unsigned long line_length;

	/* Only the two hardware pixel clocks are supported. */
	if (var->pixclock != TV_CLK && var->pixclock != VGA_CLK) {
		pr_debug("Invalid pixclock value %d\n", var->pixclock);
		return -EINVAL;
	}

	if (var->xres < 320)
		var->xres = 320;
	if (var->yres < 240)
		var->yres = 240;
	if (var->xres_virtual < var->xres)
		var->xres_virtual = var->xres;
	if (var->yres_virtual < var->yres)
		var->yres_virtual = var->yres;

	/* Round the depth up to the nearest supported packed format. */
	if (var->bits_per_pixel <= 16)
		var->bits_per_pixel = 16;
	else if (var->bits_per_pixel <= 24)
		var->bits_per_pixel = 24;
	else if (var->bits_per_pixel <= 32)
		var->bits_per_pixel = 32;

	set_color_bitfields(var);

	if (var->vmode & FB_VMODE_YWRAP) {
		if (var->xoffset || var->yoffset >= var->yres_virtual) {
			var->xoffset = var->yoffset = 0;
		} else {
			if (var->xoffset > var->xres_virtual - var->xres ||
			    var->yoffset > var->yres_virtual - var->yres)
				var->xoffset = var->yoffset = 0;
		}
	} else {
		var->xoffset = var->yoffset = 0;
	}

	/*
	 * XXX: Need to be more creative with this (i.e. allow doublecan for
	 * PAL/NTSC output).
	 */
	if (var->yres < 480 && video_output == VO_VGA)
		var->vmode |= FB_VMODE_DOUBLE;

	if (video_output != VO_VGA) {
		var->sync |= FB_SYNC_BROADCAST;
		var->vmode |= FB_VMODE_INTERLACED;
	} else {
		var->sync &= ~FB_SYNC_BROADCAST;
		var->vmode &= ~FB_VMODE_INTERLACED;
		var->vmode |= FB_VMODE_NONINTERLACED;
	}

	if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_TEST) {
		var->right_margin = par->borderstop_h -
				   (par->diwstart_h + var->xres);
		var->left_margin  = par->diwstart_h - par->borderstart_h;
		var->hsync_len    = par->borderstart_h +
				   (par->hsync_total - par->borderstop_h);

		var->upper_margin = par->diwstart_v - par->borderstart_v;
		var->lower_margin = par->borderstop_v -
				   (par->diwstart_v + var->yres);
		/* See the hsync_len/vsync_len formulas in the file header. */
		var->vsync_len    = par->borderstart_v +
				   (par->vsync_total - par->borderstop_v);
	}

	hsync_total = var->left_margin + var->xres + var->right_margin +
		      var->hsync_len;
	vtotal = var->upper_margin + var->yres + var->lower_margin +
		 var->vsync_len;

	if (var->sync & FB_SYNC_BROADCAST) {
		if (var->vmode & FB_VMODE_INTERLACED)
			vtotal /= 2;
		if (vtotal > (PAL_VTOTAL + NTSC_VTOTAL)/2) {
			/* PAL video output */
			/* XXX: Should be using a range here ... ? */
			if (hsync_total != PAL_HTOTAL) {
				pr_debug("invalid hsync total for PAL\n");
				return -EINVAL;
			}
		} else {
			/* NTSC video output */
			if (hsync_total != NTSC_HTOTAL) {
				pr_debug("invalid hsync total for NTSC\n");
				return -EINVAL;
			}
		}
	}

	/* Check memory sizes */
	line_length = get_line_length(var->xres_virtual, var->bits_per_pixel);
	if (line_length * var->yres_virtual > info->fix.smem_len)
		return -ENOMEM;

	return 0;
}
/*
 * Reprogram the long- and short-field base addresses after a pan or
 * mode change.  The short field starts one (panned) scanline after the
 * long field.
 */
static void pvr2_update_display(struct fb_info *info)
{
	struct pvr2fb_par *par = (struct pvr2fb_par *)info->par;
	struct fb_var_screeninfo *var = &info->var;
	unsigned long short_field_offs;

	short_field_offs = get_line_length(var->xoffset + var->xres,
					   var->bits_per_pixel);

	fb_writel(par->disp_start, DISP_DIWADDRL);
	fb_writel(par->disp_start + short_field_offs, DISP_DIWADDRS);
}
/*
* Initialize the video mode. Currently, the 16bpp and 24bpp modes aren't
* very stable. It's probably due to the fact that a lot of the 2D video
* registers are still undocumented.
*/
/*
 * Program the whole display pipeline from the current par/var state:
 * sync totals, window size, field base addresses, borders, window start
 * position, pixel depth and output mode.  Called from the VBL handler
 * on a full mode change, and once at init time (PIXDEPTH must be
 * written before anything is displayed).
 */
static void pvr2_init_display(struct fb_info *info)
{
	struct pvr2fb_par *par = (struct pvr2fb_par *) info->par;
	struct fb_var_screeninfo *var = &info->var;
	unsigned int diw_height, diw_width, diw_modulo = 1;
	unsigned int bytesperpixel = var->bits_per_pixel >> 3;

	/* hsync and vsync totals */
	fb_writel((par->vsync_total << 16) | par->hsync_total, DISP_SYNCSIZE);

	/* column height, modulo, row width */
	/* since we're "panning" within vram, we need to offset things based
	 * on the offset from the virtual x start to our real gfx. */
	if (video_output != VO_VGA && par->is_interlaced)
		diw_modulo += info->fix.line_length / 4;
	diw_height = (par->is_interlaced ? var->yres / 2 : var->yres);
	diw_width = get_line_length(var->xres, var->bits_per_pixel) / 4;
	/* NOTE(review): the register appears to take size-1 values, hence
	 * the pre-decrements -- confirm against PVR2 register docs. */
	fb_writel((diw_modulo << 20) | (--diw_height << 10) | --diw_width,
		  DISP_DIWSIZE);

	/* display address, long and short fields */
	fb_writel(par->disp_start, DISP_DIWADDRL);
	fb_writel(par->disp_start +
		  get_line_length(var->xoffset+var->xres, var->bits_per_pixel),
		  DISP_DIWADDRS);

	/* border horizontal, border vertical, border color */
	fb_writel((par->borderstart_h << 16) | par->borderstop_h, DISP_BRDRHORZ);
	fb_writel((par->borderstart_v << 16) | par->borderstop_v, DISP_BRDRVERT);
	fb_writel(0, DISP_BRDRCOLR);

	/* display window start position */
	fb_writel(par->diwstart_h, DISP_DIWHSTRT);
	fb_writel((par->diwstart_v << 16) | par->diwstart_v, DISP_DIWVSTRT);

	/* misc. settings */
	fb_writel((0x16 << 16) | par->is_lowres, DISP_DIWCONF);

	/* clock doubler (for VGA), scan doubler, display enable */
	fb_writel(((video_output == VO_VGA) << 23) |
		  (par->is_doublescan << 1) | 1, DISP_DIWMODE);

	/* bits per pixel */
	fb_writel(fb_readl(DISP_DIWMODE) | (--bytesperpixel << 2), DISP_DIWMODE);
	fb_writel(bytesperpixel << 2, DISP_PIXDEPTH);

	/* video enable, color sync, interlace,
	 * hsync and vsync polarity (currently unused) */
	fb_writel(0x100 | ((par->is_interlaced /*|4*/) << 4), DISP_SYNCCONF);
}
/* Simulate blanking by making the border cover the entire screen */
#define BLANK_BIT (1<<3)
/*
 * Apply a pending blank/unblank request (runs from the VBL handler).
 * Blanking is simulated by expanding the border over the whole screen.
 */
static void pvr2_do_blank(void)
{
	struct pvr2fb_par *par = currentpar;
	unsigned long diwconf = fb_readl(DISP_DIWCONF);

	if (do_blank > 0)
		diwconf |= BLANK_BIT;
	else
		diwconf &= ~BLANK_BIT;
	fb_writel(diwconf, DISP_DIWCONF);

	is_blanked = (do_blank > 0) ? do_blank : 0;
}
/*
 * Vertical-blank interrupt: apply whatever pan, full mode change or
 * blank request was deferred since the last retrace.
 */
static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id)
{
	struct fb_info *info = dev_id;

	if (do_vmode_pan || do_vmode_full)
		pvr2_update_display(info);

	if (do_vmode_full) {
		pvr2_init_display(info);
		do_vmode_full = 0;
	}
	do_vmode_pan = 0;

	if (do_blank) {
		pvr2_do_blank();
		do_blank = 0;
	}

	return IRQ_HANDLED;
}
/*
* Determine the cable type and initialize the cable output format. Don't do
* anything if the cable type has been overidden (via "cable:XX").
*/
#define PCTRA 0xff80002c
#define PDTRA 0xff800030
#define VOUTC 0xa0702c00
/*
 * Detect the attached cable (unless overridden via "cable:XX") and
 * program the matching output format.  Returns the cable type.
 */
static int pvr2_init_cable(void)
{
	if (cable_type < 0) {
		fb_writel((fb_readl(PCTRA) & 0xfff0ffff) | 0x000a0000,
			  PCTRA);
		cable_type = (fb_readw(PDTRA) >> 8) & 3;
	}

	/* Now select the output format (either composite or other) */
	/* XXX: Save the previous val first, as this reg is also AICA
	   related */
	switch (cable_type) {
	case CT_COMPOSITE:
		fb_writel(3 << 8, VOUTC);
		break;
	case CT_RGB:
		fb_writel(1 << 9, VOUTC);
		break;
	default:
		fb_writel(0, VOUTC);
		break;
	}

	return cable_type;
}
#ifdef CONFIG_PVR2_DMA
/*
 * DMA-backed write() into video memory.
 *
 * Fixes vs. the previous revision:
 *  - get_user_pages_unlocked() may return a negative errno; that value
 *    was assigned to the unsigned nr_pages, wrapping huge and causing
 *    the cleanup loop to walk garbage.  Hard failures now release
 *    nothing and propagate the error.
 *  - the contiguity check read page_address(pages[nr_pages]), one past
 *    the end of the array; the end address is now derived from the
 *    last valid page plus PAGE_SIZE (identical value for a contiguous
 *    run, no out-of-bounds read otherwise).
 */
static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
			    size_t count, loff_t *ppos)
{
	unsigned long dst, start, end, len;
	unsigned int nr_pages;
	struct page **pages;
	int ret, i;

	nr_pages = (count + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_unlocked(current, current->mm, (unsigned long)buf,
				      nr_pages, WRITE, 0, pages);
	if (ret < 0) {
		/* Hard failure: nothing was pinned, nothing to release. */
		nr_pages = 0;
		goto out_unmap;
	}
	if (ret < nr_pages) {
		/* Partial pin: release what we got and bail out. */
		nr_pages = ret;
		ret = -EINVAL;
		goto out_unmap;
	}

	dma_configure_channel(shdma, 0x12c1);

	dst   = (unsigned long)fb_info->screen_base + *ppos;
	start = (unsigned long)page_address(pages[0]);
	end   = (unsigned long)page_address(pages[nr_pages - 1]) + PAGE_SIZE;
	len   = nr_pages << PAGE_SHIFT;

	/* Half-assed contig check */
	if (start + len == end) {
		/* As we do this in one shot, it's either all or nothing.. */
		if ((*ppos + len) > fb_info->fix.smem_len) {
			ret = -ENOSPC;
			goto out_unmap;
		}

		dma_write(shdma, start, 0, len);
		dma_write(pvr2dma, 0, dst, len);
		dma_wait_for_completion(pvr2dma);

		goto out;
	}

	/* Not contiguous, writeout per-page instead.. */
	for (i = 0; i < nr_pages; i++, dst += PAGE_SIZE) {
		if ((*ppos + (i << PAGE_SHIFT)) > fb_info->fix.smem_len) {
			ret = -ENOSPC;
			goto out_unmap;
		}

		dma_write_page(shdma, (unsigned long)page_address(pages[i]), 0);
		dma_write_page(pvr2dma, 0, dst);
		dma_wait_for_completion(pvr2dma);
	}

out:
	*ppos += count;
	ret = count;

out_unmap:
	for (i = 0; i < nr_pages; i++)
		page_cache_release(pages[i]);

	kfree(pages);

	return ret;
}
#endif /* CONFIG_PVR2_DMA */
/**
* pvr2fb_common_init
*
* Common init code for the PVR2 chips.
*
* This mostly takes care of the common aspects of the fb setup and
* registration. It's expected that the board-specific init code has
* already setup pvr2_fix with something meaningful at this point.
*
* Device info reporting is also done here, as well as picking a sane
* default from the modedb. For board-specific modelines, simply define
* a per-board modedb.
*
* Also worth noting is that the cable and video output types are likely
* always going to be VGA for the PCI-based PVR2 boards, but we leave this
* in for flexibility anyways. Who knows, maybe someone has tv-out on a
* PCI-based version of these things ;-)
*/
/**
 * pvr2fb_common_init
 *
 * Common init code for the PVR2 chips.
 *
 * This mostly takes care of the common aspects of the fb setup and
 * registration. It's expected that the board-specific init code has
 * already setup pvr2_fix with something meaningful at this point.
 *
 * Device info reporting is also done here, as well as picking a sane
 * default from the modedb. For board-specific modelines, simply define
 * a per-board modedb.
 *
 * Fix: the fb_alloc_cmap() return value was ignored, and the cmap was
 * leaked when register_framebuffer() failed; both paths are handled now.
 */
static int pvr2fb_common_init(void)
{
	struct pvr2fb_par *par = currentpar;
	unsigned long modememused, rev;

	fb_info->screen_base = ioremap_nocache(pvr2_fix.smem_start,
					       pvr2_fix.smem_len);
	if (!fb_info->screen_base) {
		printk(KERN_ERR "pvr2fb: Failed to remap smem space\n");
		goto out_err;
	}

	par->mmio_base = (unsigned long)ioremap_nocache(pvr2_fix.mmio_start,
							pvr2_fix.mmio_len);
	if (!par->mmio_base) {
		printk(KERN_ERR "pvr2fb: Failed to remap mmio space\n");
		goto out_err;
	}

	fb_memset(fb_info->screen_base, 0, pvr2_fix.smem_len);

	pvr2_fix.ypanstep	= nopan  ? 0 : 1;
	pvr2_fix.ywrapstep	= nowrap ? 0 : 1;

	fb_info->fbops		= &pvr2fb_ops;
	fb_info->fix		= pvr2_fix;
	fb_info->par		= currentpar;
	fb_info->pseudo_palette	= currentpar->palette;
	fb_info->flags		= FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;

	if (video_output == VO_VGA)
		defmode = DEFMODE_VGA;

	if (!mode_option)
		mode_option = "640x480@60";

	if (!fb_find_mode(&fb_info->var, fb_info, mode_option, pvr2_modedb,
			  NUM_TOTAL_MODES, &pvr2_modedb[defmode], 16))
		fb_info->var = pvr2_var;

	if (fb_alloc_cmap(&fb_info->cmap, 256, 0) < 0)
		goto out_err;

	if (register_framebuffer(fb_info) < 0) {
		fb_dealloc_cmap(&fb_info->cmap);
		goto out_err;
	}
	/* Must write PIXDEPTH to register before anything is displayed - so force init */
	pvr2_init_display(fb_info);

	modememused = get_line_length(fb_info->var.xres_virtual,
				      fb_info->var.bits_per_pixel);
	modememused *= fb_info->var.yres_virtual;

	rev = fb_readl(par->mmio_base + 0x04);

	fb_info(fb_info, "%s (rev %ld.%ld) frame buffer device, using %ldk/%ldk of video memory\n",
		fb_info->fix.id, (rev >> 4) & 0x0f, rev & 0x0f,
		modememused >> 10,
		(unsigned long)(fb_info->fix.smem_len >> 10));
	fb_info(fb_info, "Mode %dx%d-%d pitch = %ld cable: %s video output: %s\n",
		fb_info->var.xres, fb_info->var.yres,
		fb_info->var.bits_per_pixel,
		get_line_length(fb_info->var.xres, fb_info->var.bits_per_pixel),
		(char *)pvr2_get_param(cables, NULL, cable_type, 3),
		(char *)pvr2_get_param(outputs, NULL, video_output, 3));

#ifdef CONFIG_SH_STORE_QUEUES
	fb_notice(fb_info, "registering with SQ API\n");

	pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len,
			      fb_info->fix.id, PAGE_SHARED);

	fb_notice(fb_info, "Mapped video memory to SQ addr 0x%lx\n",
		  pvr2fb_map);
#endif

	return 0;

out_err:
	if (fb_info->screen_base)
		iounmap(fb_info->screen_base);
	if (par->mmio_base)
		iounmap((void *)par->mmio_base);

	return -ENXIO;
}
#ifdef CONFIG_SH_DREAMCAST
/*
 * Dreamcast board init: guess monitor limits from the attached cable,
 * pick a default output format, fill in the fixed memory/register
 * windows and hook the VBL interrupt (plus the DMA channel when
 * CONFIG_PVR2_DMA is enabled) before running the common init.
 */
static int __init pvr2fb_dc_init(void)
{
	if (!mach_is_dreamcast())
		return -ENXIO;

	/* Make a guess at the monitor based on the attached cable */
	if (pvr2_init_cable() == CT_VGA) {
		fb_info->monspecs.hfmin = 30000;
		fb_info->monspecs.hfmax = 70000;
		fb_info->monspecs.vfmin = 60;
		fb_info->monspecs.vfmax = 60;
	} else {
		/* Not VGA, using a TV (taken from acornfb) */
		fb_info->monspecs.hfmin = 15469;
		fb_info->monspecs.hfmax = 15781;
		fb_info->monspecs.vfmin = 49;
		fb_info->monspecs.vfmax = 51;
	}

	/*
	 * XXX: This needs to pull default video output via BIOS or other means
	 */
	if (video_output < 0) {
		if (cable_type == CT_VGA) {
			video_output = VO_VGA;
		} else {
			video_output = VO_NTSC;
		}
	}

	/*
	 * Nothing exciting about the DC PVR2 .. only a measly 8MiB.
	 */
	pvr2_fix.smem_start	= 0xa5000000;	/* RAM starts here */
	pvr2_fix.smem_len	= 8 << 20;

	pvr2_fix.mmio_start	= 0xa05f8000;	/* registers start here */
	pvr2_fix.mmio_len	= 0x2000;

	if (request_irq(HW_EVENT_VSYNC, pvr2fb_interrupt, IRQF_SHARED,
			"pvr2 VBL handler", fb_info)) {
		return -EBUSY;
	}

#ifdef CONFIG_PVR2_DMA
	if (request_dma(pvr2dma, "pvr2") != 0) {
		free_irq(HW_EVENT_VSYNC, fb_info);
		return -EBUSY;
	}
#endif

	return pvr2fb_common_init();
}
/*
 * Dreamcast board teardown: undo pvr2fb_common_init()'s mappings and
 * release the VBL interrupt / DMA channel taken in pvr2fb_dc_init().
 */
static void __exit pvr2fb_dc_exit(void)
{
	if (fb_info->screen_base) {
		iounmap(fb_info->screen_base);
		fb_info->screen_base = NULL;
	}
	if (currentpar->mmio_base) {
		iounmap((void *)currentpar->mmio_base);
		currentpar->mmio_base = 0;
	}

	free_irq(HW_EVENT_VSYNC, fb_info);
#ifdef CONFIG_PVR2_DMA
	free_dma(pvr2dma);
#endif
}
#endif /* CONFIG_SH_DREAMCAST */
#ifdef CONFIG_PCI
/*
 * PCI probe for the NEC Neon 250 board.
 *
 * Fix: failure paths now unwind what was acquired -- the regions are
 * released when the common init fails, and the device is disabled when
 * anything after pci_enable_device() fails (previously both were
 * leaked on error).
 */
static int pvr2fb_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		printk(KERN_ERR "pvr2fb: PCI enable failed\n");
		return ret;
	}

	ret = pci_request_regions(pdev, "pvr2fb");
	if (ret) {
		printk(KERN_ERR "pvr2fb: PCI request regions failed\n");
		goto out_disable;
	}

	/*
	 * Slightly more exciting than the DC PVR2 .. 16MiB!
	 * BAR 0 is the video memory, BAR 1 the control registers.
	 */
	pvr2_fix.smem_start	= pci_resource_start(pdev, 0);
	pvr2_fix.smem_len	= pci_resource_len(pdev, 0);

	pvr2_fix.mmio_start	= pci_resource_start(pdev, 1);
	pvr2_fix.mmio_len	= pci_resource_len(pdev, 1);

	fb_info->device		= &pdev->dev;

	ret = pvr2fb_common_init();
	if (ret)
		goto out_release;

	return 0;

out_release:
	pci_release_regions(pdev);
out_disable:
	pci_disable_device(pdev);
	return ret;
}
/*
 * PCI remove: unmap what pvr2fb_common_init() set up and give the BARs
 * back.  Framebuffer unregistration itself happens in pvr2fb_exit().
 */
static void pvr2fb_pci_remove(struct pci_dev *pdev)
{
	if (fb_info->screen_base) {
		iounmap(fb_info->screen_base);
		fb_info->screen_base = NULL;
	}
	if (currentpar->mmio_base) {
		iounmap((void *)currentpar->mmio_base);
		currentpar->mmio_base = 0;
	}

	pci_release_regions(pdev);
}
/* Match table: the NEC Neon 250, the known PCI PowerVR2 board. */
static struct pci_device_id pvr2fb_pci_tbl[] = {
	{ PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_NEON250,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, pvr2fb_pci_tbl);

static struct pci_driver pvr2fb_pci_driver = {
	.name		= "pvr2fb",
	.id_table	= pvr2fb_pci_tbl,
	.probe		= pvr2fb_pci_probe,
	.remove		= pvr2fb_pci_remove,
};
/* Board hook: register the PCI driver (probing happens via the table). */
static int __init pvr2fb_pci_init(void)
{
	return pci_register_driver(&pvr2fb_pci_driver);
}
/* Board hook: unregister the PCI driver, triggering pvr2fb_pci_remove(). */
static void __exit pvr2fb_pci_exit(void)
{
	pci_unregister_driver(&pvr2fb_pci_driver);
}
#endif /* CONFIG_PCI */
/*
 * Two-way lookup in a pvr2_params table of @size entries:
 *  - with @s non-NULL: case-insensitive prefix match on the name,
 *    returning the associated value;
 *  - with @s NULL: match on @val, returning the name pointer cast to int.
 * Returns -1 when nothing matches.
 *
 * NOTE(review): squeezing a char * through an int return is only safe
 * where pointers fit in an int (true on the 32-bit SH/Dreamcast targets
 * this driver serves); on a 64-bit build this would truncate.  Worth
 * splitting into separate value/name lookups if ever ported.
 */
static int pvr2_get_param(const struct pvr2_params *p, const char *s, int val,
			  int size)
{
	int i;

	for (i = 0 ; i < size ; i++ ) {
		if (s != NULL) {
			if (!strncasecmp(p[i].name, s, strlen(s)))
				return p[i].val;
		} else {
			if (p[i].val == val)
				return (int)p[i].name;
		}
	}

	return -1;
}
/*
* Parse command arguments. Supported arguments are:
* inverse Use inverse color maps
* cable:composite|rgb|vga Override the video cable type
* output:NTSC|PAL|VGA Override the video output format
*
* <xres>x<yres>[-<bpp>][@<refresh>] or,
* <name>[-<bpp>][@<refresh>] Startup using this video mode
*/
#ifndef MODULE
/*
 * Parse the "pvr2fb=" command line options (see the comment block above
 * for the accepted syntax).
 *
 * Fixes vs. the previous revision:
 *  - cable_arg/output_arg were uninitialized stack buffers, so the
 *    trailing "if (*cable_arg)" tests read indeterminate memory when
 *    the corresponding option was absent; both now start empty.
 *  - the unbounded strcpy() from the command line into the fixed 80
 *    byte buffers is replaced with a bounded strlcpy().
 */
static int __init pvr2fb_setup(char *options)
{
	char *this_opt;
	char cable_arg[80] = "";
	char output_arg[80] = "";

	if (!options || !*options)
		return 0;

	while ((this_opt = strsep(&options, ","))) {
		if (!*this_opt)
			continue;

		if (!strcmp(this_opt, "inverse")) {
			fb_invert_cmaps();
		} else if (!strncmp(this_opt, "cable:", 6)) {
			strlcpy(cable_arg, this_opt + 6, sizeof(cable_arg));
		} else if (!strncmp(this_opt, "output:", 7)) {
			strlcpy(output_arg, this_opt + 7, sizeof(output_arg));
		} else if (!strncmp(this_opt, "nopan", 5)) {
			nopan = 1;
		} else if (!strncmp(this_opt, "nowrap", 6)) {
			nowrap = 1;
		} else {
			mode_option = this_opt;
		}
	}

	if (*cable_arg)
		cable_type = pvr2_get_param(cables, cable_arg, 0, 3);
	if (*output_arg)
		video_output = pvr2_get_param(outputs, output_arg, 0, 3);

	return 0;
}
#endif
/*
 * Table of board backends compiled into this driver, terminated by an
 * empty sentinel entry.  Marked __refdata because the init/exit hooks
 * live in __init/__exit sections.
 */
static struct pvr2_board {
	int (*init)(void);	/* board probe + pvr2fb_common_init() */
	void (*exit)(void);	/* board teardown */
	char name[16];		/* human-readable board name for logs */
} board_driver[] __refdata = {
#ifdef CONFIG_SH_DREAMCAST
	{ pvr2fb_dc_init, pvr2fb_dc_exit, "Sega DC PVR2" },
#endif
#ifdef CONFIG_PCI
	{ pvr2fb_pci_init, pvr2fb_pci_exit, "PCI PVR2" },
#endif
	{ 0, },
};
/*
 * Module entry point: parse options, allocate the fb_info (with the
 * pvr2fb_par embedded as ->par) and run each configured board backend.
 *
 * Fix: removed the dead "size" computation -- it was a leftover from
 * before the framebuffer_alloc() conversion and its value was never
 * used.
 */
static int __init pvr2fb_init(void)
{
	int i, ret = -ENODEV;

#ifndef MODULE
	char *option = NULL;

	if (fb_get_options("pvr2fb", &option))
		return -ENODEV;
	pvr2fb_setup(option);
#endif

	fb_info = framebuffer_alloc(sizeof(struct pvr2fb_par), NULL);
	if (!fb_info) {
		printk(KERN_ERR "Failed to allocate memory for fb_info\n");
		return -ENOMEM;
	}

	currentpar = fb_info->par;

	for (i = 0; i < ARRAY_SIZE(board_driver); i++) {
		struct pvr2_board *pvr_board = board_driver + i;

		if (!pvr_board->init)
			continue;

		ret = pvr_board->init();
		if (ret != 0) {
			printk(KERN_ERR "pvr2fb: Failed init of %s device\n",
			       pvr_board->name);
			framebuffer_release(fb_info);
			break;
		}
	}

	return ret;
}
/*
 * Module exit: tear down every board backend, drop the store-queue
 * mapping and unregister/release the framebuffer.
 */
static void __exit pvr2fb_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(board_driver); i++) {
		struct pvr2_board *pvr_board = &board_driver[i];

		if (pvr_board->exit)
			pvr_board->exit();
	}

#ifdef CONFIG_SH_STORE_QUEUES
	sq_unmap(pvr2fb_map);
#endif

	unregister_framebuffer(fb_info);
	framebuffer_release(fb_info);
}
module_init(pvr2fb_init);
module_exit(pvr2fb_exit);
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Framebuffer driver for NEC PowerVR 2 based graphics boards");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Radium-Devices/Radium_shamu | sound/soc/au1x/dbdma2.c | 2478 | 10070 | /*
* Au12x0/Au1550 PSC ALSA ASoC audio support.
*
* (c) 2007-2008 MSC Vertriebsges.m.b.H.,
* Manuel Lauss <manuel.lauss@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* DMA glue for Au1x-PSC audio.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include "psc.h"
/*#define PCM_DEBUG*/
#define MSG(x...) printk(KERN_INFO "au1xpsc_pcm: " x)
#ifdef PCM_DEBUG
#define DBG MSG
#else
#define DBG(x...) do {} while (0)
#endif
/*
 * Per-direction DMA state: one of these exists for playback (TX) and
 * one for capture (RX).  The ring is walked one period at a time; the
 * DBDMA completion callbacks requeue the next period.
 */
struct au1xpsc_audio_dmadata {
	/* DDMA control data */
	unsigned int ddma_id;		/* DDMA direction ID for this PSC */
	u32 ddma_chan;			/* DDMA context */

	/* PCM context (for irq handlers) */
	struct snd_pcm_substream *substream;
	unsigned long curr_period;	/* current segment DDMA is working on */
	unsigned long q_period;		/* queue period(s) */
	dma_addr_t dma_area;		/* address of queued DMA area */
	dma_addr_t dma_area_s;		/* start address of DMA area */
	unsigned long pos;		/* current byte position being played */
	unsigned long periods;		/* number of SG segments in total */
	unsigned long period_bytes;	/* size in bytes of one SG segment */

	/* runtime data */
	int msbits;			/* sample MSB count for the ring setup */
};
/*
 * These settings are somewhat okay, at least on my machine audio plays
 * almost skip-free. Especially the 64kB buffer seems to help a LOT.
 */
#define AU1XPSC_PERIOD_MIN_BYTES	1024
#define AU1XPSC_BUFFER_MIN_BYTES	65536

/* every format the PSC/DBDMA pairing can move: 8/16/32-bit samples,
 * either endianness, signed or unsigned */
#define AU1XPSC_PCM_FMTS					\
	(SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |		\
	 SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |	\
	 SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE |	\
	 SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE |	\
	 SNDRV_PCM_FMTBIT_U32_LE | SNDRV_PCM_FMTBIT_U32_BE |	\
	 0)

/* PCM hardware DMA capabilities - platform specific */
static const struct snd_pcm_hardware au1xpsc_pcm_hardware = {
	.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BATCH,
	.formats	  = AU1XPSC_PCM_FMTS,
	.period_bytes_min = AU1XPSC_PERIOD_MIN_BYTES,
	.period_bytes_max = 4096 * 1024 - 1,
	.periods_min	  = 2,
	.periods_max	  = 4096,	/* 2 to as-much-as-you-like */
	.buffer_bytes_max = 4096 * 1024 - 1,
	.fifo_size	  = 16,		/* fifo entries of AC97/I2S PSC */
};
/* Hand the next playback period to the DBDMA engine and advance the
 * queue position, wrapping back to the start of the DMA area once the
 * last period has been queued.
 */
static void au1x_pcm_queue_tx(struct au1xpsc_audio_dmadata *cd)
{
	au1xxx_dbdma_put_source(cd->ddma_chan, cd->dma_area,
				cd->period_bytes, DDMA_FLAGS_IE);

	/* step to the period that gets queued on the next call */
	cd->q_period++;
	cd->dma_area += cd->period_bytes;
	if (cd->q_period < cd->periods)
		return;

	/* ran past the final period: rewind to the buffer start */
	cd->q_period = 0;
	cd->dma_area = cd->dma_area_s;
}
/* Hand the next capture period to the DBDMA engine and advance the
 * queue position, wrapping back to the start of the DMA area once the
 * last period has been queued. Mirror image of au1x_pcm_queue_tx().
 */
static void au1x_pcm_queue_rx(struct au1xpsc_audio_dmadata *cd)
{
	au1xxx_dbdma_put_dest(cd->ddma_chan, cd->dma_area,
			      cd->period_bytes, DDMA_FLAGS_IE);

	/* step to the period that gets queued on the next call */
	cd->q_period++;
	cd->dma_area += cd->period_bytes;
	if (cd->q_period < cd->periods)
		return;

	/* ran past the final period: rewind to the buffer start */
	cd->q_period = 0;
	cd->dma_area = cd->dma_area_s;
}
/* Playback DMA completion interrupt: one TX period was consumed by the
 * engine. Advance the playback position, notify ALSA, then immediately
 * queue the next period so the engine never starves.
 */
static void au1x_pcm_dmatx_cb(int irq, void *dev_id)
{
	struct au1xpsc_audio_dmadata *cd = dev_id;

	cd->pos += cd->period_bytes;
	if (++cd->curr_period >= cd->periods) {
		/* wrapped around the ring buffer */
		cd->pos = 0;
		cd->curr_period = 0;
	}
	snd_pcm_period_elapsed(cd->substream);
	au1x_pcm_queue_tx(cd);
}

/* Capture DMA completion interrupt: mirror image of the TX callback. */
static void au1x_pcm_dmarx_cb(int irq, void *dev_id)
{
	struct au1xpsc_audio_dmadata *cd = dev_id;

	cd->pos += cd->period_bytes;
	if (++cd->curr_period >= cd->periods) {
		cd->pos = 0;
		cd->curr_period = 0;
	}
	snd_pcm_period_elapsed(cd->substream);
	au1x_pcm_queue_rx(cd);
}
/* Stop and release the DBDMA channel, if one is allocated; resets the
 * cached channel handle and device width so a later realloc starts
 * from scratch. Safe to call when no channel exists.
 */
static void au1x_pcm_dbdma_free(struct au1xpsc_audio_dmadata *pcd)
{
	if (!pcd->ddma_chan)
		return;

	au1xxx_dbdma_stop(pcd->ddma_chan);
	au1xxx_dbdma_reset(pcd->ddma_chan);
	au1xxx_dbdma_chan_free(pcd->ddma_chan);
	pcd->ddma_chan = 0;
	pcd->msbits = 0;
}
/* In case of a missing DMA ring or changed TX-source / RX-dest bit
 * widths, allocate (or reallocate) a 2-descriptor DMA ring with bit
 * depth according to the ALSA-supplied sample depth. This is due to
 * limitations in the dbdma api (cannot adjust source/dest widths of an
 * already allocated descriptor ring). Returns 0 or -ENOMEM.
 */
static int au1x_pcm_dbdma_realloc(struct au1xpsc_audio_dmadata *pcd,
				  int stype, int msbits)
{
	/* the DMA engine only moves 8/16/32 bit quantities */
	if (msbits == 24)
		msbits = 32;

	/* nothing to do when a ring with the right width already exists */
	if (pcd->ddma_chan && (msbits == pcd->msbits))
		return 0;

	au1x_pcm_dbdma_free(pcd);

	if (stype == SNDRV_PCM_STREAM_CAPTURE)
		pcd->ddma_chan = au1xxx_dbdma_chan_alloc(pcd->ddma_id,
					DSCR_CMD0_ALWAYS,
					au1x_pcm_dmarx_cb, (void *)pcd);
	else
		pcd->ddma_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
					pcd->ddma_id,
					au1x_pcm_dmatx_cb, (void *)pcd);

	if (!pcd->ddma_chan)
		return -ENOMEM;

	au1xxx_dbdma_set_devwidth(pcd->ddma_chan, msbits);
	au1xxx_dbdma_ring_alloc(pcd->ddma_chan, 2);

	pcd->msbits = msbits;

	au1xxx_dbdma_stop(pcd->ddma_chan);
	au1xxx_dbdma_reset(pcd->ddma_chan);

	return 0;
}
/* Map a substream to its per-direction DMA bookkeeping: the platform
 * drvdata is an array of two entries indexed by stream direction.
 */
static inline struct au1xpsc_audio_dmadata *to_dmadata(struct snd_pcm_substream *ss)
{
	struct snd_soc_pcm_runtime *rtd = ss->private_data;
	struct au1xpsc_audio_dmadata *pcd =
			snd_soc_platform_get_drvdata(rtd->platform);
	return &pcd[ss->stream];
}

/* Allocate the DMA buffer for the chosen parameters and (re)build the
 * DBDMA descriptor ring if the sample width changed; caches the buffer
 * layout for the irq handlers. Returns 0 or a negative errno.
 */
static int au1xpsc_pcm_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct au1xpsc_audio_dmadata *pcd;
	int stype, ret;

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		goto out;

	stype = substream->stream;
	pcd = to_dmadata(substream);

	DBG("runtime->dma_area = 0x%08lx dma_addr_t = 0x%08lx dma_size = %d "
	    "runtime->min_align %d\n",
	    (unsigned long)runtime->dma_area,
	    (unsigned long)runtime->dma_addr, runtime->dma_bytes,
	    runtime->min_align);

	DBG("bits %d frags %d frag_bytes %d is_rx %d\n", params->msbits,
	    params_periods(params), params_period_bytes(params), stype);

	ret = au1x_pcm_dbdma_realloc(pcd, stype, params->msbits);
	if (ret) {
		MSG("DDMA channel (re)alloc failed!\n");
		goto out;
	}

	/* cache the ring layout and reset queue/play positions */
	pcd->substream = substream;
	pcd->period_bytes = params_period_bytes(params);
	pcd->periods = params_periods(params);
	pcd->dma_area_s = pcd->dma_area = runtime->dma_addr;
	pcd->q_period = 0;
	pcd->curr_period = 0;
	pcd->pos = 0;

	ret = 0;
out:
	return ret;
}
/* Release the DMA buffer allocated in hw_params. */
static int au1xpsc_pcm_hw_free(struct snd_pcm_substream *substream)
{
	snd_pcm_lib_free_pages(substream);
	return 0;
}

/* Prime the 2-entry descriptor ring with the first two periods so the
 * engine has work queued the moment it is started.
 */
static int au1xpsc_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct au1xpsc_audio_dmadata *pcd = to_dmadata(substream);

	au1xxx_dbdma_reset(pcd->ddma_chan);

	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		au1x_pcm_queue_rx(pcd);
		au1x_pcm_queue_rx(pcd);
	} else {
		au1x_pcm_queue_tx(pcd);
		au1x_pcm_queue_tx(pcd);
	}

	return 0;
}

/* Start/stop the DBDMA channel on ALSA trigger events. */
static int au1xpsc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	u32 c = to_dmadata(substream)->ddma_chan;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		au1xxx_dbdma_start(c);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		au1xxx_dbdma_stop(c);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* Report the current playback/capture position in frames. */
static snd_pcm_uframes_t
au1xpsc_pcm_pointer(struct snd_pcm_substream *substream)
{
	return bytes_to_frames(substream->runtime, to_dmadata(substream)->pos);
}

/* Look up the DMA ids the CPU DAI published for this link and install
 * the platform's hardware capabilities on the substream.
 */
static int au1xpsc_pcm_open(struct snd_pcm_substream *substream)
{
	struct au1xpsc_audio_dmadata *pcd = to_dmadata(substream);
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	int stype = substream->stream, *dmaids;

	dmaids = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
	if (!dmaids)
		return -ENODEV;	/* whoa, has ordering changed? */

	pcd->ddma_id = dmaids[stype];

	snd_soc_set_runtime_hwparams(substream, &au1xpsc_pcm_hardware);
	return 0;
}

/* Tear down the DBDMA channel when the stream is closed. */
static int au1xpsc_pcm_close(struct snd_pcm_substream *substream)
{
	au1x_pcm_dbdma_free(to_dmadata(substream));
	return 0;
}

/* ALSA PCM callbacks implemented by this platform driver. */
static struct snd_pcm_ops au1xpsc_pcm_ops = {
	.open		= au1xpsc_pcm_open,
	.close		= au1xpsc_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= au1xpsc_pcm_hw_params,
	.hw_free	= au1xpsc_pcm_hw_free,
	.prepare	= au1xpsc_pcm_prepare,
	.trigger	= au1xpsc_pcm_trigger,
	.pointer	= au1xpsc_pcm_pointer,
};

/* Drop the preallocated DMA buffers when the PCM device goes away. */
static void au1xpsc_pcm_free_dma_buffers(struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_free_for_all(pcm);
}

/* Preallocate DMA-capable buffers for all substreams of a new PCM. */
static int au1xpsc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
		card->dev, AU1XPSC_BUFFER_MIN_BYTES, (4096 * 1024) - 1);

	return 0;
}
/* au1xpsc audio platform */
static struct snd_soc_platform_driver au1xpsc_soc_platform = {
	.ops		= &au1xpsc_pcm_ops,
	.pcm_new	= au1xpsc_pcm_new,
	.pcm_free	= au1xpsc_pcm_free_dma_buffers,
};

/* Allocate the two per-direction DMA contexts (indexed by stream
 * direction, see to_dmadata()) and register the ASoC platform.
 */
static int au1xpsc_pcm_drvprobe(struct platform_device *pdev)
{
	struct au1xpsc_audio_dmadata *dmadata;

	dmadata = devm_kzalloc(&pdev->dev,
			       2 * sizeof(struct au1xpsc_audio_dmadata),
			       GFP_KERNEL);
	if (!dmadata)
		return -ENOMEM;

	platform_set_drvdata(pdev, dmadata);

	/* dmadata is devm-managed: nothing to undo if this fails */
	return snd_soc_register_platform(&pdev->dev, &au1xpsc_soc_platform);
}

static int au1xpsc_pcm_drvremove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}

static struct platform_driver au1xpsc_pcm_driver = {
	.driver	= {
		.name	= "au1xpsc-pcm",
		.owner	= THIS_MODULE,
	},
	.probe		= au1xpsc_pcm_drvprobe,
	.remove		= au1xpsc_pcm_drvremove,
};

module_platform_driver(au1xpsc_pcm_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Au12x0/Au1550 PSC Audio DMA driver");
MODULE_AUTHOR("Manuel Lauss");
| gpl-2.0 |
1N4148/SAMSUNG_OSRC_DUMPS | drivers/net/atl1c/atl1c_ethtool.c | 2990 | 9019 | /*
* Copyright(c) 2009 - 2009 Atheros Corporation. All rights reserved.
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include "atl1c.h"
/* ethtool .get_settings: report supported/advertised link modes and the
 * currently negotiated speed/duplex; autoneg is always reported enabled.
 */
static int atl1c_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct atl1c_adapter *adapter = netdev_priv(netdev);
	struct atl1c_hw *hw = &adapter->hw;

	ecmd->supported = (SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_Autoneg |
			   SUPPORTED_TP);
	/* gigabit is only offered on parts that advertise the capability */
	if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->advertising = ADVERTISED_TP;
	ecmd->advertising |= hw->autoneg_advertised;

	ecmd->port = PORT_TP;
	ecmd->phy_address = 0;
	ecmd->transceiver = XCVR_INTERNAL;

	if (adapter->link_speed != SPEED_0) {
		ethtool_cmd_speed_set(ecmd, adapter->link_speed);
		if (adapter->link_duplex == FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		/* link is down: speed and duplex are unknown */
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_ENABLE;
	return 0;
}
/* ethtool .set_settings: translate the requested autoneg/speed/duplex
 * into a PHY advertisement mask and restart autonegotiation if the mask
 * changed. 1000M half-duplex is rejected with -EINVAL.
 */
static int atl1c_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct atl1c_adapter *adapter = netdev_priv(netdev);
	struct atl1c_hw *hw = &adapter->hw;
	u16 autoneg_advertised;

	/* wait until no reset is in flight before touching the PHY */
	while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
		msleep(1);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		autoneg_advertised = ADVERTISED_Autoneg;
	} else {
		u32 speed = ethtool_cmd_speed(ecmd);
		if (speed == SPEED_1000) {
			if (ecmd->duplex != DUPLEX_FULL) {
				if (netif_msg_link(adapter))
					dev_warn(&adapter->pdev->dev,
						"1000M half is invalid\n");
				clear_bit(__AT_RESETTING, &adapter->flags);
				return -EINVAL;
			}
			autoneg_advertised = ADVERTISED_1000baseT_Full;
		} else if (speed == SPEED_100) {
			if (ecmd->duplex == DUPLEX_FULL)
				autoneg_advertised = ADVERTISED_100baseT_Full;
			else
				autoneg_advertised = ADVERTISED_100baseT_Half;
		} else {
			if (ecmd->duplex == DUPLEX_FULL)
				autoneg_advertised = ADVERTISED_10baseT_Full;
			else
				autoneg_advertised = ADVERTISED_10baseT_Half;
		}
	}

	/* only kick the PHY when the advertisement actually changed */
	if (hw->autoneg_advertised != autoneg_advertised) {
		hw->autoneg_advertised = autoneg_advertised;
		if (atl1c_restart_autoneg(hw) != 0) {
			if (netif_msg_link(adapter))
				dev_warn(&adapter->pdev->dev,
					"ethtool speed/duplex setting failed\n");
			clear_bit(__AT_RESETTING, &adapter->flags);
			return -EINVAL;
		}
	}
	clear_bit(__AT_RESETTING, &adapter->flags);
	return 0;
}
/* ethtool message-level get/set: plain accessors for msg_enable. */
static u32 atl1c_get_msglevel(struct net_device *netdev)
{
	struct atl1c_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void atl1c_set_msglevel(struct net_device *netdev, u32 data)
{
	struct atl1c_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

/* Size in bytes of the register dump produced by atl1c_get_regs(). */
static int atl1c_get_regs_len(struct net_device *netdev)
{
	return AT_REGS_LEN;
}
/* ethtool .get_regs: dump a snapshot of MAC/PCIe configuration
 * registers plus two PHY registers into the AT_REGS_LEN-byte buffer.
 * NOTE(review): registers are written sequentially through p++ while
 * the PHY values land at fixed slots regs_buff[73]/[74]; the gap in
 * between stays zeroed by the memset — confirm this layout matches
 * what userspace tooling expects.
 */
static void atl1c_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct atl1c_adapter *adapter = netdev_priv(netdev);
	struct atl1c_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u16 phy_data;

	memset(p, 0, AT_REGS_LEN);

	regs->version = 0;
	AT_READ_REG(hw, REG_VPD_CAP, p++);
	AT_READ_REG(hw, REG_PM_CTRL, p++);
	AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++);
	AT_READ_REG(hw, REG_TWSI_CTRL, p++);
	AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL, p++);
	AT_READ_REG(hw, REG_MASTER_CTRL, p++);
	AT_READ_REG(hw, REG_MANUAL_TIMER_INIT, p++);
	AT_READ_REG(hw, REG_IRQ_MODRT_TIMER_INIT, p++);
	AT_READ_REG(hw, REG_GPHY_CTRL, p++);
	AT_READ_REG(hw, REG_LINK_CTRL, p++);
	AT_READ_REG(hw, REG_IDLE_STATUS, p++);
	AT_READ_REG(hw, REG_MDIO_CTRL, p++);
	AT_READ_REG(hw, REG_SERDES_LOCK, p++);
	AT_READ_REG(hw, REG_MAC_CTRL, p++);
	AT_READ_REG(hw, REG_MAC_IPG_IFG, p++);
	AT_READ_REG(hw, REG_MAC_STA_ADDR, p++);
	AT_READ_REG(hw, REG_MAC_STA_ADDR+4, p++);
	AT_READ_REG(hw, REG_RX_HASH_TABLE, p++);
	AT_READ_REG(hw, REG_RX_HASH_TABLE+4, p++);
	AT_READ_REG(hw, REG_RXQ_CTRL, p++);
	AT_READ_REG(hw, REG_TXQ_CTRL, p++);
	AT_READ_REG(hw, REG_MTU, p++);
	AT_READ_REG(hw, REG_WOL_CTRL, p++);

	/* basic mode control and status straight from the PHY */
	atl1c_read_phy_reg(hw, MII_BMCR, &phy_data);
	regs_buff[73] = (u32) phy_data;
	atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
	regs_buff[74] = (u32) phy_data;
}
/* ethtool .get_eeprom_len: report the EEPROM size, or 0 when no
 * EEPROM is present on this adapter.
 */
static int atl1c_get_eeprom_len(struct net_device *netdev)
{
	struct atl1c_adapter *adapter = netdev_priv(netdev);

	return atl1c_check_eeprom_exist(&adapter->hw) ? AT_EEPROM_LEN : 0;
}
/* ethtool .get_eeprom: read eeprom->len bytes starting at
 * eeprom->offset. The EEPROM is read a dword at a time, so the request
 * is rounded out to whole dwords and the exact byte range copied to
 * the caller. Returns 0, -EINVAL for empty/absent EEPROM, -ENOMEM, or
 * -EIO on a read failure.
 */
static int atl1c_get_eeprom(struct net_device *netdev,
		struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct atl1c_adapter *adapter = netdev_priv(netdev);
	struct atl1c_hw *hw = &adapter->hw;
	u32 *eeprom_buff;
	int first_dword, last_dword;
	int ret_val = 0;
	int i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (!atl1c_check_eeprom_exist(hw)) /* not exist */
		return -EINVAL;

	eeprom->magic = adapter->pdev->vendor |
			(adapter->pdev->device << 16);

	/* the requested byte range rounded out to whole dwords */
	first_dword = eeprom->offset >> 2;
	last_dword = (eeprom->offset + eeprom->len - 1) >> 2;

	eeprom_buff = kmalloc(sizeof(u32) *
			(last_dword - first_dword + 1), GFP_KERNEL);
	if (eeprom_buff == NULL)
		return -ENOMEM;

	/* read first..last INCLUSIVE: the buffer is sized for
	 * last_dword - first_dword + 1 entries and the final dword is
	 * copied out below, so a "<" bound would leave it unread */
	for (i = first_dword; i <= last_dword; i++) {
		if (!atl1c_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) {
			kfree(eeprom_buff);
			return -EIO;
		}
	}

	/* copy just the requested bytes out of the dword-aligned buffer */
	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
			eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
/* ethtool .get_drvinfo: driver identity strings and dump lengths. */
static void atl1c_get_drvinfo(struct net_device *netdev,
		struct ethtool_drvinfo *drvinfo)
{
	struct atl1c_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, atl1c_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = 0;
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = atl1c_get_regs_len(netdev);
	drvinfo->eedump_len = atl1c_get_eeprom_len(netdev);
}

/* ethtool .get_wol: report the current Wake-on-LAN configuration.
 * NOTE(review): 'supported' only advertises MAGIC|PHY, yet UCAST/MCAST/
 * BCAST bits are still reported if set in adapter->wol — confirm those
 * can ever be set, since atl1c_set_wol() below rejects them.
 */
static void atl1c_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct atl1c_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_MAGIC | WAKE_PHY;
	wol->wolopts = 0;

	if (adapter->wol & AT_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & AT_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & AT_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & AT_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
	if (adapter->wol & AT_WUFC_LNKC)
		wol->wolopts |= WAKE_PHY;
}

/* ethtool .set_wol: only magic-packet and link-change wake are
 * accepted; everything else returns -EOPNOTSUPP.
 */
static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct atl1c_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
			    WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
		return -EOPNOTSUPP;
	/* these settings will always override what we currently have */
	adapter->wol = 0;

	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= AT_WUFC_MAG;
	if (wol->wolopts & WAKE_PHY)
		adapter->wol |= AT_WUFC_LNKC;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

/* ethtool .nway_reset: restart negotiation by reinitializing the
 * interface, but only when it is currently up.
 */
static int atl1c_nway_reset(struct net_device *netdev)
{
	struct atl1c_adapter *adapter = netdev_priv(netdev);
	if (netif_running(netdev))
		atl1c_reinit_locked(adapter);
	return 0;
}
/* ethtool operations implemented by the atl1c driver. */
static const struct ethtool_ops atl1c_ethtool_ops = {
	.get_settings		= atl1c_get_settings,
	.set_settings		= atl1c_set_settings,
	.get_drvinfo		= atl1c_get_drvinfo,
	.get_regs_len		= atl1c_get_regs_len,
	.get_regs		= atl1c_get_regs,
	.get_wol		= atl1c_get_wol,
	.set_wol		= atl1c_set_wol,
	.get_msglevel		= atl1c_get_msglevel,
	.set_msglevel		= atl1c_set_msglevel,
	.nway_reset		= atl1c_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= atl1c_get_eeprom_len,
	.get_eeprom		= atl1c_get_eeprom,
};

/* Attach the ethtool ops to a freshly created netdev. */
void atl1c_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops);
}
| gpl-2.0 |
librae8226/linux-3.0 | drivers/char/nwflash.c | 3246 | 13987 | /*
* Flash memory interface rev.5 driver for the Intel
* Flash chips used on the NetWinder.
*
* 20/08/2000 RMK use __ioremap to map flash into virtual memory
* make a few more places use "volatile"
* 22/05/2001 RMK - Lock read against write
* - merge printk level changes (with mods) from Alan Cox.
* - use *ppos as the file position, not file->f_pos.
* - fix check for out of range pos and r/w size
*
* Please note that we are tampering with the only flash chip in the
* machine, which contains the bootup code. We therefore have the
* power to convert these machines into doorstops...
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <asm/hardware/dec21285.h>
#include <asm/io.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
/*****************************************************************************/
#include <asm/nwflash.h>
#define	NWFLASH_VERSION "6.4"

/* serializes ioctl changes to the write-enable flags */
static DEFINE_MUTEX(flash_mutex);
static void kick_open(void);
static int get_flash_id(void);
static int erase_block(int nBlock);
static int write_block(unsigned long p, const char __user *buf, int count);

#define KFLASH_SIZE	1024*1024	//1 Meg
#define KFLASH_SIZE4	4*1024*1024	//4 Meg
#define KFLASH_ID	0x89A6		//Intel flash
#define KFLASH_ID4	0xB0D4		//Intel flash 4Meg

static int flashdebug;		//if set - we will display progress msgs

static int gbWriteEnable;	/* CMD_WRITE_ENABLE: allow writes at all */
static int gbWriteBase64Enable;	/* allow writes to the first 64K (boot area) */
static volatile unsigned char *FLASH_BASE;	/* ioremapped flash window */
static int gbFlashSize = KFLASH_SIZE;	/* bumped to 4M by get_flash_id() */
/* serializes all actual flash read/write/erase access */
static DEFINE_MUTEX(nwflash_mutex);
/* Probe the flash chip for its (manufacturer << 8) | device ID.
 * Side effect: if a 4MB part is detected, gbFlashSize is raised to
 * KFLASH_SIZE4. The chip is left back in normal read mode.
 */
static int get_flash_id(void)
{
	volatile unsigned int c1, c2;

	/*
	 * try to get flash chip ID
	 */
	kick_open();
	c2 = inb(0x80);		/* dummy port read — presumably a short bus delay; confirm */
	*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x90;
	udelay(15);
	c1 = *(volatile unsigned char *) FLASH_BASE;
	c2 = inb(0x80);

	/*
	 * on 4 Meg flash the second byte is actually at offset 2...
	 */
	if (c1 == 0xB0)
		c2 = *(volatile unsigned char *) (FLASH_BASE + 2);
	else
		c2 = *(volatile unsigned char *) (FLASH_BASE + 1);

	c2 += (c1 << 8);

	/*
	 * set it back to read mode
	 */
	*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;

	if (c2 == KFLASH_ID4)
		gbFlashSize = KFLASH_SIZE4;

	return c2;
}
/* ioctl handler: toggles the module-wide write-enable flags under
 * flash_mutex. Unknown commands clear both flags and fail with
 * -EINVAL, exactly like CMD_WRITE_DISABLE plus an error.
 */
static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	long ret = 0;

	mutex_lock(&flash_mutex);
	switch (cmd) {
	case CMD_WRITE_DISABLE:
		gbWriteBase64Enable = 0;
		gbWriteEnable = 0;
		break;

	case CMD_WRITE_ENABLE:
		gbWriteEnable = 1;
		break;

	case CMD_WRITE_BASE64K_ENABLE:
		gbWriteBase64Enable = 1;
		break;

	default:
		/* unknown command: drop back to the safe, write-protected state */
		gbWriteBase64Enable = 0;
		gbWriteEnable = 0;
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&flash_mutex);
	return ret;
}
/* Read straight out of the memory-mapped flash window, serialized
 * against writers/erasers via nwflash_mutex.
 */
static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
			  loff_t *ppos)
{
	ssize_t ret;

	if (flashdebug)
		printk(KERN_DEBUG "flash_read: flash_read: offset=0x%llx, "
		       "buffer=%p, count=0x%zx.\n", *ppos, buf, size);
	/*
	 * We now lock against reads and writes. --rmk
	 */
	if (mutex_lock_interruptible(&nwflash_mutex))
		return -ERESTARTSYS;
	/* bounds/short-read handling is done entirely by the helper */
	ret = simple_read_from_buffer(buf, size, ppos, (void *)FLASH_BASE, gbFlashSize);
	mutex_unlock(&nwflash_mutex);

	return ret;
}
/* Erase-and-program write path: for each 64K block touched by the
 * request, erase it (up to 10 attempts) and program the user data via
 * write_block(), which verifies every byte (also retried up to 10
 * times). Returns the number of bytes written, or a negative errno.
 * NOTE: erase_block() wipes the whole 64K block, so data in a block
 * outside the written range is destroyed.
 */
static ssize_t flash_write(struct file *file, const char __user *buf,
			   size_t size, loff_t * ppos)
{
	unsigned long p = *ppos;
	unsigned int count = size;
	int written;
	int nBlock, temp, rc;
	int i, j;

	if (flashdebug)
		printk("flash_write: offset=0x%lX, buffer=0x%p, count=0x%X.\n",
		       p, buf, count);

	/* writes must have been enabled via ioctl first */
	if (!gbWriteEnable)
		return -EINVAL;

	/* the boot area (first 64K) needs its own enable bit */
	if (p < 64 * 1024 && (!gbWriteBase64Enable))
		return -EINVAL;

	/*
	 * check for out of range pos or count
	 */
	if (p >= gbFlashSize)
		return count ? -ENXIO : 0;

	if (count > gbFlashSize - p)
		count = gbFlashSize - p;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/*
	 * We now lock against reads and writes. --rmk
	 */
	if (mutex_lock_interruptible(&nwflash_mutex))
		return -ERESTARTSYS;

	written = 0;

	leds_event(led_claim);
	leds_event(led_green_on);

	nBlock = (int) p >> 16;	//block # of 64K bytes

	/*
	 * # of 64K blocks to erase and write
	 */
	temp = ((int) (p + count) >> 16) - nBlock + 1;

	/*
	 * write ends at exactly 64k boundary?
	 */
	if (((int) (p + count) & 0xFFFF) == 0)
		temp -= 1;

	if (flashdebug)
		printk(KERN_DEBUG "flash_write: writing %d block(s) "
		       "starting at %d.\n", temp, nBlock);

	for (; temp; temp--, nBlock++) {
		if (flashdebug)
			printk(KERN_DEBUG "flash_write: erasing block %d.\n", nBlock);

		/*
		 * first we have to erase the block(s), where we will write...
		 */
		i = 0;
		j = 0;
	      RetryBlock:
		do {
			rc = erase_block(nBlock);
			i++;
		} while (rc && i < 10);

		if (rc) {
			printk(KERN_ERR "flash_write: erase error %x\n", rc);
			break;
		}
		if (flashdebug)
			printk(KERN_DEBUG "flash_write: writing offset %lX, "
			       "from buf %p, bytes left %X.\n", p, buf,
			       count - written);

		/*
		 * write_block will limit write to space left in this block
		 */
		rc = write_block(p, buf, count - written);
		j++;

		/*
		 * if somehow write verify failed? Can't happen??
		 */
		if (!rc) {
			/*
			 * retry up to 10 times
			 */
			if (j < 10)
				goto RetryBlock;
			else
				/*
				 * else quit with error...
				 */
				rc = -1;
		}
		if (rc < 0) {
			printk(KERN_ERR "flash_write: write error %X\n", rc);
			break;
		}
		/* advance source, flash position and file offset by the
		 * bytes write_block() actually committed */
		p += rc;
		buf += rc;
		written += rc;
		*ppos += rc;

		if (flashdebug)
			printk(KERN_DEBUG "flash_write: written 0x%X bytes OK.\n", written);
	}

	/*
	 * restore reg on exit
	 */
	leds_event(led_release);

	mutex_unlock(&nwflash_mutex);

	return written;
}
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t flash_llseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&flash_mutex);
	if (flashdebug)
		printk(KERN_DEBUG "flash_llseek: offset=0x%X, orig=0x%X.\n",
		       (unsigned int) offset, orig);

	switch (orig) {
	case 0:			/* SEEK_SET: absolute, bounded by flash size */
		if (offset < 0) {
			ret = -EINVAL;
			break;
		}

		if ((unsigned int) offset > gbFlashSize) {
			ret = -EINVAL;
			break;
		}

		file->f_pos = (unsigned int) offset;
		ret = file->f_pos;
		break;
	case 1:			/* SEEK_CUR: relative to current position */
		if ((file->f_pos + offset) > gbFlashSize) {
			ret = -EINVAL;
			break;
		}
		if ((file->f_pos + offset) < 0) {
			ret = -EINVAL;
			break;
		}
		file->f_pos += offset;
		ret = file->f_pos;
		break;
	default:		/* SEEK_END and anything else: unsupported */
		ret = -EINVAL;
	}
	mutex_unlock(&flash_mutex);
	return ret;
}
/*
 * assume that main Write routine did the parameter checking...
 * so just go ahead and erase, what requested!
 *
 * Erases one 64K flash block and verifies it reads back as all-ones.
 * Returns 0 on success, -2 if the chip reported an erase error, -1 if
 * the post-erase verify failed. Caller (flash_write) holds
 * nwflash_mutex and retries on failure.
 */
static int erase_block(int nBlock)
{
	volatile unsigned int c1;
	volatile unsigned char *pWritePtr;
	unsigned long timeout;
	int temp, temp1;

	/*
	 * orange LED == erase
	 */
	leds_event(led_amber_on);

	/*
	 * reset footbridge to the correct offset 0 (...0..3)
	 */
	*CSR_ROMWRITEREG = 0;

	/*
	 * dummy ROM read
	 */
	c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);

	kick_open();
	/*
	 * reset status if old errors
	 */
	*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;

	/*
	 * erase a block...
	 * aim at the middle of a current block...
	 */
	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + 0x8000 + (nBlock << 16)));

	/*
	 * dummy read
	 */
	c1 = *pWritePtr;

	kick_open();
	/*
	 * erase
	 */
	*(volatile unsigned char *) pWritePtr = 0x20;

	/*
	 * confirm
	 */
	*(volatile unsigned char *) pWritePtr = 0xD0;

	/*
	 * wait 10 ms
	 */
	msleep(10);

	/*
	 * wait while erasing in process (up to 10 sec)
	 */
	timeout = jiffies + 10 * HZ;
	c1 = 0;
	/* bit 7 of the status byte flags "ready" */
	while (!(c1 & 0x80) && time_before(jiffies, timeout)) {
		msleep(10);
		/*
		 * read any address
		 */
		c1 = *(volatile unsigned char *) (pWritePtr);
		/* printk("Flash_erase: status=%X.\n",c1); */
	}

	/*
	 * set flash for normal read access
	 */
	kick_open();
	/* *(volatile unsigned char*)(FLASH_BASE+0x8000) = 0xFF; */
	*(volatile unsigned char *) pWritePtr = 0xFF;	//back to normal operation

	/*
	 * check if erase errors were reported
	 */
	if (c1 & 0x20) {
		printk(KERN_ERR "flash_erase: err at %p\n", pWritePtr);

		/*
		 * reset error
		 */
		*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
		return -2;
	}

	/*
	 * just to make sure - verify if erased OK...
	 */
	msleep(10);

	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + (nBlock << 16)));

	/* scan the whole 64K block a dword at a time */
	for (temp = 0; temp < 16 * 1024; temp++, pWritePtr += 4) {
		if ((temp1 = *(volatile unsigned int *) pWritePtr) != 0xFFFFFFFF) {
			printk(KERN_ERR "flash_erase: verify err at %p = %X\n",
			       pWritePtr, temp1);
			return -1;
		}
	}

	return 0;
}
/*
 * write_block will limit number of bytes written to the space in this block
 *
 * Programs up to 'count' bytes at flash offset 'p', never crossing the
 * containing 64K block boundary, then verifies the data byte-by-byte.
 * Returns the byte count actually written, 0 when the verify pass
 * failed (caller retries the whole block), -2 on a persistent program
 * timeout, or -EFAULT if the user buffer faults. Caller holds
 * nwflash_mutex.
 */
static int write_block(unsigned long p, const char __user *buf, int count)
{
	volatile unsigned int c1;
	volatile unsigned int c2;
	unsigned char *pWritePtr;
	unsigned int uAddress;
	unsigned int offset;
	unsigned long timeout;
	unsigned long timeout1;

	/*
	 * red LED == write
	 */
	leds_event(led_amber_off);
	leds_event(led_red_on);

	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));

	/*
	 * check if write will end in this block....
	 */
	offset = p & 0xFFFF;

	if (offset + count > 0x10000)
		count = 0x10000 - offset;

	/*
	 * wait up to 30 sec for this block
	 */
	timeout = jiffies + 30 * HZ;

	for (offset = 0; offset < count; offset++, pWritePtr++) {
		/* footbridge writes go through a dword-aligned window */
		uAddress = (unsigned int) pWritePtr;
		uAddress &= 0xFFFFFFFC;
		if (__get_user(c2, buf + offset))
			return -EFAULT;

	      WriteRetry:
		/*
		 * dummy read
		 */
		c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);

		/*
		 * kick open the write gate
		 */
		kick_open();

		/*
		 * program footbridge to the correct offset...0..3
		 */
		*CSR_ROMWRITEREG = (unsigned int) pWritePtr & 3;

		/*
		 * write cmd
		 */
		*(volatile unsigned char *) (uAddress) = 0x40;

		/*
		 * data to write
		 */
		*(volatile unsigned char *) (uAddress) = c2;

		/*
		 * get status
		 */
		*(volatile unsigned char *) (FLASH_BASE + 0x10000) = 0x70;

		c1 = 0;

		/*
		 * wait up to 1 sec for this byte
		 */
		timeout1 = jiffies + 1 * HZ;

		/*
		 * while not ready...
		 */
		while (!(c1 & 0x80) && time_before(jiffies, timeout1))
			c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);

		/*
		 * if timeout getting status
		 */
		if (time_after_eq(jiffies, timeout1)) {
			kick_open();
			/*
			 * reset err
			 */
			*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;

			goto WriteRetry;
		}
		/*
		 * switch on read access, as a default flash operation mode
		 */
		kick_open();
		/*
		 * read access
		 */
		*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;

		/*
		 * if hardware reports an error writing, and not timeout -
		 * reset the chip and retry
		 */
		if (c1 & 0x10) {
			kick_open();
			/*
			 * reset err
			 */
			*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;

			/*
			 * before timeout?
			 */
			if (time_before(jiffies, timeout)) {
				if (flashdebug)
					printk(KERN_DEBUG "write_block: Retrying write at 0x%X)n",
					       pWritePtr - FLASH_BASE);

				/*
				 * no LED == waiting
				 */
				leds_event(led_amber_off);

				/*
				 * wait couple ms
				 */
				msleep(10);
				/*
				 * red LED == write
				 */
				leds_event(led_red_on);

				goto WriteRetry;
			} else {
				printk(KERN_ERR "write_block: timeout at 0x%X\n",
				       pWritePtr - FLASH_BASE);
				/*
				 * return error -2
				 */
				return -2;
			}
		}
	}

	/*
	 * green LED == read/verify
	 */
	leds_event(led_amber_off);
	leds_event(led_green_on);

	msleep(10);

	/* second pass: read everything back and compare against the
	 * user buffer */
	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));

	for (offset = 0; offset < count; offset++) {
		char c, c1;

		if (__get_user(c, buf))
			return -EFAULT;
		buf++;
		if ((c1 = *pWritePtr++) != c) {
			printk(KERN_ERR "write_block: verify error at 0x%X (%02X!=%02X)\n",
			       pWritePtr - FLASH_BASE, c1, c);
			/* 0 tells flash_write to re-erase and retry the block */
			return 0;
		}
	}

	return count;
}
/* Briefly open the hardware write gate that protects the flash; any
 * program/erase command must be issued while the gate is open.
 */
static void kick_open(void)
{
	unsigned long flags;

	/*
	 * we want to write a bit pattern XXX1 to Xilinx to enable
	 * the write gate, which will be open for about the next 2ms.
	 */
	spin_lock_irqsave(&nw_gpio_lock, flags);
	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
	spin_unlock_irqrestore(&nw_gpio_lock, flags);

	/*
	 * let the ISA bus to catch on...
	 */
	udelay(25);
}
static const struct file_operations flash_fops =
{
	.owner		= THIS_MODULE,
	.llseek		= flash_llseek,
	.read		= flash_read,
	.write		= flash_write,
	.unlocked_ioctl	= flash_ioctl,
};

/* positional initializers: minor number, device name, fops */
static struct miscdevice flash_miscdev =
{
	FLASH_MINOR,
	"nwflash",
	&flash_fops
};

/* Map the flash window, verify the chip ID, and register the misc
 * device. Only runs on NetWinder hardware; everything else gets
 * -ENODEV.
 */
static int __init nwflash_init(void)
{
	int ret = -ENODEV;

	if (machine_is_netwinder()) {
		int id;

		/* map the largest possible window; get_flash_id() lowers
		 * gbFlashSize if only a 1MB part is present */
		FLASH_BASE = ioremap(DC21285_FLASH, KFLASH_SIZE4);
		if (!FLASH_BASE)
			goto out;

		id = get_flash_id();
		if ((id != KFLASH_ID) && (id != KFLASH_ID4)) {
			ret = -ENXIO;
			iounmap((void *)FLASH_BASE);
			printk("Flash: incorrect ID 0x%04X.\n", id);
			goto out;
		}

		printk("Flash ROM driver v.%s, flash device ID 0x%04X, size %d Mb.\n",
		       NWFLASH_VERSION, id, gbFlashSize / (1024 * 1024));

		ret = misc_register(&flash_miscdev);
		if (ret < 0) {
			iounmap((void *)FLASH_BASE);
		}
	}
out:
	return ret;
}

static void __exit nwflash_exit(void)
{
	misc_deregister(&flash_miscdev);
	iounmap((void *)FLASH_BASE);
}

MODULE_LICENSE("GPL");

/* NOTE(review): flashdebug is declared int but exported as a bool
 * module parameter — newer kernels reject this mismatch; confirm. */
module_param(flashdebug, bool, 0644);

module_init(nwflash_init);
module_exit(nwflash_exit);
| gpl-2.0 |
GreatDevs/kernel_sony_msm8974 | drivers/usb/misc/diag_bridge_test.c | 3502 | 4398 | /*
* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/crc-ccitt.h>
#include <mach/diag_bridge.h>
#define DRIVER_DESC	"USB host diag bridge driver test"
#define DRIVER_VERSION	"1.0"

#define RD_BUF_SIZE	2048
/* bit index in diag_test_dev.flags; NOTE(review): tested in
 * diag_test_exit() but never set anywhere in this file — confirm */
#define DIAG_TEST_CONNECTED	0

/* Test-harness state: one reusable read buffer plus the callback ops
 * handed to the diag bridge.
 */
struct diag_test_dev {
	char *read_buf;
	struct work_struct read_w;	/* defers reads out of callback context */
	unsigned long flags;
	struct diag_bridge_ops ops;
};
/* NOTE(review): leading-double-underscore name is reserved for the
 * implementation; consider renaming. */
static struct diag_test_dev *__dev;
static struct dentry *dent;
/* Bridge read-completion callback: dump the received bytes, or log an
 * error when the bridge reports a failure.
 */
static void
diag_test_read_complete_cb(void *d, char *buf, size_t size, size_t actual)
{
	/* 'actual' is size_t (unsigned), so the original "actual < 0"
	 * test was always false; a negative error code stored in an
	 * unsigned slot must have its sign recovered explicitly. */
	if ((ssize_t)actual < 0) {
		pr_err("%s: read complete err\n", __func__);
		return;
	}

	print_hex_dump(KERN_INFO, "to_host:", 0, 1, 1, buf, actual, false);
}
/* Worker: issue the next bridge read into the (zeroed, reused) buffer. */
static void diag_test_read_work(struct work_struct *w)
{
	struct diag_test_dev *dev =
		container_of(w, struct diag_test_dev, read_w);

	memset(dev->read_buf, 0, RD_BUF_SIZE);
	diag_bridge_read(dev->read_buf, RD_BUF_SIZE);
}

/* Write completion: on success, schedule a read for the response.
 * NOTE(review): 'actual' is size_t, so a negative error encoded in it
 * would show up as a huge positive value and still schedule the read —
 * confirm the bridge's error convention.
 */
static void
diag_test_write_complete_cb(void *d, char *buf, size_t size, size_t actual)
{
	struct diag_test_dev *dev = d;

	if (actual > 0)
		schedule_work(&dev->read_w);
}
#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024

/* debugfs write handler: any write (its content is ignored) sends a
 * canned, HDLC-encoded diag "ping" command over the bridge.
 * NOTE(review): 'buf' is handed to diag_bridge_write() and never freed
 * here — presumably the bridge takes ownership of the buffer; confirm,
 * otherwise this leaks 4 bytes per invocation.
 */
static ssize_t send_ping_cmd(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct diag_test_dev *dev = __dev;
	unsigned char *buf;
	int temp = sizeof(unsigned char) * 4;

	if (!dev)
		return -ENODEV;

	buf = kmalloc(temp, GFP_KERNEL);
	if (!buf) {
		pr_err("%s: unable to allocate mem for ping cmd\n",
				__func__);
		return -ENOMEM;
	}

	/* hdlc encoded ping command */
	buf[0] = 0x0C;
	buf[1] = 0x14;
	buf[2] = 0x3A;
	buf[3] = 0x7E;

	diag_bridge_write(buf, temp);

	return count;
}

const struct file_operations diag_test_ping_ops = {
	.write = send_ping_cmd,
};

/* Create <debugfs>/diag_test/send_ping; on partial failure the whole
 * directory is removed again.
 */
static void diag_test_debug_init(void)
{
	struct dentry *dfile;

	dent = debugfs_create_dir("diag_test", 0);
	if (IS_ERR(dent))
		return;

	/* mode 0444 although the only op is .write — TODO confirm intent */
	dfile = debugfs_create_file("send_ping", 0444, dent,
			0, &diag_test_ping_ops);
	if (!dfile || IS_ERR(dfile))
		debugfs_remove(dent);
}
#else
/* no debugfs: the trigger file simply does not exist */
static void diag_test_debug_init(void) { }
#endif
static int diag_test_remove(struct platform_device *pdev)
{
diag_bridge_close();
if (dent) {
debugfs_remove_recursive(dent);
dent = NULL;
}
return 0;
}
static int diag_test_probe(struct platform_device *pdev)
{
struct diag_test_dev *dev = __dev;
int ret = 0;
pr_info("%s:\n", __func__);
ret = diag_bridge_open(&dev->ops);
if (ret)
pr_err("diag open failed: %d", ret);
diag_test_debug_init();
return ret;
}
/*
 * Binds against the "diag_bridge" platform device registered by the
 * bridge core; probe opens the bridge, remove closes it.
 */
static struct platform_driver diag_test = {
	.remove = diag_test_remove,
	.probe = diag_test_probe,
	.driver = {
		.name = "diag_bridge",
		.owner = THIS_MODULE,
	},
};
/*
 * Module init: allocate the singleton test device plus its read buffer,
 * wire up the bridge callbacks and register the platform driver.
 */
static int __init diag_test_init(void)
{
	struct diag_test_dev *dev;
	int ret;

	pr_info("%s\n", __func__);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	__dev = dev;

	dev->ops.read_complete_cb = diag_test_read_complete_cb;
	dev->ops.write_complete_cb = diag_test_write_complete_cb;

	dev->read_buf = kmalloc(RD_BUF_SIZE, GFP_KERNEL);
	if (!dev->read_buf) {
		pr_err("%s: unable to allocate read buffer\n", __func__);
		ret = -ENOMEM;
		goto err_free_dev;
	}

	dev->ops.ctxt = dev;
	INIT_WORK(&dev->read_w, diag_test_read_work);

	ret = platform_driver_register(&diag_test);
	if (ret) {
		pr_err("%s: platform driver %s register failed %d\n",
				__func__, diag_test.driver.name, ret);
		/* The original leaked 'dev' and 'read_buf' on this path. */
		goto err_free_buf;
	}

	return 0;

err_free_buf:
	kfree(dev->read_buf);
err_free_dev:
	kfree(dev);
	__dev = NULL;	/* don't leave a dangling global on failure */
	return ret;
}
/*
 * Module unload: close the bridge if still connected and free the test
 * device.
 *
 * NOTE(review): platform_driver_unregister(&diag_test) is never called
 * here, so the driver registration outlives the module text -- verify
 * whether that is intentional.  Adding it is not a one-liner because
 * diag_test_remove() calls diag_bridge_close() unconditionally, which
 * would double-close together with the DIAG_TEST_CONNECTED path below.
 */
static void __exit diag_test_exit(void)
{
	struct diag_test_dev *dev = __dev;

	pr_info("%s:\n", __func__);

	/* DIAG_TEST_CONNECTED is maintained elsewhere in this file. */
	if (test_bit(DIAG_TEST_CONNECTED, &dev->flags))
		diag_bridge_close();

	kfree(dev->read_buf);
	kfree(dev);
}
module_init(diag_test_init);
module_exit(diag_test_exit);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
RockchipOpensourceCommunity/kernel-rockchip-next | arch/arm/mach-mmp/brownstone.c | 4014 | 5653 | /*
* linux/arch/arm/mach-mmp/brownstone.c
*
* Support for the Marvell Brownstone Development Platform.
*
* Copyright (C) 2009-2010 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio-pxa.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/max8649.h>
#include <linux/regulator/fixed.h>
#include <linux/mfd/max8925.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/addr-map.h>
#include <mach/mfp-mmp2.h>
#include <mach/mmp2.h>
#include <mach/irqs.h>
#include "common.h"
#define BROWNSTONE_NR_IRQS (MMP_NR_IRQS + 40)
#define GPIO_5V_ENABLE (89)
static unsigned long brownstone_pin_config[] __initdata = {
/* UART1 */
GPIO29_UART1_RXD,
GPIO30_UART1_TXD,
/* UART3 */
GPIO51_UART3_RXD,
GPIO52_UART3_TXD,
/* DFI */
GPIO168_DFI_D0,
GPIO167_DFI_D1,
GPIO166_DFI_D2,
GPIO165_DFI_D3,
GPIO107_DFI_D4,
GPIO106_DFI_D5,
GPIO105_DFI_D6,
GPIO104_DFI_D7,
GPIO111_DFI_D8,
GPIO164_DFI_D9,
GPIO163_DFI_D10,
GPIO162_DFI_D11,
GPIO161_DFI_D12,
GPIO110_DFI_D13,
GPIO109_DFI_D14,
GPIO108_DFI_D15,
GPIO143_ND_nCS0,
GPIO144_ND_nCS1,
GPIO147_ND_nWE,
GPIO148_ND_nRE,
GPIO150_ND_ALE,
GPIO149_ND_CLE,
GPIO112_ND_RDY0,
GPIO160_ND_RDY1,
/* PMIC */
PMIC_PMIC_INT | MFP_LPM_EDGE_FALL,
/* MMC0 */
GPIO131_MMC1_DAT3 | MFP_PULL_HIGH,
GPIO132_MMC1_DAT2 | MFP_PULL_HIGH,
GPIO133_MMC1_DAT1 | MFP_PULL_HIGH,
GPIO134_MMC1_DAT0 | MFP_PULL_HIGH,
GPIO136_MMC1_CMD | MFP_PULL_HIGH,
GPIO139_MMC1_CLK,
GPIO140_MMC1_CD | MFP_PULL_LOW,
GPIO141_MMC1_WP | MFP_PULL_LOW,
/* MMC1 */
GPIO37_MMC2_DAT3 | MFP_PULL_HIGH,
GPIO38_MMC2_DAT2 | MFP_PULL_HIGH,
GPIO39_MMC2_DAT1 | MFP_PULL_HIGH,
GPIO40_MMC2_DAT0 | MFP_PULL_HIGH,
GPIO41_MMC2_CMD | MFP_PULL_HIGH,
GPIO42_MMC2_CLK,
/* MMC2 */
GPIO165_MMC3_DAT7 | MFP_PULL_HIGH,
GPIO162_MMC3_DAT6 | MFP_PULL_HIGH,
GPIO166_MMC3_DAT5 | MFP_PULL_HIGH,
GPIO163_MMC3_DAT4 | MFP_PULL_HIGH,
GPIO167_MMC3_DAT3 | MFP_PULL_HIGH,
GPIO164_MMC3_DAT2 | MFP_PULL_HIGH,
GPIO168_MMC3_DAT1 | MFP_PULL_HIGH,
GPIO111_MMC3_DAT0 | MFP_PULL_HIGH,
GPIO112_MMC3_CMD | MFP_PULL_HIGH,
GPIO151_MMC3_CLK,
/* 5V regulator */
GPIO89_GPIO,
};
static struct pxa_gpio_platform_data mmp2_gpio_pdata = {
.irq_base = MMP_GPIO_TO_IRQ(0),
};
static struct regulator_consumer_supply max8649_supply[] = {
REGULATOR_SUPPLY("vcc_core", NULL),
};
static struct regulator_init_data max8649_init_data = {
.constraints = {
.name = "vcc_core range",
.min_uV = 1150000,
.max_uV = 1280000,
.always_on = 1,
.boot_on = 1,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
},
.num_consumer_supplies = 1,
.consumer_supplies = &max8649_supply[0],
};
static struct max8649_platform_data brownstone_max8649_info = {
.mode = 2, /* VID1 = 1, VID0 = 0 */
.extclk = 0,
.ramp_timing = MAX8649_RAMP_32MV,
.regulator = &max8649_init_data,
};
static struct regulator_consumer_supply brownstone_v_5vp_supplies[] = {
REGULATOR_SUPPLY("v_5vp", NULL),
};
static struct regulator_init_data brownstone_v_5vp_data = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(brownstone_v_5vp_supplies),
.consumer_supplies = brownstone_v_5vp_supplies,
};
static struct fixed_voltage_config brownstone_v_5vp = {
.supply_name = "v_5vp",
.microvolts = 5000000,
.gpio = GPIO_5V_ENABLE,
.enable_high = 1,
.enabled_at_boot = 1,
.init_data = &brownstone_v_5vp_data,
};
static struct platform_device brownstone_v_5vp_device = {
.name = "reg-fixed-voltage",
.id = 1,
.dev = {
.platform_data = &brownstone_v_5vp,
},
};
static struct max8925_platform_data brownstone_max8925_info = {
.irq_base = MMP_NR_IRQS,
};
static struct i2c_board_info brownstone_twsi1_info[] = {
[0] = {
.type = "max8649",
.addr = 0x60,
.platform_data = &brownstone_max8649_info,
},
[1] = {
.type = "max8925",
.addr = 0x3c,
.irq = IRQ_MMP2_PMIC,
.platform_data = &brownstone_max8925_info,
},
};
static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = {
.clk_delay_cycles = 0x1f,
};
static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc2 = {
.clk_delay_cycles = 0x1f,
.flags = PXA_FLAG_CARD_PERMANENT
| PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
};
static struct sram_platdata mmp2_asram_platdata = {
.pool_name = "asram",
.granularity = SRAM_GRANULARITY,
};
static struct sram_platdata mmp2_isram_platdata = {
.pool_name = "isram",
.granularity = SRAM_GRANULARITY,
};
/*
 * Board init for the MMP2-based Brownstone platform: configure the pin
 * mux first, then register the on-chip devices and finally the fixed
 * 5V regulator.
 */
static void __init brownstone_init(void)
{
	mfp_config(ARRAY_AND_SIZE(brownstone_pin_config));

	/* on-chip devices */
	mmp2_add_uart(1);
	mmp2_add_uart(3);
	platform_device_add_data(&mmp2_device_gpio, &mmp2_gpio_pdata,
				 sizeof(struct pxa_gpio_platform_data));
	platform_device_register(&mmp2_device_gpio);
	mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info));
	mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */
	mmp2_add_sdhost(2, &mmp2_sdh_platdata_mmc2); /* eMMC */
	mmp2_add_asram(&mmp2_asram_platdata);
	mmp2_add_isram(&mmp2_isram_platdata);

	/* enable 5v regulator */
	platform_device_register(&brownstone_v_5vp_device);
}
MACHINE_START(BROWNSTONE, "Brownstone Development Platform")
/* Maintainer: Haojian Zhuang <haojian.zhuang@marvell.com> */
.map_io = mmp_map_io,
.nr_irqs = BROWNSTONE_NR_IRQS,
.init_irq = mmp2_init_irq,
.init_time = mmp2_timer_init,
.init_machine = brownstone_init,
.restart = mmp_restart,
MACHINE_END
| gpl-2.0 |
mrjaydee82/SinLessKernel-4.4.4 | drivers/md/dm-region-hash.c | 5038 | 18427 | /*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "dm.h"
#define DM_MSG_PREFIX "region hash"
/*-----------------------------------------------------------------
* Region hash
*
* The mirror splits itself up into discrete regions. Each
* region can be in one of three states: clean, dirty,
* nosync. There is no need to put clean regions in the hash.
*
* In addition to being present in the hash table a region _may_
* be present on one of three lists.
*
* clean_regions: Regions on this list have no io pending to
* them, they are in sync, we are no longer interested in them,
* they are dull. dm_rh_update_states() will remove them from the
* hash table.
*
* quiesced_regions: These regions have been spun down, ready
* for recovery. rh_recovery_start() will remove regions from
* this list and hand them to kmirrord, which will schedule the
* recovery io with kcopyd.
*
* recovered_regions: Regions that kcopyd has successfully
* recovered. dm_rh_update_states() will now schedule any delayed
* io, up the recovery_count, and remove the region from the
* hash.
*
* There are 2 locks:
* A rw spin lock 'hash_lock' protects just the hash table,
* this is never held in write mode from interrupt context,
* which I believe means that we only have to disable irqs when
* doing a write lock.
*
* An ordinary spin lock 'region_lock' that protects the three
* lists in the region_hash, with the 'state', 'list' and
* 'delayed_bios' fields of the regions. This is used from irq
* context, so all other uses will have to suspend local irqs.
*---------------------------------------------------------------*/
struct dm_region_hash {
uint32_t region_size;
unsigned region_shift;
/* holds persistent region state */
struct dm_dirty_log *log;
/* hash table */
rwlock_t hash_lock;
mempool_t *region_pool;
unsigned mask;
unsigned nr_buckets;
unsigned prime;
unsigned shift;
struct list_head *buckets;
unsigned max_recovery; /* Max # of regions to recover in parallel */
spinlock_t region_lock;
atomic_t recovery_in_flight;
struct semaphore recovery_count;
struct list_head clean_regions;
struct list_head quiesced_regions;
struct list_head recovered_regions;
struct list_head failed_recovered_regions;
/*
* If there was a flush failure no regions can be marked clean.
*/
int flush_failure;
void *context;
sector_t target_begin;
/* Callback function to schedule bios writes */
void (*dispatch_bios)(void *context, struct bio_list *bios);
/* Callback function to wakeup callers worker thread. */
void (*wakeup_workers)(void *context);
/* Callback function to wakeup callers recovery waiters. */
void (*wakeup_all_recovery_waiters)(void *context);
};
struct dm_region {
struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */
region_t key;
int state;
struct list_head hash_list;
struct list_head list;
atomic_t pending;
struct bio_list delayed_bios;
};
/*
* Conversion fns
*/
/* Map a sector offset (relative to the region hash) to its region number. */
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
	return sector >> rh->region_shift;
}
/* Map a region number back to its starting sector offset. */
sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
/* Region containing a bio's start sector, relative to the target start. */
region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
	return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
/* Opaque caller context the owning region hash was created with. */
void *dm_rh_region_context(struct dm_region *reg)
{
	return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);
/* Region number (hash key) of a region object. */
region_t dm_rh_get_region_key(struct dm_region *reg)
{
	return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);
/* Region size in sectors, as passed to dm_region_hash_create(). */
sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
	return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);
/*
* FIXME: shall we pass in a structure instead of all these args to
* dm_region_hash_create()????
*/
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12
#define MIN_REGIONS 64
/*
 * Allocate and initialise a region hash.
 *
 * Returns ERR_PTR(-ENOMEM) on any allocation failure.  region_shift is
 * derived via ffs(), which assumes region_size is a power of two --
 * TODO confirm callers enforce that.  The bucket count is a power of
 * two between 64 and nr_regions >> 6.
 */
struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions)
{
	struct dm_region_hash *rh;
	unsigned nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh = kmalloc(sizeof(*rh), GFP_KERNEL);
	if (!rh) {
		DMERR("unable to allocate region hash memory");
		return ERR_PTR(-ENOMEM);
	}

	rh->context = context;
	rh->dispatch_bios = dispatch_bios;
	rh->wakeup_workers = wakeup_workers;
	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
	rh->target_begin = target_begin;
	rh->max_recovery = max_recovery;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	/* Multiplicative hash parameters used by rh_hash(). */
	rh->shift = RH_HASH_SHIFT;
	rh->prime = RH_HASH_MULT;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash bucket memory");
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);
	rh->flush_failure = 0;

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct dm_region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		kfree(rh);
		rh = ERR_PTR(-ENOMEM);
	}

	return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);
/*
 * Tear down a region hash: free any regions still in the hash buckets,
 * destroy the dirty log and region mempool, and release the bucket
 * array.  All I/O must be quiesced first (pending counts are asserted
 * to be zero, and the quiesced list must be empty).
 */
void dm_region_hash_destroy(struct dm_region_hash *rh)
{
	unsigned h;
	struct dm_region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);

	if (rh->region_pool)
		mempool_destroy(rh->region_pool);

	vfree(rh->buckets);
	kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);
/* The dirty log backing this region hash. */
struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
	return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
/* Multiplicative hash: mix with a large prime, keep the high bits. */
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
{
	region_t mixed = region * rh->prime;

	return (unsigned) (mixed >> rh->shift) & rh->mask;
}
/*
 * Find a region in its hash bucket, or NULL.  Callers hold hash_lock
 * (read or write).
 */
static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
	struct list_head *bucket = rh->buckets + rh_hash(rh, region);
	struct dm_region *reg;

	list_for_each_entry(reg, bucket, hash_list) {
		if (reg->key == region)
			return reg;
	}

	return NULL;
}
static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
list_add(®->hash_list, rh->buckets + rh_hash(rh, reg->key));
}
/*
 * Allocate, initialise and insert a new region object for 'region'.
 *
 * Called without hash_lock held (__rh_find() drops its read lock first),
 * so two callers may race; the loser frees its copy.  The initial state
 * comes from a blocking in_sync() query on the dirty log.  Allocation
 * cannot fail: mempool first, then __GFP_NOFAIL kmalloc.
 */
static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			/* Clean regions are also tracked on clean_regions. */
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}
/*
 * Look up a region, allocating it on a miss.  The caller holds hash_lock
 * for read; the lock is temporarily dropped around __rh_alloc() (which
 * takes it for write) and re-acquired before returning.
 */
static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}
/*
 * Return the DM_RH_* state of a region.  Uses the hash entry when one
 * exists; otherwise falls back to the dirty log ('may_block' is passed
 * through to its in_sync() method).
 */
int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	int r;
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a DM_RH_NOSYNC
	 */
	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);
/*
 * Record the outcome of one region's recovery in the dirty log, release
 * its delayed bios, drop a recovery_in_flight reference and free a
 * recovery slot.
 */
static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed. If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}
/* dm_rh_mark_nosync
 * @ms
 * @bio
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	if (bio->bi_rw & REQ_FLUSH) {
		rh->flush_failure = 1;
		return;
	}

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * Either case, the region should have not been connected to list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
/*
 * Splice the clean/recovered/failed-recovered lists out under the locks,
 * then finish each region lock-free: clear or complete it in the dirty
 * log, free the region objects and flush the log.
 */
void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);
/*
 * Take a pending-I/O reference on a region; a clean region becomes
 * dirty, leaves the clean list and is marked in the dirty log.
 */
static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}
/*
 * Take a pending reference for every bio in the list.  Flushes carry no
 * data and map to no region, so they are skipped.
 */
void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio = bios->head;

	while (bio) {
		if (!(bio->bi_rw & REQ_FLUSH))
			rh_inc(rh, dm_rh_bio_to_region(rh, bio));
		bio = bio->bi_next;
	}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
/*
 * Drop a pending-I/O reference.  When the last reference goes away the
 * region moves to the list matching its state (quiesced for
 * DM_RH_RECOVERING, clean for DM_RH_DIRTY) and the worker is woken.
 */
void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to corresponding list for next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should be kept off
		 * from clean list.
		 * The hash entry for DM_RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (unlikely(rh->flush_failure)) {
			/*
			 * If a write flush failed some time ago, we
			 * don't know whether or not this write made it
			 * to the disk, so we must resync the device.
			 */
			reg->state = DM_RH_NOSYNC;
		} else if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);
/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}
/*
 * Pull resync work from the dirty log while recovery slots
 * (recovery_count) are available, quiescing each region.  An extra
 * recovery_in_flight reference is held across the loop so waiters are
 * not woken while work is still being queued.
 */
void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);
/*
* Returns any quiesced regions.
*/
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
struct dm_region *reg = NULL;
spin_lock_irq(&rh->region_lock);
if (!list_empty(&rh->quiesced_regions)) {
reg = list_entry(rh->quiesced_regions.next,
struct dm_region, list);
list_del_init(®->list); /* remove from the quiesced list */
}
spin_unlock_irq(&rh->region_lock);
return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);
/*
 * Report the outcome of recovering one region: queue it on the
 * recovered or failed_recovered list and wake the worker, which will
 * finish it via dm_rh_update_states().
 */
void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);
/* Flush the dirty log; returns the log type's flush status. */
int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
struct dm_region *reg;
read_lock(&rh->hash_lock);
reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
bio_list_add(®->delayed_bios, bio);
read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);
/*
 * Acquire every recovery slot, waiting for in-flight recoveries to
 * drain, so that no new recovery can start.
 */
void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	unsigned remaining;

	for (remaining = rh->max_recovery; remaining; remaining--)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);
/* Release all recovery slots again and kick the worker. */
void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	unsigned slot;

	for (slot = 0; slot < rh->max_recovery; slot++)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
nutterpc/demonkernel-I9505-TW | drivers/md/dm-region-hash.c | 5038 | 18427 | /*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "dm.h"
#define DM_MSG_PREFIX "region hash"
/*-----------------------------------------------------------------
* Region hash
*
* The mirror splits itself up into discrete regions. Each
* region can be in one of three states: clean, dirty,
* nosync. There is no need to put clean regions in the hash.
*
* In addition to being present in the hash table a region _may_
* be present on one of three lists.
*
* clean_regions: Regions on this list have no io pending to
* them, they are in sync, we are no longer interested in them,
* they are dull. dm_rh_update_states() will remove them from the
* hash table.
*
* quiesced_regions: These regions have been spun down, ready
* for recovery. rh_recovery_start() will remove regions from
* this list and hand them to kmirrord, which will schedule the
* recovery io with kcopyd.
*
* recovered_regions: Regions that kcopyd has successfully
* recovered. dm_rh_update_states() will now schedule any delayed
* io, up the recovery_count, and remove the region from the
* hash.
*
* There are 2 locks:
* A rw spin lock 'hash_lock' protects just the hash table,
* this is never held in write mode from interrupt context,
* which I believe means that we only have to disable irqs when
* doing a write lock.
*
* An ordinary spin lock 'region_lock' that protects the three
* lists in the region_hash, with the 'state', 'list' and
* 'delayed_bios' fields of the regions. This is used from irq
* context, so all other uses will have to suspend local irqs.
*---------------------------------------------------------------*/
struct dm_region_hash {
uint32_t region_size;
unsigned region_shift;
/* holds persistent region state */
struct dm_dirty_log *log;
/* hash table */
rwlock_t hash_lock;
mempool_t *region_pool;
unsigned mask;
unsigned nr_buckets;
unsigned prime;
unsigned shift;
struct list_head *buckets;
unsigned max_recovery; /* Max # of regions to recover in parallel */
spinlock_t region_lock;
atomic_t recovery_in_flight;
struct semaphore recovery_count;
struct list_head clean_regions;
struct list_head quiesced_regions;
struct list_head recovered_regions;
struct list_head failed_recovered_regions;
/*
* If there was a flush failure no regions can be marked clean.
*/
int flush_failure;
void *context;
sector_t target_begin;
/* Callback function to schedule bios writes */
void (*dispatch_bios)(void *context, struct bio_list *bios);
/* Callback function to wakeup callers worker thread. */
void (*wakeup_workers)(void *context);
/* Callback function to wakeup callers recovery waiters. */
void (*wakeup_all_recovery_waiters)(void *context);
};
struct dm_region {
struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */
region_t key;
int state;
struct list_head hash_list;
struct list_head list;
atomic_t pending;
struct bio_list delayed_bios;
};
/*
* Conversion fns
*/
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
return sector >> rh->region_shift;
}
sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
void *dm_rh_region_context(struct dm_region *reg)
{
return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);
region_t dm_rh_get_region_key(struct dm_region *reg)
{
return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);
sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);
/*
* FIXME: shall we pass in a structure instead of all these args to
* dm_region_hash_create()????
*/
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12
#define MIN_REGIONS 64
struct dm_region_hash *dm_region_hash_create(
void *context, void (*dispatch_bios)(void *context,
struct bio_list *bios),
void (*wakeup_workers)(void *context),
void (*wakeup_all_recovery_waiters)(void *context),
sector_t target_begin, unsigned max_recovery,
struct dm_dirty_log *log, uint32_t region_size,
region_t nr_regions)
{
struct dm_region_hash *rh;
unsigned nr_buckets, max_buckets;
size_t i;
/*
* Calculate a suitable number of buckets for our hash
* table.
*/
max_buckets = nr_regions >> 6;
for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
;
nr_buckets >>= 1;
rh = kmalloc(sizeof(*rh), GFP_KERNEL);
if (!rh) {
DMERR("unable to allocate region hash memory");
return ERR_PTR(-ENOMEM);
}
rh->context = context;
rh->dispatch_bios = dispatch_bios;
rh->wakeup_workers = wakeup_workers;
rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
rh->target_begin = target_begin;
rh->max_recovery = max_recovery;
rh->log = log;
rh->region_size = region_size;
rh->region_shift = ffs(region_size) - 1;
rwlock_init(&rh->hash_lock);
rh->mask = nr_buckets - 1;
rh->nr_buckets = nr_buckets;
rh->shift = RH_HASH_SHIFT;
rh->prime = RH_HASH_MULT;
rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
if (!rh->buckets) {
DMERR("unable to allocate region hash bucket memory");
kfree(rh);
return ERR_PTR(-ENOMEM);
}
for (i = 0; i < nr_buckets; i++)
INIT_LIST_HEAD(rh->buckets + i);
spin_lock_init(&rh->region_lock);
sema_init(&rh->recovery_count, 0);
atomic_set(&rh->recovery_in_flight, 0);
INIT_LIST_HEAD(&rh->clean_regions);
INIT_LIST_HEAD(&rh->quiesced_regions);
INIT_LIST_HEAD(&rh->recovered_regions);
INIT_LIST_HEAD(&rh->failed_recovered_regions);
rh->flush_failure = 0;
rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
sizeof(struct dm_region));
if (!rh->region_pool) {
vfree(rh->buckets);
kfree(rh);
rh = ERR_PTR(-ENOMEM);
}
return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);
void dm_region_hash_destroy(struct dm_region_hash *rh)
{
unsigned h;
struct dm_region *reg, *nreg;
BUG_ON(!list_empty(&rh->quiesced_regions));
for (h = 0; h < rh->nr_buckets; h++) {
list_for_each_entry_safe(reg, nreg, rh->buckets + h,
hash_list) {
BUG_ON(atomic_read(®->pending));
mempool_free(reg, rh->region_pool);
}
}
if (rh->log)
dm_dirty_log_destroy(rh->log);
if (rh->region_pool)
mempool_destroy(rh->region_pool);
vfree(rh->buckets);
kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);
struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
{
return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
}
static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
struct dm_region *reg;
struct list_head *bucket = rh->buckets + rh_hash(rh, region);
list_for_each_entry(reg, bucket, hash_list)
if (reg->key == region)
return reg;
return NULL;
}
static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
list_add(®->hash_list, rh->buckets + rh_hash(rh, reg->key));
}
/*
 * Allocate and insert a region for @region, racing fairly with other
 * allocators: the new entry is built without locks, then inserted under
 * the hash write lock only if nobody beat us to it.  Returns the entry
 * now in the hash (ours or the winner's).
 */
static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	/* Fall back to a direct allocation if the pool is exhausted. */
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

	/* Initial state comes from the dirty log's view of the region. */
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			/* Clean regions also live on the clean list. */
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}
/*
 * Look up a region, allocating it if it doesn't exist yet.
 * Called with rh->hash_lock held for reading; the lock is dropped and
 * re-taken around the (sleeping/locking) allocation path.
 */
static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		/* __rh_alloc takes the write lock itself. */
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}
/*
 * Report a region's state: from its hash entry when one exists,
 * otherwise from the dirty log (any log error reads as DM_RH_NOSYNC).
 */
int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	struct dm_region *reg;
	int in_sync;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.  Any error (eg. -EWOULDBLOCK) is taken as
	 * DM_RH_NOSYNC.
	 */
	in_sync = rh->log->type->in_sync(rh->log, region, may_block);
	if (in_sync == 1)
		return DM_RH_CLEAN;

	return DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);
/*
 * Finish one recovery: record the outcome in the dirty log, release any
 * writes delayed on the region, then wake waiters.
 *
 * Fix: restore the '&' address-of operator corrupted into '(R)'.
 */
static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed. If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}
/* dm_rh_mark_nosync
 * @ms
 * @bio
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 *
 * Fix: restore the '&' address-of operators corrupted into '(R)'.
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	/* A failed flush taints the whole device, not one region. */
	if (bio->bi_rw & REQ_FLUSH) {
		rh->flush_failure = 1;
		return;
	}

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * Either case, the region should have not been connected to list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
/*
 * Reap completed work: detach the clean, recovered and failed-recovery
 * regions from the hash in one short locked pass, then finish them with
 * no locks held.  With @errors_handled set, failed recoveries are
 * reported as failures rather than marked in-sync.
 *
 * Fix: restore the '&' address-of operators corrupted into '(R)'.
 */
void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;
	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);
		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);
		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);
		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}
	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);
/*
 * Account one pending write against @region; a clean region becomes
 * dirty and is marked so in the dirty log.
 *
 * Fix: restore the '&' address-of operators corrupted into '(R)'.
 */
static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}
/*
 * Bump the pending count on the region of every non-flush bio in the
 * list.
 */
void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio = bios->head;

	while (bio) {
		if (!(bio->bi_rw & REQ_FLUSH))
			rh_inc(rh, dm_rh_bio_to_region(rh, bio));
		bio = bio->bi_next;
	}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
/*
 * Drop one pending write for @region.  When the last write completes,
 * route the region to the list matching its next state and kick the
 * worker.
 *
 * Fix: restore the '&' address-of operators corrupted into '(R)'.
 */
void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to corresponding list for next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should be kept off
		 * from clean list.
		 * The hash entry for DM_RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (unlikely(rh->flush_failure)) {
			/*
			 * If a write flush failed some time ago, we
			 * don't know whether or not this write made it
			 * to the disk, so we must resync the device.
			 */
			reg->state = DM_RH_NOSYNC;
		} else if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);
/*
 * Starts quiescing a region in preparation for recovery.
 * Returns the dirty log's verdict: <= 0 means no work available.
 *
 * Fix: restore the '&' address-of operators corrupted into '(R)'
 * ("&region" / "&reg" eaten as HTML entities).
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}
/*
 * Queue up as much recovery work as the recovery_count semaphore
 * allows, quiescing one region per acquired slot.  Each started
 * recovery holds a recovery_in_flight reference that is dropped by
 * complete_resync_work().
 */
void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			/* No work: give back the slot and the reference. */
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);
/*
* Returns any quiesced regions.
*/
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
struct dm_region *reg = NULL;
spin_lock_irq(&rh->region_lock);
if (!list_empty(&rh->quiesced_regions)) {
reg = list_entry(rh->quiesced_regions.next,
struct dm_region, list);
list_del_init(®->list); /* remove from the quiesced list */
}
spin_unlock_irq(&rh->region_lock);
return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);
/*
 * A recovery attempt on @reg finished: queue it on the recovered or
 * failed-recovered list according to @success and wake the worker.
 *
 * Fix: restore the '&' address-of operators corrupted into '(R)'.
 */
void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	int count = atomic_read(&rh->recovery_in_flight);

	return count;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);
int dm_rh_flush(struct dm_region_hash *rh)
{
return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
struct dm_region *reg;
read_lock(&rh->hash_lock);
reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
bio_list_add(®->delayed_bios, bio);
read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);
/*
 * Block until all recovery has drained by claiming every slot of the
 * recovery_count semaphore.
 */
void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	int n;

	/* wait for any recovering regions */
	for (n = rh->max_recovery; n > 0; n--)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);
/*
 * Re-open all recovery slots released by dm_rh_stop_recovery() and
 * kick the worker.
 */
void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	int n;

	for (n = rh->max_recovery; n > 0; n--)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mozzwald/linux-sunxi-marsboard-a20 | drivers/leds/leds-sunfire.c | 5550 | 6137 | /* leds-sunfire.c: SUNW,Ultra-Enterprise LED driver.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/leds.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/fhc.h>
#include <asm/upa.h>
#define DRIVER_NAME "leds-sunfire"
#define PFX DRIVER_NAME ": "
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun Fire LED driver");
MODULE_LICENSE("GPL");
/* One board LED: its LED-class device plus the register driving it. */
struct sunfire_led {
	struct led_classdev led_cdev;	/* LED class interface */
	void __iomem *reg;		/* control register for this LED */
};

#define to_sunfire_led(d) container_of(d, struct sunfire_led, led_cdev)
/*
 * Set or clear one clock-board LED control bit.  The left LED bit
 * (CLOCK_CTRL_LLED) is handled inverted (cleared to switch on); the
 * remaining bits are set to switch on.
 */
static void __clockboard_set(struct led_classdev *led_cdev,
			     enum led_brightness led_val, u8 bit)
{
	struct sunfire_led *p = to_sunfire_led(led_cdev);
	u8 reg = upa_readb(p->reg);

	switch (bit) {
	case CLOCK_CTRL_LLED:
		/* inverted sense: clear the bit to light the LED */
		if (led_val)
			reg &= ~bit;
		else
			reg |= bit;
		break;

	default:
		if (led_val)
			reg |= bit;
		else
			reg &= ~bit;
		break;
	}

	upa_writeb(reg, p->reg);
}
/* brightness_set hook for the clock board's left LED. */
static void clockboard_left_set(struct led_classdev *led_cdev,
				enum led_brightness led_val)
{
	__clockboard_set(led_cdev, led_val, CLOCK_CTRL_LLED);
}
/* brightness_set hook for the clock board's middle LED. */
static void clockboard_middle_set(struct led_classdev *led_cdev,
				  enum led_brightness led_val)
{
	__clockboard_set(led_cdev, led_val, CLOCK_CTRL_MLED);
}
/* brightness_set hook for the clock board's right LED. */
static void clockboard_right_set(struct led_classdev *led_cdev,
				 enum led_brightness led_val)
{
	__clockboard_set(led_cdev, led_val, CLOCK_CTRL_RLED);
}
/*
 * Set or clear one FHC board LED control bit.  As on the clock board,
 * the left LED (FHC_CONTROL_LLED) has inverted sense; the other bits
 * are set to switch on.
 */
static void __fhc_set(struct led_classdev *led_cdev,
		      enum led_brightness led_val, u32 bit)
{
	struct sunfire_led *p = to_sunfire_led(led_cdev);
	u32 reg = upa_readl(p->reg);

	switch (bit) {
	case FHC_CONTROL_LLED:
		/* inverted sense: clear the bit to light the LED */
		if (led_val)
			reg &= ~bit;
		else
			reg |= bit;
		break;

	default:
		if (led_val)
			reg |= bit;
		else
			reg &= ~bit;
		break;
	}

	upa_writel(reg, p->reg);
}
/* brightness_set hook for the FHC board's left LED. */
static void fhc_left_set(struct led_classdev *led_cdev,
			 enum led_brightness led_val)
{
	__fhc_set(led_cdev, led_val, FHC_CONTROL_LLED);
}
/* brightness_set hook for the FHC board's middle LED. */
static void fhc_middle_set(struct led_classdev *led_cdev,
			   enum led_brightness led_val)
{
	__fhc_set(led_cdev, led_val, FHC_CONTROL_MLED);
}
/* brightness_set hook for the FHC board's right LED. */
static void fhc_right_set(struct led_classdev *led_cdev,
			  enum led_brightness led_val)
{
	__fhc_set(led_cdev, led_val, FHC_CONTROL_RLED);
}
/* Brightness-set callback shared by all board LED handlers. */
typedef void (*set_handler)(struct led_classdev *, enum led_brightness);

/* Static description of one LED on a board. */
struct led_type {
	const char *name;		/* classdev name */
	set_handler handler;		/* brightness_set implementation */
	const char *default_trigger;	/* optional default trigger (may be NULL) */
};
#define NUM_LEDS_PER_BOARD 3
/* Per-platform-device state: the board's three LEDs. */
struct sunfire_drvdata {
	struct sunfire_led leds[NUM_LEDS_PER_BOARD];
};
/*
 * Common probe: register the three LEDs described by @types against the
 * platform device's single register resource.  On a registration
 * failure the LEDs registered so far are unwound before returning.
 */
static int __devinit sunfire_led_generic_probe(struct platform_device *pdev,
					       struct led_type *types)
{
	struct sunfire_drvdata *p;
	int i, err;

	if (pdev->num_resources != 1) {
		printk(KERN_ERR PFX "Wrong number of resources %d, should be 1\n",
		       pdev->num_resources);
		err = -EINVAL;
		goto out;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR PFX "Could not allocate struct sunfire_drvdata\n");
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < NUM_LEDS_PER_BOARD; i++) {
		struct led_classdev *lp = &p->leds[i].led_cdev;

		/* All three LEDs share the board's one control register. */
		p->leds[i].reg = (void __iomem *) pdev->resource[0].start;
		lp->name = types[i].name;
		lp->brightness = LED_FULL;
		lp->brightness_set = types[i].handler;
		lp->default_trigger = types[i].default_trigger;

		err = led_classdev_register(&pdev->dev, lp);
		if (err) {
			printk(KERN_ERR PFX "Could not register %s LED\n",
			       lp->name);
			goto out_unregister_led_cdevs;
		}
	}

	dev_set_drvdata(&pdev->dev, p);

	return 0;

out_unregister_led_cdevs:
	/* Unwind only the LEDs that registered successfully. */
	for (i--; i >= 0; i--)
		led_classdev_unregister(&p->leds[i].led_cdev);
	kfree(p);
out:
	return err;
}
/* Unregister and free all LEDs belonging to one platform device. */
static int __devexit sunfire_led_generic_remove(struct platform_device *pdev)
{
	struct sunfire_drvdata *p = dev_get_drvdata(&pdev->dev);
	int idx;

	for (idx = 0; idx < NUM_LEDS_PER_BOARD; idx++)
		led_classdev_unregister(&p->leds[idx].led_cdev);

	kfree(p);

	return 0;
}
/* Clock-board LED table; the right LED defaults to the heartbeat trigger. */
static struct led_type clockboard_led_types[NUM_LEDS_PER_BOARD] = {
	{
		.name		= "clockboard-left",
		.handler	= clockboard_left_set,
	},
	{
		.name		= "clockboard-middle",
		.handler	= clockboard_middle_set,
	},
	{
		.name		= "clockboard-right",
		.handler	= clockboard_right_set,
		.default_trigger = "heartbeat",
	},
};
/* Probe hook: register the three clock-board LEDs. */
static int __devinit sunfire_clockboard_led_probe(struct platform_device *pdev)
{
	struct led_type *types = clockboard_led_types;

	return sunfire_led_generic_probe(pdev, types);
}
/* FHC board LED table; the right LED defaults to the heartbeat trigger. */
static struct led_type fhc_led_types[NUM_LEDS_PER_BOARD] = {
	{
		.name		= "fhc-left",
		.handler	= fhc_left_set,
	},
	{
		.name		= "fhc-middle",
		.handler	= fhc_middle_set,
	},
	{
		.name		= "fhc-right",
		.handler	= fhc_right_set,
		.default_trigger = "heartbeat",
	},
};
/* Probe hook: register the three FHC board LEDs. */
static int __devinit sunfire_fhc_led_probe(struct platform_device *pdev)
{
	struct led_type *types = fhc_led_types;

	return sunfire_led_generic_probe(pdev, types);
}
MODULE_ALIAS("platform:sunfire-clockboard-leds");
MODULE_ALIAS("platform:sunfire-fhc-leds");
/* Platform driver for the clock-board LEDs. */
static struct platform_driver sunfire_clockboard_led_driver = {
	.probe		= sunfire_clockboard_led_probe,
	.remove		= __devexit_p(sunfire_led_generic_remove),
	.driver		= {
		.name	= "sunfire-clockboard-leds",
		.owner	= THIS_MODULE,
	},
};
/* Platform driver for the FHC board LEDs. */
static struct platform_driver sunfire_fhc_led_driver = {
	.probe		= sunfire_fhc_led_probe,
	.remove		= __devexit_p(sunfire_led_generic_remove),
	.driver		= {
		.name	= "sunfire-fhc-leds",
		.owner	= THIS_MODULE,
	},
};
/*
 * Register both platform drivers; unregister the first again if the
 * second fails.
 */
static int __init sunfire_leds_init(void)
{
	int rc;

	rc = platform_driver_register(&sunfire_clockboard_led_driver);
	if (rc) {
		printk(KERN_ERR PFX "Could not register clock board LED driver\n");
		return rc;
	}

	rc = platform_driver_register(&sunfire_fhc_led_driver);
	if (rc) {
		printk(KERN_ERR PFX "Could not register FHC LED driver\n");
		platform_driver_unregister(&sunfire_clockboard_led_driver);
	}

	return rc;
}
/* Module exit: drop both platform drivers. */
static void __exit sunfire_leds_exit(void)
{
	platform_driver_unregister(&sunfire_clockboard_led_driver);
	platform_driver_unregister(&sunfire_fhc_led_driver);
}
module_init(sunfire_leds_init);
module_exit(sunfire_leds_exit);
| gpl-2.0 |
luca020400/android_kernel_motorola_msm8226 | arch/powerpc/sysdev/fsl_pmc.c | 7086 | 2047 | /*
* Suspend/resume support
*
* Copyright 2009 MontaVista Software, Inc.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/of_platform.h>
/* Memory-mapped layout of the Freescale power-management controller. */
struct pmc_regs {
	__be32 devdisr;		/* not used by this driver */
	__be32 devdisr2;	/* not used by this driver */
	__be32 :32;		/* reserved */
	__be32 :32;		/* reserved */
	__be32 pmcsr;		/* PM control/status; only SLP is touched */
#define PMCSR_SLP	(1 << 17)	/* sleep request/indication bit */
};
static struct device *pmc_dev;
static struct pmc_regs __iomem *pmc_regs;
/*
 * Enter standby by setting PMCSR[SLP]; execution stalls here while
 * asleep.  On wakeup, spin until the hardware has cleared SLP again,
 * returning -ETIMEDOUT if it never does.
 */
static int pmc_suspend_enter(suspend_state_t state)
{
	int ret;

	setbits32(&pmc_regs->pmcsr, PMCSR_SLP);
	/* At this point, the CPU is asleep. */

	/* Upon resume, wait for SLP bit to be clear. */
	ret = spin_event_timeout((in_be32(&pmc_regs->pmcsr) & PMCSR_SLP) == 0,
				 10000, 10) ? 0 : -ETIMEDOUT;
	if (ret)
		dev_err(pmc_dev, "tired waiting for SLP bit to clear\n");
	return ret;
}
/* Only standby is supported by this PMC. */
static int pmc_suspend_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_STANDBY;
}
/* Suspend callbacks installed via suspend_set_ops() at probe time. */
static const struct platform_suspend_ops pmc_suspend_ops = {
	.valid = pmc_suspend_valid,
	.enter = pmc_suspend_enter,
};
/* Map the PMC register block and register the suspend operations. */
static int pmc_probe(struct platform_device *ofdev)
{
	pmc_regs = of_iomap(ofdev->dev.of_node, 0);
	if (!pmc_regs)
		return -ENOMEM;

	pmc_dev = &ofdev->dev;
	suspend_set_ops(&pmc_suspend_ops);
	return 0;
}
/* Device-tree compatibles handled by this driver. */
static const struct of_device_id pmc_ids[] = {
	{ .compatible = "fsl,mpc8548-pmc", },
	{ .compatible = "fsl,mpc8641d-pmc", },
	{ },
};
/* Platform driver binding the PMC device-tree node to pmc_probe(). */
static struct platform_driver pmc_driver = {
	.driver = {
		.name = "fsl-pmc",
		.owner = THIS_MODULE,
		.of_match_table = pmc_ids,
	},
	.probe = pmc_probe,
};
/* Module entry point: register the PMC platform driver. */
static int __init pmc_init(void)
{
	struct platform_driver *drv = &pmc_driver;

	return platform_driver_register(drv);
}
device_initcall(pmc_init);
| gpl-2.0 |
etwoc/gc300-kernel | drivers/scsi/FlashPoint.c | 10414 | 200499 | /*
FlashPoint.c -- FlashPoint SCCB Manager for Linux
This file contains the FlashPoint SCCB Manager from BusLogic's FlashPoint
Driver Developer's Kit, with minor modifications by Leonard N. Zubkoff for
Linux compatibility. It was provided by BusLogic in the form of 16 separate
source files, which would have unnecessarily cluttered the scsi directory, so
the individual files have been combined into this single file.
Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
This file is available under both the GNU General Public License
and a BSD-style copyright; see LICENSE.FlashPoint for details.
*/
#ifdef CONFIG_SCSI_FLASHPOINT
#define MAX_CARDS 8
#undef BUSTYPE_PCI
#define CRCMASK 0xA001
#define FAILURE 0xFFFFFFFFL
struct sccb;
typedef void (*CALL_BK_FN) (struct sccb *);
/*
 * Per-adapter information block exchanged with the SCCB manager.
 * Field meanings follow the BusLogic FlashPoint DDK; several fields are
 * opaque/reserved at this layer — NOTE(review): comments below marked
 * "presumably" are inferred from field names, not from visible code.
 */
struct sccb_mgr_info {
	unsigned long si_baseaddr;		/* I/O base address of the card */
	unsigned char si_present;
	unsigned char si_intvect;		/* interrupt vector */
	unsigned char si_id;			/* host adapter SCSI ID */
	unsigned char si_lun;
	unsigned short si_fw_revision;
	unsigned short si_per_targ_init_sync;	/* presumably one bit per target */
	unsigned short si_per_targ_fast_nego;
	unsigned short si_per_targ_ultra_nego;
	unsigned short si_per_targ_no_disc;
	unsigned short si_per_targ_wide_nego;
	unsigned short si_flags;		/* SCSI_PARITY_ENA etc. */
	unsigned char si_card_family;		/* e.g. HARPOON_FAMILY */
	unsigned char si_bustype;		/* e.g. BUSTYPE_PCI */
	unsigned char si_card_model[3];
	unsigned char si_relative_cardnum;
	unsigned char si_reserved[4];
	unsigned long si_OS_reserved;
	unsigned char si_XlatInfo[4];
	unsigned long si_reserved2[5];
	unsigned long si_secondary_range;
};
#define SCSI_PARITY_ENA 0x0001
#define LOW_BYTE_TERM 0x0010
#define HIGH_BYTE_TERM 0x0020
#define BUSTYPE_PCI 0x3
#define SUPPORT_16TAR_32LUN 0x0002
#define SOFT_RESET 0x0004
#define EXTENDED_TRANSLATION 0x0008
#define POST_ALL_UNDERRRUNS 0x0040
#define FLAG_SCAM_ENABLED 0x0080
#define FLAG_SCAM_LEVEL2 0x0100
#define HARPOON_FAMILY 0x02
/* SCCB struct used for both SCCB and UCB manager compiles!
* The UCB Manager treats the SCCB as it's 'native hardware structure'
*/
#pragma pack(1)
/*
 * SCCB: the SCSI command control block handed to the manager.  The
 * layout is packed because the leading fields mirror the host-visible
 * command format; fields prefixed Sccb_/Save_ are manager-private
 * working state.
 */
struct sccb {
	unsigned char OperationCode;	/* e.g. SCATTER_GATHER_COMMAND */
	unsigned char ControlByte;
	unsigned char CdbLength;
	unsigned char RequestSenseLength;
	unsigned long DataLength;
	unsigned long DataPointer;
	unsigned char CcbRes[2];
	unsigned char HostStatus;	/* SCCB_* host status codes */
	unsigned char TargetStatus;
	unsigned char TargID;
	unsigned char Lun;
	unsigned char Cdb[12];
	unsigned char CcbRes1;
	unsigned char Reserved1;
	unsigned long Reserved2;
	unsigned long SensePointer;

	CALL_BK_FN SccbCallback;	/* VOID (*SccbCallback)(); */
	unsigned long SccbIOPort;	/* Identifies board base port */
	unsigned char SccbStatus;	/* SCCB_IN_PROCESS/SUCCESS/ABORT/ERROR */
	unsigned char SCCBRes2;
	unsigned short SccbOSFlags;

	unsigned long Sccb_XferCnt;	/* actual transfer count */
	unsigned long Sccb_ATC;
	unsigned long SccbVirtDataPtr;	/* virtual addr for OS/2 */
	unsigned long Sccb_res1;
	unsigned short Sccb_MGRFlags;
	unsigned short Sccb_sgseg;
	unsigned char Sccb_scsimsg;	/* identify msg for selection */
	unsigned char Sccb_tag;
	unsigned char Sccb_scsistat;	/* bus state, e.g. SELECT_ST */
	unsigned char Sccb_idmsg;	/* image of last msg in */
	struct sccb *Sccb_forwardlink;	/* queue linkage */
	struct sccb *Sccb_backlink;
	unsigned long Sccb_savedATC;
	unsigned char Save_Cdb[6];
	unsigned char Save_CdbLen;
	unsigned char Sccb_XferState;	/* F_HOST_XFER_DIR etc. */
	unsigned long Sccb_SGoffset;
};
#pragma pack()
#define SCATTER_GATHER_COMMAND 0x02
#define RESIDUAL_COMMAND 0x03
#define RESIDUAL_SG_COMMAND 0x04
#define RESET_COMMAND 0x81
#define F_USE_CMD_Q 0x20 /*Inidcates TAGGED command. */
#define TAG_TYPE_MASK 0xC0 /*Type of tag msg to send. */
#define SCCB_DATA_XFER_OUT 0x10 /* Write */
#define SCCB_DATA_XFER_IN 0x08 /* Read */
#define NO_AUTO_REQUEST_SENSE 0x01 /* No Request Sense Buffer */
#define BUS_FREE_ST 0
#define SELECT_ST 1
#define SELECT_BDR_ST 2 /* Select w\ Bus Device Reset */
#define SELECT_SN_ST 3 /* Select w\ Sync Nego */
#define SELECT_WN_ST 4 /* Select w\ Wide Data Nego */
#define SELECT_Q_ST 5 /* Select w\ Tagged Q'ing */
#define COMMAND_ST 6
#define DATA_OUT_ST 7
#define DATA_IN_ST 8
#define DISCONNECT_ST 9
#define ABORT_ST 11
#define F_HOST_XFER_DIR 0x01
#define F_ALL_XFERRED 0x02
#define F_SG_XFER 0x04
#define F_AUTO_SENSE 0x08
#define F_ODD_BALL_CNT 0x10
#define F_NO_DATA_YET 0x80
#define F_STATUSLOADED 0x01
#define F_DEV_SELECTED 0x04
#define SCCB_COMPLETE 0x00 /* SCCB completed without error */
#define SCCB_DATA_UNDER_RUN 0x0C
#define SCCB_SELECTION_TIMEOUT 0x11 /* Set SCSI selection timed out */
#define SCCB_DATA_OVER_RUN 0x12
#define SCCB_PHASE_SEQUENCE_FAIL 0x14 /* Target bus phase sequence failure */
#define SCCB_GROSS_FW_ERR 0x27 /* Major problem! */
#define SCCB_BM_ERR 0x30 /* BusMaster error. */
#define SCCB_PARITY_ERR 0x34 /* SCSI parity error */
#define SCCB_IN_PROCESS 0x00
#define SCCB_SUCCESS 0x01
#define SCCB_ABORT 0x02
#define SCCB_ERROR 0x04
#define ORION_FW_REV 3110
#define QUEUE_DEPTH 254+1 /*1 for Normal disconnect 32 for Q'ing. */
#define MAX_MB_CARDS 4 /* Max. no of cards suppoerted on Mother Board */
#define MAX_SCSI_TAR 16
#define MAX_LUN 32
#define LUN_MASK 0x1f
#define SG_BUF_CNT 16 /*Number of prefetched elements. */
#define SG_ELEMENT_SIZE 8 /*Eight byte per element. */
#define RD_HARPOON(ioport) inb((u32)ioport)
#define RDW_HARPOON(ioport) inw((u32)ioport)
#define RD_HARP32(ioport,offset,data) (data = inl((u32)(ioport + offset)))
#define WR_HARPOON(ioport,val) outb((u8) val, (u32)ioport)
#define WRW_HARPOON(ioport,val) outw((u16)val, (u32)ioport)
#define WR_HARP32(ioport,offset,data) outl(data, (u32)(ioport + offset))
#define TAR_SYNC_MASK (BIT(7)+BIT(6))
#define SYNC_TRYING BIT(6)
#define SYNC_SUPPORTED (BIT(7)+BIT(6))
#define TAR_WIDE_MASK (BIT(5)+BIT(4))
#define WIDE_ENABLED BIT(4)
#define WIDE_NEGOCIATED BIT(5)
#define TAR_TAG_Q_MASK (BIT(3)+BIT(2))
#define TAG_Q_TRYING BIT(2)
#define TAG_Q_REJECT BIT(3)
#define TAR_ALLOW_DISC BIT(0)
#define EE_SYNC_MASK (BIT(0)+BIT(1))
#define EE_SYNC_5MB BIT(0)
#define EE_SYNC_10MB BIT(1)
#define EE_SYNC_20MB (BIT(0)+BIT(1))
#define EE_WIDE_SCSI BIT(7)
/* Per-target state: selection queue, tag queueing and negotiated modes. */
struct sccb_mgr_tar_info {

	struct sccb *TarSelQ_Head;	/* linked queue of SCCBs awaiting selection */
	struct sccb *TarSelQ_Tail;
	unsigned char TarLUN_CA;	/*Contingent Allgiance */
	unsigned char TarTagQ_Cnt;
	unsigned char TarSelQ_Cnt;
	unsigned char TarStatus;	/* TAR_SYNC_MASK / TAR_WIDE_MASK / TAR_TAG_Q_MASK bits */
	unsigned char TarEEValue;	/* EEPROM-derived settings (EE_* bits) */
	unsigned char TarSyncCtrl;
	unsigned char TarReserved[2];	/* for alignment */
	unsigned char LunDiscQ_Idx[MAX_LUN];
	unsigned char TarLUNBusy[MAX_LUN];
};
/* Cached copy of the adapter's EEPROM/NVRAM configuration. */
struct nvram_info {
	unsigned char niModel;	/* Model No. of card */
	unsigned char niCardNo;	/* Card no. */
	unsigned long niBaseAddr;	/* Port Address of card */
	unsigned char niSysConf;	/* Adapter Configuration byte - Byte 16 of eeprom map */
	unsigned char niScsiConf;	/* SCSI Configuration byte - Byte 17 of eeprom map */
	unsigned char niScamConf;	/* SCAM Configuration byte - Byte 20 of eeprom map */
	unsigned char niAdapId;	/* Host Adapter ID - Byte 24 of eerpom map */
	unsigned char niSyncTbl[MAX_SCSI_TAR / 2];	/* Sync/Wide byte of targets */
	unsigned char niScamTbl[MAX_SCSI_TAR][4];	/* Compressed Scam name string of Targets */
};
#define MODEL_LT 1
#define MODEL_DL 2
#define MODEL_LW 3
#define MODEL_DW 4
/* Per-HBA run-time state for the SCCB manager. */
struct sccb_card {
	struct sccb *currentSCCB;	/* SCCB currently being processed */
	struct sccb_mgr_info *cardInfo;

	unsigned long ioPort;

	unsigned short cmdCounter;
	unsigned char discQCount;	/* entries used in discQ_Tbl */
	unsigned char tagQ_Lst;
	unsigned char cardIndex;
	unsigned char scanIndex;
	unsigned char globalFlags;	/* F_* card flags */
	unsigned char ourId;
	struct nvram_info *pNvRamInfo;
	struct sccb *discQ_Tbl[QUEUE_DEPTH];	/* disconnected-command table */
};
#define F_TAG_STARTED 0x01
#define F_CONLUN_IO 0x02
#define F_DO_RENEGO 0x04
#define F_NO_FILTER 0x08
#define F_GREEN_PC 0x10
#define F_HOST_XFER_ACT 0x20
#define F_NEW_SCCB_CMD 0x40
#define F_UPDATE_EEPROM 0x80
#define ID_STRING_LENGTH 32
#define TYPE_CODE0 0x63 /*Level2 Mstr (bits 7-6), */
#define SLV_TYPE_CODE0 0xA3 /*Priority Bit set (bits 7-6), */
#define ASSIGN_ID 0x00
#define SET_P_FLAG 0x01
#define CFG_CMPLT 0x03
#define DOM_MSTR 0x0F
#define SYNC_PTRN 0x1F
#define ID_0_7 0x18
#define ID_8_F 0x11
#define MISC_CODE 0x14
#define CLR_P_FLAG 0x18
#define INIT_SELTD 0x01
#define LEVEL2_TAR 0x02
/* SCAM ID-assignment states: one per possible SCSI ID plus bookkeeping. */
enum scam_id_st { ID0, ID1, ID2, ID3, ID4, ID5, ID6, ID7, ID8, ID9, ID10, ID11,
	ID12,
	ID13, ID14, ID15, ID_UNUSED, ID_UNASSIGNED, ID_ASSIGNED, LEGACY,
	CLR_PRIORITY, NO_ID_AVAIL
};
/* Per-device SCAM record: identification string plus assignment state. */
typedef struct SCCBscam_info {

	unsigned char id_string[ID_STRING_LENGTH];
	enum scam_id_st state;

} SCCBSCAM_INFO;
#define SCSI_REQUEST_SENSE 0x03
#define SCSI_READ 0x08
#define SCSI_WRITE 0x0A
#define SCSI_START_STOP_UNIT 0x1B
#define SCSI_READ_EXTENDED 0x28
#define SCSI_WRITE_EXTENDED 0x2A
#define SCSI_WRITE_AND_VERIFY 0x2E
#define SSGOOD 0x00
#define SSCHECK 0x02
#define SSQ_FULL 0x28
#define SMCMD_COMP 0x00
#define SMEXT 0x01
#define SMSAVE_DATA_PTR 0x02
#define SMREST_DATA_PTR 0x03
#define SMDISC 0x04
#define SMABORT 0x06
#define SMREJECT 0x07
#define SMNO_OP 0x08
#define SMPARITY 0x09
#define SMDEV_RESET 0x0C
#define SMABORT_TAG 0x0D
#define SMINIT_RECOVERY 0x0F
#define SMREL_RECOVERY 0x10
#define SMIDENT 0x80
#define DISC_PRIV 0x40
#define SMSYNC 0x01
#define SMWDTR 0x03
#define SM8BIT 0x00
#define SM16BIT 0x01
#define SMIGNORWR 0x23 /* Ignore Wide Residue */
#define SIX_BYTE_CMD 0x06
#define TWELVE_BYTE_CMD 0x0C
#define ASYNC 0x00
#define MAX_OFFSET 0x0F /* Maxbyteoffset for Sync Xfers */
#define EEPROM_WD_CNT 256
#define EEPROM_CHECK_SUM 0
#define FW_SIGNATURE 2
#define MODEL_NUMB_0 4
#define MODEL_NUMB_2 6
#define MODEL_NUMB_4 8
#define SYSTEM_CONFIG 16
#define SCSI_CONFIG 17
#define BIOS_CONFIG 18
#define SCAM_CONFIG 20
#define ADAPTER_SCSI_ID 24
#define IGNORE_B_SCAN 32
#define SEND_START_ENA 34
#define DEVICE_ENABLE 36
#define SYNC_RATE_TBL 38
#define SYNC_RATE_TBL01 38
#define SYNC_RATE_TBL23 40
#define SYNC_RATE_TBL45 42
#define SYNC_RATE_TBL67 44
#define SYNC_RATE_TBL89 46
#define SYNC_RATE_TBLab 48
#define SYNC_RATE_TBLcd 50
#define SYNC_RATE_TBLef 52
#define EE_SCAMBASE 256
#define SCAM_ENABLED BIT(2)
#define SCAM_LEVEL2 BIT(3)
#define RENEGO_ENA BIT(10)
#define CONNIO_ENA BIT(11)
#define GREEN_PC_ENA BIT(12)
#define AUTO_RATE_00 00
#define AUTO_RATE_05 01
#define AUTO_RATE_10 02
#define AUTO_RATE_20 03
#define WIDE_NEGO_BIT BIT(7)
#define DISC_ENABLE_BIT BIT(6)
#define hp_vendor_id_0 0x00 /* LSB */
#define ORION_VEND_0 0x4B
#define hp_vendor_id_1 0x01 /* MSB */
#define ORION_VEND_1 0x10
#define hp_device_id_0 0x02 /* LSB */
#define ORION_DEV_0 0x30
#define hp_device_id_1 0x03 /* MSB */
#define ORION_DEV_1 0x81
/* Sub Vendor ID and Sub Device ID only available in
Harpoon Version 2 and higher */
#define hp_sub_device_id_0 0x06 /* LSB */
#define hp_semaphore 0x0C
#define SCCB_MGR_ACTIVE BIT(0)
#define TICKLE_ME BIT(1)
#define SCCB_MGR_PRESENT BIT(3)
#define BIOS_IN_USE BIT(4)
#define hp_sys_ctrl 0x0F
#define STOP_CLK BIT(0) /*Turn off BusMaster Clock */
#define DRVR_RST BIT(1) /*Firmware Reset to 80C15 chip */
#define HALT_MACH BIT(3) /*Halt State Machine */
#define HARD_ABORT BIT(4) /*Hard Abort */
#define hp_host_blk_cnt 0x13
#define XFER_BLK64 0x06 /* 1 1 0 64 byte per block */
#define BM_THRESHOLD 0x40 /* PCI mode can only xfer 16 bytes */
#define hp_int_mask 0x17
#define INT_CMD_COMPL BIT(0) /* DMA command complete */
#define INT_EXT_STATUS BIT(1) /* Extended Status Set */
#define hp_xfer_cnt_lo 0x18
#define hp_xfer_cnt_hi 0x1A
#define hp_xfer_cmd 0x1B
#define XFER_HOST_DMA 0x00 /* 0 0 0 Transfer Host -> DMA */
#define XFER_DMA_HOST 0x01 /* 0 0 1 Transfer DMA -> Host */
#define XFER_HOST_AUTO 0x00 /* 0 0 Auto Transfer Size */
#define XFER_DMA_8BIT 0x20 /* 0 1 8 BIT Transfer Size */
#define DISABLE_INT BIT(7) /*Do not interrupt at end of cmd. */
#define HOST_WRT_CMD ((DISABLE_INT + XFER_HOST_DMA + XFER_HOST_AUTO + XFER_DMA_8BIT))
#define HOST_RD_CMD ((DISABLE_INT + XFER_DMA_HOST + XFER_HOST_AUTO + XFER_DMA_8BIT))
#define hp_host_addr_lo 0x1C
#define hp_host_addr_hmi 0x1E
#define hp_ee_ctrl 0x22
#define EXT_ARB_ACK BIT(7)
#define SCSI_TERM_ENA_H BIT(6) /* SCSI high byte terminator */
#define SEE_MS BIT(5)
#define SEE_CS BIT(3)
#define SEE_CLK BIT(2)
#define SEE_DO BIT(1)
#define SEE_DI BIT(0)
#define EE_READ 0x06
#define EE_WRITE 0x05
#define EWEN 0x04
#define EWEN_ADDR 0x03C0
#define EWDS 0x04
#define EWDS_ADDR 0x0000
#define hp_bm_ctrl 0x26
#define SCSI_TERM_ENA_L BIT(0) /*Enable/Disable external terminators */
#define FLUSH_XFER_CNTR BIT(1) /*Flush transfer counter */
#define FORCE1_XFER BIT(5) /*Always xfer one byte in byte mode */
#define FAST_SINGLE BIT(6) /*?? */
#define BMCTRL_DEFAULT (FORCE1_XFER|FAST_SINGLE|SCSI_TERM_ENA_L)
#define hp_sg_addr 0x28
#define hp_page_ctrl 0x29
#define SCATTER_EN BIT(0)
#define SGRAM_ARAM BIT(1)
#define G_INT_DISABLE BIT(3) /* Enable/Disable all Interrupts */
#define NARROW_SCSI_CARD BIT(4) /* NARROW/WIDE SCSI config pin */
#define hp_pci_stat_cfg 0x2D
#define REC_MASTER_ABORT BIT(5) /*received Master abort */
#define hp_rev_num 0x33
#define hp_stack_data 0x34
#define hp_stack_addr 0x35
#define hp_ext_status 0x36
#define BM_FORCE_OFF BIT(0) /*Bus Master is forced to get off */
#define PCI_TGT_ABORT BIT(0) /*PCI bus master transaction aborted */
#define PCI_DEV_TMOUT BIT(1) /*PCI Device Time out */
#define CMD_ABORTED BIT(4) /*Command aborted */
#define BM_PARITY_ERR BIT(5) /*parity error on data received */
#define PIO_OVERRUN BIT(6) /*Slave data overrun */
#define BM_CMD_BUSY BIT(7) /*Bus master transfer command busy */
#define BAD_EXT_STATUS (BM_FORCE_OFF | PCI_DEV_TMOUT | CMD_ABORTED | \
BM_PARITY_ERR | PIO_OVERRUN)
#define hp_int_status 0x37
#define EXT_STATUS_ON BIT(1) /*Extended status is valid */
#define SCSI_INTERRUPT BIT(2) /*Global indication of a SCSI int. */
#define INT_ASSERTED BIT(5) /* */
#define hp_fifo_cnt 0x38
#define hp_intena 0x40
#define RESET BIT(7)
#define PROG_HLT BIT(6)
#define PARITY BIT(5)
#define FIFO BIT(4)
#define SEL BIT(3)
#define SCAM_SEL BIT(2)
#define RSEL BIT(1)
#define TIMEOUT BIT(0)
#define BUS_FREE BIT(15)
#define XFER_CNT_0 BIT(14)
#define PHASE BIT(13)
#define IUNKWN BIT(12)
#define ICMD_COMP BIT(11)
#define ITICKLE BIT(10)
#define IDO_STRT BIT(9)
#define ITAR_DISC BIT(8)
#define AUTO_INT (BIT(12)+BIT(11)+BIT(10)+BIT(9)+BIT(8))
#define CLR_ALL_INT 0xFFFF
#define CLR_ALL_INT_1 0xFF00
#define hp_intstat 0x42
#define hp_scsisig 0x44
#define SCSI_SEL BIT(7)
#define SCSI_BSY BIT(6)
#define SCSI_REQ BIT(5)
#define SCSI_ACK BIT(4)
#define SCSI_ATN BIT(3)
#define SCSI_CD BIT(2)
#define SCSI_MSG BIT(1)
#define SCSI_IOBIT BIT(0)
#define S_SCSI_PHZ (BIT(2)+BIT(1)+BIT(0))
#define S_MSGO_PH (BIT(2)+BIT(1) )
#define S_MSGI_PH (BIT(2)+BIT(1)+BIT(0))
#define S_DATAI_PH ( BIT(0))
#define S_DATAO_PH 0x00
#define S_ILL_PH ( BIT(1) )
#define hp_scsictrl_0 0x45
#define SEL_TAR BIT(6)
#define ENA_ATN BIT(4)
#define ENA_RESEL BIT(2)
#define SCSI_RST BIT(1)
#define ENA_SCAM_SEL BIT(0)
#define hp_portctrl_0 0x46
#define SCSI_PORT BIT(7)
#define SCSI_INBIT BIT(6)
#define DMA_PORT BIT(5)
#define DMA_RD BIT(4)
#define HOST_PORT BIT(3)
#define HOST_WRT BIT(2)
#define SCSI_BUS_EN BIT(1)
#define START_TO BIT(0)
#define hp_scsireset 0x47
#define SCSI_INI BIT(6)
#define SCAM_EN BIT(5)
#define DMA_RESET BIT(3)
#define HPSCSI_RESET BIT(2)
#define PROG_RESET BIT(1)
#define FIFO_CLR BIT(0)
#define hp_xfercnt_0 0x48
#define hp_xfercnt_2 0x4A
#define hp_fifodata_0 0x4C
#define hp_addstat 0x4E
#define SCAM_TIMER BIT(7)
#define SCSI_MODE8 BIT(3)
#define SCSI_PAR_ERR BIT(0)
#define hp_prgmcnt_0 0x4F
#define hp_selfid_0 0x50
#define hp_selfid_1 0x51
#define hp_arb_id 0x52
#define hp_select_id 0x53
#define hp_synctarg_base 0x54
#define hp_synctarg_12 0x54
#define hp_synctarg_13 0x55
#define hp_synctarg_14 0x56
#define hp_synctarg_15 0x57
#define hp_synctarg_8 0x58
#define hp_synctarg_9 0x59
#define hp_synctarg_10 0x5A
#define hp_synctarg_11 0x5B
#define hp_synctarg_4 0x5C
#define hp_synctarg_5 0x5D
#define hp_synctarg_6 0x5E
#define hp_synctarg_7 0x5F
#define hp_synctarg_0 0x60
#define hp_synctarg_1 0x61
#define hp_synctarg_2 0x62
#define hp_synctarg_3 0x63
#define NARROW_SCSI BIT(4)
#define DEFAULT_OFFSET 0x0F
#define hp_autostart_0 0x64
#define hp_autostart_1 0x65
#define hp_autostart_3 0x67
#define AUTO_IMMED BIT(5)
#define SELECT BIT(6)
#define END_DATA (BIT(7)+BIT(6))
#define hp_gp_reg_0 0x68
#define hp_gp_reg_1 0x69
#define hp_gp_reg_3 0x6B
#define hp_seltimeout 0x6C
#define TO_4ms 0x67 /* 3.9959ms */
#define TO_5ms 0x03 /* 4.9152ms */
#define TO_10ms 0x07 /* 11.xxxms */
#define TO_250ms 0x99 /* 250.68ms */
#define TO_290ms 0xB1 /* 289.99ms */
#define hp_clkctrl_0 0x6D
#define PWR_DWN BIT(6)
#define ACTdeassert BIT(4)
#define CLK_40MHZ (BIT(1) + BIT(0))
#define CLKCTRL_DEFAULT (ACTdeassert | CLK_40MHZ)
#define hp_fiforead 0x6E
#define hp_fifowrite 0x6F
#define hp_offsetctr 0x70
#define hp_xferstat 0x71
#define FIFO_EMPTY BIT(6)
#define hp_portctrl_1 0x72
#define CHK_SCSI_P BIT(3)
#define HOST_MODE8 BIT(0)
#define hp_xfer_pad 0x73
#define ID_UNLOCK BIT(3)
#define hp_scsidata_0 0x74
#define hp_scsidata_1 0x75
#define hp_aramBase 0x80
#define BIOS_DATA_OFFSET 0x60
#define BIOS_RELATIVE_CARD 0x64
#define AR3 (BIT(9) + BIT(8))
#define SDATA BIT(10)
#define CRD_OP BIT(11) /* Cmp Reg. w/ Data */
#define CRR_OP BIT(12) /* Cmp Reg. w. Reg. */
#define CPE_OP (BIT(14)+BIT(11)) /* Cmp SCSI phs & Branch EQ */
#define CPN_OP (BIT(14)+BIT(12)) /* Cmp SCSI phs & Branch NOT EQ */
#define ADATA_OUT 0x00
#define ADATA_IN BIT(8)
#define ACOMMAND BIT(10)
#define ASTATUS (BIT(10)+BIT(8))
#define AMSG_OUT (BIT(10)+BIT(9))
#define AMSG_IN (BIT(10)+BIT(9)+BIT(8))
#define BRH_OP BIT(13) /* Branch */
#define ALWAYS 0x00
#define EQUAL BIT(8)
#define NOT_EQ BIT(9)
#define TCB_OP (BIT(13)+BIT(11)) /* Test condition & branch */
#define FIFO_0 BIT(10)
#define MPM_OP BIT(15) /* Match phase and move data */
#define MRR_OP BIT(14) /* Move DReg. to Reg. */
#define S_IDREG (BIT(2)+BIT(1)+BIT(0))
#define D_AR0 0x00
#define D_AR1 BIT(0)
#define D_BUCKET (BIT(2) + BIT(1) + BIT(0))
#define RAT_OP (BIT(14)+BIT(13)+BIT(11))
#define SSI_OP (BIT(15)+BIT(11))
#define SSI_ITAR_DISC (ITAR_DISC >> 8)
#define SSI_IDO_STRT (IDO_STRT >> 8)
#define SSI_ICMD_COMP (ICMD_COMP >> 8)
#define SSI_ITICKLE (ITICKLE >> 8)
#define SSI_IUNKWN (IUNKWN >> 8)
#define SSI_INO_CC (IUNKWN >> 8)
#define SSI_IRFAIL (IUNKWN >> 8)
#define NP 0x10 /*Next Phase */
#define NTCMD 0x02 /*Non- Tagged Command start */
#define CMDPZ 0x04 /*Command phase */
#define DINT 0x12 /*Data Out/In interrupt */
#define DI 0x13 /*Data Out */
#define DC 0x19 /*Disconnect Message */
#define ST 0x1D /*Status Phase */
#define UNKNWN 0x24 /*Unknown bus action */
#define CC 0x25 /*Command Completion failure */
#define TICK 0x26 /*New target reselected us. */
#define SELCHK 0x28 /*Select & Check SCSI ID latch reg */
#define ID_MSG_STRT hp_aramBase + 0x00
#define NON_TAG_ID_MSG hp_aramBase + 0x06
#define CMD_STRT hp_aramBase + 0x08
#define SYNC_MSGS hp_aramBase + 0x08
#define TAG_STRT 0x00
#define DISCONNECT_START 0x10/2
#define END_DATA_START 0x14/2
#define CMD_ONLY_STRT CMDPZ/2
#define SELCHK_STRT SELCHK/2
/* Read the hardware transfer counter; only the low 24 bits are valid. */
#define GET_XFER_CNT(port, xfercnt) {RD_HARP32(port,hp_xfercnt_0,xfercnt); xfercnt &= 0xFFFFFF;}
/* #define GET_XFER_CNT(port, xfercnt) (xfercnt = RD_HARPOON(port+hp_xfercnt_2), \
xfercnt <<= 16,\
xfercnt |= RDW_HARPOON((unsigned short)(port+hp_xfercnt_0)))
*/
/* Program a bus-master transfer: host address then byte count.
   NOTE: evaluates/modifies addr and count (shifts them in place). */
#define HP_SETUP_ADDR_CNT(port,addr,count) (WRW_HARPOON((port+hp_host_addr_lo), (unsigned short)(addr & 0x0000FFFFL)),\
addr >>= 16,\
WRW_HARPOON((port+hp_host_addr_hmi), (unsigned short)(addr & 0x0000FFFFL)),\
WR_HARP32(port,hp_xfercnt_0,count),\
WRW_HARPOON((port+hp_xfer_cnt_lo), (unsigned short)(count & 0x0000FFFFL)),\
count >>= 16,\
WR_HARPOON(port+hp_xfer_cnt_hi, (count & 0xFF)))
/* Busy-wait for REQ to drop, then ACK the message byte (phase forced
   to S_ILL_PH, optionally keeping ATN asserted). */
#define ACCEPT_MSG(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
WR_HARPOON(port+hp_scsisig, S_ILL_PH);}
#define ACCEPT_MSG_ATN(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
WR_HARPOON(port+hp_scsisig, (S_ILL_PH|SCSI_ATN));}
/* Pulse PROG_RESET to stop the auto-sequencer. */
#define DISABLE_AUTO(port) (WR_HARPOON(port+hp_scsireset, PROG_RESET),\
WR_HARPOON(port+hp_scsireset, 0x00))
/* Switch the shared RAM window between auto-RAM and scatter/gather RAM. */
#define ARAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
(RD_HARPOON(p_port+hp_page_ctrl) | SGRAM_ARAM)))
#define SGRAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
(RD_HARPOON(p_port+hp_page_ctrl) & ~SGRAM_ARAM)))
/* Globally gate/ungate the chip's interrupt output. */
#define MDISABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
(RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE)))
#define MENABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
(RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE)))
/*
 * Forward declarations for the SCCB-manager internals: selection and
 * sync/wide negotiation (FPT_s*), phase handlers (FPT_phase*), queue
 * management (FPT_queue*), EEPROM/NVRAM utilities (FPT_utilEE*,
 * FPT_RNVRamData), and SCAM protocol support (FPT_sc*).
 */
static unsigned char FPT_sisyncn(unsigned long port, unsigned char p_card,
				 unsigned char syncFlag);
static void FPT_ssel(unsigned long port, unsigned char p_card);
static void FPT_sres(unsigned long port, unsigned char p_card,
		     struct sccb_card *pCurrCard);
static void FPT_shandem(unsigned long port, unsigned char p_card,
			struct sccb *pCurrSCCB);
static void FPT_stsyncn(unsigned long port, unsigned char p_card);
static void FPT_sisyncr(unsigned long port, unsigned char sync_pulse,
			unsigned char offset);
static void FPT_sssyncv(unsigned long p_port, unsigned char p_id,
			unsigned char p_sync_value,
			struct sccb_mgr_tar_info *currTar_Info);
static void FPT_sresb(unsigned long port, unsigned char p_card);
static void FPT_sxfrp(unsigned long p_port, unsigned char p_card);
static void FPT_schkdd(unsigned long port, unsigned char p_card);
static unsigned char FPT_RdStack(unsigned long port, unsigned char index);
static void FPT_WrStack(unsigned long portBase, unsigned char index,
			unsigned char data);
static unsigned char FPT_ChkIfChipInitialized(unsigned long ioPort);
static void FPT_SendMsg(unsigned long port, unsigned char message);
static void FPT_queueFlushTargSccb(unsigned char p_card, unsigned char thisTarg,
				   unsigned char error_code);
static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card);
static void FPT_RNVRamData(struct nvram_info *pNvRamInfo);
static unsigned char FPT_siwidn(unsigned long port, unsigned char p_card);
static void FPT_stwidn(unsigned long port, unsigned char p_card);
static void FPT_siwidr(unsigned long port, unsigned char width);
static void FPT_queueSelectFail(struct sccb_card *pCurrCard,
				unsigned char p_card);
static void FPT_queueDisconnect(struct sccb *p_SCCB, unsigned char p_card);
static void FPT_queueCmdComplete(struct sccb_card *pCurrCard,
				 struct sccb *p_SCCB, unsigned char p_card);
static void FPT_queueSearchSelect(struct sccb_card *pCurrCard,
				  unsigned char p_card);
static void FPT_queueFlushSccb(unsigned char p_card, unsigned char error_code);
static void FPT_queueAddSccb(struct sccb *p_SCCB, unsigned char card);
static unsigned char FPT_queueFindSccb(struct sccb *p_SCCB,
				       unsigned char p_card);
static void FPT_utilUpdateResidual(struct sccb *p_SCCB);
static unsigned short FPT_CalcCrc16(unsigned char buffer[]);
static unsigned char FPT_CalcLrc(unsigned char buffer[]);
static void FPT_Wait1Second(unsigned long p_port);
static void FPT_Wait(unsigned long p_port, unsigned char p_delay);
static void FPT_utilEEWriteOnOff(unsigned long p_port, unsigned char p_mode);
static void FPT_utilEEWrite(unsigned long p_port, unsigned short ee_data,
			    unsigned short ee_addr);
static unsigned short FPT_utilEERead(unsigned long p_port,
				     unsigned short ee_addr);
static unsigned short FPT_utilEEReadOrg(unsigned long p_port,
					unsigned short ee_addr);
static void FPT_utilEESendCmdAddr(unsigned long p_port, unsigned char ee_cmd,
				  unsigned short ee_addr);
static void FPT_phaseDataOut(unsigned long port, unsigned char p_card);
static void FPT_phaseDataIn(unsigned long port, unsigned char p_card);
static void FPT_phaseCommand(unsigned long port, unsigned char p_card);
static void FPT_phaseStatus(unsigned long port, unsigned char p_card);
static void FPT_phaseMsgOut(unsigned long port, unsigned char p_card);
static void FPT_phaseMsgIn(unsigned long port, unsigned char p_card);
static void FPT_phaseIllegal(unsigned long port, unsigned char p_card);
static void FPT_phaseDecode(unsigned long port, unsigned char p_card);
static void FPT_phaseChkFifo(unsigned long port, unsigned char p_card);
static void FPT_phaseBusFree(unsigned long p_port, unsigned char p_card);
static void FPT_XbowInit(unsigned long port, unsigned char scamFlg);
static void FPT_BusMasterInit(unsigned long p_port);
static void FPT_DiagEEPROM(unsigned long p_port);
static void FPT_dataXferProcessor(unsigned long port,
				  struct sccb_card *pCurrCard);
static void FPT_busMstrSGDataXferStart(unsigned long port,
				       struct sccb *pCurrSCCB);
static void FPT_busMstrDataXferStart(unsigned long port,
				     struct sccb *pCurrSCCB);
static void FPT_hostDataXferAbort(unsigned long port, unsigned char p_card,
				  struct sccb *pCurrSCCB);
static void FPT_hostDataXferRestart(struct sccb *currSCCB);
static unsigned char FPT_SccbMgr_bad_isr(unsigned long p_port,
					 unsigned char p_card,
					 struct sccb_card *pCurrCard,
					 unsigned short p_int);
static void FPT_SccbMgrTableInitAll(void);
static void FPT_SccbMgrTableInitCard(struct sccb_card *pCurrCard,
				     unsigned char p_card);
static void FPT_SccbMgrTableInitTarget(unsigned char p_card,
				       unsigned char target);
static void FPT_scini(unsigned char p_card, unsigned char p_our_id,
		      unsigned char p_power_up);
static int FPT_scarb(unsigned long p_port, unsigned char p_sel_type);
static void FPT_scbusf(unsigned long p_port);
static void FPT_scsel(unsigned long p_port);
static void FPT_scasid(unsigned char p_card, unsigned long p_port);
static unsigned char FPT_scxferc(unsigned long p_port, unsigned char p_data);
static unsigned char FPT_scsendi(unsigned long p_port,
				 unsigned char p_id_string[]);
static unsigned char FPT_sciso(unsigned long p_port,
			       unsigned char p_id_string[]);
static void FPT_scwirod(unsigned long p_port, unsigned char p_data_bit);
static void FPT_scwiros(unsigned long p_port, unsigned char p_data_bit);
static unsigned char FPT_scvalq(unsigned char p_quintet);
static unsigned char FPT_scsell(unsigned long p_port, unsigned char targ_id);
static void FPT_scwtsel(unsigned long p_port);
static void FPT_inisci(unsigned char p_card, unsigned long p_port,
		       unsigned char p_our_id);
static void FPT_scsavdi(unsigned char p_card, unsigned long p_port);
static unsigned char FPT_scmachid(unsigned char p_card,
				  unsigned char p_id_string[]);
static void FPT_autoCmdCmplt(unsigned long p_port, unsigned char p_card);
static void FPT_autoLoadDefaultMap(unsigned long p_port);
/* Per-card, per-target negotiation/queue state. */
static struct sccb_mgr_tar_info FPT_sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR] =
    { {{0}} };
/* One entry per host adapter managed by this SCCB manager. */
static struct sccb_card FPT_BL_Card[MAX_CARDS] = { {0} };
/* SCAM (SCSI Configured AutoMagically) per-target ID strings/state. */
static SCCBSCAM_INFO FPT_scamInfo[MAX_SCSI_TAR] = { {{0}} };
/* Cached NVRAM contents for motherboard-integrated cards. */
static struct nvram_info FPT_nvRamInfo[MAX_MB_CARDS] = { {0} };
/* Count of motherboard cards found so far. */
static unsigned char FPT_mbCards = 0;
/* SCAM identification string advertised for this host adapter
   (type code, length, then "BUSLOGIC BT-930" padded with spaces). */
static unsigned char FPT_scamHAString[] =
    { 0x63, 0x07, 'B', 'U', 'S', 'L', 'O', 'G', 'I', 'C',
	' ', 'B', 'T', '-', '9', '3', '0',
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
};
/* Interrupt-enable mask programmed into the chip; set during init. */
static unsigned short FPT_default_intena = 0;
/* Dispatch table mapping the 3-bit SCSI phase to its handler;
   populated by FlashPoint_ProbeHostAdapter. */
static void (*FPT_s_PhaseTbl[8]) (unsigned long, unsigned char) = {
	0};
/*---------------------------------------------------------------------
*
* Function: FlashPoint_ProbeHostAdapter
*
* Description: Setup and/or Search for cards and return info to caller.
*
*---------------------------------------------------------------------*/
static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
{
	static unsigned char first_time = 1;
	unsigned char i, j, id, ScamFlg;
	unsigned short temp, temp2, temp3, temp4, temp5, temp6;
	unsigned long ioport;
	struct nvram_info *pCurrNvRam;
	ioport = pCardInfo->si_baseaddr;
	/* Verify vendor/device ID registers before touching anything else. */
	if (RD_HARPOON(ioport + hp_vendor_id_0) != ORION_VEND_0)
		return (int)FAILURE;
	if ((RD_HARPOON(ioport + hp_vendor_id_1) != ORION_VEND_1))
		return (int)FAILURE;
	if ((RD_HARPOON(ioport + hp_device_id_0) != ORION_DEV_0))
		return (int)FAILURE;
	if ((RD_HARPOON(ioport + hp_device_id_1) != ORION_DEV_1))
		return (int)FAILURE;
	if (RD_HARPOON(ioport + hp_rev_num) != 0x0f) {
		/* For new Harpoon then check for sub_device ID LSB
		   the bits(0-3) must be all ZERO for compatible with
		   current version of SCCBMgr, else skip this Harpoon
		   device. */
		if (RD_HARPOON(ioport + hp_sub_device_id_0) & 0x0f)
			return (int)FAILURE;
	}
	/* One-time global table init on the very first probe. */
	if (first_time) {
		FPT_SccbMgrTableInitAll();
		first_time = 0;
		FPT_mbCards = 0;
	}
	/* Non-zero stack byte 0 marks a motherboard-integrated card with
	   NVRAM state kept in the chip's stack RAM. */
	if (FPT_RdStack(ioport, 0) != 0x00) {
		if (FPT_ChkIfChipInitialized(ioport) == 0) {
			/* Chip state is stale: reinitialize from scratch. */
			pCurrNvRam = NULL;
			WR_HARPOON(ioport + hp_semaphore, 0x00);
			FPT_XbowInit(ioport, 0);	/*Must Init the SCSI before attempting */
			FPT_DiagEEPROM(ioport);
		} else {
			if (FPT_mbCards < MAX_MB_CARDS) {
				pCurrNvRam = &FPT_nvRamInfo[FPT_mbCards];
				FPT_mbCards++;
				pCurrNvRam->niBaseAddr = ioport;
				FPT_RNVRamData(pCurrNvRam);
			} else
				return (int)FAILURE;
		}
	} else
		pCurrNvRam = NULL;
	/* Make sure the clock is running at full speed (wake from green PC). */
	WR_HARPOON(ioport + hp_clkctrl_0, CLKCTRL_DEFAULT);
	WR_HARPOON(ioport + hp_sys_ctrl, 0x00);
	/* Adapter SCSI ID comes from NVRAM if present, else from EEPROM. */
	if (pCurrNvRam)
		pCardInfo->si_id = pCurrNvRam->niAdapId;
	else
		pCardInfo->si_id =
		    (unsigned
		     char)(FPT_utilEERead(ioport,
					  (ADAPTER_SCSI_ID /
					   2)) & (unsigned char)0x0FF);
	pCardInfo->si_lun = 0x00;
	pCardInfo->si_fw_revision = ORION_FW_REV;
	temp2 = 0x0000;
	temp3 = 0x0000;
	temp4 = 0x0000;
	temp5 = 0x0000;
	temp6 = 0x0000;
	/* Build per-target capability bitmaps (one bit per target) from the
	   sync-rate table: temp2=init-sync, temp3=no-disconnect,
	   temp4=wide, temp5=fast, temp6=ultra. Two targets per table word. */
	for (id = 0; id < (16 / 2); id++) {
		if (pCurrNvRam) {
			temp = (unsigned short)pCurrNvRam->niSyncTbl[id];
			/* Expand the packed NVRAM nibble layout to the
			   EEPROM word layout used below. */
			temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
			    (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
		} else
			temp =
			    FPT_utilEERead(ioport,
					   (unsigned short)((SYNC_RATE_TBL / 2)
							    + id));
		for (i = 0; i < 2; temp >>= 8, i++) {
			temp2 >>= 1;
			temp3 >>= 1;
			temp4 >>= 1;
			temp5 >>= 1;
			temp6 >>= 1;
			switch (temp & 0x3) {
			case AUTO_RATE_20:	/* Synchronous, 20 mega-transfers/second */
				temp6 |= 0x8000;	/* Fall through */
			case AUTO_RATE_10:	/* Synchronous, 10 mega-transfers/second */
				temp5 |= 0x8000;	/* Fall through */
			case AUTO_RATE_05:	/* Synchronous, 5 mega-transfers/second */
				temp2 |= 0x8000;	/* Fall through */
			case AUTO_RATE_00:	/* Asynchronous */
				break;
			}
			if (temp & DISC_ENABLE_BIT)
				temp3 |= 0x8000;
			if (temp & WIDE_NEGO_BIT)
				temp4 |= 0x8000;
		}
	}
	pCardInfo->si_per_targ_init_sync = temp2;
	pCardInfo->si_per_targ_no_disc = temp3;
	pCardInfo->si_per_targ_wide_nego = temp4;
	pCardInfo->si_per_targ_fast_nego = temp5;
	pCardInfo->si_per_targ_ultra_nego = temp6;
	/* System configuration byte: parity, reset policy, translation. */
	if (pCurrNvRam)
		i = pCurrNvRam->niSysConf;
	else
		i = (unsigned
		     char)(FPT_utilEERead(ioport, (SYSTEM_CONFIG / 2)));
	if (pCurrNvRam)
		ScamFlg = pCurrNvRam->niScamConf;
	else
		ScamFlg =
		    (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2);
	pCardInfo->si_flags = 0x0000;
	if (i & 0x01)
		pCardInfo->si_flags |= SCSI_PARITY_ENA;
	if (!(i & 0x02))
		pCardInfo->si_flags |= SOFT_RESET;
	if (i & 0x10)
		pCardInfo->si_flags |= EXTENDED_TRANSLATION;
	if (ScamFlg & SCAM_ENABLED)
		pCardInfo->si_flags |= FLAG_SCAM_ENABLED;
	if (ScamFlg & SCAM_LEVEL2)
		pCardInfo->si_flags |= FLAG_SCAM_LEVEL2;
	/* Program low/high byte terminator enables from config bits 2/3. */
	j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
	if (i & 0x04) {
		j |= SCSI_TERM_ENA_L;
	}
	WR_HARPOON(ioport + hp_bm_ctrl, j);
	j = (RD_HARPOON(ioport + hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
	if (i & 0x08) {
		j |= SCSI_TERM_ENA_H;
	}
	WR_HARPOON(ioport + hp_ee_ctrl, j);
	if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD))
		pCardInfo->si_flags |= SUPPORT_16TAR_32LUN;
	pCardInfo->si_card_family = HARPOON_FAMILY;
	pCardInfo->si_bustype = BUSTYPE_PCI;
	/* Derive the "9xx" model string from NVRAM or the EEPROM. */
	if (pCurrNvRam) {
		pCardInfo->si_card_model[0] = '9';
		switch (pCurrNvRam->niModel & 0x0f) {
		case MODEL_LT:
			pCardInfo->si_card_model[1] = '3';
			pCardInfo->si_card_model[2] = '0';
			break;
		case MODEL_LW:
			pCardInfo->si_card_model[1] = '5';
			pCardInfo->si_card_model[2] = '0';
			break;
		case MODEL_DL:
			pCardInfo->si_card_model[1] = '3';
			pCardInfo->si_card_model[2] = '2';
			break;
		case MODEL_DW:
			pCardInfo->si_card_model[1] = '5';
			pCardInfo->si_card_model[2] = '2';
			break;
		}
	} else {
		temp = FPT_utilEERead(ioport, (MODEL_NUMB_0 / 2));
		pCardInfo->si_card_model[0] = (unsigned char)(temp >> 8);
		temp = FPT_utilEERead(ioport, (MODEL_NUMB_2 / 2));
		pCardInfo->si_card_model[1] = (unsigned char)(temp & 0x00FF);
		pCardInfo->si_card_model[2] = (unsigned char)(temp >> 8);
	}
	/* Sense which terminators are populated, model-dependently. */
	if (pCardInfo->si_card_model[1] == '3') {
		if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
			pCardInfo->si_flags |= LOW_BYTE_TERM;
	} else if (pCardInfo->si_card_model[2] == '0') {
		/* Toggle the xfer-pad bit to read both terminator senses. */
		temp = RD_HARPOON(ioport + hp_xfer_pad);
		WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4)));
		if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
			pCardInfo->si_flags |= LOW_BYTE_TERM;
		WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4)));
		if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
			pCardInfo->si_flags |= HIGH_BYTE_TERM;
		WR_HARPOON(ioport + hp_xfer_pad, temp);
	} else {
		/* Clock 8 bits out of the EEPROM interface to sample the
		   terminator-sense shift register. */
		temp = RD_HARPOON(ioport + hp_ee_ctrl);
		temp2 = RD_HARPOON(ioport + hp_xfer_pad);
		WR_HARPOON(ioport + hp_ee_ctrl, (temp | SEE_CS));
		WR_HARPOON(ioport + hp_xfer_pad, (temp2 | BIT(4)));
		temp3 = 0;
		for (i = 0; i < 8; i++) {
			temp3 <<= 1;
			if (!(RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)))
				temp3 |= 1;
			WR_HARPOON(ioport + hp_xfer_pad, (temp2 & ~BIT(4)));
			WR_HARPOON(ioport + hp_xfer_pad, (temp2 | BIT(4)));
		}
		WR_HARPOON(ioport + hp_ee_ctrl, temp);
		WR_HARPOON(ioport + hp_xfer_pad, temp2);
		if (!(temp3 & BIT(7)))
			pCardInfo->si_flags |= LOW_BYTE_TERM;
		if (!(temp3 & BIT(6)))
			pCardInfo->si_flags |= HIGH_BYTE_TERM;
	}
	/* Copy BIOS translation info out of the auto-RAM window. */
	ARAM_ACCESS(ioport);
	for (i = 0; i < 4; i++) {
		pCardInfo->si_XlatInfo[i] =
		    RD_HARPOON(ioport + hp_aramBase + BIOS_DATA_OFFSET + i);
	}
	/* return with -1 if no sort, else return with
	   logical card number sorted by BIOS (zero-based) */
	pCardInfo->si_relative_cardnum =
	    (unsigned
	     char)(RD_HARPOON(ioport + hp_aramBase + BIOS_RELATIVE_CARD) - 1);
	SGRAM_ACCESS(ioport);
	/* Install the SCSI phase dispatch handlers (indices 2 and 3 are
	   reserved/illegal phase encodings). */
	FPT_s_PhaseTbl[0] = FPT_phaseDataOut;
	FPT_s_PhaseTbl[1] = FPT_phaseDataIn;
	FPT_s_PhaseTbl[2] = FPT_phaseIllegal;
	FPT_s_PhaseTbl[3] = FPT_phaseIllegal;
	FPT_s_PhaseTbl[4] = FPT_phaseCommand;
	FPT_s_PhaseTbl[5] = FPT_phaseStatus;
	FPT_s_PhaseTbl[6] = FPT_phaseMsgOut;
	FPT_s_PhaseTbl[7] = FPT_phaseMsgIn;
	pCardInfo->si_present = 0x01;
	return 0;
}
/*---------------------------------------------------------------------
*
* Function: FlashPoint_HardwareResetHostAdapter
*
* Description: Setup adapter for normal operation (hard reset).
*
*---------------------------------------------------------------------*/
static unsigned long FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
							 *pCardInfo)
{
	struct sccb_card *CurrCard = NULL;
	struct nvram_info *pCurrNvRam;
	unsigned char i, j, thisCard, ScamFlg;
	unsigned short temp, sync_bit_map, id;
	unsigned long ioport;
	ioport = pCardInfo->si_baseaddr;
	/* Find (or claim) the FPT_BL_Card slot for this I/O port; fail if
	   all MAX_CARDS slots are taken. */
	for (thisCard = 0; thisCard <= MAX_CARDS; thisCard++) {
		if (thisCard == MAX_CARDS) {
			return FAILURE;
		}
		if (FPT_BL_Card[thisCard].ioPort == ioport) {
			CurrCard = &FPT_BL_Card[thisCard];
			FPT_SccbMgrTableInitCard(CurrCard, thisCard);
			break;
		}
		else if (FPT_BL_Card[thisCard].ioPort == 0x00) {
			FPT_BL_Card[thisCard].ioPort = ioport;
			CurrCard = &FPT_BL_Card[thisCard];
			/* Attach cached NVRAM info if this is an on-board card. */
			if (FPT_mbCards)
				for (i = 0; i < FPT_mbCards; i++) {
					if (CurrCard->ioPort ==
					    FPT_nvRamInfo[i].niBaseAddr)
						CurrCard->pNvRamInfo =
						    &FPT_nvRamInfo[i];
				}
			FPT_SccbMgrTableInitCard(CurrCard, thisCard);
			CurrCard->cardIndex = thisCard;
			CurrCard->cardInfo = pCardInfo;
			break;
		}
	}
	pCurrNvRam = CurrCard->pNvRamInfo;
	if (pCurrNvRam) {
		ScamFlg = pCurrNvRam->niScamConf;
	} else {
		ScamFlg =
		    (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2);
	}
	/* Bring up the bus master, crossbow, and default sequencer program. */
	FPT_BusMasterInit(ioport);
	FPT_XbowInit(ioport, ScamFlg);
	FPT_autoLoadDefaultMap(ioport);
	/* Convert our SCSI ID to a one-hot bit for the self-ID register. */
	for (i = 0, id = 0x01; i != pCardInfo->si_id; i++, id <<= 1) {
	}
	WR_HARPOON(ioport + hp_selfid_0, id);
	WR_HARPOON(ioport + hp_selfid_1, 0x00);
	WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id);
	CurrCard->ourId = pCardInfo->si_id;
	i = (unsigned char)pCardInfo->si_flags;
	if (i & SCSI_PARITY_ENA)
		WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P));
	/* Re-apply terminator enables per the probed flags. */
	j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
	if (i & LOW_BYTE_TERM)
		j |= SCSI_TERM_ENA_L;
	WR_HARPOON(ioport + hp_bm_ctrl, j);
	j = (RD_HARPOON(ioport + hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
	if (i & HIGH_BYTE_TERM)
		j |= SCSI_TERM_ENA_H;
	WR_HARPOON(ioport + hp_ee_ctrl, j);
	/* Hard-reset the SCSI bus and run SCAM unless SOFT_RESET was asked. */
	if (!(pCardInfo->si_flags & SOFT_RESET)) {
		FPT_sresb(ioport, thisCard);
		FPT_scini(thisCard, pCardInfo->si_id, 0);
	}
	if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS)
		CurrCard->globalFlags |= F_NO_FILTER;
	if (pCurrNvRam) {
		if (pCurrNvRam->niSysConf & 0x10)
			CurrCard->globalFlags |= F_GREEN_PC;
	} else {
		if (FPT_utilEERead(ioport, (SYSTEM_CONFIG / 2)) & GREEN_PC_ENA)
			CurrCard->globalFlags |= F_GREEN_PC;
	}
	/* Set global flag to indicate Re-Negotiation to be done on all
	   ckeck condition */
	if (pCurrNvRam) {
		if (pCurrNvRam->niScsiConf & 0x04)
			CurrCard->globalFlags |= F_DO_RENEGO;
	} else {
		if (FPT_utilEERead(ioport, (SCSI_CONFIG / 2)) & RENEGO_ENA)
			CurrCard->globalFlags |= F_DO_RENEGO;
	}
	if (pCurrNvRam) {
		if (pCurrNvRam->niScsiConf & 0x08)
			CurrCard->globalFlags |= F_CONLUN_IO;
	} else {
		if (FPT_utilEERead(ioport, (SCSI_CONFIG / 2)) & CONNIO_ENA)
			CurrCard->globalFlags |= F_CONLUN_IO;
	}
	/* Propagate the per-target disconnect-allowed bitmap. */
	temp = pCardInfo->si_per_targ_no_disc;
	for (i = 0, id = 1; i < MAX_SCSI_TAR; i++, id <<= 1) {
		if (temp & id)
			FPT_sccbMgrTbl[thisCard][i].TarStatus |= TAR_ALLOW_DISC;
	}
	/* Load per-target sync/wide EEPROM values; two targets per word. */
	sync_bit_map = 0x0001;
	for (id = 0; id < (MAX_SCSI_TAR / 2); id++) {
		if (pCurrNvRam) {
			temp = (unsigned short)pCurrNvRam->niSyncTbl[id];
			temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
			    (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
		} else
			temp =
			    FPT_utilEERead(ioport,
					   (unsigned short)((SYNC_RATE_TBL / 2)
							    + id));
		for (i = 0; i < 2; temp >>= 8, i++) {
			if (pCardInfo->si_per_targ_init_sync & sync_bit_map) {
				FPT_sccbMgrTbl[thisCard][id * 2 +
							 i].TarEEValue =
				    (unsigned char)temp;
			}
			else {
				/* Sync disabled: mark negotiation done and
				   strip the sync-rate bits. */
				FPT_sccbMgrTbl[thisCard][id * 2 +
							 i].TarStatus |=
				    SYNC_SUPPORTED;
				FPT_sccbMgrTbl[thisCard][id * 2 +
							 i].TarEEValue =
				    (unsigned char)(temp & ~EE_SYNC_MASK);
			}
/*         if ((pCardInfo->si_per_targ_wide_nego & sync_bit_map) ||
   (id*2+i >= 8)){
 */
			if (pCardInfo->si_per_targ_wide_nego & sync_bit_map) {
				FPT_sccbMgrTbl[thisCard][id * 2 +
							 i].TarEEValue |=
				    EE_WIDE_SCSI;
			}
			else {	/* NARROW SCSI */
				FPT_sccbMgrTbl[thisCard][id * 2 +
							 i].TarStatus |=
				    WIDE_NEGOCIATED;
			}
			sync_bit_map <<= 1;
		}
	}
	/* Advertise to the BIOS that the SCCB manager now owns the chip. */
	WR_HARPOON((ioport + hp_semaphore),
		   (unsigned char)(RD_HARPOON((ioport + hp_semaphore)) |
				   SCCB_MGR_PRESENT));
	return (unsigned long)CurrCard;
}
/* Write the cached NVRAM state (model, config bytes, sync and SCAM
   tables) back into the chip's stack/auto RAM before releasing the card;
   cards without NVRAM just get stack byte 0 cleared. */
static void FlashPoint_ReleaseHostAdapter(unsigned long pCurrCard)
{
	unsigned char i;
	unsigned long portBase;
	unsigned long regOffset;
	unsigned long scamData;
	unsigned long *pScamTbl;
	struct nvram_info *pCurrNvRam;
	pCurrNvRam = ((struct sccb_card *)pCurrCard)->pNvRamInfo;
	if (pCurrNvRam) {
		/* Stack bytes 0-4: model and configuration; 5..: sync table. */
		FPT_WrStack(pCurrNvRam->niBaseAddr, 0, pCurrNvRam->niModel);
		FPT_WrStack(pCurrNvRam->niBaseAddr, 1, pCurrNvRam->niSysConf);
		FPT_WrStack(pCurrNvRam->niBaseAddr, 2, pCurrNvRam->niScsiConf);
		FPT_WrStack(pCurrNvRam->niBaseAddr, 3, pCurrNvRam->niScamConf);
		FPT_WrStack(pCurrNvRam->niBaseAddr, 4, pCurrNvRam->niAdapId);
		for (i = 0; i < MAX_SCSI_TAR / 2; i++)
			FPT_WrStack(pCurrNvRam->niBaseAddr,
				    (unsigned char)(i + 5),
				    pCurrNvRam->niSyncTbl[i]);
		portBase = pCurrNvRam->niBaseAddr;
		/* SCAM table lives at auto-RAM offset 64, 4 bytes/target.
		   NOTE(review): assumes unsigned long is 32-bit here, as in
		   FPT_RNVRamData — confirm on 64-bit builds. */
		for (i = 0; i < MAX_SCSI_TAR; i++) {
			regOffset = hp_aramBase + 64 + i * 4;
			pScamTbl = (unsigned long *)&pCurrNvRam->niScamTbl[i];
			scamData = *pScamTbl;
			WR_HARP32(portBase, regOffset, scamData);
		}
	} else {
		/* No NVRAM: clear the "initialized" marker byte. */
		FPT_WrStack(((struct sccb_card *)pCurrCard)->ioPort, 0, 0);
	}
}
/* Populate *pNvRamInfo from the chip: config bytes and sync table from
   stack RAM (bytes 0-4 and 5..), SCAM table from auto-RAM offset 64. */
static void FPT_RNVRamData(struct nvram_info *pNvRamInfo)
{
	unsigned char i;
	unsigned long portBase;
	unsigned long regOffset;
	unsigned long scamData;
	unsigned long *pScamTbl;
	pNvRamInfo->niModel = FPT_RdStack(pNvRamInfo->niBaseAddr, 0);
	pNvRamInfo->niSysConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 1);
	pNvRamInfo->niScsiConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 2);
	pNvRamInfo->niScamConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 3);
	pNvRamInfo->niAdapId = FPT_RdStack(pNvRamInfo->niBaseAddr, 4);
	for (i = 0; i < MAX_SCSI_TAR / 2; i++)
		pNvRamInfo->niSyncTbl[i] =
		    FPT_RdStack(pNvRamInfo->niBaseAddr, (unsigned char)(i + 5));
	portBase = pNvRamInfo->niBaseAddr;
	/* NOTE(review): storing through an (unsigned long *) cast into
	   niScamTbl[i] assumes each entry is exactly sizeof(unsigned long)
	   (4 bytes) and suitably aligned — suspect on 64-bit; confirm
	   against the nvram_info declaration. */
	for (i = 0; i < MAX_SCSI_TAR; i++) {
		regOffset = hp_aramBase + 64 + i * 4;
		RD_HARP32(portBase, regOffset, scamData);
		pScamTbl = (unsigned long *)&pNvRamInfo->niScamTbl[i];
		*pScamTbl = scamData;
	}
}
/* Read one byte from the Harpoon's internal stack RAM at the given index. */
static unsigned char FPT_RdStack(unsigned long portBase, unsigned char index)
{
	unsigned char value;

	/* Latch the stack address first, then fetch the byte it selects. */
	WR_HARPOON(portBase + hp_stack_addr, index);
	value = RD_HARPOON(portBase + hp_stack_data);

	return value;
}
/* Write one byte into the Harpoon's internal stack RAM at the given index. */
static void FPT_WrStack(unsigned long portBase, unsigned char index,
			unsigned char data)
{
	/* Address register first, then the data byte to store there. */
	WR_HARPOON(portBase + hp_stack_addr, index);
	WR_HARPOON(portBase + hp_stack_data, data);
}
/*
 * Decide whether the chip still holds a valid prior initialization:
 * the arbitration ID must match the ID saved in stack byte 4, the
 * clock control must carry its default bits, and the selection timeout
 * must be one of the two values this driver programs (250ms/290ms).
 * Returns 1 if initialized, 0 otherwise.
 */
static unsigned char FPT_ChkIfChipInitialized(unsigned long ioPort)
{
	if ((RD_HARPOON(ioPort + hp_arb_id) & 0x0f) != FPT_RdStack(ioPort, 4))
		return 0;

	if ((RD_HARPOON(ioPort + hp_clkctrl_0) & CLKCTRL_DEFAULT)
	    != CLKCTRL_DEFAULT)
		return 0;

	if (RD_HARPOON(ioPort + hp_seltimeout) == TO_250ms)
		return 1;
	if (RD_HARPOON(ioPort + hp_seltimeout) == TO_290ms)
		return 1;

	return 0;
}
/*---------------------------------------------------------------------
*
* Function: FlashPoint_StartCCB
*
* Description: Start a command pointed to by p_Sccb. When the
* command is completed it will be returned via the
* callback function.
*
*---------------------------------------------------------------------*/
static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb)
{
unsigned long ioport;
unsigned char thisCard, lun;
struct sccb *pSaveSccb;
CALL_BK_FN callback;
thisCard = ((struct sccb_card *)pCurrCard)->cardIndex;
ioport = ((struct sccb_card *)pCurrCard)->ioPort;
if ((p_Sccb->TargID >= MAX_SCSI_TAR) || (p_Sccb->Lun >= MAX_LUN)) {
p_Sccb->HostStatus = SCCB_COMPLETE;
p_Sccb->SccbStatus = SCCB_ERROR;
callback = (CALL_BK_FN) p_Sccb->SccbCallback;
if (callback)
callback(p_Sccb);
return;
}
FPT_sinits(p_Sccb, thisCard);
if (!((struct sccb_card *)pCurrCard)->cmdCounter) {
WR_HARPOON(ioport + hp_semaphore,
(RD_HARPOON(ioport + hp_semaphore)
| SCCB_MGR_ACTIVE));
if (((struct sccb_card *)pCurrCard)->globalFlags & F_GREEN_PC) {
WR_HARPOON(ioport + hp_clkctrl_0, CLKCTRL_DEFAULT);
WR_HARPOON(ioport + hp_sys_ctrl, 0x00);
}
}
((struct sccb_card *)pCurrCard)->cmdCounter++;
if (RD_HARPOON(ioport + hp_semaphore) & BIOS_IN_USE) {
WR_HARPOON(ioport + hp_semaphore,
(RD_HARPOON(ioport + hp_semaphore)
| TICKLE_ME));
if (p_Sccb->OperationCode == RESET_COMMAND) {
pSaveSccb =
((struct sccb_card *)pCurrCard)->currentSCCB;
((struct sccb_card *)pCurrCard)->currentSCCB = p_Sccb;
FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard);
((struct sccb_card *)pCurrCard)->currentSCCB =
pSaveSccb;
} else {
FPT_queueAddSccb(p_Sccb, thisCard);
}
}
else if ((RD_HARPOON(ioport + hp_page_ctrl) & G_INT_DISABLE)) {
if (p_Sccb->OperationCode == RESET_COMMAND) {
pSaveSccb =
((struct sccb_card *)pCurrCard)->currentSCCB;
((struct sccb_card *)pCurrCard)->currentSCCB = p_Sccb;
FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard);
((struct sccb_card *)pCurrCard)->currentSCCB =
pSaveSccb;
} else {
FPT_queueAddSccb(p_Sccb, thisCard);
}
}
else {
MDISABLE_INT(ioport);
if ((((struct sccb_card *)pCurrCard)->globalFlags & F_CONLUN_IO)
&&
((FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].
TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
lun = p_Sccb->Lun;
else
lun = 0;
if ((((struct sccb_card *)pCurrCard)->currentSCCB == NULL) &&
(FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].TarSelQ_Cnt == 0)
&& (FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].TarLUNBusy[lun]
== 0)) {
((struct sccb_card *)pCurrCard)->currentSCCB = p_Sccb;
FPT_ssel(p_Sccb->SccbIOPort, thisCard);
}
else {
if (p_Sccb->OperationCode == RESET_COMMAND) {
pSaveSccb =
((struct sccb_card *)pCurrCard)->
currentSCCB;
((struct sccb_card *)pCurrCard)->currentSCCB =
p_Sccb;
FPT_queueSelectFail(&FPT_BL_Card[thisCard],
thisCard);
((struct sccb_card *)pCurrCard)->currentSCCB =
pSaveSccb;
} else {
FPT_queueAddSccb(p_Sccb, thisCard);
}
}
MENABLE_INT(ioport);
}
}
/*---------------------------------------------------------------------
*
* Function: FlashPoint_AbortCCB
*
* Description: Abort the command pointed to by p_Sccb. When the
* command is completed it will be returned via the
* callback function.
*
*---------------------------------------------------------------------*/
static int FlashPoint_AbortCCB(unsigned long pCurrCard, struct sccb *p_Sccb)
{
	unsigned long ioport;
	unsigned char thisCard;
	CALL_BK_FN callback;
	unsigned char TID;
	struct sccb *pSaveSCCB;
	struct sccb_mgr_tar_info *currTar_Info;
	ioport = ((struct sccb_card *)pCurrCard)->ioPort;
	thisCard = ((struct sccb_card *)pCurrCard)->cardIndex;
	/* Only attempt the abort when interrupts are not globally gated
	   (i.e. we are not racing the interrupt handler). */
	if (!(RD_HARPOON(ioport + hp_page_ctrl) & G_INT_DISABLE)) {
		/* Easy case: still sitting in the pending queue — remove it
		   and complete with SCCB_ABORT via the callback. */
		if (FPT_queueFindSccb(p_Sccb, thisCard)) {
			((struct sccb_card *)pCurrCard)->cmdCounter--;
			/* Last outstanding command: drop the active/tickle
			   bits in the BIOS semaphore. */
			if (!((struct sccb_card *)pCurrCard)->cmdCounter)
				WR_HARPOON(ioport + hp_semaphore,
					   (RD_HARPOON(ioport + hp_semaphore)
					    & (unsigned
					       char)(~(SCCB_MGR_ACTIVE |
						       TICKLE_ME))));
			p_Sccb->SccbStatus = SCCB_ABORT;
			callback = p_Sccb->SccbCallback;
			callback(p_Sccb);
			return 0;
		}
		else {
			/* Currently active on the bus: just mark it aborted;
			   completion happens through the normal path. */
			if (((struct sccb_card *)pCurrCard)->currentSCCB ==
			    p_Sccb) {
				p_Sccb->SccbStatus = SCCB_ABORT;
				return 0;
			}
			else {
				TID = p_Sccb->TargID;
				/* Disconnected tagged command: if it still
				   owns its disconnect-queue slot, arrange an
				   ABORT_TAG message by (re)selecting it. */
				if (p_Sccb->Sccb_tag) {
					MDISABLE_INT(ioport);
					if (((struct sccb_card *)pCurrCard)->
					    discQ_Tbl[p_Sccb->Sccb_tag] ==
					    p_Sccb) {
						p_Sccb->SccbStatus = SCCB_ABORT;
						p_Sccb->Sccb_scsistat =
						    ABORT_ST;
						p_Sccb->Sccb_scsimsg =
						    SMABORT_TAG;
						if (((struct sccb_card *)
						     pCurrCard)->currentSCCB ==
						    NULL) {
							((struct sccb_card *)
							 pCurrCard)->
							currentSCCB = p_Sccb;
							FPT_ssel(ioport,
								 thisCard);
						} else {
							pSaveSCCB =
							    ((struct sccb_card
							      *)pCurrCard)->
							    currentSCCB;
							((struct sccb_card *)
							 pCurrCard)->
							currentSCCB = p_Sccb;
							FPT_queueSelectFail((struct sccb_card *)pCurrCard, thisCard);
							((struct sccb_card *)
							 pCurrCard)->
							currentSCCB = pSaveSCCB;
						}
					}
					MENABLE_INT(ioport);
					return 0;
				} else {
					/* Untagged disconnected command: mark
					   aborted if it holds the per-LUN
					   disconnect-queue slot. */
					currTar_Info =
					    &FPT_sccbMgrTbl[thisCard][p_Sccb->
								      TargID];
					if (FPT_BL_Card[thisCard].
					    discQ_Tbl[currTar_Info->
						      LunDiscQ_Idx[p_Sccb->Lun]]
					    == p_Sccb) {
						p_Sccb->SccbStatus = SCCB_ABORT;
						return 0;
					}
				}
			}
		}
	}
	/* Not found anywhere we can safely abort. */
	return -1;
}
/*---------------------------------------------------------------------
*
* Function: FlashPoint_InterruptPending
*
* Description: Do a quick check to determine if there is a pending
* interrupt for this card and disable the IRQ Pin if so.
*
*---------------------------------------------------------------------*/
static unsigned char FlashPoint_InterruptPending(unsigned long pCurrCard)
{
	unsigned long ioport = ((struct sccb_card *)pCurrCard)->ioPort;

	/* INT_ASSERTED in hp_int_status means the chip is raising its IRQ. */
	return (RD_HARPOON(ioport + hp_int_status) & INT_ASSERTED) ? 1 : 0;
}
/*---------------------------------------------------------------------
*
* Function: FlashPoint_HandleInterrupt
*
* Description: This is our entry point when an interrupt is generated
* by the card and the upper level driver passes it on to
* us.
*
*---------------------------------------------------------------------*/
/*
 * Main interrupt service routine for one card.  Reads the Harpoon
 * interrupt/extended-status registers and dispatches each pending
 * condition (bad-ISR conditions, command complete, target disconnect,
 * reselection, data-out start, unknown phase, transfer-count-zero,
 * bus free, tickle) until none remain, then starts a queued command
 * if one became runnable.
 *
 * Returns 0 normally, a bad-ISR result code on serious errors, or
 * 0xFE when the rev D/E Harpoon bus-free workaround fires (caller
 * should reset the adapter).
 */
static int FlashPoint_HandleInterrupt(unsigned long pCurrCard)
{
	struct sccb *currSCCB;
	unsigned char thisCard, result, bm_status, bm_int_st;
	unsigned short hp_int;
	unsigned char i, target;
	unsigned long ioport;

	thisCard = ((struct sccb_card *)pCurrCard)->cardIndex;
	ioport = ((struct sccb_card *)pCurrCard)->ioPort;

	MDISABLE_INT(ioport);

	/* Capture any bus-master/extended error status up front. */
	if ((bm_int_st = RD_HARPOON(ioport + hp_int_status)) & EXT_STATUS_ON)
		bm_status =
		    RD_HARPOON(ioport +
			       hp_ext_status) & (unsigned char)BAD_EXT_STATUS;
	else
		bm_status = 0;

	WR_HARPOON(ioport + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT));

	/* Service interrupt causes until none enabled remain pending. */
	while ((hp_int =
		RDW_HARPOON((ioport +
			     hp_intstat)) & FPT_default_intena) | bm_status) {

		currSCCB = ((struct sccb_card *)pCurrCard)->currentSCCB;

		/* Serious/unusual conditions go to the bad-ISR decoder. */
		if (hp_int & (FIFO | TIMEOUT | RESET | SCAM_SEL) || bm_status) {
			result =
			    FPT_SccbMgr_bad_isr(ioport, thisCard,
						((struct sccb_card *)pCurrCard),
						hp_int);
			WRW_HARPOON((ioport + hp_intstat),
				    (FIFO | TIMEOUT | RESET | SCAM_SEL));
			bm_status = 0;

			if (result) {
				MENABLE_INT(ioport);
				return result;
			}
		}

		else if (hp_int & ICMD_COMP) {
			/* Automation finished a command. */
			if (!(hp_int & BUS_FREE)) {
				/* Wait for the BusFree before starting a new command.  We
				   must also check for being reselected since the BusFree
				   may not show up if another device reselects us in 1.5us or
				   less.  SRR Wednesday, 3/8/1995.
				 */
				while (!
				       (RDW_HARPOON((ioport + hp_intstat)) &
					(BUS_FREE | RSEL))) ;
			}

			if (((struct sccb_card *)pCurrCard)->
			    globalFlags & F_HOST_XFER_ACT)
				FPT_phaseChkFifo(ioport, thisCard);

/*		WRW_HARPOON((ioport+hp_intstat),
			(BUS_FREE | ICMD_COMP | ITAR_DISC | XFER_CNT_0));
 */

			WRW_HARPOON((ioport + hp_intstat), CLR_ALL_INT_1);

			FPT_autoCmdCmplt(ioport, thisCard);

		}

		else if (hp_int & ITAR_DISC) {
			/* Target disconnected; save data pointers if asked. */
			if (((struct sccb_card *)pCurrCard)->
			    globalFlags & F_HOST_XFER_ACT) {
				FPT_phaseChkFifo(ioport, thisCard);
			}

			if (RD_HARPOON(ioport + hp_gp_reg_1) == SMSAVE_DATA_PTR) {
				WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
				currSCCB->Sccb_XferState |= F_NO_DATA_YET;

				currSCCB->Sccb_savedATC = currSCCB->Sccb_ATC;
			}

			currSCCB->Sccb_scsistat = DISCONNECT_ST;
			FPT_queueDisconnect(currSCCB, thisCard);

			/* Wait for the BusFree before starting a new command.  We
			   must also check for being reselected since the BusFree
			   may not show up if another device reselects us in 1.5us or
			   less.  SRR Wednesday, 3/8/1995.
			 */
			while (!
			       (RDW_HARPOON((ioport + hp_intstat)) &
				(BUS_FREE | RSEL))
			       && !((RDW_HARPOON((ioport + hp_intstat)) & PHASE)
				    && RD_HARPOON((ioport + hp_scsisig)) ==
				    (SCSI_BSY | SCSI_REQ | SCSI_CD | SCSI_MSG |
				     SCSI_IOBIT))) ;

			/*
			   The additional loop exit condition above detects a timing problem
			   with the revision D/E harpoon chips.  The caller should reset the
			   host adapter to recover when 0xFE is returned.
			 */
			if (!
			    (RDW_HARPOON((ioport + hp_intstat)) &
			     (BUS_FREE | RSEL))) {
				MENABLE_INT(ioport);
				return 0xFE;
			}

			WRW_HARPOON((ioport + hp_intstat),
				    (BUS_FREE | ITAR_DISC));

			((struct sccb_card *)pCurrCard)->globalFlags |=
			    F_NEW_SCCB_CMD;

		}

		else if (hp_int & RSEL) {
			/* We were reselected by a target. */
			WRW_HARPOON((ioport + hp_intstat),
				    (PROG_HLT | RSEL | PHASE | BUS_FREE));

			/* A disconnect raced the reselection: save state first. */
			if (RDW_HARPOON((ioport + hp_intstat)) & ITAR_DISC) {
				if (((struct sccb_card *)pCurrCard)->
				    globalFlags & F_HOST_XFER_ACT) {
					FPT_phaseChkFifo(ioport, thisCard);
				}

				if (RD_HARPOON(ioport + hp_gp_reg_1) ==
				    SMSAVE_DATA_PTR) {
					WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
					currSCCB->Sccb_XferState |=
					    F_NO_DATA_YET;
					currSCCB->Sccb_savedATC =
					    currSCCB->Sccb_ATC;
				}

				WRW_HARPOON((ioport + hp_intstat),
					    (BUS_FREE | ITAR_DISC));
				currSCCB->Sccb_scsistat = DISCONNECT_ST;
				FPT_queueDisconnect(currSCCB, thisCard);
			}

			FPT_sres(ioport, thisCard,
				 ((struct sccb_card *)pCurrCard));
			FPT_phaseDecode(ioport, thisCard);

		}

		else if ((hp_int & IDO_STRT) && (!(hp_int & BUS_FREE))) {

			WRW_HARPOON((ioport + hp_intstat),
				    (IDO_STRT | XFER_CNT_0));
			FPT_phaseDecode(ioport, thisCard);

		}

		else if ((hp_int & IUNKWN) || (hp_int & PROG_HLT)) {
			WRW_HARPOON((ioport + hp_intstat),
				    (PHASE | IUNKWN | PROG_HLT));
			if ((RD_HARPOON(ioport + hp_prgmcnt_0) & (unsigned char)
			     0x3f) < (unsigned char)SELCHK) {
				FPT_phaseDecode(ioport, thisCard);
			} else {
				/* Harpoon problem some SCSI target device respond to selection
				   with short BUSY pulse (<400ns) this will make the Harpoon is not able
				   to latch the correct Target ID into reg. x53.
				   The work around require to correct this reg. But when write to this
				   reg. (0x53) also increment the FIFO write addr reg (0x6f), thus we
				   need to read this reg first then restore it later. After update to 0x53 */

				i = (unsigned
				     char)(RD_HARPOON(ioport + hp_fifowrite));
				target =
				    (unsigned
				     char)(RD_HARPOON(ioport + hp_gp_reg_3));

				WR_HARPOON(ioport + hp_xfer_pad,
					   (unsigned char)ID_UNLOCK);
				WR_HARPOON(ioport + hp_select_id,
					   (unsigned char)(target | target <<
							   4));
				WR_HARPOON(ioport + hp_xfer_pad,
					   (unsigned char)0x00);
				WR_HARPOON(ioport + hp_fifowrite, i);
				WR_HARPOON(ioport + hp_autostart_3,
					   (AUTO_IMMED + TAG_STRT));
			}
		}

		else if (hp_int & XFER_CNT_0) {

			WRW_HARPOON((ioport + hp_intstat), XFER_CNT_0);

			FPT_schkdd(ioport, thisCard);

		}

		else if (hp_int & BUS_FREE) {

			WRW_HARPOON((ioport + hp_intstat), BUS_FREE);

			if (((struct sccb_card *)pCurrCard)->
			    globalFlags & F_HOST_XFER_ACT) {

				FPT_hostDataXferAbort(ioport, thisCard,
						      currSCCB);
			}

			FPT_phaseBusFree(ioport, thisCard);
		}

		else if (hp_int & ITICKLE) {

			WRW_HARPOON((ioport + hp_intstat), ITICKLE);
			((struct sccb_card *)pCurrCard)->globalFlags |=
			    F_NEW_SCCB_CMD;
		}

		/* Kick off the next queued command if one is pending
		   and the bus is idle. */
		if (((struct sccb_card *)pCurrCard)->
		    globalFlags & F_NEW_SCCB_CMD) {

			((struct sccb_card *)pCurrCard)->globalFlags &=
			    ~F_NEW_SCCB_CMD;

			if (((struct sccb_card *)pCurrCard)->currentSCCB ==
			    NULL) {
				FPT_queueSearchSelect(((struct sccb_card *)
						       pCurrCard), thisCard);
			}

			if (((struct sccb_card *)pCurrCard)->currentSCCB !=
			    NULL) {
				((struct sccb_card *)pCurrCard)->globalFlags &=
				    ~F_NEW_SCCB_CMD;
				FPT_ssel(ioport, thisCard);
			}

			break;

		}

	}			/*end while */

	MENABLE_INT(ioport);

	return 0;
}
/*---------------------------------------------------------------------
*
 * Function: FPT_SccbMgr_bad_isr
*
* Description: Some type of interrupt has occurred which is slightly
* out of the ordinary. We will now decode it fully, in
* this routine. This is broken up in an attempt to save
* processing time.
*
*---------------------------------------------------------------------*/
/*
 * Decode the "out of the ordinary" interrupt conditions: bus-master
 * errors, SCSI bus reset, FIFO problems, selection timeout and SCAM
 * selection.  Split out of the main ISR to keep its hot path short.
 *
 * Returns 0xFF after a bus reset was handled (the main ISR passes this
 * up so the upper layer restarts everything), otherwise 0x00.
 */
static unsigned char FPT_SccbMgr_bad_isr(unsigned long p_port,
					 unsigned char p_card,
					 struct sccb_card *pCurrCard,
					 unsigned short p_int)
{
	unsigned char temp, ScamFlg;
	struct sccb_mgr_tar_info *currTar_Info;
	struct nvram_info *pCurrNvRam;

	/* Bus-master / PCI error path. */
	if (RD_HARPOON(p_port + hp_ext_status) &
	    (BM_FORCE_OFF | PCI_DEV_TMOUT | BM_PARITY_ERR | PIO_OVERRUN)) {

		if (pCurrCard->globalFlags & F_HOST_XFER_ACT) {

			FPT_hostDataXferAbort(p_port, p_card,
					      pCurrCard->currentSCCB);
		}

		if (RD_HARPOON(p_port + hp_pci_stat_cfg) & REC_MASTER_ABORT)
		{
			WR_HARPOON(p_port + hp_pci_stat_cfg,
				   (RD_HARPOON(p_port + hp_pci_stat_cfg) &
				    ~REC_MASTER_ABORT));

			WR_HARPOON(p_port + hp_host_blk_cnt, 0x00);

		}

		if (pCurrCard->currentSCCB != NULL) {

			if (!pCurrCard->currentSCCB->HostStatus)
				pCurrCard->currentSCCB->HostStatus =
				    SCCB_BM_ERR;

			FPT_sxfrp(p_port, p_card);

			/* Pulse the serial-EEPROM chip selects to clear the
			   part, preserving the arbitration/termination bits. */
			temp = (unsigned char)(RD_HARPOON(p_port + hp_ee_ctrl) &
					       (EXT_ARB_ACK | SCSI_TERM_ENA_H));
			WR_HARPOON(p_port + hp_ee_ctrl,
				   ((unsigned char)temp | SEE_MS | SEE_CS));
			WR_HARPOON(p_port + hp_ee_ctrl, temp);

			if (!
			    (RDW_HARPOON((p_port + hp_intstat)) &
			     (BUS_FREE | RESET))) {
				FPT_phaseDecode(p_port, p_card);
			}
		}
	}

	else if (p_int & RESET) {
		/* SCSI bus reset: reinitialize chip and rescan bus (SCAM). */
		WR_HARPOON(p_port + hp_clkctrl_0, CLKCTRL_DEFAULT);
		WR_HARPOON(p_port + hp_sys_ctrl, 0x00);
		if (pCurrCard->currentSCCB != NULL) {

			if (pCurrCard->globalFlags & F_HOST_XFER_ACT)

				FPT_hostDataXferAbort(p_port, p_card,
						      pCurrCard->currentSCCB);
		}

		DISABLE_AUTO(p_port);

		FPT_sresb(p_port, p_card);

		while (RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST) {
		}

		pCurrNvRam = pCurrCard->pNvRamInfo;
		if (pCurrNvRam) {
			ScamFlg = pCurrNvRam->niScamConf;
		} else {
			ScamFlg =
			    (unsigned char)FPT_utilEERead(p_port,
							  SCAM_CONFIG / 2);
		}

		FPT_XbowInit(p_port, ScamFlg);

		FPT_scini(p_card, pCurrCard->ourId, 0);

		return 0xFF;
	}

	else if (p_int & FIFO) {

		WRW_HARPOON((p_port + hp_intstat), FIFO);

		if (pCurrCard->currentSCCB != NULL)
			FPT_sxfrp(p_port, p_card);
	}

	else if (p_int & TIMEOUT) {
		/* Selection timeout: fail the command and forget any
		   sync/wide agreement with the unresponsive target. */
		DISABLE_AUTO(p_port);

		WRW_HARPOON((p_port + hp_intstat),
			    (PROG_HLT | TIMEOUT | SEL | BUS_FREE | PHASE |
			     IUNKWN));

		pCurrCard->currentSCCB->HostStatus = SCCB_SELECTION_TIMEOUT;

		currTar_Info =
		    &FPT_sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID];
		if ((pCurrCard->globalFlags & F_CONLUN_IO)
		    && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) !=
			TAG_Q_TRYING))
			currTar_Info->TarLUNBusy[pCurrCard->currentSCCB->Lun] =
			    0;
		else
			currTar_Info->TarLUNBusy[0] = 0;

		if (currTar_Info->TarEEValue & EE_SYNC_MASK) {
			currTar_Info->TarSyncCtrl = 0;
			currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
		}

		if (currTar_Info->TarEEValue & EE_WIDE_SCSI) {
			currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
		}

		FPT_sssyncv(p_port, pCurrCard->currentSCCB->TargID, NARROW_SCSI,
			    currTar_Info);

		FPT_queueCmdComplete(pCurrCard, pCurrCard->currentSCCB, p_card);

	}

	else if (p_int & SCAM_SEL) {
		/* SCAM selection: run the level-2 target arbitration. */
		FPT_scarb(p_port, LEVEL2_TAR);
		FPT_scsel(p_port);
		FPT_scasid(p_card, p_port);

		FPT_scbusf(p_port);

		WRW_HARPOON((p_port + hp_intstat), SCAM_SEL);
	}

	return 0x00;
}
/*---------------------------------------------------------------------
*
 * Function: FPT_SccbMgrTableInitAll
 *
 * Description: Initialize the Sccb manager data structures for all cards.
*
*---------------------------------------------------------------------*/
/*
 * Initialize the Sccb manager data structures for every possible card
 * slot.  Each per-card structure is reset and then marked unused
 * (ioPort 0, cardIndex 0xFF, no card/NVRAM info attached).
 *
 * Fix: declare the function with a proper (void) prototype instead of
 * an old-style empty parameter list, which in pre-C23 C declares a
 * function taking unspecified arguments.
 */
static void FPT_SccbMgrTableInitAll(void)
{
	unsigned char thisCard;

	for (thisCard = 0; thisCard < MAX_CARDS; thisCard++) {
		FPT_SccbMgrTableInitCard(&FPT_BL_Card[thisCard], thisCard);

		FPT_BL_Card[thisCard].ioPort = 0x00;
		FPT_BL_Card[thisCard].cardInfo = NULL;
		FPT_BL_Card[thisCard].cardIndex = 0xFF;
		FPT_BL_Card[thisCard].ourId = 0x00;
		FPT_BL_Card[thisCard].pNvRamInfo = NULL;
	}
}
/*---------------------------------------------------------------------
*
 * Function: FPT_SccbMgrTableInitCard
 *
 * Description: Initialize the Sccb manager data structures for one card.
*
*---------------------------------------------------------------------*/
/*
 * Reset the per-card Sccb manager bookkeeping: empty the card's
 * disconnect-queue table, re-initialize every target's state, and
 * restore the card-wide counters and flags to their power-on values.
 */
static void FPT_SccbMgrTableInitCard(struct sccb_card *pCurrCard,
				     unsigned char p_card)
{
	unsigned char id, slot;

	/* A freshly initialized card has no disconnected commands. */
	for (slot = 0; slot < QUEUE_DEPTH; slot++)
		FPT_BL_Card[p_card].discQ_Tbl[slot] = NULL;

	/* Wipe each target's status/EEPROM words, then rebuild its
	   remaining per-target structures. */
	for (id = 0; id < MAX_SCSI_TAR; id++) {
		FPT_sccbMgrTbl[p_card][id].TarStatus = 0;
		FPT_sccbMgrTbl[p_card][id].TarEEValue = 0;
		FPT_SccbMgrTableInitTarget(p_card, id);
	}

	pCurrCard->scanIndex = 0x00;
	pCurrCard->currentSCCB = NULL;
	pCurrCard->globalFlags = 0x00;
	pCurrCard->cmdCounter = 0x00;
	pCurrCard->tagQ_Lst = 0x01;
	pCurrCard->discQCount = 0;
}
/*---------------------------------------------------------------------
*
 * Function: FPT_SccbMgrTableInitTarget
 *
 * Description: Initialize the Sccb manager data structures for one target.
*
*---------------------------------------------------------------------*/
static void FPT_SccbMgrTableInitTarget(unsigned char p_card,
unsigned char target)
{
unsigned char lun, qtag;
struct sccb_mgr_tar_info *currTar_Info;
currTar_Info = &FPT_sccbMgrTbl[p_card][target];
currTar_Info->TarSelQ_Cnt = 0;
currTar_Info->TarSyncCtrl = 0;
currTar_Info->TarSelQ_Head = NULL;
currTar_Info->TarSelQ_Tail = NULL;
currTar_Info->TarTagQ_Cnt = 0;
currTar_Info->TarLUN_CA = 0;
for (lun = 0; lun < MAX_LUN; lun++) {
currTar_Info->TarLUNBusy[lun] = 0;
currTar_Info->LunDiscQ_Idx[lun] = 0;
}
for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) {
if (FPT_BL_Card[p_card].discQ_Tbl[qtag] != NULL) {
if (FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID ==
target) {
FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL;
FPT_BL_Card[p_card].discQCount--;
}
}
}
}
/*---------------------------------------------------------------------
*
 * Function: FPT_sfm
*
* Description: Read in a message byte from the SCSI bus, and check
* for a parity error.
*
*---------------------------------------------------------------------*/
/*
 * Read one message byte from the SCSI bus (MESSAGE IN phase).
 *
 * Busy-waits (bounded) for the target to assert REQ, latches the data
 * byte and ACKs it.  On a SCSI parity error the byte is discarded:
 * SMPARITY is recorded in pCurrSCCB->Sccb_scsimsg so a MESSAGE PARITY
 * ERROR will be sent, ATN is kept asserted, and any further message
 * bytes the target offers are drained until it changes phase or stops
 * REQing.
 *
 * Returns the message byte, or 0x00 on REQ timeout / parity error.
 * NOTE(review): a genuine message byte of 0x00 is indistinguishable
 * from the failure return; callers treat 0 as "no message".
 */
static unsigned char FPT_sfm(unsigned long port, struct sccb *pCurrSCCB)
{
	unsigned char message;
	unsigned short TimeOutLoop;

	/* Wait for REQ, giving up after ~20000 polls. */
	TimeOutLoop = 0;
	while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) &&
	       (TimeOutLoop++ < 20000)) {
	}

	WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);

	message = RD_HARPOON(port + hp_scsidata_0);

	/* Assert ACK while still signalling MESSAGE IN phase. */
	WR_HARPOON(port + hp_scsisig, SCSI_ACK + S_MSGI_PH);

	if (TimeOutLoop > 20000)
		message = 0x00;	/* force message byte = 0 if Time Out on Req */

	if ((RDW_HARPOON((port + hp_intstat)) & PARITY) &&
	    (RD_HARPOON(port + hp_addstat) & SCSI_PAR_ERR)) {
		/* Parity error: discard the byte, clear the FIFO state,
		   and note that a parity-error message must be sent. */
		WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH));
		WR_HARPOON(port + hp_xferstat, 0);
		WR_HARPOON(port + hp_fiforead, 0);
		WR_HARPOON(port + hp_fifowrite, 0);
		if (pCurrSCCB != NULL) {
			pCurrSCCB->Sccb_scsimsg = SMPARITY;
		}
		message = 0x00;
		do {
			/* ACK with ATN so the target knows we want to send
			   our parity-error message, then consume any extra
			   bytes it keeps offering. */
			ACCEPT_MSG_ATN(port);
			TimeOutLoop = 0;
			while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) &&
			       (TimeOutLoop++ < 20000)) {
			}
			if (TimeOutLoop > 20000) {
				WRW_HARPOON((port + hp_intstat), PARITY);
				return message;
			}
			if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) !=
			    S_MSGI_PH) {
				WRW_HARPOON((port + hp_intstat), PARITY);
				return message;
			}
			WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);

			RD_HARPOON(port + hp_scsidata_0);	/* read and discard */

			WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH));

		} while (1);

	}
	WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH));
	WR_HARPOON(port + hp_xferstat, 0);
	WR_HARPOON(port + hp_fiforead, 0);
	WR_HARPOON(port + hp_fifowrite, 0);

	return message;
}
/*---------------------------------------------------------------------
*
* Function: FPT_ssel
*
* Description: Load up automation and select target device.
*
*---------------------------------------------------------------------*/
/*
 * Load the Harpoon automation RAM with the selection sequence for the
 * card's current SCCB (identify message, optional queue-tag or
 * wide/sync negotiation messages, and the CDB) and start selection of
 * the target.  When the tag or disconnect queues cannot accept the
 * command, it is deferred via FPT_queueSelectFail instead.
 */
static void FPT_ssel(unsigned long port, unsigned char p_card)
{

	unsigned char auto_loaded, i, target, *theCCB;

	unsigned long cdb_reg;
	struct sccb_card *CurrCard;
	struct sccb *currSCCB;
	struct sccb_mgr_tar_info *currTar_Info;
	unsigned char lastTag, lun;

	CurrCard = &FPT_BL_Card[p_card];
	currSCCB = CurrCard->currentSCCB;
	target = currSCCB->TargID;
	currTar_Info = &FPT_sccbMgrTbl[p_card][target];
	lastTag = CurrCard->tagQ_Lst;

	ARAM_ACCESS(port);

	/* Target has rejected tagged queuing before: stop trying it. */
	if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT)
		currSCCB->ControlByte &= ~F_USE_CMD_Q;

	if (((CurrCard->globalFlags & F_CONLUN_IO) &&
	     ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))

		lun = currSCCB->Lun;
	else
		lun = 0;

	/* Decide whether the command may be issued now; a LUN with a
	   contingent allegiance (TarLUN_CA) or outstanding tagged
	   commands cannot take an untagged command. */
	if (CurrCard->globalFlags & F_TAG_STARTED) {
		if (!(currSCCB->ControlByte & F_USE_CMD_Q)) {
			if ((currTar_Info->TarLUN_CA == 0)
			    && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK)
				== TAG_Q_TRYING)) {

				if (currTar_Info->TarTagQ_Cnt != 0) {
					currTar_Info->TarLUNBusy[lun] = 1;
					FPT_queueSelectFail(CurrCard, p_card);
					SGRAM_ACCESS(port);
					return;
				}

				else {
					currTar_Info->TarLUNBusy[lun] = 1;
				}

			}
			/*End non-tagged */
			else {
				currTar_Info->TarLUNBusy[lun] = 1;
			}

		}
		/*!Use cmd Q Tagged */
		else {
			if (currTar_Info->TarLUN_CA == 1) {
				FPT_queueSelectFail(CurrCard, p_card);
				SGRAM_ACCESS(port);
				return;
			}

			currTar_Info->TarLUNBusy[lun] = 1;

		}		/*else use cmd Q tagged */

	}
	/*if glob tagged started */
	else {
		currTar_Info->TarLUNBusy[lun] = 1;
	}

	/* Untagged (or per-LUN) commands still need a disconnect-table
	   slot; find a free one or defer the command. */
	if ((((CurrCard->globalFlags & F_CONLUN_IO) &&
	      ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
	     || (!(currSCCB->ControlByte & F_USE_CMD_Q)))) {
		if (CurrCard->discQCount >= QUEUE_DEPTH) {
			currTar_Info->TarLUNBusy[lun] = 1;
			FPT_queueSelectFail(CurrCard, p_card);
			SGRAM_ACCESS(port);
			return;
		}
		for (i = 1; i < QUEUE_DEPTH; i++) {
			if (++lastTag >= QUEUE_DEPTH)
				lastTag = 1;
			if (CurrCard->discQ_Tbl[lastTag] == NULL) {
				CurrCard->tagQ_Lst = lastTag;
				currTar_Info->LunDiscQ_Idx[lun] = lastTag;
				CurrCard->discQ_Tbl[lastTag] = currSCCB;
				CurrCard->discQCount++;
				break;
			}
		}
		if (i == QUEUE_DEPTH) {
			currTar_Info->TarLUNBusy[lun] = 1;
			FPT_queueSelectFail(CurrCard, p_card);
			SGRAM_ACCESS(port);
			return;
		}
	}

	auto_loaded = 0;

	WR_HARPOON(port + hp_select_id, target);
	WR_HARPOON(port + hp_gp_reg_3, target);	/* Use by new automation logic */

	if (currSCCB->OperationCode == RESET_COMMAND) {
		/* Bus device reset: send identify (without disconnect
		   privilege) then the reset message; forget any sync/wide
		   agreement with this target. */
		WRW_HARPOON((port + ID_MSG_STRT), (MPM_OP + AMSG_OUT +
						   (currSCCB->
						    Sccb_idmsg & ~DISC_PRIV)));

		WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + NP);

		currSCCB->Sccb_scsimsg = SMDEV_RESET;

		WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT));
		auto_loaded = 1;
		currSCCB->Sccb_scsistat = SELECT_BDR_ST;

		if (currTar_Info->TarEEValue & EE_SYNC_MASK) {
			currTar_Info->TarSyncCtrl = 0;
			currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
		}

		if (currTar_Info->TarEEValue & EE_WIDE_SCSI) {
			currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
		}

		FPT_sssyncv(port, target, NARROW_SCSI, currTar_Info);
		FPT_SccbMgrTableInitTarget(p_card, target);

	}

	else if (currSCCB->Sccb_scsistat == ABORT_ST) {
		/* Abort of a tagged command: identify, then the tag
		   message pair with an abort-tag message. */
		WRW_HARPOON((port + ID_MSG_STRT), (MPM_OP + AMSG_OUT +
						   (currSCCB->
						    Sccb_idmsg & ~DISC_PRIV)));

		WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);

		WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT +
						     (((unsigned
							char)(currSCCB->
							      ControlByte &
							      TAG_TYPE_MASK)
						       >> 6) | (unsigned char)
						      0x20)));
		WRW_HARPOON((port + SYNC_MSGS + 2),
			    (MPM_OP + AMSG_OUT + currSCCB->Sccb_tag));
		WRW_HARPOON((port + SYNC_MSGS + 4), (BRH_OP + ALWAYS + NP));

		WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT));
		auto_loaded = 1;

	}

	else if (!(currTar_Info->TarStatus & WIDE_NEGOCIATED)) {
		/* First contact: negotiate wide before anything else. */
		auto_loaded = FPT_siwidn(port, p_card);
		currSCCB->Sccb_scsistat = SELECT_WN_ST;
	}

	else if (!((currTar_Info->TarStatus & TAR_SYNC_MASK)
		   == SYNC_SUPPORTED)) {
		/* Then synchronous negotiation if not yet settled. */
		auto_loaded = FPT_sisyncn(port, p_card, 0);
		currSCCB->Sccb_scsistat = SELECT_SN_ST;
	}

	if (!auto_loaded) {
		/* Normal command: load identify (+ optional queue tag)
		   and the CDB into automation RAM. */
		if (currSCCB->ControlByte & F_USE_CMD_Q) {
			CurrCard->globalFlags |= F_TAG_STARTED;

			if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK)
			    == TAG_Q_REJECT) {
				currSCCB->ControlByte &= ~F_USE_CMD_Q;

				/* Fix up the start instruction with a jump to
				   Non-Tag-CMD handling */
				WRW_HARPOON((port + ID_MSG_STRT),
					    BRH_OP + ALWAYS + NTCMD);

				WRW_HARPOON((port + NON_TAG_ID_MSG),
					    (MPM_OP + AMSG_OUT +
					     currSCCB->Sccb_idmsg));

				WR_HARPOON(port + hp_autostart_3,
					   (SELECT + SELCHK_STRT));

				/* Setup our STATE so we know what happened when
				   the wheels fall off. */
				currSCCB->Sccb_scsistat = SELECT_ST;

				currTar_Info->TarLUNBusy[lun] = 1;
			}

			else {
				WRW_HARPOON((port + ID_MSG_STRT),
					    (MPM_OP + AMSG_OUT +
					     currSCCB->Sccb_idmsg));

				WRW_HARPOON((port + ID_MSG_STRT + 2),
					    (MPM_OP + AMSG_OUT +
					     (((unsigned char)(currSCCB->
							       ControlByte &
							       TAG_TYPE_MASK)
					       >> 6) | (unsigned char)0x20)));

				/* Allocate a queue tag / disconnect slot. */
				for (i = 1; i < QUEUE_DEPTH; i++) {
					if (++lastTag >= QUEUE_DEPTH)
						lastTag = 1;
					if (CurrCard->discQ_Tbl[lastTag] ==
					    NULL) {
						WRW_HARPOON((port +
							     ID_MSG_STRT + 6),
							    (MPM_OP + AMSG_OUT +
							     lastTag));
						CurrCard->tagQ_Lst = lastTag;
						currSCCB->Sccb_tag = lastTag;
						CurrCard->discQ_Tbl[lastTag] =
						    currSCCB;
						CurrCard->discQCount++;
						break;
					}
				}

				if (i == QUEUE_DEPTH) {
					currTar_Info->TarLUNBusy[lun] = 1;
					FPT_queueSelectFail(CurrCard, p_card);
					SGRAM_ACCESS(port);
					return;
				}

				currSCCB->Sccb_scsistat = SELECT_Q_ST;

				WR_HARPOON(port + hp_autostart_3,
					   (SELECT + SELCHK_STRT));
			}
		}

		else {
			/* Untagged command path. */
			WRW_HARPOON((port + ID_MSG_STRT),
				    BRH_OP + ALWAYS + NTCMD);

			WRW_HARPOON((port + NON_TAG_ID_MSG),
				    (MPM_OP + AMSG_OUT + currSCCB->Sccb_idmsg));

			currSCCB->Sccb_scsistat = SELECT_ST;

			WR_HARPOON(port + hp_autostart_3,
				   (SELECT + SELCHK_STRT));
		}

		/* Copy the CDB bytes into the automation command area. */
		theCCB = (unsigned char *)&currSCCB->Cdb[0];

		cdb_reg = port + CMD_STRT;

		for (i = 0; i < currSCCB->CdbLength; i++) {
			WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + *theCCB));
			cdb_reg += 2;
			theCCB++;
		}

		if (currSCCB->CdbLength != TWELVE_BYTE_CMD)
			WRW_HARPOON(cdb_reg, (BRH_OP + ALWAYS + NP));

	}
	/* auto_loaded */
	WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00);
	WR_HARPOON(port + hp_xferstat, 0x00);

	WRW_HARPOON((port + hp_intstat), (PROG_HLT | TIMEOUT | SEL | BUS_FREE));

	WR_HARPOON(port + hp_portctrl_0, (SCSI_PORT));

	if (!(currSCCB->Sccb_MGRFlags & F_DEV_SELECTED)) {
		WR_HARPOON(port + hp_scsictrl_0,
			   (SEL_TAR | ENA_ATN | ENA_RESEL | ENA_SCAM_SEL));
	} else {

/*      auto_loaded =  (RD_HARPOON(port+hp_autostart_3) & (unsigned char)0x1F);
      auto_loaded |= AUTO_IMMED; */
		auto_loaded = AUTO_IMMED;

		DISABLE_AUTO(port);

		WR_HARPOON(port + hp_autostart_3, auto_loaded);
	}

	SGRAM_ACCESS(port);
}
/*---------------------------------------------------------------------
*
* Function: FPT_sres
*
* Description: Hookup the correct CCB and handle the incoming messages.
*
*---------------------------------------------------------------------*/
/*
 * Handle reselection by a target.  If a selection was in progress, it
 * is unwound and requeued first.  The reselecting target's identify
 * (and optional queue-tag) messages are read to locate the matching
 * disconnected SCCB, which becomes the card's current command again.
 * Unparseable messages are retried once with MESSAGE PARITY ERROR,
 * then the target is reset and its commands flushed.
 */
static void FPT_sres(unsigned long port, unsigned char p_card,
		     struct sccb_card *pCurrCard)
{

	unsigned char our_target, message, lun = 0, tag, msgRetryCount;

	struct sccb_mgr_tar_info *currTar_Info;
	struct sccb *currSCCB;

	/* A selection attempt lost arbitration to this reselection:
	   undo its negotiation state, free its disconnect slot and
	   put it back on the selection queue. */
	if (pCurrCard->currentSCCB != NULL) {
		currTar_Info =
		    &FPT_sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID];
		DISABLE_AUTO(port);

		WR_HARPOON((port + hp_scsictrl_0), (ENA_RESEL | ENA_SCAM_SEL));

		currSCCB = pCurrCard->currentSCCB;
		if (currSCCB->Sccb_scsistat == SELECT_WN_ST) {
			currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
			currSCCB->Sccb_scsistat = BUS_FREE_ST;
		}
		if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
			currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
			currSCCB->Sccb_scsistat = BUS_FREE_ST;
		}
		if (((pCurrCard->globalFlags & F_CONLUN_IO) &&
		     ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) !=
		      TAG_Q_TRYING))) {
			currTar_Info->TarLUNBusy[currSCCB->Lun] = 0;
			if (currSCCB->Sccb_scsistat != ABORT_ST) {
				pCurrCard->discQCount--;
				pCurrCard->discQ_Tbl[currTar_Info->
						     LunDiscQ_Idx[currSCCB->
								  Lun]]
				    = NULL;
			}
		} else {
			currTar_Info->TarLUNBusy[0] = 0;
			if (currSCCB->Sccb_tag) {
				if (currSCCB->Sccb_scsistat != ABORT_ST) {
					pCurrCard->discQCount--;
					pCurrCard->discQ_Tbl[currSCCB->
							     Sccb_tag] = NULL;
				}
			} else {
				if (currSCCB->Sccb_scsistat != ABORT_ST) {
					pCurrCard->discQCount--;
					pCurrCard->discQ_Tbl[currTar_Info->
							     LunDiscQ_Idx[0]] =
					    NULL;
				}
			}
		}

		FPT_queueSelectFail(&FPT_BL_Card[p_card], p_card);
	}

	WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00);

	/* The chip latched the reselecting target's ID in the high
	   nibble of the select-id register. */
	our_target = (unsigned char)(RD_HARPOON(port + hp_select_id) >> 4);

	currTar_Info = &FPT_sccbMgrTbl[p_card][our_target];

	msgRetryCount = 0;
	do {
		/* Read identify message and, for tagged targets, the
		   queue-tag message pair. */
		currTar_Info = &FPT_sccbMgrTbl[p_card][our_target];
		tag = 0;

		while (!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) {
			if (!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) {

				WRW_HARPOON((port + hp_intstat), PHASE);
				return;
			}
		}

		WRW_HARPOON((port + hp_intstat), PHASE);
		if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) == S_MSGI_PH) {

			message = FPT_sfm(port, pCurrCard->currentSCCB);
			if (message) {

				if (message <= (0x80 | LUN_MASK)) {
					lun = message & (unsigned char)LUN_MASK;

					if ((currTar_Info->
					     TarStatus & TAR_TAG_Q_MASK) ==
					    TAG_Q_TRYING) {
						if (currTar_Info->TarTagQ_Cnt !=
						    0) {

							if (!
							    (currTar_Info->
							     TarLUN_CA)) {
								ACCEPT_MSG(port);	/*Release the ACK for ID msg. */

								/* Expect SIMPLE QUEUE TAG
								   followed by the tag value. */
								message =
								    FPT_sfm
								    (port,
								     pCurrCard->
								     currentSCCB);
								if (message) {
									ACCEPT_MSG
									    (port);
								}

								else
									message
									    = 0;

								if (message !=
								    0) {
									tag =
									    FPT_sfm
									    (port,
									     pCurrCard->
									     currentSCCB);

									if (!
									    (tag))
										message
										    =
										    0;
								}

							}
							/*C.A. exists! */
						}
						/*End Q cnt != 0 */
					}
					/*End Tag cmds supported! */
				}
				/*End valid ID message.  */
				else {

					ACCEPT_MSG_ATN(port);
				}

			}
			/* End good id message. */
			else {

				message = 0;
			}
		} else {
			ACCEPT_MSG_ATN(port);

			/* Not message-in phase: wait for the bus to settle. */
			while (!
			       (RDW_HARPOON((port + hp_intstat)) &
				(PHASE | RESET))
			       && !(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)
			       && (RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) ;

			return;
		}

		if (message == 0) {
			/* Could not read a usable identify: first retry with
			   MESSAGE PARITY ERROR, then reset the target. */
			msgRetryCount++;
			if (msgRetryCount == 1) {
				FPT_SendMsg(port, SMPARITY);
			} else {
				FPT_SendMsg(port, SMDEV_RESET);

				FPT_sssyncv(port, our_target, NARROW_SCSI,
					    currTar_Info);

				if (FPT_sccbMgrTbl[p_card][our_target].
				    TarEEValue & EE_SYNC_MASK) {

					FPT_sccbMgrTbl[p_card][our_target].
					    TarStatus &= ~TAR_SYNC_MASK;

				}

				if (FPT_sccbMgrTbl[p_card][our_target].
				    TarEEValue & EE_WIDE_SCSI) {

					FPT_sccbMgrTbl[p_card][our_target].
					    TarStatus &= ~TAR_WIDE_MASK;
				}

				FPT_queueFlushTargSccb(p_card, our_target,
						       SCCB_COMPLETE);
				FPT_SccbMgrTableInitTarget(p_card, our_target);
				return;
			}
		}
	} while (message == 0);

	/* Locate the disconnected SCCB for this target/LUN (or tag)
	   and make it current; NACK with ATN if none is found. */
	if (((pCurrCard->globalFlags & F_CONLUN_IO) &&
	     ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) {
		currTar_Info->TarLUNBusy[lun] = 1;
		pCurrCard->currentSCCB =
		    pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[lun]];
		if (pCurrCard->currentSCCB != NULL) {
			ACCEPT_MSG(port);
		} else {
			ACCEPT_MSG_ATN(port);
		}
	} else {
		currTar_Info->TarLUNBusy[0] = 1;

		if (tag) {
			if (pCurrCard->discQ_Tbl[tag] != NULL) {
				pCurrCard->currentSCCB =
				    pCurrCard->discQ_Tbl[tag];
				currTar_Info->TarTagQ_Cnt--;
				ACCEPT_MSG(port);
			} else {
				ACCEPT_MSG_ATN(port);
			}
		} else {
			pCurrCard->currentSCCB =
			    pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]];
			if (pCurrCard->currentSCCB != NULL) {
				ACCEPT_MSG(port);
			} else {
				ACCEPT_MSG_ATN(port);
			}
		}
	}

	if (pCurrCard->currentSCCB != NULL) {
		if (pCurrCard->currentSCCB->Sccb_scsistat == ABORT_ST) {
			/* During Abort Tag command, the target could have got re-selected
			   and completed the command. Check the select Q and remove the CCB
			   if it is in the Select Q */
			FPT_queueFindSccb(pCurrCard->currentSCCB, p_card);
		}
	}

	while (!(RDW_HARPOON((port + hp_intstat)) & (PHASE | RESET)) &&
	       !(RD_HARPOON(port + hp_scsisig) & SCSI_REQ) &&
	       (RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) ;
}
/*
 * Send a single message byte to the target (MESSAGE OUT phase).
 * Waits for REQ (bailing out if the target drops BSY), drives the
 * byte manually, and for abort/reset messages also waits for the
 * resulting bus-free or phase change.
 */
static void FPT_SendMsg(unsigned long port, unsigned char message)
{
	while (!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) {
		if (!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) {

			WRW_HARPOON((port + hp_intstat), PHASE);
			return;
		}
	}

	WRW_HARPOON((port + hp_intstat), PHASE);
	if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) == S_MSGO_PH) {
		WRW_HARPOON((port + hp_intstat),
			    (BUS_FREE | PHASE | XFER_CNT_0));

		/* Drive the byte onto the bus by hand and ACK it. */
		WR_HARPOON(port + hp_portctrl_0, SCSI_BUS_EN);

		WR_HARPOON(port + hp_scsidata_0, message);

		WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH));

		ACCEPT_MSG(port);

		WR_HARPOON(port + hp_portctrl_0, 0x00);

		/* Abort/reset messages end with the target releasing the
		   bus; wait for that (or a phase change) here. */
		if ((message == SMABORT) || (message == SMDEV_RESET) ||
		    (message == SMABORT_TAG)) {
			while (!
			       (RDW_HARPOON((port + hp_intstat)) &
				(BUS_FREE | PHASE))) {
			}

			if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) {
				WRW_HARPOON((port + hp_intstat), BUS_FREE);
			}
		}
	}
}
/*---------------------------------------------------------------------
*
* Function: FPT_sdecm
*
* Description: Determine the proper response to the message from the
* target device.
*
*---------------------------------------------------------------------*/
/*
 * Decide how to respond to a (single-byte) message received from the
 * target: restore pointers, command complete, no-op/identify-class
 * messages, message reject, extended message, or ignore-wide-residue.
 * Anything unrecognized is answered with MESSAGE REJECT.
 */
static void FPT_sdecm(unsigned char message, unsigned long port,
		      unsigned char p_card)
{
	struct sccb *currSCCB;
	struct sccb_card *CurrCard;
	struct sccb_mgr_tar_info *currTar_Info;

	CurrCard = &FPT_BL_Card[p_card];
	currSCCB = CurrCard->currentSCCB;

	currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];

	if (message == SMREST_DATA_PTR) {
		/* RESTORE POINTERS: rewind to the last saved data pointer. */
		if (!(currSCCB->Sccb_XferState & F_NO_DATA_YET)) {
			currSCCB->Sccb_ATC = currSCCB->Sccb_savedATC;

			FPT_hostDataXferRestart(currSCCB);
		}

		ACCEPT_MSG(port);
		WR_HARPOON(port + hp_autostart_1,
			   (AUTO_IMMED + DISCONNECT_START));
	}

	else if (message == SMCMD_COMP) {

		/* COMMAND COMPLETE while still awaiting the queue-tag
		   response means the target ignored our tag: stop using
		   tagged queuing with it. */
		if (currSCCB->Sccb_scsistat == SELECT_Q_ST) {
			currTar_Info->TarStatus &=
			    ~(unsigned char)TAR_TAG_Q_MASK;
			currTar_Info->TarStatus |= (unsigned char)TAG_Q_REJECT;
		}

		ACCEPT_MSG(port);

	}

	else if ((message == SMNO_OP) || (message >= SMIDENT)
		 || (message == SMINIT_RECOVERY) || (message == SMREL_RECOVERY)) {

		/* Benign messages: just ACK and resume automation. */
		ACCEPT_MSG(port);
		WR_HARPOON(port + hp_autostart_1,
			   (AUTO_IMMED + DISCONNECT_START));
	}

	else if (message == SMREJECT) {

		/* MESSAGE REJECT: figure out which of our negotiation /
		   tag messages was refused and record the refusal. */
		if ((currSCCB->Sccb_scsistat == SELECT_SN_ST) ||
		    (currSCCB->Sccb_scsistat == SELECT_WN_ST) ||
		    ((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING)
		    || ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) ==
			TAG_Q_TRYING))
		{
			WRW_HARPOON((port + hp_intstat), BUS_FREE);

			ACCEPT_MSG(port);

			while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) &&
			       (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE)))
			{
			}

			if (currSCCB->Lun == 0x00) {
				if ((currSCCB->Sccb_scsistat == SELECT_SN_ST)) {

					/* Sync rejected: fall back to async. */
					currTar_Info->TarStatus |=
					    (unsigned char)SYNC_SUPPORTED;

					currTar_Info->TarEEValue &=
					    ~EE_SYNC_MASK;
				}

				else if ((currSCCB->Sccb_scsistat ==
					  SELECT_WN_ST)) {

					/* Wide rejected: stay narrow. */
					currTar_Info->TarStatus =
					    (currTar_Info->
					     TarStatus & ~WIDE_ENABLED) |
					    WIDE_NEGOCIATED;

					currTar_Info->TarEEValue &=
					    ~EE_WIDE_SCSI;

				}

				else if ((currTar_Info->
					  TarStatus & TAR_TAG_Q_MASK) ==
					 TAG_Q_TRYING) {
					/* Queue tag rejected: give up on
					   tagged queuing and free the slot. */
					currTar_Info->TarStatus =
					    (currTar_Info->
					     TarStatus & ~(unsigned char)
					     TAR_TAG_Q_MASK) | TAG_Q_REJECT;

					currSCCB->ControlByte &= ~F_USE_CMD_Q;
					CurrCard->discQCount--;
					CurrCard->discQ_Tbl[currSCCB->
							    Sccb_tag] = NULL;
					currSCCB->Sccb_tag = 0x00;

				}
			}

			if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) {

				if (currSCCB->Lun == 0x00) {
					WRW_HARPOON((port + hp_intstat),
						    BUS_FREE);
					CurrCard->globalFlags |= F_NEW_SCCB_CMD;
				}
			}

			else {

				if ((CurrCard->globalFlags & F_CONLUN_IO) &&
				    ((currTar_Info->
				      TarStatus & TAR_TAG_Q_MASK) !=
				     TAG_Q_TRYING))
					currTar_Info->TarLUNBusy[currSCCB->
								 Lun] = 1;
				else
					currTar_Info->TarLUNBusy[0] = 1;

				currSCCB->ControlByte &=
				    ~(unsigned char)F_USE_CMD_Q;

				WR_HARPOON(port + hp_autostart_1,
					   (AUTO_IMMED + DISCONNECT_START));

			}
		}

		else {
			/* Reject of something other than our negotiation:
			   ACK it and continue if the bus stays connected. */
			ACCEPT_MSG(port);

			while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) &&
			       (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE)))
			{
			}

			if (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE)) {
				WR_HARPOON(port + hp_autostart_1,
					   (AUTO_IMMED + DISCONNECT_START));
			}
		}
	}

	else if (message == SMEXT) {

		ACCEPT_MSG(port);
		FPT_shandem(port, p_card, currSCCB);
	}

	else if (message == SMIGNORWR) {

		ACCEPT_MSG(port);	/* ACK the RESIDUE MSG */

		/* Read (and discard) the residue byte that follows. */
		message = FPT_sfm(port, currSCCB);

		if (currSCCB->Sccb_scsimsg != SMPARITY)
			ACCEPT_MSG(port);
		WR_HARPOON(port + hp_autostart_1,
			   (AUTO_IMMED + DISCONNECT_START));
	}

	else {

		/* Unknown message: fail the command and send MESSAGE REJECT. */
		currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
		currSCCB->Sccb_scsimsg = SMREJECT;

		ACCEPT_MSG_ATN(port);
		WR_HARPOON(port + hp_autostart_1,
			   (AUTO_IMMED + DISCONNECT_START));
	}
}
/*---------------------------------------------------------------------
*
* Function: FPT_shandem
*
* Description: Decide what to do with the extended message.
*
*---------------------------------------------------------------------*/
/*
 * Handle an EXTENDED MESSAGE from the target: read the length and
 * message code, then dispatch synchronous (SDTR, length 3) or wide
 * (WDTR, length 2) negotiation.  Anything else - including a bad
 * length - is answered with MESSAGE REJECT.
 */
static void FPT_shandem(unsigned long port, unsigned char p_card,
			struct sccb *pCurrSCCB)
{
	unsigned char length, message;

	length = FPT_sfm(port, pCurrSCCB);
	if (length) {

		ACCEPT_MSG(port);
		message = FPT_sfm(port, pCurrSCCB);
		if (message) {

			if (message == SMSYNC) {

				if (length == 0x03) {

					ACCEPT_MSG(port);
					FPT_stsyncn(port, p_card);
				} else {

					pCurrSCCB->Sccb_scsimsg = SMREJECT;
					ACCEPT_MSG_ATN(port);
				}
			} else if (message == SMWDTR) {

				if (length == 0x02) {

					ACCEPT_MSG(port);
					FPT_stwidn(port, p_card);
				} else {

					pCurrSCCB->Sccb_scsimsg = SMREJECT;
					ACCEPT_MSG_ATN(port);

					WR_HARPOON(port + hp_autostart_1,
						   (AUTO_IMMED +
						    DISCONNECT_START));
				}
			} else {

				/* Unsupported extended message code. */
				pCurrSCCB->Sccb_scsimsg = SMREJECT;
				ACCEPT_MSG_ATN(port);

				WR_HARPOON(port + hp_autostart_1,
					   (AUTO_IMMED + DISCONNECT_START));
			}
		} else {
			/* Message byte unreadable (unless we are already in
			   the parity-retry path, just resume automation). */
			if (pCurrSCCB->Sccb_scsimsg != SMPARITY)
				ACCEPT_MSG(port);
			WR_HARPOON(port + hp_autostart_1,
				   (AUTO_IMMED + DISCONNECT_START));
		}
	} else {
		/* Length unreadable: resume only if a parity retry is
		   already queued; otherwise the bus state is unknown. */
		if (pCurrSCCB->Sccb_scsimsg == SMPARITY)
			WR_HARPOON(port + hp_autostart_1,
				   (AUTO_IMMED + DISCONNECT_START));
	}
}
/*---------------------------------------------------------------------
*
* Function: FPT_sisyncn
*
 * Description: Set up and send a synchronous data transfer request
 *              (SDTR) message to the target device.
*
*---------------------------------------------------------------------*/
/*
 * Initiate synchronous data transfer (SDTR) negotiation with the
 * current SCCB's target by loading the extended-message sequence into
 * automation RAM.  The offered period is derived from the target's
 * EEPROM sync capability (20/10/5 MB/s or async).
 *
 * syncFlag == 0 starts a fresh selection with the message; otherwise
 * the message is sent on the already-connected bus.
 *
 * Returns 1 when a negotiation message was loaded, 0 when sync is
 * already being tried (caller proceeds without negotiating; the
 * target is then marked SYNC_SUPPORTED with sync capability cleared).
 */
static unsigned char FPT_sisyncn(unsigned long port, unsigned char p_card,
				 unsigned char syncFlag)
{
	struct sccb *currSCCB;
	struct sccb_mgr_tar_info *currTar_Info;

	currSCCB = FPT_BL_Card[p_card].currentSCCB;
	currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];

	if (!((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING)) {

		/* Identify (no disconnect privilege) then the extended
		   message header: 0x01, length 3, SDTR code. */
		WRW_HARPOON((port + ID_MSG_STRT),
			    (MPM_OP + AMSG_OUT +
			     (currSCCB->
			      Sccb_idmsg & ~(unsigned char)DISC_PRIV)));

		WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);

		WRW_HARPOON((port + SYNC_MSGS + 0),
			    (MPM_OP + AMSG_OUT + SMEXT));
		WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
		WRW_HARPOON((port + SYNC_MSGS + 4),
			    (MPM_OP + AMSG_OUT + SMSYNC));

		/* Transfer period factor in 4ns units: 12 = 20MB/s,
		   25 = 10MB/s, 50 = 5MB/s, 0 = async. */
		if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)

			WRW_HARPOON((port + SYNC_MSGS + 6),
				    (MPM_OP + AMSG_OUT + 12));

		else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) ==
			 EE_SYNC_10MB)

			WRW_HARPOON((port + SYNC_MSGS + 6),
				    (MPM_OP + AMSG_OUT + 25));

		else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) ==
			 EE_SYNC_5MB)

			WRW_HARPOON((port + SYNC_MSGS + 6),
				    (MPM_OP + AMSG_OUT + 50));

		else
			WRW_HARPOON((port + SYNC_MSGS + 6),
				    (MPM_OP + AMSG_OUT + 00));

		WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP));
		WRW_HARPOON((port + SYNC_MSGS + 10),
			    (MPM_OP + AMSG_OUT + DEFAULT_OFFSET));
		WRW_HARPOON((port + SYNC_MSGS + 12), (BRH_OP + ALWAYS + NP));

		if (syncFlag == 0) {
			WR_HARPOON(port + hp_autostart_3,
				   (SELECT + SELCHK_STRT));
			currTar_Info->TarStatus =
			    ((currTar_Info->
			      TarStatus & ~(unsigned char)TAR_SYNC_MASK) |
			     (unsigned char)SYNC_TRYING);
		} else {
			WR_HARPOON(port + hp_autostart_3,
				   (AUTO_IMMED + CMD_ONLY_STRT));
		}

		return 1;
	}

	else {

		currTar_Info->TarStatus |= (unsigned char)SYNC_SUPPORTED;
		currTar_Info->TarEEValue &= ~EE_SYNC_MASK;
		return 0;
	}
}
/*---------------------------------------------------------------------
*
* Function: FPT_stsyncn
*
 * Description: The target has sent us a Sync Nego message so handle it as
 *              necessary.
*
*---------------------------------------------------------------------*/
/*
 * Respond to an SDTR message from the target: read the proposed period
 * and offset, clamp them to our EEPROM-configured capability, program
 * the sync register for this target, and either complete (if we started
 * the negotiation) or send our counter-proposal back.
 * FPT_sfm() returning 0 with Sccb_scsimsg == SMPARITY means the byte had
 * a parity error and will be re-sent, so we just restart automation.
 */
static void FPT_stsyncn(unsigned long port, unsigned char p_card)
{
    unsigned char sync_msg, offset, sync_reg, our_sync_msg;
    struct sccb *currSCCB;
    struct sccb_mgr_tar_info *currTar_Info;

    currSCCB = FPT_BL_Card[p_card].currentSCCB;
    currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];

    /* Transfer-period factor proposed by the target. */
    sync_msg = FPT_sfm(port, currSCCB);
    if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
        WR_HARPOON(port + hp_autostart_1,
               (AUTO_IMMED + DISCONNECT_START));
        return;
    }

    ACCEPT_MSG(port);
    /* REQ/ACK offset proposed by the target. */
    offset = FPT_sfm(port, currSCCB);
    if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
        WR_HARPOON(port + hp_autostart_1,
               (AUTO_IMMED + DISCONNECT_START));
        return;
    }

    /* Our own best rate, from the EEPROM configuration. */
    if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)
        our_sync_msg = 12;    /* Setup our Message to 20mb/s */
    else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_10MB)
        our_sync_msg = 25;    /* Setup our Message to 10mb/s */
    else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_5MB)
        our_sync_msg = 50;    /* Setup our Message to 5mb/s */
    else
        our_sync_msg = 0;    /* Message = Async */

    /* A smaller period factor means a faster rate; never agree to
       anything faster than our own limit. */
    if (sync_msg < our_sync_msg) {
        sync_msg = our_sync_msg;    /*if faster, then set to max. */
    }

    if (offset == ASYNC)
        sync_msg = ASYNC;

    if (offset > MAX_OFFSET)
        offset = MAX_OFFSET;

    /* Translate the agreed period factor into the chip's sync-rate
       register bits (coarse steps; last matching test wins). */
    sync_reg = 0x00;
    if (sync_msg > 12)
        sync_reg = 0x20;    /* Use 10MB/s */

    if (sync_msg > 25)
        sync_reg = 0x40;    /* Use 6.6MB/s */

    if (sync_msg > 38)
        sync_reg = 0x60;    /* Use 5MB/s */

    if (sync_msg > 50)
        sync_reg = 0x80;    /* Use 4MB/s */

    if (sync_msg > 62)
        sync_reg = 0xA0;    /* Use 3.33MB/s */

    if (sync_msg > 75)
        sync_reg = 0xC0;    /* Use 2.85MB/s */

    if (sync_msg > 87)
        sync_reg = 0xE0;    /* Use 2.5MB/s */

    if (sync_msg > 100) {
        sync_reg = 0x00;    /* Use ASYNC */
        offset = 0x00;
    }

    /* Low bits of the sync register carry the offset; narrow bit is
       kept set unless wide has been enabled for this target. */
    if (currTar_Info->TarStatus & WIDE_ENABLED)
        sync_reg |= offset;
    else
        sync_reg |= (offset | NARROW_SCSI);

    FPT_sssyncv(port, currSCCB->TargID, sync_reg, currTar_Info);

    if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
        /* We initiated the negotiation: accept the answer and
           mark sync as settled. */
        ACCEPT_MSG(port);

        currTar_Info->TarStatus = ((currTar_Info->TarStatus &
                        ~(unsigned char)TAR_SYNC_MASK) |
                       (unsigned char)SYNC_SUPPORTED);

        WR_HARPOON(port + hp_autostart_1,
               (AUTO_IMMED + DISCONNECT_START));
    }
    else {
        /* Target initiated: raise ATN and send our response. */
        ACCEPT_MSG_ATN(port);

        FPT_sisyncr(port, sync_msg, offset);

        currTar_Info->TarStatus = ((currTar_Info->TarStatus &
                        ~(unsigned char)TAR_SYNC_MASK) |
                       (unsigned char)SYNC_SUPPORTED);
    }
}
/*---------------------------------------------------------------------
*
* Function: FPT_sisyncr
*
* Description: Answer the targets sync message.
*
*---------------------------------------------------------------------*/
/*
 * Send our SDTR response (period factor + offset) back to the target:
 * load the automation RAM with the extended message, start the sequencer
 * in message-out-only mode, and busy-wait until the bus goes free or the
 * automation interrupt fires.
 */
static void FPT_sisyncr(unsigned long port, unsigned char sync_pulse,
            unsigned char offset)
{
    ARAM_ACCESS(port);
    /* Extended message: 0x01, length 3, SDTR, period, offset. */
    WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
    WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
    WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMSYNC));
    WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + sync_pulse));
    WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP));
    WRW_HARPOON((port + SYNC_MSGS + 10), (MPM_OP + AMSG_OUT + offset));
    WRW_HARPOON((port + SYNC_MSGS + 12), (BRH_OP + ALWAYS + NP));
    SGRAM_ACCESS(port);

    WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);
    WRW_HARPOON((port + hp_intstat), CLR_ALL_INT_1);

    WR_HARPOON(port + hp_autostart_3, (AUTO_IMMED + CMD_ONLY_STRT));

    /* Spin until the message has gone out (bus free or auto int). */
    while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | AUTO_INT))) {
    }
}
/*---------------------------------------------------------------------
*
* Function: FPT_siwidn
*
 * Description: Initiate wide data transfer negotiation with the target,
 *              if it has not already been negotiated.
*
*---------------------------------------------------------------------*/
/*
 * Start a WDTR (wide) negotiation for the current SCCB's target by
 * loading the automation RAM with an IDENTIFY message plus the extended
 * WDTR message (requesting 16-bit transfers) and starting a selection.
 * Returns 1 if a negotiation was started, 0 if wide was already
 * negotiated (in which case the target is forced to narrow).
 */
static unsigned char FPT_siwidn(unsigned long port, unsigned char p_card)
{
    struct sccb *currSCCB;
    struct sccb_mgr_tar_info *currTar_Info;

    currSCCB = FPT_BL_Card[p_card].currentSCCB;
    currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];

    if (!((currTar_Info->TarStatus & TAR_WIDE_MASK) == WIDE_NEGOCIATED)) {
        /* IDENTIFY with disconnect privilege stripped, then branch
           to the command phase script. */
        WRW_HARPOON((port + ID_MSG_STRT),
                (MPM_OP + AMSG_OUT +
                 (currSCCB->
                  Sccb_idmsg & ~(unsigned char)DISC_PRIV)));

        WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);

        /* Extended message: 0x01, length 2, WDTR, 16-bit width. */
        WRW_HARPOON((port + SYNC_MSGS + 0),
                (MPM_OP + AMSG_OUT + SMEXT));
        WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
        WRW_HARPOON((port + SYNC_MSGS + 4),
                (MPM_OP + AMSG_OUT + SMWDTR));
        WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
        WRW_HARPOON((port + SYNC_MSGS + 8),
                (MPM_OP + AMSG_OUT + SM16BIT));
        WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP));

        WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT));

        /* Mark wide as enabled while the negotiation runs. */
        currTar_Info->TarStatus = ((currTar_Info->TarStatus &
                        ~(unsigned char)TAR_WIDE_MASK) |
                       (unsigned char)WIDE_ENABLED);

        return 1;
    }
    else {
        /* Already negotiated: settle on narrow and clear the
           EEPROM wide request so we don't retry. */
        currTar_Info->TarStatus = ((currTar_Info->TarStatus &
                        ~(unsigned char)TAR_WIDE_MASK) |
                       WIDE_NEGOCIATED);

        currTar_Info->TarEEValue &= ~EE_WIDE_SCSI;
        return 0;
    }
}
/*---------------------------------------------------------------------
*
* Function: FPT_stwidn
*
 * Description: The target has sent us a Wide Nego message so handle it as
 *              necessary.
*
*---------------------------------------------------------------------*/
/*
 * Respond to a WDTR message from the target: read the proposed width,
 * clamp it to our EEPROM-configured capability, program the per-target
 * width bit, then either continue with sync negotiation (if we started
 * the exchange) or send our width response back.
 */
static void FPT_stwidn(unsigned long port, unsigned char p_card)
{
    unsigned char width;
    struct sccb *currSCCB;
    struct sccb_mgr_tar_info *currTar_Info;

    currSCCB = FPT_BL_Card[p_card].currentSCCB;
    currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];

    /* Width byte proposed by the target; 0 with SMPARITY means a
       parity error, so restart automation and let it be re-sent. */
    width = FPT_sfm(port, currSCCB);
    if ((width == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
        WR_HARPOON(port + hp_autostart_1,
               (AUTO_IMMED + DISCONNECT_START));
        return;
    }

    /* Never go wide if our configuration doesn't allow it. */
    if (!(currTar_Info->TarEEValue & EE_WIDE_SCSI))
        width = 0;

    /* Reuse 'width' as the value for the sync/width register:
       0 for wide, NARROW_SCSI bit set for narrow. */
    if (width) {
        currTar_Info->TarStatus |= WIDE_ENABLED;
        width = 0;
    } else {
        width = NARROW_SCSI;
        currTar_Info->TarStatus &= ~WIDE_ENABLED;
    }

    FPT_sssyncv(port, currSCCB->TargID, width, currTar_Info);

    if (currSCCB->Sccb_scsistat == SELECT_WN_ST) {
        /* We initiated the WDTR: wide is now settled. */
        currTar_Info->TarStatus |= WIDE_NEGOCIATED;

        if (!
            ((currTar_Info->TarStatus & TAR_SYNC_MASK) ==
             SYNC_SUPPORTED)) {
            /* Chain straight into sync negotiation. */
            ACCEPT_MSG_ATN(port);
            ARAM_ACCESS(port);
            FPT_sisyncn(port, p_card, 1);
            currSCCB->Sccb_scsistat = SELECT_SN_ST;
            SGRAM_ACCESS(port);
        } else {
            ACCEPT_MSG(port);
            WR_HARPOON(port + hp_autostart_1,
                   (AUTO_IMMED + DISCONNECT_START));
        }
    }
    else {
        /* Target initiated: answer with the widest width our
           configuration permits. */
        ACCEPT_MSG_ATN(port);

        if (currTar_Info->TarEEValue & EE_WIDE_SCSI)
            width = SM16BIT;
        else
            width = SM8BIT;

        FPT_siwidr(port, width);

        currTar_Info->TarStatus |= (WIDE_NEGOCIATED | WIDE_ENABLED);
    }
}
/*---------------------------------------------------------------------
*
* Function: FPT_siwidr
*
* Description: Answer the targets Wide nego message.
*
*---------------------------------------------------------------------*/
/*
 * Send our WDTR response (SM8BIT or SM16BIT) back to the target: load
 * the automation RAM with the extended message, start the sequencer in
 * message-out-only mode, and busy-wait until the bus goes free or the
 * automation interrupt fires.
 */
static void FPT_siwidr(unsigned long port, unsigned char width)
{
    ARAM_ACCESS(port);
    /* Extended message: 0x01, length 2, WDTR, width. */
    WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
    WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
    WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMWDTR));
    WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
    WRW_HARPOON((port + SYNC_MSGS + 8), (MPM_OP + AMSG_OUT + width));
    WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP));
    SGRAM_ACCESS(port);

    WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);
    WRW_HARPOON((port + hp_intstat), CLR_ALL_INT_1);

    WR_HARPOON(port + hp_autostart_3, (AUTO_IMMED + CMD_ONLY_STRT));

    /* Spin until the message has gone out (bus free or auto int). */
    while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | AUTO_INT))) {
    }
}
/*---------------------------------------------------------------------
*
* Function: FPT_sssyncv
*
* Description: Write the desired value to the Sync Register for the
* ID specified.
*
*---------------------------------------------------------------------*/
/*
 * Write p_sync_value to the hp_synctarg_N register belonging to SCSI
 * ID p_id, and cache the value in the target's TarSyncCtrl field.
 */
static void FPT_sssyncv(unsigned long p_port, unsigned char p_id,
            unsigned char p_sync_value,
            struct sccb_mgr_tar_info *currTar_Info)
{
    /*
     * The hp_synctarg_N registers are laid out in groups of four in
     * reverse group order relative to the target IDs:
     *   IDs  0- 3 -> offsets 12-15 (hp_synctarg_0  - hp_synctarg_3)
     *   IDs  4- 7 -> offsets  8-11 (hp_synctarg_4  - hp_synctarg_7)
     *   IDs  8-11 -> offsets  4- 7 (hp_synctarg_8  - hp_synctarg_11)
     *   IDs 12-15 -> offsets  0- 3 (hp_synctarg_12 - hp_synctarg_15)
     */
    static const unsigned char synctarg_offset[16] = {
        12, 13, 14, 15,
        8, 9, 10, 11,
        4, 5, 6, 7,
        0, 1, 2, 3
    };
    unsigned char index = p_id;

    /* IDs outside 0-15 are passed through unchanged, matching the
       historical switch-based mapping. */
    if (index < 16)
        index = synctarg_offset[index];

    WR_HARPOON(p_port + hp_synctarg_base + index, p_sync_value);

    /* Remember what was written so it can be restored later. */
    currTar_Info->TarSyncCtrl = p_sync_value;
}
/*---------------------------------------------------------------------
*
* Function: FPT_sresb
*
* Description: Reset the desired card's SCSI bus.
*
*---------------------------------------------------------------------*/
/*
 * Reset the SCSI bus for card p_card: pulse SCSI RST (timed with the
 * selection-timeout counter), clear all negotiated per-target state,
 * and flush the card's queues back to an idle state.
 */
static void FPT_sresb(unsigned long port, unsigned char p_card)
{
    unsigned char scsiID, i;

    struct sccb_mgr_tar_info *currTar_Info;

    /* Block interrupts while the bus is being yanked around. */
    WR_HARPOON(port + hp_page_ctrl,
           (RD_HARPOON(port + hp_page_ctrl) | G_INT_DISABLE));
    WRW_HARPOON((port + hp_intstat), CLR_ALL_INT);

    WR_HARPOON(port + hp_scsictrl_0, SCSI_RST);

    /* Reuse the selection-timeout counter to hold RST for ~5ms;
       'scsiID' temporarily holds the saved timeout value. */
    scsiID = RD_HARPOON(port + hp_seltimeout);
    WR_HARPOON(port + hp_seltimeout, TO_5ms);
    WRW_HARPOON((port + hp_intstat), TIMEOUT);

    WR_HARPOON(port + hp_portctrl_0, (SCSI_PORT | START_TO));

    while (!(RDW_HARPOON((port + hp_intstat)) & TIMEOUT)) {
    }

    WR_HARPOON(port + hp_seltimeout, scsiID);

    WR_HARPOON(port + hp_scsictrl_0, ENA_SCAM_SEL);

    /* Let the bus settle after releasing reset. */
    FPT_Wait(port, TO_5ms);

    WRW_HARPOON((port + hp_intstat), CLR_ALL_INT);

    /* NOTE(review): OR-ing with 0x00 leaves hp_int_mask unchanged;
       this read-modify-write is effectively a no-op. */
    WR_HARPOON(port + hp_int_mask, (RD_HARPOON(port + hp_int_mask) | 0x00));

    /* The reset voids all sync/wide agreements; reprogram every
       target to narrow/async and reinitialise its table entry. */
    for (scsiID = 0; scsiID < MAX_SCSI_TAR; scsiID++) {
        currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID];

        if (currTar_Info->TarEEValue & EE_SYNC_MASK) {
            currTar_Info->TarSyncCtrl = 0;
            currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
        }

        if (currTar_Info->TarEEValue & EE_WIDE_SCSI) {
            currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
        }

        FPT_sssyncv(port, scsiID, NARROW_SCSI, currTar_Info);

        FPT_SccbMgrTableInitTarget(p_card, scsiID);
    }

    /* Reset the card-level bookkeeping and disconnect queue. */
    FPT_BL_Card[p_card].scanIndex = 0x00;
    FPT_BL_Card[p_card].currentSCCB = NULL;
    FPT_BL_Card[p_card].globalFlags &= ~(F_TAG_STARTED | F_HOST_XFER_ACT
                         | F_NEW_SCCB_CMD);
    FPT_BL_Card[p_card].cmdCounter = 0x00;
    FPT_BL_Card[p_card].discQCount = 0x00;
    FPT_BL_Card[p_card].tagQ_Lst = 0x01;

    for (i = 0; i < QUEUE_DEPTH; i++)
        FPT_BL_Card[p_card].discQ_Tbl[i] = NULL;

    WR_HARPOON(port + hp_page_ctrl,
           (RD_HARPOON(port + hp_page_ctrl) & ~G_INT_DISABLE));

}
/*---------------------------------------------------------------------
*
* Function: FPT_ssenss
*
* Description: Setup for the Auto Sense command.
*
*---------------------------------------------------------------------*/
/*
 * Prepare the current SCCB for an automatic REQUEST SENSE: save the
 * original CDB, replace it with a 6-byte REQUEST SENSE command (keeping
 * the LUN bits), and point the transfer at the sense buffer with
 * scatter/gather and disconnect privilege disabled.
 */
static void FPT_ssenss(struct sccb_card *pCurrCard)
{
    struct sccb *currSCCB = pCurrCard->currentSCCB;
    unsigned char i;

    /* Stash the original CDB so it can be restored afterwards. */
    currSCCB->Save_CdbLen = currSCCB->CdbLength;
    for (i = 0; i < 6; i++)
        currSCCB->Save_Cdb[i] = currSCCB->Cdb[i];

    /* Build the REQUEST SENSE command in place. */
    currSCCB->CdbLength = SIX_BYTE_CMD;
    currSCCB->Cdb[0] = SCSI_REQUEST_SENSE;
    currSCCB->Cdb[1] &= (unsigned char)0xE0;    /* Keep LUN. */
    currSCCB->Cdb[2] = 0x00;
    currSCCB->Cdb[3] = 0x00;
    currSCCB->Cdb[4] = currSCCB->RequestSenseLength;
    currSCCB->Cdb[5] = 0x00;

    /* Transfer straight into the sense buffer, no scatter/gather. */
    currSCCB->Sccb_XferCnt = (unsigned long)currSCCB->RequestSenseLength;
    currSCCB->Sccb_ATC = 0x00;
    currSCCB->Sccb_XferState |= F_AUTO_SENSE;
    currSCCB->Sccb_XferState &= ~F_SG_XFER;

    /* No disconnect privilege while fetching sense data. */
    currSCCB->Sccb_idmsg &= ~(unsigned char)DISC_PRIV;

    currSCCB->ControlByte = 0x00;
    currSCCB->Sccb_MGRFlags &= F_STATUSLOADED;
}
/*---------------------------------------------------------------------
*
* Function: FPT_sxfrp
*
* Description: Transfer data into the bit bucket until the device
* decides to switch phase.
*
*---------------------------------------------------------------------*/
/*
 * Pad out the current data phase by feeding the bit bucket: read and
 * discard incoming bytes (or write 0xFA filler bytes) until the target
 * switches phase, then drain the FIFO and hand control back to the
 * automation for the disconnect sequence.
 */
static void FPT_sxfrp(unsigned long p_port, unsigned char p_card)
{
    unsigned char curr_phz;

    DISABLE_AUTO(p_port);

    if (FPT_BL_Card[p_card].globalFlags & F_HOST_XFER_ACT) {
        FPT_hostDataXferAbort(p_port, p_card,
                      FPT_BL_Card[p_card].currentSCCB);
    }

    /* If the Automation handled the end of the transfer then do not
       match the phase or we will get out of sync with the ISR.       */

    if (RDW_HARPOON((p_port + hp_intstat)) &
        (BUS_FREE | XFER_CNT_0 | AUTO_INT))
        return;

    WR_HARPOON(p_port + hp_xfercnt_0, 0x00);

    /* Latch the current phase and echo it to the signal register so
       the chip tracks the target manually. */
    curr_phz = RD_HARPOON(p_port + hp_scsisig) & (unsigned char)S_SCSI_PHZ;

    WRW_HARPOON((p_port + hp_intstat), XFER_CNT_0);

    WR_HARPOON(p_port + hp_scsisig, curr_phz);

    /* Pad until bus free, reset, or a phase change. */
    while (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET)) &&
           (curr_phz ==
        (RD_HARPOON(p_port + hp_scsisig) & (unsigned char)S_SCSI_PHZ)))
    {
        if (curr_phz & (unsigned char)SCSI_IOBIT) {
            /* Target -> host phase: sink incoming bytes. */
            WR_HARPOON(p_port + hp_portctrl_0,
                   (SCSI_PORT | HOST_PORT | SCSI_INBIT));

            if (!(RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY)) {
                RD_HARPOON(p_port + hp_fifodata_0);
            }
        } else {
            /* Host -> target phase: feed filler bytes. */
            WR_HARPOON(p_port + hp_portctrl_0,
                   (SCSI_PORT | HOST_PORT | HOST_WRT));
            if (RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY) {
                WR_HARPOON(p_port + hp_fifodata_0, 0xFA);
            }
        }
    }            /* End of While loop for padding data I/O phase */

    /* Wait for the target to raise REQ in the new phase (or go
       bus free / reset). */
    while (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET))) {
        if (RD_HARPOON(p_port + hp_scsisig) & SCSI_REQ)
            break;
    }

    /* Drain anything still sitting in the FIFO. */
    WR_HARPOON(p_port + hp_portctrl_0,
           (SCSI_PORT | HOST_PORT | SCSI_INBIT));
    while (!(RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY)) {
        RD_HARPOON(p_port + hp_fifodata_0);
    }

    /* Restart automation to run the disconnect sequence, then wait
       for it to finish. */
    if (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET))) {
        WR_HARPOON(p_port + hp_autostart_0,
               (AUTO_IMMED + DISCONNECT_START));
        while (!(RDW_HARPOON((p_port + hp_intstat)) & AUTO_INT)) {
        }

        if (RDW_HARPOON((p_port + hp_intstat)) &
            (ICMD_COMP | ITAR_DISC))
            while (!
                   (RDW_HARPOON((p_port + hp_intstat)) &
                (BUS_FREE | RSEL))) ;
    }
}
/*---------------------------------------------------------------------
*
* Function: FPT_schkdd
*
* Description: Make sure data has been flushed from both FIFOs and abort
* the operations if necessary.
*
*---------------------------------------------------------------------*/
/*
 * After a data phase ends, reconcile the transfer counters, flush both
 * FIFOs, and decide whether another data phase (or bit-bucket padding)
 * is needed before the target can disconnect.  Only meaningful when the
 * SCCB is in DATA_IN_ST or DATA_OUT_ST.
 */
static void FPT_schkdd(unsigned long port, unsigned char p_card)
{
    unsigned short TimeOutLoop;
    unsigned char sPhase;

    struct sccb *currSCCB;

    currSCCB = FPT_BL_Card[p_card].currentSCCB;

    if ((currSCCB->Sccb_scsistat != DATA_OUT_ST) &&
        (currSCCB->Sccb_scsistat != DATA_IN_ST)) {
        return;
    }

    if (currSCCB->Sccb_XferState & F_ODD_BALL_CNT) {
        /* One odd trailing byte is still pending: fold the rest
           into the accumulated transfer count and clear the FIFO
           read pointer so the final byte can be handled. */
        currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt - 1);
        currSCCB->Sccb_XferCnt = 1;
        currSCCB->Sccb_XferState &= ~F_ODD_BALL_CNT;
        WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00);
        WR_HARPOON(port + hp_xferstat, 0x00);
    }
    else {
        currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt;
        currSCCB->Sccb_XferCnt = 0;
    }

    if ((RDW_HARPOON((port + hp_intstat)) & PARITY) &&
        (currSCCB->HostStatus == SCCB_COMPLETE)) {
        currSCCB->HostStatus = SCCB_PARITY_ERR;
        WRW_HARPOON((port + hp_intstat), PARITY);
    }

    FPT_hostDataXferAbort(port, p_card, currSCCB);

    /* Wait for any in-flight ACK to drop. */
    while (RD_HARPOON(port + hp_scsisig) & SCSI_ACK) {
    }

    /* Wait for the FIFO to show data, an offset to build up, or the
       target to move on; bail out on bus free or reset. */
    TimeOutLoop = 0;

    while (RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY) {
        if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) {
            return;
        }
        if (RD_HARPOON(port + hp_offsetctr) & (unsigned char)0x1F) {
            break;
        }
        if (RDW_HARPOON((port + hp_intstat)) & RESET) {
            return;
        }
        if ((RD_HARPOON(port + hp_scsisig) & SCSI_REQ)
            || (TimeOutLoop++ > 0x3000))
            break;
    }

    sPhase = RD_HARPOON(port + hp_scsisig) & (SCSI_BSY | S_SCSI_PHZ);
    if ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) ||
        (RD_HARPOON(port + hp_offsetctr) & (unsigned char)0x1F) ||
        (sPhase == (SCSI_BSY | S_DATAO_PH)) ||
        (sPhase == (SCSI_BSY | S_DATAI_PH))) {

        /* There is still data movement outstanding. */
        WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);

        if (!(currSCCB->Sccb_XferState & F_ALL_XFERRED)) {
            /* More real data to move: re-enter the proper
               data phase handler. */
            if (currSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
                FPT_phaseDataIn(port, p_card);
            }

            else {
                FPT_phaseDataOut(port, p_card);
            }
        } else {
            /* Everything was transferred: pad out the excess
               and decode the next phase ourselves. */
            FPT_sxfrp(port, p_card);
            if (!(RDW_HARPOON((port + hp_intstat)) &
                  (BUS_FREE | ICMD_COMP | ITAR_DISC | RESET))) {
                WRW_HARPOON((port + hp_intstat), AUTO_INT);
                FPT_phaseDecode(port, p_card);
            }
        }
    }

    else {
        WR_HARPOON(port + hp_portctrl_0, 0x00);
    }
}
/*---------------------------------------------------------------------
*
* Function: FPT_sinits
*
* Description: Setup SCCB manager fields in this SCCB.
*
*---------------------------------------------------------------------*/
/*
 * Initialise the SCCB-manager private fields of a freshly submitted
 * SCCB: transfer state/counters, tagged-queueing decision, IDENTIFY
 * message (with or without disconnect privilege), and status fields.
 * Silently returns for out-of-range target IDs or LUNs.
 */
static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
{
    struct sccb_mgr_tar_info *currTar_Info;

    if ((p_sccb->TargID >= MAX_SCSI_TAR) || (p_sccb->Lun >= MAX_LUN)) {
        return;
    }
    currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID];

    p_sccb->Sccb_XferState = 0x00;
    p_sccb->Sccb_XferCnt = p_sccb->DataLength;

    if ((p_sccb->OperationCode == SCATTER_GATHER_COMMAND) ||
        (p_sccb->OperationCode == RESIDUAL_SG_COMMAND)) {
        /* Scatter/gather: counts come from the SG list, not
           from DataLength. */
        p_sccb->Sccb_SGoffset = 0;
        p_sccb->Sccb_XferState = F_SG_XFER;
        p_sccb->Sccb_XferCnt = 0x00;
    }

    if (p_sccb->DataLength == 0x00)

        p_sccb->Sccb_XferState |= F_ALL_XFERRED;

    if (p_sccb->ControlByte & F_USE_CMD_Q) {
        /* Honour the tag-queueing request only if the target has
           not already rejected tagged commands. */
        if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT)
            p_sccb->ControlByte &= ~F_USE_CMD_Q;

        else
            currTar_Info->TarStatus |= TAG_Q_TRYING;
    }

/*      For !single SCSI device in system  & device allow Disconnect
	or command is tag_q type then send Cmd with Disconnect Enable
	else send Cmd with Disconnect Disable */

/*
   if (((!(FPT_BL_Card[p_card].globalFlags & F_SINGLE_DEVICE)) &&
   (currTar_Info->TarStatus & TAR_ALLOW_DISC)) ||
   (currTar_Info->TarStatus & TAG_Q_TRYING)) {
 */
    if ((currTar_Info->TarStatus & TAR_ALLOW_DISC) ||
        (currTar_Info->TarStatus & TAG_Q_TRYING)) {
        /* IDENTIFY message with disconnect privilege. */
        p_sccb->Sccb_idmsg =
            (unsigned char)(SMIDENT | DISC_PRIV) | p_sccb->Lun;
    }

    else {
        p_sccb->Sccb_idmsg = (unsigned char)SMIDENT | p_sccb->Lun;
    }

    p_sccb->HostStatus = 0x00;
    p_sccb->TargetStatus = 0x00;
    p_sccb->Sccb_tag = 0x00;
    p_sccb->Sccb_MGRFlags = 0x00;
    p_sccb->Sccb_sgseg = 0x00;
    p_sccb->Sccb_ATC = 0x00;
    p_sccb->Sccb_savedATC = 0x00;
/*
   p_sccb->SccbVirtDataPtr    = 0x00;
   p_sccb->Sccb_forwardlink   = NULL;
   p_sccb->Sccb_backlink      = NULL;
 */
    p_sccb->Sccb_scsistat = BUS_FREE_ST;
    p_sccb->SccbStatus = SCCB_IN_PROCESS;
    p_sccb->Sccb_scsimsg = SMNO_OP;

}
/*---------------------------------------------------------------------
*
* Function: Phase Decode
*
* Description: Determine the phase and call the appropriate function.
*
*---------------------------------------------------------------------*/
/*
 * Read the current SCSI bus phase from the signal register and invoke
 * the matching handler from the FPT_s_PhaseTbl dispatch table.
 */
static void FPT_phaseDecode(unsigned long p_port, unsigned char p_card)
{
    unsigned char phase_ref;

    DISABLE_AUTO(p_port);

    /* The low bits of hp_scsisig encode the bus phase; they index
       straight into the phase-handler table. */
    phase_ref =
        (unsigned char)(RD_HARPOON(p_port + hp_scsisig) & S_SCSI_PHZ);

    (*FPT_s_PhaseTbl[phase_ref]) (p_port, p_card);
}
/*---------------------------------------------------------------------
*
* Function: Data Out Phase
*
* Description: Start up both the BusMaster and Xbow.
*
*---------------------------------------------------------------------*/
/*
 * Enter the DATA OUT phase: mark the SCCB state, arm the end-of-data
 * automation script, and start the bus-master transfer.  If nothing is
 * left to transfer, the data phase is over-run and gets padded out via
 * FPT_sxfrp().
 */
static void FPT_phaseDataOut(unsigned long port, unsigned char p_card)
{

    struct sccb *currSCCB;

    currSCCB = FPT_BL_Card[p_card].currentSCCB;
    if (currSCCB == NULL) {
        return;        /* Exit if No SCCB record */
    }

    currSCCB->Sccb_scsistat = DATA_OUT_ST;
    /* Direction flag cleared => host-to-target transfer. */
    currSCCB->Sccb_XferState &= ~(F_HOST_XFER_DIR | F_NO_DATA_YET);

    WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);

    WRW_HARPOON((port + hp_intstat), XFER_CNT_0);

    WR_HARPOON(port + hp_autostart_0, (END_DATA + END_DATA_START));

    FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]);

    if (currSCCB->Sccb_XferCnt == 0) {
        /* Nothing (left) to send: if the caller asked for a data
           out transfer, this is an over-run; pad the phase out. */
        if ((currSCCB->ControlByte & SCCB_DATA_XFER_OUT) &&
            (currSCCB->HostStatus == SCCB_COMPLETE))
            currSCCB->HostStatus = SCCB_DATA_OVER_RUN;

        FPT_sxfrp(port, p_card);
        if (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | RESET)))
            FPT_phaseDecode(port, p_card);
    }
}
/*---------------------------------------------------------------------
*
* Function: Data In Phase
*
* Description: Startup the BusMaster and the XBOW.
*
*---------------------------------------------------------------------*/
/*
 * Enter the DATA IN phase: mirror image of FPT_phaseDataOut() — mark
 * the SCCB state, set the host-transfer direction flag, arm the
 * end-of-data script, and start the bus-master transfer; a zero count
 * is an over-run padded out via FPT_sxfrp().
 */
static void FPT_phaseDataIn(unsigned long port, unsigned char p_card)
{

    struct sccb *currSCCB;

    currSCCB = FPT_BL_Card[p_card].currentSCCB;

    if (currSCCB == NULL) {
        return;        /* Exit if No SCCB record */
    }

    currSCCB->Sccb_scsistat = DATA_IN_ST;
    /* Direction flag set => target-to-host transfer. */
    currSCCB->Sccb_XferState |= F_HOST_XFER_DIR;
    currSCCB->Sccb_XferState &= ~F_NO_DATA_YET;

    WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);

    WRW_HARPOON((port + hp_intstat), XFER_CNT_0);

    WR_HARPOON(port + hp_autostart_0, (END_DATA + END_DATA_START));

    FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]);

    if (currSCCB->Sccb_XferCnt == 0) {
        /* Nothing (left) to receive: if the caller asked for a
           data in transfer, this is an over-run; pad it out. */
        if ((currSCCB->ControlByte & SCCB_DATA_XFER_IN) &&
            (currSCCB->HostStatus == SCCB_COMPLETE))
            currSCCB->HostStatus = SCCB_DATA_OVER_RUN;

        FPT_sxfrp(port, p_card);
        if (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | RESET)))
            FPT_phaseDecode(port, p_card);
    }
}
/*---------------------------------------------------------------------
*
* Function: Command Phase
*
* Description: Load the CDB into the automation and start it up.
*
*---------------------------------------------------------------------*/
/*
 * Enter the COMMAND phase: copy the SCCB's CDB into the automation RAM
 * as a sequence of message-out opcodes and start the sequencer.  A
 * RESET_COMMAND SCCB sends a six-byte all-zero CDB instead and is
 * marked as a phase-sequence failure up front.
 */
static void FPT_phaseCommand(unsigned long p_port, unsigned char p_card)
{
    struct sccb *currSCCB;
    unsigned long cdb_reg;
    unsigned char i;

    currSCCB = FPT_BL_Card[p_card].currentSCCB;

    if (currSCCB->OperationCode == RESET_COMMAND) {

        currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
        currSCCB->CdbLength = SIX_BYTE_CMD;
    }

    WR_HARPOON(p_port + hp_scsisig, 0x00);

    ARAM_ACCESS(p_port);

    /* Write one automation instruction per CDB byte. */
    cdb_reg = p_port + CMD_STRT;

    for (i = 0; i < currSCCB->CdbLength; i++) {

        if (currSCCB->OperationCode == RESET_COMMAND)

            WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + 0x00));

        else
            WRW_HARPOON(cdb_reg,
                    (MPM_OP + ACOMMAND + currSCCB->Cdb[i]));
        cdb_reg += 2;
    }

    /* Terminate the script unless the CDB fills the whole slot
       (twelve bytes uses every instruction word). */
    if (currSCCB->CdbLength != TWELVE_BYTE_CMD)
        WRW_HARPOON(cdb_reg, (BRH_OP + ALWAYS + NP));

    WR_HARPOON(p_port + hp_portctrl_0, (SCSI_PORT));

    currSCCB->Sccb_scsistat = COMMAND_ST;

    WR_HARPOON(p_port + hp_autostart_3, (AUTO_IMMED | CMD_ONLY_STRT));
    SGRAM_ACCESS(p_port);
}
/*---------------------------------------------------------------------
*
* Function: Status phase
*
* Description: Bring in the status and command complete message bytes
*
*---------------------------------------------------------------------*/
/*
 * Enter the STATUS phase.  p_card is unused; the automation collects
 * the status and command-complete bytes and the ISR finishes the job.
 */
static void FPT_phaseStatus(unsigned long port, unsigned char p_card)
{
    /* Start-up the automation to finish off this command and let the
       isr handle the interrupt for command complete when it comes in.
       We could wait here for the interrupt to be generated?
     */
    WR_HARPOON(port + hp_scsisig, 0x00);

    WR_HARPOON(port + hp_autostart_0, (AUTO_IMMED + END_DATA_START));
}
/*---------------------------------------------------------------------
*
* Function: Phase Message Out
*
 * Description: Send out our message (if we have one) and handle whatever
 *              else is involved.
*
*---------------------------------------------------------------------*/
/*
 * Enter the MESSAGE OUT phase: put the SCCB's pending message byte on
 * the bus by hand (SMABORT if there is no current SCCB), performing any
 * side effects the message implies first (device reset clears sync/wide
 * state and flushes the queue; abort completes the SCCB; etc.), then
 * follow the bus to wherever the target takes it next.
 */
static void FPT_phaseMsgOut(unsigned long port, unsigned char p_card)
{
    unsigned char message, scsiID;
    struct sccb *currSCCB;
    struct sccb_mgr_tar_info *currTar_Info;

    currSCCB = FPT_BL_Card[p_card].currentSCCB;

    if (currSCCB != NULL) {

        message = currSCCB->Sccb_scsimsg;
        scsiID = currSCCB->TargID;

        if (message == SMDEV_RESET) {
            /* A device reset voids the target's sync/wide
               agreements and flushes its queued SCCBs. */
            currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID];
            currTar_Info->TarSyncCtrl = 0;
            FPT_sssyncv(port, scsiID, NARROW_SCSI, currTar_Info);

            if (FPT_sccbMgrTbl[p_card][scsiID].
                TarEEValue & EE_SYNC_MASK) {

                FPT_sccbMgrTbl[p_card][scsiID].TarStatus &=
                    ~TAR_SYNC_MASK;

            }

            if (FPT_sccbMgrTbl[p_card][scsiID].
                TarEEValue & EE_WIDE_SCSI) {

                FPT_sccbMgrTbl[p_card][scsiID].TarStatus &=
                    ~TAR_WIDE_MASK;
            }

            FPT_queueFlushSccb(p_card, SCCB_COMPLETE);
            FPT_SccbMgrTableInitTarget(p_card, scsiID);
        } else if (currSCCB->Sccb_scsistat == ABORT_ST) {
            /* Aborting: complete the SCCB and release its
               disconnect-queue slot, if it holds one. */
            currSCCB->HostStatus = SCCB_COMPLETE;
            if (FPT_BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] !=
                NULL) {
                FPT_BL_Card[p_card].discQ_Tbl[currSCCB->
                                  Sccb_tag] = NULL;
                FPT_sccbMgrTbl[p_card][scsiID].TarTagQ_Cnt--;
            }

        }

        else if (currSCCB->Sccb_scsistat < COMMAND_ST) {

            if (message == SMNO_OP) {
                /* Nothing pending before the command phase:
                   restart the selection sequence instead of
                   sending a message. */
                currSCCB->Sccb_MGRFlags |= F_DEV_SELECTED;

                FPT_ssel(port, p_card);
                return;
            }
        } else {

            if (message == SMABORT)

                FPT_queueFlushSccb(p_card, SCCB_COMPLETE);

        }

    } else {
        /* No SCCB to speak of: abort whatever is on the bus. */
        message = SMABORT;
    }

    /* Manually drive the message byte onto the bus and ACK it. */
    WRW_HARPOON((port + hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0));

    WR_HARPOON(port + hp_portctrl_0, SCSI_BUS_EN);

    WR_HARPOON(port + hp_scsidata_0, message);

    WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH));

    ACCEPT_MSG(port);

    WR_HARPOON(port + hp_portctrl_0, 0x00);

    if ((message == SMABORT) || (message == SMDEV_RESET) ||
        (message == SMABORT_TAG)) {
        /* Terminal messages: wait for bus free (expected) or a
           phase change (target misbehaving). */

        while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | PHASE))) {
        }

        if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) {
            WRW_HARPOON((port + hp_intstat), BUS_FREE);

            if (currSCCB != NULL) {
                /* Clear the right LUN-busy slot and hand
                   the SCCB back as complete. */
                if ((FPT_BL_Card[p_card].
                     globalFlags & F_CONLUN_IO)
                    &&
                    ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
                      TarStatus & TAR_TAG_Q_MASK) !=
                     TAG_Q_TRYING))
                    FPT_sccbMgrTbl[p_card][currSCCB->
                                   TargID].
                        TarLUNBusy[currSCCB->Lun] = 0;
                else
                    FPT_sccbMgrTbl[p_card][currSCCB->
                                   TargID].
                        TarLUNBusy[0] = 0;

                FPT_queueCmdComplete(&FPT_BL_Card[p_card],
                             currSCCB, p_card);
            }

            else {
                FPT_BL_Card[p_card].globalFlags |=
                    F_NEW_SCCB_CMD;
            }
        }

        else {
            /* Target changed phase instead of dropping the
               bus: pad the phase out. */

            FPT_sxfrp(port, p_card);
        }
    }

    else {
        /* Non-terminal message. A parity retry resolves to a
           no-op; otherwise follow the target's phase. */

        if (message == SMPARITY) {
            currSCCB->Sccb_scsimsg = SMNO_OP;
            WR_HARPOON(port + hp_autostart_1,
                   (AUTO_IMMED + DISCONNECT_START));
        } else {
            FPT_sxfrp(port, p_card);
        }
    }
}
/*---------------------------------------------------------------------
*
* Function: Message In phase
*
* Description: Bring in the message and determine what to do with it.
*
*---------------------------------------------------------------------*/
/*
 * Enter the MESSAGE IN phase: peek at the message byte on the bus.
 * DISCONNECT and SAVE DATA POINTER are handled wholly by the automation;
 * everything else is read properly (with parity checking via FPT_sfm)
 * and dispatched to FPT_sdecm().
 */
static void FPT_phaseMsgIn(unsigned long port, unsigned char p_card)
{
    unsigned char message;
    struct sccb *currSCCB;

    currSCCB = FPT_BL_Card[p_card].currentSCCB;

    if (FPT_BL_Card[p_card].globalFlags & F_HOST_XFER_ACT) {
        /* A transfer was in flight: settle the FIFOs first. */
        FPT_phaseChkFifo(port, p_card);
    }

    /* Peek without ACKing: the byte stays valid on the bus. */
    message = RD_HARPOON(port + hp_scsidata_0);
    if ((message == SMDISC) || (message == SMSAVE_DATA_PTR)) {

        WR_HARPOON(port + hp_autostart_1,
               (AUTO_IMMED + END_DATA_START));

    }

    else {

        message = FPT_sfm(port, currSCCB);
        if (message) {

            FPT_sdecm(message, port, p_card);

        } else {
            /* Failed read: don't ACK a parity error (the byte
               will be re-sent); resume automation. */
            if (currSCCB->Sccb_scsimsg != SMPARITY)
                ACCEPT_MSG(port);
            WR_HARPOON(port + hp_autostart_1,
                   (AUTO_IMMED + DISCONNECT_START));
        }
    }

}
/*---------------------------------------------------------------------
*
* Function: Illegal phase
*
* Description: Target switched to some illegal phase, so all we can do
* is report an error back to the host (if that is possible)
* and send an ABORT message to the misbehaving target.
*
*---------------------------------------------------------------------*/
/*
 * The target switched to an illegal phase.  Record the failure on the
 * current SCCB (if any), queue an ABORT message for the target, and
 * raise ATN so the message can be delivered.
 */
static void FPT_phaseIllegal(unsigned long port, unsigned char p_card)
{
    struct sccb *currSCCB = FPT_BL_Card[p_card].currentSCCB;

    /* Re-assert the current signal state to hold the bus where it is. */
    WR_HARPOON(port + hp_scsisig, RD_HARPOON(port + hp_scsisig));

    if (currSCCB != NULL) {
        currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
        currSCCB->Sccb_scsistat = ABORT_ST;
        currSCCB->Sccb_scsimsg = SMABORT;
    }

    ACCEPT_MSG_ATN(port);
}
/*---------------------------------------------------------------------
*
* Function: Phase Check FIFO
*
* Description: Make sure data has been flushed from both FIFOs and abort
* the operations if necessary.
*
*---------------------------------------------------------------------*/
/*
 * Reconcile the hardware FIFOs with the SCCB's transfer accounting
 * after a phase change: drain/abort any outstanding bus-master work,
 * fold the residual count into Sccb_ATC, latch parity errors into the
 * host status, and clear all FIFO/transfer state.
 */
static void FPT_phaseChkFifo(unsigned long port, unsigned char p_card)
{
    unsigned long xfercnt;
    struct sccb *currSCCB;

    currSCCB = FPT_BL_Card[p_card].currentSCCB;

    if (currSCCB->Sccb_scsistat == DATA_IN_ST) {

        /* Wait while the bus master drains the FIFO. */
        while ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) &&
               (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY)) {
        }

        if (!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) {
            /* Data is stuck in the FIFO: account for what did
               move, abort the host transfer, and restart the
               data-transfer processor to flush the rest. */
            currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt;

            currSCCB->Sccb_XferCnt = 0;

            if ((RDW_HARPOON((port + hp_intstat)) & PARITY) &&
                (currSCCB->HostStatus == SCCB_COMPLETE)) {
                currSCCB->HostStatus = SCCB_PARITY_ERR;
                WRW_HARPOON((port + hp_intstat), PARITY);
            }

            FPT_hostDataXferAbort(port, p_card, currSCCB);

            FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]);

            while ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY))
                   && (RD_HARPOON(port + hp_ext_status) &
                   BM_CMD_BUSY)) {
            }

        }
    }

    /*End Data In specific code. */
    GET_XFER_CNT(port, xfercnt);

    WR_HARPOON(port + hp_xfercnt_0, 0x00);

    WR_HARPOON(port + hp_portctrl_0, 0x00);

    /* Whatever the counter still holds was NOT transferred; move the
       completed portion into the accumulated count. */
    currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt - xfercnt);

    currSCCB->Sccb_XferCnt = xfercnt;

    if ((RDW_HARPOON((port + hp_intstat)) & PARITY) &&
        (currSCCB->HostStatus == SCCB_COMPLETE)) {

        currSCCB->HostStatus = SCCB_PARITY_ERR;
        WRW_HARPOON((port + hp_intstat), PARITY);
    }

    FPT_hostDataXferAbort(port, p_card, currSCCB);

    /* Reset FIFO pointers and transfer status for the next phase. */
    WR_HARPOON(port + hp_fifowrite, 0x00);
    WR_HARPOON(port + hp_fiforead, 0x00);
    WR_HARPOON(port + hp_xferstat, 0x00);

    WRW_HARPOON((port + hp_intstat), XFER_CNT_0);
}
/*---------------------------------------------------------------------
*
* Function: Phase Bus Free
*
* Description: We just went bus free so figure out if it was
* because of command complete or from a disconnect.
*
*---------------------------------------------------------------------*/
/*
 * Handle an unexpected or expected BUS FREE: decide whether it was a
 * command completion, a failed negotiation, a tagged-queuing rejection,
 * or a phase sequence failure, and update the target/SCCB state.
 */
static void FPT_phaseBusFree(unsigned long port, unsigned char p_card)
{
	struct sccb *currSCCB;

	currSCCB = FPT_BL_Card[p_card].currentSCCB;

	if (currSCCB != NULL) {

		DISABLE_AUTO(port);

		if (currSCCB->OperationCode == RESET_COMMAND) {
			/*
			 * Reset command finished: free the LUN (per-LUN when
			 * concurrent-LUN I/O is on and not tag-queue-trying),
			 * complete the SCCB and look for the next command.
			 */
			if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
			    ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
			      TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarLUNBusy[currSCCB->Lun] = 0;
			else
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarLUNBusy[0] = 0;

			FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB,
					     p_card);

			FPT_queueSearchSelect(&FPT_BL_Card[p_card], p_card);

		}

		else if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
			/*
			 * Target went bus free during sync negotiation:
			 * treat it as "sync handled" and stop requesting it.
			 */
			FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |=
			    (unsigned char)SYNC_SUPPORTED;
			FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &=
			    ~EE_SYNC_MASK;
		}

		else if (currSCCB->Sccb_scsistat == SELECT_WN_ST) {
			/*
			 * Same during wide negotiation: record "negotiated,
			 * not wide" and clear the EEPROM wide bit.
			 */
			FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus =
			    (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
			     TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED;

			FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &=
			    ~EE_WIDE_SCSI;
		}

		else if (currSCCB->Sccb_scsistat == SELECT_Q_ST) {
			/* Make sure this is not a phony BUS_FREE.  If we were
			   reselected or if BUSY is NOT on then this is a
			   valid BUS FREE.  SRR Wednesday, 5/10/1995.     */

			if ((!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) ||
			    (RDW_HARPOON((port + hp_intstat)) & RSEL)) {
				/* Target rejected tagged queuing. */
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarStatus &= ~TAR_TAG_Q_MASK;
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarStatus |= TAG_Q_REJECT;
			}

			else {
				/* Phony bus free - ignore it. */
				return;
			}
		}

		else {
			/*
			 * Unexpected bus free: record a phase sequence
			 * failure (unless an error is already latched),
			 * free the LUN and complete the command.
			 */
			currSCCB->Sccb_scsistat = BUS_FREE_ST;

			if (!currSCCB->HostStatus) {
				currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
			}

			if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
			    ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
			      TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarLUNBusy[currSCCB->Lun] = 0;
			else
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarLUNBusy[0] = 0;

			FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB,
					     p_card);
			return;
		}

		FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;

	}			/*end if !=null */
}
/*---------------------------------------------------------------------
*
* Function: Auto Load Default Map
*
 * Description: Load the Automation RAM with the default map values.
*
*---------------------------------------------------------------------*/
static void FPT_autoLoadDefaultMap(unsigned long p_port)
{
unsigned long map_addr;
ARAM_ACCESS(p_port);
map_addr = p_port + hp_aramBase;
WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0xC0)); /*ID MESSAGE */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0x20)); /*SIMPLE TAG QUEUEING MSG */
map_addr += 2;
WRW_HARPOON(map_addr, RAT_OP); /*RESET ATTENTION */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0x00)); /*TAG ID MSG */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 0 */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 1 */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 2 */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 3 */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 4 */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 5 */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 6 */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 7 */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 8 */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 9 */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 10 */
map_addr += 2;
WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 11 */
map_addr += 2;
WRW_HARPOON(map_addr, (CPE_OP + ADATA_OUT + DINT)); /*JUMP IF DATA OUT */
map_addr += 2;
WRW_HARPOON(map_addr, (TCB_OP + FIFO_0 + DI)); /*JUMP IF NO DATA IN FIFO */
map_addr += 2; /*This means AYNC DATA IN */
WRW_HARPOON(map_addr, (SSI_OP + SSI_IDO_STRT)); /*STOP AND INTERRUPT */
map_addr += 2;
WRW_HARPOON(map_addr, (CPE_OP + ADATA_IN + DINT)); /*JUMP IF NOT DATA IN PHZ */
map_addr += 2;
WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + ST)); /*IF NOT MSG IN CHECK 4 DATA IN */
map_addr += 2;
WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x02)); /*SAVE DATA PTR MSG? */
map_addr += 2;
WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + DC)); /*GO CHECK FOR DISCONNECT MSG */
map_addr += 2;
WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_AR1)); /*SAVE DATA PTRS MSG */
map_addr += 2;
WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + ST)); /*IF NOT MSG IN CHECK DATA IN */
map_addr += 2;
WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x04)); /*DISCONNECT MSG? */
map_addr += 2;
WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + UNKNWN)); /*UKNKNOWN MSG */
map_addr += 2;
WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_BUCKET)); /*XFER DISCONNECT MSG */
map_addr += 2;
WRW_HARPOON(map_addr, (SSI_OP + SSI_ITAR_DISC)); /*STOP AND INTERRUPT */
map_addr += 2;
WRW_HARPOON(map_addr, (CPN_OP + ASTATUS + UNKNWN)); /*JUMP IF NOT STATUS PHZ. */
map_addr += 2;
WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_AR0)); /*GET STATUS BYTE */
map_addr += 2;
WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + CC)); /*ERROR IF NOT MSG IN PHZ */
map_addr += 2;
WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x00)); /*CHECK FOR CMD COMPLETE MSG. */
map_addr += 2;
WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + CC)); /*ERROR IF NOT CMD COMPLETE MSG. */
map_addr += 2;
WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_BUCKET)); /*GET CMD COMPLETE MSG */
map_addr += 2;
WRW_HARPOON(map_addr, (SSI_OP + SSI_ICMD_COMP)); /*END OF COMMAND */
map_addr += 2;
WRW_HARPOON(map_addr, (SSI_OP + SSI_IUNKWN)); /*RECEIVED UNKNOWN MSG BYTE */
map_addr += 2;
WRW_HARPOON(map_addr, (SSI_OP + SSI_INO_CC)); /*NO COMMAND COMPLETE AFTER STATUS */
map_addr += 2;
WRW_HARPOON(map_addr, (SSI_OP + SSI_ITICKLE)); /*BIOS Tickled the Mgr */
map_addr += 2;
WRW_HARPOON(map_addr, (SSI_OP + SSI_IRFAIL)); /*EXPECTED ID/TAG MESSAGES AND */
map_addr += 2; /* DIDN'T GET ONE */
WRW_HARPOON(map_addr, (CRR_OP + AR3 + S_IDREG)); /* comp SCSI SEL ID & AR3 */
map_addr += 2;
WRW_HARPOON(map_addr, (BRH_OP + EQUAL + 0x00)); /*SEL ID OK then Conti. */
map_addr += 2;
WRW_HARPOON(map_addr, (SSI_OP + SSI_INO_CC)); /*NO COMMAND COMPLETE AFTER STATUS */
SGRAM_ACCESS(p_port);
}
/*---------------------------------------------------------------------
*
* Function: Auto Command Complete
*
* Description: Post command back to host and find another command
* to execute.
*
*---------------------------------------------------------------------*/
/*
 * Post a completed command back to the host, after examining the SCSI
 * status byte left by the automation in hp_gp_reg_0.  Non-GOOD status
 * may trigger: a retry (queue full), negotiation bookkeeping (the
 * command was a sync/wide probe), renegotiation on check condition,
 * or an automatic REQUEST SENSE.
 */
static void FPT_autoCmdCmplt(unsigned long p_port, unsigned char p_card)
{
	struct sccb *currSCCB;
	unsigned char status_byte;

	currSCCB = FPT_BL_Card[p_card].currentSCCB;

	/* Automation leaves the target's status byte in GP reg 0. */
	status_byte = RD_HARPOON(p_port + hp_gp_reg_0);

	FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA = 0;

	if (status_byte != SSGOOD) {

		if (status_byte == SSQ_FULL) {
			/*
			 * Queue full: mark the LUN busy, remove the command
			 * from the disconnect queue and requeue it through
			 * the select-fail path for a later retry.
			 */
			if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
			     ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
			       TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) {
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarLUNBusy[currSCCB->Lun] = 1;
				if (FPT_BL_Card[p_card].discQCount != 0)
					FPT_BL_Card[p_card].discQCount--;
				FPT_BL_Card[p_card].
				    discQ_Tbl[FPT_sccbMgrTbl[p_card]
					      [currSCCB->TargID].
					      LunDiscQ_Idx[currSCCB->Lun]] =
				    NULL;
			} else {
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarLUNBusy[0] = 1;
				if (currSCCB->Sccb_tag) {
					/* Tagged: disconnect queue slot is
					 * indexed by the tag itself. */
					if (FPT_BL_Card[p_card].discQCount != 0)
						FPT_BL_Card[p_card].
						    discQCount--;
					FPT_BL_Card[p_card].discQ_Tbl[currSCCB->
								      Sccb_tag]
					    = NULL;
				} else {
					if (FPT_BL_Card[p_card].discQCount != 0)
						FPT_BL_Card[p_card].
						    discQCount--;
					FPT_BL_Card[p_card].
					    discQ_Tbl[FPT_sccbMgrTbl[p_card]
						      [currSCCB->TargID].
						      LunDiscQ_Idx[0]] = NULL;
				}
			}

			currSCCB->Sccb_MGRFlags |= F_STATUSLOADED;

			FPT_queueSelectFail(&FPT_BL_Card[p_card], p_card);

			return;
		}

		if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
			/*
			 * Sync negotiation probe rejected: mark sync handled,
			 * clear the EEPROM sync bits, and retry the command
			 * (same disconnect-queue cleanup as above).
			 */
			FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |=
			    (unsigned char)SYNC_SUPPORTED;

			FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &=
			    ~EE_SYNC_MASK;
			FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;

			if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
			     ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
			       TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) {
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarLUNBusy[currSCCB->Lun] = 1;
				if (FPT_BL_Card[p_card].discQCount != 0)
					FPT_BL_Card[p_card].discQCount--;
				FPT_BL_Card[p_card].
				    discQ_Tbl[FPT_sccbMgrTbl[p_card]
					      [currSCCB->TargID].
					      LunDiscQ_Idx[currSCCB->Lun]] =
				    NULL;
			} else {
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarLUNBusy[0] = 1;
				if (currSCCB->Sccb_tag) {
					if (FPT_BL_Card[p_card].discQCount != 0)
						FPT_BL_Card[p_card].
						    discQCount--;
					FPT_BL_Card[p_card].discQ_Tbl[currSCCB->
								      Sccb_tag]
					    = NULL;
				} else {
					if (FPT_BL_Card[p_card].discQCount != 0)
						FPT_BL_Card[p_card].
						    discQCount--;
					FPT_BL_Card[p_card].
					    discQ_Tbl[FPT_sccbMgrTbl[p_card]
						      [currSCCB->TargID].
						      LunDiscQ_Idx[0]] = NULL;
				}
			}
			return;
		}

		if (currSCCB->Sccb_scsistat == SELECT_WN_ST) {
			/*
			 * Wide negotiation probe rejected: record "negotiated,
			 * not wide", then same retry/cleanup path as sync.
			 */
			FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus =
			    (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
			     TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED;

			FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &=
			    ~EE_WIDE_SCSI;
			FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;

			if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
			     ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
			       TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) {
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarLUNBusy[currSCCB->Lun] = 1;
				if (FPT_BL_Card[p_card].discQCount != 0)
					FPT_BL_Card[p_card].discQCount--;
				FPT_BL_Card[p_card].
				    discQ_Tbl[FPT_sccbMgrTbl[p_card]
					      [currSCCB->TargID].
					      LunDiscQ_Idx[currSCCB->Lun]] =
				    NULL;
			} else {
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarLUNBusy[0] = 1;
				if (currSCCB->Sccb_tag) {
					if (FPT_BL_Card[p_card].discQCount != 0)
						FPT_BL_Card[p_card].
						    discQCount--;
					FPT_BL_Card[p_card].discQ_Tbl[currSCCB->
								      Sccb_tag]
					    = NULL;
				} else {
					if (FPT_BL_Card[p_card].discQCount != 0)
						FPT_BL_Card[p_card].
						    discQCount--;
					FPT_BL_Card[p_card].
					    discQ_Tbl[FPT_sccbMgrTbl[p_card]
						      [currSCCB->TargID].
						      LunDiscQ_Idx[0]] = NULL;
				}
			}
			return;
		}

		if (status_byte == SSCHECK) {
			/*
			 * On CHECK CONDITION, optionally force fresh sync/wide
			 * negotiation on the next command to this target.
			 */
			if (FPT_BL_Card[p_card].globalFlags & F_DO_RENEGO) {
				if (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarEEValue & EE_SYNC_MASK) {
					FPT_sccbMgrTbl[p_card][currSCCB->
							       TargID].
					    TarStatus &= ~TAR_SYNC_MASK;
				}
				if (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarEEValue & EE_WIDE_SCSI) {
					FPT_sccbMgrTbl[p_card][currSCCB->
							       TargID].
					    TarStatus &= ~TAR_WIDE_MASK;
				}
			}
		}

		if (!(currSCCB->Sccb_XferState & F_AUTO_SENSE)) {
			/* Not already doing auto-sense: report the error. */
			currSCCB->SccbStatus = SCCB_ERROR;
			currSCCB->TargetStatus = status_byte;

			if (status_byte == SSCHECK) {
				/* Contingent allegiance: remember it and,
				 * if requested, fire an auto REQUEST SENSE. */
				FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				    TarLUN_CA = 1;

				if (currSCCB->RequestSenseLength !=
				    NO_AUTO_REQUEST_SENSE) {

					if (currSCCB->RequestSenseLength == 0)
						currSCCB->RequestSenseLength =
						    14;

					FPT_ssenss(&FPT_BL_Card[p_card]);
					FPT_BL_Card[p_card].globalFlags |=
					    F_NEW_SCCB_CMD;

					/* Same disconnect-queue cleanup as in
					 * the retry paths above. */
					if (((FPT_BL_Card[p_card].
					      globalFlags & F_CONLUN_IO)
					     &&
					     ((FPT_sccbMgrTbl[p_card]
					       [currSCCB->TargID].
					       TarStatus & TAR_TAG_Q_MASK) !=
					      TAG_Q_TRYING))) {
						FPT_sccbMgrTbl[p_card]
						    [currSCCB->TargID].
						    TarLUNBusy[currSCCB->Lun] =
						    1;
						if (FPT_BL_Card[p_card].
						    discQCount != 0)
							FPT_BL_Card[p_card].
							    discQCount--;
						FPT_BL_Card[p_card].
						    discQ_Tbl[FPT_sccbMgrTbl
							      [p_card]
							      [currSCCB->
							       TargID].
							      LunDiscQ_Idx
							      [currSCCB->Lun]] =
						    NULL;
					} else {
						FPT_sccbMgrTbl[p_card]
						    [currSCCB->TargID].
						    TarLUNBusy[0] = 1;
						if (currSCCB->Sccb_tag) {
							if (FPT_BL_Card[p_card].
							    discQCount != 0)
								FPT_BL_Card
								    [p_card].
								    discQCount--;
							FPT_BL_Card[p_card].
							    discQ_Tbl[currSCCB->
								      Sccb_tag]
							    = NULL;
						} else {
							if (FPT_BL_Card[p_card].
							    discQCount != 0)
								FPT_BL_Card
								    [p_card].
								    discQCount--;
							FPT_BL_Card[p_card].
							    discQ_Tbl
							    [FPT_sccbMgrTbl
							     [p_card][currSCCB->
								      TargID].
							     LunDiscQ_Idx[0]] =
							    NULL;
						}
					}
					return;
				}
			}
		}
	}

	/* Normal completion (or unrecoverable error): free the LUN and
	 * hand the SCCB back to the host. */
	if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
	    ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
	      TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
		FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->
								    Lun] = 0;
	else
		FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = 0;

	FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB, p_card);
}
#define SHORT_WAIT 0x0000000F
#define LONG_WAIT 0x0000FFFFL
/*---------------------------------------------------------------------
*
* Function: Data Transfer Processor
*
* Description: This routine performs two tasks.
* (1) Start data transfer by calling HOST_DATA_XFER_START
* function. Once data transfer is started, (2) Depends
* on the type of data transfer mode Scatter/Gather mode
* or NON Scatter/Gather mode. In NON Scatter/Gather mode,
* this routine checks Sccb_MGRFlag (F_HOST_XFER_ACT bit) for
* data transfer done. In Scatter/Gather mode, this routine
* checks bus master command complete and dual rank busy
* bit to keep chaining SC transfer command. Similarly,
* in Scatter/Gather mode, it checks Sccb_MGRFlag
* (F_HOST_XFER_ACT bit) for data transfer done.
*
*---------------------------------------------------------------------*/
/*
 * Kick off (or continue) the host data transfer for the card's current
 * SCCB.  For scatter/gather transfers an already-active flag means the
 * previous SG batch completed, so advance to the next group of segments
 * before restarting; for flat transfers the start is issued only once.
 */
static void FPT_dataXferProcessor(unsigned long port,
				  struct sccb_card *pCurrCard)
{
	struct sccb *sccb = pCurrCard->currentSCCB;

	if (!(sccb->Sccb_XferState & F_SG_XFER)) {
		/* Flat transfer: start only if not already in progress. */
		if (!(pCurrCard->globalFlags & F_HOST_XFER_ACT)) {
			pCurrCard->globalFlags |= F_HOST_XFER_ACT;
			FPT_busMstrDataXferStart(port, sccb);
		}
		return;
	}

	/* Scatter/gather transfer. */
	if (pCurrCard->globalFlags & F_HOST_XFER_ACT) {
		/* Previous SG batch done: step past it. */
		sccb->Sccb_sgseg += (unsigned char)SG_BUF_CNT;
		sccb->Sccb_SGoffset = 0x00;
	}
	pCurrCard->globalFlags |= F_HOST_XFER_ACT;

	FPT_busMstrSGDataXferStart(port, sccb);
}
/*---------------------------------------------------------------------
*
* Function: BusMaster Scatter Gather Data Transfer Start
*
* Description:
*
*---------------------------------------------------------------------*/
/*
 * Program the bus master with the next batch (up to SG_BUF_CNT entries)
 * of scatter/gather descriptors and start the transfer.
 *
 * The SG list at pcurrSCCB->DataPointer appears to be pairs of 32-bit
 * words, (byte count, address) — count at the even index, address at
 * the odd one; TODO confirm against the SG list builder.
 */
static void FPT_busMstrSGDataXferStart(unsigned long p_port,
				       struct sccb *pcurrSCCB)
{
	unsigned long count, addr, tmpSGCnt;
	unsigned int sg_index;
	unsigned char sg_count, i;
	unsigned long reg_offset;

	/* The bus-master command rides in the top byte of each count word. */
	if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
		count = ((unsigned long)HOST_RD_CMD) << 24;
	}

	else {
		count = ((unsigned long)HOST_WRT_CMD) << 24;
	}

	sg_count = 0;
	tmpSGCnt = 0;		/* Total bytes in this batch. */
	sg_index = pcurrSCCB->Sccb_sgseg;
	reg_offset = hp_aramBase;

	/* Switch ARAM access off and disable scatter while loading. */
	i = (unsigned char)(RD_HARPOON(p_port + hp_page_ctrl) &
			    ~(SGRAM_ARAM | SCATTER_EN));

	WR_HARPOON(p_port + hp_page_ctrl, i);

	while ((sg_count < (unsigned char)SG_BUF_CNT) &&
	       ((unsigned long)(sg_index * (unsigned int)SG_ELEMENT_SIZE) <
		pcurrSCCB->DataLength)) {

		tmpSGCnt += *(((unsigned long *)pcurrSCCB->DataPointer) +
			      (sg_index * 2));

		count |= *(((unsigned long *)pcurrSCCB->DataPointer) +
			   (sg_index * 2));

		addr = *(((unsigned long *)pcurrSCCB->DataPointer) +
			 ((sg_index * 2) + 1));

		if ((!sg_count) && (pcurrSCCB->Sccb_SGoffset)) {
			/*
			 * Resuming mid-segment: skip the bytes already
			 * transferred and shrink the first count to the
			 * remaining offset.
			 */
			addr +=
			    ((count & 0x00FFFFFFL) - pcurrSCCB->Sccb_SGoffset);
			count =
			    (count & 0xFF000000L) | pcurrSCCB->Sccb_SGoffset;

			tmpSGCnt = count & 0x00FFFFFFL;
		}

		/* Write the (address, count) pair into ARAM. */
		WR_HARP32(p_port, reg_offset, addr);
		reg_offset += 4;

		WR_HARP32(p_port, reg_offset, count);
		reg_offset += 4;

		count &= 0xFF000000L;	/* Keep only the command byte. */
		sg_index++;
		sg_count++;

	}			/*End While */

	pcurrSCCB->Sccb_XferCnt = tmpSGCnt;

	WR_HARPOON(p_port + hp_sg_addr, (sg_count << 4));

	if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
		/* Data in: program the count and point the port at SCSI in. */
		WR_HARP32(p_port, hp_xfercnt_0, tmpSGCnt);

		WR_HARPOON(p_port + hp_portctrl_0,
			   (DMA_PORT | SCSI_PORT | SCSI_INBIT));
		WR_HARPOON(p_port + hp_scsisig, S_DATAI_PH);
	}

	else {
		/*
		 * Data out on a wide bus with an odd byte count: flag the
		 * odd trailing byte and transfer an even count here.
		 */
		if ((!(RD_HARPOON(p_port + hp_synctarg_0) & NARROW_SCSI)) &&
		    (tmpSGCnt & 0x000000001)) {
			pcurrSCCB->Sccb_XferState |= F_ODD_BALL_CNT;
			tmpSGCnt--;
		}

		WR_HARP32(p_port, hp_xfercnt_0, tmpSGCnt);

		WR_HARPOON(p_port + hp_portctrl_0,
			   (SCSI_PORT | DMA_PORT | DMA_RD));
		WR_HARPOON(p_port + hp_scsisig, S_DATAO_PH);
	}

	/* Re-enable scatter mode; this also starts the transfer engine. */
	WR_HARPOON(p_port + hp_page_ctrl, (unsigned char)(i | SCATTER_EN));

}
/*---------------------------------------------------------------------
*
* Function: BusMaster Data Transfer Start
*
* Description:
*
*---------------------------------------------------------------------*/
/*
 * Start a flat (non-scatter/gather) bus-master transfer for the SCCB.
 * For auto-sense the sense buffer is used instead of the data buffer.
 */
static void FPT_busMstrDataXferStart(unsigned long p_port,
				     struct sccb *pcurrSCCB)
{
	unsigned long addr, count;

	if (!(pcurrSCCB->Sccb_XferState & F_AUTO_SENSE)) {
		/* Resume from where the transfer last stopped (Sccb_ATC). */
		count = pcurrSCCB->Sccb_XferCnt;

		addr =
		    (unsigned long)pcurrSCCB->DataPointer + pcurrSCCB->Sccb_ATC;
	}

	else {
		/* Auto REQUEST SENSE: transfer into the sense buffer. */
		addr = pcurrSCCB->SensePointer;
		count = pcurrSCCB->RequestSenseLength;

	}

	HP_SETUP_ADDR_CNT(p_port, addr, count);

	if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
		/* Data in: SCSI -> host. */
		WR_HARPOON(p_port + hp_portctrl_0,
			   (DMA_PORT | SCSI_PORT | SCSI_INBIT));
		WR_HARPOON(p_port + hp_scsisig, S_DATAI_PH);

		WR_HARPOON(p_port + hp_xfer_cmd,
			   (XFER_DMA_HOST | XFER_HOST_AUTO | XFER_DMA_8BIT));
	}

	else {
		/* Data out: host -> SCSI. */
		WR_HARPOON(p_port + hp_portctrl_0,
			   (SCSI_PORT | DMA_PORT | DMA_RD));
		WR_HARPOON(p_port + hp_scsisig, S_DATAO_PH);

		WR_HARPOON(p_port + hp_xfer_cmd,
			   (XFER_HOST_DMA | XFER_HOST_AUTO | XFER_DMA_8BIT));

	}
}
/*---------------------------------------------------------------------
*
* Function: BusMaster Timeout Handler
*
 * Description: This function is called after a bus master command busy
 *              time out is detected.  It first halts the state machine
 *              with a software time out on command busy.  If command busy
 *              is still asserted at the end of the time out, it issues a
 *              hard abort with another software time out.  If the hard
 *              abort also times out on command busy, it simply gives up.
*
*---------------------------------------------------------------------*/
/*
 * Recover from a bus-master command-busy time out.  First halt the
 * state machine and poll (bounded) for the abort acknowledge; if the
 * command is still busy, escalate to a hard abort with another bounded
 * poll.  Returns 1 if the bus master is still busy afterwards (give
 * up), 0 on successful recovery.
 */
static unsigned char FPT_busMstrTimeOut(unsigned long p_port)
{
	unsigned long ticks = LONG_WAIT;

	/* Ask the state machine to halt, then wait for the abort ack. */
	WR_HARPOON(p_port + hp_sys_ctrl, HALT_MACH);
	while (!(RD_HARPOON(p_port + hp_ext_status) & CMD_ABORTED) && ticks--)
		;

	if (RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY) {
		/* Halt did not take - escalate to a hard abort. */
		WR_HARPOON(p_port + hp_sys_ctrl, HARD_ABORT);

		ticks = LONG_WAIT;
		while ((RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY) &&
		       ticks--)
			;
	}

	RD_HARPOON(p_port + hp_int_status);	/*Clear command complete */

	/* Non-zero means the bus master is still wedged. */
	return (RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY) ? 1 : 0;
}
/*---------------------------------------------------------------------
*
* Function: Host Data Transfer Abort
*
* Description: Abort any in progress transfer.
*
*---------------------------------------------------------------------*/
/*
 * Abort any in-progress host data transfer for pCurrSCCB, flushing the
 * bus master as needed and resynchronizing the SCCB's scatter/gather
 * position (Sccb_sgseg / Sccb_SGoffset) or completion state so the
 * transfer can be restarted or finished later.  Sets SCCB_BM_ERR /
 * SCCB_GROSS_FW_ERR in HostStatus on unrecoverable hardware errors.
 */
static void FPT_hostDataXferAbort(unsigned long port, unsigned char p_card,
				  struct sccb *pCurrSCCB)
{

	unsigned long timeout;
	unsigned long remain_cnt;
	unsigned int sg_ptr;

	FPT_BL_Card[p_card].globalFlags &= ~F_HOST_XFER_ACT;

	if (pCurrSCCB->Sccb_XferState & F_AUTO_SENSE) {
		/* Auto-sense transfer: just flush the counter if the
		 * command has not already completed. */
		if (!(RD_HARPOON(port + hp_int_status) & INT_CMD_COMPL)) {

			WR_HARPOON(port + hp_bm_ctrl,
				   (RD_HARPOON(port + hp_bm_ctrl) |
				    FLUSH_XFER_CNTR));
			timeout = LONG_WAIT;

			while ((RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY)
			       && timeout--) {
			}

			WR_HARPOON(port + hp_bm_ctrl,
				   (RD_HARPOON(port + hp_bm_ctrl) &
				    ~FLUSH_XFER_CNTR));

			if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) {
				/* Flush timed out: hard recovery, latch the
				 * first error seen. */
				if (FPT_busMstrTimeOut(port)) {

					if (pCurrSCCB->HostStatus == 0x00)

						pCurrSCCB->HostStatus =
						    SCCB_BM_ERR;

				}

				if (RD_HARPOON(port + hp_int_status) &
				    INT_EXT_STATUS)

					if (RD_HARPOON(port + hp_ext_status) &
					    BAD_EXT_STATUS)

						if (pCurrSCCB->HostStatus ==
						    0x00)
						{
							pCurrSCCB->HostStatus =
							    SCCB_BM_ERR;
						}
			}
		}
	}

	else if (pCurrSCCB->Sccb_XferCnt) {
		/* Transfer was in flight with bytes still outstanding. */

		if (pCurrSCCB->Sccb_XferState & F_SG_XFER) {
			/*
			 * Scatter/gather: walk back from the end of the
			 * loaded batch to find which segment the residual
			 * count falls in, and record segment + offset.
			 */

			WR_HARPOON(port + hp_page_ctrl,
				   (RD_HARPOON(port + hp_page_ctrl) &
				    ~SCATTER_EN));

			WR_HARPOON(port + hp_sg_addr, 0x00);

			sg_ptr = pCurrSCCB->Sccb_sgseg + SG_BUF_CNT;

			if (sg_ptr >
			    (unsigned int)(pCurrSCCB->DataLength /
					   SG_ELEMENT_SIZE)) {

				/* Clamp to the actual number of segments. */
				sg_ptr =
				    (unsigned int)(pCurrSCCB->DataLength /
						   SG_ELEMENT_SIZE);
			}

			remain_cnt = pCurrSCCB->Sccb_XferCnt;

			/* 0x01000000 guard: counts are 24-bit, so wrapping
			 * past zero makes remain_cnt huge and ends the walk. */
			while (remain_cnt < 0x01000000L) {

				sg_ptr--;

				if (remain_cnt >
				    (unsigned
				     long)(*(((unsigned long *)pCurrSCCB->
					      DataPointer) + (sg_ptr * 2)))) {

					remain_cnt -=
					    (unsigned
					     long)(*(((unsigned long *)
						      pCurrSCCB->DataPointer) +
						     (sg_ptr * 2)));
				}

				else {

					break;
				}
			}

			if (remain_cnt < 0x01000000L) {

				pCurrSCCB->Sccb_SGoffset = remain_cnt;

				pCurrSCCB->Sccb_sgseg = (unsigned short)sg_ptr;

				if ((unsigned long)(sg_ptr * SG_ELEMENT_SIZE) ==
				    pCurrSCCB->DataLength && (remain_cnt == 0))

					pCurrSCCB->Sccb_XferState |=
					    F_ALL_XFERRED;
			}

			else {
				/* Walked off the front of the list: the
				 * hardware count is inconsistent. */

				if (pCurrSCCB->HostStatus == 0x00) {

					pCurrSCCB->HostStatus =
					    SCCB_GROSS_FW_ERR;
				}
			}
		}

		if (!(pCurrSCCB->Sccb_XferState & F_HOST_XFER_DIR)) {
			/* Data out: the bus master should already be idle;
			 * hard-abort if not, and latch any bad status. */

			if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) {

				FPT_busMstrTimeOut(port);
			}

			else {

				if (RD_HARPOON(port + hp_int_status) &
				    INT_EXT_STATUS) {

					if (RD_HARPOON(port + hp_ext_status) &
					    BAD_EXT_STATUS) {

						if (pCurrSCCB->HostStatus ==
						    0x00) {

							pCurrSCCB->HostStatus =
							    SCCB_BM_ERR;
						}
					}
				}

			}
		}

		else {
			/* Data in: let a nearly-full FIFO drain briefly,
			 * then flush the transfer counter. */

			if ((RD_HARPOON(port + hp_fifo_cnt)) >= BM_THRESHOLD) {

				timeout = SHORT_WAIT;

				while ((RD_HARPOON(port + hp_ext_status) &
					BM_CMD_BUSY)
				       && ((RD_HARPOON(port + hp_fifo_cnt)) >=
					   BM_THRESHOLD) && timeout--) {
				}
			}

			if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) {

				WR_HARPOON(port + hp_bm_ctrl,
					   (RD_HARPOON(port + hp_bm_ctrl) |
					    FLUSH_XFER_CNTR));

				timeout = LONG_WAIT;

				while ((RD_HARPOON(port + hp_ext_status) &
					BM_CMD_BUSY) && timeout--) {
				}

				WR_HARPOON(port + hp_bm_ctrl,
					   (RD_HARPOON(port + hp_bm_ctrl) &
					    ~FLUSH_XFER_CNTR));

				if (RD_HARPOON(port + hp_ext_status) &
				    BM_CMD_BUSY) {

					if (pCurrSCCB->HostStatus == 0x00) {

						pCurrSCCB->HostStatus =
						    SCCB_BM_ERR;
					}

					FPT_busMstrTimeOut(port);
				}
			}

			if (RD_HARPOON(port + hp_int_status) & INT_EXT_STATUS) {

				if (RD_HARPOON(port + hp_ext_status) &
				    BAD_EXT_STATUS) {

					if (pCurrSCCB->HostStatus == 0x00) {

						pCurrSCCB->HostStatus =
						    SCCB_BM_ERR;
					}
				}
			}
		}
	}

	else {
		/* No residual count: transfer had completed (or never
		 * started); make sure the bus master is quiesced. */

		if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) {

			timeout = LONG_WAIT;

			while ((RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY)
			       && timeout--) {
			}

			if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) {

				if (pCurrSCCB->HostStatus == 0x00) {

					pCurrSCCB->HostStatus = SCCB_BM_ERR;
				}

				FPT_busMstrTimeOut(port);
			}
		}

		if (RD_HARPOON(port + hp_int_status) & INT_EXT_STATUS) {

			if (RD_HARPOON(port + hp_ext_status) & BAD_EXT_STATUS) {

				if (pCurrSCCB->HostStatus == 0x00) {

					pCurrSCCB->HostStatus = SCCB_BM_ERR;
				}
			}

		}

		if (pCurrSCCB->Sccb_XferState & F_SG_XFER) {
			/* Whole SG batch done: advance to the next batch
			 * or mark the transfer fully complete. */

			WR_HARPOON(port + hp_page_ctrl,
				   (RD_HARPOON(port + hp_page_ctrl) &
				    ~SCATTER_EN));

			WR_HARPOON(port + hp_sg_addr, 0x00);

			pCurrSCCB->Sccb_sgseg += SG_BUF_CNT;

			pCurrSCCB->Sccb_SGoffset = 0x00;

			if ((unsigned long)(pCurrSCCB->Sccb_sgseg *
					    SG_ELEMENT_SIZE) >=
			    pCurrSCCB->DataLength) {

				pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED;

				pCurrSCCB->Sccb_sgseg =
				    (unsigned short)(pCurrSCCB->DataLength /
						     SG_ELEMENT_SIZE);

			}
		}

		else {
			if (!(pCurrSCCB->Sccb_XferState & F_AUTO_SENSE))

				pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED;
		}
	}

	WR_HARPOON(port + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT));
}
/*---------------------------------------------------------------------
*
* Function: Host Data Transfer Restart
*
* Description: Reset the available count due to a restore data
* pointers message.
*
*---------------------------------------------------------------------*/
/*
 * Reset the SCCB's transfer position after a RESTORE DATA POINTERS
 * message: recompute which scatter/gather segment (Sccb_sgseg) and
 * intra-segment offset (Sccb_SGoffset) correspond to the accepted
 * transfer count (Sccb_ATC), or the remaining flat count for non-SG.
 *
 * Fix: the original seeded the segment index with 0xffff so that the
 * first increment would wrap to 0 — correct only when "unsigned int"
 * was 16 bits (the original DOS SCCB manager).  With a 32-bit unsigned
 * int the first list access used index 0x10000, reading far out of
 * bounds.  The walk below starts at 0 and preserves the intended
 * 16-bit semantics exactly, including the Sccb_ATC == 0 case.
 */
static void FPT_hostDataXferRestart(struct sccb *currSCCB)
{
	unsigned long data_count;
	unsigned int sg_index;
	unsigned long *sg_ptr;

	if (currSCCB->Sccb_XferState & F_SG_XFER) {

		currSCCB->Sccb_XferCnt = 0;

		sg_index = 0;	/* Segments consumed so far. */
		data_count = 0;	/* Running count of SG xfer counts. */

		/* SG list is (count, address) pairs of 32-bit words;
		 * counts sit at the even word indices. */
		sg_ptr = (unsigned long *)currSCCB->DataPointer;

		/* Accumulate segment counts until we cover Sccb_ATC. */
		while (data_count < currSCCB->Sccb_ATC) {
			data_count += *(sg_ptr + (sg_index * 2));
			sg_index++;
		}

		if (data_count == currSCCB->Sccb_ATC) {
			/* Exactly on a boundary: restart at the next
			 * segment (sg_index already points past it). */
			currSCCB->Sccb_SGoffset = 0;
		} else {
			/* Mid-segment: back up to the partial segment and
			 * record how many of its bytes remain. */
			currSCCB->Sccb_SGoffset =
			    data_count - currSCCB->Sccb_ATC;
			sg_index--;
		}

		currSCCB->Sccb_sgseg = (unsigned short)sg_index;
	}

	else {
		/* Flat transfer: whatever wasn't accepted remains. */
		currSCCB->Sccb_XferCnt =
		    currSCCB->DataLength - currSCCB->Sccb_ATC;
	}
}
/*---------------------------------------------------------------------
*
* Function: FPT_scini
*
* Description: Setup all data structures necessary for SCAM selection.
*
*---------------------------------------------------------------------*/
/*
 * Run SCAM (SCSI Configured AutoMagically) initialization for a card:
 * read the SCAM/system configuration, arbitrate to become the dominant
 * master, probe legacy devices, and either assign IDs (as master) or
 * accept one (as a SCAM slave), finally saving updated ID strings to
 * EEPROM if required.
 */
static void FPT_scini(unsigned char p_card, unsigned char p_our_id,
		      unsigned char p_power_up)
{

	unsigned char loser, assigned_id;
	unsigned long p_port;

	unsigned char i, k, ScamFlg;
	struct sccb_card *currCard;
	struct nvram_info *pCurrNvRam;

	currCard = &FPT_BL_Card[p_card];
	p_port = currCard->ioPort;
	pCurrNvRam = currCard->pNvRamInfo;

	/* Configuration comes from NVRAM if present, else the EEPROM. */
	if (pCurrNvRam) {
		ScamFlg = pCurrNvRam->niScamConf;
		i = pCurrNvRam->niSysConf;
	} else {
		ScamFlg =
		    (unsigned char)FPT_utilEERead(p_port, SCAM_CONFIG / 2);
		i = (unsigned
		     char)(FPT_utilEERead(p_port, (SYSTEM_CONFIG / 2)));
	}
	if (!(i & 0x02))	/* check if reset bus in AutoSCSI parameter set */
		return;

	FPT_inisci(p_card, p_port, p_our_id);

	/* Force to wait 1 sec after SCSI bus reset. Some SCAM device FW
	   too slow to return to SCAM selection */

	/* if (p_power_up)
	   FPT_Wait1Second(p_port);
	   else
	   FPT_Wait(p_port, TO_250ms); */

	FPT_Wait1Second(p_port);

	if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2)) {
		/* SCAM level 2: arbitrate for dominant-master role by
		 * exchanging ID strings until a winner emerges. */
		while (!(FPT_scarb(p_port, INIT_SELTD))) {
		}

		FPT_scsel(p_port);

		do {
			FPT_scxferc(p_port, SYNC_PTRN);
			FPT_scxferc(p_port, DOM_MSTR);
			loser =
			    FPT_scsendi(p_port,
					&FPT_scamInfo[p_our_id].id_string[0]);
		} while (loser == 0xFF);	/* 0xFF = isolation must restart */

		FPT_scbusf(p_port);

		if ((p_power_up) && (!loser)) {
			/* We won at power-up: reset the bus and re-run the
			 * isolation to be sure everyone saw it. */
			FPT_sresb(p_port, p_card);
			FPT_Wait(p_port, TO_250ms);

			while (!(FPT_scarb(p_port, INIT_SELTD))) {
			}

			FPT_scsel(p_port);

			do {
				FPT_scxferc(p_port, SYNC_PTRN);
				FPT_scxferc(p_port, DOM_MSTR);
				loser =
				    FPT_scsendi(p_port,
						&FPT_scamInfo[p_our_id].
						id_string[0]);
			} while (loser == 0xFF);

			FPT_scbusf(p_port);
		}
	}

	else {
		/* SCAM disabled or level 1: act as the dominant master. */
		loser = 0;
	}

	if (!loser) {
		/* We are the dominant master: keep our ID and, if SCAM is
		 * on, detect legacy devices and assign IDs to the rest. */

		FPT_scamInfo[p_our_id].state = ID_ASSIGNED;

		if (ScamFlg & SCAM_ENABLED) {

			for (i = 0; i < MAX_SCSI_TAR; i++) {
				if ((FPT_scamInfo[i].state == ID_UNASSIGNED) ||
				    (FPT_scamInfo[i].state == ID_UNUSED)) {
					if (FPT_scsell(p_port, i)) {
						/* Responds to legacy select:
						 * mark it LEGACY with the
						 * reserved 0xFF/0xFA tag. */
						FPT_scamInfo[i].state = LEGACY;
						if ((FPT_scamInfo[i].
						     id_string[0] != 0xFF)
						    || (FPT_scamInfo[i].
							id_string[1] != 0xFA)) {

							FPT_scamInfo[i].
							    id_string[0] = 0xFF;
							FPT_scamInfo[i].
							    id_string[1] = 0xFA;
							if (pCurrNvRam == NULL)
								currCard->
								    globalFlags
								    |=
								    F_UPDATE_EEPROM;
						}
					}
				}
			}

			FPT_sresb(p_port, p_card);
			FPT_Wait1Second(p_port);
			while (!(FPT_scarb(p_port, INIT_SELTD))) {
			}
			FPT_scsel(p_port);
			FPT_scasid(p_card, p_port);
		}

	}

	else if ((loser) && (ScamFlg & SCAM_ENABLED)) {
		/* We lost: become a SCAM slave and wait for the dominant
		 * master to assign us an ID. */
		FPT_scamInfo[p_our_id].id_string[0] = SLV_TYPE_CODE0;
		assigned_id = 0;
		FPT_scwtsel(p_port);

		do {
			while (FPT_scxferc(p_port, 0x00) != SYNC_PTRN) {
			}

			i = FPT_scxferc(p_port, 0x00);
			if (i == ASSIGN_ID) {
				/* Our ID string matched: read the two
				 * quintets that encode the assigned ID. */
				if (!
				    (FPT_scsendi
				     (p_port,
				      &FPT_scamInfo[p_our_id].id_string[0]))) {
					i = FPT_scxferc(p_port, 0x00);
					if (FPT_scvalq(i)) {
						k = FPT_scxferc(p_port, 0x00);

						if (FPT_scvalq(k)) {
							currCard->ourId =
							    ((unsigned char)(i
									     <<
									     3)
							     +
							     (k &
							      (unsigned char)7))
							    & (unsigned char)
							    0x3F;
							FPT_inisci(p_card,
								   p_port,
								   p_our_id);
							FPT_scamInfo[currCard->
								     ourId].
							    state = ID_ASSIGNED;
							FPT_scamInfo[currCard->
								     ourId].
							    id_string[0]
							    = SLV_TYPE_CODE0;
							assigned_id = 1;
						}
					}
				}
			}

			else if (i == SET_P_FLAG) {
				/* Master told us to set our priority flag. */
				if (!(FPT_scsendi(p_port,
						  &FPT_scamInfo[p_our_id].
						  id_string[0])))
					FPT_scamInfo[p_our_id].id_string[0] |=
					    0x80;
			}
		} while (!assigned_id);

		while (FPT_scxferc(p_port, 0x00) != CFG_CMPLT) {
		}
	}

	if (ScamFlg & SCAM_ENABLED) {
		FPT_scbusf(p_port);
		if (currCard->globalFlags & F_UPDATE_EEPROM) {
			FPT_scsavdi(p_card, p_port);
			currCard->globalFlags &= ~F_UPDATE_EEPROM;
		}
	}

/*
   for (i=0,k=0; i < MAX_SCSI_TAR; i++)
      {
      if ((FPT_scamInfo[i].state == ID_ASSIGNED) ||
         (FPT_scamInfo[i].state == LEGACY))
         k++;
      }

   if (k==2)
      currCard->globalFlags |= F_SINGLE_DEVICE;
   else
      currCard->globalFlags &= ~F_SINGLE_DEVICE;
*/
}
/*---------------------------------------------------------------------
*
* Function: FPT_scarb
*
* Description: Gain control of the bus and wait SCAM select time (250ms)
*
*---------------------------------------------------------------------*/
/*
 * Gain control of the SCSI bus for SCAM and wait the SCAM selection
 * time (250 ms).  For INIT_SELTD the full BSY/SEL arbitration sequence
 * is performed first; returns 0 if the bus could not be won, 1 when
 * SCAM selection is established.
 */
static int FPT_scarb(unsigned long p_port, unsigned char p_sel_type)
{
	if (p_sel_type == INIT_SELTD) {
		/* Wait for a quiet bus, then claim it with BSY + SEL,
		 * backing off if anyone else drives SEL or data lines. */

		while (RD_HARPOON(p_port + hp_scsisig) & (SCSI_SEL | SCSI_BSY)) {
		}

		if (RD_HARPOON(p_port + hp_scsisig) & SCSI_SEL)
			return 0;

		if (RD_HARPOON(p_port + hp_scsidata_0) != 00)
			return 0;

		WR_HARPOON(p_port + hp_scsisig,
			   (RD_HARPOON(p_port + hp_scsisig) | SCSI_BSY));

		if (RD_HARPOON(p_port + hp_scsisig) & SCSI_SEL) {
			/* Lost arbitration: release BSY. */

			WR_HARPOON(p_port + hp_scsisig,
				   (RD_HARPOON(p_port + hp_scsisig) &
				    ~SCSI_BSY));
			return 0;
		}

		WR_HARPOON(p_port + hp_scsisig,
			   (RD_HARPOON(p_port + hp_scsisig) | SCSI_SEL));

		if (RD_HARPOON(p_port + hp_scsidata_0) != 00) {
			/* Someone drove data during our claim: back off. */

			WR_HARPOON(p_port + hp_scsisig,
				   (RD_HARPOON(p_port + hp_scsisig) &
				    ~(SCSI_BSY | SCSI_SEL)));
			return 0;
		}
	}

	/* Bus is ours: enable SCAM mode with clean data lines and
	 * signal MSG while dropping BSY, then wait the SCAM time. */
	WR_HARPOON(p_port + hp_clkctrl_0, (RD_HARPOON(p_port + hp_clkctrl_0)
					   & ~ACTdeassert));
	WR_HARPOON(p_port + hp_scsireset, SCAM_EN);
	WR_HARPOON(p_port + hp_scsidata_0, 0x00);
	WR_HARPOON(p_port + hp_scsidata_1, 0x00);
	WR_HARPOON(p_port + hp_portctrl_0, SCSI_BUS_EN);

	WR_HARPOON(p_port + hp_scsisig,
		   (RD_HARPOON(p_port + hp_scsisig) | SCSI_MSG));

	WR_HARPOON(p_port + hp_scsisig, (RD_HARPOON(p_port + hp_scsisig)
					 & ~SCSI_BSY));

	FPT_Wait(p_port, TO_250ms);

	return 1;
}
/*---------------------------------------------------------------------
*
* Function: FPT_scbusf
*
* Description: Release the SCSI bus and disable SCAM selection.
*
*---------------------------------------------------------------------*/
/*
 * Release the SCSI bus and disable SCAM selection, with global
 * interrupts masked around the teardown sequence.
 */
static void FPT_scbusf(unsigned long p_port)
{
	/* Mask global interrupts while tearing the bus down. */
	WR_HARPOON(p_port + hp_page_ctrl,
		   (RD_HARPOON(p_port + hp_page_ctrl) | G_INT_DISABLE));

	/* Drop the data lines and take the chip off the bus. */
	WR_HARPOON(p_port + hp_scsidata_0, 0x00);

	WR_HARPOON(p_port + hp_portctrl_0, (RD_HARPOON(p_port + hp_portctrl_0)
					    & ~SCSI_BUS_EN));

	WR_HARPOON(p_port + hp_scsisig, 0x00);

	/* Disable SCAM mode and restore ACT deassert. */
	WR_HARPOON(p_port + hp_scsireset, (RD_HARPOON(p_port + hp_scsireset)
					   & ~SCAM_EN));

	WR_HARPOON(p_port + hp_clkctrl_0, (RD_HARPOON(p_port + hp_clkctrl_0)
					   | ACTdeassert));

	/* Clear any interrupts latched during SCAM operation. */
	WRW_HARPOON((p_port + hp_intstat), (BUS_FREE | AUTO_INT | SCAM_SEL));

	/* Unmask global interrupts again. */
	WR_HARPOON(p_port + hp_page_ctrl,
		   (RD_HARPOON(p_port + hp_page_ctrl) & ~G_INT_DISABLE));
}
/*---------------------------------------------------------------------
*
* Function: FPT_scasid
*
* Description: Assign an ID to all the SCAM devices.
*
*---------------------------------------------------------------------*/
/*
 * As dominant master, run ASSIGN ID cycles until every SCAM device on
 * the bus has isolated and been given a SCSI ID, then send CFG_CMPLT.
 */
static void FPT_scasid(unsigned char p_card, unsigned long p_port)
{
	unsigned char temp_id_string[ID_STRING_LENGTH];

	unsigned char i, k, scam_id;
	unsigned char crcBytes[3];
	struct nvram_info *pCurrNvRam;
	unsigned short *pCrcBytes;

	pCurrNvRam = FPT_BL_Card[p_card].pNvRamInfo;

	i = 0;	/* Becomes 1 when no more devices isolate. */

	while (!i) {

		for (k = 0; k < ID_STRING_LENGTH; k++) {
			temp_id_string[k] = (unsigned char)0x00;
		}

		FPT_scxferc(p_port, SYNC_PTRN);
		FPT_scxferc(p_port, ASSIGN_ID);

		if (!(FPT_sciso(p_port, &temp_id_string[0]))) {
			/* A device isolated and sent its ID string. */
			if (pCurrNvRam) {
				/* NVRAM cards store a compressed ID: LRC +
				 * CRC16 of the string replace bytes 1..3. */
				pCrcBytes = (unsigned short *)&crcBytes[0];
				*pCrcBytes = FPT_CalcCrc16(&temp_id_string[0]);
				crcBytes[2] = FPT_CalcLrc(&temp_id_string[0]);
				temp_id_string[1] = crcBytes[2];
				temp_id_string[2] = crcBytes[0];
				temp_id_string[3] = crcBytes[1];
				for (k = 4; k < ID_STRING_LENGTH; k++)
					temp_id_string[k] = (unsigned char)0x00;
			}

			i = FPT_scmachid(p_card, temp_id_string);

			if (i == CLR_PRIORITY) {
				/* Device keeps its current ID; just clear
				 * its priority flag. */
				FPT_scxferc(p_port, MISC_CODE);
				FPT_scxferc(p_port, CLR_P_FLAG);
				i = 0;	/*Not the last ID yet. */
			}

			else if (i != NO_ID_AVAIL) {
				/* Send the assigned ID as two quintets. */
				if (i < 8)
					FPT_scxferc(p_port, ID_0_7);
				else
					FPT_scxferc(p_port, ID_8_F);

				scam_id = (i & (unsigned char)0x07);

				for (k = 1; k < 0x08; k <<= 1)
					if (!(k & i))
						scam_id += 0x08;	/*Count number of zeros in DB0-3. */

				FPT_scxferc(p_port, scam_id);

				i = 0;	/*Not the last ID yet. */
			}
		}

		else {
			/* Isolation failed: no more devices to assign. */
			i = 1;
		}

	}			/*End while */

	FPT_scxferc(p_port, SYNC_PTRN);
	FPT_scxferc(p_port, CFG_CMPLT);
}
/*---------------------------------------------------------------------
*
* Function: FPT_scsel
*
* Description: Select all the SCAM devices.
*
*---------------------------------------------------------------------*/
/*
 * Perform the SCAM selection handshake so all SCAM devices on the bus
 * participate: sequence SEL/BSY/IO/CD while toggling DB7/DB6 per the
 * SCAM selection protocol.
 */
static void FPT_scsel(unsigned long p_port)
{

	WR_HARPOON(p_port + hp_scsisig, SCSI_SEL);
	FPT_scwiros(p_port, SCSI_MSG);	/* Wait for MSG to release. */

	WR_HARPOON(p_port + hp_scsisig, (SCSI_SEL | SCSI_BSY));

	WR_HARPOON(p_port + hp_scsisig,
		   (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD));
	/* Assert DB7 and DB6 (SCAM selection data pattern). */
	WR_HARPOON(p_port + hp_scsidata_0,
		   (unsigned char)(RD_HARPOON(p_port + hp_scsidata_0) |
				   (unsigned char)(BIT(7) + BIT(6))));

	WR_HARPOON(p_port + hp_scsisig, (SCSI_BSY | SCSI_IOBIT | SCSI_CD));
	FPT_scwiros(p_port, SCSI_SEL);	/* Wait for SEL to release. */

	/* Release DB6 and wait for all devices to do the same. */
	WR_HARPOON(p_port + hp_scsidata_0,
		   (unsigned char)(RD_HARPOON(p_port + hp_scsidata_0) &
				   ~(unsigned char)BIT(6)));
	FPT_scwirod(p_port, BIT(6));	/* Wait for DB6 to be released. */

	WR_HARPOON(p_port + hp_scsisig,
		   (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD));
}
/*---------------------------------------------------------------------
*
* Function: FPT_scxferc
*
* Description: Handshake the p_data (DB4-0) across the bus.
*
*---------------------------------------------------------------------*/
/*
 * Handshake one SCAM quintet (DB4-0) across the bus using DB7/DB6/DB5 as
 * the transfer-control lines.  Returns the five data bits sampled from
 * the bus (the wired-OR of all participants).  The assert/release order
 * of DB7, DB5 and DB6 below implements the SCAM transfer cycle and must
 * not be reordered.
 */
static unsigned char FPT_scxferc(unsigned long p_port, unsigned char p_data)
{
unsigned char curr_data, ret_data;
curr_data = p_data | BIT(7) | BIT(5); /*Start with DB7 & DB5 asserted. */
WR_HARPOON(p_port + hp_scsidata_0, curr_data);
curr_data &= ~BIT(7);
WR_HARPOON(p_port + hp_scsidata_0, curr_data);
FPT_scwirod(p_port, BIT(7)); /*Wait for DB7 to be released. */
/* Wait for the other side to assert DB5, then sample DB4-0. */
while (!(RD_HARPOON(p_port + hp_scsidata_0) & BIT(5))) ;
ret_data = (RD_HARPOON(p_port + hp_scsidata_0) & (unsigned char)0x1F);
curr_data |= BIT(6);
WR_HARPOON(p_port + hp_scsidata_0, curr_data);
curr_data &= ~BIT(5);
WR_HARPOON(p_port + hp_scsidata_0, curr_data);
FPT_scwirod(p_port, BIT(5)); /*Wait for DB5 to be released. */
curr_data &= ~(BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)); /*Release data bits */
curr_data |= BIT(7);
WR_HARPOON(p_port + hp_scsidata_0, curr_data);
curr_data &= ~BIT(6);
WR_HARPOON(p_port + hp_scsidata_0, curr_data);
FPT_scwirod(p_port, BIT(6)); /*Wait for DB6 to be released. */
return ret_data;
}
/*---------------------------------------------------------------------
*
* Function: FPT_scsendi
*
* Description: Transfer our Identification string to determine if we
* will be the dominant master.
*
*---------------------------------------------------------------------*/
/*
 * Transmit our ID string bit-by-bit during the SCAM isolation stage to
 * arbitrate for dominant-master.  A '1' bit is sent as quintet 02, a '0'
 * as 01; once we see someone else asserting a '1' where we have a '0' we
 * have lost and only send 00 ("defer") for the remainder.
 *
 * Returns 0x00 if we won, 0x01 if we lost, 0xFF on a protocol error
 * (unexpected bits in DB4-2).
 */
static unsigned char FPT_scsendi(unsigned long p_port,
unsigned char p_id_string[])
{
unsigned char ret_data, byte_cnt, bit_cnt, defer;
defer = 0;
for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) {
for (bit_cnt = 0x80; bit_cnt != 0; bit_cnt >>= 1) {
if (defer)
ret_data = FPT_scxferc(p_port, 00);
else if (p_id_string[byte_cnt] & bit_cnt)
ret_data = FPT_scxferc(p_port, 02);
else {
ret_data = FPT_scxferc(p_port, 01);
/* Someone sent a '1' while we sent '0': we lose. */
if (ret_data & 02)
defer = 1;
}
if ((ret_data & 0x1C) == 0x10)
return 0x00; /*End of isolation stage, we won! */
if (ret_data & 0x1C)
return 0xFF;
if ((defer) && (!(ret_data & 0x1F)))
return 0x01; /*End of isolation stage, we lost. */
} /*bit loop */
} /*byte loop */
if (defer)
return 0x01; /*We lost */
else
return 0; /*We WON! Yeeessss! */
}
/*---------------------------------------------------------------------
*
* Function: FPT_sciso
*
* Description: Transfer the Identification string.
*
*---------------------------------------------------------------------*/
/*
 * Receive a device's ID string during isolation, one bit per transfer
 * cycle (DB1 carries the data bit).  Fills p_id_string[] MSB-first.
 *
 * Returns 0 on a full string, 0x00 if the stream ended after at least
 * one complete byte, 0xFF on a protocol error or an empty stream.
 */
static unsigned char FPT_sciso(unsigned long p_port,
unsigned char p_id_string[])
{
unsigned char ret_data, the_data, byte_cnt, bit_cnt;
the_data = 0;
for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) {
for (bit_cnt = 0; bit_cnt < 8; bit_cnt++) {
ret_data = FPT_scxferc(p_port, 0);
/* Anything in DB7-2 is outside the data quintet: error. */
if (ret_data & 0xFC)
return 0xFF;
else {
the_data <<= 1;
if (ret_data & BIT(1)) {
the_data |= 1;
}
}
/* All-zero quintet marks end of the transmitted string. */
if ((ret_data & 0x1F) == 0) {
/*
if(bit_cnt != 0 || bit_cnt != 8)
{
byte_cnt = 0;
bit_cnt = 0;
FPT_scxferc(p_port, SYNC_PTRN);
FPT_scxferc(p_port, ASSIGN_ID);
continue;
}
*/
if (byte_cnt)
return 0x00;
else
return 0xFF;
}
} /*bit loop */
p_id_string[byte_cnt] = the_data;
} /*byte loop */
return 0;
}
/*---------------------------------------------------------------------
*
* Function: FPT_scwirod
*
* Description: Sample the SCSI data bus making sure the signal has been
* deasserted for the correct number of consecutive samples.
*
*---------------------------------------------------------------------*/
/*
 * Spin until the given SCSI data-bus bit has read back deasserted for
 * MAX_SCSI_TAR consecutive samples; the count restarts whenever the bit
 * is seen asserted.  Loops indefinitely if the line never settles.
 */
static void FPT_scwirod(unsigned long p_port, unsigned char p_data_bit)
{
	unsigned char clear_samples = 0;

	while (clear_samples < MAX_SCSI_TAR) {
		if (RD_HARPOON(p_port + hp_scsidata_0) & p_data_bit)
			clear_samples = 0;	/* still asserted: restart */
		else
			clear_samples++;
	}
}
/*---------------------------------------------------------------------
*
* Function: FPT_scwiros
*
* Description: Sample the SCSI Signal lines making sure the signal has been
* deasserted for the correct number of consecutive samples.
*
*---------------------------------------------------------------------*/
/*
 * Spin until the given SCSI signal line has read back deasserted for
 * MAX_SCSI_TAR consecutive samples; the count restarts whenever the
 * signal is seen asserted.  Loops indefinitely if it never settles.
 */
static void FPT_scwiros(unsigned long p_port, unsigned char p_data_bit)
{
	unsigned char clear_samples = 0;

	while (clear_samples < MAX_SCSI_TAR) {
		if (RD_HARPOON(p_port + hp_scsisig) & p_data_bit)
			clear_samples = 0;	/* still asserted: restart */
		else
			clear_samples++;
	}
}
/*---------------------------------------------------------------------
*
* Function: FPT_scvalq
*
* Description: Make sure we received a valid data byte.
*
*---------------------------------------------------------------------*/
/*
 * Validate a received SCAM quintet.  For each clear bit among DB0-DB2
 * the high bit of the working value is toggled (0x80 subtraction on an
 * unsigned char only flips bit 7); the verdict, however, depends only on
 * DB3/DB4: a quintet with either of those bits set is not a valid data
 * byte.  Returns 1 when valid, 0 otherwise.
 */
static unsigned char FPT_scvalq(unsigned char p_quintet)
{
	unsigned char bit;

	for (bit = 0x01; bit < 0x08; bit <<= 1) {
		if (!(p_quintet & bit))
			p_quintet -= 0x80;
	}
	return (p_quintet & 0x18) ? 0 : 1;
}
/*---------------------------------------------------------------------
*
* Function: FPT_scsell
*
* Description: Select the specified device ID using a selection timeout
* less than 4ms. If somebody responds then it is a legacy
* drive and this ID must be marked as such.
*
*---------------------------------------------------------------------*/
/*
 * Attempt a selection of targ_id with a short (4ms) timeout to detect a
 * legacy (non-SCAM) device at that ID.  Temporarily reprograms the
 * selection timeout and a small command sequence in ARAM, runs the
 * autostart selection, then restores the 290ms timeout.
 *
 * Returns 1 if a device answered (a legacy drive), 0 on timeout/reset.
 */
static unsigned char FPT_scsell(unsigned long p_port, unsigned char targ_id)
{
unsigned long i;
/* Hold off normal interrupt delivery while we fiddle with the chip. */
WR_HARPOON(p_port + hp_page_ctrl,
(RD_HARPOON(p_port + hp_page_ctrl) | G_INT_DISABLE));
ARAM_ACCESS(p_port);
WR_HARPOON(p_port + hp_addstat,
(RD_HARPOON(p_port + hp_addstat) | SCAM_TIMER));
WR_HARPOON(p_port + hp_seltimeout, TO_4ms);
/* Build a tiny sequencer program: 6 command ops + halt branch. */
for (i = p_port + CMD_STRT; i < p_port + CMD_STRT + 12; i += 2) {
WRW_HARPOON(i, (MPM_OP + ACOMMAND));
}
WRW_HARPOON(i, (BRH_OP + ALWAYS + NP));
WRW_HARPOON((p_port + hp_intstat),
(RESET | TIMEOUT | SEL | BUS_FREE | AUTO_INT));
WR_HARPOON(p_port + hp_select_id, targ_id);
WR_HARPOON(p_port + hp_portctrl_0, SCSI_PORT);
WR_HARPOON(p_port + hp_autostart_3, (SELECT | CMD_ONLY_STRT));
WR_HARPOON(p_port + hp_scsictrl_0, (SEL_TAR | ENA_RESEL));
/* Busy-wait for the selection attempt to resolve. */
while (!(RDW_HARPOON((p_port + hp_intstat)) &
(RESET | PROG_HLT | TIMEOUT | AUTO_INT))) {
}
if (RDW_HARPOON((p_port + hp_intstat)) & RESET)
FPT_Wait(p_port, TO_250ms);
DISABLE_AUTO(p_port);
/* Restore normal (290ms) selection timeout and SGRAM mapping. */
WR_HARPOON(p_port + hp_addstat,
(RD_HARPOON(p_port + hp_addstat) & ~SCAM_TIMER));
WR_HARPOON(p_port + hp_seltimeout, TO_290ms);
SGRAM_ACCESS(p_port);
if (RDW_HARPOON((p_port + hp_intstat)) & (RESET | TIMEOUT)) {
WRW_HARPOON((p_port + hp_intstat),
(RESET | TIMEOUT | SEL | BUS_FREE | PHASE));
WR_HARPOON(p_port + hp_page_ctrl,
(RD_HARPOON(p_port + hp_page_ctrl) &
~G_INT_DISABLE));
return 0; /*No legacy device */
}
else {
/* A device responded: ACK its REQs until it goes bus-free. */
while (!(RDW_HARPOON((p_port + hp_intstat)) & BUS_FREE)) {
if (RD_HARPOON(p_port + hp_scsisig) & SCSI_REQ) {
WR_HARPOON(p_port + hp_scsisig,
(SCSI_ACK + S_ILL_PH));
ACCEPT_MSG(p_port);
}
}
WRW_HARPOON((p_port + hp_intstat), CLR_ALL_INT_1);
WR_HARPOON(p_port + hp_page_ctrl,
(RD_HARPOON(p_port + hp_page_ctrl) &
~G_INT_DISABLE));
return 1; /*Found one of them oldies! */
}
}
/*---------------------------------------------------------------------
*
* Function: FPT_scwtsel
*
* Description: Wait to be selected by another SCAM initiator.
*
*---------------------------------------------------------------------*/
/*
 * Block (busy-wait) until this adapter is selected by another SCAM
 * initiator, i.e. until SCAM_SEL is latched in the interrupt status.
 */
static void FPT_scwtsel(unsigned long p_port)
{
	for (;;) {
		if (RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL)
			break;
	}
}
/*---------------------------------------------------------------------
*
* Function: FPT_inisci
*
* Description: Setup the data Structure with the info from the EEPROM.
*
*---------------------------------------------------------------------*/
/*
 * Populate the FPT_scamInfo[] table for this card from NVRAM when a
 * pNvRamInfo shadow is available, otherwise directly from the serial
 * EEPROM (two ID-string bytes per 16-bit EEPROM word, little-endian).
 * Entries whose first byte is 0x00 (or 0xFF from EEPROM) are marked
 * ID_UNUSED; everything else starts as ID_UNASSIGNED.  Finally our own
 * slot is overwritten with the host adapter's ID string.
 */
static void FPT_inisci(unsigned char p_card, unsigned long p_port,
unsigned char p_our_id)
{
unsigned char i, k, max_id;
unsigned short ee_data;
struct nvram_info *pCurrNvRam;
pCurrNvRam = FPT_BL_Card[p_card].pNvRamInfo;
/* Narrow cards track 8 IDs, wide cards 16. */
if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD)
max_id = 0x08;
else
max_id = 0x10;
if (pCurrNvRam) {
/* NVRAM shadow holds only the first 4 ID-string bytes. */
for (i = 0; i < max_id; i++) {
for (k = 0; k < 4; k++)
FPT_scamInfo[i].id_string[k] =
pCurrNvRam->niScamTbl[i][k];
for (k = 4; k < ID_STRING_LENGTH; k++)
FPT_scamInfo[i].id_string[k] =
(unsigned char)0x00;
if (FPT_scamInfo[i].id_string[0] == 0x00)
FPT_scamInfo[i].state = ID_UNUSED; /*Default to unused ID. */
else
FPT_scamInfo[i].state = ID_UNASSIGNED; /*Default to unassigned ID. */
}
} else {
for (i = 0; i < max_id; i++) {
for (k = 0; k < ID_STRING_LENGTH; k += 2) {
ee_data =
FPT_utilEERead(p_port,
(unsigned
short)((EE_SCAMBASE / 2) +
(unsigned short)(i *
((unsigned short)ID_STRING_LENGTH / 2)) + (unsigned short)(k / 2)));
FPT_scamInfo[i].id_string[k] =
(unsigned char)ee_data;
ee_data >>= 8;
FPT_scamInfo[i].id_string[k + 1] =
(unsigned char)ee_data;
}
if ((FPT_scamInfo[i].id_string[0] == 0x00) ||
(FPT_scamInfo[i].id_string[0] == 0xFF))
FPT_scamInfo[i].state = ID_UNUSED; /*Default to unused ID. */
else
FPT_scamInfo[i].state = ID_UNASSIGNED; /*Default to unassigned ID. */
}
}
/* Our own slot always carries the host adapter's ID string. */
for (k = 0; k < ID_STRING_LENGTH; k++)
FPT_scamInfo[p_our_id].id_string[k] = FPT_scamHAString[k];
}
/*---------------------------------------------------------------------
*
* Function: FPT_scmachid
*
* Description: Match the Device ID string with our values stored in
* the EEPROM.
*
*---------------------------------------------------------------------*/
/*
 * Match a received SCAM ID string against FPT_scamInfo[] and assign the
 * device an ID.  Strategy:
 *   1. Exact string match -> reuse that slot.
 *   2. Scan (wrapping downward from the device's preferred ID, derived
 *      from the ID-string header bits) for an ID_UNUSED slot.
 *   3. If the device asked for a specific priority (bit 7) and none was
 *      free, return CLR_PRIORITY so the caller can retry without it.
 *   4. Otherwise repeat the scan over ID_UNASSIGNED slots, stealing one.
 * Returns the assigned ID, CLR_PRIORITY, or NO_ID_AVAIL.
 */
static unsigned char FPT_scmachid(unsigned char p_card,
unsigned char p_id_string[])
{
unsigned char i, k, match;
/* 1: look for an exact ID-string match. */
for (i = 0; i < MAX_SCSI_TAR; i++) {
match = 1;
for (k = 0; k < ID_STRING_LENGTH; k++) {
if (p_id_string[k] != FPT_scamInfo[i].id_string[k])
match = 0;
}
if (match) {
FPT_scamInfo[i].state = ID_ASSIGNED;
return i;
}
}
/* Bit 5 restricts the device to the low 8 IDs. */
if (p_id_string[0] & BIT(5))
i = 8;
else
i = MAX_SCSI_TAR;
/* Header codes 0x02/0x04 mean byte 1 carries a preferred ID. */
if (((p_id_string[0] & 0x06) == 0x02)
|| ((p_id_string[0] & 0x06) == 0x04))
match = p_id_string[1] & (unsigned char)0x1F;
else
match = 7;
/* 2: take the first ID_UNUSED slot, scanning downward with wrap. */
while (i > 0) {
i--;
if (FPT_scamInfo[match].state == ID_UNUSED) {
for (k = 0; k < ID_STRING_LENGTH; k++) {
FPT_scamInfo[match].id_string[k] =
p_id_string[k];
}
FPT_scamInfo[match].state = ID_ASSIGNED;
if (FPT_BL_Card[p_card].pNvRamInfo == NULL)
FPT_BL_Card[p_card].globalFlags |=
F_UPDATE_EEPROM;
return match;
}
match--;
/* unsigned char wrapped below zero: restart at the top ID. */
if (match == 0xFF) {
if (p_id_string[0] & BIT(5))
match = 7;
else
match = MAX_SCSI_TAR - 1;
}
}
/* 3: device insisted on a priority ID we could not grant. */
if (p_id_string[0] & BIT(7)) {
return CLR_PRIORITY;
}
if (p_id_string[0] & BIT(5))
i = 8;
else
i = MAX_SCSI_TAR;
if (((p_id_string[0] & 0x06) == 0x02)
|| ((p_id_string[0] & 0x06) == 0x04))
match = p_id_string[1] & (unsigned char)0x1F;
else
match = 7;
/* 4: same scan, but now steal an ID_UNASSIGNED slot. */
while (i > 0) {
i--;
if (FPT_scamInfo[match].state == ID_UNASSIGNED) {
for (k = 0; k < ID_STRING_LENGTH; k++) {
FPT_scamInfo[match].id_string[k] =
p_id_string[k];
}
FPT_scamInfo[match].id_string[0] |= BIT(7);
FPT_scamInfo[match].state = ID_ASSIGNED;
if (FPT_BL_Card[p_card].pNvRamInfo == NULL)
FPT_BL_Card[p_card].globalFlags |=
F_UPDATE_EEPROM;
return match;
}
match--;
if (match == 0xFF) {
if (p_id_string[0] & BIT(5))
match = 7;
else
match = MAX_SCSI_TAR - 1;
}
}
return NO_ID_AVAIL;
}
/*---------------------------------------------------------------------
*
* Function: FPT_scsavdi
*
* Description: Save off the device SCAM ID strings.
*
*---------------------------------------------------------------------*/
/*
 * Write all SCAM ID strings from FPT_scamInfo[] back to the serial
 * EEPROM and update the EEPROM checksum.  The checksum first accumulates
 * every existing word below EE_SCAMBASE (skipping word 0, the checksum
 * itself) and then every SCAM word as it is written.
 */
static void FPT_scsavdi(unsigned char p_card, unsigned long p_port)
{
unsigned char i, k, max_id;
unsigned short ee_data, sum_data;
sum_data = 0x0000;
/* Sum the non-SCAM portion of the EEPROM (word 0 is the checksum). */
for (i = 1; i < EE_SCAMBASE / 2; i++) {
sum_data += FPT_utilEERead(p_port, i);
}
FPT_utilEEWriteOnOff(p_port, 1); /* Enable write access to the EEPROM */
if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD)
max_id = 0x08;
else
max_id = 0x10;
for (i = 0; i < max_id; i++) {
/* Pack two ID-string bytes per EEPROM word, low byte first. */
for (k = 0; k < ID_STRING_LENGTH; k += 2) {
ee_data = FPT_scamInfo[i].id_string[k + 1];
ee_data <<= 8;
ee_data |= FPT_scamInfo[i].id_string[k];
sum_data += ee_data;
FPT_utilEEWrite(p_port, ee_data,
(unsigned short)((EE_SCAMBASE / 2) +
(unsigned short)(i *
((unsigned short)ID_STRING_LENGTH / 2)) + (unsigned short)(k / 2)));
}
}
FPT_utilEEWrite(p_port, sum_data, EEPROM_CHECK_SUM / 2);
FPT_utilEEWriteOnOff(p_port, 0); /* Turn off write access */
}
/*---------------------------------------------------------------------
*
* Function: FPT_XbowInit
*
* Description: Setup the Xbow for normal operation.
*
*---------------------------------------------------------------------*/
/*
 * Reset and configure the Xbow logic: pulse the internal resets, set the
 * default clock control, clear stale signals/interrupts, and program the
 * interrupt-enable mask (adding SCAM_SEL when level-2 SCAM is active).
 * The page-control register is saved on entry and restored on exit with
 * interrupts held off in between.
 */
static void FPT_XbowInit(unsigned long port, unsigned char ScamFlg)
{
unsigned char i;
i = RD_HARPOON(port + hp_page_ctrl);
WR_HARPOON(port + hp_page_ctrl, (unsigned char)(i | G_INT_DISABLE));
WR_HARPOON(port + hp_scsireset, 0x00);
WR_HARPOON(port + hp_portctrl_1, HOST_MODE8);
/* Pulse all internal reset lines, then take the chip out of reset. */
WR_HARPOON(port + hp_scsireset, (DMA_RESET | HPSCSI_RESET | PROG_RESET |
FIFO_CLR));
WR_HARPOON(port + hp_scsireset, SCSI_INI);
WR_HARPOON(port + hp_clkctrl_0, CLKCTRL_DEFAULT);
WR_HARPOON(port + hp_scsisig, 0x00); /* Clear any signals we might */
WR_HARPOON(port + hp_scsictrl_0, ENA_SCAM_SEL);
WRW_HARPOON((port + hp_intstat), CLR_ALL_INT);
FPT_default_intena = RESET | RSEL | PROG_HLT | TIMEOUT |
BUS_FREE | XFER_CNT_0 | AUTO_INT;
/* Level-2 SCAM also wants to see incoming SCAM selections. */
if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2))
FPT_default_intena |= SCAM_SEL;
WRW_HARPOON((port + hp_intena), FPT_default_intena);
WR_HARPOON(port + hp_seltimeout, TO_290ms);
/* Turn on SCSI_MODE8 for narrow cards to fix the
strapping issue with the DUAL CHANNEL card */
if (RD_HARPOON(port + hp_page_ctrl) & NARROW_SCSI_CARD)
WR_HARPOON(port + hp_addstat, SCSI_MODE8);
WR_HARPOON(port + hp_page_ctrl, i);
}
/*---------------------------------------------------------------------
*
* Function: FPT_BusMasterInit
*
* Description: Initialize the BusMaster for normal operations.
*
*---------------------------------------------------------------------*/
/*
 * Reset and configure the bus-master (DMA) engine: pulse the driver
 * reset, set the 64-byte burst size and default bus-master control,
 * enable high termination, clear pending interrupts, unmask command-
 * complete and SCSI interrupts, and disable scatter/gather in the
 * page-control register.
 */
static void FPT_BusMasterInit(unsigned long p_port)
{
WR_HARPOON(p_port + hp_sys_ctrl, DRVR_RST);
WR_HARPOON(p_port + hp_sys_ctrl, 0x00);
WR_HARPOON(p_port + hp_host_blk_cnt, XFER_BLK64);
WR_HARPOON(p_port + hp_bm_ctrl, (BMCTRL_DEFAULT));
WR_HARPOON(p_port + hp_ee_ctrl, (SCSI_TERM_ENA_H));
RD_HARPOON(p_port + hp_int_status); /*Clear interrupts. */
WR_HARPOON(p_port + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT));
WR_HARPOON(p_port + hp_page_ctrl, (RD_HARPOON(p_port + hp_page_ctrl) &
~SCATTER_EN));
}
/*---------------------------------------------------------------------
*
* Function: FPT_DiagEEPROM
*
* Description: Verfiy checksum and 'Key' and initialize the EEPROM if
* necessary.
*
*---------------------------------------------------------------------*/
/*
 * Verify the EEPROM's firmware signature (0x4641) and checksum; if
 * either fails, erase the EEPROM and rewrite factory defaults (model
 * strings, config words, sync-rate tables, product ID, and a default
 * SCAM entry for ID 7), accumulating the checksum in 'temp' as each
 * word is written and storing it last.  Every FPT_utilEEWrite below
 * must be mirrored by the matching 'temp +=' or the checksum breaks.
 */
static void FPT_DiagEEPROM(unsigned long p_port)
{
unsigned short index, temp, max_wd_cnt;
if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD)
max_wd_cnt = EEPROM_WD_CNT;
else
max_wd_cnt = EEPROM_WD_CNT * 2;
temp = FPT_utilEERead(p_port, FW_SIGNATURE / 2);
if (temp == 0x4641) {
/* Signature present: verify the checksum over the rest. */
for (index = 2; index < max_wd_cnt; index++) {
temp += FPT_utilEERead(p_port, index);
}
if (temp == FPT_utilEERead(p_port, EEPROM_CHECK_SUM / 2)) {
return; /*EEPROM is Okay so return now! */
}
}
/* Bad or blank EEPROM: wipe it and write factory defaults. */
FPT_utilEEWriteOnOff(p_port, (unsigned char)1);
for (index = 0; index < max_wd_cnt; index++) {
FPT_utilEEWrite(p_port, 0x0000, index);
}
temp = 0;
FPT_utilEEWrite(p_port, 0x4641, FW_SIGNATURE / 2);
temp += 0x4641;
FPT_utilEEWrite(p_port, 0x3920, MODEL_NUMB_0 / 2);
temp += 0x3920;
FPT_utilEEWrite(p_port, 0x3033, MODEL_NUMB_2 / 2);
temp += 0x3033;
FPT_utilEEWrite(p_port, 0x2020, MODEL_NUMB_4 / 2);
temp += 0x2020;
FPT_utilEEWrite(p_port, 0x70D3, SYSTEM_CONFIG / 2);
temp += 0x70D3;
FPT_utilEEWrite(p_port, 0x0010, BIOS_CONFIG / 2);
temp += 0x0010;
FPT_utilEEWrite(p_port, 0x0003, SCAM_CONFIG / 2);
temp += 0x0003;
FPT_utilEEWrite(p_port, 0x0007, ADAPTER_SCSI_ID / 2);
temp += 0x0007;
FPT_utilEEWrite(p_port, 0x0000, IGNORE_B_SCAN / 2);
temp += 0x0000;
FPT_utilEEWrite(p_port, 0x0000, SEND_START_ENA / 2);
temp += 0x0000;
FPT_utilEEWrite(p_port, 0x0000, DEVICE_ENABLE / 2);
temp += 0x0000;
/* Default sync-rate table: 0x42 for every target. */
FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL01 / 2);
temp += 0x4242;
FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL23 / 2);
temp += 0x4242;
FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL45 / 2);
temp += 0x4242;
FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL67 / 2);
temp += 0x4242;
FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL89 / 2);
temp += 0x4242;
FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLab / 2);
temp += 0x4242;
FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLcd / 2);
temp += 0x4242;
FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLef / 2);
temp += 0x4242;
/* ASCII product-ID words ("FlashPoint LT "). */
FPT_utilEEWrite(p_port, 0x6C46, 64 / 2); /*PRODUCT ID */
temp += 0x6C46;
FPT_utilEEWrite(p_port, 0x7361, 66 / 2); /* FlashPoint LT */
temp += 0x7361;
FPT_utilEEWrite(p_port, 0x5068, 68 / 2);
temp += 0x5068;
FPT_utilEEWrite(p_port, 0x696F, 70 / 2);
temp += 0x696F;
FPT_utilEEWrite(p_port, 0x746E, 72 / 2);
temp += 0x746E;
FPT_utilEEWrite(p_port, 0x4C20, 74 / 2);
temp += 0x4C20;
FPT_utilEEWrite(p_port, 0x2054, 76 / 2);
temp += 0x2054;
FPT_utilEEWrite(p_port, 0x2020, 78 / 2);
temp += 0x2020;
/* Default SCAM table entry for host adapter ID 7. */
index = ((EE_SCAMBASE / 2) + (7 * 16));
FPT_utilEEWrite(p_port, (0x0700 + TYPE_CODE0), index);
temp += (0x0700 + TYPE_CODE0);
index++;
FPT_utilEEWrite(p_port, 0x5542, index); /*Vendor ID code */
temp += 0x5542; /* BUSLOGIC */
index++;
FPT_utilEEWrite(p_port, 0x4C53, index);
temp += 0x4C53;
index++;
FPT_utilEEWrite(p_port, 0x474F, index);
temp += 0x474F;
index++;
FPT_utilEEWrite(p_port, 0x4349, index);
temp += 0x4349;
index++;
FPT_utilEEWrite(p_port, 0x5442, index); /*Vendor unique code */
temp += 0x5442; /* BT- 930 */
index++;
FPT_utilEEWrite(p_port, 0x202D, index);
temp += 0x202D;
index++;
FPT_utilEEWrite(p_port, 0x3339, index);
temp += 0x3339;
index++; /*Serial # */
FPT_utilEEWrite(p_port, 0x2030, index); /* 01234567 */
temp += 0x2030;
index++;
FPT_utilEEWrite(p_port, 0x5453, index);
temp += 0x5453;
index++;
FPT_utilEEWrite(p_port, 0x5645, index);
temp += 0x5645;
index++;
FPT_utilEEWrite(p_port, 0x2045, index);
temp += 0x2045;
index++;
FPT_utilEEWrite(p_port, 0x202F, index);
temp += 0x202F;
index++;
FPT_utilEEWrite(p_port, 0x4F4A, index);
temp += 0x4F4A;
index++;
FPT_utilEEWrite(p_port, 0x204E, index);
temp += 0x204E;
index++;
FPT_utilEEWrite(p_port, 0x3539, index);
temp += 0x3539;
FPT_utilEEWrite(p_port, temp, EEPROM_CHECK_SUM / 2);
FPT_utilEEWriteOnOff(p_port, (unsigned char)0);
}
/*---------------------------------------------------------------------
*
* Function: Queue Search Select
*
* Description: Try to find a new command to execute.
*
*---------------------------------------------------------------------*/
/*
 * Round-robin scan of all targets (starting at pCurrCard->scanIndex) for
 * the next queued SCCB to start.  Two modes per target:
 *  - concurrent-LUN mode (F_CONLUN_IO set and not tag-queuing): search
 *    the target's select queue for an SCCB whose LUN is not busy and
 *    unlink it from the middle of the queue if necessary;
 *  - normal mode: pop the queue head if TarLUNBusy[0] is clear.
 * On success the SCCB becomes pCurrCard->currentSCCB, scanIndex is
 * advanced past this target, and F_NEW_SCCB_CMD is set.  The loop stops
 * after one full revolution with no work found.
 */
static void FPT_queueSearchSelect(struct sccb_card *pCurrCard,
unsigned char p_card)
{
unsigned char scan_ptr, lun;
struct sccb_mgr_tar_info *currTar_Info;
struct sccb *pOldSccb;
scan_ptr = pCurrCard->scanIndex;
do {
currTar_Info = &FPT_sccbMgrTbl[p_card][scan_ptr];
if ((pCurrCard->globalFlags & F_CONLUN_IO) &&
((currTar_Info->TarStatus & TAR_TAG_Q_MASK) !=
TAG_Q_TRYING)) {
if (currTar_Info->TarSelQ_Cnt != 0) {
/* Advance now; scanIndex gets this value on success. */
scan_ptr++;
if (scan_ptr == MAX_SCSI_TAR)
scan_ptr = 0;
for (lun = 0; lun < MAX_LUN; lun++) {
if (currTar_Info->TarLUNBusy[lun] == 0) {
pCurrCard->currentSCCB =
currTar_Info->TarSelQ_Head;
pOldSccb = NULL;
/* Walk the queue for an SCCB addressed to this LUN;
   pOldSccb trails as its predecessor. */
while ((pCurrCard->
currentSCCB != NULL)
&& (lun !=
pCurrCard->
currentSCCB->Lun)) {
pOldSccb =
pCurrCard->
currentSCCB;
pCurrCard->currentSCCB =
(struct sccb
*)(pCurrCard->
currentSCCB)->
Sccb_forwardlink;
}
if (pCurrCard->currentSCCB ==
NULL)
continue;
/* Unlink the found SCCB: mid-queue ... */
if (pOldSccb != NULL) {
pOldSccb->
Sccb_forwardlink =
(struct sccb
*)(pCurrCard->
currentSCCB)->
Sccb_forwardlink;
pOldSccb->
Sccb_backlink =
(struct sccb
*)(pCurrCard->
currentSCCB)->
Sccb_backlink;
currTar_Info->
TarSelQ_Cnt--;
} else {
/* ... or at the queue head. */
currTar_Info->
TarSelQ_Head =
(struct sccb
*)(pCurrCard->
currentSCCB)->
Sccb_forwardlink;
if (currTar_Info->
TarSelQ_Head ==
NULL) {
currTar_Info->
TarSelQ_Tail
= NULL;
currTar_Info->
TarSelQ_Cnt
= 0;
} else {
currTar_Info->
TarSelQ_Cnt--;
currTar_Info->
TarSelQ_Head->
Sccb_backlink
=
(struct sccb
*)NULL;
}
}
pCurrCard->scanIndex = scan_ptr;
pCurrCard->globalFlags |=
F_NEW_SCCB_CMD;
break;
}
}
}
else {
scan_ptr++;
if (scan_ptr == MAX_SCSI_TAR) {
scan_ptr = 0;
}
}
} else {
/* Non-CONLUN (or tag-queuing) target: simple head pop. */
if ((currTar_Info->TarSelQ_Cnt != 0) &&
(currTar_Info->TarLUNBusy[0] == 0)) {
pCurrCard->currentSCCB =
currTar_Info->TarSelQ_Head;
currTar_Info->TarSelQ_Head =
(struct sccb *)(pCurrCard->currentSCCB)->
Sccb_forwardlink;
if (currTar_Info->TarSelQ_Head == NULL) {
currTar_Info->TarSelQ_Tail = NULL;
currTar_Info->TarSelQ_Cnt = 0;
} else {
currTar_Info->TarSelQ_Cnt--;
currTar_Info->TarSelQ_Head->
Sccb_backlink = (struct sccb *)NULL;
}
scan_ptr++;
if (scan_ptr == MAX_SCSI_TAR)
scan_ptr = 0;
pCurrCard->scanIndex = scan_ptr;
pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
break;
}
else {
scan_ptr++;
if (scan_ptr == MAX_SCSI_TAR) {
scan_ptr = 0;
}
}
}
} while (scan_ptr != pCurrCard->scanIndex);
}
/*---------------------------------------------------------------------
*
* Function: Queue Select Fail
*
* Description: Add the current SCCB to the head of the Queue.
*
*---------------------------------------------------------------------*/
static void FPT_queueSelectFail(struct sccb_card *pCurrCard,
unsigned char p_card)
{
unsigned char thisTarg;
struct sccb_mgr_tar_info *currTar_Info;
if (pCurrCard->currentSCCB != NULL) {
thisTarg =
(unsigned char)(((struct sccb *)(pCurrCard->currentSCCB))->
TargID);
currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg];
pCurrCard->currentSCCB->Sccb_backlink = (struct sccb *)NULL;
pCurrCard->currentSCCB->Sccb_forwardlink =
currTar_Info->TarSelQ_Head;
if (currTar_Info->TarSelQ_Cnt == 0) {
currTar_Info->TarSelQ_Tail = pCurrCard->currentSCCB;
}
else {
currTar_Info->TarSelQ_Head->Sccb_backlink =
pCurrCard->currentSCCB;
}
currTar_Info->TarSelQ_Head = pCurrCard->currentSCCB;
pCurrCard->currentSCCB = NULL;
currTar_Info->TarSelQ_Cnt++;
}
}
/*---------------------------------------------------------------------
*
* Function: Queue Command Complete
*
* Description: Call the callback function with the current SCCB.
*
*---------------------------------------------------------------------*/
/*
 * Finish an SCCB and hand it back to the host driver via its callback.
 * Steps: flag a data under-run for filtered data commands that did not
 * transfer everything; derive SccbStatus from Host/Target status;
 * restore the original CDB if auto-sense overwrote it; recompute the
 * residual for residual-type commands; drop the card's command count
 * (powering down the clock when idle under F_GREEN_PC); release the
 * SCCB's disconnect-table slot; then invoke the callback.
 */
static void FPT_queueCmdComplete(struct sccb_card *pCurrCard,
struct sccb *p_sccb, unsigned char p_card)
{
unsigned char i, SCSIcmd;
CALL_BK_FN callback;
struct sccb_mgr_tar_info *currTar_Info;
SCSIcmd = p_sccb->Cdb[0];
if (!(p_sccb->Sccb_XferState & F_ALL_XFERRED)) {
/* Data command completed cleanly but short: report under-run
   (only for the filtered opcodes, unless filtering is off). */
if ((p_sccb->
ControlByte & (SCCB_DATA_XFER_OUT | SCCB_DATA_XFER_IN))
&& (p_sccb->HostStatus == SCCB_COMPLETE)
&& (p_sccb->TargetStatus != SSCHECK))
if ((SCSIcmd == SCSI_READ) ||
(SCSIcmd == SCSI_WRITE) ||
(SCSIcmd == SCSI_READ_EXTENDED) ||
(SCSIcmd == SCSI_WRITE_EXTENDED) ||
(SCSIcmd == SCSI_WRITE_AND_VERIFY) ||
(SCSIcmd == SCSI_START_STOP_UNIT) ||
(pCurrCard->globalFlags & F_NO_FILTER)
)
p_sccb->HostStatus = SCCB_DATA_UNDER_RUN;
}
if (p_sccb->SccbStatus == SCCB_IN_PROCESS) {
if (p_sccb->HostStatus || p_sccb->TargetStatus)
p_sccb->SccbStatus = SCCB_ERROR;
else
p_sccb->SccbStatus = SCCB_SUCCESS;
}
if (p_sccb->Sccb_XferState & F_AUTO_SENSE) {
/* Auto-sense replaced the CDB; put the original back. */
p_sccb->CdbLength = p_sccb->Save_CdbLen;
for (i = 0; i < 6; i++) {
p_sccb->Cdb[i] = p_sccb->Save_Cdb[i];
}
}
if ((p_sccb->OperationCode == RESIDUAL_SG_COMMAND) ||
(p_sccb->OperationCode == RESIDUAL_COMMAND)) {
FPT_utilUpdateResidual(p_sccb);
}
pCurrCard->cmdCounter--;
if (!pCurrCard->cmdCounter) {
/* Card went idle: optionally power down, release semaphore. */
if (pCurrCard->globalFlags & F_GREEN_PC) {
WR_HARPOON(pCurrCard->ioPort + hp_clkctrl_0,
(PWR_DWN | CLKCTRL_DEFAULT));
WR_HARPOON(pCurrCard->ioPort + hp_sys_ctrl, STOP_CLK);
}
WR_HARPOON(pCurrCard->ioPort + hp_semaphore,
(RD_HARPOON(pCurrCard->ioPort + hp_semaphore) &
~SCCB_MGR_ACTIVE));
}
if (pCurrCard->discQCount != 0) {
/* Free the disconnect-table slot this SCCB occupied. */
currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID];
if (((pCurrCard->globalFlags & F_CONLUN_IO) &&
((currTar_Info->TarStatus & TAR_TAG_Q_MASK) !=
TAG_Q_TRYING))) {
pCurrCard->discQCount--;
pCurrCard->discQ_Tbl[currTar_Info->
LunDiscQ_Idx[p_sccb->Lun]] = NULL;
} else {
if (p_sccb->Sccb_tag) {
pCurrCard->discQCount--;
pCurrCard->discQ_Tbl[p_sccb->Sccb_tag] = NULL;
} else {
pCurrCard->discQCount--;
pCurrCard->discQ_Tbl[currTar_Info->
LunDiscQ_Idx[0]] = NULL;
}
}
}
callback = (CALL_BK_FN) p_sccb->SccbCallback;
callback(p_sccb);
pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
pCurrCard->currentSCCB = NULL;
}
/*---------------------------------------------------------------------
*
* Function: Queue Disconnect
*
* Description: Add SCCB to our disconnect array.
*
*---------------------------------------------------------------------*/
/*
 * Record a disconnecting SCCB in the card's disconnect table so it can
 * be found again on reselection.  The slot is chosen by LUN index in
 * concurrent-LUN mode, by queue tag for tagged commands, or by the
 * target's LUN-0 index otherwise.  For tagged commands the target's
 * LUN-0 busy flag is cleared and its tag count bumped.  Clears the
 * card's currentSCCB.
 */
static void FPT_queueDisconnect(struct sccb *p_sccb, unsigned char p_card)
{
struct sccb_mgr_tar_info *currTar_Info;
currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID];
if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) {
FPT_BL_Card[p_card].discQ_Tbl[currTar_Info->
LunDiscQ_Idx[p_sccb->Lun]] =
p_sccb;
} else {
if (p_sccb->Sccb_tag) {
FPT_BL_Card[p_card].discQ_Tbl[p_sccb->Sccb_tag] =
p_sccb;
FPT_sccbMgrTbl[p_card][p_sccb->TargID].TarLUNBusy[0] =
0;
FPT_sccbMgrTbl[p_card][p_sccb->TargID].TarTagQ_Cnt++;
} else {
FPT_BL_Card[p_card].discQ_Tbl[currTar_Info->
LunDiscQ_Idx[0]] = p_sccb;
}
}
FPT_BL_Card[p_card].currentSCCB = NULL;
}
/*---------------------------------------------------------------------
*
* Function: Queue Flush SCCB
*
* Description: Flush all SCCB's back to the host driver for this target.
*
*---------------------------------------------------------------------*/
static void FPT_queueFlushSccb(unsigned char p_card, unsigned char error_code)
{
unsigned char qtag, thisTarg;
struct sccb *currSCCB;
struct sccb_mgr_tar_info *currTar_Info;
currSCCB = FPT_BL_Card[p_card].currentSCCB;
if (currSCCB != NULL) {
thisTarg = (unsigned char)currSCCB->TargID;
currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg];
for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) {
if (FPT_BL_Card[p_card].discQ_Tbl[qtag] &&
(FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID ==
thisTarg)) {
FPT_BL_Card[p_card].discQ_Tbl[qtag]->
HostStatus = (unsigned char)error_code;
FPT_queueCmdComplete(&FPT_BL_Card[p_card],
FPT_BL_Card[p_card].
discQ_Tbl[qtag], p_card);
FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL;
currTar_Info->TarTagQ_Cnt--;
}
}
}
}
/*---------------------------------------------------------------------
*
* Function: Queue Flush Target SCCB
*
* Description: Flush all SCCB's back to the host driver for this target.
*
*---------------------------------------------------------------------*/
static void FPT_queueFlushTargSccb(unsigned char p_card, unsigned char thisTarg,
unsigned char error_code)
{
unsigned char qtag;
struct sccb_mgr_tar_info *currTar_Info;
currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg];
for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) {
if (FPT_BL_Card[p_card].discQ_Tbl[qtag] &&
(FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID == thisTarg)) {
FPT_BL_Card[p_card].discQ_Tbl[qtag]->HostStatus =
(unsigned char)error_code;
FPT_queueCmdComplete(&FPT_BL_Card[p_card],
FPT_BL_Card[p_card].
discQ_Tbl[qtag], p_card);
FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL;
currTar_Info->TarTagQ_Cnt--;
}
}
}
/*
 * Append an SCCB to the tail of its target's doubly linked selection
 * queue, updating head/tail pointers and the queue count.
 */
static void FPT_queueAddSccb(struct sccb *p_SCCB, unsigned char p_card)
{
	struct sccb_mgr_tar_info *tar_info =
	    &FPT_sccbMgrTbl[p_card][p_SCCB->TargID];

	p_SCCB->Sccb_forwardlink = NULL;
	p_SCCB->Sccb_backlink = tar_info->TarSelQ_Tail;

	if (tar_info->TarSelQ_Cnt == 0)
		tar_info->TarSelQ_Head = p_SCCB;	/* first entry */
	else
		tar_info->TarSelQ_Tail->Sccb_forwardlink = p_SCCB;

	tar_info->TarSelQ_Tail = p_SCCB;
	tar_info->TarSelQ_Cnt++;
}
/*---------------------------------------------------------------------
*
* Function: Queue Find SCCB
*
* Description: Search the target select Queue for this SCCB, and
* remove it if found.
*
*---------------------------------------------------------------------*/
/*
 * Search the target's selection queue for the given SCCB; if present,
 * unlink it (fixing head, tail, and neighbour links) and return 1,
 * otherwise return 0.
 */
static unsigned char FPT_queueFindSccb(struct sccb *p_SCCB,
				       unsigned char p_card)
{
	struct sccb_mgr_tar_info *tar_info =
	    &FPT_sccbMgrTbl[p_card][p_SCCB->TargID];
	struct sccb *node;

	for (node = tar_info->TarSelQ_Head; node != NULL;
	     node = node->Sccb_forwardlink) {
		if (node != p_SCCB)
			continue;

		/* Found it: splice it out of the doubly linked queue. */
		if (tar_info->TarSelQ_Head == node)
			tar_info->TarSelQ_Head = node->Sccb_forwardlink;
		if (tar_info->TarSelQ_Tail == node)
			tar_info->TarSelQ_Tail = node->Sccb_backlink;
		if (node->Sccb_forwardlink != NULL)
			node->Sccb_forwardlink->Sccb_backlink =
			    node->Sccb_backlink;
		if (node->Sccb_backlink != NULL)
			node->Sccb_backlink->Sccb_forwardlink =
			    node->Sccb_forwardlink;
		tar_info->TarSelQ_Cnt--;
		return 1;
	}
	return 0;
}
/*---------------------------------------------------------------------
*
* Function: Utility Update Residual Count
*
* Description: Update the XferCnt to the remaining byte count.
* If we transferred all the data then just write zero.
* If Non-SG transfer then report Total Cnt - Actual Transfer
* Cnt. For SG transfers add the count fields of all
* remaining SG elements, as well as any partial remaining
* element.
*
*---------------------------------------------------------------------*/
/*
 * Rewrite p_SCCB->DataLength with the residual byte count.  Fully
 * transferred: zero.  Scatter/gather: partial remainder of the current
 * element plus the count words of all remaining SG elements (each
 * element is two longs: count, address).  Plain transfer: total minus
 * the actual transfer count.
 */
static void FPT_utilUpdateResidual(struct sccb *p_SCCB)
{
	unsigned long residual;
	unsigned int seg;
	unsigned long *sg_list;

	if (p_SCCB->Sccb_XferState & F_ALL_XFERRED) {
		p_SCCB->DataLength = 0x0000;
	} else if (p_SCCB->Sccb_XferState & F_SG_XFER) {
		residual = 0x0000;
		seg = p_SCCB->Sccb_sgseg;
		sg_list = (unsigned long *)p_SCCB->DataPointer;

		/* Partially consumed element contributes its remainder. */
		if (p_SCCB->Sccb_SGoffset) {
			residual = p_SCCB->Sccb_SGoffset;
			seg++;
		}
		/* Add the count field of every untouched element. */
		while (((unsigned long)seg *
			(unsigned long)SG_ELEMENT_SIZE) < p_SCCB->DataLength) {
			residual += *(sg_list + (seg * 2));
			seg++;
		}
		p_SCCB->DataLength = residual;
	} else {
		p_SCCB->DataLength -= p_SCCB->Sccb_ATC;
	}
}
/*---------------------------------------------------------------------
*
* Function: Wait 1 Second
*
* Description: Wait for 1 second.
*
*---------------------------------------------------------------------*/
/*
 * Delay roughly one second as four 250ms hardware waits, bailing out
 * early if a SCSI reset or a SCAM selection shows up.
 */
static void FPT_Wait1Second(unsigned long p_port)
{
	unsigned char quarter;

	for (quarter = 0; quarter < 4; quarter++) {
		FPT_Wait(p_port, TO_250ms);
		if (RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST)
			break;
		if (RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL)
			break;
	}
}
/*---------------------------------------------------------------------
*
* Function: FPT_Wait
*
* Description: Wait the desired delay.
*
*---------------------------------------------------------------------*/
/*
 * Delay for p_delay using the selection-timeout timer: save the current
 * timeout and clock settings, mask the TIMEOUT interrupt, start the
 * timer, and spin until it expires (or a SCSI reset / SCAM selection
 * aborts the wait).  All touched registers are restored before return.
 */
static void FPT_Wait(unsigned long p_port, unsigned char p_delay)
{
unsigned char old_timer;
unsigned char green_flag;
/* Save state we are about to clobber. */
old_timer = RD_HARPOON(p_port + hp_seltimeout);
green_flag = RD_HARPOON(p_port + hp_clkctrl_0);
WR_HARPOON(p_port + hp_clkctrl_0, CLKCTRL_DEFAULT);
WR_HARPOON(p_port + hp_seltimeout, p_delay);
WRW_HARPOON((p_port + hp_intstat), TIMEOUT);
/* Poll TIMEOUT ourselves rather than taking the interrupt. */
WRW_HARPOON((p_port + hp_intena), (FPT_default_intena & ~TIMEOUT));
WR_HARPOON(p_port + hp_portctrl_0,
(RD_HARPOON(p_port + hp_portctrl_0) | START_TO));
while (!(RDW_HARPOON((p_port + hp_intstat)) & TIMEOUT)) {
if ((RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST))
break;
if ((RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL))
break;
}
/* Stop the timer and restore the saved state. */
WR_HARPOON(p_port + hp_portctrl_0,
(RD_HARPOON(p_port + hp_portctrl_0) & ~START_TO));
WRW_HARPOON((p_port + hp_intstat), TIMEOUT);
WRW_HARPOON((p_port + hp_intena), FPT_default_intena);
WR_HARPOON(p_port + hp_clkctrl_0, green_flag);
WR_HARPOON(p_port + hp_seltimeout, old_timer);
}
/*---------------------------------------------------------------------
*
* Function: Enable/Disable Write to EEPROM
*
* Description: The EEPROM must first be enabled for writes
* A total of 9 clocks are needed.
*
*---------------------------------------------------------------------*/
/*
 * Enable (p_mode != 0) or disable write access to the serial EEPROM by
 * clocking out the EWEN or EWDS command, then deselect the part (drop
 * chip select, then master select) while preserving the arbitration and
 * termination control bits in hp_ee_ctrl.
 */
static void FPT_utilEEWriteOnOff(unsigned long p_port, unsigned char p_mode)
{
unsigned char ee_value;
ee_value =
(unsigned char)(RD_HARPOON(p_port + hp_ee_ctrl) &
(EXT_ARB_ACK | SCSI_TERM_ENA_H));
if (p_mode)
FPT_utilEESendCmdAddr(p_port, EWEN, EWEN_ADDR);
else
FPT_utilEESendCmdAddr(p_port, EWDS, EWDS_ADDR);
WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /*Turn off CS */
WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /*Turn off Master Select */
}
/*---------------------------------------------------------------------
*
* Function: Write EEPROM
*
* Description: Write a word to the EEPROM at the specified
* address.
*
*---------------------------------------------------------------------*/
/*
 * Bit-bang a 16-bit word into the serial EEPROM at ee_addr: send the
 * write command + address, clock out the data MSB-first (each bit held
 * for two writes around a SEE_CLK pulse), then cycle chip select with a
 * 10ms wait for the part's internal programming cycle to finish.
 */
static void FPT_utilEEWrite(unsigned long p_port, unsigned short ee_data,
unsigned short ee_addr)
{
unsigned char ee_value;
unsigned short i;
ee_value =
(unsigned
char)((RD_HARPOON(p_port + hp_ee_ctrl) &
(EXT_ARB_ACK | SCSI_TERM_ENA_H)) | (SEE_MS | SEE_CS));
FPT_utilEESendCmdAddr(p_port, EE_WRITE, ee_addr);
ee_value |= (SEE_MS + SEE_CS);
/* Shift the word out MSB-first on SEE_DO, pulsing SEE_CLK per bit. */
for (i = 0x8000; i != 0; i >>= 1) {
if (i & ee_data)
ee_value |= SEE_DO;
else
ee_value &= ~SEE_DO;
WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
ee_value |= SEE_CLK; /* Clock data! */
WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
ee_value &= ~SEE_CLK;
WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
}
ee_value &= (EXT_ARB_ACK | SCSI_TERM_ENA_H);
/* Drop CS to start programming, then wait for it to complete. */
WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS));
FPT_Wait(p_port, TO_10ms);
WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS | SEE_CS)); /* Set CS to EEPROM */
WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /* Turn off CS */
WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /* Turn off Master Select */
}
/*---------------------------------------------------------------------
*
* Function: Read EEPROM
*
* Description: Read a word from the EEPROM at the desired
* address.
*
*---------------------------------------------------------------------*/
/*
 * Read the 16-bit EEPROM word at ee_addr, re-reading until two
 * consecutive reads agree (up to four retries) to guard against a
 * glitched serial transfer.  If no two consecutive reads ever match,
 * the value of the final read is returned anyway.
 */
static unsigned short FPT_utilEERead(unsigned long p_port,
                                     unsigned short ee_addr)
{
        unsigned short prev, curr;
        unsigned short attempt;

        prev = FPT_utilEEReadOrg(p_port, ee_addr);
        for (attempt = 0; attempt < 4; attempt++) {
                curr = FPT_utilEEReadOrg(p_port, ee_addr);
                if (curr == prev)
                        return curr;
                prev = curr;
        }
        return prev;
}
/*---------------------------------------------------------------------
*
* Function: Read EEPROM Original
*
* Description: Read a word from the EEPROM at the desired
* address.
*
*---------------------------------------------------------------------*/
/*
 * Single raw read of the 16-bit EEPROM word at ee_addr.  Clocks out
 * the READ command and address, then shifts 16 data bits in, MSB
 * first.  Each clock phase is written twice, as everywhere in this
 * driver — presumably a timing requirement; do not collapse.
 */
static unsigned short FPT_utilEEReadOrg(unsigned long p_port,
                                        unsigned short ee_addr)
{
        unsigned char ee_value;
        unsigned short i, ee_data;

        /* Keep arbiter/termination bits; assert Master Select and CS. */
        ee_value =
            (unsigned
             char)((RD_HARPOON(p_port + hp_ee_ctrl) &
                    (EXT_ARB_ACK | SCSI_TERM_ENA_H)) | (SEE_MS | SEE_CS));

        FPT_utilEESendCmdAddr(p_port, EE_READ, ee_addr);

        ee_value |= (SEE_MS + SEE_CS);

        ee_data = 0;

        for (i = 1; i <= 16; i++) {

                ee_value |= SEE_CLK; /* Clock data! */
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                ee_value &= ~SEE_CLK;
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);

                /* Sample the data-in line after the falling clock edge. */
                ee_data <<= 1;

                if (RD_HARPOON(p_port + hp_ee_ctrl) & SEE_DI)
                        ee_data |= 1;
        }

        ee_value &= ~(SEE_MS + SEE_CS);
        WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /* Turn off CS */
        WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /* Turn off Master Select */

        return ee_data;
}
/*---------------------------------------------------------------------
*
* Function: Send EE command and Address to the EEPROM
*
* Description: Transfers the correct command and sends the address
* to the eeprom.
*
*---------------------------------------------------------------------*/
/*
 * Shift a 3-bit EEPROM command (ee_cmd) followed by the word address
 * (ee_addr) out on the serial EEPROM interface.  Narrow cards shift
 * 8 address bits, wide cards 10 (start mask 0x0080 vs 0x0200) —
 * presumably because they carry different-size EEPROM parts; confirm
 * against the board documentation.  Chip select is left asserted so
 * the caller can continue the transaction.
 */
static void FPT_utilEESendCmdAddr(unsigned long p_port, unsigned char ee_cmd,
                                  unsigned short ee_addr)
{
        unsigned char ee_value;
        unsigned char narrow_flg;
        unsigned short i;

        narrow_flg =
            (unsigned char)(RD_HARPOON(p_port + hp_page_ctrl) &
                            NARROW_SCSI_CARD);

        ee_value = SEE_MS;
        WR_HARPOON(p_port + hp_ee_ctrl, ee_value);

        ee_value |= SEE_CS; /* Set CS to EEPROM */
        WR_HARPOON(p_port + hp_ee_ctrl, ee_value);

        /* Command bits, MSB first (3 bits: mask 0x04 down to 0x01). */
        for (i = 0x04; i != 0; i >>= 1) {
                if (i & ee_cmd)
                        ee_value |= SEE_DO;
                else
                        ee_value &= ~SEE_DO;

                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                ee_value |= SEE_CLK; /* Clock data! */
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                ee_value &= ~SEE_CLK;
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
        }

        if (narrow_flg)
                i = 0x0080;
        else
                i = 0x0200;

        /* Address bits, MSB first. */
        while (i != 0) {
                if (i & ee_addr)
                        ee_value |= SEE_DO;
                else
                        ee_value &= ~SEE_DO;

                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                ee_value |= SEE_CLK; /* Clock data! */
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                ee_value &= ~SEE_CLK;
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
                WR_HARPOON(p_port + hp_ee_ctrl, ee_value);

                i >>= 1;
        }
}
/*
 * Compute the 16-bit CRC of the first ID_STRING_LENGTH bytes of
 * buffer, processing each byte LSB first with polynomial mask
 * CRCMASK and an all-zero initial value.
 */
static unsigned short FPT_CalcCrc16(unsigned char buffer[])
{
        unsigned short crc = 0;
        int byte_idx, bit;

        for (byte_idx = 0; byte_idx < ID_STRING_LENGTH; byte_idx++) {
                unsigned short ch = (unsigned short)buffer[byte_idx];

                for (bit = 0; bit < 8; bit++) {
                        crc = ((crc ^ ch) & 1) ? (crc >> 1) ^ CRCMASK
                                               : crc >> 1;
                        ch >>= 1;
                }
        }
        return crc;
}
/*
 * Longitudinal redundancy check: XOR of the first ID_STRING_LENGTH
 * bytes of buffer.
 */
static unsigned char FPT_CalcLrc(unsigned char buffer[])
{
        unsigned char accum = 0;
        int idx;

        for (idx = 0; idx < ID_STRING_LENGTH; idx++)
                accum ^= buffer[idx];
        return accum;
}
/*
The following inline definitions avoid type conflicts.
*/
/* Cast-through shim: forwards to the SCCB manager's probe, reconciling
 * the BusLogic driver's FlashPoint_Info type with sccb_mgr_info. */
static inline unsigned char
FlashPoint__ProbeHostAdapter(struct FlashPoint_Info *FlashPointInfo)
{
        return FlashPoint_ProbeHostAdapter((struct sccb_mgr_info *)
                                           FlashPointInfo);
}
/* Cast-through shim for the hardware-reset entry point (type shim,
 * same rationale as FlashPoint__ProbeHostAdapter). */
static inline FlashPoint_CardHandle_T
FlashPoint__HardwareResetHostAdapter(struct FlashPoint_Info *FlashPointInfo)
{
        return FlashPoint_HardwareResetHostAdapter((struct sccb_mgr_info *)
                                                   FlashPointInfo);
}
/* Pass-through shim: release the SCCB manager card handle. */
static inline void
FlashPoint__ReleaseHostAdapter(FlashPoint_CardHandle_T CardHandle)
{
        FlashPoint_ReleaseHostAdapter(CardHandle);
}
/* Cast-through shim: submit a BusLogic CCB to the SCCB manager. */
static inline void
FlashPoint__StartCCB(FlashPoint_CardHandle_T CardHandle,
                     struct BusLogic_CCB *CCB)
{
        FlashPoint_StartCCB(CardHandle, (struct sccb *)CCB);
}
/* Cast-through shim: abort a previously submitted BusLogic CCB. */
static inline void
FlashPoint__AbortCCB(FlashPoint_CardHandle_T CardHandle,
                     struct BusLogic_CCB *CCB)
{
        FlashPoint_AbortCCB(CardHandle, (struct sccb *)CCB);
}
/* Pass-through shim: query the SCCB manager for a pending interrupt. */
static inline bool
FlashPoint__InterruptPending(FlashPoint_CardHandle_T CardHandle)
{
        return FlashPoint_InterruptPending(CardHandle);
}
/* Pass-through shim: run the SCCB manager's interrupt handler. */
static inline int
FlashPoint__HandleInterrupt(FlashPoint_CardHandle_T CardHandle)
{
        return FlashPoint_HandleInterrupt(CardHandle);
}
#define FlashPoint_ProbeHostAdapter FlashPoint__ProbeHostAdapter
#define FlashPoint_HardwareResetHostAdapter FlashPoint__HardwareResetHostAdapter
#define FlashPoint_ReleaseHostAdapter FlashPoint__ReleaseHostAdapter
#define FlashPoint_StartCCB FlashPoint__StartCCB
#define FlashPoint_AbortCCB FlashPoint__AbortCCB
#define FlashPoint_InterruptPending FlashPoint__InterruptPending
#define FlashPoint_HandleInterrupt FlashPoint__HandleInterrupt
#else /* !CONFIG_SCSI_FLASHPOINT */
/*
Define prototypes for the FlashPoint SCCB Manager Functions.
*/
extern unsigned char FlashPoint_ProbeHostAdapter(struct FlashPoint_Info *);
extern FlashPoint_CardHandle_T
FlashPoint_HardwareResetHostAdapter(struct FlashPoint_Info *);
extern void FlashPoint_StartCCB(FlashPoint_CardHandle_T, struct BusLogic_CCB *);
extern int FlashPoint_AbortCCB(FlashPoint_CardHandle_T, struct BusLogic_CCB *);
extern bool FlashPoint_InterruptPending(FlashPoint_CardHandle_T);
extern int FlashPoint_HandleInterrupt(FlashPoint_CardHandle_T);
extern void FlashPoint_ReleaseHostAdapter(FlashPoint_CardHandle_T);
#endif /* CONFIG_SCSI_FLASHPOINT */
| gpl-2.0 |
CyanideL/android_kernel_samsung_klte | arch/alpha/kernel/smc37c93x.c | 11950 | 6348 | /*
* SMC 37C93X initialization code
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/hwrpb.h>
#include <asm/io.h>
#include <asm/segment.h>
#define SMC_DEBUG 0
#if SMC_DEBUG
# define DBG_DEVS(args) printk args
#else
# define DBG_DEVS(args)
#endif
#define KB 1024
#define MB (1024*KB)
#define GB (1024*MB)
/* device "activate" register contents */
#define DEVICE_ON 1
#define DEVICE_OFF 0
/* configuration on/off keys */
#define CONFIG_ON_KEY 0x55
#define CONFIG_OFF_KEY 0xaa
/* configuration space device definitions */
#define FDC 0
#define IDE1 1
#define IDE2 2
#define PARP 3
#define SER1 4
#define SER2 5
#define RTCL 6
#define KYBD 7
#define AUXIO 8
/* Chip register offsets from base */
#define CONFIG_CONTROL 0x02
#define INDEX_ADDRESS 0x03
#define LOGICAL_DEVICE_NUMBER 0x07
#define DEVICE_ID 0x20
#define DEVICE_REV 0x21
#define POWER_CONTROL 0x22
#define POWER_MGMT 0x23
#define OSC 0x24
#define ACTIVATE 0x30
#define ADDR_HI 0x60
#define ADDR_LO 0x61
#define INTERRUPT_SEL 0x70
#define INTERRUPT_SEL_2 0x72 /* KYBD/MOUS only */
#define DMA_CHANNEL_SEL 0x74 /* FDC/PARP only */
#define FDD_MODE_REGISTER 0x90
#define FDD_OPTION_REGISTER 0x91
/* values that we read back that are expected ... */
#define VALID_DEVICE_ID 2
/* default device addresses */
#define KYBD_INTERRUPT 1
#define MOUS_INTERRUPT 12
#define COM2_BASE 0x2f8
#define COM2_INTERRUPT 3
#define COM1_BASE 0x3f8
#define COM1_INTERRUPT 4
#define PARP_BASE 0x3bc
#define PARP_INTERRUPT 7
/*
 * Try to put the SMC chip at baseAddr into configuration mode by
 * writing the "config on" key twice, then verify by reading back the
 * device-id register.  Retries a few times with a small delay.
 *
 * Returns baseAddr when the chip answered with the expected id, 0 on
 * failure.
 */
static unsigned long __init SMCConfigState(unsigned long baseAddr)
{
        unsigned long configPort = baseAddr;
        unsigned long indexPort = baseAddr;
        unsigned long dataPort = baseAddr + 1;
        int attempt;

#define NUM_RETRIES 5

        for (attempt = 0; attempt < NUM_RETRIES; attempt++) {
                unsigned char devId;

                /* Two consecutive key writes enter configuration mode. */
                outb(CONFIG_ON_KEY, configPort);
                outb(CONFIG_ON_KEY, configPort);
                outb(DEVICE_ID, indexPort);
                devId = inb(dataPort);
                if (devId == VALID_DEVICE_ID) {
                        /* Read (and discard) the revision register. */
                        outb(DEVICE_REV, indexPort);
                        inb(dataPort);
                        break;
                }
                udelay(100);
        }

        return (attempt != NUM_RETRIES) ? baseAddr : 0L;
}
/* Write the "config off" key to exit configuration mode and return the
 * SMC part to normal (run) operation. */
static void __init SMCRunState(unsigned long baseAddr)
{
        outb(CONFIG_OFF_KEY, baseAddr);
}
/*
 * Probe the two legacy base addresses the SMC FDC37C93X can decode
 * (0x3F0 first, then 0x370).  SMCConfigState() echoes the address back
 * when the chip answers there, or returns 0 when it does not; the
 * first responding address is returned, 0 if neither responds.
 */
static unsigned long __init SMCDetectUltraIO(void)
{
        static const unsigned long candidates[] = { 0x3F0, 0x370 };
        unsigned int i;

        for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
                if (SMCConfigState(candidates[i]) == candidates[i])
                        return candidates[i];
        }
        return 0;
}
/*
 * Program one logical device of the SMC Ultra I/O chip: select it via
 * the logical-device-number register, set its I/O base address and
 * IRQ, then activate it.  Must be called while the chip is in
 * configuration mode (see SMCConfigState).
 */
static void __init SMCEnableDevice(unsigned long baseAddr,
                                   unsigned long device,
                                   unsigned long portaddr,
                                   unsigned long interrupt)
{
        unsigned long indexPort;
        unsigned long dataPort;

        indexPort = baseAddr;
        dataPort = baseAddr + 1;

        outb(LOGICAL_DEVICE_NUMBER, indexPort);
        outb(device, dataPort);

        /* I/O base, low byte then high byte. */
        outb(ADDR_LO, indexPort);
        outb((portaddr & 0xFF), dataPort);

        outb(ADDR_HI, indexPort);
        outb((portaddr >> 8) & 0xFF, dataPort);

        outb(INTERRUPT_SEL, indexPort);
        outb(interrupt, dataPort);

        outb(ACTIVATE, indexPort);
        outb(DEVICE_ON, dataPort);
}
/*
 * Enable the keyboard controller logical device with the conventional
 * IRQs: 1 for the keyboard and 12 for the mouse (secondary interrupt).
 * Must be called while the chip is in configuration mode.
 */
static void __init SMCEnableKYBD(unsigned long baseAddr)
{
        unsigned long indexPort;
        unsigned long dataPort;

        indexPort = baseAddr;
        dataPort = baseAddr + 1;

        outb(LOGICAL_DEVICE_NUMBER, indexPort);
        outb(KYBD, dataPort);

        outb(INTERRUPT_SEL, indexPort); /* Primary interrupt select */
        outb(KYBD_INTERRUPT, dataPort);

        outb(INTERRUPT_SEL_2, indexPort); /* Secondary interrupt select */
        outb(MOUS_INTERRUPT, dataPort);

        outb(ACTIVATE, indexPort);
        outb(DEVICE_ON, dataPort);
}
/*
 * Enable the floppy controller logical device: turn on burst mode in
 * the FDD mode register (read-modify-write), select IRQ 6 and DMA
 * channel 2 (the conventional PC FDC resources), then activate it.
 * Must be called while the chip is in configuration mode.
 */
static void __init SMCEnableFDC(unsigned long baseAddr)
{
        unsigned long indexPort;
        unsigned long dataPort;
        unsigned char oldValue;

        indexPort = baseAddr;
        dataPort = baseAddr + 1;

        outb(LOGICAL_DEVICE_NUMBER, indexPort);
        outb(FDC, dataPort);

        outb(FDD_MODE_REGISTER, indexPort);
        oldValue = inb(dataPort);
        oldValue |= 0x0E; /* Enable burst mode */
        outb(oldValue, dataPort);

        outb(INTERRUPT_SEL, indexPort); /* Primary interrupt select */
        outb(0x06, dataPort);

        outb(DMA_CHANNEL_SEL, indexPort); /* DMA channel select */
        outb(0x02, dataPort);

        outb(ACTIVATE, indexPort);
        outb(DEVICE_ON, dataPort);
}
#if SMC_DEBUG
/*
 * Debug helper (compiled only with SMC_DEBUG): dump which logical
 * devices the chip's power-control register reports as enabled.
 * Must be called while the chip is in configuration mode.
 */
static void __init SMCReportDeviceStatus(unsigned long baseAddr)
{
        unsigned long indexPort;
        unsigned long dataPort;
        unsigned char currentControl;

        indexPort = baseAddr;
        dataPort = baseAddr + 1;

        outb(POWER_CONTROL, indexPort);
        currentControl = inb(dataPort);

        /* One bit per logical device, indexed by the FDC/IDE1/... enums. */
        printk(currentControl & (1 << FDC)
               ? "\t+FDC Enabled\n" : "\t-FDC Disabled\n");
        printk(currentControl & (1 << IDE1)
               ? "\t+IDE1 Enabled\n" : "\t-IDE1 Disabled\n");
        printk(currentControl & (1 << IDE2)
               ? "\t+IDE2 Enabled\n" : "\t-IDE2 Disabled\n");
        printk(currentControl & (1 << PARP)
               ? "\t+PARP Enabled\n" : "\t-PARP Disabled\n");
        printk(currentControl & (1 << SER1)
               ? "\t+SER1 Enabled\n" : "\t-SER1 Disabled\n");
        printk(currentControl & (1 << SER2)
               ? "\t+SER2 Enabled\n" : "\t-SER2 Disabled\n");

        printk( "\n" );
}
#endif
/*
 * Detect and configure the SMC FDC37C93X Ultra I/O controller.
 * With interrupts disabled, probe the chip; if found, enable the two
 * serial ports, the parallel port, the keyboard/mouse controller and
 * the floppy controller at their conventional resources, then leave
 * configuration mode.
 *
 * Returns 1 if the chip was found and configured, 0 otherwise.
 */
int __init SMC93x_Init(void)
{
        unsigned long SMCUltraBase;
        unsigned long flags;

        /* Configuration-mode register accesses must not be interleaved
         * with other port I/O, so keep interrupts off throughout. */
        local_irq_save(flags);
        if ((SMCUltraBase = SMCDetectUltraIO()) != 0UL) {
#if SMC_DEBUG
                SMCReportDeviceStatus(SMCUltraBase);
#endif
                SMCEnableDevice(SMCUltraBase, SER1, COM1_BASE, COM1_INTERRUPT);
                DBG_DEVS(("SMC FDC37C93X: SER1 done\n"));
                SMCEnableDevice(SMCUltraBase, SER2, COM2_BASE, COM2_INTERRUPT);
                DBG_DEVS(("SMC FDC37C93X: SER2 done\n"));
                SMCEnableDevice(SMCUltraBase, PARP, PARP_BASE, PARP_INTERRUPT);
                DBG_DEVS(("SMC FDC37C93X: PARP done\n"));
                /* On PC164, IDE on the SMC is not enabled;
                   CMD646 (PCI) on MB */
                SMCEnableKYBD(SMCUltraBase);
                DBG_DEVS(("SMC FDC37C93X: KYB done\n"));
                SMCEnableFDC(SMCUltraBase);
                DBG_DEVS(("SMC FDC37C93X: FDC done\n"));
#if SMC_DEBUG
                SMCReportDeviceStatus(SMCUltraBase);
#endif
                SMCRunState(SMCUltraBase);
                local_irq_restore(flags);
                printk("SMC FDC37C93X Ultra I/O Controller found @ 0x%lx\n",
                       SMCUltraBase);
                return 1;
        }
        else {
                local_irq_restore(flags);
                DBG_DEVS(("No SMC FDC37C93X Ultra I/O Controller found\n"));
                return 0;
        }
}
| gpl-2.0 |
zanezam/boeffla-kernel-samsung-s3 | drivers/gpu/vithar_rev0/kbase/src/common/mali_kbase_8401_workaround.c | 175 | 11932 | /*
*
* (C) COPYRIGHT 2011-2012 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
/**
* @file mali_kbase_8401_workaround.c
* Functions related to working around BASE_HW_ISSUE_8401
*/
#include <kbase/src/common/mali_kbase.h>
#include <kbase/src/common/mali_kbase_defs.h>
#include <kbase/src/common/mali_kbase_jm.h>
#include <kbase/src/common/mali_kbase_8401_workaround.h>
#if BASE_HW_ISSUE_8401
#define WORKAROUND_PAGE_OFFSET (2)
#define URT_POINTER_INDEX (20)
#define RMU_POINTER_INDEX (23)
#define RSD_POINTER_INDEX (24)
#define TSD_POINTER_INDEX (31)
static const u32 compute_job_32bit_header[] =
{
/* Job Descriptor Header */
/* Job Status */
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Flags and Indices */
/* job_type = compute shader job */
0x00000008, 0x00000000,
/* Pointer to next job */
0x00000000,
/* Reserved */
0x00000000,
/* Job Dimension Data */
0x0000000f, 0x21040842,
/* Task Split */
0x08000000,
/* Reserved */
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Draw Call Descriptor - 32 bit (Must be aligned to a 64-byte boundry) */
/* Flags */
0x00000004,
/* Primary Attribute Offset */
0x00000000,
/* Primitive Index Base Value */
0x00000000,
/* Pointer To Vertex Position Array (64-byte alignment) */
0x00000000,
/* Pointer To Uniform Remapping Table (8-byte alignment) */
0,
/* Pointer To Image Descriptor Pointer Table */
0x00000000,
/* Pointer To Sampler Array */
0x00000000,
/* Pointer To Register-Mapped Uniform Data Area (16-byte alignment) */
0,
/* Pointer To Renderer State Descriptor (64-byte alignment) */
0,
/* Pointer To Primary Attribute Buffer Array */
0x00000000,
/* Pointer To Primary Attribute Array */
0x00000000,
/* Pointer To Secondary Attribute Buffer Array */
0x00000000,
/* Pointer To Secondary Attribute Array */
0x00000000,
/* Pointer To Viewport Descriptor */
0x00000000,
/* Pointer To Occlusion Query Result */
0x00000000,
/* Pointer To Thread Storage (64 byte alignment) */
0,
};
static const u32 compute_job_32bit_urt[] =
{
/* Uniform Remapping Table Entry */
0, 0,
};
static const u32 compute_job_32bit_rmu[] =
{
/* Register Mapped Uniform Data Area (16 byte aligned) */
0x00000000, 0x00000000,
0x00000000, 0x00000000,
};
static const u32 compute_job_32bit_rsd[] =
{
/* Renderer State Descriptor */
/* Shader program inital PC (low) */
0x00000001,
/* Shader program initial PC (high) */
0x00000000,
/* Image descriptor array sizes */
0x00000000,
/* Attribute array sizes */
0x00000000,
/* Uniform array size and Shader Flags */
/* Flags set: R, D, SE, FPM */
0x40003800,
/* Depth bias */
0x00000000,
/* Depth slope bias */
0x00000000,
/* Depth bias clamp */
0x00000000,
/* Multisample Write Mask and Flags */
0x00000000,
/* Stencil Write Masks and Alpha parameters */
0x00000000,
/* Stencil tests - forward facing */
0x00000000,
/* Stencel tests - back facing */
0x00000000,
/* Alpha Test Reference Value */
0x00000000,
/* Thread Balancing Information */
0x00000000,
/* Blend Parameters or Pointer (low) */
0x00000000,
/* Blend Parameters or Pointer (high) */
0x00000000,
};
static const u32 compute_job_32bit_tsd[] =
{
/* Thread Storage Descriptor */
/* Thread Local Storage Sizes */
0x00000000,
/* Workgroup Local Memory Area Flags */
0x0000001f,
/* Pointer to Local Storage Area */
0x00021000, 0x00000001,
/* Pointer to Workgroup Local Storage Area */
0x00000000, 0x00000000,
/* Pointer to Shader Exception Handler */
0x00000000, 0x00000000
};
static kbase_jd_atom dummy_job_atom[KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT];
/**
* Initialize the compute job sturcture.
*/
/*
 * Build one dummy compute job inside the page at dummy_compute_job.
 * The template sections (header, URT, RMU, RSD, TSD) are laid out in
 * that order with their required alignments (8/16/64/64 bytes), and
 * the header's pointer slots are patched to the GPU virtual addresses
 * the page will get once mapped at (page_nr + WORKAROUND_PAGE_OFFSET)
 * pages (see the kbase_mmu_insert_pages call in
 * kbasep_8401_workaround_init).
 *
 * NOTE(review): pointers are manipulated via u32 casts, so this
 * assumes a 32-bit CPU address space — confirm for the target port.
 */
static void kbasep_8401_workaround_update_job_pointers(u32 *dummy_compute_job, int page_nr)
{
        u32 base_address = (page_nr+WORKAROUND_PAGE_OFFSET)*OSK_PAGE_SIZE;
        u8 *dummy_job = (u8*) dummy_compute_job;
        u8 *dummy_job_urt;
        u8 *dummy_job_rmu;
        u8 *dummy_job_rsd;
        u8 *dummy_job_tsd;

        OSK_ASSERT(dummy_compute_job);

        /* determine where each job section goes taking alignment
         * restrictions into consideration (8/16/64/64-byte rounding) */
        dummy_job_urt = (u8*) ((((u32)dummy_job + sizeof(compute_job_32bit_header))+7) & ~7);
        dummy_job_rmu = (u8*) ((((u32)dummy_job_urt + sizeof(compute_job_32bit_urt))+15) & ~15);
        dummy_job_rsd = (u8*) ((((u32)dummy_job_rmu + sizeof(compute_job_32bit_rmu))+63) & ~63);
        dummy_job_tsd = (u8*) ((((u32)dummy_job_rsd + sizeof(compute_job_32bit_rsd))+63) & ~63);

        /* Make sure the job fits within a single page */
        OSK_ASSERT(OSK_PAGE_SIZE > ((dummy_job_tsd+sizeof(compute_job_32bit_tsd)) - dummy_job));

        /* Copy the template job sections to the allocated memory */
        memcpy(dummy_job, compute_job_32bit_header, sizeof(compute_job_32bit_header));
        memcpy(dummy_job_urt, compute_job_32bit_urt, sizeof(compute_job_32bit_urt));
        memcpy(dummy_job_rmu, compute_job_32bit_rmu, sizeof(compute_job_32bit_rmu));
        memcpy(dummy_job_rsd, compute_job_32bit_rsd, sizeof(compute_job_32bit_rsd));
        memcpy(dummy_job_tsd, compute_job_32bit_tsd, sizeof(compute_job_32bit_tsd));

        /* Update header pointers: CPU offsets rebased onto the GPU VA */
        *(dummy_compute_job + URT_POINTER_INDEX) = (dummy_job_urt - dummy_job) + base_address;
        *(dummy_compute_job + RMU_POINTER_INDEX) = (dummy_job_rmu - dummy_job) + base_address;
        *(dummy_compute_job + RSD_POINTER_INDEX) = (dummy_job_rsd - dummy_job) + base_address;
        *(dummy_compute_job + TSD_POINTER_INDEX) = (dummy_job_tsd - dummy_job) + base_address;

        /* Update URT pointer (RMU address split across two words:
         * low 24 bits shifted up in word 0, high 8 bits in word 1) */
        *((u32*)dummy_job_urt+0) = (((dummy_job_rmu - dummy_job) + base_address) << 8) & 0xffffff00;
        *((u32*)dummy_job_urt+1) = (((dummy_job_rmu - dummy_job) + base_address) >> 24) & 0xff;
}
/**
* Initialize the memory for 8401 workaround.
*/
/*
 * Set up the BASE_HW_ISSUE_8401 workaround: reserve one GPU address
 * space, create a private kbase context, allocate and map one page per
 * job slot, and write a dummy compute job into each page.
 *
 * On failure all partially acquired resources are released (goto
 * cleanup chain) and MALI_ERROR_FUNCTION_FAILED is returned.
 */
mali_error kbasep_8401_workaround_init(kbase_device *kbdev)
{
        kbase_context *workaround_kctx;
        u32 count;
        int i;

        OSK_ASSERT(kbdev);
        OSK_ASSERT(kbdev->workaround_kctx == NULL);

        /* For this workaround we reserve one address space to allow us to
         * submit a special job independent of other contexts */
        kbdev->nr_address_spaces--;

        workaround_kctx = kbase_create_context(kbdev);
        if(!workaround_kctx)
        {
                return MALI_ERROR_FUNCTION_FAILED;
        }

        /* Allocate the pages required to contain the job */
        count = kbase_phy_pages_alloc(workaround_kctx->kbdev,
                                      &workaround_kctx->pgd_allocator,
                                      KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT,
                                      kbdev->workaround_compute_job_pa);
        if(count < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT)
        {
                goto page_release;
        }

        /* Get virtual address of mapped memory and write a compute job for each page */
        for(i = 0; i < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT; i++)
        {
                kbdev->workaround_compute_job_va[i] = osk_kmap(kbdev->workaround_compute_job_pa[i]);
                if(NULL == kbdev->workaround_compute_job_va[i])
                {
                        goto page_free;
                }

                /* Generate the compute job data */
                kbasep_8401_workaround_update_job_pointers((u32*)kbdev->workaround_compute_job_va[i], i);
        }

        /* Insert pages to the gpu mmu. */
        kbase_mmu_insert_pages(workaround_kctx,
                               /* vpfn = page number */
                               (u64)WORKAROUND_PAGE_OFFSET,
                               /* physical address */
                               kbdev->workaround_compute_job_pa,
                               /* number of pages */
                               KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT,
                               /* flags */
                               KBASE_REG_CPU_RW|KBASE_REG_GPU_RW);

        kbdev->workaround_kctx = workaround_kctx;
        return MALI_ERROR_NONE;

page_free:
        /* Unmap only the pages mapped so far (i counts completed maps). */
        while(i--)
        {
                osk_kunmap(kbdev->workaround_compute_job_pa[i], kbdev->workaround_compute_job_va[i]);
        }
page_release:
        kbase_phy_pages_free(kbdev, &workaround_kctx->pgd_allocator, count, kbdev->workaround_compute_job_pa);
        kbase_destroy_context(workaround_kctx);

        return MALI_ERROR_FUNCTION_FAILED;
}
/**
* Free up the memory used by 8401 workaround.
**/
/*
 * Tear down the 8401 workaround: unmap and free the per-slot job
 * pages, destroy the private context, and give the reserved address
 * space back to the device.  Inverse of kbasep_8401_workaround_init.
 */
void kbasep_8401_workaround_term(kbase_device *kbdev)
{
        int i;

        OSK_ASSERT(kbdev);
        OSK_ASSERT(kbdev->workaround_kctx);

        for(i = 0; i < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT; i++)
        {
                osk_kunmap(kbdev->workaround_compute_job_pa[i], kbdev->workaround_compute_job_va[i]);
        }

        kbase_phy_pages_free(kbdev, &kbdev->workaround_kctx->pgd_allocator, KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, kbdev->workaround_compute_job_pa);

        kbase_destroy_context(kbdev->workaround_kctx);
        kbdev->workaround_kctx = NULL;

        /* Free up the workaround address space */
        kbdev->nr_address_spaces++;
}
/**
* Submit the 8401 workaround job.
**/
/*
 * Submit the pre-built dummy compute job for slot @js using the
 * reserved address space: program the workaround context's page tables
 * into that AS, point the slot's NEXT registers at the dummy job chain
 * and the previous job's affinity, then start it and record the
 * submission in the job manager's slot queue.
 *
 * Skipped when the slot's previous affinity is zero (no job has run on
 * the slot yet).  Never used on slot 0.
 */
void kbasep_8401_submit_dummy_job(kbase_device *kbdev, int js)
{
        u32 cfg;
        mali_addr64 jc;

        /* While this workaround is active we reserve the last address space just for submitting the dummy jobs */
        int as = kbdev->nr_address_spaces;

        /* Don't issue compute jobs on job slot 0 */
        OSK_ASSERT(js != 0);
        OSK_ASSERT(js < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT);

        /* Job chain GPU address */
        jc = (js+WORKAROUND_PAGE_OFFSET)*OSK_PAGE_SIZE; /* GPU phys address (see kbase_mmu_insert_pages call in kbasep_8401_workaround_init*/

        /* Clear the job status words which may contain values from a previous job completion */
        memset(kbdev->workaround_compute_job_va[js], 0, 4*sizeof(u32));

        /* Get the affinity of the previous job */
        dummy_job_atom[js].affinity = ((u64)kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_LO), NULL)) |
                                      (((u64)kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_HI), NULL)) << 32);

        /* Don't submit a compute job if the affinity was previously zero (i.e. no jobs have run yet on this slot) */
        if(!dummy_job_atom[js].affinity)
        {
                return;
        }

        /* Ensure that our page tables are programmed into the MMU */
        kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_LO),
                        (kbdev->workaround_kctx->pgd & 0xFFFFF000) | (1ul << 2) | 3, NULL);
        kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_HI),
                        (kbdev->workaround_kctx->pgd >> 32), NULL);
        kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_LO), 0x48484848, NULL);
        kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_HI), 0x48484848, NULL);
        kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);

        kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), jc & 0xFFFFFFFF, NULL);
        kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), jc >> 32, NULL);

        kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_LO), dummy_job_atom[js].affinity & 0xFFFFFFFF, NULL);
        kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_HI), dummy_job_atom[js].affinity >> 32, NULL);

        /* start MMU, medium priority, cache clean/flush on end, clean/flush on start */
        cfg = as | (3 << 12) | (1 << 10) | (8 << 16) | (3 << 8);
        kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_CONFIG_NEXT), cfg, NULL);

        KBASE_TRACE_ADD_SLOT( kbdev, JM_SUBMIT, NULL, 0, jc, js );

        kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_START, NULL);

        /* Report that the job has been submitted */
        kbasep_jm_enqueue_submit_slot(&kbdev->jm_slots[js], &dummy_job_atom[js]);
}
/**
* Check if the katom given is a dummy compute job.
*/
/*
 * Return MALI_TRUE if @katom is one of the statically allocated dummy
 * workaround atoms, MALI_FALSE for a real job.  Entry 0 is skipped
 * because slot 0 never runs the workaround.
 */
mali_bool kbasep_8401_is_workaround_job(kbase_jd_atom *katom)
{
        int idx;

        for (idx = 1; idx < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT; idx++) {
                if (&dummy_job_atom[idx] == katom)
                        return MALI_TRUE;
        }

        return MALI_FALSE;
}
#endif
| gpl-2.0 |
JohnKDay/linux | drivers/net/dsa/mv88e6171.c | 175 | 3913 | /* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
* Copyright (c) 2008-2009 Marvell Semiconductor
* Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/dsa.h>
#include "mv88e6xxx.h"
/*
 * DSA probe: read the switch-id register of port 0 and return the
 * marketing name of the matching chip, or NULL if the bus is missing,
 * the read fails, or the id is not one of the supported parts.
 */
static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
{
        static const struct {
                int id;
                char *name;
        } models[] = {
                { PORT_SWITCH_ID_6171, "Marvell 88E6171" },
                { PORT_SWITCH_ID_6175, "Marvell 88E6175" },
                { PORT_SWITCH_ID_6350, "Marvell 88E6350" },
                { PORT_SWITCH_ID_6351, "Marvell 88E6351" },
        };
        struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
        unsigned int i;
        int ret;

        if (bus == NULL)
                return NULL;

        ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
        if (ret < 0)
                return NULL;

        /* Low nibble is the revision; compare the product id only. */
        for (i = 0; i < sizeof(models) / sizeof(models[0]); i++) {
                if ((ret & 0xfff0) == models[i].id)
                        return models[i].name;
        }

        return NULL;
}
/*
 * Chip-specific global setup on top of the common mv88e6xxx setup:
 * enable the PPU and discard-excess-collisions, steer all monitor/ARP/
 * mirror traffic to the upstream (CPU-facing) port, and program the
 * switch's DSA device number.
 *
 * NOTE(review): REG_WRITE is a macro from mv88e6xxx.h that presumably
 * returns from this function on I/O error — confirm before relying on
 * fall-through behaviour after it.
 */
static int mv88e6171_setup_global(struct dsa_switch *ds)
{
        u32 upstream_port = dsa_upstream_port(ds);
        int ret;
        u32 reg;

        ret = mv88e6xxx_setup_global(ds);
        if (ret)
                return ret;

        /* Discard packets with excessive collisions, mask all
         * interrupt sources, enable PPU.
         */
        REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
                  GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);

        /* Configure the upstream port, and configure the upstream
         * port as the port to which ingress and egress monitor frames
         * are to be sent.
         */
        reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
                upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
                upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT |
                upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT;
        REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);

        /* Disable remote management for now, and set the switch's
         * DSA device number.
         */
        REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);

        return 0;
}
/*
 * DSA setup entry point: run the shared mv88e6xxx initialisation,
 * record the port count (7 on this family), hard-reset the switch,
 * apply the chip-specific global configuration, then bring up the
 * ports.  Each step aborts setup on error.
 */
static int mv88e6171_setup(struct dsa_switch *ds)
{
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;

        ret = mv88e6xxx_setup_common(ds);
        if (ret < 0)
                return ret;

        ps->num_ports = 7;

        ret = mv88e6xxx_switch_reset(ds, true);
        if (ret < 0)
                return ret;

        ret = mv88e6171_setup_global(ds);
        if (ret < 0)
                return ret;

        return mv88e6xxx_setup_ports(ds);
}
/* DSA switch-driver binding for the 88E6171/6175/6350/6351 family.
 * Only probe/setup are chip-specific; everything else is delegated to
 * the shared mv88e6xxx library. */
struct dsa_switch_driver mv88e6171_switch_driver = {
        .tag_protocol           = DSA_TAG_PROTO_EDSA,
        .priv_size              = sizeof(struct mv88e6xxx_priv_state),
        .probe                  = mv88e6171_probe,
        .setup                  = mv88e6171_setup,
        .set_addr               = mv88e6xxx_set_addr_indirect,
        .phy_read               = mv88e6xxx_phy_read_indirect,
        .phy_write              = mv88e6xxx_phy_write_indirect,
        .poll_link              = mv88e6xxx_poll_link,
        .get_strings            = mv88e6xxx_get_strings,
        .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
        .get_sset_count         = mv88e6xxx_get_sset_count,
        .adjust_link            = mv88e6xxx_adjust_link,
#ifdef CONFIG_NET_DSA_HWMON
        .get_temp               = mv88e6xxx_get_temp,
#endif
        .get_regs_len           = mv88e6xxx_get_regs_len,
        .get_regs               = mv88e6xxx_get_regs,
        .port_join_bridge       = mv88e6xxx_join_bridge,
        .port_leave_bridge      = mv88e6xxx_leave_bridge,
        .port_stp_update        = mv88e6xxx_port_stp_update,
        .port_pvid_get          = mv88e6xxx_port_pvid_get,
        .port_pvid_set          = mv88e6xxx_port_pvid_set,
        .port_vlan_add          = mv88e6xxx_port_vlan_add,
        .port_vlan_del          = mv88e6xxx_port_vlan_del,
        .vlan_getnext           = mv88e6xxx_vlan_getnext,
        .port_fdb_add           = mv88e6xxx_port_fdb_add,
        .port_fdb_del           = mv88e6xxx_port_fdb_del,
        .port_fdb_getnext       = mv88e6xxx_port_fdb_getnext,
};
MODULE_ALIAS("platform:mv88e6171");
MODULE_ALIAS("platform:mv88e6175");
MODULE_ALIAS("platform:mv88e6350");
MODULE_ALIAS("platform:mv88e6351");
| gpl-2.0 |
ypresto/linux-2.6 | drivers/acpi/processor_core.c | 431 | 8312 | /*
* Copyright (C) 2005 Intel Corporation
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
*
* Alex Chiang <achiang@hp.com>
* - Unified x86/ia64 implementations
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/dmi.h>
#include <linux/slab.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
#include "internal.h"
#define PREFIX "ACPI: "
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");
/*
 * DMI callback (see processor_idle_dmi_table): globally disable MWAIT
 * for C-states on machines known to misbehave with it.  Returns 0.
 */
static int set_no_mwait(const struct dmi_system_id *id)
{
        printk(KERN_NOTICE PREFIX "%s detected - "
               "disabling mwait for CPU C-states\n", id->ident);
        idle_nomwait = 1;
        return 0;
}
/* DMI match table of systems with broken MWAIT C-state support; a
 * match invokes set_no_mwait(). */
static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
        {
        set_no_mwait, "Extensa 5220", {
        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
        DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
        DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
        {},
};
#ifdef CONFIG_SMP
/*
 * Match a MADT LOCAL_APIC entry against a Processor() acpi_id.  On a
 * match (entry enabled and processor_id equal), store the APIC id in
 * *apic_id and return 1; otherwise return 0.
 */
static int map_lapic_id(struct acpi_subtable_header *entry,
                        u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_apic *lapic =
                (struct acpi_madt_local_apic *)entry;

        /* Disabled entries describe absent processors. */
        if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
            lapic->processor_id == acpi_id) {
                *apic_id = lapic->id;
                return 1;
        }

        return 0;
}
/*
 * Match a MADT LOCAL_X2APIC entry against acpi_id.  x2APIC entries
 * are only matched for Device() declarations (device_declaration set),
 * where acpi_id is the ACPI UID.  On a match, store the x2APIC id in
 * *apic_id and return 1; otherwise return 0.
 */
static int map_x2apic_id(struct acpi_subtable_header *entry,
                         int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_x2apic *apic =
                (struct acpi_madt_local_x2apic *)entry;

        if ((apic->lapic_flags & ACPI_MADT_ENABLED) &&
            device_declaration && apic->uid == acpi_id) {
                *apic_id = apic->local_apic_id;
                return 1;
        }

        return 0;
}
/*
 * Match a MADT LOCAL_SAPIC entry against acpi_id.  For Device()
 * declarations the ACPI UID field is compared (the entry must be long
 * enough to carry one); for Processor() declarations the legacy
 * processor_id is compared.  On a match the combined id/eid value is
 * stored in *apic_id and 1 is returned, otherwise 0.
 */
static int map_lsapic_id(struct acpi_subtable_header *entry,
                         int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_sapic *lsapic =
                (struct acpi_madt_local_sapic *)entry;

        /* Disabled entries describe absent processors. */
        if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (device_declaration) {
                /* uid is only present when the entry is >= 16 bytes. */
                if ((entry->length < 16) || (lsapic->uid != acpi_id))
                        return 0;
        } else if (lsapic->processor_id != acpi_id)
                return 0;

        *apic_id = (lsapic->id << 8) | lsapic->eid;
        return 1;
}
/*
 * Walk the MADT looking for a local APIC / x2APIC / SAPIC entry that
 * matches acpi_id (@type distinguishes Device() from Processor()
 * declarations).  Returns the hardware APIC id, or -1 if the MADT is
 * absent or contains no match.
 *
 * The MADT lookup result is cached in function-static variables on
 * first use.  NOTE(review): this caching is not thread-safe; it
 * presumably relies on callers being serialized at boot — confirm.
 */
static int map_madt_entry(int type, u32 acpi_id)
{
        unsigned long madt_end, entry;
        static struct acpi_table_madt *madt;
        static int read_madt;
        int apic_id = -1;

        if (!read_madt) {
                if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
                                        (struct acpi_table_header **)&madt)))
                        madt = NULL;
                read_madt++;
        }

        if (!madt)
                return apic_id;

        entry = (unsigned long)madt;
        madt_end = entry + madt->header.length;

        /* Parse all entries looking for a match. */
        entry += sizeof(struct acpi_table_madt);
        while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                        if (map_lapic_id(header, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
                        if (map_x2apic_id(header, type, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                        if (map_lsapic_id(header, type, acpi_id, &apic_id))
                                break;
                }
                entry += header->length;
        }
        return apic_id;
}
/*
 * Evaluate the processor object's _MAT method and, if it returns a
 * buffer holding a LOCAL_APIC or LOCAL_SAPIC subtable, map acpi_id to
 * the hardware APIC id it describes.
 *
 * Returns the APIC id, or -1 if _MAT is absent, malformed, or does
 * not match acpi_id.
 */
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_subtable_header *header;
        int apic_id = -1;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                goto exit;

        if (!buffer.length || !buffer.pointer)
                goto exit;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(struct acpi_subtable_header)) {
                goto exit;
        }

        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                map_lapic_id(header, acpi_id, &apic_id);
        } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                map_lsapic_id(header, type, acpi_id, &apic_id);
        }

exit:
        /* kfree(NULL) is a no-op, so the pointer needs no NULL check;
         * buffer.pointer is initialized to NULL above. */
        kfree(buffer.pointer);
        return apic_id;
}
/*
 * Map an ACPI processor object to its logical CPU number.
 *
 * The physical (APIC) id is taken from the object's _MAT method when
 * available, falling back to a scan of the global MADT.  Returns the
 * logical CPU whose physical id matches, or -1 if no mapping exists.
 */
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
	int phys_id;
	int cpu;

	/* Prefer the per-device _MAT entry; fall back to the MADT. */
	phys_id = map_mat_entry(handle, type, acpi_id);
	if (phys_id == -1)
		phys_id = map_madt_entry(type, acpi_id);
	if (phys_id == -1)
		return -1;

	for_each_possible_cpu(cpu)
		if (cpu_physical_id(cpu) == phys_id)
			return cpu;

	return -1;
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
#endif
/*
 * Decide whether the processor object @handle corresponds to a CPU that
 * is physically present (i.e. maps to a possible logical CPU).
 *
 * The ACPI id is read either from the Processor object itself or, for
 * Device-declared processors, from its _UID.  On uniprocessor systems a
 * failed mapping is tolerated (the single CPU is assumed present).
 */
static bool processor_physically_present(acpi_handle handle)
{
	int cpuid, type;
	u32 acpi_id;
	acpi_status status;
	acpi_object_type acpi_type;
	unsigned long long tmp;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return false;

	switch (acpi_type) {
	case ACPI_TYPE_PROCESSOR:
		/* Classic Processor() declaration: id is in the object. */
		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status))
			return false;
		acpi_id = object.processor.proc_id;
		break;
	case ACPI_TYPE_DEVICE:
		/* Device(ACPI0007) declaration: id comes from _UID. */
		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
		if (ACPI_FAILURE(status))
			return false;
		acpi_id = tmp;
		break;
	default:
		return false;
	}

	/* type: 1 = device declaration, 0 = processor declaration. */
	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
	cpuid = acpi_get_cpuid(handle, type, acpi_id);

	/* On SMP, an unmappable processor object is considered absent. */
	if ((cpuid == -1) && (num_possible_cpus() > 1))
		return false;

	return true;
}
/*
 * Populate the three-dword _PDC capability buffer: revision, dword
 * count, and the generic capability bits, then let the architecture
 * OR in its own bits.
 */
static void acpi_set_pdc_bits(u32 *buf)
{
	u32 *cap = buf;

	cap[0] = ACPI_PDC_REVISION_ID;	/* interface revision */
	cap[1] = 1;			/* one capability dword follows */
	/* Enable coordination with firmware's _TSD info */
	cap[2] = ACPI_PDC_SMP_T_SWCOORD;

	/* Twiddle arch-specific bits needed for _PDC */
	arch_acpi_set_pdc_bits(cap);
}
static struct acpi_object_list *acpi_processor_alloc_pdc(void)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return NULL;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return NULL;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return NULL;
}
acpi_set_pdc_bits(buf);
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
return obj_list;
}
/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
/*
 * Evaluate _PDC on @handle with the capability list @pdc_in.
 * If MWAIT-based C-states were disabled on the command line
 * (idle_nomwait), the FFH capability bits are masked out of the
 * buffer before evaluation.  Returns the ACPI status of the call.
 */
static int
acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
{
	acpi_status status = AE_OK;

	if (idle_nomwait) {
		/*
		 * If mwait is disabled for CPU C-states, the C2C3_FFH access
		 * mode will be disabled in the parameter of _PDC object.
		 * Of course C1_FFH access mode will also be disabled.
		 */
		union acpi_object *obj;
		u32 *buffer = NULL;

		obj = pdc_in->pointer;
		buffer = (u32 *)(obj->buffer.pointer);
		/* buffer[2] holds the capability bits set by acpi_set_pdc_bits() */
		buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
	}
	status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);

	if (ACPI_FAILURE(status))
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "Could not evaluate _PDC, using legacy perf. control.\n"));

	return status;
}
/*
 * Build a _PDC capability list, hand it to the firmware via _PDC, and
 * release it again.  No-op on architectures without _PDC support.
 */
void acpi_processor_set_pdc(acpi_handle handle)
{
	struct acpi_object_list *pdc;

	if (!arch_has_acpi_pdc())
		return;

	pdc = acpi_processor_alloc_pdc();
	if (!pdc)
		return;

	acpi_processor_eval_pdc(handle, pdc);

	/* Tear down in reverse order of allocation. */
	kfree(pdc->pointer->buffer.pointer);
	kfree(pdc->pointer);
	kfree(pdc);
}
EXPORT_SYMBOL_GPL(acpi_processor_set_pdc);
/*
 * Namespace-walk callback: evaluate _PDC for every processor object
 * that corresponds to a physically present CPU.  Always returns AE_OK
 * so the walk continues.
 */
static acpi_status
early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
{
	if (processor_physically_present(handle))
		acpi_processor_set_pdc(handle);

	return AE_OK;
}
/*
 * Early boot: evaluate _PDC for all processor objects, declared either
 * as Processor() statements or as Device(ACPI0007) nodes.
 */
void __init acpi_early_processor_set_pdc(void)
{
	/*
	 * Check whether the system matches an entry in the DMI quirk
	 * table; if so, OSPM should not use mwait for CPU C-states
	 * (the table sets idle_nomwait, honoured in
	 * acpi_processor_eval_pdc()).
	 */
	dmi_check_system(processor_idle_dmi_table);

	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    early_init_pdc, NULL, NULL, NULL);
	acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
}
| gpl-2.0 |
mfrw/linux | drivers/net/usb/asix_devices.c | 687 | 29561 | /*
* ASIX AX8817X based USB 2.0 Ethernet Devices
* Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
* Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
* Copyright (C) 2006 James Painter <jamie.painter@iname.com>
* Copyright (c) 2002-2003 TiVo Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asix.h"
#define PHY_MODE_MARVELL 0x0000
#define MII_MARVELL_LED_CTRL 0x0018
#define MII_MARVELL_STATUS 0x001b
#define MII_MARVELL_CTRL 0x0014
#define MARVELL_LED_MANUAL 0x0019
#define MARVELL_STATUS_HWCFG 0x0004
#define MARVELL_CTRL_TXDELAY 0x0002
#define MARVELL_CTRL_RXDELAY 0x0080
#define PHY_MODE_RTL8211CL 0x000C
/* Layout of the interrupt-endpoint status packet sent by AX8817x
 * devices; only the link bit (bit 0 of @link) is consumed by
 * asix_status(). */
struct ax88172_int_data {
	__le16 res1;
	u8 link;	/* bit 0: PHY link up */
	__le16 res2;
	u8 status;
	__le16 res3;
} __packed;
/* Interrupt-endpoint callback: mirror the device's link bit into the
 * netdev carrier state, logging only on an actual change. */
static void asix_status(struct usbnet *dev, struct urb *urb)
{
	struct ax88172_int_data *report;
	int link_up;

	if (urb->actual_length < 8)
		return;		/* truncated status packet */

	report = urb->transfer_buffer;
	link_up = report->link & 0x01;

	if (link_up == netif_carrier_ok(dev->net))
		return;		/* no change */

	usbnet_link_change(dev, link_up, 1);
	netdev_dbg(dev->net, "Link Status is: %d\n", link_up);
}
/* Install @addr as the netdev MAC, or fall back to a random address
 * when the device-provided one is invalid. */
static void asix_set_netdev_dev_addr(struct usbnet *dev, u8 *addr)
{
	if (!is_valid_ether_addr(addr)) {
		netdev_info(dev->net, "invalid hw address, using random\n");
		eth_hw_addr_random(dev->net);
		return;
	}

	memcpy(dev->net->dev_addr, addr, ETH_ALEN);
}
/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
static u32 asix_get_phyid(struct usbnet *dev)
{
int phy_reg;
u32 phy_id;
int i;
/* Poll for the rare case the FW or phy isn't ready yet. */
for (i = 0; i < 100; i++) {
phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
if (phy_reg != 0 && phy_reg != 0xFFFF)
break;
mdelay(1);
}
if (phy_reg <= 0 || phy_reg == 0xFFFF)
return 0;
phy_id = (phy_reg & 0xffff) << 16;
phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
if (phy_reg < 0)
return 0;
phy_id |= (phy_reg & 0xffff);
return phy_id;
}
/* ethtool get_link: report link state straight from the MII layer. */
static u32 asix_get_link(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return mii_link_ok(&dev->mii);
}
/* ndo_do_ioctl: delegate all MII ioctls to the generic MII helper. */
static int asix_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
{
	struct usbnet *dev = netdev_priv(net);

	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}
/* We need to override some ethtool_ops so we require our
   own structure so we don't interfere with other usbnet
   devices that may be connected at the same time. */
/* ethtool operations for the original AX88172 parts; mixes ASIX-specific
 * handlers (wol, eeprom, drvinfo, link) with generic usbnet ones. */
static const struct ethtool_ops ax88172_ethtool_ops = {
	.get_drvinfo		= asix_get_drvinfo,
	.get_link		= asix_get_link,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_wol		= asix_get_wol,
	.set_wol		= asix_set_wol,
	.get_eeprom_len		= asix_get_eeprom_len,
	.get_eeprom		= asix_get_eeprom,
	.set_eeprom		= asix_set_eeprom,
	.get_settings		= usbnet_get_settings,
	.set_settings		= usbnet_set_settings,
	.nway_reset		= usbnet_nway_reset,
};
/*
 * ndo_set_rx_mode for AX88172: program the RX control register and,
 * when needed, the 64-bit multicast hash filter.  Register writes are
 * issued asynchronously since this can be called in atomic context.
 */
static void ax88172_set_multicast(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);
	struct asix_data *data = (struct asix_data *)&dev->data;
	u8 rx_ctl = 0x8c;	/* base RX_CTL value; device-specific bits */

	if (net->flags & IFF_PROMISC) {
		rx_ctl |= 0x01;	/* promiscuous */
	} else if (net->flags & IFF_ALLMULTI ||
		   netdev_mc_count(net) > AX_MAX_MCAST) {
		rx_ctl |= 0x02;	/* accept all multicast */
	} else if (netdev_mc_empty(net)) {
		/* just broadcast and directed */
	} else {
		/* We use the 20 byte dev->data
		 * for our 8 byte filter buffer
		 * to avoid allocating memory that
		 * is tricky to free later */
		struct netdev_hw_addr *ha;
		u32 crc_bits;

		memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);

		/* Build the multicast hash filter. */
		netdev_for_each_mc_addr(ha, net) {
			/* top 6 CRC bits index into the 64-bit filter */
			crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
			data->multi_filter[crc_bits >> 3] |=
			    1 << (crc_bits & 7);
		}

		asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
				   AX_MCAST_FILTER_SIZE, data->multi_filter);

		rx_ctl |= 0x10;	/* enable multicast hash filtering */
	}

	asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
}
/*
 * Re-program the AX88172 medium mode register after an autonegotiation
 * result: start from the default and drop the full-duplex bit for
 * half-duplex links.
 */
static int ax88172_link_reset(struct usbnet *dev)
{
	u8 mode;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

	mii_check_media(&dev->mii, 1, 1);
	mii_ethtool_gset(&dev->mii, &ecmd);
	mode = AX88172_MEDIUM_DEFAULT;

	/*
	 * Clear — not set — the FD bit for half duplex.  The previous
	 * "mode |= ~AX88172_MEDIUM_FD" turned on every bit *except* FD,
	 * scribbling over unrelated medium-mode bits; compare the &= ~
	 * pattern used by ax88772_link_reset()/ax88178_link_reset().
	 */
	if (ecmd.duplex != DUPLEX_FULL)
		mode &= ~AX88172_MEDIUM_FD;

	netdev_dbg(dev->net, "ax88172_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
		   ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);

	asix_write_medium_mode(dev, mode);

	return 0;
}
/* net_device operations for AX88172: generic usbnet paths plus the
 * ASIX ioctl and the AX88172-specific multicast programming. */
static const struct net_device_ops ax88172_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= asix_ioctl,
	.ndo_set_rx_mode	= ax88172_set_multicast,
};
/*
 * usbnet bind for AX88172: toggle the device GPIOs, read the MAC
 * address, wire up the MII/netdev/ethtool ops and kick off
 * autonegotiation.  The GPIO write sequence is order-dependent.
 */
static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int ret = 0;
	u8 buf[ETH_ALEN];
	int i;
	unsigned long gpio_bits = dev->driver_info->data;

	usbnet_get_endpoints(dev,intf);

	/* Toggle the GPIOs in a manufacturer/model specific way */
	for (i = 2; i >= 0; i--) {
		ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS,
				(gpio_bits >> (i * 8)) & 0xff, 0, 0, NULL);
		if (ret < 0)
			goto out;
		msleep(5);
	}

	ret = asix_write_rx_ctl(dev, 0x80);
	if (ret < 0)
		goto out;

	/* Get the MAC address */
	ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
	if (ret < 0) {
		netdev_dbg(dev->net, "read AX_CMD_READ_NODE_ID failed: %d\n",
			   ret);
		goto out;
	}

	asix_set_netdev_dev_addr(dev, buf);

	/* Initialize MII structure */
	dev->mii.dev = dev->net;
	dev->mii.mdio_read = asix_mdio_read;
	dev->mii.mdio_write = asix_mdio_write;
	dev->mii.phy_id_mask = 0x3f;
	dev->mii.reg_num_mask = 0x1f;
	dev->mii.phy_id = asix_get_phy_addr(dev);

	dev->net->netdev_ops = &ax88172_netdev_ops;
	dev->net->ethtool_ops = &ax88172_ethtool_ops;
	dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
	dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */

	/* Reset the PHY and start autonegotiation. */
	asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
	asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
		ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
	mii_nway_restart(&dev->mii);

	return 0;

out:
	return ret;
}
/* ethtool operations for AX88772/AX88772B parts; same mix of ASIX and
 * generic usbnet handlers as the AX88172 table. */
static const struct ethtool_ops ax88772_ethtool_ops = {
	.get_drvinfo		= asix_get_drvinfo,
	.get_link		= asix_get_link,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_wol		= asix_get_wol,
	.set_wol		= asix_set_wol,
	.get_eeprom_len		= asix_get_eeprom_len,
	.get_eeprom		= asix_get_eeprom,
	.set_eeprom		= asix_set_eeprom,
	.get_settings		= usbnet_get_settings,
	.set_settings		= usbnet_set_settings,
	.nway_reset		= usbnet_nway_reset,
};
static int ax88772_link_reset(struct usbnet *dev)
{
u16 mode;
struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
mode = AX88772_MEDIUM_DEFAULT;
if (ethtool_cmd_speed(&ecmd) != SPEED_100)
mode &= ~AX_MEDIUM_PS;
if (ecmd.duplex != DUPLEX_FULL)
mode &= ~AX_MEDIUM_FD;
netdev_dbg(dev->net, "ax88772_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
return 0;
}
/*
 * Full hardware reset of an AX88772: select the PHY, run the software
 * reset sequence, restart autonegotiation, restore IPG values and the
 * MAC address, and re-enable the receiver.  The reset steps and the
 * 150 ms delays between them are order-dependent (vendor sequence) —
 * do not reorder.
 */
static int ax88772_reset(struct usbnet *dev)
{
	struct asix_data *data = (struct asix_data *)&dev->data;
	int ret, embd_phy;
	u16 rx_ctl;

	ret = asix_write_gpio(dev,
			AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5);
	if (ret < 0)
		goto out;

	/* PHY address 0x10 indicates the embedded PHY. */
	embd_phy = ((asix_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);

	ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
	if (ret < 0) {
		netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
		goto out;
	}

	ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
	if (ret < 0)
		goto out;

	msleep(150);

	ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
	if (ret < 0)
		goto out;

	msleep(150);

	/* Release the appropriate PHY (internal vs. external) from reset. */
	if (embd_phy) {
		ret = asix_sw_reset(dev, AX_SWRESET_IPRL);
		if (ret < 0)
			goto out;
	} else {
		ret = asix_sw_reset(dev, AX_SWRESET_PRTE);
		if (ret < 0)
			goto out;
	}

	msleep(150);
	rx_ctl = asix_read_rx_ctl(dev);
	netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
	/* Disable the receiver while reconfiguring. */
	ret = asix_write_rx_ctl(dev, 0x0000);
	if (ret < 0)
		goto out;

	rx_ctl = asix_read_rx_ctl(dev);
	netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);

	ret = asix_sw_reset(dev, AX_SWRESET_PRL);
	if (ret < 0)
		goto out;

	msleep(150);

	ret = asix_sw_reset(dev, AX_SWRESET_IPRL | AX_SWRESET_PRL);
	if (ret < 0)
		goto out;

	msleep(150);

	/* Reset the PHY and restart autonegotiation. */
	asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
	asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
			ADVERTISE_ALL | ADVERTISE_CSMA);
	mii_nway_restart(&dev->mii);

	ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT);
	if (ret < 0)
		goto out;

	ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
				AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
				AX88772_IPG2_DEFAULT, 0, NULL);
	if (ret < 0) {
		netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
		goto out;
	}

	/* Rewrite MAC address */
	memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
	ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
							data->mac_addr);
	if (ret < 0)
		goto out;

	/* Set RX_CTL to default values with 2k buffer, and enable cactus */
	ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
	if (ret < 0)
		goto out;

	rx_ctl = asix_read_rx_ctl(dev);
	netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
		   rx_ctl);

	rx_ctl = asix_read_medium_status(dev);
	netdev_dbg(dev->net,
		   "Medium Status is 0x%04x after all initializations\n",
		   rx_ctl);

	return 0;

out:
	return ret;
}
/* net_device operations for AX88772 parts; uses the ASIX MAC-address
 * setter and the common asix_set_multicast (from asix_common). */
static const struct net_device_ops ax88772_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_set_mac_address	= asix_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= asix_ioctl,
	.ndo_set_rx_mode	= asix_set_multicast,
};
/*
 * usbnet bind for AX88772: fetch the MAC (from EEPROM for devices
 * flagged FLAG_EEPROM_MAC, else from the NODE_ID command), set up
 * MII/netdev/ethtool ops, reset the chip and allocate the driver's
 * private state.
 */
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int ret, embd_phy, i;
	u8 buf[ETH_ALEN];
	u32 phyid;

	usbnet_get_endpoints(dev,intf);

	/* Get the MAC address */
	if (dev->driver_info->data & FLAG_EEPROM_MAC) {
		/* Read the 6-byte MAC as three 16-bit EEPROM words. */
		for (i = 0; i < (ETH_ALEN >> 1); i++) {
			ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i,
					0, 2, buf + i * 2);
			if (ret < 0)
				break;
		}
	} else {
		ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
				0, 0, ETH_ALEN, buf);
	}

	if (ret < 0) {
		netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
		return ret;
	}

	asix_set_netdev_dev_addr(dev, buf);

	/* Initialize MII structure */
	dev->mii.dev = dev->net;
	dev->mii.mdio_read = asix_mdio_read;
	dev->mii.mdio_write = asix_mdio_write;
	dev->mii.phy_id_mask = 0x1f;
	dev->mii.reg_num_mask = 0x1f;
	dev->mii.phy_id = asix_get_phy_addr(dev);

	dev->net->netdev_ops = &ax88772_netdev_ops;
	dev->net->ethtool_ops = &ax88772_ethtool_ops;
	dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
	dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */

	/* PHY address 0x10 indicates the embedded PHY. */
	embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);

	/* Reset the PHY to normal operation mode */
	ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
	if (ret < 0) {
		netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
		return ret;
	}

	/* NOTE(review): return value deliberately(?) ignored here — confirm */
	ax88772_reset(dev);

	/* Read PHYID register *AFTER* the PHY was reset properly */
	phyid = asix_get_phyid(dev);
	netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);

	/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
	if (dev->driver_info->flags & FLAG_FRAMING_AX) {
		/* hard_mtu is still the default - the device does not support
		   jumbo eth frames */
		dev->rx_urb_size = 2048;
	}

	dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
	if (!dev->driver_priv)
		return -ENOMEM;

	return 0;
}
/* usbnet unbind: release the private state allocated in *_bind(). */
static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
	kfree(dev->driver_priv);
}
/* ethtool operations for the gigabit AX88178; identical handler set to
 * the AX88772 table. */
static const struct ethtool_ops ax88178_ethtool_ops = {
	.get_drvinfo		= asix_get_drvinfo,
	.get_link		= asix_get_link,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_wol		= asix_get_wol,
	.set_wol		= asix_set_wol,
	.get_eeprom_len		= asix_get_eeprom_len,
	.get_eeprom		= asix_get_eeprom,
	.set_eeprom		= asix_set_eeprom,
	.get_settings		= usbnet_get_settings,
	.set_settings		= usbnet_set_settings,
	.nway_reset		= usbnet_nway_reset,
};
/*
 * Initialize an external Marvell gigabit PHY behind an AX88178:
 * enable RGMII RX/TX clock delays and, when the EEPROM requested a
 * LED mode, program the LED control register.  Always returns 0.
 */
static int marvell_phy_init(struct usbnet *dev)
{
	struct asix_data *data = (struct asix_data *)&dev->data;
	u16 reg;

	netdev_dbg(dev->net, "marvell_phy_init()\n");

	reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_MARVELL_STATUS);
	netdev_dbg(dev->net, "MII_MARVELL_STATUS = 0x%04x\n", reg);

	asix_mdio_write(dev->net, dev->mii.phy_id, MII_MARVELL_CTRL,
			MARVELL_CTRL_RXDELAY | MARVELL_CTRL_TXDELAY);

	if (data->ledmode) {
		reg = asix_mdio_read(dev->net, dev->mii.phy_id,
			MII_MARVELL_LED_CTRL);
		netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (1) = 0x%04x\n", reg);

		/* keep high byte except bits 8-10, force LED mode 1 */
		reg &= 0xf8ff;
		reg |= (1 + 0x0100);
		asix_mdio_write(dev->net, dev->mii.phy_id,
			MII_MARVELL_LED_CTRL, reg);

		reg = asix_mdio_read(dev->net, dev->mii.phy_id,
			MII_MARVELL_LED_CTRL);
		netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (2) = 0x%04x\n", reg);
		/* NOTE(review): this masked value is never written back —
		 * looks like dead code inherited from vendor source; confirm */
		reg &= 0xfc0f;
	}

	return 0;
}
/*
 * Initialize an external Realtek RTL8211CL PHY behind an AX88178 using
 * the vendor's paged-register sequence (page selected via reg 0x1f);
 * register meanings are undocumented vendor magic.  Always returns 0.
 */
static int rtl8211cl_phy_init(struct usbnet *dev)
{
	struct asix_data *data = (struct asix_data *)&dev->data;

	netdev_dbg(dev->net, "rtl8211cl_phy_init()\n");

	asix_mdio_write (dev->net, dev->mii.phy_id, 0x1f, 0x0005);
	asix_mdio_write (dev->net, dev->mii.phy_id, 0x0c, 0);
	asix_mdio_write (dev->net, dev->mii.phy_id, 0x01,
		asix_mdio_read (dev->net, dev->mii.phy_id, 0x01) | 0x0080);
	asix_mdio_write (dev->net, dev->mii.phy_id, 0x1f, 0);

	if (data->ledmode == 12) {
		/* EEPROM-selected alternate LED configuration */
		asix_mdio_write (dev->net, dev->mii.phy_id, 0x1f, 0x0002);
		asix_mdio_write (dev->net, dev->mii.phy_id, 0x1a, 0x00cb);
		asix_mdio_write (dev->net, dev->mii.phy_id, 0x1f, 0);
	}

	return 0;
}
/* Update the Marvell PHY's manual LED register to reflect the current
 * link speed.  Always returns 0. */
static int marvell_led_status(struct usbnet *dev, u16 speed)
{
	u16 reg = asix_mdio_read(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL);

	netdev_dbg(dev->net, "marvell_led_status() read 0x%04x\n", reg);

	/* Clear out the center LED bits - 0x03F0 */
	reg &= 0xfc0f;

	if (speed == SPEED_1000)
		reg |= 0x03e0;
	else if (speed == SPEED_100)
		reg |= 0x03b0;
	else
		reg |= 0x02f0;

	netdev_dbg(dev->net, "marvell_led_status() writing 0x%04x\n", reg);
	asix_mdio_write(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL, reg);

	return 0;
}
/*
 * Full hardware reset of an AX88178: read PHY mode and LED mode from
 * EEPROM word 0x17, power the external GigaPHY up via GPIOs (sequence
 * depends on the EEPROM-programmed mode), run PHY-specific init, then
 * restart autonegotiation and restore MAC address and RX control.
 * GPIO and reset steps are order-dependent vendor sequences.
 */
static int ax88178_reset(struct usbnet *dev)
{
	struct asix_data *data = (struct asix_data *)&dev->data;
	int ret;
	__le16 eeprom;
	u8 status;
	int gpio0 = 0;
	u32 phyid;

	asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
	netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);

	/* EEPROM word 0x17: low 7 bits = PHY mode, high byte = LED mode,
	 * bit 7 = GPIO0 polarity.  0xffff means unprogrammed. */
	asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL);
	asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom);
	asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);

	netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);

	if (eeprom == cpu_to_le16(0xffff)) {
		data->phymode = PHY_MODE_MARVELL;
		data->ledmode = 0;
		gpio0 = 1;
	} else {
		data->phymode = le16_to_cpu(eeprom) & 0x7F;
		data->ledmode = le16_to_cpu(eeprom) >> 8;
		gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
	}
	netdev_dbg(dev->net, "GPIO0: %d, PhyMode: %d\n", gpio0, data->phymode);

	/* Power up external GigaPHY through AX88178 GPIO pin */
	asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
	if ((le16_to_cpu(eeprom) >> 8) != 1) {
		asix_write_gpio(dev, 0x003c, 30);
		asix_write_gpio(dev, 0x001c, 300);
		asix_write_gpio(dev, 0x003c, 30);
	} else {
		netdev_dbg(dev->net, "gpio phymode == 1 path\n");
		asix_write_gpio(dev, AX_GPIO_GPO1EN, 30);
		asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
	}

	/* Read PHYID register *AFTER* powering up PHY */
	phyid = asix_get_phyid(dev);
	netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);

	/* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
	asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);

	asix_sw_reset(dev, 0);
	msleep(150);

	asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
	msleep(150);

	asix_write_rx_ctl(dev, 0);

	/* Per-PHY vendor initialization. */
	if (data->phymode == PHY_MODE_MARVELL) {
		marvell_phy_init(dev);
		msleep(60);
	} else if (data->phymode == PHY_MODE_RTL8211CL)
		rtl8211cl_phy_init(dev);

	/* Reset the PHY, advertise 10/100 + 1000FD, restart autoneg. */
	asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR,
			BMCR_RESET | BMCR_ANENABLE);
	asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
			ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
	asix_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
			ADVERTISE_1000FULL);

	mii_nway_restart(&dev->mii);

	ret = asix_write_medium_mode(dev, AX88178_MEDIUM_DEFAULT);
	if (ret < 0)
		return ret;

	/* Rewrite MAC address */
	memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
	ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
							data->mac_addr);
	if (ret < 0)
		return ret;

	ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
	if (ret < 0)
		return ret;

	return 0;
}
/* Re-program the AX88178 medium mode register from the negotiated
 * speed/duplex, and update the Marvell LED register when applicable. */
static int ax88178_link_reset(struct usbnet *dev)
{
	struct asix_data *data = (struct asix_data *)&dev->data;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	u16 medium;
	u32 speed;

	netdev_dbg(dev->net, "ax88178_link_reset()\n");

	mii_check_media(&dev->mii, 1, 1);
	mii_ethtool_gset(&dev->mii, &ecmd);

	medium = AX88178_MEDIUM_DEFAULT;
	speed = ethtool_cmd_speed(&ecmd);

	switch (speed) {
	case SPEED_1000:
		medium |= AX_MEDIUM_GM;
		break;
	case SPEED_100:
		medium |= AX_MEDIUM_PS;
		break;
	default:
		medium &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);
		break;
	}
	medium |= AX_MEDIUM_ENCK;

	if (ecmd.duplex == DUPLEX_FULL)
		medium |= AX_MEDIUM_FD;
	else
		medium &= ~AX_MEDIUM_FD;

	netdev_dbg(dev->net, "ax88178_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
		   speed, ecmd.duplex, medium);

	asix_write_medium_mode(dev, medium);

	if (data->phymode == PHY_MODE_MARVELL && data->ledmode)
		marvell_led_status(dev, speed);

	return 0;
}
/*
 * Pick the device's maximum frame burst (MFB) size and RX URB size to
 * fit the current hard_mtu, and toggle the jumbo-frame enable bit in
 * the medium register.  If the URB size grew, in-flight RX URBs are
 * unlinked so they are resubmitted at the new size.
 */
static void ax88178_set_mfb(struct usbnet *dev)
{
	u16 mfb = AX_RX_CTL_MFB_16384;	/* default: largest burst */
	u16 rxctl;
	u16 medium;
	int old_rx_urb_size = dev->rx_urb_size;

	/* Choose the smallest MFB/URB size that still fits hard_mtu. */
	if (dev->hard_mtu < 2048) {
		dev->rx_urb_size = 2048;
		mfb = AX_RX_CTL_MFB_2048;
	} else if (dev->hard_mtu < 4096) {
		dev->rx_urb_size = 4096;
		mfb = AX_RX_CTL_MFB_4096;
	} else if (dev->hard_mtu < 8192) {
		dev->rx_urb_size = 8192;
		mfb = AX_RX_CTL_MFB_8192;
	} else if (dev->hard_mtu < 16384) {
		dev->rx_urb_size = 16384;
		mfb = AX_RX_CTL_MFB_16384;
	}

	/* AX_RX_CTL_MFB_16384 doubles as the mask for the MFB field. */
	rxctl = asix_read_rx_ctl(dev);
	asix_write_rx_ctl(dev, (rxctl & ~AX_RX_CTL_MFB_16384) | mfb);

	medium = asix_read_medium_status(dev);
	if (dev->net->mtu > 1500)
		medium |= AX_MEDIUM_JFE;
	else
		medium &= ~AX_MEDIUM_JFE;
	asix_write_medium_mode(dev, medium);

	if (dev->rx_urb_size > old_rx_urb_size)
		usbnet_unlink_rx_urbs(dev);
}
/* ndo_change_mtu for AX88178: validate the link-level frame length,
 * apply the new MTU, and retune the device's frame-burst settings. */
static int ax88178_change_mtu(struct net_device *net, int new_mtu)
{
	struct usbnet *dev = netdev_priv(net);
	int frame_len = new_mtu + net->hard_header_len + 4;

	netdev_dbg(dev->net, "ax88178_change_mtu() new_mtu=%d\n", new_mtu);

	if (new_mtu <= 0 || frame_len > 16384)
		return -EINVAL;

	/* Frames that are an exact multiple of the USB packet size would
	 * need a zero-length packet, which the device can't produce. */
	if ((frame_len % dev->maxpacket) == 0)
		return -EDOM;

	net->mtu = new_mtu;
	dev->hard_mtu = net->mtu + net->hard_header_len;
	ax88178_set_mfb(dev);

	/* max qlen depend on hard_mtu and rx_urb_size */
	usbnet_update_max_qlen(dev);

	return 0;
}
/* net_device operations for AX88178; like AX88772 but with the
 * jumbo-capable ax88178_change_mtu. */
static const struct net_device_ops ax88178_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_set_mac_address	= asix_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= asix_set_multicast,
	.ndo_do_ioctl		= asix_ioctl,
	.ndo_change_mtu		= ax88178_change_mtu,
};
/*
 * usbnet bind for AX88178: read the MAC address, set up the (gigabit-
 * capable) MII structure and ops, blink the device through a reset
 * cycle, and allocate the driver's private state.
 */
static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int ret;
	u8 buf[ETH_ALEN];

	usbnet_get_endpoints(dev,intf);

	/* Get the MAC address */
	ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
	if (ret < 0) {
		netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
		return ret;
	}

	asix_set_netdev_dev_addr(dev, buf);

	/* Initialize MII structure */
	dev->mii.dev = dev->net;
	dev->mii.mdio_read = asix_mdio_read;
	dev->mii.mdio_write = asix_mdio_write;
	dev->mii.phy_id_mask = 0x1f;
	dev->mii.reg_num_mask = 0xff;
	dev->mii.supports_gmii = 1;	/* gigabit-capable PHY */
	dev->mii.phy_id = asix_get_phy_addr(dev);

	dev->net->netdev_ops = &ax88178_netdev_ops;
	dev->net->ethtool_ops = &ax88178_ethtool_ops;

	/* Blink LEDS so users know driver saw dongle */
	asix_sw_reset(dev, 0);
	msleep(150);

	asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
	msleep(150);

	/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
	if (dev->driver_info->flags & FLAG_FRAMING_AX) {
		/* hard_mtu is still the default - the device does not support
		   jumbo eth frames */
		dev->rx_urb_size = 2048;
	}

	dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
	if (!dev->driver_priv)
		return -ENOMEM;

	return 0;
}
/* Per-chip/per-product driver_info tables referenced by the USB id
 * table below.  .data carries either GPIO toggle bytes (AX88172 parts)
 * or driver flags such as FLAG_EEPROM_MAC (AX88772B-class parts). */
static const struct driver_info ax8817x_info = {
	.description = "ASIX AX8817x USB 2.0 Ethernet",
	.bind = ax88172_bind,
	.status = asix_status,
	.link_reset = ax88172_link_reset,
	.reset = ax88172_link_reset,
	.flags =  FLAG_ETHER | FLAG_LINK_INTR,
	.data = 0x00130103,	/* GPIO toggle sequence, one byte per step */
};

static const struct driver_info dlink_dub_e100_info = {
	.description = "DLink DUB-E100 USB Ethernet",
	.bind = ax88172_bind,
	.status = asix_status,
	.link_reset = ax88172_link_reset,
	.reset = ax88172_link_reset,
	.flags =  FLAG_ETHER | FLAG_LINK_INTR,
	.data = 0x009f9d9f,
};

static const struct driver_info netgear_fa120_info = {
	.description = "Netgear FA-120 USB Ethernet",
	.bind = ax88172_bind,
	.status = asix_status,
	.link_reset = ax88172_link_reset,
	.reset = ax88172_link_reset,
	.flags =  FLAG_ETHER | FLAG_LINK_INTR,
	.data = 0x00130103,
};

static const struct driver_info hawking_uf200_info = {
	.description = "Hawking UF200 USB Ethernet",
	.bind = ax88172_bind,
	.status = asix_status,
	.link_reset = ax88172_link_reset,
	.reset = ax88172_link_reset,
	.flags =  FLAG_ETHER | FLAG_LINK_INTR,
	.data = 0x001f1d1f,
};

static const struct driver_info ax88772_info = {
	.description = "ASIX AX88772 USB 2.0 Ethernet",
	.bind = ax88772_bind,
	.unbind = ax88772_unbind,
	.status = asix_status,
	.link_reset = ax88772_link_reset,
	.reset = ax88772_link_reset,
	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
	.rx_fixup = asix_rx_fixup_common,
	.tx_fixup = asix_tx_fixup,
};

static const struct driver_info ax88772b_info = {
	.description = "ASIX AX88772B USB 2.0 Ethernet",
	.bind = ax88772_bind,
	.unbind = ax88772_unbind,
	.status = asix_status,
	.link_reset = ax88772_link_reset,
	.reset = ax88772_reset,
	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
	         FLAG_MULTI_PACKET,
	.rx_fixup = asix_rx_fixup_common,
	.tx_fixup = asix_tx_fixup,
	.data = FLAG_EEPROM_MAC,	/* MAC lives in EEPROM, not NODE_ID */
};

static const struct driver_info ax88178_info = {
	.description = "ASIX AX88178 USB 2.0 Ethernet",
	.bind = ax88178_bind,
	.unbind = ax88772_unbind,
	.status = asix_status,
	.link_reset = ax88178_link_reset,
	.reset = ax88178_reset,
	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
		 FLAG_MULTI_PACKET,
	.rx_fixup = asix_rx_fixup_common,
	.tx_fixup = asix_tx_fixup,
};

/*
 * USBLINK 20F9 "USB 2.0 LAN" USB ethernet adapter, typically found in
 * no-name packaging.
 * USB device strings are:
 *   1: Manufacturer: USBLINK
 *   2: Product: HG20F9 USB2.0
 *   3: Serial: 000003
 * Appears to be compatible with Asix 88772B.
 */
static const struct driver_info hg20f9_info = {
	.description = "HG20F9 USB 2.0 Ethernet",
	.bind = ax88772_bind,
	.unbind = ax88772_unbind,
	.status = asix_status,
	.link_reset = ax88772_link_reset,
	.reset = ax88772_reset,
	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
	         FLAG_MULTI_PACKET,
	.rx_fixup = asix_rx_fixup_common,
	.tx_fixup = asix_tx_fixup,
	.data = FLAG_EEPROM_MAC,
};
/* USB vendor/product id match table; each entry points at the
 * driver_info describing the chip inside that product. */
static const struct usb_device_id	products [] = {
{
	// Linksys USB200M
	USB_DEVICE (0x077b, 0x2226),
	.driver_info =	(unsigned long) &ax8817x_info,
}, {
	// Netgear FA120
	USB_DEVICE (0x0846, 0x1040),
	.driver_info =  (unsigned long) &netgear_fa120_info,
}, {
	// DLink DUB-E100
	USB_DEVICE (0x2001, 0x1a00),
	.driver_info =  (unsigned long) &dlink_dub_e100_info,
}, {
	// Intellinet, ST Lab USB Ethernet
	USB_DEVICE (0x0b95, 0x1720),
	.driver_info =  (unsigned long) &ax8817x_info,
}, {
	// Hawking UF200, TrendNet TU2-ET100
	USB_DEVICE (0x07b8, 0x420a),
	.driver_info =  (unsigned long) &hawking_uf200_info,
}, {
	// Billionton Systems, USB2AR
	USB_DEVICE (0x08dd, 0x90ff),
	.driver_info =  (unsigned long) &ax8817x_info,
}, {
	// ATEN UC210T
	USB_DEVICE (0x0557, 0x2009),
	.driver_info =  (unsigned long) &ax8817x_info,
}, {
	// Buffalo LUA-U2-KTX
	USB_DEVICE (0x0411, 0x003d),
	.driver_info =  (unsigned long) &ax8817x_info,
}, {
	// Buffalo LUA-U2-GT 10/100/1000
	USB_DEVICE (0x0411, 0x006e),
	.driver_info =  (unsigned long) &ax88178_info,
}, {
	// Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
	USB_DEVICE (0x6189, 0x182d),
	.driver_info =  (unsigned long) &ax8817x_info,
}, {
	// Sitecom LN-031 "USB 2.0 10/100/1000 Ethernet adapter"
	USB_DEVICE (0x0df6, 0x0056),
	.driver_info =  (unsigned long) &ax88178_info,
}, {
	// Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
	USB_DEVICE (0x0df6, 0x061c),
	.driver_info =  (unsigned long) &ax88178_info,
}, {
	// corega FEther USB2-TX
	USB_DEVICE (0x07aa, 0x0017),
	.driver_info =  (unsigned long) &ax8817x_info,
}, {
	// Surecom EP-1427X-2
	USB_DEVICE (0x1189, 0x0893),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// goodway corp usb gwusb2e
	USB_DEVICE (0x1631, 0x6200),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// JVC MP-PRX1 Port Replicator
	USB_DEVICE (0x04f1, 0x3008),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// Lenovo U2L100P 10/100
	USB_DEVICE (0x17ef, 0x7203),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// ASIX AX88772B 10/100
	USB_DEVICE (0x0b95, 0x772b),
	.driver_info = (unsigned long) &ax88772b_info,
}, {
	// ASIX AX88772 10/100
	USB_DEVICE (0x0b95, 0x7720),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// ASIX AX88178 10/100/1000
	USB_DEVICE (0x0b95, 0x1780),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// Logitec LAN-GTJ/U2A
	USB_DEVICE (0x0789, 0x0160),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// Linksys USB200M Rev 2
	USB_DEVICE (0x13b1, 0x0018),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// 0Q0 cable ethernet
	USB_DEVICE (0x1557, 0x7720),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// DLink DUB-E100 H/W Ver B1
	USB_DEVICE (0x07d1, 0x3c05),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// DLink DUB-E100 H/W Ver B1 Alternate
	USB_DEVICE (0x2001, 0x3c05),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// DLink DUB-E100 H/W Ver C1
	USB_DEVICE (0x2001, 0x1a02),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// Linksys USB1000
	USB_DEVICE (0x1737, 0x0039),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// IO-DATA ETG-US2
	USB_DEVICE (0x04bb, 0x0930),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// Belkin F5D5055
	USB_DEVICE(0x050d, 0x5055),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// Apple USB Ethernet Adapter
	USB_DEVICE(0x05ac, 0x1402),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// Cables-to-Go USB Ethernet Adapter
	USB_DEVICE(0x0b95, 0x772a),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// ABOCOM for pci
	USB_DEVICE(0x14ea, 0xab11),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// ASIX 88772a
	USB_DEVICE(0x0db0, 0xa877),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// Asus USB Ethernet Adapter
	USB_DEVICE (0x0b95, 0x7e2b),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	/* ASIX 88172a demo board */
	USB_DEVICE(0x0b95, 0x172a),
	.driver_info = (unsigned long) &ax88172a_info,
}, {
	/*
	 * USBLINK HG20F9 "USB 2.0 LAN"
	 * Appears to have gazumped Linksys's manufacturer ID but
	 * doesn't (yet) conflict with any known Linksys product.
	 */
	USB_DEVICE(0x066b, 0x20f9),
	.driver_info = (unsigned long) &hg20f9_info,
},
	{ },		// END
};
MODULE_DEVICE_TABLE(usb, products);
/* USB driver glue: binds the device-ID table above to the generic
 * usbnet probe/suspend/resume/disconnect entry points. */
static struct usb_driver asix_driver = {
	.name = DRIVER_NAME,
	.id_table = products,
	.probe = usbnet_probe,
	.suspend = usbnet_suspend,
	.resume = usbnet_resume,
	.disconnect = usbnet_disconnect,
	/* the driver copes with USB autosuspend of idle devices */
	.supports_autosuspend = 1,
	/* opt out of hub-initiated link power management */
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(asix_driver);

MODULE_AUTHOR("David Hollis");
MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("ASIX AX8817X based USB 2.0 Ethernet Devices");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Ezekeel/GLaDOS-nexus-s | drivers/staging/comedi/drivers/addi-data/hwdrv_apci2200.c | 943 | 23407 | /**
@verbatim
Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
ADDI-DATA GmbH
Dieselstrasse 3
D-77833 Ottersweier
Tel: +49(0)7223/9493-0
Fax: +49(0)7223/9493-92
http://www.addi-data.com
info@addi-data.com
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
You should also find the complete GPL in the COPYING file accompanying this source code.
@endverbatim
*/
/*
+-----------------------------------------------------------------------+
| (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
+-----------------------------------------------------------------------+
| Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
| Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
+-------------------------------+---------------------------------------+
| Project : APCI-2200 | Compiler : GCC |
| Module name : hwdrv_apci2200.c| Version : 2.96 |
+-------------------------------+---------------------------------------+
| Project manager: Eric Stolz | Date : 02/12/2002 |
+-------------------------------+---------------------------------------+
| Description : Hardware Layer Access For APCI-2200 |
+-----------------------------------------------------------------------+
| UPDATES |
+----------+-----------+------------------------------------------------+
| Date | Author | Description of updates |
+----------+-----------+------------------------------------------------+
| | | |
| | | |
| | | |
+----------+-----------+------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Included files |
+----------------------------------------------------------------------------+
*/
#include "hwdrv_apci2200.h"
/*
 * Read the state of a single digital input channel.
 *
 * The channel number is taken from insn->chanspec; only channels 0..7
 * exist on this board.  On success *data holds the input bit (0 or 1)
 * and insn->n is returned; an out-of-range channel yields -EINVAL.
 */
int i_APCI2200_Read1DigitalInput(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int port_bits;

	if (chan > 7) {
		printk("\nThe specified channel does not exist\n");
		return -EINVAL;	/* channel spec wrong */
	}

	/* Latch all 8 input lines, then isolate the requested bit. */
	port_bits = (unsigned int) inw(devpriv->iobase + APCI2200_DIGITAL_IP);
	*data = (port_bits >> chan) & 0x1;

	return insn->n;
}
/*
 * Read several digital input channels at once.
 *
 * insn->chanspec selects the access width:
 *   2 - return a 2-bit group; data[0] (on entry) selects which pair
 *   4 - return a 4-bit group; data[0] selects which nibble
 *   7 - return the whole input port unmasked
 * Any other width is rejected with -EINVAL.
 *
 * On entry data[0] carries the group index; on exit data[0] carries
 * the masked input value.  Returns insn->n on success.
 *
 * Fix: removed the unreachable "break" that followed "return -EINVAL"
 * in the default case (dead code).
 */
int i_APCI2200_ReadMoreDigitalInput(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	unsigned int ui_PortValue = data[0];	/* group index, saved before *data is overwritten */
	unsigned int ui_Mask = 0;
	unsigned int ui_NoOfChannels;

	ui_NoOfChannels = CR_CHAN(insn->chanspec);
	/* Latch all input lines; the switch below masks out the group. */
	*data = (unsigned int) inw(devpriv->iobase + APCI2200_DIGITAL_IP);
	switch (ui_NoOfChannels) {
	case 2:
		ui_Mask = 3;
		*data = (*data >> (2 * ui_PortValue)) & ui_Mask;
		break;
	case 4:
		ui_Mask = 15;
		*data = (*data >> (4 * ui_PortValue)) & ui_Mask;
		break;
	case 7:
		/* whole port requested: leave *data as read */
		break;
	default:
		printk("\nWrong parameters\n");
		return -EINVAL;	/* channel spec wrong */
	}
	return insn->n;
}
/*
 * Configure the digital output subdevice.
 *
 * data[0]: 1 = output memory on (later writes are read-modify-write),
 *          0 = output memory off (later writes start from zero).
 * The flag is only latched here; i_APCI2200_WriteDigitalOutput()
 * consults it when building the value written to the port.
 * Returns insn->n.
 */
int i_APCI2200_ConfigDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	devpriv->b_OutputMemoryStatus = data[0];	/* remembered for later writes */
	return insn->n;
}
/*
 * Write to the digital output port.
 *
 * Parameters carried in data[]:
 *   data[0] - value to write (modified in place to the final register value)
 *   data[1] - 0: single-channel access, 1: group/port access
 *   data[2] - group index when data[1] == 1
 *   data[3] - 0: set the selected bits, 1: clear the selected bits
 * The channel number / group width (2, 4, 8 or 15 = whole port) comes
 * from insn->chanspec.
 *
 * When output memory is enabled (see i_APCI2200_ConfigDigitalOutput)
 * the current register value is read back first so untouched bits are
 * preserved; otherwise the update starts from zero.
 * Returns insn->n on success, -EINVAL on a bad channel spec or mode.
 */
int i_APCI2200_WriteDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	unsigned int ui_Temp, ui_Temp1;
	unsigned int ui_NoOfChannel = CR_CHAN(insn->chanspec);	/* channel / group width */

	/* Start from the current outputs (memory on) or from zero. */
	if (devpriv->b_OutputMemoryStatus) {
		ui_Temp = inw(devpriv->iobase + APCI2200_DIGITAL_OP);
	} else {
		ui_Temp = 0;
	}
	if (data[3] == 0) {
		/* set-bits mode */
		if (data[1] == 0) {
			/* single channel: OR the new bit into the image */
			data[0] = (data[0] << ui_NoOfChannel) | ui_Temp;
			outw(data[0], devpriv->iobase + APCI2200_DIGITAL_OP);
		} else {
			if (data[1] == 1) {
				/* group access: shift the group value to its
				 * position and OR it into the image */
				switch (ui_NoOfChannel) {
				case 2:
					data[0] =
						(data[0] << (2 *
							data[2])) | ui_Temp;
					break;
				case 4:
					data[0] =
						(data[0] << (4 *
							data[2])) | ui_Temp;
					break;
				case 8:
					data[0] =
						(data[0] << (8 *
							data[2])) | ui_Temp;
					break;
				case 15:
					/* whole port */
					data[0] = data[0] | ui_Temp;
					break;
				default:
					comedi_error(dev, " chan spec wrong");
					return -EINVAL;	/* channel spec wrong */
				}
				outw(data[0],
					devpriv->iobase + APCI2200_DIGITAL_OP);
			} else {
				printk("\nSpecified channel not supported\n");
			}
		}
	} else {
		if (data[3] == 1) {
			/* clear-bits mode: invert the requested bits, build a
			 * mask that keeps all other bits high, then AND it
			 * against the image and write back */
			if (data[1] == 0) {
				data[0] = ~data[0] & 0x1;
				ui_Temp1 = 1;
				ui_Temp1 = ui_Temp1 << ui_NoOfChannel;
				ui_Temp = ui_Temp | ui_Temp1;
				data[0] = (data[0] << ui_NoOfChannel) ^ 0xffff;
				data[0] = data[0] & ui_Temp;
				outw(data[0],
					devpriv->iobase + APCI2200_DIGITAL_OP);
			} else {
				if (data[1] == 1) {
					switch (ui_NoOfChannel) {
					case 2:
						data[0] = ~data[0] & 0x3;
						ui_Temp1 = 3;
						ui_Temp1 =
							ui_Temp1 << 2 * data[2];
						ui_Temp = ui_Temp | ui_Temp1;
						data[0] =
							((data[0] << (2 *
								data
								[2])) ^
							0xffff) & ui_Temp;
						break;
					case 4:
						data[0] = ~data[0] & 0xf;
						ui_Temp1 = 15;
						ui_Temp1 =
							ui_Temp1 << 4 * data[2];
						ui_Temp = ui_Temp | ui_Temp1;
						data[0] =
							((data[0] << (4 *
								data
								[2])) ^
							0xffff) & ui_Temp;
						break;
					case 8:
						data[0] = ~data[0] & 0xff;
						ui_Temp1 = 255;
						ui_Temp1 =
							ui_Temp1 << 8 * data[2];
						ui_Temp = ui_Temp | ui_Temp1;
						data[0] =
							((data[0] << (8 *
								data
								[2])) ^
							0xffff) & ui_Temp;
						break;
					case 15:
						/* whole port: data[0] written as supplied */
						break;
					default:
						comedi_error(dev,
							" chan spec wrong");
						return -EINVAL;	/* channel spec wrong */
					}
					outw(data[0],
						devpriv->iobase +
						APCI2200_DIGITAL_OP);
				} else {
					printk("\nSpecified channel not supported\n");
				}
			}
		} else {
			printk("\nSpecified functionality does not exist\n");
			return -EINVAL;
		}
	}
	return insn->n;
}
/*
 * Read back the digital output register.
 *
 * data[0] on entry selects the access mode: 0 reads a single channel
 * (selected by insn->chanspec); 1 reads a group whose width is given
 * by insn->chanspec (2/4/8 bits, group index in data[1]; 15 means the
 * whole port).  Any other mode only logs a message.  The (masked)
 * register value is returned through data[0]; the function returns
 * insn->n, or -EINVAL for a bad group width.
 */
int i_APCI2200_ReadDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	unsigned int mode = data[0];	/* save before *data is overwritten */
	unsigned int chan = CR_CHAN(insn->chanspec);

	/* Snapshot the whole output register first. */
	*data = inw(devpriv->iobase + APCI2200_DIGITAL_OP);

	if (mode == 0) {
		/* single channel: shift the bit down and mask it */
		*data = (*data >> chan) & 0x1;
	} else if (mode == 1) {
		switch (chan) {
		case 2:
			*data = (*data >> (2 * data[1])) & 3;
			break;
		case 4:
			*data = (*data >> (4 * data[1])) & 15;
			break;
		case 8:
			*data = (*data >> (8 * data[1])) & 255;
			break;
		case 15:
			/* whole port: nothing to mask */
			break;
		default:
			comedi_error(dev, " chan spec wrong");
			return -EINVAL;	/* channel spec wrong */
		}
	} else {
		printk("\nSpecified channel not supported \n");
	}
	return insn->n;
}
/*
 * Configure (and stop) the watchdog.
 *
 * data[0] must be 0; data[1] holds the 32-bit reload value, which is
 * written to the board as two 16-bit halves.  Note that data[1] is
 * shifted in place, so the caller's buffer is modified.
 * Returns insn->n, or -EINVAL if data[0] is non-zero.
 */
int i_APCI2200_ConfigWatchdog(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	if (data[0] != 0) {
		printk("\nThe input parameters are wrong\n");
		return -EINVAL;
	}

	/* Stop the watchdog before touching the reload register. */
	outw(0x0,
		devpriv->iobase + APCI2200_WATCHDOG +
		APCI2200_WATCHDOG_ENABLEDISABLE);
	/* Low 16 bits of the reload value... */
	outw(data[1],
		devpriv->iobase + APCI2200_WATCHDOG +
		APCI2200_WATCHDOG_RELOAD_VALUE);
	/* ...then the high 16 bits (data[1] is shifted in place). */
	data[1] = data[1] >> 16;
	outw(data[1],
		devpriv->iobase + APCI2200_WATCHDOG +
		APCI2200_WATCHDOG_RELOAD_VALUE + 2);
	return insn->n;
}
/*
 * Start, stop, or software-trigger the watchdog.
 *
 * data[0]: 0 = stop, 1 = start, 2 = software trigger.  The selected
 * command word is written to the watchdog enable/disable register.
 * Returns insn->n, or -EINVAL for an unknown command.
 */
int i_APCI2200_StartStopWriteWatchdog(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	unsigned int cmd;

	switch (data[0]) {
	case 0:
		cmd = 0x0;	/* disable the watchdog */
		break;
	case 1:
		cmd = 0x0001;	/* enable / start the watchdog */
		break;
	case 2:
		cmd = 0x0201;	/* software trigger */
		break;
	default:
		printk("\nSpecified functionality does not exist\n");
		return -EINVAL;
	}
	outw(cmd,
		devpriv->iobase + APCI2200_WATCHDOG +
		APCI2200_WATCHDOG_ENABLEDISABLE);
	return insn->n;
}
/*
 * Read the watchdog status.
 *
 * data[0] receives bit 0 of the watchdog status register.
 * Always returns insn->n.
 */
int i_APCI2200_ReadWatchdog(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	unsigned int status;

	status = inw(devpriv->iobase + APCI2200_WATCHDOG +
			APCI2200_WATCHDOG_STATUS);
	data[0] = status & 0x1;	/* only bit 0 is reported */
	return insn->n;
}
/*
 * Reset the board: clear all digital outputs, disable the watchdog
 * and zero its 32-bit reload value (written as two 16-bit halves).
 * Always returns 0.
 */
int i_APCI2200_Reset(struct comedi_device *dev)
{
	outw(0x0, devpriv->iobase + APCI2200_DIGITAL_OP);	/* RESETS THE DIGITAL OUTPUTS */
	outw(0x0,
		devpriv->iobase + APCI2200_WATCHDOG +
		APCI2200_WATCHDOG_ENABLEDISABLE);	/* stop the watchdog */
	outw(0x0,
		devpriv->iobase + APCI2200_WATCHDOG +
		APCI2200_WATCHDOG_RELOAD_VALUE);	/* reload value, low word */
	outw(0x0,
		devpriv->iobase + APCI2200_WATCHDOG +
		APCI2200_WATCHDOG_RELOAD_VALUE + 2);	/* reload value, high word */
	return 0;
}
| gpl-2.0 |
MaxiCM/android_kernel_motorola_msm8916 | drivers/staging/vt6656/iwctl.c | 1967 | 50687 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* File: iwctl.c
*
* Purpose: wireless ext & ioctl functions
*
* Author: Lyndon Chen
*
* Date: July 5, 2006
*
* Functions:
*
* Revision History:
*
*/
#include "device.h"
#include "iwctl.h"
#include "mac.h"
#include "card.h"
#include "hostap.h"
#include "power.h"
#include "rf.h"
#include "iowpa.h"
#include "wpactl.h"
#include "control.h"
#include "rndis.h"
/* Centre frequencies (MHz) for channels in the 2.4 GHz, 4.9 GHz and
 * 5 GHz bands.  The first 14 entries are the 802.11b/g channels used
 * by the channel<->frequency conversions in this file. */
static const long frequency_list[] = {
	2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484,
	4915, 4920, 4925, 4935, 4940, 4945, 4960, 4980,
	5035, 5040, 5045, 5055, 5060, 5080, 5170, 5180, 5190, 5200, 5210, 5220, 5230, 5240,
	5260, 5280, 5300, 5320, 5500, 5520, 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680,
	5700, 5745, 5765, 5785, 5805, 5825
};

/* default console message verbosity for this module */
static int msglevel = MSG_LEVEL_INFO;
struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
{
struct vnt_private *pDevice = netdev_priv(dev);
long ldBm;
pDevice->wstats.status = pDevice->eOPMode;
if (pDevice->scStatistic.LinkQuality > 100)
pDevice->scStatistic.LinkQuality = 100;
pDevice->wstats.qual.qual =(u8)pDevice->scStatistic.LinkQuality;
RFvRSSITodBm(pDevice, (u8)(pDevice->uCurrRSSI), &ldBm);
pDevice->wstats.qual.level = ldBm;
pDevice->wstats.qual.noise = 0;
pDevice->wstats.qual.updated = 1;
pDevice->wstats.discard.nwid = 0;
pDevice->wstats.discard.code = 0;
pDevice->wstats.discard.fragment = 0;
pDevice->wstats.discard.retries = pDevice->scStatistic.dwTsrErr;
pDevice->wstats.discard.misc = 0;
pDevice->wstats.miss.beacon = 0;
return &pDevice->wstats;
}
/*
 * Wireless Handler: get protocol name (SIOCGIWNAME).
 * Reports the supported PHY modes; always succeeds.
 */
int iwctl_giwname(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	strcpy(wrqu->name, "802.11-a/b/g");
	return 0;
}
/*
 * Wireless Handler: set scan (SIOCSIWSCAN).
 *
 * Schedules a BSSID scan.  Depending on the request this is a
 * directed (desired-SSID) scan, a passive scan, or a plain scan; the
 * actual work runs asynchronously via bScheduleCommand().
 * Returns 0 on success, -EINVAL if the device is not open, -EFAULT if
 * the management structure is missing, -EAGAIN while a scan is
 * already in progress.
 */
int iwctl_siwscan(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iw_point *wrq = &wrqu->data;
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct iw_scan_req *req = (struct iw_scan_req *)extra;
	u8 abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
	PWLAN_IE_SSID pItemSSID = NULL;

	if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
		return -EINVAL;

	PRINT_K(" SIOCSIWSCAN\n");

	if (pMgmt == NULL)
		return -EFAULT;

	if (pMgmt->eScanState == WMAC_IS_SCANNING) {
		/* a scan is already running */
		PRINT_K("SIOCSIWSCAN(overlap??)-->In scanning...\n");
		return -EAGAIN;
	}

	if (pDevice->byReAssocCount > 0) {	/* reject scan when re-associating! */
		/* report "scan done" so wpa_supplicant moves on */
		union iwreq_data wrqu;
		PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n");
		memset(&wrqu, 0, sizeof(wrqu));
		wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
		return 0;
	}

	spin_lock_irq(&pDevice->lock);

	/* drop stale entries before repopulating the BSS list */
	BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);

	/* active scan OR passive scan OR desired-SSID scan */
	if (wrq->length == sizeof(struct iw_scan_req)) {
		if (wrq->flags & IW_SCAN_THIS_ESSID) {	/* desired-SSID scan */
			/* build a WLAN_IE_SSID element from the request */
			memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
			pItemSSID = (PWLAN_IE_SSID)abyScanSSID;
			pItemSSID->byElementID = WLAN_EID_SSID;
			memcpy(pItemSSID->abySSID, req->essid, (int)req->essid_len);
			if (pItemSSID->abySSID[req->essid_len] == '\0') {
				if (req->essid_len > 0)
					pItemSSID->len = req->essid_len;
			} else {
				pItemSSID->len = req->essid_len;
			}
			pMgmt->eScanType = WMAC_SCAN_PASSIVE;
			PRINT_K("SIOCSIWSCAN:[desired_ssid=%s,len=%d]\n", ((PWLAN_IE_SSID)abyScanSSID)->abySSID,
				((PWLAN_IE_SSID)abyScanSSID)->len);
			bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
			spin_unlock_irq(&pDevice->lock);
			return 0;
		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {	/* passive scan */
			pMgmt->eScanType = WMAC_SCAN_PASSIVE;
		}
	} else {	/* active scan */
		pMgmt->eScanType = WMAC_SCAN_ACTIVE;
	}

	/* NOTE(review): this unconditionally forces a passive scan,
	 * overriding the active/passive choice made just above — verify
	 * whether the override is intentional. */
	pMgmt->eScanType = WMAC_SCAN_PASSIVE;
	bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
	spin_unlock_irq(&pDevice->lock);

	return 0;
}
/*
 * Wireless Handler: get scan results (SIOCGIWSCAN).
 *
 * Streams one record per active entry of the driver's BSS list into
 * the caller's buffer via the iwe_stream_* helpers: BSSID, SSID,
 * mode, channel and frequency, signal quality, encryption flag,
 * supported bit rates, and any WPA/RSN information elements.
 * Returns -EFAULT if the management structure is missing, -EAGAIN
 * while a scan is still running, otherwise 0 with wrq->length set to
 * the number of bytes written.
 */
int iwctl_giwscan(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct iw_point *wrq = &wrqu->data;
	int ii;
	int jj;
	int kk;
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	PKnownBSS pBSS;
	PWLAN_IE_SSID pItemSSID;
	PWLAN_IE_SUPP_RATES pSuppRates;
	PWLAN_IE_SUPP_RATES pExtSuppRates;
	char *current_ev = extra;
	char *end_buf = extra + IW_SCAN_MAX_DATA;
	char *current_val = NULL;
	struct iw_event iwe;
	long ldBm;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWSCAN\n");
	if (pMgmt == NULL)
		return -EFAULT;
	if (pMgmt->eScanState == WMAC_IS_SCANNING) {
		/* scan still in progress; caller should retry */
		return -EAGAIN;
	}
	pBSS = &(pMgmt->sBSSList[0]);
	for (ii = 0, jj = 0; jj < MAX_BSS_NUM ; jj++) {
		if (current_ev >= end_buf)
			break;	/* no room for further events */
		pBSS = &(pMgmt->sBSSList[jj]);
		if (pBSS->bActive) {
			/* ADD mac address */
			memset(&iwe, 0, sizeof(iwe));
			iwe.cmd = SIOCGIWAP;
			iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
			memcpy(iwe.u.ap_addr.sa_data, pBSS->abyBSSID, WLAN_BSSID_LEN);
			current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_ADDR_LEN);
			/* ADD ssid */
			memset(&iwe, 0, sizeof(iwe));
			iwe.cmd = SIOCGIWESSID;
			pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
			iwe.u.data.length = pItemSSID->len;
			iwe.u.data.flags = 1;
			current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pItemSSID->abySSID);
			/* ADD mode (infrastructure vs ad-hoc, from capability field) */
			memset(&iwe, 0, sizeof(iwe));
			iwe.cmd = SIOCGIWMODE;
			if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo))
				iwe.u.mode = IW_MODE_INFRA;
			else
				iwe.u.mode = IW_MODE_ADHOC;
			iwe.len = IW_EV_UINT_LEN;
			current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_UINT_LEN);
			/* ADD frequency: first as channel number, then in Hz */
			pSuppRates = (PWLAN_IE_SUPP_RATES)pBSS->abySuppRates;
			pExtSuppRates = (PWLAN_IE_SUPP_RATES)pBSS->abyExtSuppRates;
			memset(&iwe, 0, sizeof(iwe));
			iwe.cmd = SIOCGIWFREQ;
			iwe.u.freq.m = pBSS->uChannel;
			iwe.u.freq.e = 0;
			iwe.u.freq.i = 0;
			current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
			{
				/* channel numbers are 1-based; clamp below 1 */
				int f = (int)pBSS->uChannel - 1;
				if (f < 0)
					f = 0;
				iwe.u.freq.m = frequency_list[f] * 100000;
				iwe.u.freq.e = 1;
			}
			current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
			/* ADD quality: map dBm onto 0..100
			 * (>= -50 dBm -> 100, <= -90 dBm -> 0, linear between) */
			memset(&iwe, 0, sizeof(iwe));
			iwe.cmd = IWEVQUAL;
			RFvRSSITodBm(pDevice, (u8)(pBSS->uRSSI), &ldBm);
			iwe.u.qual.level = ldBm;
			iwe.u.qual.noise = 0;
			if (-ldBm < 50)
				iwe.u.qual.qual = 100;
			else if (-ldBm > 90)
				iwe.u.qual.qual = 0;
			else
				iwe.u.qual.qual = (40 - (-ldBm - 50)) * 100 / 40;
			iwe.u.qual.updated = 7;
			current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
			/* ADD encryption flag */
			memset(&iwe, 0, sizeof(iwe));
			iwe.cmd = SIOCGIWENCODE;
			iwe.u.data.length = 0;
			if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo))
				iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
			else
				iwe.u.data.flags = IW_ENCODE_DISABLED;
			/* NOTE(review): the SSID buffer is passed as the encode
			 * event payload; length is 0 so presumably nothing is
			 * copied — confirm against the iwe_stream_add_point
			 * contract. */
			current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pItemSSID->abySSID);
			/* ADD bit rates (basic + extended) */
			memset(&iwe, 0, sizeof(iwe));
			iwe.cmd = SIOCGIWRATE;
			iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
			current_val = current_ev + IW_EV_LCP_LEN;
			for (kk = 0; kk < 12; kk++) {
				if (pSuppRates->abyRates[kk] == 0)
					break;
				/* Bit rate given in 500 kb/s units (+ 0x80) */
				iwe.u.bitrate.value = ((pSuppRates->abyRates[kk] & 0x7f) * 500000);
				current_val = iwe_stream_add_value(info, current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN);
			}
			for (kk = 0; kk < 8; kk++) {
				if (pExtSuppRates->abyRates[kk] == 0)
					break;
				/* Bit rate given in 500 kb/s units (+ 0x80) */
				iwe.u.bitrate.value = ((pExtSuppRates->abyRates[kk] & 0x7f) * 500000);
				current_val = iwe_stream_add_value(info, current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN);
			}
			if ((current_val - current_ev) > IW_EV_LCP_LEN)
				current_ev = current_val;	/* at least one rate was added */
			/* ADD WPA / RSN information elements, if present */
			if ((pBSS->wWPALen > 0) && (pBSS->wWPALen <= MAX_WPA_IE_LEN)) {
				memset(&iwe, 0, sizeof(iwe));
				iwe.cmd = IWEVGENIE;
				iwe.u.data.length = pBSS->wWPALen;
				current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pBSS->byWPAIE);
			}
			if ((pBSS->wRSNLen > 0) && (pBSS->wRSNLen <= MAX_WPA_IE_LEN)) {
				memset(&iwe, 0, sizeof(iwe));
				iwe.cmd = IWEVGENIE;
				iwe.u.data.length = pBSS->wRSNLen;
				current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pBSS->byRSNIE);
			}
		}
	} /* for each BSS list entry */
	wrq->length = current_ev - extra;
	return 0;
}
/*
 * Wireless Handler: set frequency or channel (SIOCSIWFREQ).
 *
 * A 2.4 GHz frequency (wrq->e == 1) is first mapped onto its
 * 802.11b/g channel number; the request is then validated as a
 * channel in 1..14 and stored in pDevice->uChannel.  Returns 0 on
 * success, -EOPNOTSUPP for frequencies/channels outside the 2.4 GHz
 * band, or -EINVAL for an invalid channel number.
 */
int iwctl_siwfreq(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iw_freq *wrq = &wrqu->freq;
	int channel;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWFREQ\n");

	/* If given as a frequency, convert to a 2.4 GHz channel number. */
	if (wrq->e == 1 && wrq->m >= (int)2.412e8 && wrq->m <= (int)2.487e8) {
		int freq = wrq->m / 100000;
		int idx;

		for (idx = 0; idx < 14; idx++)
			if (freq == frequency_list[idx])
				break;
		wrq->e = 0;
		wrq->m = idx + 1;	/* idx == 14 means "not found"; rejected below */
	}

	if (wrq->m > 14 || wrq->e > 0)
		return -EOPNOTSUPP;

	channel = wrq->m;
	if (channel < 1 || channel > 14) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: New channel value of %d is invalid!\n", dev->name, wrq->m);
		return -EINVAL;
	}

	/* Yes ! We can set it !!! */
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Set to channel = %d\n", channel);
	pDevice->uChannel = channel;
	return 0;
}
/*
 * Wireless Handler: get frequency or channel (SIOCGIWFREQ).
 *
 * Reports the current channel either as a raw channel number (when
 * WEXT_USECHANNELS is defined) or as a frequency in units of 100 kHz
 * looked up in frequency_list[].
 */
int iwctl_giwfreq(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iw_freq *wrq = &wrqu->freq;
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWFREQ\n");
	if (pMgmt == NULL)
		return -EFAULT;

#ifdef WEXT_USECHANNELS
	wrq->m = (int)pMgmt->uCurrChannel;
	wrq->e = 0;	/* plain channel number */
#else
	{
		/* channel numbers are 1-based; clamp below 1 to index 0 */
		int f = (int)pMgmt->uCurrChannel - 1;
		if (f < 0)
			f = 0;
		wrq->m = frequency_list[f] * 100000;
		wrq->e = 1;
	}
#endif
	return 0;
}
/*
* Wireless Handler: set operation mode
*/
int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct vnt_private *pDevice = netdev_priv(dev);
__u32 *wmode = &wrqu->mode;
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
int rc = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWMODE\n");
if (pMgmt == NULL)
return -EFAULT;
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP && pDevice->bEnableHostapd) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
"Can't set operation mode, hostapd is running\n");
return rc;
}
switch (*wmode) {
case IW_MODE_ADHOC:
if (pMgmt->eConfigMode != WMAC_CONFIG_IBSS_STA) {
pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
if (pDevice->flags & DEVICE_FLAGS_OPENED)
pDevice->bCommit = true;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to ad-hoc \n");
break;
case IW_MODE_AUTO:
case IW_MODE_INFRA:
if (pMgmt->eConfigMode != WMAC_CONFIG_ESS_STA) {
pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
if (pDevice->flags & DEVICE_FLAGS_OPENED)
pDevice->bCommit = true;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to infrastructure \n");
break;
case IW_MODE_MASTER:
pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
rc = -EOPNOTSUPP;
break;
if (pMgmt->eConfigMode != WMAC_CONFIG_AP) {
pMgmt->eConfigMode = WMAC_CONFIG_AP;
if (pDevice->flags & DEVICE_FLAGS_OPENED)
pDevice->bCommit = true;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to Access Point \n");
break;
case IW_MODE_REPEAT:
pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
rc = -EOPNOTSUPP;
break;
default:
rc = -EINVAL;
}
if (pDevice->bCommit) {
if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
bScheduleCommand((void *) pDevice,
WLAN_CMD_RUN_AP, NULL);
spin_unlock_irq(&pDevice->lock);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
"Commit the settings\n");
spin_lock_irq(&pDevice->lock);
if (pDevice->bLinkPass &&
memcmp(pMgmt->abyCurrSSID,
pMgmt->abyDesireSSID,
WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN)) {
bScheduleCommand((void *) pDevice,
WLAN_CMD_DISASSOCIATE, NULL);
} else {
pDevice->bLinkPass = false;
pMgmt->eCurrState = WMAC_STATE_IDLE;
memset(pMgmt->abyCurrBSSID, 0, 6);
}
ControlvMaskByte(pDevice,
MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY,
LEDSTS_STS, LEDSTS_SLOW);
netif_stop_queue(pDevice->dev);
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
if (!pDevice->bWPASuppWextEnabled)
bScheduleCommand((void *) pDevice,
WLAN_CMD_BSSID_SCAN,
pMgmt->abyDesireSSID);
bScheduleCommand((void *) pDevice,
WLAN_CMD_SSID,
NULL);
spin_unlock_irq(&pDevice->lock);
}
pDevice->bCommit = false;
}
return rc;
}
/*
* Wireless Handler: get operation mode
*/
/*
 * Wireless Handler: get operation mode (SIOCGIWMODE)
 *
 * Translates the driver's internal configuration mode into the
 * wireless-extensions IW_MODE_* constant reported to userspace.
 * Anything unrecognised is reported as ad-hoc.
 */
int iwctl_giwmode(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	__u32 *wmode = &wrqu->mode;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWMODE\n");

	if (pMgmt == NULL)
		return -EFAULT;

	/* If not managed, assume it's ad-hoc. */
	switch (pMgmt->eConfigMode) {
	case WMAC_CONFIG_ESS_STA:
	case WMAC_CONFIG_AUTO:
		*wmode = IW_MODE_INFRA;
		break;
	case WMAC_CONFIG_AP:
		*wmode = IW_MODE_MASTER;
		break;
	case WMAC_CONFIG_IBSS_STA:
	default:
		*wmode = IW_MODE_ADHOC;
	}

	return 0;
}
/*
* Wireless Handler: get capability range
*/
/*
 * Wireless Handler: get capability range (SIOCGIWRANGE)
 *
 * Fills the caller-supplied struct iw_range in 'extra' with the static
 * capabilities of this device: 14 channels, 13 supported rates, WEP/WPA
 * key sizes, power-management and retry limits. Only writes when
 * userspace actually passed a buffer (wrq->pointer non-NULL).
 */
int iwctl_giwrange(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct iw_point *wrq = &wrqu->data;
struct iw_range *range = (struct iw_range *)extra;
int i;
int k;
/* 802.11b/g rate set in units of 500 kb/s (0x02 = 1 Mb/s ... 0x90 = 72). */
u8 abySupportedRates[13] = {
0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48,
0x60, 0x6C, 0x90
};
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRANGE\n");
if (wrq->pointer) {
wrq->length = sizeof(struct iw_range);
memset(range, 0, sizeof(struct iw_range));
range->min_nwid = 0x0000;
range->max_nwid = 0x0000;
range->num_channels = 14;
// Should be based on cap_rid.country to give only
// what the current card support
k = 0;
for (i = 0; i < 14; i++) {
range->freq[k].i = i + 1; // List index
/* frequency_list[] is in MHz; iw_freq is m * 10^e Hz. */
range->freq[k].m = frequency_list[i] * 100000;
range->freq[k++].e = 1; // Values in table in MHz -> * 10^5 * 10
}
range->num_frequency = k;
// Hum... Should put the right values there
range->max_qual.qual = 100;
range->max_qual.level = 0;
range->max_qual.noise = 0;
range->sensitivity = 255;
/* Rates are stored as multiples of 500 kb/s; convert to bit/s. */
for (i = 0; i < 13; i++) {
range->bitrate[i] = abySupportedRates[i] * 500000;
if (range->bitrate[i] == 0)
break;
}
range->num_bitrates = i;
// Set an indication of the max TCP throughput
// in bit/s that we can expect using this interface.
// May be use for QoS stuff... Jean II
if (i > 2)
range->throughput = 5 * 1000 * 1000;
else
range->throughput = 1.5 * 1000 * 1000;
range->min_rts = 0;
range->max_rts = 2312;
range->min_frag = 256;
range->max_frag = 2312;
// the encoding capabilities
range->num_encoding_sizes = 3;
// 64(40) bits WEP
range->encoding_size[0] = 5;
// 128(104) bits WEP
range->encoding_size[1] = 13;
// 256 bits for WPA-PSK
range->encoding_size[2] = 32;
// 4 keys are allowed
range->max_encoding_tokens = 4;
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
range->min_pmp = 0;
range->max_pmp = 1000000; // 1 secs
range->min_pmt = 0;
range->max_pmt = 1000000; // 1 secs
range->pmp_flags = IW_POWER_PERIOD;
range->pmt_flags = IW_POWER_TIMEOUT;
range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
// Transmit Power - values are in mW
range->txpower[0] = 100;
range->num_txpower = 1;
range->txpower_capa = IW_TXPOW_MWATT;
range->we_version_source = WIRELESS_EXT;
range->we_version_compiled = WIRELESS_EXT;
range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
range->retry_flags = IW_RETRY_LIMIT;
range->r_time_flags = IW_RETRY_LIFETIME;
range->min_retry = 1;
range->max_retry = 65535;
range->min_r_time = 1024;
range->max_r_time = 65535 * 1024;
// Experimental measurements - boundary 11/5.5 Mb/s
// Note : with or without the (local->rssi), results
// are somewhat different. - Jean II
range->avg_qual.qual = 6;
range->avg_qual.level = 176; // -80 dBm
range->avg_qual.noise = 0;
}
return 0;
}
/*
* Wireless Handler : set ap mac address
*/
/*
 * Wireless Handler : set ap mac address (SIOCSIWAP)
 *
 * Records the BSSID userspace wants to associate with. Broadcast or
 * all-zero addresses are silently accepted-but-ignored (returns 0),
 * as is an AP that appears twice in the BSS list (hidden-SSID case,
 * where the right entry cannot be determined). Otherwise a commit is
 * flagged if the interface is up.
 */
int iwctl_siwap(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct sockaddr *wrq = &wrqu->ap_addr;
	u8 abyNullBSSID[WLAN_BSSID_LEN] = {0};
	unsigned uMatches = 0;
	unsigned ii;

	PRINT_K(" SIOCSIWAP\n");

	if (pMgmt == NULL)
		return -EFAULT;
	if (wrq->sa_family != ARPHRD_ETHER)
		return -EINVAL;

	memcpy(pMgmt->abyDesireBSSID, wrq->sa_data, 6);

	/* Broadcast / all-zero BSSIDs are not usable targets. */
	if (is_broadcast_ether_addr(pMgmt->abyDesireBSSID) ||
	    memcmp(pMgmt->abyDesireBSSID, abyNullBSSID, 6) == 0) {
		PRINT_K("SIOCSIWAP:invalid desired BSSID return!\n");
		return 0;
	}

	/*
	 * A hidden-SSID AP shows up twice in the scan list; with two
	 * identical BSSIDs there is no way to tell which entry to join,
	 * so ignore the request.
	 */
	for (ii = 0; ii < MAX_BSS_NUM; ii++) {
		if (pMgmt->sBSSList[ii].bActive &&
		    !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID,
					pMgmt->abyDesireBSSID))
			uMatches++;
	}
	if (uMatches >= 2) {
		PRINT_K("SIOCSIWAP:ignore for desired AP in hidden mode\n");
		return 0;
	}

	if (pDevice->flags & DEVICE_FLAGS_OPENED)
		pDevice->bCommit = true;

	return 0;
}
/*
* Wireless Handler: get ap mac address
*/
/*
 * Wireless Handler: get ap mac address (SIOCGIWAP)
 *
 * Reports the BSSID we are currently associated with. In AP mode the
 * current BSSID is always returned; in station mode a zero address is
 * returned while not associated (bLinkPass false).
 */
int iwctl_giwap(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct sockaddr *wrq = &wrqu->ap_addr;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWAP\n");

	if (pMgmt == NULL)
		return -EFAULT;

	if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)
		memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);
	else if (pDevice->bLinkPass == false)
		memset(wrq->sa_data, 0, 6);
	else
		memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);

	wrq->sa_family = ARPHRD_ETHER;
	return 0;
}
/*
* Wireless Handler: get ap list
*/
/*
 * Wireless Handler: get ap list (SIOCGIWAPLIST)
 *
 * Copies up to IW_MAX_AP entries of the active BSS list into 'extra':
 * first an array of struct sockaddr (BSSIDs), immediately followed by
 * a matching array of struct iw_quality. Requires CAP_NET_ADMIN.
 */
int iwctl_giwaplist(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct iw_point *wrq = &wrqu->data;
	PKnownBSS pBSS = &pMgmt->sBSSList[0];
	struct sockaddr *addr_list;
	struct iw_quality *qual_list;
	int count = 0;
	int ii;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWAPLIST\n");

	if (pBSS == NULL)
		return -ENODEV;
	/* Only super-user can see AP list */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (!wrq->pointer)
		return -EINVAL;

	addr_list = kzalloc(sizeof(struct sockaddr) * IW_MAX_AP, GFP_KERNEL);
	if (addr_list == NULL)
		return -ENOMEM;
	qual_list = kzalloc(sizeof(struct iw_quality) * IW_MAX_AP, GFP_KERNEL);
	if (qual_list == NULL) {
		kfree(addr_list);
		return -ENOMEM;
	}

	for (ii = 0; ii < MAX_BSS_NUM && count < IW_MAX_AP; ii++) {
		if (!pBSS[ii].bActive)
			continue;
		memcpy(addr_list[count].sa_data, pBSS[ii].abyBSSID, 6);
		addr_list[count].sa_family = ARPHRD_ETHER;
		qual_list[count].level = pBSS[ii].uRSSI;
		qual_list[count].qual = 0;
		qual_list[count].noise = 0;
		qual_list[count].updated = 2;
		count++;
	}

	wrq->flags = 1; /* Should be defined */
	wrq->length = count;
	/* sockaddr array first, quality array packed right behind it. */
	memcpy(extra, addr_list, sizeof(struct sockaddr) * count);
	memcpy(extra + sizeof(struct sockaddr) * count, qual_list,
	       sizeof(struct iw_quality) * count);

	kfree(addr_list);
	kfree(qual_list);
	return 0;
}
/*
* Wireless Handler: set essid
*/
/*
 * Wireless Handler: set essid (SIOCSIWESSID)
 *
 * Records the desired SSID. flags == 0 means "any": the desired SSID is
 * cleared and the desired BSSID set to broadcast. Otherwise the SSID is
 * stored as a WLAN_IE_SSID element; when wpa_supplicant WEXT mode is
 * active, a scan and/or association command is scheduled immediately,
 * with special handling for hidden-SSID APs (same BSSID listed twice).
 */
int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct vnt_private *pDevice = netdev_priv(dev);
struct iw_point *wrq = &wrqu->essid;
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
PWLAN_IE_SSID pItemSSID;
if (pMgmt == NULL)
return -EFAULT;
if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
return -EINVAL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWESSID :\n");
/* Any SSID change invalidates a previous WPA authentication. */
pDevice->fWPA_Authened = false;
// Check if we asked for `any'
if (wrq->flags == 0) {
// Just send an empty SSID list
memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
memset(pMgmt->abyDesireBSSID, 0xFF,6);
PRINT_K("set essid to 'any' \n");
// Unknown desired AP, so here need not associate??
return 0;
} else {
// Set the SSID
memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
pItemSSID->byElementID = WLAN_EID_SSID;
memcpy(pItemSSID->abySSID, extra, wrq->length);
/* NOTE(review): this reads extra[wrq->length], one byte past the
 * length userspace supplied -- presumably relying on the wext core
 * buffer being larger; confirm against the ioctl copy-in path. */
if (pItemSSID->abySSID[wrq->length] == '\0') {
if (wrq->length>0)
pItemSSID->len = wrq->length;
} else {
pItemSSID->len = wrq->length;
}
PRINT_K("set essid to %s \n", pItemSSID->abySSID);
// mike: need clear desiredBSSID
if (pItemSSID->len==0) {
memset(pMgmt->abyDesireBSSID, 0xFF, 6);
return 0;
}
// Wext wil order another command of siwap to link
// with desired AP, so here need not associate??
if (pDevice->bWPASuppWextEnabled == true) {
/* Search whether the target AP is in hidden-SSID mode. */
PKnownBSS pCurr = NULL;
u8 abyTmpDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
unsigned ii;
unsigned uSameBssidNum = 0;
memcpy(abyTmpDesireSSID, pMgmt->abyDesireSSID, sizeof(abyTmpDesireSSID));
pCurr = BSSpSearchBSSList(pDevice, NULL,
abyTmpDesireSSID,
pDevice->eConfigPHYMode);
if (pCurr == NULL) {
/* SSID not in the BSS list yet: actively scan for it first. */
PRINT_K("SIOCSIWESSID:hidden ssid site survey before associate.......\n");
vResetCommandTimer((void *)pDevice);
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
bScheduleCommand((void *)pDevice,
WLAN_CMD_BSSID_SCAN,
pMgmt->abyDesireSSID);
bScheduleCommand((void *)pDevice,
WLAN_CMD_SSID,
pMgmt->abyDesireSSID);
} else { // mike: to find out if that desired SSID is a
// hidden-ssid AP, by means of judging if there
// are two same BSSID exist in list ?
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (pMgmt->sBSSList[ii].bActive &&
!compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID,
pCurr->abyBSSID)) {
uSameBssidNum++;
}
}
if (uSameBssidNum >= 2) { // hit: desired AP is in hidden ssid mode!!!
PRINT_K("SIOCSIWESSID:hidden ssid directly associate.......\n");
vResetCommandTimer((void *)pDevice);
pMgmt->eScanType = WMAC_SCAN_PASSIVE; // this scan type, you'll submit scan result!
bScheduleCommand((void *)pDevice,
WLAN_CMD_BSSID_SCAN,
pMgmt->abyDesireSSID);
bScheduleCommand((void *)pDevice,
WLAN_CMD_SSID,
pMgmt->abyDesireSSID);
}
}
return 0;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set essid = %s \n", pItemSSID->abySSID);
}
/* Non-WEXT path: just flag the change for a later commit. */
if (pDevice->flags & DEVICE_FLAGS_OPENED)
pDevice->bCommit = true;
return 0;
}
/*
* Wireless Handler: get essid
*/
/*
 * Wireless Handler: get essid (SIOCGIWESSID)
 *
 * Copies the currently associated SSID into 'extra' as a
 * NUL-terminated string and reports its length.
 */
int iwctl_giwessid(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct iw_point *wrq = &wrqu->essid;
	PWLAN_IE_SSID pCurrSSID;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWESSID\n");

	if (pMgmt == NULL)
		return -EFAULT;

	/* Note: if wrq->u.data.flags != 0, we should get the relevant
	 * SSID from the SSID list... */

	/* Get the current SSID. */
	pCurrSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
	memcpy(extra, pCurrSSID->abySSID, pCurrSSID->len);
	extra[pCurrSSID->len] = '\0';

	wrq->length = pCurrSSID->len;
	wrq->flags = 1; /* active */
	return 0;
}
/*
* Wireless Handler: set data rate
*/
/*
 * Wireless Handler: set data rate (SIOCSIWRATE)
 *
 * Accepts either a rate-table index (0..12), a bit/s value that is
 * matched against the supported-rate table, or -1 for the maximum
 * rate. uConnectionRate == 13 is the "auto" sentinel used elsewhere
 * (see iwctl_giwrate).
 */
int iwctl_siwrate(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct vnt_private *pDevice = netdev_priv(dev);
struct iw_param *wrq = &wrqu->bitrate;
int rc = 0;
u8 brate = 0;
int i;
/* Supported rates in 500 kb/s units (0x02 = 1 Mb/s ... 0x90 = 72). */
u8 abySupportedRates[13] = {
0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48,
0x60, 0x6C, 0x90
};
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRATE \n");
if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
rc = -EINVAL;
return rc;
}
// First: get a valid bit rate value
// Which type of value
if ((wrq->value < 13) && (wrq->value >= 0)) {
// Setting by rate index
// Find value in the magic rate table
brate = wrq->value;
} else {
// Setting by frequency value
u8 normvalue = (u8)(wrq->value/500000);
// Check if rate is valid
for (i = 0; i < 13; i++) {
if (normvalue == abySupportedRates[i]) {
brate = i;
break;
}
}
}
// -1 designed the max rate (mostly auto mode)
if (wrq->value == -1) {
// Get the highest available rate
for (i = 0; i < 13; i++) {
if (abySupportedRates[i] == 0)
break;
}
if (i != 0)
brate = i - 1;
}
// Check that it is valid
// brate is index of abySupportedRates[]
/* NOTE(review): valid indices are 0..12, so this boundary looks like
 * it should be >= 13; in practice brate can never exceed 12 given the
 * assignments above, so the check is effectively dead -- confirm
 * before tightening. */
if (brate > 13 ) {
rc = -EINVAL;
return rc;
}
// Now, check if we want a fixed or auto value
if (wrq->fixed != 0) {
// Fixed mode
// One rate, fixed
pDevice->bFixRate = true;
/* 11B hardware tops out at index 3 (11 Mb/s); clamp OFDM rates. */
if ((pDevice->byBBType == BB_TYPE_11B) && (brate > 3)) {
pDevice->uConnectionRate = 3;
} else {
pDevice->uConnectionRate = brate;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Fixed to Rate %d \n", pDevice->uConnectionRate);
}
} else {
/* Auto mode: 13 is the "no fixed rate" sentinel. */
pDevice->bFixRate = false;
pDevice->uConnectionRate = 13;
}
return rc;
}
/*
* Wireless Handler: get data rate
*/
/*
 * Wireless Handler: get data rate (SIOCGIWRATE)
 *
 * Reports the current bit rate in bit/s. A fixed rate index (< 13)
 * is read from the rate table; otherwise (and in AP mode) the PHY
 * peak rate is reported, and with index 13 ("auto") the rate the
 * hardware is actually using (wCurrentRate) is looked up.
 */
int iwctl_giwrate(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct iw_param *wrq = &wrqu->bitrate;
	/* Rates in 500 kb/s units (0x02 = 1 Mb/s ... 0x90 = 72 Mb/s). */
	const u8 abySupportedRates[13] = {
		0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30,
		0x48, 0x60, 0x6C, 0x90
	};
	int brate = 0;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRATE\n");

	if (pMgmt == NULL)
		return -EFAULT;

	if (pDevice->uConnectionRate < 13) {
		brate = abySupportedRates[pDevice->uConnectionRate];
	} else {
		if (pDevice->byBBType == BB_TYPE_11B)
			brate = 0x16;
		else if (pDevice->byBBType == BB_TYPE_11G ||
			 pDevice->byBBType == BB_TYPE_11A)
			brate = 0x6C;
	}

	/* AP mode always reports the PHY peak rate. */
	if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
		if (pDevice->byBBType == BB_TYPE_11B)
			brate = 0x16;
		else if (pDevice->byBBType == BB_TYPE_11G ||
			 pDevice->byBBType == BB_TYPE_11A)
			brate = 0x6C;
	}

	/* Auto mode (sentinel 13): report what the hardware is using. */
	if (pDevice->uConnectionRate == 13)
		brate = abySupportedRates[pDevice->wCurrentRate];

	wrq->value = brate * 500000;
	/* If more than one rate, set auto. */
	if (pDevice->bFixRate == true)
		wrq->fixed = true;

	return 0;
}
/*
* Wireless Handler: set rts threshold
*/
/*
 * Wireless Handler: set rts threshold (SIOCSIWRTS)
 *
 * Disabled means "threshold at maximum frame size" (2312); otherwise
 * the value must lie in [0, 2312].
 */
int iwctl_siwrts(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iw_param *wrq = &wrqu->rts;

	if (wrq->disabled) {
		pDevice->wRTSThreshold = 2312;
		return 0;
	}

	if (wrq->value < 0 || wrq->value > 2312)
		return -EINVAL;

	pDevice->wRTSThreshold = wrq->value;
	return 0;
}
/*
* Wireless Handler: get rts
*/
/*
 * Wireless Handler: get rts (SIOCGIWRTS)
 *
 * Reports the RTS threshold; a value at the 2312 maximum is shown as
 * "disabled".
 */
int iwctl_giwrts(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iw_param *wrq = &wrqu->rts;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRTS\n");

	wrq->fixed = 1;
	wrq->value = pDevice->wRTSThreshold;
	wrq->disabled = (wrq->value >= 2312);
	return 0;
}
/*
* Wireless Handler: set fragment threshold
*/
/*
 * Wireless Handler: set fragment threshold (SIOCSIWFRAG)
 *
 * Disabled maps to the 2312 maximum; otherwise the value must lie in
 * [256, 2312] and is rounded down to an even number.
 */
int iwctl_siwfrag(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iw_param *wrq = &wrqu->frag;
	int fthr;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWFRAG\n");

	fthr = wrq->disabled ? 2312 : wrq->value;
	if (fthr < 256 || fthr > 2312)
		return -EINVAL;

	/* Fragmentation thresholds must be even. */
	pDevice->wFragmentationThreshold = (u16)(fthr & ~0x1);
	return 0;
}
/*
* Wireless Handler: get fragment threshold
*/
/*
 * Wireless Handler: get fragment threshold (SIOCGIWFRAG)
 *
 * Reports the fragmentation threshold; the 2312 maximum is shown as
 * "disabled".
 */
int iwctl_giwfrag(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iw_param *wrq = &wrqu->frag;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWFRAG\n");

	wrq->fixed = 1;
	wrq->value = pDevice->wFragmentationThreshold;
	wrq->disabled = (wrq->value >= 2312);
	return 0;
}
/*
* Wireless Handler: set retry threshold
*/
/*
 * Wireless Handler: set retry threshold (SIOCSIWRETRY)
 *
 * IW_RETRY_LIMIT with the MAX modifier sets the long retry limit, MIN
 * sets the short one, no modifier sets both. IW_RETRY_LIFETIME sets
 * the MSDU transmit lifetime. Disabling retries is not supported.
 */
int iwctl_siwretry(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iw_param *wrq = &wrqu->retry;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRETRY\n");

	if (wrq->disabled)
		return -EINVAL;

	if (wrq->flags & IW_RETRY_LIMIT) {
		if (wrq->flags & IW_RETRY_MAX) {
			pDevice->byLongRetryLimit = wrq->value;
		} else if (wrq->flags & IW_RETRY_MIN) {
			pDevice->byShortRetryLimit = wrq->value;
		} else {
			/* No modifier: set both limits. */
			pDevice->byLongRetryLimit = wrq->value;
			pDevice->byShortRetryLimit = wrq->value;
		}
	}

	if (wrq->flags & IW_RETRY_LIFETIME)
		pDevice->wMaxTransmitMSDULifetime = wrq->value;

	return 0;
}
/*
* Wireless Handler: get retry threshold
*/
/*
 * Wireless Handler: get retry threshold (SIOCGIWRETRY)
 *
 * Depending on the requested type, reports either the MSDU transmit
 * lifetime or a retry limit. By default the short (min) limit is
 * returned, flagged with IW_RETRY_MIN when it differs from the long
 * limit.
 */
int iwctl_giwretry(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iw_param *wrq = &wrqu->retry;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRETRY\n");

	wrq->disabled = 0; /* Can't be disabled */

	if ((wrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
		wrq->flags = IW_RETRY_LIFETIME;
		wrq->value = (int)pDevice->wMaxTransmitMSDULifetime; /* ms */
	} else if (wrq->flags & IW_RETRY_MAX) {
		wrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
		wrq->value = (int)pDevice->byLongRetryLimit;
	} else {
		/* Note: by default, display the min retry number. */
		wrq->flags = IW_RETRY_LIMIT;
		wrq->value = (int)pDevice->byShortRetryLimit;
		if ((int)pDevice->byShortRetryLimit != (int)pDevice->byLongRetryLimit)
			wrq->flags |= IW_RETRY_MIN;
	}

	return 0;
}
/*
* Wireless Handler: set encode mode
*/
/*
 * Wireless Handler: set encode mode (SIOCSIWENCODE)
 *
 * Programs a WEP key (40/104/232 bit) into the device key table under
 * the device spinlock, records the active key index, and applies the
 * open/shared/disabled flags. The 1-based key index from userspace is
 * converted to 0-based before use.
 */
int iwctl_siwencode(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct vnt_private *pDevice = netdev_priv(dev);
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_point *wrq = &wrqu->encoding;
u32 dwKeyIndex = (u32)(wrq->flags & IW_ENCODE_INDEX);
int ii;
int uu;
int rc = 0;
int index = (wrq->flags & IW_ENCODE_INDEX);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWENCODE\n");
if (pMgmt == NULL)
return -EFAULT;
// Check the size of the key
if (wrq->length > WLAN_WEP232_KEYLEN) {
rc = -EINVAL;
return rc;
}
/* Key index is 1-based on the wire; 0 means "current key". */
if (dwKeyIndex > WLAN_WEP_NKEYS) {
rc = -EINVAL;
return rc;
}
if (dwKeyIndex > 0)
dwKeyIndex--;
// Send the key to the card
if (wrq->length > 0) {
if (wrq->length == WLAN_WEP232_KEYLEN) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 232 bit wep key\n");
} else if (wrq->length == WLAN_WEP104_KEYLEN) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 104 bit wep key\n");
} else if (wrq->length == WLAN_WEP40_KEYLEN) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 40 bit wep key, index= %d\n", (int)dwKeyIndex);
}
memset(pDevice->abyKey, 0, WLAN_WEP232_KEYLEN);
memcpy(pDevice->abyKey, extra, wrq->length);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"abyKey: ");
for (ii = 0; ii < wrq->length; ii++)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", pDevice->abyKey[ii]);
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
spin_lock_irq(&pDevice->lock);
/* NOTE(review): bit 31 OR'ed into the index presumably marks
 * this as the transmit key -- confirm against KeybSetDefaultKey. */
KeybSetDefaultKey(pDevice,
&(pDevice->sKey),
dwKeyIndex | (1 << 31),
wrq->length, NULL,
pDevice->abyKey,
KEY_CTL_WEP);
spin_unlock_irq(&pDevice->lock);
}
pDevice->byKeyIndex = (u8)dwKeyIndex;
pDevice->uKeyLength = wrq->length;
pDevice->bTransmitKey = true;
pDevice->bEncryptionEnable = true;
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
// Do we want to just set the transmit key index?
if (index < 4) {
pDevice->byKeyIndex = index;
} else if (!(wrq->flags & IW_ENCODE_MODE)) {
rc = -EINVAL;
return rc;
}
}
// Read the flags
if (wrq->flags & IW_ENCODE_DISABLED) {
/* Tear down all hardware key entries under the lock. */
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable WEP function\n");
pMgmt->bShareKeyAlgorithm = false;
pDevice->bEncryptionEnable = false;
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
spin_lock_irq(&pDevice->lock);
for (uu = 0; uu < MAX_KEY_TABLE; uu++)
MACvDisableKeyEntry(pDevice, uu);
spin_unlock_irq(&pDevice->lock);
}
}
if (wrq->flags & IW_ENCODE_RESTRICTED) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable WEP & ShareKey System\n");
pMgmt->bShareKeyAlgorithm = true;
}
if (wrq->flags & IW_ENCODE_OPEN) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable WEP & Open System\n");
pMgmt->bShareKeyAlgorithm = false;
}
/* Forget any previously pinned BSSID. */
memset(pMgmt->abyDesireBSSID, 0xFF, 6);
return rc;
}
/*
 * Wireless Handler: get encode mode (SIOCGIWENCODE)
 *
 * Reports the encryption flags (enabled/disabled, open/restricted) and
 * copies the requested key into 'extra'. Index 0 means the current
 * default key; with WPA active the pairwise key is looked up instead
 * of the WEP group key.
 */
int iwctl_giwencode(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct vnt_private *pDevice = netdev_priv(dev);
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_point *wrq = &wrqu->encoding;
char abyKey[WLAN_WEP232_KEYLEN];
unsigned index = (unsigned)(wrq->flags & IW_ENCODE_INDEX);
PSKeyItem pKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODE\n");
if (pMgmt == NULL)
return -EFAULT;
/* Index is 1-based on the wire; 0 selects the current default key. */
if (index > WLAN_WEP_NKEYS)
return -EINVAL;
if (index < 1) { // get default key
if (pDevice->byKeyIndex < WLAN_WEP_NKEYS)
index = pDevice->byKeyIndex;
else
index = 0;
} else {
index--;
}
/* Zero the staging buffer so short keys never leak stack bytes. */
memset(abyKey, 0, WLAN_WEP232_KEYLEN);
// Check encryption mode
wrq->flags = IW_ENCODE_NOKEY;
// Is WEP enabled ???
if (pDevice->bEncryptionEnable)
wrq->flags |= IW_ENCODE_ENABLED;
else
wrq->flags |= IW_ENCODE_DISABLED;
if (pMgmt->bShareKeyAlgorithm)
wrq->flags |= IW_ENCODE_RESTRICTED;
else
wrq->flags |= IW_ENCODE_OPEN;
wrq->length = 0;
if ((index == 0) && (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled ||
pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)) { // get wpa pairwise key
/* NOTE(review): 0xffffffff as the key index presumably selects the
 * pairwise key for the current BSSID -- confirm against KeybGetKey. */
if (KeybGetKey(&(pDevice->sKey), pMgmt->abyCurrBSSID, 0xffffffff, &pKey)) {
wrq->length = pKey->uKeyLength;
memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
}
} else if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (u8)index, &pKey)) {
wrq->length = pKey->uKeyLength;
memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
}
/* Report the index back 1-based. */
wrq->flags |= index + 1;
return 0;
}
/*
* Wireless Handler: set power mode
*/
/*
 * Wireless Handler: set power mode (SIOCSIWPOWER)
 *
 * Disabling power management selects CAM mode and turns power saving
 * off; IW_POWER_TIMEOUT / IW_POWER_PERIOD both enable fast power
 * saving with the current listen interval. Only IW_POWER_ON is an
 * accepted receive-mode flag.
 *
 * Fix: the IW_POWER_ALL_R case was missing its break and fell through
 * into IW_POWER_ON, emitting a misleading debug message (rc was
 * unaffected, so this only changes the log output).
 */
int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct iw_param *wrq = &wrqu->power;
	int rc = 0;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER\n");

	if (pMgmt == NULL)
		return -EFAULT;

	if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
		rc = -EINVAL;
		return rc;
	}

	spin_lock_irq(&pDevice->lock);

	if (wrq->disabled) {
		pDevice->ePSMode = WMAC_POWER_CAM;
		PSvDisablePowerSaving(pDevice);
		spin_unlock_irq(&pDevice->lock);
		return rc;
	}

	if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
		pDevice->ePSMode = WMAC_POWER_FAST;
		PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
	} else if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
		pDevice->ePSMode = WMAC_POWER_FAST;
		PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
	}

	spin_unlock_irq(&pDevice->lock);

	switch (wrq->flags & IW_POWER_MODE) {
	case IW_POWER_UNICAST_R:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n");
		rc = -EINVAL;
		break;
	case IW_POWER_ALL_R:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_ALL_R \n");
		rc = -EINVAL;
		break;	/* was missing: fell through and also logged IW_POWER_ON */
	case IW_POWER_ON:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_ON \n");
		break;
	default:
		rc = -EINVAL;
	}

	return rc;
}
/*
* Wireless Handler: get power mode
*/
/*
 * Wireless Handler: get power mode (SIOCGIWPOWER)
 *
 * Reports "disabled" in CAM mode; otherwise reports the listen
 * interval (converted via the beacon period) either as a timeout or
 * as a period, depending on what the caller asked for.
 */
int iwctl_giwpower(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct iw_param *wrq = &wrqu->power;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWPOWER\n");

	if (pMgmt == NULL)
		return -EFAULT;

	wrq->disabled = (pDevice->ePSMode == WMAC_POWER_CAM);
	if (wrq->disabled)
		return 0;

	/* Same listen-interval value regardless of the requested type. */
	wrq->value = (int)((pMgmt->wListenInterval *
			    pMgmt->wCurrBeaconPeriod) / 100);
	if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT)
		wrq->flags = IW_POWER_TIMEOUT;
	else
		wrq->flags = IW_POWER_PERIOD;
	wrq->flags |= IW_POWER_ALL_R;

	return 0;
}
/*
* Wireless Handler: get Sensitivity
*/
/*
 * Wireless Handler: get Sensitivity (SIOCGIWSENS)
 *
 * While associated, converts the current RSSI to dBm and reports it;
 * otherwise reports 0 and flags the value as disabled.
 */
int iwctl_giwsens(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iw_param *wrq = &wrqu->sens;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWSENS\n");

	wrq->value = 0;
	if (pDevice->bLinkPass == true) {
		long ldBm;

		RFvRSSITodBm(pDevice, (u8)(pDevice->uCurrRSSI), &ldBm);
		wrq->value = ldBm;
	}

	wrq->disabled = (wrq->value == 0);
	wrq->fixed = 1;
	return 0;
}
/*
 * Wireless Handler: set authentication parameters (SIOCSIWAUTH)
 *
 * Dispatches on the IW_AUTH_INDEX sub-command to configure WPA
 * version, pairwise/group ciphers, key management, auth algorithm and
 * privacy invocation.
 *
 * NOTE(review): wpa_version and pairwise are function-static so that
 * later sub-commands (CIPHER_GROUP, KEY_MGMT) can see values set by
 * earlier ones. This makes the handler non-reentrant and shares state
 * across ALL devices driven by this module -- confirm whether multiple
 * adapters are expected before relying on it.
 */
int iwctl_siwauth(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct vnt_private *pDevice = netdev_priv(dev);
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_param *wrq = &wrqu->param;
int ret = 0;
static int wpa_version = 0; // must be static to save the last value, einsn liu
static int pairwise = 0;
if (pMgmt == NULL)
return -EFAULT;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWAUTH\n");
switch (wrq->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
/* Remember the version for the later KEY_MGMT / CIPHER_GROUP calls. */
wpa_version = wrq->value;
if (wrq->value == IW_AUTH_WPA_VERSION_DISABLED) {
PRINT_K("iwctl_siwauth:set WPADEV to disable at 1??????\n");
} else if (wrq->value == IW_AUTH_WPA_VERSION_WPA) {
PRINT_K("iwctl_siwauth:set WPADEV to WPA1******\n");
} else {
PRINT_K("iwctl_siwauth:set WPADEV to WPA2******\n");
}
break;
case IW_AUTH_CIPHER_PAIRWISE:
pairwise = wrq->value;
PRINT_K("iwctl_siwauth:set pairwise=%d\n", pairwise);
if (pairwise == IW_AUTH_CIPHER_CCMP){
pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
} else if (pairwise == IW_AUTH_CIPHER_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
} else if (pairwise == IW_AUTH_CIPHER_WEP40 ||
pairwise == IW_AUTH_CIPHER_WEP104) {
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
} else if (pairwise == IW_AUTH_CIPHER_NONE) {
// do nothing, einsn liu
} else {
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
}
break;
case IW_AUTH_CIPHER_GROUP:
PRINT_K("iwctl_siwauth:set GROUP=%d\n", wrq->value);
if (wpa_version == IW_AUTH_WPA_VERSION_DISABLED)
break;
/* Group cipher only matters when no pairwise cipher was chosen. */
if (pairwise == IW_AUTH_CIPHER_NONE) {
if (wrq->value == IW_AUTH_CIPHER_CCMP)
pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
else
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
}
break;
case IW_AUTH_KEY_MGMT:
PRINT_K("iwctl_siwauth(wpa_version=%d):set KEY_MGMT=%d\n", wpa_version,wrq->value);
if (wpa_version == IW_AUTH_WPA_VERSION_WPA2){
if (wrq->value == IW_AUTH_KEY_MGMT_PSK)
pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
else pMgmt->eAuthenMode = WMAC_AUTH_WPA2;
} else if (wpa_version == IW_AUTH_WPA_VERSION_WPA) {
if (wrq->value == 0){
pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
} else if (wrq->value == IW_AUTH_KEY_MGMT_PSK)
pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
} else {
pMgmt->eAuthenMode = WMAC_AUTH_WPA;
}
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
break; /* FIXME */
case IW_AUTH_DROP_UNENCRYPTED:
break;
case IW_AUTH_80211_AUTH_ALG:
PRINT_K("iwctl_siwauth:set AUTH_ALG=%d\n", wrq->value);
if (wrq->value == IW_AUTH_ALG_OPEN_SYSTEM)
pMgmt->bShareKeyAlgorithm = false;
else if (wrq->value == IW_AUTH_ALG_SHARED_KEY)
pMgmt->bShareKeyAlgorithm = true;
break;
case IW_AUTH_WPA_ENABLED:
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
break;
case IW_AUTH_ROAMING_CONTROL:
ret = -EOPNOTSUPP;
break;
case IW_AUTH_PRIVACY_INVOKED:
/* Turning privacy off resets all cached auth/cipher state. */
pDevice->bEncryptionEnable = !!wrq->value;
if (pDevice->bEncryptionEnable == false) {
wpa_version = 0;
pairwise = 0;
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
pMgmt->bShareKeyAlgorithm = false;
pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
PRINT_K("iwctl_siwauth:set WPADEV to disaable at 2?????\n");
}
break;
default:
PRINT_K("iwctl_siwauth: not supported %x\n", wrq->flags);
ret = -EOPNOTSUPP;
break;
}
return ret;
}
/*
 * Wireless Handler: get authentication parameters (SIOCGIWAUTH)
 *
 * Reading back auth parameters is not implemented by this driver.
 */
int iwctl_giwauth(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
return -EOPNOTSUPP;
}
/*
 * Wireless Handler: set generic IE (SIOCSIWGENIE)
 *
 * Stores the WPA/RSN information element supplied by wpa_supplicant
 * into pMgmt->abyWPAIE after validating that the IE's own length byte
 * (extra[1] + 2) matches the buffer length. A zero length clears the
 * stored IE.
 */
int iwctl_siwgenie(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct vnt_private *pDevice = netdev_priv(dev);
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_point *wrq = &wrqu->data;
int ret = 0;
if (pMgmt == NULL)
return -EFAULT;
if (wrq->length){
/* IE must be at least header-sized and self-consistent:
 * total length == length byte + 2-byte header. */
if ((wrq->length < 2) || (extra[1] + 2 != wrq->length)) {
ret = -EINVAL;
goto out;
}
if (wrq->length > MAX_WPA_IE_LEN){
ret = -ENOMEM;
goto out;
}
memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN);
/* NOTE(review): for standard WEXT handlers 'extra' is a buffer the
 * wext core already copied into kernel space, so copy_from_user()
 * on it looks wrong (it should fail access_ok on a kernel pointer
 * and always return -EFAULT); a plain memcpy is presumably intended
 * -- confirm against the wext ioctl copy-in path before changing. */
if (copy_from_user(pMgmt->abyWPAIE, extra, wrq->length)){
ret = -EFAULT;
goto out;
}
pMgmt->wWPAIELen = wrq->length;
} else {
memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN);
pMgmt->wWPAIELen = 0;
}
out: // not completely ...not necessary in wpa_supplicant 0.5.8
return ret;
}
/*
 * Wireless Handler: get generic IE (SIOCGIWGENIE)
 *
 * Returns the stored WPA/RSN information element; reports -E2BIG with
 * the required length when the caller's buffer is too small.
 */
int iwctl_giwgenie(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct vnt_private *pDevice = netdev_priv(dev);
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_point *wrq = &wrqu->data;
int ret = 0;
int space = wrq->length;
if (pMgmt == NULL)
return -EFAULT;
wrq->length = 0;
if (pMgmt->wWPAIELen > 0) {
wrq->length = pMgmt->wWPAIELen;
if (pMgmt->wWPAIELen <= space) {
/* NOTE(review): 'extra' is a kernel buffer in standard WEXT
 * handlers, so copy_to_user() here is questionable for the same
 * reason as the copy_from_user() in iwctl_siwgenie -- confirm. */
if (copy_to_user(extra, pMgmt->abyWPAIE, pMgmt->wWPAIELen)) {
ret = -EFAULT;
}
} else {
ret = -E2BIG;
}
}
return ret;
}
int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct vnt_private *pDevice = netdev_priv(dev);
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_point *wrq = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext*)extra;
struct viawget_wpa_param *param=NULL;
// original member
wpa_alg alg_name;
u8 addr[6];
int key_idx;
int set_tx = 0;
u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
u8 key[64];
size_t seq_len = 0;
size_t key_len = 0;
u8 *buf;
u8 key_array[64];
int ret = 0;
PRINT_K("SIOCSIWENCODEEXT......\n");
if (pMgmt == NULL)
return -EFAULT;
if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
return -ENODEV;
buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
param = (struct viawget_wpa_param *)buf;
// recover alg_name
switch (ext->alg) {
case IW_ENCODE_ALG_NONE:
alg_name = WPA_ALG_NONE;
break;
case IW_ENCODE_ALG_WEP:
alg_name = WPA_ALG_WEP;
break;
case IW_ENCODE_ALG_TKIP:
alg_name = WPA_ALG_TKIP;
break;
case IW_ENCODE_ALG_CCMP:
alg_name = WPA_ALG_CCMP;
break;
default:
PRINT_K("Unknown alg = %d\n",ext->alg);
ret= -ENOMEM;
goto error;
}
// recover addr
memcpy(addr, ext->addr.sa_data, ETH_ALEN);
// recover key_idx
key_idx = (wrq->flags&IW_ENCODE_INDEX) - 1;
// recover set_tx
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
set_tx = 1;
// recover seq,seq_len
if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
seq_len=IW_ENCODE_SEQ_MAX_SIZE;
memcpy(seq, ext->rx_seq, seq_len);
}
// recover key,key_len
if (ext->key_len) {
key_len = ext->key_len;
memcpy(key, &ext->key[0], key_len);
}
memset(key_array, 0, 64);
if (key_len > 0) {
memcpy(key_array, key, key_len);
if (key_len == 32) {
// notice ! the oder
memcpy(&key_array[16], &key[24], 8);
memcpy(&key_array[24], &key[16], 8);
}
}
/**************Translate iw_encode_ext to viawget_wpa_param****************/
memcpy(param->addr, addr, ETH_ALEN);
param->u.wpa_key.alg_name = (int)alg_name;
param->u.wpa_key.set_tx = set_tx;
param->u.wpa_key.key_index = key_idx;
param->u.wpa_key.key_len = key_len;
param->u.wpa_key.key = (u8 *)key_array;
param->u.wpa_key.seq = (u8 *)seq;
param->u.wpa_key.seq_len = seq_len;
/****set if current action is Network Manager count?? */
/****this method is so foolish,but there is no other way??? */
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
if (param->u.wpa_key.key_index ==0) {
pDevice->bwextstep0 = true;
}
if ((pDevice->bwextstep0 == true) && (param->u.wpa_key.key_index == 1)) {
pDevice->bwextstep0 = false;
pDevice->bwextstep1 = true;
}
if ((pDevice->bwextstep1 == true) && (param->u.wpa_key.key_index == 2)) {
pDevice->bwextstep1 = false;
pDevice->bwextstep2 = true;
}
if ((pDevice->bwextstep2 == true) && (param->u.wpa_key.key_index == 3)) {
pDevice->bwextstep2 = false;
pDevice->bwextstep3 = true;
}
}
if (pDevice->bwextstep3 == true) {
PRINT_K("SIOCSIWENCODEEXT:Enable WPA WEXT SUPPORT!!!!!\n");
pDevice->bwextstep0 = false;
pDevice->bwextstep1 = false;
pDevice->bwextstep2 = false;
pDevice->bwextstep3 = false;
pDevice->bWPASuppWextEnabled = true;
memset(pMgmt->abyDesireBSSID, 0xFF, 6);
KeyvInitTable(pDevice, &pDevice->sKey);
}
/*******/
spin_lock_irq(&pDevice->lock);
ret = wpa_set_keys(pDevice, param);
spin_unlock_irq(&pDevice->lock);
error:
kfree(buf);
return ret;
}
/*
 * Wireless Handler: get extended encoding parameters (SIOCGIWENCODEEXT)
 *
 * Reading back extended encoding state is not implemented.
 */
int iwctl_giwencodeext(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
return -EOPNOTSUPP;
}
/*
 * Wireless Handler: MLME request (SIOCSIWMLME)
 *
 * Only deauth/disassoc against the currently associated BSSID are
 * honoured; while linked, a DISASSOCIATE command is scheduled.
 */
int iwctl_siwmlme(struct net_device *dev, struct iw_request_info *info,
	union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct iw_mlme *mlme = (struct iw_mlme *)extra;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWMLME\n");

	if (pMgmt == NULL)
		return -EFAULT;

	/* Request must target the BSSID we are associated with. */
	if (memcmp(pMgmt->abyCurrBSSID, mlme->addr.sa_data, ETH_ALEN))
		return -EINVAL;

	if (mlme->cmd == IW_MLME_DEAUTH || mlme->cmd == IW_MLME_DISASSOC) {
		if (pDevice->bLinkPass == true) {
			PRINT_K("iwctl_siwmlme--->send DISASSOCIATE\n");
			bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE,
					 NULL);
		}
		return 0;
	}

	return -EOPNOTSUPP;
}
/* SIOCSIWCOMMIT handler: nothing is cached pending commit, so this is a
 * no-op beyond the debug trace. */
static int iwctl_config_commit(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu,
			       char *extra)
{
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "SIOCSIWCOMMIT\n");

	return 0;
}
static const iw_handler iwctl_handler[] = {
IW_HANDLER(SIOCSIWCOMMIT, iwctl_config_commit),
IW_HANDLER(SIOCGIWNAME, iwctl_giwname),
IW_HANDLER(SIOCSIWFREQ, iwctl_siwfreq),
IW_HANDLER(SIOCGIWFREQ, iwctl_giwfreq),
IW_HANDLER(SIOCSIWMODE, iwctl_siwmode),
IW_HANDLER(SIOCGIWMODE, iwctl_giwmode),
IW_HANDLER(SIOCGIWSENS, iwctl_giwsens),
IW_HANDLER(SIOCGIWRANGE, iwctl_giwrange),
IW_HANDLER(SIOCSIWAP, iwctl_siwap),
IW_HANDLER(SIOCGIWAP, iwctl_giwap),
IW_HANDLER(SIOCSIWMLME, iwctl_siwmlme),
IW_HANDLER(SIOCGIWAPLIST, iwctl_giwaplist),
IW_HANDLER(SIOCSIWSCAN, iwctl_siwscan),
IW_HANDLER(SIOCGIWSCAN, iwctl_giwscan),
IW_HANDLER(SIOCSIWESSID, iwctl_siwessid),
IW_HANDLER(SIOCGIWESSID, iwctl_giwessid),
IW_HANDLER(SIOCSIWRATE, iwctl_siwrate),
IW_HANDLER(SIOCGIWRATE, iwctl_giwrate),
IW_HANDLER(SIOCSIWRTS, iwctl_siwrts),
IW_HANDLER(SIOCGIWRTS, iwctl_giwrts),
IW_HANDLER(SIOCSIWFRAG, iwctl_siwfrag),
IW_HANDLER(SIOCGIWFRAG, iwctl_giwfrag),
IW_HANDLER(SIOCSIWRETRY, iwctl_siwretry),
IW_HANDLER(SIOCGIWRETRY, iwctl_giwretry),
IW_HANDLER(SIOCSIWENCODE, iwctl_siwencode),
IW_HANDLER(SIOCGIWENCODE, iwctl_giwencode),
IW_HANDLER(SIOCSIWPOWER, iwctl_siwpower),
IW_HANDLER(SIOCGIWPOWER, iwctl_giwpower),
IW_HANDLER(SIOCSIWGENIE, iwctl_siwgenie),
IW_HANDLER(SIOCGIWGENIE, iwctl_giwgenie),
IW_HANDLER(SIOCSIWMLME, iwctl_siwmlme),
IW_HANDLER(SIOCSIWAUTH, iwctl_siwauth),
IW_HANDLER(SIOCGIWAUTH, iwctl_giwauth),
IW_HANDLER(SIOCSIWENCODEEXT, iwctl_siwencodeext),
IW_HANDLER(SIOCGIWENCODEEXT, iwctl_giwencodeext)
};
/* No driver-private ioctls are implemented. */
static const iw_handler iwctl_private_handler[] = {
	NULL,				// SIOCIWFIRSTPRIV
};

/* Wireless Extensions entry points exported to the networking core. */
const struct iw_handler_def iwctl_handler_def = {
	.get_wireless_stats = &iwctl_get_wireless_stats,
	.num_standard = ARRAY_SIZE(iwctl_handler),
	.num_private = 0,
	.num_private_args = 0,
	.standard = iwctl_handler,
	.private = NULL,
	.private_args = NULL,
};
| gpl-2.0 |
moulecorp/greezly | fs/jffs2/super.c | 2223 | 10534 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/jffs2.h>
#include <linux/pagemap.h>
#include <linux/mtd/super.h>
#include <linux/ctype.h>
#include <linux/namei.h>
#include <linux/seq_file.h>
#include <linux/exportfs.h>
#include "compr.h"
#include "nodelist.h"
static void jffs2_put_super(struct super_block *);
static struct kmem_cache *jffs2_inode_cachep;
/*
 * ->alloc_inode: allocate the combined JFFS2/VFS inode object from the
 * dedicated slab cache and hand the embedded VFS inode back to the core.
 */
static struct inode *jffs2_alloc_inode(struct super_block *sb)
{
	struct jffs2_inode_info *ei = kmem_cache_alloc(jffs2_inode_cachep,
						       GFP_KERNEL);

	return ei ? &ei->vfs_inode : NULL;
}
/* RCU callback: actually return the inode memory to the slab cache. */
static void jffs2_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
}

/* ->destroy_inode: defer the free through RCU (via jffs2_i_callback). */
static void jffs2_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, jffs2_i_callback);
}
/*
 * Slab constructor: runs once per object when a cache page is first
 * populated (not on every allocation), so only state that must survive
 * free/alloc cycles is initialised here.
 */
static void jffs2_i_init_once(void *obj)
{
	struct jffs2_inode_info *ei = obj;

	mutex_init(&ei->sem);
	inode_init_once(&ei->vfs_inode);
}
/*
 * Map a JFFS2_COMPR_MODE_* constant to the option string used by the
 * "compr=" mount option.  Only modes that jffs2_parse_options() can
 * store in mount_opts.compr are valid inputs.
 */
static const char *jffs2_compr_name(unsigned int compr)
{
	if (compr == JFFS2_COMPR_MODE_NONE)
		return "none";
#ifdef CONFIG_JFFS2_LZO
	if (compr == JFFS2_COMPR_MODE_FORCELZO)
		return "lzo";
#endif
#ifdef CONFIG_JFFS2_ZLIB
	if (compr == JFFS2_COMPR_MODE_FORCEZLIB)
		return "zlib";
#endif
	/* should never happen; programmer error */
	WARN_ON(1);
	return "";
}
/* ->show_options: emit the non-default mount options for /proc/mounts. */
static int jffs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct jffs2_sb_info *sbinfo = JFFS2_SB_INFO(root->d_sb);
	struct jffs2_mount_opts *mo = &sbinfo->mount_opts;

	if (mo->override_compr)
		seq_printf(s, ",compr=%s", jffs2_compr_name(mo->compr));
	if (mo->rp_size)
		seq_printf(s, ",rp_size=%u", mo->rp_size / 1024);

	return 0;
}
/*
 * ->sync_fs: push any data held in the write-behind buffer out to flash.
 * The pending delayed wbuf flush work is cancelled first because we are
 * about to flush synchronously anyway.
 */
static int jffs2_sync_fs(struct super_block *sb, int wait)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	cancel_delayed_work_sync(&c->wbuf_dwork);
#endif

	mutex_lock(&c->alloc_sem);
	jffs2_flush_wbuf_pad(c);
	mutex_unlock(&c->alloc_sem);

	return 0;
}
/* NFS export helper: look up an inode by number during file-handle
 * decode; the generation number is deliberately ignored (see below). */
static struct inode *jffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
					 uint32_t generation)
{
	/* We don't care about i_generation. We'll destroy the flash
	   before we start re-using inode numbers anyway. And even
	   if that wasn't true, we'd have other problems...*/
	return jffs2_iget(sb, ino);
}
/* NFS export: decode a file handle into the target dentry. */
static struct dentry *jffs2_fh_to_dentry(struct super_block *sb,
					 struct fid *fid,
					 int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    jffs2_nfs_get_inode);
}

/* NFS export: decode a file handle into the parent dentry. */
static struct dentry *jffs2_fh_to_parent(struct super_block *sb,
					 struct fid *fid,
					 int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    jffs2_nfs_get_inode);
}
/*
 * NFS export: find the parent of a directory dentry.  For directories
 * the inocache keeps the parent inode number in pino_nlink, so no media
 * scan is required.
 */
static struct dentry *jffs2_get_parent(struct dentry *child)
{
	struct jffs2_inode_info *f;
	uint32_t pino;

	BUG_ON(!S_ISDIR(child->d_inode->i_mode));

	f = JFFS2_INODE_INFO(child->d_inode);

	pino = f->inocache->pino_nlink;

	JFFS2_DEBUG("Parent of directory ino #%u is #%u\n",
		    f->inocache->ino, pino);

	return d_obtain_alias(jffs2_iget(child->d_inode->i_sb, pino));
}

/* Export operations enabling NFS export of JFFS2 mounts. */
static const struct export_operations jffs2_export_ops = {
	.get_parent = jffs2_get_parent,
	.fh_to_dentry = jffs2_fh_to_dentry,
	.fh_to_parent = jffs2_fh_to_parent,
};
/*
* JFFS2 mount options.
*
* Opt_override_compr: override default compressor
* Opt_rp_size: size of reserved pool in KiB
* Opt_err: just end of array marker
*/
/* Token values matched by jffs2_parse_options(). */
enum {
	Opt_override_compr,
	Opt_rp_size,
	Opt_err,
};

/* Option patterns: "compr=<name>" and "rp_size=<KiB>". */
static const match_table_t tokens = {
	{Opt_override_compr, "compr=%s"},
	{Opt_rp_size, "rp_size=%u"},
	{Opt_err, NULL},
};
static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
{
substring_t args[MAX_OPT_ARGS];
char *p, *name;
unsigned int opt;
if (!data)
return 0;
while ((p = strsep(&data, ","))) {
int token;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_override_compr:
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
if (!strcmp(name, "none"))
c->mount_opts.compr = JFFS2_COMPR_MODE_NONE;
#ifdef CONFIG_JFFS2_LZO
else if (!strcmp(name, "lzo"))
c->mount_opts.compr = JFFS2_COMPR_MODE_FORCELZO;
#endif
#ifdef CONFIG_JFFS2_ZLIB
else if (!strcmp(name, "zlib"))
c->mount_opts.compr =
JFFS2_COMPR_MODE_FORCEZLIB;
#endif
else {
pr_err("Error: unknown compressor \"%s\"\n",
name);
kfree(name);
return -EINVAL;
}
kfree(name);
c->mount_opts.override_compr = true;
break;
case Opt_rp_size:
if (match_int(&args[0], &opt))
return -EINVAL;
opt *= 1024;
if (opt > c->mtd->size) {
pr_warn("Too large reserve pool specified, max "
"is %llu KB\n", c->mtd->size / 1024);
return -EINVAL;
}
c->mount_opts.rp_size = opt;
break;
default:
pr_err("Error: unrecognized mount option '%s' or missing value\n",
p);
return -EINVAL;
}
}
return 0;
}
/* ->remount_fs: re-parse the option string, then let the JFFS2 core
 * remount logic apply any flag changes. */
static int jffs2_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (jffs2_parse_options(c, data))
		return -EINVAL;

	return jffs2_do_remount_fs(sb, flags, data);
}
/* Superblock operations handed to the VFS. */
static const struct super_operations jffs2_super_operations =
{
	.alloc_inode =	jffs2_alloc_inode,
	.destroy_inode =jffs2_destroy_inode,
	.put_super =	jffs2_put_super,
	.statfs =	jffs2_statfs,
	.remount_fs =	jffs2_remount_fs,
	.evict_inode =	jffs2_evict_inode,
	.dirty_inode =	jffs2_dirty_inode,
	.show_options =	jffs2_show_options,
	.sync_fs =	jffs2_sync_fs,
};
/*
* fill in the superblock
*/
/*
 * Fill in the superblock: allocate and wire up the per-mount
 * jffs2_sb_info, parse mount options, initialise the locks and hand off
 * to jffs2_do_fill_super() for the real work.
 *
 * Fixes: the debug trace referred to the long-renamed function
 * jffs2_get_sb_mtd() -- use __func__.  Also, the options-failure path
 * used to kfree(c) while leaving sb->s_fs_info pointing at it;
 * jffs2_kill_sb() is invoked by the VFS after any fill_super failure
 * and frees s_fs_info itself, so that was a double free.
 */
static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jffs2_sb_info *c;
	int ret;

	jffs2_dbg(1, "%s(): New superblock for device %d (\"%s\")\n",
		  __func__, sb->s_mtd->index, sb->s_mtd->name);

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->mtd = sb->s_mtd;
	c->os_priv = sb;
	sb->s_fs_info = c;

	ret = jffs2_parse_options(c, data);
	if (ret)
		/* c is freed by jffs2_kill_sb() via sb->s_fs_info. */
		return -EINVAL;

	/* Initialize JFFS2 superblock locks, the further initialization will
	 * be done later */
	mutex_init(&c->alloc_sem);
	mutex_init(&c->erase_free_sem);
	init_waitqueue_head(&c->erase_wait);
	init_waitqueue_head(&c->inocache_wq);
	spin_lock_init(&c->erase_completion_lock);
	spin_lock_init(&c->inocache_lock);

	sb->s_op = &jffs2_super_operations;
	sb->s_export_op = &jffs2_export_ops;
	sb->s_flags = sb->s_flags | MS_NOATIME;
	sb->s_xattr = jffs2_xattr_handlers;
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif
	ret = jffs2_do_fill_super(sb, data, silent);
	return ret;
}
/* ->mount: defer entirely to the generic MTD superblock helper. */
static struct dentry *jffs2_mount(struct file_system_type *fs_type,
				  int flags, const char *dev_name,
				  void *data)
{
	return mount_mtd(fs_type, flags, dev_name, data, jffs2_fill_super);
}
/*
 * ->put_super: flush pending writes, then release all in-core JFFS2
 * state (summary data, inode caches, raw node refs, eraseblock array,
 * xattr state) and finally sync the underlying MTD device.
 */
static void jffs2_put_super (struct super_block *sb)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	jffs2_dbg(2, "%s()\n", __func__);

	mutex_lock(&c->alloc_sem);
	jffs2_flush_wbuf_pad(c);
	mutex_unlock(&c->alloc_sem);

	jffs2_sum_exit(c);

	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	/* The eraseblock array may have been vmalloc'ed for large flashes. */
	if (jffs2_blocks_use_vmalloc(c))
		vfree(c->blocks);
	else
		kfree(c->blocks);
	jffs2_flash_cleanup(c);
	kfree(c->inocache_list);
	jffs2_clear_xattr_subsystem(c);
	mtd_sync(c->mtd);

	jffs2_dbg(1, "%s(): returning\n", __func__);
}
/*
 * ->kill_sb: stop the GC thread (for read/write mounts), tear down the
 * MTD superblock and free the sb_info.
 *
 * This is also reached after a failed ->fill_super(), where
 * sb->s_fs_info may still be NULL (kzalloc failure before it was set),
 * so c must be checked before dereferencing it; kfree(NULL) is a no-op.
 */
static void jffs2_kill_sb(struct super_block *sb)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
	if (c && !(sb->s_flags & MS_RDONLY))
		jffs2_stop_garbage_collect_thread(c);
	kill_mtd_super(sb);
	kfree(c);
}
/* Filesystem registration record; superblocks come from MTD devices. */
static struct file_system_type jffs2_fs_type = {
	.owner =	THIS_MODULE,
	.name =		"jffs2",
	.mount =	jffs2_mount,
	.kill_sb =	jffs2_kill_sb,
};
MODULE_ALIAS_FS("jffs2");
/*
 * Module init: verify on-flash structure sizes at build time, create the
 * inode slab cache, bring up the compressors and node slab caches, then
 * register the filesystem.  Unwinds in reverse order on failure.
 */
static int __init init_jffs2_fs(void)
{
	int ret;

	/* Paranoia checks for on-medium structures. If we ask GCC
	   to pack them with __attribute__((packed)) then it _also_
	   assumes that they're not aligned -- so it emits crappy
	   code on some architectures. Ideally we want an attribute
	   which means just 'no padding', without the alignment
	   thing. But GCC doesn't have that -- we have to just
	   hope the structs are the right sizes, instead. */
	BUILD_BUG_ON(sizeof(struct jffs2_unknown_node) != 12);
	BUILD_BUG_ON(sizeof(struct jffs2_raw_dirent) != 40);
	BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
	BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);

	pr_info("version 2.2."
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	       " (NAND)"
#endif
#ifdef CONFIG_JFFS2_SUMMARY
	       " (SUMMARY) "
#endif
	       " © 2001-2006 Red Hat, Inc.\n");

	jffs2_inode_cachep = kmem_cache_create("jffs2_i",
					       sizeof(struct jffs2_inode_info),
					       0, (SLAB_RECLAIM_ACCOUNT|
						   SLAB_MEM_SPREAD),
					       jffs2_i_init_once);
	if (!jffs2_inode_cachep) {
		pr_err("error: Failed to initialise inode cache\n");
		return -ENOMEM;
	}
	ret = jffs2_compressors_init();
	if (ret) {
		pr_err("error: Failed to initialise compressors\n");
		goto out;
	}
	ret = jffs2_create_slab_caches();
	if (ret) {
		pr_err("error: Failed to initialise slab caches\n");
		goto out_compressors;
	}
	ret = register_filesystem(&jffs2_fs_type);
	if (ret) {
		pr_err("error: Failed to register filesystem\n");
		goto out_slab;
	}
	return 0;

	/* Unwind in reverse order of initialisation. */
 out_slab:
	jffs2_destroy_slab_caches();
 out_compressors:
	jffs2_compressors_exit();
 out:
	kmem_cache_destroy(jffs2_inode_cachep);
	return ret;
}
/* Module exit: unregister and free everything init_jffs2_fs() set up. */
static void __exit exit_jffs2_fs(void)
{
	unregister_filesystem(&jffs2_fs_type);
	jffs2_destroy_slab_caches();
	jffs2_compressors_exit();

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(jffs2_inode_cachep);
}

module_init(init_jffs2_fs);
module_exit(exit_jffs2_fs);

MODULE_DESCRIPTION("The Journalling Flash File System, v2");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for
		       // the sake of this tag. It's Free Software.
| gpl-2.0 |
mikeljim/fb-mptcp | drivers/sbus/char/flash.c | 2479 | 4867 | /* flash.c: Allow mmap access to the OBP Flash, for OBP updates.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/upa.h>
static DEFINE_MUTEX(flash_mutex);
static DEFINE_SPINLOCK(flash_lock);

/* Geometry of the OBP flash, discovered at probe time.  Reads and
 * writes may decode at different physical addresses. */
static struct {
	unsigned long read_base;	/* Physical read address */
	unsigned long write_base;	/* Physical write address */
	unsigned long read_size;	/* Size of read area */
	unsigned long write_size;	/* Size of write area */
	unsigned long busy;		/* In use? */
} flash;

#define FLASH_MINOR	152
/*
 * mmap the flash into user space.
 *
 * When the read and write apertures coincide any mapping is allowed;
 * when they differ, a mapping may be read-only or write-only but not
 * both, and the aperture is selected by the requested protection bits.
 */
static int
flash_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long addr;
	unsigned long size;

	spin_lock(&flash_lock);
	if (flash.read_base == flash.write_base) {
		addr = flash.read_base;
		size = flash.read_size;
	} else {
		/* Distinct apertures: cannot serve a read+write mapping. */
		if ((vma->vm_flags & VM_READ) &&
		    (vma->vm_flags & VM_WRITE)) {
			spin_unlock(&flash_lock);
			return -EINVAL;
		}
		if (vma->vm_flags & VM_READ) {
			addr = flash.read_base;
			size = flash.read_size;
		} else if (vma->vm_flags & VM_WRITE) {
			addr = flash.write_base;
			size = flash.write_size;
		} else {
			spin_unlock(&flash_lock);
			return -ENXIO;
		}
	}
	spin_unlock(&flash_lock);

	if ((vma->vm_pgoff << PAGE_SHIFT) > size)
		return -ENXIO;
	/* Convert to a page frame number, offset by the mapping offset. */
	addr = vma->vm_pgoff + (addr >> PAGE_SHIFT);

	/* NOTE(review): this *grows* size to the requested length when the
	 * request extends past the aperture rather than shrinking it --
	 * looks questionable; confirm against the hardware decode. */
	if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size)
		size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT));

	/* Flash must be mapped uncached. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
/*
 * llseek over the read aperture.  SEEK_CUR and SEEK_END clamp the
 * position to flash.read_size; SEEK_SET stores the offset unchecked.
 */
static long long
flash_llseek(struct file *file, long long offset, int origin)
{
	mutex_lock(&flash_mutex);
	switch (origin) {
		case 0:		/* SEEK_SET: offset taken as-is */
			file->f_pos = offset;
			break;
		case 1:		/* SEEK_CUR */
			file->f_pos += offset;
			/* NOTE(review): a negative result is not rejected
			 * here -- confirm whether the VFS catches it for
			 * this device before relying on it. */
			if (file->f_pos > flash.read_size)
				file->f_pos = flash.read_size;
			break;
		case 2:		/* SEEK_END: jump to end of read area */
			file->f_pos = flash.read_size;
			break;
		default:
			mutex_unlock(&flash_mutex);
			return -EINVAL;
	}
	mutex_unlock(&flash_mutex);
	return file->f_pos;
}
/*
 * Read from the flash read aperture, one byte at a time through
 * upa_readb(), copying each byte to user space.
 *
 * Fix: validate the position before clamping the count.  Previously,
 * for a position beyond the aperture, the signed expression
 * "flash.read_size - p" went negative and -- compared against the
 * unsigned count -- converted to a huge value, so count was never
 * clamped and the loop read past the aperture.
 */
static ssize_t
flash_read(struct file * file, char __user * buf,
	   size_t count, loff_t *ppos)
{
	loff_t p = *ppos;
	size_t i;

	if (p < 0 || p >= flash.read_size)
		return 0;
	if (count > flash.read_size - p)
		count = flash.read_size - p;

	for (i = 0; i < count; i++) {
		u8 data = upa_readb(flash.read_base + p + i);
		if (put_user(data, buf))
			return -EFAULT;
		buf++;
	}

	*ppos += count;
	return count;
}
/* Allow only a single opener at a time, using flash.busy as the claim
 * flag (set atomically under the mutex). */
static int
flash_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&flash_mutex);
	if (test_and_set_bit(0, (void *)&flash.busy))
		ret = -EBUSY;
	mutex_unlock(&flash_mutex);

	return ret;
}
/* Drop the single-open claim taken in flash_open(). */
static int
flash_release(struct inode *inode, struct file *file)
{
	spin_lock(&flash_lock);
	flash.busy = 0;
	spin_unlock(&flash_lock);

	return 0;
}
static const struct file_operations flash_fops = {
	/* no write to the Flash, use mmap
	 * and play flash dependent tricks.
	 */
	.owner =	THIS_MODULE,
	.llseek =	flash_llseek,
	.read =		flash_read,
	.mmap =		flash_mmap,
	.open =		flash_open,
	.release =	flash_release,
};

/* Character device /dev/flash at misc minor FLASH_MINOR. */
static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops };
/*
 * Bind to an OF "flashprom" node.  Only nodes below sbus/sbi/ebus are
 * accepted.  Resource 0 is the read aperture; resource 1, when present,
 * is a separate write aperture, otherwise writes share resource 0.
 */
static int flash_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	struct device_node *parent;

	parent = dp->parent;

	if (strcmp(parent->name, "sbus") &&
	    strcmp(parent->name, "sbi") &&
	    strcmp(parent->name, "ebus"))
		return -ENODEV;

	flash.read_base = op->resource[0].start;
	flash.read_size = resource_size(&op->resource[0]);
	if (op->resource[1].flags) {
		flash.write_base = op->resource[1].start;
		flash.write_size = resource_size(&op->resource[1]);
	} else {
		flash.write_base = op->resource[0].start;
		flash.write_size = resource_size(&op->resource[0]);
	}
	flash.busy = 0;

	printk(KERN_INFO "%s: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n",
	       op->dev.of_node->full_name,
	       flash.read_base, flash.read_size,
	       flash.write_base, flash.write_size);

	return misc_register(&flash_dev);
}
/* Driver removal: drop the misc device registered at probe time. */
static int flash_remove(struct platform_device *op)
{
	misc_deregister(&flash_dev);

	return 0;
}
/* OF match table: bind to nodes named "flashprom". */
static const struct of_device_id flash_match[] = {
	{
		.name = "flashprom",
	},
	{},
};
MODULE_DEVICE_TABLE(of, flash_match);

/* Platform driver glue; probe/remove manage the misc device. */
static struct platform_driver flash_driver = {
	.driver = {
		.name = "flash",
		.owner = THIS_MODULE,
		.of_match_table = flash_match,
	},
	.probe		= flash_probe,
	.remove		= flash_remove,
};

module_platform_driver(flash_driver);

MODULE_LICENSE("GPL");
| gpl-2.0 |
hephaex/kernel | drivers/hwmon/pmbus/zl6100.c | 3759 | 10135 | /*
* Hardware monitoring driver for ZL6100 and compatibles
*
* Copyright (c) 2011 Ericsson AB.
* Copyright (c) 2012 Guenter Roeck
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include "pmbus.h"
/* Supported chip variants. */
enum chips { zl2004, zl2005, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105,
	     zl9101, zl9117 };

/* Per-client driver state, wrapped around the pmbus_driver_info that is
 * handed to the pmbus core. */
struct zl6100_data {
	int id;			/* enum chips value for this client */
	ktime_t access;		/* chip access time */
	int delay;		/* Delay between chip accesses in uS */
	struct pmbus_driver_info info;
};

#define to_zl6100_data(x)  container_of(x, struct zl6100_data, info)

/* Manufacturer-specific registers. */
#define ZL6100_MFR_CONFIG		0xd0
#define ZL6100_DEVICE_ID		0xe4

#define ZL6100_MFR_XTEMP_ENABLE		(1 << 7)

#define MFR_VMON_OV_FAULT_LIMIT		0xf5
#define MFR_VMON_UV_FAULT_LIMIT		0xf6
#define MFR_READ_VMON			0xf7

/* VMON event bits in PMBUS_STATUS_MFR_SPECIFIC. */
#define VMON_UV_WARNING			(1 << 5)
#define VMON_OV_WARNING			(1 << 4)
#define VMON_UV_FAULT			(1 << 1)
#define VMON_OV_FAULT			(1 << 0)

#define ZL6100_WAIT_TIME		1000	/* uS	*/

/* Inter-access delay; writable module parameter. */
static ushort delay = ZL6100_WAIT_TIME;
module_param(delay, ushort, 0644);
MODULE_PARM_DESC(delay, "Delay between chip accesses in uS");
/* Convert linear sensor value to milli-units */
static long zl6100_l2d(s16 l)
{
s16 exponent;
s32 mantissa;
long val;
exponent = l >> 11;
mantissa = ((s16)((l & 0x7ff) << 5)) >> 5;
val = mantissa;
/* scale result to milli-units */
val = val * 1000L;
if (exponent >= 0)
val <<= exponent;
else
val >>= -exponent;
return val;
}
#define MAX_MANTISSA	(1023 * 1000)
#define MIN_MANTISSA	(511 * 1000)

/*
 * Convert milli-units to PMBus LINEAR11 format (5-bit exponent,
 * 11-bit signed mantissa).
 */
static u16 zl6100_d2l(long val)
{
	bool neg = false;
	s16 exp = 0;
	s16 mant;

	/* simple case */
	if (val == 0)
		return 0;

	if (val < 0) {
		neg = true;
		val = -val;
	}

	/* Reduce large mantissa until it fits into 10 bit */
	while (val >= MAX_MANTISSA && exp < 15) {
		exp++;
		val >>= 1;
	}
	/* Increase small mantissa to improve precision */
	while (val < MIN_MANTISSA && exp > -15) {
		exp--;
		val <<= 1;
	}

	/* Convert mantissa from milli-units to units */
	mant = DIV_ROUND_CLOSEST(val, 1000);

	/* Ensure that resulting number is within range */
	if (mant > 0x3ff)
		mant = 0x3ff;

	/* restore sign */
	if (neg)
		mant = -mant;

	/* Pack as 5 bit exponent, 11 bit mantissa */
	return (mant & 0x7ff) | ((exp << 11) & 0xf800);
}
/* Some chips need a delay between accesses */
static inline void zl6100_wait(const struct zl6100_data *data)
{
if (data->delay) {
s64 delta = ktime_us_delta(ktime_get(), data->access);
if (delta < data->delay)
udelay(data->delay - delta);
}
}
/*
 * Word-register read hook.  Virtual VMON registers are redirected to
 * the manufacturer-specific registers; VMON warning limits are
 * synthesised from the fault limits, scaled by 9/10 (OV) or 11/10 (UV).
 * Only page 0 exists.  All chip accesses honour the inter-access delay.
 */
static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
{
	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
	struct zl6100_data *data = to_zl6100_data(info);
	int ret, vreg;

	if (page > 0)
		return -ENXIO;

	if (data->id == zl2005) {
		/*
		 * Limit register detection is not reliable on ZL2005.
		 * Make sure registers are not erroneously detected.
		 */
		switch (reg) {
		case PMBUS_VOUT_OV_WARN_LIMIT:
		case PMBUS_VOUT_UV_WARN_LIMIT:
		case PMBUS_IOUT_OC_WARN_LIMIT:
			return -ENXIO;
		}
	}

	/* Map virtual registers onto real chip registers. */
	switch (reg) {
	case PMBUS_VIRT_READ_VMON:
		vreg = MFR_READ_VMON;
		break;
	case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
	case PMBUS_VIRT_VMON_OV_FAULT_LIMIT:
		vreg = MFR_VMON_OV_FAULT_LIMIT;
		break;
	case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
	case PMBUS_VIRT_VMON_UV_FAULT_LIMIT:
		vreg = MFR_VMON_UV_FAULT_LIMIT;
		break;
	default:
		if (reg >= PMBUS_VIRT_BASE)
			return -ENXIO;
		vreg = reg;
		break;
	}

	zl6100_wait(data);
	ret = pmbus_read_word_data(client, page, vreg);
	data->access = ktime_get();
	if (ret < 0)
		return ret;

	/* Derive warning limits from the fault limit just read. */
	switch (reg) {
	case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
		ret = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(ret) * 9, 10));
		break;
	case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
		ret = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(ret) * 11, 10));
		break;
	}

	return ret;
}
/*
 * Byte-register read hook.  The virtual VMON status register is built
 * from the chip's manufacturer-specific status bits; everything else
 * passes straight through.  All accesses honour the inter-access delay.
 */
static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg)
{
	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
	struct zl6100_data *data = to_zl6100_data(info);
	int ret, status;

	if (page > 0)
		return -ENXIO;

	zl6100_wait(data);

	switch (reg) {
	case PMBUS_VIRT_STATUS_VMON:
		ret = pmbus_read_byte_data(client, 0,
					   PMBUS_STATUS_MFR_SPECIFIC);
		if (ret < 0)
			break;

		/* Translate VMON_* bits into generic PB_VOLTAGE_* bits. */
		status = 0;
		if (ret & VMON_UV_WARNING)
			status |= PB_VOLTAGE_UV_WARNING;
		if (ret & VMON_OV_WARNING)
			status |= PB_VOLTAGE_OV_WARNING;
		if (ret & VMON_UV_FAULT)
			status |= PB_VOLTAGE_UV_FAULT;
		if (ret & VMON_OV_FAULT)
			status |= PB_VOLTAGE_OV_FAULT;
		ret = status;
		break;
	default:
		ret = pmbus_read_byte_data(client, page, reg);
		break;
	}
	data->access = ktime_get();

	return ret;
}
/*
 * Word-register write hook.  Virtual VMON limit registers map onto the
 * manufacturer fault-limit registers; warning limits are scaled back to
 * the fault value (inverse of the read side's 9/10 and 11/10 scaling).
 * The pmbus cache is cleared because one hardware register backs
 * several virtual ones.
 */
static int zl6100_write_word_data(struct i2c_client *client, int page, int reg,
				  u16 word)
{
	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
	struct zl6100_data *data = to_zl6100_data(info);
	int ret, vreg;

	if (page > 0)
		return -ENXIO;

	switch (reg) {
	case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
		/* Warning limit is stored as 10/9 of the written value. */
		word = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(word) * 10, 9));
		vreg = MFR_VMON_OV_FAULT_LIMIT;
		pmbus_clear_cache(client);
		break;
	case PMBUS_VIRT_VMON_OV_FAULT_LIMIT:
		vreg = MFR_VMON_OV_FAULT_LIMIT;
		pmbus_clear_cache(client);
		break;
	case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
		/* Warning limit is stored as 10/11 of the written value. */
		word = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(word) * 10, 11));
		vreg = MFR_VMON_UV_FAULT_LIMIT;
		pmbus_clear_cache(client);
		break;
	case PMBUS_VIRT_VMON_UV_FAULT_LIMIT:
		vreg = MFR_VMON_UV_FAULT_LIMIT;
		pmbus_clear_cache(client);
		break;
	default:
		if (reg >= PMBUS_VIRT_BASE)
			return -ENXIO;
		vreg = reg;
	}

	zl6100_wait(data);
	ret = pmbus_write_word_data(client, page, vreg, word);
	data->access = ktime_get();

	return ret;
}
/* Rate-limited pass-through of a byte write; only page 0 exists. */
static int zl6100_write_byte(struct i2c_client *client, int page, u8 value)
{
	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
	struct zl6100_data *data = to_zl6100_data(info);
	int rc;

	if (page > 0)
		return -ENXIO;

	zl6100_wait(data);
	rc = pmbus_write_byte(client, page, value);
	data->access = ktime_get();

	return rc;
}
/* Chip names accepted for binding; bmr* entries are rebadged modules. */
static const struct i2c_device_id zl6100_id[] = {
	{"bmr450", zl2005},
	{"bmr451", zl2005},
	{"bmr462", zl2008},
	{"bmr463", zl2008},
	{"bmr464", zl2008},
	{"zl2004", zl2004},
	{"zl2005", zl2005},
	{"zl2006", zl2006},
	{"zl2008", zl2008},
	{"zl2105", zl2105},
	{"zl2106", zl2106},
	{"zl6100", zl6100},
	{"zl6105", zl6105},
	{"zl9101", zl9101},
	{"zl9117", zl9117},
	{ }
};
MODULE_DEVICE_TABLE(i2c, zl6100_id);
/*
 * Probe: read and verify the chip's DEVICE_ID string against the id
 * table, then describe the chip's sensor set to the pmbus core.  The
 * inter-access delay is enforced after every direct I2C access here.
 */
static int zl6100_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	int ret;
	struct zl6100_data *data;
	struct pmbus_driver_info *info;
	u8 device_id[I2C_SMBUS_BLOCK_MAX + 1];
	const struct i2c_device_id *mid;

	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_READ_WORD_DATA
				     | I2C_FUNC_SMBUS_READ_BLOCK_DATA))
		return -ENODEV;

	ret = i2c_smbus_read_block_data(client, ZL6100_DEVICE_ID,
					device_id);
	if (ret < 0) {
		dev_err(&client->dev, "Failed to read device ID\n");
		return ret;
	}
	device_id[ret] = '\0';
	dev_info(&client->dev, "Device ID %s\n", device_id);

	/* Identify the chip from the ID string's prefix. */
	mid = NULL;
	for (mid = zl6100_id; mid->name[0]; mid++) {
		if (!strncasecmp(mid->name, device_id, strlen(mid->name)))
			break;
	}
	if (!mid->name[0]) {
		dev_err(&client->dev, "Unsupported device\n");
		return -ENODEV;
	}
	/* Detected chip wins over the configured name, but warn. */
	if (id->driver_data != mid->driver_data)
		dev_notice(&client->dev,
			   "Device mismatch: Configured %s, detected %s\n",
			   id->name, mid->name);

	data = devm_kzalloc(&client->dev, sizeof(struct zl6100_data),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->id = mid->driver_data;

	/*
	 * According to information from the chip vendor, all currently
	 * supported chips are known to require a wait time between I2C
	 * accesses.
	 */
	data->delay = delay;

	/*
	 * Since there was a direct I2C device access above, wait before
	 * accessing the chip again.
	 */
	data->access = ktime_get();
	zl6100_wait(data);

	info = &data->info;
	info->pages = 1;
	info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
	  | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
	  | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
	  | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;

	/*
	 * ZL2004, ZL9101M, and ZL9117M support monitoring an extra voltage
	 * (VMON for ZL2004, VDRV for ZL9101M and ZL9117M). Report it as vmon.
	 */
	if (data->id == zl2004 || data->id == zl9101 || data->id == zl9117)
		info->func[0] |= PMBUS_HAVE_VMON | PMBUS_HAVE_STATUS_VMON;

	ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
	if (ret < 0)
		return ret;

	/* A second temperature sensor may be enabled in MFR_CONFIG. */
	if (ret & ZL6100_MFR_XTEMP_ENABLE)
		info->func[0] |= PMBUS_HAVE_TEMP2;

	data->access = ktime_get();
	zl6100_wait(data);

	info->read_word_data = zl6100_read_word_data;
	info->read_byte_data = zl6100_read_byte_data;
	info->write_word_data = zl6100_write_word_data;
	info->write_byte = zl6100_write_byte;

	return pmbus_do_probe(client, mid, info);
}
/* I2C driver glue; removal is the generic pmbus teardown. */
static struct i2c_driver zl6100_driver = {
	.driver = {
		   .name = "zl6100",
		   },
	.probe = zl6100_probe,
	.remove = pmbus_do_remove,
	.id_table = zl6100_id,
};

module_i2c_driver(zl6100_driver);

MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for ZL6100 and compatibles");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ArtisteHsu/jetson-tk1-r21.3-kernel | net/netfilter/xt_NETMAP.c | 4527 | 4795 | /*
* (C) 2000-2001 Svenning Soerensen <svenning@post5.tele.dk>
* Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_nat.h>
/*
 * IPv6 NETMAP target: statically map one subnet onto another of equal
 * size by keeping the host part of the address and substituting the
 * network part taken from the configured range.
 */
static unsigned int
netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct nf_nat_range *range = par->targinfo;
	struct nf_nat_range newrange;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	union nf_inet_addr new_addr, netmask;
	unsigned int i;

	ct = nf_ct_get(skb, &ctinfo);
	/* Bits where min and max addresses agree are network bits; the
	 * complement is the host-part mask. */
	for (i = 0; i < ARRAY_SIZE(range->min_addr.ip6); i++)
		netmask.ip6[i] = ~(range->min_addr.ip6[i] ^
				   range->max_addr.ip6[i]);

	/* PREROUTING/OUTPUT rewrite the destination address; the other
	 * hooks rewrite the source. */
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_OUT)
		new_addr.in6 = ipv6_hdr(skb)->daddr;
	else
		new_addr.in6 = ipv6_hdr(skb)->saddr;

	/* Keep host bits, replace network bits with the range's. */
	for (i = 0; i < ARRAY_SIZE(new_addr.ip6); i++) {
		new_addr.ip6[i] &= ~netmask.ip6[i];
		new_addr.ip6[i] |= range->min_addr.ip6[i] &
				   netmask.ip6[i];
	}

	newrange.flags	= range->flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr	= new_addr;
	newrange.max_addr	= new_addr;
	newrange.min_proto	= range->min_proto;
	newrange.max_proto	= range->max_proto;

	return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
}
/* Rule validation: NETMAP is an address mapping, so MAP_IPS must be set. */
static int netmap_tg6_checkentry(const struct xt_tgchk_param *par)
{
	const struct nf_nat_range *range = par->targinfo;

	return (range->flags & NF_NAT_RANGE_MAP_IPS) ? 0 : -EINVAL;
}
/*
 * IPv4 NETMAP target: keep the host bits of the address and substitute
 * the network bits from the configured range, converting the legacy
 * multi-range rule format into an nf_nat_range on the fly.
 */
static unsigned int
netmap_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	__be32 new_ip, netmask;
	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
	struct nf_nat_range newrange;

	NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
		     par->hooknum == NF_INET_POST_ROUTING ||
		     par->hooknum == NF_INET_LOCAL_OUT ||
		     par->hooknum == NF_INET_LOCAL_IN);
	ct = nf_ct_get(skb, &ctinfo);

	/* Bits where min and max agree form the network mask. */
	netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);

	/* PREROUTING/OUTPUT rewrite the destination; the others the source. */
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_OUT)
		new_ip = ip_hdr(skb)->daddr & ~netmask;
	else
		new_ip = ip_hdr(skb)->saddr & ~netmask;
	new_ip |= mr->range[0].min_ip & netmask;

	memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
	memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
	newrange.flags	     = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.ip = new_ip;
	newrange.max_addr.ip = new_ip;
	newrange.min_proto   = mr->range[0].min;
	newrange.max_proto   = mr->range[0].max;

	/* Hand modified range to generic setup. */
	return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
}
/* Rule validation: MAP_IPS must be set and exactly one range given. */
static int netmap_tg4_check(const struct xt_tgchk_param *par)
{
	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;

	if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
		pr_debug("bad MAP_IPS.\n");
		return -EINVAL;
	}
	if (mr->rangesize != 1) {
		pr_debug("bad rangesize %u.\n", mr->rangesize);
		return -EINVAL;
	}
	return 0;
}
/* Target registrations, one per address family, both named "NETMAP". */
static struct xt_target netmap_tg_reg[] __read_mostly = {
	{
		.name       = "NETMAP",
		.family     = NFPROTO_IPV6,
		.revision   = 0,
		.target     = netmap_tg6,
		.targetsize = sizeof(struct nf_nat_range),
		.table      = "nat",
		.hooks      = (1 << NF_INET_PRE_ROUTING) |
			      (1 << NF_INET_POST_ROUTING) |
			      (1 << NF_INET_LOCAL_OUT) |
			      (1 << NF_INET_LOCAL_IN),
		.checkentry = netmap_tg6_checkentry,
		.me         = THIS_MODULE,
	},
	{
		.name       = "NETMAP",
		.family     = NFPROTO_IPV4,
		.revision   = 0,
		.target     = netmap_tg4,
		.targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
		.table      = "nat",
		.hooks      = (1 << NF_INET_PRE_ROUTING) |
			      (1 << NF_INET_POST_ROUTING) |
			      (1 << NF_INET_LOCAL_OUT) |
			      (1 << NF_INET_LOCAL_IN),
		.checkentry = netmap_tg4_check,
		.me         = THIS_MODULE,
	},
};
/* Register both NETMAP targets (IPv6 and IPv4) with xtables. */
static int __init netmap_tg_init(void)
{
	return xt_register_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
}

/* Unregister both NETMAP targets. */
static void netmap_tg_exit(void)
{
	xt_unregister_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
}

module_init(netmap_tg_init);
module_exit(netmap_tg_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of subnets");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS("ip6t_NETMAP");
| gpl-2.0 |
tositrino/linux | drivers/video/gbefb.c | 8111 | 33504 | /*
* SGI GBE frame buffer driver
*
* Copyright (C) 1999 Silicon Graphics, Inc. - Jeffrey Newquist
* Copyright (C) 2002 Vivien Chappelier <vivien.chappelier@linux-mips.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#ifdef CONFIG_X86
#include <asm/mtrr.h>
#endif
#ifdef CONFIG_MIPS
#include <asm/addrspace.h>
#endif
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <video/gbe.h>
/* Memory-mapped GBE register file; ioremap()ed once in gbefb_probe(). */
static struct sgi_gbe *gbe;

/* Per-device private state, stored in fb_info->par. */
struct gbefb_par {
	struct fb_var_screeninfo var;	/* requested video mode */
	struct gbe_timing_info timing;	/* hardware timing derived from var */
	int valid;
};
/* Physical base address of the GBE register block, per platform. */
#ifdef CONFIG_SGI_IP32
#define GBE_BASE	0x16000000 /* SGI O2 */
#endif

#ifdef CONFIG_X86_VISWS
#define GBE_BASE	0xd0000000 /* SGI Visual Workstation */
#endif

/* macro for fastest write-though access to the framebuffer */
#ifdef CONFIG_MIPS
#ifdef CONFIG_CPU_R10000
#define pgprot_fb(_prot) (((_prot) & (~_CACHE_MASK)) | _CACHE_UNCACHED_ACCELERATED)
#else
#define pgprot_fb(_prot) (((_prot) & (~_CACHE_MASK)) | _CACHE_CACHABLE_NO_WA)
#endif
#endif
#ifdef CONFIG_X86
#define pgprot_fb(_prot) ((_prot) | _PAGE_PCD)
#endif

/*
 *  RAM we reserve for the frame buffer. This defines the maximum screen
 *  size
 */
#if CONFIG_FB_GBE_MEM > 8
#error GBE Framebuffer cannot use more than 8MB of memory
#endif

/* Framebuffer memory is carved into 64 kB hardware tiles. */
#define TILE_SHIFT 16
#define TILE_SIZE       (1 << TILE_SHIFT)
#define TILE_MASK       (TILE_SIZE - 1)

static unsigned int gbe_mem_size = CONFIG_FB_GBE_MEM * 1024*1024;
static void *gbe_mem;			/* kernel mapping of the framebuffer */
static dma_addr_t gbe_dma_addr;		/* DMA handle (0 if memory came from boot) */
static unsigned long gbe_mem_phys;	/* physical address of the framebuffer */

/* Tile translation table: one 16-bit physical tile number per 64 kB tile. */
static struct {
	uint16_t *cpu;
	dma_addr_t dma;
} gbe_tiles;

static int gbe_revision;		/* GBE chip revision, read from ctrlstat */

static int ypan, ywrap;			/* panning capabilities (currently unused) */

static uint32_t pseudo_palette[16];	/* truecolor pseudo palette for fbcon */
static uint32_t gbe_cmap[256];		/* shadow of the hardware color map */
static int gbe_turned_on; /* 0 turned off, 1 turned on */

static char *mode_option __devinitdata = NULL;
/* default CRT mode */
static struct fb_var_screeninfo default_var_CRT __devinitdata = {
	/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
	.xres		= 640,
	.yres		= 480,
	.xres_virtual	= 640,
	.yres_virtual	= 480,
	.xoffset	= 0,
	.yoffset	= 0,
	.bits_per_pixel	= 8,
	.grayscale	= 0,
	.red		= { 0, 8, 0 },
	.green		= { 0, 8, 0 },
	.blue		= { 0, 8, 0 },
	.transp		= { 0, 0, 0 },
	.nonstd		= 0,
	.activate	= 0,
	.height		= -1,	/* physical size unknown */
	.width		= -1,
	.accel_flags	= 0,
	.pixclock	= 39722,	/* picoseconds */
	.left_margin	= 48,
	.right_margin	= 16,
	.upper_margin	= 33,
	.lower_margin	= 10,
	.hsync_len	= 96,
	.vsync_len	= 2,
	.sync		= 0,
	.vmode		= FB_VMODE_NONINTERLACED,
};
/* default LCD mode (SGI 1600SW flat panel) */
static struct fb_var_screeninfo default_var_LCD __devinitdata = {
	/* 1600x1024, 8 bpp */
	.xres		= 1600,
	.yres		= 1024,
	.xres_virtual	= 1600,
	.yres_virtual	= 1024,
	.xoffset	= 0,
	.yoffset	= 0,
	.bits_per_pixel	= 8,
	.grayscale	= 0,
	.red		= { 0, 8, 0 },
	.green		= { 0, 8, 0 },
	.blue		= { 0, 8, 0 },
	.transp		= { 0, 0, 0 },
	.nonstd		= 0,
	.activate	= 0,
	.height		= -1,	/* physical size unknown */
	.width		= -1,
	.accel_flags	= 0,
	.pixclock	= 9353,	/* picoseconds */
	.left_margin	= 20,
	.right_margin	= 30,
	.upper_margin	= 37,
	.lower_margin	= 3,
	.hsync_len	= 20,
	.vsync_len	= 3,
	.sync		= 0,
	.vmode		= FB_VMODE_NONINTERLACED
};
/* default modedb mode */
/* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */
static struct fb_videomode default_mode_CRT __devinitdata = {
	.refresh	= 60,
	.xres		= 640,
	.yres		= 480,
	.pixclock	= 39722,	/* picoseconds */
	.left_margin	= 48,
	.right_margin	= 16,
	.upper_margin	= 33,
	.lower_margin	= 10,
	.hsync_len	= 96,
	.vsync_len	= 2,
	.sync		= 0,
	.vmode		= FB_VMODE_NONINTERLACED,
};
/* 1600x1024 SGI flatpanel 1600sw */
static struct fb_videomode default_mode_LCD __devinitdata = {
	/* 1600x1024, 8 bpp */
	.xres		= 1600,
	.yres		= 1024,
	.pixclock	= 9353,	/* picoseconds */
	.left_margin	= 20,
	.right_margin	= 30,
	.upper_margin	= 37,
	.lower_margin	= 3,
	.hsync_len	= 20,
	.vsync_len	= 3,
	.vmode		= FB_VMODE_NONINTERLACED,
};
/* Active defaults; switched to the LCD variants by "monitor:" option parsing. */
static struct fb_videomode *default_mode __devinitdata = &default_mode_CRT;
static struct fb_var_screeninfo *default_var __devinitdata = &default_var_CRT;

static int flat_panel_enabled = 0;	/* nonzero when driving the 1600SW panel */
/* Reset the GBE by (re)enabling its dotclock PLL via the control/status
 * register.  The magic value comes from the hardware documentation. */
static void gbe_reset(void)
{
	/* Turn on dotclock PLL */
	gbe->ctrlstat = 0x300aa000;
}
/*
 * Function:	gbe_turn_off
 * Parameters:	(None)
 * Description:	This should turn off the monitor and gbe.  This is used
 *		when switching between the serial console and the graphics
 *		console.
 *
 * The shutdown is ordered: DMA engines first, then the pixel counter,
 * then the dot clock, and finally the frame FIFO is reset.  Each step
 * is confirmed by polling, with a timeout that only logs an error.
 */
static void gbe_turn_off(void)
{
	int i;
	unsigned int val, x, y, vpixen_off;

	gbe_turned_on = 0;

	/* check if pixel counter is on */
	val = gbe->vt_xy;
	if (GET_GBE_FIELD(VT_XY, FREEZE, val) == 1)
		return;		/* already frozen, nothing to do */

	/* turn off DMA */
	val = gbe->ovr_control;
	SET_GBE_FIELD(OVR_CONTROL, OVR_DMA_ENABLE, val, 0);
	gbe->ovr_control = val;
	udelay(1000);
	val = gbe->frm_control;
	SET_GBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, val, 0);
	gbe->frm_control = val;
	udelay(1000);
	val = gbe->did_control;
	SET_GBE_FIELD(DID_CONTROL, DID_DMA_ENABLE, val, 0);
	gbe->did_control = val;
	udelay(1000);

	/* We have to wait through two vertical retrace periods before
	 * the pixel DMA is turned off for sure. */
	for (i = 0; i < 10000; i++) {
		val = gbe->frm_inhwctrl;
		if (GET_GBE_FIELD(FRM_INHWCTRL, FRM_DMA_ENABLE, val)) {
			udelay(10);
		} else {
			val = gbe->ovr_inhwctrl;
			if (GET_GBE_FIELD(OVR_INHWCTRL, OVR_DMA_ENABLE, val)) {
				udelay(10);
			} else {
				val = gbe->did_inhwctrl;
				if (GET_GBE_FIELD(DID_INHWCTRL, DID_DMA_ENABLE, val)) {
					udelay(10);
				} else
					break;
			}
		}
	}
	if (i == 10000)
		printk(KERN_ERR "gbefb: turn off DMA timed out\n");

	/* wait for vpixen_off */
	val = gbe->vt_vpixen;
	vpixen_off = GET_GBE_FIELD(VT_VPIXEN, VPIXEN_OFF, val);

	/* wait until the beam position is above vpixen_off ... */
	for (i = 0; i < 100000; i++) {
		val = gbe->vt_xy;
		x = GET_GBE_FIELD(VT_XY, X, val);
		y = GET_GBE_FIELD(VT_XY, Y, val);
		if (y < vpixen_off)
			break;
		udelay(1);
	}
	if (i == 100000)
		printk(KERN_ERR
		       "gbefb: wait for vpixen_off timed out\n");
	/* ... then wait for it to cross vpixen_off again */
	for (i = 0; i < 10000; i++) {
		val = gbe->vt_xy;
		x = GET_GBE_FIELD(VT_XY, X, val);
		y = GET_GBE_FIELD(VT_XY, Y, val);
		if (y > vpixen_off)
			break;
		udelay(1);
	}
	if (i == 10000)
		printk(KERN_ERR "gbefb: wait for vpixen_off timed out\n");

	/* turn off pixel counter */
	val = 0;
	SET_GBE_FIELD(VT_XY, FREEZE, val, 1);
	gbe->vt_xy = val;
	udelay(10000);
	for (i = 0; i < 10000; i++) {
		val = gbe->vt_xy;
		if (GET_GBE_FIELD(VT_XY, FREEZE, val) != 1)
			udelay(10);
		else
			break;
	}
	if (i == 10000)
		printk(KERN_ERR "gbefb: turn off pixel clock timed out\n");

	/* turn off dot clock */
	val = gbe->dotclock;
	SET_GBE_FIELD(DOTCLK, RUN, val, 0);
	gbe->dotclock = val;
	udelay(10000);
	for (i = 0; i < 10000; i++) {
		val = gbe->dotclock;
		if (GET_GBE_FIELD(DOTCLK, RUN, val))
			udelay(10);
		else
			break;
	}
	if (i == 10000)
		printk(KERN_ERR "gbefb: turn off dotclock timed out\n");

	/* reset the frame DMA FIFO */
	val = gbe->frm_size_tile;
	SET_GBE_FIELD(FRM_SIZE_TILE, FRM_FIFO_RESET, val, 1);
	gbe->frm_size_tile = val;
	SET_GBE_FIELD(FRM_SIZE_TILE, FRM_FIFO_RESET, val, 0);
	gbe->frm_size_tile = val;
}
/*
 * Power the display pipeline back up, in the reverse order of
 * gbe_turn_off(): dot clock first, then the pixel counter, then frame
 * DMA.  Each step is confirmed by polling with a logged-only timeout.
 */
static void gbe_turn_on(void)
{
	unsigned int val, i;

	/*
	 * Check if pixel counter is off, for unknown reason this
	 * code hangs Visual Workstations
	 */
	if (gbe_revision < 2) {
		val = gbe->vt_xy;
		if (GET_GBE_FIELD(VT_XY, FREEZE, val) == 0)
			return;	/* already running */
	}

	/* turn on dot clock */
	val = gbe->dotclock;
	SET_GBE_FIELD(DOTCLK, RUN, val, 1);
	gbe->dotclock = val;
	udelay(10000);
	for (i = 0; i < 10000; i++) {
		val = gbe->dotclock;
		if (GET_GBE_FIELD(DOTCLK, RUN, val) != 1)
			udelay(10);
		else
			break;
	}
	if (i == 10000)
		printk(KERN_ERR "gbefb: turn on dotclock timed out\n");

	/* turn on pixel counter */
	val = 0;
	SET_GBE_FIELD(VT_XY, FREEZE, val, 0);
	gbe->vt_xy = val;
	udelay(10000);
	for (i = 0; i < 10000; i++) {
		val = gbe->vt_xy;
		if (GET_GBE_FIELD(VT_XY, FREEZE, val))
			udelay(10);
		else
			break;
	}
	if (i == 10000)
		printk(KERN_ERR "gbefb: turn on pixel clock timed out\n");

	/* turn on DMA */
	val = gbe->frm_control;
	SET_GBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, val, 1);
	gbe->frm_control = val;
	udelay(1000);
	for (i = 0; i < 10000; i++) {
		val = gbe->frm_inhwctrl;
		if (GET_GBE_FIELD(FRM_INHWCTRL, FRM_DMA_ENABLE, val) != 1)
			udelay(10);
		else
			break;
	}
	if (i == 10000)
		printk(KERN_ERR "gbefb: turn on DMA timed out\n");

	gbe_turned_on = 1;
}
/*
 * Copy the shadow color map (gbe_cmap[]) into the hardware CLUT.
 * The cmap FIFO can hold a limited number of entries, so wait for it
 * to drain below 63 pending writes before pushing each entry.
 */
static void gbe_loadcmap(void)
{
	int i, j;

	for (i = 0; i < 256; i++) {
		/* wait for the color map FIFO to have a free entry */
		for (j = 0; j < 1000 && gbe->cm_fifo >= 63; j++)
			udelay(10);
		if (j == 1000)
			printk(KERN_ERR "gbefb: cmap FIFO timeout\n");

		gbe->cmap[i] = gbe_cmap[i];
	}
}
/*
 * Blank or unblank the display.  The GBE has no partial blanking
 * support, so any non-trivial blank level simply powers the pipeline
 * down, and unblanking powers it up again and restores the color map.
 */
static int gbefb_blank(int blank, struct fb_info *info)
{
	/* 0 unblank, 1 blank, 2 no vsync, 3 no hsync, 4 off */
	if (blank == FB_BLANK_UNBLANK) {
		gbe_turn_on();
		gbe_loadcmap();
	} else if (blank == FB_BLANK_NORMAL) {
		gbe_turn_off();
	}
	/* other blank levels: nothing to do */
	return 0;
}
/*
 * Setup flatpanel related registers.
 *
 * Programs the sync polarities requested by the mode and then forces
 * the fixed 1600x1024 panel geometry and PLL settings of the SGI
 * 1600SW, overriding whatever compute_gbe_timing() derived.
 */
static void gbefb_setup_flatpanel(struct gbe_timing_info *timing)
{
	int fp_wid, fp_hgt, fp_vbs, fp_vbe;
	u32 outputVal = 0;

	SET_GBE_FIELD(VT_FLAGS, HDRV_INVERT, outputVal,
		(timing->flags & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1);
	SET_GBE_FIELD(VT_FLAGS, VDRV_INVERT, outputVal,
		(timing->flags & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1);
	gbe->vt_flags = outputVal;

	/* Turn on the flat panel */
	fp_wid = 1600;
	fp_hgt = 1024;
	fp_vbs = 0;
	fp_vbe = 1600;
	/* fixed panel PLL configuration */
	timing->pll_m = 4;
	timing->pll_n = 1;
	timing->pll_p = 0;

	outputVal = 0;
	SET_GBE_FIELD(FP_DE, ON, outputVal, fp_vbs);
	SET_GBE_FIELD(FP_DE, OFF, outputVal, fp_vbe);
	gbe->fp_de = outputVal;
	outputVal = 0;
	SET_GBE_FIELD(FP_HDRV, OFF, outputVal, fp_wid);
	gbe->fp_hdrv = outputVal;
	outputVal = 0;
	SET_GBE_FIELD(FP_VDRV, ON, outputVal, 1);
	SET_GBE_FIELD(FP_VDRV, OFF, outputVal, fp_hgt + 1);
	gbe->fp_vdrv = outputVal;
}
/* PLL constraints for one crystal variant: input clock (MHz) and the
 * allowed VCO frequency range (MHz). */
struct gbe_pll_info {
	int clock_rate;
	int fvco_min;
	int fvco_max;
};

/* Index 0: pre-rev-2 GBE (20 MHz crystal); index 1: rev 2+ (27 MHz). */
static struct gbe_pll_info gbe_pll_table[2] = {
	{ 20, 80, 220 },
	{ 27, 80, 220 },
};
/*
 * Find the PLL divider triple (m, n, p) whose output pixel clock best
 * matches var->pixclock, and (optionally) fill *timing with the full
 * video timing derived from var.
 *
 * Returns the achieved pixel clock in picoseconds, or -EINVAL when no
 * divider combination satisfies the VCO constraints (resolution too
 * high for the hardware).
 */
static int compute_gbe_timing(struct fb_var_screeninfo *var,
			      struct gbe_timing_info *timing)
{
	int pll_m, pll_n, pll_p, error, best_m, best_n, best_p, best_error;
	int pixclock;
	struct gbe_pll_info *gbe_pll;

	/* crystal frequency depends on the chip revision */
	if (gbe_revision < 2)
		gbe_pll = &gbe_pll_table[0];
	else
		gbe_pll = &gbe_pll_table[1];

	/* Determine valid resolution and timing
	 * GBE crystal runs at 20Mhz or 27Mhz
	 * pll_m, pll_n, pll_p define the following frequencies
	 * fvco = pll_m * 20Mhz / pll_n
	 * fout = fvco / (2**pll_p) */
	best_error = 1000000000;
	best_n = best_m = best_p = 0;
	/* exhaustive search over the divider space */
	for (pll_p = 0; pll_p < 4; pll_p++)
		for (pll_m = 1; pll_m < 256; pll_m++)
			for (pll_n = 1; pll_n < 64; pll_n++) {
				pixclock = (1000000 / gbe_pll->clock_rate) *
						(pll_n << pll_p) / pll_m;
				error = var->pixclock - pixclock;
				if (error < 0)
					error = -error;

				/* NOTE(review): the VCO bound checks below use
				 * integer division on both sides, so they are
				 * only approximate — presumably good enough in
				 * practice; verify before changing. */
				if (error < best_error &&
				    pll_m / pll_n >
				    gbe_pll->fvco_min / gbe_pll->clock_rate &&
 				    pll_m / pll_n <
				    gbe_pll->fvco_max / gbe_pll->clock_rate) {
					best_error = error;
					best_m = pll_m;
					best_n = pll_n;
					best_p = pll_p;
				}
			}

	if (!best_n || !best_m)
		return -EINVAL;	/* Resolution to high */

	pixclock = (1000000 / gbe_pll->clock_rate) *
		(best_n << best_p) / best_m;

	/* set video timing information */
	if (timing) {
		timing->width = var->xres;
		timing->height = var->yres;
		timing->pll_m = best_m;
		timing->pll_n = best_n;
		timing->pll_p = best_p;
		timing->cfreq = gbe_pll->clock_rate * 1000 * timing->pll_m /
			(timing->pll_n << timing->pll_p);
		timing->htotal = var->left_margin + var->xres +
				var->right_margin + var->hsync_len;
		timing->vtotal = var->upper_margin + var->yres +
				var->lower_margin + var->vsync_len;
		timing->fields_sec = 1000 * timing->cfreq / timing->htotal *
				1000 / timing->vtotal;
		timing->hblank_start = var->xres;
		timing->vblank_start = var->yres;
		timing->hblank_end = timing->htotal;
		timing->hsync_start = var->xres + var->right_margin + 1;
		timing->hsync_end = timing->hsync_start + var->hsync_len;
		timing->vblank_end = timing->vtotal;
		timing->vsync_start = var->yres + var->lower_margin + 1;
		timing->vsync_end = timing->vsync_start + var->vsync_len;
	}

	return pixclock;
}
/*
 * Program the video timing registers (dot clock PLL, pixel counter
 * limits, sync/blank windows, and the internal cursor/DID/VC start
 * offsets) from a timing description computed by compute_gbe_timing().
 * The dot clock is configured here but not started; gbe_turn_on() does
 * that.
 */
static void gbe_set_timing_info(struct gbe_timing_info *timing)
{
	int temp;
	unsigned int val;

	/* setup dot clock PLL */
	val = 0;
	SET_GBE_FIELD(DOTCLK, M, val, timing->pll_m - 1);
	SET_GBE_FIELD(DOTCLK, N, val, timing->pll_n - 1);
	SET_GBE_FIELD(DOTCLK, P, val, timing->pll_p);
	SET_GBE_FIELD(DOTCLK, RUN, val, 0);	/* do not start yet */
	gbe->dotclock = val;
	udelay(10000);

	/* setup pixel counter */
	val = 0;
	SET_GBE_FIELD(VT_XYMAX, MAXX, val, timing->htotal);
	SET_GBE_FIELD(VT_XYMAX, MAXY, val, timing->vtotal);
	gbe->vt_xymax = val;

	/* setup video timing signals */
	val = 0;
	SET_GBE_FIELD(VT_VSYNC, VSYNC_ON, val, timing->vsync_start);
	SET_GBE_FIELD(VT_VSYNC, VSYNC_OFF, val, timing->vsync_end);
	gbe->vt_vsync = val;
	val = 0;
	SET_GBE_FIELD(VT_HSYNC, HSYNC_ON, val, timing->hsync_start);
	SET_GBE_FIELD(VT_HSYNC, HSYNC_OFF, val, timing->hsync_end);
	gbe->vt_hsync = val;
	val = 0;
	SET_GBE_FIELD(VT_VBLANK, VBLANK_ON, val, timing->vblank_start);
	SET_GBE_FIELD(VT_VBLANK, VBLANK_OFF, val, timing->vblank_end);
	gbe->vt_vblank = val;
	val = 0;
	/* hblank window is shifted by hardware-specific fixups */
	SET_GBE_FIELD(VT_HBLANK, HBLANK_ON, val,
		      timing->hblank_start - 5);
	SET_GBE_FIELD(VT_HBLANK, HBLANK_OFF, val,
		      timing->hblank_end - 3);
	gbe->vt_hblank = val;

	/* setup internal timing signals */
	val = 0;
	SET_GBE_FIELD(VT_VCMAP, VCMAP_ON, val, timing->vblank_start);
	SET_GBE_FIELD(VT_VCMAP, VCMAP_OFF, val, timing->vblank_end);
	gbe->vt_vcmap = val;
	val = 0;
	SET_GBE_FIELD(VT_HCMAP, HCMAP_ON, val, timing->hblank_start);
	SET_GBE_FIELD(VT_HCMAP, HCMAP_OFF, val, timing->hblank_end);
	gbe->vt_hcmap = val;

	val = 0;
	/* start-Y offset, expressed as a (wrapped) negative line count */
	temp = timing->vblank_start - timing->vblank_end - 1;
	if (temp > 0)
		temp = -temp;

	if (flat_panel_enabled)
		gbefb_setup_flatpanel(timing);

	SET_GBE_FIELD(DID_START_XY, DID_STARTY, val, (u32) temp);
	if (timing->hblank_end >= 20)
		SET_GBE_FIELD(DID_START_XY, DID_STARTX, val,
			      timing->hblank_end - 20);
	else
		SET_GBE_FIELD(DID_START_XY, DID_STARTX, val,
			      timing->htotal - (20 - timing->hblank_end));
	gbe->did_start_xy = val;

	val = 0;
	SET_GBE_FIELD(CRS_START_XY, CRS_STARTY, val, (u32) (temp + 1));
	if (timing->hblank_end >= GBE_CRS_MAGIC)
		SET_GBE_FIELD(CRS_START_XY, CRS_STARTX, val,
			      timing->hblank_end - GBE_CRS_MAGIC);
	else
		SET_GBE_FIELD(CRS_START_XY, CRS_STARTX, val,
			      timing->htotal - (GBE_CRS_MAGIC -
						timing->hblank_end));
	gbe->crs_start_xy = val;

	val = 0;
	SET_GBE_FIELD(VC_START_XY, VC_STARTY, val, (u32) temp);
	SET_GBE_FIELD(VC_START_XY, VC_STARTX, val, timing->hblank_end - 4);
	gbe->vc_start_xy = val;

	val = 0;
	temp = timing->hblank_end - GBE_PIXEN_MAGIC_ON;
	if (temp < 0)
		temp += timing->htotal;	/* allow blank to wrap around */

	SET_GBE_FIELD(VT_HPIXEN, HPIXEN_ON, val, temp);
	SET_GBE_FIELD(VT_HPIXEN, HPIXEN_OFF, val,
		      ((temp + timing->width -
			GBE_PIXEN_MAGIC_OFF) % timing->htotal));
	gbe->vt_hpixen = val;

	val = 0;
	SET_GBE_FIELD(VT_VPIXEN, VPIXEN_ON, val, timing->vblank_end);
	SET_GBE_FIELD(VT_VPIXEN, VPIXEN_OFF, val, timing->vblank_start);
	gbe->vt_vpixen = val;

	/* turn off sync on green */
	val = 0;
	SET_GBE_FIELD(VT_FLAGS, SYNC_LOW, val, 1);
	gbe->vt_flags = val;
}
/*
 *  Set the hardware according to 'par'.
 *
 *  Recomputes the timing for the current mode, stops the chip,
 *  reprograms timing/DID/tile registers, restarts it, and reloads the
 *  gamma and color maps.  Always returns 0 — the mode was already
 *  validated by gbefb_check_var().
 */
static int gbefb_set_par(struct fb_info *info)
{
	int i;
	unsigned int val;
	int wholeTilesX, partTilesX, maxPixelsPerTileX;
	int height_pix;
	int xpmax, ypmax;	/* Monitor resolution */
	int bytesPerPixel;	/* Bytes per pixel */
	struct gbefb_par *par = (struct gbefb_par *) info->par;

	compute_gbe_timing(&info->var, &par->timing);

	bytesPerPixel = info->var.bits_per_pixel / 8;
	info->fix.line_length = info->var.xres_virtual * bytesPerPixel;
	xpmax = par->timing.width;
	ypmax = par->timing.height;

	/* turn off GBE */
	gbe_turn_off();

	/* set timing info */
	gbe_set_timing_info(&par->timing);

	/* initialize DIDs: pick the framebuffer pixel format for every
	 * window ID so the whole screen uses the same mode */
	val = 0;
	switch (bytesPerPixel) {
	case 1:
		SET_GBE_FIELD(WID, TYP, val, GBE_CMODE_I8);
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
		break;
	case 2:
		SET_GBE_FIELD(WID, TYP, val, GBE_CMODE_ARGB5);
		info->fix.visual = FB_VISUAL_TRUECOLOR;
		break;
	case 4:
		SET_GBE_FIELD(WID, TYP, val, GBE_CMODE_RGB8);
		info->fix.visual = FB_VISUAL_TRUECOLOR;
		break;
	}
	SET_GBE_FIELD(WID, BUF, val, GBE_BMODE_BOTH);

	for (i = 0; i < 32; i++)
		gbe->mode_regs[i] = val;

	/* Initialize interrupts */
	gbe->vt_intr01 = 0xffffffff;
	gbe->vt_intr23 = 0xffffffff;

	/* HACK:
	   The GBE hardware uses a tiled memory to screen mapping. Tiles are
	   blocks of 512x128, 256x128 or 128x128 pixels, respectively for 8bit,
	   16bit and 32 bit modes (64 kB). They cover the screen with partial
	   tiles on the right and/or bottom of the screen if needed.
	   For example in 640x480 8 bit mode the mapping is:

	   <-------- 640 ----->
	   <---- 512 ----><128|384 offscreen>
	   ^  ^
	   | 128    [tile 0]        [tile 1]
	   |  v
	   ^
	   4 128    [tile 2]        [tile 3]
	   8  v
	   0  ^
	   128    [tile 4]        [tile 5]
	   |  v
	   |  ^
	   v  96    [tile 6]        [tile 7]
	   32 offscreen

	   Tiles have the advantage that they can be allocated individually in
	   memory. However, this mapping is not linear at all, which is not
	   really convenient. In order to support linear addressing, the GBE
	   DMA hardware is fooled into thinking the screen is only one tile
	   large and but has a greater height, so that the DMA transfer covers
	   the same region.
	   Tiles are still allocated as independent chunks of 64KB of
	   continuous physical memory and remapped so that the kernel sees the
	   framebuffer as a continuous virtual memory. The GBE tile table is
	   set up so that each tile references one of these 64k blocks:

	   GBE -> tile list    framebuffer           TLB   <------------ CPU
	          [ tile 0 ] -> [ 64KB ]  <- [ 16x 4KB page entries ]     ^
	             ...           ...              ...       linear virtual FB
	          [ tile n ] -> [ 64KB ]  <- [ 16x 4KB page entries ]     v

	   The GBE hardware is then told that the buffer is 512*tweaked_height,
	   with tweaked_height = real_width*real_height/pixels_per_tile.
	   Thus the GBE hardware will scan the first tile, filing the first 64k
	   covered region of the screen, and then will proceed to the next
	   tile, until the whole screen is covered.

	   Here is what would happen at 640x480 8bit:

	   normal tiling               linear
	   ^   11111111111111112222    11111111111111111111  ^
	   128 11111111111111112222    11111111111111111111 102 lines
	       11111111111111112222    11111111111111111111  v
	   V   11111111111111112222    11111111222222222222
	       33333333333333334444    22222222222222222222
	       33333333333333334444    22222222222222222222
	       <      512     ><256>   <      640           >

	   NOTE: The only mode for which this is not working is 800x600 8bit,
	   as 800*600/512 = 937.5 which is not integer and thus causes
	   flickering.
	   I guess this is not so important as one can use 640x480 8bit or
	   800x600 16bit anyway.
	 */

	/* Tell gbe about the tiles table location */
	/* tile_ptr -> [ tile 1 ] -> FB mem */
	/*             [ tile 2 ] -> FB mem */
	/*               ...                */
	val = 0;
	SET_GBE_FIELD(FRM_CONTROL, FRM_TILE_PTR, val, gbe_tiles.dma >> 9);
	SET_GBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, val, 0); /* do not start */
	SET_GBE_FIELD(FRM_CONTROL, FRM_LINEAR, val, 0);
	gbe->frm_control = val;

	maxPixelsPerTileX = 512 / bytesPerPixel;
	wholeTilesX = 1;	/* fake single-tile-wide screen (see HACK above) */
	partTilesX = 0;

	/* Initialize the framebuffer */
	val = 0;
	SET_GBE_FIELD(FRM_SIZE_TILE, FRM_WIDTH_TILE, val, wholeTilesX);
	SET_GBE_FIELD(FRM_SIZE_TILE, FRM_RHS, val, partTilesX);

	switch (bytesPerPixel) {
	case 1:
		SET_GBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, val,
			      GBE_FRM_DEPTH_8);
		break;
	case 2:
		SET_GBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, val,
			      GBE_FRM_DEPTH_16);
		break;
	case 4:
		SET_GBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, val,
			      GBE_FRM_DEPTH_32);
		break;
	}
	gbe->frm_size_tile = val;

	/* compute tweaked height */
	height_pix = xpmax * ypmax / maxPixelsPerTileX;

	val = 0;
	SET_GBE_FIELD(FRM_SIZE_PIXEL, FB_HEIGHT_PIX, val, height_pix);
	gbe->frm_size_pixel = val;

	/* turn off DID and overlay DMA */
	gbe->did_control = 0;
	gbe->ovr_width_tile = 0;

	/* Turn off mouse cursor */
	gbe->crs_ctl = 0;

	/* Turn on GBE */
	gbe_turn_on();

	/* Initialize the gamma map to identity */
	udelay(10);
	for (i = 0; i < 256; i++)
		gbe->gmap[i] = (i << 24) | (i << 16) | (i << 8);

	/* Initialize the color map to a grayscale ramp */
	for (i = 0; i < 256; i++)
		gbe_cmap[i] = (i << 8) | (i << 16) | (i << 24);

	gbe_loadcmap();

	return 0;
}
/*
 * Fill in the fixed framebuffer parameters (fb_fix_screeninfo) for the
 * given variable mode: identity string, memory/MMIO ranges, and the
 * visual implied by the pixel depth.  No panning is supported.
 */
static void gbefb_encode_fix(struct fb_fix_screeninfo *fix,
			     struct fb_var_screeninfo *var)
{
	memset(fix, 0, sizeof(*fix));
	strcpy(fix->id, "SGI GBE");

	/* framebuffer memory */
	fix->smem_start = (unsigned long) gbe_mem;
	fix->smem_len = gbe_mem_size;
	fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;

	/* only 8 bpp uses the hardware palette */
	fix->visual = (var->bits_per_pixel == 8) ? FB_VISUAL_PSEUDOCOLOR
						 : FB_VISUAL_TRUECOLOR;
	fix->type = FB_TYPE_PACKED_PIXELS;
	fix->type_aux = 0;
	fix->accel = FB_ACCEL_NONE;

	/* no hardware panning */
	fix->ywrapstep = 0;
	fix->xpanstep = 0;
	fix->ypanstep = 0;

	/* register window */
	fix->mmio_start = GBE_BASE;
	fix->mmio_len = sizeof(struct sgi_gbe);
}
/*
 *  Set a single color register. The values supplied are already
 *  rounded down to the hardware's capabilities (according to the
 *  entries in the var structure). Return != 0 for invalid regno.
 *
 *  In 8 bpp mode the entry goes into the hardware CLUT (through its
 *  FIFO); in truecolor modes the first 16 entries feed the software
 *  pseudo palette used by fbcon.
 */
static int gbefb_setcolreg(unsigned regno, unsigned red, unsigned green,
			     unsigned blue, unsigned transp,
			     struct fb_info *info)
{
	int i;

	if (regno > 255)
		return 1;

	/* fbdev passes 16-bit components; the hardware wants 8 bits */
	red >>= 8;
	green >>= 8;
	blue >>= 8;

	if (info->var.bits_per_pixel <= 8) {
		gbe_cmap[regno] = (red << 24) | (green << 16) | (blue << 8);
		if (gbe_turned_on) {
			/* wait for the color map FIFO to have a free entry */
			for (i = 0; i < 1000 && gbe->cm_fifo >= 63; i++)
				udelay(10);
			if (i == 1000) {
				printk(KERN_ERR "gbefb: cmap FIFO timeout\n");
				return 1;
			}
			gbe->cmap[regno] = gbe_cmap[regno];
		}
	} else if (regno < 16) {
		switch (info->var.bits_per_pixel) {
		case 15:
		case 16:
			/* RGB 555: reduce each component to 5 bits */
			red >>= 3;
			green >>= 3;
			blue >>= 3;
			pseudo_palette[regno] =
				(red << info->var.red.offset) |
				(green << info->var.green.offset) |
				(blue << info->var.blue.offset);
			break;
		case 32:
			pseudo_palette[regno] =
				(red << info->var.red.offset) |
				(green << info->var.green.offset) |
				(blue << info->var.blue.offset);
			break;
		}
	}

	return 0;
}
/*
 *  Check video mode validity, eventually modify var to best match.
 *
 *  Rounds the pixel depth to 8/16/32, verifies the mode can be mapped
 *  with the tile-table trick, snaps the pixel clock to the nearest
 *  achievable PLL output, and fills in the bitfield layout and margins
 *  actually programmed by the hardware.
 */
static int gbefb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	unsigned int line_length;
	struct gbe_timing_info timing;
	int ret;

	/* Limit bpp to 8, 16, and 32 */
	if (var->bits_per_pixel <= 8)
		var->bits_per_pixel = 8;
	else if (var->bits_per_pixel <= 16)
		var->bits_per_pixel = 16;
	else if (var->bits_per_pixel <= 32)
		var->bits_per_pixel = 32;
	else
		return -EINVAL;

	/* Check the mode can be mapped linearly with the tile table trick. */
	/* This requires width x height x bytes/pixel be a multiple of 512 */
	/* (4096 bits == 512 bytes, hence the & 4095 on the bit count) */
	if ((var->xres * var->yres * var->bits_per_pixel) & 4095)
		return -EINVAL;

	var->grayscale = 0;	/* No grayscale for now */

	/* NOTE(review): pixclock is written before the error check, so a
	 * negative error code briefly lands in var->pixclock on failure;
	 * the caller discards var in that case — presumably harmless. */
	ret = compute_gbe_timing(var, &timing);
	var->pixclock = ret;
	if (ret < 0)
		return -EINVAL;

	/* Adjust virtual resolution, if necessary */
	if (var->xres > var->xres_virtual || (!ywrap && !ypan))
		var->xres_virtual = var->xres;
	if (var->yres > var->yres_virtual || (!ywrap && !ypan))
		var->yres_virtual = var->yres;

	if (var->vmode & FB_VMODE_CONUPDATE) {
		var->vmode |= FB_VMODE_YWRAP;
		var->xoffset = info->var.xoffset;
		var->yoffset = info->var.yoffset;
	}

	/* No grayscale for now */
	var->grayscale = 0;

	/* Memory limit */
	line_length = var->xres_virtual * var->bits_per_pixel / 8;
	if (line_length * var->yres_virtual > gbe_mem_size)
		return -ENOMEM;	/* Virtual resolution too high */

	switch (var->bits_per_pixel) {
	case 8:
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 0;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case 16:		/* RGB 1555 */
		var->red.offset = 10;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 5;
		var->blue.offset = 0;
		var->blue.length = 5;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case 32:		/* RGB 8888 */
		var->red.offset = 24;
		var->red.length = 8;
		var->green.offset = 16;
		var->green.length = 8;
		var->blue.offset = 8;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 8;
		break;
	}
	var->red.msb_right = 0;
	var->green.msb_right = 0;
	var->blue.msb_right = 0;
	var->transp.msb_right = 0;

	/* report the margins the hardware will actually use */
	var->left_margin = timing.htotal - timing.hsync_end;
	var->right_margin = timing.hsync_start - timing.width;
	var->upper_margin = timing.vtotal - timing.vsync_end;
	var->lower_margin = timing.vsync_start - timing.height;
	var->hsync_len = timing.hsync_end - timing.hsync_start;
	var->vsync_len = timing.vsync_end - timing.vsync_start;

	return 0;
}
/*
 * Map the framebuffer into user space.  Because the framebuffer is a
 * collection of independent 64 kB tiles rather than one contiguous
 * physical region, each tile in the requested range is remapped
 * separately via the tile translation table.
 */
static int gbefb_mmap(struct fb_info *info,
			struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long addr;
	unsigned long phys_addr, phys_size;
	u16 *tile;

	/* check range */
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;
	if (offset + size > gbe_mem_size)
		return -EINVAL;

	/* remap using the fastest write-through mode on architecture */
	/* try not polluting the cache when possible */
	pgprot_val(vma->vm_page_prot) =
		pgprot_fb(pgprot_val(vma->vm_page_prot));

	vma->vm_flags |= VM_IO | VM_RESERVED;

	/* look for the starting tile */
	tile = &gbe_tiles.cpu[offset >> TILE_SHIFT];
	addr = vma->vm_start;
	offset &= TILE_MASK;	/* offset within the first tile */

	/* remap each tile separately */
	do {
		phys_addr = (((unsigned long) (*tile)) << TILE_SHIFT) + offset;
		if ((offset + size) < TILE_SIZE)
			phys_size = size;
		else
			phys_size = TILE_SIZE - offset;

		if (remap_pfn_range(vma, addr, phys_addr >> PAGE_SHIFT,
				phys_size, vma->vm_page_prot))
			return -EAGAIN;

		offset = 0;	/* subsequent tiles start at their beginning */
		size -= phys_size;
		addr += phys_size;
		tile++;
	} while (size);

	return 0;
}
/* fbdev operations: mode handling and palette are hardware-specific;
 * drawing falls back to the generic cfb_* software implementations. */
static struct fb_ops gbefb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= gbefb_check_var,
	.fb_set_par	= gbefb_set_par,
	.fb_setcolreg	= gbefb_setcolreg,
	.fb_mmap	= gbefb_mmap,
	.fb_blank	= gbefb_blank,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
};
/*
 * sysfs
 */

/* sysfs "size" attribute: report the framebuffer memory size in bytes. */
static ssize_t gbefb_show_memsize(struct device *dev, struct device_attribute *attr, char *buf)
{
	/* gbe_mem_size is unsigned int, so use %u (was %d: signed/unsigned
	 * format specifier mismatch). */
	return snprintf(buf, PAGE_SIZE, "%u\n", gbe_mem_size);
}
static DEVICE_ATTR(size, S_IRUGO, gbefb_show_memsize, NULL);
/* sysfs "revision" attribute: report the GBE chip revision number. */
static ssize_t gbefb_show_rev(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", gbe_revision);
}
static DEVICE_ATTR(revision, S_IRUGO, gbefb_show_rev, NULL);
/* Remove the sysfs attributes created by gbefb_create_sysfs(). */
static void __devexit gbefb_remove_sysfs(struct device *dev)
{
	device_remove_file(dev, &dev_attr_size);
	device_remove_file(dev, &dev_attr_revision);
}
static void gbefb_create_sysfs(struct device *dev)
{
device_create_file(dev, &dev_attr_size);
device_create_file(dev, &dev_attr_revision);
}
/*
 * Initialization
 *
 * Parse the comma-separated "gbefb=" option string:
 *   monitor:crt|lcd|1600sw  - select CRT or flat-panel defaults
 *   mem:<size>              - framebuffer size (clamped to the
 *                             compile-time maximum and tile size)
 *   anything else           - treated as a video mode string
 */
static int __devinit gbefb_setup(char *options)
{
	char *this_opt;

	if (!options || !*options)
		return 0;

	while ((this_opt = strsep(&options, ",")) != NULL) {
		if (!strncmp(this_opt, "monitor:", 8)) {
			if (!strncmp(this_opt + 8, "crt", 3)) {
				flat_panel_enabled = 0;
				default_var = &default_var_CRT;
				default_mode = &default_mode_CRT;
			} else if (!strncmp(this_opt + 8, "1600sw", 6) ||
				   !strncmp(this_opt + 8, "lcd", 3)) {
				flat_panel_enabled = 1;
				default_var = &default_var_LCD;
				default_mode = &default_mode_LCD;
			}
		} else if (!strncmp(this_opt, "mem:", 4)) {
			gbe_mem_size = memparse(this_opt + 4, &this_opt);
			/* clamp to the compile-time maximum ... */
			if (gbe_mem_size > CONFIG_FB_GBE_MEM * 1024 * 1024)
				gbe_mem_size = CONFIG_FB_GBE_MEM * 1024 * 1024;
			/* ... and never go below one hardware tile */
			if (gbe_mem_size < TILE_SIZE)
				gbe_mem_size = TILE_SIZE;
		} else
			mode_option = this_opt;
	}
	return 0;
}
/*
 * Probe: map the GBE registers, allocate the tile table and the
 * framebuffer memory (boot-reserved or DMA-coherent), fill the tile
 * table, select the initial mode, and register the framebuffer.
 * Resources are released in reverse order via the goto chain on error.
 */
static int __devinit gbefb_probe(struct platform_device *p_dev)
{
	int i, ret = 0;
	struct fb_info *info;
	struct gbefb_par *par;
#ifndef MODULE
	char *options = NULL;
#endif

	info = framebuffer_alloc(sizeof(struct gbefb_par), &p_dev->dev);
	if (!info)
		return -ENOMEM;

#ifndef MODULE
	if (fb_get_options("gbefb", &options)) {
		ret = -ENODEV;
		goto out_release_framebuffer;
	}
	gbefb_setup(options);
#endif

	if (!request_mem_region(GBE_BASE, sizeof(struct sgi_gbe), "GBE")) {
		printk(KERN_ERR "gbefb: couldn't reserve mmio region\n");
		ret = -EBUSY;
		goto out_release_framebuffer;
	}

	gbe = (struct sgi_gbe *) ioremap(GBE_BASE, sizeof(struct sgi_gbe));
	if (!gbe) {
		printk(KERN_ERR "gbefb: couldn't map mmio region\n");
		ret = -ENXIO;
		goto out_release_mem_region;
	}
	gbe_revision = gbe->ctrlstat & 15;

	/* tile translation table, shared with the hardware via DMA */
	gbe_tiles.cpu =
		dma_alloc_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
				   &gbe_tiles.dma, GFP_KERNEL);
	if (!gbe_tiles.cpu) {
		printk(KERN_ERR "gbefb: couldn't allocate tiles table\n");
		ret = -ENOMEM;
		goto out_unmap;
	}

	if (gbe_mem_phys) {
		/* memory was allocated at boot time */
		gbe_mem = ioremap_nocache(gbe_mem_phys, gbe_mem_size);
		if (!gbe_mem) {
			printk(KERN_ERR "gbefb: couldn't map framebuffer\n");
			ret = -ENOMEM;
			goto out_tiles_free;
		}

		gbe_dma_addr = 0;	/* marks "not DMA-allocated" for cleanup */
	} else {
		/* try to allocate memory with the classical allocator
		 * this has high chance to fail on low memory machines */
		gbe_mem = dma_alloc_coherent(NULL, gbe_mem_size, &gbe_dma_addr,
					     GFP_KERNEL);
		if (!gbe_mem) {
			printk(KERN_ERR "gbefb: couldn't allocate framebuffer memory\n");
			ret = -ENOMEM;
			goto out_tiles_free;
		}

		gbe_mem_phys = (unsigned long) gbe_dma_addr;
	}

#ifdef CONFIG_X86
	mtrr_add(gbe_mem_phys, gbe_mem_size, MTRR_TYPE_WRCOMB, 1);
#endif

	/* map framebuffer memory into tiles table */
	for (i = 0; i < (gbe_mem_size >> TILE_SHIFT); i++)
		gbe_tiles.cpu[i] = (gbe_mem_phys >> TILE_SHIFT) + i;

	info->fbops = &gbefb_ops;
	info->pseudo_palette = pseudo_palette;
	info->flags = FBINFO_DEFAULT;
	info->screen_base = gbe_mem;
	fb_alloc_cmap(&info->cmap, 256, 0);

	/* reset GBE */
	gbe_reset();

	par = info->par;
	/* turn on default video mode */
	if (fb_find_mode(&par->var, info, mode_option, NULL, 0,
			 default_mode, 8) == 0)
		par->var = *default_var;
	info->var = par->var;
	gbefb_check_var(&par->var, info);
	gbefb_encode_fix(&info->fix, &info->var);

	if (register_framebuffer(info) < 0) {
		printk(KERN_ERR "gbefb: couldn't register framebuffer\n");
		ret = -ENXIO;
		/* NOTE(review): the cmap from fb_alloc_cmap() is not freed on
		 * this path — looks like a small leak; confirm against
		 * fb_dealloc_cmap() usage elsewhere. */
		goto out_gbe_unmap;
	}

	platform_set_drvdata(p_dev, info);
	gbefb_create_sysfs(&p_dev->dev);

	printk(KERN_INFO "fb%d: %s rev %d @ 0x%08x using %dkB memory\n",
	       info->node, info->fix.id, gbe_revision, (unsigned) GBE_BASE,
	       gbe_mem_size >> 10);

	return 0;

out_gbe_unmap:
	if (gbe_dma_addr)
		dma_free_coherent(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
	else
		iounmap(gbe_mem);
out_tiles_free:
	dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
			  (void *)gbe_tiles.cpu, gbe_tiles.dma);
out_unmap:
	iounmap(gbe);
out_release_mem_region:
	release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
out_release_framebuffer:
	framebuffer_release(info);

	return ret;
}
/* Tear down the device: unregister, stop the hardware, and release
 * everything acquired in gbefb_probe() in reverse order. */
static int __devexit gbefb_remove(struct platform_device* p_dev)
{
	struct fb_info *info = platform_get_drvdata(p_dev);

	unregister_framebuffer(info);
	gbe_turn_off();
	/* framebuffer memory: DMA-allocated vs boot-reserved (ioremapped) */
	if (gbe_dma_addr)
		dma_free_coherent(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
	else
		iounmap(gbe_mem);
	dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
			  (void *)gbe_tiles.cpu, gbe_tiles.dma);
	release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
	iounmap(gbe);
	gbefb_remove_sysfs(&p_dev->dev);
	framebuffer_release(info);

	return 0;
}
/* Platform driver glue: binds to the "gbefb" platform device created
 * in gbefb_init(). */
static struct platform_driver gbefb_driver = {
	.probe = gbefb_probe,
	.remove = __devexit_p(gbefb_remove),
	.driver	= {
		.name = "gbefb",
	},
};

static struct platform_device *gbefb_device;
/*
 * Module init: register the platform driver, then create and add the
 * single "gbefb" platform device.  On any failure after driver
 * registration, both the device reference and the driver registration
 * are rolled back.
 */
static int __init gbefb_init(void)
{
	int ret;

	ret = platform_driver_register(&gbefb_driver);
	if (ret)
		return ret;

	gbefb_device = platform_device_alloc("gbefb", 0);
	ret = gbefb_device ? platform_device_add(gbefb_device) : -ENOMEM;
	if (ret) {
		/* platform_device_put() tolerates NULL */
		platform_device_put(gbefb_device);
		platform_driver_unregister(&gbefb_driver);
	}
	return ret;
}
/* Module exit: remove the platform device and unregister the driver. */
static void __exit gbefb_exit(void)
{
	platform_device_unregister(gbefb_device);
	platform_driver_unregister(&gbefb_driver);
}

module_init(gbefb_init);
module_exit(gbefb_exit);

MODULE_LICENSE("GPL");
| gpl-2.0 |
UnofficialAOKPLWW/android_kernel_semc_msm7x30 | drivers/net/ethernet/xscale/ixp2000/enp2611.c | 9647 | 6201 | /*
* IXP2400 MSF network device driver for the Radisys ENP2611
* Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
* Dedicated to Marija Kulikova.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <asm/hardware/uengine.h>
#include <asm/mach-types.h>
#include <asm/io.h>
#include "ixpdev.h"
#include "caleb.h"
#include "ixp2400-msf.h"
#include "pm3386.h"
/***********************************************************************
* The Radisys ENP2611 is a PCI form factor board with three SFP GBIC
* slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA
* to the IXP2400.
*
* +-------------+
* SFP GBIC #0 ---+ | +---------+
* | PM3386 #0 +-------+ |
* SFP GBIC #1 ---+ | | "Caleb" | +---------+
* +-------------+ | | | |
* | SPI-3 +---------+ IXP2400 |
* +-------------+ | bridge | | |
* SFP GBIC #2 ---+ | | FPGA | +---------+
* | PM3386 #1 +-------+ |
* | | +---------+
* +-------------+
* ^ ^ ^
* | 1.25Gbaud | 104MHz | 104MHz
* | SERDES ea. | SPI-3 ea. | SPI-3
*
***********************************************************************/
/*
 * MSF configuration for the ENP2611: both RX and TX run in 1x32 MPHY
 * POS-PHY level 3 mode with polled status over the three SPI-3 ports
 * (one per SFP GBIC slot), 64-byte RBUF/TBUF elements and a 16x PLL
 * multiplier on the 104 MHz SPI-3 clocks.
 */
static struct ixp2400_msf_parameters enp2611_msf_parameters =
{
	.rx_mode =		IXP2400_RX_MODE_UTOPIA_POS |
				IXP2400_RX_MODE_1x32 |
				IXP2400_RX_MODE_MPHY |
				IXP2400_RX_MODE_MPHY_32 |
				IXP2400_RX_MODE_MPHY_POLLED_STATUS |
				IXP2400_RX_MODE_MPHY_LEVEL3 |
				IXP2400_RX_MODE_RBUF_SIZE_64,

	.rxclk01_multiplier =	IXP2400_PLL_MULTIPLIER_16,

	.rx_poll_ports =	3,

	/* Identical master POS-PHY L3 settings for all four RX channels. */
	.rx_channel_mode = {
		IXP2400_PORT_RX_MODE_MASTER |
		IXP2400_PORT_RX_MODE_POS_PHY |
		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
		IXP2400_PORT_RX_MODE_ODD_PARITY |
		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_RX_MODE_MASTER |
		IXP2400_PORT_RX_MODE_POS_PHY |
		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
		IXP2400_PORT_RX_MODE_ODD_PARITY |
		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_RX_MODE_MASTER |
		IXP2400_PORT_RX_MODE_POS_PHY |
		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
		IXP2400_PORT_RX_MODE_ODD_PARITY |
		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_RX_MODE_MASTER |
		IXP2400_PORT_RX_MODE_POS_PHY |
		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
		IXP2400_PORT_RX_MODE_ODD_PARITY |
		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
	},

	.tx_mode =		IXP2400_TX_MODE_UTOPIA_POS |
				IXP2400_TX_MODE_1x32 |
				IXP2400_TX_MODE_MPHY |
				IXP2400_TX_MODE_MPHY_32 |
				IXP2400_TX_MODE_MPHY_POLLED_STATUS |
				IXP2400_TX_MODE_MPHY_LEVEL3 |
				IXP2400_TX_MODE_TBUF_SIZE_64,

	.txclk01_multiplier =	IXP2400_PLL_MULTIPLIER_16,

	.tx_poll_ports =	3,

	/* Identical master POS-PHY settings for all four TX channels. */
	.tx_channel_mode = {
		IXP2400_PORT_TX_MODE_MASTER |
		IXP2400_PORT_TX_MODE_POS_PHY |
		IXP2400_PORT_TX_MODE_ODD_PARITY |
		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_TX_MODE_MASTER |
		IXP2400_PORT_TX_MODE_POS_PHY |
		IXP2400_PORT_TX_MODE_ODD_PARITY |
		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_TX_MODE_MASTER |
		IXP2400_PORT_TX_MODE_POS_PHY |
		IXP2400_PORT_TX_MODE_ODD_PARITY |
		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_TX_MODE_MASTER |
		IXP2400_PORT_TX_MODE_POS_PHY |
		IXP2400_PORT_TX_MODE_ODD_PARITY |
		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
	}
};

/* One netdev per SFP GBIC slot; NULL for slots that were not probed. */
static struct net_device *nds[3];

/* Re-armed every HZ/10 to poll link state (see enp2611_check_link_status). */
static struct timer_list link_check_timer;
/* @@@ Poll the SFP moddef0 line too. */
/* @@@ Try to use the pm3386 DOOL interrupt as well. */
/*
 * Timer callback, run every HZ/10: poll the PM3386 link status of each
 * populated port and propagate changes to the netdev carrier state,
 * enabling/disabling the TX path accordingly.
 */
static void enp2611_check_link_status(unsigned long __dummy)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct net_device *dev;
		int status;

		dev = nds[i];
		if (dev == NULL)
			continue;

		status = pm3386_is_link_up(i);
		if (status && !netif_carrier_ok(dev)) {
			/* @@@ Should report autonegotiation status. */
			printk(KERN_INFO "%s: NIC Link is Up\n", dev->name);
			pm3386_enable_tx(i);
			caleb_enable_tx(i);
			netif_carrier_on(dev);
		} else if (!status && netif_carrier_ok(dev)) {
			printk(KERN_INFO "%s: NIC Link is Down\n", dev->name);
			netif_carrier_off(dev);
			caleb_disable_tx(i);
			pm3386_disable_tx(i);
		}
	}

	/*
	 * Re-arm the poll.  mod_timer() is the idiomatic way to update
	 * and (re)activate a timer, instead of poking ->expires and
	 * calling add_timer() by hand.
	 */
	mod_timer(&link_check_timer, jiffies + HZ / 10);
}
/*
 * Administratively bring a port up or down.  On the down path TX is
 * quiesced first, then carrier is dropped and RX shut off; the up path
 * is the mirror image.
 */
static void enp2611_set_port_admin_status(int port, int up)
{
	if (!up) {
		caleb_disable_tx(port);
		pm3386_disable_tx(port);
		/* @@@ Flush out pending packets. */
		pm3386_set_carrier(port, 0);
		pm3386_disable_rx(port);
		caleb_disable_rx(port);
		return;
	}

	caleb_enable_rx(port);

	pm3386_set_carrier(port, 1);
	pm3386_enable_rx(port);
}
/*
 * Module init: reset the SPI-3 bridge and the PM3386s, allocate one
 * netdev per detected port, bring up the MSF and the common ixpdev
 * layer, then start the periodic link poll.
 *
 * Returns 0 on success, -ENODEV on the wrong machine, -ENOMEM or
 * -EINVAL on allocation / ixpdev failure (all netdevs freed again).
 */
static int __init enp2611_init_module(void)
{ 
	int ports;
	int i;

	if (!machine_is_enp2611())
		return -ENODEV;

	caleb_reset();
	pm3386_reset();

	ports = pm3386_port_count();
	for (i = 0; i < ports; i++) {
		nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
		if (nds[i] == NULL) {
			/* Unwind the netdevs allocated so far. */
			while (--i >= 0)
				free_netdev(nds[i]);
			return -ENOMEM;
		}

		pm3386_init_port(i);
		pm3386_get_mac(i, nds[i]->dev_addr);
	}

	ixp2400_msf_init(&enp2611_msf_parameters);

	if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
		for (i = 0; i < ports; i++)
			if (nds[i])
				free_netdev(nds[i]);
		return -EINVAL;
	}

	/* First link poll fires immediately; the callback re-arms it. */
	init_timer(&link_check_timer);
	link_check_timer.function = enp2611_check_link_status;
	link_check_timer.expires = jiffies;
	add_timer(&link_check_timer);

	return 0;
}
/*
 * Module exit: stop the link poll, tear down the ixpdev layer and free
 * the netdevs.
 */
static void __exit enp2611_cleanup_module(void)
{
	int i;

	del_timer_sync(&link_check_timer);

	ixpdev_deinit();
	/*
	 * pm3386_port_count() may have reported fewer than 3 ports, in
	 * which case the tail of nds[] is still NULL; free_netdev() does
	 * not tolerate a NULL pointer, so check each slot (the init
	 * error path already does the same).
	 */
	for (i = 0; i < 3; i++)
		if (nds[i])
			free_netdev(nds[i]);
}
module_init(enp2611_init_module);
module_exit(enp2611_cleanup_module);
MODULE_LICENSE("GPL");
| gpl-2.0 |
daver18qc/android_kernel_samsung_kylessopen | Documentation/laptops/hpfall.c | 10927 | 2486 | /* Disk protection for HP machines.
*
* Copyright 2008 Eric Piel
* Copyright 2009 Pavel Machek <pavel@ucw.cz>
*
* GPLv2.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <signal.h>
#include <sys/mman.h>
#include <sched.h>
char unload_heads_path[64];

/*
 * Build the sysfs "unload_heads" path for @device (e.g. "/dev/sda")
 * into unload_heads_path.
 *
 * Returns 0 on success, -EINVAL if @device is not a "/dev/..." node.
 *
 * The device-name copy uses snprintf() instead of strncpy(): strncpy()
 * with the full buffer size does not NUL-terminate when the source is
 * as long as the buffer, which would have made the following snprintf()
 * read past the end of devname for very long arguments.
 */
int set_unload_heads_path(char *device)
{
	char devname[64];

	if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0)
		return -EINVAL;
	snprintf(devname, sizeof(devname), "%s", device + 5);

	snprintf(unload_heads_path, sizeof(unload_heads_path),
				"/sys/block/%s/device/unload_heads", devname);
	return 0;
}
/*
 * Probe whether the unload_heads sysfs attribute exists and is
 * readable.  Returns 1 when usable, otherwise prints the reason via
 * perror() and returns 0.
 */
int valid_disk(void)
{
	int fd;

	fd = open(unload_heads_path, O_RDONLY);
	if (fd >= 0) {
		close(fd);
		return 1;
	}

	perror(unload_heads_path);
	return 0;
}
/*
 * Write the decimal representation of @i into the file at @path.
 * Any open/short-write failure is fatal: the daemon exits.
 */
void write_int(char *path, int i)
{
	char buf[1024];
	size_t len;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		exit(1);
	}

	len = sprintf(buf, "%d", i);
	if (write(fd, buf, len) != (ssize_t)len) {
		perror("write");
		exit(1);
	}

	close(fd);
}
/* Drive the HP hdd-protection LED via sysfs (nonzero = on, 0 = off). */
void set_led(int on)
{
	write_int("/sys/class/leds/hp::hddprotect/brightness", on);
}
/* Park ("unload") the disk heads for @seconds seconds; 0 cancels the
 * protection.  The sysfs attribute expects milliseconds. */
void protect(int seconds)
{
	write_int(unload_heads_path, seconds*1000);
}
/*
 * Report whether the machine is on AC power.
 *
 * Not implemented yet (it should read the sysfs node below); assume AC
 * so the caller picks the short alarm interval.  The previous body had
 * no return statement at all, which is undefined behavior once the
 * result is used.
 */
int on_ac(void)
{
	// /sys/class/power_supply/AC0/online
	return 1;
}
/*
 * Report whether the laptop lid is open.
 *
 * Not implemented yet (it should read the procfs node below); assume
 * open.  The previous body had no return statement, which is undefined
 * behavior once the result is used.
 */
int lid_open(void)
{
	// /proc/acpi/button/lid/LID/state
	return 1;
}
/*
 * SIGALRM handler: the protection window elapsed without a new
 * freefall event, so unpark the heads and turn the LED off.
 *
 * Takes the signal-number argument required by signal(2) handlers; the
 * old ignore_me(void) prototype did not match sighandler_t.
 */
void ignore_me(int signum)
{
	(void)signum;
	protect(0);
	set_led(0);
}
/*
 * Daemon entry point: block on /dev/freefall and, whenever the
 * accelerometer reports a drop, park the disk heads for a while via the
 * sysfs unload_heads knob.  SIGALRM (set up in ignore_me) ends the
 * protection window early.
 */
int main(int argc, char **argv)
{
	int fd, ret;
	struct sched_param param;

	/* Optional single argument selects the disk (default /dev/sda). */
	if (argc == 1)
		ret = set_unload_heads_path("/dev/sda");
	else if (argc == 2)
		ret = set_unload_heads_path(argv[1]);
	else
		ret = -EINVAL;

	if (ret || !valid_disk()) {
		fprintf(stderr, "usage: %s <device> (default: /dev/sda)\n",
				argv[0]);
		exit(1);
	}

	fd = open("/dev/freefall", O_RDONLY);
	if (fd < 0) {
		perror("/dev/freefall");
		return EXIT_FAILURE;
	}

	daemon(0, 0);

	/* Real-time priority and locked memory: a page fault here would
	 * defeat the point of reacting to a freefall quickly. */
	param.sched_priority = sched_get_priority_max(SCHED_FIFO);
	sched_setscheduler(0, SCHED_FIFO, &param);
	mlockall(MCL_CURRENT|MCL_FUTURE);

	signal(SIGALRM, ignore_me);

	for (;;) {
		unsigned char count;

		/* Blocks until a freefall event; interrupted by SIGALRM. */
		ret = read(fd, &count, sizeof(count));
		alarm(0);
		if ((ret == -1) && (errno == EINTR)) {
			/* Alarm expired, time to unpark the heads */
			continue;
		}

		if (ret != sizeof(count)) {
			perror("read");
			break;
		}

		/* Park for up to 21 s; the alarm normally unparks sooner.
		 * The "1 ||" deliberately forces the short interval for now. */
		protect(21);
		set_led(1);
		if (1 || on_ac() || lid_open())
			alarm(2);
		else
			alarm(20);
	}

	close(fd);
	return EXIT_SUCCESS;
}
| gpl-2.0 |
ries-tech/linux | drivers/input/touchscreen/da9052_tsi.c | 176 | 8870 | /*
* TSI driver for Dialog DA9052
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mfd/da9052/reg.h>
#include <linux/mfd/da9052/da9052.h>
/* Bit in DA9052_TSI_LSB_REG that indicates the pen is still down. */
#define TSI_PEN_DOWN_STATUS		0x40

/* Per-device state for the DA9052 touchscreen interface (TSI). */
struct da9052_tsi {
	struct da9052 *da9052;		/* parent MFD device */
	struct input_dev *dev;		/* input device we report through */
	struct delayed_work ts_pen_work; /* polls pen state while touched */
	struct mutex mutex;
	unsigned int irq_pendwn;	/* virtual IRQ: pen-down event */
	unsigned int irq_datardy;	/* virtual IRQ: sample ready */
	bool stopped;			/* true when input device is closed */
	bool adc_on;			/* TSI ADC currently enabled */
};
/* Switch the TSI ADC on/off (TSI_CONT_A bit 0) and cache the state. */
static void da9052_ts_adc_toggle(struct da9052_tsi *tsi, bool on)
{
	da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 0, on);
	tsi->adc_on = on;
}
/*
 * Pen-down interrupt: switch from "waiting for touch" to "streaming
 * samples".  Only one of the two IRQs is ever unmasked at a time;
 * da9052_ts_pen_work() swaps them back on pen-up.
 */
static irqreturn_t da9052_ts_pendwn_irq(int irq, void *data)
{
	struct da9052_tsi *tsi = data;

	if (!tsi->stopped) {
		/* Mask PEN_DOWN event and unmask TSI_READY event */
		disable_irq_nosync(tsi->irq_pendwn);
		enable_irq(tsi->irq_datardy);

		da9052_ts_adc_toggle(tsi, true);

		schedule_delayed_work(&tsi->ts_pen_work, HZ / 50);
	}

	return IRQ_HANDLED;
}
/*
 * Read one X/Y/Z sample and report it to the input layer.
 *
 * Each coordinate is 10 bits: 8 MSBs in a per-axis register plus 2 LSBs
 * packed into the shared LSB register (X in bits 1:0, Y in 3:2, Z in
 * 5:4).  Any register read failure silently drops the whole sample.
 */
static void da9052_ts_read(struct da9052_tsi *tsi)
{
	struct input_dev *input = tsi->dev;
	int ret;
	u16 x, y, z;
	u8 v;

	ret = da9052_reg_read(tsi->da9052, DA9052_TSI_X_MSB_REG);
	if (ret < 0)
		return;

	x = (u16) ret;

	ret = da9052_reg_read(tsi->da9052, DA9052_TSI_Y_MSB_REG);
	if (ret < 0)
		return;

	y = (u16) ret;

	ret = da9052_reg_read(tsi->da9052, DA9052_TSI_Z_MSB_REG);
	if (ret < 0)
		return;

	z = (u16) ret;

	ret = da9052_reg_read(tsi->da9052, DA9052_TSI_LSB_REG);
	if (ret < 0)
		return;

	v = (u8) ret;

	/* Combine MSBs and LSBs into 10-bit values. */
	x = ((x << 2) & 0x3fc) | (v & 0x3);
	y = ((y << 2) & 0x3fc) | ((v & 0xc) >> 2);
	z = ((z << 2) & 0x3fc) | ((v & 0x30) >> 4);

	input_report_key(input, BTN_TOUCH, 1);
	input_report_abs(input, ABS_X, x);
	input_report_abs(input, ABS_Y, y);
	input_report_abs(input, ABS_PRESSURE, z);
	input_sync(input);
}
/* Sample-ready interrupt: forward the freshly converted coordinates. */
static irqreturn_t da9052_ts_datardy_irq(int irq, void *data)
{
	struct da9052_tsi *tsi = data;

	da9052_ts_read(tsi);

	return IRQ_HANDLED;
}
/*
 * Delayed work run every HZ/50 while the pen is down.  While the
 * pen-down status bit stays set it simply re-arms itself; on pen-up it
 * turns the ADC off, reports the release and swaps the IRQ masks back
 * (mask TSI_READY, unmask PEN_DOWN) so the next touch restarts the
 * cycle.
 */
static void da9052_ts_pen_work(struct work_struct *work)
{
	struct da9052_tsi *tsi = container_of(work, struct da9052_tsi,
					      ts_pen_work.work);
	if (!tsi->stopped) {
		int ret = da9052_reg_read(tsi->da9052, DA9052_TSI_LSB_REG);

		if (ret < 0 || (ret & TSI_PEN_DOWN_STATUS)) {
			/* Pen is still DOWN (or read error) */
			schedule_delayed_work(&tsi->ts_pen_work, HZ / 50);
		} else {
			struct input_dev *input = tsi->dev;

			/* Pen UP */
			da9052_ts_adc_toggle(tsi, false);

			/* Report Pen UP */
			input_report_key(input, BTN_TOUCH, 0);
			input_report_abs(input, ABS_PRESSURE, 0);
			input_sync(input);

			/*
			 * FIXME: Fixes the unhandled irq issue when quick
			 * pen down and pen up events occurs
			 */
			ret = da9052_reg_update(tsi->da9052,
						DA9052_EVENT_B_REG, 0xC0, 0xC0);
			if (ret < 0)
				return;

			/* Mask TSI_READY event and unmask PEN_DOWN event */
			disable_irq(tsi->irq_datardy);
			enable_irq(tsi->irq_pendwn);
		}
	}
}
/*
 * Route GPIOs 2..7 to their TSI alternate function by clearing the
 * relevant mode bits.  Returns 0 on success or the first negative
 * error from the register update.
 */
static int __devinit da9052_ts_configure_gpio(struct da9052 *da9052)
{
	int ret;

	ret = da9052_reg_update(da9052, DA9052_GPIO_2_3_REG, 0x30, 0);
	if (ret < 0)
		return ret;

	ret = da9052_reg_update(da9052, DA9052_GPIO_4_5_REG, 0x33, 0);
	if (ret < 0)
		return ret;

	ret = da9052_reg_update(da9052, DA9052_GPIO_6_7_REG, 0x33, 0);
	return ret < 0 ? ret : 0;
}
/*
 * One-time TSI block setup: route the pins via GPIO, select the 1 ms
 * sample rate, program delay/skip/mode and power the TSI reference
 * from LDO9.  Returns 0 or the first negative error encountered.
 */
static int __devinit da9052_configure_tsi(struct da9052_tsi *tsi)
{
	int error;

	error = da9052_ts_configure_gpio(tsi->da9052);
	if (error)
		return error;

	/* Measure TSI sample every 1ms */
	error = da9052_reg_update(tsi->da9052, DA9052_ADC_CONT_REG,
				  1 << 6, 1 << 6);
	if (error < 0)
		return error;

	/* TSI_DELAY: 3 slots, TSI_SKIP: 0 slots, TSI_MODE: XYZP */
	error = da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 0xFC, 0xC0);
	if (error < 0)
		return error;

	/* Supply TSIRef through LD09 */
	error = da9052_reg_write(tsi->da9052, DA9052_LDO9_REG, 0x59);
	if (error < 0)
		return error;

	return 0;
}
/*
 * input_dev->open(): clear the stop flag (the mb() orders the store
 * against the flag reads in the IRQ handler and work item), unmask the
 * PEN_DOWN interrupt and enable the pen-detect circuit.
 */
static int da9052_ts_input_open(struct input_dev *input_dev)
{
	struct da9052_tsi *tsi = input_get_drvdata(input_dev);

	tsi->stopped = false;
	mb();

	/* Unmask PEN_DOWN event */
	enable_irq(tsi->irq_pendwn);

	/* Enable Pen Detect Circuit */
	return da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG,
				 1 << 1, 1 << 1);
}
/*
 * input_dev->close(): stop all activity.  The ->stopped store happens
 * before the IRQ manipulation (mb()) so in-flight handlers bail out,
 * and the enable/disable calls are carefully ordered to keep the IRQ
 * depth counters balanced whatever state the pen machine was in.
 */
static void da9052_ts_input_close(struct input_dev *input_dev)
{
	struct da9052_tsi *tsi = input_get_drvdata(input_dev);

	tsi->stopped = true;
	mb();
	disable_irq(tsi->irq_pendwn);
	cancel_delayed_work_sync(&tsi->ts_pen_work);

	if (tsi->adc_on) {
		disable_irq(tsi->irq_datardy);
		da9052_ts_adc_toggle(tsi, false);

		/*
		 * If ADC was on that means that pendwn IRQ was disabled
		 * twice and we need to enable it to keep enable/disable
		 * counter balanced. IRQ is still off though.
		 */
		enable_irq(tsi->irq_pendwn);
	}

	/* Disable Pen Detect Circuit */
	da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 1, 0);
}
/*
 * Probe: allocate driver state and the input device, quiesce the TSI
 * hardware, register both IRQ handlers (initially masked), program the
 * TSI block and finally register the input device.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on the error paths.
 */
static int __devinit da9052_ts_probe(struct platform_device *pdev)
{
	struct da9052 *da9052;
	struct da9052_tsi *tsi;
	struct input_dev *input_dev;
	int irq_pendwn;
	int irq_datardy;
	int error;

	da9052 = dev_get_drvdata(pdev->dev.parent);
	if (!da9052)
		return -EINVAL;

	irq_pendwn = platform_get_irq_byname(pdev, "PENDWN");
	irq_datardy = platform_get_irq_byname(pdev, "TSIRDY");
	if (irq_pendwn < 0 || irq_datardy < 0) {
		dev_err(da9052->dev, "Unable to determine device interrupts\n");
		return -ENXIO;
	}

	/* Both checked together: kfree(NULL)/input_free_device(NULL) are
	 * safe on the shared error path. */
	tsi = kzalloc(sizeof(struct da9052_tsi), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!tsi || !input_dev) {
		error = -ENOMEM;
		goto err_free_mem;
	}

	tsi->da9052 = da9052;
	tsi->dev = input_dev;
	tsi->irq_pendwn = da9052->irq_base + irq_pendwn;
	tsi->irq_datardy = da9052->irq_base + irq_datardy;
	tsi->stopped = true;
	INIT_DELAYED_WORK(&tsi->ts_pen_work, da9052_ts_pen_work);

	input_dev->id.version = 0x0101;
	input_dev->id.vendor = 0x15B6;
	input_dev->id.product = 0x9052;
	input_dev->name = "Dialog DA9052 TouchScreen Driver";
	input_dev->dev.parent = &pdev->dev;
	input_dev->open = da9052_ts_input_open;
	input_dev->close = da9052_ts_input_close;

	__set_bit(EV_ABS, input_dev->evbit);
	__set_bit(EV_KEY, input_dev->evbit);
	__set_bit(BTN_TOUCH, input_dev->keybit);

	/* 10-bit coordinates (see da9052_ts_read). */
	input_set_abs_params(input_dev, ABS_X, 0, 1023, 0, 0);
	input_set_abs_params(input_dev, ABS_Y, 0, 1023, 0, 0);
	input_set_abs_params(input_dev, ABS_PRESSURE, 0, 1023, 0, 0);

	input_set_drvdata(input_dev, tsi);

	/* Disable Pen Detect Circuit */
	da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 1, 0);

	/* Disable ADC */
	da9052_ts_adc_toggle(tsi, false);

	error = request_threaded_irq(tsi->irq_pendwn,
				     NULL, da9052_ts_pendwn_irq,
				     IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				     "PENDWN", tsi);
	if (error) {
		dev_err(tsi->da9052->dev,
			"Failed to register PENDWN IRQ %d, error = %d\n",
			tsi->irq_pendwn, error);
		goto err_free_mem;
	}

	error = request_threaded_irq(tsi->irq_datardy,
				     NULL, da9052_ts_datardy_irq,
				     IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				     "TSIRDY", tsi);
	if (error) {
		dev_err(tsi->da9052->dev,
			"Failed to register TSIRDY IRQ %d, error = %d\n",
			tsi->irq_datardy, error);
		goto err_free_pendwn_irq;
	}

	/* Mask PEN_DOWN and TSI_READY events; ->open() unmasks PEN_DOWN. */
	disable_irq(tsi->irq_pendwn);
	disable_irq(tsi->irq_datardy);

	error = da9052_configure_tsi(tsi);
	if (error)
		goto err_free_datardy_irq;

	error = input_register_device(tsi->dev);
	if (error)
		goto err_free_datardy_irq;

	platform_set_drvdata(pdev, tsi);

	return 0;

err_free_datardy_irq:
	free_irq(tsi->irq_datardy, tsi);
err_free_pendwn_irq:
	free_irq(tsi->irq_pendwn, tsi);
err_free_mem:
	kfree(tsi);
	input_free_device(input_dev);

	return error;
}
/*
 * Remove: restore LDO9 to its default setting, release both IRQs and
 * unregister the input device (which also closes it).
 */
static int __devexit da9052_ts_remove(struct platform_device *pdev)
{
	struct da9052_tsi *tsi = platform_get_drvdata(pdev);

	da9052_reg_write(tsi->da9052, DA9052_LDO9_REG, 0x19);

	free_irq(tsi->irq_pendwn, tsi);
	free_irq(tsi->irq_datardy, tsi);

	input_unregister_device(tsi->dev);
	kfree(tsi);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
/* Platform driver glue; bound by the "da9052-tsi" MFD cell. */
static struct platform_driver da9052_tsi_driver = {
	.probe	= da9052_ts_probe,
	.remove	= __devexit_p(da9052_ts_remove),
	.driver	= {
		.name	= "da9052-tsi",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(da9052_tsi_driver);

MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9052");
MODULE_AUTHOR("Anthony Olech <Anthony.Olech@diasemi.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da9052-tsi");
| gpl-2.0 |
TI-OpenLink/wl12xx_soldel_maintenance | mm/page-writeback.c | 176 | 40661 | /*
* mm/page-writeback.c
*
* Copyright (C) 2002, Linus Torvalds.
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* Contains functions related to writing back dirty pages at the
* address_space level.
*
* 10Apr2002 Andrew Morton
* Initial version
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <trace/events/writeback.h>
/*
* After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
* will look to see if it needs to force writeback or throttling.
*/
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to
 * write: the number of freshly dirtied pages (but at least
 * ratelimit_pages) plus 50%, so that reasonably large amounts of I/O
 * get submitted per call.
 */
static inline long sync_writeback_pages(unsigned long dirtied)
{
	unsigned long chunk;

	chunk = (dirtied < (unsigned long)ratelimit_pages) ?
			(unsigned long)ratelimit_pages : dirtied;

	return chunk + chunk / 2;
}
/* The following parameters are exported via /proc/sys/vm */
/*
* Start background writeback (via writeback threads) at this percentage
*/
int dirty_background_ratio = 10;
/*
* dirty_background_bytes starts at 0 (disabled) so that it is a function of
* dirty_background_ratio * the amount of dirtyable memory
*/
unsigned long dirty_background_bytes;
/*
* free highmem will not be subtracted from the total free memory
* for calculating free ratios if vm_highmem_is_dirtyable is true
*/
int vm_highmem_is_dirtyable;
/*
* The generator of dirty data starts writeback at this percentage
*/
int vm_dirty_ratio = 20;
/*
* vm_dirty_bytes starts at 0 (disabled) so that it is a function of
* vm_dirty_ratio * the amount of dirtyable memory
*/
unsigned long vm_dirty_bytes;
/*
* The interval between `kupdate'-style writebacks
*/
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
/*
* The longest time for which data is allowed to remain dirty
*/
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
/*
* Flag that makes the machine dump writes/reads and block dirtyings.
*/
int block_dump;
/*
* Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
* a full sync is triggered after this time elapses without any disk activity.
*/
int laptop_mode;
EXPORT_SYMBOL(laptop_mode);
/* End of sysctl-exported parameters */
/*
* Scale the writeback cache size proportional to the relative writeout speeds.
*
* We do this by keeping a floating proportion between BDIs, based on page
* writeback completions [end_page_writeback()]. Those devices that write out
* pages fastest will get the larger share, while the slower will get a smaller
* share.
*
* We use page writeout completions because we are interested in getting rid of
* dirty pages. Having them written out is the primary goal.
*
* We introduce a concept of time, a period over which we measure these events,
* because demand can/will vary over time. The length of this period itself is
* measured in page writeback completions.
*
*/
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;
/*
* couple the period to the dirty_ratio:
*
* period/2 ~ roundup_pow_of_two(dirty limit)
*/
/* Derive the proportion period shift from the current dirty limit so
 * that period/2 ~ roundup_pow_of_two(dirty limit). */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	/* An explicit byte limit overrides the ratio-based one. */
	if (vm_dirty_bytes)
		dirty_total = vm_dirty_bytes / PAGE_SIZE;
	else
		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
				100;
	return 2 + ilog2(dirty_total - 1);
}
/*
* update the period when the dirty threshold changes.
*/
/* Re-derive the proportion period after a dirty-threshold change and
 * apply it to both the completion and the dirtied counters. */
static void update_completion_period(void)
{
	int shift = calc_period_shift();
	prop_change_shift(&vm_completions, shift);
	prop_change_shift(&vm_dirties, shift);
}
/* Sysctl handler for vm.dirty_background_ratio: writing a ratio
 * disables the byte-based background limit (they are exclusive). */
int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}
/* Sysctl handler for vm.dirty_background_bytes: writing a byte count
 * disables the ratio-based background limit (they are exclusive). */
int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}
/* Sysctl handler for vm.dirty_ratio: on an actual change, rescale the
 * proportion period and disable the byte-based limit. */
int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		update_completion_period();
		vm_dirty_bytes = 0;
	}
	return ret;
}
/* Sysctl handler for vm.dirty_bytes: on an actual change, rescale the
 * proportion period and disable the ratio-based limit. */
int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		update_completion_period();
		vm_dirty_ratio = 0;
	}
	return ret;
}
/*
* Increment the BDI's writeout completion count and the global writeout
* completion count. Called from test_clear_page_writeback().
*/
/* Account one page-writeback completion to this BDI's share (capped at
 * its max_prop_frac).  Caller must have interrupts disabled. */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
			      bdi->max_prop_frac);
}
/* IRQ-safe wrapper around __bdi_writeout_inc(). */
void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);
/* Account one freshly dirtied page to @tsk's share of vm_dirties. */
void task_dirty_inc(struct task_struct *tsk)
{
	prop_inc_single(&vm_dirties, &tsk->dirties);
}
/*
* Obtain an accurate fraction of the BDI's portion.
*/
/*
 * Obtain an accurate fraction of the BDI's portion of the total
 * writeout completions.  BDIs that are not writeback-capable get a
 * zero share.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	if (!bdi_cap_writeback_dirty(bdi)) {
		*numerator = 0;
		*denominator = 1;
		return;
	}

	prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
}
/* Fraction of recently dirtied pages attributable to @tsk. */
static inline void task_dirties_fraction(struct task_struct *tsk,
		long *numerator, long *denominator)
{
	prop_fraction_single(&vm_dirties, &tsk->dirties,
				numerator, denominator);
}
/*
* task_dirty_limit - scale down dirty throttling threshold for one task
*
* task specific dirty limit:
*
* dirty -= (dirty/8) * p_{t}
*
* To protect light/slow dirtying tasks from heavier/fast ones, we start
* throttling individual tasks before reaching the bdi dirty limit.
* Relatively low thresholds will be allocated to heavy dirtiers. So when
* dirty pages grow large, heavy dirtiers will be throttled first, which will
* effectively curb the growth of dirty pages. Light dirtiers with high enough
* dirty threshold may never get throttled.
*/
static unsigned long task_dirty_limit(struct task_struct *tsk,
				       unsigned long bdi_dirty)
{
	long numerator, denominator;
	unsigned long dirty = bdi_dirty;
	/* At most 1/8 of the bdi limit is scaled away per task. */
	u64 inv = dirty >> 3;

	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
	do_div(inv, denominator);

	dirty -= inv;

	/* Never hand out less than half the bdi limit. */
	return max(dirty, bdi_dirty/2);
}
/*
*
*/
static unsigned int bdi_min_ratio;	/* sum of all per-bdi minimum shares */

/*
 * Reserve @min_ratio percent of the global dirty threshold for @bdi.
 * The reservations of all BDIs together must stay below 100%, and a
 * bdi's minimum may not exceed its maximum.
 */
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		/* From here on, min_ratio is the delta vs the old value. */
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
/*
 * Cap @bdi's share of the dirty threshold to @max_ratio percent (and
 * precompute the matching proportion fraction).  Must not drop below
 * the bdi's configured minimum.
 */
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);
/*
* Work out the current dirty-memory clamping and background writeout
* thresholds.
*
* The main aim here is to lower them aggressively if there is a lot of mapped
* memory around. To avoid stressing page reclaim with lots of unreclaimable
* pages. It is better to clamp down on writers than to start swapping, and
* performing lots of scanning.
*
* We only allow 1/2 of the currently-unmapped memory to be dirtied.
*
* We don't permit the clamping level to fall below 5% - that is getting rather
* excessive.
*
* We make sure that the background writeout level is below the adjusted
* clamping level.
*/
/* Number of highmem pages (free + reclaimable) that could be dirtied,
 * clamped to @total; 0 on !CONFIG_HIGHMEM kernels. */
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES) +
		     zone_reclaimable_pages(z);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}
/**
* determine_dirtyable_memory - amount of memory that may be used
*
* Returns the numebr of pages that can currently be freed and used
* by the kernel for direct mappings.
*/
unsigned long determine_dirtyable_memory(void)
{
unsigned long x;
x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
return x + 1; /* Ensure that we never return 0 */
}
/*
* global_dirty_limits - background-writeback and dirty-throttling thresholds
*
* Calculate the dirty thresholds based on sysctl parameters
* - vm.dirty_background_ratio or vm.dirty_background_bytes
* - vm.dirty_ratio or vm.dirty_bytes
* The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
* real-time tasks.
*/
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	unsigned long background;
	unsigned long dirty;
	unsigned long uninitialized_var(available_memory);
	struct task_struct *tsk;

	/* Only needed when at least one limit is ratio-based. */
	if (!vm_dirty_bytes || !dirty_background_bytes)
		available_memory = determine_dirtyable_memory();

	/* Byte-based settings take precedence over ratios. */
	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else
		dirty = (vm_dirty_ratio * available_memory) / 100;

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	/* Background must stay below the throttle threshold. */
	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	/* Flushers (PF_LESS_THROTTLE) and RT tasks get 25% headroom. */
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
}
/*
* bdi_dirty_limit - @bdi's share of dirty throttling threshold
*
* Allocate high/low dirty limits to fast/slow devices, in order to prevent
* - starving fast devices
* - piling up dirty pages (that will take long time to sync) on slow devices
*
* The bdi's share of dirty limit will be adapting to its throughput and
* bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
*/
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
{
	u64 bdi_dirty;
	long numerator, denominator;

	/*
	 * Calculate this BDI's share of the dirty ratio.
	 */
	bdi_writeout_fraction(bdi, &numerator, &denominator);

	/* Share of the limit not claimed by minimum reservations... */
	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
	bdi_dirty *= numerator;
	do_div(bdi_dirty, denominator);

	/* ...plus this bdi's own reserved minimum, capped at its max. */
	bdi_dirty += (dirty * bdi->min_ratio) / 100;
	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
		bdi_dirty = dirty * bdi->max_ratio / 100;

	return bdi_dirty;
}
/*
* balance_dirty_pages() must be called by processes which are generating dirty
* data. It looks at the number of dirty pages in the machine and will force
* the caller to perform writeback if the system is over `vm_dirty_ratio'.
* If we're over `background_thresh' then the writeback threads are woken to
* perform some writeout.
*/
static void balance_dirty_pages(struct address_space *mapping,
				unsigned long write_chunk)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long pause = 1;
	bool dirty_exceeded = false;
	struct backing_dev_info *bdi = mapping->backing_dev_info;

	/* Loop until under the limits or we've written our chunk. */
	for (;;) {
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		global_dirty_limits(&background_thresh, &dirty_thresh);

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <=
				(background_thresh + dirty_thresh) / 2)
			break;

		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
		bdi_thresh = task_dirty_limit(current, bdi_thresh);

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		/*
		 * The bdi thresh is somehow "soft" limit derived from the
		 * global "hard" limit. The former helps to prevent heavy IO
		 * bdi or process from holding back light ones; The latter is
		 * the last resort safeguard.
		 */
		dirty_exceeded =
			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
			|| (nr_reclaimable + nr_writeback > dirty_thresh);

		if (!dirty_exceeded)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 * Only move pages to writeback if this bdi is over its
		 * threshold otherwise wait until the disk writes catch
		 * up.
		 */
		trace_wbc_balance_dirty_start(&wbc, bdi);
		if (bdi_nr_reclaimable > bdi_thresh) {
			writeback_inodes_wb(&bdi->wb, &wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			trace_wbc_balance_dirty_written(&wbc, bdi);
			if (pages_written >= write_chunk)
				break;		/* We've done our duty */
		}
		trace_wbc_balance_dirty_wait(&wbc, bdi);
		__set_current_state(TASK_UNINTERRUPTIBLE);
		io_schedule_timeout(pause);

		/*
		 * Increase the delay for each loop, up to our previous
		 * default of taking a 100ms nap.
		 */
		pause <<= 1;
		if (pause > HZ / 10)
			pause = HZ / 10;
	}

	if (!dirty_exceeded && bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	    (!laptop_mode && (nr_reclaimable > background_thresh)))
		bdi_start_background_writeback(bdi);
}
/* Mark @page dirty and, if it was newly dirtied (or @page_mkwrite),
 * give the dirty-throttling machinery a chance to run. */
void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}
/* Per-CPU count of pages dirtied since the last balance check. */
static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	/* Over the limit: check far more often (every 8 pages). */
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p =  &__get_cpu_var(bdp_ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		ratelimit = sync_writeback_pages(*p);
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping, ratelimit);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
/*
 * Block page allocators while the amount of writeback/unstable-NFS pages
 * exceeds a slightly boosted dirty threshold, to keep heavy writers from
 * DoS'ing reclaim.
 */
void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for (;;) {
		unsigned long dirty;

		global_dirty_limits(&background_thresh, &dirty_thresh);

		/* give page allocators ~10% headroom over the normal limit */
		dirty_thresh += dirty_thresh / 10;

		dirty = global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK);
		if (dirty <= dirty_thresh)
			break;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem, so we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}
/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int ret;

	/*
	 * Propagate proc_dointvec() failures (e.g. -EINVAL on malformed
	 * input) instead of silently discarding them: the old code always
	 * returned 0, so a bad write appeared to succeed.
	 */
	ret = proc_dointvec(table, write, buffer, length, ppos);
	/* re-arm the periodic writeback timer with the (possibly new) interval */
	bdi_arm_supers_timer();
	return ret;
}
#ifdef CONFIG_BLOCK
/*
 * Laptop-mode writeback timer: when it fires, flush all dirty and
 * unstable-NFS pages on @data's backing device in one go.
 */
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages;

	/*
	 * Write everything out, not just down to the dirty threshold,
	 * so the disk can spin down again for as long as possible.
	 */
	nr_pages = global_page_state(NR_FILE_DIRTY) +
		   global_page_state(NR_UNSTABLE_NFS);

	if (bdi_has_dirty_io(&q->backing_dev_info))
		bdi_start_writeback(&q->backing_dev_info, nr_pages);
}
/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
	/* laptop_mode holds the delay in jiffies; mod_timer also re-arms */
	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}
/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	struct backing_dev_info *bdi;

	/* RCU walk is sufficient: we only cancel timers, never modify the list */
	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		del_timer(&bdi->laptop_mode_wb_timer);

	rcu_read_unlock();
}
#endif
/*
* If ratelimit_pages is too high then we can get into dirty-data overload
* if a large number of processes all perform writes at the same time.
* If it is too low then SMP machines will call the (expensive)
* get_writeback_state too often.
*
* Here we set ratelimit_pages to a level which ensures that when all CPUs are
* dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
* thresholds before writeback cuts in.
*
* But the limit should not be set too high. Because it also controls the
* amount of memory which the balance_dirty_pages() caller has to write back.
* If this is too large then the caller will block on the IO queue all the
* time. So limit it to four megabytes - the balance_dirty_pages() caller
* will write six megabyte chunks, max.
*/
/*
 * Recompute ratelimit_pages from the current CPU count, clamped to
 * [16, 4MB worth of pages] — see the rationale in the comment above.
 */
void writeback_set_ratelimit(void)
{
	unsigned long pages = vm_total_pages / (num_online_cpus() * 32);
	unsigned long max_pages = (4096 * 1024) / PAGE_CACHE_SIZE;

	if (pages < 16)
		pages = 16;
	if (pages > max_pages)
		pages = max_pages;
	ratelimit_pages = pages;
}
/* CPU hotplug callback: re-derive ratelimit_pages for the new CPU count. */
static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}
/* Notifier registered in page_writeback_init() for CPU hotplug events. */
static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};
/*
* Called early on to tune the page writeback dirty limits.
*
* We used to scale dirty pages according to how total memory
* related to pages that could be allocated for buffers (by
* comparing nr_free_buffer_pages() to vm_total_pages.
*
* However, that was when we used "dirty_ratio" to scale with
* all memory, and we don't do that any more. "dirty_ratio"
* is now applied to total non-HIGHPAGE memory (by subtracting
* totalhigh_pages from vm_total_pages), and as such we can't
* get into the old insane situation any more where we had
* large amounts of dirty pages compared to a small amount of
* non-HIGHMEM memory.
*
* But we might still want to scale the dirty_ratio by how
* much memory the box has..
*/
/*
 * Boot-time setup: size the dirtier ratelimit, hook CPU hotplug so it is
 * kept in sync, and initialise the completion/dirty proportion estimators.
 */
void __init page_writeback_init(void)
{
	int shift;

	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}
/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use
 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
/*
 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
	unsigned long tagged;

	do {
		/* tag at most one batch per lock hold to bound irq-off time */
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
		cond_resched();
		/* We check 'start' to handle wrapping when end == ~0UL */
	} while (tagged >= WRITEBACK_TAG_BATCH && start);
}
EXPORT_SYMBOL(tag_pages_for_writeback);
/**
* write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
* @mapping: address space structure to write
* @wbc: subtract the number of written pages from *@wbc->nr_to_write
* @writepage: function called for each page
* @data: data passed to writepage function
*
* If a page is already under I/O, write_cache_pages() skips it, even
* if it's dirty. This is desirable behaviour for memory-cleaning writeback,
* but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
* and msync() need to guarantee that all the data which was dirty at the time
* the call was made get new I/O started against them. If wbc->sync_mode is
* WB_SYNC_ALL then we were called for data integrity and we must wait for
* existing IO to complete.
*
* To avoid livelocks (when other process dirties new pages), we first tag
* pages which should be written back with TOWRITE tag and only then start
* writing them. For data-integrity sync we have to be careful so that we do
* not miss some pages (e.g., because some other process has cleared TOWRITE
* tag we set). The rule we follow is that TOWRITE tag can be cleared only
* by the process clearing the DIRTY tag (and submitting the page for IO).
*/
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;			/* stop condition for the outer loop */
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;			/* next index to look up */
	pgoff_t end;			/* Inclusive */
	pgoff_t done_index;		/* index to resume from next time */
	int cycled;			/* already wrapped past EOF once? */
	int range_whole = 0;
	int tag;			/* radix tag used for page lookup */

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	/* integrity sync walks TOWRITE-tagged pages to avoid livelock */
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data interity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			trace_wbc_writepage(wbc, mapping->backing_dev_info);
			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					/* ->writepage declined; not an error */
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file. This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			/*
			 * We stop writing back only if we are not doing
			 * integrity sync. In case of integrity sync we have to
			 * keep going until we have written all the pages
			 * we tagged for writeback prior to entering this loop.
			 */
			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	/* remember where to resume the next cyclic pass */
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);
/*
 * write_cache_pages() callback used by generic_writepages(): invoke the
 * mapping's real ->writepage and record any error on the mapping.
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int err;

	err = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, err);
	return err;
}
/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	struct blk_plug plug;
	int err = 0;

	/* chardevs and other special files have no ->writepage: nothing to do */
	if (mapping->a_ops->writepage) {
		blk_start_plug(&plug);
		err = write_cache_pages(mapping, wbc, __writepage, mapping);
		blk_finish_plug(&plug);
	}
	return err;
}
EXPORT_SYMBOL(generic_writepages);
/*
 * Dispatch writeback for @mapping: use its ->writepages operation when one
 * is provided, otherwise fall back to generic_writepages().
 */
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		return mapping->a_ops->writepages(mapping, wbc);
	return generic_writepages(mapping, wbc);
}
/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		/* hold an extra reference across ->writepage, which unlocks */
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		/* page was already clean; ->writepage not called, unlock here */
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
/*
 * For address_spaces which do not use buffers nor write back: just set the
 * page's dirty flag.  Returns non-zero only if the flag was newly set.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (PageDirty(page))
		return 0;
	return !TestSetPageDirty(page);
}
/*
 * Helper function for set_page_dirty family: charge a newly dirtied page
 * to the zone, bdi and current task statistics.
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
	/* only mappings that account dirty pages (see backing_dev_info caps) */
	if (mapping_cap_account_dirty(mapping)) {
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_zone_page_state(page, NR_DIRTIED);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
		task_dirty_inc(current);
		task_io_account_write(PAGE_CACHE_SIZE);
	}
}
EXPORT_SYMBOL(account_page_dirtied);
/*
 * Helper function for set_page_writeback family: bump the zone writeback
 * and written counters for @page.
 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
 * wrt interrupts.
 */
void account_page_writeback(struct page *page)
{
	inc_zone_page_state(page, NR_WRITEBACK);
	inc_zone_page_state(page, NR_WRITTEN);
}
EXPORT_SYMBOL(account_page_writeback);
/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 *
 * Returns 1 if this call transitioned the page to dirty, 0 if it was
 * already dirty.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		/* anonymous page with no mapping: the flag alone suffices */
		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	/* record the skip so writeback can detect lack of progress */
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);
/*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors who prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but should be better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 *
 * Returns 1 if the page was newly dirtied, 0 if already dirty.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
		 * readahead/lru_deactivate_page could remain
		 * PG_readahead/PG_reclaim due to race with end_page_writeback
		 * About readahead, if the page is written, the flags would be
		 * reset. So no problem.
		 * About lru_deactivate_page, if the page is redirty, the flag
		 * will be reset. So no problem. but if the page is used by readahead
		 * it will confuse readahead and make it restart the size rampup
		 * process. But it's a trivial problem.
		 */
		ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
		/* default to the buffer-head implementation on block configs */
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	/* no mapping: just the page flag */
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);
/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	/* take the page lock around the dirtying to close the truncate race */
	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);
/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point.  We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion. See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	/* non-accounted mapping (or no mapping): just clear the flag */
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);
/*
 * Clear the page's writeback flag and, for pages with a mapping, the
 * corresponding radix-tree WRITEBACK tag and bdi accounting.
 * Returns non-zero if the page was under writeback.
 */
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		/* irqsave: end_page_writeback may be called from irq context */
		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret)
		dec_zone_page_state(page, NR_WRITEBACK);
	return ret;
}
/*
 * Set the page's writeback flag and, for pages with a mapping, the
 * radix-tree WRITEBACK tag; drop the DIRTY tag (if the page flag is
 * clean) and the TOWRITE tag, since the page is now being written.
 * Returns non-zero if the page was already under writeback.
 */
int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		/* page is going under IO now: no longer "to write" */
		radix_tree_tag_clear(&mapping->page_tree,
				     page_index(page),
				     PAGECACHE_TAG_TOWRITE);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		account_page_writeback(page);
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);
/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	int tagged;

	/* radix_tree_tagged() is safe under RCU; no tree_lock needed */
	rcu_read_lock();
	tagged = radix_tree_tagged(&mapping->page_tree, tag);
	rcu_read_unlock();

	return tagged;
}
EXPORT_SYMBOL(mapping_tagged);
| gpl-2.0 |
Redmi-dev/android_kernel_xiaomi_msm8226 | sound/soc/msm/apq8074.c | 432 | 71119 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mfd/pm8xxx/pm8921.h>
#include <linux/qpnp/clkdiv.h>
#include <linux/regulator/consumer.h>
#include <linux/io.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/pcm.h>
#include <sound/jack.h>
#include <sound/q6afe-v2.h>
#include <sound/pcm_params.h>
#include <asm/mach-types.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include "qdsp6v2/msm-pcm-routing-v2.h"
#include "qdsp6v2/q6core.h"
#include "../codecs/wcd9xxx-common.h"
#include "../codecs/wcd9320.h"
#define DRV_NAME "apq8074-asoc-taiko"
#define APQ8074_SPK_ON 1
#define APQ8074_SPK_OFF 0
#define MSM_SLIM_0_RX_MAX_CHANNELS 2
#define MSM_SLIM_0_TX_MAX_CHANNELS 4
#define BTSCO_RATE_8KHZ 8000
#define BTSCO_RATE_16KHZ 16000
static int slim0_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int hdmi_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
#define SAMPLING_RATE_48KHZ 48000
#define SAMPLING_RATE_96KHZ 96000
#define SAMPLING_RATE_192KHZ 192000
static int apq8074_auxpcm_rate = 8000;
#define LO_1_SPK_AMP 0x1
#define LO_3_SPK_AMP 0x2
#define LO_2_SPK_AMP 0x4
#define LO_4_SPK_AMP 0x8
#define LPAIF_OFFSET 0xFE000000
#define LPAIF_PRI_MODE_MUXSEL (LPAIF_OFFSET + 0x2B000)
#define LPAIF_SEC_MODE_MUXSEL (LPAIF_OFFSET + 0x2C000)
#define LPAIF_TER_MODE_MUXSEL (LPAIF_OFFSET + 0x2D000)
#define LPAIF_QUAD_MODE_MUXSEL (LPAIF_OFFSET + 0x2E000)
#define I2S_PCM_SEL 1
#define I2S_PCM_SEL_OFFSET 1
#define WCD9XXX_MBHC_DEF_BUTTONS 8
#define WCD9XXX_MBHC_DEF_RLOADS 5
#define TAIKO_EXT_CLK_RATE 9600000
/* It takes about 13ms for Class-D PAs to ramp-up */
#define EXT_CLASS_D_EN_DELAY 13000
#define EXT_CLASS_D_DIS_DELAY 3000
#define EXT_CLASS_D_DELAY_DELTA 2000
/* It takes about 13ms for Class-AB PAs to ramp-up */
#define EXT_CLASS_AB_EN_DELAY 10000
#define EXT_CLASS_AB_DIS_DELAY 1000
#define EXT_CLASS_AB_DELAY_DELTA 1000
#define NUM_OF_AUXPCM_GPIOS 4
static void *adsp_state_notifier;
#define ADSP_STATE_READY_TIMEOUT_MS 3000
/* Is PCM hw_params index @p one of the mask-type parameters? */
static inline int param_is_mask(int p)
{
	if (p < SNDRV_PCM_HW_PARAM_FIRST_MASK)
		return 0;
	return p <= SNDRV_PCM_HW_PARAM_LAST_MASK;
}
/* Return the snd_mask slot for mask-type hw_params index @n (caller must
 * ensure param_is_mask(n) holds). */
static inline struct snd_mask *param_to_mask(struct snd_pcm_hw_params *p, int n)
{
	return &(p->masks[n - SNDRV_PCM_HW_PARAM_FIRST_MASK]);
}
/*
 * Clear the mask-type hw_params entry @n and set exactly one @bit in it.
 * Silently ignores out-of-range bits and non-mask parameter indices.
 */
static void param_set_mask(struct snd_pcm_hw_params *p, int n, unsigned bit)
{
	struct snd_mask *mask;

	if (bit >= SNDRV_MASK_MAX || !param_is_mask(n))
		return;

	mask = param_to_mask(p, n);
	mask->bits[0] = 0;
	mask->bits[1] = 0;
	mask->bits[bit >> 5] |= (1 << (bit & 31));
}
static const char *const auxpcm_rate_text[] = {"rate_8000", "rate_16000"};
static const struct soc_enum apq8074_auxpcm_enum[] = {
SOC_ENUM_SINGLE_EXT(2, auxpcm_rate_text),
};
static void *def_taiko_mbhc_cal(void);
static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
bool dapm);
static struct wcd9xxx_mbhc_config mbhc_cfg = {
.read_fw_bin = false,
.calibration = NULL,
.micbias = MBHC_MICBIAS2,
.mclk_cb_fn = msm_snd_enable_codec_ext_clk,
.mclk_rate = TAIKO_EXT_CLK_RATE,
.gpio = 0,
.gpio_irq = 0,
.gpio_level_insert = 1,
.detect_extn_cable = false,
.insert_detect = true,
.swap_gnd_mic = NULL,
};
/* One AUXPCM pin: GPIO number plus the name it was requested under. */
struct msm_auxpcm_gpio {
	unsigned gpio_no;
	const char *gpio_name;
};

/* Set of AUXPCM GPIOs parsed from devicetree and its entry count. */
struct msm_auxpcm_ctrl {
	struct msm_auxpcm_gpio *pin_data;
	u32 cnt;
};

/* Per-board machine-driver private data. */
struct apq8074_asoc_mach_data {
	int mclk_gpio;
	u32 mclk_freq;
	int us_euro_gpio;	/* presumably the US/EURO headset cross-connect switch -- TODO confirm */
	struct msm_auxpcm_ctrl *pri_auxpcm_ctrl;
};
#define GPIO_NAME_INDEX 0
#define DT_PARSE_INDEX 1
static char *msm_prim_auxpcm_gpio_name[][2] = {
{"PRIM_AUXPCM_CLK", "qcom,prim-auxpcm-gpio-clk"},
{"PRIM_AUXPCM_SYNC", "qcom,prim-auxpcm-gpio-sync"},
{"PRIM_AUXPCM_DIN", "qcom,prim-auxpcm-gpio-din"},
{"PRIM_AUXPCM_DOUT", "qcom,prim-auxpcm-gpio-dout"},
};
static void *lpaif_pri_muxsel_virt_addr;
struct apq8074_liquid_dock_dev {
int dock_plug_gpio;
int dock_plug_irq;
struct snd_soc_dapm_context *dapm;
struct work_struct irq_work;
};
static struct apq8074_liquid_dock_dev *apq8074_liquid_dock_dev;
static int dock_plug_det = -1;
/* Shared channel numbers for Slimbus ports that connect APQ to MDM. */
enum {
SLIM_1_RX_1 = 145, /* BT-SCO and USB TX */
SLIM_1_TX_1 = 146, /* BT-SCO and USB RX */
SLIM_2_RX_1 = 147, /* HDMI RX */
SLIM_3_RX_1 = 148, /* In-call recording RX */
SLIM_3_RX_2 = 149, /* In-call recording RX */
SLIM_4_TX_1 = 150, /* In-call musid delivery TX */
};
static struct platform_device *spdev;
static struct regulator *ext_spk_amp_regulator;
static int ext_spk_amp_gpio = -1;
static int ext_ult_spk_amp_gpio = -1;
static int apq8074_spk_control = 1;
static int apq8074_ext_spk_pamp;
static int msm_slim_0_rx_ch = 1;
static int msm_slim_0_tx_ch = 1;
static int msm_btsco_rate = BTSCO_RATE_8KHZ;
static int msm_btsco_ch = 1;
static int msm_hdmi_rx_ch = 2;
static int slim0_rx_sample_rate = SAMPLING_RATE_48KHZ;
static int msm_proxy_rx_ch = 2;
static struct mutex cdc_mclk_mutex;
static struct clk *codec_clk;
static int clk_users;
static atomic_t prim_auxpcm_rsc_ref;
/*
 * Parse and claim the external speaker-amplifier resources (class-D GPIO,
 * its regulator, and the ultrasound class-AB GPIO) from devicetree.
 *
 * Returns 0 on success (including when the optional GPIOs are absent) or a
 * negative errno; on failure all resources acquired here are released.
 *
 * Fixes over the previous version:
 *  - a failed regulator_get() left an ERR_PTR in the global
 *    ext_spk_amp_regulator, defeating the later "== NULL" re-get check and
 *    poisoning subsequent regulator_enable() calls;
 *  - a failure requesting the second GPIO leaked the first GPIO and the
 *    regulator reference.
 */
static int apq8074_liquid_ext_spk_power_amp_init(void)
{
	int ret = 0;

	ext_spk_amp_gpio = of_get_named_gpio(spdev->dev.of_node,
		"qcom,ext-spk-amp-gpio", 0);
	if (ext_spk_amp_gpio >= 0) {
		ret = gpio_request(ext_spk_amp_gpio, "ext_spk_amp_gpio");
		if (ret) {
			pr_err("%s: gpio_request failed for ext_spk_amp_gpio.\n",
				__func__);
			return -EINVAL;
		}
		gpio_direction_output(ext_spk_amp_gpio, 0);

		if (ext_spk_amp_regulator == NULL) {
			ext_spk_amp_regulator = regulator_get(&spdev->dev,
							"qcom,ext-spk-amp");
			if (IS_ERR(ext_spk_amp_regulator)) {
				ret = PTR_ERR(ext_spk_amp_regulator);
				pr_err("%s: Cannot get regulator %s.\n",
					__func__, "qcom,ext-spk-amp");
				/* don't leave an ERR_PTR in the global */
				ext_spk_amp_regulator = NULL;
				gpio_free(ext_spk_amp_gpio);
				return ret;
			}
		}
	}

	ext_ult_spk_amp_gpio = of_get_named_gpio(spdev->dev.of_node,
		"qcom,ext-ult-spk-amp-gpio", 0);
	if (ext_ult_spk_amp_gpio >= 0) {
		ret = gpio_request(ext_ult_spk_amp_gpio,
				   "ext_ult_spk_amp_gpio");
		if (ret) {
			pr_err("%s: gpio_request failed for ext-ult_spk-amp-gpio.\n",
				__func__);
			/* release the class-D resources acquired above */
			if (ext_spk_amp_regulator) {
				regulator_put(ext_spk_amp_regulator);
				ext_spk_amp_regulator = NULL;
			}
			if (ext_spk_amp_gpio >= 0)
				gpio_free(ext_spk_amp_gpio);
			return -EINVAL;
		}
		gpio_direction_output(ext_ult_spk_amp_gpio, 0);
	}

	return 0;
}
/*
 * Power the external ultrasound class-AB speaker amplifier on or off.
 * On enable, the regulator must come up before the amp GPIO is asserted.
 *
 * Fix: regulator_enable() is __must_check and can fail; the old code
 * ignored the result and drove the GPIO against an unpowered amplifier.
 */
static void apq8074_liquid_ext_ult_spk_power_amp_enable(u32 on)
{
	if (on) {
		int ret = regulator_enable(ext_spk_amp_regulator);

		if (ret) {
			pr_err("%s: regulator_enable failed %d\n",
				__func__, ret);
			return;
		}
		gpio_direction_output(ext_ult_spk_amp_gpio, 1);
		/* time takes enable the external power class AB amplifier */
		usleep_range(EXT_CLASS_AB_EN_DELAY,
			     EXT_CLASS_AB_EN_DELAY + EXT_CLASS_AB_DELAY_DELTA);
	} else {
		gpio_direction_output(ext_ult_spk_amp_gpio, 0);
		regulator_disable(ext_spk_amp_regulator);
		/* time takes disable the external power class AB amplifier */
		usleep_range(EXT_CLASS_AB_DIS_DELAY,
			     EXT_CLASS_AB_DIS_DELAY + EXT_CLASS_AB_DELAY_DELTA);
	}

	pr_debug("%s: %s external ultrasound SPKR_DRV PAs.\n", __func__,
			on ? "Enable" : "Disable");
}
/*
 * Power the external class-D speaker amplifier on or off.
 *
 * Fix: regulator_enable() is __must_check and can fail; the old code
 * ignored the result and drove the GPIO against an unpowered amplifier.
 */
static void apq8074_liquid_ext_spk_power_amp_enable(u32 on)
{
	if (on) {
		int ret = regulator_enable(ext_spk_amp_regulator);

		if (ret) {
			pr_err("%s: regulator_enable failed %d\n",
				__func__, ret);
			return;
		}
		gpio_direction_output(ext_spk_amp_gpio, on);
		/*time takes enable the external power amplifier*/
		usleep_range(EXT_CLASS_D_EN_DELAY,
			     EXT_CLASS_D_EN_DELAY + EXT_CLASS_D_DELAY_DELTA);
	} else {
		gpio_direction_output(ext_spk_amp_gpio, on);
		regulator_disable(ext_spk_amp_regulator);
		/*time takes disable the external power amplifier*/
		usleep_range(EXT_CLASS_D_DIS_DELAY,
			     EXT_CLASS_D_DIS_DELAY + EXT_CLASS_D_DELAY_DELTA);
	}

	pr_debug("%s: %s external speaker PAs.\n", __func__,
			on ? "Enable" : "Disable");
}
/*
 * Deferred handler for the docking-detect IRQ: re-read the dock GPIO and,
 * if all four line-out PAs are active, switch the external amplifier on
 * (device docked, GPIO low) or off (undocked).
 */
static void apq8074_liquid_docking_irq_work(struct work_struct *work)
{
	struct apq8074_liquid_dock_dev *dock_dev =
		container_of(work,
			     struct apq8074_liquid_dock_dev,
			     irq_work);

	struct snd_soc_dapm_context *dapm = dock_dev->dapm;

	/* serialise against DAPM event callbacks toggling the same amp */
	mutex_lock(&dapm->codec->mutex);

	dock_plug_det =
		gpio_get_value(dock_dev->dock_plug_gpio);

	if (0 == dock_plug_det) {
		/* enable only when every LO_x PA has been switched on */
		if ((apq8074_ext_spk_pamp & LO_1_SPK_AMP) &&
		    (apq8074_ext_spk_pamp & LO_3_SPK_AMP) &&
		    (apq8074_ext_spk_pamp & LO_2_SPK_AMP) &&
		    (apq8074_ext_spk_pamp & LO_4_SPK_AMP))
			apq8074_liquid_ext_spk_power_amp_enable(1);
	} else {
		if ((apq8074_ext_spk_pamp & LO_1_SPK_AMP) &&
		    (apq8074_ext_spk_pamp & LO_3_SPK_AMP) &&
		    (apq8074_ext_spk_pamp & LO_2_SPK_AMP) &&
		    (apq8074_ext_spk_pamp & LO_4_SPK_AMP))
			apq8074_liquid_ext_spk_power_amp_enable(0);
	}

	mutex_unlock(&dapm->codec->mutex);
}
/* Hard-IRQ handler for dock detect: defer all work to process context. */
static irqreturn_t apq8074_liquid_docking_irq_handler(int irq, void *dev)
{
	struct apq8074_liquid_dock_dev *dock_dev = dev;

	/* switch speakers should not run in interrupt context */
	schedule_work(&dock_dev->irq_work);

	return IRQ_HANDLED;
}
static int apq8074_liquid_init_docking(struct snd_soc_dapm_context *dapm)
{
int ret = 0;
int dock_plug_gpio = 0;
/* plug in docking speaker+plug in device OR unplug one of them */
u32 dock_plug_irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
dock_plug_det = 0;
dock_plug_gpio = of_get_named_gpio(spdev->dev.of_node,
"qcom,dock-plug-det-irq", 0);
if (dock_plug_gpio >= 0) {
apq8074_liquid_dock_dev =
kzalloc(sizeof(*apq8074_liquid_dock_dev), GFP_KERNEL);
if (!apq8074_liquid_dock_dev) {
pr_err("apq8074_liquid_dock_dev alloc fail.\n");
return -ENOMEM;
}
apq8074_liquid_dock_dev->dock_plug_gpio = dock_plug_gpio;
ret = gpio_request(apq8074_liquid_dock_dev->dock_plug_gpio,
"dock-plug-det-irq");
if (ret) {
pr_err("%s:failed request apq8074_liquid_dock_plug_gpio.\n",
__func__);
return -EINVAL;
}
dock_plug_det =
gpio_get_value(apq8074_liquid_dock_dev->dock_plug_gpio);
apq8074_liquid_dock_dev->dock_plug_irq =
gpio_to_irq(apq8074_liquid_dock_dev->dock_plug_gpio);
apq8074_liquid_dock_dev->dapm = dapm;
ret = request_irq(apq8074_liquid_dock_dev->dock_plug_irq,
apq8074_liquid_docking_irq_handler,
dock_plug_irq_flags,
"liquid_dock_plug_irq",
apq8074_liquid_dock_dev);
INIT_WORK(
&apq8074_liquid_dock_dev->irq_work,
apq8074_liquid_docking_irq_work);
}
return 0;
}
/*
 * Record a request to power one (or more) of the four line-out speaker
 * amplifier paths on LiQUID.  The external PA is physically enabled only
 * once ALL four LO_*_SPK_AMP bits have been requested, the PA GPIO is
 * valid, and no docking station is currently attached (the dock carries
 * its own amplification).
 *
 * Returns 0 on success, -EINVAL if spk carries no known LO_*_SPK_AMP bit.
 */
static int apq8074_liquid_ext_spk_power_amp_on(u32 spk)
{
	int rc;

	if (spk & (LO_1_SPK_AMP | LO_3_SPK_AMP | LO_2_SPK_AMP | LO_4_SPK_AMP)) {
		/* NOTE(review): message fires on every request, not only
		 * when already on — wording is misleading but retained. */
		pr_debug("%s: External speakers are already on. spk = 0x%x\n",
			 __func__, spk);

		/* accumulate requested paths in the global bookkeeping mask */
		apq8074_ext_spk_pamp |= spk;

		if ((apq8074_ext_spk_pamp & LO_1_SPK_AMP) &&
		    (apq8074_ext_spk_pamp & LO_3_SPK_AMP) &&
		    (apq8074_ext_spk_pamp & LO_2_SPK_AMP) &&
		    (apq8074_ext_spk_pamp & LO_4_SPK_AMP))
			/* all four paths requested: turn the PA on unless docked */
			if (ext_spk_amp_gpio >= 0 &&
			    dock_plug_det == 0)
				apq8074_liquid_ext_spk_power_amp_enable(1);
		rc = 0;
	} else {
		pr_err("%s: Invalid external speaker ampl. spk = 0x%x\n",
		       __func__, spk);
		rc = -EINVAL;
	}

	return rc;
}
/* Gate the LiQUID PA-on request on the amp GPIO actually existing. */
static void apq8074_ext_spk_power_amp_on(u32 spk)
{
	if (!gpio_is_valid(ext_spk_amp_gpio))
		return;

	apq8074_liquid_ext_spk_power_amp_on(spk);
}
/*
 * Handle a request to power down one of the line-out speaker amplifier
 * paths.  The PA itself is only switched off when the global request mask
 * is already empty and no dock is attached.
 *
 * NOTE(review): this function never clears the spk bits from
 * apq8074_ext_spk_pamp, so the !apq8074_ext_spk_pamp test only succeeds
 * if the mask was never populated; the "= 0" below is then redundant.
 * Looks intentional in the original, but verify against the power-on path.
 */
static void apq8074_liquid_ext_spk_power_amp_off(u32 spk)
{
	if (spk & (LO_1_SPK_AMP |
		   LO_3_SPK_AMP |
		   LO_2_SPK_AMP |
		   LO_4_SPK_AMP)) {
		pr_debug("%s Left and right speakers case spk = 0x%08x",
			 __func__, spk);

		if (!apq8074_ext_spk_pamp) {
			/* only touch the PA when it is safe (GPIO valid, no dock) */
			if (ext_spk_amp_gpio >= 0 &&
			    dock_plug_det == 0)
				apq8074_liquid_ext_spk_power_amp_enable(0);
			apq8074_ext_spk_pamp = 0;
		}
	} else {
		pr_err("%s: ERROR : Invalid Ext Spk Ampl. spk = 0x%08x\n",
		       __func__, spk);
		return;
	}
}
/* Gate the LiQUID PA-off request on the amp GPIO actually existing. */
static void apq8074_ext_spk_power_amp_off(u32 spk)
{
	if (!gpio_is_valid(ext_spk_amp_gpio))
		return;

	apq8074_liquid_ext_spk_power_amp_off(spk);
}
/*
 * Apply the current "Speaker Function" setting: enable or disable all
 * four line-out DAPM pins under the codec mutex, then resync DAPM.
 */
static void apq8074_ext_control(struct snd_soc_codec *codec)
{
	static const char * const lineout_pins[] = {
		"Lineout_1 amp",
		"Lineout_3 amp",
		"Lineout_2 amp",
		"Lineout_4 amp",
	};
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	int i;

	mutex_lock(&dapm->codec->mutex);

	pr_debug("%s: apq8074_spk_control = %d", __func__, apq8074_spk_control);
	for (i = 0; i < ARRAY_SIZE(lineout_pins); i++) {
		if (apq8074_spk_control == APQ8074_SPK_ON)
			snd_soc_dapm_enable_pin(dapm, lineout_pins[i]);
		else
			snd_soc_dapm_disable_pin(dapm, lineout_pins[i]);
	}

	snd_soc_dapm_sync(dapm);

	mutex_unlock(&dapm->codec->mutex);
}
/* ALSA kcontrol get: report the current "Speaker Function" enum value. */
static int apq8074_get_spk(struct snd_kcontrol *kcontrol,
			   struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = apq8074_spk_control;
	pr_debug("%s: apq8074_spk_control = %d", __func__, apq8074_spk_control);
	return 0;
}
/*
 * ALSA kcontrol put: update "Speaker Function" and re-apply DAPM pin
 * state.  Returns 1 when the value changed, 0 when it was unchanged
 * (standard kcontrol convention).
 */
static int apq8074_set_spk(struct snd_kcontrol *kcontrol,
			   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	long requested = ucontrol->value.integer.value[0];

	pr_debug("%s()\n", __func__);

	if (apq8074_spk_control == requested)
		return 0;

	apq8074_spk_control = requested;
	apq8074_ext_control(codec);
	return 1;
}
static int msm_ext_spkramp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
pr_debug("%s()\n", __func__);
if (SND_SOC_DAPM_EVENT_ON(event)) {
if (!strncmp(w->name, "Lineout_1 amp", 14))
apq8074_ext_spk_power_amp_on(LO_1_SPK_AMP);
else if (!strncmp(w->name, "Lineout_3 amp", 14))
apq8074_ext_spk_power_amp_on(LO_3_SPK_AMP);
else if (!strncmp(w->name, "Lineout_2 amp", 14))
apq8074_ext_spk_power_amp_on(LO_2_SPK_AMP);
else if (!strncmp(w->name, "Lineout_4 amp", 14))
apq8074_ext_spk_power_amp_on(LO_4_SPK_AMP);
else {
pr_err("%s() Invalid Speaker Widget = %s\n",
__func__, w->name);
return -EINVAL;
}
} else {
if (!strncmp(w->name, "Lineout_1 amp", 14))
apq8074_ext_spk_power_amp_off(LO_1_SPK_AMP);
else if (!strncmp(w->name, "Lineout_3 amp", 14))
apq8074_ext_spk_power_amp_off(LO_3_SPK_AMP);
else if (!strncmp(w->name, "Lineout_2 amp", 14))
apq8074_ext_spk_power_amp_off(LO_2_SPK_AMP);
else if (!strncmp(w->name, "Lineout_4 amp", 14))
apq8074_ext_spk_power_amp_off(LO_4_SPK_AMP);
else {
pr_err("%s() Invalid Speaker Widget = %s\n",
__func__, w->name);
return -EINVAL;
}
}
return 0;
}
/*
 * DAPM event for the ultrasound speaker-amp widget: toggles the dedicated
 * ultrasound PA GPIO on PMU/PMD.  Fails if the widget name is unexpected
 * or the GPIO was never configured.
 */
static int msm_ext_spkramp_ultrasound_event(struct snd_soc_dapm_widget *w,
					    struct snd_kcontrol *k, int event)
{
	pr_debug("%s()\n", __func__);

	/* 19 covers "SPK_ultrasound amp" including the terminating NUL */
	if (strncmp(w->name, "SPK_ultrasound amp", 19)) {
		pr_err("%s() Invalid Speaker Widget = %s\n",
		       __func__, w->name);
		return -EINVAL;
	}

	if (!gpio_is_valid(ext_ult_spk_amp_gpio)) {
		pr_err("%s: ext_ult_spk_amp_gpio isn't configured\n",
		       __func__);
		return -EINVAL;
	}

	apq8074_liquid_ext_ult_spk_power_amp_enable(
			SND_SOC_DAPM_EVENT_ON(event) ? 1 : 0);
	return 0;
}
/*
 * Reference-counted enable/disable of the Taiko codec master clock.
 *
 * The first enabler sets the rate, prepares/enables the clock and tells
 * the codec driver; the last disabler reverses that.  clk_users tracks
 * the references and is protected by cdc_mclk_mutex.
 *
 * Fix: the original re-tested codec_clk after it had already been
 * verified non-NULL, leaving a dead "Error setting Taiko MCLK" branch;
 * the redundant test and its unreachable error path have been removed.
 *
 * Returns 0 on success, -EINVAL on missing clock or unbalanced disable.
 */
static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
					bool dapm)
{
	int ret = 0;

	pr_debug("%s: enable = %d clk_users = %d\n",
		 __func__, enable, clk_users);

	mutex_lock(&cdc_mclk_mutex);
	if (enable) {
		if (!codec_clk) {
			dev_err(codec->dev, "%s: did not get Taiko MCLK\n",
				__func__);
			ret = -EINVAL;
			goto exit;
		}

		clk_users++;
		/* only the first user actually switches the clock on */
		if (clk_users != 1)
			goto exit;

		clk_set_rate(codec_clk, TAIKO_EXT_CLK_RATE);
		clk_prepare_enable(codec_clk);
		taiko_mclk_enable(codec, 1, dapm);
	} else {
		if (clk_users > 0) {
			clk_users--;
			/* only the last user switches the clock off */
			if (clk_users == 0) {
				taiko_mclk_enable(codec, 0, dapm);
				clk_disable_unprepare(codec_clk);
			}
		} else {
			pr_err("%s: Error releasing Taiko MCLK\n", __func__);
			ret = -EINVAL;
			goto exit;
		}
	}
exit:
	mutex_unlock(&cdc_mclk_mutex);
	return ret;
}
/*
 * DAPM supply event for "MCLK": take a codec-clock reference before the
 * widget powers up and drop it after it powers down.
 */
static int apq8074_mclk_event(struct snd_soc_dapm_widget *w,
			      struct snd_kcontrol *kcontrol, int event)
{
	pr_debug("%s: event = %d\n", __func__, event);

	if (event == SND_SOC_DAPM_PRE_PMU)
		return msm_snd_enable_codec_ext_clk(w->codec, 1, true);
	if (event == SND_SOC_DAPM_POST_PMD)
		return msm_snd_enable_codec_ext_clk(w->codec, 0, true);

	return 0;
}
/*
 * Board-level DAPM widgets: the MCLK supply, the four LiQUID line-out
 * amps, the ultrasound amp, and the analog/digital microphone inputs
 * routed to the Taiko codec.
 */
static const struct snd_soc_dapm_widget apq8074_dapm_widgets[] = {

	/* MCLK reference counting is handled in apq8074_mclk_event() */
	SND_SOC_DAPM_SUPPLY("MCLK",  SND_SOC_NOPM, 0, 0,
	apq8074_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),

	/* external class-D PAs on the LiQUID board */
	SND_SOC_DAPM_SPK("Lineout_1 amp", msm_ext_spkramp_event),
	SND_SOC_DAPM_SPK("Lineout_3 amp", msm_ext_spkramp_event),
	SND_SOC_DAPM_SPK("Lineout_2 amp", msm_ext_spkramp_event),
	SND_SOC_DAPM_SPK("Lineout_4 amp", msm_ext_spkramp_event),
	SND_SOC_DAPM_SPK("SPK_ultrasound amp",
					 msm_ext_spkramp_ultrasound_event),

	SND_SOC_DAPM_MIC("Handset Mic", NULL),
	SND_SOC_DAPM_MIC("Headset Mic", NULL),
	SND_SOC_DAPM_MIC("ANCRight Headset Mic", NULL),
	SND_SOC_DAPM_MIC("ANCLeft Headset Mic", NULL),
	SND_SOC_DAPM_MIC("Analog Mic4", NULL),
	SND_SOC_DAPM_MIC("Analog Mic6", NULL),
	SND_SOC_DAPM_MIC("Analog Mic7", NULL),

	SND_SOC_DAPM_MIC("Digital Mic1", NULL),
	SND_SOC_DAPM_MIC("Digital Mic2", NULL),
	SND_SOC_DAPM_MIC("Digital Mic3", NULL),
	SND_SOC_DAPM_MIC("Digital Mic4", NULL),
	SND_SOC_DAPM_MIC("Digital Mic5", NULL),
	SND_SOC_DAPM_MIC("Digital Mic6", NULL),
};
/* Enum text tables for the mixer controls registered in msm_snd_controls
 * (and the BT-SCO rate control below); index order must match the
 * corresponding get/put handlers. */
static const char *const spk_function[] = {"Off", "On"};
static const char *const slim0_rx_ch_text[] = {"One", "Two"};
static const char *const slim0_tx_ch_text[] = {"One", "Two", "Three", "Four",
	"Five"};
/* HDMI supports 2..8 channels, hence the table starts at "Two" */
static char const *hdmi_rx_ch_text[] = {"Two", "Three", "Four", "Five",
	"Six", "Seven", "Eight"};
static char const *rx_bit_format_text[] = {"S16_LE", "S24_LE"};
static char const *slim0_rx_sample_rate_text[] = {"KHZ_48", "KHZ_96",
	"KHZ_192"};
static const char *const proxy_rx_ch_text[] = {"One", "Two", "Three", "Four",
	"Five", "Six", "Seven", "Eight"};

static const char *const btsco_rate_text[] = {"8000", "16000"};
static const struct soc_enum msm_btsco_enum[] = {
	SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
};
/* kcontrol get: translate the SLIM_0 RX sample rate to its enum index
 * (0 = 48 kHz, 1 = 96 kHz, 2 = 192 kHz). */
static int slim0_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	int idx;

	if (slim0_rx_sample_rate == SAMPLING_RATE_192KHZ)
		idx = 2;
	else if (slim0_rx_sample_rate == SAMPLING_RATE_96KHZ)
		idx = 1;
	else
		idx = 0;	/* 48 kHz and anything unexpected */

	ucontrol->value.integer.value[0] = idx;

	pr_debug("%s: slim0_rx_sample_rate = %d\n", __func__,
		 slim0_rx_sample_rate);

	return 0;
}
/* kcontrol put: translate the enum index back to a SLIM_0 RX sample rate;
 * out-of-range values fall back to 48 kHz. */
static int slim0_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	long sel = ucontrol->value.integer.value[0];

	pr_debug("%s: ucontrol value = %ld\n", __func__, sel);

	if (sel == 2)
		slim0_rx_sample_rate = SAMPLING_RATE_192KHZ;
	else if (sel == 1)
		slim0_rx_sample_rate = SAMPLING_RATE_96KHZ;
	else
		slim0_rx_sample_rate = SAMPLING_RATE_48KHZ;

	pr_debug("%s: slim0_rx_sample_rate = %d\n", __func__,
		 slim0_rx_sample_rate);

	return 0;
}
/* kcontrol get: 1 when SLIM_0 RX is S24_LE, 0 for S16_LE (and default). */
static int slim0_rx_bit_format_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] =
		(slim0_rx_bit_format == SNDRV_PCM_FORMAT_S24_LE) ? 1 : 0;

	pr_debug("%s: slim0_rx_bit_format = %d, ucontrol value = %ld\n",
		 __func__, slim0_rx_bit_format,
		 ucontrol->value.integer.value[0]);

	return 0;
}
/* kcontrol put: select S24_LE for index 1, otherwise S16_LE. */
static int slim0_rx_bit_format_put(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	slim0_rx_bit_format =
		(ucontrol->value.integer.value[0] == 1) ?
			SNDRV_PCM_FORMAT_S24_LE : SNDRV_PCM_FORMAT_S16_LE;
	return 0;
}
/* kcontrol get: report SLIM_0 RX channel count as a zero-based index. */
static int msm_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = msm_slim_0_rx_ch - 1;
	pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__,
		 msm_slim_0_rx_ch);
	return 0;
}
/* kcontrol put: enum index 0 means one channel, hence the +1. */
static int msm_slim_0_rx_ch_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	long idx = ucontrol->value.integer.value[0];

	msm_slim_0_rx_ch = idx + 1;
	pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__,
		 msm_slim_0_rx_ch);
	return 1;	/* value changed */
}
/* kcontrol get: report SLIM_0 TX channel count as a zero-based index. */
static int msm_slim_0_tx_ch_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = msm_slim_0_tx_ch - 1;
	pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__,
		 msm_slim_0_tx_ch);
	return 0;
}
/* kcontrol put: enum index 0 means one channel, hence the +1. */
static int msm_slim_0_tx_ch_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	long idx = ucontrol->value.integer.value[0];

	msm_slim_0_tx_ch = idx + 1;
	pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__, msm_slim_0_tx_ch);
	return 1;	/* value changed */
}
/* kcontrol get: report the current BT-SCO sample rate. */
static int msm_btsco_rate_get(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = msm_btsco_rate;
	pr_debug("%s: msm_btsco_rate = %d", __func__, msm_btsco_rate);
	return 0;
}
/* kcontrol put: index 1 selects 16 kHz BT-SCO; everything else (index 0
 * and out-of-range values) selects the 8 kHz default. */
static int msm_btsco_rate_put(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	msm_btsco_rate = (ucontrol->value.integer.value[0] == 1) ?
				BTSCO_RATE_16KHZ : BTSCO_RATE_8KHZ;

	pr_debug("%s: msm_btsco_rate = %d\n", __func__, msm_btsco_rate);
	return 0;
}
/* kcontrol get: 1 when HDMI RX is S24_LE, 0 for S16_LE (and default). */
static int hdmi_rx_bit_format_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] =
		(hdmi_rx_bit_format == SNDRV_PCM_FORMAT_S24_LE) ? 1 : 0;

	pr_debug("%s: hdmi_rx_bit_format = %d, ucontrol value = %ld\n",
		 __func__, hdmi_rx_bit_format,
		 ucontrol->value.integer.value[0]);

	return 0;
}
/* kcontrol put: select S24_LE for index 1, otherwise S16_LE. */
static int hdmi_rx_bit_format_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	hdmi_rx_bit_format =
		(ucontrol->value.integer.value[0] == 1) ?
			SNDRV_PCM_FORMAT_S24_LE : SNDRV_PCM_FORMAT_S16_LE;

	pr_debug("%s: hdmi_rx_bit_format = %d, ucontrol value = %ld\n",
		 __func__, hdmi_rx_bit_format,
		 ucontrol->value.integer.value[0]);

	return 0;
}
/* kcontrol get: HDMI channel enum starts at "Two", hence the -2. */
static int msm_hdmi_rx_ch_get(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = msm_hdmi_rx_ch - 2;
	pr_debug("%s: msm_hdmi_rx_ch = %d\n", __func__,
		 msm_hdmi_rx_ch);
	return 0;
}
/* kcontrol put: HDMI channel enum starts at "Two"; clamp to 8 channels. */
static int msm_hdmi_rx_ch_put(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	int ch = ucontrol->value.integer.value[0] + 2;

	if (ch > 8) {
		pr_err("%s: channels exceeded 8.Limiting to max channels-8\n",
		       __func__);
		ch = 8;
	}
	msm_hdmi_rx_ch = ch;

	pr_debug("%s: msm_hdmi_rx_ch = %d\n", __func__, msm_hdmi_rx_ch);
	return 1;	/* value changed */
}
/* Mixer control exposing the internal BT-SCO sample rate (8 or 16 kHz). */
static const struct snd_kcontrol_new int_btsco_rate_mixer_controls[] = {
	SOC_ENUM_EXT("Internal BTSCO SampleRate", msm_btsco_enum[0],
		     msm_btsco_rate_get, msm_btsco_rate_put),
};
/* Back-end fixup: pin the BT-SCO link to the user-selected rate and the
 * fixed msm_btsco_ch channel count. */
static int msm_btsco_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate_iv =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *ch_iv =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);

	rate_iv->min = rate_iv->max = msm_btsco_rate;
	ch_iv->min = ch_iv->max = msm_btsco_ch;

	return 0;
}
/* kcontrol get: report the current AUX PCM sample rate. */
static int apq8074_auxpcm_rate_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = apq8074_auxpcm_rate;
	return 0;
}
/* kcontrol put: index 1 selects 16 kHz AUX PCM; everything else (index 0
 * and out-of-range values) selects the 8 kHz default. */
static int apq8074_auxpcm_rate_put(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	apq8074_auxpcm_rate =
		(ucontrol->value.integer.value[0] == 1) ? 16000 : 8000;
	return 0;
}
/* kcontrol get: report the proxy RX channel count as a zero-based index. */
static int msm_proxy_rx_ch_get(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = msm_proxy_rx_ch - 1;
	pr_debug("%s: msm_proxy_rx_ch = %d\n", __func__,
		 msm_proxy_rx_ch);
	return 0;
}
/* kcontrol put: enum index 0 means one channel, hence the +1. */
static int msm_proxy_rx_ch_put(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	long idx = ucontrol->value.integer.value[0];

	msm_proxy_rx_ch = idx + 1;
	pr_debug("%s: msm_proxy_rx_ch = %d\n", __func__,
		 msm_proxy_rx_ch);
	return 1;	/* value changed */
}
/* Back-end fixup: AUX PCM is always mono at the user-selected rate. */
static int msm_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd,
				      struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate_iv =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *ch_iv =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);

	rate_iv->min = rate_iv->max = apq8074_auxpcm_rate;
	/* mono link */
	ch_iv->min = ch_iv->max = 1;

	return 0;
}
/*
 * Back-end fixup: pin the proxy RX link to 48 kHz and the user-selected
 * channel count.
 *
 * Fix: the original clamped channels to a minimum of 2 and then
 * immediately overwrote both bounds with msm_proxy_rx_ch, making the
 * clamp a dead store; it has been removed.
 */
static int msm_proxy_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					   struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *channels = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_CHANNELS);

	pr_debug("%s: msm_proxy_rx_ch =%d\n", __func__, msm_proxy_rx_ch);

	channels->min = channels->max = msm_proxy_rx_ch;
	rate->min = rate->max = 48000;

	return 0;
}
/* Back-end fixup: proxy TX always runs at 48 kHz. */
static int msm_proxy_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					   struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate_iv =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);

	rate_iv->min = rate_iv->max = 48000;
	return 0;
}
/*
 * Back-end fixup: force the HDMI RX link to 48 kHz, the user-selected
 * bit format and channel count.
 *
 * Fix: the original clamped channels to a minimum of 2 and then
 * unconditionally overwrote both bounds with msm_hdmi_rx_ch (itself
 * always >= 2), making the clamp a dead store; it has been removed.
 */
static int apq8074_hdmi_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					   struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *channels = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_CHANNELS);

	pr_debug("%s channels->min %u channels->max %u ()\n", __func__,
		 channels->min, channels->max);

	param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
		       hdmi_rx_bit_format);
	rate->min = rate->max = 48000;
	channels->min = channels->max = msm_hdmi_rx_ch;

	return 0;
}
/*
 * Request every AUX PCM GPIO described in auxpcm_ctrl->pin_data.
 *
 * Fix: on failure the original cleanup loop called
 * gpio_free(pin_data->gpio_no) with the SAME (still-advanced) pointer on
 * every iteration — freeing the pin that had just failed to be requested,
 * repeatedly, and never releasing the ones that HAD been acquired.  The
 * loop now walks back through the array and frees only the GPIOs that
 * were successfully requested.
 *
 * Returns 0 on success, the gpio_request() error otherwise.
 */
static int msm_aux_pcm_get_gpios(struct msm_auxpcm_ctrl *auxpcm_ctrl)
{
	struct msm_auxpcm_gpio *pin_data = NULL;
	int ret = 0;
	int i;
	int j;

	pin_data = auxpcm_ctrl->pin_data;
	for (i = 0; i < auxpcm_ctrl->cnt; i++, pin_data++) {
		ret = gpio_request(pin_data->gpio_no,
				   pin_data->gpio_name);
		pr_debug("%s: gpio = %d, gpio name = %s\n"
			 "ret = %d\n", __func__,
			 pin_data->gpio_no,
			 pin_data->gpio_name,
			 ret);
		if (ret) {
			pr_err("%s: Failed to request gpio %d\n",
			       __func__, pin_data->gpio_no);
			/* Release only the GPIOs acquired so far */
			for (j = i - 1; j >= 0; j--)
				gpio_free(auxpcm_ctrl->pin_data[j].gpio_no);

			return ret;
		}
	}
	return 0;
}
/*
 * Release every AUX PCM GPIO previously acquired by
 * msm_aux_pcm_get_gpios().  Returns -EINVAL when the control structure
 * or its pin table is missing, 0 otherwise.
 */
static int msm_aux_pcm_free_gpios(struct msm_auxpcm_ctrl *auxpcm_ctrl)
{
	struct msm_auxpcm_gpio *pin;
	int i;

	if (auxpcm_ctrl == NULL || auxpcm_ctrl->pin_data == NULL) {
		pr_err("%s: Ctrl pointers are NULL\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < auxpcm_ctrl->cnt; i++) {
		pin = &auxpcm_ctrl->pin_data[i];
		gpio_free(pin->gpio_no);
		pr_debug("%s: gpio = %d, gpio_name = %s\n",
			 __func__, pin->gpio_no,
			 pin->gpio_name);
	}

	return 0;
}
/*
 * Startup for the primary AUX PCM back end.  The first opener (playback
 * or capture) programs the LPAIF mux for PCM mode and requests the AUX
 * PCM GPIOs; prim_auxpcm_rsc_ref counts the openers so the resources are
 * torn down only by the last close.
 *
 * Fix: when the GPIO request failed, the original left the refcount
 * incremented, so no later opener would retry the mux/GPIO setup and the
 * matching shutdown would underflow the count.  The count is now
 * rebalanced on the failure path.
 *
 * Returns 0 on success, -EINVAL on bad pdata or GPIO failure.
 */
static int msm_prim_auxpcm_startup(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_card *card = rtd->card;
	struct apq8074_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
	struct msm_auxpcm_ctrl *auxpcm_ctrl = NULL;
	int ret = 0;

	pr_debug("%s(): substream = %s, prim_auxpcm_rsc_ref counter = %d\n",
		 __func__, substream->name, atomic_read(&prim_auxpcm_rsc_ref));

	auxpcm_ctrl = pdata->pri_auxpcm_ctrl;
	if (auxpcm_ctrl == NULL || auxpcm_ctrl->pin_data == NULL) {
		pr_err("%s: Ctrl pointers are NULL\n", __func__);
		return -EINVAL;
	}

	if (atomic_inc_return(&prim_auxpcm_rsc_ref) == 1) {
		if (lpaif_pri_muxsel_virt_addr != NULL)
			iowrite32(I2S_PCM_SEL << I2S_PCM_SEL_OFFSET,
				  lpaif_pri_muxsel_virt_addr);
		else
			pr_err("%s lpaif_pri_muxsel_virt_addr is NULL\n",
			       __func__);

		ret = msm_aux_pcm_get_gpios(auxpcm_ctrl);
		if (ret < 0) {
			pr_err("%s: Aux PCM GPIO request failed\n", __func__);
			/* undo the reference taken above */
			atomic_dec(&prim_auxpcm_rsc_ref);
			return -EINVAL;
		}
	}
	return 0;
}
/*
 * Shutdown for the primary AUX PCM back end: the last closer (refcount
 * reaching zero) releases the AUX PCM GPIOs acquired at startup.
 */
static void msm_prim_auxpcm_shutdown(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_card *card = rtd->card;
	struct apq8074_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
	struct msm_auxpcm_ctrl *auxpcm_ctrl = NULL;

	pr_debug("%s(): substream = %s, prim_auxpcm_rsc_ref counter = %d\n",
		 __func__, substream->name, atomic_read(&prim_auxpcm_rsc_ref));

	auxpcm_ctrl = pdata->pri_auxpcm_ctrl;
	/* free GPIOs only when the last user goes away */
	if (atomic_dec_return(&prim_auxpcm_rsc_ref) == 0)
		msm_aux_pcm_free_gpios(auxpcm_ctrl);
}
/* PCM ops for the primary AUX PCM back-end DAI link. */
static struct snd_soc_ops msm_auxpcm_be_ops = {
	.startup = msm_prim_auxpcm_startup,
	.shutdown = msm_prim_auxpcm_shutdown,
};
/* Back-end fixup: pin SLIM_0 RX to the user-selected bit format, sample
 * rate and channel count. */
static int msm_slim_0_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					    struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate_iv =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *ch_iv =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);

	pr_debug("%s()\n", __func__);

	param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
		       slim0_rx_bit_format);
	rate_iv->min = rate_iv->max = slim0_rx_sample_rate;
	ch_iv->min = ch_iv->max = msm_slim_0_rx_ch;

	pr_debug("%s: format = %d, rate = %d, channels = %d\n",
		 __func__, params_format(params), params_rate(params),
		 msm_slim_0_rx_ch);

	return 0;
}
/* Back-end fixup: SLIM_0 TX runs at 48 kHz with the user-selected
 * channel count. */
static int msm_slim_0_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					    struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate_iv =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *ch_iv =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);

	pr_debug("%s()\n", __func__);

	rate_iv->min = rate_iv->max = 48000;
	ch_iv->min = ch_iv->max = msm_slim_0_tx_ch;

	return 0;
}
/*
 * Back-end fixup for SLIMBUS_5_TX (mad/mono 16 kHz capture path): pins
 * the link to 16 kHz mono and pushes the codec's slave-port
 * configuration to the AFE for this port.
 *
 * Returns 0 on success or the afe_set_config() error.
 */
static int msm_slim_5_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					    struct snd_pcm_hw_params *params)
{
	int rc;
	void *config;
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_interval *rate =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *channels =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);

	pr_debug("%s enter\n", __func__);
	/* fixed 16 kHz mono link */
	rate->min = rate->max = 16000;
	channels->min = channels->max = 1;

	config = taiko_get_afe_config(codec, AFE_SLIMBUS_SLAVE_PORT_CONFIG);
	rc = afe_set_config(AFE_SLIMBUS_SLAVE_PORT_CONFIG, config,
			    SLIMBUS_5_TX);
	if (rc) {
		pr_err("%s: Failed to set slimbus slave port config %d\n",
		       __func__, rc);
		return rc;
	}

	return 0;
}
/* Generic back-end fixup: force a 48 kHz rate, leave channels alone. */
static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
				  struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate_iv =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);

	pr_debug("%s()\n", __func__);

	rate_iv->min = rate_iv->max = 48000;
	return 0;
}
/*
 * Enum descriptors backing msm_snd_controls:
 *  [0] speaker on/off       [1] SLIM_0 RX channels  [2] SLIM_0 TX channels
 *  [3] HDMI RX channels     [4] RX bit format (shared by SLIM_0 and HDMI)
 *  [5] SLIM_0 RX rate       [6] proxy RX channels
 */
static const struct soc_enum msm_snd_enum[] = {
	SOC_ENUM_SINGLE_EXT(2, spk_function),
	SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text),
	SOC_ENUM_SINGLE_EXT(5, slim0_tx_ch_text),
	SOC_ENUM_SINGLE_EXT(7, hdmi_rx_ch_text),
	SOC_ENUM_SINGLE_EXT(2, rx_bit_format_text),
	SOC_ENUM_SINGLE_EXT(3, slim0_rx_sample_rate_text),
	SOC_ENUM_SINGLE_EXT(8, proxy_rx_ch_text),
};
/* Board-level mixer controls registered on the codec in msm_audrx_init(). */
static const struct snd_kcontrol_new msm_snd_controls[] = {
	SOC_ENUM_EXT("Speaker Function", msm_snd_enum[0], apq8074_get_spk,
		     apq8074_set_spk),
	SOC_ENUM_EXT("SLIM_0_RX Channels", msm_snd_enum[1],
		     msm_slim_0_rx_ch_get, msm_slim_0_rx_ch_put),
	SOC_ENUM_EXT("SLIM_0_TX Channels", msm_snd_enum[2],
		     msm_slim_0_tx_ch_get, msm_slim_0_tx_ch_put),
	SOC_ENUM_EXT("AUX PCM SampleRate", apq8074_auxpcm_enum[0],
		     apq8074_auxpcm_rate_get, apq8074_auxpcm_rate_put),
	SOC_ENUM_EXT("HDMI_RX Channels", msm_snd_enum[3],
		     msm_hdmi_rx_ch_get, msm_hdmi_rx_ch_put),
	SOC_ENUM_EXT("SLIM_0_RX Format", msm_snd_enum[4],
		     slim0_rx_bit_format_get, slim0_rx_bit_format_put),
	SOC_ENUM_EXT("SLIM_0_RX SampleRate", msm_snd_enum[5],
		     slim0_rx_sample_rate_get, slim0_rx_sample_rate_put),
	/* msm_snd_enum[4] (rx_bit_format_text) is intentionally shared
	 * between the SLIM_0 and HDMI bit-format controls */
	SOC_ENUM_EXT("HDMI_RX Bit Format", msm_snd_enum[4],
		     hdmi_rx_bit_format_get, hdmi_rx_bit_format_put),
	SOC_ENUM_EXT("PROXY_RX Channels", msm_snd_enum[6],
		     msm_proxy_rx_ch_get, msm_proxy_rx_ch_put),
};
/*
 * MBHC callback: toggle the US/EURO headset ground-mic cross-connect
 * switch GPIO.  Always reports success to the headset-detect logic.
 */
static bool apq8074_swap_gnd_mic(struct snd_soc_codec *codec)
{
	struct snd_soc_card *card = codec->card;
	struct apq8074_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
	int cur = gpio_get_value_cansleep(pdata->us_euro_gpio);

	pr_debug("%s: swap select switch %d to %d\n", __func__, cur, !cur);
	gpio_set_value_cansleep(pdata->us_euro_gpio, !cur);

	return true;
}
/*
 * Push the codec's register and SLIMbus-slave configurations to the AFE.
 * Called at card init and again after an ADSP restart.  Returns 0 on
 * success or the first afe_set_config() error.
 */
static int msm_afe_set_config(struct snd_soc_codec *codec)
{
	void *cfg;
	int rc;

	pr_debug("%s: enter\n", __func__);

	cfg = taiko_get_afe_config(codec, AFE_CDC_REGISTERS_CONFIG);
	rc = afe_set_config(AFE_CDC_REGISTERS_CONFIG, cfg, 0);
	if (rc) {
		pr_err("%s: Failed to set codec registers config %d\n",
		       __func__, rc);
		return rc;
	}

	cfg = taiko_get_afe_config(codec, AFE_SLIMBUS_SLAVE_CONFIG);
	rc = afe_set_config(AFE_SLIMBUS_SLAVE_CONFIG, cfg, 0);
	if (rc) {
		pr_err("%s: Failed to set slimbus slave config %d\n", __func__,
		       rc);
		return rc;
	}

	return 0;
}
/* Drop the AFE configurations pushed by msm_afe_set_config(); called
 * just before the ADSP shuts down. */
static void msm_afe_clear_config(void)
{
	afe_clear_config(AFE_CDC_REGISTERS_CONFIG);
	afe_clear_config(AFE_SLIMBUS_SLAVE_CONFIG);
}
/*
 * Subsystem-restart notifier for the ADSP: clear the AFE configuration
 * before shutdown (it will be re-applied by the codec-up handler after
 * the DSP comes back).
 */
static int msm8974_adsp_state_callback(struct notifier_block *nb,
				       unsigned long value, void *priv)
{
	switch (value) {
	case SUBSYS_BEFORE_SHUTDOWN:
		pr_debug("%s: ADSP is about to shutdown. Clearing AFE config\n",
			 __func__);
		msm_afe_clear_config();
		break;
	case SUBSYS_AFTER_POWERUP:
		pr_debug("%s: ADSP is up\n", __func__);
		break;
	}

	return NOTIFY_OK;
}
/* Registered against the "adsp" subsystem in msm_audrx_init(); lowest
 * priority so other ADSP clients are notified first. */
static struct notifier_block adsp_state_notifier_block = {
	.notifier_call = msm8974_adsp_state_callback,
	.priority = -INT_MAX,
};
/*
 * Called when the Taiko codec comes back after an ADSP restart: wait for
 * ADSP audio services and re-apply the AFE configuration.
 *
 * NOTE(review): this polls q6core_is_adsp_ready() in a tight loop with
 * no sleep until ADSP_STATE_READY_TIMEOUT_MS elapses, spamming pr_err on
 * each miss — consider an msleep() between attempts (confirm calling
 * context allows sleeping).
 *
 * Returns 0 on success, -ETIMEDOUT if the ADSP never became ready, or
 * the msm_afe_set_config() error.
 */
static int msm8974_taiko_codec_up(struct snd_soc_codec *codec)
{
	int err;
	unsigned long timeout;
	int adsp_ready = 0;

	timeout = jiffies +
		  msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);

	do {
		if (!q6core_is_adsp_ready()) {
			pr_err("%s: ADSP Audio isn't ready\n", __func__);
		} else {
			pr_debug("%s: ADSP Audio is ready\n", __func__);
			adsp_ready = 1;
			break;
		}
	} while (time_after(timeout, jiffies));

	if (!adsp_ready) {
		pr_err("%s: timed out waiting for ADSP Audio\n", __func__);
		return -ETIMEDOUT;
	}

	err = msm_afe_set_config(codec);
	if (err)
		pr_err("%s: Failed to set AFE config. err %d\n",
		       __func__, err);
	return err;
}
/*
 * WCD9xxx codec-event dispatcher registered via taiko_event_register():
 * on CODEC_UP, re-apply the AFE configuration; any other event is
 * rejected.
 *
 * Fix: removed the unreachable `break` that followed the `return` in the
 * CODEC_UP case.
 */
static int apq8074_taiko_event_cb(struct snd_soc_codec *codec,
				  enum wcd9xxx_codec_event codec_event)
{
	switch (codec_event) {
	case WCD9XXX_CODEC_EVENT_CODEC_UP:
		return msm8974_taiko_codec_up(codec);
	default:
		pr_err("%s: UnSupported codec event %d\n",
		       __func__, codec_event);
		return -EINVAL;
	}
}
/*
 * One-time card init for the Taiko front-end DAI link: registers mixer
 * controls, LiQUID PAs and docking detect, DAPM widgets, the SLIMbus
 * channel map, the AFE configuration, MBHC headset detection and the
 * ADSP restart notifier.
 *
 * Fixes: two error paths (clip-register config and clip bank selection)
 * returned directly instead of jumping to the `out` label, leaking the
 * codec_clk reference taken by clk_get(); both now use `goto out`.
 *
 * NOTE(review): the clk_get() result is still not checked with IS_ERR()
 * before use by later MCLK handling — confirm whether "osr_clk" can be
 * absent on this platform.
 */
static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
{
	int err;
	void *config_data;
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;

	/* Taiko SLIMBUS configuration
	 * RX1, RX2, RX3, RX4, RX5, RX6, RX7, RX8, RX9, RX10, RX11, RX12, RX13
	 * TX1, TX2, TX3, TX4, TX5, TX6, TX7, TX8, TX9, TX10, TX11, TX12, TX13
	 * TX14, TX15, TX16
	 */
	unsigned int rx_ch[TAIKO_RX_MAX] = {144, 145, 146, 147, 148, 149, 150,
					    151, 152, 153, 154, 155, 156};
	unsigned int tx_ch[TAIKO_TX_MAX]  = {128, 129, 130, 131, 132, 133,
					     134, 135, 136, 137, 138, 139,
					     140, 141, 142, 143};

	pr_info("%s(), dev_name%s\n", __func__, dev_name(cpu_dai->dev));

	rtd->pmdown_time = 0;

	err = snd_soc_add_codec_controls(codec, msm_snd_controls,
					 ARRAY_SIZE(msm_snd_controls));
	if (err < 0)
		return err;

	err = apq8074_liquid_ext_spk_power_amp_init();
	if (err) {
		pr_err("%s: LiQUID 8974 CLASS_D PAs init failed (%d)\n",
		       __func__, err);
		return err;
	}

	err = apq8074_liquid_init_docking(dapm);
	if (err) {
		pr_err("%s: LiQUID 8974 init Docking stat IRQ failed (%d)\n",
		       __func__, err);
		return err;
	}

	snd_soc_dapm_new_controls(dapm, apq8074_dapm_widgets,
				  ARRAY_SIZE(apq8074_dapm_widgets));

	snd_soc_dapm_enable_pin(dapm, "Lineout_1 amp");
	snd_soc_dapm_enable_pin(dapm, "Lineout_3 amp");
	snd_soc_dapm_enable_pin(dapm, "Lineout_2 amp");
	snd_soc_dapm_enable_pin(dapm, "Lineout_4 amp");

	snd_soc_dapm_sync(dapm);

	/* every failure below must release this clock via `goto out` */
	codec_clk = clk_get(cpu_dai->dev, "osr_clk");

	snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch),
				    tx_ch, ARRAY_SIZE(rx_ch), rx_ch);

	err = msm_afe_set_config(codec);
	if (err) {
		pr_err("%s: Failed to set AFE config %d\n", __func__, err);
		goto out;
	}

	config_data = taiko_get_afe_config(codec, AFE_AANC_VERSION);
	err = afe_set_config(AFE_AANC_VERSION, config_data, 0);
	if (err) {
		pr_err("%s: Failed to set aanc version %d\n",
		       __func__, err);
		goto out;
	}

	config_data = taiko_get_afe_config(codec,
					   AFE_CDC_CLIP_REGISTERS_CONFIG);
	if (config_data) {
		err = afe_set_config(AFE_CDC_CLIP_REGISTERS_CONFIG,
				     config_data, 0);
		if (err) {
			pr_err("%s: Failed to set clip registers %d\n",
			       __func__, err);
			goto out;
		}
	}

	config_data = taiko_get_afe_config(codec, AFE_CLIP_BANK_SEL);
	if (config_data) {
		err = afe_set_config(AFE_CLIP_BANK_SEL, config_data, 0);
		if (err) {
			pr_err("%s: Failed to set AFE bank selection %d\n",
			       __func__, err);
			goto out;
		}
	}

	/* start mbhc */
	mbhc_cfg.calibration = def_taiko_mbhc_cal();
	if (mbhc_cfg.calibration) {
		err = taiko_hs_detect(codec, &mbhc_cfg);
		if (err)
			goto out;
	} else {
		err = -ENOMEM;
		goto out;
	}

	adsp_state_notifier =
		subsys_notif_register_notifier("adsp",
					       &adsp_state_notifier_block);
	if (!adsp_state_notifier) {
		pr_err("%s: Failed to register adsp state notifier\n",
		       __func__);
		err = -EFAULT;
		taiko_hs_detect_exit(codec);
		goto out;
	}

	taiko_event_register(apq8074_taiko_event_cb, rtd->codec);
	return 0;

out:
	clk_put(codec_clk);
	return err;
}
/* Trace-only startup hook shared by the SLIMbus back-end DAI links. */
static int apq8074_snd_startup(struct snd_pcm_substream *substream)
{
	pr_debug("%s(): substream = %s stream = %d\n",
		 __func__, substream->name, substream->stream);
	return 0;
}
/*
 * Build the default Taiko MBHC (multi-button headset control)
 * calibration blob: general timings, plug-detect currents, plug-type
 * voltage thresholds, button-detect parameters, and the per-button
 * voltage ranges / DCE tuning tables.
 *
 * Caller owns the returned kzalloc'd buffer (stored in
 * mbhc_cfg.calibration by msm_audrx_init()).  Returns NULL on allocation
 * failure.
 */
static void *def_taiko_mbhc_cal(void)
{
	void *taiko_cal;
	struct wcd9xxx_mbhc_btn_detect_cfg *btn_cfg;
	u16 *btn_low, *btn_high;
	u8 *n_ready, *n_cic, *gain;

	taiko_cal = kzalloc(WCD9XXX_MBHC_CAL_SIZE(WCD9XXX_MBHC_DEF_BUTTONS,
						  WCD9XXX_MBHC_DEF_RLOADS),
			    GFP_KERNEL);
	if (!taiko_cal) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}

	/* general section: bandgap/LDO settle times and averaging counts */
#define S(X, Y) ((WCD9XXX_MBHC_CAL_GENERAL_PTR(taiko_cal)->X) = (Y))
	S(t_ldoh, 100);
	S(t_bg_fast_settle, 100);
	S(t_shutdown_plug_rem, 255);
	S(mbhc_nsa, 4);
	S(mbhc_navg, 4);
#undef S
	/* plug-detect section: bias currents and insertion timings */
#define S(X, Y) ((WCD9XXX_MBHC_CAL_PLUG_DET_PTR(taiko_cal)->X) = (Y))
	S(mic_current, TAIKO_PID_MIC_5_UA);
	S(hph_current, TAIKO_PID_MIC_5_UA);
	S(t_mic_pid, 100);
	S(t_ins_complete, 250);
	S(t_ins_retry, 200);
#undef S
	/* plug-type section: mV thresholds distinguishing headphone/headset */
#define S(X, Y) ((WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(taiko_cal)->X) = (Y))
	S(v_no_mic, 30);
	S(v_hs_max, 2400);
#undef S
	/* button-detect section: measurement and debounce parameters */
#define S(X, Y) ((WCD9XXX_MBHC_CAL_BTN_DET_PTR(taiko_cal)->X) = (Y))
	S(c[0], 62);
	S(c[1], 124);
	S(nc, 1);
	S(n_meas, 3);
	S(mbhc_nsc, 11);
	S(n_btn_meas, 1);
	S(n_btn_con, 2);
	S(num_btn, WCD9XXX_MBHC_DEF_BUTTONS);
	S(v_btn_press_delta_sta, 100);
	S(v_btn_press_delta_cic, 50);
#undef S
	btn_cfg = WCD9XXX_MBHC_CAL_BTN_DET_PTR(taiko_cal);
	btn_low = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_V_BTN_LOW);
	btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg,
					       MBHC_BTN_DET_V_BTN_HIGH);
	/* per-button voltage windows; NOTE(review): -50 stored into a u16
	 * wraps to 0xFFCE — presumably the MBHC firmware interprets these
	 * as signed, confirm against the wcd9xxx driver */
	btn_low[0] = -50;
	btn_high[0] = 20;
	btn_low[1] = 21;
	btn_high[1] = 61;
	btn_low[2] = 62;
	btn_high[2] = 104;
	btn_low[3] = 105;
	btn_high[3] = 148;
	btn_low[4] = 149;
	btn_high[4] = 189;
	btn_low[5] = 190;
	btn_high[5] = 228;
	btn_low[6] = 229;
	btn_high[6] = 269;
	btn_low[7] = 270;
	btn_high[7] = 500;
	n_ready = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_READY);
	n_ready[0] = 80;
	n_ready[1] = 68;
	n_cic = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_CIC);
	n_cic[0] = 60;
	n_cic[1] = 47;
	gain = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_GAIN);
	gain[0] = 11;
	gain[1] = 9;

	return taiko_cal;
}
/*
 * hw_params for the SLIM_0 back end: query the codec's channel map and
 * program the CPU DAI with the appropriate subset.
 *
 * Playback uses the user-selected msm_slim_0_rx_ch count.  Capture picks
 * the TX count by codec DAI id: id 1 uses the user-selected
 * msm_slim_0_tx_ch, id 3 uses the stream's channel count, anything else
 * uses whatever the codec reported.  (The "tabla" wording in the
 * original comments is historical; this card uses the Taiko codec.)
 *
 * Returns 0 on success or the channel-map error.
 */
static int msm_snd_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int ret = 0;
	unsigned int rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS];
	unsigned int rx_ch_cnt = 0, tx_ch_cnt = 0;
	unsigned int user_set_tx_ch = 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		pr_debug("%s: rx_0_ch=%d\n", __func__, msm_slim_0_rx_ch);
		ret = snd_soc_dai_get_channel_map(codec_dai,
					&tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
		if (ret < 0) {
			pr_err("%s: failed to get codec chan map\n", __func__);
			goto end;
		}
		ret = snd_soc_dai_set_channel_map(cpu_dai, 0, 0,
						  msm_slim_0_rx_ch, rx_ch);
		if (ret < 0) {
			pr_err("%s: failed to set cpu chan map\n", __func__);
			goto end;
		}
	} else {
		pr_debug("%s: %s_tx_dai_id_%d_ch=%d\n", __func__,
			 codec_dai->name, codec_dai->id, user_set_tx_ch);
		ret = snd_soc_dai_get_channel_map(codec_dai,
					&tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
		if (ret < 0) {
			pr_err("%s: failed to get codec chan map\n", __func__);
			goto end;
		}
		/* codec TX dai 1: honour the mixer-selected channel count */
		if (codec_dai->id == 1)
			user_set_tx_ch = msm_slim_0_tx_ch;
		/* codec TX dai 3: follow the stream's channel count */
		else if (codec_dai->id == 3)
			user_set_tx_ch = params_channels(params);
		else
			user_set_tx_ch = tx_ch_cnt;
		pr_debug("%s: msm_slim_0_tx_ch(%d)user_set_tx_ch(%d)tx_ch_cnt(%d)\n",
			 __func__, msm_slim_0_tx_ch, user_set_tx_ch, tx_ch_cnt);
		ret = snd_soc_dai_set_channel_map(cpu_dai,
						  user_set_tx_ch, tx_ch, 0 , 0);
		if (ret < 0) {
			pr_err("%s: failed to set cpu chan map\n", __func__);
			goto end;
		}
	}
end:
	return ret;
}
/* Trace-only shutdown hook shared by the SLIMbus back-end DAI links.
 * ("shudown" typo kept: the name is referenced by the ops tables.) */
static void apq8074_snd_shudown(struct snd_pcm_substream *substream)
{
	pr_debug("%s(): substream = %s stream = %d\n", __func__,
		 substream->name, substream->stream);
}
/* PCM ops for the SLIM_0 back-end DAI links. */
static struct snd_soc_ops apq8074_be_ops = {
	.startup = apq8074_snd_startup,
	.hw_params = msm_snd_hw_params,
	.shutdown = apq8074_snd_shudown,
};
static int apq8074_slimbus_2_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
unsigned int rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS];
unsigned int rx_ch_cnt = 0, tx_ch_cnt = 0;
unsigned int num_tx_ch = 0;
unsigned int num_rx_ch = 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
num_rx_ch = params_channels(params);
pr_debug("%s: %s rx_dai_id = %d num_ch = %d\n", __func__,
codec_dai->name, codec_dai->id, num_rx_ch);
ret = snd_soc_dai_get_channel_map(codec_dai,
&tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
if (ret < 0) {
pr_err("%s: failed to get codec chan map\n", __func__);
goto end;
}
ret = snd_soc_dai_set_channel_map(cpu_dai, 0, 0,
num_rx_ch, rx_ch);
if (ret < 0) {
pr_err("%s: failed to set cpu chan map\n", __func__);
goto end;
}
} else {
num_tx_ch = params_channels(params);
pr_debug("%s: %s tx_dai_id = %d num_ch = %d\n", __func__,
codec_dai->name, codec_dai->id, num_tx_ch);
ret = snd_soc_dai_get_channel_map(codec_dai,
&tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
if (ret < 0) {
pr_err("%s: failed to get codec chan map\n", __func__);
goto end;
}
ret = snd_soc_dai_set_channel_map(cpu_dai,
num_tx_ch, tx_ch, 0 , 0);
if (ret < 0) {
pr_err("%s: failed to set cpu chan map\n", __func__);
goto end;
}
}
end:
return ret;
}
/* Ops for the SLIMBUS_2 hostless (ultrasound) DAI links below. */
static struct snd_soc_ops apq8074_slimbus_2_be_ops = {
.startup = apq8074_snd_startup,
.hw_params = apq8074_slimbus_2_hw_params,
.shutdown = apq8074_snd_shudown,
};
/* Digital audio interface glue - connects codec <---> CPU */
/*
 * DAI links common to all APQ8074 boards: dynamic front ends first
 * (routed through the msm-pcm-routing backend), then hostless links,
 * then the fixed back ends (BT/FM, AFE proxy, AUXPCM, taiko SLIMBUS).
 * The optional HDMI back end is appended separately at probe time.
 */
static struct snd_soc_dai_link apq8074_common_dai_links[] = {
	/* FrontEnd DAI Links */
	{
		.name = "MSM8974 Media1",
		.stream_name = "MultiMedia1",
		.cpu_dai_name = "MultiMedia1",
		.platform_name = "msm-pcm-dsp.0",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.ignore_suspend = 1,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
	},
	{
		.name = "MSM8974 Media2",
		.stream_name = "MultiMedia2",
		.cpu_dai_name = "MultiMedia2",
		.platform_name = "msm-pcm-dsp.0",
		.dynamic = 1,
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.ignore_suspend = 1,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA2,
	},
	{
		.name = "Circuit-Switch Voice",
		.stream_name = "CS-Voice",
		.cpu_dai_name = "CS-VOICE",
		.platform_name = "msm-pcm-voice",
		.dynamic = 1,
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ignore_suspend = 1,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.be_id = MSM_FRONTEND_DAI_CS_VOICE,
	},
	{
		.name = "MSM VoIP",
		.stream_name = "VoIP",
		.cpu_dai_name = "VoIP",
		.platform_name = "msm-voip-dsp",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.ignore_suspend = 1,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.be_id = MSM_FRONTEND_DAI_VOIP,
	},
	{
		.name = "MSM8974 LPA",
		.stream_name = "LPA",
		.cpu_dai_name = "MultiMedia3",
		.platform_name = "msm-pcm-lpa",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.ignore_suspend = 1,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA3,
	},
	/* Hostless PCM purpose */
	{
		.name = "SLIMBUS_0 Hostless",
		.stream_name = "SLIMBUS_0 Hostless",
		.cpu_dai_name = "SLIMBUS0_HOSTLESS",
		.platform_name = "msm-pcm-hostless",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ignore_suspend = 1,
		.ignore_pmdown_time = 1, /* dai link has playback support */
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
	},
	{
		.name = "INT_FM Hostless",
		.stream_name = "INT_FM Hostless",
		.cpu_dai_name = "INT_FM_HOSTLESS",
		.platform_name = "msm-pcm-hostless",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ignore_suspend = 1,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
	},
	{
		.name = "MSM AFE-PCM RX",
		.stream_name = "AFE-PROXY RX",
		.cpu_dai_name = "msm-dai-q6-dev.241",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-rx",
		.platform_name = "msm-pcm-afe",
		.ignore_suspend = 1,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
	},
	{
		.name = "MSM AFE-PCM TX",
		.stream_name = "AFE-PROXY TX",
		.cpu_dai_name = "msm-dai-q6-dev.240",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-tx",
		.platform_name = "msm-pcm-afe",
		.ignore_suspend = 1,
	},
	{
		.name = "MSM8974 Compr",
		.stream_name = "COMPR",
		.cpu_dai_name = "MultiMedia4",
		.platform_name = "msm-compress-dsp",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.ignore_suspend = 1,
		.ignore_pmdown_time = 1,
		/* this dainlink has playback support */
		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA4,
	},
	{
		.name = "AUXPCM Hostless",
		.stream_name = "AUXPCM Hostless",
		.cpu_dai_name = "AUXPCM_HOSTLESS",
		.platform_name = "msm-pcm-hostless",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ignore_suspend = 1,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
	},
	{
		.name = "SLIMBUS_1 Hostless",
		.stream_name = "SLIMBUS_1 Hostless",
		.cpu_dai_name = "SLIMBUS1_HOSTLESS",
		.platform_name = "msm-pcm-hostless",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ignore_suspend = 1,
		.ignore_pmdown_time = 1, /* dai link has playback support */
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
	},
	{
		.name = "SLIMBUS_3 Hostless",
		.stream_name = "SLIMBUS_3 Hostless",
		.cpu_dai_name = "SLIMBUS3_HOSTLESS",
		.platform_name = "msm-pcm-hostless",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ignore_suspend = 1,
		.ignore_pmdown_time = 1, /* dai link has playback support */
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
	},
	{
		.name = "SLIMBUS_4 Hostless",
		.stream_name = "SLIMBUS_4 Hostless",
		.cpu_dai_name = "SLIMBUS4_HOSTLESS",
		.platform_name = "msm-pcm-hostless",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ignore_suspend = 1,
		.ignore_pmdown_time = 1, /* dai link has playback support */
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
	},
	{
		.name = "VoLTE",
		.stream_name = "VoLTE",
		.cpu_dai_name = "VoLTE",
		.platform_name = "msm-pcm-voice",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ignore_suspend = 1,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.be_id = MSM_FRONTEND_DAI_VOLTE,
	},
	{
		.name = "MSM8974 LowLatency",
		.stream_name = "MultiMedia5",
		.cpu_dai_name = "MultiMedia5",
		.platform_name = "msm-pcm-dsp.1",
		.dynamic = 1,
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.ignore_suspend = 1,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA5,
	},
	/* LSM FE */
	{
		.name = "Listen Audio Service",
		.stream_name = "Listen Audio Service",
		.cpu_dai_name = "LSM",
		.platform_name = "msm-lsm-client",
		.dynamic = 1,
		.trigger = { SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST },
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ignore_suspend = 1,
		.ignore_pmdown_time = 1,
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.be_id = MSM_FRONTEND_DAI_LSM1,
	},
	/* Backend BT/FM DAI Links */
	{
		.name = LPASS_BE_INT_BT_SCO_RX,
		.stream_name = "Internal BT-SCO Playback",
		.cpu_dai_name = "msm-dai-q6-dev.12288",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-rx",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_INT_BT_SCO_RX,
		.be_hw_params_fixup = msm_btsco_be_hw_params_fixup,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.ignore_suspend = 1,
	},
	{
		.name = LPASS_BE_INT_BT_SCO_TX,
		.stream_name = "Internal BT-SCO Capture",
		.cpu_dai_name = "msm-dai-q6-dev.12289",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-tx",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_INT_BT_SCO_TX,
		.be_hw_params_fixup = msm_btsco_be_hw_params_fixup,
		.ignore_suspend = 1,
	},
	{
		.name = LPASS_BE_INT_FM_RX,
		.stream_name = "Internal FM Playback",
		.cpu_dai_name = "msm-dai-q6-dev.12292",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-rx",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_INT_FM_RX,
		.be_hw_params_fixup = msm_be_hw_params_fixup,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.ignore_suspend = 1,
	},
	{
		.name = LPASS_BE_INT_FM_TX,
		.stream_name = "Internal FM Capture",
		.cpu_dai_name = "msm-dai-q6-dev.12293",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-tx",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_INT_FM_TX,
		.be_hw_params_fixup = msm_be_hw_params_fixup,
		.ignore_suspend = 1,
	},
	/* Backend AFE DAI Links */
	{
		.name = LPASS_BE_AFE_PCM_RX,
		.stream_name = "AFE Playback",
		.cpu_dai_name = "msm-dai-q6-dev.224",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-rx",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_AFE_PCM_RX,
		.be_hw_params_fixup = msm_proxy_rx_be_hw_params_fixup,
		/* this dainlink has playback support */
		.ignore_pmdown_time = 1,
		.ignore_suspend = 1,
	},
	{
		.name = LPASS_BE_AFE_PCM_TX,
		.stream_name = "AFE Capture",
		.cpu_dai_name = "msm-dai-q6-dev.225",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-tx",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_AFE_PCM_TX,
		.be_hw_params_fixup = msm_proxy_tx_be_hw_params_fixup,
		.ignore_suspend = 1,
	},
	/* HDMI Hostless */
	{
		.name = "HDMI_RX_HOSTLESS",
		.stream_name = "HDMI_RX_HOSTLESS",
		.cpu_dai_name = "HDMI_HOSTLESS",
		.platform_name = "msm-pcm-hostless",
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
			SND_SOC_DPCM_TRIGGER_POST},
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ignore_suspend = 1,
		.ignore_pmdown_time = 1,
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
	},
	/* AUX PCM Backend DAI Links */
	{
		.name = LPASS_BE_AUXPCM_RX,
		.stream_name = "AUX PCM Playback",
		.cpu_dai_name = "msm-dai-q6-auxpcm.1",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-rx",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_AUXPCM_RX,
		.be_hw_params_fixup = msm_auxpcm_be_params_fixup,
		.ops = &msm_auxpcm_be_ops,
		.ignore_pmdown_time = 1,
		.ignore_suspend = 1,
		/* this dainlink has playback support */
	},
	{
		.name = LPASS_BE_AUXPCM_TX,
		.stream_name = "AUX PCM Capture",
		.cpu_dai_name = "msm-dai-q6-auxpcm.1",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-tx",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_AUXPCM_TX,
		.be_hw_params_fixup = msm_auxpcm_be_params_fixup,
		.ops = &msm_auxpcm_be_ops,
		.ignore_suspend = 1,
	},
	/* Backend DAI Links */
	{
		.name = LPASS_BE_SLIMBUS_0_RX,
		.stream_name = "Slimbus Playback",
		.cpu_dai_name = "msm-dai-q6-dev.16384",
		.platform_name = "msm-pcm-routing",
		.codec_name = "taiko_codec",
		.codec_dai_name = "taiko_rx1",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX,
		.init = &msm_audrx_init,
		.be_hw_params_fixup = msm_slim_0_rx_be_hw_params_fixup,
		.ops = &apq8074_be_ops,
		.ignore_pmdown_time = 1, /* dai link has playback support */
		.ignore_suspend = 1,
	},
	{
		.name = LPASS_BE_SLIMBUS_0_TX,
		.stream_name = "Slimbus Capture",
		.cpu_dai_name = "msm-dai-q6-dev.16385",
		.platform_name = "msm-pcm-routing",
		.codec_name = "taiko_codec",
		.codec_dai_name = "taiko_tx1",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX,
		.be_hw_params_fixup = msm_slim_0_tx_be_hw_params_fixup,
		.ops = &apq8074_be_ops,
		.ignore_suspend = 1,
	},
	{
		.name = LPASS_BE_SLIMBUS_1_RX,
		.stream_name = "Slimbus1 Playback",
		.cpu_dai_name = "msm-dai-q6-dev.16386",
		.platform_name = "msm-pcm-routing",
		.codec_name = "taiko_codec",
		.codec_dai_name = "taiko_rx1",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_SLIMBUS_1_RX,
		.be_hw_params_fixup = msm_slim_0_rx_be_hw_params_fixup,
		.ops = &apq8074_be_ops,
		/* dai link has playback support */
		.ignore_pmdown_time = 1,
		.ignore_suspend = 1,
	},
	{
		.name = LPASS_BE_SLIMBUS_1_TX,
		.stream_name = "Slimbus1 Capture",
		.cpu_dai_name = "msm-dai-q6-dev.16387",
		.platform_name = "msm-pcm-routing",
		.codec_name = "taiko_codec",
		.codec_dai_name = "taiko_tx1",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_SLIMBUS_1_TX,
		.be_hw_params_fixup = msm_slim_0_tx_be_hw_params_fixup,
		.ops = &apq8074_be_ops,
		.ignore_suspend = 1,
	},
	{
		.name = LPASS_BE_SLIMBUS_3_RX,
		.stream_name = "Slimbus3 Playback",
		.cpu_dai_name = "msm-dai-q6-dev.16390",
		.platform_name = "msm-pcm-routing",
		.codec_name = "taiko_codec",
		.codec_dai_name = "taiko_rx1",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_SLIMBUS_3_RX,
		.be_hw_params_fixup = msm_slim_0_rx_be_hw_params_fixup,
		.ops = &apq8074_be_ops,
		/* dai link has playback support */
		.ignore_pmdown_time = 1,
		.ignore_suspend = 1,
	},
	{
		.name = LPASS_BE_SLIMBUS_3_TX,
		.stream_name = "Slimbus3 Capture",
		.cpu_dai_name = "msm-dai-q6-dev.16391",
		.platform_name = "msm-pcm-routing",
		.codec_name = "taiko_codec",
		.codec_dai_name = "taiko_tx1",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_SLIMBUS_3_TX,
		.be_hw_params_fixup = msm_slim_0_tx_be_hw_params_fixup,
		.ops = &apq8074_be_ops,
		.ignore_suspend = 1,
	},
	{
		.name = LPASS_BE_SLIMBUS_4_RX,
		.stream_name = "Slimbus4 Playback",
		.cpu_dai_name = "msm-dai-q6-dev.16392",
		.platform_name = "msm-pcm-routing",
		.codec_name = "taiko_codec",
		.codec_dai_name = "taiko_rx1",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_SLIMBUS_4_RX,
		.be_hw_params_fixup = msm_slim_0_rx_be_hw_params_fixup,
		.ops = &apq8074_be_ops,
		/* dai link has playback support */
		.ignore_pmdown_time = 1,
		.ignore_suspend = 1,
	},
	{
		.name = LPASS_BE_SLIMBUS_4_TX,
		.stream_name = "Slimbus4 Capture",
		.cpu_dai_name = "msm-dai-q6-dev.16393",
		.platform_name = "msm-pcm-hostless",
		.codec_name = "taiko_codec",
		.codec_dai_name = "taiko_vifeedback",
		.be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
		.be_hw_params_fixup = msm_slim_0_tx_be_hw_params_fixup,
		.ops = &apq8074_be_ops,
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ignore_suspend = 1,
	},
	/* Incall Record Uplink BACK END DAI Link */
	{
		.name = LPASS_BE_INCALL_RECORD_TX,
		.stream_name = "Voice Uplink Capture",
		.cpu_dai_name = "msm-dai-q6-dev.32772",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-tx",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
		.be_hw_params_fixup = msm_be_hw_params_fixup,
		.ignore_suspend = 1,
	},
	/* Incall Record Downlink BACK END DAI Link */
	{
		.name = LPASS_BE_INCALL_RECORD_RX,
		.stream_name = "Voice Downlink Capture",
		.cpu_dai_name = "msm-dai-q6-dev.32771",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-tx",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
		.be_hw_params_fixup = msm_be_hw_params_fixup,
		.ignore_suspend = 1,
	},
	/* MAD BE */
	{
		.name = LPASS_BE_SLIMBUS_5_TX,
		.stream_name = "Slimbus5 Capture",
		.cpu_dai_name = "msm-dai-q6-dev.16395",
		.platform_name = "msm-pcm-routing",
		.codec_name = "taiko_codec",
		.codec_dai_name = "taiko_mad1",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_SLIMBUS_5_TX,
		.be_hw_params_fixup = msm_slim_5_tx_be_hw_params_fixup,
		.ops = &apq8074_be_ops,
	},
	/* Incall Music BACK END DAI Link */
	{
		.name = LPASS_BE_VOICE_PLAYBACK_TX,
		.stream_name = "Voice Farend Playback",
		.cpu_dai_name = "msm-dai-q6-dev.32773",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-stub-codec.1",
		.codec_dai_name = "msm-stub-rx",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
		.be_hw_params_fixup = msm_be_hw_params_fixup,
		.ignore_suspend = 1,
	},
	/* Ultrasound RX Back End DAI Link */
	{
		.name = "SLIMBUS_2 Hostless Playback",
		.stream_name = "SLIMBUS_2 Hostless Playback",
		.cpu_dai_name = "msm-dai-q6-dev.16388",
		.platform_name = "msm-pcm-hostless",
		.codec_name = "taiko_codec",
		.codec_dai_name = "taiko_rx2",
		.ignore_suspend = 1,
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ops = &apq8074_slimbus_2_be_ops,
	},
	/* Ultrasound TX Back End DAI Link */
	{
		.name = "SLIMBUS_2 Hostless Capture",
		.stream_name = "SLIMBUS_2 Hostless Capture",
		.cpu_dai_name = "msm-dai-q6-dev.16389",
		.platform_name = "msm-pcm-hostless",
		.codec_name = "taiko_codec",
		.codec_dai_name = "taiko_tx2",
		.ignore_suspend = 1,
		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
		.ops = &apq8074_slimbus_2_be_ops,
	},
};
/* Optional HDMI back end, appended at probe when DT has qcom,hdmi-audio-rx. */
static struct snd_soc_dai_link apq8074_hdmi_dai_link[] = {
	/* HDMI BACK END DAI Link */
	{
		.name = LPASS_BE_HDMI,
		.stream_name = "HDMI Playback",
		.cpu_dai_name = "msm-dai-q6-hdmi.8",
		.platform_name = "msm-pcm-routing",
		.codec_name = "msm-hdmi-audio-codec-rx",
		.codec_dai_name = "msm_hdmi_audio_codec_rx_dai",
		.no_pcm = 1,
		.be_id = MSM_BACKEND_DAI_HDMI_RX,
		.be_hw_params_fixup = apq8074_hdmi_be_hw_params_fixup,
		.ignore_pmdown_time = 1,
		.ignore_suspend = 1,
	},
};
/* Scratch array large enough for common + HDMI links; filled at probe. */
static struct snd_soc_dai_link apq8074_dai_links[
	ARRAY_SIZE(apq8074_common_dai_links) +
	ARRAY_SIZE(apq8074_hdmi_dai_link)];

/* Sound card descriptor; dai_link/num_links are assigned in probe. */
struct snd_soc_card snd_soc_card_apq8074 = {
	.name = "apq8074-taiko-snd-card",
};
/*
 * apq8074_dtparse_auxpcm - parse the AUXPCM GPIO set from the device tree.
 * @pdev: platform device whose of_node carries the GPIO properties
 * @auxpcm_ctrl: out-param; receives a devm-allocated control block holding
 *               the parsed name/number pairs and their count
 * @msm_auxpcm_gpio_name: table of [DT property name, display name] pairs,
 *               NUM_OF_AUXPCM_GPIOS entries
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV when any
 * GPIO lookup fails.  On error the pin array is released and *auxpcm_ctrl
 * is left untouched.
 */
static int apq8074_dtparse_auxpcm(struct platform_device *pdev,
			struct msm_auxpcm_ctrl **auxpcm_ctrl,
			char *msm_auxpcm_gpio_name[][2])
{
	int ret = 0;
	int i = 0;
	struct msm_auxpcm_gpio *pin_data = NULL;
	struct msm_auxpcm_ctrl *ctrl;
	/*
	 * Must be signed: of_get_named_gpio_flags() returns a negative
	 * errno on failure.  With the previous "unsigned int" array the
	 * errno wrapped to a huge positive value and passed the ">0"
	 * validity check below, storing a bogus GPIO number.
	 */
	int gpio_no[NUM_OF_AUXPCM_GPIOS];
	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
	int auxpcm_cnt = 0;

	pin_data = devm_kzalloc(&pdev->dev, (ARRAY_SIZE(gpio_no) *
				sizeof(struct msm_auxpcm_gpio)),
				GFP_KERNEL);
	if (!pin_data) {
		dev_err(&pdev->dev, "No memory for gpio\n");
		ret = -ENOMEM;
		goto err;
	}
	for (i = 0; i < ARRAY_SIZE(gpio_no); i++) {
		gpio_no[i] = of_get_named_gpio_flags(pdev->dev.of_node,
				msm_auxpcm_gpio_name[i][DT_PARSE_INDEX],
				0, &flags);
		/* NOTE(review): ">0" also rejects GPIO 0; presumably
		 * intentional on this platform -- confirm before relaxing. */
		if (gpio_no[i] > 0) {
			/* use "i" directly; it always equals auxpcm_cnt here
			 * since any failure aborts the loop */
			pin_data[i].gpio_name =
				msm_auxpcm_gpio_name[i][GPIO_NAME_INDEX];
			pin_data[i].gpio_no = gpio_no[i];
			dev_dbg(&pdev->dev, "%s:GPIO gpio[%s] =\n"
				"0x%x\n", __func__,
				pin_data[i].gpio_name,
				pin_data[i].gpio_no);
			auxpcm_cnt++;
		} else {
			dev_err(&pdev->dev, "%s:Invalid AUXPCM GPIO[%s]= %x\n",
				__func__,
				msm_auxpcm_gpio_name[i][GPIO_NAME_INDEX],
				gpio_no[i]);
			ret = -ENODEV;
			goto err;
		}
	}
	ctrl = devm_kzalloc(&pdev->dev,
			sizeof(struct msm_auxpcm_ctrl), GFP_KERNEL);
	if (!ctrl) {
		dev_err(&pdev->dev, "No memory for gpio\n");
		ret = -ENOMEM;
		goto err;
	}
	ctrl->pin_data = pin_data;
	ctrl->cnt = auxpcm_cnt;
	*auxpcm_ctrl = ctrl;
	return ret;

err:
	if (pin_data)
		devm_kfree(&pdev->dev, pin_data);
	return ret;
}
/*
 * Request the PMIC MCLK GPIO for the taiko codec, if one was found in DT.
 * Returns 0 when no GPIO is configured or the request succeeds, otherwise
 * the gpio_request() error.
 */
static int apq8074_prepare_codec_mclk(struct snd_soc_card *card)
{
	struct apq8074_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
	int rc;

	if (!pdata->mclk_gpio)
		return 0;

	rc = gpio_request(pdata->mclk_gpio, "TAIKO_CODEC_PMIC_MCLK");
	if (rc)
		dev_err(card->dev,
			"%s: Failed to request taiko mclk gpio %d\n",
			__func__, pdata->mclk_gpio);
	return rc;
}
/*
 * Request the US/EURO headset-detect swap GPIO, if one was found in DT.
 * Returns 0 when no GPIO is configured or the request succeeds, otherwise
 * the gpio_request() error.
 */
static int apq8074_prepare_us_euro(struct snd_soc_card *card)
{
	struct apq8074_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
	int rc = 0;

	if (pdata->us_euro_gpio) {
		dev_dbg(card->dev, "%s : us_euro gpio request %d", __func__,
			pdata->us_euro_gpio);
		rc = gpio_request(pdata->us_euro_gpio, "TAIKO_CODEC_US_EURO");
		if (rc)
			dev_err(card->dev,
				"%s: Failed to request taiko US/EURO gpio %d error %d\n",
				__func__, pdata->us_euro_gpio, rc);
	}
	return rc;
}
/*
 * Machine driver probe: parse the DT (AUXPCM GPIOs, card name, audio
 * routing, MCLK frequency/GPIO, optional HDMI and US/EURO properties),
 * assemble the DAI-link table, register the sound card, and map the
 * LPAIF AUXPCM mux-select register.
 *
 * NOTE(review): the error paths taken after snd_soc_register_card()
 * succeeds (missing/invalid qcom,prim-auxpcm-gpio-set, failed ioremap)
 * jump to "err" without calling snd_soc_unregister_card() -- presumably
 * a leak of the registered card; confirm and fix upstream.
 * NOTE(review): "err" frees mclk/us_euro GPIOs whenever their numbers are
 * positive, even when the corresponding gpio_request() never ran or
 * failed -- gpio_free() on an un-requested GPIO only warns, but worth
 * tightening.
 */
static __devinit int apq8074_asoc_machine_probe(struct platform_device *pdev)
{
	struct snd_soc_card *card = &snd_soc_card_apq8074;
	struct apq8074_asoc_mach_data *pdata;
	int ret;
	const char *auxpcm_pri_gpio_set = NULL;

	if (!pdev->dev.of_node) {
		dev_err(&pdev->dev, "No platform supplied from device tree\n");
		return -EINVAL;
	}
	pdata = devm_kzalloc(&pdev->dev,
			sizeof(struct apq8074_asoc_mach_data), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "Can't allocate apq8074_asoc_mach_data\n");
		return -ENOMEM;
	}
	/* Parse AUXPCM info from DT */
	ret = apq8074_dtparse_auxpcm(pdev, &pdata->pri_auxpcm_ctrl,
					msm_prim_auxpcm_gpio_name);
	if (ret) {
		dev_err(&pdev->dev,
		"%s: Auxpcm pin data parse failed\n", __func__);
		goto err;
	}
	card->dev = &pdev->dev;
	platform_set_drvdata(pdev, card);
	snd_soc_card_set_drvdata(card, pdata);
	ret = snd_soc_of_parse_card_name(card, "qcom,model");
	if (ret)
		goto err;
	ret = snd_soc_of_parse_audio_routing(card,
			"qcom,audio-routing");
	if (ret)
		goto err;
	ret = of_property_read_u32(pdev->dev.of_node,
			"qcom,taiko-mclk-clk-freq", &pdata->mclk_freq);
	if (ret) {
		dev_err(&pdev->dev, "Looking up %s property in node %s failed",
			"qcom,taiko-mclk-clk-freq",
			pdev->dev.of_node->full_name);
		goto err;
	}
	/* only a 9.6 MHz codec master clock is supported */
	if (pdata->mclk_freq != 9600000) {
		dev_err(&pdev->dev, "unsupported taiko mclk freq %u\n",
			pdata->mclk_freq);
		ret = -EINVAL;
		goto err;
	}
	pdata->mclk_gpio = of_get_named_gpio(pdev->dev.of_node,
				"qcom,cdc-mclk-gpios", 0);
	if (pdata->mclk_gpio < 0) {
		dev_err(&pdev->dev,
			"Looking up %s property in node %s failed %d\n",
			"qcom, cdc-mclk-gpios", pdev->dev.of_node->full_name,
			pdata->mclk_gpio);
		ret = -ENODEV;
		goto err;
	}
	ret = apq8074_prepare_codec_mclk(card);
	if (ret)
		goto err;
	/* append the HDMI back end only when the board advertises it */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,hdmi-audio-rx")) {
		dev_info(&pdev->dev, "%s(): hdmi audio support present\n",
				__func__);
		memcpy(apq8074_dai_links, apq8074_common_dai_links,
			sizeof(apq8074_common_dai_links));
		memcpy(apq8074_dai_links + ARRAY_SIZE(apq8074_common_dai_links),
			apq8074_hdmi_dai_link, sizeof(apq8074_hdmi_dai_link));
		card->dai_link = apq8074_dai_links;
		card->num_links = ARRAY_SIZE(apq8074_dai_links);
	} else {
		dev_info(&pdev->dev, "%s(): No hdmi audio support\n", __func__);
		card->dai_link = apq8074_common_dai_links;
		card->num_links = ARRAY_SIZE(apq8074_common_dai_links);
	}
	/* us_euro gpio is optional: a failed lookup only skips swap support */
	pdata->us_euro_gpio = of_get_named_gpio(pdev->dev.of_node,
				"qcom,us-euro-gpios", 0);
	if (pdata->us_euro_gpio < 0) {
		dev_err(&pdev->dev, "Looking up %s property in node %s failed",
			"qcom,us-euro-gpios",
			pdev->dev.of_node->full_name);
	} else {
		dev_dbg(&pdev->dev, "%s detected %d",
			"qcom,us-euro-gpios", pdata->us_euro_gpio);
		mbhc_cfg.swap_gnd_mic = apq8074_swap_gnd_mic;
	}
	ret = apq8074_prepare_us_euro(card);
	if (ret)
		dev_err(&pdev->dev, "apq8074_prepare_us_euro failed (%d)\n",
			ret);
	mutex_init(&cdc_mclk_mutex);
	atomic_set(&prim_auxpcm_rsc_ref, 0);
	spdev = pdev;
	ext_spk_amp_regulator = NULL;
	apq8074_liquid_dock_dev = NULL;
	ret = snd_soc_register_card(card);
	if (ret) {
		dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
			ret);
		goto err;
	}
	ret = of_property_read_string(pdev->dev.of_node,
		"qcom,prim-auxpcm-gpio-set", &auxpcm_pri_gpio_set);
	if (ret) {
		dev_err(&pdev->dev, "Looking up %s property in node %s failed",
			"qcom,prim-auxpcm-gpio-set",
			pdev->dev.of_node->full_name);
		goto err;
	}
	/* select which LPAIF pin set drives the primary AUXPCM */
	if (!strcmp(auxpcm_pri_gpio_set, "prim-gpio-prim")) {
		lpaif_pri_muxsel_virt_addr =
			ioremap(LPAIF_PRI_MODE_MUXSEL, 4);
	} else if (!strcmp(auxpcm_pri_gpio_set, "prim-gpio-tert")) {
		lpaif_pri_muxsel_virt_addr =
			ioremap(LPAIF_TER_MODE_MUXSEL, 4);
	} else {
		dev_err(&pdev->dev, "Invalid value %s for AUXPCM GPIO set\n",
			auxpcm_pri_gpio_set);
		ret = -EINVAL;
		goto err;
	}
	if (lpaif_pri_muxsel_virt_addr == NULL) {
		pr_err("%s Pri muxsel virt addr is null\n", __func__);
		ret = -EINVAL;
		goto err;
	}
	return 0;

err:
	if (pdata->mclk_gpio > 0) {
		dev_dbg(&pdev->dev, "%s free gpio %d\n",
			__func__, pdata->mclk_gpio);
		gpio_free(pdata->mclk_gpio);
		pdata->mclk_gpio = 0;
	}
	if (pdata->us_euro_gpio > 0) {
		dev_dbg(&pdev->dev, "%s free us_euro gpio %d\n",
			__func__, pdata->us_euro_gpio);
		gpio_free(pdata->us_euro_gpio);
		pdata->us_euro_gpio = 0;
	}
	devm_kfree(&pdev->dev, pdata);
	return ret;
}
/*
 * Machine driver remove: release the regulator, amp/mclk/us-euro GPIOs,
 * the liquid-dock helper (GPIO + IRQ), unmap the LPAIF mux register and
 * unregister the card.
 *
 * The mclk/us-euro frees are now guarded with gpio_is_valid(): probe
 * continues even when the optional qcom,us-euro-gpios lookup fails, in
 * which case pdata->us_euro_gpio holds a negative errno and the old
 * unconditional gpio_free() was called with an invalid number.
 */
static int __devexit apq8074_asoc_machine_remove(struct platform_device *pdev)
{
	struct snd_soc_card *card = platform_get_drvdata(pdev);
	struct apq8074_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);

	if (ext_spk_amp_regulator)
		regulator_put(ext_spk_amp_regulator);
	if (gpio_is_valid(ext_ult_spk_amp_gpio))
		gpio_free(ext_ult_spk_amp_gpio);
	if (gpio_is_valid(pdata->mclk_gpio))
		gpio_free(pdata->mclk_gpio);
	if (gpio_is_valid(pdata->us_euro_gpio))
		gpio_free(pdata->us_euro_gpio);
	if (gpio_is_valid(ext_spk_amp_gpio))
		gpio_free(ext_spk_amp_gpio);
	if (apq8074_liquid_dock_dev != NULL) {
		if (apq8074_liquid_dock_dev->dock_plug_gpio)
			gpio_free(apq8074_liquid_dock_dev->dock_plug_gpio);
		if (apq8074_liquid_dock_dev->dock_plug_irq)
			free_irq(apq8074_liquid_dock_dev->dock_plug_irq,
				 apq8074_liquid_dock_dev);
		kfree(apq8074_liquid_dock_dev);
		apq8074_liquid_dock_dev = NULL;
	}
	iounmap(lpaif_pri_muxsel_virt_addr);
	snd_soc_unregister_card(card);
	return 0;
}
/* Device-tree match table for this machine driver. */
static const struct of_device_id apq8074_asoc_machine_of_match[] = {
	{ .compatible = "qcom,apq8074-audio-taiko", },
	{},
};

static struct platform_driver apq8074_asoc_machine_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.pm = &snd_soc_pm_ops,
		.of_match_table = apq8074_asoc_machine_of_match,
	},
	.probe = apq8074_asoc_machine_probe,
	.remove = __devexit_p(apq8074_asoc_machine_remove),
};
/* Standard module registration boilerplate. */
module_platform_driver(apq8074_asoc_machine_driver);
MODULE_DESCRIPTION("ALSA SoC msm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_DEVICE_TABLE(of, apq8074_asoc_machine_of_match);
| gpl-2.0 |
abenagiel/android_kernel_fih_msm7x30 | drivers/staging/ath6kl/htc2/AR6000/ar6k_gmbox_hciuart.c | 432 | 47495 | //------------------------------------------------------------------------------
// <copyright file="ar6k_prot_hciUart.c" company="Atheros">
// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
//
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
//
//------------------------------------------------------------------------------
//==============================================================================
// Protocol module for use in bridging HCI-UART packets over the GMBOX interface
//
// Author(s): ="Atheros"
//==============================================================================
#include "a_config.h"
#include "athdefs.h"
#include "a_types.h"
#include "a_osapi.h"
#include "../htc_debug.h"
#include "hif.h"
#include "htc_packet.h"
#include "ar6k.h"
#include "hci_transport_api.h"
#include "gmboxif.h"
#include "ar6000_diag.h"
#include "hw/apb_map.h"
#include "hw/mbox_reg.h"
#ifdef ATH_AR6K_ENABLE_GMBOX
/* HCI-UART (H4) packet-type indicator bytes, first byte of every packet */
#define HCI_UART_COMMAND_PKT 0x01
#define HCI_UART_ACL_PKT 0x02
#define HCI_UART_SCO_PKT 0x03
#define HCI_UART_EVENT_PKT 0x04

/* RecvStateFlags / SendStateFlags bits */
#define HCI_RECV_WAIT_BUFFERS (1 << 0)
#define HCI_SEND_WAIT_CREDITS (1 << 0)

#define HCI_UART_BRIDGE_CREDIT_SIZE 128

/* retry count for the startup credit-counter poll in InitTxCreditState() */
#define CREDIT_POLL_COUNT 256
#define HCI_DELAY_PER_INTERVAL_MS 10
/* timeouts (ms) -- used by code later in this file */
#define BTON_TIMEOUT_MS 500
#define BTOFF_TIMEOUT_MS 500
#define BAUD_TIMEOUT_MS 1
#define BTPWRSAV_TIMEOUT_MS 1
/* Per-instance state for the HCI-UART-over-GMBOX bridge protocol. */
typedef struct {
	HCI_TRANSPORT_CONFIG_INFO HCIConfig;   /* callbacks/config from the HCI layer */
	A_BOOL HCIAttached;
	A_BOOL HCIStopped;
	A_UINT32 RecvStateFlags;               /* HCI_RECV_WAIT_BUFFERS */
	A_UINT32 SendStateFlags;               /* HCI_SEND_WAIT_CREDITS */
	HCI_TRANSPORT_PACKET_TYPE WaitBufferType; /* packet type we stalled on */
	HTC_PACKET_QUEUE SendQueue;        /* write queue holding HCI Command and ACL packets */
	HTC_PACKET_QUEUE HCIACLRecvBuffers;  /* recv queue holding buffers for incomming ACL packets */
	HTC_PACKET_QUEUE HCIEventBuffers;    /* recv queue holding buffers for incomming event packets */
	AR6K_DEVICE *pDev;
	A_MUTEX_T HCIRxLock;                 /* protects recv state/queues */
	A_MUTEX_T HCITxLock;                 /* protects credit state and SendQueue */
	int CreditsMax;                      /* total credits advertised by target */
	int CreditsConsumed;                 /* credits currently in flight */
	int CreditsAvailable;                /* credits free for new sends */
	int CreditSize;                      /* bytes per credit */
	int CreditsCurrentSeek;              /* credits needed for head-of-queue packet */
	int SendProcessCount;                /* reentrancy counter for send processing */
} GMBOX_PROTO_HCI_UART;

#define LOCK_HCI_RX(t) A_MUTEX_LOCK(&(t)->HCIRxLock);
#define UNLOCK_HCI_RX(t) A_MUTEX_UNLOCK(&(t)->HCIRxLock);
#define LOCK_HCI_TX(t) A_MUTEX_LOCK(&(t)->HCITxLock);
#define UNLOCK_HCI_TX(t) A_MUTEX_UNLOCK(&(t)->HCITxLock);

/* deliver a received packet (or its error status) up to the HCI layer */
#define DO_HCI_RECV_INDICATION(p,pt) \
{ AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI: Indicate Recv on packet:0x%lX status:%d len:%d type:%d \n", \
(unsigned long)(pt),(pt)->Status, A_SUCCESS((pt)->Status) ? (pt)->ActualLength : 0, HCI_GET_PACKET_TYPE(pt))); \
(p)->HCIConfig.pHCIPktRecv((p)->HCIConfig.pContext, (pt)); \
}

/* signal send completion (success or failure) up to the HCI layer */
#define DO_HCI_SEND_INDICATION(p,pt) \
{ AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: Indicate Send on packet:0x%lX status:%d type:%d \n", \
(unsigned long)(pt),(pt)->Status,HCI_GET_PACKET_TYPE(pt))); \
(p)->HCIConfig.pHCISendComplete((p)->HCIConfig.pContext, (pt)); \
}
static A_STATUS HCITrySend(GMBOX_PROTO_HCI_UART *pProt, HTC_PACKET *pPacket, A_BOOL Synchronous);
/* Tear down a protocol instance: destroy both locks, then free the state. */
static void HCIUartCleanup(GMBOX_PROTO_HCI_UART *pProtocol)
{
    A_ASSERT(pProtocol != NULL);

    /* the two locks are independent; order of deletion is irrelevant */
    A_MUTEX_DELETE(&pProtocol->HCITxLock);
    A_MUTEX_DELETE(&pProtocol->HCIRxLock);

    A_FREE(pProtocol);
}
/*
 * Establish the initial TX credit state with the target.
 * Polls the GMBOX credit counter until all startup credits have been
 * drained (the counter is read-and-clear, so multiple reads may be
 * needed), then reads the per-credit size.  May only be called once
 * per target reset (CreditsMax must still be zero).
 * Returns A_OK on success; A_ERROR if the target never produced credits
 * within CREDIT_POLL_COUNT retries.
 */
static A_STATUS InitTxCreditState(GMBOX_PROTO_HCI_UART *pProt)
{
    A_STATUS status;
    int credits;
    int creditPollCount = CREDIT_POLL_COUNT;
    A_BOOL gotCredits = FALSE;

    pProt->CreditsConsumed = 0;

    do {

        if (pProt->CreditsMax != 0) {
            /* we can only call this only once per target reset */
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI: InitTxCreditState - already called! \n"));
            A_ASSERT(FALSE);
            status = A_EINVAL;
            break;
        }

        /* read the credit counter. At startup the target will set the credit counter
         * to the max available, we read this in a loop because it may take
         * multiple credit counter reads to get all credits  */
        while (creditPollCount) {

            credits = 0;

            status = DevGMboxReadCreditCounter(pProt->pDev, PROC_IO_SYNC, &credits);

            if (A_FAILED(status)) {
                break;
            }

            if (!gotCredits && (0 == credits)) {
                /* target not ready yet: burn a retry and wait */
                creditPollCount--;
                AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: credit is 0, retrying (%d) \n",creditPollCount));
                A_MDELAY(HCI_DELAY_PER_INTERVAL_MS);
                continue;
            } else {
                gotCredits = TRUE;
            }

            if (0 == credits) {
                /* counter drained: all startup credits accumulated */
                break;
            }

            pProt->CreditsMax += credits;
        }

        if (A_FAILED(status)) {
            break;
        }

        if (0 == creditPollCount) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                ("** HCI : Failed to get credits! GMBOX Target was not available \n"));
            status = A_ERROR;
            break;
        }

        /* now get the size */
        status = DevGMboxReadCreditSize(pProt->pDev, &pProt->CreditSize);

        if (A_FAILED(status)) {
            break;
        }

    } while (FALSE);

    if (A_SUCCESS(status)) {
        pProt->CreditsAvailable = pProt->CreditsMax;
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("HCI : InitTxCreditState - credits avail: %d, size: %d \n",
            pProt->CreditsAvailable, pProt->CreditSize));
    }

    return status;
}
/*
 * Credit-update callback: invoked when the credit IRQ fires or a credit
 * counter read completes.  Folds newly reported credits into the
 * bookkeeping, decides whether the credit IRQ must be (dis)armed, and
 * kicks pending sends once the head-of-queue packet's credit requirement
 * (CreditsCurrentSeek) is met.  Must not block: only async register
 * accesses are issued from here.
 *
 * Fix: the exit trace previously printed "+CreditsAvailableCallback",
 * duplicating the entry marker; it now prints the conventional "-"
 * exit marker so entry/exit pairs match in the debug log.
 */
static A_STATUS CreditsAvailableCallback(void *pContext, int Credits, A_BOOL CreditIRQEnabled)
{
    GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)pContext;
    A_BOOL enableCreditIrq = FALSE;
    A_BOOL disableCreditIrq = FALSE;
    A_BOOL doPendingSends = FALSE;
    A_STATUS status = A_OK;

    /** this callback is called under 2 conditions:
     *  1. The credit IRQ interrupt was enabled and signaled.
     *  2. A credit counter read completed.
     *
     *  The function must not assume that the calling context can block !
     */

    AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+CreditsAvailableCallback (Credits:%d, IRQ:%s) \n",
            Credits, CreditIRQEnabled ? "ON" : "OFF"));

    LOCK_HCI_TX(pProt);

    do {

        if (0 == Credits) {
            /* no credits yet: arm the IRQ so the target can notify us */
            if (!CreditIRQEnabled) {
                /* enable credit IRQ */
                enableCreditIrq = TRUE;
            }
            break;
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: current credit state, consumed:%d available:%d max:%d seek:%d\n",
                        pProt->CreditsConsumed,
                        pProt->CreditsAvailable,
                        pProt->CreditsMax,
                        pProt->CreditsCurrentSeek));

        pProt->CreditsAvailable += Credits;
        A_ASSERT(pProt->CreditsAvailable <= pProt->CreditsMax);
        pProt->CreditsConsumed  -= Credits;
        A_ASSERT(pProt->CreditsConsumed >= 0);

        AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: new credit state, consumed:%d available:%d max:%d seek:%d\n",
                        pProt->CreditsConsumed,
                        pProt->CreditsAvailable,
                        pProt->CreditsMax,
                        pProt->CreditsCurrentSeek));

        if (pProt->CreditsAvailable >= pProt->CreditsCurrentSeek) {
            /* we have enough credits to fullfill at least 1 packet waiting in the queue */
            pProt->CreditsCurrentSeek = 0;
            pProt->SendStateFlags &= ~HCI_SEND_WAIT_CREDITS;
            doPendingSends = TRUE;
            if (CreditIRQEnabled) {
                /* credit IRQ was enabled, we shouldn't need it anymore */
                disableCreditIrq = TRUE;
            }
        } else {
            /* not enough credits yet, enable credit IRQ if we haven't already */
            if (!CreditIRQEnabled) {
                enableCreditIrq = TRUE;
            }
        }

    } while (FALSE);

    UNLOCK_HCI_TX(pProt);

    if (enableCreditIrq) {
        AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" Enabling credit count IRQ...\n"));
            /* must use async only */
        status = DevGMboxIRQAction(pProt->pDev, GMBOX_CREDIT_IRQ_ENABLE, PROC_IO_ASYNC);
    } else if (disableCreditIrq) {
            /* must use async only */
        AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" Disabling credit count IRQ...\n"));
        status = DevGMboxIRQAction(pProt->pDev, GMBOX_CREDIT_IRQ_DISABLE, PROC_IO_ASYNC);
    }

    if (doPendingSends) {
        HCITrySend(pProt, NULL, FALSE);
    }

    AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-CreditsAvailableCallback \n"));
    return status;
}
static INLINE void NotifyTransportFailure(GMBOX_PROTO_HCI_UART *pProt, A_STATUS status)
{
    /* Relay a fatal transport error to the attached HCI client, if it
     * registered a TransportFailure callback at attach time. */
    if (NULL == pProt->HCIConfig.TransportFailure) {
        return;
    }
    pProt->HCIConfig.TransportFailure(pProt->HCIConfig.pContext, status);
}
static void FailureCallback(void *pContext, A_STATUS Status)
{
    /* GMBOX layer signalled a target assertion: forward the failure
     * to the attached HCI client. */
    NotifyTransportFailure((GMBOX_PROTO_HCI_UART *)pContext, Status);
}
/* Debug hook invoked by the GMBOX layer: dump the HCI-UART protocol's
 * send/recv state machine flags and TX credit accounting to the log. */
static void StateDumpCallback(void *pContext)
{
    GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)pContext;

    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("============ HCIUart State ======================\n"));
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("RecvStateFlags   :  0x%X \n",pProt->RecvStateFlags));
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("SendStateFlags   :  0x%X \n",pProt->SendStateFlags));
    /* packet type recv is currently blocked on (valid when waiting for buffers) */
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("WaitBufferType   :  %d \n",pProt->WaitBufferType));
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("SendQueue Depth  :  %d \n",HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue)));
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("CreditsMax       :  %d \n",pProt->CreditsMax));
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("CreditsConsumed  :  %d \n",pProt->CreditsConsumed));
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("CreditsAvailable :  %d \n",pProt->CreditsAvailable));
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("==================================================\n"));
}
/*
 * GMBOX "message pending" handler: called when lookahead bytes for an
 * incoming HCI-UART (H4-framed) message are available.
 *
 * Flow: parse the lookahead to determine packet type and total length,
 * obtain a receive buffer (per-packet alloc callback, or a pre-queued
 * buffer with optional refill), synchronously read the message, validate
 * its header against the lookahead, and indicate it to the HCI client.
 * On buffer starvation the receiver IRQ is disabled until buffers are
 * re-supplied (HCI_RECV_WAIT_BUFFERS); on error the packet is recycled
 * and the transport-failure callback fires.
 *
 * pContext       : GMBOX_PROTO_HCI_UART instance
 * LookAheadBytes : first bytes of the pending message (H4 type octet first)
 * ValidBytes     : number of valid lookahead bytes
 * Returns A_OK, or a failure code (e.g. A_EPROTO on framing mismatch).
 */
static A_STATUS HCIUartMessagePending(void *pContext, A_UINT8 LookAheadBytes[], int ValidBytes)
{
    GMBOX_PROTO_HCI_UART      *pProt = (GMBOX_PROTO_HCI_UART *)pContext;
    A_STATUS                   status = A_OK;
    int                        totalRecvLength = 0;
    HCI_TRANSPORT_PACKET_TYPE  pktType = HCI_PACKET_INVALID;
    A_BOOL                     recvRefillCalled = FALSE;
    A_BOOL                     blockRecv = FALSE;
    HTC_PACKET                *pPacket = NULL;

    /** caller guarantees that this is a fully block-able context (synch I/O is allowed) */

    AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HCIUartMessagePending Lookahead Bytes:%d \n",ValidBytes));

    LOCK_HCI_RX(pProt);

    do {

        if (ValidBytes < 3) {
            /* not enough for ACL or event header */
            break;
        }

        if ((LookAheadBytes[0] == HCI_UART_ACL_PKT) && (ValidBytes < 5)) {
            /* not enough for ACL data header */
            break;
        }

        /* H4 framing: byte 0 is the packet type indicator */
        switch (LookAheadBytes[0]) {
            case HCI_UART_EVENT_PKT:
                AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI Event: %d param length: %d \n",
                        LookAheadBytes[1], LookAheadBytes[2]));
                /* event: byte 2 is the parameter length */
                totalRecvLength = LookAheadBytes[2];
                totalRecvLength += 3; /* add type + event code + length field */
                pktType = HCI_EVENT_TYPE;
                break;
            case HCI_UART_ACL_PKT:
                /* ACL: bytes 3..4 are the little-endian data length */
                totalRecvLength = (LookAheadBytes[4] << 8) | LookAheadBytes[3];
                AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI ACL: conn:0x%X length: %d \n",
                        ((LookAheadBytes[2] & 0xF0) << 8) | LookAheadBytes[1], totalRecvLength));
                totalRecvLength += 5; /* add type + connection handle + length field */
                pktType = HCI_ACL_TYPE;
                break;
            default:
                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("**Invalid HCI packet type: %d \n",LookAheadBytes[0]));
                status = A_EPROTO;
                break;
        }

        if (A_FAILED(status)) {
            break;
        }

        if (pProt->HCIConfig.pHCIPktRecvAlloc != NULL) {
            /* drop the lock around the client callback */
            UNLOCK_HCI_RX(pProt);
            /* user is using a per-packet allocation callback */
            pPacket = pProt->HCIConfig.pHCIPktRecvAlloc(pProt->HCIConfig.pContext,
                                                        pktType,
                                                        totalRecvLength);
            LOCK_HCI_RX(pProt);
        } else {
            HTC_PACKET_QUEUE *pQueue;
            /* user is using a refill handler that can refill multiple HTC buffers */

            /* select buffer queue */
            if (pktType == HCI_ACL_TYPE) {
                pQueue = &pProt->HCIACLRecvBuffers;
            } else {
                pQueue = &pProt->HCIEventBuffers;
            }

            if (HTC_QUEUE_EMPTY(pQueue)) {
                AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
                    ("** HCI pkt type: %d has no buffers available calling allocation handler \n",
                    pktType));
                /* check for refill handler */
                if (pProt->HCIConfig.pHCIPktRecvRefill != NULL) {
                    recvRefillCalled = TRUE;
                    UNLOCK_HCI_RX(pProt);
                    /* call the re-fill handler */
                    pProt->HCIConfig.pHCIPktRecvRefill(pProt->HCIConfig.pContext,
                                                       pktType,
                                                       0);
                    LOCK_HCI_RX(pProt);
                    /* check if we have more buffers */
                    pPacket = HTC_PACKET_DEQUEUE(pQueue);
                    /* fall through */
                }
            } else {
                pPacket = HTC_PACKET_DEQUEUE(pQueue);
                AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
                    ("HCI pkt type: %d now has %d recv buffers left \n",
                        pktType, HTC_PACKET_QUEUE_DEPTH(pQueue)));
            }
        }

        if (NULL == pPacket) {
            AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
                    ("** HCI pkt type: %d has no buffers available stopping recv...\n", pktType));
            /* this is not an error, we simply need to mark that we are waiting for buffers.*/
            pProt->RecvStateFlags |= HCI_RECV_WAIT_BUFFERS;
            pProt->WaitBufferType = pktType;
            blockRecv = TRUE;
            break;
        }

        if (totalRecvLength > (int)pPacket->BufferLength) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI-UART pkt: %d requires %d bytes (%d buffer bytes avail) ! \n",
                LookAheadBytes[0], totalRecvLength, pPacket->BufferLength));
            status = A_EINVAL;
            break;
        }

    } while (FALSE);

    UNLOCK_HCI_RX(pProt);

        /* locks are released, we can go fetch the packet */

    do {

        if (A_FAILED(status) || (NULL == pPacket)) {
            break;
        }

        /* do this synchronously, we don't need to be fast here */
        pPacket->Completion = NULL;

        AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI : getting recv packet len:%d hci-uart-type: %s \n",
                totalRecvLength, (LookAheadBytes[0] == HCI_UART_EVENT_PKT) ? "EVENT" : "ACL"));

        status = DevGMboxRead(pProt->pDev, pPacket, totalRecvLength);

        if (A_FAILED(status)) {
            break;
        }

        /* first byte of the received buffer must echo the lookahead type */
        if (pPacket->pBuffer[0] != LookAheadBytes[0]) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI buffer does not contain expected packet type: %d ! \n",
                        pPacket->pBuffer[0]));
            status = A_EPROTO;
            break;
        }

        if (pPacket->pBuffer[0] == HCI_UART_EVENT_PKT) {
            /* validate event header fields */
            if ((pPacket->pBuffer[1] != LookAheadBytes[1]) ||
                (pPacket->pBuffer[2] != LookAheadBytes[2])) {
                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI buffer does not match lookahead! \n"));
                DebugDumpBytes(LookAheadBytes, 3, "Expected HCI-UART Header");
                DebugDumpBytes(pPacket->pBuffer, 3, "** Bad HCI-UART Header");
                status = A_EPROTO;
                break;
            }
        } else if (pPacket->pBuffer[0] == HCI_UART_ACL_PKT) {
            /* validate acl header fields */
            if ((pPacket->pBuffer[1] != LookAheadBytes[1]) ||
                (pPacket->pBuffer[2] != LookAheadBytes[2]) ||
                (pPacket->pBuffer[3] != LookAheadBytes[3]) ||
                (pPacket->pBuffer[4] != LookAheadBytes[4])) {
                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("** HCI buffer does not match lookahead! \n"));
                DebugDumpBytes(LookAheadBytes, 5, "Expected HCI-UART Header");
                DebugDumpBytes(pPacket->pBuffer, 5, "** Bad HCI-UART Header");
                status = A_EPROTO;
                break;
            }
        }

        /* adjust buffer to move past packet ID */
        pPacket->pBuffer++;
        pPacket->ActualLength = totalRecvLength - 1;
        pPacket->Status = A_OK;
        /* indicate packet */
        DO_HCI_RECV_INDICATION(pProt,pPacket);
        /* ownership transferred to the client; don't recycle below */
        pPacket = NULL;

        /* check if we need to refill recv buffers */
        if ((pProt->HCIConfig.pHCIPktRecvRefill != NULL) && !recvRefillCalled) {
            HTC_PACKET_QUEUE *pQueue;
            int              watermark;

            if (pktType == HCI_ACL_TYPE) {
                watermark = pProt->HCIConfig.ACLRecvBufferWaterMark;
                pQueue = &pProt->HCIACLRecvBuffers;
            } else {
                watermark = pProt->HCIConfig.EventRecvBufferWaterMark;
                pQueue = &pProt->HCIEventBuffers;
            }

            /* NOTE(review): queue depth read without the RX lock here —
             * presumably acceptable for a heuristic refill trigger; confirm. */
            if (HTC_PACKET_QUEUE_DEPTH(pQueue) < watermark) {
                AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
                    ("** HCI pkt type: %d watermark hit (%d) current:%d \n",
                    pktType, watermark, HTC_PACKET_QUEUE_DEPTH(pQueue)));
                /* call the re-fill handler */
                pProt->HCIConfig.pHCIPktRecvRefill(pProt->HCIConfig.pContext,
                                                   pktType,
                                                   HTC_PACKET_QUEUE_DEPTH(pQueue));
            }
        }

    } while (FALSE);

        /* check if we need to disable the reciever */
    if (A_FAILED(status) || blockRecv) {
        DevGMboxIRQAction(pProt->pDev, GMBOX_RECV_IRQ_DISABLE, PROC_IO_SYNC);
    }

        /* see if we need to recycle the recv buffer */
    if (A_FAILED(status) && (pPacket != NULL)) {
        HTC_PACKET_QUEUE queue;

        if (A_EPROTO == status) {
            DebugDumpBytes(pPacket->pBuffer, totalRecvLength, "Bad HCI-UART Recv packet");
        }
        /* recycle packet */
        HTC_PACKET_RESET_RX(pPacket);
        INIT_HTC_PACKET_QUEUE_AND_ADD(&queue,pPacket);
        HCI_TransportAddReceivePkts(pProt,&queue);
        NotifyTransportFailure(pProt,status);
    }

    /* NOTE(review): exit trace uses "+" prefix rather than the usual "-" */
    AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HCIUartMessagePending \n"));

    return status;
}
/*
 * Completion handler for asynchronous GMBOX writes.
 * Logs a failed send, then returns the packet to the HCI client via
 * the send-complete indication.
 */
static void HCISendPacketCompletion(void *Context, HTC_PACKET *pPacket)
{
    GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)Context;

    AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HCISendPacketCompletion (pPacket:0x%lX) \n",(unsigned long)pPacket));

    if (A_FAILED(pPacket->Status)) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" Send Packet (0x%lX) failed: %d , len:%d \n",
            (unsigned long)pPacket, pPacket->Status, pPacket->ActualLength));
    }

    DO_HCI_SEND_INDICATION(pProt,pPacket);

    /* BUGFIX: exit trace previously printed "+HCISendPacketCompletion";
     * this file's convention is "+" on entry and "-" on exit
     * (see HCIUartMessagePending / HCITrySend). */
    AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HCISendPacketCompletion \n"));
}
/*
 * Synchronously poll the target's credit counter until enough TX credits
 * accumulate to satisfy CreditsCurrentSeek. Polls at 20 ms intervals,
 * giving up with A_EBUSY after 100 reads.
 */
static A_STATUS SeekCreditsSynch(GMBOX_PROTO_HCI_UART *pProt)
{
    A_STATUS status = A_OK;
    int      credits;
    int      attemptsLeft;

    for (attemptsLeft = 100; ; attemptsLeft--) {
        credits = 0;
        status = DevGMboxReadCreditCounter(pProt->pDev, PROC_IO_SYNC, &credits);
        if (A_FAILED(status)) {
            break;
        }
        /* fold the freshly reported credits into the shared accounting */
        LOCK_HCI_TX(pProt);
        pProt->CreditsAvailable += credits;
        pProt->CreditsConsumed  -= credits;
        if (pProt->CreditsAvailable >= pProt->CreditsCurrentSeek) {
            /* goal reached: clear the seek target and stop polling */
            pProt->CreditsCurrentSeek = 0;
            UNLOCK_HCI_TX(pProt);
            break;
        }
        UNLOCK_HCI_TX(pProt);

        if (attemptsLeft == 1) {
            /* this was the final attempt */
            status = A_EBUSY;
            break;
        }
        A_MDELAY(20);
    }

    return status;
}
/*
 * Common TX path: queue the (optional) new packet and drain the send
 * queue while enough target credits are available.
 *
 * pPacket may be NULL (e.g. when called from the credit callback just
 * to drain pending sends). In synchronous mode the queue may hold only
 * this one packet and credits are sought by blocking polls; in async
 * mode a credit shortfall re-queues the head packet, sets
 * HCI_SEND_WAIT_CREDITS and schedules an async credit-counter read.
 *
 * SendProcessCount acts as a reentrancy guard: only the first entrant
 * drains the queue; nested/concurrent callers just enqueue and leave.
 * Returns A_OK or a failure code; in async mode a failed pPacket is
 * completed back to the client with the error status.
 */
static A_STATUS HCITrySend(GMBOX_PROTO_HCI_UART *pProt, HTC_PACKET *pPacket, A_BOOL Synchronous)
{
    A_STATUS status = A_OK;
    int      transferLength;
    int      creditsRequired, remainder;
    A_UINT8  hciUartType;
    A_BOOL   synchSendComplete = FALSE;

    AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("+HCITrySend (pPacket:0x%lX) %s \n",(unsigned long)pPacket,
            Synchronous ? "SYNC" :"ASYNC"));

    LOCK_HCI_TX(pProt);

        /* increment write processing count on entry */
    pProt->SendProcessCount++;

    do {

        if (pProt->HCIStopped) {
            status = A_ECANCELED;
            break;
        }

        if (pPacket != NULL) {
            /* packet was supplied */
            if (Synchronous) {
                /* in synchronous mode, the send queue can only hold 1 packet */
                if (!HTC_QUEUE_EMPTY(&pProt->SendQueue)) {
                    status = A_EBUSY;
                    A_ASSERT(FALSE);
                    break;
                }

                if (pProt->SendProcessCount > 1) {
                        /* another thread or task is draining the TX queues */
                    status = A_EBUSY;
                    A_ASSERT(FALSE);
                    break;
                }

                HTC_PACKET_ENQUEUE(&pProt->SendQueue,pPacket);

            } else {
                    /* see if adding this packet hits the max depth (asynchronous mode only) */
                if ((pProt->HCIConfig.MaxSendQueueDepth > 0) &&
                    ((HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue) + 1) >= pProt->HCIConfig.MaxSendQueueDepth)) {
                    AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("HCI Send queue is full, Depth:%d, Max:%d \n",
                            HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue),
                            pProt->HCIConfig.MaxSendQueueDepth));
                        /* queue will be full, invoke any callbacks to determine what action to take */
                    if (pProt->HCIConfig.pHCISendFull != NULL) {
                        AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                                        ("HCI : Calling driver's send full callback.... \n"));
                        if (pProt->HCIConfig.pHCISendFull(pProt->HCIConfig.pContext,
                                                          pPacket) == HCI_SEND_FULL_DROP) {
                                /* drop it */
                            status = A_NO_RESOURCE;
                            break;
                        }
                    }
                }

                HTC_PACKET_ENQUEUE(&pProt->SendQueue,pPacket);
            }

        }

        if (pProt->SendStateFlags & HCI_SEND_WAIT_CREDITS) {
            /* still short of credits; the credit callback will resume us */
            break;
        }

        if (pProt->SendProcessCount > 1) {
                /* another thread or task is draining the TX queues */
            break;
        }

    /***** beyond this point only 1 thread may enter ******/

        /* now drain the send queue for transmission as long as we have enough
         * credits */
        while (!HTC_QUEUE_EMPTY(&pProt->SendQueue)) {

            pPacket = HTC_PACKET_DEQUEUE(&pProt->SendQueue);

            /* map transport packet type to the H4 type octet */
            switch (HCI_GET_PACKET_TYPE(pPacket)) {
                case HCI_COMMAND_TYPE:
                    hciUartType = HCI_UART_COMMAND_PKT;
                    break;
                case HCI_ACL_TYPE:
                    hciUartType = HCI_UART_ACL_PKT;
                    break;
                default:
                    status = A_EINVAL;
                    A_ASSERT(FALSE);
                    break;
            }

            if (A_FAILED(status)) {
                break;
            }

            AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: Got head packet:0x%lX , Type:%d  Length: %d Remaining Queue Depth: %d\n",
                (unsigned long)pPacket, HCI_GET_PACKET_TYPE(pPacket), pPacket->ActualLength,
                HTC_PACKET_QUEUE_DEPTH(&pProt->SendQueue)));

            transferLength = 1;  /* UART type header is 1 byte */
            transferLength += pPacket->ActualLength;
            transferLength = DEV_CALC_SEND_PADDED_LEN(pProt->pDev, transferLength);

                /* figure out how many credits this message requires */
            creditsRequired = transferLength / pProt->CreditSize;
            remainder = transferLength % pProt->CreditSize;

            if (remainder) {
                creditsRequired++;
            }

            AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: Creds Required:%d   Got:%d\n",
                            creditsRequired, pProt->CreditsAvailable));

            if (creditsRequired > pProt->CreditsAvailable) {
                if (Synchronous) {
                        /* in synchronous mode we need to seek credits in synchronously */
                    pProt->CreditsCurrentSeek = creditsRequired;
                    UNLOCK_HCI_TX(pProt);
                    status = SeekCreditsSynch(pProt);
                    LOCK_HCI_TX(pProt);
                    if (A_FAILED(status)) {
                        break;
                    }
                    /* fall through and continue processing this send op */
                } else {
                        /* not enough credits, queue back to the head */
                    HTC_PACKET_ENQUEUE_TO_HEAD(&pProt->SendQueue,pPacket);
                        /* waiting for credits */
                    pProt->SendStateFlags |= HCI_SEND_WAIT_CREDITS;
                        /* provide a hint to reduce attempts to re-send if credits are dribbling back
                         * this hint is the short fall of credits */
                    pProt->CreditsCurrentSeek = creditsRequired;
                    AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: packet:0x%lX placed back in queue. head packet needs: %d credits \n",
                                        (unsigned long)pPacket, pProt->CreditsCurrentSeek));
                    pPacket = NULL;
                    UNLOCK_HCI_TX(pProt);

                        /* schedule a credit counter read, our CreditsAvailableCallback callback will be called
                         * with the result */
                    DevGMboxReadCreditCounter(pProt->pDev, PROC_IO_ASYNC, NULL);

                    LOCK_HCI_TX(pProt);
                    break;
                }
            }

                /* caller guarantees some head room */
            pPacket->pBuffer--;
            pPacket->pBuffer[0] = hciUartType;

            pProt->CreditsAvailable -= creditsRequired;
            pProt->CreditsConsumed  += creditsRequired;
            A_ASSERT(pProt->CreditsConsumed <= pProt->CreditsMax);

            AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("HCI: new credit state: consumed:%d   available:%d max:%d\n",
                            pProt->CreditsConsumed, pProt->CreditsAvailable, pProt->CreditsMax));

            UNLOCK_HCI_TX(pProt);

                /* write it out */
            if (Synchronous) {
                pPacket->Completion = NULL;
                pPacket->pContext = NULL;
            } else {
                pPacket->Completion = HCISendPacketCompletion;
                pPacket->pContext = pProt;
            }

            status = DevGMboxWrite(pProt->pDev,pPacket,transferLength);

            if (Synchronous) {
                synchSendComplete = TRUE;
            } else {
                /* async path owns the packet now (completion will indicate it) */
                pPacket = NULL;
            }

            LOCK_HCI_TX(pProt);

        }

    } while (FALSE);

    pProt->SendProcessCount--;
    A_ASSERT(pProt->SendProcessCount >= 0);
    UNLOCK_HCI_TX(pProt);

    if (Synchronous) {
        A_ASSERT(pPacket != NULL);
        if (A_SUCCESS(status) && (!synchSendComplete)) {
            /* sync packet never made it to the wire (should not happen) */
            status = A_EBUSY;
            A_ASSERT(FALSE);
            LOCK_HCI_TX(pProt);
            if (pPacket->ListLink.pNext != NULL) {
                /* remove from the queue */
                HTC_PACKET_REMOVE(&pProt->SendQueue,pPacket);
            }
            UNLOCK_HCI_TX(pProt);
        }
    } else {
        if (A_FAILED(status) && (pPacket != NULL)) {
            /* async failure: complete the packet back with the error */
            pPacket->Status = status;
            DO_HCI_SEND_INDICATION(pProt,pPacket);
        }
    }

    AR_DEBUG_PRINTF(ATH_DEBUG_SEND,("-HCITrySend:  \n"));
    return status;
}
/*
 * Cancel every packet waiting in the TX queue: detach them under the
 * TX lock, then complete each back to the client with A_ECANCELED.
 */
static void FlushSendQueue(GMBOX_PROTO_HCI_UART *pProt)
{
    HTC_PACKET_QUEUE cancelQueue;
    HTC_PACKET      *pkt;

    INIT_HTC_PACKET_QUEUE(&cancelQueue);

    /* detach all queued packets while holding the lock */
    LOCK_HCI_TX(pProt);
    if (!HTC_QUEUE_EMPTY(&pProt->SendQueue)) {
        HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&cancelQueue,&pProt->SendQueue);
    }
    UNLOCK_HCI_TX(pProt);

    /* complete the detached packets outside the lock */
    for (;;) {
        if (HTC_QUEUE_EMPTY(&cancelQueue)) {
            break;
        }
        pkt = HTC_PACKET_DEQUEUE(&cancelQueue);
        pkt->Status = A_ECANCELED;
        DO_HCI_SEND_INDICATION(pProt,pkt);
    }
}
/*
 * Return every unused receive buffer (ACL and event queues) to the
 * client with A_ECANCELED so it can reclaim the memory.
 */
static void FlushRecvBuffers(GMBOX_PROTO_HCI_UART *pProt)
{
    HTC_PACKET_QUEUE cancelQueue;
    HTC_PACKET      *pkt;

    INIT_HTC_PACKET_QUEUE(&cancelQueue);

    /* move both buffer pools onto a local list under the RX lock */
    LOCK_HCI_RX(pProt);
    if (!HTC_QUEUE_EMPTY(&pProt->HCIACLRecvBuffers)) {
        HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&cancelQueue,&pProt->HCIACLRecvBuffers);
    }
    if (!HTC_QUEUE_EMPTY(&pProt->HCIEventBuffers)) {
        HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&cancelQueue,&pProt->HCIEventBuffers);
    }
    UNLOCK_HCI_RX(pProt);

    /* indicate each buffer back to its owner outside the lock */
    for (;;) {
        if (HTC_QUEUE_EMPTY(&cancelQueue)) {
            break;
        }
        pkt = HTC_PACKET_DEQUEUE(&cancelQueue);
        pkt->Status = A_ECANCELED;
        DO_HCI_RECV_INDICATION(pProt,pkt);
    }
}
/*** protocol module install entry point ***/
/*
 * Protocol module install entry point: allocate and initialize the
 * HCI-UART protocol instance and register its callbacks with the
 * GMBOX layer. Returns A_NO_MEMORY if the instance cannot be allocated.
 */
A_STATUS GMboxProtocolInstall(AR6K_DEVICE *pDev)
{
    A_STATUS              status = A_OK;
    GMBOX_PROTO_HCI_UART *pProtocol;

    pProtocol = A_MALLOC(sizeof(GMBOX_PROTO_HCI_UART));

    if (NULL == pProtocol) {
        status = A_NO_MEMORY;
    } else {
        /* zero the instance, then set up queues and locks */
        A_MEMZERO(pProtocol, sizeof(*pProtocol));
        pProtocol->pDev = pDev;
        INIT_HTC_PACKET_QUEUE(&pProtocol->SendQueue);
        INIT_HTC_PACKET_QUEUE(&pProtocol->HCIACLRecvBuffers);
        INIT_HTC_PACKET_QUEUE(&pProtocol->HCIEventBuffers);
        A_MUTEX_INIT(&pProtocol->HCIRxLock);
        A_MUTEX_INIT(&pProtocol->HCITxLock);
    }

    if (A_SUCCESS(status)) {
        /* hook our handlers into the GMBOX layer */
        LOCK_AR6K(pDev);
        DEV_GMBOX_SET_PROTOCOL(pDev,
                               HCIUartMessagePending,
                               CreditsAvailableCallback,
                               FailureCallback,
                               StateDumpCallback,
                               pProtocol);
        UNLOCK_AR6K(pDev);
    } else {
        if (pProtocol != NULL) {
            HCIUartCleanup(pProtocol);
        }
    }

    return status;
}
/*** protocol module uninstall entry point ***/
/*
 * Protocol module uninstall entry point: notify any attached client,
 * free the protocol instance and clear the GMBOX protocol hooks.
 */
void GMboxProtocolUninstall(AR6K_DEVICE *pDev)
{
    GMBOX_PROTO_HCI_UART *pProtocol = (GMBOX_PROTO_HCI_UART *)DEV_GMBOX_GET_PROTOCOL(pDev);

    if (NULL == pProtocol) {
        return;
    }

    /* notify anyone attached */
    if (pProtocol->HCIAttached) {
        A_ASSERT(pProtocol->HCIConfig.TransportRemoved != NULL);
        pProtocol->HCIConfig.TransportRemoved(pProtocol->HCIConfig.pContext);
        pProtocol->HCIAttached = FALSE;
    }

    HCIUartCleanup(pProtocol);
    DEV_GMBOX_SET_PROTOCOL(pDev,NULL,NULL,NULL,NULL,NULL);
}
/*
 * Tell the attached HCI client the transport is ready, passing the
 * buffer-layout properties it must honor (head room for the H4 type
 * octet, I/O block padding). No-op if no client is attached.
 */
static A_STATUS NotifyTransportReady(GMBOX_PROTO_HCI_UART *pProt)
{
    HCI_TRANSPORT_PROPERTIES props;
    A_STATUS                 status = A_OK;

    A_MEMZERO(&props,sizeof(props));

    /* HCI UART only needs one extra byte at the head to indicate the packet TYPE */
    props.HeadRoom = 1;
    props.TailRoom = 0;
    props.IOBlockPad = pProt->pDev->BlockSize;

    if (pProt->HCIAttached) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,("HCI: notifying attached client to transport... \n"));
        A_ASSERT(pProt->HCIConfig.TransportReady != NULL);
        status = pProt->HCIConfig.TransportReady(pProt,
                                                 &props,
                                                 pProt->HCIConfig.pContext);
    }

    return status;
}
/*********** HCI UART protocol implementation ************************************************/
/*
 * Attach an HCI client to the installed GMBOX HCI-UART protocol.
 *
 * Copies the client's configuration (callbacks, watermarks, queue
 * limits), marks the protocol attached, and notifies the client that
 * the transport is ready. Returns the protocol instance as an opaque
 * transport handle, or NULL if the protocol module is not installed.
 *
 * NOTE(review): if the protocol is already attached, the config copy is
 * skipped but NotifyTransportReady still fires and the existing handle
 * is returned — confirm this double-attach behavior is intended.
 */
HCI_TRANSPORT_HANDLE HCI_TransportAttach(void *HTCHandle, HCI_TRANSPORT_CONFIG_INFO *pInfo)
{
    GMBOX_PROTO_HCI_UART *pProtocol = NULL;
    AR6K_DEVICE          *pDev;

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportAttach \n"));

    pDev = HTCGetAR6KDevice(HTCHandle);

    LOCK_AR6K(pDev);

    do {

        pProtocol = (GMBOX_PROTO_HCI_UART *)DEV_GMBOX_GET_PROTOCOL(pDev);

        if (NULL == pProtocol) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("GMBOX protocol not installed! \n"));
            break;
        }

        if (pProtocol->HCIAttached) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("GMBOX protocol already attached! \n"));
            break;
        }

        A_MEMCPY(&pProtocol->HCIConfig, pInfo, sizeof(HCI_TRANSPORT_CONFIG_INFO));

        /* these two callbacks are mandatory */
        A_ASSERT(pProtocol->HCIConfig.pHCIPktRecv != NULL);
        A_ASSERT(pProtocol->HCIConfig.pHCISendComplete != NULL);

        pProtocol->HCIAttached = TRUE;

    } while (FALSE);

    UNLOCK_AR6K(pDev);

    if (pProtocol != NULL) {
        /* TODO ... should we use a worker? */
        NotifyTransportReady(pProtocol);
    }

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportAttach (0x%lX) \n",(unsigned long)pProtocol));

    return (HCI_TRANSPORT_HANDLE)pProtocol;
}
/*
 * Detach the HCI client from the transport: clear the attached flag
 * under the device lock, then stop the transport (flushes queues and
 * powers down the BT side). Safe to call only once per attach.
 */
void HCI_TransportDetach(HCI_TRANSPORT_HANDLE HciTrans)
{
    GMBOX_PROTO_HCI_UART *pProtocol = (GMBOX_PROTO_HCI_UART *)HciTrans;
    AR6K_DEVICE          *pDev = pProtocol->pDev;

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportDetach \n"));

    LOCK_AR6K(pDev);
    if (!pProtocol->HCIAttached) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("GMBOX protocol not attached! \n"));
        UNLOCK_AR6K(pDev);
        return;
    }
    pProtocol->HCIAttached = FALSE;
    UNLOCK_AR6K(pDev);

    HCI_TransportStop(HciTrans);

    /* BUGFIX: exit trace previously printed "-HCI_TransportAttach"
     * (copy/paste from HCI_TransportAttach); corrected to this
     * function's name. */
    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportDetach \n"));
}
/*
 * Supply receive buffers to the transport. The packet type of the queue
 * head selects the pool (event vs. ACL); the whole queue is appended to
 * that pool. If the receiver was stalled waiting for buffers of this
 * type, the GMBOX receive IRQ is re-enabled.
 *
 * On failure (transport stopped, empty queue, unknown type) every
 * supplied packet is completed back to the client with A_ECANCELED.
 *
 * NOTE(review): the local 'status' is used only to drive that cleanup —
 * the function unconditionally returns A_OK. Errors therefore reach the
 * caller only via the per-packet completions; confirm this is intended.
 */
A_STATUS HCI_TransportAddReceivePkts(HCI_TRANSPORT_HANDLE HciTrans, HTC_PACKET_QUEUE *pQueue)
{
    GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
    A_STATUS              status = A_OK;
    A_BOOL                unblockRecv = FALSE;
    HTC_PACKET           *pPacket;

    AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+HCI_TransportAddReceivePkt \n"));

    LOCK_HCI_RX(pProt);

    do {

        if (pProt->HCIStopped) {
            status = A_ECANCELED;
            break;
        }

        pPacket = HTC_GET_PKT_AT_HEAD(pQueue);

        if (NULL == pPacket) {
            status = A_EINVAL;
            break;
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" HCI recv packet added, type :%d, len:%d num:%d \n",
                        HCI_GET_PACKET_TYPE(pPacket), pPacket->BufferLength, HTC_PACKET_QUEUE_DEPTH(pQueue)));

        /* pool selection is based on the head packet's type; the whole
         * queue is assumed to carry the same type */
        if (HCI_GET_PACKET_TYPE(pPacket) == HCI_EVENT_TYPE) {
            HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pProt->HCIEventBuffers, pQueue);
        } else if (HCI_GET_PACKET_TYPE(pPacket) == HCI_ACL_TYPE) {
            HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pProt->HCIACLRecvBuffers, pQueue);
        } else {
            status = A_EINVAL;
            break;
        }

        if (pProt->RecvStateFlags & HCI_RECV_WAIT_BUFFERS) {
            /* receiver was stalled: unblock only if these buffers match
             * the type it was waiting for */
            if (pProt->WaitBufferType == HCI_GET_PACKET_TYPE(pPacket)) {
                AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" HCI recv was blocked on packet type :%d, unblocking.. \n",
                        pProt->WaitBufferType));
                pProt->RecvStateFlags &= ~HCI_RECV_WAIT_BUFFERS;
                pProt->WaitBufferType = HCI_PACKET_INVALID;
                unblockRecv = TRUE;
            }
        }

    } while (FALSE);

    UNLOCK_HCI_RX(pProt);

    if (A_FAILED(status)) {
        /* rejected: hand every buffer back as cancelled */
        while (!HTC_QUEUE_EMPTY(pQueue)) {
            pPacket = HTC_PACKET_DEQUEUE(pQueue);
            pPacket->Status = A_ECANCELED;
            DO_HCI_RECV_INDICATION(pProt,pPacket);
        }
    }

    if (unblockRecv) {
        DevGMboxIRQAction(pProt->pDev, GMBOX_RECV_IRQ_ENABLE, PROC_IO_ASYNC);
    }

    AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-HCI_TransportAddReceivePkt \n"));

    return A_OK;
}
/* Public send entry point: forward straight to the common TX path. */
A_STATUS HCI_TransportSendPkt(HCI_TRANSPORT_HANDLE HciTrans, HTC_PACKET *pPacket, A_BOOL Synchronous)
{
    return HCITrySend((GMBOX_PROTO_HCI_UART *)HciTrans, pPacket, Synchronous);
}
/*
 * Stop the transport: mark it stopped (idempotent), disable all GMBOX
 * interrupts, cancel queued sends and receive buffers, and signal the
 * bridge to power down the BT side. A second call is a no-op.
 */
void HCI_TransportStop(HCI_TRANSPORT_HANDLE HciTrans)
{
    GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
    A_BOOL                wasStopped;

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportStop \n"));

    /* atomically test-and-set the stopped flag */
    LOCK_AR6K(pProt->pDev);
    wasStopped = pProt->HCIStopped;
    pProt->HCIStopped = TRUE;
    UNLOCK_AR6K(pProt->pDev);

    if (!wasStopped) {
            /* disable interrupts */
        DevGMboxIRQAction(pProt->pDev, GMBOX_DISABLE_ALL, PROC_IO_SYNC);

        FlushSendQueue(pProt);
        FlushRecvBuffers(pProt);

            /* signal bridge side to power down BT */
        DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_BT_OFF, BTOFF_TIMEOUT_MS);
    }

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportStop \n"));
}
/*
 * Start the transport: reset credit state, enable error and receive
 * IRQs, then signal the bridge to power up BT. The stopped flag is
 * cleared only if the whole sequence succeeds.
 */
A_STATUS HCI_TransportStart(HCI_TRANSPORT_HANDLE HciTrans)
{
    GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
    A_STATUS              status;

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("+HCI_TransportStart \n"));

        /* set stopped in case we have a problem in starting */
    pProt->HCIStopped = TRUE;

    status = InitTxCreditState(pProt);

    if (A_SUCCESS(status)) {
        status = DevGMboxIRQAction(pProt->pDev, GMBOX_ERRORS_IRQ_ENABLE, PROC_IO_SYNC);
    }

    if (A_SUCCESS(status)) {
            /* enable recv */
        status = DevGMboxIRQAction(pProt->pDev, GMBOX_RECV_IRQ_ENABLE, PROC_IO_SYNC);
    }

    if (A_SUCCESS(status)) {
            /* signal bridge side to power up BT */
        status = DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_BT_ON, BTON_TIMEOUT_MS);
        if (A_FAILED(status)) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI_TransportStart : Failed to trigger BT ON \n"));
        }
    }

    if (A_SUCCESS(status)) {
            /* we made it */
        pProt->HCIStopped = FALSE;
    }

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("-HCI_TransportStart \n"));
    return status;
}
/* Synchronously enable or disable the GMBOX receive interrupt. */
A_STATUS HCI_TransportEnableDisableAsyncRecv(HCI_TRANSPORT_HANDLE HciTrans, A_BOOL Enable)
{
    GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;

    if (Enable) {
        return DevGMboxIRQAction(pProt->pDev, GMBOX_RECV_IRQ_ENABLE, PROC_IO_SYNC);
    }
    return DevGMboxIRQAction(pProt->pDev, GMBOX_RECV_IRQ_DISABLE, PROC_IO_SYNC);
}
/*
 * Synchronously poll for and receive a single HCI event packet
 * (used e.g. before async receive is running).
 *
 * Polls the GMBOX lookahead every 16 ms for roughly MaxPollMS total
 * (converted to a retry count, minimum 2 attempts). Only event packets
 * are accepted; any other H4 type yields A_EPROTO. On success the
 * packet's buffer is advanced past the type octet and ActualLength set.
 * Returns A_ERROR if the poll budget is exhausted.
 */
A_STATUS HCI_TransportRecvHCIEventSync(HCI_TRANSPORT_HANDLE HciTrans,
                                       HTC_PACKET           *pPacket,
                                       int                  MaxPollMS)
{
    GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
    A_STATUS              status = A_OK;
    A_UINT8               lookAhead[8];
    int                   bytes;
    int                   totalRecvLength;

    /* convert the millisecond budget into 16 ms poll attempts */
    MaxPollMS = MaxPollMS / 16;

    if (MaxPollMS < 2) {
        MaxPollMS = 2;
    }

    while (MaxPollMS) {

        bytes = sizeof(lookAhead);
        status = DevGMboxRecvLookAheadPeek(pProt->pDev,lookAhead,&bytes);
        if (A_FAILED(status)) {
            break;
        }

        if (bytes < 3) {
            /* not enough for an event header yet; wait and retry */
            AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI recv poll got bytes: %d, retry : %d \n",
                        bytes, MaxPollMS));
            A_MDELAY(16);
            MaxPollMS--;
            continue;
        }

        totalRecvLength = 0;

        switch (lookAhead[0]) {
            case HCI_UART_EVENT_PKT:
                AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("HCI Event: %d param length: %d \n",
                        lookAhead[1], lookAhead[2]));
                totalRecvLength = lookAhead[2];
                totalRecvLength += 3; /* add type + event code + length field */
                break;
            default:
                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("**Invalid HCI packet type: %d \n",lookAhead[0]));
                status = A_EPROTO;
                break;
        }

        if (A_FAILED(status)) {
            break;
        }

        /* synchronous read: no completion callback */
        pPacket->Completion = NULL;
        status = DevGMboxRead(pProt->pDev,pPacket,totalRecvLength);
        if (A_FAILED(status)) {
            break;
        }

        /* strip the H4 type octet, as the async receive path does */
        pPacket->pBuffer++;
        pPacket->ActualLength = totalRecvLength - 1;
        pPacket->Status = A_OK;
        break;
    }

    if (MaxPollMS == 0) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI recv poll timeout! \n"));
        status = A_ERROR;
    }

    return status;
}
/* local scratch registers used to pass the scaled baud rate to the target */
#define LSB_SCRATCH_IDX     4
#define MSB_SCRATCH_IDX     5

/*
 * Tell the target firmware to switch the HCI UART baud rate.
 * The rate (divided by 100) is written LSB/MSB into local scratch
 * registers 4 and 5, then the target is interrupted to apply it.
 */
A_STATUS HCI_TransportSetBaudRate(HCI_TRANSPORT_HANDLE HciTrans, A_UINT32 Baud)
{
    GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
    HIF_DEVICE *pHIFDevice = (HIF_DEVICE *)(pProt->pDev->HIFDevice);
    A_UINT32 scaledBaud, scratchAddr;
    A_STATUS status = A_OK;

    /* Divide the desired baud rate by 100
     * Store the LSB in the local scratch register 4 and the MSB in the local
     * scratch register 5 for the target to read
     */
    scratchAddr = MBOX_BASE_ADDRESS | (LOCAL_SCRATCH_ADDRESS + 4 * LSB_SCRATCH_IDX);
    scaledBaud = (Baud / 100) & LOCAL_SCRATCH_VALUE_MASK;
    status = ar6000_WriteRegDiag(pHIFDevice, &scratchAddr, &scaledBaud);

    scratchAddr = MBOX_BASE_ADDRESS | (LOCAL_SCRATCH_ADDRESS + 4 * MSB_SCRATCH_IDX);
    scaledBaud = ((Baud / 100) >> (LOCAL_SCRATCH_VALUE_MSB+1)) & LOCAL_SCRATCH_VALUE_MASK;
    /* NOTE(review): OR-ing two A_STATUS codes only reliably detects
     * "any failure" if A_OK is 0 — confirm against the status enum */
    status |= ar6000_WriteRegDiag(pHIFDevice, &scratchAddr, &scaledBaud);
    if (A_OK != status) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to set up baud rate in scratch register!"));
        return status;
    }

    /* Now interrupt the target to tell it about the baud rate */
    status = DevGMboxSetTargetInterrupt(pProt->pDev, MBOX_SIG_HCI_BRIDGE_BAUD_SET, BAUD_TIMEOUT_MS);
    if (A_OK != status) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to tell target to change baud rate!"));
    }

    return status;
}
/*
 * Toggle the bridge-side HCI power-save mode by raising the matching
 * target interrupt signal.
 */
A_STATUS HCI_TransportEnablePowerMgmt(HCI_TRANSPORT_HANDLE HciTrans, A_BOOL Enable)
{
    GMBOX_PROTO_HCI_UART *pProt = (GMBOX_PROTO_HCI_UART *)HciTrans;
    A_STATUS              status;

    status = DevGMboxSetTargetInterrupt(pProt->pDev,
                                        Enable ? MBOX_SIG_HCI_BRIDGE_PWR_SAV_ON
                                               : MBOX_SIG_HCI_BRIDGE_PWR_SAV_OFF,
                                        BTPWRSAV_TIMEOUT_MS);

    if (A_FAILED(status)) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to enable/disable HCI power management!\n"));
    } else {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HCI power management enabled/disabled!\n"));
    }

    return status;
}
#endif //ATH_AR6K_ENABLE_GMBOX
| gpl-2.0 |
garwynn/SC02E_LJF_Kernel | net/wireless_ath/sysfs.c | 688 | 3582 | /*
* This file provides /sys/class/ieee80211/<wiphy name>/
* and some default attributes.
*
* Copyright 2005-2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
*
* This file is GPLv2 as found in COPYING.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/nl80211.h>
#include <linux/rtnetlink.h>
#include <net/cfg80211.h>
#include "sysfs.h"
#include "core.h"
/* Map the embedded struct device back to its cfg80211 registered device. */
static inline struct cfg80211_registered_device *dev_to_rdev(
	struct device *dev)
{
	return container_of(dev, struct cfg80211_registered_device, wiphy.dev);
}
/* Generate a read-only sysfs show handler that prints one rdev member
 * with the given printf format (one line, newline-terminated). */
#define SHOW_FMT(name, fmt, member)					\
static ssize_t name ## _show(struct device *dev,			\
			      struct device_attribute *attr,		\
			      char *buf)				\
{									\
	return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member);	\
}

/* /sys/class/ieee80211/<phy>/{index,macaddress,address_mask} */
SHOW_FMT(index, "%d", wiphy_idx);
SHOW_FMT(macaddress, "%pM", wiphy.perm_addr);
SHOW_FMT(address_mask, "%pM", wiphy.addr_mask);
static ssize_t name_show(struct device *dev,
struct device_attribute *attr,
char *buf) {
struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy;
return sprintf(buf, "%s\n", dev_name(&wiphy->dev));
}
/* Show all MAC addresses of the wiphy, one per line; falls back to the
 * permanent address when no address list was registered. */
static ssize_t addresses_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy;
	char *start = buf;
	int i;

	if (!wiphy->addresses)
		return sprintf(buf, "%pM\n", wiphy->perm_addr);

	/* accumulate into buf, advancing past each sprintf'ed line */
	for (i = 0; i < wiphy->n_addresses; i++)
		buf += sprintf(buf, "%pM\n", &wiphy->addresses[i].addr);

	return buf - start;
}
/* Default read-only attributes for every ieee80211 class device;
 * the empty entry terminates the list. */
static struct device_attribute ieee80211_dev_attrs[] = {
	__ATTR_RO(index),
	__ATTR_RO(macaddress),
	__ATTR_RO(address_mask),
	__ATTR_RO(addresses),
	__ATTR_RO(name),
	{}
};
/* Device release callback: frees the registered device once the last
 * reference to its embedded struct device is dropped. */
static void wiphy_dev_release(struct device *dev)
{
	struct cfg80211_registered_device *rdev = dev_to_rdev(dev);

	cfg80211_dev_free(rdev);
}
#ifdef CONFIG_HOTPLUG
/* Uevent hook for hotplug notifications; currently adds no variables. */
static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	/* TODO, we probably need stuff here */
	return 0;
}
#endif
/* Class suspend hook: record the suspend timestamp (used on resume to
 * age scan results) and forward to the driver's suspend op, if any,
 * under RTNL and only while the wiphy is still registered. */
static int wiphy_suspend(struct device *dev, pm_message_t state)
{
	struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
	int ret = 0;

	rdev->suspend_at = get_seconds();

	if (rdev->ops->suspend) {
		rtnl_lock();
		if (rdev->wiphy.registered)
			ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
		rtnl_unlock();
	}

	return ret;
}
/* Class resume hook: age cached scan results by the time spent
 * suspended, then forward to the driver's resume op, if any. */
static int wiphy_resume(struct device *dev)
{
	struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
	int ret = 0;

	/* Age scan results with time spent in suspend */
	spin_lock_bh(&rdev->bss_lock);
	cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
	spin_unlock_bh(&rdev->bss_lock);

	if (rdev->ops->resume) {
		rtnl_lock();
		if (rdev->wiphy.registered)
			ret = rdev->ops->resume(&rdev->wiphy);
		rtnl_unlock();
	}

	return ret;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
/* Return the network namespace a wiphy belongs to (for sysfs tagging). */
static const void *wiphy_namespace(struct device *d)
{
	struct wiphy *wiphy = container_of(d, struct wiphy, dev);

	return wiphy_net(wiphy);
}
#endif
/* The /sys/class/ieee80211 class: release, attributes, PM hooks and
 * (on >= 2.6.35) per-netns sysfs tagging for all wiphy devices. */
struct class ieee80211_class = {
	.name = "ieee80211",
	.owner = THIS_MODULE,
	.dev_release = wiphy_dev_release,
	.dev_attrs = ieee80211_dev_attrs,
#ifdef CONFIG_HOTPLUG
	.dev_uevent = wiphy_uevent,
#endif
	.suspend = wiphy_suspend,
	.resume = wiphy_resume,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	.ns_type = &net_ns_type_operations,
	.namespace = wiphy_namespace,
#endif
};
/* Register the ieee80211 sysfs class; returns 0 or a negative errno. */
int wiphy_sysfs_init(void)
{
	return class_register(&ieee80211_class);
}
/* Unregister the ieee80211 sysfs class on module teardown. */
void wiphy_sysfs_exit(void)
{
	class_unregister(&ieee80211_class);
}
| gpl-2.0 |
gunine/htc-rider-aosp-kernel | drivers/video/fb_defio.c | 944 | 6128 | /*
* linux/drivers/video/fb_defio.c
*
* Copyright (C) 2006 Jaya Kumar
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>
/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>
/* Translate a byte offset into the framebuffer to its backing page.
 * Handles both vmalloc'ed framebuffers and physically contiguous ones
 * (via the smem_start physical base). */
struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;

	if (is_vmalloc_addr(screen_base + offs))
		return vmalloc_to_page(screen_base + offs);

	return pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);
}
/* this is to find and return the vmalloc-ed fb pages */
/* this is to find and return the vmalloc-ed fb pages */
/* vm_ops->fault handler: resolve a page fault in an mmap'ed deferred-IO
 * framebuffer to the backing page, bumping its refcount and wiring up
 * page->mapping/index so page_mkclean() works later. */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}
/* fsync on the fbdev file: cancel any pending deferred-IO work and run
 * it immediately so outstanding dirty pages hit the hardware now. */
int fb_deferred_io_fsync(struct file *file, int datasync)
{
	struct fb_info *info = file->private_data;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	/* Kill off the delayed work */
	cancel_rearming_delayed_work(&info->deferred_work);

	/* Run it immediately */
	return schedule_delayed_work(&info->deferred_work, 0);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct fb_info *info = vma->vm_private_data;
struct fb_deferred_io *fbdefio = info->fbdefio;
struct page *cur;
/* this is a callback we get when userspace first tries to
write to the page. we schedule a workqueue. that workqueue
will eventually mkclean the touched pages and execute the
deferred framebuffer IO. then if userspace touches a page
again, we repeat the same scheme */
/* protect against the workqueue changing the page list */
mutex_lock(&fbdefio->lock);
/*
* We want the page to remain locked from ->page_mkwrite until
* the PTE is marked dirty to avoid page_mkclean() being called
* before the PTE is updated, which would leave the page ignored
* by defio.
* Do this by locking the page here and informing the caller
* about it with VM_FAULT_LOCKED.
*/
lock_page(page);
/* we loop through the pagelist before adding in order
to keep the pagelist sorted */
list_for_each_entry(cur, &fbdefio->pagelist, lru) {
/* this check is to catch the case where a new
process could start writing to the same page
through a new pte. this new access can cause the
mkwrite even when the original ps's pte is marked
writable */
if (unlikely(cur == page))
goto page_already_added;
else if (cur->index > page->index)
break;
}
list_add_tail(&page->lru, &cur->lru);
page_already_added:
mutex_unlock(&fbdefio->lock);
/* come back after delay to process the deferred IO */
schedule_delayed_work(&info->deferred_work, fbdefio->delay);
return VM_FAULT_LOCKED;
}
/* VMA operations installed by fb_deferred_io_mmap() */
static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};
/* a_ops->set_page_dirty: set the page's dirty flag only; defio tracks
 * touched pages through its own pagelist, so nothing else is recorded. */
static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (PageDirty(page))
		return 0;

	SetPageDirty(page);
	return 0;
}
/* Installed on the file's mapping by fb_deferred_io_open() */
static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};
/* fb_mmap replacement: route faults and first-writes through the defio
 * handlers above. */
static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	/* pages are managed here; the mapping must not grow or be swapped */
	vma->vm_flags |= ( VM_RESERVED | VM_DONTEXPAND );
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}
/* workqueue callback: mkclean every tracked page (so the next write
 * re-triggers ->page_mkwrite), hand the sorted pagelist to the driver's
 * deferred_io callback, then empty the list. */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
					    deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}
/*
 * fb_deferred_io_init - enable deferred I/O on @info
 *
 * The driver must have set info->fbdefio beforehand (BUG otherwise).
 * Installs the defio fb_mmap handler, the flush work and the page list,
 * and defaults the flush delay to HZ (1 s) when the driver left it 0.
 */
void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
/*
 * fb_deferred_io_open - per-open hook
 *
 * Point the file's mapping at the defio address_space_operations so
 * set_page_dirty() on framebuffer pages is the defio no-frills variant.
 */
void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
/*
 * fb_deferred_io_cleanup - undo fb_deferred_io_init()
 *
 * Cancels/flushes the pending work, clears the page->mapping pointers
 * set up by the fault handler, removes the fb_mmap hook and destroys
 * the lock.
 *
 * Fix: the loop index was a signed int compared against the unsigned
 * fix.smem_len (__u32); for framebuffers larger than INT_MAX bytes the
 * increment would overflow (undefined behavior). Use unsigned long.
 */
void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	unsigned long i;

	BUG_ON(!fbdefio);
	cancel_delayed_work(&info->deferred_work);
	flush_scheduled_work();

	/* clear out the mapping that we setup */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
MODULE_LICENSE("GPL");
| gpl-2.0 |
Flemmard/android_kernel_samsung_msm8660-common | drivers/net/wireless/iwlegacy/iwl-4965-led.c | 2992 | 2257 | /******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "iwl-commands.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-4965-led.h"
/* Send an asynchronous REPLY_LEDS_CMD to the device. Before queueing,
 * clear every bit except the BSM control bits in the LED CSR if any
 * other bit is set. */
static int
iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_LEDS_CMD,
		.len = sizeof(struct iwl_led_cmd),
		.data = led_cmd,
		.flags = CMD_ASYNC,	/* fire and forget */
		.callback = NULL,
	};
	u32 reg;

	reg = iwl_read32(priv, CSR_LED_REG);
	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
		iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);

	return iwl_legacy_send_cmd(priv, &cmd);
}
/* Write CSR_LED_REG_TRUN_ON to the LED CSR.
 * NOTE(review): the original comment said "Set led register off",
 * which contradicts the constant's name; per the value written this
 * enables (turns on) the LED. */
void iwl4965_led_enable(struct iwl_priv *priv)
{
	iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
/* 4965 LED operations: only the command hook is provided here. */
const struct iwl_led_ops iwl4965_led_ops = {
	.cmd = iwl4965_send_led_cmd,
};
| gpl-2.0 |
showp1984/bricked-pyramid-3.0 | arch/m68k/mm/motorola.c | 3248 | 7775 | /*
* linux/arch/m68k/mm/motorola.c
*
* Routines specific to the Motorola MMU, originally from:
* linux/arch/m68k/init.c
* which are Copyright (C) 1995 Hamish Macdonald
*
* Moved 8/20/1999 Sam Creasey
*/
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>
#undef DEBUG
#ifndef mm_cachebits
/*
* Bits to add to page descriptors for "normal" caching mode.
* For 68020/030 this is 0.
* For 68040, this is _PAGE_CACHE040 (cachable, copyback)
*/
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
/* size of memory already mapped in head.S */
#define INIT_MAPPED_SIZE (4UL<<20)
extern unsigned long availmem;
/*
 * Allocate and prepare one kernel page-table page from bootmem:
 * zeroed, flushed to RAM, TLB entry invalidated and marked
 * non-cacheable via nocache_page().
 */
static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}
/* last pointer-table slot handed out; NULL until first located */
static pmd_t *last_pgtable __initdata = NULL;
/* page table used by map_node() for the zero mapping */
pmd_t *zero_pgtable __initdata = NULL;

/*
 * Hand out the next pointer-table (pmd) slot, allocating and preparing
 * a fresh bootmem page when the current one is exhausted. On first use
 * it scans kernel_pg_dir for the last pointer-table page set up by
 * head.S and continues filling that page.
 */
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		/* crossed a page boundary: allocate a fresh, uncached page */
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}
/*
 * Map one memory chunk (m68k_memory[node]) into the kernel virtual
 * address space, building pointer tables and page tables as needed.
 * On 020/030, suitably aligned chunks use "early termination" entries
 * at the root or pointer level; on 040/060 full page tables are built.
 */
static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	/* fold cache-mode and protection bits into the descriptor value */
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			/* a 32 MB aligned chunk: point the root entry
			   straight at the memory */
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				/* virtual 0 gets a real page table; its
				   first pte is cleared so page 0 is not
				   mapped, the next 63 map physaddr on */
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				/* don't overwrite ptes already set by head.S */
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
}
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S: fixes cache bits, sets up
 * bootmem per node, maps all memory chunks and initializes the zones.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr, size, end;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	/* Compute min/max physical addresses; chunks below the first one
	 * are dropped with a warning. */
	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct mem_info));
			continue;
		}
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_low_pfn = max_addr >> PAGE_SHIFT;

	/* register each chunk as a bootmem node */
	for (i = 0; i < m68k_num_memory; i++) {
		addr = m68k_memory[i].addr;
		end = addr + m68k_memory[i].size;
		m68k_setup_node(i);
		availmem = PAGE_ALIGN(availmem);
		availmem += init_bootmem_node(NODE_DATA(i),
					      availmem >> PAGE_SHIFT,
					      addr >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. First initialize the bootmem allocator with
	 * the memory we already mapped, so map_node() has something
	 * to allocate.
	 */
	addr = m68k_memory[0].addr;
	size = m68k_memory[0].size;
	free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr));
	map_node(0);
	if (size > INIT_MAPPED_SIZE)
		free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE);

	for (i = 1; i < m68k_num_memory; i++)
		map_node(i);

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);
	}
}
/* Return the pages of the __init sections to the page allocator. */
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		/* clear PG_reserved and reset the refcount so the page
		   can actually be freed */
		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
}
| gpl-2.0 |
TeamRegular/android_kernel_zara | drivers/platform/x86/sony-laptop.c | 4784 | 88810 | /*
* ACPI Sony Notebook Control Driver (SNC and SPIC)
*
* Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net>
* Copyright (C) 2007-2009 Mattia Dongili <malattia@linux.it>
*
* Parts of this driver inspired from asus_acpi.c and ibm_acpi.c
* which are copyrighted by their respective authors.
*
* The SNY6001 driver part is based on the sonypi driver which includes
* material from:
*
* Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net>
*
* Copyright (C) 2005 Narayanan R S <nars@kadamba.org>
*
* Copyright (C) 2001-2002 Alcôve <www.alcove.com>
*
* Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au>
*
* Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp>
*
* Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp>
*
* Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com>
*
* Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/kfifo.h>
#include <linux/workqueue.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
#include <asm/uaccess.h>
#include <linux/sonypi.h>
#include <linux/sony-laptop.h>
#include <linux/rfkill.h>
#ifdef CONFIG_SONYPI_COMPAT
#include <linux/poll.h>
#include <linux/miscdevice.h>
#endif
#define dprintk(fmt, ...) \
do { \
if (debug) \
pr_warn(fmt, ##__VA_ARGS__); \
} while (0)
#define SONY_LAPTOP_DRIVER_VERSION "0.6"
#define SONY_NC_CLASS "sony-nc"
#define SONY_NC_HID "SNY5001"
#define SONY_NC_DRIVER_NAME "Sony Notebook Control Driver"
#define SONY_PIC_CLASS "sony-pic"
#define SONY_PIC_HID "SNY6001"
#define SONY_PIC_DRIVER_NAME "Sony Programmable IO Control Driver"
MODULE_AUTHOR("Stelian Pop, Mattia Dongili");
MODULE_DESCRIPTION("Sony laptop extras driver (SPIC and SNC ACPI device)");
MODULE_LICENSE("GPL");
MODULE_VERSION(SONY_LAPTOP_DRIVER_VERSION);
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "set this to 1 (and RTFM) if you want to help "
"the development of this driver");
static int no_spic; /* = 0 */
module_param(no_spic, int, 0444);
MODULE_PARM_DESC(no_spic,
"set this if you don't want to enable the SPIC device");
static int compat; /* = 0 */
module_param(compat, int, 0444);
MODULE_PARM_DESC(compat,
"set this if you want to enable backward compatibility mode");
static unsigned long mask = 0xffffffff;
module_param(mask, ulong, 0644);
MODULE_PARM_DESC(mask,
"set this to the mask of event you want to enable (see doc)");
static int camera; /* = 0 */
module_param(camera, int, 0444);
MODULE_PARM_DESC(camera,
"set this to 1 to enable Motion Eye camera controls "
"(only use it if you have a C1VE or C1VN model)");
#ifdef CONFIG_SONYPI_COMPAT
static int minor = -1;
module_param(minor, int, 0);
MODULE_PARM_DESC(minor,
"minor number of the misc device for the SPIC compatibility code, "
"default is -1 (automatic)");
#endif
/* Keyboard backlight control. Fix: the parameter description claimed
 * "(default: 0)" while the initializer below is 1 — the help text now
 * matches the actual default. */
static int kbd_backlight = 1;
module_param(kbd_backlight, int, 0444);
MODULE_PARM_DESC(kbd_backlight,
		 "set this to 0 to disable keyboard backlight, "
		 "1 to enable it (default: 1)");
static int kbd_backlight_timeout; /* = 0 */
module_param(kbd_backlight_timeout, int, 0444);
MODULE_PARM_DESC(kbd_backlight_timeout,
"set this to 0 to set the default 10 seconds timeout, "
"1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
"(default: 0)");
static void sony_nc_kbd_backlight_resume(void);
/* Indexes into sony_rfkill_devices[] / sony_rfkill_address[] */
enum sony_nc_rfkill {
	SONY_WIFI,
	SONY_BLUETOOTH,
	SONY_WWAN,
	SONY_WIMAX,
	N_SONY_RFKILL,
};

static int sony_rfkill_handle;
static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL];
/* per-radio argument passed to the SNC rfkill method */
static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900};
static void sony_nc_rfkill_update(void);

/*********** Input Devices ***********/

/* byte size of the pending key-release fifo */
#define SONY_LAPTOP_BUF_SIZE	128
/* Input state shared by the SNC and SPIC parts of the driver */
struct sony_laptop_input_s {
	atomic_t		users;		/* how many devices set up input */
	struct input_dev	*jog_dev;	/* jogdial: wheel + middle button */
	struct input_dev	*key_dev;	/* special keys */
	struct kfifo		fifo;		/* queued key releases */
	spinlock_t		fifo_lock;	/* protects fifo */
	struct timer_list	release_key_timer;
};

static struct sony_laptop_input_s sony_laptop_input = {
	.users = ATOMIC_INIT(0),
};

/* one queued keypress: target device and keycode to release */
struct sony_laptop_keypress {
	struct input_dev *dev;
	int key;
};
/* Correspondence table between sonypi events
 * and input layer indexes in the keymap
 * (-1 means the event is not forwarded to the input layer)
 */
static int sony_laptop_input_index[] = {
	-1,	/*  0 no event */
	-1,	/*  1 SONYPI_EVENT_JOGDIAL_DOWN */
	-1,	/*  2 SONYPI_EVENT_JOGDIAL_UP */
	-1,	/*  3 SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */
	-1,	/*  4 SONYPI_EVENT_JOGDIAL_UP_PRESSED */
	-1,	/*  5 SONYPI_EVENT_JOGDIAL_PRESSED */
	-1,	/*  6 SONYPI_EVENT_JOGDIAL_RELEASED */
	 0,	/*  7 SONYPI_EVENT_CAPTURE_PRESSED */
	 1,	/*  8 SONYPI_EVENT_CAPTURE_RELEASED */
	 2,	/*  9 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
	 3,	/* 10 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
	 4,	/* 11 SONYPI_EVENT_FNKEY_ESC */
	 5,	/* 12 SONYPI_EVENT_FNKEY_F1 */
	 6,	/* 13 SONYPI_EVENT_FNKEY_F2 */
	 7,	/* 14 SONYPI_EVENT_FNKEY_F3 */
	 8,	/* 15 SONYPI_EVENT_FNKEY_F4 */
	 9,	/* 16 SONYPI_EVENT_FNKEY_F5 */
	10,	/* 17 SONYPI_EVENT_FNKEY_F6 */
	11,	/* 18 SONYPI_EVENT_FNKEY_F7 */
	12,	/* 19 SONYPI_EVENT_FNKEY_F8 */
	13,	/* 20 SONYPI_EVENT_FNKEY_F9 */
	14,	/* 21 SONYPI_EVENT_FNKEY_F10 */
	15,	/* 22 SONYPI_EVENT_FNKEY_F11 */
	16,	/* 23 SONYPI_EVENT_FNKEY_F12 */
	17,	/* 24 SONYPI_EVENT_FNKEY_1 */
	18,	/* 25 SONYPI_EVENT_FNKEY_2 */
	19,	/* 26 SONYPI_EVENT_FNKEY_D */
	20,	/* 27 SONYPI_EVENT_FNKEY_E */
	21,	/* 28 SONYPI_EVENT_FNKEY_F */
	22,	/* 29 SONYPI_EVENT_FNKEY_S */
	23,	/* 30 SONYPI_EVENT_FNKEY_B */
	24,	/* 31 SONYPI_EVENT_BLUETOOTH_PRESSED */
	25,	/* 32 SONYPI_EVENT_PKEY_P1 */
	26,	/* 33 SONYPI_EVENT_PKEY_P2 */
	27,	/* 34 SONYPI_EVENT_PKEY_P3 */
	28,	/* 35 SONYPI_EVENT_BACK_PRESSED */
	-1,	/* 36 SONYPI_EVENT_LID_CLOSED */
	-1,	/* 37 SONYPI_EVENT_LID_OPENED */
	29,	/* 38 SONYPI_EVENT_BLUETOOTH_ON */
	30,	/* 39 SONYPI_EVENT_BLUETOOTH_OFF */
	31,	/* 40 SONYPI_EVENT_HELP_PRESSED */
	32,	/* 41 SONYPI_EVENT_FNKEY_ONLY */
	33,	/* 42 SONYPI_EVENT_JOGDIAL_FAST_DOWN */
	34,	/* 43 SONYPI_EVENT_JOGDIAL_FAST_UP */
	35,	/* 44 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
	36,	/* 45 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
	37,	/* 46 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
	38,	/* 47 SONYPI_EVENT_JOGDIAL_VFAST_UP */
	39,	/* 48 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
	40,	/* 49 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
	41,	/* 50 SONYPI_EVENT_ZOOM_PRESSED */
	42,	/* 51 SONYPI_EVENT_THUMBPHRASE_PRESSED */
	43,	/* 52 SONYPI_EVENT_MEYE_FACE */
	44,	/* 53 SONYPI_EVENT_MEYE_OPPOSITE */
	45,	/* 54 SONYPI_EVENT_MEMORYSTICK_INSERT */
	46,	/* 55 SONYPI_EVENT_MEMORYSTICK_EJECT */
	-1,	/* 56 SONYPI_EVENT_ANYBUTTON_RELEASED */
	-1,	/* 57 SONYPI_EVENT_BATTERY_INSERT */
	-1,	/* 58 SONYPI_EVENT_BATTERY_REMOVE */
	-1,	/* 59 SONYPI_EVENT_FNKEY_RELEASED */
	47,	/* 60 SONYPI_EVENT_WIRELESS_ON */
	48,	/* 61 SONYPI_EVENT_WIRELESS_OFF */
	49,	/* 62 SONYPI_EVENT_ZOOM_IN_PRESSED */
	50,	/* 63 SONYPI_EVENT_ZOOM_OUT_PRESSED */
	51,	/* 64 SONYPI_EVENT_CD_EJECT_PRESSED */
	52,	/* 65 SONYPI_EVENT_MODEKEY_PRESSED */
	53,	/* 66 SONYPI_EVENT_PKEY_P4 */
	54,	/* 67 SONYPI_EVENT_PKEY_P5 */
	55,	/* 68 SONYPI_EVENT_SETTINGKEY_PRESSED */
	56,	/* 69 SONYPI_EVENT_VOLUME_INC_PRESSED */
	57,	/* 70 SONYPI_EVENT_VOLUME_DEC_PRESSED */
	-1,	/* 71 SONYPI_EVENT_BRIGHTNESS_PRESSED */
	58,	/* 72 SONYPI_EVENT_MEDIA_PRESSED */
	59,	/* 73 SONYPI_EVENT_VENDOR_PRESSED */
};
/* Keycodes emitted for each index produced by sony_laptop_input_index[]
 * above; KEY_RESERVED entries are filtered out at device setup. */
static int sony_laptop_input_keycode_map[] = {
	KEY_CAMERA,	/*  0 SONYPI_EVENT_CAPTURE_PRESSED */
	KEY_RESERVED,	/*  1 SONYPI_EVENT_CAPTURE_RELEASED */
	KEY_RESERVED,	/*  2 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
	KEY_RESERVED,	/*  3 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
	KEY_FN_ESC,	/*  4 SONYPI_EVENT_FNKEY_ESC */
	KEY_FN_F1,	/*  5 SONYPI_EVENT_FNKEY_F1 */
	KEY_FN_F2,	/*  6 SONYPI_EVENT_FNKEY_F2 */
	KEY_FN_F3,	/*  7 SONYPI_EVENT_FNKEY_F3 */
	KEY_FN_F4,	/*  8 SONYPI_EVENT_FNKEY_F4 */
	KEY_FN_F5,	/*  9 SONYPI_EVENT_FNKEY_F5 */
	KEY_FN_F6,	/* 10 SONYPI_EVENT_FNKEY_F6 */
	KEY_FN_F7,	/* 11 SONYPI_EVENT_FNKEY_F7 */
	KEY_FN_F8,	/* 12 SONYPI_EVENT_FNKEY_F8 */
	KEY_FN_F9,	/* 13 SONYPI_EVENT_FNKEY_F9 */
	KEY_FN_F10,	/* 14 SONYPI_EVENT_FNKEY_F10 */
	KEY_FN_F11,	/* 15 SONYPI_EVENT_FNKEY_F11 */
	KEY_FN_F12,	/* 16 SONYPI_EVENT_FNKEY_F12 */
	KEY_FN_F1,	/* 17 SONYPI_EVENT_FNKEY_1 */
	KEY_FN_F2,	/* 18 SONYPI_EVENT_FNKEY_2 */
	KEY_FN_D,	/* 19 SONYPI_EVENT_FNKEY_D */
	KEY_FN_E,	/* 20 SONYPI_EVENT_FNKEY_E */
	KEY_FN_F,	/* 21 SONYPI_EVENT_FNKEY_F */
	KEY_FN_S,	/* 22 SONYPI_EVENT_FNKEY_S */
	KEY_FN_B,	/* 23 SONYPI_EVENT_FNKEY_B */
	KEY_BLUETOOTH,	/* 24 SONYPI_EVENT_BLUETOOTH_PRESSED */
	KEY_PROG1,	/* 25 SONYPI_EVENT_PKEY_P1 */
	KEY_PROG2,	/* 26 SONYPI_EVENT_PKEY_P2 */
	KEY_PROG3,	/* 27 SONYPI_EVENT_PKEY_P3 */
	KEY_BACK,	/* 28 SONYPI_EVENT_BACK_PRESSED */
	KEY_BLUETOOTH,	/* 29 SONYPI_EVENT_BLUETOOTH_ON */
	KEY_BLUETOOTH,	/* 30 SONYPI_EVENT_BLUETOOTH_OFF */
	KEY_HELP,	/* 31 SONYPI_EVENT_HELP_PRESSED */
	KEY_FN,		/* 32 SONYPI_EVENT_FNKEY_ONLY */
	KEY_RESERVED,	/* 33 SONYPI_EVENT_JOGDIAL_FAST_DOWN */
	KEY_RESERVED,	/* 34 SONYPI_EVENT_JOGDIAL_FAST_UP */
	KEY_RESERVED,	/* 35 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
	KEY_RESERVED,	/* 36 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
	KEY_RESERVED,	/* 37 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
	KEY_RESERVED,	/* 38 SONYPI_EVENT_JOGDIAL_VFAST_UP */
	KEY_RESERVED,	/* 39 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
	KEY_RESERVED,	/* 40 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
	KEY_ZOOM,	/* 41 SONYPI_EVENT_ZOOM_PRESSED */
	BTN_THUMB,	/* 42 SONYPI_EVENT_THUMBPHRASE_PRESSED */
	KEY_RESERVED,	/* 43 SONYPI_EVENT_MEYE_FACE */
	KEY_RESERVED,	/* 44 SONYPI_EVENT_MEYE_OPPOSITE */
	KEY_RESERVED,	/* 45 SONYPI_EVENT_MEMORYSTICK_INSERT */
	KEY_RESERVED,	/* 46 SONYPI_EVENT_MEMORYSTICK_EJECT */
	KEY_WLAN,	/* 47 SONYPI_EVENT_WIRELESS_ON */
	KEY_WLAN,	/* 48 SONYPI_EVENT_WIRELESS_OFF */
	KEY_ZOOMIN,	/* 49 SONYPI_EVENT_ZOOM_IN_PRESSED */
	KEY_ZOOMOUT,	/* 50 SONYPI_EVENT_ZOOM_OUT_PRESSED */
	KEY_EJECTCD,	/* 51 SONYPI_EVENT_CD_EJECT_PRESSED */
	KEY_F13,	/* 52 SONYPI_EVENT_MODEKEY_PRESSED */
	KEY_PROG4,	/* 53 SONYPI_EVENT_PKEY_P4 */
	KEY_F14,	/* 54 SONYPI_EVENT_PKEY_P5 */
	KEY_F15,	/* 55 SONYPI_EVENT_SETTINGKEY_PRESSED */
	KEY_VOLUMEUP,	/* 56 SONYPI_EVENT_VOLUME_INC_PRESSED */
	KEY_VOLUMEDOWN,	/* 57 SONYPI_EVENT_VOLUME_DEC_PRESSED */
	KEY_MEDIA,	/* 58 SONYPI_EVENT_MEDIA_PRESSED */
	KEY_VENDOR,	/* 59 SONYPI_EVENT_VENDOR_PRESSED */
};
/* Timer callback: release buttons after a short delay if pressed.
 * Pops one queued keypress off the fifo, emits its key-up event and,
 * while more presses are queued, re-arms itself 10 ms later. */
static void do_sony_laptop_release_key(unsigned long unused)
{
	struct sony_laptop_keypress kp;
	unsigned long flags;

	spin_lock_irqsave(&sony_laptop_input.fifo_lock, flags);

	if (kfifo_out(&sony_laptop_input.fifo,
		      (unsigned char *)&kp, sizeof(kp)) == sizeof(kp)) {
		input_report_key(kp.dev, kp.key, 0);
		input_sync(kp.dev);
	}

	/* If there is something in the fifo schedule next release. */
	if (kfifo_len(&sony_laptop_input.fifo) != 0)
		mod_timer(&sony_laptop_input.release_key_timer,
			  jiffies + msecs_to_jiffies(10));

	spin_unlock_irqrestore(&sony_laptop_input.fifo_lock, flags);
}
/* Forward a sonypi event to the input subsystem: jogdial motion goes
 * straight to jog_dev, key-like events emit key-down now and queue the
 * matching key-up for the release timer. */
static void sony_laptop_report_input_event(u8 event)
{
	struct input_dev *jog_dev = sony_laptop_input.jog_dev;
	struct input_dev *key_dev = sony_laptop_input.key_dev;
	struct sony_laptop_keypress kp = { NULL };
	int scancode = -1;

	if (event == SONYPI_EVENT_FNKEY_RELEASED ||
	    event == SONYPI_EVENT_ANYBUTTON_RELEASED) {
		/* Nothing, not all VAIOs generate this event */
		return;
	}

	/* report events */
	switch (event) {
	/* jog_dev events */
	case SONYPI_EVENT_JOGDIAL_UP:
	case SONYPI_EVENT_JOGDIAL_UP_PRESSED:
		input_report_rel(jog_dev, REL_WHEEL, 1);
		input_sync(jog_dev);
		return;

	case SONYPI_EVENT_JOGDIAL_DOWN:
	case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED:
		input_report_rel(jog_dev, REL_WHEEL, -1);
		input_sync(jog_dev);
		return;

	/* key_dev events */
	case SONYPI_EVENT_JOGDIAL_PRESSED:
		kp.key = BTN_MIDDLE;
		kp.dev = jog_dev;
		break;

	default:
		if (event >= ARRAY_SIZE(sony_laptop_input_index)) {
			dprintk("sony_laptop_report_input_event, event not known: %d\n", event);
			break;
		}
		if ((scancode = sony_laptop_input_index[event]) != -1) {
			kp.key = sony_laptop_input_keycode_map[scancode];
			if (kp.key != KEY_UNKNOWN)
				kp.dev = key_dev;
		}
		break;
	}

	if (kp.dev) {
		/* if we have a scancode we emit it so we can always
		   remap the key */
		if (scancode != -1)
			input_event(kp.dev, EV_MSC, MSC_SCAN, scancode);
		input_report_key(kp.dev, kp.key, 1);
		input_sync(kp.dev);

		/* schedule key release */
		kfifo_in_locked(&sony_laptop_input.fifo,
				(unsigned char *)&kp, sizeof(kp),
				&sony_laptop_input.fifo_lock);
		mod_timer(&sony_laptop_input.release_key_timer,
			  jiffies + msecs_to_jiffies(10));
	} else
		dprintk("unknown input event %.2x\n", event);
}
/*
 * Register the keypress fifo, the release timer and the two input
 * devices (special keys + jogdial). Reference-counted so SNC and SPIC
 * can both call it; only the first caller does the work.
 * Returns 0 on success or a negative errno, undoing partial setup.
 */
static int sony_laptop_setup_input(struct acpi_device *acpi_device)
{
	struct input_dev *jog_dev;
	struct input_dev *key_dev;
	int i;
	int error;

	/* don't run again if already initialized */
	if (atomic_add_return(1, &sony_laptop_input.users) > 1)
		return 0;

	/* kfifo */
	spin_lock_init(&sony_laptop_input.fifo_lock);
	error = kfifo_alloc(&sony_laptop_input.fifo,
			    SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
	if (error) {
		pr_err("kfifo_alloc failed\n");
		goto err_dec_users;
	}

	setup_timer(&sony_laptop_input.release_key_timer,
		    do_sony_laptop_release_key, 0);

	/* input keys */
	key_dev = input_allocate_device();
	if (!key_dev) {
		error = -ENOMEM;
		goto err_free_kfifo;
	}

	key_dev->name = "Sony Vaio Keys";
	key_dev->id.bustype = BUS_ISA;
	key_dev->id.vendor = PCI_VENDOR_ID_SONY;
	key_dev->dev.parent = &acpi_device->dev;

	/* Initialize the Input Drivers: special keys */
	input_set_capability(key_dev, EV_MSC, MSC_SCAN);

	__set_bit(EV_KEY, key_dev->evbit);
	key_dev->keycodesize = sizeof(sony_laptop_input_keycode_map[0]);
	key_dev->keycodemax = ARRAY_SIZE(sony_laptop_input_keycode_map);
	key_dev->keycode = &sony_laptop_input_keycode_map;
	for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++)
		__set_bit(sony_laptop_input_keycode_map[i], key_dev->keybit);
	/* KEY_RESERVED placeholders must not be advertised */
	__clear_bit(KEY_RESERVED, key_dev->keybit);

	error = input_register_device(key_dev);
	if (error)
		goto err_free_keydev;

	sony_laptop_input.key_dev = key_dev;

	/* jogdial */
	jog_dev = input_allocate_device();
	if (!jog_dev) {
		error = -ENOMEM;
		goto err_unregister_keydev;
	}

	jog_dev->name = "Sony Vaio Jogdial";
	jog_dev->id.bustype = BUS_ISA;
	jog_dev->id.vendor = PCI_VENDOR_ID_SONY;
	jog_dev->dev.parent = &acpi_device->dev;

	input_set_capability(jog_dev, EV_KEY, BTN_MIDDLE);
	input_set_capability(jog_dev, EV_REL, REL_WHEEL);

	error = input_register_device(jog_dev);
	if (error)
		goto err_free_jogdev;

	sony_laptop_input.jog_dev = jog_dev;

	return 0;

err_free_jogdev:
	input_free_device(jog_dev);

err_unregister_keydev:
	input_unregister_device(key_dev);
	/* to avoid kref underflow below at input_free_device */
	key_dev = NULL;

err_free_keydev:
	input_free_device(key_dev);

err_free_kfifo:
	kfifo_free(&sony_laptop_input.fifo);

err_dec_users:
	atomic_dec(&sony_laptop_input.users);
	return error;
}
/* Tear down what sony_laptop_setup_input() created; only the last
 * remaining user actually performs the cleanup. */
static void sony_laptop_remove_input(void)
{
	struct sony_laptop_keypress kp = { NULL };

	/* Cleanup only after the last user has gone */
	if (!atomic_dec_and_test(&sony_laptop_input.users))
		return;

	del_timer_sync(&sony_laptop_input.release_key_timer);

	/*
	 * Generate key-up events for remaining keys. Note that we don't
	 * need locking since nobody is adding new events to the kfifo.
	 */
	while (kfifo_out(&sony_laptop_input.fifo,
			 (unsigned char *)&kp, sizeof(kp)) == sizeof(kp)) {
		input_report_key(kp.dev, kp.key, 0);
		input_sync(kp.dev);
	}

	/* destroy input devs */
	input_unregister_device(sony_laptop_input.key_dev);
	sony_laptop_input.key_dev = NULL;

	if (sony_laptop_input.jog_dev) {
		input_unregister_device(sony_laptop_input.jog_dev);
		sony_laptop_input.jog_dev = NULL;
	}

	kfifo_free(&sony_laptop_input.fifo);
}
/*********** Platform Device ***********/

/* reference count: how many subdrivers registered the platform device */
static atomic_t sony_pf_users = ATOMIC_INIT(0);
static struct platform_driver sony_pf_driver = {
	.driver = {
		   .name = "sony-laptop",
		   .owner = THIS_MODULE,
		   }
};
static struct platform_device *sony_pf_device;
/* Register the "sony-laptop" platform driver and device once; further
 * callers only bump the reference count. Unwinds on failure. */
static int sony_pf_add(void)
{
	int ret = 0;

	/* don't run again if already initialized */
	if (atomic_add_return(1, &sony_pf_users) > 1)
		return 0;

	ret = platform_driver_register(&sony_pf_driver);
	if (ret)
		goto out;

	sony_pf_device = platform_device_alloc("sony-laptop", -1);
	if (!sony_pf_device) {
		ret = -ENOMEM;
		goto out_platform_registered;
	}

	ret = platform_device_add(sony_pf_device);
	if (ret)
		goto out_platform_alloced;

	return 0;

out_platform_alloced:
	platform_device_put(sony_pf_device);
	sony_pf_device = NULL;

out_platform_registered:
	platform_driver_unregister(&sony_pf_driver);

out:
	atomic_dec(&sony_pf_users);
	return ret;
}
/* Drop a reference; the last user unregisters device and driver. */
static void sony_pf_remove(void)
{
	/* deregister only after the last user has gone */
	if (!atomic_dec_and_test(&sony_pf_users))
		return;

	platform_device_unregister(sony_pf_device);
	platform_driver_unregister(&sony_pf_driver);
}
/*********** SNC (SNY5001) Device ***********/
/* the device uses 1-based values, while the backlight subsystem uses
0-based values */
#define SONY_MAX_BRIGHTNESS 8
#define SNC_VALIDATE_IN 0
#define SNC_VALIDATE_OUT 1
static ssize_t sony_nc_sysfs_show(struct device *, struct device_attribute *,
char *);
static ssize_t sony_nc_sysfs_store(struct device *, struct device_attribute *,
const char *, size_t);
static int boolean_validate(const int, const int);
static int brightness_default_validate(const int, const int);
/* One SNC-exposed setting: ties a sysfs attribute to ACPI get/set
 * method names plus an optional value validator. */
struct sony_nc_value {
	char *name;		/* name of the entry */
	char **acpiget;		/* names of the ACPI get function */
	char **acpiset;		/* names of the ACPI set function */
	int (*validate)(const int, const int);	/* input/output validation */
	int value;		/* current setting */
	int valid;		/* Has ever been set */
	int debug;		/* active only in debug mode ? */
	struct device_attribute devattr;	/* sysfs attribute */
};
#define SNC_HANDLE_NAMES(_name, _values...) \
static char *snc_##_name[] = { _values, NULL }
#define SNC_HANDLE(_name, _getters, _setters, _validate, _debug) \
{ \
.name = __stringify(_name), \
.acpiget = _getters, \
.acpiset = _setters, \
.validate = _validate, \
.debug = _debug, \
.devattr = __ATTR(_name, 0, sony_nc_sysfs_show, sony_nc_sysfs_store), \
}
#define SNC_HANDLE_NULL { .name = NULL }
/* Candidate ACPI method names for each SNC value (NULL-terminated lists;
 * the first name found in the DSDT is the one used). */
SNC_HANDLE_NAMES(fnkey_get, "GHKE");

SNC_HANDLE_NAMES(brightness_def_get, "GPBR");
SNC_HANDLE_NAMES(brightness_def_set, "SPBR");

SNC_HANDLE_NAMES(cdpower_get, "GCDP");
SNC_HANDLE_NAMES(cdpower_set, "SCDP", "CDPW");

SNC_HANDLE_NAMES(audiopower_get, "GAZP");
SNC_HANDLE_NAMES(audiopower_set, "AZPW");

SNC_HANDLE_NAMES(lanpower_get, "GLNP");
SNC_HANDLE_NAMES(lanpower_set, "LNPW");

SNC_HANDLE_NAMES(lidstate_get, "GLID");

SNC_HANDLE_NAMES(indicatorlamp_get, "GILS");
SNC_HANDLE_NAMES(indicatorlamp_set, "SILS");

SNC_HANDLE_NAMES(gainbass_get, "GMGB");
SNC_HANDLE_NAMES(gainbass_set, "CMGB");

SNC_HANDLE_NAMES(PID_get, "GPID");

SNC_HANDLE_NAMES(CTR_get, "GCTR");
SNC_HANDLE_NAMES(CTR_set, "SCTR");

SNC_HANDLE_NAMES(PCR_get, "GPCR");
SNC_HANDLE_NAMES(PCR_set, "SPCR");

SNC_HANDLE_NAMES(CMI_get, "GCMI");
SNC_HANDLE_NAMES(CMI_set, "SCMI");

/* Every SNC value exposed through sysfs on the platform device. */
static struct sony_nc_value sony_nc_values[] = {
	SNC_HANDLE(brightness_default, snc_brightness_def_get,
			snc_brightness_def_set, brightness_default_validate, 0),
	SNC_HANDLE(fnkey, snc_fnkey_get, NULL, NULL, 0),
	SNC_HANDLE(cdpower, snc_cdpower_get, snc_cdpower_set, boolean_validate, 0),
	SNC_HANDLE(audiopower, snc_audiopower_get, snc_audiopower_set,
			boolean_validate, 0),
	SNC_HANDLE(lanpower, snc_lanpower_get, snc_lanpower_set,
			boolean_validate, 1),
	SNC_HANDLE(lidstate, snc_lidstate_get, NULL,
			boolean_validate, 0),
	SNC_HANDLE(indicatorlamp, snc_indicatorlamp_get, snc_indicatorlamp_set,
			boolean_validate, 0),
	SNC_HANDLE(gainbass, snc_gainbass_get, snc_gainbass_set,
			boolean_validate, 0),
	/* unknown methods */
	SNC_HANDLE(PID, snc_PID_get, NULL, NULL, 1),
	SNC_HANDLE(CTR, snc_CTR_get, snc_CTR_set, NULL, 1),
	SNC_HANDLE(PCR, snc_PCR_get, snc_PCR_set, NULL, 1),
	SNC_HANDLE(CMI, snc_CMI_get, snc_CMI_set, NULL, 1),
	SNC_HANDLE_NULL
};
/* ACPI handle and device of the SNC object, cached at probe time. */
static acpi_handle sony_nc_acpi_handle;
static struct acpi_device *sony_nc_acpi_device = NULL;

/*
 * acpi_evaluate_object wrappers
 */
/*
 * Evaluate an argument-less ACPI method and return its integer result.
 *
 * @handle: ACPI handle the method is looked up against
 * @name:   method name
 * @result: filled with the returned integer on success
 *
 * Returns 0 on success, -1 if evaluation fails or the method does not
 * return an integer.
 */
static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
{
	struct acpi_buffer output;
	union acpi_object out_obj;
	acpi_status status;

	output.length = sizeof(out_obj);
	output.pointer = &out_obj;

	status = acpi_evaluate_object(handle, name, NULL, &output);
	if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) {
		*result = out_obj.integer.value;
		return 0;
	}

	/* fixed: the message used to name a nonexistent "acpi_callreadfunc" */
	pr_warn("acpi_callgetfunc failed\n");

	return -1;
}
/*
 * Evaluate an ACPI method taking a single integer argument, optionally
 * returning the method's integer result through @result.
 *
 * Returns 0 on success, -1 on evaluation failure or (when @result is
 * requested) a non-integer return object.
 */
static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
			    int *result)
{
	struct acpi_object_list params;
	union acpi_object in_obj;
	struct acpi_buffer output;
	union acpi_object out_obj;
	acpi_status status;

	params.count = 1;
	params.pointer = &in_obj;
	in_obj.type = ACPI_TYPE_INTEGER;
	in_obj.integer.value = value;

	output.length = sizeof(out_obj);
	output.pointer = &out_obj;

	/* fixed: third argument had been corrupted to "¶ms" in this copy;
	 * acpi_evaluate_object() takes a pointer to the parameter list */
	status = acpi_evaluate_object(handle, name, &params, &output);
	if (status == AE_OK) {
		if (result != NULL) {
			if (out_obj.type != ACPI_TYPE_INTEGER) {
				pr_warn("acpi_evaluate_object bad return type\n");
				return -1;
			}
			*result = out_obj.integer.value;
		}
		return 0;
	}

	pr_warn("acpi_evaluate_object failed\n");

	return -1;
}
/* Cache of the 16 SNC capability handles read via SN00 at probe time. */
struct sony_nc_handles {
	u16 cap[0x10];
	struct device_attribute devattr;	/* debug-only "handles" attribute */
};

static struct sony_nc_handles *handles;
/*
 * Show all cached SNC capability handles as space-separated hex words.
 * Only exposed (as the debug "handles" attribute) when running with
 * the debug module parameter set.
 */
static ssize_t sony_nc_handles_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
		/* scnprintf returns the space actually consumed, so len
		 * can never run past PAGE_SIZE (snprintf would return the
		 * would-be length and could push len out of bounds) */
		len += scnprintf(buffer + len, PAGE_SIZE - len, "0x%.4x ",
				handles->cap[i]);
	}
	len += scnprintf(buffer + len, PAGE_SIZE - len, "\n");

	return len;
}
/*
 * Query the SNC for its 16 capability handles (method SN00, argument
 * offsets 0x20..0x2f) and cache them in 'handles'.  In debug mode the
 * cache is also exposed through a read-only "handles" sysfs attribute.
 *
 * Returns 0 on success, -ENOMEM or -1 on failure.
 */
static int sony_nc_handles_setup(struct platform_device *pd)
{
	int i;
	int result;

	handles = kzalloc(sizeof(*handles), GFP_KERNEL);
	if (!handles)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
		/* a failed query simply leaves the slot zeroed */
		if (!acpi_callsetfunc(sony_nc_acpi_handle,
					"SN00", i + 0x20, &result)) {
			dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n",
					result, i);
			handles->cap[i] = result;
		}
	}

	if (debug) {
		sysfs_attr_init(&handles->devattr.attr);
		handles->devattr.attr.name = "handles";
		handles->devattr.attr.mode = S_IRUGO;
		handles->devattr.show = sony_nc_handles_show;

		/* allow reading capabilities via sysfs */
		if (device_create_file(&pd->dev, &handles->devattr)) {
			kfree(handles);
			handles = NULL;
			return -1;
		}
	}

	return 0;
}
/*
 * Free the capability-handle cache and, in debug mode, remove the
 * companion sysfs attribute.  Safe to call when setup never ran.
 */
static int sony_nc_handles_cleanup(struct platform_device *pd)
{
	if (!handles)
		return 0;

	if (debug)
		device_remove_file(&pd->dev, &handles->devattr);

	kfree(handles);
	handles = NULL;

	return 0;
}
/*
 * Look up a capability handle in the cache and return its offset, or
 * -1 when the handle is absent or the cache was never initialized.
 */
static int sony_find_snc_handle(int handle)
{
	int i;

	/* not initialized yet, return early */
	if (!handles)
		return -1;

	/* use ARRAY_SIZE instead of a hardcoded 0x10, consistent with
	 * sony_nc_handles_setup()/sony_nc_handles_show() */
	for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
		if (handles->cap[i] == handle) {
			dprintk("found handle 0x%.4x (offset: 0x%.2x)\n",
					handle, i);
			return i;
		}
	}
	dprintk("handle 0x%.4x not found\n", handle);
	return -1;
}
/*
 * Invoke SNC method SN07 for the given capability handle, OR-ing the
 * handle's cached offset into @argument.  On success *result holds the
 * method's integer return.
 *
 * Returns 0 on success, -1 or the acpi_callsetfunc() error otherwise.
 */
static int sony_call_snc_handle(int handle, int argument, int *result)
{
	int ret = 0;
	int offset = sony_find_snc_handle(handle);

	if (offset < 0)
		return -1;

	ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument,
			result);
	/* only log *result on success: it is uninitialized on failure */
	if (ret)
		return ret;

	dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument,
			*result);
	return 0;
}
/*
 * sony_nc_values input/output validate functions
 */

/* brightness_default_validate:
 *
 * Translate between the 0-based values used by sysfs/backlight and the
 * 1-based values the hardware expects, rejecting out-of-range input.
 */
static int brightness_default_validate(const int direction, const int value)
{
	if (direction == SNC_VALIDATE_OUT)
		return value - 1;

	if (direction == SNC_VALIDATE_IN &&
	    value >= 0 && value < SONY_MAX_BRIGHTNESS)
		return value + 1;

	return -EINVAL;
}
/* boolean_validate:
 *
 * on input validate boolean values 0/1, on output just pass the
 * received value.
 */
static int boolean_validate(const int direction, const int value)
{
	if (direction == SNC_VALIDATE_IN && value != 0 && value != 1)
		return -EINVAL;

	return value;
}
/*
 * Sysfs show/store common to all sony_nc_values
 */

/*
 * Read the current value of an SNC entry through its ACPI get method
 * and print it in decimal, applying the entry's validate() hook on the
 * way out.
 */
static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr,
		char *buffer)
{
	int value;
	struct sony_nc_value *item =
	    container_of(attr, struct sony_nc_value, devattr);

	/* guard the list pointer too, mirroring sony_nc_sysfs_store() */
	if (!item->acpiget || !*item->acpiget)
		return -EIO;

	if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0)
		return -EIO;

	if (item->validate)
		value = item->validate(SNC_VALIDATE_OUT, value);

	return snprintf(buffer, PAGE_SIZE, "%d\n", value);
}
/*
 * Parse a decimal value, run it through the entry's validate() hook,
 * and write it to the hardware through the ACPI set method.  The value
 * is remembered so sony_nc_resume() can replay it after suspend.
 */
static ssize_t sony_nc_sysfs_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buffer, size_t count)
{
	int value;
	struct sony_nc_value *item =
	    container_of(attr, struct sony_nc_value, devattr);

	/* also guard the dereference: acpiset may point at the table's
	 * NULL terminator when no setter was found in the DSDT */
	if (!item->acpiset || !*item->acpiset)
		return -EIO;

	if (count > 31)
		return -EINVAL;

	value = simple_strtoul(buffer, NULL, 10);

	if (item->validate)
		value = item->validate(SNC_VALIDATE_IN, value);

	/* validate() reports errors as negative errno values */
	if (value < 0)
		return value;

	if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0)
		return -EIO;

	item->value = value;
	item->valid = 1;
	return count;
}
/*
 * Backlight device
 */
struct sony_backlight_props {
	struct backlight_device *dev;
	int			handle;	/* SNC handle (new-style models) */
	u8			offset;	/* lowest hardware level */
	u8			maxlvl;	/* highest hardware level */
};
/* fixed: was missing 'static' — file-local state must not be exported */
static struct sony_backlight_props sony_bl_props;
/* Write the backlight level through the legacy SBRT method. */
static int sony_backlight_update_status(struct backlight_device *bd)
{
	/* hardware levels are 1-based */
	int level = bd->props.brightness + 1;

	return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", level, NULL);
}
/* Read the backlight level through the legacy GBRT method. */
static int sony_backlight_get_brightness(struct backlight_device *bd)
{
	int level;

	if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &level) != 0)
		return 0;

	/* hardware reports 1-based levels, the backlight core wants 0-based */
	return level - 1;
}
/*
 * Read the brightness from a new-style (SNC handle) backlight,
 * rebasing the hardware level onto the 0-based backlight scale.
 */
static int sony_nc_get_brightness_ng(struct backlight_device *bd)
{
	int result;
	struct sony_backlight_props *sdev =
		(struct sony_backlight_props *)bl_get_data(bd);

	/* 'result' is uninitialized when the SNC call fails — don't use it */
	if (sony_call_snc_handle(sdev->handle, 0x0200, &result))
		return 0;

	return (result & 0xff) - sdev->offset;
}
/* Program a new-style (SNC handle) backlight with the requested level. */
static int sony_nc_update_status_ng(struct backlight_device *bd)
{
	int result;
	struct sony_backlight_props *sdev =
		(struct sony_backlight_props *)bl_get_data(bd);
	/* rebase onto the hardware's own level range */
	int value = bd->props.brightness + sdev->offset;

	if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
		return -EIO;

	return value;
}
/* Backlight callbacks for legacy (GBRT/SBRT) models. */
static const struct backlight_ops sony_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.update_status = sony_backlight_update_status,
	.get_brightness = sony_backlight_get_brightness,
};
/* Backlight callbacks for new-style (SNC handle) models. */
static const struct backlight_ops sony_backlight_ng_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.update_status = sony_nc_update_status_ng,
	.get_brightness = sony_nc_get_brightness_ng,
};
/*
 * New SNC-only Vaios event mapping to driver known keys
 */
struct sony_nc_event {
	u8	data;	/* raw code read back from the SNC */
	u8	event;	/* corresponding SONYPI_EVENT_* code */
};

/* Raw-code to event translation table for key handle 0x100. */
static struct sony_nc_event sony_100_events[] = {
	{ 0x90, SONYPI_EVENT_PKEY_P1 },
	{ 0x10, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0x91, SONYPI_EVENT_PKEY_P2 },
	{ 0x11, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0x81, SONYPI_EVENT_FNKEY_F1 },
	{ 0x01, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x82, SONYPI_EVENT_FNKEY_F2 },
	{ 0x02, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x83, SONYPI_EVENT_FNKEY_F3 },
	{ 0x03, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x84, SONYPI_EVENT_FNKEY_F4 },
	{ 0x04, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x85, SONYPI_EVENT_FNKEY_F5 },
	{ 0x05, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x86, SONYPI_EVENT_FNKEY_F6 },
	{ 0x06, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x87, SONYPI_EVENT_FNKEY_F7 },
	{ 0x07, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x89, SONYPI_EVENT_FNKEY_F9 },
	{ 0x09, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x8A, SONYPI_EVENT_FNKEY_F10 },
	{ 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x8C, SONYPI_EVENT_FNKEY_F12 },
	{ 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
	{ 0x1d, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0x9f, SONYPI_EVENT_CD_EJECT_PRESSED },
	{ 0x1f, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0xa1, SONYPI_EVENT_MEDIA_PRESSED },
	{ 0x21, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0xa4, SONYPI_EVENT_CD_EJECT_PRESSED },
	{ 0x24, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0xa5, SONYPI_EVENT_VENDOR_PRESSED },
	{ 0x25, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0xa6, SONYPI_EVENT_HELP_PRESSED },
	{ 0x26, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0, 0 },
};

/* Raw-code to event translation table for key handle 0x127. */
static struct sony_nc_event sony_127_events[] = {
	{ 0x81, SONYPI_EVENT_MODEKEY_PRESSED },
	{ 0x01, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0x82, SONYPI_EVENT_PKEY_P1 },
	{ 0x02, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0x83, SONYPI_EVENT_PKEY_P2 },
	{ 0x03, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0x84, SONYPI_EVENT_PKEY_P3 },
	{ 0x04, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0x85, SONYPI_EVENT_PKEY_P4 },
	{ 0x05, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0x86, SONYPI_EVENT_PKEY_P5 },
	{ 0x06, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0x87, SONYPI_EVENT_SETTINGKEY_PRESSED },
	{ 0x07, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0, 0 },
};
/*
 * ACPI callbacks
 */

/*
 * Notify handler for the SNC device.
 *
 * Events >= 0x90 are "new style": the event number (minus 0x90)
 * selects an SNC key handle (0x100 or 0x127), which is then queried
 * for the real key code and translated through the matching
 * sony_*_events table; the rfkill handle instead triggers a state
 * refresh.  All other events are forwarded to the input layer as-is.
 */
static void sony_nc_notify(struct acpi_device *device, u32 event)
{
	u32 ev = event;

	if (ev >= 0x90) {
		/* New-style event */
		int result;
		int key_handle = 0;

		ev -= 0x90;

		if (sony_find_snc_handle(0x100) == ev)
			key_handle = 0x100;
		if (sony_find_snc_handle(0x127) == ev)
			key_handle = 0x127;

		if (key_handle) {
			struct sony_nc_event *key_event;

			if (sony_call_snc_handle(key_handle, 0x200, &result)) {
				dprintk("sony_nc_notify, unable to decode"
					" event 0x%.2x 0x%.2x\n", key_handle,
					ev);
				/* restore the original event */
				ev = event;
			} else {
				ev = result & 0xFF;

				if (key_handle == 0x100)
					key_event = sony_100_events;
				else
					key_event = sony_127_events;

				/* translate the raw code to a SONYPI event */
				for (; key_event->data; key_event++) {
					if (key_event->data == ev) {
						ev = key_event->event;
						break;
					}
				}

				if (!key_event->data)
					pr_info("Unknown event: 0x%x 0x%x\n",
							key_handle, ev);
				else
					sony_laptop_report_input_event(ev);
			}
		} else if (sony_find_snc_handle(sony_rfkill_handle) == ev) {
			/* not a key: the radio kill-switch state changed */
			sony_nc_rfkill_update();
			return;
		}
	} else
		sony_laptop_report_input_event(ev);

	dprintk("sony_nc_notify, event: 0x%.2x\n", ev);
	acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev);
}
/*
 * Debug helper: dump the name and argument count of one ACPI method
 * while walking the SNC namespace.
 */
static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
				      void *context, void **return_value)
{
	struct acpi_device_info *info = NULL;

	if (!ACPI_SUCCESS(acpi_get_object_info(handle, &info)))
		return AE_OK;

	pr_warn("method: name: %4.4s, args %X\n",
		(char *)&info->name, info->param_count);
	kfree(info);

	return AE_OK;
}
/*
 * ACPI device
 */

/* Enable SNC event delivery and initialize the hotkey handles. */
static int sony_nc_function_setup(struct acpi_device *device)
{
	int bits;

	/* Enable all events */
	acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0xffff, &bits);

	/* Setup hotkeys */
	sony_call_snc_handle(0x0100, 0, &bits);
	sony_call_snc_handle(0x0101, 0, &bits);
	sony_call_snc_handle(0x0102, 0x100, &bits);
	sony_call_snc_handle(0x0127, 0, &bits);

	return 0;
}
/*
 * Resume handler: replay every value that was explicitly set through
 * sysfs, re-enable EC control, redo the SNC function setup, and
 * refresh rfkill and keyboard-backlight state.
 */
static int sony_nc_resume(struct acpi_device *device)
{
	struct sony_nc_value *item;
	acpi_handle handle;

	for (item = sony_nc_values; item->name; item++) {
		int ret;

		/* only values the user actually set are replayed */
		if (!item->valid)
			continue;
		ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
				item->value, NULL);
		if (ret < 0) {
			pr_err("%s: %d\n", __func__, ret);
			break;
		}
	}

	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
			&handle))) {
		/* ECON(1) hands embedded-controller control back to the OS */
		if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
			dprintk("ECON Method failed\n");
	}

	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
			&handle))) {
		dprintk("Doing SNC setup\n");
		sony_nc_function_setup(device);
	}

	/* re-read rfkill state */
	sony_nc_rfkill_update();

	/* restore kbd backlight states */
	sony_nc_kbd_backlight_resume();

	return 0;
}
/* Unregister and free every rfkill switch created at probe time. */
static void sony_nc_rfkill_cleanup(void)
{
	int i;

	for (i = 0; i < N_SONY_RFKILL; i++) {
		struct rfkill *rfk = sony_rfkill_devices[i];

		if (!rfk)
			continue;

		rfkill_unregister(rfk);
		rfkill_destroy(rfk);
	}
}
/* rfkill set_block callback: soft-(un)block one radio via the SNC. */
static int sony_nc_rfkill_set(void *data, bool blocked)
{
	int dummy;
	int arg = sony_rfkill_address[(long)data] + 0x100;

	/* the 0xff0000 bits request the unblocked state */
	if (!blocked)
		arg |= 0xff0000;

	return sony_call_snc_handle(sony_rfkill_handle, arg, &dummy);
}
/* rfkill core callbacks for all Sony radio switches. */
static const struct rfkill_ops sony_rfkill_ops = {
	.set_block = sony_nc_rfkill_set,
};
/*
 * Allocate and register one rfkill switch of the given kind, seeding
 * its hard-block state from the hardware when the query succeeds.
 *
 * Returns 0 on success or a negative errno.
 */
static int sony_nc_setup_rfkill(struct acpi_device *device,
				enum sony_nc_rfkill nc_type)
{
	int err = 0;
	struct rfkill *rfk;
	enum rfkill_type type;
	const char *name;
	int result;
	bool hwblock;

	switch (nc_type) {
	case SONY_WIFI:
		type = RFKILL_TYPE_WLAN;
		name = "sony-wifi";
		break;
	case SONY_BLUETOOTH:
		type = RFKILL_TYPE_BLUETOOTH;
		name = "sony-bluetooth";
		break;
	case SONY_WWAN:
		type = RFKILL_TYPE_WWAN;
		name = "sony-wwan";
		break;
	case SONY_WIMAX:
		type = RFKILL_TYPE_WIMAX;
		name = "sony-wimax";
		break;
	default:
		return -EINVAL;
	}

	rfk = rfkill_alloc(name, &device->dev, type,
			   &sony_rfkill_ops, (void *)nc_type);
	if (!rfk)
		return -ENOMEM;

	/* only trust 'result' when the query succeeded: it is left
	 * uninitialized on failure */
	if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) == 0) {
		hwblock = !(result & 0x1);
		rfkill_set_hw_state(rfk, hwblock);
	}

	err = rfkill_register(rfk);
	if (err) {
		rfkill_destroy(rfk);
		return err;
	}
	sony_rfkill_devices[nc_type] = rfk;
	return err;
}
/*
 * Refresh the hard/soft block state of every registered rfkill switch
 * from the hardware.  When the hardware kill switch is engaged only
 * the hw state is propagated; per-device soft states are not queried.
 */
static void sony_nc_rfkill_update(void)
{
	enum sony_nc_rfkill i;
	int result;
	bool hwblock;

	/* NOTE(review): the return value is not checked here, so 'result'
	 * may be read uninitialized if the query fails — verify against
	 * firmware behaviour */
	sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
	hwblock = !(result & 0x1);

	for (i = 0; i < N_SONY_RFKILL; i++) {
		int argument = sony_rfkill_address[i];

		if (!sony_rfkill_devices[i])
			continue;

		if (hwblock) {
			if (rfkill_set_hw_state(sony_rfkill_devices[i], true)) {
				/* we already know we're blocked */
			}
			continue;
		}

		/* low nibble non-zero apparently means "soft unblocked" */
		sony_call_snc_handle(sony_rfkill_handle, argument, &result);
		rfkill_set_states(sony_rfkill_devices[i],
				!(result & 0xf), false);
	}
}
/*
 * Discover which radios the SNC exports (capability handle 0x124 or
 * 0x135) by reading the SN06 device-enumeration buffer, and register
 * an rfkill switch for each device code found.
 */
static void sony_nc_rfkill_setup(struct acpi_device *device)
{
	int offset;
	u8 dev_code, i;
	acpi_status status;
	struct acpi_object_list params;
	union acpi_object in_obj;
	union acpi_object *device_enum;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

	offset = sony_find_snc_handle(0x124);
	if (offset == -1) {
		offset = sony_find_snc_handle(0x135);
		if (offset == -1)
			return;
		else
			sony_rfkill_handle = 0x135;
	} else
		sony_rfkill_handle = 0x124;
	dprintk("Found rkfill handle: 0x%.4x\n", sony_rfkill_handle);

	/* need to read the whole buffer returned by the acpi call to SN06
	 * here otherwise we may miss some features
	 */
	params.count = 1;
	params.pointer = &in_obj;
	in_obj.type = ACPI_TYPE_INTEGER;
	in_obj.integer.value = offset;
	/* fixed: third argument had been corrupted to "¶ms" in this copy */
	status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
			&buffer);
	if (ACPI_FAILURE(status)) {
		dprintk("Radio device enumeration failed\n");
		return;
	}

	device_enum = (union acpi_object *) buffer.pointer;
	if (!device_enum) {
		pr_err("No SN06 return object\n");
		goto out_no_enum;
	}
	if (device_enum->type != ACPI_TYPE_BUFFER) {
		pr_err("Invalid SN06 return object 0x%.2x\n",
		       device_enum->type);
		goto out_no_enum;
	}

	/* the buffer is filled with magic numbers describing the devices
	 * available, 0xff terminates the enumeration
	 */
	for (i = 0; i < device_enum->buffer.length; i++) {

		dev_code = *(device_enum->buffer.pointer + i);
		if (dev_code == 0xff)
			break;

		dprintk("Radio devices, looking at 0x%.2x\n", dev_code);

		if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI])
			sony_nc_setup_rfkill(device, SONY_WIFI);

		if (dev_code == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
			sony_nc_setup_rfkill(device, SONY_BLUETOOTH);

		if ((0xf0 & dev_code) == 0x20 &&
				!sony_rfkill_devices[SONY_WWAN])
			sony_nc_setup_rfkill(device, SONY_WWAN);

		if (dev_code == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
			sony_nc_setup_rfkill(device, SONY_WIMAX);
	}

out_no_enum:
	kfree(buffer.pointer);
	return;
}
/* Keyboard backlight feature */
#define KBDBL_HANDLER	0x137	/* SNC capability handle */
#define KBDBL_PRESENT	0xB00	/* presence-query argument */
#define	SET_MODE	0xC00	/* program auto/off mode */
#define SET_STATE	0xD00	/* force the light on/off now */
#define SET_TIMEOUT	0xE00	/* program the idle timeout */

/* Driver-side state of the keyboard backlight, plus its sysfs attrs. */
struct kbd_backlight {
	int mode;	/* 0 = off, 1 = on/auto */
	int timeout;	/* 0..3, hardware-defined steps */
	struct device_attribute mode_attr;
	struct device_attribute timeout_attr;
};

/* NULL when the feature is absent or not yet set up. */
static struct kbd_backlight *kbdbl_handle;
/*
 * Program the keyboard backlight mode (0 = off, 1 = on/auto) and try
 * to apply it immediately.
 *
 * Returns 0 on success, -EINVAL or -EIO on failure.  Return type is
 * int, matching __sony_nc_kbd_backlight_timeout_set() (was ssize_t).
 */
static int __sony_nc_kbd_backlight_mode_set(u8 value)
{
	int result;

	if (value > 1)
		return -EINVAL;

	if (sony_call_snc_handle(KBDBL_HANDLER,
				(value << 0x10) | SET_MODE, &result))
		return -EIO;

	/* Try to turn the light on/off immediately */
	sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE,
			&result);

	kbdbl_handle->mode = value;

	return 0;
}
/* sysfs store for "kbd_backlight": parse and program the mode. */
static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned long mode;
	int err;

	if (count > 31)
		return -EINVAL;

	if (strict_strtoul(buffer, 10, &mode))
		return -EINVAL;

	err = __sony_nc_kbd_backlight_mode_set(mode);
	return err < 0 ? err : count;
}
/* sysfs show for "kbd_backlight": report the cached mode. */
static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	return snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode);
}
/* Program the keyboard backlight idle timeout (0..3 hardware steps). */
static int __sony_nc_kbd_backlight_timeout_set(u8 value)
{
	int unused;

	if (value > 3)
		return -EINVAL;

	if (sony_call_snc_handle(KBDBL_HANDLER,
				(value << 0x10) | SET_TIMEOUT, &unused))
		return -EIO;

	kbdbl_handle->timeout = value;

	return 0;
}
/* sysfs store for "kbd_backlight_timeout": parse and program it. */
static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned long timeout;
	int err;

	if (count > 31)
		return -EINVAL;

	if (strict_strtoul(buffer, 10, &timeout))
		return -EINVAL;

	err = __sony_nc_kbd_backlight_timeout_set(timeout);
	return err < 0 ? err : count;
}
/* sysfs show for "kbd_backlight_timeout": report the cached timeout. */
static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	return snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout);
}
/*
 * Probe for the keyboard backlight (SNC handle 0x137) and, when
 * present, create the "kbd_backlight"/"kbd_backlight_timeout" sysfs
 * attributes and apply the module-parameter defaults.
 *
 * Returns 0 when the feature is absent or fully set up, -ENOMEM or -1
 * on error.
 */
static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
{
	int result;

	if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
		return 0;
	/* bit 1 of the presence query gates the whole feature */
	if (!(result & 0x02))
		return 0;

	kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL);
	if (!kbdbl_handle)
		return -ENOMEM;

	sysfs_attr_init(&kbdbl_handle->mode_attr.attr);
	kbdbl_handle->mode_attr.attr.name = "kbd_backlight";
	kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
	kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show;
	kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store;

	sysfs_attr_init(&kbdbl_handle->timeout_attr.attr);
	kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout";
	kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
	kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
	kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;

	if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr))
		goto outkzalloc;

	if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr))
		goto outmode;

	/* apply the kbd_backlight/kbd_backlight_timeout module defaults */
	__sony_nc_kbd_backlight_mode_set(kbd_backlight);
	__sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout);

	return 0;

outmode:
	device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
outkzalloc:
	kfree(kbdbl_handle);
	kbdbl_handle = NULL;
	return -1;
}
static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
{
if (kbdbl_handle) {
int result;
device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
/* restore the default hw behaviour */
sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result);
sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result);
kfree(kbdbl_handle);
}
return 0;
}
/*
 * Re-apply keyboard-backlight settings after resume.  Only values that
 * differ from the apparent post-resume hardware defaults (mode != 0,
 * timeout == 0) are rewritten — NOTE(review): that assumption about
 * the defaults is inferred from these conditions, confirm on hardware.
 */
static void sony_nc_kbd_backlight_resume(void)
{
	int ignore = 0;

	if (!kbdbl_handle)
		return;

	if (kbdbl_handle->mode == 0)
		sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore);

	if (kbdbl_handle->timeout != 0)
		sony_call_snc_handle(KBDBL_HANDLER,
				(kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
				&ignore);
}
/*
 * Read the brightness levels available for a new-style backlight by
 * evaluating SN06 at the handle's offset, filling props->offset (min
 * level) and props->maxlvl (max level).  Falls back to offset=0,
 * maxlvl=0xff when the query fails.
 */
static void sony_nc_backlight_ng_read_limits(int handle,
		struct sony_backlight_props *props)
{
	int offset;
	acpi_status status;
	u8 brlvl, i;
	u8 min = 0xff, max = 0x00;
	struct acpi_object_list params;
	union acpi_object in_obj;
	union acpi_object *lvl_enum;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

	props->handle = handle;
	props->offset = 0;
	props->maxlvl = 0xff;

	offset = sony_find_snc_handle(handle);
	if (offset < 0)
		return;

	/* try to read the boundaries from ACPI tables, if we fail the above
	 * defaults should be reasonable
	 */
	params.count = 1;
	params.pointer = &in_obj;
	in_obj.type = ACPI_TYPE_INTEGER;
	in_obj.integer.value = offset;
	/* fixed: third argument had been corrupted to "¶ms" in this copy */
	status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
			&buffer);
	if (ACPI_FAILURE(status))
		return;

	lvl_enum = (union acpi_object *) buffer.pointer;
	if (!lvl_enum) {
		/* fixed: message lacked the trailing newline */
		pr_err("No SN06 return object\n");
		return;
	}
	if (lvl_enum->type != ACPI_TYPE_BUFFER) {
		pr_err("Invalid SN06 return object 0x%.2x\n",
		       lvl_enum->type);
		goto out_invalid;
	}

	/* the buffer lists brightness levels available, brightness levels are
	 * from 0 to 8 in the array, other values are used by ALS control.
	 */
	for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) {
		brlvl = *(lvl_enum->buffer.pointer + i);
		dprintk("Brightness level: %d\n", brlvl);

		if (!brlvl)
			break;

		if (brlvl > max)
			max = brlvl;
		if (brlvl < min)
			min = brlvl;
	}
	props->offset = min;
	props->maxlvl = max;
	dprintk("Brightness levels: min=%d max=%d\n", props->offset,
			props->maxlvl);

out_invalid:
	kfree(buffer.pointer);
	return;
}
/*
 * Register the backlight device using, in order of preference, the
 * new-style SNC handles 0x12f/0x137 or the legacy GBRT/SBRT methods.
 * Quietly does nothing when no brightness interface exists.
 */
static void sony_nc_backlight_setup(void)
{
	acpi_handle unused;
	int max_brightness = 0;
	const struct backlight_ops *ops = NULL;
	struct backlight_properties props;

	if (sony_find_snc_handle(0x12f) != -1) {
		ops = &sony_backlight_ng_ops;
		sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
		max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;

	} else if (sony_find_snc_handle(0x137) != -1) {
		ops = &sony_backlight_ng_ops;
		sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
		max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;

	} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
						&unused))) {
		ops = &sony_backlight_ops;
		/* levels are 1-based, backlight core is 0-based */
		max_brightness = SONY_MAX_BRIGHTNESS - 1;

	} else
		return;

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_PLATFORM;
	props.max_brightness = max_brightness;
	sony_bl_props.dev = backlight_device_register("sony", NULL,
			&sony_bl_props,
			ops, &props);

	if (IS_ERR(sony_bl_props.dev)) {
		pr_warn("unable to register backlight device\n");
		sony_bl_props.dev = NULL;
	} else
		/* seed the initial brightness from the hardware */
		sony_bl_props.dev->props.brightness =
			ops->get_brightness(sony_bl_props.dev);
}
/* Unregister the backlight device if one was created. */
static void sony_nc_backlight_cleanup(void)
{
	struct backlight_device *bd = sony_bl_props.dev;

	if (bd)
		backlight_device_unregister(bd);
}
/*
 * Probe the SNC ACPI device: register the shared platform device,
 * optionally walk/dump the ACPI namespace (debug), enable the EC and
 * SNC features, then set up input, backlight, rfkill and the per-value
 * sysfs attributes.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on the labelled error paths below.
 */
static int sony_nc_add(struct acpi_device *device)
{
	acpi_status status;
	int result = 0;
	acpi_handle handle;
	struct sony_nc_value *item;

	pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);

	sony_nc_acpi_device = device;
	strcpy(acpi_device_class(device), "sony/hotkey");

	sony_nc_acpi_handle = device->handle;

	/* read device status */
	result = acpi_bus_get_status(device);
	/* bail IFF the above call was successful and the device is not present */
	if (!result && !device->status.present) {
		dprintk("Device not present\n");
		result = -ENODEV;
		goto outwalk;
	}

	result = sony_pf_add();
	if (result)
		goto outpresent;

	if (debug) {
		/* dump every SNC method to the log for diagnostics */
		status = acpi_walk_namespace(ACPI_TYPE_METHOD,
				sony_nc_acpi_handle, 1, sony_walk_callback,
				NULL, NULL, NULL);
		if (ACPI_FAILURE(status)) {
			pr_warn("unable to walk acpi resources\n");
			result = -ENODEV;
			goto outpresent;
		}
	}

	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
					 &handle))) {
		/* ECON(1) hands embedded-controller control to the OS */
		if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
			dprintk("ECON Method failed\n");
	}

	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
					 &handle))) {
		dprintk("Doing SNC setup\n");
		result = sony_nc_handles_setup(sony_pf_device);
		if (result)
			goto outpresent;
		result = sony_nc_kbd_backlight_setup(sony_pf_device);
		if (result)
			goto outsnc;
		sony_nc_function_setup(device);
		sony_nc_rfkill_setup(device);
	}

	/* setup input devices and helper fifo */
	result = sony_laptop_setup_input(device);
	if (result) {
		pr_err("Unable to create input devices\n");
		goto outkbdbacklight;
	}

	if (acpi_video_backlight_support()) {
		pr_info("brightness ignored, must be controlled by ACPI video driver\n");
	} else {
		sony_nc_backlight_setup();
	}

	/* create sony_pf sysfs attributes related to the SNC device */
	for (item = sony_nc_values; item->name; ++item) {

		if (!debug && item->debug)
			continue;

		/* find the available acpiget as described in the DSDT */
		for (; item->acpiget && *item->acpiget; ++item->acpiget) {
			if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
							 *item->acpiget,
							 &handle))) {
				dprintk("Found %s getter: %s\n",
						item->name, *item->acpiget);
				item->devattr.attr.mode |= S_IRUGO;
				break;
			}
		}

		/* find the available acpiset as described in the DSDT */
		for (; item->acpiset && *item->acpiset; ++item->acpiset) {
			if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
							 *item->acpiset,
							 &handle))) {
				dprintk("Found %s setter: %s\n",
						item->name, *item->acpiset);
				item->devattr.attr.mode |= S_IWUSR;
				break;
			}
		}

		/* only expose attributes whose ACPI methods exist */
		if (item->devattr.attr.mode != 0) {
			result =
			    device_create_file(&sony_pf_device->dev,
					       &item->devattr);
			if (result)
				goto out_sysfs;
		}
	}

	return 0;

out_sysfs:
	for (item = sony_nc_values; item->name; ++item) {
		device_remove_file(&sony_pf_device->dev, &item->devattr);
	}
	sony_nc_backlight_cleanup();
	sony_laptop_remove_input();

outkbdbacklight:
	sony_nc_kbd_backlight_cleanup(sony_pf_device);

outsnc:
	sony_nc_handles_cleanup(sony_pf_device);

outpresent:
	sony_pf_remove();

outwalk:
	sony_nc_rfkill_cleanup();
	return result;
}
/*
 * Tear the SNC device down: remove sysfs attributes, input, backlight,
 * rfkill and the shared platform device.
 */
static int sony_nc_remove(struct acpi_device *device, int type)
{
	struct sony_nc_value *item;

	sony_nc_backlight_cleanup();

	sony_nc_acpi_device = NULL;

	for (item = sony_nc_values; item->name; item++)
		device_remove_file(&sony_pf_device->dev, &item->devattr);

	sony_nc_kbd_backlight_cleanup(sony_pf_device);
	sony_nc_handles_cleanup(sony_pf_device);
	sony_pf_remove();
	sony_laptop_remove_input();
	sony_nc_rfkill_cleanup();

	dprintk(SONY_NC_DRIVER_NAME " removed.\n");

	return 0;
}
/* All ACPI IDs the module handles (SNC and SPIC devices). */
static const struct acpi_device_id sony_device_ids[] = {
	{SONY_NC_HID, 0},
	{SONY_PIC_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, sony_device_ids);

/* IDs bound to the SNC driver below. */
static const struct acpi_device_id sony_nc_device_ids[] = {
	{SONY_NC_HID, 0},
	{"", 0},
};

/* ACPI driver for the SNC (SNY5001) device. */
static struct acpi_driver sony_nc_driver = {
	.name = SONY_NC_DRIVER_NAME,
	.class = SONY_NC_CLASS,
	.ids = sony_nc_device_ids,
	.owner = THIS_MODULE,
	.ops = {
		.add = sony_nc_add,
		.remove = sony_nc_remove,
		.resume = sony_nc_resume,
		.notify = sony_nc_notify,
		},
};
/*********** SPIC (SNY6001) Device ***********/

/* SPIC model flags */
#define SONYPI_DEVICE_TYPE1	0x00000001
#define SONYPI_DEVICE_TYPE2	0x00000002
#define SONYPI_DEVICE_TYPE3	0x00000004

/* per-type offsets — consumed by SPIC code outside this chunk */
#define SONYPI_TYPE1_OFFSET	0x04
#define SONYPI_TYPE2_OFFSET	0x12
#define SONYPI_TYPE3_OFFSET	0x12

/* One pair of I/O port resources parsed from the SPIC _CRS. */
struct sony_pic_ioport {
	struct acpi_resource_io	io1;
	struct acpi_resource_io	io2;
	struct list_head	list;
};

/* One IRQ resource parsed from the SPIC _CRS. */
struct sony_pic_irq {
	struct acpi_resource_irq	irq;
	struct list_head		list;
};

/* Associates a status byte and mask with its event translation table. */
struct sonypi_eventtypes {
	u8			data;
	unsigned long		mask;
	struct sonypi_event	*events;
};

/* Global state of the SPIC device. */
struct sony_pic_dev {
	struct acpi_device	*acpi_dev;
	struct sony_pic_irq	*cur_irq;	/* resources in use */
	struct sony_pic_ioport	*cur_ioport;
	struct list_head	interrupts;	/* all advertised resources */
	struct list_head	ioports;
	struct mutex		lock;
	struct sonypi_eventtypes *event_types;
	int    (*handle_irq)(const u8, const u8);
	int			model;
	u16			evport_offset;
	u8			camera_power;
	u8			bluetooth_power;
	u8			wwan_power;
};

static struct sony_pic_dev spic_dev = {
	.interrupts = LIST_HEAD_INIT(spic_dev.interrupts),
	.ioports = LIST_HEAD_INIT(spic_dev.ioports),
};

/* nonzero once the SPIC ACPI driver has been registered */
static int spic_drv_registered;
/* Event masks */
#define SONYPI_JOGGER_MASK			0x00000001
#define SONYPI_CAPTURE_MASK			0x00000002
#define SONYPI_FNKEY_MASK			0x00000004
#define SONYPI_BLUETOOTH_MASK			0x00000008
#define SONYPI_PKEY_MASK			0x00000010
#define SONYPI_BACK_MASK			0x00000020
#define SONYPI_HELP_MASK			0x00000040
#define SONYPI_LID_MASK				0x00000080
#define SONYPI_ZOOM_MASK			0x00000100
#define SONYPI_THUMBPHRASE_MASK			0x00000200
#define SONYPI_MEYE_MASK			0x00000400
#define SONYPI_MEMORYSTICK_MASK			0x00000800
#define SONYPI_BATTERY_MASK			0x00001000
#define SONYPI_WIRELESS_MASK			0x00002000

/* One raw-data to event mapping entry. */
struct sonypi_event {
	u8	data;	/* raw byte read from the EC */
	u8	event;	/* corresponding SONYPI_EVENT_* code */
};

/* The set of possible button release events */
static struct sonypi_event sonypi_releaseev[] = {
	{ 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0, 0 }
};

/* The set of possible jogger events */
static struct sonypi_event sonypi_joggerev[] = {
	{ 0x1f, SONYPI_EVENT_JOGDIAL_UP },
	{ 0x01, SONYPI_EVENT_JOGDIAL_DOWN },
	{ 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED },
	{ 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED },
	{ 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP },
	{ 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN },
	{ 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED },
	{ 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED },
	{ 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP },
	{ 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN },
	{ 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED },
	{ 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED },
	{ 0x40, SONYPI_EVENT_JOGDIAL_PRESSED },
	{ 0, 0 }
};

/* The set of possible capture button events */
static struct sonypi_event sonypi_captureev[] = {
	{ 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
	{ 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
	{ 0x40, SONYPI_EVENT_CAPTURE_PRESSED },
	{ 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
	{ 0, 0 }
};

/* The set of possible fnkeys events */
static struct sonypi_event sonypi_fnkeyev[] = {
	{ 0x10, SONYPI_EVENT_FNKEY_ESC },
	{ 0x11, SONYPI_EVENT_FNKEY_F1 },
	{ 0x12, SONYPI_EVENT_FNKEY_F2 },
	{ 0x13, SONYPI_EVENT_FNKEY_F3 },
	{ 0x14, SONYPI_EVENT_FNKEY_F4 },
	{ 0x15, SONYPI_EVENT_FNKEY_F5 },
	{ 0x16, SONYPI_EVENT_FNKEY_F6 },
	{ 0x17, SONYPI_EVENT_FNKEY_F7 },
	{ 0x18, SONYPI_EVENT_FNKEY_F8 },
	{ 0x19, SONYPI_EVENT_FNKEY_F9 },
	{ 0x1a, SONYPI_EVENT_FNKEY_F10 },
	{ 0x1b, SONYPI_EVENT_FNKEY_F11 },
	{ 0x1c, SONYPI_EVENT_FNKEY_F12 },
	{ 0x1f, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x21, SONYPI_EVENT_FNKEY_1 },
	{ 0x22, SONYPI_EVENT_FNKEY_2 },
	{ 0x31, SONYPI_EVENT_FNKEY_D },
	{ 0x32, SONYPI_EVENT_FNKEY_E },
	{ 0x33, SONYPI_EVENT_FNKEY_F },
	{ 0x34, SONYPI_EVENT_FNKEY_S },
	{ 0x35, SONYPI_EVENT_FNKEY_B },
	{ 0x36, SONYPI_EVENT_FNKEY_ONLY },
	{ 0, 0 }
};

/* The set of possible program key events */
static struct sonypi_event sonypi_pkeyev[] = {
	{ 0x01, SONYPI_EVENT_PKEY_P1 },
	{ 0x02, SONYPI_EVENT_PKEY_P2 },
	{ 0x04, SONYPI_EVENT_PKEY_P3 },
	{ 0x20, SONYPI_EVENT_PKEY_P1 },
	{ 0, 0 }
};

/* The set of possible bluetooth events */
static struct sonypi_event sonypi_blueev[] = {
	{ 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED },
	{ 0x59, SONYPI_EVENT_BLUETOOTH_ON },
	{ 0x5a, SONYPI_EVENT_BLUETOOTH_OFF },
	{ 0, 0 }
};

/* The set of possible wireless events */
static struct sonypi_event sonypi_wlessev[] = {
	{ 0x59, SONYPI_EVENT_IGNORE },
	{ 0x5a, SONYPI_EVENT_IGNORE },
	{ 0, 0 }
};

/* The set of possible back button events */
static struct sonypi_event sonypi_backev[] = {
	{ 0x20, SONYPI_EVENT_BACK_PRESSED },
	{ 0, 0 }
};

/* The set of possible help button events */
static struct sonypi_event sonypi_helpev[] = {
	{ 0x3b, SONYPI_EVENT_HELP_PRESSED },
	{ 0, 0 }
};

/* The set of possible lid events */
static struct sonypi_event sonypi_lidev[] = {
	{ 0x51, SONYPI_EVENT_LID_CLOSED },
	{ 0x50, SONYPI_EVENT_LID_OPENED },
	{ 0, 0 }
};

/* The set of possible zoom events */
static struct sonypi_event sonypi_zoomev[] = {
	{ 0x39, SONYPI_EVENT_ZOOM_PRESSED },
	{ 0x10, SONYPI_EVENT_ZOOM_IN_PRESSED },
	{ 0x20, SONYPI_EVENT_ZOOM_OUT_PRESSED },
	{ 0x04, SONYPI_EVENT_ZOOM_PRESSED },
	{ 0, 0 }
};

/* The set of possible thumbphrase events */
static struct sonypi_event sonypi_thumbphraseev[] = {
	{ 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED },
	{ 0, 0 }
};

/* The set of possible motioneye camera events */
static struct sonypi_event sonypi_meyeev[] = {
	{ 0x00, SONYPI_EVENT_MEYE_FACE },
	{ 0x01, SONYPI_EVENT_MEYE_OPPOSITE },
	{ 0, 0 }
};

/* The set of possible memorystick events */
static struct sonypi_event sonypi_memorystickev[] = {
	{ 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT },
	{ 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT },
	{ 0, 0 }
};

/* The set of possible battery events */
static struct sonypi_event sonypi_batteryev[] = {
	{ 0x20, SONYPI_EVENT_BATTERY_INSERT },
	{ 0x30, SONYPI_EVENT_BATTERY_REMOVE },
	{ 0, 0 }
};

/* The set of possible volume events */
static struct sonypi_event sonypi_volumeev[] = {
	{ 0x01, SONYPI_EVENT_VOLUME_INC_PRESSED },
	{ 0x02, SONYPI_EVENT_VOLUME_DEC_PRESSED },
	{ 0, 0 }
};
/* The set of possible brightness events */
static struct sonypi_event sonypi_brightnessev[] = {
{ 0x80, SONYPI_EVENT_BRIGHTNESS_PRESSED },
{ 0, 0 }
};
/*
 * Per-model event routing tables.  Fields are { data, mask, events }:
 * "data" must match the byte read from the event/data port, "mask" is
 * checked against the driver-wide 'mask' of enabled event classes, and
 * "events" is the decoding table applied to the event byte (the matching
 * logic lives in sony_pic_irq()).  A zeroed entry terminates each table;
 * the first entry ("mask" 0xffffffff) catches release events.
 */
static struct sonypi_eventtypes type1_events[] = {
	{ 0, 0xffffffff, sonypi_releaseev },
	{ 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
	{ 0x30, SONYPI_LID_MASK, sonypi_lidev },
	{ 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
	{ 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
	{ 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
	{ 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
	{ 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
	{ 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
	{ 0 },
};

static struct sonypi_eventtypes type2_events[] = {
	{ 0, 0xffffffff, sonypi_releaseev },
	{ 0x38, SONYPI_LID_MASK, sonypi_lidev },
	{ 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
	{ 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
	{ 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
	{ 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
	{ 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ 0x11, SONYPI_BACK_MASK, sonypi_backev },
	{ 0x21, SONYPI_HELP_MASK, sonypi_helpev },
	{ 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
	{ 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
	{ 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
	{ 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
	{ 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ 0 },
};

static struct sonypi_eventtypes type3_events[] = {
	{ 0, 0xffffffff, sonypi_releaseev },
	{ 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
	{ 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
	{ 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
	{ 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
	{ 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ 0x05, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ 0x05, SONYPI_ZOOM_MASK, sonypi_zoomev },
	{ 0x05, SONYPI_CAPTURE_MASK, sonypi_captureev },
	{ 0x05, SONYPI_PKEY_MASK, sonypi_volumeev },
	{ 0x05, SONYPI_PKEY_MASK, sonypi_brightnessev },
	{ 0 },
};
/* low level spic calls */
#define ITERATIONS_LONG		10000
#define ITERATIONS_SHORT	10

/*
 * Poll "command" until it evaluates to zero or "iterations" 1us delays
 * have elapsed.  This must be a macro (not a function) so that the
 * command expression is re-evaluated on every pass.  On timeout only a
 * debug message is printed; the caller proceeds regardless.
 */
#define wait_on_command(command, iterations) {				\
	unsigned int n = iterations;					\
	while (--n && (command))					\
		udelay(1);						\
	if (!n)								\
		dprintk("command failed at %s : %s (line %d)\n",	\
			__FILE__, __func__, __LINE__);			\
}
/*
 * Issue a single command byte on the SPIC control port (io1 + 4) and
 * read back the two response bytes.  Returns the byte read from the
 * data port (io1).
 */
static u8 sony_pic_call1(u8 dev)
{
	u16 port = spic_dev.cur_ioport->io1.minimum;
	u8 status, result;

	/* wait until bit 0x02 of the control port clears before writing */
	wait_on_command(inb_p(port + 4) & 2, ITERATIONS_LONG);
	outb(dev, port + 4);
	status = inb_p(port + 4);
	result = inb_p(port);
	dprintk("sony_pic_call1(0x%.2x): 0x%.4x\n", dev,
		(result << 8) | status);
	return result;
}
/*
 * Issue a two-byte command: "dev" on the control port (io1 + 4), then
 * "fn" on the data port (io1).  Returns the response byte read from the
 * data port.
 */
static u8 sony_pic_call2(u8 dev, u8 fn)
{
	u16 port = spic_dev.cur_ioport->io1.minimum;
	u8 result;

	wait_on_command(inb_p(port + 4) & 2, ITERATIONS_LONG);
	outb(dev, port + 4);
	wait_on_command(inb_p(port + 4) & 2, ITERATIONS_LONG);
	outb(fn, port);
	result = inb_p(port);
	dprintk("sony_pic_call2(0x%.2x - 0x%.2x): 0x%.4x\n", dev, fn, result);
	return result;
}
/*
 * Issue a three-byte command: "dev" on the control port, then "fn" and
 * "v" on the data port, each write gated on bit 0x02 of the control
 * port clearing.  Returns the response byte from the data port.
 */
static u8 sony_pic_call3(u8 dev, u8 fn, u8 v)
{
	u16 port = spic_dev.cur_ioport->io1.minimum;
	u8 result;

	wait_on_command(inb_p(port + 4) & 2, ITERATIONS_LONG);
	outb(dev, port + 4);
	wait_on_command(inb_p(port + 4) & 2, ITERATIONS_LONG);
	outb(fn, port);
	wait_on_command(inb_p(port + 4) & 2, ITERATIONS_LONG);
	outb(v, port);
	result = inb_p(port);
	dprintk("sony_pic_call3(0x%.2x - 0x%.2x - 0x%.2x): 0x%.4x\n",
		dev, fn, v, result);
	return result;
}
/*
 * minidrivers for SPIC models
 */

/*
 * Type3 IRQ hook.  Returns 0 when the event was consumed here (an extra
 * command was issued and a follow-up irq will deliver fresh data), 1 to
 * let the generic handler report the event.
 */
static int type3_handle_irq(const u8 data_mask, const u8 ev)
{
	if (data_mask != 0x31)
		return 1;

	/*
	 * 0x31 means we have to take some extra action and wait for the
	 * next irq on some Type3 models; it will generate a new irq and
	 * we can read new data from the device:
	 *  - 0x5c and 0x5f require 0xA0
	 *  - 0x61 requires 0xB3
	 */
	switch (ev) {
	case 0x5c:
	case 0x5f:
		sony_pic_call1(0xA0);
		break;
	case 0x61:
		sony_pic_call1(0xB3);
		break;
	}
	return 0;
}
/*
 * Infer the SPIC model from the Intel south bridge present on the PCI
 * bus: PIIX4 (82371AB) => Type1, ICH6 => Type2, ICH7/ICH8/ICH9 =>
 * Type3.  Falls back to Type2 when no known bridge is found.
 */
static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
{
	static const unsigned int type3_ids[] = {
		PCI_DEVICE_ID_INTEL_ICH7_1,
		PCI_DEVICE_ID_INTEL_ICH8_4,
		PCI_DEVICE_ID_INTEL_ICH9_1,
	};
	struct pci_dev *pcidev;
	unsigned int i;

	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
	if (pcidev) {
		dev->model = SONYPI_DEVICE_TYPE1;
		dev->evport_offset = SONYPI_TYPE1_OFFSET;
		dev->event_types = type1_events;
		goto found;
	}

	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_INTEL_ICH6_1, NULL);
	if (pcidev) {
		dev->model = SONYPI_DEVICE_TYPE2;
		dev->evport_offset = SONYPI_TYPE2_OFFSET;
		dev->event_types = type2_events;
		goto found;
	}

	for (i = 0; i < ARRAY_SIZE(type3_ids); i++) {
		pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
					type3_ids[i], NULL);
		if (pcidev) {
			dev->model = SONYPI_DEVICE_TYPE3;
			dev->handle_irq = type3_handle_irq;
			dev->evport_offset = SONYPI_TYPE3_OFFSET;
			dev->event_types = type3_events;
			goto found;
		}
	}

	/* default: no recognized bridge, behave like a Type2 */
	dev->model = SONYPI_DEVICE_TYPE2;
	dev->evport_offset = SONYPI_TYPE2_OFFSET;
	dev->event_types = type2_events;

found:
	if (pcidev)
		pci_dev_put(pcidev);

	pr_info("detected Type%d model\n",
		dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
		dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
}
/* camera tests and poweron/poweroff */
#define SONYPI_CAMERA_PICTURE		5
#define SONYPI_CAMERA_CONTROL		0x10

#define SONYPI_CAMERA_BRIGHTNESS		0
#define SONYPI_CAMERA_CONTRAST			1
#define SONYPI_CAMERA_HUE			2
#define SONYPI_CAMERA_COLOR			3
#define SONYPI_CAMERA_SHARPNESS			4

#define SONYPI_CAMERA_EXPOSURE_MASK		0xC
#define SONYPI_CAMERA_WHITE_BALANCE_MASK	0x3
#define SONYPI_CAMERA_PICTURE_MODE_MASK		0x30
#define SONYPI_CAMERA_MUTE_MASK			0x40

/* the rest don't need a loop until not 0xff */
#define SONYPI_CAMERA_AGC			6
#define SONYPI_CAMERA_AGC_MASK			0x30
#define SONYPI_CAMERA_SHUTTER_MASK		0x7

#define SONYPI_CAMERA_SHUTDOWN_REQUEST		7
/* NOTE(review): duplicate of the identical definition above -- harmless
 * (identical macro redefinition), kept for byte-compatibility */
#define SONYPI_CAMERA_CONTROL			0x10

#define SONYPI_CAMERA_STATUS			7
#define SONYPI_CAMERA_STATUS_READY		0x2
#define SONYPI_CAMERA_STATUS_POSITION		0x4

#define SONYPI_DIRECTION_BACKWARDS		0x4

#define SONYPI_CAMERA_REVISION			8
#define SONYPI_CAMERA_ROMVERSION		9
/*
 * Query the camera status register; true when the device answered
 * (not 0xff) and the READY bit is set.
 */
static int __sony_pic_camera_ready(void)
{
	u8 status = sony_pic_call2(0x8f, SONYPI_CAMERA_STATUS);

	return status != 0xff && (status & SONYPI_CAMERA_STATUS_READY);
}
/*
 * Mute the camera picture and, if it was powered, turn its power off.
 * Returns 0 on success, -ENODEV when camera control is disabled.
 */
static int __sony_pic_camera_off(void)
{
	if (!camera) {
		pr_warn("camera control not enabled\n");
		return -ENODEV;
	}

	wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE,
				       SONYPI_CAMERA_MUTE_MASK),
			ITERATIONS_SHORT);

	if (!spic_dev.camera_power)
		return 0;

	sony_pic_call2(0x91, 0);
	spic_dev.camera_power = 0;
	return 0;
}
/*
 * Power on the motion-eye camera.
 *
 * Makes up to 5 power-on attempts.  Each attempt waits (up to
 * 100 * 10ms) for command 0x91/0x1 to stop reporting busy, issues the
 * power-on call 0x93, then polls readiness for up to 400 * 10ms.
 * Returns 0 on success (or if already powered), -ENODEV when camera
 * control is disabled or the camera never becomes ready.
 */
static int __sony_pic_camera_on(void)
{
	int i, j, x;

	if (!camera) {
		pr_warn("camera control not enabled\n");
		return -ENODEV;
	}

	if (spic_dev.camera_power)
		return 0;

	for (j = 5; j > 0; j--) {

		for (x = 0; x < 100 && sony_pic_call2(0x91, 0x1); x++)
			msleep(10);
		sony_pic_call1(0x93);

		for (i = 400; i > 0; i--) {
			if (__sony_pic_camera_ready())
				break;
			msleep(10);
		}
		/* i != 0 means the ready poll succeeded */
		if (i)
			break;
	}

	/* j reaches 0 only when all 5 attempts timed out */
	if (j == 0) {
		pr_warn("failed to power on camera\n");
		return -ENODEV;
	}

	wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTROL,
				       0x5a),
			ITERATIONS_SHORT);

	spic_dev.camera_power = 1;
	return 0;
}
/* External camera command (exported to the motion eye v4l driver) */
/*
 * Dispatch a camera command.  SETCAMERA toggles power; every other
 * known command writes "value" into one camera register via
 * sony_pic_call3(0x90, reg, value).  Always returns 0 (unknown commands
 * are only logged), matching the historical sonypi behaviour.
 */
int sony_pic_camera_command(int command, u8 value)
{
	/* commands that simply store "value" into one camera register */
	static const struct {
		int command;
		u8 reg;
	} reg_cmds[] = {
		{ SONY_PIC_COMMAND_SETCAMERABRIGHTNESS,
			SONYPI_CAMERA_BRIGHTNESS },
		{ SONY_PIC_COMMAND_SETCAMERACONTRAST,
			SONYPI_CAMERA_CONTRAST },
		{ SONY_PIC_COMMAND_SETCAMERAHUE, SONYPI_CAMERA_HUE },
		{ SONY_PIC_COMMAND_SETCAMERACOLOR, SONYPI_CAMERA_COLOR },
		{ SONY_PIC_COMMAND_SETCAMERASHARPNESS,
			SONYPI_CAMERA_SHARPNESS },
		{ SONY_PIC_COMMAND_SETCAMERAPICTURE, SONYPI_CAMERA_PICTURE },
		{ SONY_PIC_COMMAND_SETCAMERAAGC, SONYPI_CAMERA_AGC },
	};
	unsigned int i;

	if (!camera)
		return -EIO;

	mutex_lock(&spic_dev.lock);

	if (command == SONY_PIC_COMMAND_SETCAMERA) {
		if (value)
			__sony_pic_camera_on();
		else
			__sony_pic_camera_off();
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(reg_cmds); i++) {
		if (reg_cmds[i].command == command) {
			wait_on_command(sony_pic_call3(0x90, reg_cmds[i].reg,
						       value),
					ITERATIONS_SHORT);
			goto out;
		}
	}

	pr_err("sony_pic_camera_command invalid: %d\n", command);
out:
	mutex_unlock(&spic_dev.lock);
	return 0;
}
EXPORT_SYMBOL(sony_pic_camera_command);
/* gprs/edge modem (SZ460N and SZ210P), thanks to Joshua Wise */
/*
 * Switch WWAN power on/off; no-op when already in the requested state.
 * Caller must hold spic_dev.lock.
 */
static void __sony_pic_set_wwanpower(u8 state)
{
	u8 new_state = !!state;

	if (spic_dev.wwan_power == new_state)
		return;
	sony_pic_call2(0xB0, new_state);
	sony_pic_call1(0x82);
	spic_dev.wwan_power = new_state;
}
/*
 * sysfs store: parse a decimal value and set WWAN power accordingly.
 * Returns "count" on success or a negative errno.
 */
static ssize_t sony_pic_wwanpower_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned long value;
	int ret;

	if (count > 31)
		return -EINVAL;

	/*
	 * kstrtoul() instead of simple_strtoul(): the latter silently
	 * parses malformed input (e.g. "abc") as 0, while kstrtoul
	 * lets us reject it with an error.
	 */
	ret = kstrtoul(buffer, 10, &value);
	if (ret)
		return ret;

	mutex_lock(&spic_dev.lock);
	__sony_pic_set_wwanpower(value);
	mutex_unlock(&spic_dev.lock);

	return count;
}
/* sysfs show: print the cached WWAN power state. */
static ssize_t sony_pic_wwanpower_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	ssize_t len;

	mutex_lock(&spic_dev.lock);
	len = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.wwan_power);
	mutex_unlock(&spic_dev.lock);
	return len;
}
/* bluetooth subsystem power state */
/*
 * Switch bluetooth power on/off; no-op when already in the requested
 * state.  Caller must hold spic_dev.lock.
 */
static void __sony_pic_set_bluetoothpower(u8 state)
{
	u8 new_state = !!state;

	if (spic_dev.bluetooth_power == new_state)
		return;
	sony_pic_call2(0x96, new_state);
	sony_pic_call1(0x82);
	spic_dev.bluetooth_power = new_state;
}
/*
 * sysfs store: parse a decimal value and set bluetooth power.
 * Returns "count" on success or a negative errno.
 */
static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned long value;
	int ret;

	if (count > 31)
		return -EINVAL;

	/* kstrtoul() rejects malformed input, unlike simple_strtoul() */
	ret = kstrtoul(buffer, 10, &value);
	if (ret)
		return ret;

	mutex_lock(&spic_dev.lock);
	__sony_pic_set_bluetoothpower(value);
	mutex_unlock(&spic_dev.lock);

	return count;
}
/* sysfs show: print the cached bluetooth power state. */
static ssize_t sony_pic_bluetoothpower_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	ssize_t len;

	mutex_lock(&spic_dev.lock);
	len = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.bluetooth_power);
	mutex_unlock(&spic_dev.lock);
	return len;
}
/* fan speed */
/* FAN0 information (reverse engineered from ACPI tables) */
#define SONY_PIC_FAN0_STATUS	0x93

/* Write the fan speed setpoint to the EC FAN0 status register. */
static int sony_pic_set_fanspeed(unsigned long value)
{
	return ec_write(SONY_PIC_FAN0_STATUS, value);
}

/* Read the current fan speed byte from the EC; returns ec_read()'s
 * status (0 on success). */
static int sony_pic_get_fanspeed(u8 *value)
{
	return ec_read(SONY_PIC_FAN0_STATUS, value);
}
/*
 * sysfs store: parse a decimal fan speed and write it to the EC.
 * Returns "count" on success, -EINVAL/-ERANGE for bad input, -EIO on
 * EC write failure.
 */
static ssize_t sony_pic_fanspeed_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned long value;
	int ret;

	if (count > 31)
		return -EINVAL;

	/* kstrtoul() rejects malformed input, unlike simple_strtoul() */
	ret = kstrtoul(buffer, 10, &value);
	if (ret)
		return ret;

	if (sony_pic_set_fanspeed(value))
		return -EIO;

	return count;
}
/* sysfs show: read and print the current fan speed, or -EIO. */
static ssize_t sony_pic_fanspeed_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	u8 speed = 0;

	if (sony_pic_get_fanspeed(&speed))
		return -EIO;
	return snprintf(buffer, PAGE_SIZE, "%d\n", speed);
}
/*
 * Declare a device attribute wired to the sony_pic_<name>_show /
 * sony_pic_<name>_store pair defined above.
 */
#define SPIC_ATTR(_name, _mode)					\
struct device_attribute spic_attr_##_name = __ATTR(_name,	\
		_mode, sony_pic_## _name ##_show,		\
		sony_pic_## _name ##_store)

static SPIC_ATTR(bluetoothpower, 0644);
static SPIC_ATTR(wwanpower, 0644);
static SPIC_ATTR(fanspeed, 0644);

/* attributes registered on the platform device in sony_pic_add() */
static struct attribute *spic_attributes[] = {
	&spic_attr_bluetoothpower.attr,
	&spic_attr_wwanpower.attr,
	&spic_attr_fanspeed.attr,
	NULL
};

static struct attribute_group spic_attribute_group = {
	.attrs = spic_attributes
};
/******** SONYPI compatibility **********/
#ifdef CONFIG_SONYPI_COMPAT

/* battery / brightness / temperature addresses (EC register offsets) */
#define SONYPI_BAT_FLAGS	0x81
#define SONYPI_LCD_LIGHT	0x96
#define SONYPI_BAT1_PCTRM	0xa0
#define SONYPI_BAT1_LEFT	0xa2
#define SONYPI_BAT1_MAXRT	0xa4
#define SONYPI_BAT2_PCTRM	0xa8
#define SONYPI_BAT2_LEFT	0xaa
#define SONYPI_BAT2_MAXRT	0xac
#define SONYPI_BAT1_MAXTK	0xb0
#define SONYPI_BAT1_FULL	0xb2
#define SONYPI_BAT2_MAXTK	0xb8
#define SONYPI_BAT2_FULL	0xba
#define SONYPI_TEMP_STATUS	0xC1

/* state of the legacy /dev/sonypi character device */
struct sonypi_compat_s {
	struct fasync_struct	*fifo_async;	/* SIGIO subscribers */
	struct kfifo		fifo;		/* queued event bytes */
	spinlock_t		fifo_lock;
	wait_queue_head_t	fifo_proc_list;	/* blocked readers */
	atomic_t		open_count;	/* concurrent opens */
};

static struct sonypi_compat_s sonypi_compat = {
	.open_count = ATOMIC_INIT(0),
};
/* Register/unregister the caller for SIGIO on new events. */
static int sonypi_misc_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &sonypi_compat.fifo_async);
}

/* Drop one reference to the compat device. */
static int sonypi_misc_release(struct inode *inode, struct file *file)
{
	atomic_dec(&sonypi_compat.open_count);
	return 0;
}
/*
 * Open the compat device.  The fifo of pending events is flushed on
 * the first concurrent open only, under fifo_lock so the reset does
 * not race with the irq-side producer.
 */
static int sonypi_misc_open(struct inode *inode, struct file *file)
{
	/* Flush input queue on first open */
	unsigned long flags;

	spin_lock_irqsave(&sonypi_compat.fifo_lock, flags);

	if (atomic_inc_return(&sonypi_compat.open_count) == 1)
		kfifo_reset(&sonypi_compat.fifo);

	spin_unlock_irqrestore(&sonypi_compat.fifo_lock, flags);

	return 0;
}
/*
 * Read queued event bytes from the compat fifo.
 *
 * O_NONBLOCK with an empty fifo returns -EAGAIN; otherwise the caller
 * sleeps (interruptibly) until at least one byte is queued, then copies
 * out up to "count" bytes one at a time.  Returns the number of bytes
 * copied, -ERESTARTSYS if interrupted, or -EFAULT on a bad user buffer.
 */
static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
				size_t count, loff_t *pos)
{
	ssize_t ret;
	unsigned char c;

	if ((kfifo_len(&sonypi_compat.fifo) == 0) &&
	    (file->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ret = wait_event_interruptible(sonypi_compat.fifo_proc_list,
				       kfifo_len(&sonypi_compat.fifo) != 0);
	if (ret)
		return ret;

	/* ret is 0 here, so it doubles as the byte counter */
	while (ret < count &&
	       (kfifo_out_locked(&sonypi_compat.fifo, &c, sizeof(c),
				 &sonypi_compat.fifo_lock) == sizeof(c))) {
		if (put_user(c, buf++))
			return -EFAULT;
		ret++;
	}

	/* mimic a real character device: update the access time */
	if (ret > 0) {
		struct inode *inode = file->f_path.dentry->d_inode;
		inode->i_atime = current_fs_time(inode->i_sb);
	}

	return ret;
}
/* poll(): readable whenever the event fifo is non-empty. */
static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
{
	unsigned int ready = 0;

	poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
	if (kfifo_len(&sonypi_compat.fifo))
		ready = POLLIN | POLLRDNORM;
	return ready;
}
/*
 * Read a little-endian 16-bit value from two consecutive EC registers.
 * Returns 0 on success, -1 if either byte read fails.
 */
static int ec_read16(u8 addr, u16 *value)
{
	u8 lo, hi;

	if (ec_read(addr, &lo) || ec_read(addr + 1, &hi))
		return -1;
	*value = lo | (hi << 8);
	return 0;
}
/*
 * Legacy sonypi ioctl interface: brightness via the SNC ACPI methods,
 * battery/temperature via EC registers, bluetooth and fan via the SPIC
 * helpers above.  All paths run under spic_dev.lock.
 */
static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
							unsigned long arg)
{
	int ret = 0;
	void __user *argp = (void __user *)arg;
	u8 val8;
	u16 val16;
	int value;

	mutex_lock(&spic_dev.lock);
	switch (cmd) {
	case SONYPI_IOCGBRT:
		if (sony_bl_props.dev == NULL) {
			ret = -EIO;
			break;
		}
		if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) {
			ret = -EIO;
			break;
		}
		/* map the SNC brightness level to the sonypi 0-255 scale:
		 * ((value & 0xff) - 1) << 5, the inverse of SONYPI_IOCSBRT */
		val8 = ((value & 0xff) - 1) << 5;
		if (copy_to_user(argp, &val8, sizeof(val8)))
				ret = -EFAULT;
		break;
	case SONYPI_IOCSBRT:
		if (sony_bl_props.dev == NULL) {
			ret = -EIO;
			break;
		}
		if (copy_from_user(&val8, argp, sizeof(val8))) {
			ret = -EFAULT;
			break;
		}
		if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
				(val8 >> 5) + 1, NULL)) {
			ret = -EIO;
			break;
		}
		/* sync the backlight device status */
		sony_bl_props.dev->props.brightness =
		    sony_backlight_get_brightness(sony_bl_props.dev);
		break;
	case SONYPI_IOCGBAT1CAP:
		if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val16, sizeof(val16)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCGBAT1REM:
		if (ec_read16(SONYPI_BAT1_LEFT, &val16)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val16, sizeof(val16)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCGBAT2CAP:
		if (ec_read16(SONYPI_BAT2_FULL, &val16)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val16, sizeof(val16)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCGBAT2REM:
		if (ec_read16(SONYPI_BAT2_LEFT, &val16)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val16, sizeof(val16)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCGBATFLAGS:
		if (ec_read(SONYPI_BAT_FLAGS, &val8)) {
			ret = -EIO;
			break;
		}
		/* only the low three flag bits are exposed */
		val8 &= 0x07;
		if (copy_to_user(argp, &val8, sizeof(val8)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCGBLUE:
		val8 = spic_dev.bluetooth_power;
		if (copy_to_user(argp, &val8, sizeof(val8)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCSBLUE:
		if (copy_from_user(&val8, argp, sizeof(val8))) {
			ret = -EFAULT;
			break;
		}
		__sony_pic_set_bluetoothpower(val8);
		break;
	/* FAN Controls */
	case SONYPI_IOCGFAN:
		if (sony_pic_get_fanspeed(&val8)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val8, sizeof(val8)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCSFAN:
		if (copy_from_user(&val8, argp, sizeof(val8))) {
			ret = -EFAULT;
			break;
		}
		if (sony_pic_set_fanspeed(val8))
			ret = -EIO;
		break;
	/* GET Temperature (useful under APM) */
	case SONYPI_IOCGTEMP:
		if (ec_read(SONYPI_TEMP_STATUS, &val8)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val8, sizeof(val8)))
			ret = -EFAULT;
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&spic_dev.lock);
	return ret;
}
/* file operations for the legacy /dev/sonypi misc device */
static const struct file_operations sonypi_misc_fops = {
	.owner		= THIS_MODULE,
	.read		= sonypi_misc_read,
	.poll		= sonypi_misc_poll,
	.open		= sonypi_misc_open,
	.release	= sonypi_misc_release,
	.fasync		= sonypi_misc_fasync,
	.unlocked_ioctl	= sonypi_misc_ioctl,
	.llseek		= noop_llseek,
};

/* minor may be overridden by the "minor" module parameter at init */
static struct miscdevice sonypi_misc_device = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "sonypi",
	.fops		= &sonypi_misc_fops,
};
/* Queue one event byte for the compat device and notify all waiters. */
static void sonypi_compat_report_event(u8 event)
{
	u8 ev = event;

	kfifo_in_locked(&sonypi_compat.fifo, &ev, sizeof(ev),
			&sonypi_compat.fifo_lock);
	kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN);
	wake_up_interruptible(&sonypi_compat.fifo_proc_list);
}
/*
 * Set up the sonypi compatibility layer: event fifo, reader wait queue
 * and the /dev/sonypi misc device.  Returns 0 or a negative errno,
 * freeing the fifo if device registration fails.
 */
static int sonypi_compat_init(void)
{
	int error;

	spin_lock_init(&sonypi_compat.fifo_lock);
	error =
	 kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
	if (error) {
		pr_err("kfifo_alloc failed\n");
		return error;
	}

	init_waitqueue_head(&sonypi_compat.fifo_proc_list);

	/* honor the "minor" module parameter when given */
	if (minor != -1)
		sonypi_misc_device.minor = minor;
	error = misc_register(&sonypi_misc_device);
	if (error) {
		pr_err("misc_register failed\n");
		goto err_free_kfifo;
	}
	if (minor == -1)
		pr_info("device allocated minor is %d\n",
			sonypi_misc_device.minor);

	return 0;

err_free_kfifo:
	kfifo_free(&sonypi_compat.fifo);
	return error;
}
/* Tear down the compat device and its fifo. */
static void sonypi_compat_exit(void)
{
	misc_deregister(&sonypi_misc_device);
	kfifo_free(&sonypi_compat.fifo);
}
#else
/* CONFIG_SONYPI_COMPAT disabled: the compat layer becomes no-ops */
static int sonypi_compat_init(void) { return 0; }
static void sonypi_compat_exit(void) { }
static void sonypi_compat_report_event(u8 event) { }
#endif /* CONFIG_SONYPI_COMPAT */
/*
 * ACPI callbacks
 */
/*
 * acpi_walk_resources() callback for _PRS: collect the SPIC's possible
 * IO port pairs and IRQ lines into dev->ioports / dev->interrupts.
 * Each START_DEPENDENT descriptor opens a new ioport slot; subsequent
 * IO descriptors fill its io1 then io2 fields.
 */
static acpi_status
sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
{
	u32 i;
	struct sony_pic_dev *dev = (struct sony_pic_dev *)context;

	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_START_DEPENDENT:
		{
			/* start IO enumeration */
			struct sony_pic_ioport *ioport = kzalloc(sizeof(*ioport), GFP_KERNEL);
			if (!ioport)
				return AE_ERROR;

			list_add(&ioport->list, &dev->ioports);
			return AE_OK;
		}

	case ACPI_RESOURCE_TYPE_END_DEPENDENT:
		/* end IO enumeration */
		return AE_OK;

	case ACPI_RESOURCE_TYPE_IRQ:
		{
			struct acpi_resource_irq *p = &resource->data.irq;
			struct sony_pic_irq *interrupt = NULL;
			if (!p || !p->interrupt_count) {
				/*
				 * IRQ descriptors may have no IRQ# bits set,
				 * particularly those with _STA disabled
				 */
				dprintk("Blank IRQ resource\n");
				return AE_OK;
			}
			for (i = 0; i < p->interrupt_count; i++) {
				if (!p->interrupts[i]) {
					pr_warn("Invalid IRQ %d\n",
						p->interrupts[i]);
					continue;
				}
				interrupt = kzalloc(sizeof(*interrupt),
						GFP_KERNEL);
				if (!interrupt)
					return AE_ERROR;

				list_add(&interrupt->list, &dev->interrupts);
				interrupt->irq.triggering = p->triggering;
				interrupt->irq.polarity = p->polarity;
				interrupt->irq.sharable = p->sharable;
				/* one list entry per possible IRQ number */
				interrupt->irq.interrupt_count = 1;
				interrupt->irq.interrupts[0] = p->interrupts[i];
			}
			return AE_OK;
		}
	case ACPI_RESOURCE_TYPE_IO:
		{
			struct acpi_resource_io *io = &resource->data.io;
			/* fill the most recently added ioport slot */
			struct sony_pic_ioport *ioport =
				list_first_entry(&dev->ioports, struct sony_pic_ioport, list);
			if (!io) {
				dprintk("Blank IO resource\n");
				return AE_OK;
			}

			if (!ioport->io1.minimum) {
				memcpy(&ioport->io1, io, sizeof(*io));
				dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum,
						ioport->io1.address_length);
			}
			else if (!ioport->io2.minimum) {
				memcpy(&ioport->io2, io, sizeof(*io));
				dprintk("IO2 at 0x%.4x (0x%.2x)\n", ioport->io2.minimum,
						ioport->io2.address_length);
			}
			else {
				pr_err("Unknown SPIC Type, more than 2 IO Ports\n");
				return AE_ERROR;
			}
			return AE_OK;
		}

	default:
		dprintk("Resource %d isn't an IRQ nor an IO port\n",
			resource->type);
		/* fallthrough: unknown types are accepted like END_TAG */
	case ACPI_RESOURCE_TYPE_END_TAG:
		return AE_OK;
	}
	/* unreachable: every case above returns */
	return AE_CTRL_TERMINATE;
}
/*
 * Read the device status (_STA) and walk its possible resources (_PRS)
 * into spic_dev.  Returns 0 on success or a negative errno.
 */
static int sony_pic_possible_resources(struct acpi_device *device)
{
	acpi_status status;
	int result;

	if (!device)
		return -EINVAL;

	/* get device status */
	/* see acpi_pci_link_get_current acpi_pci_link_get_possible */
	dprintk("Evaluating _STA\n");
	result = acpi_bus_get_status(device);
	if (result) {
		pr_warn("Unable to read status\n");
		return result;
	}

	if (device->status.enabled)
		dprintk("Device enabled\n");
	else
		dprintk("Device disabled\n");

	/*
	 * Query and parse 'method'
	 */
	dprintk("Evaluating %s\n", METHOD_NAME__PRS);
	status = acpi_walk_resources(device->handle, METHOD_NAME__PRS,
			sony_pic_read_possible_resource, &spic_dev);
	if (ACPI_FAILURE(status)) {
		pr_warn("Failure evaluating %s\n", METHOD_NAME__PRS);
		return -ENODEV;
	}

	return 0;
}
/*
 * Disable the spic device by calling its _DIS method
 *
 * A missing _DIS (AE_NOT_FOUND) is treated as success; any other ACPI
 * failure yields -ENXIO.
 */
static int sony_pic_disable(struct acpi_device *device)
{
	acpi_status status;

	status = acpi_evaluate_object(device->handle, "_DIS", NULL, NULL);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
		return -ENXIO;

	dprintk("Device disabled\n");
	return 0;
}
/*
 * Based on drivers/acpi/pci_link.c:acpi_pci_link_set
 *
 * Call _SRS to set current resources
 *
 * Builds an in-memory resource template (two or three IO/IRQ
 * descriptors plus an END_TAG, depending on model), hands it to
 * acpi_set_current_resources(), then performs the initialization calls
 * inherited from sonypi.  Returns 0 or -EINVAL/-ENOMEM/-ENODEV.
 */
static int sony_pic_enable(struct acpi_device *device,
		struct sony_pic_ioport *ioport, struct sony_pic_irq *irq)
{
	acpi_status status;
	int result = 0;
	/* Type 1 resource layout is:
	 *	IO
	 *	IO
	 *	IRQNoFlags
	 *	End
	 *
	 * Type 2 and 3 resource layout is:
	 *	IO
	 *	IRQNoFlags
	 *	End
	 */
	struct {
		struct acpi_resource res1;
		struct acpi_resource res2;
		struct acpi_resource res3;
		struct acpi_resource res4;
	} *resource;
	struct acpi_buffer buffer = { 0, NULL };

	if (!ioport || !irq)
		return -EINVAL;

	/* init acpi_buffer */
	/* NOTE(review): the "+ 1" over sizeof(*resource) is inherited
	 * from the original code -- purpose unclear, TODO confirm */
	resource = kzalloc(sizeof(*resource) + 1, GFP_KERNEL);
	if (!resource)
		return -ENOMEM;

	buffer.length = sizeof(*resource) + 1;
	buffer.pointer = resource;

	/* setup Type 1 resources */
	if (spic_dev.model == SONYPI_DEVICE_TYPE1) {

		/* setup io resources */
		resource->res1.type = ACPI_RESOURCE_TYPE_IO;
		resource->res1.length = sizeof(struct acpi_resource);
		memcpy(&resource->res1.data.io, &ioport->io1,
				sizeof(struct acpi_resource_io));

		resource->res2.type = ACPI_RESOURCE_TYPE_IO;
		resource->res2.length = sizeof(struct acpi_resource);
		memcpy(&resource->res2.data.io, &ioport->io2,
				sizeof(struct acpi_resource_io));

		/* setup irq resource */
		resource->res3.type = ACPI_RESOURCE_TYPE_IRQ;
		resource->res3.length = sizeof(struct acpi_resource);
		memcpy(&resource->res3.data.irq, &irq->irq,
				sizeof(struct acpi_resource_irq));
		/* we requested a shared irq */
		resource->res3.data.irq.sharable = ACPI_SHARED;

		resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG;
	}
	/* setup Type 2/3 resources */
	else {
		/* setup io resource */
		resource->res1.type = ACPI_RESOURCE_TYPE_IO;
		resource->res1.length = sizeof(struct acpi_resource);
		memcpy(&resource->res1.data.io, &ioport->io1,
				sizeof(struct acpi_resource_io));

		/* setup irq resource */
		resource->res2.type = ACPI_RESOURCE_TYPE_IRQ;
		resource->res2.length = sizeof(struct acpi_resource);
		memcpy(&resource->res2.data.irq, &irq->irq,
				sizeof(struct acpi_resource_irq));
		/* we requested a shared irq */
		resource->res2.data.irq.sharable = ACPI_SHARED;

		resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG;
	}

	/* Attempt to set the resource */
	dprintk("Evaluating _SRS\n");
	status = acpi_set_current_resources(device->handle, &buffer);

	/* check for total failure */
	if (ACPI_FAILURE(status)) {
		pr_err("Error evaluating _SRS\n");
		result = -ENODEV;
		goto end;
	}

	/* Necessary device initializations calls (from sonypi) */
	sony_pic_call1(0x82);
	sony_pic_call2(0x81, 0xff);
	sony_pic_call1(compat ? 0x92 : 0x82);

end:
	kfree(resource);
	return result;
}
/*****************
 *
 * ISR: some event is available
 *
 *****************/
/*
 * Read the event byte and its data mask, match them against the
 * model's event_types tables, and report the decoded event to the
 * input layer, ACPI and the sonypi compat fifo.  Always returns
 * IRQ_HANDLED (the line may be shared with other sources we simply
 * ignore).
 */
static irqreturn_t sony_pic_irq(int irq, void *dev_id)
{
	int i, j;
	u8 ev = 0;
	u8 data_mask = 0;
	u8 device_event = 0;

	struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id;

	ev = inb_p(dev->cur_ioport->io1.minimum);
	/* Type1 has a second IO range for the mask; other models read it
	 * from io1 at the model-specific event-port offset */
	if (dev->cur_ioport->io2.minimum)
		data_mask = inb_p(dev->cur_ioport->io2.minimum);
	else
		data_mask = inb_p(dev->cur_ioport->io1.minimum +
				dev->evport_offset);

	dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
			ev, data_mask, dev->cur_ioport->io1.minimum,
			dev->evport_offset);

	/* 0x00/0xff means no event (or no device) on this line */
	if (ev == 0x00 || ev == 0xff)
		return IRQ_HANDLED;

	for (i = 0; dev->event_types[i].mask; i++) {

		/* the table entry's data bits must all be present */
		if ((data_mask & dev->event_types[i].data) !=
		    dev->event_types[i].data)
			continue;

		/* the event class must be enabled in the driver mask */
		if (!(mask & dev->event_types[i].mask))
			continue;

		for (j = 0; dev->event_types[i].events[j].event; j++) {
			if (ev == dev->event_types[i].events[j].data) {
				device_event =
					dev->event_types[i].events[j].event;
				/* some events may require ignoring */
				if (!device_event)
					return IRQ_HANDLED;
				goto found;
			}
		}
	}
	/* Still not able to decode the event try to pass
	 * it over to the minidriver
	 */
	if (dev->handle_irq && dev->handle_irq(data_mask, ev) == 0)
		return IRQ_HANDLED;

	dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
			ev, data_mask, dev->cur_ioport->io1.minimum,
			dev->evport_offset);
	return IRQ_HANDLED;

found:
	sony_laptop_report_input_event(device_event);
	acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event);
	sonypi_compat_report_event(device_event);
	return IRQ_HANDLED;
}
/*****************
 *
 * ACPI driver
 *
 *****************/
/*
 * Tear down the SPIC device: disable it via _DIS, release the IRQ and
 * IO regions, remove the compat/input/platform pieces and free the
 * resource lists built from _PRS.
 */
static int sony_pic_remove(struct acpi_device *device, int type)
{
	struct sony_pic_ioport *io, *tmp_io;
	struct sony_pic_irq *irq, *tmp_irq;

	if (sony_pic_disable(device)) {
		pr_err("Couldn't disable device\n");
		return -ENXIO;
	}

	free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);
	release_region(spic_dev.cur_ioport->io1.minimum,
			spic_dev.cur_ioport->io1.address_length);
	/* io2 only exists on Type1 hardware */
	if (spic_dev.cur_ioport->io2.minimum)
		release_region(spic_dev.cur_ioport->io2.minimum,
				spic_dev.cur_ioport->io2.address_length);

	sonypi_compat_exit();

	sony_laptop_remove_input();

	/* pf attrs */
	sysfs_remove_group(&sony_pf_device->dev.kobj, &spic_attribute_group);
	sony_pf_remove();

	/* free the candidate resource lists collected from _PRS */
	list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) {
		list_del(&io->list);
		kfree(io);
	}
	list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) {
		list_del(&irq->list);
		kfree(irq);
	}
	spic_dev.cur_ioport = NULL;
	spic_dev.cur_irq = NULL;

	dprintk(SONY_PIC_DRIVER_NAME " removed.\n");
	return 0;
}
/*
 * Probe/attach the SPIC ACPI device.
 *
 * Detects the model, reads the possible resources from _PRS, sets up
 * the input devices and sonypi compat layer, claims the first usable
 * IO region and IRQ (walking the candidate lists in reverse), programs
 * the selection via _SRS, and finally registers the platform device
 * and its sysfs attributes.  Errors unwind in strict reverse order.
 */
static int sony_pic_add(struct acpi_device *device)
{
	int result;
	struct sony_pic_ioport *io, *tmp_io;
	struct sony_pic_irq *irq, *tmp_irq;

	pr_info("%s v%s\n", SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);

	spic_dev.acpi_dev = device;
	strcpy(acpi_device_class(device), "sony/hotkey");
	sony_pic_detect_device_type(&spic_dev);
	mutex_init(&spic_dev.lock);

	/* read _PRS resources */
	result = sony_pic_possible_resources(device);
	if (result) {
		pr_err("Unable to read possible resources\n");
		goto err_free_resources;
	}

	/* setup input devices and helper fifo */
	result = sony_laptop_setup_input(device);
	if (result) {
		pr_err("Unable to create input devices\n");
		goto err_free_resources;
	}

	if (sonypi_compat_init())
		goto err_remove_input;

	/* request io port */
	list_for_each_entry_reverse(io, &spic_dev.ioports, list) {
		if (request_region(io->io1.minimum, io->io1.address_length,
					"Sony Programmable I/O Device")) {
			dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n",
					io->io1.minimum, io->io1.maximum,
					io->io1.address_length);
			/* Type 1 have 2 ioports */
			if (io->io2.minimum) {
				if (request_region(io->io2.minimum,
						io->io2.address_length,
						"Sony Programmable I/O Device")) {
					dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n",
							io->io2.minimum, io->io2.maximum,
							io->io2.address_length);
					spic_dev.cur_ioport = io;
					break;
				}
				else {
					/* io2 busy: give io1 back and try
					 * the next candidate */
					dprintk("Unable to get I/O port2: "
							"0x%.4x (0x%.4x) + 0x%.2x\n",
							io->io2.minimum, io->io2.maximum,
							io->io2.address_length);
					release_region(io->io1.minimum,
							io->io1.address_length);
				}
			}
			else {
				spic_dev.cur_ioport = io;
				break;
			}
		}
	}
	if (!spic_dev.cur_ioport) {
		pr_err("Failed to request_region\n");
		result = -ENODEV;
		goto err_remove_compat;
	}

	/* request IRQ */
	list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) {
		if (!request_irq(irq->irq.interrupts[0], sony_pic_irq,
					0, "sony-laptop", &spic_dev)) {
			dprintk("IRQ: %d - triggering: %d - "
					"polarity: %d - shr: %d\n",
					irq->irq.interrupts[0],
					irq->irq.triggering,
					irq->irq.polarity,
					irq->irq.sharable);
			spic_dev.cur_irq = irq;
			break;
		}
	}
	if (!spic_dev.cur_irq) {
		pr_err("Failed to request_irq\n");
		result = -ENODEV;
		goto err_release_region;
	}

	/* set resource status _SRS */
	result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
	if (result) {
		pr_err("Couldn't enable device\n");
		goto err_free_irq;
	}

	/* -1 marks the bluetooth power state as unknown */
	spic_dev.bluetooth_power = -1;
	/* create device attributes */
	result = sony_pf_add();
	if (result)
		goto err_disable_device;

	result = sysfs_create_group(&sony_pf_device->dev.kobj, &spic_attribute_group);
	if (result)
		goto err_remove_pf;

	return 0;

err_remove_pf:
	sony_pf_remove();

err_disable_device:
	sony_pic_disable(device);

err_free_irq:
	free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);

err_release_region:
	release_region(spic_dev.cur_ioport->io1.minimum,
			spic_dev.cur_ioport->io1.address_length);
	if (spic_dev.cur_ioport->io2.minimum)
		release_region(spic_dev.cur_ioport->io2.minimum,
				spic_dev.cur_ioport->io2.address_length);

err_remove_compat:
	sonypi_compat_exit();

err_remove_input:
	sony_laptop_remove_input();

err_free_resources:
	list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) {
		list_del(&io->list);
		kfree(io);
	}
	list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) {
		list_del(&irq->list);
		kfree(irq);
	}
	spic_dev.cur_ioport = NULL;
	spic_dev.cur_irq = NULL;

	return result;
}
/* ACPI suspend hook: quiesce the SPIC device before entering sleep. */
static int sony_pic_suspend(struct acpi_device *device, pm_message_t state)
{
	return sony_pic_disable(device) ? -ENXIO : 0;
}
/* ACPI resume hook: re-apply the I/O port and IRQ resources selected at
 * probe time.
 * NOTE(review): the return value of sony_pic_enable() is ignored, so a
 * failed resume is reported as success — confirm this is intentional. */
static int sony_pic_resume(struct acpi_device *device)
{
	sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
	return 0;
}
/* ACPI IDs handled by the SPIC driver; zero-terminated. */
static const struct acpi_device_id sony_pic_device_ids[] = {
	{SONY_PIC_HID, 0},
	{"", 0},
};
/* ACPI driver binding for the SPIC device (see sony_pic_device_ids). */
static struct acpi_driver sony_pic_driver = {
	.name = SONY_PIC_DRIVER_NAME,
	.class = SONY_PIC_CLASS,
	.ids = sony_pic_device_ids,
	.owner = THIS_MODULE,
	.ops = {
		.add = sony_pic_add,
		.remove = sony_pic_remove,
		.suspend = sony_pic_suspend,
		.resume = sony_pic_resume,
	},
};
/* DMI matches (Sony PCG-* and VGN-* models) gating SPIC driver
 * registration in sony_laptop_init(); zero-terminated. */
static struct dmi_system_id __initdata sonypi_dmi_table[] = {
	{
		.ident = "Sony Vaio",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"),
		},
	},
	{
		.ident = "Sony Vaio",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"),
		},
	},
	{ }
};
/*
 * Module init: register the SPIC (ACPI) driver only on DMI-matched Vaio
 * machines (and unless disabled via no_spic), then register the SNC
 * driver.  If SNC registration fails, an already-registered SPIC driver
 * is unregistered again.  Returns 0 on success or a negative errno.
 */
static int __init sony_laptop_init(void)
{
	int result;

	if (!no_spic && dmi_check_system(sonypi_dmi_table)) {
		result = acpi_bus_register_driver(&sony_pic_driver);
		if (result) {
			pr_err("Unable to register SPIC driver\n");
			goto out;
		}
		/* remembered so exit/error paths only unregister what we did */
		spic_drv_registered = 1;
	}

	result = acpi_bus_register_driver(&sony_nc_driver);
	if (result) {
		pr_err("Unable to register SNC driver\n");
		goto out_unregister_pic;
	}

	return 0;

out_unregister_pic:
	if (spic_drv_registered)
		acpi_bus_unregister_driver(&sony_pic_driver);
out:
	return result;
}
/* Module exit: unregister SNC, then SPIC if it was registered at init. */
static void __exit sony_laptop_exit(void)
{
	acpi_bus_unregister_driver(&sony_nc_driver);
	if (spic_drv_registered)
		acpi_bus_unregister_driver(&sony_pic_driver);
}

module_init(sony_laptop_init);
module_exit(sony_laptop_exit);
| gpl-2.0 |
civato/CivZ-SnapKat-SM_9005-900T | drivers/input/touchscreen/stmpe-ts.c | 5040 | 9697 | /* STMicroelectronics STMPE811 Touchscreen Driver
*
* (C) 2010 Luotao Fu <l.fu@pengutronix.de>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/workqueue.h>
#include <linux/mfd/stmpe.h>
/* Register layouts and functionalities are identical on all stmpexxx variants
 * with touchscreen controller
 */

/* register addresses within the STMPE MFD */
#define STMPE_REG_INT_STA		0x0B
#define STMPE_REG_ADC_CTRL1		0x20
#define STMPE_REG_ADC_CTRL2		0x21
#define STMPE_REG_TSC_CTRL		0x40
#define STMPE_REG_TSC_CFG		0x41
#define STMPE_REG_FIFO_TH		0x4A
#define STMPE_REG_FIFO_STA		0x4B
#define STMPE_REG_FIFO_SIZE		0x4C
#define STMPE_REG_TSC_DATA_XYZ		0x52
#define STMPE_REG_TSC_FRACTION_Z	0x56
#define STMPE_REG_TSC_I_DRIVE		0x58

/* TSC_CTRL operating mode: acquire X, Y and Z */
#define OP_MOD_XYZ			0

/* single-bit flags used with stmpe_set_bits() */
#define STMPE_TSC_CTRL_TSC_EN		(1<<0)
#define STMPE_FIFO_STA_RESET		(1<<0)
#define STMPE_IRQ_TOUCH_DET		0

/* field-packing helpers for the ADC / TSC configuration registers;
 * pass 0xff to produce the mask for the field */
#define SAMPLE_TIME(x)			((x & 0xf) << 4)
#define MOD_12B(x)			((x & 0x1) << 3)
#define REF_SEL(x)			((x & 0x1) << 1)
#define ADC_FREQ(x)			(x & 0x3)
#define AVE_CTRL(x)			((x & 0x3) << 6)
#define DET_DELAY(x)			((x & 0x7) << 3)
#define SETTLING(x)			(x & 0x7)
#define FRACTION_Z(x)			(x & 0x7)
#define I_DRIVE(x)			(x & 0x1)
#define OP_MODE(x)			((x & 0x7) << 1)

#define STMPE_TS_NAME			"stmpe-ts"
/* X/Y samples are 12 bits wide */
#define XY_MASK				0xfff
/*
 * Per-device state for the STMPE touchscreen cell.
 * The u8 tuning fields are copied from platform data at probe time (see
 * stmpe_input_probe()) and written to hardware in stmpe_init_hw().
 */
struct stmpe_touch {
	struct stmpe *stmpe;		/* parent MFD device */
	struct input_dev *idev;		/* input device we report through */
	struct delayed_work work;	/* pen-release polling (stmpe_work) */
	struct device *dev;
	/* ADC / touchscreen controller tuning values */
	u8 sample_time;
	u8 mod_12b;
	u8 ref_sel;
	u8 adc_freq;
	u8 ave_ctrl;
	u8 touch_det_delay;
	u8 settling;
	u8 fraction_z;
	u8 i_drive;
};
/* Pulse the FIFO reset bit (assert, then deassert) to flush stale samples. */
static int __stmpe_reset_fifo(struct stmpe *stmpe)
{
	int err;

	err = stmpe_set_bits(stmpe, STMPE_REG_FIFO_STA,
			     STMPE_FIFO_STA_RESET, STMPE_FIFO_STA_RESET);
	if (err)
		return err;

	return stmpe_set_bits(stmpe, STMPE_REG_FIFO_STA,
			      STMPE_FIFO_STA_RESET, 0);
}
/*
 * Delayed work scheduled from the FIFO interrupt handler: polls the
 * TOUCH_DET interrupt status and, once the pen is up (or the timeout
 * expires), flushes the FIFO and reports a release (pressure 0).
 */
static void stmpe_work(struct work_struct *work)
{
	int int_sta;
	u32 timeout = 40;	/* 40 * 100us = 4ms, matching the note below */
	struct stmpe_touch *ts =
		container_of(work, struct stmpe_touch, work.work);

	int_sta = stmpe_reg_read(ts->stmpe, STMPE_REG_INT_STA);

	/*
	 * touch_det sometimes gets deasserted or just gets stuck. This appears
	 * to be a silicon bug; we still have to clarify this with the
	 * manufacturer. As a workaround we release the key anyway if the
	 * touch_det keeps coming in after 4ms, while the FIFO contains no value
	 * during the whole time.
	 */
	while ((int_sta & (1 << STMPE_IRQ_TOUCH_DET)) && (timeout > 0)) {
		timeout--;
		int_sta = stmpe_reg_read(ts->stmpe, STMPE_REG_INT_STA);
		udelay(100);
	}

	/* reset the FIFO before we report release event */
	__stmpe_reset_fifo(ts->stmpe);

	input_report_abs(ts->idev, ABS_PRESSURE, 0);
	input_sync(ts->idev);
}
/*
 * Threaded handler for the FIFO-threshold interrupt: reads one X/Y/Z
 * sample, reports it, then schedules stmpe_work() to detect pen release.
 */
static irqreturn_t stmpe_ts_handler(int irq, void *data)
{
	u8 data_set[4];
	int x, y, z;
	struct stmpe_touch *ts = data;

	/*
	 * Cancel scheduled polling for release if we have new value
	 * available. Wait if the polling is already running.
	 */
	cancel_delayed_work_sync(&ts->work);

	/*
	 * The FIFO sometimes just crashes and stops generating interrupts.
	 * This appears to be a silicon bug. We still have to clarify this
	 * with the manufacturer. As a workaround we disable the TSC while we
	 * are collecting data and flush the FIFO after reading
	 */
	stmpe_set_bits(ts->stmpe, STMPE_REG_TSC_CTRL,
			STMPE_TSC_CTRL_TSC_EN, 0);

	/* 4 data bytes: X and Y are 12 bits packed across 3 bytes, Z is 8 bits */
	stmpe_block_read(ts->stmpe, STMPE_REG_TSC_DATA_XYZ, 4, data_set);

	x = (data_set[0] << 4) | (data_set[1] >> 4);
	y = ((data_set[1] & 0xf) << 8) | data_set[2];
	z = data_set[3];

	input_report_abs(ts->idev, ABS_X, x);
	input_report_abs(ts->idev, ABS_Y, y);
	input_report_abs(ts->idev, ABS_PRESSURE, z);
	input_sync(ts->idev);

	/* flush the FIFO after we have read out our values. */
	__stmpe_reset_fifo(ts->stmpe);

	/* reenable the tsc */
	stmpe_set_bits(ts->stmpe, STMPE_REG_TSC_CTRL,
			STMPE_TSC_CTRL_TSC_EN, STMPE_TSC_CTRL_TSC_EN);

	/* start polling for touch_det to detect release */
	schedule_delayed_work(&ts->work, HZ / 50);

	return IRQ_HANDLED;
}
/*
 * Program the ADC and touchscreen controller from the tuning values cached
 * in @ts: clock the blocks, configure ADC sampling, touch detection and
 * drive strength, set the FIFO threshold to one sample, and select X/Y/Z
 * acquisition mode.  Returns 0 or the first error from a bus accessor.
 */
static int __devinit stmpe_init_hw(struct stmpe_touch *ts)
{
	int ret;
	u8 adc_ctrl1, adc_ctrl1_mask, tsc_cfg, tsc_cfg_mask;
	struct stmpe *stmpe = ts->stmpe;
	struct device *dev = ts->dev;

	/* enable the clock for the ADC and touchscreen blocks */
	ret = stmpe_enable(stmpe, STMPE_BLOCK_TOUCHSCREEN | STMPE_BLOCK_ADC);
	if (ret) {
		dev_err(dev, "Could not enable clock for ADC and TS\n");
		return ret;
	}

	adc_ctrl1 = SAMPLE_TIME(ts->sample_time) | MOD_12B(ts->mod_12b) |
		REF_SEL(ts->ref_sel);
	adc_ctrl1_mask = SAMPLE_TIME(0xff) | MOD_12B(0xff) | REF_SEL(0xff);

	ret = stmpe_set_bits(stmpe, STMPE_REG_ADC_CTRL1,
			adc_ctrl1_mask, adc_ctrl1);
	if (ret) {
		dev_err(dev, "Could not setup ADC\n");
		return ret;
	}

	ret = stmpe_set_bits(stmpe, STMPE_REG_ADC_CTRL2,
			ADC_FREQ(0xff), ADC_FREQ(ts->adc_freq));
	if (ret) {
		dev_err(dev, "Could not setup ADC\n");
		return ret;
	}

	tsc_cfg = AVE_CTRL(ts->ave_ctrl) | DET_DELAY(ts->touch_det_delay) |
			SETTLING(ts->settling);
	tsc_cfg_mask = AVE_CTRL(0xff) | DET_DELAY(0xff) | SETTLING(0xff);

	ret = stmpe_set_bits(stmpe, STMPE_REG_TSC_CFG, tsc_cfg_mask, tsc_cfg);
	if (ret) {
		dev_err(dev, "Could not config touch\n");
		return ret;
	}

	ret = stmpe_set_bits(stmpe, STMPE_REG_TSC_FRACTION_Z,
			FRACTION_Z(0xff), FRACTION_Z(ts->fraction_z));
	if (ret) {
		dev_err(dev, "Could not config touch\n");
		return ret;
	}

	ret = stmpe_set_bits(stmpe, STMPE_REG_TSC_I_DRIVE,
			I_DRIVE(0xff), I_DRIVE(ts->i_drive));
	if (ret) {
		dev_err(dev, "Could not config touch\n");
		return ret;
	}

	/* set FIFO to 1 for single point reading */
	ret = stmpe_reg_write(stmpe, STMPE_REG_FIFO_TH, 1);
	if (ret) {
		dev_err(dev, "Could not set FIFO\n");
		return ret;
	}

	ret = stmpe_set_bits(stmpe, STMPE_REG_TSC_CTRL,
			OP_MODE(0xff), OP_MODE(OP_MOD_XYZ));
	if (ret) {
		dev_err(dev, "Could not set mode\n");
		return ret;
	}

	return 0;
}
/* input_dev open(): flush any stale FIFO samples, then enable the TSC. */
static int stmpe_ts_open(struct input_dev *dev)
{
	struct stmpe_touch *ts = input_get_drvdata(dev);
	int err;

	err = __stmpe_reset_fifo(ts->stmpe);
	if (err)
		return err;

	return stmpe_set_bits(ts->stmpe, STMPE_REG_TSC_CTRL,
			      STMPE_TSC_CTRL_TSC_EN, STMPE_TSC_CTRL_TSC_EN);
}
/* input_dev close(): stop the release-polling work and disable the TSC. */
static void stmpe_ts_close(struct input_dev *dev)
{
	struct stmpe_touch *ts = input_get_drvdata(dev);

	cancel_delayed_work_sync(&ts->work);

	stmpe_set_bits(ts->stmpe, STMPE_REG_TSC_CTRL,
			STMPE_TSC_CTRL_TSC_EN, 0);
}
/*
 * Platform probe: allocate per-device state and an input device, copy
 * optional tuning values from platform data, request the FIFO-threshold
 * IRQ, program the hardware and register the input device.  Error paths
 * unwind in reverse order of acquisition.
 */
static int __devinit stmpe_input_probe(struct platform_device *pdev)
{
	struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
	struct stmpe_platform_data *pdata = stmpe->pdata;
	struct stmpe_touch *ts;
	struct input_dev *idev;
	struct stmpe_ts_platform_data *ts_pdata = NULL;
	int ret;
	int ts_irq;

	ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
	if (ts_irq < 0)
		return ts_irq;

	ts = kzalloc(sizeof(*ts), GFP_KERNEL);
	if (!ts) {
		ret = -ENOMEM;
		goto err_out;
	}
	idev = input_allocate_device();
	if (!idev) {
		ret = -ENOMEM;
		goto err_free_ts;
	}

	platform_set_drvdata(pdev, ts);
	ts->stmpe = stmpe;
	ts->idev = idev;
	ts->dev = &pdev->dev;

	/* board-specific tuning values are optional */
	if (pdata)
		ts_pdata = pdata->ts;
	if (ts_pdata) {
		ts->sample_time = ts_pdata->sample_time;
		ts->mod_12b = ts_pdata->mod_12b;
		ts->ref_sel = ts_pdata->ref_sel;
		ts->adc_freq = ts_pdata->adc_freq;
		ts->ave_ctrl = ts_pdata->ave_ctrl;
		ts->touch_det_delay = ts_pdata->touch_det_delay;
		ts->settling = ts_pdata->settling;
		ts->fraction_z = ts_pdata->fraction_z;
		ts->i_drive = ts_pdata->i_drive;
	}

	INIT_DELAYED_WORK(&ts->work, stmpe_work);
	/* no primary handler: the threaded handler does slow bus I/O */
	ret = request_threaded_irq(ts_irq, NULL, stmpe_ts_handler,
			IRQF_ONESHOT, STMPE_TS_NAME, ts);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request IRQ %d\n", ts_irq);
		goto err_free_input;
	}

	ret = stmpe_init_hw(ts);
	if (ret)
		goto err_free_irq;

	idev->name = STMPE_TS_NAME;
	idev->id.bustype = BUS_I2C;
	idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

	idev->open = stmpe_ts_open;
	idev->close = stmpe_ts_close;

	input_set_drvdata(idev, ts);

	/* X/Y are 12-bit, pressure is 8-bit (see stmpe_ts_handler()) */
	input_set_abs_params(idev, ABS_X, 0, XY_MASK, 0, 0);
	input_set_abs_params(idev, ABS_Y, 0, XY_MASK, 0, 0);
	input_set_abs_params(idev, ABS_PRESSURE, 0x0, 0xff, 0, 0);

	ret = input_register_device(idev);
	if (ret) {
		dev_err(&pdev->dev, "Could not register input device\n");
		goto err_free_irq;
	}

	return ret;

err_free_irq:
	free_irq(ts_irq, ts);
err_free_input:
	input_free_device(idev);
	platform_set_drvdata(pdev, NULL);
err_free_ts:
	kfree(ts);
err_out:
	return ret;
}
/*
 * Platform remove: disable the touchscreen block, free the IRQ and
 * unregister the input device (unregistering also frees it).
 */
static int __devexit stmpe_ts_remove(struct platform_device *pdev)
{
	struct stmpe_touch *ts = platform_get_drvdata(pdev);
	unsigned int ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");

	stmpe_disable(ts->stmpe, STMPE_BLOCK_TOUCHSCREEN);

	free_irq(ts_irq, ts);

	platform_set_drvdata(pdev, NULL);

	input_unregister_device(ts->idev);

	kfree(ts);

	return 0;
}
/* platform driver glue; binds against the "stmpe-ts" MFD cell */
static struct platform_driver stmpe_ts_driver = {
	.driver = {
		.name = STMPE_TS_NAME,
		.owner = THIS_MODULE,
	},
	.probe = stmpe_input_probe,
	.remove = __devexit_p(stmpe_ts_remove),
};
module_platform_driver(stmpe_ts_driver);

MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
MODULE_DESCRIPTION("STMPEXXX touchscreen driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" STMPE_TS_NAME);
| gpl-2.0 |
MoKee/android_kernel_xiaomi_cancro | drivers/input/touchscreen/htcpen.c | 5040 | 5886 | /*
* HTC Shift touchscreen driver
*
* Copyright (C) 2008 Pau Oliva Fora <pof@eslack.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/isa.h>
#include <linux/ioport.h>
#include <linux/dmi.h>
MODULE_AUTHOR("Pau Oliva Fora <pau@eslack.org>");
MODULE_DESCRIPTION("HTC Shift touchscreen driver");
MODULE_LICENSE("GPL");
/* fixed EC I/O ports and IRQ used by the HTC Shift touchscreen */
#define HTCPEN_PORT_IRQ_CLEAR	0x068
#define HTCPEN_PORT_INIT	0x06c
#define HTCPEN_PORT_INDEX	0x0250
#define HTCPEN_PORT_DATA	0x0251
#define HTCPEN_IRQ		3

/* commands written to HTCPEN_PORT_INIT */
#define DEVICE_ENABLE		0xa2
#define DEVICE_DISABLE		0xa3

/* register indexes written to PORT_INDEX before reading PORT_DATA */
#define X_INDEX			3
#define Y_INDEX			5
#define TOUCH_INDEX		0xb
#define LSB_XY_INDEX		0xc

#define X_AXIS_MAX		2040
#define Y_AXIS_MAX		2040

static bool invert_x;
module_param(invert_x, bool, 0644);
MODULE_PARM_DESC(invert_x, "If set, X axis is inverted");
static bool invert_y;
module_param(invert_y, bool, 0644);
MODULE_PARM_DESC(invert_y, "If set, Y axis is inverted");
/*
 * IRQ handler: read the touch state from the EC through the index/data
 * port pair, report position while the pen is down, and acknowledge the
 * interrupt by reading HTCPEN_PORT_IRQ_CLEAR.
 */
static irqreturn_t htcpen_interrupt(int irq, void *handle)
{
	struct input_dev *htcpen_dev = handle;
	unsigned short x, y, xy;

	/* 0 = press; 1 = release */
	outb_p(TOUCH_INDEX, HTCPEN_PORT_INDEX);
	if (inb_p(HTCPEN_PORT_DATA)) {
		input_report_key(htcpen_dev, BTN_TOUCH, 0);
	} else {
		outb_p(X_INDEX, HTCPEN_PORT_INDEX);
		x = inb_p(HTCPEN_PORT_DATA);
		outb_p(Y_INDEX, HTCPEN_PORT_INDEX);
		y = inb_p(HTCPEN_PORT_DATA);
		/* LSB_XY holds the low nibbles of X (high) and Y (low) */
		outb_p(LSB_XY_INDEX, HTCPEN_PORT_INDEX);
		xy = inb_p(HTCPEN_PORT_DATA);

		/* get high resolution value of X and Y using LSB */
		x = X_AXIS_MAX - ((x * 8) + ((xy >> 4) & 0xf));
		y = (y * 8) + (xy & 0xf);
		if (invert_x)
			x = X_AXIS_MAX - x;
		if (invert_y)
			y = Y_AXIS_MAX - y;

		/* samples pinned to the axis extremes are dropped —
		 * presumably spurious readings; TODO confirm */
		if (x != X_AXIS_MAX && x != 0) {
			input_report_key(htcpen_dev, BTN_TOUCH, 1);
			input_report_abs(htcpen_dev, ABS_X, x);
			input_report_abs(htcpen_dev, ABS_Y, y);
		}
	}
	input_sync(htcpen_dev);

	inb_p(HTCPEN_PORT_IRQ_CLEAR);
	return IRQ_HANDLED;
}
/* input_dev open(): tell the EC to enable the touchscreen. */
static int htcpen_open(struct input_dev *dev)
{
	outb_p(DEVICE_ENABLE, HTCPEN_PORT_INIT);

	return 0;
}
/* input_dev close(): disable the touchscreen, then wait for any
 * in-flight interrupt handler to finish. */
static void htcpen_close(struct input_dev *dev)
{
	outb_p(DEVICE_DISABLE, HTCPEN_PORT_INIT);
	synchronize_irq(HTCPEN_IRQ);
}
/*
 * ISA probe: claim the three fixed EC I/O regions, allocate and configure
 * the input device, request the fixed IRQ and register the device.
 * Error labels unwind in reverse order of acquisition.
 */
static int __devinit htcpen_isa_probe(struct device *dev, unsigned int id)
{
	struct input_dev *htcpen_dev;
	int err = -EBUSY;

	if (!request_region(HTCPEN_PORT_IRQ_CLEAR, 1, "htcpen")) {
		printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n",
			HTCPEN_PORT_IRQ_CLEAR);
		goto request_region1_failed;
	}

	if (!request_region(HTCPEN_PORT_INIT, 1, "htcpen")) {
		printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n",
			HTCPEN_PORT_INIT);
		goto request_region2_failed;
	}

	if (!request_region(HTCPEN_PORT_INDEX, 2, "htcpen")) {
		printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n",
			HTCPEN_PORT_INDEX);
		goto request_region3_failed;
	}

	htcpen_dev = input_allocate_device();
	if (!htcpen_dev) {
		printk(KERN_ERR "htcpen: can't allocate device\n");
		err = -ENOMEM;
		goto input_alloc_failed;
	}

	htcpen_dev->name = "HTC Shift EC TouchScreen";
	htcpen_dev->id.bustype = BUS_ISA;

	htcpen_dev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
	htcpen_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
	input_set_abs_params(htcpen_dev, ABS_X, 0, X_AXIS_MAX, 0, 0);
	input_set_abs_params(htcpen_dev, ABS_Y, 0, Y_AXIS_MAX, 0, 0);

	htcpen_dev->open = htcpen_open;
	htcpen_dev->close = htcpen_close;

	err = request_irq(HTCPEN_IRQ, htcpen_interrupt, 0, "htcpen",
			htcpen_dev);
	if (err) {
		printk(KERN_ERR "htcpen: irq busy\n");
		goto request_irq_failed;
	}

	/* clear any interrupt left pending before registration */
	inb_p(HTCPEN_PORT_IRQ_CLEAR);

	err = input_register_device(htcpen_dev);
	if (err)
		goto input_register_failed;

	dev_set_drvdata(dev, htcpen_dev);

	return 0;

 input_register_failed:
	free_irq(HTCPEN_IRQ, htcpen_dev);
 request_irq_failed:
	input_free_device(htcpen_dev);
 input_alloc_failed:
	release_region(HTCPEN_PORT_INDEX, 2);
 request_region3_failed:
	release_region(HTCPEN_PORT_INIT, 1);
 request_region2_failed:
	release_region(HTCPEN_PORT_IRQ_CLEAR, 1);
 request_region1_failed:
	return err;
}
/* ISA remove: tear everything down in reverse order of htcpen_isa_probe(). */
static int __devexit htcpen_isa_remove(struct device *dev, unsigned int id)
{
	struct input_dev *htcpen_dev = dev_get_drvdata(dev);

	input_unregister_device(htcpen_dev);

	free_irq(HTCPEN_IRQ, htcpen_dev);

	release_region(HTCPEN_PORT_INDEX, 2);
	release_region(HTCPEN_PORT_INIT, 1);
	release_region(HTCPEN_PORT_IRQ_CLEAR, 1);

	dev_set_drvdata(dev, NULL);

	return 0;
}
#ifdef CONFIG_PM
/* Suspend: disable the EC touchscreen so it stops raising interrupts. */
static int htcpen_isa_suspend(struct device *dev, unsigned int n,
				pm_message_t state)
{
	outb_p(DEVICE_DISABLE, HTCPEN_PORT_INIT);

	return 0;
}

/* Resume: re-enable the EC touchscreen. */
static int htcpen_isa_resume(struct device *dev, unsigned int n)
{
	outb_p(DEVICE_ENABLE, HTCPEN_PORT_INIT);

	return 0;
}
#endif
/* ISA driver glue; PM hooks only present when CONFIG_PM is set */
static struct isa_driver htcpen_isa_driver = {
	.probe		= htcpen_isa_probe,
	.remove		= __devexit_p(htcpen_isa_remove),
#ifdef CONFIG_PM
	.suspend	= htcpen_isa_suspend,
	.resume		= htcpen_isa_resume,
#endif
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "htcpen",
	}
};
/* Only the HTC Shift carries this EC touchscreen; gate on DMI. */
static struct dmi_system_id __initdata htcshift_dmi_table[] = {
	{
		.ident = "Shift",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "High Tech Computer Corp"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Shift"),
		},
	},
	{ }
};
MODULE_DEVICE_TABLE(dmi, htcshift_dmi_table);
/* Bind only on DMI-identified HTC Shift hardware (the I/O ports and IRQ
 * are hard-coded), registering exactly one ISA device. */
static int __init htcpen_isa_init(void)
{
	if (!dmi_check_system(htcshift_dmi_table))
		return -ENODEV;

	return isa_register_driver(&htcpen_isa_driver, 1);
}

static void __exit htcpen_isa_exit(void)
{
	isa_unregister_driver(&htcpen_isa_driver);
}

module_init(htcpen_isa_init);
module_exit(htcpen_isa_exit);
| gpl-2.0 |
pershoot/android_kernel_asus_tf701t | sound/aoa/soundbus/i2sbus/core.c | 5808 | 12408 | /*
* i2sbus driver
*
* Copyright 2006-2008 Johannes Berg <johannes@sipsolutions.net>
*
* GPL v2, can be found in COPYING.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <asm/macio.h>
#include <asm/dbdma.h>
#include "../soundbus.h"
#include "i2sbus.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("Apple Soundbus: I2S support");
/* allow binding even when the sound node lacks a layout-id property */
static int force;
module_param(force, int, 0444);
MODULE_PARM_DESC(force, "Force loading i2sbus even when"
			" no layout-id property is present");

/* match any macio node named "i2s" */
static struct of_device_id i2sbus_match[] = {
	{ .name = "i2s" },
	{ }
};

MODULE_DEVICE_TABLE(of, i2sbus_match);
/*
 * Allocate a coherent DMA area big enough for @numcmds DBDMA commands plus
 * alignment slack, a branch-back command and a stop command; compute the
 * aligned command pointer and its corresponding bus address.
 */
static int alloc_dbdma_descriptor_ring(struct i2sbus_dev *i2sdev,
				       struct dbdma_command_mem *r,
				       int numcmds)
{
	struct device *dmadev = &macio_get_pci_dev(i2sdev->macio)->dev;

	/* one more for rounding, one for branch back, one for stop command */
	r->size = (numcmds + 3) * sizeof(struct dbdma_cmd);
	/* We use the PCI APIs for now until the generic one gets fixed
	 * enough or until we get some macio-specific versions
	 */
	r->space = dma_alloc_coherent(dmadev, r->size, &r->bus_addr,
				      GFP_KERNEL);
	if (!r->space)
		return -ENOMEM;

	memset(r->space, 0, r->size);
	r->cmds = (void *)DBDMA_ALIGN(r->space);
	r->bus_cmd_start = r->bus_addr +
		(dma_addr_t)((char *)r->cmds - (char *)r->space);

	return 0;
}
/* Release a ring from alloc_dbdma_descriptor_ring(); no-op when empty. */
static void free_dbdma_descriptor_ring(struct i2sbus_dev *i2sdev,
				       struct dbdma_command_mem *r)
{
	if (!r->space)
		return;

	dma_free_coherent(&macio_get_pci_dev(i2sdev->macio)->dev,
			  r->size, r->space, r->bus_addr);
}
/*
 * Device release callback: undoes everything i2sbus_add_dev() set up
 * (mappings, resources, descriptor rings, IRQs, control-layer entry).
 * Runs when the last reference to the soundbus device is dropped.
 */
static void i2sbus_release_dev(struct device *dev)
{
	struct i2sbus_dev *i2sdev;
	int i;

	i2sdev = container_of(dev, struct i2sbus_dev, sound.ofdev.dev);

	if (i2sdev->intfregs) iounmap(i2sdev->intfregs);
	if (i2sdev->out.dbdma) iounmap(i2sdev->out.dbdma);
	if (i2sdev->in.dbdma) iounmap(i2sdev->in.dbdma);
	for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++)
		if (i2sdev->allocated_resource[i])
			release_and_free_resource(i2sdev->allocated_resource[i]);
	free_dbdma_descriptor_ring(i2sdev, &i2sdev->out.dbdma_ring);
	free_dbdma_descriptor_ring(i2sdev, &i2sdev->in.dbdma_ring);
	for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++)
		free_irq(i2sdev->interrupts[i], i2sdev);
	i2sbus_control_remove_dev(i2sdev->control, i2sdev);
	mutex_destroy(&i2sdev->lock);
	kfree(i2sdev);
}
/*
 * Bus-level interrupt: read intr_ctl and write it back, which acknowledges
 * every pending interrupt reason, under the low-level spinlock.
 */
static irqreturn_t i2sbus_bus_intr(int irq, void *devid)
{
	struct i2sbus_dev *dev = devid;
	u32 intreg;

	spin_lock(&dev->low_lock);
	intreg = in_le32(&dev->intfregs->intr_ctl);

	/* acknowledge interrupt reasons */
	out_le32(&dev->intfregs->intr_ctl, intreg);

	spin_unlock(&dev->low_lock);

	return IRQ_HANDLED;
}
/*
 * XXX FIXME: We test the layout_id's here to get the proper way of
 * mapping in various registers, thanks to bugs in Apple device-trees.
 * We could instead key off the machine model and the name of the i2s
 * node (i2s-a). This we'll do when we move it all to macio_asic.c
 * and have that export items for each sub-node too.
 */
static int i2sbus_get_and_fixup_rsrc(struct device_node *np, int index,
				     int layout, struct resource *res)
{
	struct device_node *parent;
	int pindex, rc = -ENXIO;
	const u32 *reg;

	/* Machines with layout 76 and 36 (K2 based) have a weird device
	 * tree that we need to special case.
	 * Normal machines just fetch the resource from the i2s-X node.
	 * Darwin further divides normal machines into old and new layouts
	 * with a subtly different code path but that doesn't seem necessary
	 * in practice, they just bloated it. In addition, even on our K2
	 * case the i2s-modem node, if we ever want to handle it, uses the
	 * normal layout
	 */
	if (layout != 76 && layout != 36)
		return of_address_to_resource(np, index, res);

	/* K2 quirk: the resource lives on the parent node, offset/sized by
	 * the child's "reg" property */
	parent = of_get_parent(np);
	pindex = (index == aoa_resource_i2smmio) ? 0 : 1;
	rc = of_address_to_resource(parent, pindex, res);
	if (rc)
		goto bail;
	reg = of_get_property(np, "reg", NULL);
	if (reg == NULL) {
		rc = -ENXIO;
		goto bail;
	}
	res->start += reg[index * 2];
	res->end = res->start + reg[index * 2 + 1] - 1;
 bail:
	of_node_put(parent);
	return rc;
}
/* FIXME: look at device node refcounting */
/*
 * Probe one "i2s-X" device-tree node and register it as a soundbus device.
 * Returns 1 when a device was added and 0 otherwise: the caller sums the
 * return value over all children as a count of attached devices, so this
 * function must never return a negative errno.
 */
static int i2sbus_add_dev(struct macio_dev *macio,
			  struct i2sbus_control *control,
			  struct device_node *np)
{
	struct i2sbus_dev *dev;
	struct device_node *child = NULL, *sound = NULL;
	struct resource *r;
	int i, layout = 0, rlen, ok = force;
	static const char *rnames[] = { "i2sbus: %s (control)",
					"i2sbus: %s (tx)",
					"i2sbus: %s (rx)" };
	static irq_handler_t ints[] = {
		i2sbus_bus_intr,
		i2sbus_tx_intr,
		i2sbus_rx_intr
	};

	/* only nodes named exactly "i2s-X" are i2s cells */
	if (strlen(np->name) != 5)
		return 0;
	if (strncmp(np->name, "i2s-", 4))
		return 0;

	dev = kzalloc(sizeof(struct i2sbus_dev), GFP_KERNEL);
	if (!dev)
		return 0;

	/* look for exactly one "sound" child to derive the modalias from */
	i = 0;
	while ((child = of_get_next_child(np, child))) {
		if (strcmp(child->name, "sound") == 0) {
			i++;
			sound = child;
		}
	}
	if (i == 1) {
		const u32 *id = of_get_property(sound, "layout-id", NULL);

		if (id) {
			layout = *id;
			snprintf(dev->sound.modalias, 32,
				 "sound-layout-%d", layout);
			ok = 1;
		} else {
			id = of_get_property(sound, "device-id", NULL);
			/*
			 * We probably cannot handle all device-id machines,
			 * so restrict to those we do handle for now.
			 */
			if (id && (*id == 22 || *id == 14 || *id == 35)) {
				snprintf(dev->sound.modalias, 32,
					 "aoa-device-id-%d", *id);
				ok = 1;
				layout = -1;
			}
		}
	}
	/* for the time being, until we can handle non-layout-id
	 * things in some fabric, refuse to attach if there is no
	 * layout-id property or we haven't been forced to attach.
	 * When there are two i2s busses and only one has a layout-id,
	 * then this depends on the order, but that isn't important
	 * either as the second one in that case is just a modem. */
	if (!ok) {
		kfree(dev);
		/*
		 * Return 0 ("no device added"), not -ENODEV: the caller
		 * accumulates our return value as a device count, and a
		 * negative contribution would corrupt it (all other
		 * not-added paths here return 0 as well).
		 */
		return 0;
	}

	mutex_init(&dev->lock);
	spin_lock_init(&dev->low_lock);
	dev->sound.ofdev.archdata.dma_mask = macio->ofdev.archdata.dma_mask;
	dev->sound.ofdev.dev.of_node = np;
	dev->sound.ofdev.dev.dma_mask = &dev->sound.ofdev.archdata.dma_mask;
	dev->sound.ofdev.dev.parent = &macio->ofdev.dev;
	dev->sound.ofdev.dev.release = i2sbus_release_dev;
	dev->sound.attach_codec = i2sbus_attach_codec;
	dev->sound.detach_codec = i2sbus_detach_codec;
	dev->sound.pcmid = -1;
	dev->macio = macio;
	dev->control = control;
	dev->bus_number = np->name[4] - 'a';
	INIT_LIST_HEAD(&dev->sound.codec_list);

	for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) {
		dev->interrupts[i] = -1;
		snprintf(dev->rnames[i], sizeof(dev->rnames[i]),
			 rnames[i], np->name);
	}
	for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) {
		int irq = irq_of_parse_and_map(np, i);
		if (request_irq(irq, ints[i], 0, dev->rnames[i], dev))
			goto err;
		dev->interrupts[i] = irq;
	}

	/* Resource handling is problematic as some device-trees contain
	 * useless crap (ugh ugh ugh). We work around that here by calling
	 * specific functions for calculating the appropriate resources.
	 *
	 * This will all be moved to macio_asic.c at one point
	 */
	for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) {
		if (i2sbus_get_and_fixup_rsrc(np, i, layout, &dev->resources[i]))
			goto err;
		/* If only we could use our resource dev->resources[i]...
		 * but request_resource doesn't know about parents and
		 * contained resources...
		 */
		dev->allocated_resource[i] =
			request_mem_region(dev->resources[i].start,
					   resource_size(&dev->resources[i]),
					   dev->rnames[i]);
		if (!dev->allocated_resource[i]) {
			printk(KERN_ERR "i2sbus: failed to claim resource %d!\n", i);
			goto err;
		}
	}

	/* map the interface registers and the two DBDMA register blocks */
	r = &dev->resources[aoa_resource_i2smmio];
	rlen = resource_size(r);
	if (rlen < sizeof(struct i2s_interface_regs))
		goto err;
	dev->intfregs = ioremap(r->start, rlen);
	r = &dev->resources[aoa_resource_txdbdma];
	rlen = resource_size(r);
	if (rlen < sizeof(struct dbdma_regs))
		goto err;
	dev->out.dbdma = ioremap(r->start, rlen);
	r = &dev->resources[aoa_resource_rxdbdma];
	rlen = resource_size(r);
	if (rlen < sizeof(struct dbdma_regs))
		goto err;
	dev->in.dbdma = ioremap(r->start, rlen);
	if (!dev->intfregs || !dev->out.dbdma || !dev->in.dbdma)
		goto err;

	if (alloc_dbdma_descriptor_ring(dev, &dev->out.dbdma_ring,
					MAX_DBDMA_COMMANDS))
		goto err;
	if (alloc_dbdma_descriptor_ring(dev, &dev->in.dbdma_ring,
					MAX_DBDMA_COMMANDS))
		goto err;

	if (i2sbus_control_add_dev(dev->control, dev)) {
		printk(KERN_ERR "i2sbus: control layer didn't like bus\n");
		goto err;
	}

	if (soundbus_add_one(&dev->sound)) {
		printk(KERN_DEBUG "i2sbus: device registration error!\n");
		goto err;
	}

	/* enable this cell */
	i2sbus_control_cell(dev->control, dev, 1);
	i2sbus_control_enable(dev->control, dev);
	i2sbus_control_clock(dev->control, dev, 1);

	return 1;
 err:
	for (i = 0; i < 3; i++)
		if (dev->interrupts[i] != -1)
			free_irq(dev->interrupts[i], dev);
	free_dbdma_descriptor_ring(dev, &dev->out.dbdma_ring);
	free_dbdma_descriptor_ring(dev, &dev->in.dbdma_ring);
	if (dev->intfregs) iounmap(dev->intfregs);
	if (dev->out.dbdma) iounmap(dev->out.dbdma);
	if (dev->in.dbdma) iounmap(dev->in.dbdma);
	for (i = 0; i < 3; i++)
		if (dev->allocated_resource[i])
			release_and_free_resource(dev->allocated_resource[i]);
	mutex_destroy(&dev->lock);
	kfree(dev);
	return 0;
}
/*
 * Macio probe: initialize the control layer, then try to add every
 * "i2sbus"/"i2s-modem" compatible child node.  "got" accumulates the
 * i2sbus_add_dev() return values; if nothing was added, the control
 * layer is destroyed and -ENODEV returned.
 */
static int i2sbus_probe(struct macio_dev* dev, const struct of_device_id *match)
{
	struct device_node *np = NULL;
	int got = 0, err;
	struct i2sbus_control *control = NULL;

	err = i2sbus_control_init(dev, &control);
	if (err)
		return err;
	if (!control) {
		printk(KERN_ERR "i2sbus_control_init API breakage\n");
		return -ENODEV;
	}

	while ((np = of_get_next_child(dev->ofdev.dev.of_node, np))) {
		if (of_device_is_compatible(np, "i2sbus") ||
		    of_device_is_compatible(np, "i2s-modem")) {
			got += i2sbus_add_dev(dev, control, np);
		}
	}

	if (!got) {
		/* found none, clean up */
		i2sbus_control_destroy(control);
		return -ENODEV;
	}

	dev_set_drvdata(&dev->ofdev.dev, control);

	return 0;
}
/* Macio remove: unregister every soundbus device hanging off this bus;
 * per-device teardown happens in i2sbus_release_dev(). */
static int i2sbus_remove(struct macio_dev* dev)
{
	struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev);
	struct i2sbus_dev *i2sdev, *tmp;

	list_for_each_entry_safe(i2sdev, tmp, &control->list, item)
		soundbus_remove_one(&i2sdev->sound);

	return 0;
}
#ifdef CONFIG_PM
/*
 * Suspend: for each bus, suspend the ALSA PCM streams, notify every
 * codec, and wait until the TX/RX streams have stopped.  Returns the
 * last codec error, if any.
 */
static int i2sbus_suspend(struct macio_dev* dev, pm_message_t state)
{
	struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev);
	struct codec_info_item *cii;
	struct i2sbus_dev* i2sdev;
	int err, ret = 0;

	list_for_each_entry(i2sdev, &control->list, item) {
		/* Notify Alsa */
		if (i2sdev->sound.pcm) {
			/* Suspend PCM streams */
			snd_pcm_suspend_all(i2sdev->sound.pcm);
		}

		/* Notify codecs */
		list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
			err = 0;
			if (cii->codec->suspend)
				err = cii->codec->suspend(cii, state);
			if (err)
				ret = err;
		}

		/* wait until streams are stopped */
		i2sbus_wait_for_stop_both(i2sdev);
	}

	return ret;
}
/*
 * Resume: reprogram the bus format on each device and let every codec
 * re-initialize.  Returns the last codec error, if any.
 */
static int i2sbus_resume(struct macio_dev* dev)
{
	struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev);
	struct codec_info_item *cii;
	struct i2sbus_dev* i2sdev;
	int err, ret = 0;

	list_for_each_entry(i2sdev, &control->list, item) {
		/* reset i2s bus format etc. */
		i2sbus_pcm_prepare_both(i2sdev);

		/* Notify codecs so they can re-initialize */
		list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
			err = 0;
			if (cii->codec->resume)
				err = cii->codec->resume(cii);
			if (err)
				ret = err;
		}
	}

	return ret;
}
#endif /* CONFIG_PM */
/* Shutdown hook: nothing to do, always succeeds. */
static int i2sbus_shutdown(struct macio_dev* dev)
{
	return 0;
}
/* macio driver glue; matched against "i2s" nodes (see i2sbus_match) */
static struct macio_driver i2sbus_drv = {
	.driver = {
		.name = "soundbus-i2s",
		.owner = THIS_MODULE,
		.of_match_table = i2sbus_match,
	},
	.probe = i2sbus_probe,
	.remove = i2sbus_remove,
#ifdef CONFIG_PM
	.suspend = i2sbus_suspend,
	.resume = i2sbus_resume,
#endif
	.shutdown = i2sbus_shutdown,
};
/* Module init/exit: register/unregister the macio driver. */
static int __init soundbus_i2sbus_init(void)
{
	return macio_register_driver(&i2sbus_drv);
}

static void __exit soundbus_i2sbus_exit(void)
{
	macio_unregister_driver(&i2sbus_drv);
}

module_init(soundbus_i2sbus_init);
module_exit(soundbus_i2sbus_exit);
| gpl-2.0 |
embeddedEnsicaen/kernel | sound/aoa/soundbus/i2sbus/core.c | 5808 | 12408 | /*
* i2sbus driver
*
* Copyright 2006-2008 Johannes Berg <johannes@sipsolutions.net>
*
* GPL v2, can be found in COPYING.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <asm/macio.h>
#include <asm/dbdma.h>
#include "../soundbus.h"
#include "i2sbus.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("Apple Soundbus: I2S support");
/* allow binding even when the sound node lacks a layout-id property */
static int force;
module_param(force, int, 0444);
MODULE_PARM_DESC(force, "Force loading i2sbus even when"
			" no layout-id property is present");

/* match any macio node named "i2s" */
static struct of_device_id i2sbus_match[] = {
	{ .name = "i2s" },
	{ }
};

MODULE_DEVICE_TABLE(of, i2sbus_match);
/*
 * Allocate a coherent DMA area big enough for @numcmds DBDMA commands plus
 * alignment slack, a branch-back command and a stop command; compute the
 * aligned command pointer and its corresponding bus address.
 */
static int alloc_dbdma_descriptor_ring(struct i2sbus_dev *i2sdev,
				       struct dbdma_command_mem *r,
				       int numcmds)
{
	struct device *dmadev = &macio_get_pci_dev(i2sdev->macio)->dev;

	/* one more for rounding, one for branch back, one for stop command */
	r->size = (numcmds + 3) * sizeof(struct dbdma_cmd);
	/* We use the PCI APIs for now until the generic one gets fixed
	 * enough or until we get some macio-specific versions
	 */
	r->space = dma_alloc_coherent(dmadev, r->size, &r->bus_addr,
				      GFP_KERNEL);
	if (!r->space)
		return -ENOMEM;

	memset(r->space, 0, r->size);
	r->cmds = (void *)DBDMA_ALIGN(r->space);
	r->bus_cmd_start = r->bus_addr +
		(dma_addr_t)((char *)r->cmds - (char *)r->space);

	return 0;
}
/* Release a ring from alloc_dbdma_descriptor_ring(); no-op when empty. */
static void free_dbdma_descriptor_ring(struct i2sbus_dev *i2sdev,
				       struct dbdma_command_mem *r)
{
	if (!r->space)
		return;

	dma_free_coherent(&macio_get_pci_dev(i2sdev->macio)->dev,
			  r->size, r->space, r->bus_addr);
}
/*
 * Device release callback: undoes everything i2sbus_add_dev() set up
 * (mappings, resources, descriptor rings, IRQs, control-layer entry).
 * Runs when the last reference to the soundbus device is dropped.
 */
static void i2sbus_release_dev(struct device *dev)
{
	struct i2sbus_dev *i2sdev;
	int i;

	i2sdev = container_of(dev, struct i2sbus_dev, sound.ofdev.dev);

	if (i2sdev->intfregs) iounmap(i2sdev->intfregs);
	if (i2sdev->out.dbdma) iounmap(i2sdev->out.dbdma);
	if (i2sdev->in.dbdma) iounmap(i2sdev->in.dbdma);
	for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++)
		if (i2sdev->allocated_resource[i])
			release_and_free_resource(i2sdev->allocated_resource[i]);
	free_dbdma_descriptor_ring(i2sdev, &i2sdev->out.dbdma_ring);
	free_dbdma_descriptor_ring(i2sdev, &i2sdev->in.dbdma_ring);
	for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++)
		free_irq(i2sdev->interrupts[i], i2sdev);
	i2sbus_control_remove_dev(i2sdev->control, i2sdev);
	mutex_destroy(&i2sdev->lock);
	kfree(i2sdev);
}
/*
 * Bus-level interrupt: read intr_ctl and write it back, which acknowledges
 * every pending interrupt reason, under the low-level spinlock.
 */
static irqreturn_t i2sbus_bus_intr(int irq, void *devid)
{
	struct i2sbus_dev *dev = devid;
	u32 intreg;

	spin_lock(&dev->low_lock);
	intreg = in_le32(&dev->intfregs->intr_ctl);

	/* acknowledge interrupt reasons */
	out_le32(&dev->intfregs->intr_ctl, intreg);

	spin_unlock(&dev->low_lock);

	return IRQ_HANDLED;
}
/*
 * XXX FIXME: We test the layout_id's here to get the proper way of
 * mapping in various registers, thanks to bugs in Apple device-trees.
 * We could instead key off the machine model and the name of the i2s
 * node (i2s-a). This we'll do when we move it all to macio_asic.c
 * and have that export items for each sub-node too.
 */
static int i2sbus_get_and_fixup_rsrc(struct device_node *np, int index,
				     int layout, struct resource *res)
{
	struct device_node *parent;
	int pindex, rc = -ENXIO;
	const u32 *reg;

	/* Machines with layout 76 and 36 (K2 based) have a weird device
	 * tree that we need to special case.
	 * Normal machines just fetch the resource from the i2s-X node.
	 * Darwin further divides normal machines into old and new layouts
	 * with a subtly different code path but that doesn't seem necessary
	 * in practice, they just bloated it. In addition, even on our K2
	 * case the i2s-modem node, if we ever want to handle it, uses the
	 * normal layout
	 */
	if (layout != 76 && layout != 36)
		return of_address_to_resource(np, index, res);

	/* K2 quirk: the resource lives on the parent node, offset/sized by
	 * the child's "reg" property */
	parent = of_get_parent(np);
	pindex = (index == aoa_resource_i2smmio) ? 0 : 1;
	rc = of_address_to_resource(parent, pindex, res);
	if (rc)
		goto bail;
	reg = of_get_property(np, "reg", NULL);
	if (reg == NULL) {
		rc = -ENXIO;
		goto bail;
	}
	res->start += reg[index * 2];
	res->end = res->start + reg[index * 2 + 1] - 1;
 bail:
	of_node_put(parent);
	return rc;
}
/* FIXME: look at device node refcounting */
/*
 * Probe one "i2s-X" OF child node and register it as a soundbus device.
 *
 * Returns 1 when the device was fully set up and registered, 0 when the
 * node was skipped or setup failed, and -ENODEV when no usable
 * layout-id/device-id was found and attaching was not forced.
 * NOTE(review): the caller in i2sbus_probe() sums these return values
 * ("got +="), so a -ENODEV return can offset a successful 1 -- confirm
 * this mix is intended.
 */
static int i2sbus_add_dev(struct macio_dev *macio,
struct i2sbus_control *control,
struct device_node *np)
{
struct i2sbus_dev *dev;
struct device_node *child = NULL, *sound = NULL;
struct resource *r;
/* "ok" seeds from the module-level "force" flag (defined elsewhere in
 * this file): forcing attach bypasses the layout-id requirement */
int i, layout = 0, rlen, ok = force;
/* printf templates for the per-resource names, indexed like the
 * aoa_resource_* enum: control regs, tx dbdma, rx dbdma */
static const char *rnames[] = { "i2sbus: %s (control)",
"i2sbus: %s (tx)",
"i2sbus: %s (rx)" };
/* one irq handler per resource, same ordering as rnames[] */
static irq_handler_t ints[] = {
i2sbus_bus_intr,
i2sbus_tx_intr,
i2sbus_rx_intr
};
/* only nodes named exactly "i2s-<letter>" are i2s busses */
if (strlen(np->name) != 5)
return 0;
if (strncmp(np->name, "i2s-", 4))
return 0;
dev = kzalloc(sizeof(struct i2sbus_dev), GFP_KERNEL);
if (!dev)
return 0;
/* look for exactly one "sound" child; its properties decide whether
 * and how we attach */
i = 0;
while ((child = of_get_next_child(np, child))) {
if (strcmp(child->name, "sound") == 0) {
i++;
sound = child;
}
}
if (i == 1) {
const u32 *id = of_get_property(sound, "layout-id", NULL);
if (id) {
/* layout-id machines: modalias selects a layout fabric */
layout = *id;
snprintf(dev->sound.modalias, 32,
"sound-layout-%d", layout);
ok = 1;
} else {
id = of_get_property(sound, "device-id", NULL);
/*
* We probably cannot handle all device-id machines,
* so restrict to those we do handle for now.
*/
if (id && (*id == 22 || *id == 14 || *id == 35)) {
snprintf(dev->sound.modalias, 32,
"aoa-device-id-%d", *id);
ok = 1;
layout = -1;
}
}
}
/* for the time being, until we can handle non-layout-id
* things in some fabric, refuse to attach if there is no
* layout-id property or we haven't been forced to attach.
* When there are two i2s busses and only one has a layout-id,
* then this depends on the order, but that isn't important
* either as the second one in that case is just a modem. */
if (!ok) {
kfree(dev);
return -ENODEV;
}
/* basic locking and driver-model wiring */
mutex_init(&dev->lock);
spin_lock_init(&dev->low_lock);
dev->sound.ofdev.archdata.dma_mask = macio->ofdev.archdata.dma_mask;
dev->sound.ofdev.dev.of_node = np;
dev->sound.ofdev.dev.dma_mask = &dev->sound.ofdev.archdata.dma_mask;
dev->sound.ofdev.dev.parent = &macio->ofdev.dev;
dev->sound.ofdev.dev.release = i2sbus_release_dev;
dev->sound.attach_codec = i2sbus_attach_codec;
dev->sound.detach_codec = i2sbus_detach_codec;
dev->sound.pcmid = -1;
dev->macio = macio;
dev->control = control;
/* bus letter: "i2s-a" -> 0, "i2s-b" -> 1, ... */
dev->bus_number = np->name[4] - 'a';
INIT_LIST_HEAD(&dev->sound.codec_list);
/* mark all interrupts as not-yet-requested (-1) so the err: path
 * below knows which ones to free, and build the resource names */
for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) {
dev->interrupts[i] = -1;
snprintf(dev->rnames[i], sizeof(dev->rnames[i]),
rnames[i], np->name);
}
for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) {
int irq = irq_of_parse_and_map(np, i);
if (request_irq(irq, ints[i], 0, dev->rnames[i], dev))
goto err;
dev->interrupts[i] = irq;
}
/* Resource handling is problematic as some device-trees contain
* useless crap (ugh ugh ugh). We work around that here by calling
* specific functions for calculating the appropriate resources.
*
* This will all be moved to macio_asic.c at one point
*/
for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) {
if (i2sbus_get_and_fixup_rsrc(np,i,layout,&dev->resources[i]))
goto err;
/* If only we could use our resource dev->resources[i]...
* but request_resource doesn't know about parents and
* contained resources...
*/
dev->allocated_resource[i] =
request_mem_region(dev->resources[i].start,
resource_size(&dev->resources[i]),
dev->rnames[i]);
if (!dev->allocated_resource[i]) {
printk(KERN_ERR "i2sbus: failed to claim resource %d!\n", i);
goto err;
}
}
/* map the interface registers and both DBDMA register windows,
 * rejecting resources that are too small for the register layout */
r = &dev->resources[aoa_resource_i2smmio];
rlen = resource_size(r);
if (rlen < sizeof(struct i2s_interface_regs))
goto err;
dev->intfregs = ioremap(r->start, rlen);
r = &dev->resources[aoa_resource_txdbdma];
rlen = resource_size(r);
if (rlen < sizeof(struct dbdma_regs))
goto err;
dev->out.dbdma = ioremap(r->start, rlen);
r = &dev->resources[aoa_resource_rxdbdma];
rlen = resource_size(r);
if (rlen < sizeof(struct dbdma_regs))
goto err;
dev->in.dbdma = ioremap(r->start, rlen);
/* any ioremap above may have failed; check them all at once */
if (!dev->intfregs || !dev->out.dbdma || !dev->in.dbdma)
goto err;
if (alloc_dbdma_descriptor_ring(dev, &dev->out.dbdma_ring,
MAX_DBDMA_COMMANDS))
goto err;
if (alloc_dbdma_descriptor_ring(dev, &dev->in.dbdma_ring,
MAX_DBDMA_COMMANDS))
goto err;
if (i2sbus_control_add_dev(dev->control, dev)) {
printk(KERN_ERR "i2sbus: control layer didn't like bus\n");
goto err;
}
if (soundbus_add_one(&dev->sound)) {
printk(KERN_DEBUG "i2sbus: device registration error!\n");
goto err;
}
/* enable this cell */
i2sbus_control_cell(dev->control, dev, 1);
i2sbus_control_enable(dev->control, dev);
i2sbus_control_clock(dev->control, dev, 1);
return 1;
err:
/* partial-failure teardown: only free what was actually acquired
 * (interrupts still -1 were never requested; iounmap/resource
 * release are guarded by NULL checks) */
for (i=0;i<3;i++)
if (dev->interrupts[i] != -1)
free_irq(dev->interrupts[i], dev);
free_dbdma_descriptor_ring(dev, &dev->out.dbdma_ring);
free_dbdma_descriptor_ring(dev, &dev->in.dbdma_ring);
if (dev->intfregs) iounmap(dev->intfregs);
if (dev->out.dbdma) iounmap(dev->out.dbdma);
if (dev->in.dbdma) iounmap(dev->in.dbdma);
for (i=0;i<3;i++)
if (dev->allocated_resource[i])
release_and_free_resource(dev->allocated_resource[i]);
mutex_destroy(&dev->lock);
kfree(dev);
return 0;
}
/*
 * macio probe: initialize the control layer for this device, then walk
 * its OF children and attach every "i2sbus"/"i2s-modem" compatible
 * node.  If nothing attached, the control layer is torn down again and
 * -ENODEV is returned; otherwise the control structure is stored as
 * driver data for remove/suspend/resume.
 */
static int i2sbus_probe(struct macio_dev* dev, const struct of_device_id *match)
{
	struct i2sbus_control *control = NULL;
	struct device_node *np = NULL;
	int err, found = 0;

	err = i2sbus_control_init(dev, &control);
	if (err)
		return err;
	if (!control) {
		printk(KERN_ERR "i2sbus_control_init API breakage\n");
		return -ENODEV;
	}

	while ((np = of_get_next_child(dev->ofdev.dev.of_node, np)))
		if (of_device_is_compatible(np, "i2sbus") ||
		    of_device_is_compatible(np, "i2s-modem"))
			found += i2sbus_add_dev(dev, control, np);

	if (!found) {
		/* found none, clean up */
		i2sbus_control_destroy(control);
		return -ENODEV;
	}

	dev_set_drvdata(&dev->ofdev.dev, control);
	return 0;
}
/*
 * macio remove: unregister every soundbus device attached to this
 * control structure.  Entries may disappear from the list as a side
 * effect of soundbus_remove_one(), hence the _safe iterator.
 */
static int i2sbus_remove(struct macio_dev* dev)
{
	struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev);
	struct i2sbus_dev *cur, *next;

	list_for_each_entry_safe(cur, next, &control->list, item)
		soundbus_remove_one(&cur->sound);

	return 0;
}
#ifdef CONFIG_PM
/*
 * Suspend every i2s device on this bus: first tell ALSA to suspend the
 * PCM streams, then let each codec power down, and finally wait until
 * both DMA streams have actually stopped.  Returns the last codec
 * suspend error seen, or 0 when everything succeeded.
 */
static int i2sbus_suspend(struct macio_dev* dev, pm_message_t state)
{
	struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev);
	struct codec_info_item *cii;
	struct i2sbus_dev *i2sdev;
	int ret = 0;

	list_for_each_entry(i2sdev, &control->list, item) {
		/* Notify ALSA: suspend any PCM streams */
		if (i2sdev->sound.pcm)
			snd_pcm_suspend_all(i2sdev->sound.pcm);

		/* Notify each attached codec */
		list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
			if (cii->codec->suspend) {
				int err = cii->codec->suspend(cii, state);

				if (err)
					ret = err;
			}
		}

		/* wait until tx and rx streams are stopped */
		i2sbus_wait_for_stop_both(i2sdev);
	}

	return ret;
}
/*
 * Resume every i2s device on this bus: re-program the bus format via
 * the PCM prepare path, then let each codec re-initialize itself.
 * Returns the last codec resume error seen, or 0.
 */
static int i2sbus_resume(struct macio_dev* dev)
{
	struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev);
	struct codec_info_item *cii;
	struct i2sbus_dev *i2sdev;
	int ret = 0;

	list_for_each_entry(i2sdev, &control->list, item) {
		/* reset i2s bus format etc. */
		i2sbus_pcm_prepare_both(i2sdev);

		/* Notify codecs so they can re-initialize */
		list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
			if (cii->codec->resume) {
				int err = cii->codec->resume(cii);

				if (err)
					ret = err;
			}
		}
	}

	return ret;
}
#endif /* CONFIG_PM */
/* No shutdown work needed; hook exists to populate the macio driver ops. */
static int i2sbus_shutdown(struct macio_dev* dev)
{
return 0;
}
/* macio bus driver glue; i2sbus_match is defined elsewhere in this file. */
static struct macio_driver i2sbus_drv = {
.driver = {
.name = "soundbus-i2s",
.owner = THIS_MODULE,
.of_match_table = i2sbus_match,
},
.probe = i2sbus_probe,
.remove = i2sbus_remove,
#ifdef CONFIG_PM
.suspend = i2sbus_suspend,
.resume = i2sbus_resume,
#endif
.shutdown = i2sbus_shutdown,
};
/* Module entry point: register the i2s driver with the macio bus. */
static int __init soundbus_i2sbus_init(void)
{
return macio_register_driver(&i2sbus_drv);
}
/* Module exit point: unregister the driver from the macio bus. */
static void __exit soundbus_i2sbus_exit(void)
{
macio_unregister_driver(&i2sbus_drv);
}
/* Hook the init/exit functions into the module load/unload sequence. */
module_init(soundbus_i2sbus_init);
module_exit(soundbus_i2sbus_exit);
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.