| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) |
|---|---|---|---|---|---|
androidbftab1/bf-kernel | drivers/video/fbdev/core/svgalib.c | 3011 | 20362 | /*
* Common utility functions for VGA-based graphics cards.
*
* Copyright (c) 2006-2007 Ondrej Zajicek <santiago@crfreenet.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* Some parts are based on David Boucher's viafb (http://davesdomain.org.uk/viafb/)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <linux/svga.h>
#include <asm/types.h>
#include <asm/io.h>
/* Write a CRT register value spread across multiple registers */
void svga_wcrt_multi(void __iomem *regbase, const struct vga_regset *regset, u32 value)
{
u8 regval, bitval, bitnum;
while (regset->regnum != VGA_REGSET_END_VAL) {
regval = vga_rcrt(regbase, regset->regnum);
bitnum = regset->lowbit;
while (bitnum <= regset->highbit) {
bitval = 1 << bitnum;
regval = regval & ~bitval;
if (value & 1) regval = regval | bitval;
bitnum++;
value = value >> 1;
}
vga_wcrt(regbase, regset->regnum, regval);
regset++;
}
}
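/*
 * Illustrative sketch (hypothetical register layout, not taken from any
 * real chip): a 10-bit value split across CR00 bits 0-7 and CR07 bits
 * 0-1 would be described to svga_wcrt_multi() as:
 *
 *	static const struct vga_regset h_total_regs[] = {
 *		{0x00, 0, 7},
 *		{0x07, 0, 1},
 *		VGA_REGSET_END
 *	};
 *
 * The value is consumed LSB-first: bits 0-7 land in CR00 and bits 8-9
 * in CR07, while untouched bits of each register are preserved.
 */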
/* Write a sequencer register value spread across multiple registers */
void svga_wseq_multi(void __iomem *regbase, const struct vga_regset *regset, u32 value)
{
u8 regval, bitval, bitnum;
while (regset->regnum != VGA_REGSET_END_VAL) {
regval = vga_rseq(regbase, regset->regnum);
bitnum = regset->lowbit;
while (bitnum <= regset->highbit) {
bitval = 1 << bitnum;
regval = regval & ~bitval;
if (value & 1) regval = regval | bitval;
bitnum++;
value = value >> 1;
}
vga_wseq(regbase, regset->regnum, regval);
regset++;
}
}
static unsigned int svga_regset_size(const struct vga_regset *regset)
{
u8 count = 0;
while (regset->regnum != VGA_REGSET_END_VAL) {
count += regset->highbit - regset->lowbit + 1;
regset++;
}
return 1 << count;
}
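/*
 * For example, a regset covering 10 bits in total yields
 * svga_regset_size() == 1 << 10 == 1024, which the timing checks below
 * use as an exclusive upper bound on the programmable value.
 */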
/* ------------------------------------------------------------------------- */
/* Set graphics controller registers to sane values */
void svga_set_default_gfx_regs(void __iomem *regbase)
{
/* All standard GFX registers (GR00 - GR08) */
vga_wgfx(regbase, VGA_GFX_SR_VALUE, 0x00);
vga_wgfx(regbase, VGA_GFX_SR_ENABLE, 0x00);
vga_wgfx(regbase, VGA_GFX_COMPARE_VALUE, 0x00);
vga_wgfx(regbase, VGA_GFX_DATA_ROTATE, 0x00);
vga_wgfx(regbase, VGA_GFX_PLANE_READ, 0x00);
vga_wgfx(regbase, VGA_GFX_MODE, 0x00);
/* vga_wgfx(regbase, VGA_GFX_MODE, 0x20); */
/* vga_wgfx(regbase, VGA_GFX_MODE, 0x40); */
vga_wgfx(regbase, VGA_GFX_MISC, 0x05);
/* vga_wgfx(regbase, VGA_GFX_MISC, 0x01); */
vga_wgfx(regbase, VGA_GFX_COMPARE_MASK, 0x0F);
vga_wgfx(regbase, VGA_GFX_BIT_MASK, 0xFF);
}
/* Set attribute controller registers to sane values */
void svga_set_default_atc_regs(void __iomem *regbase)
{
u8 count;
vga_r(regbase, 0x3DA); /* reading the input status register resets the ATC flip-flop */
vga_w(regbase, VGA_ATT_W, 0x00); /* index mode, palette access enabled */
/* All standard ATC registers (AR00 - AR14) */
for (count = 0; count <= 0xF; count++)
svga_wattr(regbase, count, count);
svga_wattr(regbase, VGA_ATC_MODE, 0x01);
/* svga_wattr(regbase, VGA_ATC_MODE, 0x41); */
svga_wattr(regbase, VGA_ATC_OVERSCAN, 0x00);
svga_wattr(regbase, VGA_ATC_PLANE_ENABLE, 0x0F);
svga_wattr(regbase, VGA_ATC_PEL, 0x00);
svga_wattr(regbase, VGA_ATC_COLOR_PAGE, 0x00);
vga_r(regbase, 0x3DA);
vga_w(regbase, VGA_ATT_W, 0x20); /* re-enable display (palette address source) */
}
/* Set sequencer registers to sane values */
void svga_set_default_seq_regs(void __iomem *regbase)
{
/* Standard sequencer registers (SR01 - SR04), SR00 is not set */
vga_wseq(regbase, VGA_SEQ_CLOCK_MODE, VGA_SR01_CHAR_CLK_8DOTS);
vga_wseq(regbase, VGA_SEQ_PLANE_WRITE, VGA_SR02_ALL_PLANES);
vga_wseq(regbase, VGA_SEQ_CHARACTER_MAP, 0x00);
/* vga_wseq(regbase, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM | VGA_SR04_SEQ_MODE | VGA_SR04_CHN_4M); */
vga_wseq(regbase, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM | VGA_SR04_SEQ_MODE);
}
/* Set CRTC registers to sane values */
void svga_set_default_crt_regs(void __iomem *regbase)
{
/* Standard CRT registers CR03 CR08 CR09 CR14 CR17 */
svga_wcrt_mask(regbase, 0x03, 0x80, 0x80); /* Enable vertical retrace EVRA */
vga_wcrt(regbase, VGA_CRTC_PRESET_ROW, 0);
svga_wcrt_mask(regbase, VGA_CRTC_MAX_SCAN, 0, 0x1F);
vga_wcrt(regbase, VGA_CRTC_UNDERLINE, 0);
vga_wcrt(regbase, VGA_CRTC_MODE, 0xE3);
}
void svga_set_textmode_vga_regs(void __iomem *regbase)
{
/* svga_wseq_mask(regbase, 0x1, 0x00, 0x01); */ /* Switch 8/9 pixel per char */
vga_wseq(regbase, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM);
vga_wseq(regbase, VGA_SEQ_PLANE_WRITE, 0x03);
vga_wcrt(regbase, VGA_CRTC_MAX_SCAN, 0x0f); /* 0x4f */
vga_wcrt(regbase, VGA_CRTC_UNDERLINE, 0x1f);
svga_wcrt_mask(regbase, VGA_CRTC_MODE, 0x23, 0x7f);
vga_wcrt(regbase, VGA_CRTC_CURSOR_START, 0x0d);
vga_wcrt(regbase, VGA_CRTC_CURSOR_END, 0x0e);
vga_wcrt(regbase, VGA_CRTC_CURSOR_HI, 0x00);
vga_wcrt(regbase, VGA_CRTC_CURSOR_LO, 0x00);
vga_wgfx(regbase, VGA_GFX_MODE, 0x10); /* Odd/even memory mode */
vga_wgfx(regbase, VGA_GFX_MISC, 0x0E); /* Misc graphics register - text mode enable */
vga_wgfx(regbase, VGA_GFX_COMPARE_MASK, 0x00);
vga_r(regbase, 0x3DA);
vga_w(regbase, VGA_ATT_W, 0x00);
svga_wattr(regbase, 0x10, 0x0C); /* Attribute Mode Control Register - text mode, blinking and line graphics */
svga_wattr(regbase, 0x13, 0x08); /* Horizontal Pixel Panning Register */
vga_r(regbase, 0x3DA);
vga_w(regbase, VGA_ATT_W, 0x20);
}
#if 0
void svga_dump_var(struct fb_var_screeninfo *var, int node)
{
pr_debug("fb%d: var.vmode : 0x%X\n", node, var->vmode);
pr_debug("fb%d: var.xres : %d\n", node, var->xres);
pr_debug("fb%d: var.yres : %d\n", node, var->yres);
pr_debug("fb%d: var.bits_per_pixel: %d\n", node, var->bits_per_pixel);
pr_debug("fb%d: var.xres_virtual : %d\n", node, var->xres_virtual);
pr_debug("fb%d: var.yres_virtual : %d\n", node, var->yres_virtual);
pr_debug("fb%d: var.left_margin : %d\n", node, var->left_margin);
pr_debug("fb%d: var.right_margin : %d\n", node, var->right_margin);
pr_debug("fb%d: var.upper_margin : %d\n", node, var->upper_margin);
pr_debug("fb%d: var.lower_margin : %d\n", node, var->lower_margin);
pr_debug("fb%d: var.hsync_len : %d\n", node, var->hsync_len);
pr_debug("fb%d: var.vsync_len : %d\n", node, var->vsync_len);
pr_debug("fb%d: var.sync : 0x%X\n", node, var->sync);
pr_debug("fb%d: var.pixclock : %d\n\n", node, var->pixclock);
}
#endif /* 0 */
/* ------------------------------------------------------------------------- */
void svga_settile(struct fb_info *info, struct fb_tilemap *map)
{
const u8 *font = map->data;
u8 __iomem *fb = (u8 __iomem *)info->screen_base;
int i, c;
if ((map->width != 8) || (map->height != 16) ||
(map->depth != 1) || (map->length != 256)) {
fb_err(info, "unsupported font parameters: width %d, height %d, depth %d, length %d\n",
map->width, map->height, map->depth, map->length);
return;
}
fb += 2;
for (c = 0; c < map->length; c++) {
for (i = 0; i < map->height; i++) {
fb_writeb(font[i], fb + i * 4);
// fb[i * 4] = font[i];
}
fb += 128;
font += map->height;
}
}
/* Copy area in text (tileblit) mode */
void svga_tilecopy(struct fb_info *info, struct fb_tilearea *area)
{
int dx, dy;
/* colstride is halved in this function because u16 are used */
int colstride = 1 << (info->fix.type_aux & FB_AUX_TEXT_SVGA_MASK);
int rowstride = colstride * (info->var.xres_virtual / 8);
u16 __iomem *fb = (u16 __iomem *) info->screen_base;
u16 __iomem *src, *dst;
if ((area->sy > area->dy) ||
((area->sy == area->dy) && (area->sx > area->dx))) {
src = fb + area->sx * colstride + area->sy * rowstride;
dst = fb + area->dx * colstride + area->dy * rowstride;
} else {
src = fb + (area->sx + area->width - 1) * colstride
+ (area->sy + area->height - 1) * rowstride;
dst = fb + (area->dx + area->width - 1) * colstride
+ (area->dy + area->height - 1) * rowstride;
colstride = -colstride;
rowstride = -rowstride;
}
for (dy = 0; dy < area->height; dy++) {
u16 __iomem *src2 = src;
u16 __iomem *dst2 = dst;
for (dx = 0; dx < area->width; dx++) {
fb_writew(fb_readw(src2), dst2);
// *dst2 = *src2;
src2 += colstride;
dst2 += colstride;
}
src += rowstride;
dst += rowstride;
}
}
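/*
 * Note: like memmove(), svga_tilecopy() picks the copy direction from
 * the relative source/destination positions, copying backwards (with
 * negated strides) when the destination lies after the source, so
 * overlapping areas are not corrupted.
 */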
/* Fill area in text (tileblit) mode */
void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect)
{
int dx, dy;
int colstride = 2 << (info->fix.type_aux & FB_AUX_TEXT_SVGA_MASK);
int rowstride = colstride * (info->var.xres_virtual / 8);
int attr = (0x0F & rect->bg) << 4 | (0x0F & rect->fg);
u8 __iomem *fb = (u8 __iomem *)info->screen_base;
fb += rect->sx * colstride + rect->sy * rowstride;
for (dy = 0; dy < rect->height; dy++) {
u8 __iomem *fb2 = fb;
for (dx = 0; dx < rect->width; dx++) {
fb_writeb(rect->index, fb2);
fb_writeb(attr, fb2 + 1);
fb2 += colstride;
}
fb += rowstride;
}
}
/* Write text in text (tileblit) mode */
void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit)
{
int dx, dy, i;
int colstride = 2 << (info->fix.type_aux & FB_AUX_TEXT_SVGA_MASK);
int rowstride = colstride * (info->var.xres_virtual / 8);
int attr = (0x0F & blit->bg) << 4 | (0x0F & blit->fg);
u8 __iomem *fb = (u8 __iomem *)info->screen_base;
fb += blit->sx * colstride + blit->sy * rowstride;
i = 0;
for (dy = 0; dy < blit->height; dy++) {
u8 __iomem *fb2 = fb;
for (dx = 0; dx < blit->width; dx++) {
fb_writeb(blit->indices[i], fb2);
fb_writeb(attr, fb2 + 1);
fb2 += colstride;
i++;
if (i == blit->length) return;
}
fb += rowstride;
}
}
/* Set cursor in text (tileblit) mode */
void svga_tilecursor(void __iomem *regbase, struct fb_info *info, struct fb_tilecursor *cursor)
{
u8 cs = 0x0d;
u8 ce = 0x0e;
u16 pos = cursor->sx + (info->var.xoffset / 8)
+ (cursor->sy + (info->var.yoffset / 16))
* (info->var.xres_virtual / 8);
if (!cursor->mode)
return;
svga_wcrt_mask(regbase, 0x0A, 0x20, 0x20); /* disable cursor */
if (cursor->shape == FB_TILE_CURSOR_NONE)
return;
switch (cursor->shape) {
case FB_TILE_CURSOR_UNDERLINE:
cs = 0x0d;
break;
case FB_TILE_CURSOR_LOWER_THIRD:
cs = 0x09;
break;
case FB_TILE_CURSOR_LOWER_HALF:
cs = 0x07;
break;
case FB_TILE_CURSOR_TWO_THIRDS:
cs = 0x05;
break;
case FB_TILE_CURSOR_BLOCK:
cs = 0x01;
break;
}
/* set cursor position */
vga_wcrt(regbase, 0x0E, pos >> 8);
vga_wcrt(regbase, 0x0F, pos & 0xFF);
vga_wcrt(regbase, 0x0B, ce); /* set cursor end */
vga_wcrt(regbase, 0x0A, cs); /* set cursor start and enable it */
}
int svga_get_tilemax(struct fb_info *info)
{
return 256;
}
/* Get capabilities of accelerator based on the mode */
void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
struct fb_var_screeninfo *var)
{
if (var->bits_per_pixel == 0) {
/* can only support 256 8x16 bitmap */
caps->x = 1 << (8 - 1);
caps->y = 1 << (16 - 1);
caps->len = 256;
} else {
caps->x = (var->bits_per_pixel == 4) ? 1 << (8 - 1) : ~(u32)0;
caps->y = ~(u32)0;
caps->len = ~(u32)0;
}
}
EXPORT_SYMBOL(svga_get_caps);
/* ------------------------------------------------------------------------- */
/*
* Compute PLL settings (M, N, R)
* F_VCO = (F_BASE * M) / N
* F_OUT = F_VCO / (2^R)
*/
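/*
 * Worked example with hypothetical numbers (for illustration only):
 * F_BASE = 24000 kHz, M = 25, N = 3, R = 2 gives
 *	F_VCO = (24000 * 25) / 3 = 200000 kHz
 *	F_OUT = 200000 / (2^2) = 50000 kHz
 * svga_compute_pll() below lowers R from r_max while F_VCO exceeds
 * f_vco_max, then searches M and N for the closest match to F_VCO.
 */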
static inline u32 abs_diff(u32 a, u32 b)
{
return (a > b) ? (a - b) : (b - a);
}
int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node)
{
u16 am, an, ar;
u32 f_vco, f_current, delta_current, delta_best;
pr_debug("fb%d: ideal frequency: %d kHz\n", node, (unsigned int) f_wanted);
ar = pll->r_max;
f_vco = f_wanted << ar;
/* overflow check */
if ((f_vco >> ar) != f_wanted)
return -EINVAL;
/* It is usually better to have greater VCO clock
because of better frequency stability.
So first try r_max, then r smaller. */
while ((ar > pll->r_min) && (f_vco > pll->f_vco_max)) {
ar--;
f_vco = f_vco >> 1;
}
/* VCO bounds check */
if ((f_vco < pll->f_vco_min) || (f_vco > pll->f_vco_max))
return -EINVAL;
delta_best = 0xFFFFFFFF;
*m = 0;
*n = 0;
*r = ar;
am = pll->m_min;
an = pll->n_min;
while ((am <= pll->m_max) && (an <= pll->n_max)) {
f_current = (pll->f_base * am) / an;
delta_current = abs_diff (f_current, f_vco);
if (delta_current < delta_best) {
delta_best = delta_current;
*m = am;
*n = an;
}
if (f_current <= f_vco) {
am++;
} else {
an++;
}
}
f_current = (pll->f_base * *m) / *n;
pr_debug("fb%d: found frequency: %d kHz (VCO %d kHz)\n", node, (int) (f_current >> ar), (int) f_current);
pr_debug("fb%d: m = %d n = %d r = %d\n", node, (unsigned int) *m, (unsigned int) *n, (unsigned int) *r);
return 0;
}
/* ------------------------------------------------------------------------- */
/* Check CRT timing values */
int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node)
{
u32 value;
/* Round horizontal timing values up to a multiple of 8 pixels (character clock) */
var->xres = (var->xres + 7) & ~7;
var->left_margin = (var->left_margin + 7) & ~7;
var->right_margin = (var->right_margin + 7) & ~7;
var->hsync_len = (var->hsync_len + 7) & ~7;
/* Check horizontal total */
value = var->xres + var->left_margin + var->right_margin + var->hsync_len;
if (((value / 8) - 5) >= svga_regset_size (tm->h_total_regs))
return -EINVAL;
/* Check horizontal display and blank start */
value = var->xres;
if (((value / 8) - 1) >= svga_regset_size (tm->h_display_regs))
return -EINVAL;
if (((value / 8) - 1) >= svga_regset_size (tm->h_blank_start_regs))
return -EINVAL;
/* Check horizontal sync start */
value = var->xres + var->right_margin;
if (((value / 8) - 1) >= svga_regset_size (tm->h_sync_start_regs))
return -EINVAL;
/* Check horizontal blank end (or length) */
value = var->left_margin + var->right_margin + var->hsync_len;
if ((value == 0) || ((value / 8) >= svga_regset_size (tm->h_blank_end_regs)))
return -EINVAL;
/* Check horizontal sync end (or length) */
value = var->hsync_len;
if ((value == 0) || ((value / 8) >= svga_regset_size (tm->h_sync_end_regs)))
return -EINVAL;
/* Check vertical total */
value = var->yres + var->upper_margin + var->lower_margin + var->vsync_len;
if ((value - 1) >= svga_regset_size(tm->v_total_regs))
return -EINVAL;
/* Check vertical display and blank start */
value = var->yres;
if ((value - 1) >= svga_regset_size(tm->v_display_regs))
return -EINVAL;
if ((value - 1) >= svga_regset_size(tm->v_blank_start_regs))
return -EINVAL;
/* Check vertical sync start */
value = var->yres + var->lower_margin;
if ((value - 1) >= svga_regset_size(tm->v_sync_start_regs))
return -EINVAL;
/* Check vertical blank end (or length) */
value = var->upper_margin + var->lower_margin + var->vsync_len;
if ((value == 0) || (value >= svga_regset_size (tm->v_blank_end_regs)))
return -EINVAL;
/* Check vertical sync end (or length) */
value = var->vsync_len;
if ((value == 0) || (value >= svga_regset_size (tm->v_sync_end_regs)))
return -EINVAL;
return 0;
}
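/*
 * Example of the bound being enforced: if h_total_regs covered 9 bits,
 * svga_regset_size() would return 512, so the largest programmable
 * horizontal total would be (511 + 5) * 8 = 4128 pixels (the register
 * holds total/8 - 5, matching the check above).
 */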
/* Set CRT timing registers */
void svga_set_timings(void __iomem *regbase, const struct svga_timing_regs *tm,
struct fb_var_screeninfo *var,
u32 hmul, u32 hdiv, u32 vmul, u32 vdiv, u32 hborder, int node)
{
u8 regval;
u32 value;
value = var->xres + var->left_margin + var->right_margin + var->hsync_len;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal total : %d\n", node, value);
svga_wcrt_multi(regbase, tm->h_total_regs, (value / 8) - 5);
value = var->xres;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal display : %d\n", node, value);
svga_wcrt_multi(regbase, tm->h_display_regs, (value / 8) - 1);
value = var->xres;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal blank start: %d\n", node, value);
svga_wcrt_multi(regbase, tm->h_blank_start_regs, (value / 8) - 1 + hborder);
value = var->xres + var->left_margin + var->right_margin + var->hsync_len;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal blank end : %d\n", node, value);
svga_wcrt_multi(regbase, tm->h_blank_end_regs, (value / 8) - 1 - hborder);
value = var->xres + var->right_margin;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal sync start : %d\n", node, value);
svga_wcrt_multi(regbase, tm->h_sync_start_regs, (value / 8));
value = var->xres + var->right_margin + var->hsync_len;
value = (value * hmul) / hdiv;
pr_debug("fb%d: horizontal sync end : %d\n", node, value);
svga_wcrt_multi(regbase, tm->h_sync_end_regs, (value / 8));
value = var->yres + var->upper_margin + var->lower_margin + var->vsync_len;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical total : %d\n", node, value);
svga_wcrt_multi(regbase, tm->v_total_regs, value - 2);
value = var->yres;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical display : %d\n", node, value);
svga_wcrt_multi(regbase, tm->v_display_regs, value - 1);
value = var->yres;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical blank start : %d\n", node, value);
svga_wcrt_multi(regbase, tm->v_blank_start_regs, value);
value = var->yres + var->upper_margin + var->lower_margin + var->vsync_len;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical blank end : %d\n", node, value);
svga_wcrt_multi(regbase, tm->v_blank_end_regs, value - 2);
value = var->yres + var->lower_margin;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical sync start : %d\n", node, value);
svga_wcrt_multi(regbase, tm->v_sync_start_regs, value);
value = var->yres + var->lower_margin + var->vsync_len;
value = (value * vmul) / vdiv;
pr_debug("fb%d: vertical sync end : %d\n", node, value);
svga_wcrt_multi(regbase, tm->v_sync_end_regs, value);
/* Set horizontal and vertical sync pulse polarity in misc register */
regval = vga_r(regbase, VGA_MIS_R);
if (var->sync & FB_SYNC_HOR_HIGH_ACT) {
pr_debug("fb%d: positive horizontal sync\n", node);
regval = regval & ~0x80;
} else {
pr_debug("fb%d: negative horizontal sync\n", node);
regval = regval | 0x80;
}
if (var->sync & FB_SYNC_VERT_HIGH_ACT) {
pr_debug("fb%d: positive vertical sync\n", node);
regval = regval & ~0x40;
} else {
pr_debug("fb%d: negative vertical sync\n\n", node);
regval = regval | 0x40;
}
vga_w(regbase, VGA_MIS_W, regval);
}
/* ------------------------------------------------------------------------- */
static inline int match_format(const struct svga_fb_format *frm,
struct fb_var_screeninfo *var)
{
int i = 0;
int stored = -EINVAL;
while (frm->bits_per_pixel != SVGA_FORMAT_END_VAL)
{
if ((var->bits_per_pixel == frm->bits_per_pixel) &&
(var->red.length <= frm->red.length) &&
(var->green.length <= frm->green.length) &&
(var->blue.length <= frm->blue.length) &&
(var->transp.length <= frm->transp.length) &&
(var->nonstd == frm->nonstd))
return i;
if (var->bits_per_pixel == frm->bits_per_pixel)
stored = i;
i++;
frm++;
}
return stored;
}
int svga_match_format(const struct svga_fb_format *frm,
struct fb_var_screeninfo *var,
struct fb_fix_screeninfo *fix)
{
int i = match_format(frm, var);
if (i >= 0) {
var->bits_per_pixel = frm[i].bits_per_pixel;
var->red = frm[i].red;
var->green = frm[i].green;
var->blue = frm[i].blue;
var->transp = frm[i].transp;
var->nonstd = frm[i].nonstd;
if (fix != NULL) {
fix->type = frm[i].type;
fix->type_aux = frm[i].type_aux;
fix->visual = frm[i].visual;
fix->xpanstep = frm[i].xpanstep;
}
}
return i;
}
EXPORT_SYMBOL(svga_wcrt_multi);
EXPORT_SYMBOL(svga_wseq_multi);
EXPORT_SYMBOL(svga_set_default_gfx_regs);
EXPORT_SYMBOL(svga_set_default_atc_regs);
EXPORT_SYMBOL(svga_set_default_seq_regs);
EXPORT_SYMBOL(svga_set_default_crt_regs);
EXPORT_SYMBOL(svga_set_textmode_vga_regs);
EXPORT_SYMBOL(svga_settile);
EXPORT_SYMBOL(svga_tilecopy);
EXPORT_SYMBOL(svga_tilefill);
EXPORT_SYMBOL(svga_tileblit);
EXPORT_SYMBOL(svga_tilecursor);
EXPORT_SYMBOL(svga_get_tilemax);
EXPORT_SYMBOL(svga_compute_pll);
EXPORT_SYMBOL(svga_check_timings);
EXPORT_SYMBOL(svga_set_timings);
EXPORT_SYMBOL(svga_match_format);
MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>");
MODULE_DESCRIPTION("Common utility functions for VGA-based graphics cards");
MODULE_LICENSE("GPL");
| gpl-2.0 |
pinpong/enigma-i9100 | drivers/watchdog/coh901327_wdt.c | 4035 | 14517 | /*
* coh901327_wdt.c
*
* Copyright (C) 2008-2009 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
* Watchdog driver for the ST-Ericsson AB COH 901 327 IP core
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/delay.h>
#define DRV_NAME "WDOG COH 901 327"
/*
* COH 901 327 register definitions
*/
/* WDOG_FEED Register 32bit (-/W) */
#define U300_WDOG_FR 0x00
#define U300_WDOG_FR_FEED_RESTART_TIMER 0xFEEDU
/* WDOG_TIMEOUT Register 32bit (R/W) */
#define U300_WDOG_TR 0x04
#define U300_WDOG_TR_TIMEOUT_MASK 0x7FFFU
/* WDOG_DISABLE1 Register 32bit (-/W) */
#define U300_WDOG_D1R 0x08
#define U300_WDOG_D1R_DISABLE1_DISABLE_TIMER 0x2BADU
/* WDOG_DISABLE2 Register 32bit (R/W) */
#define U300_WDOG_D2R 0x0C
#define U300_WDOG_D2R_DISABLE2_DISABLE_TIMER 0xCAFEU
#define U300_WDOG_D2R_DISABLE_STATUS_DISABLED 0xDABEU
#define U300_WDOG_D2R_DISABLE_STATUS_ENABLED 0x0000U
/* WDOG_STATUS Register 32bit (R/W) */
#define U300_WDOG_SR 0x10
#define U300_WDOG_SR_STATUS_TIMED_OUT 0xCFE8U
#define U300_WDOG_SR_STATUS_NORMAL 0x0000U
#define U300_WDOG_SR_RESET_STATUS_RESET 0xE8B4U
/* WDOG_COUNT Register 32bit (R/-) */
#define U300_WDOG_CR 0x14
#define U300_WDOG_CR_VALID_IND 0x8000U
#define U300_WDOG_CR_VALID_STABLE 0x0000U
#define U300_WDOG_CR_COUNT_VALUE_MASK 0x7FFFU
/* WDOG_JTAGOVR Register 32bit (R/W) */
#define U300_WDOG_JOR 0x18
#define U300_WDOG_JOR_JTAG_MODE_IND 0x0002U
#define U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE 0x0001U
/* WDOG_RESTART Register 32bit (-/W) */
#define U300_WDOG_RR 0x1C
#define U300_WDOG_RR_RESTART_VALUE_RESUME 0xACEDU
/* WDOG_IRQ_EVENT Register 32bit (R/W) */
#define U300_WDOG_IER 0x20
#define U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND 0x0001U
#define U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE 0x0001U
/* WDOG_IRQ_MASK Register 32bit (R/W) */
#define U300_WDOG_IMR 0x24
#define U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE 0x0001U
/* WDOG_IRQ_FORCE Register 32bit (R/W) */
#define U300_WDOG_IFR 0x28
#define U300_WDOG_IFR_WILL_BARK_IRQ_FORCE_ENABLE 0x0001U
/* Default timeout in seconds = 1 minute */
static int margin = 60;
static resource_size_t phybase;
static resource_size_t physize;
static int irq;
static void __iomem *virtbase;
static unsigned long coh901327_users;
static unsigned long boot_status;
static u16 wdogenablestore;
static u16 irqmaskstore;
static struct device *parent;
/*
* The watchdog block is of course always clocked, the
* clk_enable()/clk_disable() calls are mainly for performing reference
* counting higher up in the clock hierarchy.
*/
static struct clk *clk;
/*
* Enabling and disabling functions.
*/
static void coh901327_enable(u16 timeout)
{
u16 val;
unsigned long freq;
unsigned long delay_ns;
clk_enable(clk);
/* Restart timer if it is disabled */
val = readw(virtbase + U300_WDOG_D2R);
if (val == U300_WDOG_D2R_DISABLE_STATUS_DISABLED)
writew(U300_WDOG_RR_RESTART_VALUE_RESUME,
virtbase + U300_WDOG_RR);
/* Acknowledge any pending interrupt so it doesn't just fire off */
writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
virtbase + U300_WDOG_IER);
/*
* The interrupt is cleared in the 32 kHz clock domain.
* Wait 3 32 kHz cycles for it to take effect
*/
freq = clk_get_rate(clk);
delay_ns = DIV_ROUND_UP(1000000000, freq); /* Freq to ns and round up */
delay_ns = 3 * delay_ns; /* Wait 3 cycles */
ndelay(delay_ns);
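/*
 * Example: for the 32 kHz (32768 Hz) clock mentioned above, one cycle
 * is DIV_ROUND_UP(1000000000, 32768) = 30518 ns, so the total delay
 * is 3 * 30518 ns, roughly 92 us.
 */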
/* Enable the watchdog interrupt */
writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR);
/* Activate the watchdog timer */
writew(timeout, virtbase + U300_WDOG_TR);
/* Start the watchdog timer */
writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR);
/*
* Extra read so that this change propagate in the watchdog.
*/
(void) readw(virtbase + U300_WDOG_CR);
val = readw(virtbase + U300_WDOG_D2R);
clk_disable(clk);
if (val != U300_WDOG_D2R_DISABLE_STATUS_ENABLED)
dev_err(parent,
"%s(): watchdog not enabled! D2R value %04x\n",
__func__, val);
}
static void coh901327_disable(void)
{
u16 val;
clk_enable(clk);
/* Disable the watchdog interrupt if it is active */
writew(0x0000U, virtbase + U300_WDOG_IMR);
/* If the watchdog is currently enabled, attempt to disable it */
val = readw(virtbase + U300_WDOG_D2R);
if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED) {
writew(U300_WDOG_D1R_DISABLE1_DISABLE_TIMER,
virtbase + U300_WDOG_D1R);
writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER,
virtbase + U300_WDOG_D2R);
/* Write this twice (else problems occur) */
writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER,
virtbase + U300_WDOG_D2R);
}
val = readw(virtbase + U300_WDOG_D2R);
clk_disable(clk);
if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED)
dev_err(parent,
"%s(): watchdog not disabled! D2R value %04x\n",
__func__, val);
}
static void coh901327_start(void)
{
coh901327_enable(margin * 100);
}
static void coh901327_keepalive(void)
{
clk_enable(clk);
/* Feed the watchdog */
writew(U300_WDOG_FR_FEED_RESTART_TIMER,
virtbase + U300_WDOG_FR);
clk_disable(clk);
}
static int coh901327_settimeout(int time)
{
/*
* Max margin is 327 since the 10ms
* timeout register is max
* 0x7FFF = 327670ms ~= 327s.
*/
if (time <= 0 || time > 327)
return -EINVAL;
margin = time;
clk_enable(clk);
/* Set new timeout value (the register counts 10 ms ticks) */
writew(margin * 100, virtbase + U300_WDOG_TR);
/* Feed the dog */
writew(U300_WDOG_FR_FEED_RESTART_TIMER,
virtbase + U300_WDOG_FR);
clk_disable(clk);
return 0;
}
/*
* This interrupt occurs 10 ms before the watchdog WILL bark.
*/
static irqreturn_t coh901327_interrupt(int irq, void *data)
{
u16 val;
/*
* Ack IRQ? If this occurs we're FUBAR anyway, so
* just acknowledge, disable the interrupt and await the imminent end.
* If you at some point need a host of callbacks to be called
* when the system is about to watchdog-reset, add them here!
*
* NOTE: on future versions of this IP-block, it will be possible
* to prevent a watchdog reset by feeding the watchdog at this
* point.
*/
clk_enable(clk);
val = readw(virtbase + U300_WDOG_IER);
if (val == U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND)
writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
virtbase + U300_WDOG_IER);
writew(0x0000U, virtbase + U300_WDOG_IMR);
clk_disable(clk);
dev_crit(parent, "watchdog is barking!\n");
return IRQ_HANDLED;
}
/*
* Allow only one user (daemon) to open the watchdog
*/
static int coh901327_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(1, &coh901327_users))
return -EBUSY;
coh901327_start();
return nonseekable_open(inode, file);
}
static int coh901327_release(struct inode *inode, struct file *file)
{
clear_bit(1, &coh901327_users);
coh901327_disable();
return 0;
}
static ssize_t coh901327_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
if (len)
coh901327_keepalive();
return len;
}
static long coh901327_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret = -ENOTTY;
u16 val;
int time;
int new_options;
union {
struct watchdog_info __user *ident;
int __user *i;
} uarg;
static const struct watchdog_info ident = {
.options = WDIOF_CARDRESET |
WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING,
.identity = "COH 901 327 Watchdog",
.firmware_version = 1,
};
uarg.i = (int __user *)arg;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = copy_to_user(uarg.ident, &ident,
sizeof(ident)) ? -EFAULT : 0;
break;
case WDIOC_GETSTATUS:
ret = put_user(0, uarg.i);
break;
case WDIOC_GETBOOTSTATUS:
ret = put_user(boot_status, uarg.i);
break;
case WDIOC_SETOPTIONS:
ret = get_user(new_options, uarg.i);
if (ret)
break;
if (new_options & WDIOS_DISABLECARD)
coh901327_disable();
if (new_options & WDIOS_ENABLECARD)
coh901327_start();
ret = 0;
break;
case WDIOC_KEEPALIVE:
coh901327_keepalive();
ret = 0;
break;
case WDIOC_SETTIMEOUT:
ret = get_user(time, uarg.i);
if (ret)
break;
ret = coh901327_settimeout(time);
if (ret)
break;
/* Then fall through to return set value */
case WDIOC_GETTIMEOUT:
ret = put_user(margin, uarg.i);
break;
case WDIOC_GETTIMELEFT:
clk_enable(clk);
/* Read repeatedly until the value is stable! */
val = readw(virtbase + U300_WDOG_CR);
while (val & U300_WDOG_CR_VALID_IND)
val = readw(virtbase + U300_WDOG_CR);
val &= U300_WDOG_CR_COUNT_VALUE_MASK;
clk_disable(clk);
if (val != 0)
val /= 100; /* 10 ms ticks -> seconds */
ret = put_user(val, uarg.i);
break;
}
return ret;
}
static const struct file_operations coh901327_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = coh901327_write,
.unlocked_ioctl = coh901327_ioctl,
.open = coh901327_open,
.release = coh901327_release,
};
static struct miscdevice coh901327_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &coh901327_fops,
};
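/*
 * Minimal userspace sketch (illustrative only, not part of this
 * driver): how a daemon would typically drive this watchdog through
 * the standard /dev/watchdog interface implemented above.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/watchdog.h>
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);
 *	int timeout = 120;
 *	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	// clamped to 1..327 s here
 *	for (;;) {
 *		ioctl(fd, WDIOC_KEEPALIVE, 0);	// feed before it barks
 *		sleep(timeout / 2);
 *	}
 */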
static int __exit coh901327_remove(struct platform_device *pdev)
{
misc_deregister(&coh901327_miscdev);
coh901327_disable();
free_irq(irq, pdev);
clk_put(clk);
iounmap(virtbase);
release_mem_region(phybase, physize);
return 0;
}
static int __init coh901327_probe(struct platform_device *pdev)
{
int ret;
u16 val;
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOENT;
parent = &pdev->dev;
physize = resource_size(res);
phybase = res->start;
if (request_mem_region(phybase, physize, DRV_NAME) == NULL) {
ret = -EBUSY;
goto out;
}
virtbase = ioremap(phybase, physize);
if (!virtbase) {
ret = -ENOMEM;
goto out_no_remap;
}
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(&pdev->dev, "could not get clock\n");
goto out_no_clk;
}
ret = clk_enable(clk);
if (ret) {
dev_err(&pdev->dev, "could not enable clock\n");
goto out_no_clk_enable;
}
val = readw(virtbase + U300_WDOG_SR);
switch (val) {
case U300_WDOG_SR_STATUS_TIMED_OUT:
dev_info(&pdev->dev,
"watchdog timed out since last chip reset!\n");
boot_status = WDIOF_CARDRESET;
/* Status will be cleared below */
break;
case U300_WDOG_SR_STATUS_NORMAL:
dev_info(&pdev->dev,
"in normal status, no timeouts have occurred.\n");
break;
default:
dev_info(&pdev->dev,
"contains an illegal status code (%08x)\n", val);
break;
}
val = readw(virtbase + U300_WDOG_D2R);
switch (val) {
case U300_WDOG_D2R_DISABLE_STATUS_DISABLED:
dev_info(&pdev->dev, "currently disabled.\n");
break;
case U300_WDOG_D2R_DISABLE_STATUS_ENABLED:
dev_info(&pdev->dev,
"currently enabled! (disabling it now)\n");
coh901327_disable();
break;
default:
dev_err(&pdev->dev,
"contains an illegal enable/disable code (%08x)\n",
val);
break;
}
/* Reset the watchdog */
writew(U300_WDOG_SR_RESET_STATUS_RESET, virtbase + U300_WDOG_SR);
irq = platform_get_irq(pdev, 0);
if (request_irq(irq, coh901327_interrupt, IRQF_DISABLED,
DRV_NAME " Bark", pdev)) {
ret = -EIO;
goto out_no_irq;
}
clk_disable(clk);
ret = misc_register(&coh901327_miscdev);
if (ret == 0)
dev_info(&pdev->dev,
"initialized. timer margin=%d sec\n", margin);
else
goto out_no_wdog;
return 0;
out_no_wdog:
free_irq(irq, pdev);
out_no_irq:
clk_disable(clk);
out_no_clk_enable:
clk_put(clk);
out_no_clk:
iounmap(virtbase);
out_no_remap:
release_mem_region(phybase, physize); /* match the request_mem_region() size */
out:
return ret;
}
#ifdef CONFIG_PM
static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
{
irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
wdogenablestore = readw(virtbase + U300_WDOG_D2R);
/* If watchdog is on, disable it here and now */
if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED)
coh901327_disable();
return 0;
}
static int coh901327_resume(struct platform_device *pdev)
{
/* Restore the watchdog interrupt */
writew(irqmaskstore, virtbase + U300_WDOG_IMR);
if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED) {
/* Restart the watchdog timer */
writew(U300_WDOG_RR_RESTART_VALUE_RESUME,
virtbase + U300_WDOG_RR);
writew(U300_WDOG_FR_FEED_RESTART_TIMER,
virtbase + U300_WDOG_FR);
}
return 0;
}
#else
#define coh901327_suspend NULL
#define coh901327_resume NULL
#endif
/*
* Mistreating the watchdog is the only way to perform a software reset of the
* system on EMP platforms. So we implement this and export a symbol for it.
*/
void coh901327_watchdog_reset(void)
{
/* Enable even if on JTAG too */
writew(U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE,
virtbase + U300_WDOG_JOR);
/*
* Timeout = 5s, we have to wait for the watchdog reset to
* actually take place: the watchdog will be reloaded with the
* default value immediately, so we HAVE to reboot and get back
* into the kernel in 30s, or the device will reboot again!
* The boot loader will typically deactivate the watchdog, so we
* need time enough for the boot loader to get to the point of
* deactivating the watchdog before it is shut down by it.
*
* NOTE: on future versions of the watchdog, this restriction is
* gone: the watchdog will be reloaded with a default value (1 min)
* instead of last value, and you can conveniently set the watchdog
* timeout to 10ms (value = 1) without any problems.
*/
coh901327_enable(500); /* 500 ticks * 10 ms = 5 s */
/* Return and await doom */
}
static struct platform_driver coh901327_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "coh901327_wdog",
},
.remove = __exit_p(coh901327_remove),
.suspend = coh901327_suspend,
.resume = coh901327_resume,
};
static int __init coh901327_init(void)
{
return platform_driver_probe(&coh901327_driver, coh901327_probe);
}
module_init(coh901327_init);
static void __exit coh901327_exit(void)
{
platform_driver_unregister(&coh901327_driver);
}
module_exit(coh901327_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("COH 901 327 Watchdog");
module_param(margin, int, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
| gpl-2.0 |
syhost/android_kernel_pantech_ef59l | drivers/spi/spi-topcliff-pch.c | 4803 | 49513 | /*
* SPI bus driver for the Topcliff PCH used by Intel SoCs
*
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spi/spidev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/pch_dma.h>
/* Register offsets */
#define PCH_SPCR 0x00 /* SPI control register */
#define PCH_SPBRR 0x04 /* SPI baud rate register */
#define PCH_SPSR 0x08 /* SPI status register */
#define PCH_SPDWR 0x0C /* SPI write data register */
#define PCH_SPDRR 0x10 /* SPI read data register */
#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */
#define PCH_SRST 0x1C /* SPI reset register */
#define PCH_ADDRESS_SIZE 0x20
#define PCH_SPSR_TFD 0x000007C0
#define PCH_SPSR_RFD 0x0000F800
#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11)
#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6)
#define PCH_RX_THOLD 7
#define PCH_RX_THOLD_MAX 15
#define PCH_TX_THOLD 2
#define PCH_MAX_BAUDRATE 5000000
#define PCH_MAX_FIFO_DEPTH 16
#define STATUS_RUNNING 1
#define STATUS_EXITING 2
#define PCH_SLEEP_TIME 10
#define SSN_LOW 0x02U
#define SSN_HIGH 0x03U
#define SSN_NO_CONTROL 0x00U
#define PCH_MAX_CS 0xFF
#define PCI_DEVICE_ID_GE_SPI 0x8816
#define SPCR_SPE_BIT (1 << 0)
#define SPCR_MSTR_BIT (1 << 1)
#define SPCR_LSBF_BIT (1 << 4)
#define SPCR_CPHA_BIT (1 << 5)
#define SPCR_CPOL_BIT (1 << 6)
#define SPCR_TFIE_BIT (1 << 8)
#define SPCR_RFIE_BIT (1 << 9)
#define SPCR_FIE_BIT (1 << 10)
#define SPCR_ORIE_BIT (1 << 11)
#define SPCR_MDFIE_BIT (1 << 12)
#define SPCR_FICLR_BIT (1 << 24)
#define SPSR_TFI_BIT (1 << 0)
#define SPSR_RFI_BIT (1 << 1)
#define SPSR_FI_BIT (1 << 2)
#define SPSR_ORF_BIT (1 << 3)
#define SPBRR_SIZE_BIT (1 << 10)
#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
#define SPCR_RFIC_FIELD 20
#define SPCR_TFIC_FIELD 16
#define MASK_SPBRR_SPBR_BITS ((1 << 10) - 1)
#define MASK_RFIC_SPCR_BITS (0xf << SPCR_RFIC_FIELD)
#define MASK_TFIC_SPCR_BITS (0xf << SPCR_TFIC_FIELD)
#define PCH_CLOCK_HZ 50000000
#define PCH_MAX_SPBR 1023
/* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_SPI 0x802c
#define PCI_DEVICE_ID_ML7223_SPI 0x800F
#define PCI_DEVICE_ID_ML7831_SPI 0x8816
/*
* Maximum number of SPI channel instances per device:
* Intel EG20T PCH : 1ch
* LAPIS Semiconductor ML7213 IOH : 2ch
* LAPIS Semiconductor ML7223 IOH : 1ch
* LAPIS Semiconductor ML7831 IOH : 1ch
*/
#define PCH_SPI_MAX_DEV 2
#define PCH_BUF_SIZE 4096
#define PCH_DMA_TRANS_SIZE 12
static int use_dma = 1;
struct pch_spi_dma_ctrl {
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx;
struct pch_dma_slave param_tx;
struct pch_dma_slave param_rx;
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
struct scatterlist *sg_tx_p;
struct scatterlist *sg_rx_p;
struct scatterlist sg_tx;
struct scatterlist sg_rx;
int nent;
void *tx_buf_virt;
void *rx_buf_virt;
dma_addr_t tx_buf_dma;
dma_addr_t rx_buf_dma;
};
/**
* struct pch_spi_data - Holds the SPI channel specific details
* @io_remap_addr: The remapped PCI base address
* @master: Pointer to the SPI master structure
* @work: Reference to work queue handler
* @wk: Workqueue for carrying out execution of the
* requests
* @wait: Wait queue for waking up upon receiving an
* interrupt.
* @transfer_complete: Status of SPI Transfer
* @bcurrent_msg_processing: Status flag for message processing
* @lock: Lock for protecting this structure
* @queue: SPI Message queue
* @status: Status of the SPI driver
* @bpw_len: Length of data to be transferred in bits per
* word
* @transfer_active: Flag showing active transfer
* @tx_index: Transmit data count; for bookkeeping during
* transfer
* @rx_index: Receive data count; for bookkeeping during
* transfer
* @pkt_tx_buff: Buffer for data to be transmitted
* @pkt_rx_buff: Buffer for received data
* @n_curnt_chip: The chip number that this SPI driver currently
* operates on
* @current_chip: Reference to the current chip that this SPI
* driver currently operates on
* @current_msg: The current message that this SPI driver is
* handling
* @cur_trans: The current transfer that this SPI driver is
* handling
* @board_dat: Reference to the SPI device data structure
* @plat_dev: platform_device structure
* @ch: SPI channel number
* @irq_reg_sts: Status of IRQ registration
*/
struct pch_spi_data {
void __iomem *io_remap_addr;
unsigned long io_base_addr;
struct spi_master *master;
struct work_struct work;
struct workqueue_struct *wk;
wait_queue_head_t wait;
u8 transfer_complete;
u8 bcurrent_msg_processing;
spinlock_t lock;
struct list_head queue;
u8 status;
u32 bpw_len;
u8 transfer_active;
u32 tx_index;
u32 rx_index;
u16 *pkt_tx_buff;
u16 *pkt_rx_buff;
u8 n_curnt_chip;
struct spi_device *current_chip;
struct spi_message *current_msg;
struct spi_transfer *cur_trans;
struct pch_spi_board_data *board_dat;
struct platform_device *plat_dev;
int ch;
struct pch_spi_dma_ctrl dma;
int use_dma;
u8 irq_reg_sts;
int save_total_len;
};
/**
* struct pch_spi_board_data - Holds the SPI device specific details
* @pdev: Pointer to the PCI device
* @suspend_sts: Status of suspend
* @num: The number of SPI device instance
*/
struct pch_spi_board_data {
struct pci_dev *pdev;
u8 suspend_sts;
int num;
};
struct pch_pd_dev_save {
int num;
struct platform_device *pd_save[PCH_SPI_MAX_DEV];
struct pch_spi_board_data *board_dat;
};
static DEFINE_PCI_DEVICE_TABLE(pch_spi_pcidev_id) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
{ }
};
/**
* pch_spi_writereg() - Performs register writes
* @master: Pointer to struct spi_master.
* @idx: Register offset.
* @val: Value to be written to register.
*/
static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
{
struct pch_spi_data *data = spi_master_get_devdata(master);
iowrite32(val, (data->io_remap_addr + idx));
}
/**
* pch_spi_readreg() - Performs register reads
* @master: Pointer to struct spi_master.
* @idx: Register offset.
*/
static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
{
struct pch_spi_data *data = spi_master_get_devdata(master);
return ioread32(data->io_remap_addr + idx);
}
static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
u32 set, u32 clr)
{
u32 tmp = pch_spi_readreg(master, idx);
tmp = (tmp & ~clr) | set;
pch_spi_writereg(master, idx, tmp);
}
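/*
 * pch_spi_setclr_reg() is a read-modify-write helper: bits in @clr are
 * cleared first, then bits in @set are set. For example, the transfer
 * path below uses
 *	pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
 * to enable the SPI core while masking every interrupt source.
 */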
static void pch_spi_set_master_mode(struct spi_master *master)
{
pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
}
/**
* pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
* @master: Pointer to struct spi_master.
*/
static void pch_spi_clear_fifo(struct spi_master *master)
{
pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
}
static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
void __iomem *io_remap_addr)
{
u32 n_read, tx_index, rx_index, bpw_len;
u16 *pkt_rx_buffer, *pkt_tx_buff;
int read_cnt;
u32 reg_spcr_val;
void __iomem *spsr;
void __iomem *spdrr;
void __iomem *spdwr;
spsr = io_remap_addr + PCH_SPSR;
iowrite32(reg_spsr_val, spsr);
if (data->transfer_active) {
rx_index = data->rx_index;
tx_index = data->tx_index;
bpw_len = data->bpw_len;
pkt_rx_buffer = data->pkt_rx_buff;
pkt_tx_buff = data->pkt_tx_buff;
spdrr = io_remap_addr + PCH_SPDRR;
spdwr = io_remap_addr + PCH_SPDWR;
n_read = PCH_READABLE(reg_spsr_val);
for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
pkt_rx_buffer[rx_index++] = ioread32(spdrr);
if (tx_index < bpw_len)
iowrite32(pkt_tx_buff[tx_index++], spdwr);
}
/* disable RFI if not needed */
if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */
/* reset rx threshold */
reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);
iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
}
/* update counts */
data->tx_index = tx_index;
data->rx_index = rx_index;
/* if transfer complete interrupt */
if (reg_spsr_val & SPSR_FI_BIT) {
if ((tx_index == bpw_len) && (rx_index == tx_index)) {
/* disable interrupts */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
PCH_ALL);
/* transfer is completed;
inform pch_spi_process_messages */
data->transfer_complete = true;
data->transfer_active = false;
wake_up(&data->wait);
} else {
dev_err(&data->master->dev,
"%s : Transfer is not completed",
__func__);
}
}
}
}
/**
* pch_spi_handler() - Interrupt handler
* @irq: The interrupt number.
* @dev_id: Pointer to struct pch_spi_board_data.
*/
static irqreturn_t pch_spi_handler(int irq, void *dev_id)
{
u32 reg_spsr_val;
void __iomem *spsr;
void __iomem *io_remap_addr;
irqreturn_t ret = IRQ_NONE;
struct pch_spi_data *data = dev_id;
struct pch_spi_board_data *board_dat = data->board_dat;
if (board_dat->suspend_sts) {
dev_dbg(&board_dat->pdev->dev,
"%s returning due to suspend\n", __func__);
return IRQ_NONE;
}
io_remap_addr = data->io_remap_addr;
spsr = io_remap_addr + PCH_SPSR;
reg_spsr_val = ioread32(spsr);
if (reg_spsr_val & SPSR_ORF_BIT) {
dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
if (data->current_msg->complete != 0) {
data->transfer_complete = true;
data->current_msg->status = -EIO;
data->current_msg->complete(data->current_msg->context);
data->bcurrent_msg_processing = false;
data->current_msg = NULL;
data->cur_trans = NULL;
}
}
if (data->use_dma)
return IRQ_NONE;
/* Check if the interrupt is for SPI device */
if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
ret = IRQ_HANDLED;
}
dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
__func__, ret);
return ret;
}
/**
* pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
* @master: Pointer to struct spi_master.
* @speed_hz: Baud rate.
*/
static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
{
u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);
/* if the requested rate is below what the divider can reach, clamp it */
if (n_spbr > PCH_MAX_SPBR)
n_spbr = PCH_MAX_SPBR;
pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
}
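/*
 * Example (assuming the controller divides the 50 MHz PCH_CLOCK_HZ by
 * 2 * SPBR, as the computation above implies): for speed_hz = 1 MHz,
 * n_spbr = 50000000 / (1000000 * 2) = 25, giving exactly 1 MHz; rates
 * below roughly 24.4 kHz clamp to PCH_MAX_SPBR (1023).
 */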
/**
* pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
* @master: Pointer to struct spi_master.
* @bits_per_word: Bits per word for SPI transfer.
*/
static void pch_spi_set_bits_per_word(struct spi_master *master,
u8 bits_per_word)
{
if (bits_per_word == 8)
pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
else
pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
}
/**
* pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer
* @spi: Pointer to struct spi_device.
*/
static void pch_spi_setup_transfer(struct spi_device *spi)
{
u32 flags = 0;
dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
__func__, pch_spi_readreg(spi->master, PCH_SPBRR),
spi->max_speed_hz);
pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);
/* set bits per word */
pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);
if (!(spi->mode & SPI_LSB_FIRST))
flags |= SPCR_LSBF_BIT;
if (spi->mode & SPI_CPOL)
flags |= SPCR_CPOL_BIT;
if (spi->mode & SPI_CPHA)
flags |= SPCR_CPHA_BIT;
pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
(SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));
/* Clear the FIFO by toggling FICLR to 1 and back to 0 */
pch_spi_clear_fifo(spi->master);
}
/**
* pch_spi_reset() - Clears SPI registers
* @master: Pointer to struct spi_master.
*/
static void pch_spi_reset(struct spi_master *master)
{
/* write 1 to reset SPI */
pch_spi_writereg(master, PCH_SRST, 0x1);
/* clear reset */
pch_spi_writereg(master, PCH_SRST, 0x0);
}
static int pch_spi_setup(struct spi_device *pspi)
{
/* check bits per word */
if (pspi->bits_per_word == 0) {
pspi->bits_per_word = 8;
dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__);
}
if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) {
dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__);
return -EINVAL;
}
/* Check baud rate setting */
/* if the baud rate of the chip is greater than
the max we can support, clamp it */
if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE)
pspi->max_speed_hz = PCH_MAX_BAUDRATE;
dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__,
(pspi->mode) & (SPI_CPOL | SPI_CPHA));
return 0;
}
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
struct spi_transfer *transfer;
struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
int retval;
unsigned long flags;
/* validate spi message and baud rate */
if (unlikely(list_empty(&pmsg->transfers))) {
dev_err(&pspi->dev, "%s list empty\n", __func__);
retval = -EINVAL;
goto err_out;
}
if (unlikely(pspi->max_speed_hz == 0)) {
dev_err(&pspi->dev, "%s pch_spi_transfer maxspeed=%d\n",
__func__, pspi->max_speed_hz);
retval = -EINVAL;
goto err_out;
}
dev_dbg(&pspi->dev, "%s Transfer List not empty. Transfer Speed is set.\n",
__func__);
spin_lock_irqsave(&data->lock, flags);
/* validate Tx/Rx buffers and Transfer length */
list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
if (!transfer->tx_buf && !transfer->rx_buf) {
dev_err(&pspi->dev,
"%s Tx and Rx buffer NULL\n", __func__);
retval = -EINVAL;
goto err_return_spinlock;
}
if (!transfer->len) {
dev_err(&pspi->dev, "%s Transfer length invalid\n",
__func__);
retval = -EINVAL;
goto err_return_spinlock;
}
dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length valid\n",
__func__);
/* if baud rate has been specified validate the same */
if (transfer->speed_hz > PCH_MAX_BAUDRATE)
transfer->speed_hz = PCH_MAX_BAUDRATE;
/* if bits per word has been specified validate the same */
if (transfer->bits_per_word) {
if ((transfer->bits_per_word != 8)
&& (transfer->bits_per_word != 16)) {
retval = -EINVAL;
dev_err(&pspi->dev,
"%s Invalid bits per word\n", __func__);
goto err_return_spinlock;
}
}
}
spin_unlock_irqrestore(&data->lock, flags);
/* We won't process any messages if we have been asked to terminate */
if (data->status == STATUS_EXITING) {
dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
retval = -ESHUTDOWN;
goto err_out;
}
/* If suspended, return -EINVAL */
if (data->board_dat->suspend_sts) {
dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
retval = -EINVAL;
goto err_out;
}
/* set status of message */
pmsg->actual_length = 0;
dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);
pmsg->status = -EINPROGRESS;
spin_lock_irqsave(&data->lock, flags);
/* add message to queue */
list_add_tail(&pmsg->queue, &data->queue);
spin_unlock_irqrestore(&data->lock, flags);
dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
/* schedule work queue to run */
queue_work(data->wk, &data->work);
dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);
retval = 0;
err_out:
dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
return retval;
err_return_spinlock:
dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
spin_unlock_irqrestore(&data->lock, flags);
return retval;
}
static inline void pch_spi_select_chip(struct pch_spi_data *data,
struct spi_device *pspi)
{
if (data->current_chip != NULL) {
if (pspi->chip_select != data->n_curnt_chip) {
dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
data->current_chip = NULL;
}
}
data->current_chip = pspi;
data->n_curnt_chip = data->current_chip->chip_select;
dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
pch_spi_setup_transfer(pspi);
}
static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
{
int size;
u32 n_writes;
int j;
struct spi_message *pmsg;
const u8 *tx_buf;
const u16 *tx_sbuf;
/* set baud rate if needed */
if (data->cur_trans->speed_hz) {
dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
}
/* set bits per word if needed */
if (data->cur_trans->bits_per_word &&
(data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
pch_spi_set_bits_per_word(data->master,
data->cur_trans->bits_per_word);
*bpw = data->cur_trans->bits_per_word;
} else {
*bpw = data->current_msg->spi->bits_per_word;
}
/* reset Tx/Rx index */
data->tx_index = 0;
data->rx_index = 0;
data->bpw_len = data->cur_trans->len / (*bpw / 8);
/* find alloc size */
size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);
/* allocate memory for pkt_tx_buff & pkt_rx_buffer */
data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
if (data->pkt_tx_buff != NULL) {
data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
if (!data->pkt_rx_buff)
kfree(data->pkt_tx_buff);
}
if (!data->pkt_rx_buff) {
/* flush queue and set status of all transfers to -ENOMEM */
dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__);
list_for_each_entry(pmsg, data->queue.next, queue) {
pmsg->status = -ENOMEM;
if (pmsg->complete != 0)
pmsg->complete(pmsg->context);
/* delete from queue */
list_del_init(&pmsg->queue);
}
return;
}
/* copy Tx Data */
if (data->cur_trans->tx_buf != NULL) {
if (*bpw == 8) {
tx_buf = data->cur_trans->tx_buf;
for (j = 0; j < data->bpw_len; j++)
data->pkt_tx_buff[j] = *tx_buf++;
} else {
tx_sbuf = data->cur_trans->tx_buf;
for (j = 0; j < data->bpw_len; j++)
data->pkt_tx_buff[j] = *tx_sbuf++;
}
}
/* if len is greater than PCH_MAX_FIFO_DEPTH, write 16, else len bytes */
n_writes = data->bpw_len;
if (n_writes > PCH_MAX_FIFO_DEPTH)
n_writes = PCH_MAX_FIFO_DEPTH;
dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
"0x2 to SSNXCR\n", __func__);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
for (j = 0; j < n_writes; j++)
pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);
/* update tx_index */
data->tx_index = j;
/* reset transfer complete flag */
data->transfer_complete = false;
data->transfer_active = true;
}
static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
struct spi_message *pmsg;
dev_dbg(&data->master->dev, "%s called\n", __func__);
/* Invoke complete callback
* [To the spi core..indicating end of transfer] */
data->current_msg->status = 0;
if (data->current_msg->complete != 0) {
dev_dbg(&data->master->dev,
"%s:Invoking callback of SPI core\n", __func__);
data->current_msg->complete(data->current_msg->context);
}
/* update status in global variable */
data->bcurrent_msg_processing = false;
dev_dbg(&data->master->dev,
"%s:data->bcurrent_msg_processing = false\n", __func__);
data->current_msg = NULL;
data->cur_trans = NULL;
/* check if we have items in the list and are not suspending
* (list_empty() returns 1 if the list is empty) */
if ((list_empty(&data->queue) == 0) &&
(!data->board_dat->suspend_sts) &&
(data->status != STATUS_EXITING)) {
/* We have some more work to do (either there are more transfer
* requests in the current message or there are more messages)
*/
dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
queue_work(data->wk, &data->work);
} else if (data->board_dat->suspend_sts ||
data->status == STATUS_EXITING) {
dev_dbg(&data->master->dev,
"%s suspend/remove initiated, flushing queue\n",
__func__);
list_for_each_entry(pmsg, data->queue.next, queue) {
pmsg->status = -EIO;
if (pmsg->complete)
pmsg->complete(pmsg->context);
/* delete from queue */
list_del_init(&pmsg->queue);
}
}
}
static void pch_spi_set_ir(struct pch_spi_data *data)
{
/* enable interrupts, set threshold, enable SPI */
if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
/* set receive threshold to PCH_RX_THOLD */
pch_spi_setclr_reg(data->master, PCH_SPCR,
PCH_RX_THOLD << SPCR_RFIC_FIELD |
SPCR_FIE_BIT | SPCR_RFIE_BIT |
SPCR_ORIE_BIT | SPCR_SPE_BIT,
MASK_RFIC_SPCR_BITS | PCH_ALL);
else
/* set receive threshold to maximum */
pch_spi_setclr_reg(data->master, PCH_SPCR,
PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
SPCR_FIE_BIT | SPCR_ORIE_BIT |
SPCR_SPE_BIT,
MASK_RFIC_SPCR_BITS | PCH_ALL);
/* Wait until the transfer completes; go to sleep after
initiating the transfer. */
dev_dbg(&data->master->dev,
"%s:waiting for transfer to get over\n", __func__);
wait_event_interruptible(data->wait, data->transfer_complete);
/* clear all interrupts */
pch_spi_writereg(data->master, PCH_SPSR,
pch_spi_readreg(data->master, PCH_SPSR));
/* Disable interrupts and SPI transfer */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
/* clear FIFO */
pch_spi_clear_fifo(data->master);
}
static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
{
int j;
u8 *rx_buf;
u16 *rx_sbuf;
/* copy Rx Data */
if (!data->cur_trans->rx_buf)
return;
if (bpw == 8) {
rx_buf = data->cur_trans->rx_buf;
for (j = 0; j < data->bpw_len; j++)
*rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
} else {
rx_sbuf = data->cur_trans->rx_buf;
for (j = 0; j < data->bpw_len; j++)
*rx_sbuf++ = data->pkt_rx_buff[j];
}
}
static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
{
int j;
u8 *rx_buf;
u16 *rx_sbuf;
const u8 *rx_dma_buf;
const u16 *rx_dma_sbuf;
/* copy Rx Data */
if (!data->cur_trans->rx_buf)
return;
if (bpw == 8) {
rx_buf = data->cur_trans->rx_buf;
rx_dma_buf = data->dma.rx_buf_virt;
for (j = 0; j < data->bpw_len; j++)
*rx_buf++ = *rx_dma_buf++ & 0xFF;
data->cur_trans->rx_buf = rx_buf;
} else {
rx_sbuf = data->cur_trans->rx_buf;
rx_dma_sbuf = data->dma.rx_buf_virt;
for (j = 0; j < data->bpw_len; j++)
*rx_sbuf++ = *rx_dma_sbuf++;
data->cur_trans->rx_buf = rx_sbuf;
}
}
static int pch_spi_start_transfer(struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
unsigned long flags;
int rtn;
dma = &data->dma;
spin_lock_irqsave(&data->lock, flags);
/* disable interrupts, SPI set enable */
pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
spin_unlock_irqrestore(&data->lock, flags);
/* Wait until the transfer completes; go to sleep after
initiating the transfer. */
dev_dbg(&data->master->dev,
"%s:waiting for transfer to get over\n", __func__);
rtn = wait_event_interruptible_timeout(data->wait,
data->transfer_complete,
msecs_to_jiffies(2000)); /* 2 s timeout */
if (!rtn)
dev_err(&data->master->dev,
"%s wait-event timeout\n", __func__);
dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
DMA_FROM_DEVICE);
dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
DMA_FROM_DEVICE);
memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
async_tx_ack(dma->desc_rx);
async_tx_ack(dma->desc_tx);
kfree(dma->sg_tx_p);
kfree(dma->sg_rx_p);
spin_lock_irqsave(&data->lock, flags);
/* clear fifo threshold, disable interrupts, disable SPI transfer */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
SPCR_SPE_BIT);
/* clear all interrupts */
pch_spi_writereg(data->master, PCH_SPSR,
pch_spi_readreg(data->master, PCH_SPSR));
/* clear FIFO */
pch_spi_clear_fifo(data->master);
spin_unlock_irqrestore(&data->lock, flags);
return rtn;
}
static void pch_dma_rx_complete(void *arg)
{
struct pch_spi_data *data = arg;
/* transfer is complete; wake the waiter in pch_spi_start_transfer() */
data->transfer_complete = true;
wake_up_interruptible(&data->wait);
}
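/*
 * dmaengine filter callback: claim a channel only if both the channel id
 * and the owning DMA device match the requested pch_dma_slave; the slave
 * parameters are handed to the pch_dma driver via chan->private.
 */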
static bool pch_spi_filter(struct dma_chan *chan, void *slave)
{
struct pch_dma_slave *param = slave;
if ((chan->chan_id == param->chan_id) &&
(param->dma_dev == chan->device->dev)) {
chan->private = param;
return true;
} else {
return false;
}
}
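/*
 * pch_spi_request_dma() - claim the Tx/Rx channels of the companion
 * pch_dma controller (PCI function 12.0 on the same bus). On any failure
 * use_dma is cleared so the driver falls back to PIO transfers.
 */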
static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
{
dma_cap_mask_t mask;
struct dma_chan *chan;
struct pci_dev *dma_dev;
struct pch_dma_slave *param;
struct pch_spi_dma_ctrl *dma;
unsigned int width;
if (bpw == 8)
width = PCH_DMA_WIDTH_1_BYTE;
else
width = PCH_DMA_WIDTH_2_BYTES;
dma = &data->dma;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/* Get DMA's dev information */
dma_dev = pci_get_bus_and_slot(data->board_dat->pdev->bus->number,
PCI_DEVFN(12, 0));
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
if (!chan) {
dev_err(&data->master->dev,
"ERROR: dma_request_channel FAILS(Tx)\n");
data->use_dma = 0;
return;
}
dma->chan_tx = chan;
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
if (!chan) {
dev_err(&data->master->dev,
"ERROR: dma_request_channel FAILS(Rx)\n");
dma_release_channel(dma->chan_tx);
dma->chan_tx = NULL;
data->use_dma = 0;
return;
}
dma->chan_rx = chan;
}
static void pch_spi_release_dma(struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
dma = &data->dma;
if (dma->chan_tx) {
dma_release_channel(dma->chan_tx);
dma->chan_tx = NULL;
}
if (dma->chan_rx) {
dma_release_channel(dma->chan_rx);
dma->chan_rx = NULL;
}
return;
}
static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
{
const u8 *tx_buf;
const u16 *tx_sbuf;
u8 *tx_dma_buf;
u16 *tx_dma_sbuf;
struct scatterlist *sg;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx;
int num;
int i;
int size;
int rem;
int head;
unsigned long flags;
struct pch_spi_dma_ctrl *dma;
dma = &data->dma;
/* set baud rate if needed */
if (data->cur_trans->speed_hz) {
dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
spin_lock_irqsave(&data->lock, flags);
pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
spin_unlock_irqrestore(&data->lock, flags);
}
/* set bits per word if needed */
if (data->cur_trans->bits_per_word &&
(data->current_msg->spi->bits_per_word !=
data->cur_trans->bits_per_word)) {
dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
spin_lock_irqsave(&data->lock, flags);
pch_spi_set_bits_per_word(data->master,
data->cur_trans->bits_per_word);
spin_unlock_irqrestore(&data->lock, flags);
*bpw = data->cur_trans->bits_per_word;
} else {
*bpw = data->current_msg->spi->bits_per_word;
}
data->bpw_len = data->cur_trans->len / (*bpw / 8);
if (data->bpw_len > PCH_BUF_SIZE) {
data->bpw_len = PCH_BUF_SIZE;
data->cur_trans->len -= PCH_BUF_SIZE;
}
/* copy Tx Data */
if (data->cur_trans->tx_buf != NULL) {
if (*bpw == 8) {
tx_buf = data->cur_trans->tx_buf;
tx_dma_buf = dma->tx_buf_virt;
for (i = 0; i < data->bpw_len; i++)
*tx_dma_buf++ = *tx_buf++;
} else {
tx_sbuf = data->cur_trans->tx_buf;
tx_dma_sbuf = dma->tx_buf_virt;
for (i = 0; i < data->bpw_len; i++)
*tx_dma_sbuf++ = *tx_sbuf++;
}
}
/* Calculate Rx chunking for the DMA receive scatterlist */
if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
} else {
num = data->bpw_len / PCH_DMA_TRANS_SIZE;
rem = PCH_DMA_TRANS_SIZE;
}
size = PCH_DMA_TRANS_SIZE;
} else {
num = 1;
size = data->bpw_len;
rem = data->bpw_len;
}
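/*
 * Illustrative example (assuming PCH_DMA_TRANS_SIZE were 4096): for
 * bpw_len == 10000 this gives num = 3, size = 4096, rem = 1808, i.e.
 * two full chunks plus one remainder chunk; an exact multiple keeps
 * rem equal to a full chunk so every entry is PCH_DMA_TRANS_SIZE long.
 */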
dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
__func__, num, size, rem);
spin_lock_irqsave(&data->lock, flags);
/* set receive fifo threshold and transmit fifo threshold */
pch_spi_setclr_reg(data->master, PCH_SPCR,
((size - 1) << SPCR_RFIC_FIELD) |
(PCH_TX_THOLD << SPCR_TFIC_FIELD),
MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
spin_unlock_irqrestore(&data->lock, flags);
/* RX */
dma->sg_rx_p = kcalloc(num, sizeof(struct scatterlist), GFP_ATOMIC);
if (!dma->sg_rx_p)
return;
sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
/* offset, length setting */
sg = dma->sg_rx_p;
for (i = 0; i < num; i++, sg++) {
if (i == (num - 2)) {
sg->offset = size * i;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
sg->offset);
sg_dma_len(sg) = rem;
} else if (i == (num - 1)) {
sg->offset = size * (i - 1) + rem;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
sg->offset);
sg_dma_len(sg) = size;
} else {
sg->offset = size * i;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
sg->offset);
sg_dma_len(sg) = size;
}
sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
}
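/*
 * Note the layout above: the remainder-sized chunk is mapped at index
 * num - 2 and a full-size chunk is mapped last; offsets are scaled from
 * sample words to bytes via (*bpw / 8) before being added to the DMA
 * address.
 */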
sg = dma->sg_rx_p;
desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
num, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
__func__);
return;
}
dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
desc_rx->callback = pch_dma_rx_complete;
desc_rx->callback_param = data;
dma->nent = num;
dma->desc_rx = desc_rx;
/* Calculate Tx chunking for the DMA transmit scatterlist */
if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
} else {
num = data->bpw_len / PCH_DMA_TRANS_SIZE;
rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
PCH_DMA_TRANS_SIZE - head;
}
size = PCH_DMA_TRANS_SIZE;
} else {
num = 1;
size = data->bpw_len;
rem = data->bpw_len;
head = 0;
}
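/*
 * 'head' extends the first Tx chunk by PCH_MAX_FIFO_DEPTH -
 * PCH_DMA_TRANS_SIZE words, apparently so the initial DMA burst can fill
 * the whole hardware FIFO; the remainder chunk is shrunk by the same
 * amount, keeping the total length equal to bpw_len.
 */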
dma->sg_tx_p = kcalloc(num, sizeof(struct scatterlist), GFP_ATOMIC);
if (!dma->sg_tx_p)
return;
sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
/* offset, length setting */
sg = dma->sg_tx_p;
for (i = 0; i < num; i++, sg++) {
if (i == 0) {
sg->offset = 0;
sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
sg->offset);
sg_dma_len(sg) = size + head;
} else if (i == (num - 1)) {
sg->offset = head + size * i;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
sg->offset);
sg_dma_len(sg) = rem;
} else {
sg->offset = head + size * i;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
sg->offset);
sg_dma_len(sg) = size;
}
sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
}
sg = dma->sg_tx_p;
desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
sg, num, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
__func__);
return;
}
dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
desc_tx->callback = NULL;
desc_tx->callback_param = data;
dma->nent = num;
dma->desc_tx = desc_tx;
dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
"0x2 to SSNXCR\n", __func__);
spin_lock_irqsave(&data->lock, flags);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
desc_rx->tx_submit(desc_rx);
desc_tx->tx_submit(desc_tx);
spin_unlock_irqrestore(&data->lock, flags);
/* reset transfer complete flag */
data->transfer_complete = false;
}
static void pch_spi_process_messages(struct work_struct *pwork)
{
struct spi_message *pmsg;
struct pch_spi_data *data;
int bpw;
data = container_of(pwork, struct pch_spi_data, work);
dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
spin_lock(&data->lock);
/* check if suspend has been initiated; if yes, flush the queue */
if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
dev_dbg(&data->master->dev, "%s suspend/remove initiated,"
"flushing queue\n", __func__);
list_for_each_entry(pmsg, data->queue.next, queue) {
pmsg->status = -EIO;
if (pmsg->complete != 0) {
spin_unlock(&data->lock);
pmsg->complete(pmsg->context);
spin_lock(&data->lock);
}
/* delete from queue */
list_del_init(&pmsg->queue);
}
spin_unlock(&data->lock);
return;
}
data->bcurrent_msg_processing = true;
dev_dbg(&data->master->dev,
"%s Set data->bcurrent_msg_processing= true\n", __func__);
/* Get the message from the queue and delete it from there. */
data->current_msg = list_entry(data->queue.next, struct spi_message,
queue);
list_del_init(&data->current_msg->queue);
data->current_msg->status = 0;
pch_spi_select_chip(data, data->current_msg->spi);
spin_unlock(&data->lock);
if (data->use_dma)
pch_spi_request_dma(data,
data->current_msg->spi->bits_per_word);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
do {
int cnt;
/* If we are already processing a message get the next
transfer structure from the message otherwise retrieve
the 1st transfer request from the message. */
spin_lock(&data->lock);
if (data->cur_trans == NULL) {
data->cur_trans =
list_entry(data->current_msg->transfers.next,
struct spi_transfer, transfer_list);
dev_dbg(&data->master->dev, "%s "
":Getting 1st transfer message\n", __func__);
} else {
data->cur_trans =
list_entry(data->cur_trans->transfer_list.next,
struct spi_transfer, transfer_list);
dev_dbg(&data->master->dev, "%s "
":Getting next transfer message\n", __func__);
}
spin_unlock(&data->lock);
if (!data->cur_trans->len)
goto out;
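/* number of PCH_BUF_SIZE-sized DMA passes for this transfer (ceiling division) */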
cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
data->save_total_len = data->cur_trans->len;
if (data->use_dma) {
int i;
char *save_rx_buf = data->cur_trans->rx_buf;
for (i = 0; i < cnt; i++) {
pch_spi_handle_dma(data, &bpw);
if (!pch_spi_start_transfer(data)) {
data->transfer_complete = true;
data->current_msg->status = -EIO;
data->current_msg->complete
(data->current_msg->context);
data->bcurrent_msg_processing = false;
data->current_msg = NULL;
data->cur_trans = NULL;
goto out;
}
pch_spi_copy_rx_data_for_dma(data, bpw);
}
data->cur_trans->rx_buf = save_rx_buf;
} else {
pch_spi_set_tx(data, &bpw);
pch_spi_set_ir(data);
pch_spi_copy_rx_data(data, bpw);
kfree(data->pkt_rx_buff);
data->pkt_rx_buff = NULL;
kfree(data->pkt_tx_buff);
data->pkt_tx_buff = NULL;
}
/* restore the transfer length and account it in the message */
data->cur_trans->len = data->save_total_len;
data->current_msg->actual_length += data->cur_trans->len;
dev_dbg(&data->master->dev,
"%s:data->current_msg->actual_length=%d\n",
__func__, data->current_msg->actual_length);
/* check for delay */
if (data->cur_trans->delay_usecs) {
dev_dbg(&data->master->dev, "%s:"
"delay in usec=%d\n", __func__,
data->cur_trans->delay_usecs);
udelay(data->cur_trans->delay_usecs);
}
spin_lock(&data->lock);
/* No more transfer in this message. */
if ((data->cur_trans->transfer_list.next) ==
&(data->current_msg->transfers)) {
pch_spi_nomore_transfer(data);
}
spin_unlock(&data->lock);
} while (data->cur_trans != NULL);
out:
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
if (data->use_dma)
pch_spi_release_dma(data);
}
static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
/* free workqueue */
if (data->wk != NULL) {
destroy_workqueue(data->wk);
data->wk = NULL;
dev_dbg(&board_dat->pdev->dev,
"%s destroy_workqueue invoked successfully\n",
__func__);
}
}
static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
int retval = 0;
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
/* create workqueue */
data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
if (!data->wk) {
dev_err(&board_dat->pdev->dev,
"%s create_singlet hread_workqueue failed\n", __func__);
retval = -EBUSY;
goto err_return;
}
/* reset PCH SPI h/w */
pch_spi_reset(data->master);
dev_dbg(&board_dat->pdev->dev,
"%s pch_spi_reset invoked successfully\n", __func__);
dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
err_return:
if (retval != 0) {
dev_err(&board_dat->pdev->dev,
"%s FAIL:invoking pch_spi_free_resources\n", __func__);
pch_spi_free_resources(board_dat, data);
}
dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
return retval;
}
static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
dma = &data->dma;
if (dma->tx_buf_dma)
dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
dma->tx_buf_virt, dma->tx_buf_dma);
if (dma->rx_buf_dma)
dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
dma->rx_buf_virt, dma->rx_buf_dma);
return;
}
static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
dma = &data->dma;
/* Get Consistent memory for Tx DMA */
dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
/* Get Consistent memory for Rx DMA */
dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
/* fall back to PIO if either coherent buffer could not be allocated */
if (!dma->tx_buf_virt || !dma->rx_buf_virt)
data->use_dma = 0;
}
static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
{
int ret;
struct spi_master *master;
struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
struct pch_spi_data *data;
dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
master = spi_alloc_master(&board_dat->pdev->dev,
sizeof(struct pch_spi_data));
if (!master) {
dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
plat_dev->id);
return -ENOMEM;
}
data = spi_master_get_devdata(master);
data->master = master;
platform_set_drvdata(plat_dev, data);
/* base address + address offset */
data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
PCH_ADDRESS_SIZE * plat_dev->id;
data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
if (!data->io_remap_addr) {
dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
ret = -ENOMEM;
goto err_pci_iomap;
}
/* apply the per-channel offset only after the NULL check */
data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;
dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
plat_dev->id, data->io_remap_addr);
/* initialize members of SPI master */
master->bus_num = -1;
master->num_chipselect = PCH_MAX_CS;
master->setup = pch_spi_setup;
master->transfer = pch_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
data->board_dat = board_dat;
data->plat_dev = plat_dev;
data->n_curnt_chip = 255;
data->status = STATUS_RUNNING;
data->ch = plat_dev->id;
data->use_dma = use_dma;
INIT_LIST_HEAD(&data->queue);
spin_lock_init(&data->lock);
INIT_WORK(&data->work, pch_spi_process_messages);
init_waitqueue_head(&data->wait);
ret = pch_spi_get_resources(board_dat, data);
if (ret) {
dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
goto err_spi_get_resources;
}
ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
IRQF_SHARED, KBUILD_MODNAME, data);
if (ret) {
dev_err(&plat_dev->dev,
"%s request_irq failed\n", __func__);
goto err_request_irq;
}
data->irq_reg_sts = true;
pch_spi_set_master_mode(master);
ret = spi_register_master(master);
if (ret != 0) {
dev_err(&plat_dev->dev,
"%s spi_register_master FAILED\n", __func__);
goto err_spi_register_master;
}
if (use_dma) {
dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
pch_alloc_dma_buf(board_dat, data);
}
return 0;
err_spi_register_master:
free_irq(board_dat->pdev->irq, data); /* dev_id must match request_irq() */
err_request_irq:
pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
pci_iounmap(board_dat->pdev, data->io_remap_addr);
err_pci_iomap:
spi_master_put(master);
return ret;
}
static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev)
{
struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
struct pch_spi_data *data = platform_get_drvdata(plat_dev);
int count;
unsigned long flags;
dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
__func__, plat_dev->id, board_dat->pdev->irq);
if (use_dma)
pch_free_dma_buf(board_dat, data);
/* wait for any pending messages to drain; if the queue is still
* non-empty after the retries below, unload anyway */
count = 500;
spin_lock_irqsave(&data->lock, flags);
data->status = STATUS_EXITING;
while (!list_empty(&data->queue) && --count) {
dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
__func__);
spin_unlock_irqrestore(&data->lock, flags);
msleep(PCH_SLEEP_TIME);
spin_lock_irqsave(&data->lock, flags);
}
spin_unlock_irqrestore(&data->lock, flags);
pch_spi_free_resources(board_dat, data);
/* disable interrupts & free IRQ */
if (data->irq_reg_sts) {
/* disable interrupts */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
data->irq_reg_sts = false;
free_irq(board_dat->pdev->irq, data);
}
pci_iounmap(board_dat->pdev, data->io_remap_addr);
spi_unregister_master(data->master);
spi_master_put(data->master);
platform_set_drvdata(plat_dev, NULL);
return 0;
}
#ifdef CONFIG_PM
static int pch_spi_pd_suspend(struct platform_device *pd_dev,
pm_message_t state)
{
u8 count;
struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
struct pch_spi_data *data = platform_get_drvdata(pd_dev);
dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);
if (!board_dat) {
dev_err(&pd_dev->dev,
"%s pci_get_drvdata returned NULL\n", __func__);
return -EFAULT;
}
/* check if the current message is processed:
only after that is done will the transfer be suspended */
count = 255;
while ((--count) > 0) {
if (!(data->bcurrent_msg_processing))
break;
msleep(PCH_SLEEP_TIME);
}
/* Free IRQ */
if (data->irq_reg_sts) {
/* disable all interrupts */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
pch_spi_reset(data->master);
free_irq(board_dat->pdev->irq, data);
data->irq_reg_sts = false;
dev_dbg(&pd_dev->dev,
"%s free_irq invoked successfully.\n", __func__);
}
return 0;
}
static int pch_spi_pd_resume(struct platform_device *pd_dev)
{
struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
struct pch_spi_data *data = platform_get_drvdata(pd_dev);
int retval;
if (!board_dat) {
dev_err(&pd_dev->dev,
"%s pci_get_drvdata returned NULL\n", __func__);
return -EFAULT;
}
if (!data->irq_reg_sts) {
/* register IRQ */
retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
IRQF_SHARED, KBUILD_MODNAME, data);
if (retval < 0) {
dev_err(&pd_dev->dev,
"%s request_irq failed\n", __func__);
return retval;
}
/* reset PCH SPI h/w */
pch_spi_reset(data->master);
pch_spi_set_master_mode(data->master);
data->irq_reg_sts = true;
}
return 0;
}
#else
#define pch_spi_pd_suspend NULL
#define pch_spi_pd_resume NULL
#endif
static struct platform_driver pch_spi_pd_driver = {
.driver = {
.name = "pch-spi",
.owner = THIS_MODULE,
},
.probe = pch_spi_pd_probe,
.remove = __devexit_p(pch_spi_pd_remove),
.suspend = pch_spi_pd_suspend,
.resume = pch_spi_pd_resume
};
static int __devinit pch_spi_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct pch_spi_board_data *board_dat;
struct platform_device *pd_dev = NULL;
int retval;
int i;
struct pch_pd_dev_save *pd_dev_save;
pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
if (!pd_dev_save) {
dev_err(&pdev->dev, "%s Can't allocate pd_dev_sav\n", __func__);
return -ENOMEM;
}
board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
if (!board_dat) {
dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__);
retval = -ENOMEM;
goto err_no_mem;
}
retval = pci_request_regions(pdev, KBUILD_MODNAME);
if (retval) {
dev_err(&pdev->dev, "%s request_region failed\n", __func__);
goto pci_request_regions;
}
board_dat->pdev = pdev;
board_dat->num = id->driver_data;
pd_dev_save->num = id->driver_data;
pd_dev_save->board_dat = board_dat;
retval = pci_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
goto pci_enable_device;
}
for (i = 0; i < board_dat->num; i++) {
pd_dev = platform_device_alloc("pch-spi", i);
if (!pd_dev) {
dev_err(&pdev->dev, "platform_device_alloc failed\n");
retval = -ENOMEM;
goto err_platform_device;
}
pd_dev_save->pd_save[i] = pd_dev;
pd_dev->dev.parent = &pdev->dev;
retval = platform_device_add_data(pd_dev, board_dat,
sizeof(*board_dat));
if (retval) {
dev_err(&pdev->dev,
"platform_device_add_data failed\n");
platform_device_put(pd_dev);
goto err_platform_device;
}
retval = platform_device_add(pd_dev);
if (retval) {
dev_err(&pdev->dev, "platform_device_add failed\n");
platform_device_put(pd_dev);
goto err_platform_device;
}
}
pci_set_drvdata(pdev, pd_dev_save);
return 0;
err_platform_device:
pci_disable_device(pdev);
pci_enable_device:
pci_release_regions(pdev);
pci_request_regions:
kfree(board_dat);
err_no_mem:
kfree(pd_dev_save);
return retval;
}
static void __devexit pch_spi_remove(struct pci_dev *pdev)
{
int i;
struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);
for (i = 0; i < pd_dev_save->num; i++)
platform_device_unregister(pd_dev_save->pd_save[i]);
pci_disable_device(pdev);
pci_release_regions(pdev);
kfree(pd_dev_save->board_dat);
kfree(pd_dev_save);
}
#ifdef CONFIG_PM
static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
{
int retval;
struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
pd_dev_save->board_dat->suspend_sts = true;
/* save config space */
retval = pci_save_state(pdev);
if (retval == 0) {
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
} else {
dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
}
return retval;
}
static int pch_spi_resume(struct pci_dev *pdev)
{
int retval;
struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
retval = pci_enable_device(pdev);
if (retval < 0) {
dev_err(&pdev->dev,
"%s pci_enable_device failed\n", __func__);
} else {
pci_enable_wake(pdev, PCI_D3hot, 0);
/* set suspend status to false */
pd_dev_save->board_dat->suspend_sts = false;
}
return retval;
}
#else
#define pch_spi_suspend NULL
#define pch_spi_resume NULL
#endif
static struct pci_driver pch_spi_pcidev_driver = {
.name = "pch_spi",
.id_table = pch_spi_pcidev_id,
.probe = pch_spi_probe,
.remove = pch_spi_remove,
.suspend = pch_spi_suspend,
.resume = pch_spi_resume,
};
static int __init pch_spi_init(void)
{
int ret;
ret = platform_driver_register(&pch_spi_pd_driver);
if (ret)
return ret;
ret = pci_register_driver(&pch_spi_pcidev_driver);
if (ret) {
platform_driver_unregister(&pch_spi_pd_driver);
return ret;
}
return 0;
}
module_init(pch_spi_init);
static void __exit pch_spi_exit(void)
{
pci_unregister_driver(&pch_spi_pcidev_driver);
platform_driver_unregister(&pch_spi_pd_driver);
}
module_exit(pch_spi_exit);
module_param(use_dma, int, 0644);
MODULE_PARM_DESC(use_dma,
"Use DMA for data transfers: 1 to enable (default), 0 to disable");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
| gpl-2.0 |
andrsbub/linux | drivers/input/input-compat.c | 8899 | 3372 | /*
* 32bit compatibility wrappers for the input subsystem.
*
* Very heavily based on evdev.c - Copyright (c) 1999-2002 Vojtech Pavlik
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/export.h>
#include <asm/uaccess.h>
#include "input-compat.h"
#ifdef CONFIG_COMPAT
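/*
 * With a 32-bit userland on a 64-bit kernel (and no 64-bit time in the
 * compat ABI), struct input_event has a different layout in userspace,
 * so the handlers below convert each field explicitly instead of doing
 * a single bulk copy.
 */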
int input_event_from_user(const char __user *buffer,
struct input_event *event)
{
if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
if (copy_from_user(&compat_event, buffer,
sizeof(struct input_event_compat)))
return -EFAULT;
event->time.tv_sec = compat_event.time.tv_sec;
event->time.tv_usec = compat_event.time.tv_usec;
event->type = compat_event.type;
event->code = compat_event.code;
event->value = compat_event.value;
} else {
if (copy_from_user(event, buffer, sizeof(struct input_event)))
return -EFAULT;
}
return 0;
}
int input_event_to_user(char __user *buffer,
const struct input_event *event)
{
if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
compat_event.time.tv_sec = event->time.tv_sec;
compat_event.time.tv_usec = event->time.tv_usec;
compat_event.type = event->type;
compat_event.code = event->code;
compat_event.value = event->value;
if (copy_to_user(buffer, &compat_event,
sizeof(struct input_event_compat)))
return -EFAULT;
} else {
if (copy_to_user(buffer, event, sizeof(struct input_event)))
return -EFAULT;
}
return 0;
}
int input_ff_effect_from_user(const char __user *buffer, size_t size,
struct ff_effect *effect)
{
if (INPUT_COMPAT_TEST) {
struct ff_effect_compat *compat_effect;
if (size != sizeof(struct ff_effect_compat))
return -EINVAL;
/*
* It so happens that the pointer which needs to be changed
* is the last field in the structure, so we can retrieve the
* whole thing and replace just the pointer.
*/
compat_effect = (struct ff_effect_compat *)effect;
if (copy_from_user(compat_effect, buffer,
sizeof(struct ff_effect_compat)))
return -EFAULT;
if (compat_effect->type == FF_PERIODIC &&
compat_effect->u.periodic.waveform == FF_CUSTOM)
effect->u.periodic.custom_data =
compat_ptr(compat_effect->u.periodic.custom_data);
} else {
if (size != sizeof(struct ff_effect))
return -EINVAL;
if (copy_from_user(effect, buffer, sizeof(struct ff_effect)))
return -EFAULT;
}
return 0;
}
#else
int input_event_from_user(const char __user *buffer,
struct input_event *event)
{
if (copy_from_user(event, buffer, sizeof(struct input_event)))
return -EFAULT;
return 0;
}
int input_event_to_user(char __user *buffer,
const struct input_event *event)
{
if (copy_to_user(buffer, event, sizeof(struct input_event)))
return -EFAULT;
return 0;
}
int input_ff_effect_from_user(const char __user *buffer, size_t size,
struct ff_effect *effect)
{
if (size != sizeof(struct ff_effect))
return -EINVAL;
if (copy_from_user(effect, buffer, sizeof(struct ff_effect)))
return -EFAULT;
return 0;
}
#endif /* CONFIG_COMPAT */
EXPORT_SYMBOL_GPL(input_event_from_user);
EXPORT_SYMBOL_GPL(input_event_to_user);
EXPORT_SYMBOL_GPL(input_ff_effect_from_user);
| gpl-2.0 |
HRTKernel/Hacker_Kernel_SM-G92X | fs/nls/nls_ascii.c | 12227 | 5874 | /*
* linux/fs/nls/nls_ascii.c
*
* Charset ascii translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
static const wchar_t charset2uni[256] = {
/* 0x00*/
0x0000, 0x0001, 0x0002, 0x0003,
0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b,
0x000c, 0x000d, 0x000e, 0x000f,
/* 0x10*/
0x0010, 0x0011, 0x0012, 0x0013,
0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b,
0x001c, 0x001d, 0x001e, 0x001f,
/* 0x20*/
0x0020, 0x0021, 0x0022, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x002d, 0x002e, 0x002f,
/* 0x30*/
0x0030, 0x0031, 0x0032, 0x0033,
0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b,
0x003c, 0x003d, 0x003e, 0x003f,
/* 0x40*/
0x0040, 0x0041, 0x0042, 0x0043,
0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b,
0x004c, 0x004d, 0x004e, 0x004f,
/* 0x50*/
0x0050, 0x0051, 0x0052, 0x0053,
0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f,
/* 0x60*/
0x0060, 0x0061, 0x0062, 0x0063,
0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b,
0x006c, 0x006d, 0x006e, 0x006f,
/* 0x70*/
0x0070, 0x0071, 0x0072, 0x0073,
0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b,
0x007c, 0x007d, 0x007e, 0x007f,
};
static const unsigned char page00[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
};
static const unsigned char *const page_uni2charset[256] = {
page00,
};
static const unsigned char charset2lower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
};
static const unsigned char charset2upper[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
};
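/*
 * Only page 0x00 of the Unicode-to-charset table is populated, so any
 * code point above U+00FF fails with -EINVAL; U+0000 also fails because
 * a zero table entry doubles as "no mapping" in both directions.
 */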
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
*uni = charset2uni[*rawstring];
if (*uni == 0x0000)
return -EINVAL;
return 1;
}
static struct nls_table table = {
.charset = "ascii",
.uni2char = uni2char,
.char2uni = char2uni,
.charset2lower = charset2lower,
.charset2upper = charset2upper,
.owner = THIS_MODULE,
};
static int __init init_nls_ascii(void)
{
return register_nls(&table);
}
static void __exit exit_nls_ascii(void)
{
unregister_nls(&table);
}
module_init(init_nls_ascii)
module_exit(exit_nls_ascii)
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
nicolaerosia/linux-bn-omap4 | sound/pci/rme9652/hdsp.c | 196 | 150614 | /*
* ALSA driver for RME Hammerfall DSP audio interface(s)
*
* Copyright (c) 2002 Paul Davis
* Marcus Andersson
* Thomas Charbonnel
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
#include <sound/info.h>
#include <sound/asoundef.h>
#include <sound/rawmidi.h>
#include <sound/hwdep.h>
#include <sound/initval.h>
#include <sound/hdsp.h>
#include <asm/byteorder.h>
#include <asm/current.h>
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for RME Hammerfall DSP interface.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for RME Hammerfall DSP interface.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable/disable specific Hammerfall DSP soundcards.");
MODULE_AUTHOR("Paul Davis <paul@linuxaudiosystems.com>, Marcus Andersson, Thomas Charbonnel <thomas@undata.org>");
MODULE_DESCRIPTION("RME Hammerfall DSP");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{RME Hammerfall-DSP},"
"{RME HDSP-9652},"
"{RME HDSP-9632}}");
MODULE_FIRMWARE("rpm_firmware.bin");
MODULE_FIRMWARE("multiface_firmware.bin");
MODULE_FIRMWARE("multiface_firmware_rev11.bin");
MODULE_FIRMWARE("digiface_firmware.bin");
MODULE_FIRMWARE("digiface_firmware_rev11.bin");
#define HDSP_MAX_CHANNELS 26
#define HDSP_MAX_DS_CHANNELS 14
#define HDSP_MAX_QS_CHANNELS 8
#define DIGIFACE_SS_CHANNELS 26
#define DIGIFACE_DS_CHANNELS 14
#define MULTIFACE_SS_CHANNELS 18
#define MULTIFACE_DS_CHANNELS 14
#define H9652_SS_CHANNELS 26
#define H9652_DS_CHANNELS 14
/* This does not include possible Analog Extension Boards
AEBs are detected at card initialization
*/
#define H9632_SS_CHANNELS 12
#define H9632_DS_CHANNELS 8
#define H9632_QS_CHANNELS 4
#define RPM_CHANNELS 6
/* Write registers. These are defined as byte-offsets from the iobase value.
*/
#define HDSP_resetPointer 0
#define HDSP_freqReg 0
#define HDSP_outputBufferAddress 32
#define HDSP_inputBufferAddress 36
#define HDSP_controlRegister 64
#define HDSP_interruptConfirmation 96
#define HDSP_outputEnable 128
#define HDSP_control2Reg 256
#define HDSP_midiDataOut0 352
#define HDSP_midiDataOut1 356
#define HDSP_fifoData 368
#define HDSP_inputEnable 384
/* Read registers. These are defined as byte-offsets from the iobase value
*/
#define HDSP_statusRegister 0
#define HDSP_timecode 128
#define HDSP_status2Register 192
#define HDSP_midiDataIn0 360
#define HDSP_midiDataIn1 364
#define HDSP_midiStatusOut0 384
#define HDSP_midiStatusOut1 388
#define HDSP_midiStatusIn0 392
#define HDSP_midiStatusIn1 396
#define HDSP_fifoStatus 400
/* the meters are regular i/o-mapped registers, but offset
considerably from the rest. the peak registers are reset
when read; the least-significant 4 bits are full-scale counters;
the actual peak value is in the most-significant 24 bits.
*/
#define HDSP_playbackPeakLevel 4096 /* 26 * 32 bit values */
#define HDSP_inputPeakLevel 4224 /* 26 * 32 bit values */
#define HDSP_outputPeakLevel 4352 /* (26+2) * 32 bit values */
#define HDSP_playbackRmsLevel 4612 /* 26 * 64 bit values */
#define HDSP_inputRmsLevel 4868 /* 26 * 64 bit values */
/* This is for H9652 cards
Peak values are read downward from the base
Rms values are read upward
There are rms values for the outputs too
26*3 values are read in ss mode
14*3 in ds mode, with no gap between values
*/
#define HDSP_9652_peakBase 7164
#define HDSP_9652_rmsBase 4096
/* c.f. the hdsp_9632_meters_t struct */
#define HDSP_9632_metersBase 4096
#define HDSP_IO_EXTENT 7168
/* control2 register bits */
#define HDSP_TMS 0x01
#define HDSP_TCK 0x02
#define HDSP_TDI 0x04
#define HDSP_JTAG 0x08
#define HDSP_PWDN 0x10
#define HDSP_PROGRAM 0x020
#define HDSP_CONFIG_MODE_0 0x040
#define HDSP_CONFIG_MODE_1 0x080
#define HDSP_VERSION_BIT (0x100 | HDSP_S_LOAD)
#define HDSP_BIGENDIAN_MODE 0x200
#define HDSP_RD_MULTIPLE 0x400
#define HDSP_9652_ENABLE_MIXER 0x800
#define HDSP_S200 0x800
#define HDSP_S300 (0x100 | HDSP_S200) /* dummy, purpose of 0x100 unknown */
#define HDSP_CYCLIC_MODE 0x1000
#define HDSP_TDO 0x10000000
#define HDSP_S_PROGRAM (HDSP_CYCLIC_MODE|HDSP_PROGRAM|HDSP_CONFIG_MODE_0)
#define HDSP_S_LOAD (HDSP_CYCLIC_MODE|HDSP_PROGRAM|HDSP_CONFIG_MODE_1)
/* Control Register bits */
#define HDSP_Start (1<<0) /* start engine */
#define HDSP_Latency0 (1<<1) /* buffer size = 2^n where n is defined by Latency{2,1,0} */
#define HDSP_Latency1 (1<<2) /* [ see above ] */
#define HDSP_Latency2 (1<<3) /* [ see above ] */
#define HDSP_ClockModeMaster (1<<4) /* 1=Master, 0=Slave/Autosync */
#define HDSP_AudioInterruptEnable (1<<5) /* what do you think ? */
#define HDSP_Frequency0 (1<<6) /* 0=44.1kHz/88.2kHz/176.4kHz 1=48kHz/96kHz/192kHz */
#define HDSP_Frequency1 (1<<7) /* 0=32kHz/64kHz/128kHz */
#define HDSP_DoubleSpeed (1<<8) /* 0=normal speed, 1=double speed */
#define HDSP_SPDIFProfessional (1<<9) /* 0=consumer, 1=professional */
#define HDSP_SPDIFEmphasis (1<<10) /* 0=none, 1=on */
#define HDSP_SPDIFNonAudio (1<<11) /* 0=off, 1=on */
#define HDSP_SPDIFOpticalOut (1<<12) /* 1=use 1st ADAT connector for SPDIF, 0=do not */
#define HDSP_SyncRef2 (1<<13)
#define HDSP_SPDIFInputSelect0 (1<<14)
#define HDSP_SPDIFInputSelect1 (1<<15)
#define HDSP_SyncRef0 (1<<16)
#define HDSP_SyncRef1 (1<<17)
#define HDSP_AnalogExtensionBoard (1<<18) /* For H9632 cards */
#define HDSP_XLRBreakoutCable (1<<20) /* For H9632 cards */
#define HDSP_Midi0InterruptEnable (1<<22)
#define HDSP_Midi1InterruptEnable (1<<23)
#define HDSP_LineOut (1<<24)
#define HDSP_ADGain0 (1<<25) /* From here : H9632 specific */
#define HDSP_ADGain1 (1<<26)
#define HDSP_DAGain0 (1<<27)
#define HDSP_DAGain1 (1<<28)
#define HDSP_PhoneGain0 (1<<29)
#define HDSP_PhoneGain1 (1<<30)
#define HDSP_QuadSpeed (1<<31)
/* RPM uses some of the registers for special purposes */
#define HDSP_RPM_Inp12 0x04A00
#define HDSP_RPM_Inp12_Phon_6dB 0x00800 /* Dolby */
#define HDSP_RPM_Inp12_Phon_0dB 0x00000 /* .. */
#define HDSP_RPM_Inp12_Phon_n6dB 0x04000 /* inp_0 */
#define HDSP_RPM_Inp12_Line_0dB 0x04200 /* Dolby+PRO */
#define HDSP_RPM_Inp12_Line_n6dB 0x00200 /* PRO */
#define HDSP_RPM_Inp34 0x32000
#define HDSP_RPM_Inp34_Phon_6dB 0x20000 /* SyncRef1 */
#define HDSP_RPM_Inp34_Phon_0dB 0x00000 /* .. */
#define HDSP_RPM_Inp34_Phon_n6dB 0x02000 /* SyncRef2 */
#define HDSP_RPM_Inp34_Line_0dB 0x30000 /* SyncRef1+SyncRef0 */
#define HDSP_RPM_Inp34_Line_n6dB 0x10000 /* SyncRef0 */
#define HDSP_RPM_Bypass 0x01000
#define HDSP_RPM_Disconnect 0x00001
#define HDSP_ADGainMask (HDSP_ADGain0|HDSP_ADGain1)
#define HDSP_ADGainMinus10dBV HDSP_ADGainMask
#define HDSP_ADGainPlus4dBu (HDSP_ADGain0)
#define HDSP_ADGainLowGain 0
#define HDSP_DAGainMask (HDSP_DAGain0|HDSP_DAGain1)
#define HDSP_DAGainHighGain HDSP_DAGainMask
#define HDSP_DAGainPlus4dBu (HDSP_DAGain0)
#define HDSP_DAGainMinus10dBV 0
#define HDSP_PhoneGainMask (HDSP_PhoneGain0|HDSP_PhoneGain1)
#define HDSP_PhoneGain0dB HDSP_PhoneGainMask
#define HDSP_PhoneGainMinus6dB (HDSP_PhoneGain0)
#define HDSP_PhoneGainMinus12dB 0
#define HDSP_LatencyMask (HDSP_Latency0|HDSP_Latency1|HDSP_Latency2)
#define HDSP_FrequencyMask (HDSP_Frequency0|HDSP_Frequency1|HDSP_DoubleSpeed|HDSP_QuadSpeed)
#define HDSP_SPDIFInputMask (HDSP_SPDIFInputSelect0|HDSP_SPDIFInputSelect1)
#define HDSP_SPDIFInputADAT1 0
#define HDSP_SPDIFInputCoaxial (HDSP_SPDIFInputSelect0)
#define HDSP_SPDIFInputCdrom (HDSP_SPDIFInputSelect1)
#define HDSP_SPDIFInputAES (HDSP_SPDIFInputSelect0|HDSP_SPDIFInputSelect1)
#define HDSP_SyncRefMask (HDSP_SyncRef0|HDSP_SyncRef1|HDSP_SyncRef2)
#define HDSP_SyncRef_ADAT1 0
#define HDSP_SyncRef_ADAT2 (HDSP_SyncRef0)
#define HDSP_SyncRef_ADAT3 (HDSP_SyncRef1)
#define HDSP_SyncRef_SPDIF (HDSP_SyncRef0|HDSP_SyncRef1)
#define HDSP_SyncRef_WORD (HDSP_SyncRef2)
#define HDSP_SyncRef_ADAT_SYNC (HDSP_SyncRef0|HDSP_SyncRef2)
/* Sample Clock Sources */
#define HDSP_CLOCK_SOURCE_AUTOSYNC 0
#define HDSP_CLOCK_SOURCE_INTERNAL_32KHZ 1
#define HDSP_CLOCK_SOURCE_INTERNAL_44_1KHZ 2
#define HDSP_CLOCK_SOURCE_INTERNAL_48KHZ 3
#define HDSP_CLOCK_SOURCE_INTERNAL_64KHZ 4
#define HDSP_CLOCK_SOURCE_INTERNAL_88_2KHZ 5
#define HDSP_CLOCK_SOURCE_INTERNAL_96KHZ 6
#define HDSP_CLOCK_SOURCE_INTERNAL_128KHZ 7
#define HDSP_CLOCK_SOURCE_INTERNAL_176_4KHZ 8
#define HDSP_CLOCK_SOURCE_INTERNAL_192KHZ 9
/* Preferred sync reference choices - used by "pref_sync_ref" control switch */
#define HDSP_SYNC_FROM_WORD 0
#define HDSP_SYNC_FROM_SPDIF 1
#define HDSP_SYNC_FROM_ADAT1 2
#define HDSP_SYNC_FROM_ADAT_SYNC 3
#define HDSP_SYNC_FROM_ADAT2 4
#define HDSP_SYNC_FROM_ADAT3 5
/* SyncCheck status */
#define HDSP_SYNC_CHECK_NO_LOCK 0
#define HDSP_SYNC_CHECK_LOCK 1
#define HDSP_SYNC_CHECK_SYNC 2
/* AutoSync references - used by "autosync_ref" control switch */
#define HDSP_AUTOSYNC_FROM_WORD 0
#define HDSP_AUTOSYNC_FROM_ADAT_SYNC 1
#define HDSP_AUTOSYNC_FROM_SPDIF 2
#define HDSP_AUTOSYNC_FROM_NONE 3
#define HDSP_AUTOSYNC_FROM_ADAT1 4
#define HDSP_AUTOSYNC_FROM_ADAT2 5
#define HDSP_AUTOSYNC_FROM_ADAT3 6
/* Possible sources of S/PDIF input */
#define HDSP_SPDIFIN_OPTICAL 0 /* optical (ADAT1) */
#define HDSP_SPDIFIN_COAXIAL 1 /* coaxial (RCA) */
#define HDSP_SPDIFIN_INTERNAL 2 /* internal (CDROM) */
#define HDSP_SPDIFIN_AES 3 /* xlr for H9632 (AES)*/
#define HDSP_Frequency32KHz HDSP_Frequency0
#define HDSP_Frequency44_1KHz HDSP_Frequency1
#define HDSP_Frequency48KHz (HDSP_Frequency1|HDSP_Frequency0)
#define HDSP_Frequency64KHz (HDSP_DoubleSpeed|HDSP_Frequency0)
#define HDSP_Frequency88_2KHz (HDSP_DoubleSpeed|HDSP_Frequency1)
#define HDSP_Frequency96KHz (HDSP_DoubleSpeed|HDSP_Frequency1|HDSP_Frequency0)
/* For H9632 cards */
#define HDSP_Frequency128KHz (HDSP_QuadSpeed|HDSP_DoubleSpeed|HDSP_Frequency0)
#define HDSP_Frequency176_4KHz (HDSP_QuadSpeed|HDSP_DoubleSpeed|HDSP_Frequency1)
#define HDSP_Frequency192KHz (HDSP_QuadSpeed|HDSP_DoubleSpeed|HDSP_Frequency1|HDSP_Frequency0)
/* RME says n = 104857600000000, but in the windows MADI driver, I see:
return 104857600000000 / rate; // 100 MHz
return 110100480000000 / rate; // 105 MHz
*/
#define DDS_NUMERATOR 104857600000000ULL /* = 2^20 * 10^8 */
#define hdsp_encode_latency(x) (((x)<<1) & HDSP_LatencyMask)
#define hdsp_decode_latency(x) (((x) & HDSP_LatencyMask)>>1)
#define hdsp_encode_spdif_in(x) (((x)&0x3)<<14)
#define hdsp_decode_spdif_in(x) (((x)>>14)&0x3)
/* Status Register bits */
#define HDSP_audioIRQPending (1<<0)
#define HDSP_Lock2 (1<<1) /* this is for Digiface and H9652 */
#define HDSP_spdifFrequency3 HDSP_Lock2 /* this is for H9632 only */
#define HDSP_Lock1 (1<<2)
#define HDSP_Lock0 (1<<3)
#define HDSP_SPDIFSync (1<<4)
#define HDSP_TimecodeLock (1<<5)
#define HDSP_BufferPositionMask 0x000FFC0 /* Bit 6..15 : h/w buffer pointer */
#define HDSP_Sync2 (1<<16)
#define HDSP_Sync1 (1<<17)
#define HDSP_Sync0 (1<<18)
#define HDSP_DoubleSpeedStatus (1<<19)
#define HDSP_ConfigError (1<<20)
#define HDSP_DllError (1<<21)
#define HDSP_spdifFrequency0 (1<<22)
#define HDSP_spdifFrequency1 (1<<23)
#define HDSP_spdifFrequency2 (1<<24)
#define HDSP_SPDIFErrorFlag (1<<25)
#define HDSP_BufferID (1<<26)
#define HDSP_TimecodeSync (1<<27)
#define HDSP_AEBO (1<<28) /* H9632 specific Analog Extension Boards */
#define HDSP_AEBI (1<<29) /* 0 = present, 1 = absent */
#define HDSP_midi0IRQPending (1<<30)
#define HDSP_midi1IRQPending (1<<31)
#define HDSP_spdifFrequencyMask (HDSP_spdifFrequency0|HDSP_spdifFrequency1|HDSP_spdifFrequency2)
#define HDSP_spdifFrequencyMask_9632 (HDSP_spdifFrequency0|\
HDSP_spdifFrequency1|\
HDSP_spdifFrequency2|\
HDSP_spdifFrequency3)
#define HDSP_spdifFrequency32KHz (HDSP_spdifFrequency0)
#define HDSP_spdifFrequency44_1KHz (HDSP_spdifFrequency1)
#define HDSP_spdifFrequency48KHz (HDSP_spdifFrequency0|HDSP_spdifFrequency1)
#define HDSP_spdifFrequency64KHz (HDSP_spdifFrequency2)
#define HDSP_spdifFrequency88_2KHz (HDSP_spdifFrequency0|HDSP_spdifFrequency2)
#define HDSP_spdifFrequency96KHz (HDSP_spdifFrequency2|HDSP_spdifFrequency1)
/* This is for H9632 cards */
#define HDSP_spdifFrequency128KHz (HDSP_spdifFrequency0|\
HDSP_spdifFrequency1|\
HDSP_spdifFrequency2)
#define HDSP_spdifFrequency176_4KHz HDSP_spdifFrequency3
#define HDSP_spdifFrequency192KHz (HDSP_spdifFrequency3|HDSP_spdifFrequency0)
/* Status2 Register bits */
#define HDSP_version0 (1<<0)
#define HDSP_version1 (1<<1)
#define HDSP_version2 (1<<2)
#define HDSP_wc_lock (1<<3)
#define HDSP_wc_sync (1<<4)
#define HDSP_inp_freq0 (1<<5)
#define HDSP_inp_freq1 (1<<6)
#define HDSP_inp_freq2 (1<<7)
#define HDSP_SelSyncRef0 (1<<8)
#define HDSP_SelSyncRef1 (1<<9)
#define HDSP_SelSyncRef2 (1<<10)
#define HDSP_wc_valid (HDSP_wc_lock|HDSP_wc_sync)
#define HDSP_systemFrequencyMask (HDSP_inp_freq0|HDSP_inp_freq1|HDSP_inp_freq2)
#define HDSP_systemFrequency32 (HDSP_inp_freq0)
#define HDSP_systemFrequency44_1 (HDSP_inp_freq1)
#define HDSP_systemFrequency48 (HDSP_inp_freq0|HDSP_inp_freq1)
#define HDSP_systemFrequency64 (HDSP_inp_freq2)
#define HDSP_systemFrequency88_2 (HDSP_inp_freq0|HDSP_inp_freq2)
#define HDSP_systemFrequency96 (HDSP_inp_freq1|HDSP_inp_freq2)
/* FIXME : more values for 9632 cards ? */
#define HDSP_SelSyncRefMask (HDSP_SelSyncRef0|HDSP_SelSyncRef1|HDSP_SelSyncRef2)
#define HDSP_SelSyncRef_ADAT1 0
#define HDSP_SelSyncRef_ADAT2 (HDSP_SelSyncRef0)
#define HDSP_SelSyncRef_ADAT3 (HDSP_SelSyncRef1)
#define HDSP_SelSyncRef_SPDIF (HDSP_SelSyncRef0|HDSP_SelSyncRef1)
#define HDSP_SelSyncRef_WORD (HDSP_SelSyncRef2)
#define HDSP_SelSyncRef_ADAT_SYNC (HDSP_SelSyncRef0|HDSP_SelSyncRef2)
/* Card state flags */
#define HDSP_InitializationComplete (1<<0)
#define HDSP_FirmwareLoaded (1<<1)
#define HDSP_FirmwareCached (1<<2)
/* FIFO wait times, defined in terms of 1/10ths of msecs */
#define HDSP_LONG_WAIT 5000
#define HDSP_SHORT_WAIT 30
#define UNITY_GAIN 32768
#define MINUS_INFINITY_GAIN 0
/* the size of a substream (1 mono data stream) */
#define HDSP_CHANNEL_BUFFER_SAMPLES (16*1024)
#define HDSP_CHANNEL_BUFFER_BYTES (4*HDSP_CHANNEL_BUFFER_SAMPLES)
/* the size of the area we need to allocate for DMA transfers. the
size is the same regardless of the number of channels - the
Multiface still uses the same memory area.
Note that we allocate 1 more channel than is apparently needed
because the h/w seems to write 1 byte beyond the end of the last
page. Sigh.
*/
#define HDSP_DMA_AREA_BYTES ((HDSP_MAX_CHANNELS+1) * HDSP_CHANNEL_BUFFER_BYTES)
#define HDSP_DMA_AREA_KILOBYTES (HDSP_DMA_AREA_BYTES/1024)
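/* 27 channel buffers of 64 KiB each: 1728 KiB in total */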
#define HDSP_FIRMWARE_SIZE (24413 * 4)
struct hdsp_9632_meters {
u32 input_peak[16];
u32 playback_peak[16];
u32 output_peak[16];
u32 xxx_peak[16];
u32 padding[64];
u32 input_rms_low[16];
u32 playback_rms_low[16];
u32 output_rms_low[16];
u32 xxx_rms_low[16];
u32 input_rms_high[16];
u32 playback_rms_high[16];
u32 output_rms_high[16];
u32 xxx_rms_high[16];
};
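/* The RMS readings are apparently 64-bit values split across the _low
and _high arrays; a consumer would combine them as
((u64)rms_high << 32) | rms_low. */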
struct hdsp_midi {
struct hdsp *hdsp;
int id;
struct snd_rawmidi *rmidi;
struct snd_rawmidi_substream *input;
struct snd_rawmidi_substream *output;
char istimer; /* timer in use */
struct timer_list timer;
spinlock_t lock;
int pending;
};
struct hdsp {
spinlock_t lock;
struct snd_pcm_substream *capture_substream;
struct snd_pcm_substream *playback_substream;
struct hdsp_midi midi[2];
struct tasklet_struct midi_tasklet;
int use_midi_tasklet;
int precise_ptr;
u32 control_register; /* cached value */
u32 control2_register; /* cached value */
u32 creg_spdif;
u32 creg_spdif_stream;
int clock_source_locked;
char *card_name; /* digiface/multiface/rpm */
enum HDSP_IO_Type io_type; /* ditto, but for code use */
unsigned short firmware_rev;
unsigned short state; /* stores state bits */
const struct firmware *firmware;
u32 *fw_uploaded;
size_t period_bytes; /* size of one period, in bytes per channel */
unsigned char max_channels;
unsigned char qs_in_channels; /* quad speed mode for H9632 */
unsigned char ds_in_channels;
unsigned char ss_in_channels; /* different for multiface/digiface */
unsigned char qs_out_channels;
unsigned char ds_out_channels;
unsigned char ss_out_channels;
struct snd_dma_buffer capture_dma_buf;
struct snd_dma_buffer playback_dma_buf;
unsigned char *capture_buffer; /* suitably aligned address */
unsigned char *playback_buffer; /* suitably aligned address */
pid_t capture_pid;
pid_t playback_pid;
int running;
int system_sample_rate;
char *channel_map;
int dev;
int irq;
unsigned long port;
void __iomem *iobase;
struct snd_card *card;
struct snd_pcm *pcm;
struct snd_hwdep *hwdep;
struct pci_dev *pci;
struct snd_kcontrol *spdif_ctl;
unsigned short mixer_matrix[HDSP_MATRIX_MIXER_SIZE];
unsigned int dds_value; /* last value written to freq register */
};
/* These tables map the ALSA channels 1..N to the channels that we
need to use in order to find the relevant channel buffer. RME
refer to this kind of mapping as between "the ADAT channel and
the DMA channel." We index it using the logical audio channel,
and the value is the DMA channel (i.e. channel buffer number)
where the data for that channel can be read/written from/to.
*/
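/* For example, in channel_map_mf_ss below, logical channel 8 (the
first ADAT channel of a Multiface) lives in DMA channel buffer 16;
entries of -1 mark channels that do not exist in that mode. */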
static char channel_map_df_ss[HDSP_MAX_CHANNELS] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25
};
static char channel_map_mf_ss[HDSP_MAX_CHANNELS] = { /* Multiface */
/* Analog */
0, 1, 2, 3, 4, 5, 6, 7,
/* ADAT 2 */
16, 17, 18, 19, 20, 21, 22, 23,
/* SPDIF */
24, 25,
-1, -1, -1, -1, -1, -1, -1, -1
};
static char channel_map_ds[HDSP_MAX_CHANNELS] = {
/* ADAT channels are remapped */
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23,
/* channels 12 and 13 are S/PDIF */
24, 25,
/* others don't exist */
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
static char channel_map_H9632_ss[HDSP_MAX_CHANNELS] = {
/* ADAT channels */
0, 1, 2, 3, 4, 5, 6, 7,
/* SPDIF */
8, 9,
/* Analog */
10, 11,
/* AO4S-192 and AI4S-192 extension boards */
12, 13, 14, 15,
/* others don't exist */
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1
};
static char channel_map_H9632_ds[HDSP_MAX_CHANNELS] = {
/* ADAT */
1, 3, 5, 7,
/* SPDIF */
8, 9,
/* Analog */
10, 11,
/* AO4S-192 and AI4S-192 extension boards */
12, 13, 14, 15,
/* others don't exist */
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1
};
static char channel_map_H9632_qs[HDSP_MAX_CHANNELS] = {
/* ADAT is disabled in this mode */
/* SPDIF */
8, 9,
/* Analog */
10, 11,
/* AO4S-192 and AI4S-192 extension boards */
12, 13, 14, 15,
/* others don't exist */
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1
};
static int snd_hammerfall_get_buffer(struct pci_dev *pci, struct snd_dma_buffer *dmab, size_t size)
{
dmab->dev.type = SNDRV_DMA_TYPE_DEV;
dmab->dev.dev = snd_dma_pci_data(pci);
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
size, dmab) < 0)
return -ENOMEM;
return 0;
}
static void snd_hammerfall_free_buffer(struct snd_dma_buffer *dmab, struct pci_dev *pci)
{
if (dmab->area)
snd_dma_free_pages(dmab);
}
static const struct pci_device_id snd_hdsp_ids[] = {
{
.vendor = PCI_VENDOR_ID_XILINX,
.device = PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
}, /* RME Hammerfall-DSP */
{ 0, },
};
MODULE_DEVICE_TABLE(pci, snd_hdsp_ids);
/* prototypes */
static int snd_hdsp_create_alsa_devices(struct snd_card *card, struct hdsp *hdsp);
static int snd_hdsp_create_pcm(struct snd_card *card, struct hdsp *hdsp);
static int snd_hdsp_enable_io (struct hdsp *hdsp);
static void snd_hdsp_initialize_midi_flush (struct hdsp *hdsp);
static void snd_hdsp_initialize_channels (struct hdsp *hdsp);
static int hdsp_fifo_wait(struct hdsp *hdsp, int count, int timeout);
static int hdsp_autosync_ref(struct hdsp *hdsp);
static int snd_hdsp_set_defaults(struct hdsp *hdsp);
static void snd_hdsp_9652_enable_mixer (struct hdsp *hdsp);
static int hdsp_playback_to_output_key (struct hdsp *hdsp, int in, int out)
{
switch (hdsp->io_type) {
case Multiface:
case Digiface:
case RPM:
default:
if (hdsp->firmware_rev == 0xa)
return (64 * out) + (32 + (in));
else
return (52 * out) + (26 + (in));
case H9632:
return (32 * out) + (16 + (in));
case H9652:
return (52 * out) + (26 + (in));
}
}
static int hdsp_input_to_output_key (struct hdsp *hdsp, int in, int out)
{
switch (hdsp->io_type) {
case Multiface:
case Digiface:
case RPM:
default:
if (hdsp->firmware_rev == 0xa)
return (64 * out) + in;
else
return (52 * out) + in;
case H9632:
return (32 * out) + in;
case H9652:
return (52 * out) + in;
}
}
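/* Worked example: on an H9632, input channel 2 feeding output 5 uses
mixer address (32 * 5) + 2 = 162, while playback channel 2 to the same
output uses (32 * 5) + 16 + 2 = 178; playback sources occupy the upper
half of each 32-entry output row. */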
static void hdsp_write(struct hdsp *hdsp, int reg, int val)
{
writel(val, hdsp->iobase + reg);
}
static unsigned int hdsp_read(struct hdsp *hdsp, int reg)
{
return readl (hdsp->iobase + reg);
}
static int hdsp_check_for_iobox (struct hdsp *hdsp)
{
int i;
if (hdsp->io_type == H9652 || hdsp->io_type == H9632) return 0;
for (i = 0; i < 500; i++) {
if (0 == (hdsp_read(hdsp, HDSP_statusRegister) &
HDSP_ConfigError)) {
if (i) {
dev_dbg(hdsp->card->dev,
"IO box found after %d ms\n",
(20 * i));
}
return 0;
}
msleep(20);
}
dev_err(hdsp->card->dev, "no IO box connected!\n");
hdsp->state &= ~HDSP_FirmwareLoaded;
return -EIO;
}
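/* i.e. hdsp_check_for_iobox() polls HDSP_ConfigError every 20 ms for
up to 500 iterations, giving the IO box roughly 10 seconds to appear
before giving up. */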
static int hdsp_wait_for_iobox(struct hdsp *hdsp, unsigned int loops,
unsigned int delay)
{
unsigned int i;
if (hdsp->io_type == H9652 || hdsp->io_type == H9632)
return 0;
for (i = 0; i != loops; ++i) {
if (hdsp_read(hdsp, HDSP_statusRegister) & HDSP_ConfigError)
msleep(delay);
else {
dev_dbg(hdsp->card->dev, "iobox found after %ums!\n",
i * delay);
return 0;
}
}
dev_info(hdsp->card->dev, "no IO box connected!\n");
hdsp->state &= ~HDSP_FirmwareLoaded;
return -EIO;
}
static int snd_hdsp_load_firmware_from_cache(struct hdsp *hdsp)
{
int i;
unsigned long flags;
const u32 *cache;
if (hdsp->fw_uploaded)
cache = hdsp->fw_uploaded;
else {
if (!hdsp->firmware)
return -ENODEV;
cache = (u32 *)hdsp->firmware->data;
if (!cache)
return -ENODEV;
}
if ((hdsp_read (hdsp, HDSP_statusRegister) & HDSP_DllError) != 0) {
dev_info(hdsp->card->dev, "loading firmware\n");
hdsp_write (hdsp, HDSP_control2Reg, HDSP_S_PROGRAM);
hdsp_write (hdsp, HDSP_fifoData, 0);
if (hdsp_fifo_wait (hdsp, 0, HDSP_LONG_WAIT)) {
dev_info(hdsp->card->dev,
"timeout waiting for download preparation\n");
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S200);
return -EIO;
}
hdsp_write (hdsp, HDSP_control2Reg, HDSP_S_LOAD);
for (i = 0; i < HDSP_FIRMWARE_SIZE / 4; ++i) {
hdsp_write(hdsp, HDSP_fifoData, cache[i]);
if (hdsp_fifo_wait (hdsp, 127, HDSP_LONG_WAIT)) {
dev_info(hdsp->card->dev,
"timeout during firmware loading\n");
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S200);
return -EIO;
}
}
hdsp_fifo_wait(hdsp, 3, HDSP_LONG_WAIT);
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S200);
ssleep(3);
#ifdef SNDRV_BIG_ENDIAN
hdsp->control2_register = HDSP_BIGENDIAN_MODE;
#else
hdsp->control2_register = 0;
#endif
hdsp_write (hdsp, HDSP_control2Reg, hdsp->control2_register);
dev_info(hdsp->card->dev, "finished firmware loading\n");
}
if (hdsp->state & HDSP_InitializationComplete) {
dev_info(hdsp->card->dev,
"firmware loaded from cache, restoring defaults\n");
spin_lock_irqsave(&hdsp->lock, flags);
snd_hdsp_set_defaults(hdsp);
spin_unlock_irqrestore(&hdsp->lock, flags);
}
hdsp->state |= HDSP_FirmwareLoaded;
return 0;
}
static int hdsp_get_iobox_version (struct hdsp *hdsp)
{
if ((hdsp_read (hdsp, HDSP_statusRegister) & HDSP_DllError) != 0) {
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S_LOAD);
hdsp_write(hdsp, HDSP_fifoData, 0);
if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT) < 0) {
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S300);
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S_LOAD);
}
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S200 | HDSP_PROGRAM);
hdsp_write (hdsp, HDSP_fifoData, 0);
if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT) < 0) {
hdsp->io_type = Multiface;
dev_info(hdsp->card->dev, "Multiface found\n");
return 0;
}
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S_LOAD);
hdsp_write(hdsp, HDSP_fifoData, 0);
if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT) == 0) {
hdsp->io_type = Digiface;
dev_info(hdsp->card->dev, "Digiface found\n");
return 0;
}
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S300);
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S_LOAD);
hdsp_write(hdsp, HDSP_fifoData, 0);
if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT) == 0) {
hdsp->io_type = Multiface;
dev_info(hdsp->card->dev, "Multiface found\n");
return 0;
}
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S300);
hdsp_write(hdsp, HDSP_control2Reg, HDSP_S_LOAD);
hdsp_write(hdsp, HDSP_fifoData, 0);
if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT) < 0) {
hdsp->io_type = Multiface;
dev_info(hdsp->card->dev, "Multiface found\n");
return 0;
}
hdsp->io_type = RPM;
dev_info(hdsp->card->dev, "RPM found\n");
return 0;
} else {
/* firmware was already loaded, get iobox type */
if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version2)
hdsp->io_type = RPM;
else if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
hdsp->io_type = Multiface;
else
hdsp->io_type = Digiface;
}
return 0;
}
static int hdsp_request_fw_loader(struct hdsp *hdsp);
static int hdsp_check_for_firmware (struct hdsp *hdsp, int load_on_demand)
{
if (hdsp->io_type == H9652 || hdsp->io_type == H9632)
return 0;
if ((hdsp_read (hdsp, HDSP_statusRegister) & HDSP_DllError) != 0) {
hdsp->state &= ~HDSP_FirmwareLoaded;
if (! load_on_demand)
return -EIO;
dev_err(hdsp->card->dev, "firmware not present.\n");
/* try to load firmware */
if (! (hdsp->state & HDSP_FirmwareCached)) {
if (! hdsp_request_fw_loader(hdsp))
return 0;
dev_err(hdsp->card->dev,
"No firmware loaded nor cached, please upload firmware.\n");
return -EIO;
}
if (snd_hdsp_load_firmware_from_cache(hdsp) != 0) {
dev_err(hdsp->card->dev,
"Firmware loading from cache failed, please upload manually.\n");
return -EIO;
}
}
return 0;
}
static int hdsp_fifo_wait(struct hdsp *hdsp, int count, int timeout)
{
int i;
/* the fifoStatus register reports how many words
are available in the command FIFO.
*/
for (i = 0; i < timeout; i++) {
if ((int)(hdsp_read (hdsp, HDSP_fifoStatus) & 0xff) <= count)
return 0;
/* not very friendly, but we only do this during a firmware
load and changing the mixer, so we just put up with it.
*/
udelay (100);
}
dev_warn(hdsp->card->dev,
"wait for FIFO status <= %d failed after %d iterations\n",
count, timeout);
return -1;
}
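/* With udelay(100) per iteration, HDSP_SHORT_WAIT (30) bounds the wait
at about 3 ms and HDSP_LONG_WAIT (5000) at about 0.5 s, matching the
"1/10ths of msecs" convention noted where they are defined. */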
static int hdsp_read_gain (struct hdsp *hdsp, unsigned int addr)
{
if (addr >= HDSP_MATRIX_MIXER_SIZE)
return 0;
return hdsp->mixer_matrix[addr];
}
static int hdsp_write_gain(struct hdsp *hdsp, unsigned int addr, unsigned short data)
{
unsigned int ad;
if (addr >= HDSP_MATRIX_MIXER_SIZE)
return -1;
if (hdsp->io_type == H9652 || hdsp->io_type == H9632) {
/* from Martin Bjornsen:
"You can only write dwords to the
mixer memory which contain two
mixer values in the low and high
word. So if you want to change
value 0 you have to read value 1
from the cache and write both to
the first dword in the mixer
memory."
*/
if (hdsp->io_type == H9632 && addr >= 512)
return 0;
if (hdsp->io_type == H9652 && addr >= 1352)
return 0;
hdsp->mixer_matrix[addr] = data;
/* `addr' addresses a 16-bit wide address, but
the address space accessed via hdsp_write
uses byte offsets. put another way, addr
varies from 0 to 1351, but to access the
corresponding memory location, we need
to access 0 to 2703 ...
*/
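/* e.g. writing addr 5 recomputes the dword at byte offset
4096 + (5/2)*4 = 4104, packing entry 5 into the high word and its
partner entry 4 into the low word. */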
ad = addr/2;
hdsp_write (hdsp, 4096 + (ad*4),
(hdsp->mixer_matrix[(addr&0x7fe)+1] << 16) +
hdsp->mixer_matrix[addr&0x7fe]);
return 0;
} else {
ad = (addr << 16) + data;
if (hdsp_fifo_wait(hdsp, 127, HDSP_LONG_WAIT))
return -1;
hdsp_write (hdsp, HDSP_fifoData, ad);
hdsp->mixer_matrix[addr] = data;
}
return 0;
}
static int snd_hdsp_use_is_exclusive(struct hdsp *hdsp)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&hdsp->lock, flags);
if ((hdsp->playback_pid != hdsp->capture_pid) &&
(hdsp->playback_pid >= 0) && (hdsp->capture_pid >= 0))
ret = 0;
spin_unlock_irqrestore(&hdsp->lock, flags);
return ret;
}
static int hdsp_spdif_sample_rate(struct hdsp *hdsp)
{
unsigned int status = hdsp_read(hdsp, HDSP_statusRegister);
unsigned int rate_bits = (status & HDSP_spdifFrequencyMask);
/* For the 9632, the mask is different */
if (hdsp->io_type == H9632)
rate_bits = (status & HDSP_spdifFrequencyMask_9632);
if (status & HDSP_SPDIFErrorFlag)
return 0;
switch (rate_bits) {
case HDSP_spdifFrequency32KHz: return 32000;
case HDSP_spdifFrequency44_1KHz: return 44100;
case HDSP_spdifFrequency48KHz: return 48000;
case HDSP_spdifFrequency64KHz: return 64000;
case HDSP_spdifFrequency88_2KHz: return 88200;
case HDSP_spdifFrequency96KHz: return 96000;
case HDSP_spdifFrequency128KHz:
if (hdsp->io_type == H9632) return 128000;
break;
case HDSP_spdifFrequency176_4KHz:
if (hdsp->io_type == H9632) return 176400;
break;
case HDSP_spdifFrequency192KHz:
if (hdsp->io_type == H9632) return 192000;
break;
default:
break;
}
dev_warn(hdsp->card->dev,
"unknown spdif frequency status; bits = 0x%x, status = 0x%x\n",
rate_bits, status);
return 0;
}
static int hdsp_external_sample_rate(struct hdsp *hdsp)
{
unsigned int status2 = hdsp_read(hdsp, HDSP_status2Register);
unsigned int rate_bits = status2 & HDSP_systemFrequencyMask;
/* For the 9632 card, there seems to be no bit indicating an external
* sample rate greater than 96kHz; the card just reports the
* corresponding single-speed rate. The best we can do seems to be to
* read the S/PDIF rate when the autosync reference is S/PDIF. */
if (hdsp->io_type == H9632 &&
hdsp_autosync_ref(hdsp) == HDSP_AUTOSYNC_FROM_SPDIF)
return hdsp_spdif_sample_rate(hdsp);
switch (rate_bits) {
case HDSP_systemFrequency32: return 32000;
case HDSP_systemFrequency44_1: return 44100;
case HDSP_systemFrequency48: return 48000;
case HDSP_systemFrequency64: return 64000;
case HDSP_systemFrequency88_2: return 88200;
case HDSP_systemFrequency96: return 96000;
default:
return 0;
}
}
static void hdsp_compute_period_size(struct hdsp *hdsp)
{
hdsp->period_bytes = 1 << (hdsp_decode_latency(hdsp->control_register) + 8);
}
static snd_pcm_uframes_t hdsp_hw_pointer(struct hdsp *hdsp)
{
int position;
position = hdsp_read(hdsp, HDSP_statusRegister);
if (!hdsp->precise_ptr)
return (position & HDSP_BufferID) ? (hdsp->period_bytes / 4) : 0;
position &= HDSP_BufferPositionMask;
position /= 4;
position &= (hdsp->period_bytes/2) - 1;
return position;
}
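/* Without precise_ptr the pointer only resolves to which half of the
double buffer the hardware is using (HDSP_BufferID), i.e. 0 or
period_bytes / 4 frames; with it, the byte position is converted to
frames (/ 4) and wrapped to the two-period ring (period_bytes / 2
frames). */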
static void hdsp_reset_hw_pointer(struct hdsp *hdsp)
{
hdsp_write (hdsp, HDSP_resetPointer, 0);
if (hdsp->io_type == H9632 && hdsp->firmware_rev >= 152)
/* HDSP_resetPointer = HDSP_freqReg, which is strange and seems
* to require rewriting the DDS value after a pointer reset
* (at least, it works like this) */
hdsp_write (hdsp, HDSP_freqReg, hdsp->dds_value);
}
static void hdsp_start_audio(struct hdsp *s)
{
s->control_register |= (HDSP_AudioInterruptEnable | HDSP_Start);
hdsp_write(s, HDSP_controlRegister, s->control_register);
}
static void hdsp_stop_audio(struct hdsp *s)
{
s->control_register &= ~(HDSP_Start | HDSP_AudioInterruptEnable);
hdsp_write(s, HDSP_controlRegister, s->control_register);
}
static void hdsp_silence_playback(struct hdsp *hdsp)
{
memset(hdsp->playback_buffer, 0, HDSP_DMA_AREA_BYTES);
}
static int hdsp_set_interrupt_interval(struct hdsp *s, unsigned int frames)
{
int n;
spin_lock_irq(&s->lock);
frames >>= 7;
n = 0;
while (frames) {
n++;
frames >>= 1;
}
s->control_register &= ~HDSP_LatencyMask;
s->control_register |= hdsp_encode_latency(n);
hdsp_write(s, HDSP_controlRegister, s->control_register);
hdsp_compute_period_size(s);
spin_unlock_irq(&s->lock);
return 0;
}
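/* Worked example: frames = 1024 gives 1024 >> 7 = 8; the loop then
counts four bits down to zero, so n = 4 and
hdsp_compute_period_size() yields 1 << (4 + 8) = 4096 bytes, i.e.
1024 32-bit samples per period per channel. */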
static void hdsp_set_dds_value(struct hdsp *hdsp, int rate)
{
u64 n;
if (rate >= 112000)
rate /= 4;
else if (rate >= 56000)
rate /= 2;
n = DDS_NUMERATOR;
n = div_u64(n, rate);
/* n must fit in 32 bits to be written to the FREQ register */
snd_BUG_ON(n >> 32);
/* HDSP_freqReg and HDSP_resetPointer are the same, so keep the DDS
value to write it after a reset */
hdsp->dds_value = n;
hdsp_write(hdsp, HDSP_freqReg, hdsp->dds_value);
}
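/* Worked example, assuming DDS_NUMERATOR is 2^20 * 10^8 as defined
earlier in this driver: rate 44100 gives n = 104857600000000 / 44100,
about 2.38e9, comfortably inside 32 bits; 192000 is first divided
down to 48000 before the division. */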
static int hdsp_set_rate(struct hdsp *hdsp, int rate, int called_internally)
{
int reject_if_open = 0;
int current_rate;
int rate_bits;
/* ASSUMPTION: hdsp->lock is either held, or
there is no need for it (e.g. during module
initialization).
*/
if (!(hdsp->control_register & HDSP_ClockModeMaster)) {
if (called_internally) {
/* request from ctl or card initialization */
dev_err(hdsp->card->dev,
"device is not running as a clock master: cannot set sample rate.\n");
return -1;
} else {
/* hw_param request while in AutoSync mode */
int external_freq = hdsp_external_sample_rate(hdsp);
int spdif_freq = hdsp_spdif_sample_rate(hdsp);
if ((spdif_freq == external_freq*2) && (hdsp_autosync_ref(hdsp) >= HDSP_AUTOSYNC_FROM_ADAT1))
dev_info(hdsp->card->dev,
"Detected ADAT in double speed mode\n");
else if (hdsp->io_type == H9632 && (spdif_freq == external_freq*4) && (hdsp_autosync_ref(hdsp) >= HDSP_AUTOSYNC_FROM_ADAT1))
dev_info(hdsp->card->dev,
"Detected ADAT in quad speed mode\n");
else if (rate != external_freq) {
dev_info(hdsp->card->dev,
"No AutoSync source for requested rate\n");
return -1;
}
}
}
current_rate = hdsp->system_sample_rate;
/* Changing from a "single speed" to a "double speed" rate is
not allowed if any substreams are open. This is because
such a change causes a shift in the location of
the DMA buffers and a reduction in the number of available
buffers.
Note that a similar but essentially insoluble problem
exists for externally-driven rate changes. All we can do
is to flag rate changes in the read/write routines. */
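/* e.g. moving from 44100 to 88200 on a Digiface would relocate every
channel buffer (channel_map_df_ss vs channel_map_ds), which is why
reject_if_open is set below whenever the request crosses a
speed-class boundary. */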
if (rate > 96000 && hdsp->io_type != H9632)
return -EINVAL;
switch (rate) {
case 32000:
if (current_rate > 48000)
reject_if_open = 1;
rate_bits = HDSP_Frequency32KHz;
break;
case 44100:
if (current_rate > 48000)
reject_if_open = 1;
rate_bits = HDSP_Frequency44_1KHz;
break;
case 48000:
if (current_rate > 48000)
reject_if_open = 1;
rate_bits = HDSP_Frequency48KHz;
break;
case 64000:
if (current_rate <= 48000 || current_rate > 96000)
reject_if_open = 1;
rate_bits = HDSP_Frequency64KHz;
break;
case 88200:
if (current_rate <= 48000 || current_rate > 96000)
reject_if_open = 1;
rate_bits = HDSP_Frequency88_2KHz;
break;
case 96000:
if (current_rate <= 48000 || current_rate > 96000)
reject_if_open = 1;
rate_bits = HDSP_Frequency96KHz;
break;
case 128000:
if (current_rate < 128000)
reject_if_open = 1;
rate_bits = HDSP_Frequency128KHz;
break;
case 176400:
if (current_rate < 128000)
reject_if_open = 1;
rate_bits = HDSP_Frequency176_4KHz;
break;
case 192000:
if (current_rate < 128000)
reject_if_open = 1;
rate_bits = HDSP_Frequency192KHz;
break;
default:
return -EINVAL;
}
if (reject_if_open && (hdsp->capture_pid >= 0 || hdsp->playback_pid >= 0)) {
dev_warn(hdsp->card->dev,
"cannot change speed mode (capture PID = %d, playback PID = %d)\n",
hdsp->capture_pid,
hdsp->playback_pid);
return -EBUSY;
}
hdsp->control_register &= ~HDSP_FrequencyMask;
hdsp->control_register |= rate_bits;
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
/* For HDSP9632 rev 152, need to set DDS value in FREQ register */
if (hdsp->io_type == H9632 && hdsp->firmware_rev >= 152)
hdsp_set_dds_value(hdsp, rate);
if (rate >= 128000) {
hdsp->channel_map = channel_map_H9632_qs;
} else if (rate > 48000) {
if (hdsp->io_type == H9632)
hdsp->channel_map = channel_map_H9632_ds;
else
hdsp->channel_map = channel_map_ds;
} else {
switch (hdsp->io_type) {
case RPM:
case Multiface:
hdsp->channel_map = channel_map_mf_ss;
break;
case Digiface:
case H9652:
hdsp->channel_map = channel_map_df_ss;
break;
case H9632:
hdsp->channel_map = channel_map_H9632_ss;
break;
default:
/* should never happen */
break;
}
}
hdsp->system_sample_rate = rate;
return 0;
}
/*----------------------------------------------------------------------------
MIDI
----------------------------------------------------------------------------*/
static unsigned char snd_hdsp_midi_read_byte (struct hdsp *hdsp, int id)
{
/* the hardware already does the relevant bit-mask with 0xff */
if (id)
return hdsp_read(hdsp, HDSP_midiDataIn1);
else
return hdsp_read(hdsp, HDSP_midiDataIn0);
}
static void snd_hdsp_midi_write_byte (struct hdsp *hdsp, int id, int val)
{
/* the hardware already does the relevant bit-mask with 0xff */
if (id)
hdsp_write(hdsp, HDSP_midiDataOut1, val);
else
hdsp_write(hdsp, HDSP_midiDataOut0, val);
}
static int snd_hdsp_midi_input_available (struct hdsp *hdsp, int id)
{
if (id)
return (hdsp_read(hdsp, HDSP_midiStatusIn1) & 0xff);
else
return (hdsp_read(hdsp, HDSP_midiStatusIn0) & 0xff);
}
static int snd_hdsp_midi_output_possible (struct hdsp *hdsp, int id)
{
int fifo_bytes_used;
if (id)
fifo_bytes_used = hdsp_read(hdsp, HDSP_midiStatusOut1) & 0xff;
else
fifo_bytes_used = hdsp_read(hdsp, HDSP_midiStatusOut0) & 0xff;
if (fifo_bytes_used < 128)
return 128 - fifo_bytes_used;
else
return 0;
}
static void snd_hdsp_flush_midi_input (struct hdsp *hdsp, int id)
{
while (snd_hdsp_midi_input_available (hdsp, id))
snd_hdsp_midi_read_byte (hdsp, id);
}
static int snd_hdsp_midi_output_write (struct hdsp_midi *hmidi)
{
unsigned long flags;
int n_pending;
int to_write;
int i;
unsigned char buf[128];
/* Output is not interrupt driven */
spin_lock_irqsave (&hmidi->lock, flags);
if (hmidi->output) {
if (!snd_rawmidi_transmit_empty (hmidi->output)) {
if ((n_pending = snd_hdsp_midi_output_possible (hmidi->hdsp, hmidi->id)) > 0) {
if (n_pending > (int)sizeof (buf))
n_pending = sizeof (buf);
if ((to_write = snd_rawmidi_transmit (hmidi->output, buf, n_pending)) > 0) {
for (i = 0; i < to_write; ++i)
snd_hdsp_midi_write_byte (hmidi->hdsp, hmidi->id, buf[i]);
}
}
}
}
spin_unlock_irqrestore (&hmidi->lock, flags);
return 0;
}
static int snd_hdsp_midi_input_read (struct hdsp_midi *hmidi)
{
unsigned char buf[128]; /* this buffer is designed to match the MIDI input FIFO size */
unsigned long flags;
int n_pending;
int i;
spin_lock_irqsave (&hmidi->lock, flags);
if ((n_pending = snd_hdsp_midi_input_available (hmidi->hdsp, hmidi->id)) > 0) {
if (hmidi->input) {
if (n_pending > (int)sizeof (buf))
n_pending = sizeof (buf);
for (i = 0; i < n_pending; ++i)
buf[i] = snd_hdsp_midi_read_byte (hmidi->hdsp, hmidi->id);
if (n_pending)
snd_rawmidi_receive (hmidi->input, buf, n_pending);
} else {
/* flush the MIDI input FIFO */
while (n_pending--)
snd_hdsp_midi_read_byte (hmidi->hdsp, hmidi->id);
}
}
hmidi->pending = 0;
if (hmidi->id)
hmidi->hdsp->control_register |= HDSP_Midi1InterruptEnable;
else
hmidi->hdsp->control_register |= HDSP_Midi0InterruptEnable;
hdsp_write(hmidi->hdsp, HDSP_controlRegister, hmidi->hdsp->control_register);
spin_unlock_irqrestore (&hmidi->lock, flags);
return snd_hdsp_midi_output_write (hmidi);
}
static void snd_hdsp_midi_input_trigger(struct snd_rawmidi_substream *substream, int up)
{
struct hdsp *hdsp;
struct hdsp_midi *hmidi;
unsigned long flags;
u32 ie;
hmidi = (struct hdsp_midi *) substream->rmidi->private_data;
hdsp = hmidi->hdsp;
ie = hmidi->id ? HDSP_Midi1InterruptEnable : HDSP_Midi0InterruptEnable;
spin_lock_irqsave (&hdsp->lock, flags);
if (up) {
if (!(hdsp->control_register & ie)) {
snd_hdsp_flush_midi_input (hdsp, hmidi->id);
hdsp->control_register |= ie;
}
} else {
hdsp->control_register &= ~ie;
tasklet_kill(&hdsp->midi_tasklet);
}
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
spin_unlock_irqrestore (&hdsp->lock, flags);
}
static void snd_hdsp_midi_output_timer(unsigned long data)
{
struct hdsp_midi *hmidi = (struct hdsp_midi *) data;
unsigned long flags;
snd_hdsp_midi_output_write(hmidi);
spin_lock_irqsave (&hmidi->lock, flags);
/* this does not bump hmidi->istimer, because the
kernel automatically removes the timer when it
expires, and we are now adding it back, thus
leaving istimer wherever it was set before.
*/
if (hmidi->istimer)
mod_timer(&hmidi->timer, 1 + jiffies);
spin_unlock_irqrestore (&hmidi->lock, flags);
}
static void snd_hdsp_midi_output_trigger(struct snd_rawmidi_substream *substream, int up)
{
struct hdsp_midi *hmidi;
unsigned long flags;
hmidi = (struct hdsp_midi *) substream->rmidi->private_data;
spin_lock_irqsave (&hmidi->lock, flags);
if (up) {
if (!hmidi->istimer) {
setup_timer(&hmidi->timer, snd_hdsp_midi_output_timer,
(unsigned long) hmidi);
mod_timer(&hmidi->timer, 1 + jiffies);
hmidi->istimer++;
}
} else {
if (hmidi->istimer && --hmidi->istimer <= 0)
del_timer (&hmidi->timer);
}
spin_unlock_irqrestore (&hmidi->lock, flags);
if (up)
snd_hdsp_midi_output_write(hmidi);
}
static int snd_hdsp_midi_input_open(struct snd_rawmidi_substream *substream)
{
struct hdsp_midi *hmidi;
hmidi = (struct hdsp_midi *) substream->rmidi->private_data;
spin_lock_irq (&hmidi->lock);
snd_hdsp_flush_midi_input (hmidi->hdsp, hmidi->id);
hmidi->input = substream;
spin_unlock_irq (&hmidi->lock);
return 0;
}
static int snd_hdsp_midi_output_open(struct snd_rawmidi_substream *substream)
{
struct hdsp_midi *hmidi;
hmidi = (struct hdsp_midi *) substream->rmidi->private_data;
spin_lock_irq (&hmidi->lock);
hmidi->output = substream;
spin_unlock_irq (&hmidi->lock);
return 0;
}
static int snd_hdsp_midi_input_close(struct snd_rawmidi_substream *substream)
{
struct hdsp_midi *hmidi;
snd_hdsp_midi_input_trigger (substream, 0);
hmidi = (struct hdsp_midi *) substream->rmidi->private_data;
spin_lock_irq (&hmidi->lock);
hmidi->input = NULL;
spin_unlock_irq (&hmidi->lock);
return 0;
}
static int snd_hdsp_midi_output_close(struct snd_rawmidi_substream *substream)
{
struct hdsp_midi *hmidi;
snd_hdsp_midi_output_trigger (substream, 0);
hmidi = (struct hdsp_midi *) substream->rmidi->private_data;
spin_lock_irq (&hmidi->lock);
hmidi->output = NULL;
spin_unlock_irq (&hmidi->lock);
return 0;
}
static struct snd_rawmidi_ops snd_hdsp_midi_output =
{
.open = snd_hdsp_midi_output_open,
.close = snd_hdsp_midi_output_close,
.trigger = snd_hdsp_midi_output_trigger,
};
static struct snd_rawmidi_ops snd_hdsp_midi_input =
{
.open = snd_hdsp_midi_input_open,
.close = snd_hdsp_midi_input_close,
.trigger = snd_hdsp_midi_input_trigger,
};
static int snd_hdsp_create_midi (struct snd_card *card, struct hdsp *hdsp, int id)
{
char buf[40];
hdsp->midi[id].id = id;
hdsp->midi[id].rmidi = NULL;
hdsp->midi[id].input = NULL;
hdsp->midi[id].output = NULL;
hdsp->midi[id].hdsp = hdsp;
hdsp->midi[id].istimer = 0;
hdsp->midi[id].pending = 0;
spin_lock_init (&hdsp->midi[id].lock);
snprintf(buf, sizeof(buf), "%s MIDI %d", card->shortname, id + 1);
if (snd_rawmidi_new (card, buf, id, 1, 1, &hdsp->midi[id].rmidi) < 0)
return -1;
sprintf(hdsp->midi[id].rmidi->name, "HDSP MIDI %d", id+1);
hdsp->midi[id].rmidi->private_data = &hdsp->midi[id];
snd_rawmidi_set_ops (hdsp->midi[id].rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_hdsp_midi_output);
snd_rawmidi_set_ops (hdsp->midi[id].rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_hdsp_midi_input);
hdsp->midi[id].rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT |
SNDRV_RAWMIDI_INFO_INPUT |
SNDRV_RAWMIDI_INFO_DUPLEX;
return 0;
}
/*-----------------------------------------------------------------------------
Control Interface
----------------------------------------------------------------------------*/
static u32 snd_hdsp_convert_from_aes(struct snd_aes_iec958 *aes)
{
u32 val = 0;
val |= (aes->status[0] & IEC958_AES0_PROFESSIONAL) ? HDSP_SPDIFProfessional : 0;
val |= (aes->status[0] & IEC958_AES0_NONAUDIO) ? HDSP_SPDIFNonAudio : 0;
if (val & HDSP_SPDIFProfessional)
val |= (aes->status[0] & IEC958_AES0_PRO_EMPHASIS_5015) ? HDSP_SPDIFEmphasis : 0;
else
val |= (aes->status[0] & IEC958_AES0_CON_EMPHASIS_5015) ? HDSP_SPDIFEmphasis : 0;
return val;
}
static void snd_hdsp_convert_to_aes(struct snd_aes_iec958 *aes, u32 val)
{
aes->status[0] = ((val & HDSP_SPDIFProfessional) ? IEC958_AES0_PROFESSIONAL : 0) |
((val & HDSP_SPDIFNonAudio) ? IEC958_AES0_NONAUDIO : 0);
if (val & HDSP_SPDIFProfessional)
aes->status[0] |= (val & HDSP_SPDIFEmphasis) ? IEC958_AES0_PRO_EMPHASIS_5015 : 0;
else
aes->status[0] |= (val & HDSP_SPDIFEmphasis) ? IEC958_AES0_CON_EMPHASIS_5015 : 0;
}
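/* Only three channel-status attributes survive this round trip:
professional/consumer, non-audio, and 50/15us emphasis; the remaining
AES bytes have no corresponding bits in the control register. */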
static int snd_hdsp_control_spdif_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
return 0;
}
static int snd_hdsp_control_spdif_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
snd_hdsp_convert_to_aes(&ucontrol->value.iec958, hdsp->creg_spdif);
return 0;
}
static int snd_hdsp_control_spdif_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
u32 val;
val = snd_hdsp_convert_from_aes(&ucontrol->value.iec958);
spin_lock_irq(&hdsp->lock);
change = val != hdsp->creg_spdif;
hdsp->creg_spdif = val;
spin_unlock_irq(&hdsp->lock);
return change;
}
static int snd_hdsp_control_spdif_stream_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
return 0;
}
static int snd_hdsp_control_spdif_stream_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
snd_hdsp_convert_to_aes(&ucontrol->value.iec958, hdsp->creg_spdif_stream);
return 0;
}
static int snd_hdsp_control_spdif_stream_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
u32 val;
val = snd_hdsp_convert_from_aes(&ucontrol->value.iec958);
spin_lock_irq(&hdsp->lock);
change = val != hdsp->creg_spdif_stream;
hdsp->creg_spdif_stream = val;
hdsp->control_register &= ~(HDSP_SPDIFProfessional | HDSP_SPDIFNonAudio | HDSP_SPDIFEmphasis);
hdsp->control_register |= val;
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
spin_unlock_irq(&hdsp->lock);
return change;
}
static int snd_hdsp_control_spdif_mask_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
return 0;
}
static int snd_hdsp_control_spdif_mask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.iec958.status[0] = kcontrol->private_value;
return 0;
}
#define HDSP_SPDIF_IN(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.info = snd_hdsp_info_spdif_in, \
.get = snd_hdsp_get_spdif_in, \
.put = snd_hdsp_put_spdif_in }
static unsigned int hdsp_spdif_in(struct hdsp *hdsp)
{
return hdsp_decode_spdif_in(hdsp->control_register & HDSP_SPDIFInputMask);
}
static int hdsp_set_spdif_input(struct hdsp *hdsp, int in)
{
hdsp->control_register &= ~HDSP_SPDIFInputMask;
hdsp->control_register |= hdsp_encode_spdif_in(in);
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
return 0;
}
static int snd_hdsp_info_spdif_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[4] = {
"Optical", "Coaxial", "Internal", "AES"
};
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
return snd_ctl_enum_info(uinfo, 1, (hdsp->io_type == H9632) ? 4 : 3,
texts);
}
static int snd_hdsp_get_spdif_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_spdif_in(hdsp);
return 0;
}
static int snd_hdsp_put_spdif_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
unsigned int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.enumerated.item[0] % ((hdsp->io_type == H9632) ? 4 : 3);
spin_lock_irq(&hdsp->lock);
change = val != hdsp_spdif_in(hdsp);
if (change)
hdsp_set_spdif_input(hdsp, val);
spin_unlock_irq(&hdsp->lock);
return change;
}
#define HDSP_TOGGLE_SETTING(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.private_value = xindex, \
.info = snd_hdsp_info_toggle_setting, \
.get = snd_hdsp_get_toggle_setting, \
.put = snd_hdsp_put_toggle_setting \
}
static int hdsp_toggle_setting(struct hdsp *hdsp, u32 regmask)
{
return (hdsp->control_register & regmask) ? 1 : 0;
}
static int hdsp_set_toggle_setting(struct hdsp *hdsp, u32 regmask, int out)
{
if (out)
hdsp->control_register |= regmask;
else
hdsp->control_register &= ~regmask;
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
return 0;
}
#define snd_hdsp_info_toggle_setting snd_ctl_boolean_mono_info
static int snd_hdsp_get_toggle_setting(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
u32 regmask = kcontrol->private_value;
spin_lock_irq(&hdsp->lock);
ucontrol->value.integer.value[0] = hdsp_toggle_setting(hdsp, regmask);
spin_unlock_irq(&hdsp->lock);
return 0;
}
static int snd_hdsp_put_toggle_setting(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
u32 regmask = kcontrol->private_value;
int change;
unsigned int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.integer.value[0] & 1;
spin_lock_irq(&hdsp->lock);
change = (int) val != hdsp_toggle_setting(hdsp, regmask);
if (change)
hdsp_set_toggle_setting(hdsp, regmask, val);
spin_unlock_irq(&hdsp->lock);
return change;
}
#define HDSP_SPDIF_SAMPLE_RATE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.access = SNDRV_CTL_ELEM_ACCESS_READ, \
.info = snd_hdsp_info_spdif_sample_rate, \
.get = snd_hdsp_get_spdif_sample_rate \
}
static int snd_hdsp_info_spdif_sample_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {
"32000", "44100", "48000", "64000", "88200", "96000",
"None", "128000", "176400", "192000"
};
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
return snd_ctl_enum_info(uinfo, 1, (hdsp->io_type == H9632) ? 10 : 7,
texts);
}
static int snd_hdsp_get_spdif_sample_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
switch (hdsp_spdif_sample_rate(hdsp)) {
case 32000:
ucontrol->value.enumerated.item[0] = 0;
break;
case 44100:
ucontrol->value.enumerated.item[0] = 1;
break;
case 48000:
ucontrol->value.enumerated.item[0] = 2;
break;
case 64000:
ucontrol->value.enumerated.item[0] = 3;
break;
case 88200:
ucontrol->value.enumerated.item[0] = 4;
break;
case 96000:
ucontrol->value.enumerated.item[0] = 5;
break;
case 128000:
ucontrol->value.enumerated.item[0] = 7;
break;
case 176400:
ucontrol->value.enumerated.item[0] = 8;
break;
case 192000:
ucontrol->value.enumerated.item[0] = 9;
break;
default:
ucontrol->value.enumerated.item[0] = 6;
}
return 0;
}
#define HDSP_SYSTEM_SAMPLE_RATE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.access = SNDRV_CTL_ELEM_ACCESS_READ, \
.info = snd_hdsp_info_system_sample_rate, \
.get = snd_hdsp_get_system_sample_rate \
}
static int snd_hdsp_info_system_sample_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
return 0;
}
static int snd_hdsp_get_system_sample_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] = hdsp->system_sample_rate;
return 0;
}
#define HDSP_AUTOSYNC_SAMPLE_RATE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.access = SNDRV_CTL_ELEM_ACCESS_READ, \
.info = snd_hdsp_info_autosync_sample_rate, \
.get = snd_hdsp_get_autosync_sample_rate \
}
static int snd_hdsp_info_autosync_sample_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
static const char * const texts[] = {
"32000", "44100", "48000", "64000", "88200", "96000",
"None", "128000", "176400", "192000"
};
return snd_ctl_enum_info(uinfo, 1, (hdsp->io_type == H9632) ? 10 : 7,
texts);
}
static int snd_hdsp_get_autosync_sample_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
switch (hdsp_external_sample_rate(hdsp)) {
case 32000:
ucontrol->value.enumerated.item[0] = 0;
break;
case 44100:
ucontrol->value.enumerated.item[0] = 1;
break;
case 48000:
ucontrol->value.enumerated.item[0] = 2;
break;
case 64000:
ucontrol->value.enumerated.item[0] = 3;
break;
case 88200:
ucontrol->value.enumerated.item[0] = 4;
break;
case 96000:
ucontrol->value.enumerated.item[0] = 5;
break;
case 128000:
ucontrol->value.enumerated.item[0] = 7;
break;
case 176400:
ucontrol->value.enumerated.item[0] = 8;
break;
case 192000:
ucontrol->value.enumerated.item[0] = 9;
break;
default:
ucontrol->value.enumerated.item[0] = 6;
}
return 0;
}
#define HDSP_SYSTEM_CLOCK_MODE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.access = SNDRV_CTL_ELEM_ACCESS_READ, \
.info = snd_hdsp_info_system_clock_mode, \
.get = snd_hdsp_get_system_clock_mode \
}
static int hdsp_system_clock_mode(struct hdsp *hdsp)
{
if (hdsp->control_register & HDSP_ClockModeMaster)
return 0;
else if (hdsp_external_sample_rate(hdsp) != hdsp->system_sample_rate)
return 0;
return 1;
}
static int snd_hdsp_info_system_clock_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {"Master", "Slave" };
return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_hdsp_get_system_clock_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_system_clock_mode(hdsp);
return 0;
}
#define HDSP_CLOCK_SOURCE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.info = snd_hdsp_info_clock_source, \
.get = snd_hdsp_get_clock_source, \
.put = snd_hdsp_put_clock_source \
}
static int hdsp_clock_source(struct hdsp *hdsp)
{
if (hdsp->control_register & HDSP_ClockModeMaster) {
switch (hdsp->system_sample_rate) {
case 32000:
return 1;
case 44100:
return 2;
case 48000:
return 3;
case 64000:
return 4;
case 88200:
return 5;
case 96000:
return 6;
case 128000:
return 7;
case 176400:
return 8;
case 192000:
return 9;
default:
return 3;
}
} else {
return 0;
}
}
static int hdsp_set_clock_source(struct hdsp *hdsp, int mode)
{
int rate;
switch (mode) {
case HDSP_CLOCK_SOURCE_AUTOSYNC:
if (hdsp_external_sample_rate(hdsp) != 0) {
if (!hdsp_set_rate(hdsp, hdsp_external_sample_rate(hdsp), 1)) {
hdsp->control_register &= ~HDSP_ClockModeMaster;
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
return 0;
}
}
return -1;
case HDSP_CLOCK_SOURCE_INTERNAL_32KHZ:
rate = 32000;
break;
case HDSP_CLOCK_SOURCE_INTERNAL_44_1KHZ:
rate = 44100;
break;
case HDSP_CLOCK_SOURCE_INTERNAL_48KHZ:
rate = 48000;
break;
case HDSP_CLOCK_SOURCE_INTERNAL_64KHZ:
rate = 64000;
break;
case HDSP_CLOCK_SOURCE_INTERNAL_88_2KHZ:
rate = 88200;
break;
case HDSP_CLOCK_SOURCE_INTERNAL_96KHZ:
rate = 96000;
break;
case HDSP_CLOCK_SOURCE_INTERNAL_128KHZ:
rate = 128000;
break;
case HDSP_CLOCK_SOURCE_INTERNAL_176_4KHZ:
rate = 176400;
break;
case HDSP_CLOCK_SOURCE_INTERNAL_192KHZ:
rate = 192000;
break;
default:
rate = 48000;
}
hdsp->control_register |= HDSP_ClockModeMaster;
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
hdsp_set_rate(hdsp, rate, 1);
return 0;
}
static int snd_hdsp_info_clock_source(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {
"AutoSync", "Internal 32.0 kHz", "Internal 44.1 kHz",
"Internal 48.0 kHz", "Internal 64.0 kHz", "Internal 88.2 kHz",
"Internal 96.0 kHz", "Internal 128 kHz", "Internal 176.4 kHz",
"Internal 192.0 KHz"
};
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
return snd_ctl_enum_info(uinfo, 1, (hdsp->io_type == H9632) ? 10 : 7,
texts);
}
static int snd_hdsp_get_clock_source(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_clock_source(hdsp);
return 0;
}
static int snd_hdsp_put_clock_source(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.enumerated.item[0];
if (val < 0) val = 0;
if (hdsp->io_type == H9632) {
if (val > 9)
val = 9;
} else {
if (val > 6)
val = 6;
}
spin_lock_irq(&hdsp->lock);
if (val != hdsp_clock_source(hdsp))
change = (hdsp_set_clock_source(hdsp, val) == 0) ? 1 : 0;
else
change = 0;
spin_unlock_irq(&hdsp->lock);
return change;
}
#define snd_hdsp_info_clock_source_lock snd_ctl_boolean_mono_info
static int snd_hdsp_get_clock_source_lock(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] = hdsp->clock_source_locked;
return 0;
}
static int snd_hdsp_put_clock_source_lock(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
change = (int)ucontrol->value.integer.value[0] != hdsp->clock_source_locked;
if (change)
hdsp->clock_source_locked = !!ucontrol->value.integer.value[0];
return change;
}
#define HDSP_DA_GAIN(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.info = snd_hdsp_info_da_gain, \
.get = snd_hdsp_get_da_gain, \
.put = snd_hdsp_put_da_gain \
}
static int hdsp_da_gain(struct hdsp *hdsp)
{
switch (hdsp->control_register & HDSP_DAGainMask) {
case HDSP_DAGainHighGain:
return 0;
case HDSP_DAGainPlus4dBu:
return 1;
case HDSP_DAGainMinus10dBV:
return 2;
default:
return 1;
}
}
static int hdsp_set_da_gain(struct hdsp *hdsp, int mode)
{
hdsp->control_register &= ~HDSP_DAGainMask;
switch (mode) {
case 0:
hdsp->control_register |= HDSP_DAGainHighGain;
break;
case 1:
hdsp->control_register |= HDSP_DAGainPlus4dBu;
break;
case 2:
hdsp->control_register |= HDSP_DAGainMinus10dBV;
break;
default:
return -1;
}
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
return 0;
}
static int snd_hdsp_info_da_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {"Hi Gain", "+4 dBu", "-10 dbV"};
return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_hdsp_get_da_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_da_gain(hdsp);
return 0;
}
static int snd_hdsp_put_da_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.enumerated.item[0];
if (val < 0) val = 0;
if (val > 2) val = 2;
spin_lock_irq(&hdsp->lock);
if (val != hdsp_da_gain(hdsp))
change = (hdsp_set_da_gain(hdsp, val) == 0) ? 1 : 0;
else
change = 0;
spin_unlock_irq(&hdsp->lock);
return change;
}
#define HDSP_AD_GAIN(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.info = snd_hdsp_info_ad_gain, \
.get = snd_hdsp_get_ad_gain, \
.put = snd_hdsp_put_ad_gain \
}
static int hdsp_ad_gain(struct hdsp *hdsp)
{
switch (hdsp->control_register & HDSP_ADGainMask) {
case HDSP_ADGainMinus10dBV:
return 0;
case HDSP_ADGainPlus4dBu:
return 1;
case HDSP_ADGainLowGain:
return 2;
default:
return 1;
}
}
static int hdsp_set_ad_gain(struct hdsp *hdsp, int mode)
{
hdsp->control_register &= ~HDSP_ADGainMask;
switch (mode) {
case 0:
hdsp->control_register |= HDSP_ADGainMinus10dBV;
break;
case 1:
hdsp->control_register |= HDSP_ADGainPlus4dBu;
break;
case 2:
hdsp->control_register |= HDSP_ADGainLowGain;
break;
default:
return -1;
}
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
return 0;
}
static int snd_hdsp_info_ad_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {"-10 dBV", "+4 dBu", "Lo Gain"};
return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_hdsp_get_ad_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_ad_gain(hdsp);
return 0;
}
static int snd_hdsp_put_ad_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.enumerated.item[0];
if (val < 0) val = 0;
if (val > 2) val = 2;
spin_lock_irq(&hdsp->lock);
if (val != hdsp_ad_gain(hdsp))
change = (hdsp_set_ad_gain(hdsp, val) == 0) ? 1 : 0;
else
change = 0;
spin_unlock_irq(&hdsp->lock);
return change;
}
#define HDSP_PHONE_GAIN(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.info = snd_hdsp_info_phone_gain, \
.get = snd_hdsp_get_phone_gain, \
.put = snd_hdsp_put_phone_gain \
}
static int hdsp_phone_gain(struct hdsp *hdsp)
{
switch (hdsp->control_register & HDSP_PhoneGainMask) {
case HDSP_PhoneGain0dB:
return 0;
case HDSP_PhoneGainMinus6dB:
return 1;
case HDSP_PhoneGainMinus12dB:
return 2;
default:
return 0;
}
}
static int hdsp_set_phone_gain(struct hdsp *hdsp, int mode)
{
hdsp->control_register &= ~HDSP_PhoneGainMask;
switch (mode) {
case 0:
hdsp->control_register |= HDSP_PhoneGain0dB;
break;
case 1:
hdsp->control_register |= HDSP_PhoneGainMinus6dB;
break;
case 2:
hdsp->control_register |= HDSP_PhoneGainMinus12dB;
break;
default:
return -1;
}
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
return 0;
}
static int snd_hdsp_info_phone_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {"0 dB", "-6 dB", "-12 dB"};
return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_hdsp_get_phone_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_phone_gain(hdsp);
return 0;
}
static int snd_hdsp_put_phone_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.enumerated.item[0];
if (val < 0) val = 0;
if (val > 2) val = 2;
spin_lock_irq(&hdsp->lock);
if (val != hdsp_phone_gain(hdsp))
change = (hdsp_set_phone_gain(hdsp, val) == 0) ? 1 : 0;
else
change = 0;
spin_unlock_irq(&hdsp->lock);
return change;
}
#define HDSP_PREF_SYNC_REF(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.info = snd_hdsp_info_pref_sync_ref, \
.get = snd_hdsp_get_pref_sync_ref, \
.put = snd_hdsp_put_pref_sync_ref \
}
static int hdsp_pref_sync_ref(struct hdsp *hdsp)
{
/* Notice that this looks at the requested sync source,
not the one actually in use.
*/
switch (hdsp->control_register & HDSP_SyncRefMask) {
case HDSP_SyncRef_ADAT1:
return HDSP_SYNC_FROM_ADAT1;
case HDSP_SyncRef_ADAT2:
return HDSP_SYNC_FROM_ADAT2;
case HDSP_SyncRef_ADAT3:
return HDSP_SYNC_FROM_ADAT3;
case HDSP_SyncRef_SPDIF:
return HDSP_SYNC_FROM_SPDIF;
case HDSP_SyncRef_WORD:
return HDSP_SYNC_FROM_WORD;
case HDSP_SyncRef_ADAT_SYNC:
return HDSP_SYNC_FROM_ADAT_SYNC;
default:
return HDSP_SYNC_FROM_WORD;
}
}
static int hdsp_set_pref_sync_ref(struct hdsp *hdsp, int pref)
{
hdsp->control_register &= ~HDSP_SyncRefMask;
switch (pref) {
case HDSP_SYNC_FROM_ADAT1:
hdsp->control_register &= ~HDSP_SyncRefMask; /* clear SyncRef bits */
break;
case HDSP_SYNC_FROM_ADAT2:
hdsp->control_register |= HDSP_SyncRef_ADAT2;
break;
case HDSP_SYNC_FROM_ADAT3:
hdsp->control_register |= HDSP_SyncRef_ADAT3;
break;
case HDSP_SYNC_FROM_SPDIF:
hdsp->control_register |= HDSP_SyncRef_SPDIF;
break;
case HDSP_SYNC_FROM_WORD:
hdsp->control_register |= HDSP_SyncRef_WORD;
break;
case HDSP_SYNC_FROM_ADAT_SYNC:
hdsp->control_register |= HDSP_SyncRef_ADAT_SYNC;
break;
default:
return -1;
}
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
return 0;
}
static int snd_hdsp_info_pref_sync_ref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {
"Word", "IEC958", "ADAT1", "ADAT Sync", "ADAT2", "ADAT3"
};
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int num_items;
switch (hdsp->io_type) {
case Digiface:
case H9652:
num_items = 6;
break;
case Multiface:
num_items = 4;
break;
case H9632:
num_items = 3;
break;
default:
return -EINVAL;
}
return snd_ctl_enum_info(uinfo, 1, num_items, texts);
}
static int snd_hdsp_get_pref_sync_ref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_pref_sync_ref(hdsp);
return 0;
}
static int snd_hdsp_put_pref_sync_ref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change, max;
unsigned int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
switch (hdsp->io_type) {
case Digiface:
case H9652:
max = 6;
break;
case Multiface:
max = 4;
break;
case H9632:
max = 3;
break;
default:
return -EIO;
}
val = ucontrol->value.enumerated.item[0] % max;
spin_lock_irq(&hdsp->lock);
change = (int)val != hdsp_pref_sync_ref(hdsp);
hdsp_set_pref_sync_ref(hdsp, val);
spin_unlock_irq(&hdsp->lock);
return change;
}
#define HDSP_AUTOSYNC_REF(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.access = SNDRV_CTL_ELEM_ACCESS_READ, \
.info = snd_hdsp_info_autosync_ref, \
.get = snd_hdsp_get_autosync_ref, \
}
static int hdsp_autosync_ref(struct hdsp *hdsp)
{
/* This looks at the autosync selected sync reference */
unsigned int status2 = hdsp_read(hdsp, HDSP_status2Register);
switch (status2 & HDSP_SelSyncRefMask) {
case HDSP_SelSyncRef_WORD:
return HDSP_AUTOSYNC_FROM_WORD;
case HDSP_SelSyncRef_ADAT_SYNC:
return HDSP_AUTOSYNC_FROM_ADAT_SYNC;
case HDSP_SelSyncRef_SPDIF:
return HDSP_AUTOSYNC_FROM_SPDIF;
case HDSP_SelSyncRefMask:
return HDSP_AUTOSYNC_FROM_NONE;
case HDSP_SelSyncRef_ADAT1:
return HDSP_AUTOSYNC_FROM_ADAT1;
case HDSP_SelSyncRef_ADAT2:
return HDSP_AUTOSYNC_FROM_ADAT2;
case HDSP_SelSyncRef_ADAT3:
return HDSP_AUTOSYNC_FROM_ADAT3;
default:
return HDSP_AUTOSYNC_FROM_WORD;
}
}
static int snd_hdsp_info_autosync_ref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {
"Word", "ADAT Sync", "IEC958", "None", "ADAT1", "ADAT2", "ADAT3"
};
return snd_ctl_enum_info(uinfo, 1, 7, texts);
}
static int snd_hdsp_get_autosync_ref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_autosync_ref(hdsp);
return 0;
}
#define HDSP_PRECISE_POINTER(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_CARD, \
.name = xname, \
.index = xindex, \
.info = snd_hdsp_info_precise_pointer, \
.get = snd_hdsp_get_precise_pointer, \
.put = snd_hdsp_put_precise_pointer \
}
static int hdsp_set_precise_pointer(struct hdsp *hdsp, int precise)
{
if (precise)
hdsp->precise_ptr = 1;
else
hdsp->precise_ptr = 0;
return 0;
}
#define snd_hdsp_info_precise_pointer snd_ctl_boolean_mono_info
static int snd_hdsp_get_precise_pointer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
spin_lock_irq(&hdsp->lock);
ucontrol->value.integer.value[0] = hdsp->precise_ptr;
spin_unlock_irq(&hdsp->lock);
return 0;
}
static int snd_hdsp_put_precise_pointer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
unsigned int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.integer.value[0] & 1;
spin_lock_irq(&hdsp->lock);
change = (int)val != hdsp->precise_ptr;
hdsp_set_precise_pointer(hdsp, val);
spin_unlock_irq(&hdsp->lock);
return change;
}
#define HDSP_USE_MIDI_TASKLET(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_CARD, \
.name = xname, \
.index = xindex, \
.info = snd_hdsp_info_use_midi_tasklet, \
.get = snd_hdsp_get_use_midi_tasklet, \
.put = snd_hdsp_put_use_midi_tasklet \
}
static int hdsp_set_use_midi_tasklet(struct hdsp *hdsp, int use_tasklet)
{
if (use_tasklet)
hdsp->use_midi_tasklet = 1;
else
hdsp->use_midi_tasklet = 0;
return 0;
}
#define snd_hdsp_info_use_midi_tasklet snd_ctl_boolean_mono_info
static int snd_hdsp_get_use_midi_tasklet(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
spin_lock_irq(&hdsp->lock);
ucontrol->value.integer.value[0] = hdsp->use_midi_tasklet;
spin_unlock_irq(&hdsp->lock);
return 0;
}
static int snd_hdsp_put_use_midi_tasklet(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
unsigned int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.integer.value[0] & 1;
spin_lock_irq(&hdsp->lock);
change = (int)val != hdsp->use_midi_tasklet;
hdsp_set_use_midi_tasklet(hdsp, val);
spin_unlock_irq(&hdsp->lock);
return change;
}
#define HDSP_MIXER(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
.name = xname, \
.index = xindex, \
.device = 0, \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
.info = snd_hdsp_info_mixer, \
.get = snd_hdsp_get_mixer, \
.put = snd_hdsp_put_mixer \
}
static int snd_hdsp_info_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 3;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 65536;
uinfo->value.integer.step = 1;
return 0;
}
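/* The mixer control carries three integers: value[0] is the source
(input channels first, playback channels offset by max_channels),
value[1] the destination, and value[2] the gain, where 32768
(UNITY_GAIN) is nominally 0 dB and 0 (MINUS_INFINITY_GAIN) mutes the
path. */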
static int snd_hdsp_get_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int source;
int destination;
int addr;
source = ucontrol->value.integer.value[0];
destination = ucontrol->value.integer.value[1];
if (source >= hdsp->max_channels)
addr = hdsp_playback_to_output_key(hdsp,source-hdsp->max_channels,destination);
else
addr = hdsp_input_to_output_key(hdsp,source, destination);
spin_lock_irq(&hdsp->lock);
ucontrol->value.integer.value[2] = hdsp_read_gain (hdsp, addr);
spin_unlock_irq(&hdsp->lock);
return 0;
}
static int snd_hdsp_put_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
int source;
int destination;
int gain;
int addr;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
source = ucontrol->value.integer.value[0];
destination = ucontrol->value.integer.value[1];
if (source >= hdsp->max_channels)
addr = hdsp_playback_to_output_key(hdsp,source-hdsp->max_channels, destination);
else
addr = hdsp_input_to_output_key(hdsp,source, destination);
gain = ucontrol->value.integer.value[2];
spin_lock_irq(&hdsp->lock);
change = gain != hdsp_read_gain(hdsp, addr);
if (change)
hdsp_write_gain(hdsp, addr, gain);
spin_unlock_irq(&hdsp->lock);
return change;
}
#define HDSP_WC_SYNC_CHECK(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
.info = snd_hdsp_info_sync_check, \
.get = snd_hdsp_get_wc_sync_check \
}
static int snd_hdsp_info_sync_check(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {"No Lock", "Lock", "Sync" };
return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int hdsp_wc_sync_check(struct hdsp *hdsp)
{
int status2 = hdsp_read(hdsp, HDSP_status2Register);
if (status2 & HDSP_wc_lock) {
if (status2 & HDSP_wc_sync)
return 2;
else
return 1;
}
return 0;
}
static int snd_hdsp_get_wc_sync_check(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_wc_sync_check(hdsp);
return 0;
}
#define HDSP_SPDIF_SYNC_CHECK(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
.info = snd_hdsp_info_sync_check, \
.get = snd_hdsp_get_spdif_sync_check \
}
static int hdsp_spdif_sync_check(struct hdsp *hdsp)
{
int status = hdsp_read(hdsp, HDSP_statusRegister);
if (status & HDSP_SPDIFErrorFlag)
return 0;
if (status & HDSP_SPDIFSync)
return 2;
return 1;
}
static int snd_hdsp_get_spdif_sync_check(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_spdif_sync_check(hdsp);
return 0;
}
#define HDSP_ADATSYNC_SYNC_CHECK(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
.info = snd_hdsp_info_sync_check, \
.get = snd_hdsp_get_adatsync_sync_check \
}
static int hdsp_adatsync_sync_check(struct hdsp *hdsp)
{
int status = hdsp_read(hdsp, HDSP_statusRegister);
if (status & HDSP_TimecodeLock) {
if (status & HDSP_TimecodeSync)
return 2;
else
return 1;
} else
return 0;
}
static int snd_hdsp_get_adatsync_sync_check(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_adatsync_sync_check(hdsp);
return 0;
}
#define HDSP_ADAT_SYNC_CHECK \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
.info = snd_hdsp_info_sync_check, \
.get = snd_hdsp_get_adat_sync_check \
}
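/*
 * The per-port ADAT lock/sync bits occupy adjacent status register
 * positions below HDSP_Lock0/HDSP_Sync0, so shifting right by the port
 * index selects the bits for that port.
 */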
static int hdsp_adat_sync_check(struct hdsp *hdsp, int idx)
{
int status = hdsp_read(hdsp, HDSP_statusRegister);
if (status & (HDSP_Lock0>>idx)) {
if (status & (HDSP_Sync0>>idx))
return 2;
else
return 1;
} else
return 0;
}
static int snd_hdsp_get_adat_sync_check(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
int offset;
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
offset = ucontrol->id.index - 1;
if (snd_BUG_ON(offset < 0))
return -EINVAL;
switch (hdsp->io_type) {
case Digiface:
case H9652:
if (offset >= 3)
return -EINVAL;
break;
case Multiface:
case H9632:
if (offset >= 1)
return -EINVAL;
break;
default:
return -EIO;
}
ucontrol->value.enumerated.item[0] = hdsp_adat_sync_check(hdsp, offset);
return 0;
}
#define HDSP_DDS_OFFSET(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.info = snd_hdsp_info_dds_offset, \
.get = snd_hdsp_get_dds_offset, \
.put = snd_hdsp_put_dds_offset \
}
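/*
 * The DDS register holds DDS_NUMERATOR / rate for the single-speed
 * equivalent of the current rate. To recover the real rate, invert the
 * division and scale by 2 (double speed) or 4 (quad speed); e.g. a
 * dds_value of DDS_NUMERATOR / 44100 at 88.2 kHz decodes to 2 * 44100.
 */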
static int hdsp_dds_offset(struct hdsp *hdsp)
{
u64 n;
unsigned int dds_value = hdsp->dds_value;
int system_sample_rate = hdsp->system_sample_rate;
if (!dds_value)
return 0;
n = DDS_NUMERATOR;
/*
* dds_value = n / rate
* rate = n / dds_value
*/
n = div_u64(n, dds_value);
if (system_sample_rate >= 112000)
n *= 4;
else if (system_sample_rate >= 56000)
n *= 2;
return ((int)n) - system_sample_rate;
}
static int hdsp_set_dds_offset(struct hdsp *hdsp, int offset_hz)
{
int rate = hdsp->system_sample_rate + offset_hz;
hdsp_set_dds_value(hdsp, rate);
return 0;
}
static int snd_hdsp_info_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = -5000;
uinfo->value.integer.max = 5000;
return 0;
}
static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
return 0;
}
static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.integer.value[0];
spin_lock_irq(&hdsp->lock);
if (val != hdsp_dds_offset(hdsp))
change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
else
change = 0;
spin_unlock_irq(&hdsp->lock);
return change;
}
static struct snd_kcontrol_new snd_hdsp_9632_controls[] = {
HDSP_DA_GAIN("DA Gain", 0),
HDSP_AD_GAIN("AD Gain", 0),
HDSP_PHONE_GAIN("Phones Gain", 0),
HDSP_TOGGLE_SETTING("XLR Breakout Cable", HDSP_XLRBreakoutCable),
HDSP_DDS_OFFSET("DDS Sample Rate Offset", 0)
};
static struct snd_kcontrol_new snd_hdsp_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
.info = snd_hdsp_control_spdif_info,
.get = snd_hdsp_control_spdif_get,
.put = snd_hdsp_control_spdif_put,
},
{
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM),
.info = snd_hdsp_control_spdif_stream_info,
.get = snd_hdsp_control_spdif_stream_get,
.put = snd_hdsp_control_spdif_stream_put,
},
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
.info = snd_hdsp_control_spdif_mask_info,
.get = snd_hdsp_control_spdif_mask_get,
.private_value = IEC958_AES0_NONAUDIO |
IEC958_AES0_PROFESSIONAL |
IEC958_AES0_CON_EMPHASIS,
},
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
.info = snd_hdsp_control_spdif_mask_info,
.get = snd_hdsp_control_spdif_mask_get,
.private_value = IEC958_AES0_NONAUDIO |
IEC958_AES0_PROFESSIONAL |
IEC958_AES0_PRO_EMPHASIS,
},
HDSP_MIXER("Mixer", 0),
HDSP_SPDIF_IN("IEC958 Input Connector", 0),
HDSP_TOGGLE_SETTING("IEC958 Output also on ADAT1", HDSP_SPDIFOpticalOut),
HDSP_TOGGLE_SETTING("IEC958 Professional Bit", HDSP_SPDIFProfessional),
HDSP_TOGGLE_SETTING("IEC958 Emphasis Bit", HDSP_SPDIFEmphasis),
HDSP_TOGGLE_SETTING("IEC958 Non-audio Bit", HDSP_SPDIFNonAudio),
/* 'Sample Clock Source' complies with the alsa control naming scheme */
HDSP_CLOCK_SOURCE("Sample Clock Source", 0),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Sample Clock Source Locking",
.info = snd_hdsp_info_clock_source_lock,
.get = snd_hdsp_get_clock_source_lock,
.put = snd_hdsp_put_clock_source_lock,
},
HDSP_SYSTEM_CLOCK_MODE("System Clock Mode", 0),
HDSP_PREF_SYNC_REF("Preferred Sync Reference", 0),
HDSP_AUTOSYNC_REF("AutoSync Reference", 0),
HDSP_SPDIF_SAMPLE_RATE("SPDIF Sample Rate", 0),
HDSP_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
/* 'External Rate' complies with the alsa control naming scheme */
HDSP_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
HDSP_WC_SYNC_CHECK("Word Clock Lock Status", 0),
HDSP_SPDIF_SYNC_CHECK("SPDIF Lock Status", 0),
HDSP_ADATSYNC_SYNC_CHECK("ADAT Sync Lock Status", 0),
HDSP_TOGGLE_SETTING("Line Out", HDSP_LineOut),
HDSP_PRECISE_POINTER("Precise Pointer", 0),
HDSP_USE_MIDI_TASKLET("Use Midi Tasklet", 0),
};
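/*
 * RPM input gain: each input pair is controlled by a two-bit field in the
 * control register. A cleared field (presumably HDSP_RPM_Inp12_Phon_0dB)
 * selects "Phono 0dB", which is why the decoders below fall through to 1,
 * the "Phono 0dB" entry of snd_hdsp_info_rpm_input, when no bit matches.
 */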
static int hdsp_rpm_input12(struct hdsp *hdsp)
{
switch (hdsp->control_register & HDSP_RPM_Inp12) {
case HDSP_RPM_Inp12_Phon_6dB:
return 0;
case HDSP_RPM_Inp12_Phon_n6dB:
return 2;
case HDSP_RPM_Inp12_Line_0dB:
return 3;
case HDSP_RPM_Inp12_Line_n6dB:
return 4;
}
return 1;
}
static int snd_hdsp_get_rpm_input12(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_rpm_input12(hdsp);
return 0;
}
static int hdsp_set_rpm_input12(struct hdsp *hdsp, int mode)
{
hdsp->control_register &= ~HDSP_RPM_Inp12;
switch (mode) {
case 0:
hdsp->control_register |= HDSP_RPM_Inp12_Phon_6dB;
break;
case 1:
break;
case 2:
hdsp->control_register |= HDSP_RPM_Inp12_Phon_n6dB;
break;
case 3:
hdsp->control_register |= HDSP_RPM_Inp12_Line_0dB;
break;
case 4:
hdsp->control_register |= HDSP_RPM_Inp12_Line_n6dB;
break;
default:
return -1;
}
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
return 0;
}
static int snd_hdsp_put_rpm_input12(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.enumerated.item[0];
if (val < 0)
val = 0;
if (val > 4)
val = 4;
spin_lock_irq(&hdsp->lock);
if (val != hdsp_rpm_input12(hdsp))
change = (hdsp_set_rpm_input12(hdsp, val) == 0) ? 1 : 0;
else
change = 0;
spin_unlock_irq(&hdsp->lock);
return change;
}
static int snd_hdsp_info_rpm_input(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {
"Phono +6dB", "Phono 0dB", "Phono -6dB", "Line 0dB", "Line -6dB"
};
return snd_ctl_enum_info(uinfo, 1, 5, texts);
}
static int hdsp_rpm_input34(struct hdsp *hdsp)
{
switch (hdsp->control_register & HDSP_RPM_Inp34) {
case HDSP_RPM_Inp34_Phon_6dB:
return 0;
case HDSP_RPM_Inp34_Phon_n6dB:
return 2;
case HDSP_RPM_Inp34_Line_0dB:
return 3;
case HDSP_RPM_Inp34_Line_n6dB:
return 4;
}
return 1;
}
static int snd_hdsp_get_rpm_input34(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = hdsp_rpm_input34(hdsp);
return 0;
}
static int hdsp_set_rpm_input34(struct hdsp *hdsp, int mode)
{
hdsp->control_register &= ~HDSP_RPM_Inp34;
switch (mode) {
case 0:
hdsp->control_register |= HDSP_RPM_Inp34_Phon_6dB;
break;
case 1:
break;
case 2:
hdsp->control_register |= HDSP_RPM_Inp34_Phon_n6dB;
break;
case 3:
hdsp->control_register |= HDSP_RPM_Inp34_Line_0dB;
break;
case 4:
hdsp->control_register |= HDSP_RPM_Inp34_Line_n6dB;
break;
default:
return -1;
}
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
return 0;
}
static int snd_hdsp_put_rpm_input34(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.enumerated.item[0];
if (val < 0)
val = 0;
if (val > 4)
val = 4;
spin_lock_irq(&hdsp->lock);
if (val != hdsp_rpm_input34(hdsp))
change = (hdsp_set_rpm_input34(hdsp, val) == 0) ? 1 : 0;
else
change = 0;
spin_unlock_irq(&hdsp->lock);
return change;
}
/* RPM Bypass switch */
static int hdsp_rpm_bypass(struct hdsp *hdsp)
{
return (hdsp->control_register & HDSP_RPM_Bypass) ? 1 : 0;
}
static int snd_hdsp_get_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] = hdsp_rpm_bypass(hdsp);
return 0;
}
static int hdsp_set_rpm_bypass(struct hdsp *hdsp, int on)
{
if (on)
hdsp->control_register |= HDSP_RPM_Bypass;
else
hdsp->control_register &= ~HDSP_RPM_Bypass;
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
return 0;
}
static int snd_hdsp_put_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
unsigned int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.integer.value[0] & 1;
spin_lock_irq(&hdsp->lock);
change = (int)val != hdsp_rpm_bypass(hdsp);
hdsp_set_rpm_bypass(hdsp, val);
spin_unlock_irq(&hdsp->lock);
return change;
}
static int snd_hdsp_info_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {"On", "Off"};
return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
/* RPM Disconnect switch */
static int hdsp_rpm_disconnect(struct hdsp *hdsp)
{
return (hdsp->control_register & HDSP_RPM_Disconnect) ? 1 : 0;
}
static int snd_hdsp_get_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] = hdsp_rpm_disconnect(hdsp);
return 0;
}
static int hdsp_set_rpm_disconnect(struct hdsp *hdsp, int on)
{
if (on)
hdsp->control_register |= HDSP_RPM_Disconnect;
else
hdsp->control_register &= ~HDSP_RPM_Disconnect;
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
return 0;
}
static int snd_hdsp_put_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
int change;
unsigned int val;
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
val = ucontrol->value.integer.value[0] & 1;
spin_lock_irq(&hdsp->lock);
change = (int)val != hdsp_rpm_disconnect(hdsp);
hdsp_set_rpm_disconnect(hdsp, val);
spin_unlock_irq(&hdsp->lock);
return change;
}
static int snd_hdsp_info_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[] = {"On", "Off"};
return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static struct snd_kcontrol_new snd_hdsp_rpm_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "RPM Bypass",
.get = snd_hdsp_get_rpm_bypass,
.put = snd_hdsp_put_rpm_bypass,
.info = snd_hdsp_info_rpm_bypass
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "RPM Disconnect",
.get = snd_hdsp_get_rpm_disconnect,
.put = snd_hdsp_put_rpm_disconnect,
.info = snd_hdsp_info_rpm_disconnect
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Input 1/2",
.get = snd_hdsp_get_rpm_input12,
.put = snd_hdsp_put_rpm_input12,
.info = snd_hdsp_info_rpm_input
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Input 3/4",
.get = snd_hdsp_get_rpm_input34,
.put = snd_hdsp_put_rpm_input34,
.info = snd_hdsp_info_rpm_input
},
HDSP_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
HDSP_MIXER("Mixer", 0)
};
static struct snd_kcontrol_new snd_hdsp_96xx_aeb =
HDSP_TOGGLE_SETTING("Analog Extension Board",
HDSP_AnalogExtensionBoard);
static struct snd_kcontrol_new snd_hdsp_adat_sync_check = HDSP_ADAT_SYNC_CHECK;
static int snd_hdsp_create_controls(struct snd_card *card, struct hdsp *hdsp)
{
unsigned int idx;
int err;
struct snd_kcontrol *kctl;
if (hdsp->io_type == RPM) {
/* RPM Bypass, Disconnect and Input switches */
for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_rpm_controls); idx++) {
err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_rpm_controls[idx], hdsp));
if (err < 0)
return err;
}
return 0;
}
for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_controls); idx++) {
if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_controls[idx], hdsp))) < 0)
return err;
if (idx == 1) /* IEC958 (S/PDIF) Stream */
hdsp->spdif_ctl = kctl;
}
/* ADAT SyncCheck status */
snd_hdsp_adat_sync_check.name = "ADAT Lock Status";
snd_hdsp_adat_sync_check.index = 1;
if ((err = snd_ctl_add (card, kctl = snd_ctl_new1(&snd_hdsp_adat_sync_check, hdsp))))
return err;
if (hdsp->io_type == Digiface || hdsp->io_type == H9652) {
for (idx = 1; idx < 3; ++idx) {
snd_hdsp_adat_sync_check.index = idx+1;
if ((err = snd_ctl_add (card, kctl = snd_ctl_new1(&snd_hdsp_adat_sync_check, hdsp))))
return err;
}
}
/* DA, AD and Phone gain and XLR breakout cable controls for H9632 cards */
if (hdsp->io_type == H9632) {
for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_9632_controls); idx++) {
if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_9632_controls[idx], hdsp))) < 0)
return err;
}
}
/* AEB control for H96xx card */
if (hdsp->io_type == H9632 || hdsp->io_type == H9652) {
if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_96xx_aeb, hdsp))) < 0)
return err;
}
return 0;
}
/*------------------------------------------------------------
/proc interface
------------------------------------------------------------*/
static void
snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
struct hdsp *hdsp = entry->private_data;
unsigned int status;
unsigned int status2;
char *pref_sync_ref;
char *autosync_ref;
char *system_clock_mode;
char *clock_source;
int x;
status = hdsp_read(hdsp, HDSP_statusRegister);
status2 = hdsp_read(hdsp, HDSP_status2Register);
snd_iprintf(buffer, "%s (Card #%d)\n", hdsp->card_name,
hdsp->card->number + 1);
snd_iprintf(buffer, "Buffers: capture %p playback %p\n",
hdsp->capture_buffer, hdsp->playback_buffer);
snd_iprintf(buffer, "IRQ: %d Registers bus: 0x%lx VM: 0x%lx\n",
hdsp->irq, hdsp->port, (unsigned long)hdsp->iobase);
snd_iprintf(buffer, "Control register: 0x%x\n", hdsp->control_register);
snd_iprintf(buffer, "Control2 register: 0x%x\n",
hdsp->control2_register);
snd_iprintf(buffer, "Status register: 0x%x\n", status);
snd_iprintf(buffer, "Status2 register: 0x%x\n", status2);
if (hdsp_check_for_iobox(hdsp)) {
snd_iprintf(buffer, "No I/O box connected.\n"
"Please connect one and upload firmware.\n");
return;
}
if (hdsp_check_for_firmware(hdsp, 0)) {
if (hdsp->state & HDSP_FirmwareCached) {
if (snd_hdsp_load_firmware_from_cache(hdsp) != 0) {
snd_iprintf(buffer, "Firmware loading from "
"cache failed, "
"please upload manually.\n");
return;
}
} else {
int err = hdsp_request_fw_loader(hdsp);
if (err < 0) {
snd_iprintf(buffer,
"No firmware loaded nor cached, "
"please upload firmware.\n");
return;
}
}
}
snd_iprintf(buffer, "FIFO status: %d\n", hdsp_read(hdsp, HDSP_fifoStatus) & 0xff);
snd_iprintf(buffer, "MIDI1 Output status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusOut0));
snd_iprintf(buffer, "MIDI1 Input status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusIn0));
snd_iprintf(buffer, "MIDI2 Output status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusOut1));
snd_iprintf(buffer, "MIDI2 Input status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusIn1));
snd_iprintf(buffer, "Use Midi Tasklet: %s\n", hdsp->use_midi_tasklet ? "on" : "off");
snd_iprintf(buffer, "\n");
x = 1 << (6 + hdsp_decode_latency(hdsp->control_register & HDSP_LatencyMask));
snd_iprintf(buffer, "Buffer Size (Latency): %d samples (2 periods of %lu bytes)\n", x, (unsigned long) hdsp->period_bytes);
snd_iprintf(buffer, "Hardware pointer (frames): %ld\n", hdsp_hw_pointer(hdsp));
snd_iprintf(buffer, "Precise pointer: %s\n", hdsp->precise_ptr ? "on" : "off");
snd_iprintf(buffer, "Line out: %s\n", (hdsp->control_register & HDSP_LineOut) ? "on" : "off");
snd_iprintf(buffer, "Firmware version: %d\n", (status2&HDSP_version0)|(status2&HDSP_version1)<<1|(status2&HDSP_version2)<<2);
snd_iprintf(buffer, "\n");
switch (hdsp_clock_source(hdsp)) {
case HDSP_CLOCK_SOURCE_AUTOSYNC:
clock_source = "AutoSync";
break;
case HDSP_CLOCK_SOURCE_INTERNAL_32KHZ:
clock_source = "Internal 32 kHz";
break;
case HDSP_CLOCK_SOURCE_INTERNAL_44_1KHZ:
clock_source = "Internal 44.1 kHz";
break;
case HDSP_CLOCK_SOURCE_INTERNAL_48KHZ:
clock_source = "Internal 48 kHz";
break;
case HDSP_CLOCK_SOURCE_INTERNAL_64KHZ:
clock_source = "Internal 64 kHz";
break;
case HDSP_CLOCK_SOURCE_INTERNAL_88_2KHZ:
clock_source = "Internal 88.2 kHz";
break;
case HDSP_CLOCK_SOURCE_INTERNAL_96KHZ:
clock_source = "Internal 96 kHz";
break;
case HDSP_CLOCK_SOURCE_INTERNAL_128KHZ:
clock_source = "Internal 128 kHz";
break;
case HDSP_CLOCK_SOURCE_INTERNAL_176_4KHZ:
clock_source = "Internal 176.4 kHz";
break;
case HDSP_CLOCK_SOURCE_INTERNAL_192KHZ:
clock_source = "Internal 192 kHz";
break;
default:
clock_source = "Error";
}
snd_iprintf (buffer, "Sample Clock Source: %s\n", clock_source);
if (hdsp_system_clock_mode(hdsp))
system_clock_mode = "Slave";
else
system_clock_mode = "Master";
switch (hdsp_pref_sync_ref (hdsp)) {
case HDSP_SYNC_FROM_WORD:
pref_sync_ref = "Word Clock";
break;
case HDSP_SYNC_FROM_ADAT_SYNC:
pref_sync_ref = "ADAT Sync";
break;
case HDSP_SYNC_FROM_SPDIF:
pref_sync_ref = "SPDIF";
break;
case HDSP_SYNC_FROM_ADAT1:
pref_sync_ref = "ADAT1";
break;
case HDSP_SYNC_FROM_ADAT2:
pref_sync_ref = "ADAT2";
break;
case HDSP_SYNC_FROM_ADAT3:
pref_sync_ref = "ADAT3";
break;
default:
pref_sync_ref = "Word Clock";
break;
}
snd_iprintf (buffer, "Preferred Sync Reference: %s\n", pref_sync_ref);
switch (hdsp_autosync_ref (hdsp)) {
case HDSP_AUTOSYNC_FROM_WORD:
autosync_ref = "Word Clock";
break;
case HDSP_AUTOSYNC_FROM_ADAT_SYNC:
autosync_ref = "ADAT Sync";
break;
case HDSP_AUTOSYNC_FROM_SPDIF:
autosync_ref = "SPDIF";
break;
case HDSP_AUTOSYNC_FROM_NONE:
autosync_ref = "None";
break;
case HDSP_AUTOSYNC_FROM_ADAT1:
autosync_ref = "ADAT1";
break;
case HDSP_AUTOSYNC_FROM_ADAT2:
autosync_ref = "ADAT2";
break;
case HDSP_AUTOSYNC_FROM_ADAT3:
autosync_ref = "ADAT3";
break;
default:
autosync_ref = "---";
break;
}
snd_iprintf (buffer, "AutoSync Reference: %s\n", autosync_ref);
snd_iprintf (buffer, "AutoSync Frequency: %d\n", hdsp_external_sample_rate(hdsp));
snd_iprintf (buffer, "System Clock Mode: %s\n", system_clock_mode);
snd_iprintf (buffer, "System Clock Frequency: %d\n", hdsp->system_sample_rate);
snd_iprintf (buffer, "System Clock Locked: %s\n", hdsp->clock_source_locked ? "Yes" : "No");
snd_iprintf(buffer, "\n");
if (hdsp->io_type != RPM) {
switch (hdsp_spdif_in(hdsp)) {
case HDSP_SPDIFIN_OPTICAL:
snd_iprintf(buffer, "IEC958 input: Optical\n");
break;
case HDSP_SPDIFIN_COAXIAL:
snd_iprintf(buffer, "IEC958 input: Coaxial\n");
break;
case HDSP_SPDIFIN_INTERNAL:
snd_iprintf(buffer, "IEC958 input: Internal\n");
break;
case HDSP_SPDIFIN_AES:
snd_iprintf(buffer, "IEC958 input: AES\n");
break;
default:
snd_iprintf(buffer, "IEC958 input: ???\n");
break;
}
}
if (RPM == hdsp->io_type) {
if (hdsp->control_register & HDSP_RPM_Bypass)
snd_iprintf(buffer, "RPM Bypass: disabled\n");
else
snd_iprintf(buffer, "RPM Bypass: enabled\n");
if (hdsp->control_register & HDSP_RPM_Disconnect)
snd_iprintf(buffer, "RPM disconnected\n");
else
snd_iprintf(buffer, "RPM connected\n");
switch (hdsp->control_register & HDSP_RPM_Inp12) {
case HDSP_RPM_Inp12_Phon_6dB:
snd_iprintf(buffer, "Input 1/2: Phono, 6dB\n");
break;
case HDSP_RPM_Inp12_Phon_0dB:
snd_iprintf(buffer, "Input 1/2: Phono, 0dB\n");
break;
case HDSP_RPM_Inp12_Phon_n6dB:
snd_iprintf(buffer, "Input 1/2: Phono, -6dB\n");
break;
case HDSP_RPM_Inp12_Line_0dB:
snd_iprintf(buffer, "Input 1/2: Line, 0dB\n");
break;
case HDSP_RPM_Inp12_Line_n6dB:
snd_iprintf(buffer, "Input 1/2: Line, -6dB\n");
break;
default:
snd_iprintf(buffer, "Input 1/2: ???\n");
}
switch (hdsp->control_register & HDSP_RPM_Inp34) {
case HDSP_RPM_Inp34_Phon_6dB:
snd_iprintf(buffer, "Input 3/4: Phono, 6dB\n");
break;
case HDSP_RPM_Inp34_Phon_0dB:
snd_iprintf(buffer, "Input 3/4: Phono, 0dB\n");
break;
case HDSP_RPM_Inp34_Phon_n6dB:
snd_iprintf(buffer, "Input 3/4: Phono, -6dB\n");
break;
case HDSP_RPM_Inp34_Line_0dB:
snd_iprintf(buffer, "Input 3/4: Line, 0dB\n");
break;
case HDSP_RPM_Inp34_Line_n6dB:
snd_iprintf(buffer, "Input 3/4: Line, -6dB\n");
break;
default:
snd_iprintf(buffer, "Input 3/4: ???\n");
}
} else {
if (hdsp->control_register & HDSP_SPDIFOpticalOut)
snd_iprintf(buffer, "IEC958 output: Coaxial & ADAT1\n");
else
snd_iprintf(buffer, "IEC958 output: Coaxial only\n");
if (hdsp->control_register & HDSP_SPDIFProfessional)
snd_iprintf(buffer, "IEC958 quality: Professional\n");
else
snd_iprintf(buffer, "IEC958 quality: Consumer\n");
if (hdsp->control_register & HDSP_SPDIFEmphasis)
snd_iprintf(buffer, "IEC958 emphasis: on\n");
else
snd_iprintf(buffer, "IEC958 emphasis: off\n");
if (hdsp->control_register & HDSP_SPDIFNonAudio)
snd_iprintf(buffer, "IEC958 NonAudio: on\n");
else
snd_iprintf(buffer, "IEC958 NonAudio: off\n");
x = hdsp_spdif_sample_rate(hdsp);
if (x != 0)
snd_iprintf(buffer, "IEC958 sample rate: %d\n", x);
else
snd_iprintf(buffer, "IEC958 sample rate: Error flag set\n");
}
snd_iprintf(buffer, "\n");
/* Sync Check */
x = status & HDSP_Sync0;
if (status & HDSP_Lock0)
snd_iprintf(buffer, "ADAT1: %s\n", x ? "Sync" : "Lock");
else
snd_iprintf(buffer, "ADAT1: No Lock\n");
switch (hdsp->io_type) {
case Digiface:
case H9652:
x = status & HDSP_Sync1;
if (status & HDSP_Lock1)
snd_iprintf(buffer, "ADAT2: %s\n", x ? "Sync" : "Lock");
else
snd_iprintf(buffer, "ADAT2: No Lock\n");
x = status & HDSP_Sync2;
if (status & HDSP_Lock2)
snd_iprintf(buffer, "ADAT3: %s\n", x ? "Sync" : "Lock");
else
snd_iprintf(buffer, "ADAT3: No Lock\n");
break;
default:
/* relax */
break;
}
x = status & HDSP_SPDIFSync;
if (status & HDSP_SPDIFErrorFlag)
snd_iprintf (buffer, "SPDIF: No Lock\n");
else
snd_iprintf (buffer, "SPDIF: %s\n", x ? "Sync" : "Lock");
x = status2 & HDSP_wc_sync;
if (status2 & HDSP_wc_lock)
snd_iprintf (buffer, "Word Clock: %s\n", x ? "Sync" : "Lock");
else
snd_iprintf (buffer, "Word Clock: No Lock\n");
x = status & HDSP_TimecodeSync;
if (status & HDSP_TimecodeLock)
snd_iprintf(buffer, "ADAT Sync: %s\n", x ? "Sync" : "Lock");
else
snd_iprintf(buffer, "ADAT Sync: No Lock\n");
snd_iprintf(buffer, "\n");
/* Information about H9632 specific controls */
if (hdsp->io_type == H9632) {
char *tmp;
switch (hdsp_ad_gain(hdsp)) {
case 0:
tmp = "-10 dBV";
break;
case 1:
tmp = "+4 dBu";
break;
default:
tmp = "Lo Gain";
break;
}
snd_iprintf(buffer, "AD Gain : %s\n", tmp);
switch (hdsp_da_gain(hdsp)) {
case 0:
tmp = "Hi Gain";
break;
case 1:
tmp = "+4 dBu";
break;
default:
tmp = "-10 dBV";
break;
}
snd_iprintf(buffer, "DA Gain : %s\n", tmp);
switch (hdsp_phone_gain(hdsp)) {
case 0:
tmp = "0 dB";
break;
case 1:
tmp = "-6 dB";
break;
default:
tmp = "-12 dB";
break;
}
snd_iprintf(buffer, "Phones Gain : %s\n", tmp);
snd_iprintf(buffer, "XLR Breakout Cable : %s\n",
hdsp_toggle_setting(hdsp, HDSP_XLRBreakoutCable) ?
"yes" : "no");
if (hdsp->control_register & HDSP_AnalogExtensionBoard)
snd_iprintf(buffer, "AEB : on (ADAT1 internal)\n");
else
snd_iprintf(buffer, "AEB : off (ADAT1 external)\n");
snd_iprintf(buffer, "\n");
}
}
static void snd_hdsp_proc_init(struct hdsp *hdsp)
{
struct snd_info_entry *entry;
if (! snd_card_proc_new(hdsp->card, "hdsp", &entry))
snd_info_set_text_ops(entry, hdsp, snd_hdsp_proc_read);
}
static void snd_hdsp_free_buffers(struct hdsp *hdsp)
{
snd_hammerfall_free_buffer(&hdsp->capture_dma_buf, hdsp->pci);
snd_hammerfall_free_buffer(&hdsp->playback_dma_buf, hdsp->pci);
}
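/*
 * The card requires its capture and playback areas to start on a 64KB
 * boundary in bus space. The DMA areas are assumed to be allocated with
 * enough slack that rounding the bus address up with ALIGN() stays inside
 * the buffer; the same offset is then applied to the CPU-side pointers.
 */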
static int snd_hdsp_initialize_memory(struct hdsp *hdsp)
{
unsigned long pb_bus, cb_bus;
if (snd_hammerfall_get_buffer(hdsp->pci, &hdsp->capture_dma_buf, HDSP_DMA_AREA_BYTES) < 0 ||
snd_hammerfall_get_buffer(hdsp->pci, &hdsp->playback_dma_buf, HDSP_DMA_AREA_BYTES) < 0) {
if (hdsp->capture_dma_buf.area)
snd_dma_free_pages(&hdsp->capture_dma_buf);
dev_err(hdsp->card->dev,
"%s: no buffers available\n", hdsp->card_name);
return -ENOMEM;
}
/* Align to bus-space 64K boundary */
cb_bus = ALIGN(hdsp->capture_dma_buf.addr, 0x10000ul);
pb_bus = ALIGN(hdsp->playback_dma_buf.addr, 0x10000ul);
/* Tell the card where it is */
hdsp_write(hdsp, HDSP_inputBufferAddress, cb_bus);
hdsp_write(hdsp, HDSP_outputBufferAddress, pb_bus);
hdsp->capture_buffer = hdsp->capture_dma_buf.area + (cb_bus - hdsp->capture_dma_buf.addr);
hdsp->playback_buffer = hdsp->playback_dma_buf.area + (pb_bus - hdsp->playback_dma_buf.addr);
return 0;
}
static int snd_hdsp_set_defaults(struct hdsp *hdsp)
{
unsigned int i;
/* ASSUMPTION: hdsp->lock is either held, or
there is no need to hold it (e.g. during module
initialization).
*/
/* set defaults:
SPDIF Input via Coax
Master clock mode
maximum latency (encoded 7 => 1 << (7 + 6) = 8192 frames,
i.e. two periods of 4096 frames each).
Enable line out.
*/
hdsp->control_register = HDSP_ClockModeMaster |
HDSP_SPDIFInputCoaxial |
hdsp_encode_latency(7) |
HDSP_LineOut;
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
#ifdef SNDRV_BIG_ENDIAN
hdsp->control2_register = HDSP_BIGENDIAN_MODE;
#else
hdsp->control2_register = 0;
#endif
if (hdsp->io_type == H9652)
snd_hdsp_9652_enable_mixer (hdsp);
else
hdsp_write (hdsp, HDSP_control2Reg, hdsp->control2_register);
hdsp_reset_hw_pointer(hdsp);
hdsp_compute_period_size(hdsp);
/* silence everything */
for (i = 0; i < HDSP_MATRIX_MIXER_SIZE; ++i)
hdsp->mixer_matrix[i] = MINUS_INFINITY_GAIN;
for (i = 0; i < ((hdsp->io_type == H9652 || hdsp->io_type == H9632) ? 1352 : HDSP_MATRIX_MIXER_SIZE); ++i) {
if (hdsp_write_gain (hdsp, i, MINUS_INFINITY_GAIN))
return -EIO;
}
/* H9632 specific defaults */
if (hdsp->io_type == H9632) {
hdsp->control_register |= (HDSP_DAGainPlus4dBu | HDSP_ADGainPlus4dBu | HDSP_PhoneGain0dB);
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
}
/* set a default rate so that the channel map is set up.
*/
hdsp_set_rate(hdsp, 48000, 1);
return 0;
}
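/*
 * MIDI input is drained in a tasklet when "Use Midi Tasklet" is enabled:
 * the interrupt handler masks the port's MIDI interrupt, marks it pending
 * and schedules this tasklet, which then reads the FIFO outside hard-IRQ
 * context.
 */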
static void hdsp_midi_tasklet(unsigned long arg)
{
struct hdsp *hdsp = (struct hdsp *)arg;
if (hdsp->midi[0].pending)
snd_hdsp_midi_input_read (&hdsp->midi[0]);
if (hdsp->midi[1].pending)
snd_hdsp_midi_input_read (&hdsp->midi[1]);
}
static irqreturn_t snd_hdsp_interrupt(int irq, void *dev_id)
{
struct hdsp *hdsp = (struct hdsp *) dev_id;
unsigned int status;
int audio;
int midi0;
int midi1;
unsigned int midi0status;
unsigned int midi1status;
int schedule = 0;
status = hdsp_read(hdsp, HDSP_statusRegister);
audio = status & HDSP_audioIRQPending;
midi0 = status & HDSP_midi0IRQPending;
midi1 = status & HDSP_midi1IRQPending;
if (!audio && !midi0 && !midi1)
return IRQ_NONE;
hdsp_write(hdsp, HDSP_interruptConfirmation, 0);
midi0status = hdsp_read (hdsp, HDSP_midiStatusIn0) & 0xff;
midi1status = hdsp_read (hdsp, HDSP_midiStatusIn1) & 0xff;
if (!(hdsp->state & HDSP_InitializationComplete))
return IRQ_HANDLED;
if (audio) {
if (hdsp->capture_substream)
snd_pcm_period_elapsed(hdsp->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream);
if (hdsp->playback_substream)
snd_pcm_period_elapsed(hdsp->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
}
if (midi0 && midi0status) {
if (hdsp->use_midi_tasklet) {
/* we disable interrupts for this input until processing is done */
hdsp->control_register &= ~HDSP_Midi0InterruptEnable;
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
hdsp->midi[0].pending = 1;
schedule = 1;
} else {
snd_hdsp_midi_input_read (&hdsp->midi[0]);
}
}
if (hdsp->io_type != Multiface && hdsp->io_type != RPM && hdsp->io_type != H9632 && midi1 && midi1status) {
if (hdsp->use_midi_tasklet) {
/* we disable interrupts for this input until processing is done */
hdsp->control_register &= ~HDSP_Midi1InterruptEnable;
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
hdsp->midi[1].pending = 1;
schedule = 1;
} else {
snd_hdsp_midi_input_read (&hdsp->midi[1]);
}
}
if (hdsp->use_midi_tasklet && schedule)
tasklet_schedule(&hdsp->midi_tasklet);
return IRQ_HANDLED;
}
static snd_pcm_uframes_t snd_hdsp_hw_pointer(struct snd_pcm_substream *substream)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
return hdsp_hw_pointer(hdsp);
}
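/*
 * Audio buffers are non-interleaved: each mapped channel owns a contiguous
 * HDSP_CHANNEL_BUFFER_BYTES slice, and samples are 32 bits wide, so the
 * copy/silence callbacks below convert frame positions to byte offsets
 * with a factor of 4. A negative channel_map entry marks a channel that is
 * unavailable at the current speed.
 */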
static char *hdsp_channel_buffer_location(struct hdsp *hdsp,
int stream,
int channel)
{
int mapped_channel;
if (snd_BUG_ON(channel < 0 || channel >= hdsp->max_channels))
return NULL;
if ((mapped_channel = hdsp->channel_map[channel]) < 0)
return NULL;
if (stream == SNDRV_PCM_STREAM_CAPTURE)
return hdsp->capture_buffer + (mapped_channel * HDSP_CHANNEL_BUFFER_BYTES);
else
return hdsp->playback_buffer + (mapped_channel * HDSP_CHANNEL_BUFFER_BYTES);
}
static int snd_hdsp_playback_copy(struct snd_pcm_substream *substream, int channel,
snd_pcm_uframes_t pos, void __user *src, snd_pcm_uframes_t count)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
char *channel_buf;
if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES / 4))
return -EINVAL;
channel_buf = hdsp_channel_buffer_location (hdsp, substream->pstr->stream, channel);
if (snd_BUG_ON(!channel_buf))
return -EIO;
if (copy_from_user(channel_buf + pos * 4, src, count * 4))
return -EFAULT;
return count;
}
static int snd_hdsp_capture_copy(struct snd_pcm_substream *substream, int channel,
snd_pcm_uframes_t pos, void __user *dst, snd_pcm_uframes_t count)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
char *channel_buf;
if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES / 4))
return -EINVAL;
channel_buf = hdsp_channel_buffer_location (hdsp, substream->pstr->stream, channel);
if (snd_BUG_ON(!channel_buf))
return -EIO;
if (copy_to_user(dst, channel_buf + pos * 4, count * 4))
return -EFAULT;
return count;
}
static int snd_hdsp_hw_silence(struct snd_pcm_substream *substream, int channel,
snd_pcm_uframes_t pos, snd_pcm_uframes_t count)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
char *channel_buf;
channel_buf = hdsp_channel_buffer_location (hdsp, substream->pstr->stream, channel);
if (snd_BUG_ON(!channel_buf))
return -EIO;
memset(channel_buf + pos * 4, 0, count * 4);
return count;
}
static int snd_hdsp_reset(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
struct snd_pcm_substream *other;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
other = hdsp->capture_substream;
else
other = hdsp->playback_substream;
if (hdsp->running)
runtime->status->hw_ptr = hdsp_hw_pointer(hdsp);
else
runtime->status->hw_ptr = 0;
if (other) {
struct snd_pcm_substream *s;
struct snd_pcm_runtime *oruntime = other->runtime;
snd_pcm_group_for_each_entry(s, substream) {
if (s == other) {
oruntime->status->hw_ptr = runtime->status->hw_ptr;
break;
}
}
}
return 0;
}
static int snd_hdsp_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
int err;
pid_t this_pid;
pid_t other_pid;
if (hdsp_check_for_iobox (hdsp))
return -EIO;
if (hdsp_check_for_firmware(hdsp, 1))
return -EIO;
spin_lock_irq(&hdsp->lock);
if (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) {
hdsp->control_register &= ~(HDSP_SPDIFProfessional | HDSP_SPDIFNonAudio | HDSP_SPDIFEmphasis);
hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register |= hdsp->creg_spdif_stream);
this_pid = hdsp->playback_pid;
other_pid = hdsp->capture_pid;
} else {
this_pid = hdsp->capture_pid;
other_pid = hdsp->playback_pid;
}
if ((other_pid > 0) && (this_pid != other_pid)) {
/* The other stream is open, and not by the same
task as this one. Make sure that the parameters
that matter are the same.
*/
if (params_rate(params) != hdsp->system_sample_rate) {
spin_unlock_irq(&hdsp->lock);
_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_RATE);
return -EBUSY;
}
if (params_period_size(params) != hdsp->period_bytes / 4) {
spin_unlock_irq(&hdsp->lock);
_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
return -EBUSY;
}
/* We're fine. */
spin_unlock_irq(&hdsp->lock);
return 0;
} else {
spin_unlock_irq(&hdsp->lock);
}
/* FIXME: how do we make sure that the rate matches an externally-set one? */
spin_lock_irq(&hdsp->lock);
if (! hdsp->clock_source_locked) {
if ((err = hdsp_set_rate(hdsp, params_rate(params), 0)) < 0) {
spin_unlock_irq(&hdsp->lock);
_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_RATE);
return err;
}
}
spin_unlock_irq(&hdsp->lock);
if ((err = hdsp_set_interrupt_interval(hdsp, params_period_size(params))) < 0) {
_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
return err;
}
return 0;
}
static int snd_hdsp_channel_info(struct snd_pcm_substream *substream,
struct snd_pcm_channel_info *info)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
int mapped_channel;
if (snd_BUG_ON(info->channel >= hdsp->max_channels))
return -EINVAL;
if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
return -EINVAL;
info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
info->first = 0;
info->step = 32;
return 0;
}
static int snd_hdsp_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
switch (cmd) {
case SNDRV_PCM_IOCTL1_RESET:
return snd_hdsp_reset(substream);
case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
return snd_hdsp_channel_info(substream, arg);
default:
break;
}
return snd_pcm_lib_ioctl(substream, cmd, arg);
}
static int snd_hdsp_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
struct snd_pcm_substream *other;
int running;
if (hdsp_check_for_iobox (hdsp))
return -EIO;
if (hdsp_check_for_firmware(hdsp, 0)) /* no auto-loading in trigger */
return -EIO;
spin_lock(&hdsp->lock);
running = hdsp->running;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
running |= 1 << substream->stream;
break;
case SNDRV_PCM_TRIGGER_STOP:
running &= ~(1 << substream->stream);
break;
default:
snd_BUG();
spin_unlock(&hdsp->lock);
return -EINVAL;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
other = hdsp->capture_substream;
else
other = hdsp->playback_substream;
if (other) {
struct snd_pcm_substream *s;
snd_pcm_group_for_each_entry(s, substream) {
if (s == other) {
snd_pcm_trigger_done(s, substream);
if (cmd == SNDRV_PCM_TRIGGER_START)
running |= 1 << s->stream;
else
running &= ~(1 << s->stream);
goto _ok;
}
}
if (cmd == SNDRV_PCM_TRIGGER_START) {
if (!(running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) &&
substream->stream == SNDRV_PCM_STREAM_CAPTURE)
hdsp_silence_playback(hdsp);
} else {
if (running &&
substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
hdsp_silence_playback(hdsp);
}
} else {
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
hdsp_silence_playback(hdsp);
}
_ok:
snd_pcm_trigger_done(substream, substream);
if (!hdsp->running && running)
hdsp_start_audio(hdsp);
else if (hdsp->running && !running)
hdsp_stop_audio(hdsp);
hdsp->running = running;
spin_unlock(&hdsp->lock);
return 0;
}
static int snd_hdsp_prepare(struct snd_pcm_substream *substream)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
int result = 0;
if (hdsp_check_for_iobox (hdsp))
return -EIO;
if (hdsp_check_for_firmware(hdsp, 1))
return -EIO;
spin_lock_irq(&hdsp->lock);
if (!hdsp->running)
hdsp_reset_hw_pointer(hdsp);
spin_unlock_irq(&hdsp->lock);
return result;
}
static struct snd_pcm_hardware snd_hdsp_playback_subinfo =
{
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_NONINTERLEAVED |
SNDRV_PCM_INFO_SYNC_START |
SNDRV_PCM_INFO_DOUBLE),
#ifdef SNDRV_BIG_ENDIAN
.formats = SNDRV_PCM_FMTBIT_S32_BE,
#else
.formats = SNDRV_PCM_FMTBIT_S32_LE,
#endif
.rates = (SNDRV_PCM_RATE_32000 |
SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000 |
SNDRV_PCM_RATE_64000 |
SNDRV_PCM_RATE_88200 |
SNDRV_PCM_RATE_96000),
.rate_min = 32000,
.rate_max = 96000,
.channels_min = 6,
.channels_max = HDSP_MAX_CHANNELS,
.buffer_bytes_max = HDSP_CHANNEL_BUFFER_BYTES * HDSP_MAX_CHANNELS,
.period_bytes_min = (64 * 4) * 10,
.period_bytes_max = (8192 * 4) * HDSP_MAX_CHANNELS,
.periods_min = 2,
.periods_max = 2,
.fifo_size = 0
};
static struct snd_pcm_hardware snd_hdsp_capture_subinfo =
{
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_NONINTERLEAVED |
SNDRV_PCM_INFO_SYNC_START),
#ifdef SNDRV_BIG_ENDIAN
.formats = SNDRV_PCM_FMTBIT_S32_BE,
#else
.formats = SNDRV_PCM_FMTBIT_S32_LE,
#endif
.rates = (SNDRV_PCM_RATE_32000 |
SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000 |
SNDRV_PCM_RATE_64000 |
SNDRV_PCM_RATE_88200 |
SNDRV_PCM_RATE_96000),
.rate_min = 32000,
.rate_max = 96000,
.channels_min = 5,
.channels_max = HDSP_MAX_CHANNELS,
.buffer_bytes_max = HDSP_CHANNEL_BUFFER_BYTES * HDSP_MAX_CHANNELS,
.period_bytes_min = (64 * 4) * 10,
.period_bytes_max = (8192 * 4) * HDSP_MAX_CHANNELS,
.periods_min = 2,
.periods_max = 2,
.fifo_size = 0
};
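/*
 * Period sizes are restricted to powers of two because the hardware
 * latency field stores log2(frames) - 6; cf. hdsp_encode_latency() and the
 * buffer-size computation in the proc callback.
 */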
static unsigned int hdsp_period_sizes[] = { 64, 128, 256, 512, 1024, 2048, 4096, 8192 };
static struct snd_pcm_hw_constraint_list hdsp_hw_constraints_period_sizes = {
.count = ARRAY_SIZE(hdsp_period_sizes),
.list = hdsp_period_sizes,
.mask = 0
};
static unsigned int hdsp_9632_sample_rates[] = { 32000, 44100, 48000, 64000, 88200, 96000, 128000, 176400, 192000 };
static struct snd_pcm_hw_constraint_list hdsp_hw_constraints_9632_sample_rates = {
.count = ARRAY_SIZE(hdsp_9632_sample_rates),
.list = hdsp_9632_sample_rates,
.mask = 0
};
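/*
 * The hw_param rules below couple channel count and sample rate: single
 * speed (up to 48 kHz) exposes ss_*_channels, double speed (64..96 kHz)
 * narrows the ADAT width to ds_*_channels, and quad speed (128..192 kHz,
 * H9632 only) narrows it further to qs_*_channels. Each direction gets one
 * rule per dependency so the constraint solver can refine either parameter
 * first.
 */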
static int snd_hdsp_hw_rule_in_channels(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct hdsp *hdsp = rule->private;
struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
if (hdsp->io_type == H9632) {
unsigned int list[3];
list[0] = hdsp->qs_in_channels;
list[1] = hdsp->ds_in_channels;
list[2] = hdsp->ss_in_channels;
return snd_interval_list(c, 3, list, 0);
} else {
unsigned int list[2];
list[0] = hdsp->ds_in_channels;
list[1] = hdsp->ss_in_channels;
return snd_interval_list(c, 2, list, 0);
}
}
static int snd_hdsp_hw_rule_out_channels(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct hdsp *hdsp = rule->private;
struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
if (hdsp->io_type == H9632) {
unsigned int list[3];
list[0] = hdsp->qs_out_channels;
list[1] = hdsp->ds_out_channels;
list[2] = hdsp->ss_out_channels;
return snd_interval_list(c, 3, list, 0);
} else {
unsigned int list[2];
list[0] = hdsp->ds_out_channels;
list[1] = hdsp->ss_out_channels;
return snd_interval_list(c, 2, list, 0);
}
}
static int snd_hdsp_hw_rule_in_channels_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct hdsp *hdsp = rule->private;
struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *r = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
if (r->min > 96000 && hdsp->io_type == H9632) {
struct snd_interval t = {
.min = hdsp->qs_in_channels,
.max = hdsp->qs_in_channels,
.integer = 1,
};
return snd_interval_refine(c, &t);
} else if (r->min > 48000 && r->max <= 96000) {
struct snd_interval t = {
.min = hdsp->ds_in_channels,
.max = hdsp->ds_in_channels,
.integer = 1,
};
return snd_interval_refine(c, &t);
} else if (r->max < 64000) {
struct snd_interval t = {
.min = hdsp->ss_in_channels,
.max = hdsp->ss_in_channels,
.integer = 1,
};
return snd_interval_refine(c, &t);
}
return 0;
}
static int snd_hdsp_hw_rule_out_channels_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct hdsp *hdsp = rule->private;
struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *r = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
if (r->min > 96000 && hdsp->io_type == H9632) {
struct snd_interval t = {
.min = hdsp->qs_out_channels,
.max = hdsp->qs_out_channels,
.integer = 1,
};
return snd_interval_refine(c, &t);
} else if (r->min > 48000 && r->max <= 96000) {
struct snd_interval t = {
.min = hdsp->ds_out_channels,
.max = hdsp->ds_out_channels,
.integer = 1,
};
return snd_interval_refine(c, &t);
} else if (r->max < 64000) {
struct snd_interval t = {
.min = hdsp->ss_out_channels,
.max = hdsp->ss_out_channels,
.integer = 1,
};
return snd_interval_refine(c, &t);
}
return 0;
}
static int snd_hdsp_hw_rule_rate_out_channels(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct hdsp *hdsp = rule->private;
struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *r = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
if (c->min >= hdsp->ss_out_channels) {
struct snd_interval t = {
.min = 32000,
.max = 48000,
.integer = 1,
};
return snd_interval_refine(r, &t);
} else if (c->max <= hdsp->qs_out_channels && hdsp->io_type == H9632) {
struct snd_interval t = {
.min = 128000,
.max = 192000,
.integer = 1,
};
return snd_interval_refine(r, &t);
} else if (c->max <= hdsp->ds_out_channels) {
struct snd_interval t = {
.min = 64000,
.max = 96000,
.integer = 1,
};
return snd_interval_refine(r, &t);
}
return 0;
}
static int snd_hdsp_hw_rule_rate_in_channels(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct hdsp *hdsp = rule->private;
struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *r = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
if (c->min >= hdsp->ss_in_channels) {
struct snd_interval t = {
.min = 32000,
.max = 48000,
.integer = 1,
};
return snd_interval_refine(r, &t);
} else if (c->max <= hdsp->qs_in_channels && hdsp->io_type == H9632) {
struct snd_interval t = {
.min = 128000,
.max = 192000,
.integer = 1,
};
return snd_interval_refine(r, &t);
} else if (c->max <= hdsp->ds_in_channels) {
struct snd_interval t = {
.min = 64000,
.max = 96000,
.integer = 1,
};
return snd_interval_refine(r, &t);
}
return 0;
}
static int snd_hdsp_playback_open(struct snd_pcm_substream *substream)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
if (hdsp_check_for_iobox (hdsp))
return -EIO;
if (hdsp_check_for_firmware(hdsp, 1))
return -EIO;
spin_lock_irq(&hdsp->lock);
snd_pcm_set_sync(substream);
runtime->hw = snd_hdsp_playback_subinfo;
runtime->dma_area = hdsp->playback_buffer;
runtime->dma_bytes = HDSP_DMA_AREA_BYTES;
hdsp->playback_pid = current->pid;
hdsp->playback_substream = substream;
spin_unlock_irq(&hdsp->lock);
snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, &hdsp_hw_constraints_period_sizes);
if (hdsp->clock_source_locked) {
runtime->hw.rate_min = runtime->hw.rate_max = hdsp->system_sample_rate;
} else if (hdsp->io_type == H9632) {
runtime->hw.rate_max = 192000;
runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hdsp_hw_constraints_9632_sample_rates);
}
if (hdsp->io_type == H9632) {
runtime->hw.channels_min = hdsp->qs_out_channels;
runtime->hw.channels_max = hdsp->ss_out_channels;
}
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
snd_hdsp_hw_rule_out_channels, hdsp,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
snd_hdsp_hw_rule_out_channels_rate, hdsp,
SNDRV_PCM_HW_PARAM_RATE, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
snd_hdsp_hw_rule_rate_out_channels, hdsp,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
if (RPM != hdsp->io_type) {
hdsp->creg_spdif_stream = hdsp->creg_spdif;
hdsp->spdif_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
}
return 0;
}
static int snd_hdsp_playback_release(struct snd_pcm_substream *substream)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
spin_lock_irq(&hdsp->lock);
hdsp->playback_pid = -1;
hdsp->playback_substream = NULL;
spin_unlock_irq(&hdsp->lock);
if (RPM != hdsp->io_type) {
hdsp->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
}
return 0;
}
static int snd_hdsp_capture_open(struct snd_pcm_substream *substream)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
if (hdsp_check_for_iobox (hdsp))
return -EIO;
if (hdsp_check_for_firmware(hdsp, 1))
return -EIO;
spin_lock_irq(&hdsp->lock);
snd_pcm_set_sync(substream);
runtime->hw = snd_hdsp_capture_subinfo;
runtime->dma_area = hdsp->capture_buffer;
runtime->dma_bytes = HDSP_DMA_AREA_BYTES;
hdsp->capture_pid = current->pid;
hdsp->capture_substream = substream;
spin_unlock_irq(&hdsp->lock);
snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, &hdsp_hw_constraints_period_sizes);
if (hdsp->io_type == H9632) {
runtime->hw.channels_min = hdsp->qs_in_channels;
runtime->hw.channels_max = hdsp->ss_in_channels;
runtime->hw.rate_max = 192000;
runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hdsp_hw_constraints_9632_sample_rates);
}
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
snd_hdsp_hw_rule_in_channels, hdsp,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
snd_hdsp_hw_rule_in_channels_rate, hdsp,
SNDRV_PCM_HW_PARAM_RATE, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
snd_hdsp_hw_rule_rate_in_channels, hdsp,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
return 0;
}
static int snd_hdsp_capture_release(struct snd_pcm_substream *substream)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
spin_lock_irq(&hdsp->lock);
hdsp->capture_pid = -1;
hdsp->capture_substream = NULL;
spin_unlock_irq(&hdsp->lock);
return 0;
}
/* helper functions for copying meter values */
static inline int copy_u32_le(void __user *dest, void __iomem *src)
{
u32 val = readl(src);
return copy_to_user(dest, &val, 4);
}
static inline int copy_u64_le(void __user *dest, void __iomem *src_low, void __iomem *src_high)
{
u32 rms_low, rms_high;
u64 rms;
rms_low = readl(src_low);
rms_high = readl(src_high);
rms = ((u64)rms_high << 32) | rms_low;
return copy_to_user(dest, &rms, 8);
}
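/*
 * On the 9652 the RMS meters provide only 48 significant bits, split
 * across two 32-bit words whose low bytes carry no information; mask them
 * off before assembling the 64-bit value.
 */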
static inline int copy_u48_le(void __user *dest, void __iomem *src_low, void __iomem *src_high)
{
u32 rms_low, rms_high;
u64 rms;
rms_low = readl(src_low) & 0xffffff00;
rms_high = readl(src_high) & 0xffffff00;
rms = ((u64)rms_high << 32) | rms_low;
return copy_to_user(dest, &rms, 8);
}
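/*
 * 9652 meter layout: peak registers grow downwards from
 * HDSP_9652_peakBase and RMS registers upwards from HDSP_9652_rmsBase,
 * with the input, playback and output banks spaced "channels" entries
 * apart. In double speed mode only 14 channels are metered; the (i & 4)
 * test skips every other group of four channels.
 */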
static int hdsp_9652_get_peak(struct hdsp *hdsp, struct hdsp_peak_rms __user *peak_rms)
{
int doublespeed = 0;
int i, j, channels, ofs;
if (hdsp_read (hdsp, HDSP_statusRegister) & HDSP_DoubleSpeedStatus)
doublespeed = 1;
channels = doublespeed ? 14 : 26;
for (i = 0, j = 0; i < 26; ++i) {
if (doublespeed && (i & 4))
continue;
ofs = HDSP_9652_peakBase - j * 4;
if (copy_u32_le(&peak_rms->input_peaks[i], hdsp->iobase + ofs))
return -EFAULT;
ofs -= channels * 4;
if (copy_u32_le(&peak_rms->playback_peaks[i], hdsp->iobase + ofs))
return -EFAULT;
ofs -= channels * 4;
if (copy_u32_le(&peak_rms->output_peaks[i], hdsp->iobase + ofs))
return -EFAULT;
ofs = HDSP_9652_rmsBase + j * 8;
if (copy_u48_le(&peak_rms->input_rms[i], hdsp->iobase + ofs,
hdsp->iobase + ofs + 4))
return -EFAULT;
ofs += channels * 8;
if (copy_u48_le(&peak_rms->playback_rms[i], hdsp->iobase + ofs,
hdsp->iobase + ofs + 4))
return -EFAULT;
ofs += channels * 8;
if (copy_u48_le(&peak_rms->output_rms[i], hdsp->iobase + ofs,
hdsp->iobase + ofs + 4))
return -EFAULT;
j++;
}
return 0;
}
static int hdsp_9632_get_peak(struct hdsp *hdsp, struct hdsp_peak_rms __user *peak_rms)
{
int i, j;
struct hdsp_9632_meters __iomem *m;
int doublespeed = 0;
if (hdsp_read (hdsp, HDSP_statusRegister) & HDSP_DoubleSpeedStatus)
doublespeed = 1;
m = (struct hdsp_9632_meters __iomem *)(hdsp->iobase+HDSP_9632_metersBase);
for (i = 0, j = 0; i < 16; ++i, ++j) {
if (copy_u32_le(&peak_rms->input_peaks[i], &m->input_peak[j]))
return -EFAULT;
if (copy_u32_le(&peak_rms->playback_peaks[i], &m->playback_peak[j]))
return -EFAULT;
if (copy_u32_le(&peak_rms->output_peaks[i], &m->output_peak[j]))
return -EFAULT;
if (copy_u64_le(&peak_rms->input_rms[i], &m->input_rms_low[j],
&m->input_rms_high[j]))
return -EFAULT;
if (copy_u64_le(&peak_rms->playback_rms[i], &m->playback_rms_low[j],
&m->playback_rms_high[j]))
return -EFAULT;
if (copy_u64_le(&peak_rms->output_rms[i], &m->output_rms_low[j],
&m->output_rms_high[j]))
return -EFAULT;
if (doublespeed && i == 3) i += 4;
}
return 0;
}
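/*
 * Default (Digiface/Multiface) meters: the 64-bit RMS values are stored
 * high word first, hence the low word is read from offset +4.
 */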
static int hdsp_get_peak(struct hdsp *hdsp, struct hdsp_peak_rms __user *peak_rms)
{
int i;
for (i = 0; i < 26; i++) {
if (copy_u32_le(&peak_rms->playback_peaks[i],
hdsp->iobase + HDSP_playbackPeakLevel + i * 4))
return -EFAULT;
if (copy_u32_le(&peak_rms->input_peaks[i],
hdsp->iobase + HDSP_inputPeakLevel + i * 4))
return -EFAULT;
}
for (i = 0; i < 28; i++) {
if (copy_u32_le(&peak_rms->output_peaks[i],
hdsp->iobase + HDSP_outputPeakLevel + i * 4))
return -EFAULT;
}
for (i = 0; i < 26; ++i) {
if (copy_u64_le(&peak_rms->playback_rms[i],
hdsp->iobase + HDSP_playbackRmsLevel + i * 8 + 4,
hdsp->iobase + HDSP_playbackRmsLevel + i * 8))
return -EFAULT;
if (copy_u64_le(&peak_rms->input_rms[i],
hdsp->iobase + HDSP_inputRmsLevel + i * 8 + 4,
hdsp->iobase + HDSP_inputRmsLevel + i * 8))
return -EFAULT;
}
return 0;
}
static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigned int cmd, unsigned long arg)
{
struct hdsp *hdsp = hw->private_data;
void __user *argp = (void __user *)arg;
int err;
switch (cmd) {
case SNDRV_HDSP_IOCTL_GET_PEAK_RMS: {
struct hdsp_peak_rms __user *peak_rms = (struct hdsp_peak_rms __user *)arg;
err = hdsp_check_for_iobox(hdsp);
if (err < 0)
return err;
err = hdsp_check_for_firmware(hdsp, 1);
if (err < 0)
return err;
if (!(hdsp->state & HDSP_FirmwareLoaded)) {
dev_err(hdsp->card->dev,
"firmware needs to be uploaded to the card.\n");
return -EINVAL;
}
switch (hdsp->io_type) {
case H9652:
return hdsp_9652_get_peak(hdsp, peak_rms);
case H9632:
return hdsp_9632_get_peak(hdsp, peak_rms);
default:
return hdsp_get_peak(hdsp, peak_rms);
}
}
case SNDRV_HDSP_IOCTL_GET_CONFIG_INFO: {
struct hdsp_config_info info;
unsigned long flags;
int i;
err = hdsp_check_for_iobox(hdsp);
if (err < 0)
return err;
err = hdsp_check_for_firmware(hdsp, 1);
if (err < 0)
return err;
memset(&info, 0, sizeof(info));
spin_lock_irqsave(&hdsp->lock, flags);
info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
if (hdsp->io_type != H9632)
info.adatsync_sync_check = (unsigned char)hdsp_adatsync_sync_check(hdsp);
info.spdif_sync_check = (unsigned char)hdsp_spdif_sync_check(hdsp);
for (i = 0; i < ((hdsp->io_type != Multiface && hdsp->io_type != RPM && hdsp->io_type != H9632) ? 3 : 1); ++i)
info.adat_sync_check[i] = (unsigned char)hdsp_adat_sync_check(hdsp, i);
info.spdif_in = (unsigned char)hdsp_spdif_in(hdsp);
info.spdif_out = (unsigned char)hdsp_toggle_setting(hdsp,
HDSP_SPDIFOpticalOut);
info.spdif_professional = (unsigned char)
hdsp_toggle_setting(hdsp, HDSP_SPDIFProfessional);
info.spdif_emphasis = (unsigned char)
hdsp_toggle_setting(hdsp, HDSP_SPDIFEmphasis);
info.spdif_nonaudio = (unsigned char)
hdsp_toggle_setting(hdsp, HDSP_SPDIFNonAudio);
info.spdif_sample_rate = hdsp_spdif_sample_rate(hdsp);
info.system_sample_rate = hdsp->system_sample_rate;
info.autosync_sample_rate = hdsp_external_sample_rate(hdsp);
info.system_clock_mode = (unsigned char)hdsp_system_clock_mode(hdsp);
info.clock_source = (unsigned char)hdsp_clock_source(hdsp);
info.autosync_ref = (unsigned char)hdsp_autosync_ref(hdsp);
info.line_out = (unsigned char)
hdsp_toggle_setting(hdsp, HDSP_LineOut);
if (hdsp->io_type == H9632) {
info.da_gain = (unsigned char)hdsp_da_gain(hdsp);
info.ad_gain = (unsigned char)hdsp_ad_gain(hdsp);
info.phone_gain = (unsigned char)hdsp_phone_gain(hdsp);
info.xlr_breakout_cable =
(unsigned char)hdsp_toggle_setting(hdsp,
HDSP_XLRBreakoutCable);
} else if (hdsp->io_type == RPM) {
info.da_gain = (unsigned char) hdsp_rpm_input12(hdsp);
info.ad_gain = (unsigned char) hdsp_rpm_input34(hdsp);
}
if (hdsp->io_type == H9632 || hdsp->io_type == H9652)
info.analog_extension_board =
(unsigned char)hdsp_toggle_setting(hdsp,
HDSP_AnalogExtensionBoard);
spin_unlock_irqrestore(&hdsp->lock, flags);
if (copy_to_user(argp, &info, sizeof(info)))
return -EFAULT;
break;
}
case SNDRV_HDSP_IOCTL_GET_9632_AEB: {
struct hdsp_9632_aeb h9632_aeb;
if (hdsp->io_type != H9632) return -EINVAL;
h9632_aeb.aebi = hdsp->ss_in_channels - H9632_SS_CHANNELS;
h9632_aeb.aebo = hdsp->ss_out_channels - H9632_SS_CHANNELS;
if (copy_to_user(argp, &h9632_aeb, sizeof(h9632_aeb)))
return -EFAULT;
break;
}
case SNDRV_HDSP_IOCTL_GET_VERSION: {
struct hdsp_version hdsp_version;
if (hdsp->io_type == H9652 || hdsp->io_type == H9632) return -EINVAL;
if (hdsp->io_type == Undefined) {
if ((err = hdsp_get_iobox_version(hdsp)) < 0)
return err;
}
memset(&hdsp_version, 0, sizeof(hdsp_version));
hdsp_version.io_type = hdsp->io_type;
hdsp_version.firmware_rev = hdsp->firmware_rev;
if (copy_to_user(argp, &hdsp_version, sizeof(hdsp_version)))
return -EFAULT;
break;
}
case SNDRV_HDSP_IOCTL_UPLOAD_FIRMWARE: {
struct hdsp_firmware __user *firmware;
u32 __user *firmware_data;
int err;
if (hdsp->io_type == H9652 || hdsp->io_type == H9632) return -EINVAL;
/* SNDRV_HDSP_IOCTL_GET_VERSION must have been called */
if (hdsp->io_type == Undefined) return -EINVAL;
if (hdsp->state & (HDSP_FirmwareCached | HDSP_FirmwareLoaded))
return -EBUSY;
dev_info(hdsp->card->dev,
"initializing firmware upload\n");
firmware = (struct hdsp_firmware __user *)argp;
if (get_user(firmware_data, &firmware->firmware_data))
return -EFAULT;
if (hdsp_check_for_iobox (hdsp))
return -EIO;
if (!hdsp->fw_uploaded) {
hdsp->fw_uploaded = vmalloc(HDSP_FIRMWARE_SIZE);
if (!hdsp->fw_uploaded)
return -ENOMEM;
}
if (copy_from_user(hdsp->fw_uploaded, firmware_data,
HDSP_FIRMWARE_SIZE)) {
vfree(hdsp->fw_uploaded);
hdsp->fw_uploaded = NULL;
return -EFAULT;
}
hdsp->state |= HDSP_FirmwareCached;
if ((err = snd_hdsp_load_firmware_from_cache(hdsp)) < 0)
return err;
if (!(hdsp->state & HDSP_InitializationComplete)) {
if ((err = snd_hdsp_enable_io(hdsp)) < 0)
return err;
snd_hdsp_initialize_channels(hdsp);
snd_hdsp_initialize_midi_flush(hdsp);
if ((err = snd_hdsp_create_alsa_devices(hdsp->card, hdsp)) < 0) {
dev_err(hdsp->card->dev,
"error creating alsa devices\n");
return err;
}
}
break;
}
case SNDRV_HDSP_IOCTL_GET_MIXER: {
struct hdsp_mixer __user *mixer = (struct hdsp_mixer __user *)argp;
if (copy_to_user(mixer->matrix, hdsp->mixer_matrix, sizeof(unsigned short)*HDSP_MATRIX_MIXER_SIZE))
return -EFAULT;
break;
}
default:
return -EINVAL;
}
return 0;
}
static struct snd_pcm_ops snd_hdsp_playback_ops = {
.open = snd_hdsp_playback_open,
.close = snd_hdsp_playback_release,
.ioctl = snd_hdsp_ioctl,
.hw_params = snd_hdsp_hw_params,
.prepare = snd_hdsp_prepare,
.trigger = snd_hdsp_trigger,
.pointer = snd_hdsp_hw_pointer,
.copy = snd_hdsp_playback_copy,
.silence = snd_hdsp_hw_silence,
};
static struct snd_pcm_ops snd_hdsp_capture_ops = {
.open = snd_hdsp_capture_open,
.close = snd_hdsp_capture_release,
.ioctl = snd_hdsp_ioctl,
.hw_params = snd_hdsp_hw_params,
.prepare = snd_hdsp_prepare,
.trigger = snd_hdsp_trigger,
.pointer = snd_hdsp_hw_pointer,
.copy = snd_hdsp_capture_copy,
};
static int snd_hdsp_create_hwdep(struct snd_card *card, struct hdsp *hdsp)
{
struct snd_hwdep *hw;
int err;
if ((err = snd_hwdep_new(card, "HDSP hwdep", 0, &hw)) < 0)
return err;
hdsp->hwdep = hw;
hw->private_data = hdsp;
strcpy(hw->name, "HDSP hwdep interface");
hw->ops.ioctl = snd_hdsp_hwdep_ioctl;
hw->ops.ioctl_compat = snd_hdsp_hwdep_ioctl;
return 0;
}
static int snd_hdsp_create_pcm(struct snd_card *card, struct hdsp *hdsp)
{
struct snd_pcm *pcm;
int err;
if ((err = snd_pcm_new(card, hdsp->card_name, 0, 1, 1, &pcm)) < 0)
return err;
hdsp->pcm = pcm;
pcm->private_data = hdsp;
strcpy(pcm->name, hdsp->card_name);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_hdsp_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_hdsp_capture_ops);
pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
return 0;
}
static void snd_hdsp_9652_enable_mixer (struct hdsp *hdsp)
{
hdsp->control2_register |= HDSP_9652_ENABLE_MIXER;
hdsp_write (hdsp, HDSP_control2Reg, hdsp->control2_register);
}
static int snd_hdsp_enable_io (struct hdsp *hdsp)
{
int i;
if (hdsp_fifo_wait (hdsp, 0, 100)) {
dev_err(hdsp->card->dev,
"enable_io fifo_wait failed\n");
return -EIO;
}
for (i = 0; i < hdsp->max_channels; ++i) {
hdsp_write (hdsp, HDSP_inputEnable + (4 * i), 1);
hdsp_write (hdsp, HDSP_outputEnable + (4 * i), 1);
}
return 0;
}
static void snd_hdsp_initialize_channels(struct hdsp *hdsp)
{
int status, aebi_channels, aebo_channels;
switch (hdsp->io_type) {
case Digiface:
hdsp->card_name = "RME Hammerfall DSP + Digiface";
hdsp->ss_in_channels = hdsp->ss_out_channels = DIGIFACE_SS_CHANNELS;
hdsp->ds_in_channels = hdsp->ds_out_channels = DIGIFACE_DS_CHANNELS;
break;
case H9652:
hdsp->card_name = "RME Hammerfall HDSP 9652";
hdsp->ss_in_channels = hdsp->ss_out_channels = H9652_SS_CHANNELS;
hdsp->ds_in_channels = hdsp->ds_out_channels = H9652_DS_CHANNELS;
break;
case H9632:
status = hdsp_read(hdsp, HDSP_statusRegister);
/* HDSP_AEBx bits are low when AEB are connected */
aebi_channels = (status & HDSP_AEBI) ? 0 : 4;
aebo_channels = (status & HDSP_AEBO) ? 0 : 4;
hdsp->card_name = "RME Hammerfall HDSP 9632";
hdsp->ss_in_channels = H9632_SS_CHANNELS+aebi_channels;
hdsp->ds_in_channels = H9632_DS_CHANNELS+aebi_channels;
hdsp->qs_in_channels = H9632_QS_CHANNELS+aebi_channels;
hdsp->ss_out_channels = H9632_SS_CHANNELS+aebo_channels;
hdsp->ds_out_channels = H9632_DS_CHANNELS+aebo_channels;
hdsp->qs_out_channels = H9632_QS_CHANNELS+aebo_channels;
break;
case Multiface:
hdsp->card_name = "RME Hammerfall DSP + Multiface";
hdsp->ss_in_channels = hdsp->ss_out_channels = MULTIFACE_SS_CHANNELS;
hdsp->ds_in_channels = hdsp->ds_out_channels = MULTIFACE_DS_CHANNELS;
break;
case RPM:
hdsp->card_name = "RME Hammerfall DSP + RPM";
hdsp->ss_in_channels = RPM_CHANNELS-1;
hdsp->ss_out_channels = RPM_CHANNELS;
hdsp->ds_in_channels = RPM_CHANNELS-1;
hdsp->ds_out_channels = RPM_CHANNELS;
break;
default:
/* should never get here */
break;
}
}
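/*
* Illustration of the H9632 arithmetic above: when analog extension
* boards are fitted on both sides, HDSP_AEBI and HDSP_AEBO read low, so
* four channels are added to each of the SS/DS/QS counts in both the
* input and output direction.
*/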
static void snd_hdsp_initialize_midi_flush (struct hdsp *hdsp)
{
snd_hdsp_flush_midi_input (hdsp, 0);
snd_hdsp_flush_midi_input (hdsp, 1);
}
static int snd_hdsp_create_alsa_devices(struct snd_card *card, struct hdsp *hdsp)
{
int err;
if ((err = snd_hdsp_create_pcm(card, hdsp)) < 0) {
dev_err(card->dev,
"Error creating pcm interface\n");
return err;
}
if ((err = snd_hdsp_create_midi(card, hdsp, 0)) < 0) {
dev_err(card->dev,
"Error creating first midi interface\n");
return err;
}
if (hdsp->io_type == Digiface || hdsp->io_type == H9652) {
if ((err = snd_hdsp_create_midi(card, hdsp, 1)) < 0) {
dev_err(card->dev,
"Error creating second midi interface\n");
return err;
}
}
if ((err = snd_hdsp_create_controls(card, hdsp)) < 0) {
dev_err(card->dev,
"Error creating ctl interface\n");
return err;
}
snd_hdsp_proc_init(hdsp);
hdsp->system_sample_rate = -1;
hdsp->playback_pid = -1;
hdsp->capture_pid = -1;
hdsp->capture_substream = NULL;
hdsp->playback_substream = NULL;
if ((err = snd_hdsp_set_defaults(hdsp)) < 0) {
dev_err(card->dev,
"Error setting default values\n");
return err;
}
if (!(hdsp->state & HDSP_InitializationComplete)) {
strcpy(card->shortname, "Hammerfall DSP");
sprintf(card->longname, "%s at 0x%lx, irq %d", hdsp->card_name,
hdsp->port, hdsp->irq);
if ((err = snd_card_register(card)) < 0) {
dev_err(card->dev,
"error registering card\n");
return err;
}
hdsp->state |= HDSP_InitializationComplete;
}
return 0;
}
/* load firmware via hotplug fw loader */
static int hdsp_request_fw_loader(struct hdsp *hdsp)
{
const char *fwfile;
const struct firmware *fw;
int err;
if (hdsp->io_type == H9652 || hdsp->io_type == H9632)
return 0;
if (hdsp->io_type == Undefined) {
if ((err = hdsp_get_iobox_version(hdsp)) < 0)
return err;
if (hdsp->io_type == H9652 || hdsp->io_type == H9632)
return 0;
}
/* caution: max length of firmware filename is 30! */
switch (hdsp->io_type) {
case RPM:
fwfile = "rpm_firmware.bin";
break;
case Multiface:
if (hdsp->firmware_rev == 0xa)
fwfile = "multiface_firmware.bin";
else
fwfile = "multiface_firmware_rev11.bin";
break;
case Digiface:
if (hdsp->firmware_rev == 0xa)
fwfile = "digiface_firmware.bin";
else
fwfile = "digiface_firmware_rev11.bin";
break;
default:
dev_err(hdsp->card->dev,
"invalid io_type %d\n", hdsp->io_type);
return -EINVAL;
}
if (request_firmware(&fw, fwfile, &hdsp->pci->dev)) {
dev_err(hdsp->card->dev,
"cannot load firmware %s\n", fwfile);
return -ENOENT;
}
if (fw->size < HDSP_FIRMWARE_SIZE) {
dev_err(hdsp->card->dev,
"too short firmware size %d (expected %d)\n",
(int)fw->size, HDSP_FIRMWARE_SIZE);
release_firmware(fw);
return -EINVAL;
}
hdsp->firmware = fw;
hdsp->state |= HDSP_FirmwareCached;
if ((err = snd_hdsp_load_firmware_from_cache(hdsp)) < 0)
return err;
if (!(hdsp->state & HDSP_InitializationComplete)) {
if ((err = snd_hdsp_enable_io(hdsp)) < 0)
return err;
if ((err = snd_hdsp_create_hwdep(hdsp->card, hdsp)) < 0) {
dev_err(hdsp->card->dev,
"error creating hwdep device\n");
return err;
}
snd_hdsp_initialize_channels(hdsp);
snd_hdsp_initialize_midi_flush(hdsp);
if ((err = snd_hdsp_create_alsa_devices(hdsp->card, hdsp)) < 0) {
dev_err(hdsp->card->dev,
"error creating alsa devices\n");
return err;
}
}
return 0;
}
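/*
* Note on the request_firmware() call above (general kernel behaviour,
* not specific to this driver): the named image, e.g.
* "multiface_firmware.bin", is fetched by the kernel firmware loader,
* which by default searches /lib/firmware on the root filesystem.
*/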
static int snd_hdsp_create(struct snd_card *card,
struct hdsp *hdsp)
{
struct pci_dev *pci = hdsp->pci;
int err;
int is_9652 = 0;
int is_9632 = 0;
hdsp->irq = -1;
hdsp->state = 0;
hdsp->midi[0].rmidi = NULL;
hdsp->midi[1].rmidi = NULL;
hdsp->midi[0].input = NULL;
hdsp->midi[1].input = NULL;
hdsp->midi[0].output = NULL;
hdsp->midi[1].output = NULL;
hdsp->midi[0].pending = 0;
hdsp->midi[1].pending = 0;
spin_lock_init(&hdsp->midi[0].lock);
spin_lock_init(&hdsp->midi[1].lock);
hdsp->iobase = NULL;
hdsp->control_register = 0;
hdsp->control2_register = 0;
hdsp->io_type = Undefined;
hdsp->max_channels = 26;
hdsp->card = card;
spin_lock_init(&hdsp->lock);
tasklet_init(&hdsp->midi_tasklet, hdsp_midi_tasklet, (unsigned long)hdsp);
pci_read_config_word(hdsp->pci, PCI_CLASS_REVISION, &hdsp->firmware_rev);
hdsp->firmware_rev &= 0xff;
/* From Martin Bjoernsen :
"It is important that the card's latency timer register in
the PCI configuration space is set to a value much larger
than 0 by the computer's BIOS or the driver.
The windows driver always sets this 8 bit register [...]
to its maximum 255 to avoid problems with some computers."
*/
pci_write_config_byte(hdsp->pci, PCI_LATENCY_TIMER, 0xFF);
strcpy(card->driver, "H-DSP");
strcpy(card->mixername, "Xilinx FPGA");
if (hdsp->firmware_rev < 0xa)
return -ENODEV;
else if (hdsp->firmware_rev < 0x64)
hdsp->card_name = "RME Hammerfall DSP";
else if (hdsp->firmware_rev < 0x96) {
hdsp->card_name = "RME HDSP 9652";
is_9652 = 1;
} else {
hdsp->card_name = "RME HDSP 9632";
hdsp->max_channels = 16;
is_9632 = 1;
}
if ((err = pci_enable_device(pci)) < 0)
return err;
pci_set_master(hdsp->pci);
if ((err = pci_request_regions(pci, "hdsp")) < 0)
return err;
hdsp->port = pci_resource_start(pci, 0);
if ((hdsp->iobase = ioremap_nocache(hdsp->port, HDSP_IO_EXTENT)) == NULL) {
dev_err(hdsp->card->dev, "unable to remap region 0x%lx-0x%lx\n",
hdsp->port, hdsp->port + HDSP_IO_EXTENT - 1);
return -EBUSY;
}
if (request_irq(pci->irq, snd_hdsp_interrupt, IRQF_SHARED,
KBUILD_MODNAME, hdsp)) {
dev_err(hdsp->card->dev, "unable to use IRQ %d\n", pci->irq);
return -EBUSY;
}
hdsp->irq = pci->irq;
hdsp->precise_ptr = 0;
hdsp->use_midi_tasklet = 1;
hdsp->dds_value = 0;
if ((err = snd_hdsp_initialize_memory(hdsp)) < 0)
return err;
if (!is_9652 && !is_9632) {
/* we wait a maximum of 10 seconds to let freshly
* inserted cardbus cards do their hardware init */
err = hdsp_wait_for_iobox(hdsp, 1000, 10);
if (err < 0)
return err;
if ((hdsp_read (hdsp, HDSP_statusRegister) & HDSP_DllError) != 0) {
if ((err = hdsp_request_fw_loader(hdsp)) < 0)
/* we don't fail as this can happen
if userspace is not ready for
firmware upload
*/
dev_err(hdsp->card->dev,
"couldn't get firmware from userspace. try using hdsploader\n");
else
/* init is complete, we return */
return 0;
/* we defer initialization */
dev_info(hdsp->card->dev,
"card initialization pending : waiting for firmware\n");
if ((err = snd_hdsp_create_hwdep(card, hdsp)) < 0)
return err;
return 0;
} else {
dev_info(hdsp->card->dev,
"Firmware already present, initializing card.\n");
if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version2)
hdsp->io_type = RPM;
else if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
hdsp->io_type = Multiface;
else
hdsp->io_type = Digiface;
}
}
if ((err = snd_hdsp_enable_io(hdsp)) != 0)
return err;
if (is_9652)
hdsp->io_type = H9652;
if (is_9632)
hdsp->io_type = H9632;
if ((err = snd_hdsp_create_hwdep(card, hdsp)) < 0)
return err;
snd_hdsp_initialize_channels(hdsp);
snd_hdsp_initialize_midi_flush(hdsp);
hdsp->state |= HDSP_FirmwareLoaded;
if ((err = snd_hdsp_create_alsa_devices(card, hdsp)) < 0)
return err;
return 0;
}
static int snd_hdsp_free(struct hdsp *hdsp)
{
if (hdsp->port) {
/* stop the audio, and cancel all interrupts */
tasklet_kill(&hdsp->midi_tasklet);
hdsp->control_register &= ~(HDSP_Start|HDSP_AudioInterruptEnable|HDSP_Midi0InterruptEnable|HDSP_Midi1InterruptEnable);
hdsp_write (hdsp, HDSP_controlRegister, hdsp->control_register);
}
if (hdsp->irq >= 0)
free_irq(hdsp->irq, (void *)hdsp);
snd_hdsp_free_buffers(hdsp);
release_firmware(hdsp->firmware);
vfree(hdsp->fw_uploaded);
iounmap(hdsp->iobase);
if (hdsp->port)
pci_release_regions(hdsp->pci);
pci_disable_device(hdsp->pci);
return 0;
}
static void snd_hdsp_card_free(struct snd_card *card)
{
struct hdsp *hdsp = card->private_data;
if (hdsp)
snd_hdsp_free(hdsp);
}
static int snd_hdsp_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
static int dev;
struct hdsp *hdsp;
struct snd_card *card;
int err;
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
dev++;
return -ENOENT;
}
err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
sizeof(struct hdsp), &card);
if (err < 0)
return err;
hdsp = card->private_data;
card->private_free = snd_hdsp_card_free;
hdsp->dev = dev;
hdsp->pci = pci;
if ((err = snd_hdsp_create(card, hdsp)) < 0) {
snd_card_free(card);
return err;
}
strcpy(card->shortname, "Hammerfall DSP");
sprintf(card->longname, "%s at 0x%lx, irq %d", hdsp->card_name,
hdsp->port, hdsp->irq);
if ((err = snd_card_register(card)) < 0) {
snd_card_free(card);
return err;
}
pci_set_drvdata(pci, card);
dev++;
return 0;
}
static void snd_hdsp_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
}
static struct pci_driver hdsp_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_hdsp_ids,
.probe = snd_hdsp_probe,
.remove = snd_hdsp_remove,
};
module_pci_driver(hdsp_driver);
| gpl-2.0 |
dutchiexl/constructiv | node_modules/node-sass/src/libsass/src/output.cpp | 452 | 10390 | #include "sass.hpp"
#include "ast.hpp"
#include "output.hpp"
namespace Sass {
Output::Output(Sass_Output_Options& opt)
: Inspect(Emitter(opt)),
charset(""),
top_nodes(0)
{}
Output::~Output() { }
void Output::fallback_impl(AST_Node* n)
{
return n->perform(this);
}
void Output::operator()(Number* n)
{
// use values to_string facility
std::string res = n->to_string(opt);
// check for a valid unit here
// includes result for reporting
if (!n->is_valid_css_unit()) {
throw Exception::InvalidValue(*n);
}
// output the final token
append_token(res, n);
}
void Output::operator()(Import* imp)
{
top_nodes.push_back(imp);
}
void Output::operator()(Map* m)
{
std::string dbg(m->to_string(opt));
error(dbg + " isn't a valid CSS value.", m->pstate());
}
OutputBuffer Output::get_buffer(void)
{
Emitter emitter(opt);
Inspect inspect(emitter);
size_t size_nodes = top_nodes.size();
for (size_t i = 0; i < size_nodes; i++) {
top_nodes[i]->perform(&inspect);
inspect.append_mandatory_linefeed();
}
// flush scheduled outputs
// maybe omit semicolon if possible
inspect.finalize(wbuf.buffer.size() == 0);
// prepend buffer on top
prepend_output(inspect.output());
// make sure we end with a linefeed
if (!ends_with(wbuf.buffer, opt.linefeed)) {
// if the output is not completely empty
if (!wbuf.buffer.empty()) append_string(opt.linefeed);
}
// search for unicode char
for(const char& chr : wbuf.buffer) {
// skip all ascii chars
// static cast to unsigned to handle `char` being signed / unsigned
if (static_cast<unsigned>(chr) < 128) continue;
// declare the charset
if (output_style() != COMPRESSED)
charset = "@charset \"UTF-8\";"
+ std::string(opt.linefeed);
else charset = "\xEF\xBB\xBF";
// abort search
break;
}
// add charset as first line, before comments and imports
if (!charset.empty()) prepend_string(charset);
return wbuf;
}
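// Sketch of the intended call pattern for this class (an assumption
// based on its public interface, not code from this file):
//
//   Output output(opt);
//   root_block->perform(&output);        // visit the compiled AST
//   OutputBuffer buf = output.get_buffer();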
void Output::operator()(Comment* c)
{
std::string txt = c->text()->to_string(opt);
// if (indentation && txt == "/**/") return;
bool important = c->is_important();
if (output_style() != COMPRESSED || important) {
if (buffer().size() == 0) {
top_nodes.push_back(c);
} else {
in_comment = true;
append_indentation();
c->text()->perform(this);
in_comment = false;
if (indentation == 0) {
append_mandatory_linefeed();
} else {
append_optional_linefeed();
}
}
}
}
void Output::operator()(Ruleset* r)
{
Selector* s = r->selector();
Block* b = r->block();
bool decls = false;
// Filter out rulesets that aren't printable (process its children though)
if (!Util::isPrintable(r, output_style())) {
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
if (dynamic_cast<Has_Block*>(stm)) {
stm->perform(this);
}
}
return;
}
if (b->has_non_hoistable()) {
decls = true;
if (output_style() == NESTED) indentation += r->tabs();
if (opt.source_comments) {
std::stringstream ss;
append_indentation();
ss << "/* line " << r->pstate().line + 1 << ", " << r->pstate().path << " */";
append_string(ss.str());
append_optional_linefeed();
}
s->perform(this);
append_scope_opener(b);
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
bool bPrintExpression = true;
// Check print conditions
if (typeid(*stm) == typeid(Declaration)) {
Declaration* dec = static_cast<Declaration*>(stm);
if (dec->value()->concrete_type() == Expression::STRING) {
String_Constant* valConst = static_cast<String_Constant*>(dec->value());
std::string val(valConst->value());
if (auto qstr = dynamic_cast<String_Quoted*>(valConst)) {
if (!qstr->quote_mark() && val.empty()) {
bPrintExpression = false;
}
}
}
else if (dec->value()->concrete_type() == Expression::LIST) {
List* list = static_cast<List*>(dec->value());
bool all_invisible = true;
for (size_t list_i = 0, list_L = list->length(); list_i < list_L; ++list_i) {
Expression* item = (*list)[list_i];
if (!item->is_invisible()) all_invisible = false;
}
if (all_invisible) bPrintExpression = false;
}
}
// Print if OK
if (!stm->is_hoistable() && bPrintExpression) {
stm->perform(this);
}
}
if (output_style() == NESTED) indentation -= r->tabs();
append_scope_closer(b);
}
if (b->has_hoistable()) {
if (decls) ++indentation;
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
if (stm->is_hoistable()) {
stm->perform(this);
}
}
if (decls) --indentation;
}
}
void Output::operator()(Keyframe_Rule* r)
{
Block* b = r->block();
Selector* v = r->selector();
if (v) {
v->perform(this);
}
if (!b) {
append_colon_separator();
return;
}
append_scope_opener();
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
if (!stm->is_hoistable()) {
stm->perform(this);
if (i < L - 1) append_special_linefeed();
}
}
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
if (stm->is_hoistable()) {
stm->perform(this);
}
}
append_scope_closer();
}
void Output::operator()(Supports_Block* f)
{
if (f->is_invisible()) return;
Supports_Condition* c = f->condition();
Block* b = f->block();
// Filter out feature blocks that aren't printable (process its children though)
if (!Util::isPrintable(f, output_style())) {
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
if (dynamic_cast<Has_Block*>(stm)) {
stm->perform(this);
}
}
return;
}
if (output_style() == NESTED) indentation += f->tabs();
append_indentation();
append_token("@supports", f);
append_mandatory_space();
c->perform(this);
append_scope_opener();
if (b->has_non_hoistable()) {
// JMA - hoisted, output the non-hoistable in a nested block, followed by the hoistable
append_scope_opener();
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
if (!stm->is_hoistable()) {
stm->perform(this);
}
}
append_scope_closer();
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
if (stm->is_hoistable()) {
stm->perform(this);
}
}
}
else {
// JMA - not hoisted, just output in order
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
stm->perform(this);
if (i < L - 1) append_special_linefeed();
}
}
if (output_style() == NESTED) indentation -= f->tabs();
append_scope_closer();
}
void Output::operator()(Media_Block* m)
{
if (m->is_invisible()) return;
List* q = m->media_queries();
Block* b = m->block();
// Filter out media blocks that aren't printable (process its children though)
if (!Util::isPrintable(m, output_style())) {
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
if (dynamic_cast<Has_Block*>(stm)) {
stm->perform(this);
}
}
return;
}
if (output_style() == NESTED) indentation += m->tabs();
append_indentation();
append_token("@media", m);
append_mandatory_space();
in_media_block = true;
q->perform(this);
in_media_block = false;
append_scope_opener();
for (size_t i = 0, L = b->length(); i < L; ++i) {
if ((*b)[i]) (*b)[i]->perform(this);
if (i < L - 1) append_special_linefeed();
}
if (output_style() == NESTED) indentation -= m->tabs();
append_scope_closer();
}
void Output::operator()(Directive* a)
{
std::string kwd = a->keyword();
Selector* s = a->selector();
Expression* v = a->value();
Block* b = a->block();
append_indentation();
append_token(kwd, a);
if (s) {
append_mandatory_space();
in_wrapped = true;
s->perform(this);
in_wrapped = false;
}
if (v) {
append_mandatory_space();
// ruby sass bug? should use options?
append_token(v->to_string(/* opt */), v);
}
if (!b) {
append_delimiter();
return;
}
if (b->is_invisible() || b->length() == 0) {
append_optional_space();
return append_string("{}");
}
append_scope_opener();
bool format = kwd != "@font-face";;
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
if (!stm->is_hoistable()) {
stm->perform(this);
if (i < L - 1 && format) append_special_linefeed();
}
}
for (size_t i = 0, L = b->length(); i < L; ++i) {
Statement* stm = (*b)[i];
if (stm->is_hoistable()) {
stm->perform(this);
if (i < L - 1 && format) append_special_linefeed();
}
}
append_scope_closer();
}
void Output::operator()(String_Quoted* s)
{
if (s->quote_mark()) {
append_token(quote(s->value(), s->quote_mark()), s);
} else if (!in_comment) {
append_token(string_to_output(s->value()), s);
} else {
append_token(s->value(), s);
}
}
void Output::operator()(String_Constant* s)
{
std::string value(s->value());
if (s->can_compress_whitespace() && output_style() == COMPRESSED) {
value.erase(std::remove_if(value.begin(), value.end(), ::isspace), value.end());
}
if (!in_comment) {
append_token(string_to_output(value), s);
} else {
append_token(value, s);
}
}
}
| gpl-2.0 |
segment-routing/openwrt | drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c | 1476 | 7340 | /******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
*
******************************************************************************/
#define _RTL8188E_REDESC_C_
#include <osdep_service.h>
#include <drv_types.h>
#include <rtl8188e_hal.h>
static void process_rssi(struct adapter *padapter, struct recv_frame *prframe)
{
struct rx_pkt_attrib *pattrib = &prframe->attrib;
struct signal_stat *signal_stat = &padapter->recvpriv.signal_strength_data;
if (signal_stat->update_req) {
signal_stat->total_num = 0;
signal_stat->total_val = 0;
signal_stat->update_req = 0;
}
signal_stat->total_num++;
signal_stat->total_val += pattrib->phy_info.SignalStrength;
signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
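/*
* Illustrative arithmetic: three packets with SignalStrength 40, 50 and
* 60 give total_val = 150 and total_num = 3, so avg_val = 50 until the
* next update_req resets the accumulation window.
*/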
} /* Process_UI_RSSI_8192C */
static void process_link_qual(struct adapter *padapter,
struct recv_frame *prframe)
{
struct rx_pkt_attrib *pattrib;
struct signal_stat *signal_stat;
if (prframe == NULL || padapter == NULL)
return;
pattrib = &prframe->attrib;
signal_stat = &padapter->recvpriv.signal_qual_data;
if (signal_stat->update_req) {
signal_stat->total_num = 0;
signal_stat->total_val = 0;
signal_stat->update_req = 0;
}
signal_stat->total_num++;
signal_stat->total_val += pattrib->phy_info.SignalQuality;
signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
}
void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
{
struct recv_frame *precvframe = (struct recv_frame *)prframe;
/* Check RSSI */
process_rssi(padapter, precvframe);
/* Check EVM */
process_link_qual(padapter, precvframe);
}
void update_recvframe_attrib_88e(struct recv_frame *precvframe,
struct recv_stat *prxstat)
{
struct rx_pkt_attrib *pattrib;
struct recv_stat report;
report.rxdw0 = prxstat->rxdw0;
report.rxdw1 = prxstat->rxdw1;
report.rxdw2 = prxstat->rxdw2;
report.rxdw3 = prxstat->rxdw3;
report.rxdw4 = prxstat->rxdw4;
report.rxdw5 = prxstat->rxdw5;
pattrib = &precvframe->attrib;
memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
pattrib->crc_err = (u8)((le32_to_cpu(report.rxdw0) >> 14) & 0x1);/* u8)prxreport->crc32; */
/* update rx report to recv_frame attribute */
pattrib->pkt_rpt_type = (u8)((le32_to_cpu(report.rxdw3) >> 14) & 0x3);/* prxreport->rpt_sel; */
if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
pattrib->pkt_len = (u16)(le32_to_cpu(report.rxdw0) & 0x00003fff);/* u16)prxreport->pktlen; */
pattrib->drvinfo_sz = (u8)((le32_to_cpu(report.rxdw0) >> 16) & 0xf) * 8;/* u8)(prxreport->drvinfosize << 3); */
pattrib->physt = (u8)((le32_to_cpu(report.rxdw0) >> 26) & 0x1);/* u8)prxreport->physt; */
pattrib->bdecrypted = (le32_to_cpu(report.rxdw0) & BIT(27)) ? 0 : 1;/* u8)(prxreport->swdec ? 0 : 1); */
pattrib->encrypt = (u8)((le32_to_cpu(report.rxdw0) >> 20) & 0x7);/* u8)prxreport->security; */
pattrib->qos = (u8)((le32_to_cpu(report.rxdw0) >> 23) & 0x1);/* u8)prxreport->qos; */
pattrib->priority = (u8)((le32_to_cpu(report.rxdw1) >> 8) & 0xf);/* u8)prxreport->tid; */
pattrib->amsdu = (u8)((le32_to_cpu(report.rxdw1) >> 13) & 0x1);/* u8)prxreport->amsdu; */
pattrib->seq_num = (u16)(le32_to_cpu(report.rxdw2) & 0x00000fff);/* u16)prxreport->seq; */
pattrib->frag_num = (u8)((le32_to_cpu(report.rxdw2) >> 12) & 0xf);/* u8)prxreport->frag; */
pattrib->mfrag = (u8)((le32_to_cpu(report.rxdw1) >> 27) & 0x1);/* u8)prxreport->mf; */
pattrib->mdata = (u8)((le32_to_cpu(report.rxdw1) >> 26) & 0x1);/* u8)prxreport->md; */
pattrib->mcs_rate = (u8)(le32_to_cpu(report.rxdw3) & 0x3f);/* u8)prxreport->rxmcs; */
pattrib->rxht = (u8)((le32_to_cpu(report.rxdw3) >> 6) & 0x1);/* u8)prxreport->rxht; */
pattrib->icv_err = (u8)((le32_to_cpu(report.rxdw0) >> 15) & 0x1);/* u8)prxreport->icverr; */
pattrib->shift_sz = (u8)((le32_to_cpu(report.rxdw0) >> 24) & 0x3);
} else if (pattrib->pkt_rpt_type == TX_REPORT1) { /* CCX */
pattrib->pkt_len = TX_RPT1_PKT_LEN;
pattrib->drvinfo_sz = 0;
} else if (pattrib->pkt_rpt_type == TX_REPORT2) { /* TX RPT */
pattrib->pkt_len = (u16)(le32_to_cpu(report.rxdw0) & 0x3FF);/* Rx length[9:0] */
pattrib->drvinfo_sz = 0;
/* Get TX report MAC ID valid. */
pattrib->MacIDValidEntry[0] = le32_to_cpu(report.rxdw4);
pattrib->MacIDValidEntry[1] = le32_to_cpu(report.rxdw5);
} else if (pattrib->pkt_rpt_type == HIS_REPORT) { /* USB HISR RPT */
pattrib->pkt_len = (u16)(le32_to_cpu(report.rxdw0) & 0x00003fff);/* u16)prxreport->pktlen; */
}
}
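/*
* Worked example for the NORMAL_RX extraction above (illustrative value
* only): with rxdw0 == 0x00040234, pkt_len = 0x00040234 & 0x3fff = 564
* bytes and drvinfo_sz = ((0x00040234 >> 16) & 0xf) * 8 = 32 bytes.
*/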
/*
* Notice:
* Before calling this function,
* precvframe->rx_data should be ready!
*/
void update_recvframe_phyinfo_88e(struct recv_frame *precvframe,
struct phy_stat *pphy_status)
{
struct adapter *padapter = precvframe->adapter;
struct rx_pkt_attrib *pattrib = &precvframe->attrib;
struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
struct odm_phy_status_info *pPHYInfo = (struct odm_phy_status_info *)(&pattrib->phy_info);
u8 *wlanhdr;
struct odm_per_pkt_info pkt_info;
u8 *sa = NULL;
struct sta_priv *pstapriv;
struct sta_info *psta;
pkt_info.bPacketMatchBSSID = false;
pkt_info.bPacketToSelf = false;
pkt_info.bPacketBeacon = false;
wlanhdr = precvframe->rx_data;
pkt_info.bPacketMatchBSSID = ((!IsFrameTypeCtrl(wlanhdr)) &&
!pattrib->icv_err && !pattrib->crc_err &&
!memcmp(get_hdr_bssid(wlanhdr),
get_bssid(&padapter->mlmepriv), ETH_ALEN));
pkt_info.bPacketToSelf = pkt_info.bPacketMatchBSSID &&
(!memcmp(get_da(wlanhdr),
myid(&padapter->eeprompriv), ETH_ALEN));
pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
(GetFrameSubType(wlanhdr) == WIFI_BEACON);
if (pkt_info.bPacketBeacon) {
if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE))
sa = padapter->mlmepriv.cur_network.network.MacAddress;
/* to do Ad-hoc */
} else {
sa = get_sa(wlanhdr);
}
pstapriv = &padapter->stapriv;
pkt_info.StationID = 0xFF;
psta = rtw_get_stainfo(pstapriv, sa);
if (psta)
pkt_info.StationID = psta->mac_id;
pkt_info.Rate = pattrib->mcs_rate;
ODM_PhyStatusQuery(&pHalData->odmpriv, pPHYInfo, (u8 *)pphy_status, &(pkt_info));
precvframe->psta = NULL;
if (pkt_info.bPacketMatchBSSID &&
(check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE))) {
if (psta) {
precvframe->psta = psta;
rtl8188e_process_phy_info(padapter, precvframe);
}
} else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) {
if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) {
if (psta)
precvframe->psta = psta;
}
rtl8188e_process_phy_info(padapter, precvframe);
}
}
| gpl-2.0 |
zhuyj/gmac | arch/arm/probes/decode-thumb.c | 1476 | 32477 | /*
* arch/arm/probes/decode-thumb.c
*
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "decode.h"
#include "decode-thumb.h"
static const union decode_item t32_table_1110_100x_x0xx[] = {
/* Load/store multiple instructions */
/* Rn is PC 1110 100x x0xx 1111 xxxx xxxx xxxx xxxx */
DECODE_REJECT (0xfe4f0000, 0xe80f0000),
/* SRS 1110 1000 00x0 xxxx xxxx xxxx xxxx xxxx */
/* RFE 1110 1000 00x1 xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0xffc00000, 0xe8000000),
/* SRS 1110 1001 10x0 xxxx xxxx xxxx xxxx xxxx */
/* RFE 1110 1001 10x1 xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0xffc00000, 0xe9800000),
/* STM Rn, {...pc} 1110 100x x0x0 xxxx 1xxx xxxx xxxx xxxx */
DECODE_REJECT (0xfe508000, 0xe8008000),
/* LDM Rn, {...lr,pc} 1110 100x x0x1 xxxx 11xx xxxx xxxx xxxx */
DECODE_REJECT (0xfe50c000, 0xe810c000),
/* LDM/STM Rn, {...sp} 1110 100x x0xx xxxx xx1x xxxx xxxx xxxx */
DECODE_REJECT (0xfe402000, 0xe8002000),
/* STMIA 1110 1000 10x0 xxxx xxxx xxxx xxxx xxxx */
/* LDMIA 1110 1000 10x1 xxxx xxxx xxxx xxxx xxxx */
/* STMDB 1110 1001 00x0 xxxx xxxx xxxx xxxx xxxx */
/* LDMDB 1110 1001 00x1 xxxx xxxx xxxx xxxx xxxx */
DECODE_CUSTOM (0xfe400000, 0xe8000000, PROBES_T32_LDMSTM),
DECODE_END
};
static const union decode_item t32_table_1110_100x_x1xx[] = {
/* Load/store dual, load/store exclusive, table branch */
/* STRD (immediate) 1110 1000 x110 xxxx xxxx xxxx xxxx xxxx */
/* LDRD (immediate) 1110 1000 x111 xxxx xxxx xxxx xxxx xxxx */
DECODE_OR (0xff600000, 0xe8600000),
/* STRD (immediate) 1110 1001 x1x0 xxxx xxxx xxxx xxxx xxxx */
/* LDRD (immediate) 1110 1001 x1x1 xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xff400000, 0xe9400000, PROBES_T32_LDRDSTRD,
REGS(NOPCWB, NOSPPC, NOSPPC, 0, 0)),
/* TBB 1110 1000 1101 xxxx xxxx xxxx 0000 xxxx */
/* TBH 1110 1000 1101 xxxx xxxx xxxx 0001 xxxx */
DECODE_SIMULATEX(0xfff000e0, 0xe8d00000, PROBES_T32_TABLE_BRANCH,
REGS(NOSP, 0, 0, 0, NOSPPC)),
/* STREX 1110 1000 0100 xxxx xxxx xxxx xxxx xxxx */
/* LDREX 1110 1000 0101 xxxx xxxx xxxx xxxx xxxx */
/* STREXB 1110 1000 1100 xxxx xxxx xxxx 0100 xxxx */
/* STREXH 1110 1000 1100 xxxx xxxx xxxx 0101 xxxx */
/* STREXD 1110 1000 1100 xxxx xxxx xxxx 0111 xxxx */
/* LDREXB 1110 1000 1101 xxxx xxxx xxxx 0100 xxxx */
/* LDREXH 1110 1000 1101 xxxx xxxx xxxx 0101 xxxx */
/* LDREXD 1110 1000 1101 xxxx xxxx xxxx 0111 xxxx */
/* And unallocated instructions... */
DECODE_END
};
static const union decode_item t32_table_1110_101x[] = {
/* Data-processing (shifted register) */
/* TST 1110 1010 0001 xxxx xxxx 1111 xxxx xxxx */
/* TEQ 1110 1010 1001 xxxx xxxx 1111 xxxx xxxx */
DECODE_EMULATEX (0xff700f00, 0xea100f00, PROBES_T32_TST,
REGS(NOSPPC, 0, 0, 0, NOSPPC)),
/* CMN 1110 1011 0001 xxxx xxxx 1111 xxxx xxxx */
DECODE_OR (0xfff00f00, 0xeb100f00),
/* CMP 1110 1011 1011 xxxx xxxx 1111 xxxx xxxx */
DECODE_EMULATEX (0xfff00f00, 0xebb00f00, PROBES_T32_TST,
REGS(NOPC, 0, 0, 0, NOSPPC)),
/* MOV 1110 1010 010x 1111 xxxx xxxx xxxx xxxx */
/* MVN 1110 1010 011x 1111 xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xffcf0000, 0xea4f0000, PROBES_T32_MOV,
REGS(0, 0, NOSPPC, 0, NOSPPC)),
/* ??? 1110 1010 101x xxxx xxxx xxxx xxxx xxxx */
/* ??? 1110 1010 111x xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0xffa00000, 0xeaa00000),
/* ??? 1110 1011 001x xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0xffe00000, 0xeb200000),
/* ??? 1110 1011 100x xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0xffe00000, 0xeb800000),
/* ??? 1110 1011 111x xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0xffe00000, 0xebe00000),
/* ADD/SUB SP, SP, Rm, LSL #0..3 */
/* 1110 1011 x0xx 1101 x000 1101 xx00 xxxx */
DECODE_EMULATEX (0xff4f7f30, 0xeb0d0d00, PROBES_T32_ADDSUB,
REGS(SP, 0, SP, 0, NOSPPC)),
/* ADD/SUB SP, SP, Rm, shift */
/* 1110 1011 x0xx 1101 xxxx 1101 xxxx xxxx */
DECODE_REJECT (0xff4f0f00, 0xeb0d0d00),
/* ADD/SUB Rd, SP, Rm, shift */
/* 1110 1011 x0xx 1101 xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xff4f0000, 0xeb0d0000, PROBES_T32_ADDSUB,
REGS(SP, 0, NOPC, 0, NOSPPC)),
/* AND 1110 1010 000x xxxx xxxx xxxx xxxx xxxx */
/* BIC 1110 1010 001x xxxx xxxx xxxx xxxx xxxx */
/* ORR 1110 1010 010x xxxx xxxx xxxx xxxx xxxx */
/* ORN 1110 1010 011x xxxx xxxx xxxx xxxx xxxx */
/* EOR 1110 1010 100x xxxx xxxx xxxx xxxx xxxx */
/* PKH 1110 1010 110x xxxx xxxx xxxx xxxx xxxx */
/* ADD 1110 1011 000x xxxx xxxx xxxx xxxx xxxx */
/* ADC 1110 1011 010x xxxx xxxx xxxx xxxx xxxx */
/* SBC 1110 1011 011x xxxx xxxx xxxx xxxx xxxx */
/* SUB 1110 1011 101x xxxx xxxx xxxx xxxx xxxx */
/* RSB 1110 1011 110x xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfe000000, 0xea000000, PROBES_T32_LOGICAL,
REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
DECODE_END
};
static const union decode_item t32_table_1111_0x0x___0[] = {
/* Data-processing (modified immediate) */
/* TST 1111 0x00 0001 xxxx 0xxx 1111 xxxx xxxx */
/* TEQ 1111 0x00 1001 xxxx 0xxx 1111 xxxx xxxx */
DECODE_EMULATEX (0xfb708f00, 0xf0100f00, PROBES_T32_TST,
REGS(NOSPPC, 0, 0, 0, 0)),
/* CMN 1111 0x01 0001 xxxx 0xxx 1111 xxxx xxxx */
DECODE_OR (0xfbf08f00, 0xf1100f00),
/* CMP 1111 0x01 1011 xxxx 0xxx 1111 xxxx xxxx */
DECODE_EMULATEX (0xfbf08f00, 0xf1b00f00, PROBES_T32_CMP,
REGS(NOPC, 0, 0, 0, 0)),
/* MOV 1111 0x00 010x 1111 0xxx xxxx xxxx xxxx */
/* MVN 1111 0x00 011x 1111 0xxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfbcf8000, 0xf04f0000, PROBES_T32_MOV,
REGS(0, 0, NOSPPC, 0, 0)),
/* ??? 1111 0x00 101x xxxx 0xxx xxxx xxxx xxxx */
DECODE_REJECT (0xfbe08000, 0xf0a00000),
/* ??? 1111 0x00 110x xxxx 0xxx xxxx xxxx xxxx */
/* ??? 1111 0x00 111x xxxx 0xxx xxxx xxxx xxxx */
DECODE_REJECT (0xfbc08000, 0xf0c00000),
/* ??? 1111 0x01 001x xxxx 0xxx xxxx xxxx xxxx */
DECODE_REJECT (0xfbe08000, 0xf1200000),
/* ??? 1111 0x01 100x xxxx 0xxx xxxx xxxx xxxx */
DECODE_REJECT (0xfbe08000, 0xf1800000),
/* ??? 1111 0x01 111x xxxx 0xxx xxxx xxxx xxxx */
DECODE_REJECT (0xfbe08000, 0xf1e00000),
/* ADD Rd, SP, #imm 1111 0x01 000x 1101 0xxx xxxx xxxx xxxx */
/* SUB Rd, SP, #imm 1111 0x01 101x 1101 0xxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfb4f8000, 0xf10d0000, PROBES_T32_ADDSUB,
REGS(SP, 0, NOPC, 0, 0)),
/* AND 1111 0x00 000x xxxx 0xxx xxxx xxxx xxxx */
/* BIC 1111 0x00 001x xxxx 0xxx xxxx xxxx xxxx */
/* ORR 1111 0x00 010x xxxx 0xxx xxxx xxxx xxxx */
/* ORN 1111 0x00 011x xxxx 0xxx xxxx xxxx xxxx */
/* EOR 1111 0x00 100x xxxx 0xxx xxxx xxxx xxxx */
/* ADD 1111 0x01 000x xxxx 0xxx xxxx xxxx xxxx */
/* ADC 1111 0x01 010x xxxx 0xxx xxxx xxxx xxxx */
/* SBC 1111 0x01 011x xxxx 0xxx xxxx xxxx xxxx */
/* SUB 1111 0x01 101x xxxx 0xxx xxxx xxxx xxxx */
/* RSB 1111 0x01 110x xxxx 0xxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfa008000, 0xf0000000, PROBES_T32_LOGICAL,
REGS(NOSPPC, 0, NOSPPC, 0, 0)),
DECODE_END
};
static const union decode_item t32_table_1111_0x1x___0[] = {
/* Data-processing (plain binary immediate) */
/* ADDW Rd, PC, #imm 1111 0x10 0000 1111 0xxx xxxx xxxx xxxx */
DECODE_OR (0xfbff8000, 0xf20f0000),
/* SUBW Rd, PC, #imm 1111 0x10 1010 1111 0xxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfbff8000, 0xf2af0000, PROBES_T32_ADDWSUBW_PC,
REGS(PC, 0, NOSPPC, 0, 0)),
/* ADDW SP, SP, #imm 1111 0x10 0000 1101 0xxx 1101 xxxx xxxx */
DECODE_OR (0xfbff8f00, 0xf20d0d00),
/* SUBW SP, SP, #imm 1111 0x10 1010 1101 0xxx 1101 xxxx xxxx */
DECODE_EMULATEX (0xfbff8f00, 0xf2ad0d00, PROBES_T32_ADDWSUBW,
REGS(SP, 0, SP, 0, 0)),
/* ADDW 1111 0x10 0000 xxxx 0xxx xxxx xxxx xxxx */
DECODE_OR (0xfbf08000, 0xf2000000),
/* SUBW 1111 0x10 1010 xxxx 0xxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfbf08000, 0xf2a00000, PROBES_T32_ADDWSUBW,
REGS(NOPCX, 0, NOSPPC, 0, 0)),
/* MOVW 1111 0x10 0100 xxxx 0xxx xxxx xxxx xxxx */
/* MOVT 1111 0x10 1100 xxxx 0xxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfb708000, 0xf2400000, PROBES_T32_MOVW,
REGS(0, 0, NOSPPC, 0, 0)),
/* SSAT16 1111 0x11 0010 xxxx 0000 xxxx 00xx xxxx */
/* SSAT 1111 0x11 00x0 xxxx 0xxx xxxx xxxx xxxx */
/* USAT16 1111 0x11 1010 xxxx 0000 xxxx 00xx xxxx */
/* USAT 1111 0x11 10x0 xxxx 0xxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfb508000, 0xf3000000, PROBES_T32_SAT,
REGS(NOSPPC, 0, NOSPPC, 0, 0)),
/* SBFX 1111 0x11 0100 xxxx 0xxx xxxx xxxx xxxx */
/* UBFX 1111 0x11 1100 xxxx 0xxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfb708000, 0xf3400000, PROBES_T32_BITFIELD,
REGS(NOSPPC, 0, NOSPPC, 0, 0)),
/* BFC 1111 0x11 0110 1111 0xxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfbff8000, 0xf36f0000, PROBES_T32_BITFIELD,
REGS(0, 0, NOSPPC, 0, 0)),
/* BFI 1111 0x11 0110 xxxx 0xxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfbf08000, 0xf3600000, PROBES_T32_BITFIELD,
REGS(NOSPPCX, 0, NOSPPC, 0, 0)),
DECODE_END
};
static const union decode_item t32_table_1111_0xxx___1[] = {
/* Branches and miscellaneous control */
/* YIELD 1111 0011 1010 xxxx 10x0 x000 0000 0001 */
DECODE_OR (0xfff0d7ff, 0xf3a08001),
/* SEV 1111 0011 1010 xxxx 10x0 x000 0000 0100 */
DECODE_EMULATE (0xfff0d7ff, 0xf3a08004, PROBES_T32_SEV),
/* NOP 1111 0011 1010 xxxx 10x0 x000 0000 0000 */
/* WFE 1111 0011 1010 xxxx 10x0 x000 0000 0010 */
/* WFI 1111 0011 1010 xxxx 10x0 x000 0000 0011 */
DECODE_SIMULATE (0xfff0d7fc, 0xf3a08000, PROBES_T32_WFE),
/* MRS Rd, CPSR 1111 0011 1110 xxxx 10x0 xxxx xxxx xxxx */
DECODE_SIMULATEX(0xfff0d000, 0xf3e08000, PROBES_T32_MRS,
REGS(0, 0, NOSPPC, 0, 0)),
/*
* Unsupported instructions
* 1111 0x11 1xxx xxxx 10x0 xxxx xxxx xxxx
*
* MSR 1111 0011 100x xxxx 10x0 xxxx xxxx xxxx
* DBG hint 1111 0011 1010 xxxx 10x0 x000 1111 xxxx
* Unallocated hints 1111 0011 1010 xxxx 10x0 x000 xxxx xxxx
* CPS 1111 0011 1010 xxxx 10x0 xxxx xxxx xxxx
* CLREX/DSB/DMB/ISB 1111 0011 1011 xxxx 10x0 xxxx xxxx xxxx
* BXJ 1111 0011 1100 xxxx 10x0 xxxx xxxx xxxx
* SUBS PC,LR,#<imm8> 1111 0011 1101 xxxx 10x0 xxxx xxxx xxxx
* MRS Rd, SPSR 1111 0011 1111 xxxx 10x0 xxxx xxxx xxxx
* SMC 1111 0111 1111 xxxx 1000 xxxx xxxx xxxx
* UNDEFINED 1111 0111 1111 xxxx 1010 xxxx xxxx xxxx
* ??? 1111 0111 1xxx xxxx 1010 xxxx xxxx xxxx
*/
DECODE_REJECT (0xfb80d000, 0xf3808000),
/* Bcc 1111 0xxx xxxx xxxx 10x0 xxxx xxxx xxxx */
DECODE_CUSTOM (0xf800d000, 0xf0008000, PROBES_T32_BRANCH_COND),
/* BLX 1111 0xxx xxxx xxxx 11x0 xxxx xxxx xxx0 */
DECODE_OR (0xf800d001, 0xf000c000),
/* B 1111 0xxx xxxx xxxx 10x1 xxxx xxxx xxxx */
/* BL 1111 0xxx xxxx xxxx 11x1 xxxx xxxx xxxx */
DECODE_SIMULATE (0xf8009000, 0xf0009000, PROBES_T32_BRANCH),
DECODE_END
};
static const union decode_item t32_table_1111_100x_x0x1__1111[] = {
/* Memory hints */
/* PLD (literal) 1111 1000 x001 1111 1111 xxxx xxxx xxxx */
/* PLI (literal) 1111 1001 x001 1111 1111 xxxx xxxx xxxx */
DECODE_SIMULATE (0xfe7ff000, 0xf81ff000, PROBES_T32_PLDI),
/* PLD{W} (immediate) 1111 1000 10x1 xxxx 1111 xxxx xxxx xxxx */
DECODE_OR (0xffd0f000, 0xf890f000),
/* PLD{W} (immediate) 1111 1000 00x1 xxxx 1111 1100 xxxx xxxx */
DECODE_OR (0xffd0ff00, 0xf810fc00),
/* PLI (immediate) 1111 1001 1001 xxxx 1111 xxxx xxxx xxxx */
DECODE_OR (0xfff0f000, 0xf990f000),
/* PLI (immediate) 1111 1001 0001 xxxx 1111 1100 xxxx xxxx */
DECODE_SIMULATEX(0xfff0ff00, 0xf910fc00, PROBES_T32_PLDI,
REGS(NOPCX, 0, 0, 0, 0)),
/* PLD{W} (register) 1111 1000 00x1 xxxx 1111 0000 00xx xxxx */
DECODE_OR (0xffd0ffc0, 0xf810f000),
/* PLI (register) 1111 1001 0001 xxxx 1111 0000 00xx xxxx */
DECODE_SIMULATEX(0xfff0ffc0, 0xf910f000, PROBES_T32_PLDI,
REGS(NOPCX, 0, 0, 0, NOSPPC)),
/* Other unallocated instructions... */
DECODE_END
};
static const union decode_item t32_table_1111_100x[] = {
/* Store/Load single data item */
/* ??? 1111 100x x11x xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0xfe600000, 0xf8600000),
/* ??? 1111 1001 0101 xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0xfff00000, 0xf9500000),
/* ??? 1111 100x 0xxx xxxx xxxx 10x0 xxxx xxxx */
DECODE_REJECT (0xfe800d00, 0xf8000800),
/* STRBT 1111 1000 0000 xxxx xxxx 1110 xxxx xxxx */
/* STRHT 1111 1000 0010 xxxx xxxx 1110 xxxx xxxx */
/* STRT 1111 1000 0100 xxxx xxxx 1110 xxxx xxxx */
/* LDRBT 1111 1000 0001 xxxx xxxx 1110 xxxx xxxx */
/* LDRSBT 1111 1001 0001 xxxx xxxx 1110 xxxx xxxx */
/* LDRHT 1111 1000 0011 xxxx xxxx 1110 xxxx xxxx */
/* LDRSHT 1111 1001 0011 xxxx xxxx 1110 xxxx xxxx */
/* LDRT 1111 1000 0101 xxxx xxxx 1110 xxxx xxxx */
DECODE_REJECT (0xfe800f00, 0xf8000e00),
/* STR{,B,H} Rn,[PC...] 1111 1000 xxx0 1111 xxxx xxxx xxxx xxxx */
DECODE_REJECT (0xff1f0000, 0xf80f0000),
/* STR{,B,H} PC,[Rn...] 1111 1000 xxx0 xxxx 1111 xxxx xxxx xxxx */
DECODE_REJECT (0xff10f000, 0xf800f000),
/* LDR (literal) 1111 1000 x101 1111 xxxx xxxx xxxx xxxx */
DECODE_SIMULATEX(0xff7f0000, 0xf85f0000, PROBES_T32_LDR_LIT,
REGS(PC, ANY, 0, 0, 0)),
/* STR (immediate) 1111 1000 0100 xxxx xxxx 1xxx xxxx xxxx */
/* LDR (immediate) 1111 1000 0101 xxxx xxxx 1xxx xxxx xxxx */
DECODE_OR (0xffe00800, 0xf8400800),
/* STR (immediate) 1111 1000 1100 xxxx xxxx xxxx xxxx xxxx */
/* LDR (immediate) 1111 1000 1101 xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xffe00000, 0xf8c00000, PROBES_T32_LDRSTR,
REGS(NOPCX, ANY, 0, 0, 0)),
/* STR (register) 1111 1000 0100 xxxx xxxx 0000 00xx xxxx */
/* LDR (register) 1111 1000 0101 xxxx xxxx 0000 00xx xxxx */
DECODE_EMULATEX (0xffe00fc0, 0xf8400000, PROBES_T32_LDRSTR,
REGS(NOPCX, ANY, 0, 0, NOSPPC)),
/* LDRB (literal) 1111 1000 x001 1111 xxxx xxxx xxxx xxxx */
/* LDRSB (literal) 1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
/* LDRH (literal) 1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
/* LDRSH (literal) 1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
DECODE_SIMULATEX(0xfe5f0000, 0xf81f0000, PROBES_T32_LDR_LIT,
REGS(PC, NOSPPCX, 0, 0, 0)),
/* STRB (immediate) 1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
/* STRH (immediate) 1111 1000 0010 xxxx xxxx 1xxx xxxx xxxx */
/* LDRB (immediate) 1111 1000 0001 xxxx xxxx 1xxx xxxx xxxx */
/* LDRSB (immediate) 1111 1001 0001 xxxx xxxx 1xxx xxxx xxxx */
/* LDRH (immediate) 1111 1000 0011 xxxx xxxx 1xxx xxxx xxxx */
/* LDRSH (immediate) 1111 1001 0011 xxxx xxxx 1xxx xxxx xxxx */
DECODE_OR (0xfec00800, 0xf8000800),
/* STRB (immediate) 1111 1000 1000 xxxx xxxx xxxx xxxx xxxx */
/* STRH (immediate) 1111 1000 1010 xxxx xxxx xxxx xxxx xxxx */
/* LDRB (immediate) 1111 1000 1001 xxxx xxxx xxxx xxxx xxxx */
/* LDRSB (immediate) 1111 1001 1001 xxxx xxxx xxxx xxxx xxxx */
/* LDRH (immediate) 1111 1000 1011 xxxx xxxx xxxx xxxx xxxx */
/* LDRSH (immediate) 1111 1001 1011 xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0xfec00000, 0xf8800000, PROBES_T32_LDRSTR,
REGS(NOPCX, NOSPPCX, 0, 0, 0)),
/* STRB (register) 1111 1000 0000 xxxx xxxx 0000 00xx xxxx */
/* STRH (register) 1111 1000 0010 xxxx xxxx 0000 00xx xxxx */
/* LDRB (register) 1111 1000 0001 xxxx xxxx 0000 00xx xxxx */
/* LDRSB (register) 1111 1001 0001 xxxx xxxx 0000 00xx xxxx */
/* LDRH (register) 1111 1000 0011 xxxx xxxx 0000 00xx xxxx */
/* LDRSH (register) 1111 1001 0011 xxxx xxxx 0000 00xx xxxx */
DECODE_EMULATEX (0xfe800fc0, 0xf8000000, PROBES_T32_LDRSTR,
REGS(NOPCX, NOSPPCX, 0, 0, NOSPPC)),
/* Other unallocated instructions... */
DECODE_END
};
static const union decode_item t32_table_1111_1010___1111[] = {
/* Data-processing (register) */
/* ??? 1111 1010 011x xxxx 1111 xxxx 1xxx xxxx */
DECODE_REJECT (0xffe0f080, 0xfa60f080),
/* SXTH 1111 1010 0000 1111 1111 xxxx 1xxx xxxx */
/* UXTH 1111 1010 0001 1111 1111 xxxx 1xxx xxxx */
/* SXTB16 1111 1010 0010 1111 1111 xxxx 1xxx xxxx */
/* UXTB16 1111 1010 0011 1111 1111 xxxx 1xxx xxxx */
/* SXTB 1111 1010 0100 1111 1111 xxxx 1xxx xxxx */
/* UXTB 1111 1010 0101 1111 1111 xxxx 1xxx xxxx */
DECODE_EMULATEX (0xff8ff080, 0xfa0ff080, PROBES_T32_SIGN_EXTEND,
REGS(0, 0, NOSPPC, 0, NOSPPC)),
/* ??? 1111 1010 1xxx xxxx 1111 xxxx 0x11 xxxx */
DECODE_REJECT (0xff80f0b0, 0xfa80f030),
/* ??? 1111 1010 1x11 xxxx 1111 xxxx 0xxx xxxx */
DECODE_REJECT (0xffb0f080, 0xfab0f000),
/* SADD16 1111 1010 1001 xxxx 1111 xxxx 0000 xxxx */
/* SASX 1111 1010 1010 xxxx 1111 xxxx 0000 xxxx */
/* SSAX 1111 1010 1110 xxxx 1111 xxxx 0000 xxxx */
/* SSUB16 1111 1010 1101 xxxx 1111 xxxx 0000 xxxx */
/* SADD8 1111 1010 1000 xxxx 1111 xxxx 0000 xxxx */
/* SSUB8 1111 1010 1100 xxxx 1111 xxxx 0000 xxxx */
/* QADD16 1111 1010 1001 xxxx 1111 xxxx 0001 xxxx */
/* QASX 1111 1010 1010 xxxx 1111 xxxx 0001 xxxx */
/* QSAX 1111 1010 1110 xxxx 1111 xxxx 0001 xxxx */
/* QSUB16 1111 1010 1101 xxxx 1111 xxxx 0001 xxxx */
/* QADD8 1111 1010 1000 xxxx 1111 xxxx 0001 xxxx */
/* QSUB8 1111 1010 1100 xxxx 1111 xxxx 0001 xxxx */
/* SHADD16 1111 1010 1001 xxxx 1111 xxxx 0010 xxxx */
/* SHASX 1111 1010 1010 xxxx 1111 xxxx 0010 xxxx */
/* SHSAX 1111 1010 1110 xxxx 1111 xxxx 0010 xxxx */
/* SHSUB16 1111 1010 1101 xxxx 1111 xxxx 0010 xxxx */
/* SHADD8 1111 1010 1000 xxxx 1111 xxxx 0010 xxxx */
/* SHSUB8 1111 1010 1100 xxxx 1111 xxxx 0010 xxxx */
/* UADD16 1111 1010 1001 xxxx 1111 xxxx 0100 xxxx */
/* UASX 1111 1010 1010 xxxx 1111 xxxx 0100 xxxx */
/* USAX 1111 1010 1110 xxxx 1111 xxxx 0100 xxxx */
/* USUB16 1111 1010 1101 xxxx 1111 xxxx 0100 xxxx */
/* UADD8 1111 1010 1000 xxxx 1111 xxxx 0100 xxxx */
/* USUB8 1111 1010 1100 xxxx 1111 xxxx 0100 xxxx */
/* UQADD16 1111 1010 1001 xxxx 1111 xxxx 0101 xxxx */
/* UQASX 1111 1010 1010 xxxx 1111 xxxx 0101 xxxx */
/* UQSAX 1111 1010 1110 xxxx 1111 xxxx 0101 xxxx */
/* UQSUB16 1111 1010 1101 xxxx 1111 xxxx 0101 xxxx */
/* UQADD8 1111 1010 1000 xxxx 1111 xxxx 0101 xxxx */
/* UQSUB8 1111 1010 1100 xxxx 1111 xxxx 0101 xxxx */
/* UHADD16 1111 1010 1001 xxxx 1111 xxxx 0110 xxxx */
/* UHASX 1111 1010 1010 xxxx 1111 xxxx 0110 xxxx */
/* UHSAX 1111 1010 1110 xxxx 1111 xxxx 0110 xxxx */
/* UHSUB16 1111 1010 1101 xxxx 1111 xxxx 0110 xxxx */
/* UHADD8 1111 1010 1000 xxxx 1111 xxxx 0110 xxxx */
/* UHSUB8 1111 1010 1100 xxxx 1111 xxxx 0110 xxxx */
DECODE_OR (0xff80f080, 0xfa80f000),
/* SXTAH 1111 1010 0000 xxxx 1111 xxxx 1xxx xxxx */
/* UXTAH 1111 1010 0001 xxxx 1111 xxxx 1xxx xxxx */
/* SXTAB16 1111 1010 0010 xxxx 1111 xxxx 1xxx xxxx */
/* UXTAB16 1111 1010 0011 xxxx 1111 xxxx 1xxx xxxx */
/* SXTAB 1111 1010 0100 xxxx 1111 xxxx 1xxx xxxx */
/* UXTAB 1111 1010 0101 xxxx 1111 xxxx 1xxx xxxx */
DECODE_OR (0xff80f080, 0xfa00f080),
/* QADD 1111 1010 1000 xxxx 1111 xxxx 1000 xxxx */
/* QDADD 1111 1010 1000 xxxx 1111 xxxx 1001 xxxx */
/* QSUB 1111 1010 1000 xxxx 1111 xxxx 1010 xxxx */
/* QDSUB 1111 1010 1000 xxxx 1111 xxxx 1011 xxxx */
DECODE_OR (0xfff0f0c0, 0xfa80f080),
/* SEL 1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */
DECODE_OR (0xfff0f0f0, 0xfaa0f080),
/* LSL 1111 1010 000x xxxx 1111 xxxx 0000 xxxx */
/* LSR 1111 1010 001x xxxx 1111 xxxx 0000 xxxx */
/* ASR 1111 1010 010x xxxx 1111 xxxx 0000 xxxx */
/* ROR 1111 1010 011x xxxx 1111 xxxx 0000 xxxx */
DECODE_EMULATEX (0xff80f0f0, 0xfa00f000, PROBES_T32_MEDIA,
REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
/* CLZ 1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */
DECODE_OR (0xfff0f0f0, 0xfab0f080),
/* REV 1111 1010 1001 xxxx 1111 xxxx 1000 xxxx */
/* REV16 1111 1010 1001 xxxx 1111 xxxx 1001 xxxx */
/* RBIT 1111 1010 1001 xxxx 1111 xxxx 1010 xxxx */
/* REVSH 1111 1010 1001 xxxx 1111 xxxx 1011 xxxx */
DECODE_EMULATEX (0xfff0f0c0, 0xfa90f080, PROBES_T32_REVERSE,
REGS(NOSPPC, 0, NOSPPC, 0, SAMEAS16)),
/* Other unallocated instructions... */
DECODE_END
};
static const union decode_item t32_table_1111_1011_0[] = {
/* Multiply, multiply accumulate, and absolute difference */
/* ??? 1111 1011 0000 xxxx 1111 xxxx 0001 xxxx */
DECODE_REJECT (0xfff0f0f0, 0xfb00f010),
/* ??? 1111 1011 0111 xxxx 1111 xxxx 0001 xxxx */
DECODE_REJECT (0xfff0f0f0, 0xfb70f010),
/* SMULxy 1111 1011 0001 xxxx 1111 xxxx 00xx xxxx */
DECODE_OR (0xfff0f0c0, 0xfb10f000),
/* MUL 1111 1011 0000 xxxx 1111 xxxx 0000 xxxx */
/* SMUAD{X} 1111 1011 0010 xxxx 1111 xxxx 000x xxxx */
/* SMULWy 1111 1011 0011 xxxx 1111 xxxx 000x xxxx */
/* SMUSD{X} 1111 1011 0100 xxxx 1111 xxxx 000x xxxx */
/* SMMUL{R} 1111 1011 0101 xxxx 1111 xxxx 000x xxxx */
/* USAD8 1111 1011 0111 xxxx 1111 xxxx 0000 xxxx */
DECODE_EMULATEX (0xff80f0e0, 0xfb00f000, PROBES_T32_MUL_ADD,
REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
/* ??? 1111 1011 0111 xxxx xxxx xxxx 0001 xxxx */
DECODE_REJECT (0xfff000f0, 0xfb700010),
/* SMLAxy 1111 1011 0001 xxxx xxxx xxxx 00xx xxxx */
DECODE_OR (0xfff000c0, 0xfb100000),
/* MLA 1111 1011 0000 xxxx xxxx xxxx 0000 xxxx */
/* MLS 1111 1011 0000 xxxx xxxx xxxx 0001 xxxx */
/* SMLAD{X} 1111 1011 0010 xxxx xxxx xxxx 000x xxxx */
/* SMLAWy 1111 1011 0011 xxxx xxxx xxxx 000x xxxx */
/* SMLSD{X} 1111 1011 0100 xxxx xxxx xxxx 000x xxxx */
/* SMMLA{R} 1111 1011 0101 xxxx xxxx xxxx 000x xxxx */
/* SMMLS{R} 1111 1011 0110 xxxx xxxx xxxx 000x xxxx */
/* USADA8 1111 1011 0111 xxxx xxxx xxxx 0000 xxxx */
DECODE_EMULATEX (0xff8000c0, 0xfb000000, PROBES_T32_MUL_ADD2,
REGS(NOSPPC, NOSPPCX, NOSPPC, 0, NOSPPC)),
/* Other unallocated instructions... */
DECODE_END
};
static const union decode_item t32_table_1111_1011_1[] = {
/* Long multiply, long multiply accumulate, and divide */
/* UMAAL 1111 1011 1110 xxxx xxxx xxxx 0110 xxxx */
DECODE_OR (0xfff000f0, 0xfbe00060),
/* SMLALxy 1111 1011 1100 xxxx xxxx xxxx 10xx xxxx */
DECODE_OR (0xfff000c0, 0xfbc00080),
/* SMLALD{X} 1111 1011 1100 xxxx xxxx xxxx 110x xxxx */
/* SMLSLD{X} 1111 1011 1101 xxxx xxxx xxxx 110x xxxx */
DECODE_OR (0xffe000e0, 0xfbc000c0),
/* SMULL 1111 1011 1000 xxxx xxxx xxxx 0000 xxxx */
/* UMULL 1111 1011 1010 xxxx xxxx xxxx 0000 xxxx */
/* SMLAL 1111 1011 1100 xxxx xxxx xxxx 0000 xxxx */
/* UMLAL 1111 1011 1110 xxxx xxxx xxxx 0000 xxxx */
DECODE_EMULATEX (0xff9000f0, 0xfb800000, PROBES_T32_MUL_ADD_LONG,
REGS(NOSPPC, NOSPPC, NOSPPC, 0, NOSPPC)),
/* SDIV 1111 1011 1001 xxxx xxxx xxxx 1111 xxxx */
/* UDIV 1111 1011 1011 xxxx xxxx xxxx 1111 xxxx */
/* Other unallocated instructions... */
DECODE_END
};
const union decode_item probes_decode_thumb32_table[] = {
/*
* Load/store multiple instructions
* 1110 100x x0xx xxxx xxxx xxxx xxxx xxxx
*/
DECODE_TABLE (0xfe400000, 0xe8000000, t32_table_1110_100x_x0xx),
/*
* Load/store dual, load/store exclusive, table branch
* 1110 100x x1xx xxxx xxxx xxxx xxxx xxxx
*/
DECODE_TABLE (0xfe400000, 0xe8400000, t32_table_1110_100x_x1xx),
/*
* Data-processing (shifted register)
* 1110 101x xxxx xxxx xxxx xxxx xxxx xxxx
*/
DECODE_TABLE (0xfe000000, 0xea000000, t32_table_1110_101x),
/*
* Coprocessor instructions
* 1110 11xx xxxx xxxx xxxx xxxx xxxx xxxx
*/
DECODE_REJECT (0xfc000000, 0xec000000),
/*
* Data-processing (modified immediate)
* 1111 0x0x xxxx xxxx 0xxx xxxx xxxx xxxx
*/
DECODE_TABLE (0xfa008000, 0xf0000000, t32_table_1111_0x0x___0),
/*
* Data-processing (plain binary immediate)
* 1111 0x1x xxxx xxxx 0xxx xxxx xxxx xxxx
*/
DECODE_TABLE (0xfa008000, 0xf2000000, t32_table_1111_0x1x___0),
/*
* Branches and miscellaneous control
* 1111 0xxx xxxx xxxx 1xxx xxxx xxxx xxxx
*/
DECODE_TABLE (0xf8008000, 0xf0008000, t32_table_1111_0xxx___1),
/*
* Advanced SIMD element or structure load/store instructions
* 1111 1001 xxx0 xxxx xxxx xxxx xxxx xxxx
*/
DECODE_REJECT (0xff100000, 0xf9000000),
/*
* Memory hints
* 1111 100x x0x1 xxxx 1111 xxxx xxxx xxxx
*/
DECODE_TABLE (0xfe50f000, 0xf810f000, t32_table_1111_100x_x0x1__1111),
/*
* Store single data item
* 1111 1000 xxx0 xxxx xxxx xxxx xxxx xxxx
* Load single data items
* 1111 100x xxx1 xxxx xxxx xxxx xxxx xxxx
*/
DECODE_TABLE (0xfe000000, 0xf8000000, t32_table_1111_100x),
/*
* Data-processing (register)
* 1111 1010 xxxx xxxx 1111 xxxx xxxx xxxx
*/
DECODE_TABLE (0xff00f000, 0xfa00f000, t32_table_1111_1010___1111),
/*
* Multiply, multiply accumulate, and absolute difference
* 1111 1011 0xxx xxxx xxxx xxxx xxxx xxxx
*/
DECODE_TABLE (0xff800000, 0xfb000000, t32_table_1111_1011_0),
/*
* Long multiply, long multiply accumulate, and divide
* 1111 1011 1xxx xxxx xxxx xxxx xxxx xxxx
*/
DECODE_TABLE (0xff800000, 0xfb800000, t32_table_1111_1011_1),
/*
* Coprocessor instructions
* 1111 11xx xxxx xxxx xxxx xxxx xxxx xxxx
*/
DECODE_END
};
#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
EXPORT_SYMBOL_GPL(probes_decode_thumb32_table);
#endif
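/*
* Illustration of the mask/value matching these tables rely on (example
* opcode chosen for this comment): the Thumb-2 instruction 0xe8bd8000
* (LDMIA sp!, {...,pc}) satisfies
* (0xe8bd8000 & 0xfe400000) == 0xe8000000, so probes_decode_insn()
* descends into t32_table_1110_100x_x0xx and continues matching there.
*/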
static const union decode_item t16_table_1011[] = {
/* Miscellaneous 16-bit instructions */
/* ADD (SP plus immediate) 1011 0000 0xxx xxxx */
/* SUB (SP minus immediate) 1011 0000 1xxx xxxx */
DECODE_SIMULATE (0xff00, 0xb000, PROBES_T16_ADD_SP),
/* CBZ 1011 00x1 xxxx xxxx */
/* CBNZ 1011 10x1 xxxx xxxx */
DECODE_SIMULATE (0xf500, 0xb100, PROBES_T16_CBZ),
/* SXTH 1011 0010 00xx xxxx */
/* SXTB 1011 0010 01xx xxxx */
/* UXTH 1011 0010 10xx xxxx */
/* UXTB 1011 0010 11xx xxxx */
/* REV 1011 1010 00xx xxxx */
/* REV16 1011 1010 01xx xxxx */
/* ??? 1011 1010 10xx xxxx */
/* REVSH 1011 1010 11xx xxxx */
DECODE_REJECT (0xffc0, 0xba80),
DECODE_EMULATE (0xf500, 0xb000, PROBES_T16_SIGN_EXTEND),
/* PUSH 1011 010x xxxx xxxx */
DECODE_CUSTOM (0xfe00, 0xb400, PROBES_T16_PUSH),
/* POP 1011 110x xxxx xxxx */
DECODE_CUSTOM (0xfe00, 0xbc00, PROBES_T16_POP),
/*
* If-Then, and hints
* 1011 1111 xxxx xxxx
*/
/* YIELD 1011 1111 0001 0000 */
DECODE_OR (0xffff, 0xbf10),
/* SEV 1011 1111 0100 0000 */
DECODE_EMULATE (0xffff, 0xbf40, PROBES_T16_SEV),
/* NOP 1011 1111 0000 0000 */
/* WFE 1011 1111 0010 0000 */
/* WFI 1011 1111 0011 0000 */
DECODE_SIMULATE (0xffcf, 0xbf00, PROBES_T16_WFE),
/* Unassigned hints 1011 1111 xxxx 0000 */
DECODE_REJECT (0xff0f, 0xbf00),
/* IT 1011 1111 xxxx xxxx */
DECODE_CUSTOM (0xff00, 0xbf00, PROBES_T16_IT),
/* SETEND 1011 0110 010x xxxx */
/* CPS 1011 0110 011x xxxx */
/* BKPT 1011 1110 xxxx xxxx */
/* And unallocated instructions... */
DECODE_END
};
const union decode_item probes_decode_thumb16_table[] = {
/*
* Shift (immediate), add, subtract, move, and compare
* 00xx xxxx xxxx xxxx
*/
/* CMP (immediate) 0010 1xxx xxxx xxxx */
DECODE_EMULATE (0xf800, 0x2800, PROBES_T16_CMP),
/* ADD (register) 0001 100x xxxx xxxx */
/* SUB (register) 0001 101x xxxx xxxx */
/* LSL (immediate) 0000 0xxx xxxx xxxx */
/* LSR (immediate) 0000 1xxx xxxx xxxx */
/* ASR (immediate) 0001 0xxx xxxx xxxx */
/* ADD (immediate, Thumb) 0001 110x xxxx xxxx */
/* SUB (immediate, Thumb) 0001 111x xxxx xxxx */
/* MOV (immediate) 0010 0xxx xxxx xxxx */
/* ADD (immediate, Thumb) 0011 0xxx xxxx xxxx */
/* SUB (immediate, Thumb) 0011 1xxx xxxx xxxx */
DECODE_EMULATE (0xc000, 0x0000, PROBES_T16_ADDSUB),
/*
* 16-bit Thumb data-processing instructions
* 0100 00xx xxxx xxxx
*/
/* TST (register) 0100 0010 00xx xxxx */
DECODE_EMULATE (0xffc0, 0x4200, PROBES_T16_CMP),
/* CMP (register) 0100 0010 10xx xxxx */
/* CMN (register) 0100 0010 11xx xxxx */
DECODE_EMULATE (0xff80, 0x4280, PROBES_T16_CMP),
/* AND (register) 0100 0000 00xx xxxx */
/* EOR (register) 0100 0000 01xx xxxx */
/* LSL (register) 0100 0000 10xx xxxx */
/* LSR (register) 0100 0000 11xx xxxx */
/* ASR (register) 0100 0001 00xx xxxx */
/* ADC (register) 0100 0001 01xx xxxx */
/* SBC (register) 0100 0001 10xx xxxx */
/* ROR (register) 0100 0001 11xx xxxx */
/* RSB (immediate) 0100 0010 01xx xxxx */
/* ORR (register) 0100 0011 00xx xxxx */
/* MUL 0100 0011 01xx xxxx */
/* BIC (register) 0100 0011 10xx xxxx */
/* MVN (register) 0100 0011 11xx xxxx */
DECODE_EMULATE (0xfc00, 0x4000, PROBES_T16_LOGICAL),
/*
* Special data instructions and branch and exchange
* 0100 01xx xxxx xxxx
*/
/* BLX pc 0100 0111 1111 1xxx */
DECODE_REJECT (0xfff8, 0x47f8),
/* BX (register) 0100 0111 0xxx xxxx */
/* BLX (register) 0100 0111 1xxx xxxx */
DECODE_SIMULATE (0xff00, 0x4700, PROBES_T16_BLX),
/* ADD pc, pc 0100 0100 1111 1111 */
DECODE_REJECT (0xffff, 0x44ff),
/* ADD (register) 0100 0100 xxxx xxxx */
/* CMP (register) 0100 0101 xxxx xxxx */
/* MOV (register) 0100 0110 xxxx xxxx */
DECODE_CUSTOM (0xfc00, 0x4400, PROBES_T16_HIREGOPS),
/*
* Load from Literal Pool
* LDR (literal) 0100 1xxx xxxx xxxx
*/
DECODE_SIMULATE (0xf800, 0x4800, PROBES_T16_LDR_LIT),
/*
* 16-bit Thumb Load/store instructions
* 0101 xxxx xxxx xxxx
* 011x xxxx xxxx xxxx
* 100x xxxx xxxx xxxx
*/
/* STR (register) 0101 000x xxxx xxxx */
/* STRH (register) 0101 001x xxxx xxxx */
/* STRB (register) 0101 010x xxxx xxxx */
/* LDRSB (register) 0101 011x xxxx xxxx */
/* LDR (register) 0101 100x xxxx xxxx */
/* LDRH (register) 0101 101x xxxx xxxx */
/* LDRB (register) 0101 110x xxxx xxxx */
/* LDRSH (register) 0101 111x xxxx xxxx */
/* STR (immediate, Thumb) 0110 0xxx xxxx xxxx */
/* LDR (immediate, Thumb) 0110 1xxx xxxx xxxx */
/* STRB (immediate, Thumb) 0111 0xxx xxxx xxxx */
/* LDRB (immediate, Thumb) 0111 1xxx xxxx xxxx */
DECODE_EMULATE (0xc000, 0x4000, PROBES_T16_LDRHSTRH),
/* STRH (immediate, Thumb) 1000 0xxx xxxx xxxx */
/* LDRH (immediate, Thumb) 1000 1xxx xxxx xxxx */
DECODE_EMULATE (0xf000, 0x8000, PROBES_T16_LDRHSTRH),
/* STR (immediate, Thumb) 1001 0xxx xxxx xxxx */
/* LDR (immediate, Thumb) 1001 1xxx xxxx xxxx */
DECODE_SIMULATE (0xf000, 0x9000, PROBES_T16_LDRSTR),
/*
* Generate PC-/SP-relative address
* ADR (literal) 1010 0xxx xxxx xxxx
* ADD (SP plus immediate) 1010 1xxx xxxx xxxx
*/
DECODE_SIMULATE (0xf000, 0xa000, PROBES_T16_ADR),
/*
* Miscellaneous 16-bit instructions
* 1011 xxxx xxxx xxxx
*/
DECODE_TABLE (0xf000, 0xb000, t16_table_1011),
/* STM 1100 0xxx xxxx xxxx */
/* LDM 1100 1xxx xxxx xxxx */
DECODE_EMULATE (0xf000, 0xc000, PROBES_T16_LDMSTM),
/*
* Conditional branch, and Supervisor Call
*/
/* Permanently UNDEFINED 1101 1110 xxxx xxxx */
/* SVC 1101 1111 xxxx xxxx */
DECODE_REJECT (0xfe00, 0xde00),
/* Conditional branch 1101 xxxx xxxx xxxx */
DECODE_CUSTOM (0xf000, 0xd000, PROBES_T16_BRANCH_COND),
/*
* Unconditional branch
* B 1110 0xxx xxxx xxxx
*/
DECODE_SIMULATE (0xf800, 0xe000, PROBES_T16_BRANCH),
DECODE_END
};
#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
EXPORT_SYMBOL_GPL(probes_decode_thumb16_table);
#endif
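/*
 * Illustrative sketch (not part of the kernel source): how a mask/value
 * decode table like probes_decode_thumb16_table above is searched. The
 * entry layout and names below are hypothetical stand-ins for the real
 * decode machinery; only the matching rule itself is mirrored.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_entry {
	uint16_t mask;		/* which bits are fixed */
	uint16_t value;		/* required pattern for the fixed bits */
	const char *action;	/* what the probes code would do */
};

static const struct demo_entry demo_table[] = {
	{ 0xf800, 0x4800, "simulate LDR (literal)" },
	{ 0xf000, 0xd000, "custom-handle conditional branch" },
	{ 0xf800, 0xe000, "simulate unconditional branch" },
	{ 0x0000, 0x0000, "reject" },	/* catch-all terminator */
};

static const char *demo_decode(uint16_t insn)
{
	const struct demo_entry *e;

	/* The first entry whose fixed bits all match wins, so specific
	 * patterns must precede broader ones, as in the table above. */
	for (e = demo_table; ; e++)
		if ((insn & e->mask) == e->value)
			return e->action;
}

int main(void)
{
	printf("0x4801 -> %s\n", demo_decode(0x4801)); /* LDR r0, [pc, #4] */
	printf("0xd0fe -> %s\n", demo_decode(0xd0fe)); /* conditional B    */
	return 0;
}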
static unsigned long __kprobes thumb_check_cc(unsigned long cpsr)
{
if (unlikely(in_it_block(cpsr)))
return probes_condition_checks[current_cond(cpsr)](cpsr);
return true;
}
static void __kprobes thumb16_singlestep(probes_opcode_t opcode,
struct arch_probes_insn *asi,
struct pt_regs *regs)
{
regs->ARM_pc += 2;
asi->insn_handler(opcode, asi, regs);
regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
}
static void __kprobes thumb32_singlestep(probes_opcode_t opcode,
struct arch_probes_insn *asi,
struct pt_regs *regs)
{
regs->ARM_pc += 4;
asi->insn_handler(opcode, asi, regs);
regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
}
enum probes_insn __kprobes
thumb16_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
bool emulate, const union decode_action *actions,
const struct decode_checker *checkers[])
{
asi->insn_singlestep = thumb16_singlestep;
asi->insn_check_cc = thumb_check_cc;
return probes_decode_insn(insn, asi, probes_decode_thumb16_table, true,
emulate, actions, checkers);
}
enum probes_insn __kprobes
thumb32_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
bool emulate, const union decode_action *actions,
const struct decode_checker *checkers[])
{
asi->insn_singlestep = thumb32_singlestep;
asi->insn_check_cc = thumb_check_cc;
return probes_decode_insn(insn, asi, probes_decode_thumb32_table, true,
emulate, actions, checkers);
}
| gpl-2.0 |
BPI-SINOVOIP/BPI-Mainline-kernel | linux-4.14/arch/ia64/kernel/acpi-ext.c | 1476 | 2822 | /*
* (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
* Alex Williamson <alex.williamson@hp.com>
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <asm/acpi-ext.h>
/*
* Device CSRs that do not appear in PCI config space should be described
* via ACPI. This would normally be done with Address Space Descriptors
* marked as "consumer-only," but old versions of Windows and Linux ignore
* the producer/consumer flag, so HP invented a vendor-defined resource to
* describe the location and size of CSR space.
*/
struct acpi_vendor_uuid hp_ccsr_uuid = {
.subtype = 2,
.data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
};
static acpi_status hp_ccsr_locate(acpi_handle obj, u64 *base, u64 *length)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_resource *resource;
struct acpi_resource_vendor_typed *vendor;
status = acpi_get_vendor_resource(obj, METHOD_NAME__CRS, &hp_ccsr_uuid,
&buffer);
resource = buffer.pointer;
vendor = &resource->data.vendor_typed;
if (ACPI_FAILURE(status) || vendor->byte_length < 16) {
status = AE_NOT_FOUND;
goto exit;
}
memcpy(base, vendor->byte_data, sizeof(*base));
memcpy(length, vendor->byte_data + 8, sizeof(*length));
exit:
kfree(buffer.pointer);
return status;
}
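/*
 * Illustrative sketch (not part of this file): the HP CCSR vendor
 * resource packs base and length as two consecutive little-endian u64
 * values in the first 16 bytes of byte_data, which is exactly how
 * hp_ccsr_locate() above unpacks them (ia64 is little-endian, so the
 * plain memcpy works). The payload below is made up for the demo.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	uint8_t byte_data[16] = {
		0x00, 0x00, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00, /* base   */
		0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* length */
	};
	uint64_t base, length;

	memcpy(&base, byte_data, sizeof(base));
	memcpy(&length, byte_data + 8, sizeof(length));
	printf("CSR space: base 0x%llx length 0x%llx\n",
	       (unsigned long long)base, (unsigned long long)length);
	return 0;
}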
struct csr_space {
u64 base;
u64 length;
};
static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
{
struct csr_space *space = data;
struct acpi_resource_address64 addr;
acpi_status status;
status = acpi_resource_to_address64(resource, &addr);
if (ACPI_SUCCESS(status) &&
addr.resource_type == ACPI_MEMORY_RANGE &&
addr.address.address_length &&
addr.producer_consumer == ACPI_CONSUMER) {
space->base = addr.address.minimum;
space->length = addr.address.address_length;
return AE_CTRL_TERMINATE;
}
return AE_OK; /* keep looking */
}
static acpi_status hp_crs_locate(acpi_handle obj, u64 *base, u64 *length)
{
struct csr_space space = { 0, 0 };
acpi_walk_resources(obj, METHOD_NAME__CRS, find_csr_space, &space);
if (!space.length)
return AE_NOT_FOUND;
*base = space.base;
*length = space.length;
return AE_OK;
}
acpi_status hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
{
acpi_status status;
status = hp_ccsr_locate(obj, csr_base, csr_length);
if (ACPI_SUCCESS(status))
return status;
return hp_crs_locate(obj, csr_base, csr_length);
}
EXPORT_SYMBOL(hp_acpi_csr_space);
| gpl-2.0 |
beroid/android_kernel_cyanogen_msm8916 | kernel/locking/rtmutex-debug.c | 1732 | 4645 | /*
* RT-Mutexes: blocking mutual exclusion locks with PI support
*
* started by Ingo Molnar and Thomas Gleixner:
*
* Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
* This code is based on the rt.c implementation in the preempt-rt tree.
* Portions of said code are
*
* Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey
* Copyright (C) 2006 Esben Nielsen
* Copyright (C) 2006 Kihon Technologies Inc.,
* Steven Rostedt <rostedt@goodmis.org>
*
* See rt.c in preempt-rt for proper credits and further information
*/
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debug_locks.h>
#include "rtmutex_common.h"
static void printk_task(struct task_struct *p)
{
if (p)
printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio);
else
printk("<none>");
}
static void printk_lock(struct rt_mutex *lock, int print_owner)
{
if (lock->name)
printk(" [%p] {%s}\n",
lock, lock->name);
else
printk(" [%p] {%s:%d}\n",
lock, lock->file, lock->line);
if (print_owner && rt_mutex_owner(lock)) {
printk(".. ->owner: %p\n", lock->owner);
printk(".. held by: ");
printk_task(rt_mutex_owner(lock));
printk("\n");
}
}
void rt_mutex_debug_task_free(struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters));
DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
/*
* We fill out the fields in the waiter to store the information about
* the deadlock. We print when we return. act_waiter can be NULL in
* case of a remove waiter operation.
*/
void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *act_waiter,
struct rt_mutex *lock)
{
struct task_struct *task;
if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter)
return;
task = rt_mutex_owner(act_waiter->lock);
if (task && task != current) {
act_waiter->deadlock_task_pid = get_pid(task_pid(task));
act_waiter->deadlock_lock = lock;
}
}
void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
{
struct task_struct *task;
if (!waiter->deadlock_lock || !debug_locks)
return;
rcu_read_lock();
task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
if (!task) {
rcu_read_unlock();
return;
}
if (!debug_locks_off()) {
rcu_read_unlock();
return;
}
printk("\n============================================\n");
printk( "[ BUG: circular locking deadlock detected! ]\n");
printk("%s\n", print_tainted());
printk( "--------------------------------------------\n");
printk("%s/%d is deadlocking current task %s/%d\n\n",
task->comm, task_pid_nr(task),
current->comm, task_pid_nr(current));
printk("\n1) %s/%d is trying to acquire this lock:\n",
current->comm, task_pid_nr(current));
printk_lock(waiter->lock, 1);
printk("\n2) %s/%d is blocked on this lock:\n",
task->comm, task_pid_nr(task));
printk_lock(waiter->deadlock_lock, 1);
debug_show_held_locks(current);
debug_show_held_locks(task);
printk("\n%s/%d's [blocked] stackdump:\n\n",
task->comm, task_pid_nr(task));
show_stack(task, NULL);
printk("\n%s/%d's [current] stackdump:\n\n",
current->comm, task_pid_nr(current));
dump_stack();
debug_show_all_locks();
rcu_read_unlock();
printk("[ turning off deadlock detection."
"Please report this trace. ]\n\n");
}
void debug_rt_mutex_lock(struct rt_mutex *lock)
{
}
void debug_rt_mutex_unlock(struct rt_mutex *lock)
{
DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}
void
debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner)
{
}
void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}
void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
memset(waiter, 0x11, sizeof(*waiter));
waiter->deadlock_task_pid = NULL;
}
void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
put_pid(waiter->deadlock_task_pid);
memset(waiter, 0x22, sizeof(*waiter));
}
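/*
 * Illustrative sketch (not part of this file): the 0x11/0x22 memset
 * poisoning above makes use-before-init and use-after-free of a waiter
 * easy to spot in an oops, because pointer fields read back as
 * 0x1111... or 0x2222... instead of plausible addresses. A userspace
 * analogue with a made-up waiter struct:
 */
#include <string.h>
#include <stdio.h>

struct demo_waiter { void *lock; int prio; };

#define POISON_INIT 0x11	/* written before first real use */
#define POISON_FREE 0x22	/* written after last real use */

int main(void)
{
	struct demo_waiter w;

	memset(&w, POISON_INIT, sizeof(w));
	/* ... normal use would overwrite the fields here ... */
	memset(&w, POISON_FREE, sizeof(w));
	printf("poisoned lock field: %p\n", w.lock);
	return 0;
}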
void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
{
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lock->name = name;
}
void
rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
{
}
void rt_mutex_deadlock_account_unlock(struct task_struct *task)
{
}
| gpl-2.0 |
MongooseHelix/kernel_desirec_2.6.35_ics | arch/arm/mach-at91/irq.c | 1732 | 4285 | /*
* linux/arch/arm/mach-at91/irq.c
*
* Copyright (C) 2004 SAN People
* Copyright (C) 2004 ATMEL
* Copyright (C) Rick Bronson
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
static void at91_aic_mask_irq(unsigned int irq)
{
/* Disable interrupt on AIC */
at91_sys_write(AT91_AIC_IDCR, 1 << irq);
}
static void at91_aic_unmask_irq(unsigned int irq)
{
/* Enable interrupt on AIC */
at91_sys_write(AT91_AIC_IECR, 1 << irq);
}
unsigned int at91_extern_irq;
#define is_extern_irq(irq) ((1 << (irq)) & at91_extern_irq)
static int at91_aic_set_type(unsigned irq, unsigned type)
{
unsigned int smr, srctype;
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
srctype = AT91_AIC_SRCTYPE_HIGH;
break;
case IRQ_TYPE_EDGE_RISING:
srctype = AT91_AIC_SRCTYPE_RISING;
break;
case IRQ_TYPE_LEVEL_LOW:
if ((irq == AT91_ID_FIQ) || is_extern_irq(irq)) /* only supported on external interrupts */
srctype = AT91_AIC_SRCTYPE_LOW;
else
return -EINVAL;
break;
case IRQ_TYPE_EDGE_FALLING:
if ((irq == AT91_ID_FIQ) || is_extern_irq(irq)) /* only supported on external interrupts */
srctype = AT91_AIC_SRCTYPE_FALLING;
else
return -EINVAL;
break;
default:
return -EINVAL;
}
smr = at91_sys_read(AT91_AIC_SMR(irq)) & ~AT91_AIC_SRCTYPE;
at91_sys_write(AT91_AIC_SMR(irq), smr | srctype);
return 0;
}
#ifdef CONFIG_PM
static u32 wakeups;
static u32 backups;
static int at91_aic_set_wake(unsigned irq, unsigned value)
{
if (unlikely(irq >= 32))
return -EINVAL;
if (value)
wakeups |= (1 << irq);
else
wakeups &= ~(1 << irq);
return 0;
}
void at91_irq_suspend(void)
{
backups = at91_sys_read(AT91_AIC_IMR);
at91_sys_write(AT91_AIC_IDCR, backups);
at91_sys_write(AT91_AIC_IECR, wakeups);
}
void at91_irq_resume(void)
{
at91_sys_write(AT91_AIC_IDCR, wakeups);
at91_sys_write(AT91_AIC_IECR, backups);
}
#else
#define at91_aic_set_wake NULL
#endif
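/*
 * Illustrative sketch (not part of this file): at91_irq_suspend() and
 * at91_irq_resume() above just swap two 32-bit masks on the AIC -- the
 * run-time enabled sources ("backups") and the wakeup sources
 * ("wakeups"). A plain-C model of that swap:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t enabled;	/* stands in for the AIC's interrupt mask */

static void demo_suspend(uint32_t *backups, uint32_t wakeups)
{
	*backups = enabled;	/* remember the run-time mask */
	enabled &= ~*backups;	/* disable everything...        */
	enabled |= wakeups;	/* ...except the wakeup sources */
}

static void demo_resume(uint32_t backups, uint32_t wakeups)
{
	enabled &= ~wakeups;	/* drop wakeup-only sources  */
	enabled |= backups;	/* restore the run-time mask */
}

int main(void)
{
	uint32_t backups = 0, wakeups = 1u << 3;	/* IRQ 3 may wake us */

	enabled = (1u << 1) | (1u << 5);
	demo_suspend(&backups, wakeups);
	printf("suspended mask: 0x%08x\n", enabled);	/* 0x00000008 */
	demo_resume(backups, wakeups);
	printf("resumed mask: 0x%08x\n", enabled);	/* 0x00000022 */
	return 0;
}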
static struct irq_chip at91_aic_chip = {
.name = "AIC",
.ack = at91_aic_mask_irq,
.mask = at91_aic_mask_irq,
.unmask = at91_aic_unmask_irq,
.set_type = at91_aic_set_type,
.set_wake = at91_aic_set_wake,
};
/*
* Initialize the AIC interrupt controller.
*/
void __init at91_aic_init(unsigned int priority[NR_AIC_IRQS])
{
unsigned int i;
/*
* The IVR is used by macro get_irqnr_and_base to read and verify.
* The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
*/
for (i = 0; i < NR_AIC_IRQS; i++) {
/* Put irq number in Source Vector Register: */
at91_sys_write(AT91_AIC_SVR(i), i);
/* Active Low interrupt, with the specified priority */
at91_sys_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);
set_irq_chip(i, &at91_aic_chip);
set_irq_handler(i, handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
/* Perform 8 End Of Interrupt Commands to make sure the AIC will not lock out nIRQ */
if (i < 8)
at91_sys_write(AT91_AIC_EOICR, 0);
}
/*
* Spurious Interrupt ID in Spurious Vector Register is NR_AIC_IRQS
* When there is no current interrupt, the IRQ Vector Register reads the value stored in AIC_SPU
*/
at91_sys_write(AT91_AIC_SPU, NR_AIC_IRQS);
/* No debugging in AIC: Debug (Protect) Control Register */
at91_sys_write(AT91_AIC_DCR, 0);
/* Disable and clear all interrupts initially */
at91_sys_write(AT91_AIC_IDCR, 0xFFFFFFFF);
at91_sys_write(AT91_AIC_ICCR, 0xFFFFFFFF);
}
| gpl-2.0 |
bas-t/media_tree | net/rds/info.c | 1732 | 6577 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
#include "rds.h"
/*
* This file implements a getsockopt() call which copies a set of fixed
* sized structs into a user-specified buffer as a means of providing
* read-only information about RDS.
*
* For a given information source there are a given number of fixed sized
* structs at a given time. The structs are only copied if the user-specified
* buffer is big enough. The destination pages that make up the buffer
* are pinned for the duration of the copy.
*
* This gives us the following benefits:
*
* - simple implementation, no copy "position" across multiple calls
* - consistent snapshot of an info source
* - atomic copy works well with whatever locking info source has
* - one portable tool to get rds info across implementations
* - long-lived tool can get info without allocating
*
* at the following costs:
*
* - info source copy must be pinned, may be "large"
*/
struct rds_info_iterator {
struct page **pages;
void *addr;
unsigned long offset;
};
static DEFINE_SPINLOCK(rds_info_lock);
static rds_info_func rds_info_funcs[RDS_INFO_LAST - RDS_INFO_FIRST + 1];
void rds_info_register_func(int optname, rds_info_func func)
{
int offset = optname - RDS_INFO_FIRST;
BUG_ON(optname < RDS_INFO_FIRST || optname > RDS_INFO_LAST);
spin_lock(&rds_info_lock);
BUG_ON(rds_info_funcs[offset]);
rds_info_funcs[offset] = func;
spin_unlock(&rds_info_lock);
}
EXPORT_SYMBOL_GPL(rds_info_register_func);
void rds_info_deregister_func(int optname, rds_info_func func)
{
int offset = optname - RDS_INFO_FIRST;
BUG_ON(optname < RDS_INFO_FIRST || optname > RDS_INFO_LAST);
spin_lock(&rds_info_lock);
BUG_ON(rds_info_funcs[offset] != func);
rds_info_funcs[offset] = NULL;
spin_unlock(&rds_info_lock);
}
EXPORT_SYMBOL_GPL(rds_info_deregister_func);
/*
* Typically we hold an atomic kmap across multiple rds_info_copy() calls
* because the kmap is so expensive. This must be called before using blocking
* operations while holding the mapping and as the iterator is torn down.
*/
void rds_info_iter_unmap(struct rds_info_iterator *iter)
{
if (iter->addr) {
kunmap_atomic(iter->addr);
iter->addr = NULL;
}
}
/*
* get_user_pages() called flush_dcache_page() on the pages for us.
*/
void rds_info_copy(struct rds_info_iterator *iter, void *data,
unsigned long bytes)
{
unsigned long this;
while (bytes) {
if (!iter->addr)
iter->addr = kmap_atomic(*iter->pages);
this = min(bytes, PAGE_SIZE - iter->offset);
rdsdebug("page %p addr %p offset %lu this %lu data %p "
"bytes %lu\n", *iter->pages, iter->addr,
iter->offset, this, data, bytes);
memcpy(iter->addr + iter->offset, data, this);
data += this;
bytes -= this;
iter->offset += this;
if (iter->offset == PAGE_SIZE) {
kunmap_atomic(iter->addr);
iter->addr = NULL;
iter->offset = 0;
iter->pages++;
}
}
}
EXPORT_SYMBOL_GPL(rds_info_copy);
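/*
 * Illustrative sketch (not part of this file): the chunking loop of
 * rds_info_copy() above, reduced to plain C. Each pass copies at most
 * the bytes left in the current page, then steps to the next page; a
 * tiny "page size" makes the split visible.
 */
#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 8

static void demo_copy(char *pages[], size_t offset,
		      const char *data, size_t bytes)
{
	while (bytes) {
		size_t this = bytes;

		if (this > DEMO_PAGE_SIZE - offset)
			this = DEMO_PAGE_SIZE - offset;	/* clamp to page end */
		memcpy(pages[0] + offset, data, this);
		data += this;
		bytes -= this;
		offset += this;
		if (offset == DEMO_PAGE_SIZE) {	/* crossed a page boundary */
			offset = 0;
			pages++;
		}
	}
}

int main(void)
{
	char p0[DEMO_PAGE_SIZE] = "", p1[DEMO_PAGE_SIZE] = "";
	char *pages[] = { p0, p1 };

	demo_copy(pages, 5, "ABCDEF", 6);	/* 3 bytes in p0, 3 in p1 */
	printf("p0 tail: %.3s, p1 head: %.3s\n", p0 + 5, p1);
	return 0;
}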
/*
* @optval points to the userspace buffer that the information snapshot
* will be copied into.
*
* @optlen on input is the size of the buffer in userspace. @optlen
* on output is the size of the requested snapshot in bytes.
*
* This function returns -errno if there is a failure, particularly -ENOSPC
* if the given userspace buffer was not large enough to fit the snapshot.
* On success it returns the positive number of bytes of each array element
* in the snapshot.
*/
int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
int __user *optlen)
{
struct rds_info_iterator iter;
struct rds_info_lengths lens;
unsigned long nr_pages = 0;
unsigned long start;
unsigned long i;
rds_info_func func;
struct page **pages = NULL;
int ret;
int len;
int total;
if (get_user(len, optlen)) {
ret = -EFAULT;
goto out;
}
/* check for all kinds of wrapping and the like */
start = (unsigned long)optval;
if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
ret = -EINVAL;
goto out;
}
/* a 0 len call is just trying to probe its length */
if (len == 0)
goto call_func;
nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))
>> PAGE_SHIFT;
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out;
}
ret = get_user_pages_fast(start, nr_pages, 1, pages);
if (ret != nr_pages) {
if (ret > 0)
nr_pages = ret;
else
nr_pages = 0;
ret = -EAGAIN; /* XXX ? */
goto out;
}
rdsdebug("len %d nr_pages %lu\n", len, nr_pages);
call_func:
func = rds_info_funcs[optname - RDS_INFO_FIRST];
if (!func) {
ret = -ENOPROTOOPT;
goto out;
}
iter.pages = pages;
iter.addr = NULL;
iter.offset = start & (PAGE_SIZE - 1);
func(sock, len, &iter, &lens);
BUG_ON(lens.each == 0);
total = lens.nr * lens.each;
rds_info_iter_unmap(&iter);
if (total > len) {
len = total;
ret = -ENOSPC;
} else {
len = total;
ret = lens.each;
}
if (put_user(len, optlen))
ret = -EFAULT;
out:
for (i = 0; pages && i < nr_pages; i++)
put_page(pages[i]);
kfree(pages);
return ret;
}
| gpl-2.0 |
sattarvoybek/OwnKernel-sprout | drivers/net/fddi/skfp/skfddi.c | 2244 | 64899 | /*
* File Name:
* skfddi.c
*
* Copyright Information:
* Copyright SysKonnect 1998,1999.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The information in this file is provided "AS IS" without warranty.
*
* Abstract:
* A Linux device driver supporting the SysKonnect FDDI PCI controller
* family.
*
* Maintainers:
* CG Christoph Goos (cgoos@syskonnect.de)
*
* Contributors:
* DM David S. Miller
*
* Address all question to:
* linux@syskonnect.de
*
* The technical manual for the adapters is available from SysKonnect's
* web pages: www.syskonnect.com
* Go to "Support" and search the Knowledge Base for "manual".
*
* Driver Architecture:
* The driver architecture is based on the DEC FDDI driver by
* Lawrence V. Stefani and several ethernet drivers.
* I also used an existing Windows NT miniport driver.
* All hardware dependent functions are handled by the SysKonnect
* Hardware Module.
* The only header files that are directly related to this source
* file (skfddi.c) are h/types.h, h/osdef1st.h and h/targetos.h.
* The others belong to the SysKonnect FDDI Hardware Module and
* should better not be changed.
*
* Modification History:
* Date Name Description
* 02-Mar-98 CG Created.
*
* 10-Mar-99 CG Support for 2.2.x added.
* 25-Mar-99 CG Corrected IRQ routing for SMP (APIC)
* 26-Oct-99 CG Fixed compilation error on 2.2.13
* 12-Nov-99 CG Source code release
* 22-Nov-99 CG Included in kernel source.
* 07-May-00 DM 64 bit fixes, new dma interface
* 31-Jul-03 DB Audit copy_*_user in skfp_ioctl
* Daniele Bellucci <bellucda@tiscali.it>
* 03-Dec-03 SH Convert to PCI device model
*
* Compilation options (-Dxxx):
* DRIVERDEBUG print lots of messages to log file
* DUMPPACKETS print received/transmitted packets to logfile
*
* Tested cpu architectures:
* - i386
* - sparc64
*/
/* Version information string - should be updated prior to */
/* each new release!!! */
#define VERSION "2.07"
static const char * const boot_msg =
"SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
" SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
/* Include files */
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/fddidevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include "h/types.h"
#undef ADDR // undo Linux definition
#include "h/skfbi.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/smtstate.h"
// Define module-wide (static) routines
static int skfp_driver_init(struct net_device *dev);
static int skfp_open(struct net_device *dev);
static int skfp_close(struct net_device *dev);
static irqreturn_t skfp_interrupt(int irq, void *dev_id);
static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
static void skfp_ctl_set_multicast_list(struct net_device *dev);
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
struct net_device *dev);
static void send_queued_packets(struct s_smc *smc);
static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
static void ResetAdapter(struct s_smc *smc);
// Functions needed by the hardware module
void *mac_drv_get_space(struct s_smc *smc, u_int size);
void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
int flag);
void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
void llc_restart_tx(struct s_smc *smc);
void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count, int len);
void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count);
void mac_drv_fill_rxd(struct s_smc *smc);
void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count);
int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
int la_len);
void dump_data(unsigned char *Data, int length);
// External functions from the hardware module
extern u_int mac_drv_check_space(void);
extern int mac_drv_init(struct s_smc *smc);
extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
int len, int frame_status);
extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
int frame_len, int frame_status);
extern void fddi_isr(struct s_smc *smc);
extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
int len, int frame_status);
extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
extern void mac_drv_clear_rx_queue(struct s_smc *smc);
extern void enable_tx_irq(struct s_smc *smc, u_short queue);
static DEFINE_PCI_DEVICE_TABLE(skfddi_pci_tbl) = {
{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
// Define module-wide (static) variables
static int num_boards; /* total number of adapters configured */
static const struct net_device_ops skfp_netdev_ops = {
.ndo_open = skfp_open,
.ndo_stop = skfp_close,
.ndo_start_xmit = skfp_send_pkt,
.ndo_get_stats = skfp_ctl_get_stats,
.ndo_change_mtu = fddi_change_mtu,
.ndo_set_rx_mode = skfp_ctl_set_multicast_list,
.ndo_set_mac_address = skfp_ctl_set_mac_address,
.ndo_do_ioctl = skfp_ioctl,
};
/*
* =================
* = skfp_init_one =
* =================
*
* Overview:
* Probes for supported FDDI PCI controllers
*
* Returns:
* Condition code
*
* Arguments:
* pdev - pointer to PCI device information
*
* Functional Description:
* This is now called by PCI driver registration process
* for each board found.
*
* Return Codes:
* 0 - This device (fddi0, fddi1, etc) configured successfully
* -ENODEV - No devices present, or no SysKonnect FDDI PCI device
* present for this device name
*
*
* Side Effects:
* Device structures for FDDI adapters (fddi0, fddi1, etc) are
* initialized and the board resources are read and stored in
* the device structure.
*/
static int skfp_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *dev;
struct s_smc *smc; /* board pointer */
void __iomem *mem;
int err;
pr_debug("entering skfp_init_one\n");
if (num_boards == 0)
printk("%s\n", boot_msg);
err = pci_enable_device(pdev);
if (err)
return err;
err = pci_request_regions(pdev, "skfddi");
if (err)
goto err_out1;
pci_set_master(pdev);
#ifdef MEM_MAPPED_IO
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
printk(KERN_ERR "skfp: region is not an MMIO resource\n");
err = -EIO;
goto err_out2;
}
mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
#else
if (!(pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
printk(KERN_ERR "skfp: region is not a PIO resource\n");
err = -EIO;
goto err_out2;
}
mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
#endif
if (!mem) {
printk(KERN_ERR "skfp: Unable to map register, "
"FDDI adapter will be disabled.\n");
err = -EIO;
goto err_out2;
}
dev = alloc_fddidev(sizeof(struct s_smc));
if (!dev) {
printk(KERN_ERR "skfp: Unable to allocate fddi device, "
"FDDI adapter will be disabled.\n");
err = -ENOMEM;
goto err_out3;
}
dev->irq = pdev->irq;
dev->netdev_ops = &skfp_netdev_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
/* Initialize board structure with bus-specific info */
smc = netdev_priv(dev);
smc->os.dev = dev;
smc->os.bus_type = SK_BUS_TYPE_PCI;
smc->os.pdev = *pdev;
smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
smc->os.MaxFrameSize = MAX_FRAME_SIZE;
smc->os.dev = dev;
smc->hw.slot = -1;
smc->hw.iop = mem;
smc->os.ResetRequested = FALSE;
skb_queue_head_init(&smc->os.SendSkbQueue);
dev->base_addr = (unsigned long)mem;
err = skfp_driver_init(dev);
if (err)
goto err_out4;
err = register_netdev(dev);
if (err)
goto err_out5;
++num_boards;
pci_set_drvdata(pdev, dev);
if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
(pdev->subsystem_device & 0xff00) == 0x5800)
printk("%s: SysKonnect FDDI PCI adapter"
" found (SK-%04X)\n", dev->name,
pdev->subsystem_device);
else
printk("%s: FDDI PCI adapter found\n", dev->name);
return 0;
err_out5:
if (smc->os.SharedMemAddr)
pci_free_consistent(pdev, smc->os.SharedMemSize,
smc->os.SharedMemAddr,
smc->os.SharedMemDMA);
pci_free_consistent(pdev, MAX_FRAME_SIZE,
smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
err_out4:
free_netdev(dev);
err_out3:
#ifdef MEM_MAPPED_IO
iounmap(mem);
#else
ioport_unmap(mem);
#endif
err_out2:
pci_release_regions(pdev);
err_out1:
pci_disable_device(pdev);
return err;
}
/*
* Called for each adapter board from pci_unregister_driver
*/
static void skfp_remove_one(struct pci_dev *pdev)
{
struct net_device *p = pci_get_drvdata(pdev);
struct s_smc *lp = netdev_priv(p);
unregister_netdev(p);
if (lp->os.SharedMemAddr) {
pci_free_consistent(&lp->os.pdev,
lp->os.SharedMemSize,
lp->os.SharedMemAddr,
lp->os.SharedMemDMA);
lp->os.SharedMemAddr = NULL;
}
if (lp->os.LocalRxBuffer) {
pci_free_consistent(&lp->os.pdev,
MAX_FRAME_SIZE,
lp->os.LocalRxBuffer,
lp->os.LocalRxBufferDMA);
lp->os.LocalRxBuffer = NULL;
}
#ifdef MEM_MAPPED_IO
iounmap(lp->hw.iop);
#else
ioport_unmap(lp->hw.iop);
#endif
pci_release_regions(pdev);
free_netdev(p);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
/*
* ====================
* = skfp_driver_init =
* ====================
*
* Overview:
* Initializes remaining adapter board structure information
* and makes sure adapter is in a safe state prior to skfp_open().
*
* Returns:
* Condition code
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* This function allocates additional resources such as the host memory
* blocks needed by the adapter.
* The adapter is also reset. The OS must call skfp_open() to open
* the adapter and bring it on-line.
*
* Return Codes:
* 0 - initialization succeeded
* -1 - initialization failed
*/
static int skfp_driver_init(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *bp = &smc->os;
int err = -EIO;
pr_debug("entering skfp_driver_init\n");
// set the io address in private structures
bp->base_addr = dev->base_addr;
// Get the interrupt level from the PCI Configuration Table
smc->hw.irq = dev->irq;
spin_lock_init(&bp->DriverLock);
// Allocate invalid frame
bp->LocalRxBuffer = pci_alloc_consistent(&bp->pdev, MAX_FRAME_SIZE, &bp->LocalRxBufferDMA);
if (!bp->LocalRxBuffer) {
printk("could not allocate mem for ");
printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
goto fail;
}
// Determine the required size of the 'shared' memory area.
bp->SharedMemSize = mac_drv_check_space();
pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
if (bp->SharedMemSize > 0) {
bp->SharedMemSize += 16; // for descriptor alignment
bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev,
bp->SharedMemSize,
&bp->SharedMemDMA);
if (!bp->SharedMemAddr) {
printk("could not allocate mem for ");
printk("hardware module: %ld byte\n",
bp->SharedMemSize);
goto fail;
}
bp->SharedMemHeap = 0; // Nothing used yet.
} else {
bp->SharedMemAddr = NULL;
bp->SharedMemHeap = 0;
} // SharedMemSize > 0
memset(bp->SharedMemAddr, 0, bp->SharedMemSize);
card_stop(smc); // Reset adapter.
pr_debug("mac_drv_init()..\n");
if (mac_drv_init(smc) != 0) {
pr_debug("mac_drv_init() failed\n");
goto fail;
}
read_address(smc, NULL);
pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
smt_reset_defaults(smc, 0);
return 0;
fail:
if (bp->SharedMemAddr) {
pci_free_consistent(&bp->pdev,
bp->SharedMemSize,
bp->SharedMemAddr,
bp->SharedMemDMA);
bp->SharedMemAddr = NULL;
}
if (bp->LocalRxBuffer) {
pci_free_consistent(&bp->pdev, MAX_FRAME_SIZE,
bp->LocalRxBuffer, bp->LocalRxBufferDMA);
bp->LocalRxBuffer = NULL;
}
return err;
} // skfp_driver_init
/*
* =============
* = skfp_open =
* =============
*
* Overview:
* Opens the adapter
*
* Returns:
* Condition code
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* This function brings the adapter to an operational state.
*
* Return Codes:
* 0 - Adapter was successfully opened
* error from request_irq() - Could not register the IRQ
*/
static int skfp_open(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
int err;
pr_debug("entering skfp_open\n");
/* Register IRQ - support shared interrupts by passing device ptr */
err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
dev->name, dev);
if (err)
return err;
/*
* Set current address to factory MAC address
*
* Note: We've already done this step in skfp_driver_init.
* However, it's possible that a user has set a node
* address override, then closed and reopened the
* adapter. Unless we reset the device address field
* now, we'll continue to use the existing modified
* address.
*/
read_address(smc, NULL);
memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
init_smt(smc, NULL);
smt_online(smc, 1);
STI_FBI();
/* Clear local multicast address tables */
mac_clear_multicast(smc);
/* Disable promiscuous filter settings */
mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
netif_start_queue(dev);
return 0;
} // skfp_open
/*
* ==============
* = skfp_close =
* ==============
*
* Overview:
* Closes the device/module.
*
* Returns:
* Condition code
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* This routine closes the adapter and brings it to a safe state.
* The interrupt service routine is deregistered with the OS.
* The adapter can be opened again with another call to skfp_open().
*
* Return Codes:
* Always return 0.
*
* Assumptions:
* No further requests for this adapter are made after this routine is
* called. skfp_open() can be called to reset and reinitialize the
* adapter.
*/
static int skfp_close(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *bp = &smc->os;
CLI_FBI();
smt_reset_defaults(smc, 1);
card_stop(smc);
mac_drv_clear_tx_queue(smc);
mac_drv_clear_rx_queue(smc);
netif_stop_queue(dev);
/* Deregister (free) IRQ */
free_irq(dev->irq, dev);
skb_queue_purge(&bp->SendSkbQueue);
bp->QueueSkb = MAX_TX_QUEUE_LEN;
return 0;
} // skfp_close
/*
* ==================
* = skfp_interrupt =
* ==================
*
* Overview:
* Interrupt processing routine
*
* Returns:
* None
*
* Arguments:
* irq - interrupt vector
* dev_id - pointer to device information
*
* Functional Description:
* This routine calls the interrupt processing routine for this adapter. It
* disables and reenables adapter interrupts, as appropriate. We can support
* shared interrupts since the incoming dev_id pointer provides our device
* structure context. All the real work is done in the hardware module.
*
* Return Codes:
* None
*
* Assumptions:
* The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
* on Intel-based systems) is done by the operating system outside this
* routine.
*
* System interrupts are enabled through this call.
*
* Side Effects:
* Interrupts are disabled, then reenabled at the adapter.
*/
static irqreturn_t skfp_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct s_smc *smc; /* private board structure pointer */
skfddi_priv *bp;
smc = netdev_priv(dev);
bp = &smc->os;
// IRQs enabled or disabled ?
if (inpd(ADDR(B0_IMSK)) == 0) {
// IRQs are disabled: must be shared interrupt
return IRQ_NONE;
}
// Note: At this point, IRQs are enabled.
if ((inpd(ISR_A) & smc->hw.is_imask) == 0) { // IRQ?
// Adapter did not issue an IRQ: must be shared interrupt
return IRQ_NONE;
}
CLI_FBI(); // Disable IRQs from our adapter.
spin_lock(&bp->DriverLock);
// Call interrupt handler in hardware module (HWM).
fddi_isr(smc);
if (smc->os.ResetRequested) {
ResetAdapter(smc);
smc->os.ResetRequested = FALSE;
}
spin_unlock(&bp->DriverLock);
STI_FBI(); // Enable IRQs from our adapter.
return IRQ_HANDLED;
} // skfp_interrupt
/*
* ======================
* = skfp_ctl_get_stats =
* ======================
*
* Overview:
* Get statistics for FDDI adapter
*
* Returns:
* Pointer to FDDI statistics structure
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* Gets current MIB objects from adapter, then
* returns FDDI statistics structure as defined
* in if_fddi.h.
*
* Note: Since the FDDI statistics structure is
* still new and the device structure doesn't
* have an FDDI-specific get statistics handler,
* we'll return the FDDI statistics structure as
* a pointer to an Ethernet statistics structure.
* That way, at least the first part of the statistics
* structure can be decoded properly.
* We'll have to pay attention to this routine as the
* device structure becomes more mature and LAN media
* independent.
*
*/
static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
{
struct s_smc *bp = netdev_priv(dev);
/* Fill the bp->stats structure with driver-maintained counters */
bp->os.MacStat.port_bs_flag[0] = 0x1234;
bp->os.MacStat.port_bs_flag[1] = 0x5678;
// goos: need to fill out fddi statistic
#if 0
/* Get FDDI SMT MIB objects */
/* Fill the bp->stats structure with the SMT MIB object values */
memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
/* Fill the bp->stats structure with the FDDI counter values */
bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
#endif
return (struct net_device_stats *)&bp->os.MacStat;
} // ctl_get_stat
/*
* ==============================
* = skfp_ctl_set_multicast_list =
* ==============================
*
* Overview:
* Enable/Disable LLC frame promiscuous mode reception
* on the adapter and/or update multicast address table.
*
* Returns:
* None
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* This function acquires the driver lock and only calls
* skfp_ctl_set_multicast_list_wo_lock then.
* This routine follows a fairly simple algorithm for setting the
* adapter filters and CAM:
*
* if IFF_PROMISC flag is set
* enable promiscuous mode
* else
* disable promiscuous mode
* if number of multicast addresses <= max. multicast number
* add mc addresses to adapter table
* else
* enable promiscuous mode
* update adapter filters
*
* Assumptions:
* Multicast addresses are presented in canonical (LSB) format.
*
* Side Effects:
* On-board adapter filters are updated.
*/
static void skfp_ctl_set_multicast_list(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *bp = &smc->os;
unsigned long Flags;
spin_lock_irqsave(&bp->DriverLock, Flags);
skfp_ctl_set_multicast_list_wo_lock(dev);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
} // skfp_ctl_set_multicast_list
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
struct netdev_hw_addr *ha;
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
pr_debug("PROMISCUOUS MODE ENABLED\n");
}
/* Else, update multicast address table */
else {
mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
pr_debug("PROMISCUOUS MODE DISABLED\n");
// Reset all MC addresses
mac_clear_multicast(smc);
mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
if (dev->flags & IFF_ALLMULTI) {
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
pr_debug("ENABLE ALL MC ADDRESSES\n");
} else if (!netdev_mc_empty(dev)) {
if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
/* use exact filtering */
// point to first multicast addr
netdev_for_each_mc_addr(ha, dev) {
mac_add_multicast(smc,
(struct fddi_addr *)ha->addr,
1);
pr_debug("ENABLE MC ADDRESS: %pMF\n",
ha->addr);
}
} else { // more MC addresses than HW supports
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
pr_debug("ENABLE ALL MC ADDRESSES\n");
}
} else { // no MC addresses
pr_debug("DISABLE ALL MC ADDRESSES\n");
}
/* Update adapter filters */
mac_update_multicast(smc);
}
} // skfp_ctl_set_multicast_list_wo_lock
/*
* ===========================
* = skfp_ctl_set_mac_address =
* ===========================
*
* Overview:
* set new mac address on adapter and update dev_addr field in device table.
*
* Returns:
* None
*
* Arguments:
* dev - pointer to device information
* addr - pointer to sockaddr structure containing unicast address to set
*
* Assumptions:
* The address pointed to by addr->sa_data is a valid unicast
* address and is presented in canonical (LSB) format.
*/
static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
{
struct s_smc *smc = netdev_priv(dev);
struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
skfddi_priv *bp = &smc->os;
unsigned long Flags;
memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
spin_lock_irqsave(&bp->DriverLock, Flags);
ResetAdapter(smc);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
return 0; /* always return zero */
} // skfp_ctl_set_mac_address
/*
* ==============
* = skfp_ioctl =
* ==============
*
* Overview:
*
* Perform IOCTL call functions here. Some are privileged operations and the
* effective uid is checked in those cases.
*
* Returns:
* status value
* 0 - success
* other - failure
*
* Arguments:
* dev - pointer to device information
* rq - pointer to ioctl request structure
* cmd - ioctl command code (unused; the subcommand is read from rq->ifr_data)
*
*/
static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *lp = &smc->os;
struct s_skfp_ioctl ioc;
int status = 0;
if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
return -EFAULT;
switch (ioc.cmd) {
case SKFP_GET_STATS: /* Get the driver statistics */
ioc.len = sizeof(lp->MacStat);
status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
? -EFAULT : 0;
break;
case SKFP_CLR_STATS: /* Zero out the driver statistics */
if (!capable(CAP_NET_ADMIN)) {
status = -EPERM;
} else {
memset(&lp->MacStat, 0, sizeof(lp->MacStat));
}
break;
default:
printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
status = -EOPNOTSUPP;
} // switch
return status;
} // skfp_ioctl
/*
* =====================
* = skfp_send_pkt =
* =====================
*
* Overview:
* Queues a packet for transmission and tries to transmit it.
*
* Returns:
* Condition code
*
* Arguments:
* skb - pointer to sk_buff to queue for transmission
* dev - pointer to device information
*
* Functional Description:
* Here we assume that an incoming skb transmit request
* is contained in a single physically contiguous buffer
* in which the virtual address of the start of packet
* (skb->data) can be converted to a physical address
* by using pci_map_single().
*
* We have an internal queue for packets we can not send
* immediately. Packets in this queue can be given to the
* adapter if transmit buffers are freed.
*
* We can't free the skb until after it's been DMA'd
* out by the adapter, so we'll keep it in the driver and
* return it in mac_drv_tx_complete.
*
* Return Codes:
* NETDEV_TX_OK - driver has queued and/or sent packet
* NETDEV_TX_BUSY - caller should requeue the sk_buff for later transmission
*
* Assumptions:
* The entire packet is stored in one physically
* contiguous buffer which is not cached and whose
* 32-bit physical address can be determined.
*
* It's vital that this routine is NOT reentered for the
* same board and that the OS is not in another section of
* code (eg. skfp_interrupt) for the same board on a
* different thread.
*
* Side Effects:
* None
*/
static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *bp = &smc->os;
pr_debug("skfp_send_pkt\n");
/*
* Verify that incoming transmit request is OK
*
* Note: The packet size check is consistent with other
* Linux device drivers, although the correct packet
* size should be verified before calling the
* transmit routine.
*/
if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
bp->MacStat.gen.tx_errors++; /* bump error counter */
// dequeue packets from xmt queue and send them
netif_start_queue(dev);
dev_kfree_skb(skb);
return NETDEV_TX_OK; /* return "success" */
}
if (bp->QueueSkb == 0) { // return with tbusy set: queue full
netif_stop_queue(dev);
return NETDEV_TX_BUSY;
}
bp->QueueSkb--;
skb_queue_tail(&bp->SendSkbQueue, skb);
send_queued_packets(netdev_priv(dev));
if (bp->QueueSkb == 0) {
netif_stop_queue(dev);
}
return NETDEV_TX_OK;
} // skfp_send_pkt
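/*
 * Illustrative sketch (not part of this file): the counted-queue
 * backpressure used above. A credit counter (QueueSkb in the driver)
 * is decremented per queued packet; when it hits zero the driver asks
 * the stack to stop submitting, and a later transmit-complete re-opens
 * the gate (llc_restart_tx() in the driver). Names here are made up.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAX_TX_QUEUE_LEN 4

static int credits = DEMO_MAX_TX_QUEUE_LEN;
static bool stopped;

static bool demo_xmit(int pkt)
{
	if (credits == 0) {		/* queue full: reject */
		stopped = true;
		return false;		/* caller must requeue */
	}
	credits--;
	printf("queued pkt %d (credits left %d)\n", pkt, credits);
	if (credits == 0)
		stopped = true;		/* stop after taking the last slot */
	return true;
}

static void demo_tx_complete(void)
{
	credits++;			/* a slot freed up */
	stopped = false;		/* wake the queue  */
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		if (!demo_xmit(i))
			printf("pkt %d rejected, queue stopped\n", i);
	demo_tx_complete();
	demo_xmit(5);
	return 0;
}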
/*
* =======================
* = send_queued_packets =
* =======================
*
* Overview:
* Send packets from the driver queue as long as there are some and
* transmit resources are available.
*
* Returns:
* None
*
* Arguments:
* smc - pointer to smc (adapter) structure
*
* Functional Description:
* Take a packet from queue if there is any. If not, then we are done.
* Check if there are resources to send the packet. If not, requeue it
* and exit.
* Set packet descriptor flags and give packet to adapter.
* Check if any send resources can be freed (we do not use the
* transmit complete interrupt).
*/
static void send_queued_packets(struct s_smc *smc)
{
skfddi_priv *bp = &smc->os;
struct sk_buff *skb;
unsigned char fc;
int queue;
struct s_smt_fp_txd *txd; // Current TxD.
dma_addr_t dma_address;
unsigned long Flags;
int frame_status; // HWM tx frame status.
pr_debug("send queued packets\n");
for (;;) {
// send first buffer from queue
skb = skb_dequeue(&bp->SendSkbQueue);
if (!skb) {
pr_debug("queue empty\n");
return;
} // queue empty !
spin_lock_irqsave(&bp->DriverLock, Flags);
fc = skb->data[0];
queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
#ifdef ESS
// Check if the frame may/must be sent as a synchronous frame.
if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
// It's an LLC frame.
if (!smc->ess.sync_bw_available)
fc &= ~FC_SYNC_BIT; // No bandwidth available.
else { // Bandwidth is available.
if (smc->mib.fddiESSSynchTxMode) {
// Send as sync. frame.
fc |= FC_SYNC_BIT;
}
}
}
#endif // ESS
frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
// Unable to send the frame.
if ((frame_status & RING_DOWN) != 0) {
// Ring is down.
pr_debug("Tx attempt while ring down.\n");
} else if ((frame_status & OUT_OF_TXD) != 0) {
pr_debug("%s: out of TXDs.\n", bp->dev->name);
} else {
pr_debug("%s: out of transmit resources",
bp->dev->name);
}
// Note: We will retry the operation as soon as
// transmit resources become available.
skb_queue_head(&bp->SendSkbQueue, skb);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
return; // Packet has been queued.
} // if (unable to send frame)
bp->QueueSkb++; // one packet less in local queue
// source address in packet ?
CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);
txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);
dma_address = pci_map_single(&bp->pdev, skb->data,
skb->len, PCI_DMA_TODEVICE);
if (frame_status & LAN_TX) {
txd->txd_os.skb = skb; // save skb
txd->txd_os.dma_addr = dma_address; // save dma mapping
}
hwm_tx_frag(smc, skb->data, dma_address, skb->len,
frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);
if (!(frame_status & LAN_TX)) { // local only frame
pci_unmap_single(&bp->pdev, dma_address,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
}
spin_unlock_irqrestore(&bp->DriverLock, Flags);
} // for
return; // never reached
} // send_queued_packets
/************************
*
* CheckSourceAddress
*
* Verify if the source address is set. Insert it if necessary.
*
************************/
static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
{
unsigned char SRBit;
if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0) // source routing bit
return;
if ((unsigned short) frame[1 + 10] != 0)
return;
SRBit = frame[1 + 6] & 0x01;
memcpy(&frame[1 + 6], hw_addr, 6);
frame[8] |= SRBit;
} // CheckSourceAddress
/************************
*
* ResetAdapter
*
* Reset the adapter and bring it back to operational mode.
* Args
* smc - A pointer to the SMT context struct.
* Out
* Nothing.
*
************************/
static void ResetAdapter(struct s_smc *smc)
{
pr_debug("[fddi: ResetAdapter]\n");
// Stop the adapter.
card_stop(smc); // Stop all activity.
// Clear the transmit and receive descriptor queues.
mac_drv_clear_tx_queue(smc);
mac_drv_clear_rx_queue(smc);
// Restart the adapter.
smt_reset_defaults(smc, 1); // Initialize the SMT module.
init_smt(smc, (smc->os.dev)->dev_addr); // Initialize the hardware.
smt_online(smc, 1); // Insert into the ring again.
STI_FBI();
// Restore original receive mode (multicasts, promiscuous, etc.).
skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
} // ResetAdapter
//--------------- functions called by hardware module ----------------
/************************
*
* llc_restart_tx
*
* The hardware driver calls this routine when the transmit complete
* interrupt bits (end of frame) for the synchronous or asynchronous
* queue are set.
*
* NOTE The hardware driver calls this routine even if no packets are queued.
* The routine must be able to handle this case.
* Args
* smc - A pointer to the SMT context struct.
* Out
* Nothing.
*
************************/
void llc_restart_tx(struct s_smc *smc)
{
skfddi_priv *bp = &smc->os;
pr_debug("[llc_restart_tx]\n");
// Try to send queued packets
spin_unlock(&bp->DriverLock);
send_queued_packets(smc);
spin_lock(&bp->DriverLock);
netif_start_queue(bp->dev);// system may send again if it was blocked
} // llc_restart_tx
/************************
*
* mac_drv_get_space
*
* The hardware module calls this function to allocate the memory
* for the SMT MBufs if the define MB_OUTSIDE_SMC is specified.
* Args
* smc - A pointer to the SMT context struct.
*
* size - Size of memory in bytes to allocate.
* Out
* != 0 A pointer to the virtual address of the allocated memory.
* == 0 Allocation error.
*
************************/
void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
{
void *virt;
pr_debug("mac_drv_get_space (%d bytes), ", size);
virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
printk("Unexpected SMT memory size requested: %d\n", size);
return NULL;
}
smc->os.SharedMemHeap += size; // Move heap pointer.
pr_debug("mac_drv_get_space end\n");
pr_debug("virt addr: %lx\n", (ulong) virt);
pr_debug("bus addr: %lx\n", (ulong)
(smc->os.SharedMemDMA +
((char *) virt - (char *)smc->os.SharedMemAddr)));
return virt;
} // mac_drv_get_space
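/*
 * Minimal sketch of the bump-allocation scheme used by mac_drv_get_space()
 * above: a heap offset advances through one pre-allocated 'shared' block
 * and nothing is ever freed. The names below are hypothetical and the
 * block is #if 0'd out; it only illustrates the idea.
 */
#if 0
struct bump_heap {
	char *base;		/* start of the shared block */
	unsigned int heap;	/* current offset, like SharedMemHeap */
	unsigned int size;	/* total size, like SharedMemSize */
};

static void *bump_alloc(struct bump_heap *h, unsigned int len)
{
	void *virt = h->base + h->heap;

	if (h->heap + len > h->size)
		return NULL;	/* heap exhausted, as in the driver */
	h->heap += len;
	return virt;
}
#endif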
/************************
*
* mac_drv_get_desc_mem
*
* This function is called by the hardware dependent module.
* It allocates the memory for the RxD and TxD descriptors.
*
* This memory must be non-cached, non-movable and non-swappable.
* This memory should start at a physical page boundary.
* Args
* smc - A pointer to the SMT context struct.
*
* size - Size of memory in bytes to allocate.
* Out
* != 0 A pointer to the virtual address of the allocated memory.
* == 0 Allocation error.
*
************************/
void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
{
char *virt;
pr_debug("mac_drv_get_desc_mem\n");
// Descriptor memory must be aligned on 16-byte boundary.
virt = mac_drv_get_space(smc, size);
size = (u_int) (16 - (((unsigned long) virt) & 15UL));
size = size % 16;
pr_debug("Allocate %u bytes alignment gap ", size);
pr_debug("for descriptor memory.\n");
if (!mac_drv_get_space(smc, size)) {
printk("fddi: Unable to align descriptor memory.\n");
return NULL;
}
return virt + size;
} // mac_drv_get_desc_mem
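/*
 * The alignment arithmetic above, spelled out: for an address p, the gap
 * (16 - (p & 15)) % 16 is zero when p is already 16-byte aligned and
 * otherwise the number of padding bytes needed. A minimal, #if 0'd sketch:
 */
#if 0
static unsigned int align16_gap(unsigned long p)
{
	unsigned int gap = (unsigned int) (16 - (p & 15UL));

	return gap % 16;	/* e.g. p ending in 0x8 -> 8, in 0x0 -> 0 */
}
#endif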
/************************
*
* mac_drv_virt2phys
*
* Get the physical address of a given virtual address.
* Args
* smc - A pointer to the SMT context struct.
*
* virt - A (virtual) pointer into our 'shared' memory area.
* Out
* Physical address of the given virtual address.
*
************************/
unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
{
return smc->os.SharedMemDMA +
((char *) virt - (char *)smc->os.SharedMemAddr);
} // mac_drv_virt2phys
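/*
 * Sketch of the translation used by mac_drv_virt2phys() above and by
 * dma_master() below: the whole 'shared' area comes from one consistent
 * DMA allocation, so a bus address is simply base_dma + (virt - base_virt).
 * Hypothetical helper, #if 0'd out for illustration only:
 */
#if 0
static unsigned long shared_virt_to_bus(dma_addr_t base_dma, char *base_virt,
					void *virt)
{
	return base_dma + ((char *) virt - base_virt);
}
#endif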
/************************
*
* dma_master
*
* The HWM calls this function when the driver performs a DMA
* transfer. If the OS-specific module must prepare the system hardware
* for the DMA transfer, it should do so in this function.
*
* The hardware module calls this dma_master if it wants to send an SMT
* frame. This means that the virt address passed in here is part of
* the 'shared' memory area.
* Args
* smc - A pointer to the SMT context struct.
*
* virt - The virtual address of the data.
*
* len - The length in bytes of the data.
*
* flag - Indicates the transmit direction and the buffer type:
* DMA_RD (0x01) system RAM ==> adapter buffer memory
* DMA_WR (0x02) adapter buffer memory ==> system RAM
* SMT_BUF (0x80) SMT buffer
*
* >> NOTE: SMT_BUF and DMA_RD are always set for PCI. <<
* Out
* Returns the physical address for the DMA transfer.
*
************************/
u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
{
return smc->os.SharedMemDMA +
((char *) virt - (char *)smc->os.SharedMemAddr);
} // dma_master
/************************
*
* dma_complete
*
* The hardware module calls this routine when it has completed a DMA
* transfer. If the operating system dependent module has set up the DMA
* channel via dma_master() (e.g. Windows NT or AIX) it should clean up
* the DMA channel.
* Args
* smc - A pointer to the SMT context struct.
*
* descr - A pointer to a TxD or RxD, respectively.
*
* flag - Indicates the DMA transfer direction / SMT buffer:
* DMA_RD (0x01) system RAM ==> adapter buffer memory
* DMA_WR (0x02) adapter buffer memory ==> system RAM
* SMT_BUF (0x80) SMT buffer (managed by HWM)
* Out
* Nothing.
*
************************/
void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
{
/* For TX buffers, there are two cases. If it is an SMT transmit
* buffer, there is nothing to do since we use consistent memory
* for the 'shared' memory area. The other case is for normal
* transmit packets given to us by the networking stack, and in
* that case we cleanup the PCI DMA mapping in mac_drv_tx_complete
* below.
*
* For RX buffers, we have to unmap dynamic PCI DMA mappings here
* because the hardware module is about to potentially look at
* the contents of the buffer. If we did not call the PCI DMA
* unmap first, the hardware module could read inconsistent data.
*/
if (flag & DMA_WR) {
skfddi_priv *bp = &smc->os;
volatile struct s_smt_fp_rxd *r = &descr->r;
/* If SKB is NULL, we used the local buffer. */
if (r->rxd_os.skb && r->rxd_os.dma_addr) {
int MaxFrameSize = bp->MaxFrameSize;
pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
MaxFrameSize, PCI_DMA_FROMDEVICE);
r->rxd_os.dma_addr = 0;
}
}
} // dma_complete
/************************
*
* mac_drv_tx_complete
*
* Transmit of a packet is complete. Release the tx staging buffer.
*
* Args
* smc - A pointer to the SMT context struct.
*
* txd - A pointer to the last TxD which is used by the frame.
* Out
* Returns nothing.
*
************************/
void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
{
struct sk_buff *skb;
pr_debug("entering mac_drv_tx_complete\n");
// Check if this TxD points to a skb
if (!(skb = txd->txd_os.skb)) {
pr_debug("TXD with no skb assigned.\n");
return;
}
txd->txd_os.skb = NULL;
// release the DMA mapping
pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
skb->len, PCI_DMA_TODEVICE);
txd->txd_os.dma_addr = 0;
smc->os.MacStat.gen.tx_packets++; // Count transmitted packets.
smc->os.MacStat.gen.tx_bytes+=skb->len; // Count bytes
// free the skb
dev_kfree_skb_irq(skb);
pr_debug("leaving mac_drv_tx_complete\n");
} // mac_drv_tx_complete
/************************
*
* dump packets to logfile
*
************************/
#ifdef DUMPPACKETS
void dump_data(unsigned char *Data, int length)
{
int i, j;
unsigned char s[255], sh[10];
if (length > 64) {
length = 64;
}
printk(KERN_INFO "---Packet start---\n");
for (i = 0, j = 0; i < length / 8; i++, j += 8)
printk(KERN_INFO "%02x %02x %02x %02x %02x %02x %02x %02x\n",
Data[j + 0], Data[j + 1], Data[j + 2], Data[j + 3],
Data[j + 4], Data[j + 5], Data[j + 6], Data[j + 7]);
strcpy(s, "");
for (i = 0; i < length % 8; i++) {
sprintf(sh, "%02x ", Data[j + i]);
strcat(s, sh);
}
printk(KERN_INFO "%s\n", s);
printk(KERN_INFO "------------------\n");
} // dump_data
#else
#define dump_data(data,len)
#endif // DUMPPACKETS
/************************
*
* mac_drv_rx_complete
*
* The hardware module calls this function if an LLC frame is received
* in a receive buffer. Also the SMT, NSA, and directed beacon frames
* from the network will be passed to the LLC layer by this function
* if passing is enabled.
*
* mac_drv_rx_complete forwards the frame to the LLC layer if it should
* be received. It also fills the RxD ring with new receive buffers if
* some can be queued.
* Args
* smc - A pointer to the SMT context struct.
*
* rxd - A pointer to the first RxD which is used by the receive frame.
*
* frag_count - Count of RxDs used by the received frame.
*
* len - Frame length.
* Out
* Nothing.
*
************************/
void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count, int len)
{
skfddi_priv *bp = &smc->os;
struct sk_buff *skb;
unsigned char *virt, *cp;
unsigned short ri;
u_int RifLength;
pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
if (frag_count != 1) { // This is not allowed to happen.
printk("fddi: Multi-fragment receive!\n");
goto RequeueRxd; // Re-use the given RXD(s).
}
skb = rxd->rxd_os.skb;
if (!skb) {
pr_debug("No skb in rxd\n");
smc->os.MacStat.gen.rx_errors++;
goto RequeueRxd;
}
virt = skb->data;
// The DMA mapping was released in dma_complete above.
dump_data(skb->data, len);
/*
* FDDI Frame format:
* +-------+-------+-------+------------+--------+------------+
* | FC[1] | DA[6] | SA[6] | RIF[0..18] | LLC[3] | Data[0..n] |
* +-------+-------+-------+------------+--------+------------+
*
* FC = Frame Control
* DA = Destination Address
* SA = Source Address
* RIF = Routing Information Field
* LLC = Logical Link Control
*/
// Remove Routing Information Field (RIF), if present.
if ((virt[1 + 6] & FDDI_RII) == 0)
RifLength = 0;
else {
int n;
// goos: RIF removal still has to be tested
pr_debug("RIF found\n");
// Get RIF length from Routing Control (RC) field.
cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
ri = ntohs(*((__be16 *) cp));
RifLength = ri & FDDI_RCF_LEN_MASK;
if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
printk("fddi: Invalid RIF.\n");
goto RequeueRxd; // Discard the frame.
}
virt[1 + 6] &= ~FDDI_RII; // Clear RII bit.
// source and destination regions overlap, so copy backwards
virt = cp + RifLength;
for (n = FDDI_MAC_HDR_LEN; n; n--)
*--virt = *--cp;
// adjust sbd->data pointer
skb_pull(skb, RifLength);
len -= RifLength;
RifLength = 0;
}
// Count statistics.
smc->os.MacStat.gen.rx_packets++; // Count indicated receive
// packets.
smc->os.MacStat.gen.rx_bytes+=len; // Count bytes.
// virt points to header again
if (virt[1] & 0x01) { // Check group (multicast) bit.
smc->os.MacStat.gen.multicast++;
}
// deliver frame to system
rxd->rxd_os.skb = NULL;
skb_trim(skb, len);
skb->protocol = fddi_type_trans(skb, bp->dev);
netif_rx(skb);
HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
return;
RequeueRxd:
pr_debug("Rx: re-queue RXD.\n");
mac_drv_requeue_rxd(smc, rxd, frag_count);
smc->os.MacStat.gen.rx_errors++; // Count receive packets
// not indicated.
} // mac_drv_rx_complete
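/*
 * Sketch of the RIF handling above: the Routing Control (RC) field sits
 * directly behind the MAC header and its low bits encode the RIF length
 * in bytes. Standalone illustration using the same fddi.h constants as
 * the code above; #if 0'd out, never compiled.
 */
#if 0
static unsigned int example_rif_length(unsigned char *virt)
{
	unsigned short ri;

	if ((virt[1 + 6] & FDDI_RII) == 0)
		return 0;			/* no RIF present */
	ri = ntohs(*((__be16 *) (virt + FDDI_MAC_HDR_LEN)));
	return ri & FDDI_RCF_LEN_MASK;		/* RIF length in bytes */
}
#endif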
/************************
*
* mac_drv_requeue_rxd
*
* The hardware module calls this function to request the OS-specific
* module to queue the receive buffer(s) represented by the pointer
* to the RxD and the frag_count into the receive queue again. This
* buffer was filled with an invalid frame or an SMT frame.
* Args
* smc - A pointer to the SMT context struct.
*
* rxd - A pointer to the first RxD which is used by the receive frame.
*
* frag_count - Count of RxDs used by the received frame.
* Out
* Nothing.
*
************************/
void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count)
{
volatile struct s_smt_fp_rxd *next_rxd;
volatile struct s_smt_fp_rxd *src_rxd;
struct sk_buff *skb;
int MaxFrameSize;
unsigned char *v_addr;
dma_addr_t b_addr;
if (frag_count != 1) // This is not allowed to happen.
printk("fddi: Multi-fragment requeue!\n");
MaxFrameSize = smc->os.MaxFrameSize;
src_rxd = rxd;
for (; frag_count > 0; frag_count--) {
next_rxd = src_rxd->rxd_next;
rxd = HWM_GET_CURR_RXD(smc);
skb = src_rxd->rxd_os.skb;
if (skb == NULL) { // this should not happen
pr_debug("Requeue with no skb in rxd!\n");
skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
if (skb) {
// we got a skb
rxd->rxd_os.skb = skb;
skb_reserve(skb, 3);
skb_put(skb, MaxFrameSize);
v_addr = skb->data;
b_addr = pci_map_single(&smc->os.pdev,
v_addr,
MaxFrameSize,
PCI_DMA_FROMDEVICE);
rxd->rxd_os.dma_addr = b_addr;
} else {
// no skb available, use local buffer
pr_debug("Queueing invalid buffer!\n");
rxd->rxd_os.skb = NULL;
v_addr = smc->os.LocalRxBuffer;
b_addr = smc->os.LocalRxBufferDMA;
}
} else {
// we use skb from old rxd
rxd->rxd_os.skb = skb;
v_addr = skb->data;
b_addr = pci_map_single(&smc->os.pdev,
v_addr,
MaxFrameSize,
PCI_DMA_FROMDEVICE);
rxd->rxd_os.dma_addr = b_addr;
}
hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
FIRST_FRAG | LAST_FRAG);
src_rxd = next_rxd;
}
} // mac_drv_requeue_rxd
/************************
*
* mac_drv_fill_rxd
*
* The hardware module calls this function at initialization time
* to fill the RxD ring with receive buffers. It is also called by
* mac_drv_rx_complete if rx_free is large enough to queue some new
* receive buffers into the RxD ring. mac_drv_fill_rxd queues new
* receive buffers as long as enough RxDs and receive buffers are
* available.
* Args
* smc - A pointer to the SMT context struct.
* Out
* Nothing.
*
************************/
void mac_drv_fill_rxd(struct s_smc *smc)
{
int MaxFrameSize;
unsigned char *v_addr;
unsigned long b_addr;
struct sk_buff *skb;
volatile struct s_smt_fp_rxd *rxd;
pr_debug("entering mac_drv_fill_rxd\n");
// Walk through the list of free receive buffers, passing receive
// buffers to the HWM as long as RXDs are available.
MaxFrameSize = smc->os.MaxFrameSize;
// Check if there is any RXD left.
while (HWM_GET_RX_FREE(smc) > 0) {
pr_debug(".\n");
rxd = HWM_GET_CURR_RXD(smc);
skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
if (skb) {
// we got a skb
skb_reserve(skb, 3);
skb_put(skb, MaxFrameSize);
v_addr = skb->data;
b_addr = pci_map_single(&smc->os.pdev,
v_addr,
MaxFrameSize,
PCI_DMA_FROMDEVICE);
rxd->rxd_os.dma_addr = b_addr;
} else {
// no skb available, use local buffer
// System has run out of buffer memory, but we want to
// keep the receiver running in hope of better times.
// Multiple descriptors may point to this local buffer,
// so data in it must be considered invalid.
pr_debug("Queueing invalid buffer!\n");
v_addr = smc->os.LocalRxBuffer;
b_addr = smc->os.LocalRxBufferDMA;
}
rxd->rxd_os.skb = skb;
// Pass receive buffer to HWM.
hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
FIRST_FRAG | LAST_FRAG);
}
pr_debug("leaving mac_drv_fill_rxd\n");
} // mac_drv_fill_rxd
/************************
*
* mac_drv_clear_rxd
*
* The hardware module calls this function to release unused
* receive buffers.
* Args
* smc - A pointer to the SMT context struct.
*
* rxd - A pointer to the first RxD which is used by the receive buffer.
*
* frag_count - Count of RxDs used by the receive buffer.
* Out
* Nothing.
*
************************/
void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count)
{
struct sk_buff *skb;
pr_debug("entering mac_drv_clear_rxd\n");
if (frag_count != 1) // This is not allowed to happen.
printk("fddi: Multi-fragment clear!\n");
for (; frag_count > 0; frag_count--) {
skb = rxd->rxd_os.skb;
if (skb != NULL) {
skfddi_priv *bp = &smc->os;
int MaxFrameSize = bp->MaxFrameSize;
pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
MaxFrameSize, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
rxd->rxd_os.skb = NULL;
}
rxd = rxd->rxd_next; // Next RXD.
}
} // mac_drv_clear_rxd
/************************
*
* mac_drv_rx_init
*
* The hardware module calls this routine when an SMT or NSA frame of the
* local SMT should be delivered to the LLC layer.
*
* It is necessary to have this function, because there is no other way to
* copy the contents of SMT MBufs into receive buffers.
*
* mac_drv_rx_init allocates the required target memory for this frame,
* and receives the frame fragment by fragment by calling mac_drv_rx_frag.
* Args
* smc - A pointer to the SMT context struct.
*
* len - The length (in bytes) of the received frame (FC, DA, SA, Data).
*
* fc - The Frame Control field of the received frame.
*
* look_ahead - A pointer to the lookahead data buffer (may be NULL).
*
* la_len - The length of the lookahead data stored in the lookahead
* buffer (may be zero).
* Out
* Always returns zero (0).
*
************************/
int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
char *look_ahead, int la_len)
{
struct sk_buff *skb;
pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
// "Received" a SMT or NSA frame of the local SMT.
if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
pr_debug("fddi: Discard invalid local SMT frame\n");
pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
len, la_len, (unsigned long) look_ahead);
return 0;
}
skb = alloc_skb(len + 3, GFP_ATOMIC);
if (!skb) {
pr_debug("fddi: Local SMT: skb memory exhausted.\n");
return 0;
}
skb_reserve(skb, 3);
skb_put(skb, len);
skb_copy_to_linear_data(skb, look_ahead, len);
// deliver frame to system
skb->protocol = fddi_type_trans(skb, smc->os.dev);
netif_rx(skb);
return 0;
} // mac_drv_rx_init
/************************
*
* smt_timer_poll
*
* This routine is called periodically by the SMT module to clean up the
* driver.
*
* Return any queued frames back to the upper protocol layers if the ring
* is down.
* Args
* smc - A pointer to the SMT context struct.
* Out
* Nothing.
*
************************/
void smt_timer_poll(struct s_smc *smc)
{
} // smt_timer_poll
/************************
*
* ring_status_indication
*
* This function indicates a change of the ring state.
* Args
* smc - A pointer to the SMT context struct.
*
* status - The current ring status.
* Out
* Nothing.
*
************************/
void ring_status_indication(struct s_smc *smc, u_long status)
{
pr_debug("ring_status_indication( ");
if (status & RS_RES15)
pr_debug("RS_RES15 ");
if (status & RS_HARDERROR)
pr_debug("RS_HARDERROR ");
if (status & RS_SOFTERROR)
pr_debug("RS_SOFTERROR ");
if (status & RS_BEACON)
pr_debug("RS_BEACON ");
if (status & RS_PATHTEST)
pr_debug("RS_PATHTEST ");
if (status & RS_SELFTEST)
pr_debug("RS_SELFTEST ");
if (status & RS_RES9)
pr_debug("RS_RES9 ");
if (status & RS_DISCONNECT)
pr_debug("RS_DISCONNECT ");
if (status & RS_RES7)
pr_debug("RS_RES7 ");
if (status & RS_DUPADDR)
pr_debug("RS_DUPADDR ");
if (status & RS_NORINGOP)
pr_debug("RS_NORINGOP ");
if (status & RS_VERSION)
pr_debug("RS_VERSION ");
if (status & RS_STUCKBYPASSS)
pr_debug("RS_STUCKBYPASSS ");
if (status & RS_EVENT)
pr_debug("RS_EVENT ");
if (status & RS_RINGOPCHANGE)
pr_debug("RS_RINGOPCHANGE ");
if (status & RS_RES0)
pr_debug("RS_RES0 ");
pr_debug("]\n");
} // ring_status_indication
/************************
*
* smt_get_time
*
* Gets the current time from the system.
* Args
* None.
* Out
* The current time in TICKS_PER_SECOND.
*
* TICKS_PER_SECOND has the unit 'count of timer ticks per second'. It is
* defined in "targetos.h". The definition of TICKS_PER_SECOND must comply
* to the time returned by smt_get_time().
*
************************/
unsigned long smt_get_time(void)
{
return jiffies;
} // smt_get_time
/************************
*
* smt_stat_counter
*
* Status counter update (ring_op, fifo full).
* Args
* smc - A pointer to the SMT context struct.
*
* stat - = 0: A ring operational change occurred.
* = 1: The FORMAC FIFO buffer is full / FIFO overflow.
* Out
* Nothing.
*
************************/
void smt_stat_counter(struct s_smc *smc, int stat)
{
// BOOLEAN RingIsUp ;
pr_debug("smt_stat_counter\n");
switch (stat) {
case 0:
pr_debug("Ring operational change.\n");
break;
case 1:
pr_debug("Receive fifo overflow.\n");
smc->os.MacStat.gen.rx_errors++;
break;
default:
pr_debug("Unknown status (%d).\n", stat);
break;
}
} // smt_stat_counter
/************************
*
* cfm_state_change
*
* Sets CFM state in custom statistics.
* Args
* smc - A pointer to the SMT context struct.
*
* c_state - Possible values are:
*
* SC0_ISOLATED, SC1_WRAP_A, SC2_WRAP_B, SC4_THRU_A, SC5_THRU_B,
* SC7_WRAP_S, SC9_C_WRAP_A, SC10_C_WRAP_B, SC11_C_WRAP_S
* Out
* Nothing.
*
************************/
void cfm_state_change(struct s_smc *smc, int c_state)
{
#ifdef DRIVERDEBUG
char *s;
switch (c_state) {
case SC0_ISOLATED:
s = "SC0_ISOLATED";
break;
case SC1_WRAP_A:
s = "SC1_WRAP_A";
break;
case SC2_WRAP_B:
s = "SC2_WRAP_B";
break;
case SC4_THRU_A:
s = "SC4_THRU_A";
break;
case SC5_THRU_B:
s = "SC5_THRU_B";
break;
case SC7_WRAP_S:
s = "SC7_WRAP_S";
break;
case SC9_C_WRAP_A:
s = "SC9_C_WRAP_A";
break;
case SC10_C_WRAP_B:
s = "SC10_C_WRAP_B";
break;
case SC11_C_WRAP_S:
s = "SC11_C_WRAP_S";
break;
default:
pr_debug("cfm_state_change: unknown %d\n", c_state);
return;
}
pr_debug("cfm_state_change: %s\n", s);
#endif // DRIVERDEBUG
} // cfm_state_change
/************************
*
* ecm_state_change
*
* Sets ECM state in custom statistics.
* Args
* smc - A pointer to the SMT context struct.
*
* e_state - Possible values are:
*
* EC0_OUT, EC1_IN, EC2_TRACE, EC3_LEAVE, EC4_PATH_TEST,
* EC5_INSERT, EC6_CHECK, EC7_DEINSERT
* Out
* Nothing.
*
************************/
void ecm_state_change(struct s_smc *smc, int e_state)
{
#ifdef DRIVERDEBUG
char *s;
switch (e_state) {
case EC0_OUT:
s = "EC0_OUT";
break;
case EC1_IN:
s = "EC1_IN";
break;
case EC2_TRACE:
s = "EC2_TRACE";
break;
case EC3_LEAVE:
s = "EC3_LEAVE";
break;
case EC4_PATH_TEST:
s = "EC4_PATH_TEST";
break;
case EC5_INSERT:
s = "EC5_INSERT";
break;
case EC6_CHECK:
s = "EC6_CHECK";
break;
case EC7_DEINSERT:
s = "EC7_DEINSERT";
break;
default:
s = "unknown";
break;
}
pr_debug("ecm_state_change: %s\n", s);
#endif //DRIVERDEBUG
} // ecm_state_change
/************************
*
* rmt_state_change
*
* Sets RMT state in custom statistics.
* Args
* smc - A pointer to the SMT context struct.
*
* r_state - Possible values are:
*
* RM0_ISOLATED, RM1_NON_OP, RM2_RING_OP, RM3_DETECT,
* RM4_NON_OP_DUP, RM5_RING_OP_DUP, RM6_DIRECTED, RM7_TRACE
* Out
* Nothing.
*
************************/
void rmt_state_change(struct s_smc *smc, int r_state)
{
#ifdef DRIVERDEBUG
char *s;
switch (r_state) {
case RM0_ISOLATED:
s = "RM0_ISOLATED";
break;
case RM1_NON_OP:
s = "RM1_NON_OP - not operational";
break;
case RM2_RING_OP:
s = "RM2_RING_OP - ring operational";
break;
case RM3_DETECT:
s = "RM3_DETECT - detect dupl addresses";
break;
case RM4_NON_OP_DUP:
s = "RM4_NON_OP_DUP - dupl. addr detected";
break;
case RM5_RING_OP_DUP:
s = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
break;
case RM6_DIRECTED:
s = "RM6_DIRECTED - sending directed beacons";
break;
case RM7_TRACE:
s = "RM7_TRACE - trace initiated";
break;
default:
s = "unknown";
break;
}
pr_debug("[rmt_state_change: %s]\n", s);
#endif // DRIVERDEBUG
} // rmt_state_change
/************************
*
* drv_reset_indication
*
* This function is called by the SMT when it has detected a severe
* hardware problem. The driver should perform a reset on the adapter
* as soon as possible, but not from within this function.
* Args
* smc - A pointer to the SMT context struct.
* Out
* Nothing.
*
************************/
void drv_reset_indication(struct s_smc *smc)
{
pr_debug("entering drv_reset_indication\n");
smc->os.ResetRequested = TRUE; // Set flag.
} // drv_reset_indication
static struct pci_driver skfddi_pci_driver = {
.name = "skfddi",
.id_table = skfddi_pci_tbl,
.probe = skfp_init_one,
.remove = skfp_remove_one,
};
static int __init skfd_init(void)
{
return pci_register_driver(&skfddi_pci_driver);
}
static void __exit skfd_exit(void)
{
pci_unregister_driver(&skfddi_pci_driver);
}
module_init(skfd_init);
module_exit(skfd_exit);
| gpl-2.0 |
sktjdgns1189/android_kernel_samsung_kccat6 | drivers/mtd/maps/intel_vr_nor.c | 2244 | 6986 | /*
* drivers/mtd/maps/intel_vr_nor.c
*
* An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel
* Vermilion Range chipset.
*
* The Vermilion Range Expansion Bus supports four chip selects, each of which
* has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device
* is a 256MiB memory region containing the address spaces for all four of the
* chip selects, with start addresses hardcoded on 64MiB boundaries.
*
* This map driver only supports NOR flash on chip select 0. The buswidth
* (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing
* and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does
* not modify the value in the EXP_TIMING_CS0 register except to enable writing
* and disable boot acceleration. The timing parameters in the register are
* assumed to have been properly initialized by the BIOS. The reset default
* timing parameters are maximally conservative (slow), so access to the flash
* will be slower than it should be if the BIOS has not initialized the timing
* parameters.
*
* Author: Andy Lowe <alowe@mvista.com>
*
* 2006 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#define DRV_NAME "vr_nor"
struct vr_nor_mtd {
void __iomem *csr_base;
struct map_info map;
struct mtd_info *info;
struct pci_dev *dev;
};
/* Expansion Bus Configuration and Status Registers are in BAR 0 */
#define EXP_CSR_MBAR 0
/* Expansion Bus Memory Window is BAR 1 */
#define EXP_WIN_MBAR 1
/* Maximum address space for Chip Select 0 is 64MiB */
#define CS0_SIZE 0x04000000
/* Chip Select 0 is at offset 0 in the Memory Window */
#define CS0_START 0x0
/* Chip Select 0 Timing Register is at offset 0 in CSR */
#define EXP_TIMING_CS0 0x00
#define TIMING_CS_EN (1 << 31) /* Chip Select Enable */
#define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */
#define TIMING_WR_EN (1 << 1) /* Write Enable */
#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
#define TIMING_MASK 0x3FFF0000
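/*
 * Sketch of how the EXP_TIMING_CS0 bits above are interpreted at probe
 * time (see vr_nor_init_maps() below). The helper is hypothetical and
 * #if 0'd out; it only spells out the bit layout.
 */
#if 0
static void example_decode_timing_cs0(unsigned int exp_timing_cs0)
{
	int enabled = !!(exp_timing_cs0 & TIMING_CS_EN);	/* bit 31 */
	int writable = !!(exp_timing_cs0 & TIMING_WR_EN);	/* bit 1 */
	int bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2; /* bytes */

	(void) enabled; (void) writable; (void) bankwidth;
}
#endif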
static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
mtd_device_unregister(p->info);
}
static int vr_nor_init_partitions(struct vr_nor_mtd *p)
{
/* register the flash bank */
/* partition the flash bank */
return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
}
static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
{
map_destroy(p->info);
}
static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
{
static const char * const probe_types[] =
{ "cfi_probe", "jedec_probe", NULL };
const char * const *type;
for (type = probe_types; !p->info && *type; type++)
p->info = do_map_probe(*type, &p->map);
if (!p->info)
return -ENODEV;
p->info->owner = THIS_MODULE;
return 0;
}
static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
{
unsigned int exp_timing_cs0;
/* write-protect the flash bank */
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
exp_timing_cs0 &= ~TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
/* unmap the flash window */
iounmap(p->map.virt);
/* unmap the csr window */
iounmap(p->csr_base);
}
/*
* Initialize the map_info structure and map the flash.
* Returns 0 on success, nonzero otherwise.
*/
static int vr_nor_init_maps(struct vr_nor_mtd *p)
{
unsigned long csr_phys, csr_len;
unsigned long win_phys, win_len;
unsigned int exp_timing_cs0;
int err;
csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);
if (!csr_phys || !csr_len || !win_phys || !win_len)
return -ENODEV;
if (win_len < (CS0_START + CS0_SIZE))
return -ENXIO;
p->csr_base = ioremap_nocache(csr_phys, csr_len);
if (!p->csr_base)
return -ENOMEM;
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
if (!(exp_timing_cs0 & TIMING_CS_EN)) {
dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
"is disabled.\n");
err = -ENODEV;
goto release;
}
if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) {
dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
"is configured for maximally slow access times.\n");
}
p->map.name = DRV_NAME;
p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
p->map.phys = win_phys + CS0_START;
p->map.size = CS0_SIZE;
p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
if (!p->map.virt) {
err = -ENOMEM;
goto release;
}
simple_map_init(&p->map);
/* Enable writes to flash bank */
exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
return 0;
release:
iounmap(p->csr_base);
return err;
}
static struct pci_device_id vr_nor_pci_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)},
{0,}
};
static void vr_nor_pci_remove(struct pci_dev *dev)
{
struct vr_nor_mtd *p = pci_get_drvdata(dev);
pci_set_drvdata(dev, NULL);
vr_nor_destroy_partitions(p);
vr_nor_destroy_mtd_setup(p);
vr_nor_destroy_maps(p);
kfree(p);
pci_release_regions(dev);
pci_disable_device(dev);
}
static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct vr_nor_mtd *p = NULL;
unsigned int exp_timing_cs0;
int err;
err = pci_enable_device(dev);
if (err)
goto out;
err = pci_request_regions(dev, DRV_NAME);
if (err)
goto disable_dev;
p = kzalloc(sizeof(*p), GFP_KERNEL);
err = -ENOMEM;
if (!p)
goto release;
p->dev = dev;
err = vr_nor_init_maps(p);
if (err)
goto release;
err = vr_nor_mtd_setup(p);
if (err)
goto destroy_maps;
err = vr_nor_init_partitions(p);
if (err)
goto destroy_mtd_setup;
pci_set_drvdata(dev, p);
return 0;
destroy_mtd_setup:
map_destroy(p->info);
destroy_maps:
/* write-protect the flash bank */
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
exp_timing_cs0 &= ~TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
/* unmap the flash window */
iounmap(p->map.virt);
/* unmap the csr window */
iounmap(p->csr_base);
release:
kfree(p);
pci_release_regions(dev);
disable_dev:
pci_disable_device(dev);
out:
return err;
}
static struct pci_driver vr_nor_pci_driver = {
.name = DRV_NAME,
.probe = vr_nor_pci_probe,
.remove = vr_nor_pci_remove,
.id_table = vr_nor_pci_ids,
};
module_pci_driver(vr_nor_pci_driver);
MODULE_AUTHOR("Andy Lowe");
MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
| gpl-2.0 |
ZeroInfinityXDA/OQC-m9 | drivers/net/wireless/ath/carl9170/cmd.c | 3524 | 5827 | /*
* Atheros CARL9170 driver
*
* Basic HW register/memory/command access functions
*
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, see
* http://www.gnu.org/licenses/.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* Copyright (c) 2007-2008 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <asm/div64.h>
#include "carl9170.h"
#include "cmd.h"
int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
{
const __le32 buf[2] = {
cpu_to_le32(reg),
cpu_to_le32(val),
};
int err;
err = carl9170_exec_cmd(ar, CARL9170_CMD_WREG, sizeof(buf),
(u8 *) buf, 0, NULL);
if (err) {
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "writing reg %#x "
"(val %#x) failed (%d)\n", reg, val, err);
}
}
return err;
}
int carl9170_read_mreg(struct ar9170 *ar, const int nregs,
const u32 *regs, u32 *out)
{
int i, err;
__le32 *offs, *res;
/* abuse "out" for the register offsets, must be same length */
offs = (__le32 *)out;
for (i = 0; i < nregs; i++)
offs[i] = cpu_to_le32(regs[i]);
/* also use the same buffer for the input */
res = (__le32 *)out;
err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
4 * nregs, (u8 *)offs,
4 * nregs, (u8 *)res);
if (err) {
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "reading regs failed (%d)\n",
err);
}
return err;
}
/* convert result to cpu endian */
for (i = 0; i < nregs; i++)
out[i] = le32_to_cpu(res[i]);
return 0;
}
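/*
 * Usage sketch for carl9170_read_mreg(): the 'out' array doubles as the
 * request buffer, so it must hold nregs entries. The register offsets
 * below are placeholders, not real CARL9170 addresses; #if 0'd out.
 */
#if 0
static int example_read_two_regs(struct ar9170 *ar)
{
	static const u32 regs[2] = { 0x1c3500, 0x1c3504 };	/* placeholders */
	u32 vals[2];
	int err;

	err = carl9170_read_mreg(ar, 2, regs, vals);
	if (err)
		return err;
	/* vals[0] and vals[1] now hold the CPU-endian register contents */
	return 0;
}
#endif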
int carl9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val)
{
return carl9170_read_mreg(ar, 1, &reg, val);
}
int carl9170_echo_test(struct ar9170 *ar, const u32 v)
{
u32 echores;
int err;
err = carl9170_exec_cmd(ar, CARL9170_CMD_ECHO,
4, (u8 *)&v,
4, (u8 *)&echores);
if (err)
return err;
if (v != echores) {
wiphy_info(ar->hw->wiphy, "wrong echo %x != %x", v, echores);
return -EINVAL;
}
return 0;
}
struct carl9170_cmd *carl9170_cmd_buf(struct ar9170 *ar,
const enum carl9170_cmd_oids cmd, const unsigned int len)
{
struct carl9170_cmd *tmp;
tmp = kzalloc(sizeof(struct carl9170_cmd_head) + len, GFP_ATOMIC);
if (tmp) {
tmp->hdr.cmd = cmd;
tmp->hdr.len = len;
}
return tmp;
}
int carl9170_reboot(struct ar9170 *ar)
{
struct carl9170_cmd *cmd;
int err;
cmd = carl9170_cmd_buf(ar, CARL9170_CMD_REBOOT_ASYNC, 0);
if (!cmd)
return -ENOMEM;
err = __carl9170_exec_cmd(ar, cmd, true);
return err;
}
int carl9170_mac_reset(struct ar9170 *ar)
{
return carl9170_exec_cmd(ar, CARL9170_CMD_SWRST,
0, NULL, 0, NULL);
}
int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id,
const u32 mode, const u32 addr, const u32 len)
{
struct carl9170_cmd *cmd;
cmd = carl9170_cmd_buf(ar, CARL9170_CMD_BCN_CTRL_ASYNC,
sizeof(struct carl9170_bcn_ctrl_cmd));
if (!cmd)
return -ENOMEM;
cmd->bcn_ctrl.vif_id = cpu_to_le32(vif_id);
cmd->bcn_ctrl.mode = cpu_to_le32(mode);
cmd->bcn_ctrl.bcn_addr = cpu_to_le32(addr);
cmd->bcn_ctrl.bcn_len = cpu_to_le32(len);
return __carl9170_exec_cmd(ar, cmd, true);
}
int carl9170_collect_tally(struct ar9170 *ar)
{
struct carl9170_tally_rsp tally;
struct survey_info *info;
unsigned int tick;
int err;
err = carl9170_exec_cmd(ar, CARL9170_CMD_TALLY, 0, NULL,
sizeof(tally), (u8 *)&tally);
if (err)
return err;
tick = le32_to_cpu(tally.tick);
if (tick) {
ar->tally.active += le32_to_cpu(tally.active) / tick;
ar->tally.cca += le32_to_cpu(tally.cca) / tick;
ar->tally.tx_time += le32_to_cpu(tally.tx_time) / tick;
ar->tally.rx_total += le32_to_cpu(tally.rx_total);
ar->tally.rx_overrun += le32_to_cpu(tally.rx_overrun);
if (ar->channel) {
info = &ar->survey[ar->channel->hw_value];
info->channel_time = ar->tally.active;
info->channel_time_busy = ar->tally.cca;
info->channel_time_tx = ar->tally.tx_time;
do_div(info->channel_time, 1000);
do_div(info->channel_time_busy, 1000);
do_div(info->channel_time_tx, 1000);
}
}
return 0;
}
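/*
 * Note on the do_div() calls above: do_div() divides a u64 in place and
 * returns the remainder, so the quotient is left in the variable itself.
 * Minimal #if 0'd sketch:
 */
#if 0
static void example_do_div(void)
{
	u64 t = 1234567;
	u32 rem = do_div(t, 1000);	/* afterwards t == 1234, rem == 567 */

	(void) rem;
}
#endif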
int carl9170_powersave(struct ar9170 *ar, const bool ps)
{
struct carl9170_cmd *cmd;
u32 state;
cmd = carl9170_cmd_buf(ar, CARL9170_CMD_PSM_ASYNC,
sizeof(struct carl9170_psm));
if (!cmd)
return -ENOMEM;
if (ps) {
/* Sleep until next TBTT */
state = CARL9170_PSM_SLEEP | 1;
} else {
/* wake up immediately */
state = 1;
}
cmd->psm.state = cpu_to_le32(state);
return __carl9170_exec_cmd(ar, cmd, true);
}
| gpl-2.0 |
omnirom/android_kernel_samsung_smdk4412 | arch/mips/sgi-ip22/ip28-berr.c | 4036 | 14896 | /*
* ip28-berr.c: Bus error handling.
*
* Copyright (C) 2002, 2003 Ladislav Michl (ladis@linux-mips.org)
* Copyright (C) 2005 Peter Fuerst (pf@net.alphadv.de) - IP28
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/addrspace.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/branch.h>
#include <asm/irq_regs.h>
#include <asm/sgi/mc.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ioc.h>
#include <asm/sgi/ip22.h>
#include <asm/r4kcache.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
static unsigned int count_be_is_fixup;
static unsigned int count_be_handler;
static unsigned int count_be_interrupt;
static int debug_be_interrupt;
static unsigned int cpu_err_stat; /* Status reg for CPU */
static unsigned int gio_err_stat; /* Status reg for GIO */
static unsigned int cpu_err_addr; /* Error address reg for CPU */
static unsigned int gio_err_addr; /* Error address reg for GIO */
static unsigned int extio_stat;
static unsigned int hpc3_berr_stat; /* Bus error interrupt status */
struct hpc3_stat {
unsigned long addr;
unsigned int ctrl;
unsigned int cbp;
unsigned int ndptr;
};
static struct {
struct hpc3_stat pbdma[8];
struct hpc3_stat scsi[2];
struct hpc3_stat ethrx, ethtx;
} hpc3;
static struct {
unsigned long err_addr;
struct {
u32 lo;
u32 hi;
} tags[1][2], tagd[4][2], tagi[4][2]; /* Way 0/1 */
} cache_tags;
static inline void save_cache_tags(unsigned busaddr)
{
unsigned long addr = CAC_BASE | busaddr;
int i;
cache_tags.err_addr = addr;
/*
* Starting with a bus-address, save secondary cache (indexed by
* PA[23..18:7..6]) tags first.
*/
addr &= ~1L;
#define tag cache_tags.tags[0]
cache_op(Index_Load_Tag_S, addr);
tag[0].lo = read_c0_taglo(); /* PA[35:18], VA[13:12] */
tag[0].hi = read_c0_taghi(); /* PA[39:36] */
cache_op(Index_Load_Tag_S, addr | 1L);
tag[1].lo = read_c0_taglo(); /* PA[35:18], VA[13:12] */
tag[1].hi = read_c0_taghi(); /* PA[39:36] */
#undef tag
/*
* Save all primary data cache (indexed by VA[13:5]) tags which
* might fit to this bus-address, knowing that VA[11:0] == PA[11:0].
* Saving all tags and evaluating them later is easier and safer
* than relying on VA[13:12] from the secondary cache tags to pick
* matching primary tags here already.
*/
addr &= (0xffL << 56) | ((1 << 12) - 1);
#define tag cache_tags.tagd[i]
for (i = 0; i < 4; ++i, addr += (1 << 12)) {
cache_op(Index_Load_Tag_D, addr);
tag[0].lo = read_c0_taglo(); /* PA[35:12] */
tag[0].hi = read_c0_taghi(); /* PA[39:36] */
cache_op(Index_Load_Tag_D, addr | 1L);
tag[1].lo = read_c0_taglo(); /* PA[35:12] */
tag[1].hi = read_c0_taghi(); /* PA[39:36] */
}
#undef tag
/*
* Save primary instruction cache (indexed by VA[13:6]) tags
* the same way.
*/
addr &= (0xffL << 56) | ((1 << 12) - 1);
#define tag cache_tags.tagi[i]
for (i = 0; i < 4; ++i, addr += (1 << 12)) {
cache_op(Index_Load_Tag_I, addr);
tag[0].lo = read_c0_taglo(); /* PA[35:12] */
tag[0].hi = read_c0_taghi(); /* PA[39:36] */
cache_op(Index_Load_Tag_I, addr | 1L);
tag[1].lo = read_c0_taglo(); /* PA[35:12] */
tag[1].hi = read_c0_taghi(); /* PA[39:36] */
}
#undef tag
}
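/*
 * Sketch of the indexing assumption behind save_cache_tags() above: with
 * 4 KiB pages, VA[11:0] == PA[11:0], so when walking the virtually indexed
 * primary caches only VA[13:12] (four possibilities) is unknown. The
 * helper is illustrative only and #if 0'd out.
 */
#if 0
static void example_primary_cache_indexes(unsigned long busaddr)
{
	unsigned long low = busaddr & ((1 << 12) - 1);	/* known VA[11:0] */
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long va_index = low | ((unsigned long) i << 12);
		/* probe the D/I tags at va_index, as the code above does */
		(void) va_index;
	}
}
#endif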
#define GIO_ERRMASK 0xff00
#define CPU_ERRMASK 0x3f00
static void save_and_clear_buserr(void)
{
int i;
/* save status registers */
cpu_err_addr = sgimc->cerr;
cpu_err_stat = sgimc->cstat;
gio_err_addr = sgimc->gerr;
gio_err_stat = sgimc->gstat;
extio_stat = sgioc->extio;
hpc3_berr_stat = hpc3c0->bestat;
hpc3.scsi[0].addr = (unsigned long)&hpc3c0->scsi_chan0;
hpc3.scsi[0].ctrl = hpc3c0->scsi_chan0.ctrl; /* HPC3_SCTRL_ACTIVE ? */
hpc3.scsi[0].cbp = hpc3c0->scsi_chan0.cbptr;
hpc3.scsi[0].ndptr = hpc3c0->scsi_chan0.ndptr;
hpc3.scsi[1].addr = (unsigned long)&hpc3c0->scsi_chan1;
hpc3.scsi[1].ctrl = hpc3c0->scsi_chan1.ctrl; /* HPC3_SCTRL_ACTIVE ? */
hpc3.scsi[1].cbp = hpc3c0->scsi_chan1.cbptr;
hpc3.scsi[1].ndptr = hpc3c0->scsi_chan1.ndptr;
hpc3.ethrx.addr = (unsigned long)&hpc3c0->ethregs.rx_cbptr;
hpc3.ethrx.ctrl = hpc3c0->ethregs.rx_ctrl; /* HPC3_ERXCTRL_ACTIVE ? */
hpc3.ethrx.cbp = hpc3c0->ethregs.rx_cbptr;
hpc3.ethrx.ndptr = hpc3c0->ethregs.rx_ndptr;
hpc3.ethtx.addr = (unsigned long)&hpc3c0->ethregs.tx_cbptr;
hpc3.ethtx.ctrl = hpc3c0->ethregs.tx_ctrl; /* HPC3_ETXCTRL_ACTIVE ? */
hpc3.ethtx.cbp = hpc3c0->ethregs.tx_cbptr;
hpc3.ethtx.ndptr = hpc3c0->ethregs.tx_ndptr;
for (i = 0; i < 8; ++i) {
/* HPC3_PDMACTRL_ISACT ? */
hpc3.pbdma[i].addr = (unsigned long)&hpc3c0->pbdma[i];
hpc3.pbdma[i].ctrl = hpc3c0->pbdma[i].pbdma_ctrl;
hpc3.pbdma[i].cbp = hpc3c0->pbdma[i].pbdma_bptr;
hpc3.pbdma[i].ndptr = hpc3c0->pbdma[i].pbdma_dptr;
}
i = 0;
if (gio_err_stat & CPU_ERRMASK)
i = gio_err_addr;
if (cpu_err_stat & CPU_ERRMASK)
i = cpu_err_addr;
save_cache_tags(i);
sgimc->cstat = sgimc->gstat = 0;
}
static void print_cache_tags(void)
{
u32 scb, scw;
int i;
printk(KERN_ERR "Cache tags @ %08x:\n", (unsigned)cache_tags.err_addr);
/* PA[31:12] shifted to PTag0 (PA[35:12]) format */
scw = (cache_tags.err_addr >> 4) & 0x0fffff00;
scb = cache_tags.err_addr & ((1 << 12) - 1) & ~((1 << 5) - 1);
for (i = 0; i < 4; ++i) { /* for each possible VA[13:12] value */
if ((cache_tags.tagd[i][0].lo & 0x0fffff00) != scw &&
(cache_tags.tagd[i][1].lo & 0x0fffff00) != scw)
continue;
printk(KERN_ERR
"D: 0: %08x %08x, 1: %08x %08x (VA[13:5] %04x)\n",
cache_tags.tagd[i][0].hi, cache_tags.tagd[i][0].lo,
cache_tags.tagd[i][1].hi, cache_tags.tagd[i][1].lo,
scb | (1 << 12)*i);
}
scb = cache_tags.err_addr & ((1 << 12) - 1) & ~((1 << 6) - 1);
for (i = 0; i < 4; ++i) { /* for each possible VA[13:12] value */
if ((cache_tags.tagi[i][0].lo & 0x0fffff00) != scw &&
(cache_tags.tagi[i][1].lo & 0x0fffff00) != scw)
continue;
printk(KERN_ERR
"I: 0: %08x %08x, 1: %08x %08x (VA[13:6] %04x)\n",
cache_tags.tagi[i][0].hi, cache_tags.tagi[i][0].lo,
cache_tags.tagi[i][1].hi, cache_tags.tagi[i][1].lo,
scb | (1 << 12)*i);
}
i = read_c0_config();
scb = i & (1 << 13) ? 7:6; /* scblksize = 2^[7..6] */
scw = ((i >> 16) & 7) + 19 - 1; /* scwaysize = 2^[24..19] / 2 */
i = ((1 << scw) - 1) & ~((1 << scb) - 1);
printk(KERN_ERR "S: 0: %08x %08x, 1: %08x %08x (PA[%u:%u] %05x)\n",
cache_tags.tags[0][0].hi, cache_tags.tags[0][0].lo,
cache_tags.tags[0][1].hi, cache_tags.tags[0][1].lo,
scw-1, scb, i & (unsigned)cache_tags.err_addr);
}
static inline const char *cause_excode_text(int cause)
{
static const char *txt[32] =
{ "Interrupt",
"TLB modification",
"TLB (load or instruction fetch)",
"TLB (store)",
"Address error (load or instruction fetch)",
"Address error (store)",
"Bus error (instruction fetch)",
"Bus error (data: load or store)",
"Syscall",
"Breakpoint",
"Reserved instruction",
"Coprocessor unusable",
"Arithmetic Overflow",
"Trap",
"14",
"Floating-Point",
"16", "17", "18", "19", "20", "21", "22",
"Watch Hi/Lo",
"24", "25", "26", "27", "28", "29", "30", "31",
};
return txt[(cause & 0x7c) >> 2];
}
static void print_buserr(const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
int error = 0;
if (extio_stat & EXTIO_MC_BUSERR) {
printk(KERN_ERR "MC Bus Error\n");
error |= 1;
}
if (extio_stat & EXTIO_HPC3_BUSERR) {
printk(KERN_ERR "HPC3 Bus Error 0x%x:<id=0x%x,%s,lane=0x%x>\n",
hpc3_berr_stat,
(hpc3_berr_stat & HPC3_BESTAT_PIDMASK) >>
HPC3_BESTAT_PIDSHIFT,
(hpc3_berr_stat & HPC3_BESTAT_CTYPE) ? "PIO" : "DMA",
hpc3_berr_stat & HPC3_BESTAT_BLMASK);
error |= 2;
}
if (extio_stat & EXTIO_EISA_BUSERR) {
printk(KERN_ERR "EISA Bus Error\n");
error |= 4;
}
if (cpu_err_stat & CPU_ERRMASK) {
printk(KERN_ERR "CPU error 0x%x<%s%s%s%s%s%s> @ 0x%08x\n",
cpu_err_stat,
cpu_err_stat & SGIMC_CSTAT_RD ? "RD " : "",
cpu_err_stat & SGIMC_CSTAT_PAR ? "PAR " : "",
cpu_err_stat & SGIMC_CSTAT_ADDR ? "ADDR " : "",
cpu_err_stat & SGIMC_CSTAT_SYSAD_PAR ? "SYSAD " : "",
cpu_err_stat & SGIMC_CSTAT_SYSCMD_PAR ? "SYSCMD " : "",
cpu_err_stat & SGIMC_CSTAT_BAD_DATA ? "BAD_DATA " : "",
cpu_err_addr);
error |= 8;
}
if (gio_err_stat & GIO_ERRMASK) {
printk(KERN_ERR "GIO error 0x%x:<%s%s%s%s%s%s%s%s> @ 0x%08x\n",
gio_err_stat,
gio_err_stat & SGIMC_GSTAT_RD ? "RD " : "",
gio_err_stat & SGIMC_GSTAT_WR ? "WR " : "",
gio_err_stat & SGIMC_GSTAT_TIME ? "TIME " : "",
gio_err_stat & SGIMC_GSTAT_PROM ? "PROM " : "",
gio_err_stat & SGIMC_GSTAT_ADDR ? "ADDR " : "",
gio_err_stat & SGIMC_GSTAT_BC ? "BC " : "",
gio_err_stat & SGIMC_GSTAT_PIO_RD ? "PIO_RD " : "",
gio_err_stat & SGIMC_GSTAT_PIO_WR ? "PIO_WR " : "",
gio_err_addr);
error |= 16;
}
if (!error)
printk(KERN_ERR "MC: Hmm, didn't find any error condition.\n");
else {
printk(KERN_ERR "CP0: config %08x, "
"MC: cpuctrl0/1: %08x/%05x, giopar: %04x\n"
"MC: cpu/gio_memacc: %08x/%05x, memcfg0/1: %08x/%08x\n",
read_c0_config(),
sgimc->cpuctrl0, sgimc->cpuctrl1, sgimc->giopar,
sgimc->cmacc, sgimc->gmacc,
sgimc->mconfig0, sgimc->mconfig1);
print_cache_tags();
}
printk(KERN_ALERT "%s, epc == %0*lx, ra == %0*lx\n",
cause_excode_text(regs->cp0_cause),
field, regs->cp0_epc, field, regs->regs[31]);
}
/*
* Check, whether MC's (virtual) DMA address caused the bus error.
* See "Virtual DMA Specification", Draft 1.5, Feb 13 1992, SGI
*/
static int addr_is_ram(unsigned long addr, unsigned sz)
{
int i;
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long a = boot_mem_map.map[i].addr;
if (a <= addr && addr+sz <= a+boot_mem_map.map[i].size)
return 1;
}
return 0;
}
static int check_microtlb(u32 hi, u32 lo, unsigned long vaddr)
{
/* This is likely rather similar to correct code ;-) */
vaddr &= 0x7fffffff; /* Doc. states that top bit is ignored */
/* If tlb-entry is valid and VPN-high (bits [30:21] ?) matches... */
if ((lo & 2) && (vaddr >> 21) == ((hi<<1) >> 22)) {
u32 ctl = sgimc->dma_ctrl;
if (ctl & 1) {
unsigned int pgsz = (ctl & 2) ? 14:12; /* 16k:4k */
/* PTEIndex is VPN-low (bits [22:14]/[20:12] ?) */
unsigned long pte = (lo >> 6) << 12; /* PTEBase */
pte += 8*((vaddr >> pgsz) & 0x1ff);
if (addr_is_ram(pte, 8)) {
/*
* Note: Since DMA hardware does look up
* translation on its own, this PTE *must*
* match the TLB/EntryLo-register format !
*/
unsigned long a = *(unsigned long *)
PHYS_TO_XKSEG_UNCACHED(pte);
a = (a & 0x3f) << 6; /* PFN */
a += vaddr & ((1 << pgsz) - 1);
return (cpu_err_addr == a);
}
}
}
return 0;
}
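/*
 * Sketch of the PTE address computed in check_microtlb() above: PTEBase
 * comes from the micro-TLB entry (lo >> 6, in 4 KiB units) and PTEIndex
 * from the low VPN bits of the DMA address; PTEs are 8 bytes each.
 * Hypothetical helper, #if 0'd out:
 */
#if 0
static unsigned long example_vdma_pte_addr(u32 lo, unsigned long vaddr,
					   unsigned int pgsz)
{
	unsigned long pte = ((unsigned long) (lo >> 6)) << 12;	/* PTEBase */

	return pte + 8 * ((vaddr >> pgsz) & 0x1ff);	/* + PTEIndex * 8 */
}
#endif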
static int check_vdma_memaddr(void)
{
if (cpu_err_stat & CPU_ERRMASK) {
u32 a = sgimc->maddronly;
if (!(sgimc->dma_ctrl & 0x100)) /* Xlate-bit clear ? */
return (cpu_err_addr == a);
if (check_microtlb(sgimc->dtlb_hi0, sgimc->dtlb_lo0, a) ||
check_microtlb(sgimc->dtlb_hi1, sgimc->dtlb_lo1, a) ||
check_microtlb(sgimc->dtlb_hi2, sgimc->dtlb_lo2, a) ||
check_microtlb(sgimc->dtlb_hi3, sgimc->dtlb_lo3, a))
return 1;
}
return 0;
}
static int check_vdma_gioaddr(void)
{
if (gio_err_stat & GIO_ERRMASK) {
u32 a = sgimc->gio_dma_trans;
a = (sgimc->gmaddronly & ~a) | (sgimc->gio_dma_sbits & a);
return (gio_err_addr == a);
}
return 0;
}
/*
* MC sends an interrupt whenever bus or parity errors occur. In addition,
* if the error happened during a CPU read, it also asserts the bus error
* pin on the R4K. The bus error handler saves the MC bus error registers
* and then clears the interrupt when this happens.
*/
static int ip28_be_interrupt(const struct pt_regs *regs)
{
int i;
save_and_clear_buserr();
/*
* Try to find out, whether we got here by a mispredicted speculative
* load/store operation. If so, it's not fatal, we can go on.
*/
/* Any cause other than "Interrupt" (ExcCode 0) is fatal. */
if (regs->cp0_cause & CAUSEF_EXCCODE)
goto mips_be_fatal;
/* Any cause other than "Bus error interrupt" (IP6) is weird. */
if ((regs->cp0_cause & CAUSEF_IP6) != CAUSEF_IP6)
goto mips_be_fatal;
if (extio_stat & (EXTIO_HPC3_BUSERR | EXTIO_EISA_BUSERR))
goto mips_be_fatal;
/* Any state other than "Memory bus error" is fatal. */
if (cpu_err_stat & CPU_ERRMASK & ~SGIMC_CSTAT_ADDR)
goto mips_be_fatal;
/* GIO errors other than timeouts are fatal */
if (gio_err_stat & GIO_ERRMASK & ~SGIMC_GSTAT_TIME)
goto mips_be_fatal;
/*
* Now we have an asynchronous bus error, speculatively or DMA caused.
* Need to search all DMA descriptors for the error address.
*/
for (i = 0; i < sizeof(hpc3)/sizeof(struct hpc3_stat); ++i) {
struct hpc3_stat *hp = (struct hpc3_stat *)&hpc3 + i;
if ((cpu_err_stat & CPU_ERRMASK) &&
(cpu_err_addr == hp->ndptr || cpu_err_addr == hp->cbp))
break;
if ((gio_err_stat & GIO_ERRMASK) &&
(gio_err_addr == hp->ndptr || gio_err_addr == hp->cbp))
break;
}
if (i < sizeof(hpc3)/sizeof(struct hpc3_stat)) {
struct hpc3_stat *hp = (struct hpc3_stat *)&hpc3 + i;
printk(KERN_ERR "at DMA addresses: HPC3 @ %08lx:"
" ctl %08x, ndp %08x, cbp %08x\n",
CPHYSADDR(hp->addr), hp->ctrl, hp->ndptr, hp->cbp);
goto mips_be_fatal;
}
/* Check MC's virtual DMA stuff. */
if (check_vdma_memaddr()) {
printk(KERN_ERR "at GIO DMA: mem address 0x%08x.\n",
sgimc->maddronly);
goto mips_be_fatal;
}
if (check_vdma_gioaddr()) {
printk(KERN_ERR "at GIO DMA: gio address 0x%08x.\n",
sgimc->gmaddronly);
goto mips_be_fatal;
}
/* A speculative bus error... */
if (debug_be_interrupt) {
print_buserr(regs);
printk(KERN_ERR "discarded!\n");
}
return MIPS_BE_DISCARD;
mips_be_fatal:
print_buserr(regs);
return MIPS_BE_FATAL;
}
void ip22_be_interrupt(int irq)
{
struct pt_regs *regs = get_irq_regs();
count_be_interrupt++;
if (ip28_be_interrupt(regs) != MIPS_BE_DISCARD) {
/* Assume it would be too dangerous to continue ... */
die_if_kernel("Oops", regs);
force_sig(SIGBUS, current);
} else if (debug_be_interrupt)
show_regs((struct pt_regs *)regs);
}
static int ip28_be_handler(struct pt_regs *regs, int is_fixup)
{
/*
* We arrive here only in the unusual case of do_be() invocation,
* i.e. by a bus error exception without a bus error interrupt.
*/
if (is_fixup) {
count_be_is_fixup++;
save_and_clear_buserr();
return MIPS_BE_FIXUP;
}
count_be_handler++;
return ip28_be_interrupt(regs);
}
void __init ip22_be_init(void)
{
board_be_handler = ip28_be_handler;
}
int ip28_show_be_info(struct seq_file *m)
{
seq_printf(m, "IP28 be fixups\t\t: %u\n", count_be_is_fixup);
seq_printf(m, "IP28 be interrupts\t: %u\n", count_be_interrupt);
seq_printf(m, "IP28 be handler\t\t: %u\n", count_be_handler);
return 0;
}
static int __init debug_be_setup(char *str)
{
debug_be_interrupt++;
return 1;
}
__setup("ip28_debug_be", debug_be_setup);
| gpl-2.0 |
CyanogenMod/lge-kernel-p880 | arch/powerpc/platforms/cell/spu_notify.c | 4548 | 1999 | /*
* Move OProfile dependencies from spufs module to the kernel so it
* can run on non-cell PPC.
*
* Copyright (C) IBM 2005
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#undef DEBUG
#include <linux/module.h>
#include <asm/spu.h>
#include "spufs/spufs.h"
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
blocking_notifier_call_chain(&spu_switch_notifier,
ctx ? ctx->object_id : 0, spu);
}
EXPORT_SYMBOL_GPL(spu_switch_notify);
int spu_switch_event_register(struct notifier_block *n)
{
int ret;
ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
if (!ret)
notify_spus_active();
return ret;
}
EXPORT_SYMBOL_GPL(spu_switch_event_register);
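/*
 * Usage sketch for the switch-notify interface above: a client (e.g. a
 * profiler) registers a notifier_block whose callback receives the
 * context's object_id and the struct spu pointer. Hypothetical client
 * code, #if 0'd out:
 */
#if 0
static int example_switch_event(struct notifier_block *n,
				unsigned long object_id, void *spu)
{
	/* object_id is 0 when the SPU is switched away from any context */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_switch_event,
};

static int example_register(void)
{
	return spu_switch_event_register(&example_nb);
}
#endif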
int spu_switch_event_unregister(struct notifier_block *n)
{
return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
EXPORT_SYMBOL_GPL(spu_switch_event_unregister);
void spu_set_profile_private_kref(struct spu_context *ctx,
struct kref *prof_info_kref,
void (* prof_info_release) (struct kref *kref))
{
ctx->prof_priv_kref = prof_info_kref;
ctx->prof_priv_release = prof_info_release;
}
EXPORT_SYMBOL_GPL(spu_set_profile_private_kref);
void *spu_get_profile_private_kref(struct spu_context *ctx)
{
return ctx->prof_priv_kref;
}
EXPORT_SYMBOL_GPL(spu_get_profile_private_kref);
| gpl-2.0 |
stariver/qt210-kernel | arch/m68k/platform/5307/config.c | 4548 | 4030 | /***************************************************************************/
/*
* linux/arch/m68knommu/platform/5307/config.c
*
* Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 2000, Lineo (www.lineo.com)
*/
/***************************************************************************/
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/mcfwdebug.h>
/***************************************************************************/
/*
* Some platforms need software versions of the GPIO data registers.
*/
unsigned short ppdata;
unsigned char ledbank = 0xff;
/***************************************************************************/
static struct mcf_platform_uart m5307_uart_platform[] = {
{
.mapbase = MCF_MBAR + MCFUART_BASE1,
.irq = 73,
},
{
.mapbase = MCF_MBAR + MCFUART_BASE2,
.irq = 74,
},
{ },
};
static struct platform_device m5307_uart = {
.name = "mcfuart",
.id = 0,
.dev.platform_data = m5307_uart_platform,
};
static struct platform_device *m5307_devices[] __initdata = {
&m5307_uart,
};
/***************************************************************************/
static void __init m5307_uart_init_line(int line, int irq)
{
if (line == 0) {
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE1 + MCFUART_UIVR);
mcf_mapirq2imr(irq, MCFINTC_UART0);
} else if (line == 1) {
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE2 + MCFUART_UIVR);
mcf_mapirq2imr(irq, MCFINTC_UART1);
}
}
static void __init m5307_uarts_init(void)
{
const int nrlines = ARRAY_SIZE(m5307_uart_platform);
int line;
for (line = 0; (line < nrlines); line++)
m5307_uart_init_line(line, m5307_uart_platform[line].irq);
}
/***************************************************************************/
static void __init m5307_timers_init(void)
{
/* Timer1 is always used as system timer */
writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
MCF_MBAR + MCFSIM_TIMER1ICR);
mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);
#ifdef CONFIG_HIGHPROFILE
/* Timer2 is to be used as a high speed profile timer */
writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
MCF_MBAR + MCFSIM_TIMER2ICR);
mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
#endif
}
/***************************************************************************/
void m5307_cpu_reset(void)
{
local_irq_disable();
/* Set watchdog to soft reset, and enabled */
__raw_writeb(0xc0, MCF_MBAR + MCFSIM_SYPCR);
for (;;)
/* wait for watchdog to timeout */;
}
/***************************************************************************/
void __init config_BSP(char *commandp, int size)
{
#if defined(CONFIG_NETtel) || \
defined(CONFIG_SECUREEDGEMP3) || defined(CONFIG_CLEOPATRA)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0xf0004000, size);
commandp[size-1] = 0;
#endif
mach_reset = m5307_cpu_reset;
m5307_timers_init();
m5307_uarts_init();
/* Only support the external interrupts on their primary level */
mcf_mapirq2imr(25, MCFINTC_EINT1);
mcf_mapirq2imr(27, MCFINTC_EINT3);
mcf_mapirq2imr(29, MCFINTC_EINT5);
mcf_mapirq2imr(31, MCFINTC_EINT7);
#ifdef CONFIG_BDM_DISABLE
/*
* Disable the BDM clocking. This also turns off most of the rest of
* the BDM device. This is good for EMC reasons. This option is not
* incompatible with the memory protection option.
*/
wdebug(MCFDEBUG_CSR, MCFDEBUG_CSR_PSTCLK);
#endif
}
/***************************************************************************/
static int __init init_BSP(void)
{
platform_add_devices(m5307_devices, ARRAY_SIZE(m5307_devices));
return 0;
}
arch_initcall(init_BSP);
/***************************************************************************/
| gpl-2.0 |
Keeperv85/Blade_G_kernel | drivers/acpi/acpica/utxface.c | 4548 | 19635 | /******************************************************************************
*
* Module Name: utxface - External interfaces for "global" ACPI functions
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
#include "acdebug.h"
#include "actables.h"
#include "acinterp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utxface")
#ifndef ACPI_ASL_COMPILER
/*******************************************************************************
*
* FUNCTION: acpi_initialize_subsystem
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Initializes all global variables. This is the first function
* called, so any early initialization belongs here.
*
******************************************************************************/
acpi_status __init acpi_initialize_subsystem(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
/* Initialize the OS-Dependent layer */
status = acpi_os_initialize();
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization"));
return_ACPI_STATUS(status);
}
/* Initialize all globals used by the subsystem */
status = acpi_ut_init_globals();
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"During initialization of globals"));
return_ACPI_STATUS(status);
}
/* Create the default mutex objects */
status = acpi_ut_mutex_initialize();
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"During Global Mutex creation"));
return_ACPI_STATUS(status);
}
/*
* Initialize the namespace manager and
* the root of the namespace tree
*/
status = acpi_ns_root_initialize();
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"During Namespace initialization"));
return_ACPI_STATUS(status);
}
/* Initialize the global OSI interfaces list with the static names */
status = acpi_ut_initialize_interfaces();
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"During OSI interfaces initialization"));
return_ACPI_STATUS(status);
}
/* If configured, initialize the AML debugger */
ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
return_ACPI_STATUS(status);
}
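/*
* Illustrative sketch of the typical ACPICA bring-up order built around
* this function, assuming only the public acpixf.h entry points; error
* handling is abbreviated and the table count (16) is an arbitrary choice.
*/
#if 0
static acpi_status example_bring_up_acpica(void)
{
acpi_status status;
status = acpi_initialize_subsystem(); /* globals, mutexes, namespace root */
if (ACPI_FAILURE(status))
return status;
status = acpi_initialize_tables(NULL, 16, FALSE); /* locate RSDT/XSDT */
if (ACPI_FAILURE(status))
return status;
status = acpi_load_tables(); /* load DSDT/SSDTs into the namespace */
if (ACPI_FAILURE(status))
return status;
status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION); /* hw + events */
if (ACPI_FAILURE(status))
return status;
return acpi_initialize_objects(ACPI_FULL_INITIALIZATION); /* _REG/_INI */
}
#endif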
/*******************************************************************************
*
* FUNCTION: acpi_enable_subsystem
*
* PARAMETERS: Flags - Init/enable Options
*
* RETURN: Status
*
* DESCRIPTION: Completes the subsystem initialization including hardware.
* Puts system into ACPI mode if it isn't already.
*
******************************************************************************/
acpi_status acpi_enable_subsystem(u32 flags)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
#if (!ACPI_REDUCED_HARDWARE)
/* Enable ACPI mode */
if (!(flags & ACPI_NO_ACPI_ENABLE)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[Init] Going into ACPI mode\n"));
acpi_gbl_original_mode = acpi_hw_get_mode();
status = acpi_enable();
if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
return_ACPI_STATUS(status);
}
}
/*
* Obtain a permanent mapping for the FACS. This is required for the
* Global Lock and the Firmware Waking Vector
*/
status = acpi_tb_initialize_facs();
if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
return_ACPI_STATUS(status);
}
#endif /* !ACPI_REDUCED_HARDWARE */
/*
* Install the default op_region handlers. These are installed unless
* other handlers have already been installed via the
* install_address_space_handler interface.
*/
if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[Init] Installing default address space handlers\n"));
status = acpi_ev_install_region_handlers();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
#if (!ACPI_REDUCED_HARDWARE)
/*
* Initialize ACPI Event handling (Fixed and General Purpose)
*
* Note1: We must have the hardware and events initialized before we can
* execute any control methods safely. Any control method can require
* ACPI hardware support, so the hardware must be fully initialized before
* any method execution!
*
* Note2: Fixed events are initialized and enabled here. GPEs are
* initialized, but cannot be enabled until after the hardware is
* completely initialized (SCI and global_lock activated)
*/
if (!(flags & ACPI_NO_EVENT_INIT)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[Init] Initializing ACPI events\n"));
status = acpi_ev_initialize_events();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/*
* Install the SCI handler and Global Lock handler. This completes the
* hardware initialization.
*/
if (!(flags & ACPI_NO_HANDLER_INIT)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[Init] Installing SCI/GL handlers\n"));
status = acpi_ev_install_xrupt_handlers();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
#endif /* !ACPI_REDUCED_HARDWARE */
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
/*******************************************************************************
*
* FUNCTION: acpi_initialize_objects
*
* PARAMETERS: Flags - Init/enable Options
*
* RETURN: Status
*
* DESCRIPTION: Completes namespace initialization by initializing device
* objects and executing AML code for Regions, buffers, etc.
*
******************************************************************************/
acpi_status acpi_initialize_objects(u32 flags)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(acpi_initialize_objects);
/*
* Run all _REG methods
*
* Note: Any objects accessed by the _REG methods will be automatically
* initialized, even if they contain executable AML (see the call to
* acpi_ns_initialize_objects below).
*/
if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[Init] Executing _REG OpRegion methods\n"));
status = acpi_ev_initialize_op_regions();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/*
* Execute any module-level code that was detected during the table load
* phase. Although illegal since ACPI 2.0, there are many machines that
* contain this type of code. Each block of detected executable AML code
* outside of any control method is wrapped with a temporary control
* method object and placed on a global list. The methods on this list
* are executed below.
*/
acpi_ns_exec_module_code_list();
/*
* Initialize the objects that remain uninitialized. This runs the
* executable AML that may be part of the declaration of these objects:
* operation_regions, buffer_fields, Buffers, and Packages.
*/
if (!(flags & ACPI_NO_OBJECT_INIT)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[Init] Completing Initialization of ACPI Objects\n"));
status = acpi_ns_initialize_objects();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/*
* Initialize all device objects in the namespace. This runs the device
* _STA and _INI methods.
*/
if (!(flags & ACPI_NO_DEVICE_INIT)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[Init] Initializing ACPI Devices\n"));
status = acpi_ns_initialize_devices();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/*
* Empty the caches (delete the cached objects) on the assumption that
* the table load filled them up more than they will be at runtime --
* thus wasting non-paged memory.
*/
status = acpi_purge_cached_objects();
acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
#endif
/*******************************************************************************
*
* FUNCTION: acpi_terminate
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Shutdown the ACPICA subsystem and release all resources.
*
******************************************************************************/
acpi_status acpi_terminate(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_terminate);
/* Just exit if subsystem is already shutdown */
if (acpi_gbl_shutdown) {
ACPI_ERROR((AE_INFO, "ACPI Subsystem is already terminated"));
return_ACPI_STATUS(AE_OK);
}
/* Subsystem appears active, go ahead and shut it down */
acpi_gbl_shutdown = TRUE;
acpi_gbl_startup_flags = 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n"));
/* Terminate the AML Debugger if present */
ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = TRUE);
/* Shutdown and free all resources */
acpi_ut_subsystem_shutdown();
/* Free the mutex objects */
acpi_ut_mutex_terminate();
#ifdef ACPI_DEBUGGER
/* Shut down the debugger */
acpi_db_terminate();
#endif
/* Now we can shutdown the OS-dependent layer */
status = acpi_os_terminate();
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_terminate)
#ifndef ACPI_ASL_COMPILER
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
* FUNCTION: acpi_subsystem_status
*
* PARAMETERS: None
*
* RETURN: Status of the ACPI subsystem
*
* DESCRIPTION: Other drivers that use the ACPI subsystem should call this
* before making any other calls, to ensure the subsystem
* initialized successfully.
*
******************************************************************************/
acpi_status acpi_subsystem_status(void)
{
if (acpi_gbl_startup_flags & ACPI_INITIALIZED_OK) {
return (AE_OK);
} else {
return (AE_ERROR);
}
}
ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
/*******************************************************************************
*
* FUNCTION: acpi_get_system_info
*
* PARAMETERS: out_buffer - A buffer to receive the system information
*
* RETURN: Status - the status of the call
*
* DESCRIPTION: This function is called to get information about the current
* state of the ACPI subsystem. It will return system information
* in the out_buffer.
*
* If the function fails an appropriate status will be returned
* and the value of out_buffer is undefined.
*
******************************************************************************/
acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
{
struct acpi_system_info *info_ptr;
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_get_system_info);
/* Parameter validation */
status = acpi_ut_validate_buffer(out_buffer);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Validate/Allocate/Clear caller buffer */
status =
acpi_ut_initialize_buffer(out_buffer,
sizeof(struct acpi_system_info));
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/*
* Populate the return buffer
*/
info_ptr = (struct acpi_system_info *)out_buffer->pointer;
info_ptr->acpi_ca_version = ACPI_CA_VERSION;
/* System flags (ACPI capabilities) */
info_ptr->flags = ACPI_SYS_MODE_ACPI;
/* Timer resolution - 24 or 32 bits */
if (acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) {
info_ptr->timer_resolution = 24;
} else {
info_ptr->timer_resolution = 32;
}
/* Clear the reserved fields */
info_ptr->reserved1 = 0;
info_ptr->reserved2 = 0;
/* Current debug levels */
info_ptr->debug_layer = acpi_dbg_layer;
info_ptr->debug_level = acpi_dbg_level;
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_get_system_info)
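/*
* Illustrative sketch of the usual caller pattern for the function above,
* assuming ACPI_ALLOCATE_BUFFER: ACPICA sizes and allocates the buffer,
* and the caller owns (and must free) the returned pointer.
*/
#if 0
static void example_dump_system_info(void)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_system_info *info;
if (ACPI_FAILURE(acpi_get_system_info(&buffer)))
return;
info = buffer.pointer;
/* e.g. consume info->acpi_ca_version, info->timer_resolution, ... */
ACPI_FREE(buffer.pointer);
}
#endif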
/*****************************************************************************
*
* FUNCTION: acpi_install_initialization_handler
*
* PARAMETERS: Handler - Callback procedure
* Function - Not (currently) used, see below
*
* RETURN: Status
*
* DESCRIPTION: Install an initialization handler
*
* TBD: When a second function is added, the Function must be saved too.
*
****************************************************************************/
acpi_status
acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
{
if (!handler) {
return (AE_BAD_PARAMETER);
}
if (acpi_gbl_init_handler) {
return (AE_ALREADY_EXISTS);
}
acpi_gbl_init_handler = handler;
return AE_OK;
}
ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
#endif /* ACPI_FUTURE_USAGE */
/*****************************************************************************
*
* FUNCTION: acpi_purge_cached_objects
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Empty all caches (delete the cached objects)
*
****************************************************************************/
acpi_status acpi_purge_cached_objects(void)
{
ACPI_FUNCTION_TRACE(acpi_purge_cached_objects);
(void)acpi_os_purge_cache(acpi_gbl_state_cache);
(void)acpi_os_purge_cache(acpi_gbl_operand_cache);
(void)acpi_os_purge_cache(acpi_gbl_ps_node_cache);
(void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache);
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
/*****************************************************************************
*
* FUNCTION: acpi_install_interface
*
* PARAMETERS: interface_name - The interface to install
*
* RETURN: Status
*
* DESCRIPTION: Install an _OSI interface to the global list
*
****************************************************************************/
acpi_status acpi_install_interface(acpi_string interface_name)
{
acpi_status status;
struct acpi_interface_info *interface_info;
/* Parameter validation */
if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
return (AE_BAD_PARAMETER);
}
(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
/* Check if the interface name is already in the global list */
interface_info = acpi_ut_get_interface(interface_name);
if (interface_info) {
/*
* The interface already exists in the list. This is OK if the
* interface has been marked invalid -- just clear the bit.
*/
if (interface_info->flags & ACPI_OSI_INVALID) {
interface_info->flags &= ~ACPI_OSI_INVALID;
status = AE_OK;
} else {
status = AE_ALREADY_EXISTS;
}
} else {
/* New interface name, install into the global list */
status = acpi_ut_install_interface(interface_name);
}
acpi_os_release_mutex(acpi_gbl_osi_mutex);
return (status);
}
ACPI_EXPORT_SYMBOL(acpi_install_interface)
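/*
* Illustrative sketch: advertising an extra _OSI string so firmware can
* probe for it. The interface name here is made up; real callers pass
* OS or feature identification strings.
*/
#if 0
static acpi_status example_advertise_osi_string(void)
{
return acpi_install_interface("Example-Host-Feature");
}
#endif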
/*****************************************************************************
*
* FUNCTION: acpi_remove_interface
*
* PARAMETERS: interface_name - The interface to remove
*
* RETURN: Status
*
* DESCRIPTION: Remove an _OSI interface from the global list
*
****************************************************************************/
acpi_status acpi_remove_interface(acpi_string interface_name)
{
acpi_status status;
/* Parameter validation */
if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
return (AE_BAD_PARAMETER);
}
(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
status = acpi_ut_remove_interface(interface_name);
acpi_os_release_mutex(acpi_gbl_osi_mutex);
return (status);
}
ACPI_EXPORT_SYMBOL(acpi_remove_interface)
/*****************************************************************************
*
* FUNCTION: acpi_install_interface_handler
*
* PARAMETERS: Handler - The _OSI interface handler to install
* NULL means "remove existing handler"
*
* RETURN: Status
*
* DESCRIPTION: Install a handler for the predefined _OSI ACPI method,
* invoked during execution of the internal implementation of
* _OSI. A NULL handler simply removes any existing handler.
*
****************************************************************************/
acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
{
acpi_status status = AE_OK;
(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
if (handler && acpi_gbl_interface_handler) {
status = AE_ALREADY_EXISTS;
} else {
acpi_gbl_interface_handler = handler;
}
acpi_os_release_mutex(acpi_gbl_osi_mutex);
return (status);
}
ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
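/*
* Illustrative sketch of a handler matching the acpi_interface_handler
* signature; ACPICA invokes it for every _OSI query and uses the return
* value as the final "supported" answer.
*/
#if 0
static u32 example_osi_handler(acpi_string interface_name, u32 supported)
{
/* e.g. log the query, or veto/claim support for selected names */
return supported;
}
/* installed with: acpi_install_interface_handler(example_osi_handler); */
#endif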
/*****************************************************************************
*
* FUNCTION: acpi_check_address_range
*
* PARAMETERS: space_id - Address space ID
* Address - Start address
* Length - Length
* Warn - TRUE if warning on overlap desired
*
* RETURN: Number of conflicts detected.
*
* DESCRIPTION: Check if the input address range overlaps any of the
* ASL operation region address ranges.
*
****************************************************************************/
u32
acpi_check_address_range(acpi_adr_space_type space_id,
acpi_physical_address address,
acpi_size length, u8 warn)
{
u32 overlaps;
acpi_status status;
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return (0);
}
overlaps = acpi_ut_check_address_range(space_id, address,
(u32)length, warn);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (overlaps);
}
ACPI_EXPORT_SYMBOL(acpi_check_address_range)
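/*
* Illustrative sketch: a driver asking whether its MMIO window collides
* with any declared operation region. Address and length are arbitrary
* example values.
*/
#if 0
static int example_region_is_free(acpi_physical_address addr, acpi_size len)
{
return acpi_check_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY,
addr, len, TRUE) == 0;
}
#endif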
#endif /* !ACPI_ASL_COMPILER */
| gpl-2.0 |
shskyinfo/android_kernel_lge_e610 | arch/arm/mach-highbank/hotplug.c | 4804 | 1424 | /*
* Copyright 2011 Calxeda, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/smp_scu.h>
#include <asm/cacheflush.h>
#include "core.h"
extern void secondary_startup(void);
int platform_cpu_kill(unsigned int cpu)
{
return 1;
}
/*
* platform-specific code to shut down a CPU
*/
void platform_cpu_die(unsigned int cpu)
{
flush_cache_all();
highbank_set_cpu_jump(cpu, secondary_startup);
scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
cpu_do_idle();
/* We should never return from idle */
panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu);
}
int platform_cpu_disable(unsigned int cpu)
{
/*
* CPU0 should not be shut down via hotplug; cpu_idle can WFI there,
* and a proper shutdown or hibernate should be used instead.
*/
return cpu == 0 ? -EPERM : 0;
}
| gpl-2.0 |
NuriJ/sony_kernel_msm8960 | tools/perf/util/scripting-engines/trace-event-python.c | 4804 | 15059 | /*
* trace-event-python. Feed trace events to an embedded Python interpreter.
*
* Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <Python.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "../../perf.h"
#include "../util.h"
#include "../event.h"
#include "../thread.h"
#include "../trace-event.h"
PyMODINIT_FUNC initperf_trace_context(void);
#define FTRACE_MAX_EVENT \
((1 << (sizeof(unsigned short) * 8)) - 1)
struct event *events[FTRACE_MAX_EVENT];
#define MAX_FIELDS 64
#define N_COMMON_FIELDS 7
extern struct scripting_context *scripting_context;
static char *cur_field_name;
static int zero_flag_atom;
static PyObject *main_module, *main_dict;
static void handler_call_die(const char *handler_name)
{
PyErr_Print();
Py_FatalError("problem in Python trace event handler");
}
static void define_value(enum print_arg_type field_type,
const char *ev_name,
const char *field_name,
const char *field_value,
const char *field_str)
{
const char *handler_name = "define_flag_value";
PyObject *handler, *t, *retval;
unsigned long long value;
unsigned n = 0;
if (field_type == PRINT_SYMBOL)
handler_name = "define_symbolic_value";
t = PyTuple_New(4);
if (!t)
Py_FatalError("couldn't create Python tuple");
value = eval_flag(field_value);
PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
PyTuple_SetItem(t, n++, PyString_FromString(field_name));
PyTuple_SetItem(t, n++, PyInt_FromLong(value));
PyTuple_SetItem(t, n++, PyString_FromString(field_str));
handler = PyDict_GetItemString(main_dict, handler_name);
if (handler && PyCallable_Check(handler)) {
retval = PyObject_CallObject(handler, t);
if (retval == NULL)
handler_call_die(handler_name);
}
Py_DECREF(t);
}
static void define_values(enum print_arg_type field_type,
struct print_flag_sym *field,
const char *ev_name,
const char *field_name)
{
define_value(field_type, ev_name, field_name, field->value,
field->str);
if (field->next)
define_values(field_type, field->next, ev_name, field_name);
}
static void define_field(enum print_arg_type field_type,
const char *ev_name,
const char *field_name,
const char *delim)
{
const char *handler_name = "define_flag_field";
PyObject *handler, *t, *retval;
unsigned n = 0;
if (field_type == PRINT_SYMBOL)
handler_name = "define_symbolic_field";
if (field_type == PRINT_FLAGS)
t = PyTuple_New(3);
else
t = PyTuple_New(2);
if (!t)
Py_FatalError("couldn't create Python tuple");
PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
PyTuple_SetItem(t, n++, PyString_FromString(field_name));
if (field_type == PRINT_FLAGS)
PyTuple_SetItem(t, n++, PyString_FromString(delim));
handler = PyDict_GetItemString(main_dict, handler_name);
if (handler && PyCallable_Check(handler)) {
retval = PyObject_CallObject(handler, t);
if (retval == NULL)
handler_call_die(handler_name);
}
Py_DECREF(t);
}
static void define_event_symbols(struct event *event,
const char *ev_name,
struct print_arg *args)
{
switch (args->type) {
case PRINT_NULL:
break;
case PRINT_ATOM:
define_value(PRINT_FLAGS, ev_name, cur_field_name, "0",
args->atom.atom);
zero_flag_atom = 0;
break;
case PRINT_FIELD:
if (cur_field_name)
free(cur_field_name);
cur_field_name = strdup(args->field.name);
break;
case PRINT_FLAGS:
define_event_symbols(event, ev_name, args->flags.field);
define_field(PRINT_FLAGS, ev_name, cur_field_name,
args->flags.delim);
define_values(PRINT_FLAGS, args->flags.flags, ev_name,
cur_field_name);
break;
case PRINT_SYMBOL:
define_event_symbols(event, ev_name, args->symbol.field);
define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL);
define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
cur_field_name);
break;
case PRINT_STRING:
break;
case PRINT_TYPE:
define_event_symbols(event, ev_name, args->typecast.item);
break;
case PRINT_OP:
if (strcmp(args->op.op, ":") == 0)
zero_flag_atom = 1;
define_event_symbols(event, ev_name, args->op.left);
define_event_symbols(event, ev_name, args->op.right);
break;
default:
/* we should warn... */
return;
}
if (args->next)
define_event_symbols(event, ev_name, args->next);
}
static inline struct event *find_cache_event(int type)
{
static char ev_name[256];
struct event *event;
if (events[type])
return events[type];
events[type] = event = trace_find_event(type);
if (!event)
return NULL;
sprintf(ev_name, "%s__%s", event->system, event->name);
define_event_symbols(event, ev_name, event->print_fmt.args);
return event;
}
static void python_process_event(union perf_event *pevent __unused,
struct perf_sample *sample,
struct perf_evsel *evsel __unused,
struct machine *machine __unused,
struct thread *thread)
{
PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
static char handler_name[256];
struct format_field *field;
unsigned long long val;
unsigned long s, ns;
struct event *event;
unsigned n = 0;
int type;
int pid;
int cpu = sample->cpu;
void *data = sample->raw_data;
unsigned long long nsecs = sample->time;
char *comm = thread->comm;
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
type = trace_parse_common_type(data);
event = find_cache_event(type);
if (!event)
die("ug! no event found for type %d", type);
pid = trace_parse_common_pid(data);
sprintf(handler_name, "%s__%s", event->system, event->name);
handler = PyDict_GetItemString(main_dict, handler_name);
if (handler && !PyCallable_Check(handler))
handler = NULL;
if (!handler) {
dict = PyDict_New();
if (!dict)
Py_FatalError("couldn't create Python dict");
}
s = nsecs / NSECS_PER_SEC;
ns = nsecs - s * NSECS_PER_SEC;
scripting_context->event_data = data;
context = PyCObject_FromVoidPtr(scripting_context, NULL);
PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
PyTuple_SetItem(t, n++, context);
if (handler) {
PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
PyTuple_SetItem(t, n++, PyInt_FromLong(s));
PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
PyTuple_SetItem(t, n++, PyString_FromString(comm));
} else {
PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));
PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s));
PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns));
PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid));
PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm));
}
for (field = event->format.fields; field; field = field->next) {
if (field->flags & FIELD_IS_STRING) {
int offset;
if (field->flags & FIELD_IS_DYNAMIC) {
offset = *(int *)(data + field->offset);
offset &= 0xffff;
} else
offset = field->offset;
obj = PyString_FromString((char *)data + offset);
} else { /* FIELD_IS_NUMERIC */
val = read_size(data + field->offset, field->size);
if (field->flags & FIELD_IS_SIGNED) {
if ((long long)val >= LONG_MIN &&
(long long)val <= LONG_MAX)
obj = PyInt_FromLong(val);
else
obj = PyLong_FromLongLong(val);
} else {
if (val <= LONG_MAX)
obj = PyInt_FromLong(val);
else
obj = PyLong_FromUnsignedLongLong(val);
}
}
if (handler)
PyTuple_SetItem(t, n++, obj);
else
PyDict_SetItemString(dict, field->name, obj);
}
if (!handler)
PyTuple_SetItem(t, n++, dict);
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
if (handler) {
retval = PyObject_CallObject(handler, t);
if (retval == NULL)
handler_call_die(handler_name);
} else {
handler = PyDict_GetItemString(main_dict, "trace_unhandled");
if (handler && PyCallable_Check(handler)) {
retval = PyObject_CallObject(handler, t);
if (retval == NULL)
handler_call_die("trace_unhandled");
}
Py_DECREF(dict);
}
Py_DECREF(t);
}
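/*
* Dispatch summary for the function above: a callable named
* "<system>__<event>" in the script receives the positional tuple
* (event_name, context, cpu, secs, nsecs, pid, comm, field values...);
* if no such callable exists, the fields are packed into a dict and
* passed to the script's trace_unhandled(event_name, context, dict) hook.
*/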
static int run_start_sub(void)
{
PyObject *handler, *retval;
int err = 0;
main_module = PyImport_AddModule("__main__");
if (main_module == NULL)
return -1;
Py_INCREF(main_module);
main_dict = PyModule_GetDict(main_module);
if (main_dict == NULL) {
err = -1;
goto error;
}
Py_INCREF(main_dict);
handler = PyDict_GetItemString(main_dict, "trace_begin");
if (handler == NULL || !PyCallable_Check(handler))
goto out;
retval = PyObject_CallObject(handler, NULL);
if (retval == NULL)
handler_call_die("trace_begin");
Py_DECREF(retval);
return err;
error:
Py_XDECREF(main_dict);
Py_XDECREF(main_module);
out:
return err;
}
/*
* Start trace script
*/
static int python_start_script(const char *script, int argc, const char **argv)
{
const char **command_line;
char buf[PATH_MAX];
int i, err = 0;
FILE *fp;
command_line = malloc((argc + 1) * sizeof(const char *));
command_line[0] = script;
for (i = 1; i < argc + 1; i++)
command_line[i] = argv[i - 1];
Py_Initialize();
initperf_trace_context();
PySys_SetArgv(argc + 1, (char **)command_line);
fp = fopen(script, "r");
if (!fp) {
sprintf(buf, "Can't open python script \"%s\"", script);
perror(buf);
err = -1;
goto error;
}
err = PyRun_SimpleFile(fp, script);
if (err) {
fprintf(stderr, "Error running python script %s\n", script);
goto error;
}
err = run_start_sub();
if (err) {
fprintf(stderr, "Error starting python script %s\n", script);
goto error;
}
free(command_line);
return err;
error:
Py_Finalize();
free(command_line);
return err;
}
/*
* Stop trace script
*/
static int python_stop_script(void)
{
PyObject *handler, *retval;
int err = 0;
handler = PyDict_GetItemString(main_dict, "trace_end");
if (handler == NULL || !PyCallable_Check(handler))
goto out;
retval = PyObject_CallObject(handler, NULL);
if (retval == NULL)
handler_call_die("trace_end");
else
Py_DECREF(retval);
out:
Py_XDECREF(main_dict);
Py_XDECREF(main_module);
Py_Finalize();
return err;
}
static int python_generate_script(const char *outfile)
{
struct event *event = NULL;
struct format_field *f;
char fname[PATH_MAX];
int not_first, count;
FILE *ofp;
sprintf(fname, "%s.py", outfile);
ofp = fopen(fname, "w");
if (ofp == NULL) {
fprintf(stderr, "couldn't open %s\n", fname);
return -1;
}
fprintf(ofp, "# perf script event handlers, "
"generated by perf script -g python\n");
fprintf(ofp, "# Licensed under the terms of the GNU GPL"
" License version 2\n\n");
fprintf(ofp, "# The common_* event handler fields are the most useful "
"fields common to\n");
fprintf(ofp, "# all events. They don't necessarily correspond to "
"the 'common_*' fields\n");
fprintf(ofp, "# in the format files. Those fields not available as "
"handler params can\n");
fprintf(ofp, "# be retrieved using Python functions of the form "
"common_*(context).\n");
fprintf(ofp, "# See the perf-trace-python Documentation for the list "
"of available functions.\n\n");
fprintf(ofp, "import os\n");
fprintf(ofp, "import sys\n\n");
fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n");
fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n");
fprintf(ofp, "\nfrom perf_trace_context import *\n");
fprintf(ofp, "from Core import *\n\n\n");
fprintf(ofp, "def trace_begin():\n");
fprintf(ofp, "\tprint \"in trace_begin\"\n\n");
fprintf(ofp, "def trace_end():\n");
fprintf(ofp, "\tprint \"in trace_end\"\n\n");
while ((event = trace_find_next_event(event))) {
fprintf(ofp, "def %s__%s(", event->system, event->name);
fprintf(ofp, "event_name, ");
fprintf(ofp, "context, ");
fprintf(ofp, "common_cpu,\n");
fprintf(ofp, "\tcommon_secs, ");
fprintf(ofp, "common_nsecs, ");
fprintf(ofp, "common_pid, ");
fprintf(ofp, "common_comm,\n\t");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (++count % 5 == 0)
fprintf(ofp, "\n\t");
fprintf(ofp, "%s", f->name);
}
fprintf(ofp, "):\n");
fprintf(ofp, "\t\tprint_header(event_name, common_cpu, "
"common_secs, common_nsecs,\n\t\t\t"
"common_pid, common_comm)\n\n");
fprintf(ofp, "\t\tprint \"");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (count && count % 3 == 0) {
fprintf(ofp, "\" \\\n\t\t\"");
}
count++;
fprintf(ofp, "%s=", f->name);
if (f->flags & FIELD_IS_STRING ||
f->flags & FIELD_IS_FLAG ||
f->flags & FIELD_IS_SYMBOLIC)
fprintf(ofp, "%%s");
else if (f->flags & FIELD_IS_SIGNED)
fprintf(ofp, "%%d");
else
fprintf(ofp, "%%u");
}
fprintf(ofp, "\\n\" %% \\\n\t\t(");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (++count % 5 == 0)
fprintf(ofp, "\n\t\t");
if (f->flags & FIELD_IS_FLAG) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t\t");
count = 4;
}
fprintf(ofp, "flag_str(\"");
fprintf(ofp, "%s__%s\", ", event->system,
event->name);
fprintf(ofp, "\"%s\", %s)", f->name,
f->name);
} else if (f->flags & FIELD_IS_SYMBOLIC) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t\t");
count = 4;
}
fprintf(ofp, "symbol_str(\"");
fprintf(ofp, "%s__%s\", ", event->system,
event->name);
fprintf(ofp, "\"%s\", %s)", f->name,
f->name);
} else
fprintf(ofp, "%s", f->name);
}
fprintf(ofp, "),\n\n");
}
fprintf(ofp, "def trace_unhandled(event_name, context, "
"event_fields_dict):\n");
fprintf(ofp, "\t\tprint ' '.join(['%%s=%%s'%%(k,str(v))"
"for k,v in sorted(event_fields_dict.items())])\n\n");
fprintf(ofp, "def print_header("
"event_name, cpu, secs, nsecs, pid, comm):\n"
"\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
"(event_name, cpu, secs, nsecs, pid, comm),\n");
fclose(ofp);
fprintf(stderr, "generated Python script: %s\n", fname);
return 0;
}
struct scripting_ops python_scripting_ops = {
.name = "Python",
.start_script = python_start_script,
.stop_script = python_stop_script,
.process_event = python_process_event,
.generate_script = python_generate_script,
};
| gpl-2.0 |
C-Aniruddh/PixCMKernel | init/version.c | 6852 | 1122 | /*
* linux/init/version.c
*
* Copyright (C) 1992 Theodore Ts'o
*
* May be freely distributed as part of Linux.
*/
#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <generated/utsrelease.h>
#include <linux/version.h>
#ifndef CONFIG_KALLSYMS
#define version(a) Version_ ## a
#define version_string(a) version(a)
extern int version_string(LINUX_VERSION_CODE);
int version_string(LINUX_VERSION_CODE);
#endif
struct uts_namespace init_uts_ns = {
.kref = {
.refcount = ATOMIC_INIT(2),
},
.name = {
.sysname = UTS_SYSNAME,
.nodename = UTS_NODENAME,
.release = UTS_RELEASE,
.version = UTS_VERSION,
.machine = UTS_MACHINE,
.domainname = UTS_DOMAINNAME,
},
.user_ns = &init_user_ns,
};
EXPORT_SYMBOL_GPL(init_uts_ns);
/* FIXED STRINGS! Don't touch! */
const char linux_banner[] =
"Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
const char linux_proc_banner[] =
"%s version %s"
" (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ")"
" (" LINUX_COMPILER ") %s\n";
| gpl-2.0 |
jibaron/ddebug | arch/ia64/kernel/unwind_decoder.c | 14020 | 12293 | /*
* Copyright (C) 2000 Hewlett-Packard Co
* Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
*
* Generic IA-64 unwind info decoder.
*
* This file is used both by the Linux kernel and objdump. Please keep
* the two copies of this file in sync.
*
* You need to customize the decoder by defining the following
* macros/constants before including this file:
*
* Types:
* unw_word Unsigned integer type with at least 64 bits
*
* Register names:
* UNW_REG_BSP
* UNW_REG_BSPSTORE
* UNW_REG_FPSR
* UNW_REG_LC
* UNW_REG_PFS
* UNW_REG_PR
* UNW_REG_RNAT
* UNW_REG_PSP
* UNW_REG_RP
* UNW_REG_UNAT
*
* Decoder action macros:
* UNW_DEC_BAD_CODE(code)
* UNW_DEC_ABI(fmt,abi,context,arg)
* UNW_DEC_BR_GR(fmt,brmask,gr,arg)
* UNW_DEC_BR_MEM(fmt,brmask,arg)
* UNW_DEC_COPY_STATE(fmt,label,arg)
* UNW_DEC_EPILOGUE(fmt,t,ecount,arg)
* UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg)
* UNW_DEC_FR_MEM(fmt,frmask,arg)
* UNW_DEC_GR_GR(fmt,grmask,gr,arg)
* UNW_DEC_GR_MEM(fmt,grmask,arg)
* UNW_DEC_LABEL_STATE(fmt,label,arg)
* UNW_DEC_MEM_STACK_F(fmt,t,size,arg)
* UNW_DEC_MEM_STACK_V(fmt,t,arg)
* UNW_DEC_PRIUNAT_GR(fmt,r,arg)
* UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)
* UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)
* UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg)
* UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg)
* UNW_DEC_PROLOGUE(fmt,body,rlen,arg)
* UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg)
* UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg)
* UNW_DEC_REG_REG(fmt,src,dst,arg)
* UNW_DEC_REG_SPREL(fmt,reg,spoff,arg)
* UNW_DEC_REG_WHEN(fmt,reg,t,arg)
* UNW_DEC_RESTORE(fmt,t,abreg,arg)
* UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg)
* UNW_DEC_SPILL_BASE(fmt,pspoff,arg)
* UNW_DEC_SPILL_MASK(fmt,imaskp,arg)
* UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg)
* UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg)
* UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg)
* UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg)
* UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg)
* UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg)
*/
static unw_word
unw_decode_uleb128 (unsigned char **dpp)
{
unsigned shift = 0;
unw_word byte, result = 0;
unsigned char *bp = *dpp;
while (1)
{
byte = *bp++;
result |= (byte & 0x7f) << shift;
if ((byte & 0x80) == 0)
break;
shift += 7;
}
*dpp = bp;
return result;
}
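/*
* Worked example (standard LEB128): the byte sequence 0xe5 0x8e 0x26
* decodes as 0x65 + (0x0e << 7) + (0x26 << 14) = 624485. The high bit
* of each byte is the continuation flag, so decoding stops after 0x26.
*/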
static unsigned char *
unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char byte1, abreg;
unw_word t, off;
byte1 = *dp++;
t = unw_decode_uleb128 (&dp);
off = unw_decode_uleb128 (&dp);
abreg = (byte1 & 0x7f);
if (byte1 & 0x80)
UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg);
else
UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg);
return dp;
}
static unsigned char *
unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char byte1, byte2, abreg, x, ytreg;
unw_word t;
byte1 = *dp++; byte2 = *dp++;
t = unw_decode_uleb128 (&dp);
abreg = (byte1 & 0x7f);
ytreg = byte2;
x = (byte1 >> 7) & 1;
if ((byte1 & 0x80) == 0 && ytreg == 0)
UNW_DEC_RESTORE(X2, t, abreg, arg);
else
UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg);
return dp;
}
static unsigned char *
unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char byte1, byte2, abreg, qp;
unw_word t, off;
byte1 = *dp++; byte2 = *dp++;
t = unw_decode_uleb128 (&dp);
off = unw_decode_uleb128 (&dp);
qp = (byte1 & 0x3f);
abreg = (byte2 & 0x7f);
if (byte1 & 0x80)
UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg);
else
UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg);
return dp;
}
static unsigned char *
unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg;
unw_word t;
byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
t = unw_decode_uleb128 (&dp);
qp = (byte1 & 0x3f);
abreg = (byte2 & 0x7f);
x = (byte2 >> 7) & 1;
ytreg = byte3;
if ((byte2 & 0x80) == 0 && byte3 == 0)
UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg);
else
UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg);
return dp;
}
static unsigned char *
unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg)
{
int body = (code & 0x20) != 0;
unw_word rlen;
rlen = (code & 0x1f);
UNW_DEC_PROLOGUE(R1, body, rlen, arg);
return dp;
}
static unsigned char *
unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char byte1, mask, grsave;
unw_word rlen;
byte1 = *dp++;
mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
grsave = (byte1 & 0x7f);
rlen = unw_decode_uleb128 (&dp);
UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg);
return dp;
}
static unsigned char *
unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg)
{
unw_word rlen;
rlen = unw_decode_uleb128 (&dp);
UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg);
return dp;
}
static unsigned char *
unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char brmask = (code & 0x1f);
UNW_DEC_BR_MEM(P1, brmask, arg);
return dp;
}
static unsigned char *
unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg)
{
if ((code & 0x10) == 0)
{
unsigned char byte1 = *dp++;
UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1),
(byte1 & 0x7f), arg);
}
else if ((code & 0x08) == 0)
{
unsigned char byte1 = *dp++, r, dst;
r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
dst = (byte1 & 0x7f);
switch (r)
{
case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break;
case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break;
case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break;
case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break;
case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break;
case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break;
case 6: UNW_DEC_RP_BR(P3, dst, arg); break;
case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break;
case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break;
case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break;
case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break;
case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break;
default: UNW_DEC_BAD_CODE(r); break;
}
}
else if ((code & 0x7) == 0)
UNW_DEC_SPILL_MASK(P4, dp, arg);
else if ((code & 0x7) == 1)
{
unw_word grmask, frmask, byte1, byte2, byte3;
byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
grmask = ((byte1 >> 4) & 0xf);
frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3;
UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg);
}
else
UNW_DEC_BAD_CODE(code);
return dp;
}
static unsigned char *
unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg)
{
int gregs = (code & 0x10) != 0;
unsigned char mask = (code & 0x0f);
if (gregs)
UNW_DEC_GR_MEM(P6, mask, arg);
else
UNW_DEC_FR_MEM(P6, mask, arg);
return dp;
}
static unsigned char *
unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char r, byte1, byte2;
unw_word t, size;
if ((code & 0x10) == 0)
{
r = (code & 0xf);
t = unw_decode_uleb128 (&dp);
switch (r)
{
case 0:
size = unw_decode_uleb128 (&dp);
UNW_DEC_MEM_STACK_F(P7, t, size, arg);
break;
case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break;
case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break;
case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break;
case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break;
case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break;
case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break;
case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break;
case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break;
case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break;
case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break;
case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break;
case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break;
case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break;
case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break;
case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break;
default: UNW_DEC_BAD_CODE(r); break;
}
}
else
{
switch (code & 0xf)
{
case 0x0: /* p8 */
{
r = *dp++;
t = unw_decode_uleb128 (&dp);
switch (r)
{
case 1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break;
case 2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break;
case 3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break;
case 4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break;
case 5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break;
case 6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break;
case 7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break;
case 8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break;
case 9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break;
case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break;
case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break;
case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break;
case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break;
case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break;
case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break;
case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break;
case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break;
default: UNW_DEC_BAD_CODE(r); break;
}
}
break;
case 0x1:
byte1 = *dp++; byte2 = *dp++;
UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg);
break;
case 0xf: /* p10 */
byte1 = *dp++; byte2 = *dp++;
UNW_DEC_ABI(P10, byte1, byte2, arg);
break;
case 0x9:
return unw_decode_x1 (dp, code, arg);
case 0xa:
return unw_decode_x2 (dp, code, arg);
case 0xb:
return unw_decode_x3 (dp, code, arg);
case 0xc:
return unw_decode_x4 (dp, code, arg);
default:
UNW_DEC_BAD_CODE(code);
break;
}
}
return dp;
}
static unsigned char *
unw_decode_b1 (unsigned char *dp, unsigned char code, void *arg)
{
unw_word label = (code & 0x1f);
if ((code & 0x20) != 0)
UNW_DEC_COPY_STATE(B1, label, arg);
else
UNW_DEC_LABEL_STATE(B1, label, arg);
return dp;
}
static unsigned char *
unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg)
{
unw_word t;
t = unw_decode_uleb128 (&dp);
UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg);
return dp;
}
static unsigned char *
unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg)
{
unw_word t, ecount, label;
if ((code & 0x10) == 0)
{
t = unw_decode_uleb128 (&dp);
ecount = unw_decode_uleb128 (&dp);
UNW_DEC_EPILOGUE(B3, t, ecount, arg);
}
else if ((code & 0x07) == 0)
{
label = unw_decode_uleb128 (&dp);
if ((code & 0x08) != 0)
UNW_DEC_COPY_STATE(B4, label, arg);
else
UNW_DEC_LABEL_STATE(B4, label, arg);
}
else
switch (code & 0x7)
{
case 1: return unw_decode_x1 (dp, code, arg);
case 2: return unw_decode_x2 (dp, code, arg);
case 3: return unw_decode_x3 (dp, code, arg);
case 4: return unw_decode_x4 (dp, code, arg);
default: UNW_DEC_BAD_CODE(code); break;
}
return dp;
}
typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *);
static unw_decoder unw_decode_table[2][8] =
{
/* prologue table: */
{
unw_decode_r1, /* 0 */
unw_decode_r1,
unw_decode_r2,
unw_decode_r3,
unw_decode_p1, /* 4 */
unw_decode_p2_p5,
unw_decode_p6,
unw_decode_p7_p10
},
{
unw_decode_r1, /* 0 */
unw_decode_r1,
unw_decode_r2,
unw_decode_r3,
unw_decode_b1, /* 4 */
unw_decode_b1,
unw_decode_b2,
unw_decode_b3_x4
}
};
/*
* Decode one descriptor and return address of next descriptor.
*/
static inline unsigned char *
unw_decode (unsigned char *dp, int inside_body, void *arg)
{
unw_decoder decoder;
unsigned char code;
code = *dp++;
decoder = unw_decode_table[inside_body][code >> 5];
dp = (*decoder) (dp, code, arg);
return dp;
}
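/*
* Dispatch example: the top three bits of the descriptor byte index the
* table above. Code 0x40 (010xxxxx) selects unw_decode_r2 in either row,
* while 0x80 (100xxxxx) selects unw_decode_p1 inside a prologue region
* but unw_decode_b1 inside a body region.
*/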
| gpl-2.0 |
Windeal/Linux-3.12.36 | drivers/mtd/mtdpart.c | 197 | 21454 | /*
* Simple MTD partitioning layer
*
* Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
* Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include "mtdcore.h"
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);
/* Our partition node structure */
struct mtd_part {
struct mtd_info mtd;
struct mtd_info *master;
uint64_t offset;
struct list_head list;
};
/*
* Given a pointer to the MTD object in the mtd_part structure, we can retrieve
* the pointer to that structure with this macro.
*/
#define PART(x) ((struct mtd_part *)(x))
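/*
* The cast is valid only because 'mtd' is the first member of struct
* mtd_part, so a pointer to the embedded mtd_info is also a pointer to
* the containing partition.
*/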
/*
* MTD methods which simply translate the effective address and pass through
* to the _real_ device.
*/
static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
struct mtd_ecc_stats stats;
int res;
stats = part->master->ecc_stats;
res = part->master->_read(part->master, from + part->offset, len,
retlen, buf);
if (unlikely(mtd_is_eccerr(res)))
mtd->ecc_stats.failed +=
part->master->ecc_stats.failed - stats.failed;
else
mtd->ecc_stats.corrected +=
part->master->ecc_stats.corrected - stats.corrected;
return res;
}
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct mtd_part *part = PART(mtd);
return part->master->_point(part->master, from + part->offset, len,
retlen, virt, phys);
}
static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_unpoint(part->master, from + part->offset, len);
}
static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
unsigned long len,
unsigned long offset,
unsigned long flags)
{
struct mtd_part *part = PART(mtd);
offset += part->offset;
return part->master->_get_unmapped_area(part->master, len, offset,
flags);
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct mtd_part *part = PART(mtd);
int res;
if (from >= mtd->size)
return -EINVAL;
if (ops->datbuf && from + ops->len > mtd->size)
return -EINVAL;
/*
* If OOB is also requested, make sure that we do not read past the end
* of this partition.
*/
if (ops->oobbuf) {
size_t len, pages;
if (ops->mode == MTD_OPS_AUTO_OOB)
len = mtd->oobavail;
else
len = mtd->oobsize;
pages = mtd_div_by_ws(mtd->size, mtd);
pages -= mtd_div_by_ws(from, mtd);
if (ops->ooboffs + ops->ooblen > pages * len)
return -EINVAL;
}
res = part->master->_read_oob(part->master, from + part->offset, ops);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected++;
if (mtd_is_eccerr(res))
mtd->ecc_stats.failed++;
}
return res;
}
static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_read_user_prot_reg(part->master, from, len,
retlen, buf);
}
static int part_get_user_prot_info(struct mtd_info *mtd,
struct otp_info *buf, size_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_get_user_prot_info(part->master, buf, len);
}
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_read_fact_prot_reg(part->master, from, len,
retlen, buf);
}
static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
size_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_get_fact_prot_info(part->master, buf, len);
}
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_write(part->master, to + part->offset, len,
retlen, buf);
}
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_panic_write(part->master, to + part->offset, len,
retlen, buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
struct mtd_part *part = PART(mtd);
if (to >= mtd->size)
return -EINVAL;
if (ops->datbuf && to + ops->len > mtd->size)
return -EINVAL;
return part->master->_write_oob(part->master, to + part->offset, ops);
}
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_write_user_prot_reg(part->master, from, len,
retlen, buf);
}
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_lock_user_prot_reg(part->master, from, len);
}
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
struct mtd_part *part = PART(mtd);
return part->master->_writev(part->master, vecs, count,
to + part->offset, retlen);
}
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_part *part = PART(mtd);
int ret;
instr->addr += part->offset;
ret = part->master->_erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
}
return ret;
}
void mtd_erase_callback(struct erase_info *instr)
{
if (instr->mtd->_erase == part_erase) {
struct mtd_part *part = PART(instr->mtd);
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
}
if (instr->callback)
instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);
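/*
* Note on the two functions above: erase addresses are rebased onto the
* master device on entry, so both the synchronous failure path and the
* asynchronous completion callback must subtract part->offset again
* before the owner of the erase_info sees the addresses.
*/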
static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_lock(part->master, ofs + part->offset, len);
}
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_unlock(part->master, ofs + part->offset, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_is_locked(part->master, ofs + part->offset, len);
}
static void part_sync(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
part->master->_sync(part->master);
}
static int part_suspend(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
return part->master->_suspend(part->master);
}
static void part_resume(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
part->master->_resume(part->master);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
ofs += part->offset;
return part->master->_block_isbad(part->master, ofs);
}
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
int res;
ofs += part->offset;
res = part->master->_block_markbad(part->master, ofs);
if (!res)
mtd->ecc_stats.badblocks++;
return res;
}
static inline void free_partition(struct mtd_part *p)
{
kfree(p->mtd.name);
kfree(p);
}
/*
* This function unregisters and destroy all slave MTD objects which are
* attached to the given master MTD object.
*/
int del_mtd_partitions(struct mtd_info *master)
{
struct mtd_part *slave, *next;
int ret, err = 0;
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if (slave->master == master) {
ret = del_mtd_device(&slave->mtd);
if (ret < 0) {
err = ret;
continue;
}
list_del(&slave->list);
free_partition(slave);
}
mutex_unlock(&mtd_partitions_mutex);
return err;
}
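/*
* Illustrative sketch of a partition table using the special offset and
* size constants resolved by allocate_partition() below; the names and
* sizes are made up.
*/
#if 0
static struct mtd_partition example_parts[] = {
{ .name = "boot", .offset = 0, .size = 256 * 1024 },
{ .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = 4 * 1024 * 1024 },
{ .name = "rootfs", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL },
};
#endif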
static struct mtd_part *allocate_partition(struct mtd_info *master,
const struct mtd_partition *part, int partno,
uint64_t cur_offset)
{
struct mtd_part *slave;
char *name;
/* allocate the partition structure */
slave = kzalloc(sizeof(*slave), GFP_KERNEL);
name = kstrdup(part->name, GFP_KERNEL);
if (!name || !slave) {
printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
master->name);
kfree(name);
kfree(slave);
return ERR_PTR(-ENOMEM);
}
/* set up the MTD object for this partition */
slave->mtd.type = master->type;
slave->mtd.flags = master->flags & ~part->mask_flags;
slave->mtd.size = part->size;
slave->mtd.writesize = master->writesize;
slave->mtd.writebufsize = master->writebufsize;
slave->mtd.oobsize = master->oobsize;
slave->mtd.oobavail = master->oobavail;
slave->mtd.subpage_sft = master->subpage_sft;
slave->mtd.name = name;
slave->mtd.owner = master->owner;
slave->mtd.backing_dev_info = master->backing_dev_info;
/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
* to have the same data be in two different partitions.
*/
slave->mtd.dev.parent = master->dev.parent;
slave->mtd._read = part_read;
slave->mtd._write = part_write;
if (master->_panic_write)
slave->mtd._panic_write = part_panic_write;
if (master->_point && master->_unpoint) {
slave->mtd._point = part_point;
slave->mtd._unpoint = part_unpoint;
}
if (master->_get_unmapped_area)
slave->mtd._get_unmapped_area = part_get_unmapped_area;
if (master->_read_oob)
slave->mtd._read_oob = part_read_oob;
if (master->_write_oob)
slave->mtd._write_oob = part_write_oob;
if (master->_read_user_prot_reg)
slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
if (master->_read_fact_prot_reg)
slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
if (master->_write_user_prot_reg)
slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
if (master->_lock_user_prot_reg)
slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
if (master->_get_user_prot_info)
slave->mtd._get_user_prot_info = part_get_user_prot_info;
if (master->_get_fact_prot_info)
slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
if (master->_sync)
slave->mtd._sync = part_sync;
if (!partno && !master->dev.class && master->_suspend &&
master->_resume) {
slave->mtd._suspend = part_suspend;
slave->mtd._resume = part_resume;
}
if (master->_writev)
slave->mtd._writev = part_writev;
if (master->_lock)
slave->mtd._lock = part_lock;
if (master->_unlock)
slave->mtd._unlock = part_unlock;
if (master->_is_locked)
slave->mtd._is_locked = part_is_locked;
if (master->_block_isbad)
slave->mtd._block_isbad = part_block_isbad;
if (master->_block_markbad)
slave->mtd._block_markbad = part_block_markbad;
slave->mtd._erase = part_erase;
slave->master = master;
slave->offset = part->offset;
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
if (slave->offset == MTDPART_OFS_NXTBLK) {
slave->offset = cur_offset;
if (mtd_mod_by_eb(cur_offset, master) != 0) {
/* Round up to next erasesize */
slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
printk(KERN_NOTICE "Moving partition %d: "
"0x%012llx -> 0x%012llx\n", partno,
(unsigned long long)cur_offset, (unsigned long long)slave->offset);
}
}
if (slave->offset == MTDPART_OFS_RETAIN) {
slave->offset = cur_offset;
if (master->size - slave->offset >= slave->mtd.size) {
slave->mtd.size = master->size - slave->offset
- slave->mtd.size;
} else {
printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
part->name, master->size - slave->offset,
slave->mtd.size);
/* register to preserve ordering */
goto out_register;
}
}
if (slave->mtd.size == MTDPART_SIZ_FULL)
slave->mtd.size = master->size - slave->offset;
printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
/* let's do some sanity checks */
if (slave->offset >= master->size) {
/* let's register it anyway to preserve ordering */
slave->offset = 0;
slave->mtd.size = 0;
printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
part->name);
goto out_register;
}
if (slave->offset + slave->mtd.size > master->size) {
slave->mtd.size = master->size - slave->offset;
printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
part->name, master->name, (unsigned long long)slave->mtd.size);
}
if (master->numeraseregions > 1) {
/* Deal with variable erase size stuff */
int i, max = master->numeraseregions;
u64 end = slave->offset + slave->mtd.size;
struct mtd_erase_region_info *regions = master->eraseregions;
/* Find the first erase regions which is part of this
* partition. */
for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
;
/* The loop searched for the region _behind_ the first one */
if (i > 0)
i--;
/* Pick biggest erasesize */
for (; i < max && regions[i].offset < end; i++) {
if (slave->mtd.erasesize < regions[i].erasesize) {
slave->mtd.erasesize = regions[i].erasesize;
}
}
BUG_ON(slave->mtd.erasesize == 0);
} else {
/* Single erase size */
slave->mtd.erasesize = master->erasesize;
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->offset, &slave->mtd)) {
/* Doesn't start on a boundary of major erase size */
/* FIXME: Let it be writable if it is on a boundary of
* _minor_ erase size though */
slave->mtd.flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
part->name);
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
slave->mtd.flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
part->name);
}
slave->mtd.ecclayout = master->ecclayout;
slave->mtd.ecc_step_size = master->ecc_step_size;
slave->mtd.ecc_strength = master->ecc_strength;
slave->mtd.bitflip_threshold = master->bitflip_threshold;
if (master->_block_isbad) {
uint64_t offs = 0;
while (offs < slave->mtd.size) {
if (mtd_block_isbad(master, offs + slave->offset))
slave->mtd.ecc_stats.badblocks++;
offs += slave->mtd.erasesize;
}
}
out_register:
return slave;
}
int mtd_add_partition(struct mtd_info *master, char *name,
long long offset, long long length)
{
struct mtd_partition part;
struct mtd_part *p, *new;
uint64_t start, end;
int ret = 0;
/* the direct offset is expected */
if (offset == MTDPART_OFS_APPEND ||
offset == MTDPART_OFS_NXTBLK)
return -EINVAL;
if (length == MTDPART_SIZ_FULL)
length = master->size - offset;
if (length <= 0)
return -EINVAL;
part.name = name;
part.size = length;
part.offset = offset;
part.mask_flags = 0;
part.ecclayout = NULL;
new = allocate_partition(master, &part, -1, offset);
if (IS_ERR(new))
return PTR_ERR(new);
start = offset;
end = offset + length;
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry(p, &mtd_partitions, list)
if (p->master == master) {
if ((start >= p->offset) &&
(start < (p->offset + p->mtd.size)))
goto err_inv;
if ((end >= p->offset) &&
(end < (p->offset + p->mtd.size)))
goto err_inv;
}
list_add(&new->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&new->mtd);
return ret;
err_inv:
mutex_unlock(&mtd_partitions_mutex);
free_partition(new);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
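/*
 * Hedged usage sketch: carve a 1 MiB partition out of an already
 * registered master device. The helper name, partition name and the
 * offsets are hypothetical; only mtd_add_partition() itself is real.
 */
static int __maybe_unused example_add_partition(struct mtd_info *master)
{
/* 1 MiB partition starting 1 MiB into the device */
return mtd_add_partition(master, "example", 0x100000, 0x100000);
}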
int mtd_del_partition(struct mtd_info *master, int partno)
{
struct mtd_part *slave, *next;
int ret = -EINVAL;
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if ((slave->master == master) &&
(slave->mtd.index == partno)) {
ret = del_mtd_device(&slave->mtd);
if (ret < 0)
break;
list_del(&slave->list);
free_partition(slave);
break;
}
mutex_unlock(&mtd_partitions_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
/*
* This function, given a master MTD object and a partition table, creates
* and registers slave MTD objects which are bound to the master according to
* the partition definitions.
*
* We don't register the master, or expect the caller to have done so,
* for reasons of data integrity.
*/
int add_mtd_partitions(struct mtd_info *master,
const struct mtd_partition *parts,
int nbparts)
{
struct mtd_part *slave;
uint64_t cur_offset = 0;
int i;
printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
for (i = 0; i < nbparts; i++) {
slave = allocate_partition(master, parts + i, i, cur_offset);
if (IS_ERR(slave))
return PTR_ERR(slave);
mutex_lock(&mtd_partitions_mutex);
list_add(&slave->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&slave->mtd);
cur_offset = slave->offset + slave->mtd.size;
}
return 0;
}
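/*
 * Hedged usage sketch: a board file would typically describe its flash
 * layout with a static table and hand it to add_mtd_partitions(). The
 * layout below is invented; MTDPART_OFS_APPEND and MTDPART_SIZ_FULL are
 * the placement/size placeholders resolved by allocate_partition() above.
 *
 *	static struct mtd_partition example_parts[] = {
 *		{ .name = "boot",   .offset = 0,                  .size = 256 * 1024 },
 *		{ .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = 4 * 1024 * 1024 },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	add_mtd_partitions(master, example_parts, ARRAY_SIZE(example_parts));
 */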
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);
static struct mtd_part_parser *get_partition_parser(const char *name)
{
struct mtd_part_parser *p, *ret = NULL;
spin_lock(&part_parser_lock);
list_for_each_entry(p, &part_parsers, list)
if (!strcmp(p->name, name) && try_module_get(p->owner)) {
ret = p;
break;
}
spin_unlock(&part_parser_lock);
return ret;
}
#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
int register_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
list_add(&p->list, &part_parsers);
spin_unlock(&part_parser_lock);
return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);
int deregister_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
list_del(&p->list);
spin_unlock(&part_parser_lock);
return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
/*
* Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
* are changing this array!
*/
static const char * const default_mtd_part_types[] = {
"cmdlinepart",
"ofpart",
NULL
};
/**
* parse_mtd_partitions - parse MTD partitions
* @master: the master partition (describes whole MTD device)
* @types: names of partition parsers to try or %NULL
* @pparts: array of partitions found is returned here
* @data: MTD partition parser-specific data
*
* This function tries to find partitions on the MTD device @master. It uses MTD
* partition parsers, specified in @types. However, if @types is %NULL, then
* the default list of parsers is used. The default list contains only the
* "cmdlinepart" and "ofpart" parsers ATM.
* Note: if there is more than one parser in @types, the kernel only takes the
* partitions parsed out by the first parser.
*
* This function may return:
* o a negative error code in case of failure
* o zero if no partitions were found
* o a positive number of found partitions, in which case on exit @pparts will
* point to an array containing this number of &struct mtd_partition objects.
*/
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct mtd_part_parser *parser;
int ret = 0;
if (!types)
types = default_mtd_part_types;
for ( ; ret <= 0 && *types; types++) {
parser = get_partition_parser(*types);
if (!parser && !request_module("%s", *types))
parser = get_partition_parser(*types);
if (!parser)
continue;
ret = (*parser->parse_fn)(master, pparts, data);
put_partition_parser(parser);
if (ret > 0) {
printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
ret, parser->name, master->name);
break;
}
}
return ret;
}
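/*
 * Hedged caller sketch showing the return convention documented above:
 * negative means failure, zero means no partitions were found, and a
 * positive count means *pparts points at a kmalloc'ed array the caller
 * must kfree(). The helper name is hypothetical.
 */
static int __maybe_unused example_probe_partitions(struct mtd_info *mtd)
{
struct mtd_partition *parts;
int nr;
nr = parse_mtd_partitions(mtd, NULL, &parts, NULL);
if (nr <= 0)
return nr;	/* error, or nothing found */
nr = add_mtd_partitions(mtd, parts, nr);
kfree(parts);
return nr;
}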
int mtd_is_partition(const struct mtd_info *mtd)
{
struct mtd_part *part;
int ispart = 0;
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry(part, &mtd_partitions, list)
if (&part->mtd == mtd) {
ispart = 1;
break;
}
mutex_unlock(&mtd_partitions_mutex);
return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
if (!mtd_is_partition(mtd))
return mtd->size;
return PART(mtd)->master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
| gpl-2.0 |
pacificIT/linux_kernel | fs/utimes.c | 1733 | 6064 | #include <linux/compiler.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/utime.h>
#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifdef __ARCH_WANT_SYS_UTIME
/*
* sys_utime() can be implemented in user-level using sys_utimes().
* Is this for backwards compatibility? If so, why not move it
* into the appropriate arch directory (for those architectures that
* need it).
*/
/* If times==NULL, set access and modification to current time,
* must be owner or have write permission.
* Else, update from *times, must be owner or super user.
*/
SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times)
{
struct timespec tv[2];
if (times) {
if (get_user(tv[0].tv_sec, ×->actime) ||
get_user(tv[1].tv_sec, ×->modtime))
return -EFAULT;
tv[0].tv_nsec = 0;
tv[1].tv_nsec = 0;
}
return do_utimes(AT_FDCWD, filename, times ? tv : NULL, 0);
}
#endif
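/*
 * Hedged illustration of the comment above sys_utime(): in userspace,
 * utime() can be emulated with utimes() by widening the utimbuf seconds
 * into timevals with zero microseconds, mirroring what the syscall body
 * does with tv_nsec = 0:
 *
 *	struct timeval tv[2] = {
 *		{ .tv_sec = times->actime,  .tv_usec = 0 },
 *		{ .tv_sec = times->modtime, .tv_usec = 0 },
 *	};
 *	utimes(filename, tv);
 */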
static bool nsec_valid(long nsec)
{
if (nsec == UTIME_OMIT || nsec == UTIME_NOW)
return true;
return nsec >= 0 && nsec <= 999999999;
}
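/*
 * UTIME_NOW and UTIME_OMIT are sentinel values from <linux/stat.h>
 * ((1l << 30) - 1l and (1l << 30) - 2l respectively), chosen to sit
 * outside the valid 0..999999999 nanosecond range that nsec_valid()
 * accepts above.
 */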
static int utimes_common(struct path *path, struct timespec *times)
{
int error;
struct iattr newattrs;
struct inode *inode = path->dentry->d_inode;
struct inode *delegated_inode = NULL;
error = mnt_want_write(path->mnt);
if (error)
goto out;
if (times && times[0].tv_nsec == UTIME_NOW &&
times[1].tv_nsec == UTIME_NOW)
times = NULL;
newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME;
if (times) {
if (times[0].tv_nsec == UTIME_OMIT)
newattrs.ia_valid &= ~ATTR_ATIME;
else if (times[0].tv_nsec != UTIME_NOW) {
newattrs.ia_atime.tv_sec = times[0].tv_sec;
newattrs.ia_atime.tv_nsec = times[0].tv_nsec;
newattrs.ia_valid |= ATTR_ATIME_SET;
}
if (times[1].tv_nsec == UTIME_OMIT)
newattrs.ia_valid &= ~ATTR_MTIME;
else if (times[1].tv_nsec != UTIME_NOW) {
newattrs.ia_mtime.tv_sec = times[1].tv_sec;
newattrs.ia_mtime.tv_nsec = times[1].tv_nsec;
newattrs.ia_valid |= ATTR_MTIME_SET;
}
/*
* Tell inode_change_ok() that this is an explicit time
* update, even if neither ATTR_ATIME_SET nor ATTR_MTIME_SET
* were used.
*/
newattrs.ia_valid |= ATTR_TIMES_SET;
} else {
/*
* If times is NULL (or both times are UTIME_NOW),
* then we need to check permissions, because
* inode_change_ok() won't do it.
*/
error = -EACCES;
if (IS_IMMUTABLE(inode))
goto mnt_drop_write_and_out;
if (!inode_owner_or_capable(inode)) {
error = inode_permission(inode, MAY_WRITE);
if (error)
goto mnt_drop_write_and_out;
}
}
retry_deleg:
mutex_lock(&inode->i_mutex);
error = notify_change(path->dentry, &newattrs, &delegated_inode);
mutex_unlock(&inode->i_mutex);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
mnt_drop_write_and_out:
mnt_drop_write(path->mnt);
out:
return error;
}
/*
* do_utimes - change times on filename or file descriptor
* @dfd: open file descriptor, -1 or AT_FDCWD
* @filename: path name or NULL
* @times: new times or NULL
* @flags: zero or more flags (only AT_SYMLINK_NOFOLLOW for the moment)
*
* If filename is NULL and dfd refers to an open file, then operate on
* the file. Otherwise look up filename, possibly using dfd as a
* starting point.
*
* If times==NULL, set access and modification to current time,
* must be owner or have write permission.
* Else, update from *times, must be owner or super user.
*/
long do_utimes(int dfd, const char __user *filename, struct timespec *times,
int flags)
{
int error = -EINVAL;
if (times && (!nsec_valid(times[0].tv_nsec) ||
!nsec_valid(times[1].tv_nsec))) {
goto out;
}
if (flags & ~AT_SYMLINK_NOFOLLOW)
goto out;
if (filename == NULL && dfd != AT_FDCWD) {
struct fd f;
if (flags & AT_SYMLINK_NOFOLLOW)
goto out;
f = fdget(dfd);
error = -EBADF;
if (!f.file)
goto out;
error = utimes_common(&f.file->f_path, times);
fdput(f);
} else {
struct path path;
int lookup_flags = 0;
if (!(flags & AT_SYMLINK_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
retry:
error = user_path_at(dfd, filename, lookup_flags, &path);
if (error)
goto out;
error = utimes_common(&path, times);
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
}
out:
return error;
}
SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename,
struct timespec __user *, utimes, int, flags)
{
struct timespec tstimes[2];
if (utimes) {
if (copy_from_user(&tstimes, utimes, sizeof(tstimes)))
return -EFAULT;
/* Nothing to do, we must not even check the path. */
if (tstimes[0].tv_nsec == UTIME_OMIT &&
tstimes[1].tv_nsec == UTIME_OMIT)
return 0;
}
return do_utimes(dfd, filename, utimes ? tstimes : NULL, flags);
}
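/*
 * Hedged userspace sketch of the per-slot UTIME_* handling implemented
 * in utimes_common(): touch only the modification time of a path while
 * leaving atime alone. When tv_nsec is UTIME_NOW or UTIME_OMIT, the
 * tv_sec field is ignored.
 *
 *	struct timespec ts[2];
 *	ts[0].tv_nsec = UTIME_OMIT;	(leave atime unchanged)
 *	ts[1].tv_nsec = UTIME_NOW;	(set mtime to "now")
 *	utimensat(AT_FDCWD, path, ts, 0);
 */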
SYSCALL_DEFINE3(futimesat, int, dfd, const char __user *, filename,
struct timeval __user *, utimes)
{
struct timeval times[2];
struct timespec tstimes[2];
if (utimes) {
if (copy_from_user(×, utimes, sizeof(times)))
return -EFAULT;
/* This test is needed to catch all invalid values. If we
tested only in do_utimes() we would miss invalid values
truncated by the multiplication by 1000. Note that we also
catch UTIME_{NOW,OMIT} here, which are only valid for
utimensat(). */
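/* For example, with a 32-bit long, tv_usec == 4294968 would wrap:
4294968 * 1000 == 4294968000, which is 704 modulo 2^32 -- a value
nsec_valid() would happily accept. Hence the explicit range check
here. */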
if (times[0].tv_usec >= 1000000 || times[0].tv_usec < 0 ||
times[1].tv_usec >= 1000000 || times[1].tv_usec < 0)
return -EINVAL;
tstimes[0].tv_sec = times[0].tv_sec;
tstimes[0].tv_nsec = 1000 * times[0].tv_usec;
tstimes[1].tv_sec = times[1].tv_sec;
tstimes[1].tv_nsec = 1000 * times[1].tv_usec;
}
return do_utimes(dfd, filename, utimes ? tstimes : NULL, 0);
}
SYSCALL_DEFINE2(utimes, char __user *, filename,
struct timeval __user *, utimes)
{
return sys_futimesat(AT_FDCWD, filename, utimes);
}
| gpl-2.0 |
Pafcholini/Nadia-kernel-LL-N910F-EUR-LL-OpenSource | drivers/net/ethernet/sun/cassini.c | 2245 | 142930 | /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
*
* Copyright (C) 2004 Sun Microsystems Inc.
* Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*
* This driver uses the sungem driver (c) David Miller
* (davem@redhat.com) as its basis.
*
* The cassini chip has a number of features that distinguish it from
* the gem chip:
* 4 transmit descriptor rings that are used for either QoS (VLAN) or
* load balancing (non-VLAN mode)
* batching of multiple packets
* multiple CPU dispatching
* page-based RX descriptor engine with separate completion rings
* Gigabit support (GMII and PCS interface)
* MIF link up/down detection works
*
* RX is handled by page sized buffers that are attached as fragments to
* the skb. here's what's done:
* -- driver allocates pages at a time and keeps reference counts
* on them.
* -- the upper protocol layers assume that the header is in the skb
* itself. as a result, cassini will copy a small amount (64 bytes)
* to make them happy.
* -- driver appends the rest of the data pages as frags to skbuffs
* and increments the reference count
* -- on page reclamation, the driver swaps the page with a spare page.
* if that page is still in use, it frees its reference to that page,
* and allocates a new page for use. otherwise, it just recycles
* the page.
*
* NOTE: cassini can parse the header. however, it's not worth it
* as long as the network stack requires a header copy.
*
* TX has 4 queues. currently these queues are used in a round-robin
* fashion for load balancing. They can also be used for QoS. for that
* to work, however, QoS information needs to be exposed down to the driver
* level so that subqueues get targeted to particular transmit rings.
* alternatively, the queues can be configured via use of the all-purpose
* ioctl.
*
* RX DATA: the rx completion ring has all the info, but the rx desc
* ring has all of the data. RX can conceivably come in under multiple
* interrupts, but the INT# assignment needs to be set up properly by
* the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
* that. also, the two descriptor rings are designed to distinguish between
* encrypted and non-encrypted packets, but we use them for buffering
* instead.
*
* by default, the selective clear mask is set up to process rx packets.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
#include <net/checksum.h>
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#define cas_page_map(x) kmap_atomic((x))
#define cas_page_unmap(x) kunmap_atomic((x))
#define CAS_NCPUS num_online_cpus()
#define cas_skb_release(x) netif_rx(x)
/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */
#include "cassini.h"
#define USE_TX_COMPWB /* use completion writeback registers */
#define USE_CSMA_CD_PROTO /* standard CSMA/CD */
#define USE_RX_BLANK /* hw interrupt mitigation */
#undef USE_ENTROPY_DEV /* don't test for entropy device */
/* NOTE: these aren't usable unless PCI interrupts can be assigned.
* also, we need to make cp->lock finer-grained.
*/
#undef USE_PCI_INTB
#undef USE_PCI_INTC
#undef USE_PCI_INTD
#undef USE_QOS
#undef USE_VPD_DEBUG /* debug vpd information if defined */
/* rx processing options */
#define USE_PAGE_ORDER /* specify to allocate large rx pages */
#define RX_DONT_BATCH 0 /* if 1, don't batch flows */
#define RX_COPY_ALWAYS 0 /* if 0, use frags */
#define RX_COPY_MIN 64 /* copy a little to make upper layers happy */
#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
#define DRV_MODULE_NAME "cassini"
#define DRV_MODULE_VERSION "1.6"
#define DRV_MODULE_RELDATE "21 May 2008"
#define CAS_DEF_MSG_ENABLE \
(NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK | \
NETIF_MSG_TIMER | \
NETIF_MSG_IFDOWN | \
NETIF_MSG_IFUP | \
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
* and dev->tx_timeout() should be called to fix the problem
*/
#define CAS_TX_TIMEOUT (HZ)
#define CAS_LINK_TIMEOUT (22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT (1)
/* timeout values for state changing. these specify the number
* of 10us delays to be used before giving up.
*/
#define STOP_TRIES_PHY 1000
#define STOP_TRIES 5000
/* specify a minimum frame size to deal with some fifo issues
* max mtu == 2 * page size - ethernet header - 64 - swivel =
* 2 * page_size - 0x50
*/
#define CAS_MIN_FRAME 97
#define CAS_1000MB_MIN_FRAME 255
#define CAS_MIN_MTU 60
#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
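/* e.g. with the default 2K (0x800) rx pages this works out to
 * 2 * 0x800 - 0x50 = 4016 bytes; once the doubled page size passes
 * 9080 bytes (8K pages and up), the 9000-byte cap in CAS_MAX_MTU
 * takes over instead.
 */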
#if 1
/*
* Eliminate these and use separate atomic counters for each, to
* avoid a race condition.
*/
#else
#define CAS_RESET_MTU 1
#define CAS_RESET_ALL 2
#define CAS_RESET_SPARE 3
#endif
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;
MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");
/*
* Work around for a PCS bug in which the link goes down due to the chip
* being confused and never showing a link status of "up."
*/
#define DEFAULT_LINKDOWN_TIMEOUT 5
/*
* Value in seconds, for user input.
*/
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
/*
* value in 'ticks' (units used by jiffies). Set when we init the
* module because 'HZ' is actually a function call on some flavors of
* Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
*/
static int link_transition_timeout;
static u16 link_modes[] = {
BMCR_ANENABLE, /* 0 : autoneg */
0, /* 1 : 10bt half duplex */
BMCR_SPEED100, /* 2 : 100bt half duplex */
BMCR_FULLDPLX, /* 3 : 10bt full duplex */
BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */
CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
static void cas_set_link_modes(struct cas *cp);
static inline void cas_lock_tx(struct cas *cp)
{
int i;
for (i = 0; i < N_TX_RINGS; i++)
spin_lock(&cp->tx_lock[i]);
}
static inline void cas_lock_all(struct cas *cp)
{
spin_lock_irq(&cp->lock);
cas_lock_tx(cp);
}
/* WTZ: QA was finding deadlock problems with the previous
* versions after long test runs with multiple cards per machine.
* See if replacing cas_lock_all with safer versions helps. The
* symptoms QA is reporting match those we'd expect if interrupts
* aren't being properly restored, and we fixed a previous deadlock
* with similar symptoms by using save/restore versions in other
* places.
*/
#define cas_lock_all_save(cp, flags) \
do { \
struct cas *xxxcp = (cp); \
spin_lock_irqsave(&xxxcp->lock, flags); \
cas_lock_tx(xxxcp); \
} while (0)
static inline void cas_unlock_tx(struct cas *cp)
{
int i;
for (i = N_TX_RINGS; i > 0; i--)
spin_unlock(&cp->tx_lock[i - 1]);
}
static inline void cas_unlock_all(struct cas *cp)
{
cas_unlock_tx(cp);
spin_unlock_irq(&cp->lock);
}
#define cas_unlock_all_restore(cp, flags) \
do { \
struct cas *xxxcp = (cp); \
cas_unlock_tx(xxxcp); \
spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
static void cas_disable_irq(struct cas *cp, const int ring)
{
/* Make sure we won't get any more interrupts */
if (ring == 0) {
writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
return;
}
/* disable completion interrupts and selectively mask */
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
case 1:
#endif
#ifdef USE_PCI_INTC
case 2:
#endif
#ifdef USE_PCI_INTD
case 3:
#endif
writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
cp->regs + REG_PLUS_INTRN_MASK(ring));
break;
#endif
default:
writel(INTRN_MASK_CLEAR_ALL, cp->regs +
REG_PLUS_INTRN_MASK(ring));
break;
}
}
}
static inline void cas_mask_intr(struct cas *cp)
{
int i;
for (i = 0; i < N_RX_COMP_RINGS; i++)
cas_disable_irq(cp, i);
}
static void cas_enable_irq(struct cas *cp, const int ring)
{
if (ring == 0) { /* all but TX_DONE */
writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
return;
}
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
case 1:
#endif
#ifdef USE_PCI_INTC
case 2:
#endif
#ifdef USE_PCI_INTD
case 3:
#endif
writel(INTRN_MASK_RX_EN, cp->regs +
REG_PLUS_INTRN_MASK(ring));
break;
#endif
default:
break;
}
}
}
static inline void cas_unmask_intr(struct cas *cp)
{
int i;
for (i = 0; i < N_RX_COMP_RINGS; i++)
cas_enable_irq(cp, i);
}
static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
return;
batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
readl(cp->regs + REG_ENTROPY_IV),
sizeof(uint64_t)*8);
#endif
}
static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
return;
writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
cp->regs + REG_BIM_LOCAL_DEV_EN);
writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
/* if we read back 0x0, we don't have an entropy device */
if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}
/* access to the phy. the following assumes that we've initialized the MIF to
* be in frame rather than bit-bang mode
*/
static u16 cas_phy_read(struct cas *cp, int reg)
{
u32 cmd;
int limit = STOP_TRIES_PHY;
cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
cmd |= MIF_FRAME_TURN_AROUND_MSB;
writel(cmd, cp->regs + REG_MIF_FRAME);
/* poll for completion */
while (limit-- > 0) {
udelay(10);
cmd = readl(cp->regs + REG_MIF_FRAME);
if (cmd & MIF_FRAME_TURN_AROUND_LSB)
return cmd & MIF_FRAME_DATA_MASK;
}
return 0xFFFF; /* -1 */
}
static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
int limit = STOP_TRIES_PHY;
u32 cmd;
cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
cmd |= MIF_FRAME_TURN_AROUND_MSB;
cmd |= val & MIF_FRAME_DATA_MASK;
writel(cmd, cp->regs + REG_MIF_FRAME);
/* poll for completion */
while (limit-- > 0) {
udelay(10);
cmd = readl(cp->regs + REG_MIF_FRAME);
if (cmd & MIF_FRAME_TURN_AROUND_LSB)
return 0;
}
return -1;
}
static void cas_phy_powerup(struct cas *cp)
{
u16 ctl = cas_phy_read(cp, MII_BMCR);
if ((ctl & BMCR_PDOWN) == 0)
return;
ctl &= ~BMCR_PDOWN;
cas_phy_write(cp, MII_BMCR, ctl);
}
static void cas_phy_powerdown(struct cas *cp)
{
u16 ctl = cas_phy_read(cp, MII_BMCR);
if (ctl & BMCR_PDOWN)
return;
ctl |= BMCR_PDOWN;
cas_phy_write(cp, MII_BMCR, ctl);
}
/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
PCI_DMA_FROMDEVICE);
__free_pages(page->buffer, cp->page_order);
kfree(page);
return 0;
}
#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y) ((x)->used += (y))
#define RX_USED_SET(x, y) ((x)->used = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif
/* local page allocation routines for the receive buffers. jumbo pages
* require at least 8K contiguous and 8K aligned buffers.
*/
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
cas_page_t *page;
page = kmalloc(sizeof(cas_page_t), flags);
if (!page)
return NULL;
INIT_LIST_HEAD(&page->list);
RX_USED_SET(page, 0);
page->buffer = alloc_pages(flags, cp->page_order);
if (!page->buffer)
goto page_err;
page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
cp->page_size, PCI_DMA_FROMDEVICE);
return page;
page_err:
kfree(page);
return NULL;
}
/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
spin_lock(&cp->rx_inuse_lock);
INIT_LIST_HEAD(&cp->rx_inuse_list);
spin_unlock(&cp->rx_inuse_lock);
spin_lock(&cp->rx_spare_lock);
INIT_LIST_HEAD(&cp->rx_spare_list);
cp->rx_spares_needed = RX_SPARE_COUNT;
spin_unlock(&cp->rx_spare_lock);
}
/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
struct list_head list, *elem, *tmp;
/* free spare buffers */
INIT_LIST_HEAD(&list);
spin_lock(&cp->rx_spare_lock);
list_splice_init(&cp->rx_spare_list, &list);
spin_unlock(&cp->rx_spare_lock);
list_for_each_safe(elem, tmp, &list) {
cas_page_free(cp, list_entry(elem, cas_page_t, list));
}
INIT_LIST_HEAD(&list);
#if 1
/*
* Looks like Adrian had protected this with a different
* lock than used everywhere else to manipulate this list.
*/
spin_lock(&cp->rx_inuse_lock);
list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_inuse_lock);
#else
spin_lock(&cp->rx_spare_lock);
list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_spare_lock);
#endif
list_for_each_safe(elem, tmp, &list) {
cas_page_free(cp, list_entry(elem, cas_page_t, list));
}
}
/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
struct list_head list, *elem, *tmp;
int needed, i;
/* check inuse list. if we don't need any more free buffers,
* just free them
*/
/* make a local copy of the list */
INIT_LIST_HEAD(&list);
spin_lock(&cp->rx_inuse_lock);
list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_inuse_lock);
list_for_each_safe(elem, tmp, &list) {
cas_page_t *page = list_entry(elem, cas_page_t, list);
/*
* With the lockless pagecache, cassini buffering scheme gets
* slightly less accurate: we might find that a page has an
* elevated reference count here, due to a speculative ref,
* and skip it as in-use. Ideally we would be able to reclaim
* it. However this would be such a rare case, it doesn't
* matter too much as we should pick it up the next time round.
*
* Importantly, if we find that the page has a refcount of 1
* here (our refcount), then we know it is definitely not in use
* so we can reuse it.
*/
if (page_count(page->buffer) > 1)
continue;
list_del(elem);
spin_lock(&cp->rx_spare_lock);
if (cp->rx_spares_needed > 0) {
list_add(elem, &cp->rx_spare_list);
cp->rx_spares_needed--;
spin_unlock(&cp->rx_spare_lock);
} else {
spin_unlock(&cp->rx_spare_lock);
cas_page_free(cp, page);
}
}
/* put any inuse buffers back on the list */
if (!list_empty(&list)) {
spin_lock(&cp->rx_inuse_lock);
list_splice(&list, &cp->rx_inuse_list);
spin_unlock(&cp->rx_inuse_lock);
}
spin_lock(&cp->rx_spare_lock);
needed = cp->rx_spares_needed;
spin_unlock(&cp->rx_spare_lock);
if (!needed)
return;
/* we still need spares, so try to allocate some */
INIT_LIST_HEAD(&list);
i = 0;
while (i < needed) {
cas_page_t *spare = cas_page_alloc(cp, flags);
if (!spare)
break;
list_add(&spare->list, &list);
i++;
}
spin_lock(&cp->rx_spare_lock);
list_splice(&list, &cp->rx_spare_list);
cp->rx_spares_needed -= i;
spin_unlock(&cp->rx_spare_lock);
}
/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
struct list_head *entry;
int recover;
spin_lock(&cp->rx_spare_lock);
if (list_empty(&cp->rx_spare_list)) {
/* try to do a quick recovery */
spin_unlock(&cp->rx_spare_lock);
cas_spare_recover(cp, GFP_ATOMIC);
spin_lock(&cp->rx_spare_lock);
if (list_empty(&cp->rx_spare_list)) {
netif_err(cp, rx_err, cp->dev,
"no spare buffers available\n");
spin_unlock(&cp->rx_spare_lock);
return NULL;
}
}
entry = cp->rx_spare_list.next;
list_del(entry);
recover = ++cp->rx_spares_needed;
spin_unlock(&cp->rx_spare_lock);
/* trigger the timer to do the recovery */
if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
atomic_inc(&cp->reset_task_pending);
atomic_inc(&cp->reset_task_pending_spare);
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
schedule_work(&cp->reset_task);
#endif
}
return list_entry(entry, cas_page_t, list);
}
static void cas_mif_poll(struct cas *cp, const int enable)
{
u32 cfg;
cfg = readl(cp->regs + REG_MIF_CFG);
cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
if (cp->phy_type & CAS_PHY_MII_MDIO1)
cfg |= MIF_CFG_PHY_SELECT;
/* poll and interrupt on link status change. */
if (enable) {
cfg |= MIF_CFG_POLL_EN;
cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
}
writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
cp->regs + REG_MIF_MASK);
writel(cfg, cp->regs + REG_MIF_CFG);
}
/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
u16 ctl;
#if 1
int lcntl;
int changed = 0;
int oldstate = cp->lstate;
int link_was_not_down = !(oldstate == link_down);
#endif
/* Setup link parameters */
if (!ep)
goto start_aneg;
lcntl = cp->link_cntl;
if (ep->autoneg == AUTONEG_ENABLE)
cp->link_cntl = BMCR_ANENABLE;
else {
u32 speed = ethtool_cmd_speed(ep);
cp->link_cntl = 0;
if (speed == SPEED_100)
cp->link_cntl |= BMCR_SPEED100;
else if (speed == SPEED_1000)
cp->link_cntl |= CAS_BMCR_SPEED1000;
if (ep->duplex == DUPLEX_FULL)
cp->link_cntl |= BMCR_FULLDPLX;
}
#if 1
changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
if (cp->lstate == link_up) {
netdev_info(cp->dev, "PCS link down\n");
} else {
if (changed) {
netdev_info(cp->dev, "link configuration changed\n");
}
}
cp->lstate = link_down;
cp->link_transition = LINK_TRANSITION_LINK_DOWN;
if (!cp->hw_running)
return;
#if 1
/*
* WTZ: If the old state was link_up, we turn off the carrier
* to replicate everything we do elsewhere on a link-down
* event when we were already in a link-up state.
*/
if (oldstate == link_up)
netif_carrier_off(cp->dev);
if (changed && link_was_not_down) {
/*
* WTZ: This branch will simply schedule a full reset after
* we explicitly changed link modes in an ioctl. See if this
* fixes the link-problems we were having for forced mode.
*/
atomic_inc(&cp->reset_task_pending);
atomic_inc(&cp->reset_task_pending_all);
schedule_work(&cp->reset_task);
cp->timer_ticks = 0;
mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
return;
}
#endif
if (cp->phy_type & CAS_PHY_SERDES) {
u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
if (cp->link_cntl & BMCR_ANENABLE) {
val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
cp->lstate = link_aneg;
} else {
if (cp->link_cntl & BMCR_FULLDPLX)
val |= PCS_MII_CTRL_DUPLEX;
val &= ~PCS_MII_AUTONEG_EN;
cp->lstate = link_force_ok;
}
cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
writel(val, cp->regs + REG_PCS_MII_CTRL);
} else {
cas_mif_poll(cp, 0);
ctl = cas_phy_read(cp, MII_BMCR);
ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
ctl |= cp->link_cntl;
if (ctl & BMCR_ANENABLE) {
ctl |= BMCR_ANRESTART;
cp->lstate = link_aneg;
} else {
cp->lstate = link_force_ok;
}
cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
cas_phy_write(cp, MII_BMCR, ctl);
cas_mif_poll(cp, 1);
}
cp->timer_ticks = 0;
mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}
/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
int limit = STOP_TRIES_PHY;
u16 val;
cas_phy_write(cp, MII_BMCR, BMCR_RESET);
udelay(100);
while (--limit) {
val = cas_phy_read(cp, MII_BMCR);
if ((val & BMCR_RESET) == 0)
break;
udelay(10);
}
return limit <= 0;
}
static int cas_saturn_firmware_init(struct cas *cp)
{
const struct firmware *fw;
const char fw_name[] = "sun/cassini.bin";
int err;
if (PHY_NS_DP83065 != cp->phy_id)
return 0;
err = request_firmware(&fw, fw_name, &cp->pdev->dev);
if (err) {
pr_err("Failed to load firmware \"%s\"\n",
fw_name);
return err;
}
if (fw->size < 2) {
pr_err("bogus length %zu in \"%s\"\n",
fw->size, fw_name);
err = -EINVAL;
goto out;
}
cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
cp->fw_size = fw->size - 2;
cp->fw_data = vmalloc(cp->fw_size);
if (!cp->fw_data) {
err = -ENOMEM;
goto out;
}
memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
release_firmware(fw);
return err;
}
static void cas_saturn_firmware_load(struct cas *cp)
{
int i;
cas_phy_powerdown(cp);
/* expanded memory access mode */
cas_phy_write(cp, DP83065_MII_MEM, 0x0);
/* pointer configuration for new firmware */
cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
cas_phy_write(cp, DP83065_MII_REGD, 0x82);
cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
cas_phy_write(cp, DP83065_MII_REGD, 0x0);
cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
cas_phy_write(cp, DP83065_MII_REGD, 0x39);
/* download new firmware */
cas_phy_write(cp, DP83065_MII_MEM, 0x1);
cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
for (i = 0; i < cp->fw_size; i++)
cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
/* enable firmware */
cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}
/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
u16 val;
/* if we're in MII/GMII mode, set up phy */
if (CAS_PHY_MII(cp->phy_type)) {
writel(PCS_DATAPATH_MODE_MII,
cp->regs + REG_PCS_DATAPATH_MODE);
cas_mif_poll(cp, 0);
cas_reset_mii_phy(cp); /* take out of isolate mode */
if (PHY_LUCENT_B0 == cp->phy_id) {
/* workaround link up/down issue with lucent */
cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
cas_phy_write(cp, MII_BMCR, 0x00f1);
cas_phy_write(cp, LUCENT_MII_REG, 0x0);
} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
/* workarounds for broadcom phy */
cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
} else if (PHY_BROADCOM_5411 == cp->phy_id) {
val = cas_phy_read(cp, BROADCOM_MII_REG4);
val = cas_phy_read(cp, BROADCOM_MII_REG4);
if (val & 0x0080) {
/* link workaround */
cas_phy_write(cp, BROADCOM_MII_REG4,
val & ~0x0080);
}
} else if (cp->cas_flags & CAS_FLAG_SATURN) {
writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
SATURN_PCFG_FSI : 0x0,
cp->regs + REG_SATURN_PCFG);
/* load firmware to address 10Mbps auto-negotiation
* issue. NOTE: this will need to be changed if the
* default firmware gets fixed.
*/
if (PHY_NS_DP83065 == cp->phy_id) {
cas_saturn_firmware_load(cp);
}
cas_phy_powerup(cp);
}
/* advertise capabilities */
val = cas_phy_read(cp, MII_BMCR);
val &= ~BMCR_ANENABLE;
cas_phy_write(cp, MII_BMCR, val);
udelay(10);
cas_phy_write(cp, MII_ADVERTISE,
cas_phy_read(cp, MII_ADVERTISE) |
(ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL |
CAS_ADVERTISE_PAUSE |
CAS_ADVERTISE_ASYM_PAUSE));
if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
/* make sure that we don't advertise half
* duplex to avoid a chip issue
*/
val = cas_phy_read(cp, CAS_MII_1000_CTRL);
val &= ~CAS_ADVERTISE_1000HALF;
val |= CAS_ADVERTISE_1000FULL;
cas_phy_write(cp, CAS_MII_1000_CTRL, val);
}
} else {
/* reset pcs for serdes */
u32 val;
int limit;
writel(PCS_DATAPATH_MODE_SERDES,
cp->regs + REG_PCS_DATAPATH_MODE);
/* enable serdes pins on saturn */
if (cp->cas_flags & CAS_FLAG_SATURN)
writel(0, cp->regs + REG_SATURN_PCFG);
/* Reset PCS unit. */
val = readl(cp->regs + REG_PCS_MII_CTRL);
val |= PCS_MII_RESET;
writel(val, cp->regs + REG_PCS_MII_CTRL);
limit = STOP_TRIES;
while (--limit > 0) {
udelay(10);
if ((readl(cp->regs + REG_PCS_MII_CTRL) &
PCS_MII_RESET) == 0)
break;
}
if (limit <= 0)
netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
readl(cp->regs + REG_PCS_STATE_MACHINE));
/* Make sure PCS is disabled while changing advertisement
* configuration.
*/
writel(0x0, cp->regs + REG_PCS_CFG);
/* Advertise all capabilities except half-duplex. */
val = readl(cp->regs + REG_PCS_MII_ADVERT);
val &= ~PCS_MII_ADVERT_HD;
val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
PCS_MII_ADVERT_ASYM_PAUSE);
writel(val, cp->regs + REG_PCS_MII_ADVERT);
/* enable PCS */
writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
/* pcs workaround: enable sync detect */
writel(PCS_SERDES_CTRL_SYNCD_EN,
cp->regs + REG_PCS_SERDES_CTRL);
}
}
static int cas_pcs_link_check(struct cas *cp)
{
u32 stat, state_machine;
int retval = 0;
/* The link status bit latches on zero, so you must
* read it twice in such a case to see a transition
* to the link being up.
*/
stat = readl(cp->regs + REG_PCS_MII_STATUS);
if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
stat = readl(cp->regs + REG_PCS_MII_STATUS);
/* The remote-fault indication is only valid
* when autoneg has completed.
*/
if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
PCS_MII_STATUS_REMOTE_FAULT)) ==
(PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
/* work around link detection issue by querying the PCS state
* machine directly.
*/
state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
stat &= ~PCS_MII_STATUS_LINK_STATUS;
} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
stat |= PCS_MII_STATUS_LINK_STATUS;
}
if (stat & PCS_MII_STATUS_LINK_STATUS) {
if (cp->lstate != link_up) {
if (cp->opened) {
cp->lstate = link_up;
cp->link_transition = LINK_TRANSITION_LINK_UP;
cas_set_link_modes(cp);
netif_carrier_on(cp->dev);
}
}
} else if (cp->lstate == link_up) {
cp->lstate = link_down;
if (link_transition_timeout != 0 &&
cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
!cp->link_transition_jiffies_valid) {
/*
* force a reset, as a workaround for the
* link-failure problem. May want to move this to a
* point a bit earlier in the sequence. If we had
* generated a reset a short time ago, we'll wait for
* the link timer to check the status until a
* timer expires (link_transition_jiffies_valid is
* true when the timer is running). Instead of using
* a system timer, we just do a check whenever the
* link timer is running - this clears the flag after
* a suitable delay.
*/
retval = 1;
cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
cp->link_transition_jiffies = jiffies;
cp->link_transition_jiffies_valid = 1;
} else {
cp->link_transition = LINK_TRANSITION_ON_FAILURE;
}
netif_carrier_off(cp->dev);
if (cp->opened)
netif_info(cp, link, cp->dev, "PCS link down\n");
/* Cassini only: if you force a mode, there can be
* sync problems on link down. to fix that, the following
* things need to be checked:
* 1) read serialink state register
* 2) read pcs status register to verify link down.
* 3) if link down and serial link == 0x03, then you need
* to global reset the chip.
*/
if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
/* should check to see if we're in a forced mode */
stat = readl(cp->regs + REG_PCS_SERDES_STATE);
if (stat == 0x03)
return 1;
}
} else if (cp->lstate == link_down) {
if (link_transition_timeout != 0 &&
cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
!cp->link_transition_jiffies_valid) {
/* force a reset, as a workaround for the
* link-failure problem. May want to move
* this to a point a bit earlier in the
* sequence.
*/
retval = 1;
cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
cp->link_transition_jiffies = jiffies;
cp->link_transition_jiffies_valid = 1;
} else {
cp->link_transition = LINK_TRANSITION_STILL_FAILED;
}
}
return retval;
}
static int cas_pcs_interrupt(struct net_device *dev,
struct cas *cp, u32 status)
{
u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
return 0;
return cas_pcs_link_check(cp);
}
static int cas_txmac_interrupt(struct net_device *dev,
struct cas *cp, u32 status)
{
u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
if (!txmac_stat)
return 0;
netif_printk(cp, intr, KERN_DEBUG, cp->dev,
"txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
/* Defer timer expiration is quite normal,
* don't even log the event.
*/
if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
!(txmac_stat & ~MAC_TX_DEFER_TIMER))
return 0;
spin_lock(&cp->stat_lock[0]);
if (txmac_stat & MAC_TX_UNDERRUN) {
netdev_err(dev, "TX MAC xmit underrun\n");
cp->net_stats[0].tx_fifo_errors++;
}
if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
netdev_err(dev, "TX MAC max packet size error\n");
cp->net_stats[0].tx_errors++;
}
/* The rest are all cases of one of the 16-bit TX
* counters expiring.
*/
if (txmac_stat & MAC_TX_COLL_NORMAL)
cp->net_stats[0].collisions += 0x10000;
if (txmac_stat & MAC_TX_COLL_EXCESS) {
cp->net_stats[0].tx_aborted_errors += 0x10000;
cp->net_stats[0].collisions += 0x10000;
}
if (txmac_stat & MAC_TX_COLL_LATE) {
cp->net_stats[0].tx_aborted_errors += 0x10000;
cp->net_stats[0].collisions += 0x10000;
}
spin_unlock(&cp->stat_lock[0]);
/* We do not keep track of MAC_TX_COLL_FIRST and
* MAC_TX_PEAK_ATTEMPTS events.
*/
return 0;
}
static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
cas_hp_inst_t *inst;
u32 val;
int i;
i = 0;
while ((inst = firmware) && inst->note) {
writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
++firmware;
++i;
}
}
static void cas_init_rx_dma(struct cas *cp)
{
u64 desc_dma = cp->block_dvma;
u32 val;
int i, size;
/* rx free descriptors */
val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
if ((N_RX_DESC_RINGS > 1) &&
(cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */
val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
writel(val, cp->regs + REG_RX_CFG);
val = (unsigned long) cp->init_rxds[0] -
(unsigned long) cp->init_block;
writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
/* rx desc 2 is for IPSEC packets. however,
* we don't use it for that purpose.
*/
val = (unsigned long) cp->init_rxds[1] -
(unsigned long) cp->init_block;
writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
writel((desc_dma + val) & 0xffffffff, cp->regs +
REG_PLUS_RX_DB1_LOW);
writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
REG_PLUS_RX_KICK1);
}
/* rx completion registers */
val = (unsigned long) cp->init_rxcs[0] -
(unsigned long) cp->init_block;
writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
/* rx comp 2-4 */
for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
val = (unsigned long) cp->init_rxcs[i] -
(unsigned long) cp->init_block;
writel((desc_dma + val) >> 32, cp->regs +
REG_PLUS_RX_CBN_HI(i));
writel((desc_dma + val) & 0xffffffff, cp->regs +
REG_PLUS_RX_CBN_LOW(i));
}
}
/* read selective clear regs to prevent spurious interrupts
* on reset because complete == kick.
* selective clear set up to prevent interrupts on resets
*/
readl(cp->regs + REG_INTR_STATUS_ALIAS);
writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
for (i = 1; i < N_RX_COMP_RINGS; i++)
readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
/* 2 is different from 3 and 4 */
if (N_RX_COMP_RINGS > 1)
writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
cp->regs + REG_PLUS_ALIASN_CLEAR(1));
for (i = 2; i < N_RX_COMP_RINGS; i++)
writel(INTR_RX_DONE_ALT,
cp->regs + REG_PLUS_ALIASN_CLEAR(i));
}
/* set up pause thresholds */
val = CAS_BASE(RX_PAUSE_THRESH_OFF,
cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
val |= CAS_BASE(RX_PAUSE_THRESH_ON,
cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
writel(val, cp->regs + REG_RX_PAUSE_THRESH);
/* zero out dma reassembly buffers */
for (i = 0; i < 64; i++) {
writel(i, cp->regs + REG_RX_TABLE_ADDR);
writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
}
/* make sure address register is 0 for normal operation */
writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
/* interrupt mitigation */
#ifdef USE_RX_BLANK
val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
writel(val, cp->regs + REG_RX_BLANK);
#else
writel(0x0, cp->regs + REG_RX_BLANK);
#endif
/* interrupt generation as a function of low water marks for
* free desc and completion entries. these are used to trigger
* housekeeping for rx descs. we don't use the free interrupt
* as it's not very useful
*/
/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
writel(val, cp->regs + REG_RX_AE_THRESH);
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
}
/* Random early detect registers. useful for congestion avoidance.
* this should be tunable.
*/
writel(0x0, cp->regs + REG_RX_RED);
/* receive page sizes. default == 2K (0x800) */
val = 0;
if (cp->page_size == 0x1000)
val = 0x1;
else if (cp->page_size == 0x2000)
val = 0x2;
else if (cp->page_size == 0x4000)
val = 0x3;
/* round mtu + offset. constrain to page size. */
size = cp->dev->mtu + 64;
if (size > cp->page_size)
size = cp->page_size;
if (size <= 0x400)
i = 0x0;
else if (size <= 0x800)
i = 0x1;
else if (size <= 0x1000)
i = 0x2;
else
i = 0x3;
cp->mtu_stride = 1 << (i + 10);
val = CAS_BASE(RX_PAGE_SIZE, val);
val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
writel(val, cp->regs + REG_RX_PAGE_SIZE);
/* enable the header parser if desired */
if (CAS_HP_FIRMWARE == cas_prog_null)
return;
val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
writel(val, cp->regs + REG_HP_CFG);
}
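/*
 * Worked example of the mtu_stride math above (illustrative): a standard
 * 1500-byte MTU gives size = 1500 + 64 = 1564, which lands in the
 * (0x400, 0x800] bucket, so i = 1 and mtu_stride = 1 << 11 = 2048,
 * i.e. one received frame per 2K stripe of an rx page.
 */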
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
memset(rxc, 0, sizeof(*rxc));
rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}
/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
* flipping is protected by the fact that the chip will not
* hand back the same page index while it's being processed.
*/
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
cas_page_t *page = cp->rx_pages[1][index];
cas_page_t *new;
if (page_count(page->buffer) == 1)
return page;
new = cas_page_dequeue(cp);
if (new) {
spin_lock(&cp->rx_inuse_lock);
list_add(&page->list, &cp->rx_inuse_list);
spin_unlock(&cp->rx_inuse_lock);
}
return new;
}
/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
const int index)
{
cas_page_t **page0 = cp->rx_pages[0];
cas_page_t **page1 = cp->rx_pages[1];
/* swap if buffer is in use */
if (page_count(page0[index]->buffer) > 1) {
cas_page_t *new = cas_page_spare(cp, index);
if (new) {
page1[index] = page0[index];
page0[index] = new;
}
}
RX_USED_SET(page0[index], 0);
return page0[index];
}
static void cas_clean_rxds(struct cas *cp)
{
/* only clean ring 0 as ring 1 is used for spare buffers */
struct cas_rx_desc *rxd = cp->init_rxds[0];
int i, size;
/* release all rx flows */
for (i = 0; i < N_RX_FLOWS; i++) {
struct sk_buff *skb;
while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
cas_skb_release(skb);
}
}
/* initialize descriptors */
size = RX_DESC_RINGN_SIZE(0);
for (i = 0; i < size; i++) {
cas_page_t *page = cas_page_swap(cp, 0, i);
rxd[i].buffer = cpu_to_le64(page->dma_addr);
rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
CAS_BASE(RX_INDEX_RING, 0));
}
cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
cp->rx_last[0] = 0;
cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}
static void cas_clean_rxcs(struct cas *cp)
{
int i, j;
/* take ownership of rx comp descriptors */
memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
for (i = 0; i < N_RX_COMP_RINGS; i++) {
struct cas_rx_comp *rxc = cp->init_rxcs[i];
for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
cas_rxc_init(rxc + j);
}
}
}
#if 0
/* When we get a RX fifo overflow, the RX unit is probably hung
* so we do the following.
*
* If any part of the reset goes wrong, we return 1 and that causes the
* whole chip to be reset.
*/
static int cas_rxmac_reset(struct cas *cp)
{
struct net_device *dev = cp->dev;
int limit;
u32 val;
/* First, reset MAC RX. */
writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
for (limit = 0; limit < STOP_TRIES; limit++) {
if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
break;
udelay(10);
}
if (limit == STOP_TRIES) {
netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
return 1;
}
/* Second, disable RX DMA. */
writel(0, cp->regs + REG_RX_CFG);
for (limit = 0; limit < STOP_TRIES; limit++) {
if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
break;
udelay(10);
}
if (limit == STOP_TRIES) {
netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
return 1;
}
mdelay(5);
/* Execute RX reset command. */
writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
for (limit = 0; limit < STOP_TRIES; limit++) {
if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
break;
udelay(10);
}
if (limit == STOP_TRIES) {
netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
return 1;
}
/* reset driver rx state */
cas_clean_rxds(cp);
cas_clean_rxcs(cp);
/* Now, reprogram the rest of RX unit. */
cas_init_rx_dma(cp);
/* re-enable */
val = readl(cp->regs + REG_RX_CFG);
writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
val = readl(cp->regs + REG_MAC_RX_CFG);
writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
return 0;
}
#endif
static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
u32 status)
{
u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
if (!stat)
return 0;
netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
/* these are all rollovers */
spin_lock(&cp->stat_lock[0]);
if (stat & MAC_RX_ALIGN_ERR)
cp->net_stats[0].rx_frame_errors += 0x10000;
if (stat & MAC_RX_CRC_ERR)
cp->net_stats[0].rx_crc_errors += 0x10000;
if (stat & MAC_RX_LEN_ERR)
cp->net_stats[0].rx_length_errors += 0x10000;
if (stat & MAC_RX_OVERFLOW) {
cp->net_stats[0].rx_over_errors++;
cp->net_stats[0].rx_fifo_errors++;
}
/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
* events.
*/
spin_unlock(&cp->stat_lock[0]);
return 0;
}
static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
u32 status)
{
u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
if (!stat)
return 0;
netif_printk(cp, intr, KERN_DEBUG, cp->dev,
"mac interrupt, stat: 0x%x\n", stat);
/* This interrupt is just for pause frame and pause
* tracking. It is useful for diagnostics and debugging,
* but these events are normally masked by default.
*/
if (stat & MAC_CTRL_PAUSE_STATE)
cp->pause_entered++;
if (stat & MAC_CTRL_PAUSE_RECEIVED)
cp->pause_last_time_recvd = (stat >> 16);
return 0;
}
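/* fallback state machine for a link that won't come up: re-apply the
 * last forced mode, start a forced attempt at the best supported speed,
 * or step the forced mode down (1000 -> 100 full -> 100 half -> 10 half).
 */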
/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
u16 val;
switch (cp->lstate) {
case link_force_ret:
netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
cp->timer_ticks = 5;
cp->lstate = link_force_ok;
cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
break;
case link_aneg:
val = cas_phy_read(cp, MII_BMCR);
/* Try forced modes. we try things in the following order:
* 1000 full -> 100 full/half -> 10 half
*/
val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
val |= BMCR_FULLDPLX;
val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
CAS_BMCR_SPEED1000 : BMCR_SPEED100;
cas_phy_write(cp, MII_BMCR, val);
cp->timer_ticks = 5;
cp->lstate = link_force_try;
cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
break;
case link_force_try:
/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
val = cas_phy_read(cp, MII_BMCR);
cp->timer_ticks = 5;
if (val & CAS_BMCR_SPEED1000) { /* gigabit */
val &= ~CAS_BMCR_SPEED1000;
val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
cas_phy_write(cp, MII_BMCR, val);
break;
}
if (val & BMCR_SPEED100) {
if (val & BMCR_FULLDPLX) /* fd failed */
val &= ~BMCR_FULLDPLX;
else { /* 100Mbps failed */
val &= ~BMCR_SPEED100;
}
cas_phy_write(cp, MII_BMCR, val);
break;
}
default:
break;
}
return 0;
}
/* must be invoked with cp->lock held */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
int restart;
if (bmsr & BMSR_LSTATUS) {
/* Ok, here we got a link. If we had it due to a forced
* fallback, and we were configured for autoneg, we
* retry a short autoneg pass. If you know your hub is
* broken, use ethtool ;)
*/
if ((cp->lstate == link_force_try) &&
(cp->link_cntl & BMCR_ANENABLE)) {
cp->lstate = link_force_ret;
cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
cas_mif_poll(cp, 0);
cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
cp->timer_ticks = 5;
if (cp->opened)
netif_info(cp, link, cp->dev,
"Got link after fallback, retrying autoneg once...\n");
cas_phy_write(cp, MII_BMCR,
cp->link_fcntl | BMCR_ANENABLE |
BMCR_ANRESTART);
cas_mif_poll(cp, 1);
} else if (cp->lstate != link_up) {
cp->lstate = link_up;
cp->link_transition = LINK_TRANSITION_LINK_UP;
if (cp->opened) {
cas_set_link_modes(cp);
netif_carrier_on(cp->dev);
}
}
return 0;
}
/* link not up. if the link was previously up, we restart the
* whole process
*/
restart = 0;
if (cp->lstate == link_up) {
cp->lstate = link_down;
cp->link_transition = LINK_TRANSITION_LINK_DOWN;
netif_carrier_off(cp->dev);
if (cp->opened)
netif_info(cp, link, cp->dev, "Link down\n");
restart = 1;
} else if (++cp->timer_ticks > 10)
cas_mdio_link_not_up(cp);
return restart;
}
static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
u32 status)
{
u32 stat = readl(cp->regs + REG_MIF_STATUS);
u16 bmsr;
/* check for a link change */
if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
return 0;
bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
return cas_mii_link_check(cp, bmsr);
}
static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
u32 status)
{
u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
if (!stat)
return 0;
netdev_err(dev, "PCI error [%04x:%04x]",
stat, readl(cp->regs + REG_BIM_DIAG));
/* cassini+ has this reserved */
if ((stat & PCI_ERR_BADACK) &&
((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
pr_cont(" <No ACK64# during ABS64 cycle>");
if (stat & PCI_ERR_DTRTO)
pr_cont(" <Delayed transaction timeout>");
if (stat & PCI_ERR_OTHER)
pr_cont(" <other>");
if (stat & PCI_ERR_BIM_DMA_WRITE)
pr_cont(" <BIM DMA 0 write req>");
if (stat & PCI_ERR_BIM_DMA_READ)
pr_cont(" <BIM DMA 0 read req>");
pr_cont("\n");
if (stat & PCI_ERR_OTHER) {
u16 cfg;
/* Interrogate PCI config space for the
* true cause.
*/
pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
if (cfg & PCI_STATUS_PARITY)
netdev_err(dev, "PCI parity error detected\n");
if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
netdev_err(dev, "PCI target abort\n");
if (cfg & PCI_STATUS_REC_TARGET_ABORT)
netdev_err(dev, "PCI master acks target abort\n");
if (cfg & PCI_STATUS_REC_MASTER_ABORT)
netdev_err(dev, "PCI master abort\n");
if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
netdev_err(dev, "PCI system error SERR#\n");
if (cfg & PCI_STATUS_DETECTED_PARITY)
netdev_err(dev, "PCI parity error\n");
/* Write the error bits back to clear them. */
cfg &= (PCI_STATUS_PARITY |
PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_TARGET_ABORT |
PCI_STATUS_REC_MASTER_ABORT |
PCI_STATUS_SIG_SYSTEM_ERROR |
PCI_STATUS_DETECTED_PARITY);
pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
}
/* For all PCI errors, we should reset the chip. */
return 1;
}
/* All non-normal interrupt conditions get serviced here.
* Returns non-zero if we should just exit the interrupt
* handler right now (ie. if we reset the card which invalidates
* all of the other original irq status bits).
*/
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
u32 status)
{
if (status & INTR_RX_TAG_ERROR) {
/* corrupt RX tag framing */
netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
"corrupt rx tag framing\n");
spin_lock(&cp->stat_lock[0]);
cp->net_stats[0].rx_errors++;
spin_unlock(&cp->stat_lock[0]);
goto do_reset;
}
if (status & INTR_RX_LEN_MISMATCH) {
/* length mismatch. */
netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
"length mismatch for rx frame\n");
spin_lock(&cp->stat_lock[0]);
cp->net_stats[0].rx_errors++;
spin_unlock(&cp->stat_lock[0]);
goto do_reset;
}
if (status & INTR_PCS_STATUS) {
if (cas_pcs_interrupt(dev, cp, status))
goto do_reset;
}
if (status & INTR_TX_MAC_STATUS) {
if (cas_txmac_interrupt(dev, cp, status))
goto do_reset;
}
if (status & INTR_RX_MAC_STATUS) {
if (cas_rxmac_interrupt(dev, cp, status))
goto do_reset;
}
if (status & INTR_MAC_CTRL_STATUS) {
if (cas_mac_interrupt(dev, cp, status))
goto do_reset;
}
if (status & INTR_MIF_STATUS) {
if (cas_mif_interrupt(dev, cp, status))
goto do_reset;
}
if (status & INTR_PCI_ERROR_STATUS) {
if (cas_pci_interrupt(dev, cp, status))
goto do_reset;
}
return 0;
do_reset:
#if 1
atomic_inc(&cp->reset_task_pending);
atomic_inc(&cp->reset_task_pending_all);
netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
netdev_err(dev, "reset called in cas_abnormal_irq\n");
schedule_work(&cp->reset_task);
#endif
return 1;
}
/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
* determining whether to do a netif_stop/wakeup
*/
#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
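/* target-abort workaround: if a buffer ends within TX_TARGET_ABORT_LEN
 * bytes of a page boundary, redirect that tail through a tiny buffer
 * instead of DMAing it in place. returns the number of bytes to
 * redirect, or 0 if no workaround is needed.
 */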
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
const int len)
{
unsigned long off = addr + len;
if (CAS_TABORT(cp) == 1)
return 0;
if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
return 0;
return TX_TARGET_ABORT_LEN;
}
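/* reclaim completed tx descriptors on one ring up to the given limit:
 * unmap each fragment, skip interleaved tiny buffers, free the skbs,
 * and wake the queue once enough slots are free.
 */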
static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{
struct cas_tx_desc *txds;
struct sk_buff **skbs;
struct net_device *dev = cp->dev;
int entry, count;
spin_lock(&cp->tx_lock[ring]);
txds = cp->init_txds[ring];
skbs = cp->tx_skbs[ring];
entry = cp->tx_old[ring];
count = TX_BUFF_COUNT(ring, entry, limit);
while (entry != limit) {
struct sk_buff *skb = skbs[entry];
dma_addr_t daddr;
u32 dlen;
int frag;
if (!skb) {
/* this should never occur */
entry = TX_DESC_NEXT(ring, entry);
continue;
}
/* however, we might get only a partial skb release. */
count -= skb_shinfo(skb)->nr_frags +
cp->tx_tiny_use[ring][entry].nbufs + 1;
if (count < 0)
break;
netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
"tx[%d] done, slot %d\n", ring, entry);
skbs[entry] = NULL;
cp->tx_tiny_use[ring][entry].nbufs = 0;
for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
struct cas_tx_desc *txd = txds + entry;
daddr = le64_to_cpu(txd->buffer);
dlen = CAS_VAL(TX_DESC_BUFLEN,
le64_to_cpu(txd->control));
pci_unmap_page(cp->pdev, daddr, dlen,
PCI_DMA_TODEVICE);
entry = TX_DESC_NEXT(ring, entry);
/* tiny buffer may follow */
if (cp->tx_tiny_use[ring][entry].used) {
cp->tx_tiny_use[ring][entry].used = 0;
entry = TX_DESC_NEXT(ring, entry);
}
}
spin_lock(&cp->stat_lock[ring]);
cp->net_stats[ring].tx_packets++;
cp->net_stats[ring].tx_bytes += skb->len;
spin_unlock(&cp->stat_lock[ring]);
dev_kfree_skb_irq(skb);
}
cp->tx_old[ring] = entry;
/* this is wrong for multiple tx rings. the net device needs
* multiple queues for this to do the right thing. we wait
* for 2*packets to be available when using tiny buffers
*/
if (netif_queue_stopped(dev) &&
(TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
netif_wake_queue(dev);
spin_unlock(&cp->tx_lock[ring]);
}
static void cas_tx(struct net_device *dev, struct cas *cp,
u32 status)
{
int limit, ring;
#ifdef USE_TX_COMPWB
u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
netif_printk(cp, intr, KERN_DEBUG, cp->dev,
"tx interrupt, status: 0x%x, %llx\n",
status, (unsigned long long)compwb);
/* process all the rings */
for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
/* use the completion writeback registers */
limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
CAS_VAL(TX_COMPWB_LSB, compwb);
compwb = TX_COMPWB_NEXT(compwb);
#else
limit = readl(cp->regs + REG_TX_COMPN(ring));
#endif
if (cp->tx_old[ring] != limit)
cas_tx_ringN(cp, ring, limit);
}
}
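/* build an skb for one rx completion entry. headers and small packets
 * are copied into the skb; larger payloads are attached as page
 * fragments, possibly spanning two pages for split packets. returns the
 * packet length, or -1 if no skb could be allocated.
 */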
static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
int entry, const u64 *words,
struct sk_buff **skbref)
{
int dlen, hlen, len, i, alloclen;
int off, swivel = RX_SWIVEL_OFF_VAL;
struct cas_page *page;
struct sk_buff *skb;
void *addr, *crcaddr;
__sum16 csum;
char *p;
hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
len = hlen + dlen;
if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
alloclen = len;
else
alloclen = max(hlen, RX_COPY_MIN);
skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
if (skb == NULL)
return -1;
*skbref = skb;
skb_reserve(skb, swivel);
p = skb->data;
addr = crcaddr = NULL;
if (hlen) { /* always copy header pages */
i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
swivel;
i = hlen;
if (!dlen) /* attach FCS */
i += cp->crc_size;
pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
PCI_DMA_FROMDEVICE);
addr = cas_page_map(page->buffer);
memcpy(p, addr + off, i);
pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
PCI_DMA_FROMDEVICE);
cas_page_unmap(addr);
RX_USED_ADD(page, 0x100);
p += hlen;
swivel = 0;
}
if (alloclen < (hlen + dlen)) {
skb_frag_t *frag = skb_shinfo(skb)->frags;
/* normal or jumbo packets. we use frags */
i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
hlen = min(cp->page_size - off, dlen);
if (hlen < 0) {
netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
"rx page overflow: %d\n", hlen);
dev_kfree_skb_irq(skb);
return -1;
}
i = hlen;
if (i == dlen) /* attach FCS */
i += cp->crc_size;
pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
PCI_DMA_FROMDEVICE);
/* make sure we always copy a header */
swivel = 0;
if (p == (char *) skb->data) { /* not split */
addr = cas_page_map(page->buffer);
memcpy(p, addr + off, RX_COPY_MIN);
pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
PCI_DMA_FROMDEVICE);
cas_page_unmap(addr);
off += RX_COPY_MIN;
swivel = RX_COPY_MIN;
RX_USED_ADD(page, cp->mtu_stride);
} else {
RX_USED_ADD(page, hlen);
}
skb_put(skb, alloclen);
skb_shinfo(skb)->nr_frags++;
skb->data_len += hlen - swivel;
skb->truesize += hlen - swivel;
skb->len += hlen - swivel;
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
frag->page_offset = off;
skb_frag_size_set(frag, hlen - swivel);
/* any more data? */
if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
hlen = dlen;
off = 0;
i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
hlen + cp->crc_size,
PCI_DMA_FROMDEVICE);
pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
hlen + cp->crc_size,
PCI_DMA_FROMDEVICE);
skb_shinfo(skb)->nr_frags++;
skb->data_len += hlen;
skb->len += hlen;
frag++;
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
frag->page_offset = 0;
skb_frag_size_set(frag, hlen);
RX_USED_ADD(page, hlen + cp->crc_size);
}
if (cp->crc_size) {
addr = cas_page_map(page->buffer);
crcaddr = addr + off + hlen;
}
} else {
/* copying packet */
if (!dlen)
goto end_copy_pkt;
i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
hlen = min(cp->page_size - off, dlen);
if (hlen < 0) {
netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
"rx page overflow: %d\n", hlen);
dev_kfree_skb_irq(skb);
return -1;
}
i = hlen;
if (i == dlen) /* attach FCS */
i += cp->crc_size;
pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
PCI_DMA_FROMDEVICE);
addr = cas_page_map(page->buffer);
memcpy(p, addr + off, i);
pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
PCI_DMA_FROMDEVICE);
cas_page_unmap(addr);
if (p == (char *) skb->data) /* not split */
RX_USED_ADD(page, cp->mtu_stride);
else
RX_USED_ADD(page, i);
/* any more data? */
if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
p += hlen;
i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
dlen + cp->crc_size,
PCI_DMA_FROMDEVICE);
addr = cas_page_map(page->buffer);
memcpy(p, addr, dlen + cp->crc_size);
pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
dlen + cp->crc_size,
PCI_DMA_FROMDEVICE);
cas_page_unmap(addr);
RX_USED_ADD(page, dlen + cp->crc_size);
}
end_copy_pkt:
if (cp->crc_size) {
addr = NULL;
crcaddr = skb->data + alloclen;
}
skb_put(skb, alloclen);
}
csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
if (cp->crc_size) {
/* checksum includes FCS. strip it out. */
csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
csum_unfold(csum)));
if (addr)
cas_page_unmap(addr);
}
skb->protocol = eth_type_trans(skb, cp->dev);
if (skb->protocol == htons(ETH_P_IP)) {
skb->csum = csum_unfold(~csum);
skb->ip_summed = CHECKSUM_COMPLETE;
} else
skb_checksum_none_assert(skb);
return len;
}
/* we can handle up to 64 rx flows at a time. we do the same thing
* as nonreassm except that we batch up the buffers.
* NOTE: we currently just treat each flow as a bunch of packets that
* we pass up. a better way would be to coalesce the packets
* into a jumbo packet. to do that, we need to do the following:
* 1) the first packet will have a clean split between header and
* data. save both.
* 2) each time the next flow packet comes in, extend the
* data length and merge the checksums.
* 3) on flow release, fix up the header.
* 4) make sure the higher layer doesn't care.
* because packets get coalesced, we shouldn't run into fragment count
* issues.
*/
static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
struct sk_buff *skb)
{
int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
struct sk_buff_head *flow = &cp->rx_flows[flowid];
/* this is protected at a higher layer, so no need to
* do any additional locking here. stick the buffer
* at the end.
*/
__skb_queue_tail(flow, skb);
if (words[0] & RX_COMP1_RELEASE_FLOW) {
while ((skb = __skb_dequeue(flow))) {
cas_skb_release(skb);
}
}
}
/* put rx descriptor back on ring. if a buffer is in use by a higher
* layer, this will need to put in a replacement.
*/
static void cas_post_page(struct cas *cp, const int ring, const int index)
{
cas_page_t *new;
int entry;
entry = cp->rx_old[ring];
new = cas_page_swap(cp, ring, index);
cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
cp->init_rxds[ring][entry].index =
cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
CAS_BASE(RX_INDEX_RING, ring));
entry = RX_DESC_ENTRY(ring, entry + 1);
cp->rx_old[ring] = entry;
if (entry % 4)
return;
if (ring == 0)
writel(entry, cp->regs + REG_RX_KICK);
else if ((N_RX_DESC_RINGS > 1) &&
(cp->cas_flags & CAS_FLAG_REG_PLUS))
writel(entry, cp->regs + REG_PLUS_RX_KICK1);
}
/* only when things are bad */
static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
{
unsigned int entry, last, count, released;
int cluster;
cas_page_t **page = cp->rx_pages[ring];
entry = cp->rx_old[ring];
netif_printk(cp, intr, KERN_DEBUG, cp->dev,
"rxd[%d] interrupt, done: %d\n", ring, entry);
cluster = -1;
count = entry & 0x3;
last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
released = 0;
while (entry != last) {
/* make a new buffer if it's still in use */
if (page_count(page[entry]->buffer) > 1) {
cas_page_t *new = cas_page_dequeue(cp);
if (!new) {
/* let the timer know that we need to
* do this again
*/
cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
if (!timer_pending(&cp->link_timer))
mod_timer(&cp->link_timer, jiffies +
CAS_LINK_FAST_TIMEOUT);
cp->rx_old[ring] = entry;
cp->rx_last[ring] = num ? num - released : 0;
return -ENOMEM;
}
spin_lock(&cp->rx_inuse_lock);
list_add(&page[entry]->list, &cp->rx_inuse_list);
spin_unlock(&cp->rx_inuse_lock);
cp->init_rxds[ring][entry].buffer =
cpu_to_le64(new->dma_addr);
page[entry] = new;
}
if (++count == 4) {
cluster = entry;
count = 0;
}
released++;
entry = RX_DESC_ENTRY(ring, entry + 1);
}
cp->rx_old[ring] = entry;
if (cluster < 0)
return 0;
if (ring == 0)
writel(cluster, cp->regs + REG_RX_KICK);
else if ((N_RX_DESC_RINGS > 1) &&
(cp->cas_flags & CAS_FLAG_REG_PLUS))
writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
return 0;
}
/* process a completion ring. packets are set up in three basic ways:
* small packets: should be copied header + data in single buffer.
* large packets: header and data in a single buffer.
* split packets: header in a separate buffer from data.
* data may be in multiple pages. data may be > 256
* bytes but in a single page.
*
* NOTE: RX page posting is done in this routine as well. while there's
* the capability of using multiple RX completion rings, it isn't
* really worthwhile due to the fact that the page posting will
* force serialization on the single descriptor ring.
*/
static int cas_rx_ringN(struct cas *cp, int ring, int budget)
{
struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
int entry, drops;
int npackets = 0;
netif_printk(cp, intr, KERN_DEBUG, cp->dev,
"rx[%d] interrupt, done: %d/%d\n",
ring,
readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
entry = cp->rx_new[ring];
drops = 0;
while (1) {
struct cas_rx_comp *rxc = rxcs + entry;
struct sk_buff *uninitialized_var(skb);
int type, len;
u64 words[4];
int i, dring;
words[0] = le64_to_cpu(rxc->word1);
words[1] = le64_to_cpu(rxc->word2);
words[2] = le64_to_cpu(rxc->word3);
words[3] = le64_to_cpu(rxc->word4);
/* don't touch if still owned by hw */
type = CAS_VAL(RX_COMP1_TYPE, words[0]);
if (type == 0)
break;
/* hw hasn't cleared the zero bit yet */
if (words[3] & RX_COMP4_ZERO) {
break;
}
/* get info on the packet */
if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
spin_lock(&cp->stat_lock[ring]);
cp->net_stats[ring].rx_errors++;
if (words[3] & RX_COMP4_LEN_MISMATCH)
cp->net_stats[ring].rx_length_errors++;
if (words[3] & RX_COMP4_BAD)
cp->net_stats[ring].rx_crc_errors++;
spin_unlock(&cp->stat_lock[ring]);
/* We'll just return it to Cassini. */
drop_it:
spin_lock(&cp->stat_lock[ring]);
++cp->net_stats[ring].rx_dropped;
spin_unlock(&cp->stat_lock[ring]);
goto next;
}
len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
if (len < 0) {
++drops;
goto drop_it;
}
/* see if it's a flow re-assembly or not. the driver
* itself handles release back up.
*/
if (RX_DONT_BATCH || (type == 0x2)) {
/* non-reassm: these always get released */
cas_skb_release(skb);
} else {
cas_rx_flow_pkt(cp, words, skb);
}
spin_lock(&cp->stat_lock[ring]);
cp->net_stats[ring].rx_packets++;
cp->net_stats[ring].rx_bytes += len;
spin_unlock(&cp->stat_lock[ring]);
next:
npackets++;
/* should it be released? */
if (words[0] & RX_COMP1_RELEASE_HDR) {
i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
dring = CAS_VAL(RX_INDEX_RING, i);
i = CAS_VAL(RX_INDEX_NUM, i);
cas_post_page(cp, dring, i);
}
if (words[0] & RX_COMP1_RELEASE_DATA) {
i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
dring = CAS_VAL(RX_INDEX_RING, i);
i = CAS_VAL(RX_INDEX_NUM, i);
cas_post_page(cp, dring, i);
}
if (words[0] & RX_COMP1_RELEASE_NEXT) {
i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
dring = CAS_VAL(RX_INDEX_RING, i);
i = CAS_VAL(RX_INDEX_NUM, i);
cas_post_page(cp, dring, i);
}
/* skip to the next entry */
entry = RX_COMP_ENTRY(ring, entry + 1 +
CAS_VAL(RX_COMP1_SKIP, words[0]));
#ifdef USE_NAPI
if (budget && (npackets >= budget))
break;
#endif
}
cp->rx_new[ring] = entry;
if (drops)
netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
return npackets;
}
/* put completion entries back on the ring */
static void cas_post_rxcs_ringN(struct net_device *dev,
struct cas *cp, int ring)
{
struct cas_rx_comp *rxc = cp->init_rxcs[ring];
int last, entry;
last = cp->rx_cur[ring];
entry = cp->rx_new[ring];
netif_printk(cp, intr, KERN_DEBUG, dev,
"rxc[%d] interrupt, done: %d/%d\n",
ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
/* zero and re-mark descriptors */
while (last != entry) {
cas_rxc_init(rxc + last);
last = RX_COMP_ENTRY(ring, last + 1);
}
cp->rx_cur[ring] = last;
if (ring == 0)
writel(last, cp->regs + REG_RX_COMP_TAIL);
else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
}
/* cassini can use all four PCI interrupts for the completion ring.
* rings 3 and 4 are identical
*/
#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
static inline void cas_handle_irqN(struct net_device *dev,
struct cas *cp, const u32 status,
const int ring)
{
if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
cas_post_rxcs_ringN(dev, cp, ring);
}
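/* interrupt handler for completion rings 2 and 3 (PCI INTC/INTD). rx
 * work is handed off to NAPI when enabled; everything else goes through
 * cas_handle_irqN.
 */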
static irqreturn_t cas_interruptN(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct cas *cp = netdev_priv(dev);
unsigned long flags;
int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
/* check for shared irq */
if (status == 0)
return IRQ_NONE;
spin_lock_irqsave(&cp->lock, flags);
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
cas_mask_intr(cp);
napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, ring, 0);
#endif
status &= ~INTR_RX_DONE_ALT;
}
if (status)
cas_handle_irqN(dev, cp, status, ring);
spin_unlock_irqrestore(&cp->lock, flags);
return IRQ_HANDLED;
}
#endif
#ifdef USE_PCI_INTB
/* everything but rx packets */
static inline void cas_handle_irq1(struct cas *cp, const u32 status)
{
if (status & INTR_RX_BUF_UNAVAIL_1) {
/* Frame arrived, no free RX buffers available.
* NOTE: we can get this on a link transition. */
cas_post_rxds_ringN(cp, 1, 0);
spin_lock(&cp->stat_lock[1]);
cp->net_stats[1].rx_dropped++;
spin_unlock(&cp->stat_lock[1]);
}
if (status & INTR_RX_BUF_AE_1)
cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
RX_AE_FREEN_VAL(1));
if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
cas_post_rxcs_ringN(cp, 1);
}
/* ring 2 handles a few more events than 3 and 4 */
static irqreturn_t cas_interrupt1(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct cas *cp = netdev_priv(dev);
unsigned long flags;
u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
/* check for shared interrupt */
if (status == 0)
return IRQ_NONE;
spin_lock_irqsave(&cp->lock, flags);
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
cas_mask_intr(cp);
napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, 1, 0);
#endif
status &= ~INTR_RX_DONE_ALT;
}
if (status)
cas_handle_irq1(cp, status);
spin_unlock_irqrestore(&cp->lock, flags);
return IRQ_HANDLED;
}
#endif
static inline void cas_handle_irq(struct net_device *dev,
struct cas *cp, const u32 status)
{
/* housekeeping interrupts */
if (status & INTR_ERROR_MASK)
cas_abnormal_irq(dev, cp, status);
if (status & INTR_RX_BUF_UNAVAIL) {
/* Frame arrived, no free RX buffers available.
* NOTE: we can get this on a link transition.
*/
cas_post_rxds_ringN(cp, 0, 0);
spin_lock(&cp->stat_lock[0]);
cp->net_stats[0].rx_dropped++;
spin_unlock(&cp->stat_lock[0]);
} else if (status & INTR_RX_BUF_AE) {
cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
RX_AE_FREEN_VAL(0));
}
if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
cas_post_rxcs_ringN(dev, cp, 0);
}
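/* primary interrupt handler (INTA): services tx completions, rx on
 * ring 0, and the housekeeping events in cas_handle_irq.
 */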
static irqreturn_t cas_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct cas *cp = netdev_priv(dev);
unsigned long flags;
u32 status = readl(cp->regs + REG_INTR_STATUS);
if (status == 0)
return IRQ_NONE;
spin_lock_irqsave(&cp->lock, flags);
if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
cas_tx(dev, cp, status);
status &= ~(INTR_TX_ALL | INTR_TX_INTME);
}
if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
cas_mask_intr(cp);
napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, 0, 0);
#endif
status &= ~INTR_RX_DONE;
}
if (status)
cas_handle_irq(dev, cp, status);
spin_unlock_irqrestore(&cp->lock, flags);
return IRQ_HANDLED;
}
#ifdef USE_NAPI
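/* NAPI poll: reap tx completions, spread the budget across the rx
 * completion rings, then run the housekeeping handlers and re-enable
 * interrupts if the budget wasn't exhausted.
 */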
static int cas_poll(struct napi_struct *napi, int budget)
{
struct cas *cp = container_of(napi, struct cas, napi);
struct net_device *dev = cp->dev;
int i, enable_intr, credits;
u32 status = readl(cp->regs + REG_INTR_STATUS);
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
cas_tx(dev, cp, status);
spin_unlock_irqrestore(&cp->lock, flags);
/* NAPI rx packets. we spread the credits across all of the
* rxc rings
*
* to make sure we're fair with the work we loop through each
* ring N_RX_COMP_RINGS times with a request of
* budget / N_RX_COMP_RINGS
*/
enable_intr = 1;
credits = 0;
for (i = 0; i < N_RX_COMP_RINGS; i++) {
int j;
for (j = 0; j < N_RX_COMP_RINGS; j++) {
credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
if (credits >= budget) {
enable_intr = 0;
goto rx_comp;
}
}
}
rx_comp:
/* final rx completion */
spin_lock_irqsave(&cp->lock, flags);
if (status)
cas_handle_irq(dev, cp, status);
#ifdef USE_PCI_INTB
if (N_RX_COMP_RINGS > 1) {
status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
if (status)
cas_handle_irq1(cp, status);
}
#endif
#ifdef USE_PCI_INTC
if (N_RX_COMP_RINGS > 2) {
status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
if (status)
cas_handle_irqN(dev, cp, status, 2);
}
#endif
#ifdef USE_PCI_INTD
if (N_RX_COMP_RINGS > 3) {
status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
if (status)
cas_handle_irqN(dev, cp, status, 3);
}
#endif
spin_unlock_irqrestore(&cp->lock, flags);
if (enable_intr) {
napi_complete(napi);
cas_unmask_intr(cp);
}
return credits;
}
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cas_netpoll(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
cas_disable_irq(cp, 0);
cas_interrupt(cp->pdev->irq, dev);
cas_enable_irq(cp, 0);
#ifdef USE_PCI_INTB
if (N_RX_COMP_RINGS > 1) {
/* cas_interrupt1(); */
}
#endif
#ifdef USE_PCI_INTC
if (N_RX_COMP_RINGS > 2) {
/* cas_interruptN(); */
}
#endif
#ifdef USE_PCI_INTD
if (N_RX_COMP_RINGS > 3) {
/* cas_interruptN(); */
}
#endif
}
#endif
static void cas_tx_timeout(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
netdev_err(dev, "transmit timed out, resetting\n");
if (!cp->hw_running) {
netdev_err(dev, "hrm.. hw not running!\n");
return;
}
netdev_err(dev, "MIF_STATE[%08x]\n",
readl(cp->regs + REG_MIF_STATE_MACHINE));
netdev_err(dev, "MAC_STATE[%08x]\n",
readl(cp->regs + REG_MAC_STATE_MACHINE));
netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
readl(cp->regs + REG_TX_CFG),
readl(cp->regs + REG_MAC_TX_STATUS),
readl(cp->regs + REG_MAC_TX_CFG),
readl(cp->regs + REG_TX_FIFO_PKT_CNT),
readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
readl(cp->regs + REG_TX_FIFO_READ_PTR),
readl(cp->regs + REG_TX_SM_1),
readl(cp->regs + REG_TX_SM_2));
netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
readl(cp->regs + REG_RX_CFG),
readl(cp->regs + REG_MAC_RX_STATUS),
readl(cp->regs + REG_MAC_RX_CFG));
netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
readl(cp->regs + REG_HP_STATE_MACHINE),
readl(cp->regs + REG_HP_STATUS0),
readl(cp->regs + REG_HP_STATUS1),
readl(cp->regs + REG_HP_STATUS2));
#if 1
atomic_inc(&cp->reset_task_pending);
atomic_inc(&cp->reset_task_pending_all);
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
schedule_work(&cp->reset_task);
#endif
}
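/* e.g. with a 64-entry tx ring, (size >> 1) - 1 == 31, so the INTME
 * bit is set on entries 0 and 32, i.e. twice per trip around the ring.
 */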
static inline int cas_intme(int ring, int entry)
{
/* Algorithm: IRQ every 1/2 of descriptors. */
if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
return 1;
return 0;
}
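/* fill in one tx descriptor: the buffer length, interrupt-me, and EOF
 * bits are folded into the control word along with the caller's
 * SOF/checksum flags.
 */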
static void cas_write_txd(struct cas *cp, int ring, int entry,
dma_addr_t mapping, int len, u64 ctrl, int last)
{
struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
if (cas_intme(ring, entry))
ctrl |= TX_DESC_INTME;
if (last)
ctrl |= TX_DESC_EOF;
txd->control = cpu_to_le64(ctrl);
txd->buffer = cpu_to_le64(mapping);
}
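/* tiny buffers are small per-ring staging areas (tx_tiny_bufs, with
 * tx_tiny_dvma holding their DMA addresses) used for the target-abort
 * workaround. usage is counted on the entry that starts the packet
 * (tentry) so reclaim knows how many extra descriptors to skip.
 */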
static inline void *tx_tiny_buf(struct cas *cp, const int ring,
const int entry)
{
return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
}
static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
const int entry, const int tentry)
{
cp->tx_tiny_use[ring][tentry].nbufs++;
cp->tx_tiny_use[ring][entry].used = 1;
return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
}
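/* queue one skb on a tx ring: map the linear data and each fragment,
 * applying the tiny-buffer workaround where a buffer ends too close to
 * a page boundary, then kick the ring. returns 1 if the ring is full.
 */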
static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
struct sk_buff *skb)
{
struct net_device *dev = cp->dev;
int entry, nr_frags, frag, tabort, tentry;
dma_addr_t mapping;
unsigned long flags;
u64 ctrl;
u32 len;
spin_lock_irqsave(&cp->tx_lock[ring], flags);
/* This is a hard error, log it. */
if (TX_BUFFS_AVAIL(cp, ring) <=
CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return 1;
}
ctrl = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
const u64 csum_start_off = skb_checksum_start_offset(skb);
const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
ctrl = TX_DESC_CSUM_EN |
CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
}
entry = cp->tx_new[ring];
cp->tx_skbs[ring][entry] = skb;
nr_frags = skb_shinfo(skb)->nr_frags;
len = skb_headlen(skb);
mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
offset_in_page(skb->data), len,
PCI_DMA_TODEVICE);
tentry = entry;
tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
if (unlikely(tabort)) {
/* NOTE: len is always > tabort */
cas_write_txd(cp, ring, entry, mapping, len - tabort,
ctrl | TX_DESC_SOF, 0);
entry = TX_DESC_NEXT(ring, entry);
skb_copy_from_linear_data_offset(skb, len - tabort,
tx_tiny_buf(cp, ring, entry), tabort);
mapping = tx_tiny_map(cp, ring, entry, tentry);
cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
(nr_frags == 0));
} else {
cas_write_txd(cp, ring, entry, mapping, len, ctrl |
TX_DESC_SOF, (nr_frags == 0));
}
entry = TX_DESC_NEXT(ring, entry);
for (frag = 0; frag < nr_frags; frag++) {
const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
len = skb_frag_size(fragp);
mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
DMA_TO_DEVICE);
tabort = cas_calc_tabort(cp, fragp->page_offset, len);
if (unlikely(tabort)) {
void *addr;
/* NOTE: len is always > tabort */
cas_write_txd(cp, ring, entry, mapping, len - tabort,
ctrl, 0);
entry = TX_DESC_NEXT(ring, entry);
addr = cas_page_map(skb_frag_page(fragp));
memcpy(tx_tiny_buf(cp, ring, entry),
addr + fragp->page_offset + len - tabort,
tabort);
cas_page_unmap(addr);
mapping = tx_tiny_map(cp, ring, entry, tentry);
len = tabort;
}
cas_write_txd(cp, ring, entry, mapping, len, ctrl,
(frag + 1 == nr_frags));
entry = TX_DESC_NEXT(ring, entry);
}
cp->tx_new[ring] = entry;
if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
netif_printk(cp, tx_queued, KERN_DEBUG, dev,
"tx[%d] queued, slot %d, skblen %d, avail %d\n",
ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
writel(entry, cp->regs + REG_TX_KICKN(ring));
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
return 0;
}
static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
/* this is only used as a load-balancing hint, so it doesn't
* need to be SMP safe
*/
static int ring;
if (skb_padto(skb, cp->min_frame_size))
return NETDEV_TX_OK;
/* XXX: we need some higher-level QoS hooks to steer packets to
* individual queues.
*/
if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
return NETDEV_TX_BUSY;
return NETDEV_TX_OK;
}
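/* program the tx DMA engine: completion writeback address, per-ring
 * descriptor bases, and max burst sizes.
 */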
static void cas_init_tx_dma(struct cas *cp)
{
u64 desc_dma = cp->block_dvma;
unsigned long off;
u32 val;
int i;
/* set up tx completion writeback registers. must be 8-byte aligned */
#ifdef USE_TX_COMPWB
off = offsetof(struct cas_init_block, tx_compwb);
writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
#endif
/* enable completion writebacks, enable paced mode,
* disable read pipe, and disable pre-interrupt compwbs
*/
val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
TX_CFG_INTR_COMPWB_DIS;
/* write out tx ring info and tx desc bases */
for (i = 0; i < MAX_TX_RINGS; i++) {
off = (unsigned long) cp->init_txds[i] -
(unsigned long) cp->init_block;
val |= CAS_TX_RINGN_BASE(i);
writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
writel((desc_dma + off) & 0xffffffff, cp->regs +
REG_TX_DBN_LOW(i));
/* don't zero out the kick register here as the system
* will wedge
*/
}
writel(val, cp->regs + REG_TX_CFG);
/* program max burst sizes. these numbers should be different
* if doing QoS.
*/
#ifdef USE_QOS
writel(0x800, cp->regs + REG_TX_MAXBURST_0);
writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
#else
writel(0x800, cp->regs + REG_TX_MAXBURST_0);
writel(0x800, cp->regs + REG_TX_MAXBURST_1);
writel(0x800, cp->regs + REG_TX_MAXBURST_2);
writel(0x800, cp->regs + REG_TX_MAXBURST_3);
#endif
}
/* Must be invoked under cp->lock. */
static inline void cas_init_dma(struct cas *cp)
{
cas_init_tx_dma(cp);
cas_init_rx_dma(cp);
}
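/* load the multicast filter: the first CAS_MC_EXACT_MATCH_SIZE
 * addresses go into the alternate exact-match registers and the rest
 * are hashed into the 256-bit hash table (the top 8 CRC bits select
 * word crc >> 4, bit 15 - (crc & 0xf)).
 */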
static void cas_process_mc_list(struct cas *cp)
{
u16 hash_table[16];
u32 crc;
struct netdev_hw_addr *ha;
int i = 1;
memset(hash_table, 0, sizeof(hash_table));
netdev_for_each_mc_addr(ha, cp->dev) {
if (i <= CAS_MC_EXACT_MATCH_SIZE) {
/* use the alternate mac address registers for the
* first 15 multicast addresses
*/
writel((ha->addr[4] << 8) | ha->addr[5],
cp->regs + REG_MAC_ADDRN(i*3 + 0));
writel((ha->addr[2] << 8) | ha->addr[3],
cp->regs + REG_MAC_ADDRN(i*3 + 1));
writel((ha->addr[0] << 8) | ha->addr[1],
cp->regs + REG_MAC_ADDRN(i*3 + 2));
i++;
}
else {
/* use hw hash table for the next series of
* multicast addresses
*/
crc = ether_crc_le(ETH_ALEN, ha->addr);
crc >>= 24;
hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
}
for (i = 0; i < 16; i++)
writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
}
/* Must be invoked under cp->lock. */
static u32 cas_setup_multicast(struct cas *cp)
{
u32 rxcfg = 0;
int i;
if (cp->dev->flags & IFF_PROMISC) {
rxcfg |= MAC_RX_CFG_PROMISC_EN;
} else if (cp->dev->flags & IFF_ALLMULTI) {
for (i=0; i < 16; i++)
writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
} else {
cas_process_mc_list(cp);
rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
}
return rxcfg;
}
/* must be invoked under cp->stat_lock[N_TX_RINGS] */
static void cas_clear_mac_err(struct cas *cp)
{
writel(0, cp->regs + REG_MAC_COLL_NORMAL);
writel(0, cp->regs + REG_MAC_COLL_FIRST);
writel(0, cp->regs + REG_MAC_COLL_EXCESS);
writel(0, cp->regs + REG_MAC_COLL_LATE);
writel(0, cp->regs + REG_MAC_TIMER_DEFER);
writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
writel(0, cp->regs + REG_MAC_RECV_FRAME);
writel(0, cp->regs + REG_MAC_LEN_ERR);
writel(0, cp->regs + REG_MAC_ALIGN_ERR);
writel(0, cp->regs + REG_MAC_FCS_ERR);
writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
}
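/* reset both MACs and wait for the self-clearing reset bits to drop */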
static void cas_mac_reset(struct cas *cp)
{
int i;
/* do both TX and RX reset */
writel(0x1, cp->regs + REG_MAC_TX_RESET);
writel(0x1, cp->regs + REG_MAC_RX_RESET);
/* wait for TX */
i = STOP_TRIES;
while (i-- > 0) {
if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
break;
udelay(10);
}
/* wait for RX */
i = STOP_TRIES;
while (i-- > 0) {
if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
break;
udelay(10);
}
if (readl(cp->regs + REG_MAC_TX_RESET) |
readl(cp->regs + REG_MAC_RX_RESET))
netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
readl(cp->regs + REG_MAC_TX_RESET),
readl(cp->regs + REG_MAC_RX_RESET),
readl(cp->regs + REG_MAC_STATE_MACHINE));
}
/* Must be invoked under cp->lock. */
static void cas_init_mac(struct cas *cp)
{
unsigned char *e = &cp->dev->dev_addr[0];
int i;
cas_mac_reset(cp);
/* setup core arbitration weight register */
writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
/* XXX Use pci_dma_burst_advice() */
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
/* set the infinite burst register for chips that don't have
* pci issues.
*/
if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
#endif
writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
writel(0x00, cp->regs + REG_MAC_IPG0);
writel(0x08, cp->regs + REG_MAC_IPG1);
writel(0x04, cp->regs + REG_MAC_IPG2);
/* change later for 802.3z */
writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
/* min frame + FCS */
writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
* specify the maximum frame size to prevent RX tag errors on
* oversized frames.
*/
writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
cp->regs + REG_MAC_FRAMESIZE_MAX);
/* NOTE: crc_size is used as a surrogate for half-duplex.
* workaround saturn half-duplex issue by increasing preamble
* size to 65 bytes.
*/
if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
writel(0x41, cp->regs + REG_MAC_PA_SIZE);
else
writel(0x07, cp->regs + REG_MAC_PA_SIZE);
writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
/* setup mac address in perfect filter array */
for (i = 0; i < 45; i++)
writel(0x0, cp->regs + REG_MAC_ADDRN(i));
writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
cp->mac_rx_cfg = cas_setup_multicast(cp);
spin_lock(&cp->stat_lock[N_TX_RINGS]);
cas_clear_mac_err(cp);
spin_unlock(&cp->stat_lock[N_TX_RINGS]);
/* Setup MAC interrupts. We want to get all of the interesting
* counter expiration events, but we do not want to hear about
* normal rx/tx as the DMA engine tells us that.
*/
writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
/* Don't enable even the PAUSE interrupts for now, we
* make no use of those events other than to record them.
*/
writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
}
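/* a worked example for the pause thresholds below, assuming a 16KB rx
 * fifo and a 1500-byte MTU: max_frame = (1500 + 14 + 4 + 4 + 64) & ~63
 * = 1536, so off = 16384 - 2*1536 = 13312 and on = 13312 - 1536 = 11776.
 */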
/* Must be invoked under cp->lock. */
static void cas_init_pause_thresholds(struct cas *cp)
{
/* Calculate pause thresholds. Setting the OFF threshold to the
* full RX fifo size effectively disables PAUSE generation
*/
if (cp->rx_fifo_size <= (2 * 1024)) {
cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
} else {
int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
if (max_frame * 3 > cp->rx_fifo_size) {
cp->rx_pause_off = 7104;
cp->rx_pause_on = 960;
} else {
int off = (cp->rx_fifo_size - (max_frame * 2));
int on = off - max_frame;
cp->rx_pause_off = off;
cp->rx_pause_on = on;
}
}
}
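/* compare a C string, including its terminating NUL, against bytes
 * read from the expansion ROM
 */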
static int cas_vpd_match(const void __iomem *p, const char *str)
{
int len = strlen(str) + 1;
int i;
for (i = 0; i < len; i++) {
if (readb(p + i) != str[i])
return 0;
}
return 1;
}
/* get the mac address by reading the vpd information in the rom.
* also get the phy type and determine if there's an entropy generator.
* NOTE: this is a bit convoluted for the following reasons:
* 1) vpd info has order-dependent mac addresses for multinic cards
* 2) the only way to determine the nic order is to use the slot
* number.
* 3) fiber cards don't have bridges, so their slot numbers don't
* mean anything.
* 4) we don't actually know we have a fiber card until after
* the mac addresses are parsed.
*/
static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
const int offset)
{
void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
void __iomem *base, *kstart;
int i, len;
int found = 0;
#define VPD_FOUND_MAC 0x01
#define VPD_FOUND_PHY 0x02
int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
int mac_off = 0;
#if defined(CONFIG_SPARC)
const unsigned char *addr;
#endif
/* give us access to the PROM */
writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
cp->regs + REG_BIM_LOCAL_DEV_EN);
/* check for an expansion rom */
if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
goto use_random_mac_addr;
/* search for beginning of vpd */
base = NULL;
for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
/* check for PCIR */
if ((readb(p + i + 0) == 0x50) &&
(readb(p + i + 1) == 0x43) &&
(readb(p + i + 2) == 0x49) &&
(readb(p + i + 3) == 0x52)) {
base = p + (readb(p + i + 8) |
(readb(p + i + 9) << 8));
break;
}
}
if (!base || (readb(base) != 0x82))
goto use_random_mac_addr;
i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
while (i < EXPANSION_ROM_SIZE) {
if (readb(base + i) != 0x90) /* no vpd found */
goto use_random_mac_addr;
/* found a vpd field */
len = readb(base + i + 1) | (readb(base + i + 2) << 8);
/* extract keywords */
kstart = base + i + 3;
p = kstart;
while ((p - kstart) < len) {
int klen = readb(p + 2);
int j;
char type;
p += 3;
/* look for the following things:
* -- correct length == 29
* 3 (type) + 2 (size) +
* 18 (strlen("local-mac-address") + 1) +
* 6 (mac addr)
* -- VPD Instance 'I'
* -- VPD Type Bytes 'B'
* -- VPD data length == 6
* -- property string == local-mac-address
*
* -- correct length == 24
* 3 (type) + 2 (size) +
* 12 (strlen("entropy-dev") + 1) +
* 7 (strlen("vms110") + 1)
* -- VPD Instance 'I'
* -- VPD Type String 'S'
* -- VPD data length == 7
* -- property string == entropy-dev
*
* -- correct length == 18
* 3 (type) + 2 (size) +
* 9 (strlen("phy-type") + 1) +
* 4 (strlen("pcs") + 1)
* -- VPD Instance 'I'
* -- VPD Type String 'S'
* -- VPD data length == 4
* -- property string == phy-type
*
* -- correct length == 23
* 3 (type) + 2 (size) +
* 14 (strlen("phy-interface") + 1) +
* 4 (strlen("pcs") + 1)
* -- VPD Instance 'I'
* -- VPD Type String 'S'
* -- VPD data length == 4
* -- property string == phy-interface
*/
if (readb(p) != 'I')
goto next;
/* finally, check string and length */
type = readb(p + 3);
if (type == 'B') {
if ((klen == 29) && readb(p + 4) == 6 &&
cas_vpd_match(p + 5,
"local-mac-address")) {
if (mac_off++ > offset)
goto next;
/* set mac address */
for (j = 0; j < 6; j++)
dev_addr[j] =
readb(p + 23 + j);
goto found_mac;
}
}
if (type != 'S')
goto next;
#ifdef USE_ENTROPY_DEV
if ((klen == 24) &&
cas_vpd_match(p + 5, "entropy-dev") &&
cas_vpd_match(p + 17, "vms110")) {
cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
goto next;
}
#endif
if (found & VPD_FOUND_PHY)
goto next;
if ((klen == 18) && readb(p + 4) == 4 &&
cas_vpd_match(p + 5, "phy-type")) {
if (cas_vpd_match(p + 14, "pcs")) {
phy_type = CAS_PHY_SERDES;
goto found_phy;
}
}
if ((klen == 23) && readb(p + 4) == 4 &&
cas_vpd_match(p + 5, "phy-interface")) {
if (cas_vpd_match(p + 19, "pcs")) {
phy_type = CAS_PHY_SERDES;
goto found_phy;
}
}
found_mac:
found |= VPD_FOUND_MAC;
goto next;
found_phy:
found |= VPD_FOUND_PHY;
next:
p += klen;
}
i += len + 3;
}
use_random_mac_addr:
if (found & VPD_FOUND_MAC)
goto done;
#if defined(CONFIG_SPARC)
addr = of_get_property(cp->of_node, "local-mac-address", NULL);
if (addr != NULL) {
memcpy(dev_addr, addr, 6);
goto done;
}
#endif
/* Sun MAC prefix then 3 random bytes. */
pr_info("MAC address not found in ROM VPD\n");
dev_addr[0] = 0x08;
dev_addr[1] = 0x00;
dev_addr[2] = 0x20;
get_random_bytes(dev_addr + 3, 3);
done:
writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
return phy_type;
}
/* check pci invariants */
static void cas_check_pci_invariants(struct cas *cp)
{
struct pci_dev *pdev = cp->pdev;
cp->cas_flags = 0;
if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
(pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
if (pdev->revision >= CAS_ID_REVPLUS)
cp->cas_flags |= CAS_FLAG_REG_PLUS;
if (pdev->revision < CAS_ID_REVPLUS02u)
cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
/* Original Cassini supports HW CSUM, but it's not
* enabled by default as it can trigger TX hangs.
*/
if (pdev->revision < CAS_ID_REV2)
cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
} else {
/* Only sun has original cassini chips. */
cp->cas_flags |= CAS_FLAG_REG_PLUS;
/* We use a flag because the same phy might be externally
* connected.
*/
if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
(pdev->device == PCI_DEVICE_ID_NS_SATURN))
cp->cas_flags |= CAS_FLAG_SATURN;
}
}
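/* determine rx page size, fifo sizes, phy type and address, and
 * gigabit capability. returns -1 if no MII phy responds.
 */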
static int cas_check_invariants(struct cas *cp)
{
struct pci_dev *pdev = cp->pdev;
u32 cfg;
int i;
/* get page size for rx buffers. */
cp->page_order = 0;
#ifdef USE_PAGE_ORDER
if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
/* see if we can allocate larger pages */
struct page *page = alloc_pages(GFP_ATOMIC,
CAS_JUMBO_PAGE_SHIFT -
PAGE_SHIFT);
if (page) {
__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
} else {
printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
}
}
#endif
cp->page_size = (PAGE_SIZE << cp->page_order);
/* Fetch the FIFO configurations. */
cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
cp->rx_fifo_size = RX_FIFO_SIZE;
/* finish phy determination. MDIO1 takes precedence over MDIO0 if
* they're both connected.
*/
cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
PCI_SLOT(pdev->devfn));
if (cp->phy_type & CAS_PHY_SERDES) {
cp->cas_flags |= CAS_FLAG_1000MB_CAP;
return 0; /* no more checking needed */
}
/* MII */
cfg = readl(cp->regs + REG_MIF_CFG);
if (cfg & MIF_CFG_MDIO_1) {
cp->phy_type = CAS_PHY_MII_MDIO1;
} else if (cfg & MIF_CFG_MDIO_0) {
cp->phy_type = CAS_PHY_MII_MDIO0;
}
cas_mif_poll(cp, 0);
writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
for (i = 0; i < 32; i++) {
u32 phy_id;
int j;
for (j = 0; j < 3; j++) {
cp->phy_addr = i;
phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
phy_id |= cas_phy_read(cp, MII_PHYSID2);
if (phy_id && (phy_id != 0xFFFFFFFF)) {
cp->phy_id = phy_id;
goto done;
}
}
}
pr_err("MII phy did not respond [%08x]\n",
readl(cp->regs + REG_MIF_STATE_MACHINE));
return -1;
done:
/* see if we can do gigabit */
cfg = cas_phy_read(cp, MII_BMSR);
if ((cfg & CAS_BMSR_1000_EXTEND) &&
cas_phy_read(cp, CAS_MII_1000_EXTEND))
cp->cas_flags |= CAS_FLAG_1000MB_CAP;
return 0;
}
/* Must be invoked under cp->lock. */
static inline void cas_start_dma(struct cas *cp)
{
int i;
u32 val;
int txfailed = 0;
/* enable dma */
val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
writel(val, cp->regs + REG_TX_CFG);
val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
writel(val, cp->regs + REG_RX_CFG);
/* enable the mac */
val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
writel(val, cp->regs + REG_MAC_TX_CFG);
val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
writel(val, cp->regs + REG_MAC_RX_CFG);
i = STOP_TRIES;
while (i-- > 0) {
val = readl(cp->regs + REG_MAC_TX_CFG);
if ((val & MAC_TX_CFG_EN))
break;
udelay(10);
}
if (i < 0) txfailed = 1;
i = STOP_TRIES;
while (i-- > 0) {
val = readl(cp->regs + REG_MAC_RX_CFG);
if ((val & MAC_RX_CFG_EN)) {
if (txfailed) {
netdev_err(cp->dev,
"enabling mac failed [tx:%08x:%08x]\n",
readl(cp->regs + REG_MIF_STATE_MACHINE),
readl(cp->regs + REG_MAC_STATE_MACHINE));
}
goto enable_rx_done;
}
udelay(10);
}
netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
(txfailed ? "tx,rx" : "rx"),
readl(cp->regs + REG_MIF_STATE_MACHINE),
readl(cp->regs + REG_MAC_STATE_MACHINE));
enable_rx_done:
cas_unmask_intr(cp); /* enable interrupts */
writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
writel(0, cp->regs + REG_RX_COMP_TAIL);
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
if (N_RX_DESC_RINGS > 1)
writel(RX_DESC_RINGN_SIZE(1) - 4,
cp->regs + REG_PLUS_RX_KICK1);
for (i = 1; i < N_RX_COMP_RINGS; i++)
writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
}
}
/* Must be invoked under cp->lock. */
static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
int *pause)
{
u32 val = readl(cp->regs + REG_PCS_MII_LPA);
*fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
*pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
if (val & PCS_MII_LPA_ASYM_PAUSE)
*pause |= 0x10;
*spd = 1000;
}
/* Must be invoked under cp->lock. */
static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
int *pause)
{
u32 val;
*fd = 0;
*spd = 10;
*pause = 0;
/* use GMII registers */
val = cas_phy_read(cp, MII_LPA);
if (val & CAS_LPA_PAUSE)
*pause = 0x01;
if (val & CAS_LPA_ASYM_PAUSE)
*pause |= 0x10;
if (val & LPA_DUPLEX)
*fd = 1;
if (val & LPA_100)
*spd = 100;
if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
val = cas_phy_read(cp, CAS_MII_1000_STATUS);
if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
*spd = 1000;
if (val & CAS_LPA_1000FULL)
*fd = 1;
}
}
/* A link-up condition has occurred, initialize and enable the
* rest of the chip.
*
* Must be invoked under cp->lock.
*/
static void cas_set_link_modes(struct cas *cp)
{
u32 val;
int full_duplex, speed, pause;
full_duplex = 0;
speed = 10;
pause = 0;
if (CAS_PHY_MII(cp->phy_type)) {
cas_mif_poll(cp, 0);
val = cas_phy_read(cp, MII_BMCR);
if (val & BMCR_ANENABLE) {
cas_read_mii_link_mode(cp, &full_duplex, &speed,
&pause);
} else {
if (val & BMCR_FULLDPLX)
full_duplex = 1;
if (val & BMCR_SPEED100)
speed = 100;
else if (val & CAS_BMCR_SPEED1000)
speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1000 : 100;
}
cas_mif_poll(cp, 1);
} else {
val = readl(cp->regs + REG_PCS_MII_CTRL);
cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
if ((val & PCS_MII_AUTONEG_EN) == 0) {
if (val & PCS_MII_CTRL_DUPLEX)
full_duplex = 1;
}
}
netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
speed, full_duplex ? "full" : "half");
val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
if (CAS_PHY_MII(cp->phy_type)) {
val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
if (!full_duplex)
val |= MAC_XIF_DISABLE_ECHO;
}
if (full_duplex)
val |= MAC_XIF_FDPLX_LED;
if (speed == 1000)
val |= MAC_XIF_GMII_MODE;
writel(val, cp->regs + REG_MAC_XIF_CFG);
/* deal with carrier and collision detect. */
val = MAC_TX_CFG_IPG_EN;
if (full_duplex) {
val |= MAC_TX_CFG_IGNORE_CARRIER;
val |= MAC_TX_CFG_IGNORE_COLL;
} else {
#ifndef USE_CSMA_CD_PROTO
val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
#endif
}
/* val now set up for REG_MAC_TX_CFG */
/* If gigabit and half-duplex, enable carrier extension
* mode. increase slot time to 512 bytes as well.
* else, disable it and make sure slot time is 64 bytes.
* also activate checksum bug workaround
*/
if ((speed == 1000) && !full_duplex) {
writel(val | MAC_TX_CFG_CARRIER_EXTEND,
cp->regs + REG_MAC_TX_CFG);
val = readl(cp->regs + REG_MAC_RX_CFG);
val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
writel(val | MAC_RX_CFG_CARRIER_EXTEND,
cp->regs + REG_MAC_RX_CFG);
writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
cp->crc_size = 4;
/* minimum size gigabit frame at half duplex */
cp->min_frame_size = CAS_1000MB_MIN_FRAME;
} else {
writel(val, cp->regs + REG_MAC_TX_CFG);
/* checksum bug workaround. don't strip FCS when in
* half-duplex mode
*/
val = readl(cp->regs + REG_MAC_RX_CFG);
if (full_duplex) {
val |= MAC_RX_CFG_STRIP_FCS;
cp->crc_size = 0;
cp->min_frame_size = CAS_MIN_MTU;
} else {
val &= ~MAC_RX_CFG_STRIP_FCS;
cp->crc_size = 4;
cp->min_frame_size = CAS_MIN_FRAME;
}
writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
cp->regs + REG_MAC_RX_CFG);
writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
}
if (netif_msg_link(cp)) {
if (pause & 0x01) {
netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
cp->rx_fifo_size,
cp->rx_pause_off,
cp->rx_pause_on);
} else if (pause & 0x10) {
netdev_info(cp->dev, "TX pause enabled\n");
} else {
netdev_info(cp->dev, "Pause is disabled\n");
}
}
val = readl(cp->regs + REG_MAC_CTRL_CFG);
val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
if (pause) { /* symmetric or asymmetric pause */
val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
if (pause & 0x01) { /* symmetric pause */
val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
}
}
writel(val, cp->regs + REG_MAC_CTRL_CFG);
cas_start_dma(cp);
}
/* Must be invoked under cp->lock. */
static void cas_init_hw(struct cas *cp, int restart_link)
{
if (restart_link)
cas_phy_init(cp);
cas_init_pause_thresholds(cp);
cas_init_mac(cp);
cas_init_dma(cp);
if (restart_link) {
/* Default aneg parameters */
cp->timer_ticks = 0;
cas_begin_auto_negotiation(cp, NULL);
} else if (cp->lstate == link_up) {
cas_set_link_modes(cp);
netif_carrier_on(cp->dev);
}
}
/* Must be invoked under cp->lock. on earlier cassini boards,
* SOFT_0 is tied to PCI reset. we use this to force a pci reset,
* let it settle out, and then restore pci state.
*/
static void cas_hard_reset(struct cas *cp)
{
writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
udelay(20);
pci_restore_state(cp->pdev);
}
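/* software-reset the tx and rx units and reprogram the BIM interrupt
 * and PCI error masks. blkflag preserves the PCS serial-link state
 * across the reset.
 */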
static void cas_global_reset(struct cas *cp, int blkflag)
{
int limit;
/* issue a global reset. don't use RSTOUT. */
if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
/* For PCS, when the blkflag is set, we should set the
* SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
* the last autonegotiation from being cleared. We'll
* need some special handling if the chip is set into a
* loopback mode.
*/
writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
cp->regs + REG_SW_RESET);
} else {
writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
}
/* need to wait at least 3ms before polling register */
mdelay(3);
limit = STOP_TRIES;
while (limit-- > 0) {
u32 val = readl(cp->regs + REG_SW_RESET);
if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
goto done;
udelay(10);
}
netdev_err(cp->dev, "sw reset failed\n");
done:
/* enable various BIM interrupts */
writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
/* clear out pci error status mask for handled errors.
* we don't deal with DMA counter overflows as they happen
* all the time.
*/
writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
PCI_ERR_BIM_DMA_READ), cp->regs +
REG_PCI_ERR_STATUS_MASK);
/* set up for MII by default to address mac rx reset timeout
* issue
*/
writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
}
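/* full chip reset: global reset, MAC reset, entropy reset, disable
 * both DMA engines, reload the header-parser firmware, and clear the
 * MAC error counters.
 */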
static void cas_reset(struct cas *cp, int blkflag)
{
u32 val;
cas_mask_intr(cp);
cas_global_reset(cp, blkflag);
cas_mac_reset(cp);
cas_entropy_reset(cp);
/* disable dma engines. */
val = readl(cp->regs + REG_TX_CFG);
val &= ~TX_CFG_DMA_EN;
writel(val, cp->regs + REG_TX_CFG);
val = readl(cp->regs + REG_RX_CFG);
val &= ~RX_CFG_DMA_EN;
writel(val, cp->regs + REG_RX_CFG);
/* program header parser */
if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
(CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
cas_load_firmware(cp, CAS_HP_FIRMWARE);
} else {
cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
}
/* clear out error registers */
spin_lock(&cp->stat_lock[N_TX_RINGS]);
cas_clear_mac_err(cp);
spin_unlock(&cp->stat_lock[N_TX_RINGS]);
}
/* Shut down the chip, must be called with pm_mutex held. */
static void cas_shutdown(struct cas *cp)
{
unsigned long flags;
/* Make us not-running to avoid timers respawning */
cp->hw_running = 0;
del_timer_sync(&cp->link_timer);
/* Stop the reset task */
#if 0
while (atomic_read(&cp->reset_task_pending_mtu) ||
atomic_read(&cp->reset_task_pending_spare) ||
atomic_read(&cp->reset_task_pending_all))
schedule();
#else
while (atomic_read(&cp->reset_task_pending))
schedule();
#endif
/* Actually stop the chip */
cas_lock_all_save(cp, flags);
cas_reset(cp, 0);
if (cp->cas_flags & CAS_FLAG_SATURN)
cas_phy_powerdown(cp);
cas_unlock_all_restore(cp, flags);
}
static int cas_change_mtu(struct net_device *dev, int new_mtu)
{
struct cas *cp = netdev_priv(dev);
if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
return -EINVAL;
dev->mtu = new_mtu;
if (!netif_running(dev) || !netif_device_present(dev))
return 0;
/* let the reset task handle it */
#if 1
atomic_inc(&cp->reset_task_pending);
if ((cp->phy_type & CAS_PHY_SERDES)) {
atomic_inc(&cp->reset_task_pending_all);
} else {
atomic_inc(&cp->reset_task_pending_mtu);
}
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
CAS_RESET_ALL : CAS_RESET_MTU);
pr_err("reset called in cas_change_mtu\n");
schedule_work(&cp->reset_task);
#endif
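/* wait for the scheduled reset task to finish, so the new MTU
* is actually in effect when we return to the caller.
*/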
flush_work(&cp->reset_task);
return 0;
}
static void cas_clean_txd(struct cas *cp, int ring)
{
struct cas_tx_desc *txd = cp->init_txds[ring];
struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
u64 daddr, dlen;
int i, size;
size = TX_DESC_RINGN_SIZE(ring);
for (i = 0; i < size; i++) {
int frag;
if (skbs[i] == NULL)
continue;
skb = skbs[i];
skbs[i] = NULL;
for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
int ent = i & (size - 1);
/* first buffer is never a tiny buffer and so
* needs to be unmapped.
*/
daddr = le64_to_cpu(txd[ent].buffer);
dlen = CAS_VAL(TX_DESC_BUFLEN,
le64_to_cpu(txd[ent].control));
pci_unmap_page(cp->pdev, daddr, dlen,
PCI_DMA_TODEVICE);
if (frag != skb_shinfo(skb)->nr_frags) {
i++;
/* next buffer might be a tiny buffer.
* skip past it.
*/
ent = i & (size - 1);
if (cp->tx_tiny_use[ring][ent].used)
i++;
}
}
dev_kfree_skb_any(skb);
}
/* zero out tiny buf usage */
memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
}
/* freed on close */
static inline void cas_free_rx_desc(struct cas *cp, int ring)
{
cas_page_t **page = cp->rx_pages[ring];
int i, size;
size = RX_DESC_RINGN_SIZE(ring);
for (i = 0; i < size; i++) {
if (page[i]) {
cas_page_free(cp, page[i]);
page[i] = NULL;
}
}
}
static void cas_free_rxds(struct cas *cp)
{
int i;
for (i = 0; i < N_RX_DESC_RINGS; i++)
cas_free_rx_desc(cp, i);
}
/* Must be invoked under cp->lock. */
static void cas_clean_rings(struct cas *cp)
{
int i;
/* need to clean all tx rings */
memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
for (i = 0; i < N_TX_RINGS; i++)
cas_clean_txd(cp, i);
/* zero out init block */
memset(cp->init_block, 0, sizeof(struct cas_init_block));
cas_clean_rxds(cp);
cas_clean_rxcs(cp);
}
/* allocated on open */
static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
{
cas_page_t **page = cp->rx_pages[ring];
int size, i = 0;
size = RX_DESC_RINGN_SIZE(ring);
for (i = 0; i < size; i++) {
if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
return -1;
}
return 0;
}
static int cas_alloc_rxds(struct cas *cp)
{
int i;
for (i = 0; i < N_RX_DESC_RINGS; i++) {
if (cas_alloc_rx_desc(cp, i) < 0) {
cas_free_rxds(cp);
return -1;
}
}
return 0;
}
static void cas_reset_task(struct work_struct *work)
{
struct cas *cp = container_of(work, struct cas, reset_task);
#if 0
int pending = atomic_read(&cp->reset_task_pending);
#else
int pending_all = atomic_read(&cp->reset_task_pending_all);
int pending_spare = atomic_read(&cp->reset_task_pending_spare);
int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
/* We can have more tasks scheduled than actually
* needed.
*/
atomic_dec(&cp->reset_task_pending);
return;
}
#endif
/* The link went down, we reset the ring, but keep
* DMA stopped. Use this function for reset
* on error as well.
*/
if (cp->hw_running) {
unsigned long flags;
/* Make sure we don't get interrupts or tx packets */
netif_device_detach(cp->dev);
cas_lock_all_save(cp, flags);
if (cp->opened) {
/* We call cas_spare_recover when we call cas_open,
* but we do not initialize the lists cas_spare_recover
* uses until cas_open is called.
*/
cas_spare_recover(cp, GFP_ATOMIC);
}
#if 1
/* test => only pending_spare set */
if (!pending_all && !pending_mtu)
goto done;
#else
if (pending == CAS_RESET_SPARE)
goto done;
#endif
/* when pending == CAS_RESET_ALL, the following
* call to cas_init_hw will restart auto negotiation.
* Setting the second argument of cas_reset to
* !(pending == CAS_RESET_ALL) will set this argument
* to 1 (avoiding reinitializing the PHY for the normal
* PCS case) when auto negotiation is not restarted.
*/
#if 1
cas_reset(cp, !(pending_all > 0));
if (cp->opened)
cas_clean_rings(cp);
cas_init_hw(cp, (pending_all > 0));
#else
cas_reset(cp, !(pending == CAS_RESET_ALL));
if (cp->opened)
cas_clean_rings(cp);
cas_init_hw(cp, pending == CAS_RESET_ALL);
#endif
done:
cas_unlock_all_restore(cp, flags);
netif_device_attach(cp->dev);
}
#if 1
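/* Retire only the requests we sampled at entry; any that arrived
* while we were resetting stay counted and are handled by the work
* instance that was scheduled for them.
*/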
atomic_sub(pending_all, &cp->reset_task_pending_all);
atomic_sub(pending_spare, &cp->reset_task_pending_spare);
atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
atomic_dec(&cp->reset_task_pending);
#else
atomic_set(&cp->reset_task_pending, 0);
#endif
}
static void cas_link_timer(unsigned long data)
{
struct cas *cp = (struct cas *) data;
int mask, pending = 0, reset = 0;
unsigned long flags;
if (link_transition_timeout != 0 &&
cp->link_transition_jiffies_valid &&
((jiffies - cp->link_transition_jiffies) >
(link_transition_timeout))) {
/* One-second counter so link-down workaround doesn't
* cause resets to occur so fast as to fool the switch
* into thinking the link is down.
*/
cp->link_transition_jiffies_valid = 0;
}
if (!cp->hw_running)
return;
spin_lock_irqsave(&cp->lock, flags);
cas_lock_tx(cp);
cas_entropy_gather(cp);
/* If the link task is still pending, we just
* reschedule the link timer
*/
#if 1
if (atomic_read(&cp->reset_task_pending_all) ||
atomic_read(&cp->reset_task_pending_spare) ||
atomic_read(&cp->reset_task_pending_mtu))
goto done;
#else
if (atomic_read(&cp->reset_task_pending))
goto done;
#endif
/* check for rx cleaning */
if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
int i, rmask;
for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
rmask = CAS_FLAG_RXD_POST(i);
if ((mask & rmask) == 0)
continue;
/* post_rxds will do a mod_timer */
if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
pending = 1;
continue;
}
cp->cas_flags &= ~rmask;
}
}
if (CAS_PHY_MII(cp->phy_type)) {
u16 bmsr;
cas_mif_poll(cp, 0);
bmsr = cas_phy_read(cp, MII_BMSR);
/* WTZ: Solaris driver reads this twice, but that
* may be due to the PCS case and the use of a
* common implementation. Read it twice here to be
* safe.
*/
bmsr = cas_phy_read(cp, MII_BMSR);
cas_mif_poll(cp, 1);
readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
reset = cas_mii_link_check(cp, bmsr);
} else {
reset = cas_pcs_link_check(cp);
}
if (reset)
goto done;
/* check for tx state machine confusion */
if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
u32 wptr, rptr;
int tlm = CAS_VAL(MAC_SM_TLM, val);
if (((tlm == 0x5) || (tlm == 0x3)) &&
(CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
"tx err: MAC_STATE[%08x]\n", val);
reset = 1;
goto done;
}
val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
if ((val == 0) && (wptr != rptr)) {
netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
"tx err: TX_FIFO[%08x:%08x:%08x]\n",
val, wptr, rptr);
reset = 1;
}
if (reset)
cas_hard_reset(cp);
}
done:
if (reset) {
#if 1
atomic_inc(&cp->reset_task_pending);
atomic_inc(&cp->reset_task_pending_all);
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
pr_err("reset called in cas_link_timer\n");
schedule_work(&cp->reset_task);
#endif
}
if (!pending)
mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
cas_unlock_tx(cp);
spin_unlock_irqrestore(&cp->lock, flags);
}
/* tiny buffers are used to avoid target abort issues with
* older cassinis
*/
static void cas_tx_tiny_free(struct cas *cp)
{
struct pci_dev *pdev = cp->pdev;
int i;
for (i = 0; i < N_TX_RINGS; i++) {
if (!cp->tx_tiny_bufs[i])
continue;
pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
cp->tx_tiny_bufs[i],
cp->tx_tiny_dvma[i]);
cp->tx_tiny_bufs[i] = NULL;
}
}
static int cas_tx_tiny_alloc(struct cas *cp)
{
struct pci_dev *pdev = cp->pdev;
int i;
for (i = 0; i < N_TX_RINGS; i++) {
cp->tx_tiny_bufs[i] =
pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
&cp->tx_tiny_dvma[i]);
if (!cp->tx_tiny_bufs[i]) {
cas_tx_tiny_free(cp);
return -1;
}
}
return 0;
}
static int cas_open(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
int hw_was_up, err;
unsigned long flags;
mutex_lock(&cp->pm_mutex);
hw_was_up = cp->hw_running;
/* The power-management mutex protects the hw_running
* etc. state so it is safe to do this bit without cp->lock
*/
if (!cp->hw_running) {
/* Reset the chip */
cas_lock_all_save(cp, flags);
/* We set the second arg to cas_reset to zero
* because cas_init_hw below will have its second
* argument set to non-zero, which will force
* autonegotiation to start.
*/
cas_reset(cp, 0);
cp->hw_running = 1;
cas_unlock_all_restore(cp, flags);
}
err = -ENOMEM;
if (cas_tx_tiny_alloc(cp) < 0)
goto err_unlock;
/* alloc rx descriptors */
if (cas_alloc_rxds(cp) < 0)
goto err_tx_tiny;
/* allocate spares */
cas_spare_init(cp);
cas_spare_recover(cp, GFP_KERNEL);
/* We can now request the interrupt as we know it's masked
* on the controller. cassini+ has up to 4 interrupts
* that can be used, but you need to do explicit pci interrupt
* mapping to expose them
*/
if (request_irq(cp->pdev->irq, cas_interrupt,
IRQF_SHARED, dev->name, (void *) dev)) {
netdev_err(cp->dev, "failed to request irq !\n");
err = -EAGAIN;
goto err_spare;
}
#ifdef USE_NAPI
napi_enable(&cp->napi);
#endif
/* init hw */
cas_lock_all_save(cp, flags);
cas_clean_rings(cp);
cas_init_hw(cp, !hw_was_up);
cp->opened = 1;
cas_unlock_all_restore(cp, flags);
netif_start_queue(dev);
mutex_unlock(&cp->pm_mutex);
return 0;
err_spare:
cas_spare_free(cp);
cas_free_rxds(cp);
err_tx_tiny:
cas_tx_tiny_free(cp);
err_unlock:
mutex_unlock(&cp->pm_mutex);
return err;
}
static int cas_close(struct net_device *dev)
{
unsigned long flags;
struct cas *cp = netdev_priv(dev);
#ifdef USE_NAPI
napi_disable(&cp->napi);
#endif
/* Make sure we don't get distracted by suspend/resume */
mutex_lock(&cp->pm_mutex);
netif_stop_queue(dev);
/* Stop traffic, mark us closed */
cas_lock_all_save(cp, flags);
cp->opened = 0;
cas_reset(cp, 0);
cas_phy_init(cp);
cas_begin_auto_negotiation(cp, NULL);
cas_clean_rings(cp);
cas_unlock_all_restore(cp, flags);
free_irq(cp->pdev->irq, (void *) dev);
cas_spare_free(cp);
cas_free_rxds(cp);
cas_tx_tiny_free(cp);
mutex_unlock(&cp->pm_mutex);
return 0;
}
static struct {
const char name[ETH_GSTRING_LEN];
} ethtool_cassini_statnames[] = {
{"collisions"},
{"rx_bytes"},
{"rx_crc_errors"},
{"rx_dropped"},
{"rx_errors"},
{"rx_fifo_errors"},
{"rx_frame_errors"},
{"rx_length_errors"},
{"rx_over_errors"},
{"rx_packets"},
{"tx_aborted_errors"},
{"tx_bytes"},
{"tx_dropped"},
{"tx_errors"},
{"tx_fifo_errors"},
{"tx_packets"}
};
#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
static struct {
const int offsets; /* neg. values for 2nd arg to cas_read_phy */
} ethtool_register_table[] = {
{-MII_BMSR},
{-MII_BMCR},
{REG_CAWR},
{REG_INF_BURST},
{REG_BIM_CFG},
{REG_RX_CFG},
{REG_HP_CFG},
{REG_MAC_TX_CFG},
{REG_MAC_RX_CFG},
{REG_MAC_CTRL_CFG},
{REG_MAC_XIF_CFG},
{REG_MIF_CFG},
{REG_PCS_CFG},
{REG_SATURN_PCFG},
{REG_PCS_MII_STATUS},
{REG_PCS_STATE_MACHINE},
{REG_MAC_COLL_EXCESS},
{REG_MAC_COLL_LATE}
};
#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
{
u8 *p;
int i;
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
u16 hval;
u32 val;
if (ethtool_register_table[i].offsets < 0) {
hval = cas_phy_read(cp,
-ethtool_register_table[i].offsets);
val = hval;
} else {
val= readl(cp->regs+ethtool_register_table[i].offsets);
}
memcpy(p, (u8 *)&val, sizeof(u32));
}
spin_unlock_irqrestore(&cp->lock, flags);
}
static struct net_device_stats *cas_get_stats(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
struct net_device_stats *stats = cp->net_stats;
unsigned long flags;
int i;
unsigned long tmp;
/* we collate all of the stats into net_stats[N_TX_RINGS] */
if (!cp->hw_running)
return stats + N_TX_RINGS;
/* collect outstanding stats */
/* WTZ: the Cassini spec gives these as 16 bit counters but
* stored in 32-bit words. Added a mask of 0xffff to be safe,
* in case the chip somehow puts any garbage in the other bits.
* Also, counter usage didn't seem to match what Adrian did
* in the parts of the code that set these quantities. Made
* that consistent.
*/
spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
stats[N_TX_RINGS].rx_crc_errors +=
readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
stats[N_TX_RINGS].rx_frame_errors +=
readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
stats[N_TX_RINGS].rx_length_errors +=
readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
#if 1
tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
stats[N_TX_RINGS].tx_aborted_errors += tmp;
stats[N_TX_RINGS].collisions +=
tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
#else
stats[N_TX_RINGS].tx_aborted_errors +=
readl(cp->regs + REG_MAC_COLL_EXCESS);
stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
readl(cp->regs + REG_MAC_COLL_LATE);
#endif
cas_clear_mac_err(cp);
/* saved bits that are unique to ring 0 */
spin_lock(&cp->stat_lock[0]);
stats[N_TX_RINGS].collisions += stats[0].collisions;
stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
spin_unlock(&cp->stat_lock[0]);
for (i = 0; i < N_TX_RINGS; i++) {
spin_lock(&cp->stat_lock[i]);
stats[N_TX_RINGS].rx_length_errors +=
stats[i].rx_length_errors;
stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
memset(stats + i, 0, sizeof(struct net_device_stats));
spin_unlock(&cp->stat_lock[i]);
}
spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
return stats + N_TX_RINGS;
}
static void cas_set_multicast(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
u32 rxcfg, rxcfg_new;
unsigned long flags;
int limit = STOP_TRIES;
if (!cp->hw_running)
return;
spin_lock_irqsave(&cp->lock, flags);
rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
/* disable RX MAC and wait for completion */
writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
if (!limit--)
break;
udelay(10);
}
/* disable hash filter and wait for completion */
limit = STOP_TRIES;
rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
if (!limit--)
break;
udelay(10);
}
/* program hash filters */
cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
rxcfg |= rxcfg_new;
writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
spin_unlock_irqrestore(&cp->lock, flags);
}
static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct cas *cp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
cp->casreg_len : CAS_MAX_REGS;
info->n_stats = CAS_NUM_STAT_KEYS;
}
static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct cas *cp = netdev_priv(dev);
u16 bmcr;
int full_duplex, speed, pause;
unsigned long flags;
enum link_state linkstate = link_up;
cmd->advertising = 0;
cmd->supported = SUPPORTED_Autoneg;
if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
cmd->supported |= SUPPORTED_1000baseT_Full;
cmd->advertising |= ADVERTISED_1000baseT_Full;
}
/* Record PHY settings if HW is on. */
spin_lock_irqsave(&cp->lock, flags);
bmcr = 0;
linkstate = cp->lstate;
if (CAS_PHY_MII(cp->phy_type)) {
cmd->port = PORT_MII;
cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->phy_address = cp->phy_addr;
cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
cmd->supported |=
(SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_TP | SUPPORTED_MII);
if (cp->hw_running) {
cas_mif_poll(cp, 0);
bmcr = cas_phy_read(cp, MII_BMCR);
cas_read_mii_link_mode(cp, &full_duplex,
&speed, &pause);
cas_mif_poll(cp, 1);
}
} else {
cmd->port = PORT_FIBRE;
cmd->transceiver = XCVR_INTERNAL;
cmd->phy_address = 0;
cmd->supported |= SUPPORTED_FIBRE;
cmd->advertising |= ADVERTISED_FIBRE;
if (cp->hw_running) {
/* pcs uses the same bits as mii */
bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
cas_read_pcs_link_mode(cp, &full_duplex,
&speed, &pause);
}
}
spin_unlock_irqrestore(&cp->lock, flags);
if (bmcr & BMCR_ANENABLE) {
cmd->advertising |= ADVERTISED_Autoneg;
cmd->autoneg = AUTONEG_ENABLE;
ethtool_cmd_speed_set(cmd, ((speed == 10) ?
SPEED_10 :
((speed == 1000) ?
SPEED_1000 : SPEED_100)));
cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
cmd->autoneg = AUTONEG_DISABLE;
ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
SPEED_1000 :
((bmcr & BMCR_SPEED100) ?
SPEED_100 : SPEED_10)));
cmd->duplex =
(bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
if (linkstate != link_up) {
/* Force these to "unknown" if the link is not up and
* autonegotiation is enabled. We can set the link
* speed to 0, but not cmd->duplex,
* because its legal values are 0 and 1. Ethtool will
* print the value reported in parentheses after the
* word "Unknown" for unrecognized values.
*
* If in forced mode, we report the speed and duplex
* settings that we configured.
*/
if (cp->link_cntl & BMCR_ANENABLE) {
ethtool_cmd_speed_set(cmd, 0);
cmd->duplex = 0xff;
} else {
ethtool_cmd_speed_set(cmd, SPEED_10);
if (cp->link_cntl & BMCR_SPEED100) {
ethtool_cmd_speed_set(cmd, SPEED_100);
} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
ethtool_cmd_speed_set(cmd, SPEED_1000);
}
cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
DUPLEX_FULL : DUPLEX_HALF;
}
}
return 0;
}
static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct cas *cp = netdev_priv(dev);
unsigned long flags;
u32 speed = ethtool_cmd_speed(cmd);
/* Verify the settings we care about. */
if (cmd->autoneg != AUTONEG_ENABLE &&
cmd->autoneg != AUTONEG_DISABLE)
return -EINVAL;
if (cmd->autoneg == AUTONEG_DISABLE &&
((speed != SPEED_1000 &&
speed != SPEED_100 &&
speed != SPEED_10) ||
(cmd->duplex != DUPLEX_HALF &&
cmd->duplex != DUPLEX_FULL)))
return -EINVAL;
/* Apply settings and restart link process. */
spin_lock_irqsave(&cp->lock, flags);
cas_begin_auto_negotiation(cp, cmd);
spin_unlock_irqrestore(&cp->lock, flags);
return 0;
}
static int cas_nway_reset(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
unsigned long flags;
if ((cp->link_cntl & BMCR_ANENABLE) == 0)
return -EINVAL;
/* Restart link process. */
spin_lock_irqsave(&cp->lock, flags);
cas_begin_auto_negotiation(cp, NULL);
spin_unlock_irqrestore(&cp->lock, flags);
return 0;
}
static u32 cas_get_link(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
return cp->lstate == link_up;
}
static u32 cas_get_msglevel(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
return cp->msg_enable;
}
static void cas_set_msglevel(struct net_device *dev, u32 value)
{
struct cas *cp = netdev_priv(dev);
cp->msg_enable = value;
}
static int cas_get_regs_len(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
}
static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
struct cas *cp = netdev_priv(dev);
regs->version = 0;
/* cas_read_regs handles locks (cp->lock). */
cas_read_regs(cp, p, regs->len / sizeof(u32));
}
static int cas_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return CAS_NUM_STAT_KEYS;
default:
return -EOPNOTSUPP;
}
}
static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
memcpy(data, &ethtool_cassini_statnames,
CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
}
static void cas_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats, u64 *data)
{
struct cas *cp = netdev_priv(dev);
struct net_device_stats *stats = cas_get_stats(cp->dev);
int i = 0;
data[i++] = stats->collisions;
data[i++] = stats->rx_bytes;
data[i++] = stats->rx_crc_errors;
data[i++] = stats->rx_dropped;
data[i++] = stats->rx_errors;
data[i++] = stats->rx_fifo_errors;
data[i++] = stats->rx_frame_errors;
data[i++] = stats->rx_length_errors;
data[i++] = stats->rx_over_errors;
data[i++] = stats->rx_packets;
data[i++] = stats->tx_aborted_errors;
data[i++] = stats->tx_bytes;
data[i++] = stats->tx_dropped;
data[i++] = stats->tx_errors;
data[i++] = stats->tx_fifo_errors;
data[i++] = stats->tx_packets;
BUG_ON(i != CAS_NUM_STAT_KEYS);
}
static const struct ethtool_ops cas_ethtool_ops = {
.get_drvinfo = cas_get_drvinfo,
.get_settings = cas_get_settings,
.set_settings = cas_set_settings,
.nway_reset = cas_nway_reset,
.get_link = cas_get_link,
.get_msglevel = cas_get_msglevel,
.set_msglevel = cas_set_msglevel,
.get_regs_len = cas_get_regs_len,
.get_regs = cas_get_regs,
.get_sset_count = cas_get_sset_count,
.get_strings = cas_get_strings,
.get_ethtool_stats = cas_get_ethtool_stats,
};
static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct cas *cp = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(ifr);
unsigned long flags;
int rc = -EOPNOTSUPP;
/* Hold the PM mutex while doing ioctl's or we may collide
* with open/close and power management and oops.
*/
mutex_lock(&cp->pm_mutex);
switch (cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = cp->phy_addr;
/* Fallthrough... */
case SIOCGMIIREG: /* Read MII PHY register. */
spin_lock_irqsave(&cp->lock, flags);
cas_mif_poll(cp, 0);
data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
cas_mif_poll(cp, 1);
spin_unlock_irqrestore(&cp->lock, flags);
rc = 0;
break;
case SIOCSMIIREG: /* Write MII PHY register. */
spin_lock_irqsave(&cp->lock, flags);
cas_mif_poll(cp, 0);
rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
cas_mif_poll(cp, 1);
spin_unlock_irqrestore(&cp->lock, flags);
break;
default:
break;
}
mutex_unlock(&cp->pm_mutex);
return rc;
}
/* When this chip sits underneath an Intel 31154 bridge, it is the
* only subordinate device and we can tweak the bridge settings to
* reflect that fact.
*/
static void cas_program_bridge(struct pci_dev *cas_pdev)
{
struct pci_dev *pdev = cas_pdev->bus->self;
u32 val;
if (!pdev)
return;
if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
return;
/* Clear bit 10 (Bus Parking Control) in the Secondary
* Arbiter Control/Status Register which lives at offset
* 0x41. Using a 32-bit word read/modify/write at 0x40
* is much simpler so that's how we do this.
*/
pci_read_config_dword(pdev, 0x40, &val);
val &= ~0x00040000;
pci_write_config_dword(pdev, 0x40, val);
/* Max out the Multi-Transaction Timer settings since
* Cassini is the only device present.
*
* The register is 16-bit and lives at 0x50. When the
* settings are enabled, it extends the GRANT# signal
* for a requestor after a transaction is complete. This
* allows the next request to run without first needing
* to negotiate the GRANT# signal back.
*
* Bits 12:10 define the grant duration:
*
* 1 -- 16 clocks
* 2 -- 32 clocks
* 3 -- 64 clocks
* 4 -- 128 clocks
* 5 -- 256 clocks
*
* All other values are illegal.
*
* Bits 09:00 define which REQ/GNT signal pairs get the
* GRANT# signal treatment. We set them all.
*/
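/* 5 << 10 selects the 256-clock grant duration; 0x3ff enables
* the treatment on all ten REQ/GNT pairs.
*/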
pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
/* The Read Prefetch Policy register is 16-bit and sits at
* offset 0x52. It enables a "smart" pre-fetch policy. We
* enable it and max out all of the settings since only one
* device is sitting underneath and thus bandwidth sharing is
* not an issue.
*
* The register has several 3 bit fields, which indicates a
* multiplier applied to the base amount of prefetching the
* chip would do. These fields are at:
*
* 15:13 --- ReRead Primary Bus
* 12:10 --- FirstRead Primary Bus
* 09:07 --- ReRead Secondary Bus
* 06:04 --- FirstRead Secondary Bus
*
* Bits 03:00 control which REQ/GNT pairs the prefetch settings
* get enabled on. Bit 3 is a grouped enabler which controls
* all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control
* the individual REQ/GNT pairs [2:0].
*/
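/* max out (0x7) all four prefetch multiplier fields and enable
* prefetching on every REQ/GNT pair (0xf).
*/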
pci_write_config_word(pdev, 0x52,
(0x7 << 13) |
(0x7 << 10) |
(0x7 << 7) |
(0x7 << 4) |
(0xf << 0));
/* Force cacheline size to 0x8 */
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
/* Force latency timer to maximum setting so Cassini can
* sit on the bus as long as it likes.
*/
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
}
static const struct net_device_ops cas_netdev_ops = {
.ndo_open = cas_open,
.ndo_stop = cas_close,
.ndo_start_xmit = cas_start_xmit,
.ndo_get_stats = cas_get_stats,
.ndo_set_rx_mode = cas_set_multicast,
.ndo_do_ioctl = cas_ioctl,
.ndo_tx_timeout = cas_tx_timeout,
.ndo_change_mtu = cas_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cas_netpoll,
#endif
};
static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int cas_version_printed = 0;
unsigned long casreg_len;
struct net_device *dev;
struct cas *cp;
int i, err, pci_using_dac;
u16 pci_cmd;
u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
if (cas_version_printed++ == 0)
pr_info("%s", version);
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Cannot find proper PCI device "
"base address, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
dev = alloc_etherdev(sizeof(*cp));
if (!dev) {
err = -ENOMEM;
goto err_out_disable_pdev;
}
SET_NETDEV_DEV(dev, &pdev->dev);
err = pci_request_regions(pdev, dev->name);
if (err) {
dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_free_netdev;
}
pci_set_master(pdev);
/* we must always turn on parity response or else parity
* doesn't get generated properly. disable SERR/PERR as well.
* in addition, we want to turn MWI on.
*/
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
pci_cmd &= ~PCI_COMMAND_SERR;
pci_cmd |= PCI_COMMAND_PARITY;
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
if (pci_try_set_mwi(pdev))
pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
cas_program_bridge(pdev);
/*
* On some architectures, the default cache line size set
* by pci_try_set_mwi reduces performance. We have to increase
* it for this case. To start, we'll print some configuration
* data.
*/
#if 1
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
&orig_cacheline_size);
if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
cas_cacheline_size =
(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
if (pci_write_config_byte(pdev,
PCI_CACHE_LINE_SIZE,
cas_cacheline_size)) {
dev_err(&pdev->dev, "Could not set PCI cache "
"line size\n");
goto err_write_cacheline;
}
}
#endif
/* Configure DMA attributes. */
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(64));
if (err < 0) {
dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
"for consistent allocations\n");
goto err_out_free_res;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration, "
"aborting\n");
goto err_out_free_res;
}
pci_using_dac = 0;
}
casreg_len = pci_resource_len(pdev, 0);
cp = netdev_priv(dev);
cp->pdev = pdev;
#if 1
/* A value of 0 indicates we never explicitly set it */
cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
#endif
cp->dev = dev;
cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
cassini_debug;
#if defined(CONFIG_SPARC)
cp->of_node = pci_device_to_OF_node(pdev);
#endif
cp->link_transition = LINK_TRANSITION_UNKNOWN;
cp->link_transition_jiffies_valid = 0;
spin_lock_init(&cp->lock);
spin_lock_init(&cp->rx_inuse_lock);
spin_lock_init(&cp->rx_spare_lock);
for (i = 0; i < N_TX_RINGS; i++) {
spin_lock_init(&cp->stat_lock[i]);
spin_lock_init(&cp->tx_lock[i]);
}
spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
mutex_init(&cp->pm_mutex);
init_timer(&cp->link_timer);
cp->link_timer.function = cas_link_timer;
cp->link_timer.data = (unsigned long) cp;
#if 1
/* Just in case the implementation of atomic operations
* changes so that an explicit initialization is necessary.
*/
atomic_set(&cp->reset_task_pending, 0);
atomic_set(&cp->reset_task_pending_all, 0);
atomic_set(&cp->reset_task_pending_spare, 0);
atomic_set(&cp->reset_task_pending_mtu, 0);
#endif
INIT_WORK(&cp->reset_task, cas_reset_task);
/* Default link parameters */
if (link_mode >= 0 && link_mode < 6)
cp->link_cntl = link_modes[link_mode];
else
cp->link_cntl = BMCR_ANENABLE;
cp->lstate = link_down;
cp->link_transition = LINK_TRANSITION_LINK_DOWN;
netif_carrier_off(cp->dev);
cp->timer_ticks = 0;
/* give us access to cassini registers */
cp->regs = pci_iomap(pdev, 0, casreg_len);
if (!cp->regs) {
dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
goto err_out_free_res;
}
cp->casreg_len = casreg_len;
pci_save_state(pdev);
cas_check_pci_invariants(cp);
cas_hard_reset(cp);
cas_reset(cp, 0);
if (cas_check_invariants(cp))
goto err_out_iounmap;
if (cp->cas_flags & CAS_FLAG_SATURN)
if (cas_saturn_firmware_init(cp))
goto err_out_iounmap;
cp->init_block = (struct cas_init_block *)
pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
&cp->block_dvma);
if (!cp->init_block) {
dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
goto err_out_iounmap;
}
for (i = 0; i < N_TX_RINGS; i++)
cp->init_txds[i] = cp->init_block->txds[i];
for (i = 0; i < N_RX_DESC_RINGS; i++)
cp->init_rxds[i] = cp->init_block->rxds[i];
for (i = 0; i < N_RX_COMP_RINGS; i++)
cp->init_rxcs[i] = cp->init_block->rxcs[i];
for (i = 0; i < N_RX_FLOWS; i++)
skb_queue_head_init(&cp->rx_flows[i]);
dev->netdev_ops = &cas_netdev_ops;
dev->ethtool_ops = &cas_ethtool_ops;
dev->watchdog_timeo = CAS_TX_TIMEOUT;
#ifdef USE_NAPI
netif_napi_add(dev, &cp->napi, cas_poll, 64);
#endif
dev->irq = pdev->irq;
dev->dma = 0;
/* Cassini features. */
if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
if (register_netdev(dev)) {
dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_free_consistent;
}
i = readl(cp->regs + REG_BIM_CFG);
netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
(cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
(i & BIM_CFG_32BIT) ? "32" : "64",
(i & BIM_CFG_66MHZ) ? "66" : "33",
(cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
dev->dev_addr);
pci_set_drvdata(pdev, dev);
cp->hw_running = 1;
cas_entropy_reset(cp);
cas_phy_init(cp);
cas_begin_auto_negotiation(cp, NULL);
return 0;
err_out_free_consistent:
pci_free_consistent(pdev, sizeof(struct cas_init_block),
cp->init_block, cp->block_dvma);
err_out_iounmap:
mutex_lock(&cp->pm_mutex);
if (cp->hw_running)
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
pci_iounmap(pdev, cp->regs);
err_out_free_res:
pci_release_regions(pdev);
err_write_cacheline:
/* Try to restore it in case the error occurred after we
* set it.
*/
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
err_out_free_netdev:
free_netdev(dev);
err_out_disable_pdev:
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
return -ENODEV;
}
static void cas_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct cas *cp;
if (!dev)
return;
cp = netdev_priv(dev);
unregister_netdev(dev);
if (cp->fw_data)
vfree(cp->fw_data);
mutex_lock(&cp->pm_mutex);
cancel_work_sync(&cp->reset_task);
if (cp->hw_running)
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
#if 1
if (cp->orig_cacheline_size) {
/* Restore the cache line size if we had modified
* it.
*/
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
cp->orig_cacheline_size);
}
#endif
pci_free_consistent(pdev, sizeof(struct cas_init_block),
cp->init_block, cp->block_dvma);
pci_iounmap(pdev, cp->regs);
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct cas *cp = netdev_priv(dev);
unsigned long flags;
mutex_lock(&cp->pm_mutex);
/* If the driver is opened, we stop the DMA */
if (cp->opened) {
netif_device_detach(dev);
cas_lock_all_save(cp, flags);
/* We can set the second arg of cas_reset to 0
* because on resume, we'll call cas_init_hw with
* its second arg set so that autonegotiation is
* restarted.
*/
cas_reset(cp, 0);
cas_clean_rings(cp);
cas_unlock_all_restore(cp, flags);
}
if (cp->hw_running)
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
return 0;
}
static int cas_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct cas *cp = netdev_priv(dev);
netdev_info(dev, "resuming\n");
mutex_lock(&cp->pm_mutex);
cas_hard_reset(cp);
if (cp->opened) {
unsigned long flags;
cas_lock_all_save(cp, flags);
cas_reset(cp, 0);
cp->hw_running = 1;
cas_clean_rings(cp);
cas_init_hw(cp, 1);
cas_unlock_all_restore(cp, flags);
netif_device_attach(dev);
}
mutex_unlock(&cp->pm_mutex);
return 0;
}
#endif /* CONFIG_PM */
static struct pci_driver cas_driver = {
.name = DRV_MODULE_NAME,
.id_table = cas_pci_tbl,
.probe = cas_init_one,
.remove = cas_remove_one,
#ifdef CONFIG_PM
.suspend = cas_suspend,
.resume = cas_resume
#endif
};
static int __init cas_init(void)
{
if (linkdown_timeout > 0)
link_transition_timeout = linkdown_timeout * HZ;
else
link_transition_timeout = 0;
return pci_register_driver(&cas_driver);
}
static void __exit cas_cleanup(void)
{
pci_unregister_driver(&cas_driver);
}
module_init(cas_init);
module_exit(cas_cleanup);
| gpl-2.0 |
FrostBite-Android/isotope-kernel | arch/m68k/kernel/traps_no.c | 2501 | 8746 | /*
* linux/arch/m68knommu/kernel/traps.c
*
* Copyright (C) 1993, 1994 by Hamish Macdonald
*
* 68040 fixes by Michael Rausch
* 68040 fixes by Martin Apel
* 68060 fixes by Roman Hodek
* 68060 fixes by Jesper Skov
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* Sets up all exception vectors
*/
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/user.h>
#include <linux/string.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <asm/setup.h>
#include <asm/fpu.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/traps.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/siginfo.h>
static char const * const vec_names[] = {
"RESET SP", "RESET PC", "BUS ERROR", "ADDRESS ERROR",
"ILLEGAL INSTRUCTION", "ZERO DIVIDE", "CHK", "TRAPcc",
"PRIVILEGE VIOLATION", "TRACE", "LINE 1010", "LINE 1111",
"UNASSIGNED RESERVED 12", "COPROCESSOR PROTOCOL VIOLATION",
"FORMAT ERROR", "UNINITIALIZED INTERRUPT",
"UNASSIGNED RESERVED 16", "UNASSIGNED RESERVED 17",
"UNASSIGNED RESERVED 18", "UNASSIGNED RESERVED 19",
"UNASSIGNED RESERVED 20", "UNASSIGNED RESERVED 21",
"UNASSIGNED RESERVED 22", "UNASSIGNED RESERVED 23",
"SPURIOUS INTERRUPT", "LEVEL 1 INT", "LEVEL 2 INT", "LEVEL 3 INT",
"LEVEL 4 INT", "LEVEL 5 INT", "LEVEL 6 INT", "LEVEL 7 INT",
"SYSCALL", "TRAP #1", "TRAP #2", "TRAP #3",
"TRAP #4", "TRAP #5", "TRAP #6", "TRAP #7",
"TRAP #8", "TRAP #9", "TRAP #10", "TRAP #11",
"TRAP #12", "TRAP #13", "TRAP #14", "TRAP #15",
"FPCP BSUN", "FPCP INEXACT", "FPCP DIV BY 0", "FPCP UNDERFLOW",
"FPCP OPERAND ERROR", "FPCP OVERFLOW", "FPCP SNAN",
"FPCP UNSUPPORTED OPERATION",
"MMU CONFIGURATION ERROR"
};
void __init trap_init(void)
{
}
void die_if_kernel(char *str, struct pt_regs *fp, int nr)
{
if (!(fp->sr & PS_S))
return;
console_verbose();
printk(KERN_EMERG "%s: %08x\n",str,nr);
printk(KERN_EMERG "PC: [<%08lx>]\nSR: %04x SP: %p a2: %08lx\n",
fp->pc, fp->sr, fp, fp->a2);
printk(KERN_EMERG "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
fp->d0, fp->d1, fp->d2, fp->d3);
printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
fp->d4, fp->d5, fp->a0, fp->a1);
printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
show_stack(NULL, (unsigned long *)(fp + 1));
add_taint(TAINT_DIE);
do_exit(SIGSEGV);
}
asmlinkage void buserr_c(struct frame *fp)
{
/* Only set esp0 if coming from user mode */
if (user_mode(&fp->ptregs))
current->thread.esp0 = (unsigned long) fp;
#if defined(DEBUG)
printk (KERN_DEBUG "*** Bus Error *** Format is %x\n", fp->ptregs.format);
#endif
die_if_kernel("bad frame format",&fp->ptregs,0);
#if defined(DEBUG)
printk(KERN_DEBUG "Unknown SIGSEGV - 4\n");
#endif
force_sig(SIGSEGV, current);
}
static void print_this_address(unsigned long addr, int i)
{
#ifdef CONFIG_KALLSYMS
printk(KERN_EMERG " [%08lx] ", addr);
print_symbol(KERN_CONT "%s\n", addr);
#else
if (i % 5)
printk(KERN_CONT " [%08lx] ", addr);
else
printk(KERN_EMERG " [%08lx] ", addr);
i++;
#endif
}
int kstack_depth_to_print = 48;
static void __show_stack(struct task_struct *task, unsigned long *stack)
{
unsigned long *endstack, addr;
#ifdef CONFIG_FRAME_POINTER
unsigned long *last_stack;
#endif
int i;
if (!stack)
stack = (unsigned long *)task->thread.ksp;
addr = (unsigned long) stack;
endstack = (unsigned long *) PAGE_ALIGN(addr);
printk(KERN_EMERG "Stack from %08lx:", (unsigned long)stack);
for (i = 0; i < kstack_depth_to_print; i++) {
if (stack + 1 + i > endstack)
break;
if (i % 8 == 0)
printk(KERN_EMERG " ");
printk(KERN_CONT " %08lx", *(stack + i));
}
printk("\n");
i = 0;
#ifdef CONFIG_FRAME_POINTER
printk(KERN_EMERG "Call Trace:\n");
last_stack = stack - 1;
while (stack <= endstack && stack > last_stack) {
addr = *(stack + 1);
print_this_address(addr, i);
i++;
last_stack = stack;
stack = (unsigned long *)*stack;
}
printk("\n");
#else
printk(KERN_EMERG "Call Trace with CONFIG_FRAME_POINTER disabled:\n");
while (stack <= endstack) {
addr = *stack++;
/*
* If the address is either in the text segment of the kernel,
* or in a region which is occupied by a module then it *may*
* be the address of a calling routine; if so, print it so that
* someone tracing down the cause of the crash will be able to
* figure out the call path that was taken.
*/
if (__kernel_text_address(addr)) {
print_this_address(addr, i);
i++;
}
}
printk(KERN_CONT "\n");
#endif
}
void bad_super_trap(struct frame *fp)
{
int vector = (fp->ptregs.vector >> 2) & 0xff;
console_verbose();
if (vector < ARRAY_SIZE(vec_names))
printk (KERN_WARNING "*** %s *** FORMAT=%X\n",
vec_names[vector],
fp->ptregs.format);
else
printk (KERN_WARNING "*** Exception %d *** FORMAT=%X\n",
vector,
fp->ptregs.format);
printk (KERN_WARNING "Current process id is %d\n", current->pid);
die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
}
asmlinkage void trap_c(struct frame *fp)
{
int sig;
int vector = (fp->ptregs.vector >> 2) & 0xff;
siginfo_t info;
if (fp->ptregs.sr & PS_S) {
if (vector == VEC_TRACE) {
/* traced a trapping instruction */
} else
bad_super_trap(fp);
return;
}
/* send the appropriate signal to the user program */
switch (vector) {
case VEC_ADDRERR:
info.si_code = BUS_ADRALN;
sig = SIGBUS;
break;
case VEC_ILLEGAL:
case VEC_LINE10:
case VEC_LINE11:
info.si_code = ILL_ILLOPC;
sig = SIGILL;
break;
case VEC_PRIV:
info.si_code = ILL_PRVOPC;
sig = SIGILL;
break;
case VEC_COPROC:
info.si_code = ILL_COPROC;
sig = SIGILL;
break;
case VEC_TRAP1: /* gdbserver breakpoint */
fp->ptregs.pc -= 2;
info.si_code = TRAP_TRACE;
sig = SIGTRAP;
break;
case VEC_TRAP2:
case VEC_TRAP3:
case VEC_TRAP4:
case VEC_TRAP5:
case VEC_TRAP6:
case VEC_TRAP7:
case VEC_TRAP8:
case VEC_TRAP9:
case VEC_TRAP10:
case VEC_TRAP11:
case VEC_TRAP12:
case VEC_TRAP13:
case VEC_TRAP14:
info.si_code = ILL_ILLTRP;
sig = SIGILL;
break;
case VEC_FPBRUC:
case VEC_FPOE:
case VEC_FPNAN:
info.si_code = FPE_FLTINV;
sig = SIGFPE;
break;
case VEC_FPIR:
info.si_code = FPE_FLTRES;
sig = SIGFPE;
break;
case VEC_FPDIVZ:
info.si_code = FPE_FLTDIV;
sig = SIGFPE;
break;
case VEC_FPUNDER:
info.si_code = FPE_FLTUND;
sig = SIGFPE;
break;
case VEC_FPOVER:
info.si_code = FPE_FLTOVF;
sig = SIGFPE;
break;
case VEC_ZERODIV:
info.si_code = FPE_INTDIV;
sig = SIGFPE;
break;
case VEC_CHK:
case VEC_TRAP:
info.si_code = FPE_INTOVF;
sig = SIGFPE;
break;
case VEC_TRACE: /* ptrace single step */
info.si_code = TRAP_TRACE;
sig = SIGTRAP;
break;
case VEC_TRAP15: /* breakpoint */
info.si_code = TRAP_BRKPT;
sig = SIGTRAP;
break;
default:
info.si_code = ILL_ILLOPC;
sig = SIGILL;
break;
}
info.si_signo = sig;
info.si_errno = 0;
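/* the fault address lives in a format-dependent slot of the
* exception stack frame; fall back to the PC for frame formats
* that don't record one.
*/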
switch (fp->ptregs.format) {
default:
info.si_addr = (void *) fp->ptregs.pc;
break;
case 2:
info.si_addr = (void *) fp->un.fmt2.iaddr;
break;
case 7:
info.si_addr = (void *) fp->un.fmt7.effaddr;
break;
case 9:
info.si_addr = (void *) fp->un.fmt9.iaddr;
break;
case 10:
info.si_addr = (void *) fp->un.fmta.daddr;
break;
case 11:
info.si_addr = (void *) fp->un.fmtb.daddr;
break;
}
force_sig_info (sig, &info, current);
}
asmlinkage void set_esp0(unsigned long ssp)
{
current->thread.esp0 = ssp;
}
/*
* The architecture-independent backtrace generator
*/
void dump_stack(void)
{
/*
* We need frame pointers for this little trick, which works as follows:
*
* +------------+ 0x00
* | Next SP | -> 0x0c
* +------------+ 0x04
* | Caller |
* +------------+ 0x08
* | Local vars | -> our stack var
* +------------+ 0x0c
* | Next SP | -> 0x18, that is what we pass to show_stack()
* +------------+ 0x10
* | Caller |
* +------------+ 0x14
* | Local vars |
* +------------+ 0x18
* | ... |
* +------------+
*/
unsigned long *stack;
stack = (unsigned long *)&stack;
stack++;
__show_stack(current, stack);
}
EXPORT_SYMBOL(dump_stack);
void show_stack(struct task_struct *task, unsigned long *stack)
{
if (!stack && !task)
dump_stack();
else
__show_stack(task, stack);
}
| gpl-2.0 |
bestmjh47/android_kernel_a770k-stock | drivers/usb/host/oxu210hp-hcd.c | 2501 | 101548 | /*
* Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
*
* This code is *strongly* based on EHCI-HCD code by David Brownell since
* the chip is a quasi-EHCI compatible.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include "oxu210hp.h"
#define DRIVER_VERSION "0.0.50"
/*
* Main defines
*/
#define oxu_dbg(oxu, fmt, args...) \
dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_err(oxu, fmt, args...) \
dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_info(oxu, fmt, args...) \
dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
return container_of((void *) oxu, struct usb_hcd, hcd_priv);
}
static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
{
return (struct oxu_hcd *) (hcd->hcd_priv);
}
/*
* Debug stuff
*/
#undef OXU_URB_TRACE
#undef OXU_VERBOSE_DEBUG
#ifdef OXU_VERBOSE_DEBUG
#define oxu_vdbg oxu_dbg
#else
#define oxu_vdbg(oxu, fmt, args...) /* Nop */
#endif
#ifdef DEBUG
static int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
label, label[0] ? " " : "", status,
(status & STS_ASS) ? " Async" : "",
(status & STS_PSS) ? " Periodic" : "",
(status & STS_RECL) ? " Recl" : "",
(status & STS_HALT) ? " Halt" : "",
(status & STS_IAA) ? " IAA" : "",
(status & STS_FATAL) ? " FATAL" : "",
(status & STS_FLR) ? " FLR" : "",
(status & STS_PCD) ? " PCD" : "",
(status & STS_ERR) ? " ERR" : "",
(status & STS_INT) ? " INT" : ""
);
}
static int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
label, label[0] ? " " : "", enable,
(enable & STS_IAA) ? " IAA" : "",
(enable & STS_FATAL) ? " FATAL" : "",
(enable & STS_FLR) ? " FLR" : "",
(enable & STS_PCD) ? " PCD" : "",
(enable & STS_ERR) ? " ERR" : "",
(enable & STS_INT) ? " INT" : ""
);
}
static const char *const fls_strings[] =
{ "1024", "512", "256", "??" };
static int dbg_command_buf(char *buf, unsigned len,
const char *label, u32 command)
{
return scnprintf(buf, len,
"%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
label, label[0] ? " " : "", command,
(command & CMD_PARK) ? "park" : "(park)",
CMD_PARK_CNT(command),
(command >> 16) & 0x3f,
(command & CMD_LRESET) ? " LReset" : "",
(command & CMD_IAAD) ? " IAAD" : "",
(command & CMD_ASE) ? " Async" : "",
(command & CMD_PSE) ? " Periodic" : "",
fls_strings[(command >> 2) & 0x3],
(command & CMD_RESET) ? " Reset" : "",
(command & CMD_RUN) ? "RUN" : "HALT"
);
}
static int dbg_port_buf(char *buf, unsigned len, const char *label,
int port, u32 status)
{
char *sig;
/* signaling state */
switch (status & (3 << 10)) {
case 0 << 10:
sig = "se0";
break;
case 1 << 10:
sig = "k"; /* low speed */
break;
case 2 << 10:
sig = "j";
break;
default:
sig = "?";
break;
}
return scnprintf(buf, len,
"%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
label, label[0] ? " " : "", port, status,
(status & PORT_POWER) ? " POWER" : "",
(status & PORT_OWNER) ? " OWNER" : "",
sig,
(status & PORT_RESET) ? " RESET" : "",
(status & PORT_SUSPEND) ? " SUSPEND" : "",
(status & PORT_RESUME) ? " RESUME" : "",
(status & PORT_OCC) ? " OCC" : "",
(status & PORT_OC) ? " OC" : "",
(status & PORT_PEC) ? " PEC" : "",
(status & PORT_PE) ? " PE" : "",
(status & PORT_CSC) ? " CSC" : "",
(status & PORT_CONNECT) ? " CONNECT" : ""
);
}
#else
static inline int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }
#endif /* DEBUG */
/* functions have the "wrong" filename when they're output... */
#define dbg_status(oxu, label, status) { \
char _buf[80]; \
dbg_status_buf(_buf, sizeof _buf, label, status); \
oxu_dbg(oxu, "%s\n", _buf); \
}
#define dbg_cmd(oxu, label, command) { \
char _buf[80]; \
dbg_command_buf(_buf, sizeof _buf, label, command); \
oxu_dbg(oxu, "%s\n", _buf); \
}
#define dbg_port(oxu, label, port, status) { \
char _buf[80]; \
dbg_port_buf(_buf, sizeof _buf, label, port, status); \
oxu_dbg(oxu, "%s\n", _buf); \
}
/*
* Module parameters
*/
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh; /* 0 to 6 */
module_param(log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
/* Initial park setting: slower than hw default */
static unsigned park;
module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
/* For flakey hardware, ignore overcurrent indicators */
static int ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");
static void ehci_work(struct oxu_hcd *oxu);
static int oxu_hub_control(struct usb_hcd *hcd,
u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
/*
* Local functions
*/
/* Low level read/write registers functions */
static inline u32 oxu_readl(void *base, u32 reg)
{
return readl(base + reg);
}
static inline void oxu_writel(void *base, u32 reg, u32 val)
{
writel(val, base + reg);
}
static inline void timer_action_done(struct oxu_hcd *oxu,
enum ehci_timer_action action)
{
clear_bit(action, &oxu->actions);
}
static inline void timer_action(struct oxu_hcd *oxu,
enum ehci_timer_action action)
{
if (!test_and_set_bit(action, &oxu->actions)) {
unsigned long t;
switch (action) {
case TIMER_IAA_WATCHDOG:
t = EHCI_IAA_JIFFIES;
break;
case TIMER_IO_WATCHDOG:
t = EHCI_IO_JIFFIES;
break;
case TIMER_ASYNC_OFF:
t = EHCI_ASYNC_JIFFIES;
break;
case TIMER_ASYNC_SHRINK:
default:
t = EHCI_SHRINK_JIFFIES;
break;
}
t += jiffies;
/* all timings except IAA watchdog can be overridden.
* async queue SHRINK often precedes IAA. while it's ready
* to go OFF neither can matter, and afterwards the IO
* watchdog stops unless there's still periodic traffic.
*/
if (action != TIMER_IAA_WATCHDOG
&& t > oxu->watchdog.expires
&& timer_pending(&oxu->watchdog))
return;
mod_timer(&oxu->watchdog, t);
}
}
/*
* handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
* @usec: timeout in microseconds
*
* Returns negative errno, or zero on success
*
* Success happens when the "mask" bits have the specified value (hardware
* handshake done). There are two failure modes: "usec" have passed (major
* hardware flakeout), or the register reads as all-ones (hardware removed).
*
* That last failure should only happen in cases like physical cardbus eject
* before driver shutdown. But it also seems to be caused by bugs in cardbus
* bridge shutdown: shutting down the bridge before the devices using it.
*/
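/* For example, ehci_halt() below calls
* handshake(oxu, &oxu->regs->status, STS_HALT, STS_HALT, 16 * 125)
* to wait up to 2ms (16 microframes) for the controller to halt.
*/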
static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
u32 result;
do {
result = readl(ptr);
if (result == ~(u32)0) /* card removed */
return -ENODEV;
result &= mask;
if (result == done)
return 0;
udelay(1);
usec--;
} while (usec > 0);
return -ETIMEDOUT;
}
/* Force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt(struct oxu_hcd *oxu)
{
u32 temp = readl(&oxu->regs->status);
/* disable any irqs left enabled by previous code */
writel(0, &oxu->regs->intr_enable);
if ((temp & STS_HALT) != 0)
return 0;
temp = readl(&oxu->regs->command);
temp &= ~CMD_RUN;
writel(temp, &oxu->regs->command);
return handshake(oxu, &oxu->regs->status,
STS_HALT, STS_HALT, 16 * 125);
}
/* Put TDI/ARC silicon into EHCI mode */
static void tdi_reset(struct oxu_hcd *oxu)
{
u32 __iomem *reg_ptr;
u32 tmp;
reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
tmp = readl(reg_ptr);
tmp |= 0x3;
writel(tmp, reg_ptr);
}
/* Reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct oxu_hcd *oxu)
{
int retval;
u32 command = readl(&oxu->regs->command);
command |= CMD_RESET;
dbg_cmd(oxu, "reset", command);
writel(command, &oxu->regs->command);
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
oxu->next_statechange = jiffies;
retval = handshake(oxu, &oxu->regs->command,
CMD_RESET, 0, 250 * 1000);
if (retval)
return retval;
tdi_reset(oxu);
return retval;
}
/* Idle the controller (from running) */
static void ehci_quiesce(struct oxu_hcd *oxu)
{
u32 temp;
#ifdef DEBUG
if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
BUG();
#endif
/* wait for any schedule enables/disables to take effect */
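/* CMD_ASE/CMD_PSE sit 10 bits below STS_ASS/STS_PSS, so shifting
* the command register left by 10 lines the enable bits up with
* their status counterparts.
*/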
temp = readl(&oxu->regs->command) << 10;
temp &= STS_ASS | STS_PSS;
if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
temp, 16 * 125) != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
return;
}
/* then disable anything that's still active */
temp = readl(&oxu->regs->command);
temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
writel(temp, &oxu->regs->command);
/* hardware can take 16 microframes to turn off ... */
if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
0, 16 * 125) != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
return;
}
}
static int check_reset_complete(struct oxu_hcd *oxu, int index,
u32 __iomem *status_reg, int port_status)
{
if (!(port_status & PORT_CONNECT)) {
oxu->reset_done[index] = 0;
return port_status;
}
/* if reset finished and it's still not enabled -- handoff */
if (!(port_status & PORT_PE)) {
oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
index+1);
return port_status;
} else
oxu_dbg(oxu, "port %d high speed\n", index + 1);
return port_status;
}
static void ehci_hub_descriptor(struct oxu_hcd *oxu,
struct usb_hub_descriptor *desc)
{
int ports = HCS_N_PORTS(oxu->hcs_params);
u16 temp;
desc->bDescriptorType = 0x29;
desc->bPwrOn2PwrGood = 10; /* oxu 1.0, 2.3.9 says 20ms max */
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
temp = 1 + (ports / 8);
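/* 7-byte header plus the two variable-length port bitmaps
* (DeviceRemovable and PortPwrCtrlMask), each temp bytes long.
*/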
desc->bDescLength = 7 + 2 * temp;
/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
temp = 0x0008; /* per-port overcurrent reporting */
if (HCS_PPC(oxu->hcs_params))
temp |= 0x0001; /* per-port power control */
else
temp |= 0x0002; /* no power switching */
desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
}
/* Allocate an OXU210HP on-chip memory data buffer
*
* An on-chip memory data buffer is required for each OXU210HP USB transfer.
* Each transfer descriptor has one or more on-chip memory data buffers.
*
* Data buffers are allocated from a fixed-size pool of data blocks.
* To minimise fragmentation and give reasonable memory utilisation,
* data buffers are allocated with sizes that are power-of-2 multiples of
* the block size, starting at an address that is a multiple of the allocated size.
*
* FIXME: callers of this function require a buffer to be allocated for
* len=0. This is a waste of on-chip memory and should be fixed. Then this
* function should be changed to not allocate a buffer for len=0.
*/
static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
{
int n_blocks; /* minimum blocks needed to hold len */
int a_blocks; /* blocks allocated */
int i, j;
/* Don't allocate bigger than supported */
if (len > BUFFER_SIZE * BUFFER_NUM) {
oxu_err(oxu, "buffer too big (%d)\n", len);
return -ENOMEM;
}
spin_lock(&oxu->mem_lock);
/* Number of blocks needed to hold len */
n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;
/* Round the number of blocks up to the power of 2 */
for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
;
/* Find a suitable available data buffer */
for (i = 0; i < BUFFER_NUM;
i += max(a_blocks, (int)oxu->db_used[i])) {
/* Check all the required blocks are available */
for (j = 0; j < a_blocks; j++)
if (oxu->db_used[i + j])
break;
if (j != a_blocks)
continue;
/* Allocate blocks found! */
qtd->buffer = (void *) &oxu->mem->db_pool[i];
qtd->buffer_dma = virt_to_phys(qtd->buffer);
qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
oxu->db_used[i] = a_blocks;
spin_unlock(&oxu->mem_lock);
return 0;
}
/* Failed */
spin_unlock(&oxu->mem_lock);
return -ENOMEM;
}
static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
int index;
spin_lock(&oxu->mem_lock);
index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
/ BUFFER_SIZE;
oxu->db_used[index] = 0;
qtd->qtd_buffer_len = 0;
qtd->buffer_dma = 0;
qtd->buffer = NULL;
spin_unlock(&oxu->mem_lock);
}
static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
{
memset(qtd, 0, sizeof *qtd);
qtd->qtd_dma = dma;
qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
qtd->hw_next = EHCI_LIST_END;
qtd->hw_alt_next = EHCI_LIST_END;
INIT_LIST_HEAD(&qtd->qtd_list);
}
static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
int index;
if (qtd->buffer)
oxu_buf_free(oxu, qtd);
spin_lock(&oxu->mem_lock);
index = qtd - &oxu->mem->qtd_pool[0];
oxu->qtd_used[index] = 0;
spin_unlock(&oxu->mem_lock);
}
static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
{
int i;
struct ehci_qtd *qtd = NULL;
spin_lock(&oxu->mem_lock);
for (i = 0; i < QTD_NUM; i++)
if (!oxu->qtd_used[i])
break;
if (i < QTD_NUM) {
qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
memset(qtd, 0, sizeof *qtd);
qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
qtd->hw_next = EHCI_LIST_END;
qtd->hw_alt_next = EHCI_LIST_END;
INIT_LIST_HEAD(&qtd->qtd_list);
qtd->qtd_dma = virt_to_phys(qtd);
oxu->qtd_used[i] = 1;
}
spin_unlock(&oxu->mem_lock);
return qtd;
}
static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
int index;
spin_lock(&oxu->mem_lock);
index = qh - &oxu->mem->qh_pool[0];
oxu->qh_used[index] = 0;
spin_unlock(&oxu->mem_lock);
}
static void qh_destroy(struct kref *kref)
{
struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
struct oxu_hcd *oxu = qh->oxu;
/* clean qtds first, and know this is not linked */
if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
oxu_dbg(oxu, "unused qh not empty!\n");
BUG();
}
if (qh->dummy)
oxu_qtd_free(oxu, qh->dummy);
oxu_qh_free(oxu, qh);
}
static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
{
int i;
struct ehci_qh *qh = NULL;
spin_lock(&oxu->mem_lock);
for (i = 0; i < QHEAD_NUM; i++)
if (!oxu->qh_used[i])
break;
if (i < QHEAD_NUM) {
qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
memset(qh, 0, sizeof *qh);
kref_init(&qh->kref);
qh->oxu = oxu;
qh->qh_dma = virt_to_phys(qh);
INIT_LIST_HEAD(&qh->qtd_list);
/* dummy td enables safe urb queuing */
qh->dummy = ehci_qtd_alloc(oxu);
if (qh->dummy == NULL) {
oxu_dbg(oxu, "no dummy td\n");
oxu->qh_used[i] = 0;
qh = NULL;
goto unlock;
}
oxu->qh_used[i] = 1;
}
unlock:
spin_unlock(&oxu->mem_lock);
return qh;
}
/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
{
kref_get(&qh->kref);
return qh;
}
static inline void qh_put(struct ehci_qh *qh)
{
kref_put(&qh->kref, qh_destroy);
}
static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
{
int index;
spin_lock(&oxu->mem_lock);
index = murb - &oxu->murb_pool[0];
oxu->murb_used[index] = 0;
spin_unlock(&oxu->mem_lock);
}
static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
{
int i;
struct oxu_murb *murb = NULL;
spin_lock(&oxu->mem_lock);
for (i = 0; i < MURB_NUM; i++)
if (!oxu->murb_used[i])
break;
if (i < MURB_NUM) {
murb = &(oxu->murb_pool)[i];
oxu->murb_used[i] = 1;
}
spin_unlock(&oxu->mem_lock);
return murb;
}
/* The queue heads and transfer descriptors are managed from pools tied
* to each of the "per device" structures.
* This is the initialisation and cleanup code.
*/
static void ehci_mem_cleanup(struct oxu_hcd *oxu)
{
kfree(oxu->murb_pool);
oxu->murb_pool = NULL;
if (oxu->async)
qh_put(oxu->async);
oxu->async = NULL;
del_timer(&oxu->urb_timer);
oxu->periodic = NULL;
/* shadow periodic table */
kfree(oxu->pshadow);
oxu->pshadow = NULL;
}
/* Remember to add cleanup code (above) if you add anything here.
*/
static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
{
int i;
for (i = 0; i < oxu->periodic_size; i++)
oxu->mem->frame_list[i] = EHCI_LIST_END;
for (i = 0; i < QHEAD_NUM; i++)
oxu->qh_used[i] = 0;
for (i = 0; i < QTD_NUM; i++)
oxu->qtd_used[i] = 0;
oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
if (!oxu->murb_pool)
goto fail;
for (i = 0; i < MURB_NUM; i++)
oxu->murb_used[i] = 0;
oxu->async = oxu_qh_alloc(oxu);
if (!oxu->async)
goto fail;
oxu->periodic = (__le32 *) &oxu->mem->frame_list;
oxu->periodic_dma = virt_to_phys(oxu->periodic);
for (i = 0; i < oxu->periodic_size; i++)
oxu->periodic[i] = EHCI_LIST_END;
/* software shadow of hardware table */
oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
if (oxu->pshadow != NULL)
return 0;
fail:
oxu_dbg(oxu, "couldn't init memory\n");
ehci_mem_cleanup(oxu);
return -ENOMEM;
}
/* Fill a qtd, returning how much of the buffer we were able to queue up.
*/
static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
int token, int maxpacket)
{
int i, count;
u64 addr = buf;
/* one buffer entry per 4K ... first might be short or unaligned */
qtd->hw_buf[0] = cpu_to_le32((u32)addr);
qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
count = 0x1000 - (buf & 0x0fff); /* rest of that page */
if (likely(len < count)) /* ... iff needed */
count = len;
else {
buf += 0x1000;
buf &= ~0x0fff;
/* per-qtd limit: from 16K to 20K (best alignment) */
for (i = 1; count < len && i < 5; i++) {
addr = buf;
qtd->hw_buf[i] = cpu_to_le32((u32)addr);
qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
buf += 0x1000;
if ((count + 0x1000) < len)
count += 0x1000;
else
count = len;
}
/* short packets may only terminate transfers */
if (count != len)
count -= (count % maxpacket);
}
qtd->hw_token = cpu_to_le32((count << 16) | token);
qtd->length = count;
return count;
}
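/* Worked example for qtd_fill(): a qtd carries five 4 KiB buffer
* pointers and only the first may be unaligned. With buf = 0x1234 and
* len = 20000, the first entry covers 0x1000 - 0x234 = 3532 bytes and
* the next four cover 4096 bytes each, so count = 3532 + 4 * 4096 =
* 19916 < len. Because a short packet may only terminate a transfer,
* count is then trimmed back to a multiple of maxpacket and the rest
* of the buffer goes into the next qtd.
*/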
static inline void qh_update(struct oxu_hcd *oxu,
struct ehci_qh *qh, struct ehci_qtd *qtd)
{
/* writes to an active overlay are unsafe */
BUG_ON(qh->qh_state != QH_STATE_IDLE);
qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
qh->hw_alt_next = EHCI_LIST_END;
/* Except for control endpoints, we make hardware maintain data
* toggle (like OHCI) ... here (re)initialize the toggle in the QH,
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
unsigned is_out, epnum;
is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
usb_settoggle(qh->dev, epnum, is_out, 1);
}
}
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb();
qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
}
/* If it weren't for a common silicon quirk (writing the dummy into the qh
* overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
* recovery (including urb dequeue) would need software changes to a QH...
*/
static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
struct ehci_qtd *qtd;
if (list_empty(&qh->qtd_list))
qtd = qh->dummy;
else {
qtd = list_entry(qh->qtd_list.next,
struct ehci_qtd, qtd_list);
/* first qtd may already be partially processed */
if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
qtd = NULL;
}
if (qtd)
qh_update(oxu, qh, qtd);
}
static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
size_t length, u32 token)
{
/* count IN/OUT bytes, not SETUP (even short packets) */
if (likely(QTD_PID(token) != 2))
urb->actual_length += length - QTD_LENGTH(token);
/* don't modify error codes */
if (unlikely(urb->status != -EINPROGRESS))
return;
/* force cleanup after short read; not always an error */
if (unlikely(IS_SHORT_READ(token)))
urb->status = -EREMOTEIO;
/* serious "can't proceed" faults reported by the hardware */
if (token & QTD_STS_HALT) {
if (token & QTD_STS_BABBLE) {
/* FIXME "must" disable babbling device's port too */
urb->status = -EOVERFLOW;
} else if (token & QTD_STS_MMF) {
/* fs/ls interrupt xfer missed the complete-split */
urb->status = -EPROTO;
} else if (token & QTD_STS_DBE) {
urb->status = (QTD_PID(token) == 1) /* IN ? */
? -ENOSR /* hc couldn't read data */
: -ECOMM; /* hc couldn't write data */
} else if (token & QTD_STS_XACT) {
/* timeout, bad crc, wrong PID, etc; retried */
if (QTD_CERR(token))
urb->status = -EPIPE;
else {
oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
urb->dev->devpath,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out");
urb->status = -EPROTO;
}
/* CERR nonzero + no errors + halt --> stall */
} else if (QTD_CERR(token))
urb->status = -EPIPE;
else /* unknown */
urb->status = -EPROTO;
oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
token, urb->status);
}
}
static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
__releases(oxu->lock)
__acquires(oxu->lock)
{
if (likely(urb->hcpriv != NULL)) {
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
/* S-mask in a QH means it's an interrupt urb */
if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {
/* ... update hc-wide periodic stats (for usbfs) */
oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
}
qh_put(qh);
}
urb->hcpriv = NULL;
switch (urb->status) {
case -EINPROGRESS: /* success */
urb->status = 0;
default: /* fault */
break;
case -EREMOTEIO: /* fault or normal */
if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
urb->status = 0;
break;
case -ECONNRESET: /* canceled */
case -ENOENT:
break;
}
#ifdef OXU_URB_TRACE
oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
__func__, urb->dev->devpath, urb,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
urb->status,
urb->actual_length, urb->transfer_buffer_length);
#endif
/* complete() can reenter this HCD */
spin_unlock(&oxu->lock);
usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
spin_lock(&oxu->lock);
}
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
#define HALT_BIT cpu_to_le32(QTD_STS_HALT)
/* Process and free completed qtds for a qh, returning URBs to drivers.
* Chases up to qh->hw_current. Returns number of completions called,
* indicating how much "real" work we did.
*/
static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
struct ehci_qtd *last = NULL, *end = qh->dummy;
struct list_head *entry, *tmp;
int stopped;
unsigned count = 0;
int do_status = 0;
u8 state;
struct oxu_murb *murb = NULL;
if (unlikely(list_empty(&qh->qtd_list)))
return count;
/* completions (or tasks on other cpus) must never clobber HALT
* till we've gone through and cleaned everything up, even when
* they add urbs to this qh's queue or mark them for unlinking.
*
* NOTE: unlinking expects to be done in queue order.
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
stopped = (state == QH_STATE_IDLE);
/* remove de-activated QTDs from front of queue.
* after faults (including short reads), cleanup this urb
* then let the queue advance.
* if queue is stopped, handles unlinks.
*/
list_for_each_safe(entry, tmp, &qh->qtd_list) {
struct ehci_qtd *qtd;
struct urb *urb;
u32 token = 0;
qtd = list_entry(entry, struct ehci_qtd, qtd_list);
urb = qtd->urb;
/* Clean up any state from previous QTD ...*/
if (last) {
if (likely(last->urb != urb)) {
if (last->urb->complete == NULL) {
murb = (struct oxu_murb *) last->urb;
last->urb = murb->main;
if (murb->last) {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_murb_free(oxu, murb);
} else {
ehci_urb_done(oxu, last->urb);
count++;
}
}
oxu_qtd_free(oxu, last);
last = NULL;
}
/* ignore urbs submitted during completions we reported */
if (qtd == end)
break;
/* hardware copies qtd out of qh overlay */
rmb();
token = le32_to_cpu(qtd->hw_token);
/* always clean up qtds the hc de-activated */
if ((token & QTD_STS_ACTIVE) == 0) {
if ((token & QTD_STS_HALT) != 0) {
stopped = 1;
/* magic dummy for some short reads; qh won't advance.
* that silicon quirk can kick in with this dummy too.
*/
} else if (IS_SHORT_READ(token) &&
!(qtd->hw_alt_next & EHCI_LIST_END)) {
stopped = 1;
goto halt;
}
/* stop scanning when we reach qtds the hc is using */
} else if (likely(!stopped &&
HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
break;
} else {
stopped = 1;
if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
urb->status = -ESHUTDOWN;
/* ignore active urbs unless some previous qtd
* for the urb faulted (including short read) or
* its urb was canceled. we may patch qh or qtds.
*/
if (likely(urb->status == -EINPROGRESS))
continue;
/* issue status after short control reads */
if (unlikely(do_status != 0)
&& QTD_PID(token) == 0 /* OUT */) {
do_status = 0;
continue;
}
/* token in overlay may be most current */
if (state == QH_STATE_IDLE
&& cpu_to_le32(qtd->qtd_dma)
== qh->hw_current)
token = le32_to_cpu(qh->hw_token);
/* force halt for unlinked or blocked qh, so we'll
* patch the qh later and so that completions can't
* activate it while we "know" it's stopped.
*/
if ((HALT_BIT & qh->hw_token) == 0) {
halt:
qh->hw_token |= HALT_BIT;
wmb();
}
}
/* Remove it from the queue */
qtd_copy_status(oxu, urb->complete ?
urb : ((struct oxu_murb *) urb)->main,
qtd->length, token);
if ((usb_pipein(qtd->urb->pipe)) &&
(NULL != qtd->transfer_buffer))
memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
do_status = (urb->status == -EREMOTEIO)
&& usb_pipecontrol(urb->pipe);
if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
last = list_entry(qtd->qtd_list.prev,
struct ehci_qtd, qtd_list);
last->hw_next = qtd->hw_next;
}
list_del(&qtd->qtd_list);
last = qtd;
}
/* last urb's completion might still need calling */
if (likely(last != NULL)) {
if (last->urb->complete == NULL) {
murb = (struct oxu_murb *) last->urb;
last->urb = murb->main;
if (murb->last) {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_murb_free(oxu, murb);
} else {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_qtd_free(oxu, last);
}
/* restore original state; caller must unlink or relink */
qh->qh_state = state;
/* be sure the hardware's done with the qh before refreshing
* it after fault cleanup, or recovering from silicon wrongly
* overlaying the dummy qtd (which reduces DMA chatter).
*/
if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
switch (state) {
case QH_STATE_IDLE:
qh_refresh(oxu, qh);
break;
case QH_STATE_LINKED:
/* should be rare for periodic transfers,
* except maybe high bandwidth ...
*/
if ((cpu_to_le32(QH_SMASK)
& qh->hw_info2) != 0) {
intr_deschedule(oxu, qh);
(void) qh_schedule(oxu, qh);
} else
unlink_async(oxu, qh);
break;
/* otherwise, unlink already started */
}
}
return count;
}
/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
/* ... and packet size, for any kind of endpoint descriptor */
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
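/* Decoding example: a high bandwidth interrupt endpoint advertising
* wMaxPacketSize = 0x1400 yields max_packet() = 0x400 (1024 bytes) and
* hb_mult() = 1 + ((0x1400 >> 11) & 0x03) = 3, i.e. up to three
* 1024-byte packets per microframe.
*/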
/* Reverse of qh_urb_transaction: free a list of TDs.
* used for cleanup after errors, before HC sees an URB's TDs.
*/
static void qtd_list_free(struct oxu_hcd *oxu,
struct urb *urb, struct list_head *qtd_list)
{
struct list_head *entry, *temp;
list_for_each_safe(entry, temp, qtd_list) {
struct ehci_qtd *qtd;
qtd = list_entry(entry, struct ehci_qtd, qtd_list);
list_del(&qtd->qtd_list);
oxu_qtd_free(oxu, qtd);
}
}
/* Create a list of filled qtds for this URB; won't link into qh.
*/
static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
struct urb *urb,
struct list_head *head,
gfp_t flags)
{
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf;
int len, maxpacket;
int is_input;
u32 token;
void *transfer_buf = NULL;
int ret;
/*
* URBs map to sequences of QTDs: one logical transaction
*/
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
return NULL;
list_add_tail(&qtd->qtd_list, head);
qtd->urb = urb;
token = QTD_STS_ACTIVE;
token |= (EHCI_TUNE_CERR << 10);
/* for split transactions, SplitXState initialized to zero */
len = urb->transfer_buffer_length;
is_input = usb_pipein(urb->pipe);
if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
urb->transfer_buffer = phys_to_virt(urb->transfer_dma);
if (usb_pipecontrol(urb->pipe)) {
/* SETUP pid */
ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
if (ret)
goto cleanup;
qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
token | (2 /* "setup" */ << 8), 8);
memcpy(qtd->buffer, qtd->urb->setup_packet,
sizeof(struct usb_ctrlrequest));
/* ... and always at least one more pid */
token ^= QTD_TOGGLE;
qtd_prev = qtd;
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
/* for zero length DATA stages, STATUS is always IN */
if (len == 0)
token |= (1 /* "in" */ << 8);
}
/*
* Data transfer stage: buffer setup
*/
ret = oxu_buf_alloc(oxu, qtd, len);
if (ret)
goto cleanup;
buf = qtd->buffer_dma;
transfer_buf = urb->transfer_buffer;
if (!is_input)
memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);
if (is_input)
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
/*
* buffer gets wrapped in one or more qtds;
* last one may be "short" (including zero len)
* and may serve as a control status ack
*/
for (;;) {
int this_qtd_len;
this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
qtd->transfer_buffer = transfer_buf;
len -= this_qtd_len;
buf += this_qtd_len;
transfer_buf += this_qtd_len;
if (is_input)
qtd->hw_alt_next = oxu->async->hw_alt_next;
/* qh makes control packets use qtd toggle; maybe switch it */
if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
token ^= QTD_TOGGLE;
if (likely(len <= 0))
break;
qtd_prev = qtd;
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
goto cleanup;
if (likely(len > 0)) {
ret = oxu_buf_alloc(oxu, qtd, len);
if (ret)
goto cleanup;
}
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
}
/* unless the bulk/interrupt caller wants a chance to clean
* up after short reads, hc should advance qh past this urb
*/
if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
|| usb_pipecontrol(urb->pipe)))
qtd->hw_alt_next = EHCI_LIST_END;
/*
* control requests may need a terminating data "status" ack;
* bulk ones may need a terminating short packet (zero length).
*/
if (likely(urb->transfer_buffer_length != 0)) {
int one_more = 0;
if (usb_pipecontrol(urb->pipe)) {
one_more = 1;
token ^= 0x0100; /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
} else if (usb_pipebulk(urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
&& !(urb->transfer_buffer_length % maxpacket)) {
one_more = 1;
}
if (one_more) {
qtd_prev = qtd;
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
/* never any data in such packets */
qtd_fill(qtd, 0, 0, token, 0);
}
}
/* by default, enable interrupt on urb completion */
qtd->hw_token |= cpu_to_le32(QTD_IOC);
return head;
cleanup:
qtd_list_free(oxu, urb, head);
return NULL;
}
/* Each QH holds a qtd list; a QH is used for everything except iso.
*
* For interrupt urbs, the scheduler must set the microframe scheduling
* mask(s) each time the QH gets scheduled. For highspeed, that's
* just one microframe in the s-mask. For split interrupt transactions
* there are additional complications: c-mask, maybe FSTNs.
*/
static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
struct urb *urb, gfp_t flags)
{
struct ehci_qh *qh = oxu_qh_alloc(oxu);
u32 info1 = 0, info2 = 0;
int is_input, type;
int maxp = 0;
if (!qh)
return qh;
/*
* init endpoint/device data for this QH
*/
info1 |= usb_pipeendpoint(urb->pipe) << 8;
info1 |= usb_pipedevice(urb->pipe) << 0;
is_input = usb_pipein(urb->pipe);
type = usb_pipetype(urb->pipe);
maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
/* Compute interrupt scheduling parameters just once, and save.
* - allowing for high bandwidth, how many nsec/uframe are used?
* - split transactions need a second CSPLIT uframe; same question
* - splits also need a schedule gap (for full/low speed I/O)
* - qh has a polling interval
*
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
qh->start = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->c_usecs = 0;
qh->gap_uf = 0;
qh->period = urb->interval >> 3;
if (qh->period == 0 && urb->interval != 1) {
/* NOTE interval 2 or 4 uframes could work.
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
dbg("intr period %d uframes, NYET!",
urb->interval);
goto done;
}
} else {
struct usb_tt *tt = urb->dev->tt;
int think_time;
/* gap is f(FS/LS transfer times) */
qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
is_input, 0, maxp) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { /* SPLIT, gap, CSPLIT+DATA */
qh->c_usecs = qh->usecs + HS_USECS(0);
qh->usecs = HS_USECS(1);
} else { /* SPLIT+DATA, gap, CSPLIT */
qh->usecs += HS_USECS(1);
qh->c_usecs = HS_USECS(0);
}
think_time = tt ? tt->think_time : 0;
qh->tt_usecs = NS_TO_US(think_time +
usb_calc_bus_time(urb->dev->speed,
is_input, 0, max_packet(maxp)));
qh->period = urb->interval;
}
}
/* support for tt scheduling, and access to toggles */
qh->dev = urb->dev;
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
info1 |= (1 << 12); /* EPS "low" */
/* FALL THROUGH */
case USB_SPEED_FULL:
/* EPS 0 means "full" */
if (type != PIPE_INTERRUPT)
info1 |= (EHCI_TUNE_RL_TT << 28);
if (type == PIPE_CONTROL) {
info1 |= (1 << 27); /* for TT */
info1 |= 1 << 14; /* toggle from qtd */
}
info1 |= maxp << 16;
info2 |= (EHCI_TUNE_MULT_TT << 30);
info2 |= urb->dev->ttport << 23;
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
break;
case USB_SPEED_HIGH: /* no TT involved */
info1 |= (2 << 12); /* EPS "high" */
if (type == PIPE_CONTROL) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 64 << 16; /* usb2 fixed maxpacket */
info1 |= 1 << 14; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (type == PIPE_BULK) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 512 << 16; /* usb2 fixed maxpacket */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else { /* PIPE_INTERRUPT */
info1 |= max_packet(maxp) << 16;
info2 |= hb_mult(maxp) << 30;
}
break;
default:
dbg("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
qh_put(qh);
return NULL;
}
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
/* init as live, toggle clear, advance to dummy */
qh->qh_state = QH_STATE_IDLE;
qh->hw_info1 = cpu_to_le32(info1);
qh->hw_info2 = cpu_to_le32(info2);
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
qh_refresh(oxu, qh);
return qh;
}
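/* Scheduling arithmetic in qh_make(), by example: a high speed
* interrupt endpoint with urb->interval = 16 microframes gets
* qh->period = 16 >> 3 = 2 frames, while interval = 1 leaves period 0,
* the "every microframe" case. Intervals of 2 or 4 microframes also
* give period 0 but fail the interval != 1 test, which is why they are
* rejected above with the "NYET" debug message.
*/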
/* Move qh (and its qtds) onto async queue; maybe enable queue.
*/
static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
__le32 dma = QH_NEXT(qh->qh_dma);
struct ehci_qh *head;
/* (re)start the async schedule? */
head = oxu->async;
timer_action_done(oxu, TIMER_ASYNC_OFF);
if (!head->qh_next.qh) {
u32 cmd = readl(&oxu->regs->command);
if (!(cmd & CMD_ASE)) {
/* in case a clear of CMD_ASE didn't take yet */
(void)handshake(oxu, &oxu->regs->status,
STS_ASS, 0, 150);
cmd |= CMD_ASE | CMD_RUN;
writel(cmd, &oxu->regs->command);
oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
/* posted write need not be known to HC yet ... */
}
}
/* clear halt and/or toggle; and maybe recover from silicon quirk */
if (qh->qh_state == QH_STATE_IDLE)
qh_refresh(oxu, qh);
/* splice right after start */
qh->qh_next = head->qh_next;
qh->hw_next = head->hw_next;
wmb();
head->qh_next.qh = qh;
head->hw_next = dma;
qh->qh_state = QH_STATE_LINKED;
/* qtd completions reported later by interrupt */
}
#define QH_ADDR_MASK cpu_to_le32(0x7f)
/*
* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
* Returns null if it can't allocate a QH it needs to.
* If the QH has TDs (urbs) already, that's great.
*/
static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
struct urb *urb, struct list_head *qtd_list,
int epnum, void **ptr)
{
struct ehci_qh *qh = NULL;
qh = (struct ehci_qh *) *ptr;
if (unlikely(qh == NULL)) {
/* can't sleep here, we have oxu->lock... */
qh = qh_make(oxu, urb, GFP_ATOMIC);
*ptr = qh;
}
if (likely(qh != NULL)) {
struct ehci_qtd *qtd;
if (unlikely(list_empty(qtd_list)))
qtd = NULL;
else
qtd = list_entry(qtd_list->next, struct ehci_qtd,
qtd_list);
/* control qh may need patching ... */
if (unlikely(epnum == 0)) {
/* usb_reset_device() briefly reverts to address 0 */
if (usb_pipedevice(urb->pipe) == 0)
qh->hw_info1 &= ~QH_ADDR_MASK;
}
/* just one way to queue requests: swap with the dummy qtd.
* only hc or qh_refresh() ever modify the overlay.
*/
if (likely(qtd != NULL)) {
struct ehci_qtd *dummy;
dma_addr_t dma;
__le32 token;
/* to avoid racing the HC, use the dummy td instead of
* the first td of our list (becomes new dummy). both
* tds stay deactivated until we're done, when the
* HC is allowed to fetch the old dummy (4.10.2).
*/
token = qtd->hw_token;
qtd->hw_token = HALT_BIT;
wmb();
dummy = qh->dummy;
dma = dummy->qtd_dma;
*dummy = *qtd;
dummy->qtd_dma = dma;
list_del(&qtd->qtd_list);
list_add(&dummy->qtd_list, qtd_list);
list_splice(qtd_list, qh->qtd_list.prev);
ehci_qtd_init(qtd, qtd->qtd_dma);
qh->dummy = qtd;
/* hc must see the new dummy at list end */
dma = qtd->qtd_dma;
qtd = list_entry(qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
qtd->hw_next = QTD_NEXT(dma);
/* let the hc process these next qtds */
dummy->hw_token = (token & ~(0x80));
wmb();
dummy->hw_token = token;
urb->hcpriv = qh_get(qh);
}
}
return qh;
}
static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
struct list_head *qtd_list, gfp_t mem_flags)
{
struct ehci_qtd *qtd;
int epnum;
unsigned long flags;
struct ehci_qh *qh = NULL;
int rc = 0;
qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
epnum = urb->ep->desc.bEndpointAddress;
#ifdef OXU_URB_TRACE
oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
__func__, urb->dev->devpath, urb,
epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
urb->transfer_buffer_length,
qtd, urb->ep->hcpriv);
#endif
spin_lock_irqsave(&oxu->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
rc = -ESHUTDOWN;
goto done;
}
qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
if (unlikely(qh == NULL)) {
rc = -ENOMEM;
goto done;
}
/* Control/bulk operations through TTs don't need scheduling,
* the HC and TT handle it when the TT has a buffer ready.
*/
if (likely(qh->qh_state == QH_STATE_IDLE))
qh_link_async(oxu, qh_get(qh));
done:
spin_unlock_irqrestore(&oxu->lock, flags);
if (unlikely(qh == NULL))
qtd_list_free(oxu, urb, qtd_list);
return rc;
}
/* The async qh for the qtds being reclaimed is now unlinked from the HC */
static void end_unlink_async(struct oxu_hcd *oxu)
{
struct ehci_qh *qh = oxu->reclaim;
struct ehci_qh *next;
timer_action_done(oxu, TIMER_IAA_WATCHDOG);
qh->qh_state = QH_STATE_IDLE;
qh->qh_next.qh = NULL;
qh_put(qh); /* refcount from reclaim */
/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
next = qh->reclaim;
oxu->reclaim = next;
oxu->reclaim_ready = 0;
qh->reclaim = NULL;
qh_completions(oxu, qh);
if (!list_empty(&qh->qtd_list)
&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
qh_link_async(oxu, qh);
else {
qh_put(qh); /* refcount from async list */
/* it's not free to turn the async schedule on/off; leave it
* active but idle for a while once it empties.
*/
if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
&& oxu->async->qh_next.qh == NULL)
timer_action(oxu, TIMER_ASYNC_OFF);
}
if (next) {
oxu->reclaim = NULL;
start_unlink_async(oxu, next);
}
}
/* makes sure the async qh will become idle */
/* caller must own oxu->lock */
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
int cmd = readl(&oxu->regs->command);
struct ehci_qh *prev;
#ifdef DEBUG
assert_spin_locked(&oxu->lock);
if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
&& qh->qh_state != QH_STATE_UNLINK_WAIT))
BUG();
#endif
/* stop async schedule right now? */
if (unlikely(qh == oxu->async)) {
/* can't get here without STS_ASS set */
if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
&& !oxu->reclaim) {
/* ... and CMD_IAAD clear */
writel(cmd & ~CMD_ASE, &oxu->regs->command);
wmb();
/* handshake later, if we need to */
timer_action_done(oxu, TIMER_ASYNC_OFF);
}
return;
}
qh->qh_state = QH_STATE_UNLINK;
oxu->reclaim = qh = qh_get(qh);
prev = oxu->async;
while (prev->qh_next.qh != qh)
prev = prev->qh_next.qh;
prev->hw_next = qh->hw_next;
prev->qh_next = qh->qh_next;
wmb();
if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
/* if (unlikely(qh->reclaim != 0))
* this will recurse, probably not much
*/
end_unlink_async(oxu);
return;
}
oxu->reclaim_ready = 0;
cmd |= CMD_IAAD;
writel(cmd, &oxu->regs->command);
(void) readl(&oxu->regs->command);
timer_action(oxu, TIMER_IAA_WATCHDOG);
}
static void scan_async(struct oxu_hcd *oxu)
{
struct ehci_qh *qh;
enum ehci_timer_action action = TIMER_IO_WATCHDOG;
if (!++(oxu->stamp))
oxu->stamp++;
timer_action_done(oxu, TIMER_ASYNC_SHRINK);
rescan:
qh = oxu->async->qh_next.qh;
if (likely(qh != NULL)) {
do {
/* clean any finished work for this qh */
if (!list_empty(&qh->qtd_list)
&& qh->stamp != oxu->stamp) {
int temp;
/* unlinks could happen here; completion
* reporting drops the lock. rescan using
* the latest schedule, but don't rescan
* qhs we already finished (no looping).
*/
qh = qh_get(qh);
qh->stamp = oxu->stamp;
temp = qh_completions(oxu, qh);
qh_put(qh);
if (temp != 0)
goto rescan;
}
/* unlink idle entries, reducing HC PCI usage as well
* as HCD schedule-scanning costs. delay for any qh
* we just scanned, there's a not-unusual case that it
* doesn't stay idle for long.
* (plus, avoids some kind of re-activation race.)
*/
if (list_empty(&qh->qtd_list)) {
if (qh->stamp == oxu->stamp)
action = TIMER_ASYNC_SHRINK;
else if (!oxu->reclaim
&& qh->qh_state == QH_STATE_LINKED)
start_unlink_async(oxu, qh);
}
qh = qh->qh_next.qh;
} while (qh);
}
if (action == TIMER_ASYNC_SHRINK)
timer_action(oxu, TIMER_ASYNC_SHRINK);
}
/*
* periodic_next_shadow - return "next" pointer on shadow list
* @periodic: host pointer to qh/itd/sitd
* @tag: hardware tag for type of this record
*/
static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
__le32 tag)
{
switch (tag) {
default:
case Q_TYPE_QH:
return &periodic->qh->qh_next;
}
}
/* caller must hold oxu->lock */
static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
{
union ehci_shadow *prev_p = &oxu->pshadow[frame];
__le32 *hw_p = &oxu->periodic[frame];
union ehci_shadow here = *prev_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
hw_p = here.hw_next;
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
if (!here.ptr)
return;
/* update shadow and hardware lists ... the old "next" pointers
* from ptr may still be in use, the caller updates them.
*/
*prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
*hw_p = *here.hw_next;
}
/* how many of the uframe's 125 usecs are allocated? */
static unsigned short periodic_usecs(struct oxu_hcd *oxu,
unsigned frame, unsigned uframe)
{
__le32 *hw_p = &oxu->periodic[frame];
union ehci_shadow *q = &oxu->pshadow[frame];
unsigned usecs = 0;
while (q->ptr) {
switch (Q_NEXT_TYPE(*hw_p)) {
case Q_TYPE_QH:
default:
/* is it in the S-mask? */
if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
usecs += q->qh->usecs;
/* ... or C-mask? */
if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
usecs += q->qh->c_usecs;
hw_p = &q->qh->hw_next;
q = &q->qh->qh_next;
break;
}
}
#ifdef DEBUG
if (usecs > 100)
oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
frame * 8 + uframe, usecs);
#endif
return usecs;
}
static int enable_periodic(struct oxu_hcd *oxu)
{
u32 cmd;
int status;
/* did clearing PSE take effect yet?
* takes effect only at frame boundaries...
*/
status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
if (status != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
usb_hc_died(oxu_to_hcd(oxu));
return status;
}
cmd = readl(&oxu->regs->command) | CMD_PSE;
writel(cmd, &oxu->regs->command);
/* posted write ... PSS happens later */
oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
/* make sure ehci_work scans these */
oxu->next_uframe = readl(&oxu->regs->frame_index)
% (oxu->periodic_size << 3);
return 0;
}
static int disable_periodic(struct oxu_hcd *oxu)
{
u32 cmd;
int status;
/* did setting PSE not take effect yet?
* takes effect only at frame boundaries...
*/
status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
if (status != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
usb_hc_died(oxu_to_hcd(oxu));
return status;
}
cmd = readl(&oxu->regs->command) & ~CMD_PSE;
writel(cmd, &oxu->regs->command);
/* posted write ... */
oxu->next_uframe = -1;
return 0;
}
/* periodic schedule slots have iso tds (normal or split) first, then a
* sparse tree for active interrupt transfers.
*
* this just links in a qh; caller guarantees uframe masks are set right.
* no FSTN support (yet; oxu 0.96+)
*/
static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
unsigned i;
unsigned period = qh->period;
dev_dbg(&qh->dev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
/* high bandwidth, or otherwise every microframe */
if (period == 0)
period = 1;
for (i = qh->start; i < oxu->periodic_size; i += period) {
union ehci_shadow *prev = &oxu->pshadow[i];
__le32 *hw_p = &oxu->periodic[i];
union ehci_shadow here = *prev;
__le32 type = 0;
/* skip the iso nodes at list head */
while (here.ptr) {
type = Q_NEXT_TYPE(*hw_p);
if (type == Q_TYPE_QH)
break;
prev = periodic_next_shadow(prev, type);
hw_p = &here.qh->hw_next;
here = *prev;
}
/* sorting each branch by period (slow-->fast)
* enables sharing interior tree nodes
*/
while (here.ptr && qh != here.qh) {
if (qh->period > here.qh->period)
break;
prev = &here.qh->qh_next;
hw_p = &here.qh->hw_next;
here = *prev;
}
/* link in this qh, unless some earlier pass did that */
if (qh != here.qh) {
qh->qh_next = here;
if (here.qh)
qh->hw_next = *hw_p;
wmb();
prev->qh = qh;
*hw_p = QH_NEXT(qh->qh_dma);
}
}
qh->qh_state = QH_STATE_LINKED;
qh_get(qh);
/* update per-qh bandwidth for usbfs */
oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
? ((qh->usecs + qh->c_usecs) / qh->period)
: (qh->usecs * 8);
/* maybe enable periodic schedule processing */
if (!oxu->periodic_sched++)
return enable_periodic(oxu);
return 0;
}
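/* Linking example: a qh with qh->period = 4 and qh->start = 1 is
* spliced into frames 1, 5, 9, ... of the periodic table. Each frame's
* chain is kept sorted slow-to-fast, so the period-4 qh sits ahead of
* any period-2 or period-1 qhs, which it then shares as a common tail
* across every frame it occupies. The usbfs bandwidth figure charges
* (usecs + c_usecs) / period per frame, or usecs * 8 when period 0
* means every microframe.
*/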
static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
unsigned i;
unsigned period;
/* FIXME:
* IF this isn't high speed
* and this qh is active in the current uframe
* (and overlay token SplitXstate is false?)
* THEN
* qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
*/
/* high bandwidth, or otherwise part of every microframe */
period = qh->period;
if (period == 0)
period = 1;
for (i = qh->start; i < oxu->periodic_size; i += period)
periodic_unlink(oxu, i, qh);
/* update per-qh bandwidth for usbfs */
oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
? ((qh->usecs + qh->c_usecs) / qh->period)
: (qh->usecs * 8);
dev_dbg(&qh->dev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
qh->period,
le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = NULL;
qh_put(qh);
/* maybe turn off periodic schedule */
oxu->periodic_sched--;
if (!oxu->periodic_sched)
(void) disable_periodic(oxu);
}
static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
unsigned wait;
qh_unlink_periodic(oxu, qh);
/* simple/paranoid: always delay, expecting the HC needs to read
* qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
* expect khubd to clean up after any CSPLITs we won't issue.
* active high speed queues may need bigger delays...
*/
if (list_empty(&qh->qtd_list)
|| (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
wait = 2;
else
wait = 55; /* worst case: 3 * 1024 */
udelay(wait);
qh->qh_state = QH_STATE_IDLE;
qh->hw_next = EHCI_LIST_END;
wmb();
}
static int check_period(struct oxu_hcd *oxu,
unsigned frame, unsigned uframe,
unsigned period, unsigned usecs)
{
int claimed;
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
*/
if (uframe >= 8)
return 0;
/*
* 80% periodic == 100 usec/uframe available
* convert "usecs we need" to "max already claimed"
*/
usecs = 100 - usecs;
/* we "know" 2 and 4 uframe intervals were rejected; so
* for period 0, check _every_ microframe in the schedule.
*/
if (unlikely(period == 0)) {
do {
for (uframe = 0; uframe < 7; uframe++) {
claimed = periodic_usecs(oxu, frame, uframe);
if (claimed > usecs)
return 0;
}
} while ((frame += 1) < oxu->periodic_size);
/* just check the specified uframe, at that period */
} else {
do {
claimed = periodic_usecs(oxu, frame, uframe);
if (claimed > usecs)
return 0;
} while ((frame += period) < oxu->periodic_size);
}
return 1;
}
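/* Budget example for check_period(): the 80% rule leaves 100 usecs of
* each 125 usec microframe for periodic transfers. A request needing
* 30 usecs is inverted to "at most 70 usecs may already be claimed";
* any probed uframe where periodic_usecs() reports more than 70 makes
* the fit fail.
*/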
static int check_intr_schedule(struct oxu_hcd *oxu,
unsigned frame, unsigned uframe,
const struct ehci_qh *qh, __le32 *c_maskp)
{
int retval = -ENOSPC;
if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
goto done;
if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
goto done;
if (!qh->c_usecs) {
retval = 0;
*c_maskp = 0;
goto done;
}
done:
return retval;
}
/* "first fit" scheduling policy used the first time through,
* or when the previous schedule slot can't be re-used.
*/
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
int status;
unsigned uframe;
__le32 c_mask;
unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
qh_refresh(oxu, qh);
qh->hw_next = EHCI_LIST_END;
frame = qh->start;
/* reuse the previous schedule slots, if we can */
if (frame < qh->period) {
uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
status = check_intr_schedule(oxu, frame, --uframe,
qh, &c_mask);
} else {
uframe = 0;
c_mask = 0;
status = -ENOSPC;
}
/* else scan the schedule to find a group of slots such that all
* uframes have enough periodic bandwidth available.
*/
if (status) {
/* "normal" case, uframing flexible except with splits */
if (qh->period) {
frame = qh->period - 1;
do {
for (uframe = 0; uframe < 8; uframe++) {
status = check_intr_schedule(oxu,
frame, uframe, qh,
&c_mask);
if (status == 0)
break;
}
} while (status && frame--);
/* qh->period == 0 means every uframe */
} else {
frame = 0;
status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
}
if (status)
goto done;
qh->start = frame;
/* reset S-frame and (maybe) C-frame masks */
qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK));
qh->hw_info2 |= qh->period
? cpu_to_le32(1 << uframe)
: cpu_to_le32(QH_SMASK);
qh->hw_info2 |= c_mask;
} else
oxu_dbg(oxu, "reused qh %p schedule\n", qh);
/* stuff into the periodic schedule */
status = qh_link_periodic(oxu, qh);
done:
return status;
}
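/* Mask example for qh_schedule(): a qh with a nonzero period that is
* placed in uframe 3 gets S-mask 1 << 3 = 0x08, so the HC starts it
* only in microframe 3 of its frames; a period 0 qh instead gets the
* full QH_SMASK and is polled in every microframe.
*/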
static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
struct list_head *qtd_list, gfp_t mem_flags)
{
unsigned epnum;
unsigned long flags;
struct ehci_qh *qh;
int status = 0;
struct list_head empty;
/* get endpoint and transfer/schedule data */
epnum = urb->ep->desc.bEndpointAddress;
spin_lock_irqsave(&oxu->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
status = -ESHUTDOWN;
goto done;
}
/* get qh and force any scheduling errors */
INIT_LIST_HEAD(&empty);
qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
if (qh == NULL) {
status = -ENOMEM;
goto done;
}
if (qh->qh_state == QH_STATE_IDLE) {
status = qh_schedule(oxu, qh);
if (status != 0)
goto done;
}
/* then queue the urb's tds to the qh */
qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
BUG_ON(qh == NULL);
/* ... update usbfs periodic stats */
oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;
done:
spin_unlock_irqrestore(&oxu->lock, flags);
if (status)
qtd_list_free(oxu, urb, qtd_list);
return status;
}
static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
gfp_t mem_flags)
{
oxu_dbg(oxu, "iso support is missing!\n");
return -ENOSYS;
}
static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
gfp_t mem_flags)
{
oxu_dbg(oxu, "split iso support is missing!\n");
return -ENOSYS;
}
static void scan_periodic(struct oxu_hcd *oxu)
{
unsigned frame, clock, now_uframe, mod;
unsigned modified;
mod = oxu->periodic_size << 3;
/*
* When running, scan from last scan point up to "now"
* else clean up by scanning everything that's left.
* Touches as few pages as possible: cache-friendly.
*/
now_uframe = oxu->next_uframe;
if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
clock = readl(&oxu->regs->frame_index);
else
clock = now_uframe + mod - 1;
clock %= mod;
for (;;) {
union ehci_shadow q, *q_p;
__le32 type, *hw_p;
unsigned uframes;
/* don't scan past the live uframe */
frame = now_uframe >> 3;
if (frame == (clock >> 3))
uframes = now_uframe & 0x07;
else {
/* safe to scan the whole frame at once */
now_uframe |= 0x07;
uframes = 8;
}
restart:
/* scan each element in frame's queue for completions */
q_p = &oxu->pshadow[frame];
hw_p = &oxu->periodic[frame];
q.ptr = q_p->ptr;
type = Q_NEXT_TYPE(*hw_p);
modified = 0;
while (q.ptr != NULL) {
union ehci_shadow temp;
int live;
live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state);
switch (type) {
case Q_TYPE_QH:
/* handle any completions */
temp.qh = qh_get(q.qh);
type = Q_NEXT_TYPE(q.qh->hw_next);
q = q.qh->qh_next;
modified = qh_completions(oxu, temp.qh);
if (unlikely(list_empty(&temp.qh->qtd_list)))
intr_deschedule(oxu, temp.qh);
qh_put(temp.qh);
break;
default:
dbg("corrupt type %d frame %d shadow %p",
type, frame, q.ptr);
q.ptr = NULL;
}
/* assume completion callbacks modify the queue */
if (unlikely(modified))
goto restart;
}
/* Stop when we catch up to the HC */
/* FIXME: this assumes we won't get lapped when
* latencies climb; that should be rare, but...
* detect it, and just go all the way around.
* FLR might help detect this case, so long as latencies
* don't exceed periodic_size msec (default 1.024 sec).
*/
/* FIXME: likewise assumes HC doesn't halt mid-scan */
if (now_uframe == clock) {
unsigned now;
if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
break;
oxu->next_uframe = now_uframe;
now = readl(&oxu->regs->frame_index) % mod;
if (now_uframe == now)
break;
/* rescan the rest of this frame, then ... */
clock = now;
} else {
now_uframe++;
now_uframe %= mod;
}
}
}
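/* Units in scan_periodic(): frame_index and next_uframe count
* microframes, so mod = periodic_size << 3. With the default 1K frame
* periodic list that is 8192 uframes of 125 usecs, i.e. the 1.024
* second wrap the lapping FIXME above refers to; the frame is
* recovered as now_uframe >> 3 and the uframe within it as
* now_uframe & 0x07.
*/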
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
* The firmware seems to think that powering off is a wakeup event!
* This routine turns off remote wakeup and everything else, on all ports.
*/
static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
{
int port = HCS_N_PORTS(oxu->hcs_params);
while (port--)
writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
}
static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
{
unsigned port;
if (!HCS_PPC(oxu->hcs_params))
return;
oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
(void) oxu_hub_control(oxu_to_hcd(oxu),
is_on ? SetPortFeature : ClearPortFeature,
USB_PORT_FEAT_POWER,
port--, NULL, 0);
msleep(20);
}
/* Called from some interrupts, timers, and so on.
* It calls driver completion functions, after dropping oxu->lock.
*/
static void ehci_work(struct oxu_hcd *oxu)
{
timer_action_done(oxu, TIMER_IO_WATCHDOG);
if (oxu->reclaim_ready)
end_unlink_async(oxu);
/* another CPU may drop oxu->lock during a schedule scan while
* it reports urb completions. this flag guards against bogus
* attempts at re-entrant schedule scanning.
*/
if (oxu->scanning)
return;
oxu->scanning = 1;
scan_async(oxu);
if (oxu->next_uframe != -1)
scan_periodic(oxu);
oxu->scanning = 0;
/* the IO watchdog guards against hardware or driver bugs that
* misplace IRQs, and should let us run completely without IRQs.
* such lossage has been observed on both VT6202 and VT8235.
*/
if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
(oxu->async->qh_next.ptr != NULL ||
oxu->periodic_sched != 0))
timer_action(oxu, TIMER_IO_WATCHDOG);
}
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
/* if we need to use IAA and it's busy, defer */
if (qh->qh_state == QH_STATE_LINKED
&& oxu->reclaim
&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
struct ehci_qh *last;
for (last = oxu->reclaim;
last->reclaim;
last = last->reclaim)
continue;
qh->qh_state = QH_STATE_UNLINK_WAIT;
last->reclaim = qh;
/* bypass IAA if the hc can't care */
} else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
end_unlink_async(oxu);
/* something else might have unlinked the qh by now */
if (qh->qh_state == QH_STATE_LINKED)
start_unlink_async(oxu, qh);
}
/*
* USB host controller methods
*/
static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 status, pcd_status = 0;
int bh;
spin_lock(&oxu->lock);
status = readl(&oxu->regs->status);
/* e.g. cardbus physical eject */
if (status == ~(u32) 0) {
oxu_dbg(oxu, "device removed\n");
goto dead;
}
/* Shared IRQ? */
status &= INTR_MASK;
if (!status || unlikely(hcd->state == HC_STATE_HALT)) {
spin_unlock(&oxu->lock);
return IRQ_NONE;
}
/* clear (just) interrupts */
writel(status, &oxu->regs->status);
readl(&oxu->regs->command); /* unblock posted write */
bh = 0;
#ifdef OXU_VERBOSE_DEBUG
/* unrequested/ignored: Frame List Rollover */
dbg_status(oxu, "irq", status);
#endif
/* INT, ERR, and IAA interrupt rates can be throttled */
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely((status & (STS_INT|STS_ERR)) != 0))
bh = 1;
/* complete the unlinking of some qh [4.15.2.3] */
if (status & STS_IAA) {
oxu->reclaim_ready = 1;
bh = 1;
}
/* remote wakeup [4.3.1] */
if (status & STS_PCD) {
unsigned i = HCS_N_PORTS(oxu->hcs_params);
pcd_status = status;
/* resume root hub? */
if (!(readl(&oxu->regs->command) & CMD_RUN))
usb_hcd_resume_root_hub(hcd);
while (i--) {
int pstatus = readl(&oxu->regs->port_status[i]);
if (pstatus & PORT_OWNER)
continue;
if (!(pstatus & PORT_RESUME)
|| oxu->reset_done[i] != 0)
continue;
/* start 20 msec resume signaling from this port,
* and make khubd collect PORT_STAT_C_SUSPEND to
* stop that signaling.
*/
oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
}
}
/* PCI errors [4.15.2.4] */
if (unlikely((status & STS_FATAL) != 0)) {
/* bogus "fatal" IRQs appear on some chips... why? */
status = readl(&oxu->regs->status);
dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
dbg_status(oxu, "fatal", status);
if (status & STS_HALT) {
oxu_err(oxu, "fatal error\n");
dead:
ehci_reset(oxu);
writel(0, &oxu->regs->configured_flag);
usb_hc_died(hcd);
/* generic layer kills/unlinks all urbs, then
* uses oxu_stop to clean up the rest
*/
bh = 1;
}
}
if (bh)
ehci_work(oxu);
spin_unlock(&oxu->lock);
if (pcd_status & STS_PCD)
usb_hcd_poll_rh_status(hcd);
return IRQ_HANDLED;
}
static irqreturn_t oxu_irq(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int ret = IRQ_HANDLED;
u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);
/* Disable all interrupts */
oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);
if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
(!oxu->is_otg && (status & OXU_USBSPHI)))
oxu210_hcd_irq(hcd);
else
ret = IRQ_NONE;
/* Re-enable all interrupts */
oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);
return ret;
}
static void oxu_watchdog(unsigned long param)
{
struct oxu_hcd *oxu = (struct oxu_hcd *) param;
unsigned long flags;
spin_lock_irqsave(&oxu->lock, flags);
/* lost IAA irqs wedge things badly; seen with a vt8235 */
if (oxu->reclaim) {
u32 status = readl(&oxu->regs->status);
if (status & STS_IAA) {
oxu_vdbg(oxu, "lost IAA\n");
writel(STS_IAA, &oxu->regs->status);
oxu->reclaim_ready = 1;
}
}
/* stop async processing after it's idled a bit */
if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
start_unlink_async(oxu, oxu->async);
/* oxu could run by timer, without IRQs ... */
ehci_work(oxu);
spin_unlock_irqrestore(&oxu->lock, flags);
}
/* One-time init, only for memory state.
*/
static int oxu_hcd_init(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 temp;
int retval;
u32 hcc_params;
spin_lock_init(&oxu->lock);
init_timer(&oxu->watchdog);
oxu->watchdog.function = oxu_watchdog;
oxu->watchdog.data = (unsigned long) oxu;
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
*/
oxu->periodic_size = DEFAULT_I_TDPS;
retval = ehci_mem_init(oxu, GFP_KERNEL);
if (retval < 0)
return retval;
/* controllers may cache some of the periodic schedule ... */
hcc_params = readl(&oxu->caps->hcc_params);
if (HCC_ISOC_CACHE(hcc_params)) /* full frame cache */
oxu->i_thresh = 8;
else /* N microframes cached */
oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
oxu->reclaim = NULL;
oxu->reclaim_ready = 0;
oxu->next_uframe = -1;
/*
* dedicate a qh for the async ring head, since we couldn't unlink
* a 'real' qh without stopping the async schedule [4.8]. use it
* as the 'reclamation list head' too.
* its dummy is used in hw_alt_next of many tds, to prevent the qh
* from automatically advancing to the next td after short reads.
*/
oxu->async->qh_next.qh = NULL;
oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
oxu->async->hw_qtd_next = EHCI_LIST_END;
oxu->async->qh_state = QH_STATE_LINKED;
oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);
/* clear interrupt enables, set irq latency */
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
log2_irq_thresh = 0;
temp = 1 << (16 + log2_irq_thresh);
if (HCC_CANPARK(hcc_params)) {
/* HW default park == 3, on hardware that supports it (like
* NVidia and ALI silicon), maximizes throughput on the async
* schedule by avoiding QH fetches between transfers.
*
* With fast usb storage devices and NForce2, "park" seems to
* make problems: throughput reduction (!), data errors...
*/
if (park) {
park = min(park, (unsigned) 3);
temp |= CMD_PARK;
temp |= park << 8;
}
oxu_dbg(oxu, "park %d\n", park);
}
if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
}
oxu->command = temp;
return 0;
}
/* Called during probe() after chip reset completes.
*/
static int oxu_reset(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int ret;
spin_lock_init(&oxu->mem_lock);
INIT_LIST_HEAD(&oxu->urb_list);
oxu->urb_len = 0;
/* FIXME */
hcd->self.controller->dma_mask = NULL;
if (oxu->is_otg) {
oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
HC_LENGTH(readl(&oxu->caps->hc_capbase));
oxu->mem = hcd->regs + OXU_SPH_MEM;
} else {
oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \
HC_LENGTH(readl(&oxu->caps->hc_capbase));
oxu->mem = hcd->regs + OXU_OTG_MEM;
}
oxu->hcs_params = readl(&oxu->caps->hcs_params);
oxu->sbrn = 0x20;
ret = oxu_hcd_init(hcd);
if (ret)
return ret;
return 0;
}
static int oxu_run(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int retval;
u32 temp, hcc_params;
hcd->uses_new_polling = 1;
/* EHCI spec section 4.1 */
retval = ehci_reset(oxu);
if (retval != 0) {
ehci_mem_cleanup(oxu);
return retval;
}
writel(oxu->periodic_dma, &oxu->regs->frame_list);
writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
/* hcc_params controls whether oxu->regs->segment must (!!!)
* be used; it constrains QH/ITD/SITD and QTD locations.
* pci_pool consistent memory always uses segment zero.
* streaming mappings for I/O buffers, like pci_map_single(),
* can return segments above 4GB, if the device allows.
*
* NOTE: the dma mask is visible through dma_supported(), so
* drivers can pass this info along ... like NETIF_F_HIGHDMA,
* Scsi_Host.highmem_io, and so forth. It's readonly to all
* host side drivers though.
*/
hcc_params = readl(&oxu->caps->hcc_params);
if (HCC_64BIT_ADDR(hcc_params))
writel(0, &oxu->regs->segment);
oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
CMD_ASE | CMD_RESET);
oxu->command |= CMD_RUN;
writel(oxu->command, &oxu->regs->command);
dbg_cmd(oxu, "init", oxu->command);
/*
* Start, enabling full USB 2.0 functionality ... usb 1.1 devices
* are explicitly handed to companion controller(s), so no TT is
* involved with the root hub. (Except where one is integrated,
* and there's no companion controller unless maybe for USB OTG.)
*/
hcd->state = HC_STATE_RUNNING;
writel(FLAG_CF, &oxu->regs->configured_flag);
readl(&oxu->regs->command); /* unblock posted writes */
temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
temp >> 8, temp & 0xff, DRIVER_VERSION,
ignore_oc ? ", overcurrent ignored" : "");
writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */
return 0;
}
static void oxu_stop(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
/* Turn off port power on all root hub ports. */
ehci_port_power(oxu, 0);
/* no more interrupts ... */
del_timer_sync(&oxu->watchdog);
spin_lock_irq(&oxu->lock);
if (HC_IS_RUNNING(hcd->state))
ehci_quiesce(oxu);
ehci_reset(oxu);
writel(0, &oxu->regs->intr_enable);
spin_unlock_irq(&oxu->lock);
/* let companion controllers work when we aren't */
writel(0, &oxu->regs->configured_flag);
/* root hub is shut down separately (first, when possible) */
spin_lock_irq(&oxu->lock);
if (oxu->async)
ehci_work(oxu);
spin_unlock_irq(&oxu->lock);
ehci_mem_cleanup(oxu);
dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
}
/* Kick in for silicon on any bus (not just pci, etc).
* This forcibly disables dma and IRQs, helping kexec and other cases
* where the next system software may expect clean state.
*/
static void oxu_shutdown(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
(void) ehci_halt(oxu);
ehci_turn_off_all_ports(oxu);
/* make BIOS/etc use companion controller during reboot */
writel(0, &oxu->regs->configured_flag);
/* unblock posted writes */
readl(&oxu->regs->configured_flag);
}
/* Non-error returns are a promise to giveback() the urb later
* we drop ownership so next owner (or urb unlink) can get it
*
* urb + dev is in hcd.self.controller.urb_list
* we're queueing TDs onto software and hardware lists
*
* hcd-specific init for hcpriv hasn't been done yet
*
* NOTE: control, bulk, and interrupt share the same code to append TDs
* to a (possibly active) QH, and the same QH scanning code.
*/
static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
struct list_head qtd_list;
INIT_LIST_HEAD(&qtd_list);
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
default:
if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
return -ENOMEM;
return submit_async(oxu, urb, &qtd_list, mem_flags);
case PIPE_INTERRUPT:
if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
return -ENOMEM;
return intr_submit(oxu, urb, &qtd_list, mem_flags);
case PIPE_ISOCHRONOUS:
if (urb->dev->speed == USB_SPEED_HIGH)
return itd_submit(oxu, urb, mem_flags);
else
return sitd_submit(oxu, urb, mem_flags);
}
}
/* This function is responsible for breaking up URBs with big data sizes
* into smaller chunks and processing the resulting micro urbs in sequence.
*/
static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int num, rem;
int transfer_buffer_length;
void *transfer_buffer;
struct urb *murb;
int i, ret;
/* If not bulk pipe just enqueue the URB */
if (!usb_pipebulk(urb->pipe))
return __oxu_urb_enqueue(hcd, urb, mem_flags);
/* Otherwise we should verify the USB transfer buffer size! */
transfer_buffer = urb->transfer_buffer;
transfer_buffer_length = urb->transfer_buffer_length;
num = urb->transfer_buffer_length / 4096;
rem = urb->transfer_buffer_length % 4096;
if (rem != 0)
num++;
/* If URB is smaller than 4096 bytes just enqueue it! */
if (num == 1)
return __oxu_urb_enqueue(hcd, urb, mem_flags);
/* Ok, we have more work to do! :) */
for (i = 0; i < num - 1; i++) {
/* Get a free micro URB; poll till one becomes available */
do {
murb = (struct urb *) oxu_murb_alloc(oxu);
if (!murb)
schedule();
} while (!murb);
/* Copy the urb */
memcpy(murb, urb, sizeof(struct urb));
murb->transfer_buffer_length = 4096;
murb->transfer_buffer = transfer_buffer + i * 4096;
/* A NULL complete pointer encodes that this is a micro urb */
murb->complete = NULL;
((struct oxu_murb *) murb)->main = urb;
((struct oxu_murb *) murb)->last = 0;
/* This loop guarantees that the urb gets processed even when there are
* not enough resources at a particular time, by retrying.
*/
do {
ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
if (ret)
schedule();
} while (ret);
}
/* Last urb requires special handling */
/* Get a free micro URB; poll until one becomes available */
do {
murb = (struct urb *) oxu_murb_alloc(oxu);
if (!murb)
schedule();
} while (!murb);
/* Copy the urb */
memcpy(murb, urb, sizeof(struct urb));
murb->transfer_buffer_length = rem > 0 ? rem : 4096;
murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;
/* A NULL complete() pointer encodes that this is a micro urb */
murb->complete = NULL;
((struct oxu_murb *) murb)->main = urb;
((struct oxu_murb *) murb)->last = 1;
do {
ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
if (ret)
schedule();
} while (ret);
return ret;
}
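/* Worked example (illustrative, not part of the driver): the
* open-coded num/rem math above is plain round-up division. A
* 10000-byte bulk transfer gives num = 10000 / 4096 = 2 and
* rem = 10000 % 4096 = 1808, so the rem != 0 test bumps num to 3:
* two full 4096-byte micro URBs plus a final 1808-byte one. The
* kernel helper expresses the same computation:
*/
#if 0 /* sketch only, equivalent to the math in oxu_urb_enqueue() */
int num = DIV_ROUND_UP(transfer_buffer_length, 4096);
int rem = transfer_buffer_length % 4096;
#endif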
/* Remove from hardware lists.
* Completions normally happen asynchronously
*/
static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
struct ehci_qh *qh;
unsigned long flags;
spin_lock_irqsave(&oxu->lock, flags);
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
default:
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
unlink_async(oxu, qh);
break;
case PIPE_INTERRUPT:
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
switch (qh->qh_state) {
case QH_STATE_LINKED:
intr_deschedule(oxu, qh);
/* FALL THROUGH */
case QH_STATE_IDLE:
qh_completions(oxu, qh);
break;
default:
oxu_dbg(oxu, "bogus qh %p state %d\n",
qh, qh->qh_state);
goto done;
}
/* reschedule QH iff another request is queued */
if (!list_empty(&qh->qtd_list)
&& HC_IS_RUNNING(hcd->state)) {
int status;
status = qh_schedule(oxu, qh);
spin_unlock_irqrestore(&oxu->lock, flags);
if (status != 0) {
/* shouldn't happen often, but ...
* FIXME kill those tds' urbs
*/
err("can't reschedule qh %p, err %d",
qh, status);
}
return status;
}
break;
}
done:
spin_unlock_irqrestore(&oxu->lock, flags);
return 0;
}
/* Bulk qh holds the data toggle */
static void oxu_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
unsigned long flags;
struct ehci_qh *qh, *tmp;
/* ASSERT: any requests/urbs are being unlinked */
/* ASSERT: nobody can be submitting urbs for this any more */
rescan:
spin_lock_irqsave(&oxu->lock, flags);
qh = ep->hcpriv;
if (!qh)
goto done;
/* endpoints can be iso streams. for now, we don't
* accelerate iso completions ... so spin a while.
*/
if (qh->hw_info1 == 0) {
oxu_vdbg(oxu, "iso delay\n");
goto idle_timeout;
}
if (!HC_IS_RUNNING(hcd->state))
qh->qh_state = QH_STATE_IDLE;
switch (qh->qh_state) {
case QH_STATE_LINKED:
for (tmp = oxu->async->qh_next.qh;
tmp && tmp != qh;
tmp = tmp->qh_next.qh)
continue;
/* periodic qh self-unlinks on empty */
if (!tmp)
goto nogood;
unlink_async(oxu, qh);
/* FALL THROUGH */
case QH_STATE_UNLINK: /* wait for hw to finish? */
idle_timeout:
spin_unlock_irqrestore(&oxu->lock, flags);
schedule_timeout_uninterruptible(1);
goto rescan;
case QH_STATE_IDLE: /* fully unlinked */
if (list_empty(&qh->qtd_list)) {
qh_put(qh);
break;
}
/* else FALL THROUGH */
default:
nogood:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
*/
oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
qh, ep->desc.bEndpointAddress, qh->qh_state,
list_empty(&qh->qtd_list) ? "" : "(has tds)");
break;
}
ep->hcpriv = NULL;
done:
spin_unlock_irqrestore(&oxu->lock, flags);
}
static int oxu_get_frame(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
return (readl(&oxu->regs->frame_index) >> 3) %
oxu->periodic_size;
}
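/* Example (illustrative): the frame_index register counts 125 usec
* microframes, so the >> 3 above converts it to 1 msec frames before
* the modulo wraps it into the periodic schedule. A raw reading of
* 0x50 (microframe 80) with periodic_size == 1024 yields frame 10.
*/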
/* Build "status change" packet (one or two bytes) from HC registers */
static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 temp, mask, status = 0;
int ports, i, retval = 1;
unsigned long flags;
/* if !USB_SUSPEND, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
/* init status to no-changes */
buf[0] = 0;
ports = HCS_N_PORTS(oxu->hcs_params);
if (ports > 7) {
buf[1] = 0;
retval++;
}
/* Some boards (mostly VIA?) report bogus overcurrent indications,
* causing massive log spam unless we completely ignore them. It
* may be relevant that VIA VT8235 controllers, where PORT_POWER is
* always set, seem to clear PORT_OCC and PORT_CSC when writing to
* PORT_POWER; that's surprising, but maybe within-spec.
*/
if (!ignore_oc)
mask = PORT_CSC | PORT_PEC | PORT_OCC;
else
mask = PORT_CSC | PORT_PEC;
/* no hub change reports (bit 0) for now (power, ...) */
/* port N changes (bit N)? */
spin_lock_irqsave(&oxu->lock, flags);
for (i = 0; i < ports; i++) {
temp = readl(&oxu->regs->port_status[i]);
/*
* Return status information even for ports with OWNER set.
* Otherwise khubd wouldn't see the disconnect event when a
* high-speed device is switched over to the companion
* controller by the user.
*/
if (!(temp & PORT_CONNECT))
oxu->reset_done[i] = 0;
if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
time_after_eq(jiffies, oxu->reset_done[i]))) {
if (i < 7)
buf[0] |= 1 << (i + 1);
else
buf[1] |= 1 << (i - 7);
status = STS_PCD;
}
}
/* FIXME autosuspend idle root hubs */
spin_unlock_irqrestore(&oxu->lock, flags);
return status ? retval : 0;
}
/* Returns the speed of a device attached to a port on the root hub. */
static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
unsigned int portsc)
{
switch ((portsc >> 26) & 3) {
case 0:
return 0;
case 1:
return USB_PORT_STAT_LOW_SPEED;
case 2:
default:
return USB_PORT_STAT_HIGH_SPEED;
}
}
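/* Example (illustrative, register value hypothetical): this part
* reports the detected speed in bits 27:26 of the port status word.
* For portsc == 0x08001205, ((portsc >> 26) & 3) == 2, so the root
* hub reports USB_PORT_STAT_HIGH_SPEED; a field value of 1 would
* report USB_PORT_STAT_LOW_SPEED, and 0 (full speed) adds no extra
* status bit.
*/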
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int ports = HCS_N_PORTS(oxu->hcs_params);
u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
u32 temp, status;
unsigned long flags;
int retval = 0;
unsigned selector;
/*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
* HCS_INDICATOR may say we can change LEDs to off/amber/green.
* (track current state ourselves) ... blink for diagnostics,
* power, "this is the one", etc. EHCI spec supports this.
*/
spin_lock_irqsave(&oxu->lock, flags);
switch (typeReq) {
case ClearHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case ClearPortFeature:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
temp = readl(status_reg);
/*
* Even if OWNER is set, so the port is owned by the
* companion controller, khubd needs to be able to clear
* the port-change status bits (especially
* USB_PORT_STAT_C_CONNECTION).
*/
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
writel(temp & ~PORT_PE, status_reg);
break;
case USB_PORT_FEAT_C_ENABLE:
writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
break;
case USB_PORT_FEAT_SUSPEND:
if (temp & PORT_RESET)
goto error;
if (temp & PORT_SUSPEND) {
if ((temp & PORT_PE) == 0)
goto error;
/* resume signaling for 20 msec */
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
writel(temp | PORT_RESUME, status_reg);
oxu->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
}
break;
case USB_PORT_FEAT_C_SUSPEND:
/* we auto-clear this feature */
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(oxu->hcs_params))
writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
status_reg);
break;
case USB_PORT_FEAT_C_CONNECTION:
writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
break;
case USB_PORT_FEAT_C_RESET:
/* GetPortStatus clears reset */
break;
default:
goto error;
}
readl(&oxu->regs->command); /* unblock posted write */
break;
case GetHubDescriptor:
ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
buf);
break;
case GetHubStatus:
/* no hub-wide feature/status flags */
memset(buf, 0, 4);
break;
case GetPortStatus:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
status = 0;
temp = readl(status_reg);
/* wPortChange bits */
if (temp & PORT_CSC)
status |= USB_PORT_STAT_C_CONNECTION << 16;
if (temp & PORT_PEC)
status |= USB_PORT_STAT_C_ENABLE << 16;
if ((temp & PORT_OCC) && !ignore_oc)
status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/* whoever resumes must GetPortStatus to complete it!! */
if (temp & PORT_RESUME) {
/* Remote Wakeup received? */
if (!oxu->reset_done[wIndex]) {
/* resume signaling for 20 msec */
oxu->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
/* check the port again */
mod_timer(&oxu_to_hcd(oxu)->rh_timer,
oxu->reset_done[wIndex]);
}
/* resume completed? */
else if (time_after_eq(jiffies,
oxu->reset_done[wIndex])) {
status |= USB_PORT_STAT_C_SUSPEND << 16;
oxu->reset_done[wIndex] = 0;
/* stop resume signaling */
temp = readl(status_reg);
writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
status_reg);
retval = handshake(oxu, status_reg,
PORT_RESUME, 0, 2000 /* 2msec */);
if (retval != 0) {
oxu_err(oxu,
"port %d resume error %d\n",
wIndex + 1, retval);
goto error;
}
temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
}
}
/* whoever resets must GetPortStatus to complete it!! */
if ((temp & PORT_RESET)
&& time_after_eq(jiffies,
oxu->reset_done[wIndex])) {
status |= USB_PORT_STAT_C_RESET << 16;
oxu->reset_done[wIndex] = 0;
/* force reset to complete */
writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
status_reg);
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
retval = handshake(oxu, status_reg,
PORT_RESET, 0, 750);
if (retval != 0) {
oxu_err(oxu, "port %d reset error %d\n",
wIndex + 1, retval);
goto error;
}
/* see what we found out */
temp = check_reset_complete(oxu, wIndex, status_reg,
readl(status_reg));
}
/* transfer dedicated ports to the companion hc */
if ((temp & PORT_CONNECT) &&
test_bit(wIndex, &oxu->companion_ports)) {
temp &= ~PORT_RWC_BITS;
temp |= PORT_OWNER;
writel(temp, status_reg);
oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
temp = readl(status_reg);
}
/*
* Even if OWNER is set, there's no harm letting khubd
* see the wPortStatus values (they should all be 0 except
* for PORT_POWER anyway).
*/
if (temp & PORT_CONNECT) {
status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
status |= oxu_port_speed(oxu, temp);
}
if (temp & PORT_PE)
status |= USB_PORT_STAT_ENABLE;
if (temp & (PORT_SUSPEND|PORT_RESUME))
status |= USB_PORT_STAT_SUSPEND;
if (temp & PORT_OC)
status |= USB_PORT_STAT_OVERCURRENT;
if (temp & PORT_RESET)
status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
status |= USB_PORT_STAT_POWER;
#ifndef OXU_VERBOSE_DEBUG
if (status & ~0xffff) /* only if wPortChange is interesting */
#endif
dbg_port(oxu, "GetStatus", wIndex + 1, temp);
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
break;
case SetHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case SetPortFeature:
selector = wIndex >> 8;
wIndex &= 0xff;
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
temp = readl(status_reg);
if (temp & PORT_OWNER)
break;
temp &= ~PORT_RWC_BITS;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if ((temp & PORT_PE) == 0
|| (temp & PORT_RESET) != 0)
goto error;
if (device_may_wakeup(&hcd->self.root_hub->dev))
temp |= PORT_WAKE_BITS;
writel(temp | PORT_SUSPEND, status_reg);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(oxu->hcs_params))
writel(temp | PORT_POWER, status_reg);
break;
case USB_PORT_FEAT_RESET:
if (temp & PORT_RESUME)
goto error;
/* line status bits may report this as low speed,
* which can be fine if this root hub has a
* transaction translator built in.
*/
oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
temp |= PORT_RESET;
temp &= ~PORT_PE;
/*
* caller must wait, then call GetPortStatus
* usb 2.0 spec says 50 ms resets on root
*/
oxu->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(50);
writel(temp, status_reg);
break;
/* For downstream facing ports (these): one hub port is put
* into test mode according to USB2 11.24.2.13, then the hub
* must be reset (which for root hub now means rmmod+modprobe,
* or else system reboot). See EHCI 2.3.9 and 4.14 for info
* about the EHCI-specific stuff.
*/
case USB_PORT_FEAT_TEST:
if (!selector || selector > 5)
goto error;
ehci_quiesce(oxu);
ehci_halt(oxu);
temp |= selector << 16;
writel(temp, status_reg);
break;
default:
goto error;
}
readl(&oxu->regs->command); /* unblock posted writes */
break;
default:
error:
/* "stall" on error */
retval = -EPIPE;
}
spin_unlock_irqrestore(&oxu->lock, flags);
return retval;
}
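/* Example (illustrative, values hypothetical): GetPortStatus above
* packs wPortStatus into the low 16 bits and wPortChange into the
* high 16 bits before the put_unaligned(). A powered, enabled,
* high-speed port with a fresh connect event yields
*
* status = (USB_PORT_STAT_C_CONNECTION << 16) |
* USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_POWER |
* USB_PORT_STAT_ENABLE | USB_PORT_STAT_CONNECTION;
*
* which the hub driver reads back as wPortChange = 0x0001 and
* wPortStatus = 0x0503.
*/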
#ifdef CONFIG_PM
static int oxu_bus_suspend(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int port;
int mask;
oxu_dbg(oxu, "suspend root hub\n");
if (time_before(jiffies, oxu->next_statechange))
msleep(5);
port = HCS_N_PORTS(oxu->hcs_params);
spin_lock_irq(&oxu->lock);
/* stop schedules, clean any completed work */
if (HC_IS_RUNNING(hcd->state)) {
ehci_quiesce(oxu);
hcd->state = HC_STATE_QUIESCING;
}
oxu->command = readl(&oxu->regs->command);
if (oxu->reclaim)
oxu->reclaim_ready = 1;
ehci_work(oxu);
/* Unlike other USB host controller types, EHCI doesn't have
* any notion of "global" or bus-wide suspend. The driver has
* to manually suspend all the active unsuspended ports, and
* then manually resume them in the bus_resume() routine.
*/
oxu->bus_suspended = 0;
while (port--) {
u32 __iomem *reg = &oxu->regs->port_status[port];
u32 t1 = readl(reg) & ~PORT_RWC_BITS;
u32 t2 = t1;
/* keep track of which ports we suspend */
if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
!(t1 & PORT_SUSPEND)) {
t2 |= PORT_SUSPEND;
set_bit(port, &oxu->bus_suspended);
}
/* enable remote wakeup on all ports */
if (device_may_wakeup(&hcd->self.root_hub->dev))
t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
else
t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);
if (t1 != t2) {
oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
port + 1, t1, t2);
writel(t2, reg);
}
}
/* turn off now-idle HC */
del_timer_sync(&oxu->watchdog);
ehci_halt(oxu);
hcd->state = HC_STATE_SUSPENDED;
/* allow remote wakeup */
mask = INTR_MASK;
if (!device_may_wakeup(&hcd->self.root_hub->dev))
mask &= ~STS_PCD;
writel(mask, &oxu->regs->intr_enable);
readl(&oxu->regs->intr_enable);
oxu->next_statechange = jiffies + msecs_to_jiffies(10);
spin_unlock_irq(&oxu->lock);
return 0;
}
/* Caller has locked the root hub, and should reset/reinit on error */
static int oxu_bus_resume(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 temp;
int i;
if (time_before(jiffies, oxu->next_statechange))
msleep(5);
spin_lock_irq(&oxu->lock);
/* Ideally we've got a real resume here, and no port's power
* was lost. (For PCI, that means Vaux was maintained.) But we
* could instead be restoring a swsusp snapshot -- so that BIOS was
* the last user of the controller, not reset/pm hardware keeping
* state we gave to it.
*/
temp = readl(&oxu->regs->intr_enable);
oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");
/* at least some APM implementations will try to deliver
* IRQs right away, so delay them until we're ready.
*/
writel(0, &oxu->regs->intr_enable);
/* re-init operational registers */
writel(0, &oxu->regs->segment);
writel(oxu->periodic_dma, &oxu->regs->frame_list);
writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
/* restore CMD_RUN, framelist size, and irq threshold */
writel(oxu->command, &oxu->regs->command);
/* Some controller/firmware combinations need a delay during which
* they set up the port statuses. See Bugzilla #8190. */
mdelay(8);
/* manually resume the ports we suspended during bus_suspend() */
i = HCS_N_PORTS(oxu->hcs_params);
while (i--) {
temp = readl(&oxu->regs->port_status[i]);
temp &= ~(PORT_RWC_BITS
| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
temp |= PORT_RESUME;
}
writel(temp, &oxu->regs->port_status[i]);
}
i = HCS_N_PORTS(oxu->hcs_params);
mdelay(20);
while (i--) {
temp = readl(&oxu->regs->port_status[i]);
if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
temp &= ~(PORT_RWC_BITS | PORT_RESUME);
writel(temp, &oxu->regs->port_status[i]);
oxu_vdbg(oxu, "resumed port %d\n", i + 1);
}
}
(void) readl(&oxu->regs->command);
/* maybe re-activate the schedule(s) */
temp = 0;
if (oxu->async->qh_next.qh)
temp |= CMD_ASE;
if (oxu->periodic_sched)
temp |= CMD_PSE;
if (temp) {
oxu->command |= temp;
writel(oxu->command, &oxu->regs->command);
}
oxu->next_statechange = jiffies + msecs_to_jiffies(5);
hcd->state = HC_STATE_RUNNING;
/* Now we can safely re-enable irqs */
writel(INTR_MASK, &oxu->regs->intr_enable);
spin_unlock_irq(&oxu->lock);
return 0;
}
#else
static int oxu_bus_suspend(struct usb_hcd *hcd)
{
return 0;
}
static int oxu_bus_resume(struct usb_hcd *hcd)
{
return 0;
}
#endif /* CONFIG_PM */
static const struct hc_driver oxu_hc_driver = {
.description = "oxu210hp_hcd",
.product_desc = "oxu210hp HCD",
.hcd_priv_size = sizeof(struct oxu_hcd),
/*
* Generic hardware linkage
*/
.irq = oxu_irq,
.flags = HCD_MEMORY | HCD_USB2,
/*
* Basic lifecycle operations
*/
.reset = oxu_reset,
.start = oxu_run,
.stop = oxu_stop,
.shutdown = oxu_shutdown,
/*
* Managing i/o requests and associated device resources
*/
.urb_enqueue = oxu_urb_enqueue,
.urb_dequeue = oxu_urb_dequeue,
.endpoint_disable = oxu_endpoint_disable,
/*
* Scheduling support
*/
.get_frame_number = oxu_get_frame,
/*
* Root hub support
*/
.hub_status_data = oxu_hub_status_data,
.hub_control = oxu_hub_control,
.bus_suspend = oxu_bus_suspend,
.bus_resume = oxu_bus_resume,
};
/*
* Module stuff
*/
static void oxu_configuration(struct platform_device *pdev, void *base)
{
u32 tmp;
/* Initialize top level registers.
* First write ever
*/
oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);
oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
OXU_COMPARATOR | OXU_ASO_OP);
tmp = oxu_readl(base, OXU_CLKCTRL_SET);
oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);
/* Clear all top-level interrupt enables */
oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);
/* Clear all top-level interrupt status bits */
oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);
/* Enable all needed top-level interrupts except the OTG SPH core */
oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
static int oxu_verify_id(struct platform_device *pdev, void *base)
{
u32 id;
static const char * const bo[] = {
"reserved",
"128-pin LQFP",
"84-pin TFBGA",
"reserved",
};
/* Read controller signature register to find a match */
id = oxu_readl(base, OXU_DEVICEID);
dev_info(&pdev->dev, "device ID %x\n", id);
if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
return -1;
dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
id >> OXU_REV_SHIFT,
bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
(id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
(id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);
return 0;
}
static const struct hc_driver oxu_hc_driver;
static struct usb_hcd *oxu_create(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
void *base, int irq, int otg)
{
struct device *dev = &pdev->dev;
struct usb_hcd *hcd;
struct oxu_hcd *oxu;
int ret;
/* Set endian mode and host mode */
oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
OXU_USBMODE,
OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);
hcd = usb_create_hcd(&oxu_hc_driver, dev,
otg ? "oxu210hp_otg" : "oxu210hp_sph");
if (!hcd)
return ERR_PTR(-ENOMEM);
hcd->rsrc_start = memstart;
hcd->rsrc_len = memlen;
hcd->regs = base;
hcd->irq = irq;
hcd->state = HC_STATE_HALT;
oxu = hcd_to_oxu(hcd);
oxu->is_otg = otg;
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret < 0)
return ERR_PTR(ret);
return hcd;
}
static int oxu_init(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
void *base, int irq)
{
struct oxu_info *info = platform_get_drvdata(pdev);
struct usb_hcd *hcd;
int ret;
/* First time configuration at start up */
oxu_configuration(pdev, base);
ret = oxu_verify_id(pdev, base);
if (ret) {
dev_err(&pdev->dev, "no devices found!\n");
return -ENODEV;
}
/* Create the OTG controller */
hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
if (IS_ERR(hcd)) {
dev_err(&pdev->dev, "cannot create OTG controller!\n");
ret = PTR_ERR(hcd);
goto error_create_otg;
}
info->hcd[0] = hcd;
/* Create the SPH host controller */
hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
if (IS_ERR(hcd)) {
dev_err(&pdev->dev, "cannot create SPH controller!\n");
ret = PTR_ERR(hcd);
goto error_create_sph;
}
info->hcd[1] = hcd;
oxu_writel(base, OXU_CHIPIRQEN_SET,
oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);
return 0;
error_create_sph:
usb_remove_hcd(info->hcd[0]);
usb_put_hcd(info->hcd[0]);
error_create_otg:
return ret;
}
static int oxu_drv_probe(struct platform_device *pdev)
{
struct resource *res;
void *base;
unsigned long memstart, memlen;
int irq, ret;
struct oxu_info *info;
if (usb_disabled())
return -ENODEV;
/*
* Get the platform resources
*/
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(&pdev->dev,
"no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
return -ENODEV;
}
irq = res->start;
dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "no registers address! Check %s setup!\n",
dev_name(&pdev->dev));
return -ENODEV;
}
memstart = res->start;
memlen = res->end - res->start + 1;
dev_dbg(&pdev->dev, "MEM resource %lx-%lx\n", memstart, memlen);
if (!request_mem_region(memstart, memlen,
oxu_hc_driver.description)) {
dev_dbg(&pdev->dev, "memory area already in use\n");
return -EBUSY;
}
ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
if (ret) {
dev_err(&pdev->dev, "error setting irq type\n");
ret = -EFAULT;
goto error_set_irq_type;
}
base = ioremap(memstart, memlen);
if (!base) {
dev_dbg(&pdev->dev, "error mapping memory\n");
ret = -EFAULT;
goto error_ioremap;
}
/* Allocate a driver data struct to hold useful info for both
* SPH & OTG devices
*/
info = kzalloc(sizeof(struct oxu_info), GFP_KERNEL);
if (!info) {
dev_dbg(&pdev->dev, "error allocating memory\n");
ret = -EFAULT;
goto error_alloc;
}
platform_set_drvdata(pdev, info);
ret = oxu_init(pdev, memstart, memlen, base, irq);
if (ret < 0) {
dev_dbg(&pdev->dev, "cannot init USB devices\n");
goto error_init;
}
dev_info(&pdev->dev, "devices enabled and running\n");
platform_set_drvdata(pdev, info);
return 0;
error_init:
kfree(info);
platform_set_drvdata(pdev, NULL);
error_alloc:
iounmap(base);
error_set_irq_type:
error_ioremap:
release_mem_region(memstart, memlen);
dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
return ret;
}
static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
{
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
}
static int oxu_drv_remove(struct platform_device *pdev)
{
struct oxu_info *info = platform_get_drvdata(pdev);
unsigned long memstart = info->hcd[0]->rsrc_start,
memlen = info->hcd[0]->rsrc_len;
void *base = info->hcd[0]->regs;
oxu_remove(pdev, info->hcd[0]);
oxu_remove(pdev, info->hcd[1]);
iounmap(base);
release_mem_region(memstart, memlen);
kfree(info);
platform_set_drvdata(pdev, NULL);
return 0;
}
static void oxu_drv_shutdown(struct platform_device *pdev)
{
oxu_drv_remove(pdev);
}
#if 0
/* FIXME: TODO */
static int oxu_drv_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct usb_hcd *hcd = dev_get_drvdata(dev);
return 0;
}
static int oxu_drv_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct usb_hcd *hcd = dev_get_drvdata(dev);
return 0;
}
#else
#define oxu_drv_suspend NULL
#define oxu_drv_resume NULL
#endif
static struct platform_driver oxu_driver = {
.probe = oxu_drv_probe,
.remove = oxu_drv_remove,
.shutdown = oxu_drv_shutdown,
.suspend = oxu_drv_suspend,
.resume = oxu_drv_resume,
.driver = {
.name = "oxu210hp-hcd",
.bus = &platform_bus_type
}
};
static int __init oxu_module_init(void)
{
int retval = 0;
retval = platform_driver_register(&oxu_driver);
if (retval < 0)
return retval;
return retval;
}
static void __exit oxu_module_cleanup(void)
{
platform_driver_unregister(&oxu_driver);
}
module_init(oxu_module_init);
module_exit(oxu_module_cleanup);
MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
rofehr/linux-wetek | fs/cifs/cifs_spnego.c | 2757 | 4952 | /*
* fs/cifs/cifs_spnego.c -- SPNEGO upcall management for CIFS
*
* Copyright (c) 2007 Red Hat, Inc.
* Author(s): Jeff Layton (jlayton@redhat.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <keys/user-type.h>
#include <linux/key-type.h>
#include <linux/inet.h>
#include "cifsglob.h"
#include "cifs_spnego.h"
#include "cifs_debug.h"
/* create a new cifs key */
static int
cifs_spnego_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
char *payload;
int ret;
ret = -ENOMEM;
payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
if (!payload)
goto error;
/* attach the data */
key->payload.data = payload;
ret = 0;
error:
return ret;
}
static void
cifs_spnego_key_destroy(struct key *key)
{
kfree(key->payload.data);
}
/*
* keytype for CIFS spnego keys
*/
struct key_type cifs_spnego_key_type = {
.name = "cifs.spnego",
.instantiate = cifs_spnego_key_instantiate,
.match = user_match,
.destroy = cifs_spnego_key_destroy,
.describe = user_describe,
};
/* length of longest version string e.g. strlen("ver=0xFF") */
#define MAX_VER_STR_LEN 8
/* length of longest security mechanism name, e.g. in future could have
* strlen(";sec=ntlmsspi") */
#define MAX_MECH_STR_LEN 13
/* strlen of "host=" */
#define HOST_KEY_LEN 5
/* strlen of ";ip4=" or ";ip6=" */
#define IP_KEY_LEN 5
/* strlen of ";uid=0x" */
#define UID_KEY_LEN 7
/* strlen of ";creduid=0x" */
#define CREDUID_KEY_LEN 11
/* strlen of ";user=" */
#define USER_KEY_LEN 6
/* strlen of ";pid=0x" */
#define PID_KEY_LEN 7
/* get a key struct with a SPNEGO security blob, suitable for session setup */
struct key *
cifs_get_spnego_key(struct cifs_ses *sesInfo)
{
struct TCP_Server_Info *server = sesInfo->server;
struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
char *description, *dp;
size_t desc_len;
struct key *spnego_key;
const char *hostname = server->hostname;
/* length of fields (with semicolons): ver=0xyz ip4=ipaddress
host=hostname sec=mechanism uid=0xFF user=username */
desc_len = MAX_VER_STR_LEN +
HOST_KEY_LEN + strlen(hostname) +
IP_KEY_LEN + INET6_ADDRSTRLEN +
MAX_MECH_STR_LEN +
UID_KEY_LEN + (sizeof(uid_t) * 2) +
CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
if (sesInfo->user_name)
desc_len += USER_KEY_LEN + strlen(sesInfo->user_name);
spnego_key = ERR_PTR(-ENOMEM);
description = kzalloc(desc_len, GFP_KERNEL);
if (description == NULL)
goto out;
dp = description;
/* start with version and hostname portion of UNC string */
spnego_key = ERR_PTR(-EINVAL);
sprintf(dp, "ver=0x%x;host=%s;", CIFS_SPNEGO_UPCALL_VERSION,
hostname);
dp = description + strlen(description);
/* add the server address */
if (server->dstaddr.ss_family == AF_INET)
sprintf(dp, "ip4=%pI4", &sa->sin_addr);
else if (server->dstaddr.ss_family == AF_INET6)
sprintf(dp, "ip6=%pI6", &sa6->sin6_addr);
else
goto out;
dp = description + strlen(description);
/* for now, only sec=krb5 and sec=mskrb5 are valid */
if (server->sec_kerberos)
sprintf(dp, ";sec=krb5");
else if (server->sec_mskerberos)
sprintf(dp, ";sec=mskrb5");
else
goto out;
dp = description + strlen(description);
sprintf(dp, ";uid=0x%x",
from_kuid_munged(&init_user_ns, sesInfo->linux_uid));
dp = description + strlen(description);
sprintf(dp, ";creduid=0x%x",
from_kuid_munged(&init_user_ns, sesInfo->cred_uid));
if (sesInfo->user_name) {
dp = description + strlen(description);
sprintf(dp, ";user=%s", sesInfo->user_name);
}
dp = description + strlen(description);
sprintf(dp, ";pid=0x%x", current->pid);
cifs_dbg(FYI, "key description = %s\n", description);
spnego_key = request_key(&cifs_spnego_key_type, description, "");
#ifdef CONFIG_CIFS_DEBUG2
if (cifsFYI && !IS_ERR(spnego_key)) {
struct cifs_spnego_msg *msg = spnego_key->payload.data;
cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024U,
msg->secblob_len + msg->sesskey_len));
}
#endif /* CONFIG_CIFS_DEBUG2 */
out:
kfree(description);
return spnego_key;
}
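/* Example (illustrative, concrete values hypothetical): a kerberos
* session to 192.168.1.5 as uid 1000 would produce a description
* along the lines of
*
* ver=0x2;host=fileserver;ip4=192.168.1.5;sec=krb5;uid=0x3e8;creduid=0x3e8;user=guest;pid=0x1f40
*
* which the userspace cifs.upcall(8) helper parses to obtain the
* matching SPNEGO blob.
*/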
| gpl-2.0 |
shaqfu786/android_kernel_lge_omap4-common | lib/find_next_bit.c | 3013 | 6535 | /* find_next_bit.c: fallback find next bit implementation
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/module.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
#ifndef find_next_bit
/*
* Find the next set bit in a memory region.
*/
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
if (offset) {
tmp = *(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
}
EXPORT_SYMBOL(find_next_bit);
#endif
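/* Usage sketch (illustrative): iterate over the set bits of a single
* word. With map == 0x90 (bits 4 and 7 set) the loop visits bit 4,
* then bit 7, then terminates when find_next_bit() returns the size.
*/
#if 0 /* sketch only */
unsigned long map = 0x90;
unsigned long bit;
for (bit = find_next_bit(&map, BITS_PER_LONG, 0);
bit < BITS_PER_LONG;
bit = find_next_bit(&map, BITS_PER_LONG, bit + 1))
pr_debug("bit %lu is set\n", bit);
#endif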
#ifndef find_next_zero_bit
/*
* This implementation of find_{first,next}_zero_bit was stolen from
* Linus' asm-alpha/bitops.h.
*/
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
if (offset) {
tmp = *(p++);
tmp |= ~0UL >> (BITS_PER_LONG - offset);
if (size < BITS_PER_LONG)
goto found_first;
if (~tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if (~(tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp |= ~0UL << size;
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. */
found_middle:
return result + ffz(tmp);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif
#ifndef find_first_bit
/*
* Find the first set bit in a memory region.
*/
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found:
return result + __ffs(tmp);
}
EXPORT_SYMBOL(find_first_bit);
#endif
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.
*/
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
while (size & ~(BITS_PER_LONG-1)) {
if (~(tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = (*p) | (~0UL << size);
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. */
found:
return result + ffz(tmp);
}
EXPORT_SYMBOL(find_first_zero_bit);
#endif
#ifdef __BIG_ENDIAN
/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swabp(const unsigned long * x)
{
#if BITS_PER_LONG == 64
return (unsigned long) __swab64p((u64 *) x);
#elif BITS_PER_LONG == 32
return (unsigned long) __swab32p((u32 *) x);
#else
#error BITS_PER_LONG not defined
#endif
}
/* include/linux/byteorder doesn't support "unsigned long" type */
static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
return (unsigned long) __swab64((u64) y);
#elif BITS_PER_LONG == 32
return (unsigned long) __swab32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
}
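/* Example (illustrative): on a 32-bit big-endian host a little-endian
* bitmap word whose first byte is 0x01 (bit 0 set in little-endian
* numbering) reads back from memory as 0x01000000; ext2_swab() turns
* that into 0x00000001 so the generic __ffs()/ffz() helpers see the
* bit numbering the _le variants below promise.
*/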
#ifndef find_next_zero_bit_le
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
const unsigned long *p = addr;
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
if (offset >= size)
return size;
p += BITOP_WORD(offset);
size -= result;
offset &= (BITS_PER_LONG - 1UL);
if (offset) {
tmp = ext2_swabp(p++);
tmp |= (~0UL >> (BITS_PER_LONG - offset));
if (size < BITS_PER_LONG)
goto found_first;
if (~tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG - 1)) {
if (~(tmp = *(p++)))
goto found_middle_swap;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = ext2_swabp(p);
found_first:
tmp |= ~0UL << size;
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. Skip ffz */
found_middle:
return result + ffz(tmp);
found_middle_swap:
return result + ffz(ext2_swab(tmp));
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif
#ifndef find_next_bit_le
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
const unsigned long *p = addr;
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
if (offset >= size)
return size;
p += BITOP_WORD(offset);
size -= result;
offset &= (BITS_PER_LONG - 1UL);
if (offset) {
tmp = ext2_swabp(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG - 1)) {
tmp = *(p++);
if (tmp)
goto found_middle_swap;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = ext2_swabp(p);
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
found_middle_swap:
return result + __ffs(ext2_swab(tmp));
}
EXPORT_SYMBOL(find_next_bit_le);
#endif
#endif /* __BIG_ENDIAN */
| gpl-2.0 |
lg-devs/android_kernel_lge_msm8974 | drivers/target/iscsi/iscsi_target_util.c | 3269 | 41350 | /*******************************************************************************
* This file contains the iSCSI Target specific utility functions.
*
* © Copyright 2007-2011 RisingTide Systems LLC.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <linux/list.h>
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_tq.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#define PRINT_BUFF(buff, len) \
{ \
int zzz; \
\
pr_debug("%d:\n", __LINE__); \
for (zzz = 0; zzz < len; zzz++) { \
if (zzz % 16 == 0) { \
if (zzz) \
pr_debug("\n"); \
pr_debug("%4i: ", zzz); \
} \
pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
} \
if ((len + 1) % 16) \
pr_debug("\n"); \
}
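/* Usage sketch (illustrative): dump the first 48 bytes of a received
* PDU while debugging, 16 hex bytes per line with a leading offset:
*
* PRINT_BUFF(buf, 48);
*/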
extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;
/*
* Called with cmd->r2t_lock held.
*/
int iscsit_add_r2t_to_list(
struct iscsi_cmd *cmd,
u32 offset,
u32 xfer_len,
int recovery,
u32 r2t_sn)
{
struct iscsi_r2t *r2t;
r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
if (!r2t) {
pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
return -1;
}
INIT_LIST_HEAD(&r2t->r2t_list);
r2t->recovery_r2t = recovery;
r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
r2t->offset = offset;
r2t->xfer_len = xfer_len;
list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
spin_unlock_bh(&cmd->r2t_lock);
iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
spin_lock_bh(&cmd->r2t_lock);
return 0;
}
struct iscsi_r2t *iscsit_get_r2t_for_eos(
struct iscsi_cmd *cmd,
u32 offset,
u32 length)
{
struct iscsi_r2t *r2t;
spin_lock_bh(&cmd->r2t_lock);
list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
if ((r2t->offset <= offset) &&
(r2t->offset + r2t->xfer_len) >= (offset + length)) {
spin_unlock_bh(&cmd->r2t_lock);
return r2t;
}
}
spin_unlock_bh(&cmd->r2t_lock);
pr_err("Unable to locate R2T for Offset: %u, Length:"
" %u\n", offset, length);
return NULL;
}
struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
{
struct iscsi_r2t *r2t;
spin_lock_bh(&cmd->r2t_lock);
list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
if (!r2t->sent_r2t) {
spin_unlock_bh(&cmd->r2t_lock);
return r2t;
}
}
spin_unlock_bh(&cmd->r2t_lock);
pr_err("Unable to locate next R2T to send for ITT:"
" 0x%08x.\n", cmd->init_task_tag);
return NULL;
}
/*
* Called with cmd->r2t_lock held.
*/
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
list_del(&r2t->r2t_list);
kmem_cache_free(lio_r2t_cache, r2t);
}
void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
{
struct iscsi_r2t *r2t, *r2t_tmp;
spin_lock_bh(&cmd->r2t_lock);
list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
iscsit_free_r2t(r2t, cmd);
spin_unlock_bh(&cmd->r2t_lock);
}
/*
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
*/
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
{
struct iscsi_cmd *cmd;
cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
if (!cmd) {
pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
return NULL;
}
cmd->conn = conn;
INIT_LIST_HEAD(&cmd->i_list);
INIT_LIST_HEAD(&cmd->datain_list);
INIT_LIST_HEAD(&cmd->cmd_r2t_list);
init_completion(&cmd->reject_comp);
spin_lock_init(&cmd->datain_lock);
spin_lock_init(&cmd->dataout_timeout_lock);
spin_lock_init(&cmd->istate_lock);
spin_lock_init(&cmd->error_lock);
spin_lock_init(&cmd->r2t_lock);
return cmd;
}
/*
* Called from iscsi_handle_scsi_cmd()
*/
struct iscsi_cmd *iscsit_allocate_se_cmd(
struct iscsi_conn *conn,
u32 data_length,
int data_direction,
int iscsi_task_attr)
{
struct iscsi_cmd *cmd;
struct se_cmd *se_cmd;
int sam_task_attr;
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
return NULL;
cmd->data_direction = data_direction;
cmd->data_length = data_length;
/*
* Figure out the SAM Task Attribute for the incoming SCSI CDB
*/
if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
(iscsi_task_attr == ISCSI_ATTR_SIMPLE))
sam_task_attr = MSG_SIMPLE_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
sam_task_attr = MSG_ORDERED_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
sam_task_attr = MSG_HEAD_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_ACA)
sam_task_attr = MSG_ACA_TAG;
else {
pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
" MSG_SIMPLE_TAG\n", iscsi_task_attr);
sam_task_attr = MSG_SIMPLE_TAG;
}
se_cmd = &cmd->se_cmd;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
conn->sess->se_sess, data_length, data_direction,
sam_task_attr, &cmd->sense_buffer[0]);
return cmd;
}
struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
struct iscsi_conn *conn,
u8 function)
{
struct iscsi_cmd *cmd;
struct se_cmd *se_cmd;
int rc;
u8 tcm_function;
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
return NULL;
cmd->data_direction = DMA_NONE;
cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
if (!cmd->tmr_req) {
pr_err("Unable to allocate memory for"
" Task Management command!\n");
goto out;
}
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
* LIO-Target $FABRIC_MOD
*/
if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
return cmd;
se_cmd = &cmd->se_cmd;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
conn->sess->se_sess, 0, DMA_NONE,
MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
switch (function) {
case ISCSI_TM_FUNC_ABORT_TASK:
tcm_function = TMR_ABORT_TASK;
break;
case ISCSI_TM_FUNC_ABORT_TASK_SET:
tcm_function = TMR_ABORT_TASK_SET;
break;
case ISCSI_TM_FUNC_CLEAR_ACA:
tcm_function = TMR_CLEAR_ACA;
break;
case ISCSI_TM_FUNC_CLEAR_TASK_SET:
tcm_function = TMR_CLEAR_TASK_SET;
break;
case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
tcm_function = TMR_LUN_RESET;
break;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
tcm_function = TMR_TARGET_WARM_RESET;
break;
case ISCSI_TM_FUNC_TARGET_COLD_RESET:
tcm_function = TMR_TARGET_COLD_RESET;
break;
default:
pr_err("Unknown iSCSI TMR Function:"
" 0x%02x\n", function);
goto out;
}
rc = core_tmr_alloc_req(se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL);
if (rc < 0)
goto out;
cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
return cmd;
out:
iscsit_release_cmd(cmd);
return NULL;
}
int iscsit_decide_list_to_build(
struct iscsi_cmd *cmd,
u32 immediate_data_length)
{
struct iscsi_build_list bl;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na;
if (sess->sess_ops->DataSequenceInOrder &&
sess->sess_ops->DataPDUInOrder)
return 0;
if (cmd->data_direction == DMA_NONE)
return 0;
na = iscsit_tpg_get_node_attrib(sess);
memset(&bl, 0, sizeof(struct iscsi_build_list));
if (cmd->data_direction == DMA_FROM_DEVICE) {
bl.data_direction = ISCSI_PDU_READ;
bl.type = PDULIST_NORMAL;
if (na->random_datain_pdu_offsets)
bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
if (na->random_datain_seq_offsets)
bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
} else {
bl.data_direction = ISCSI_PDU_WRITE;
bl.immediate_data_length = immediate_data_length;
if (na->random_r2t_offsets)
bl.randomize |= RANDOM_R2T_OFFSETS;
if (!cmd->immediate_data && !cmd->unsolicited_data)
bl.type = PDULIST_NORMAL;
else if (cmd->immediate_data && !cmd->unsolicited_data)
bl.type = PDULIST_IMMEDIATE;
else if (!cmd->immediate_data && cmd->unsolicited_data)
bl.type = PDULIST_UNSOLICITED;
else if (cmd->immediate_data && cmd->unsolicited_data)
bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
}
return iscsit_do_build_list(cmd, &bl);
}
struct iscsi_seq *iscsit_get_seq_holder_for_datain(
struct iscsi_cmd *cmd,
u32 seq_send_order)
{
u32 i;
for (i = 0; i < cmd->seq_count; i++)
if (cmd->seq_list[i].seq_send_order == seq_send_order)
return &cmd->seq_list[i];
return NULL;
}
struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
{
u32 i;
if (!cmd->seq_list) {
pr_err("struct iscsi_cmd->seq_list is NULL!\n");
return NULL;
}
for (i = 0; i < cmd->seq_count; i++) {
if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
continue;
if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
cmd->seq_send_order++;
return &cmd->seq_list[i];
}
}
return NULL;
}
struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
struct iscsi_cmd *cmd,
u32 r2t_sn)
{
struct iscsi_r2t *r2t;
spin_lock_bh(&cmd->r2t_lock);
list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
if (r2t->r2t_sn == r2t_sn) {
spin_unlock_bh(&cmd->r2t_lock);
return r2t;
}
}
spin_unlock_bh(&cmd->r2t_lock);
return NULL;
}
static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
{
int ret;
/*
* This is the proper method of checking received CmdSN against
* ExpCmdSN and MaxCmdSN values, as well as accounting for out-
* of-order CmdSNs due to multiple connection sessions and/or
* CRC failures.
*/
if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
pr_err("Received CmdSN: 0x%08x is greater than"
" MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
sess->max_cmd_sn);
ret = CMDSN_ERROR_CANNOT_RECOVER;
} else if (cmdsn == sess->exp_cmd_sn) {
sess->exp_cmd_sn++;
pr_debug("Received CmdSN matches ExpCmdSN,"
" incremented ExpCmdSN to: 0x%08x\n",
sess->exp_cmd_sn);
ret = CMDSN_NORMAL_OPERATION;
} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
pr_debug("Received CmdSN: 0x%08x is greater"
" than ExpCmdSN: 0x%08x, not acknowledging.\n",
cmdsn, sess->exp_cmd_sn);
ret = CMDSN_HIGHER_THAN_EXP;
} else {
pr_err("Received CmdSN: 0x%08x is less than"
" ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
sess->exp_cmd_sn);
ret = CMDSN_LOWER_THAN_EXP;
}
return ret;
}
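/* Worked example (illustrative): iscsi_sna_gt() implements 32-bit
* serial number arithmetic, so the checks above survive CmdSN wrap.
* With exp_cmd_sn == 0xfffffffe and max_cmd_sn == 0x00000002 (a
* window of five commands straddling the wrap), a received cmdsn of
* 0x00000001 compares as "higher than expected" and is queued for
* later, while 0x00000005 compares as beyond MaxCmdSN and is a
* protocol error.
*/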
/*
* Commands may be received out of order if MC/S is in use.
* Ensure they are executed in CmdSN order.
*/
int iscsit_sequence_cmd(
struct iscsi_conn *conn,
struct iscsi_cmd *cmd,
u32 cmdsn)
{
int ret;
int cmdsn_ret;
mutex_lock(&conn->sess->cmdsn_mutex);
cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
switch (cmdsn_ret) {
case CMDSN_NORMAL_OPERATION:
ret = iscsit_execute_cmd(cmd, 0);
if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
iscsit_execute_ooo_cmdsns(conn->sess);
break;
case CMDSN_HIGHER_THAN_EXP:
ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
break;
case CMDSN_LOWER_THAN_EXP:
cmd->i_state = ISTATE_REMOVE;
iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
ret = cmdsn_ret;
break;
default:
ret = cmdsn_ret;
break;
}
mutex_unlock(&conn->sess->cmdsn_mutex);
return ret;
}
int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
struct iscsi_conn *conn = cmd->conn;
struct se_cmd *se_cmd = &cmd->se_cmd;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
if (conn->sess->sess_ops->InitialR2T) {
pr_err("Received unexpected unsolicited data"
" while InitialR2T=Yes, protocol error.\n");
transport_send_check_condition_and_sense(se_cmd,
TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
return -1;
}
if ((cmd->first_burst_len + payload_length) >
conn->sess->sess_ops->FirstBurstLength) {
pr_err("Total %u bytes exceeds FirstBurstLength: %u"
" for this Unsolicited DataOut Burst.\n",
(cmd->first_burst_len + payload_length),
conn->sess->sess_ops->FirstBurstLength);
transport_send_check_condition_and_sense(se_cmd,
TCM_INCORRECT_AMOUNT_OF_DATA, 0);
return -1;
}
if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
return 0;
if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
((cmd->first_burst_len + payload_length) !=
conn->sess->sess_ops->FirstBurstLength)) {
pr_err("Unsolicited non-immediate data received %u"
" does not equal FirstBurstLength: %u, and does"
" not equal ExpXferLen %u.\n",
(cmd->first_burst_len + payload_length),
conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
transport_send_check_condition_and_sense(se_cmd,
TCM_INCORRECT_AMOUNT_OF_DATA, 0);
return -1;
}
return 0;
}
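/* Worked example (illustrative): with FirstBurstLength == 65536 and a
* 70000-byte WRITE, the initiator may send at most 65536 bytes of
* unsolicited Data-Out. The final PDU of the burst must carry the F
* bit and bring first_burst_len to exactly 65536 (or to the full
* transfer length, had it been smaller than the burst limit);
* anything else fails the checks above with a CHECK CONDITION.
*/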
struct iscsi_cmd *iscsit_find_cmd_from_itt(
struct iscsi_conn *conn,
u32 init_task_tag)
{
struct iscsi_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
}
}
spin_unlock_bh(&conn->cmd_lock);
pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
init_task_tag, conn->cid);
return NULL;
}
struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
struct iscsi_conn *conn,
u32 init_task_tag,
u32 length)
{
struct iscsi_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
}
}
spin_unlock_bh(&conn->cmd_lock);
pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
" dumping payload\n", init_task_tag, conn->cid);
if (length)
iscsit_dump_data_payload(conn, length, 1);
return NULL;
}
struct iscsi_cmd *iscsit_find_cmd_from_ttt(
struct iscsi_conn *conn,
u32 targ_xfer_tag)
{
struct iscsi_cmd *cmd = NULL;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
if (cmd->targ_xfer_tag == targ_xfer_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
}
}
spin_unlock_bh(&conn->cmd_lock);
pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
targ_xfer_tag, conn->cid);
return NULL;
}
int iscsit_find_cmd_for_recovery(
struct iscsi_session *sess,
struct iscsi_cmd **cmd_ptr,
struct iscsi_conn_recovery **cr_ptr,
u32 init_task_tag)
{
struct iscsi_cmd *cmd = NULL;
struct iscsi_conn_recovery *cr;
/*
* Scan through the inactive connection recovery list's command list.
* If init_task_tag matches, the command still belongs to an
* inactive connection recovery and cannot be reassigned yet.
*/
spin_lock(&sess->cr_i_lock);
list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_unlock(&sess->cr_i_lock);
*cr_ptr = cr;
*cmd_ptr = cmd;
return -2;
}
}
spin_unlock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&sess->cr_i_lock);
/*
* Scan through the active connection recovery list's command list.
* If init_task_tag matches, the command is ready to be reassigned.
*/
spin_lock(&sess->cr_a_lock);
list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_unlock(&sess->cr_a_lock);
*cr_ptr = cr;
*cmd_ptr = cmd;
return 0;
}
}
spin_unlock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&sess->cr_a_lock);
return -1;
}
void iscsit_add_cmd_to_immediate_queue(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn,
u8 state)
{
struct iscsi_queue_req *qr;
qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
if (!qr) {
pr_err("Unable to allocate memory for"
" struct iscsi_queue_req\n");
return;
}
INIT_LIST_HEAD(&qr->qr_list);
qr->cmd = cmd;
qr->state = state;
spin_lock_bh(&conn->immed_queue_lock);
list_add_tail(&qr->qr_list, &conn->immed_queue_list);
atomic_inc(&cmd->immed_queue_count);
atomic_set(&conn->check_immediate_queue, 1);
spin_unlock_bh(&conn->immed_queue_lock);
wake_up_process(conn->thread_set->tx_thread);
}
struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
struct iscsi_queue_req *qr;
spin_lock_bh(&conn->immed_queue_lock);
if (list_empty(&conn->immed_queue_list)) {
spin_unlock_bh(&conn->immed_queue_lock);
return NULL;
}
list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
break;
list_del(&qr->qr_list);
if (qr->cmd)
atomic_dec(&qr->cmd->immed_queue_count);
spin_unlock_bh(&conn->immed_queue_lock);
return qr;
}
static void iscsit_remove_cmd_from_immediate_queue(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
spin_lock_bh(&conn->immed_queue_lock);
if (!atomic_read(&cmd->immed_queue_count)) {
spin_unlock_bh(&conn->immed_queue_lock);
return;
}
list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
if (qr->cmd != cmd)
continue;
atomic_dec(&qr->cmd->immed_queue_count);
list_del(&qr->qr_list);
kmem_cache_free(lio_qr_cache, qr);
}
spin_unlock_bh(&conn->immed_queue_lock);
if (atomic_read(&cmd->immed_queue_count)) {
pr_err("ITT: 0x%08x immed_queue_count: %d\n",
cmd->init_task_tag,
atomic_read(&cmd->immed_queue_count));
}
}
void iscsit_add_cmd_to_response_queue(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn,
u8 state)
{
struct iscsi_queue_req *qr;
qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
if (!qr) {
pr_err("Unable to allocate memory for"
" struct iscsi_queue_req\n");
return;
}
INIT_LIST_HEAD(&qr->qr_list);
qr->cmd = cmd;
qr->state = state;
spin_lock_bh(&conn->response_queue_lock);
list_add_tail(&qr->qr_list, &conn->response_queue_list);
atomic_inc(&cmd->response_queue_count);
spin_unlock_bh(&conn->response_queue_lock);
wake_up_process(conn->thread_set->tx_thread);
}
struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
{
struct iscsi_queue_req *qr;
spin_lock_bh(&conn->response_queue_lock);
if (list_empty(&conn->response_queue_list)) {
spin_unlock_bh(&conn->response_queue_lock);
return NULL;
}
list_for_each_entry(qr, &conn->response_queue_list, qr_list)
break;
list_del(&qr->qr_list);
if (qr->cmd)
atomic_dec(&qr->cmd->response_queue_count);
spin_unlock_bh(&conn->response_queue_lock);
return qr;
}
static void iscsit_remove_cmd_from_response_queue(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
spin_lock_bh(&conn->response_queue_lock);
if (!atomic_read(&cmd->response_queue_count)) {
spin_unlock_bh(&conn->response_queue_lock);
return;
}
list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
qr_list) {
if (qr->cmd != cmd)
continue;
atomic_dec(&qr->cmd->response_queue_count);
list_del(&qr->qr_list);
kmem_cache_free(lio_qr_cache, qr);
}
spin_unlock_bh(&conn->response_queue_lock);
if (atomic_read(&cmd->response_queue_count)) {
pr_err("ITT: 0x%08x response_queue_count: %d\n",
cmd->init_task_tag,
atomic_read(&cmd->response_queue_count));
}
}
void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
spin_lock_bh(&conn->immed_queue_lock);
list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
list_del(&qr->qr_list);
if (qr->cmd)
atomic_dec(&qr->cmd->immed_queue_count);
kmem_cache_free(lio_qr_cache, qr);
}
spin_unlock_bh(&conn->immed_queue_lock);
spin_lock_bh(&conn->response_queue_lock);
list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
qr_list) {
list_del(&qr->qr_list);
if (qr->cmd)
atomic_dec(&qr->cmd->response_queue_count);
kmem_cache_free(lio_qr_cache, qr);
}
spin_unlock_bh(&conn->response_queue_lock);
}
void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
struct iscsi_conn *conn = cmd->conn;
int i;
iscsit_free_r2ts_from_list(cmd);
iscsit_free_all_datain_reqs(cmd);
kfree(cmd->buf_ptr);
kfree(cmd->pdu_list);
kfree(cmd->seq_list);
kfree(cmd->tmr_req);
kfree(cmd->iov_data);
for (i = 0; i < cmd->t_mem_sg_nents; i++)
__free_page(sg_page(&cmd->t_mem_sg[i]));
kfree(cmd->t_mem_sg);
if (conn) {
iscsit_remove_cmd_from_immediate_queue(cmd, conn);
iscsit_remove_cmd_from_response_queue(cmd, conn);
}
kmem_cache_free(lio_cmd_cache, cmd);
}
void iscsit_free_cmd(struct iscsi_cmd *cmd)
{
/*
* Determine if a struct se_cmd is associated with
* this struct iscsi_cmd.
*/
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
case ISCSI_OP_SCSI_TMFUNC:
transport_generic_free_cmd(&cmd->se_cmd, 1);
break;
case ISCSI_OP_REJECT:
/*
* Handle special case for REJECT when iscsi_add_reject*() has
* overwritten the original iscsi_opcode assignment, and the
* associated cmd->se_cmd needs to be released.
*/
if (cmd->se_cmd.se_tfo != NULL) {
transport_generic_free_cmd(&cmd->se_cmd, 1);
break;
}
/* Fall-through */
default:
iscsit_release_cmd(cmd);
break;
}
}
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
spin_lock_bh(&sess->session_usage_lock);
if (sess->session_usage_count != 0) {
sess->session_waiting_on_uc = 1;
spin_unlock_bh(&sess->session_usage_lock);
if (in_interrupt())
return 2;
wait_for_completion(&sess->session_waiting_on_uc_comp);
return 1;
}
spin_unlock_bh(&sess->session_usage_lock);
return 0;
}
void iscsit_dec_session_usage_count(struct iscsi_session *sess)
{
spin_lock_bh(&sess->session_usage_lock);
sess->session_usage_count--;
if (!sess->session_usage_count && sess->session_waiting_on_uc)
complete(&sess->session_waiting_on_uc_comp);
spin_unlock_bh(&sess->session_usage_lock);
}
void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
spin_lock_bh(&sess->session_usage_lock);
sess->session_usage_count++;
spin_unlock_bh(&sess->session_usage_lock);
}
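/*
 * The three helpers above implement a common quiesce pattern: normal
 * paths bump and drop session_usage_count under a spinlock, while a
 * teardown path sets session_waiting_on_uc and sleeps on a completion
 * until the count drains to zero. A minimal sketch of the same idea,
 * using hypothetical names that are not part of this driver:
 *
 *	struct quiesce {
 *		spinlock_t lock;
 *		int users;
 *		bool draining;
 *		struct completion drained;
 *	};
 *
 *	static void quiesce_put(struct quiesce *q)
 *	{
 *		spin_lock_bh(&q->lock);
 *		if (!--q->users && q->draining)
 *			complete(&q->drained);
 *		spin_unlock_bh(&q->lock);
 *	}
 */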
/*
* Setup conn->if_marker and conn->of_marker values based upon
* the initial marker-less interval. (see iSCSI v19 A.2)
*/
int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
{
int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
/*
* IFMarkInt and OFMarkInt are negotiated as 32-bit words.
*/
u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
if (conn->conn_ops->OFMarker) {
/*
* Account for the first Login Command received not
* via iscsi_recv_msg().
*/
conn->of_marker += ISCSI_HDR_LEN;
if (conn->of_marker <= OFMarkInt) {
conn->of_marker = (OFMarkInt - conn->of_marker);
} else {
login_ofmarker_count = (conn->of_marker / OFMarkInt);
next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
(login_ofmarker_count * MARKER_SIZE);
conn->of_marker = (next_marker - conn->of_marker);
}
conn->of_marker_offset = 0;
pr_debug("Setting OFMarker value to %u based on Initial"
" Markerless Interval.\n", conn->of_marker);
}
if (conn->conn_ops->IFMarker) {
if (conn->if_marker <= IFMarkInt) {
conn->if_marker = (IFMarkInt - conn->if_marker);
} else {
login_ifmarker_count = (conn->if_marker / IFMarkInt);
next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
(login_ifmarker_count * MARKER_SIZE);
conn->if_marker = (next_marker - conn->if_marker);
}
pr_debug("Setting IFMarker value to %u based on Initial"
" Markerless Interval.\n", conn->if_marker);
}
return 0;
}
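/*
 * Worked example (values assumed purely for illustration): with
 * OFMarkInt negotiated as 2048 words (8192 bytes) and of_marker at
 * 10000 bytes after login, login_ofmarker_count = 10000 / 8192 = 1 and
 * next_marker = 8192 * 2 + 1 * MARKER_SIZE = 16392 (taking MARKER_SIZE
 * as 8 bytes), so the first Full Feature Phase marker is expected
 * 16392 - 10000 = 6392 bytes into the stream.
 */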
struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
{
struct iscsi_conn *conn;
spin_lock_bh(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
if ((conn->cid == cid) &&
(conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
iscsit_inc_conn_usage_count(conn);
spin_unlock_bh(&sess->conn_lock);
return conn;
}
}
spin_unlock_bh(&sess->conn_lock);
return NULL;
}
struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
{
struct iscsi_conn *conn;
spin_lock_bh(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
if (conn->cid == cid) {
iscsit_inc_conn_usage_count(conn);
spin_lock(&conn->state_lock);
atomic_set(&conn->connection_wait_rcfr, 1);
spin_unlock(&conn->state_lock);
spin_unlock_bh(&sess->conn_lock);
return conn;
}
}
spin_unlock_bh(&sess->conn_lock);
return NULL;
}
void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->conn_usage_lock);
if (conn->conn_usage_count != 0) {
conn->conn_waiting_on_uc = 1;
spin_unlock_bh(&conn->conn_usage_lock);
wait_for_completion(&conn->conn_waiting_on_uc_comp);
return;
}
spin_unlock_bh(&conn->conn_usage_lock);
}
void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->conn_usage_lock);
conn->conn_usage_count--;
if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
complete(&conn->conn_waiting_on_uc_comp);
spin_unlock_bh(&conn->conn_usage_lock);
}
void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->conn_usage_lock);
conn->conn_usage_count++;
spin_unlock_bh(&conn->conn_usage_lock);
}
static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
u8 state;
struct iscsi_cmd *cmd;
cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
if (!cmd)
return -1;
cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
ISTATE_SEND_NOPIN_NO_RESPONSE;
cmd->init_task_tag = 0xFFFFFFFF;
spin_lock_bh(&conn->sess->ttt_lock);
cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
0xFFFFFFFF;
if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
spin_unlock_bh(&conn->sess->ttt_lock);
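/*
 * 0xFFFFFFFF is the reserved tag value in iSCSI (RFC 3720), which is
 * why the increment above is repeated if targ_xfer_tag wraps onto it;
 * a NOPIN that wants no response advertises the reserved TTT instead.
 */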
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
if (want_response)
iscsit_start_nopin_response_timer(conn);
iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
return 0;
}
static void iscsit_handle_nopin_response_timeout(unsigned long data)
{
struct iscsi_conn *conn = (struct iscsi_conn *) data;
iscsit_inc_conn_usage_count(conn);
spin_lock_bh(&conn->nopin_timer_lock);
if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
spin_unlock_bh(&conn->nopin_timer_lock);
iscsit_dec_conn_usage_count(conn);
return;
}
pr_debug("Did not receive response to NOPIN on CID: %hu on"
" SID: %u, failing connection.\n", conn->cid,
conn->sess->sid);
conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
{
struct iscsi_portal_group *tpg = conn->sess->tpg;
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
if (tiqn) {
spin_lock_bh(&tiqn->sess_err_stats.lock);
strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
conn->sess->sess_ops->InitiatorName);
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;
conn->sess->conn_timeout_errors++;
spin_unlock_bh(&tiqn->sess_err_stats.lock);
}
}
iscsit_cause_connection_reinstatement(conn, 0);
iscsit_dec_conn_usage_count(conn);
}
void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
spin_lock_bh(&conn->nopin_timer_lock);
if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
spin_unlock_bh(&conn->nopin_timer_lock);
return;
}
mod_timer(&conn->nopin_response_timer,
(get_jiffies_64() + na->nopin_response_timeout * HZ));
spin_unlock_bh(&conn->nopin_timer_lock);
}
/*
* Called with conn->nopin_timer_lock held.
*/
void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
spin_lock_bh(&conn->nopin_timer_lock);
if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
spin_unlock_bh(&conn->nopin_timer_lock);
return;
}
init_timer(&conn->nopin_response_timer);
conn->nopin_response_timer.expires =
(get_jiffies_64() + na->nopin_response_timeout * HZ);
conn->nopin_response_timer.data = (unsigned long)conn;
conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
add_timer(&conn->nopin_response_timer);
pr_debug("Started NOPIN Response Timer on CID: %d to %u"
" seconds\n", conn->cid, na->nopin_response_timeout);
spin_unlock_bh(&conn->nopin_timer_lock);
}
void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->nopin_timer_lock);
if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
spin_unlock_bh(&conn->nopin_timer_lock);
return;
}
conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
spin_unlock_bh(&conn->nopin_timer_lock);
del_timer_sync(&conn->nopin_response_timer);
spin_lock_bh(&conn->nopin_timer_lock);
conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
}
static void iscsit_handle_nopin_timeout(unsigned long data)
{
struct iscsi_conn *conn = (struct iscsi_conn *) data;
iscsit_inc_conn_usage_count(conn);
spin_lock_bh(&conn->nopin_timer_lock);
if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
spin_unlock_bh(&conn->nopin_timer_lock);
iscsit_dec_conn_usage_count(conn);
return;
}
conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
iscsit_add_nopin(conn, 1);
iscsit_dec_conn_usage_count(conn);
}
/*
* Called with conn->nopin_timer_lock held.
*/
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
/*
* NOPIN timeout is disabled.
*/
if (!na->nopin_timeout)
return;
if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
return;
init_timer(&conn->nopin_timer);
conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
conn->nopin_timer.data = (unsigned long)conn;
conn->nopin_timer.function = iscsit_handle_nopin_timeout;
conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
add_timer(&conn->nopin_timer);
pr_debug("Started NOPIN Timer on CID: %d at %u second"
" interval\n", conn->cid, na->nopin_timeout);
}
void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
/*
* NOPIN timeout is disabled.
*/
if (!na->nopin_timeout)
return;
spin_lock_bh(&conn->nopin_timer_lock);
if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
spin_unlock_bh(&conn->nopin_timer_lock);
return;
}
init_timer(&conn->nopin_timer);
conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
conn->nopin_timer.data = (unsigned long)conn;
conn->nopin_timer.function = iscsit_handle_nopin_timeout;
conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
add_timer(&conn->nopin_timer);
pr_debug("Started NOPIN Timer on CID: %d at %u second"
" interval\n", conn->cid, na->nopin_timeout);
spin_unlock_bh(&conn->nopin_timer_lock);
}
void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->nopin_timer_lock);
if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
spin_unlock_bh(&conn->nopin_timer_lock);
return;
}
conn->nopin_timer_flags |= ISCSI_TF_STOP;
spin_unlock_bh(&conn->nopin_timer_lock);
del_timer_sync(&conn->nopin_timer);
spin_lock_bh(&conn->nopin_timer_lock);
conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
}
int iscsit_send_tx_data(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn,
int use_misc)
{
int tx_sent, tx_size;
u32 iov_count;
struct kvec *iov;
send_data:
tx_size = cmd->tx_size;
if (!use_misc) {
iov = &cmd->iov_data[0];
iov_count = cmd->iov_data_count;
} else {
iov = &cmd->iov_misc[0];
iov_count = cmd->iov_misc_count;
}
tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
if (tx_size != tx_sent) {
if (tx_sent == -EAGAIN) {
pr_err("tx_data() returned -EAGAIN\n");
goto send_data;
} else
return -1;
}
cmd->tx_size = 0;
return 0;
}
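/*
 * Note that a short transfer is retried (via the send_data label) only
 * for -EAGAIN; any other mismatch between tx_size and the amount
 * actually sent is treated as a fatal connection error.
 */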
int iscsit_fe_sendpage_sg(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn)
{
struct scatterlist *sg = cmd->first_data_sg;
struct kvec iov;
u32 tx_hdr_size, data_len;
u32 offset = cmd->first_data_sg_off;
int tx_sent, iov_off;
send_hdr:
tx_hdr_size = ISCSI_HDR_LEN;
if (conn->conn_ops->HeaderDigest)
tx_hdr_size += ISCSI_CRC_LEN;
iov.iov_base = cmd->pdu;
iov.iov_len = tx_hdr_size;
tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
if (tx_hdr_size != tx_sent) {
if (tx_sent == -EAGAIN) {
pr_err("tx_data() returned -EAGAIN\n");
goto send_hdr;
}
return -1;
}
data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
/*
* Set iov_off used by padding and data digest tx_data() calls below
* in order to determine proper offset into cmd->iov_data[]
*/
if (conn->conn_ops->DataDigest) {
data_len -= ISCSI_CRC_LEN;
if (cmd->padding)
iov_off = (cmd->iov_data_count - 2);
else
iov_off = (cmd->iov_data_count - 1);
} else {
iov_off = (cmd->iov_data_count - 1);
}
/*
* Perform sendpage() for each page in the scatterlist
*/
while (data_len) {
u32 space = (sg->length - offset);
u32 sub_len = min_t(u32, data_len, space);
send_pg:
tx_sent = conn->sock->ops->sendpage(conn->sock,
sg_page(sg), sg->offset + offset, sub_len, 0);
if (tx_sent != sub_len) {
if (tx_sent == -EAGAIN) {
pr_err("tcp_sendpage() returned"
" -EAGAIN\n");
goto send_pg;
}
pr_err("tcp_sendpage() failure: %d\n",
tx_sent);
return -1;
}
data_len -= sub_len;
offset = 0;
sg = sg_next(sg);
}
send_padding:
if (cmd->padding) {
struct kvec *iov_p = &cmd->iov_data[iov_off++];
tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
if (cmd->padding != tx_sent) {
if (tx_sent == -EAGAIN) {
pr_err("tx_data() returned -EAGAIN\n");
goto send_padding;
}
return -1;
}
}
send_datacrc:
if (conn->conn_ops->DataDigest) {
struct kvec *iov_d = &cmd->iov_data[iov_off];
tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
if (ISCSI_CRC_LEN != tx_sent) {
if (tx_sent == -EAGAIN) {
pr_err("tx_data() returned -EAGAIN\n");
goto send_datacrc;
}
return -1;
}
}
return 0;
}
/*
* This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
* back to the Initiator when an exception condition occurs with the
* errors set in status_class and status_detail.
*
* Parameters: iSCSI Connection, Status Class, Status Detail.
* Returns: 0 on success, -1 on error.
*/
int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
{
u8 iscsi_hdr[ISCSI_HDR_LEN];
int err;
struct kvec iov;
struct iscsi_login_rsp *hdr;
iscsit_collect_login_stats(conn, status_class, status_detail);
memset(&iov, 0, sizeof(struct kvec));
memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
hdr = (struct iscsi_login_rsp *)&iscsi_hdr;
hdr->opcode = ISCSI_OP_LOGIN_RSP;
hdr->status_class = status_class;
hdr->status_detail = status_detail;
hdr->itt = cpu_to_be32(conn->login_itt);
iov.iov_base = &iscsi_hdr;
iov.iov_len = ISCSI_HDR_LEN;
PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
if (err != ISCSI_HDR_LEN) {
pr_err("tx_data returned less than expected\n");
return -1;
}
return 0;
}
void iscsit_print_session_params(struct iscsi_session *sess)
{
struct iscsi_conn *conn;
pr_debug("-----------------------------[Session Params for"
" SID: %u]-----------------------------\n", sess->sid);
spin_lock_bh(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
iscsi_dump_conn_ops(conn->conn_ops);
spin_unlock_bh(&sess->conn_lock);
iscsi_dump_sess_ops(sess->sess_ops);
}
static int iscsit_do_rx_data(
struct iscsi_conn *conn,
struct iscsi_data_count *count)
{
int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
struct kvec *iov_p;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
memset(&msg, 0, sizeof(struct msghdr));
iov_p = count->iov;
iov_len = count->iov_count;
while (total_rx < data) {
rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
(data - total_rx), MSG_WAITALL);
if (rx_loop <= 0) {
pr_debug("rx_loop: %d total_rx: %d\n",
rx_loop, total_rx);
return rx_loop;
}
total_rx += rx_loop;
pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
rx_loop, total_rx, data);
}
return total_rx;
}
static int iscsit_do_tx_data(
struct iscsi_conn *conn,
struct iscsi_data_count *count)
{
int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
struct kvec *iov_p;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
if (data <= 0) {
pr_err("Data length is: %d\n", data);
return -1;
}
memset(&msg, 0, sizeof(struct msghdr));
iov_p = count->iov;
iov_len = count->iov_count;
while (total_tx < data) {
tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
(data - total_tx));
if (tx_loop <= 0) {
pr_debug("tx_loop: %d total_tx %d\n",
tx_loop, total_tx);
return tx_loop;
}
total_tx += tx_loop;
pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
tx_loop, total_tx, data);
}
return total_tx;
}
int rx_data(
struct iscsi_conn *conn,
struct kvec *iov,
int iov_count,
int data)
{
struct iscsi_data_count c;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
memset(&c, 0, sizeof(struct iscsi_data_count));
c.iov = iov;
c.iov_count = iov_count;
c.data_length = data;
c.type = ISCSI_RX_DATA;
return iscsit_do_rx_data(conn, &c);
}
int tx_data(
struct iscsi_conn *conn,
struct kvec *iov,
int iov_count,
int data)
{
struct iscsi_data_count c;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
memset(&c, 0, sizeof(struct iscsi_data_count));
c.iov = iov;
c.iov_count = iov_count;
c.data_length = data;
c.type = ISCSI_TX_DATA;
return iscsit_do_tx_data(conn, &c);
}
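/*
 * rx_data() and tx_data() above are thin wrappers that package a kvec
 * array into a struct iscsi_data_count; both return the number of
 * bytes transferred, or a value <= 0 on error.
 */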
void iscsit_collect_login_stats(
struct iscsi_conn *conn,
u8 status_class,
u8 status_detail)
{
struct iscsi_param *intrname = NULL;
struct iscsi_tiqn *tiqn;
struct iscsi_login_stats *ls;
tiqn = iscsit_snmp_get_tiqn(conn);
if (!tiqn)
return;
ls = &tiqn->login_stats;
spin_lock(&ls->lock);
if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
((get_jiffies_64() - ls->last_fail_time) < 10)) {
/* We already have the failure info for this login */
spin_unlock(&ls->lock);
return;
}
if (status_class == ISCSI_STATUS_CLS_SUCCESS)
ls->accepts++;
else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
ls->redirects++;
ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
(status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
ls->authenticate_fails++;
ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
(status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
ls->authorize_fails++;
ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
(status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
ls->negotiate_fails++;
ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
} else {
ls->other_fails++;
ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
}
/* Save initiator name, ip address and time, if it is a failed login */
if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
if (conn->param_list)
intrname = iscsi_find_param_from_key(INITIATORNAME,
conn->param_list);
strcpy(ls->last_intr_fail_name,
(intrname ? intrname->value : "Unknown"));
ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
"%s", conn->login_ip);
ls->last_fail_time = get_jiffies_64();
}
spin_unlock(&ls->lock);
}
struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
{
struct iscsi_portal_group *tpg;
if (!conn || !conn->sess)
return NULL;
tpg = conn->sess->tpg;
if (!tpg)
return NULL;
if (!tpg->tpg_tiqn)
return NULL;
return tpg->tpg_tiqn;
}
| gpl-2.0 |
firefoxu8833/huawei-kernel-test | drivers/input/keyboard/gpio_keys_polled.c | 3269 | 6521 | /*
* Driver for buttons on GPIO lines not capable of generating interrupts
*
* Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com>
*
* This file was based on: /drivers/input/misc/cobalt_btns.c
* Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* also was based on: /drivers/input/keyboard/gpio_keys.c
* Copyright 2005 Phil Blundell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input-polldev.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#define DRV_NAME "gpio-keys-polled"
struct gpio_keys_button_data {
int last_state;
int count;
int threshold;
int can_sleep;
};
struct gpio_keys_polled_dev {
struct input_polled_dev *poll_dev;
struct device *dev;
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button_data data[0];
};
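/*
 * data[0] is the old zero-length-array idiom for a trailing flexible
 * array: the probe routine below allocates
 * sizeof(struct gpio_keys_polled_dev) +
 * pdata->nbuttons * sizeof(struct gpio_keys_button_data)
 * in a single kzalloc() so that data[] holds one slot per button.
 */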
static void gpio_keys_polled_check_state(struct input_dev *input,
struct gpio_keys_button *button,
struct gpio_keys_button_data *bdata)
{
int state;
if (bdata->can_sleep)
state = !!gpio_get_value_cansleep(button->gpio);
else
state = !!gpio_get_value(button->gpio);
if (state != bdata->last_state) {
unsigned int type = button->type ?: EV_KEY;
input_event(input, type, button->code,
!!(state ^ button->active_low));
input_sync(input);
bdata->count = 0;
bdata->last_state = state;
}
}
static void gpio_keys_polled_poll(struct input_polled_dev *dev)
{
struct gpio_keys_polled_dev *bdev = dev->private;
struct gpio_keys_platform_data *pdata = bdev->pdata;
struct input_dev *input = dev->input;
int i;
for (i = 0; i < bdev->pdata->nbuttons; i++) {
struct gpio_keys_button_data *bdata = &bdev->data[i];
if (bdata->count < bdata->threshold)
bdata->count++;
else
gpio_keys_polled_check_state(input, &pdata->buttons[i],
bdata);
}
}
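/*
 * Illustrative timing example (values assumed): with poll_interval =
 * 50 ms and debounce_interval = 120 ms, the probe routine computes
 * threshold = DIV_ROUND_UP(120, 50) = 3, so after a reported change
 * the GPIO is left unsampled for three poll ticks (roughly 150 ms),
 * which provides a simple software debounce.
 */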
static void gpio_keys_polled_open(struct input_polled_dev *dev)
{
struct gpio_keys_polled_dev *bdev = dev->private;
struct gpio_keys_platform_data *pdata = bdev->pdata;
if (pdata->enable)
pdata->enable(bdev->dev);
}
static void gpio_keys_polled_close(struct input_polled_dev *dev)
{
struct gpio_keys_polled_dev *bdev = dev->private;
struct gpio_keys_platform_data *pdata = bdev->pdata;
if (pdata->disable)
pdata->disable(bdev->dev);
}
static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
{
struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct gpio_keys_polled_dev *bdev;
struct input_polled_dev *poll_dev;
struct input_dev *input;
int error;
int i;
if (!pdata || !pdata->poll_interval)
return -EINVAL;
bdev = kzalloc(sizeof(struct gpio_keys_polled_dev) +
pdata->nbuttons * sizeof(struct gpio_keys_button_data),
GFP_KERNEL);
if (!bdev) {
dev_err(dev, "no memory for private data\n");
return -ENOMEM;
}
poll_dev = input_allocate_polled_device();
if (!poll_dev) {
dev_err(dev, "no memory for polled device\n");
error = -ENOMEM;
goto err_free_bdev;
}
poll_dev->private = bdev;
poll_dev->poll = gpio_keys_polled_poll;
poll_dev->poll_interval = pdata->poll_interval;
poll_dev->open = gpio_keys_polled_open;
poll_dev->close = gpio_keys_polled_close;
input = poll_dev->input;
input->evbit[0] = BIT(EV_KEY);
input->name = pdev->name;
input->phys = DRV_NAME"/input0";
input->dev.parent = &pdev->dev;
input->id.bustype = BUS_HOST;
input->id.vendor = 0x0001;
input->id.product = 0x0001;
input->id.version = 0x0100;
for (i = 0; i < pdata->nbuttons; i++) {
struct gpio_keys_button *button = &pdata->buttons[i];
struct gpio_keys_button_data *bdata = &bdev->data[i];
unsigned int gpio = button->gpio;
unsigned int type = button->type ?: EV_KEY;
if (button->wakeup) {
dev_err(dev, DRV_NAME " does not support wakeup\n");
error = -EINVAL;
goto err_free_gpio;
}
error = gpio_request(gpio,
button->desc ? button->desc : DRV_NAME);
if (error) {
dev_err(dev, "unable to claim gpio %u, err=%d\n",
gpio, error);
goto err_free_gpio;
}
error = gpio_direction_input(gpio);
if (error) {
dev_err(dev,
"unable to set direction on gpio %u, err=%d\n",
gpio, error);
goto err_free_gpio;
}
bdata->can_sleep = gpio_cansleep(gpio);
bdata->last_state = -1;
bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
pdata->poll_interval);
input_set_capability(input, type, button->code);
}
bdev->poll_dev = poll_dev;
bdev->dev = dev;
bdev->pdata = pdata;
platform_set_drvdata(pdev, bdev);
error = input_register_polled_device(poll_dev);
if (error) {
dev_err(dev, "unable to register polled device, err=%d\n",
error);
goto err_free_gpio;
}
/* report initial state of the buttons */
for (i = 0; i < pdata->nbuttons; i++)
gpio_keys_polled_check_state(input, &pdata->buttons[i],
&bdev->data[i]);
return 0;
err_free_gpio:
while (--i >= 0)
gpio_free(pdata->buttons[i].gpio);
input_free_polled_device(poll_dev);
err_free_bdev:
kfree(bdev);
platform_set_drvdata(pdev, NULL);
return error;
}
static int __devexit gpio_keys_polled_remove(struct platform_device *pdev)
{
struct gpio_keys_polled_dev *bdev = platform_get_drvdata(pdev);
struct gpio_keys_platform_data *pdata = bdev->pdata;
int i;
input_unregister_polled_device(bdev->poll_dev);
for (i = 0; i < pdata->nbuttons; i++)
gpio_free(pdata->buttons[i].gpio);
input_free_polled_device(bdev->poll_dev);
kfree(bdev);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver gpio_keys_polled_driver = {
.probe = gpio_keys_polled_probe,
.remove = __devexit_p(gpio_keys_polled_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
static int __init gpio_keys_polled_init(void)
{
return platform_driver_register(&gpio_keys_polled_driver);
}
static void __exit gpio_keys_polled_exit(void)
{
platform_driver_unregister(&gpio_keys_polled_driver);
}
module_init(gpio_keys_polled_init);
module_exit(gpio_keys_polled_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_DESCRIPTION("Polled GPIO Buttons driver");
MODULE_ALIAS("platform:" DRV_NAME);
| gpl-2.0 |
SCforAndroid/android_kernel_omap | drivers/media/dvb/frontends/si21xx.c | 3269 | 21537 | /* DVB compliant Linux driver for the DVB-S si2109/2110 demodulator
*
* Copyright (C) 2008 Igor M. Liplianin (liplianin@me.by)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
#include "dvb_frontend.h"
#include "si21xx.h"
#define REVISION_REG 0x00
#define SYSTEM_MODE_REG 0x01
#define TS_CTRL_REG_1 0x02
#define TS_CTRL_REG_2 0x03
#define PIN_CTRL_REG_1 0x04
#define PIN_CTRL_REG_2 0x05
#define LOCK_STATUS_REG_1 0x0f
#define LOCK_STATUS_REG_2 0x10
#define ACQ_STATUS_REG 0x11
#define ACQ_CTRL_REG_1 0x13
#define ACQ_CTRL_REG_2 0x14
#define PLL_DIVISOR_REG 0x15
#define COARSE_TUNE_REG 0x16
#define FINE_TUNE_REG_L 0x17
#define FINE_TUNE_REG_H 0x18
#define ANALOG_AGC_POWER_LEVEL_REG 0x28
#define CFO_ESTIMATOR_CTRL_REG_1 0x29
#define CFO_ESTIMATOR_CTRL_REG_2 0x2a
#define CFO_ESTIMATOR_CTRL_REG_3 0x2b
#define SYM_RATE_ESTIMATE_REG_L 0x31
#define SYM_RATE_ESTIMATE_REG_M 0x32
#define SYM_RATE_ESTIMATE_REG_H 0x33
#define CFO_ESTIMATOR_OFFSET_REG_L 0x36
#define CFO_ESTIMATOR_OFFSET_REG_H 0x37
#define CFO_ERROR_REG_L 0x38
#define CFO_ERROR_REG_H 0x39
#define SYM_RATE_ESTIMATOR_CTRL_REG 0x3a
#define SYM_RATE_REG_L 0x3f
#define SYM_RATE_REG_M 0x40
#define SYM_RATE_REG_H 0x41
#define SYM_RATE_ESTIMATOR_MAXIMUM_REG 0x42
#define SYM_RATE_ESTIMATOR_MINIMUM_REG 0x43
#define C_N_ESTIMATOR_CTRL_REG 0x7c
#define C_N_ESTIMATOR_THRSHLD_REG 0x7d
#define C_N_ESTIMATOR_LEVEL_REG_L 0x7e
#define C_N_ESTIMATOR_LEVEL_REG_H 0x7f
#define BLIND_SCAN_CTRL_REG 0x80
#define LSA_CTRL_REG_1 0x8D
#define SPCTRM_TILT_CORR_THRSHLD_REG 0x8f
#define ONE_DB_BNDWDTH_THRSHLD_REG 0x90
#define TWO_DB_BNDWDTH_THRSHLD_REG 0x91
#define THREE_DB_BNDWDTH_THRSHLD_REG 0x92
#define INBAND_POWER_THRSHLD_REG 0x93
#define REF_NOISE_LVL_MRGN_THRSHLD_REG 0x94
#define VIT_SRCH_CTRL_REG_1 0xa0
#define VIT_SRCH_CTRL_REG_2 0xa1
#define VIT_SRCH_CTRL_REG_3 0xa2
#define VIT_SRCH_STATUS_REG 0xa3
#define VITERBI_BER_COUNT_REG_L 0xab
#define REED_SOLOMON_CTRL_REG 0xb0
#define REED_SOLOMON_ERROR_COUNT_REG_L 0xb1
#define PRBS_CTRL_REG 0xb5
#define LNB_CTRL_REG_1 0xc0
#define LNB_CTRL_REG_2 0xc1
#define LNB_CTRL_REG_3 0xc2
#define LNB_CTRL_REG_4 0xc3
#define LNB_CTRL_STATUS_REG 0xc4
#define LNB_FIFO_REGS_0 0xc5
#define LNB_FIFO_REGS_1 0xc6
#define LNB_FIFO_REGS_2 0xc7
#define LNB_FIFO_REGS_3 0xc8
#define LNB_FIFO_REGS_4 0xc9
#define LNB_FIFO_REGS_5 0xca
#define LNB_SUPPLY_CTRL_REG_1 0xcb
#define LNB_SUPPLY_CTRL_REG_2 0xcc
#define LNB_SUPPLY_CTRL_REG_3 0xcd
#define LNB_SUPPLY_CTRL_REG_4 0xce
#define LNB_SUPPLY_STATUS_REG 0xcf
#define FAIL -1
#define PASS 0
#define ALLOWABLE_FS_COUNT 10
#define STATUS_BER 0
#define STATUS_UCBLOCKS 1
static int debug;
#define dprintk(args...) \
do { \
if (debug) \
printk(KERN_DEBUG "si21xx: " args); \
} while (0)
enum {
ACTIVE_HIGH,
ACTIVE_LOW
};
enum {
BYTE_WIDE,
BIT_WIDE
};
enum {
CLK_GAPPED_MODE,
CLK_CONTINUOUS_MODE
};
enum {
RISING_EDGE,
FALLING_EDGE
};
enum {
MSB_FIRST,
LSB_FIRST
};
enum {
SERIAL,
PARALLEL
};
struct si21xx_state {
struct i2c_adapter *i2c;
const struct si21xx_config *config;
struct dvb_frontend frontend;
u8 initialised:1;
int errmode;
int fs; /*Sampling rate of the ADC in MHz*/
};
/* register default initialization */
static u8 serit_sp1511lhb_inittab[] = {
0x01, 0x28, /* set i2c_inc_disable */
0x20, 0x03,
0x27, 0x20,
0xe0, 0x45,
0xe1, 0x08,
0xfe, 0x01,
0x01, 0x28,
0x89, 0x09,
0x04, 0x80,
0x05, 0x01,
0x06, 0x00,
0x20, 0x03,
0x24, 0x88,
0x29, 0x09,
0x2a, 0x0f,
0x2c, 0x10,
0x2d, 0x19,
0x2e, 0x08,
0x2f, 0x10,
0x30, 0x19,
0x34, 0x20,
0x35, 0x03,
0x45, 0x02,
0x46, 0x45,
0x47, 0xd0,
0x48, 0x00,
0x49, 0x40,
0x4a, 0x03,
0x4c, 0xfd,
0x4f, 0x2e,
0x50, 0x2e,
0x51, 0x10,
0x52, 0x10,
0x56, 0x92,
0x59, 0x00,
0x5a, 0x2d,
0x5b, 0x33,
0x5c, 0x1f,
0x5f, 0x76,
0x62, 0xc0,
0x63, 0xc0,
0x64, 0xf3,
0x65, 0xf3,
0x79, 0x40,
0x6a, 0x40,
0x6b, 0x0a,
0x6c, 0x80,
0x6d, 0x27,
0x71, 0x06,
0x75, 0x60,
0x78, 0x00,
0x79, 0xb5,
0x7c, 0x05,
0x7d, 0x1a,
0x87, 0x55,
0x88, 0x72,
0x8f, 0x08,
0x90, 0xe0,
0x94, 0x40,
0xa0, 0x3f,
0xa1, 0xc0,
0xa4, 0xcc,
0xa5, 0x66,
0xa6, 0x66,
0xa7, 0x7b,
0xa8, 0x7b,
0xa9, 0x7b,
0xaa, 0x9a,
0xed, 0x04,
0xad, 0x00,
0xae, 0x03,
0xcc, 0xab,
0x01, 0x08,
0xff, 0xff
};
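/*
 * The table above is a flat list of register/value pairs; the
 * 0xff, 0xff entry is a sentinel that terminates the programming loop
 * in si21xx_init() below.
 */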
/* low level read/writes */
static int si21_writeregs(struct si21xx_state *state, u8 reg1,
u8 *data, int len)
{
int ret;
u8 buf[60];/* = { reg1, data };*/
struct i2c_msg msg = {
.addr = state->config->demod_address,
.flags = 0,
.buf = buf,
.len = len + 1
};
msg.buf[0] = reg1;
memcpy(msg.buf + 1, data, len);
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
dprintk("%s: writereg error (reg1 == 0x%02x, data == 0x%02x, "
"ret == %i)\n", __func__, reg1, data[0], ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
static int si21_writereg(struct si21xx_state *state, u8 reg, u8 data)
{
int ret;
u8 buf[] = { reg, data };
struct i2c_msg msg = {
.addr = state->config->demod_address,
.flags = 0,
.buf = buf,
.len = 2
};
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
dprintk("%s: writereg error (reg == 0x%02x, data == 0x%02x, "
"ret == %i)\n", __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
static int si21_write(struct dvb_frontend *fe, const u8 buf[], int len)
{
struct si21xx_state *state = fe->demodulator_priv;
if (len != 2)
return -EINVAL;
return si21_writereg(state, buf[0], buf[1]);
}
static u8 si21_readreg(struct si21xx_state *state, u8 reg)
{
int ret;
u8 b0[] = { reg };
u8 b1[] = { 0 };
struct i2c_msg msg[] = {
{
.addr = state->config->demod_address,
.flags = 0,
.buf = b0,
.len = 1
}, {
.addr = state->config->demod_address,
.flags = I2C_M_RD,
.buf = b1,
.len = 1
}
};
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2)
dprintk("%s: readreg error (reg == 0x%02x, ret == %i)\n",
__func__, reg, ret);
return b1[0];
}
static int si21_readregs(struct si21xx_state *state, u8 reg1, u8 *b, u8 len)
{
int ret;
struct i2c_msg msg[] = {
{
.addr = state->config->demod_address,
.flags = 0,
.buf = &reg1,
.len = 1
}, {
.addr = state->config->demod_address,
.flags = I2C_M_RD,
.buf = b,
.len = len
}
};
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2)
dprintk("%s: readreg error (ret == %i)\n", __func__, ret);
return ret == 2 ? 0 : -1;
}
static int si21xx_wait_diseqc_idle(struct si21xx_state *state, int timeout)
{
unsigned long start = jiffies;
dprintk("%s\n", __func__);
while ((si21_readreg(state, LNB_CTRL_REG_1) & 0x8) == 8) {
if (jiffies - start > timeout) {
dprintk("%s: timeout!!\n", __func__);
return -ETIMEDOUT;
}
msleep(10);
};
return 0;
}
static int si21xx_set_symbolrate(struct dvb_frontend *fe, u32 srate)
{
struct si21xx_state *state = fe->demodulator_priv;
u32 sym_rate, data_rate;
int i;
u8 sym_rate_bytes[3];
dprintk("%s : srate = %i\n", __func__ , srate);
if ((srate < 1000000) || (srate > 45000000))
return -EINVAL;
data_rate = srate;
sym_rate = 0;
for (i = 0; i < 4; ++i) {
sym_rate /= 100;
sym_rate = sym_rate + ((data_rate % 100) * 0x800000) /
state->fs;
data_rate /= 100;
}
for (i = 0; i < 3; ++i)
sym_rate_bytes[i] = (u8)((sym_rate >> (i * 8)) & 0xff);
si21_writeregs(state, SYM_RATE_REG_L, sym_rate_bytes, 0x03);
return 0;
}
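/*
 * The digit-by-digit loop above is an overflow-safe way of computing a
 * 2^23-scaled ratio of symbol rate to ADC sample rate:
 *
 *	sym_rate = srate * 2^23 / (fs_in_MHz * 10^6)
 *
 * A direct multiplication would overflow 32 bits for large symbol
 * rates. Assuming 64-bit helpers are acceptable, an equivalent
 * one-liner would be (sketch, not in the original driver):
 *
 *	sym_rate = div_u64((u64)srate << 23, state->fs * 1000000);
 */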
static int si21xx_send_diseqc_msg(struct dvb_frontend *fe,
struct dvb_diseqc_master_cmd *m)
{
struct si21xx_state *state = fe->demodulator_priv;
u8 lnb_status;
u8 LNB_CTRL_1;
int status;
dprintk("%s\n", __func__);
status = PASS;
LNB_CTRL_1 = 0;
status |= si21_readregs(state, LNB_CTRL_STATUS_REG, &lnb_status, 0x01);
status |= si21_readregs(state, LNB_CTRL_REG_1, &lnb_status, 0x01);
/*fill the FIFO*/
status |= si21_writeregs(state, LNB_FIFO_REGS_0, m->msg, m->msg_len);
LNB_CTRL_1 = (lnb_status & 0x70);
LNB_CTRL_1 |= m->msg_len;
LNB_CTRL_1 |= 0x80; /* begin LNB signaling */
status |= si21_writeregs(state, LNB_CTRL_REG_1, &LNB_CTRL_1, 0x01);
return status;
}
static int si21xx_send_diseqc_burst(struct dvb_frontend *fe,
fe_sec_mini_cmd_t burst)
{
struct si21xx_state *state = fe->demodulator_priv;
u8 val;
dprintk("%s\n", __func__);
if (si21xx_wait_diseqc_idle(state, 100) < 0)
return -ETIMEDOUT;
val = (0x80 | si21_readreg(state, 0xc1));
if (si21_writereg(state, LNB_CTRL_REG_1,
burst == SEC_MINI_A ? (val & ~0x10) : (val | 0x10)))
return -EREMOTEIO;
if (si21xx_wait_diseqc_idle(state, 100) < 0)
return -ETIMEDOUT;
if (si21_writereg(state, LNB_CTRL_REG_1, val))
return -EREMOTEIO;
return 0;
}
/* 30.06.2008 */
static int si21xx_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
struct si21xx_state *state = fe->demodulator_priv;
u8 val;
dprintk("%s\n", __func__);
val = (0x80 | si21_readreg(state, LNB_CTRL_REG_1));
switch (tone) {
case SEC_TONE_ON:
return si21_writereg(state, LNB_CTRL_REG_1, val | 0x20);
case SEC_TONE_OFF:
return si21_writereg(state, LNB_CTRL_REG_1, (val & ~0x20));
default:
return -EINVAL;
}
}
static int si21xx_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt)
{
struct si21xx_state *state = fe->demodulator_priv;
u8 val;
dprintk("%s: %s\n", __func__,
volt == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" :
volt == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : "??");
val = (0x80 | si21_readreg(state, LNB_CTRL_REG_1));
switch (volt) {
case SEC_VOLTAGE_18:
return si21_writereg(state, LNB_CTRL_REG_1, val | 0x40);
break;
case SEC_VOLTAGE_13:
return si21_writereg(state, LNB_CTRL_REG_1, (val & ~0x40));
break;
default:
return -EINVAL;
};
}
static int si21xx_init(struct dvb_frontend *fe)
{
struct si21xx_state *state = fe->demodulator_priv;
int i;
int status = 0;
u8 reg1;
u8 val;
u8 reg2[2];
dprintk("%s\n", __func__);
for (i = 0; ; i += 2) {
reg1 = serit_sp1511lhb_inittab[i];
val = serit_sp1511lhb_inittab[i+1];
if (reg1 == 0xff && val == 0xff)
break;
si21_writeregs(state, reg1, &val, 1);
}
/*DVB QPSK SYSTEM MODE REG*/
reg1 = 0x08;
si21_writeregs(state, SYSTEM_MODE_REG, &reg1, 0x01);
/*transport stream config*/
/*
mode = PARALLEL;
sdata_form = LSB_FIRST;
clk_edge = FALLING_EDGE;
clk_mode = CLK_GAPPED_MODE;
strt_len = BYTE_WIDE;
sync_pol = ACTIVE_HIGH;
val_pol = ACTIVE_HIGH;
err_pol = ACTIVE_HIGH;
sclk_rate = 0x00;
parity = 0x00 ;
data_delay = 0x00;
clk_delay = 0x00;
pclk_smooth = 0x00;
*/
reg2[0] =
PARALLEL + (LSB_FIRST << 1)
+ (FALLING_EDGE << 2) + (CLK_GAPPED_MODE << 3)
+ (BYTE_WIDE << 4) + (ACTIVE_HIGH << 5)
+ (ACTIVE_HIGH << 6) + (ACTIVE_HIGH << 7);
reg2[1] = 0;
/* sclk_rate + (parity << 2)
+ (data_delay << 3) + (clk_delay << 4)
+ (pclk_smooth << 5);
*/
status |= si21_writeregs(state, TS_CTRL_REG_1, reg2, 0x02);
if (status != 0)
dprintk(" %s : TS Set Error\n", __func__);
return 0;
}
static int si21_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct si21xx_state *state = fe->demodulator_priv;
u8 regs_read[2];
u8 reg_read;
u8 i;
u8 lock;
u8 signal = si21_readreg(state, ANALOG_AGC_POWER_LEVEL_REG);
si21_readregs(state, LOCK_STATUS_REG_1, regs_read, 0x02);
reg_read = 0;
for (i = 0; i < 7; ++i)
reg_read |= ((regs_read[0] >> i) & 0x01) << (6 - i);
lock = ((reg_read & 0x7f) | (regs_read[1] & 0x80));
dprintk("%s : FE_READ_STATUS : VSTATUS: 0x%02x\n", __func__, lock);
*status = 0;
if (signal > 10)
*status |= FE_HAS_SIGNAL;
if (lock & 0x2)
*status |= FE_HAS_CARRIER;
if (lock & 0x20)
*status |= FE_HAS_VITERBI;
if (lock & 0x40)
*status |= FE_HAS_SYNC;
if ((lock & 0x7b) == 0x7b)
*status |= FE_HAS_LOCK;
return 0;
}
static int si21_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
struct si21xx_state *state = fe->demodulator_priv;
/*status = si21_readreg(state, ANALOG_AGC_POWER_LEVEL_REG,
(u8*)agclevel, 0x01);*/
u16 signal = (3 * si21_readreg(state, 0x27) *
si21_readreg(state, 0x28));
dprintk("%s : AGCPWR: 0x%02x%02x, signal=0x%04x\n", __func__,
si21_readreg(state, 0x27),
si21_readreg(state, 0x28), (int) signal);
signal <<= 4;
*strength = signal;
return 0;
}
static int si21_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct si21xx_state *state = fe->demodulator_priv;
dprintk("%s\n", __func__);
if (state->errmode != STATUS_BER)
return 0;
*ber = (si21_readreg(state, 0x1d) << 8) |
si21_readreg(state, 0x1e);
return 0;
}
static int si21_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct si21xx_state *state = fe->demodulator_priv;
s32 xsnr = 0xffff - ((si21_readreg(state, 0x24) << 8) |
si21_readreg(state, 0x25));
xsnr = 3 * (xsnr - 0xa100);
*snr = (xsnr > 0xffff) ? 0xffff : (xsnr < 0) ? 0 : xsnr;
dprintk("%s\n", __func__);
return 0;
}
static int si21_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct si21xx_state *state = fe->demodulator_priv;
dprintk("%s\n", __func__);
if (state->errmode != STATUS_UCBLOCKS)
*ucblocks = 0;
else
*ucblocks = (si21_readreg(state, 0x1d) << 8) |
si21_readreg(state, 0x1e);
return 0;
}
/* initiates a channel acquisition sequence
using the specified symbol rate and code rate */
static int si21xx_setacquire(struct dvb_frontend *fe, int symbrate,
fe_code_rate_t crate)
{
struct si21xx_state *state = fe->demodulator_priv;
u8 coderates[] = {
0x0, 0x01, 0x02, 0x04, 0x00,
0x8, 0x10, 0x20, 0x00, 0x3f
};
u8 coderate_ptr;
int status;
u8 start_acq = 0x80;
u8 reg, regs[3];
dprintk("%s\n", __func__);
status = PASS;
coderate_ptr = coderates[crate];
si21xx_set_symbolrate(fe, symbrate);
/* write code rates to use in the Viterbi search */
status |= si21_writeregs(state,
VIT_SRCH_CTRL_REG_1,
&coderate_ptr, 0x01);
/* clear acq_start bit */
status |= si21_readregs(state, ACQ_CTRL_REG_2, &reg, 0x01);
reg &= ~start_acq;
status |= si21_writeregs(state, ACQ_CTRL_REG_2, &reg, 0x01);
/* use new Carrier Frequency Offset Estimator (QuickLock) */
regs[0] = 0xCB;
regs[1] = 0x40;
regs[2] = 0xCB;
status |= si21_writeregs(state,
TWO_DB_BNDWDTH_THRSHLD_REG,
&regs[0], 0x03);
reg = 0x56;
status |= si21_writeregs(state,
LSA_CTRL_REG_1, &reg, 1);
reg = 0x05;
status |= si21_writeregs(state,
BLIND_SCAN_CTRL_REG, &reg, 1);
/* start automatic acq */
status |= si21_writeregs(state,
ACQ_CTRL_REG_2, &start_acq, 0x01);
return status;
}
static int si21xx_set_property(struct dvb_frontend *fe, struct dtv_property *p)
{
dprintk("%s(..)\n", __func__);
return 0;
}
static int si21xx_get_property(struct dvb_frontend *fe, struct dtv_property *p)
{
dprintk("%s(..)\n", __func__);
return 0;
}
static int si21xx_set_frontend(struct dvb_frontend *fe,
struct dvb_frontend_parameters *dfp)
{
struct si21xx_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
/* freq Channel carrier frequency in kHz (e.g. 1550000 kHz)
datarate Channel symbol rate in Sps (e.g. 22500000 Sps)*/
/* in MHz */
unsigned char coarse_tune_freq;
int fine_tune_freq;
unsigned char sample_rate = 0;
/* boolean */
bool inband_interferer_ind;
/* INTERMEDIATE VALUES */
int icoarse_tune_freq; /* MHz */
int ifine_tune_freq; /* MHz */
unsigned int band_high;
unsigned int band_low;
unsigned int x1;
unsigned int x2;
int i;
bool inband_interferer_div2[ALLOWABLE_FS_COUNT];
bool inband_interferer_div4[ALLOWABLE_FS_COUNT];
int status;
/* allowable sample rates for ADC in MHz */
int afs[ALLOWABLE_FS_COUNT] = { 200, 192, 193, 194, 195,
196, 204, 205, 206, 207
};
/* in MHz */
int if_limit_high;
int if_limit_low;
int lnb_lo;
int lnb_uncertanity;
int rf_freq;
int data_rate;
unsigned char regs[4];
dprintk("%s : FE_SET_FRONTEND\n", __func__);
if (c->delivery_system != SYS_DVBS) {
dprintk("%s: unsupported delivery system selected (%d)\n",
__func__, c->delivery_system);
return -EOPNOTSUPP;
}
for (i = 0; i < ALLOWABLE_FS_COUNT; ++i)
inband_interferer_div2[i] = inband_interferer_div4[i] = false;
if_limit_high = -700000;
if_limit_low = -100000;
/* in MHz */
lnb_lo = 0;
lnb_uncertanity = 0;
rf_freq = 10 * c->frequency ;
data_rate = c->symbol_rate / 100;
status = PASS;
band_low = (rf_freq - lnb_lo) - ((lnb_uncertanity * 200)
+ (data_rate * 135)) / 200;
band_high = (rf_freq - lnb_lo) + ((lnb_uncertanity * 200)
+ (data_rate * 135)) / 200;
icoarse_tune_freq = 100000 *
(((rf_freq - lnb_lo) -
(if_limit_low + if_limit_high) / 2)
/ 100000);
ifine_tune_freq = (rf_freq - lnb_lo) - icoarse_tune_freq ;
for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) {
x1 = ((rf_freq - lnb_lo) / (afs[i] * 2500)) *
(afs[i] * 2500) + afs[i] * 2500;
x2 = ((rf_freq - lnb_lo) / (afs[i] * 2500)) *
(afs[i] * 2500);
if (((band_low < x1) && (x1 < band_high)) ||
((band_low < x2) && (x2 < band_high)))
inband_interferer_div4[i] = true;
}
for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) {
x1 = ((rf_freq - lnb_lo) / (afs[i] * 5000)) *
(afs[i] * 5000) + afs[i] * 5000;
x2 = ((rf_freq - lnb_lo) / (afs[i] * 5000)) *
(afs[i] * 5000);
if (((band_low < x1) && (x1 < band_high)) ||
((band_low < x2) && (x2 < band_high)))
inband_interferer_div2[i] = true;
}
inband_interferer_ind = true;
for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) {
if (inband_interferer_div2[i] || inband_interferer_div4[i]) {
inband_interferer_ind = false;
break;
}
}
if (inband_interferer_ind) {
for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) {
if (!inband_interferer_div2[i]) {
sample_rate = (u8) afs[i];
break;
}
}
} else {
for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) {
if ((inband_interferer_div2[i] ||
!inband_interferer_div4[i])) {
sample_rate = (u8) afs[i];
break;
}
}
}
if (sample_rate > 207 || sample_rate < 192)
sample_rate = 200;
fine_tune_freq = ((0x4000 * (ifine_tune_freq / 10)) /
((sample_rate) * 1000));
coarse_tune_freq = (u8)(icoarse_tune_freq / 100000);
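/*
 * The four bytes below land in consecutive registers starting at
 * PLL_DIVISOR_REG (0x15): ADC sample rate in MHz, the coarse tune
 * step, then the fine tune word split across FINE_TUNE_REG_L/_H.
 */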
regs[0] = sample_rate;
regs[1] = coarse_tune_freq;
regs[2] = fine_tune_freq & 0xFF;
regs[3] = fine_tune_freq >> 8 & 0xFF;
status |= si21_writeregs(state, PLL_DIVISOR_REG, &regs[0], 0x04);
state->fs = sample_rate;/*ADC MHz*/
si21xx_setacquire(fe, c->symbol_rate, c->fec_inner);
return 0;
}
static int si21xx_sleep(struct dvb_frontend *fe)
{
struct si21xx_state *state = fe->demodulator_priv;
u8 regdata;
dprintk("%s\n", __func__);
si21_readregs(state, SYSTEM_MODE_REG, &regdata, 0x01);
regdata |= 1 << 6;
si21_writeregs(state, SYSTEM_MODE_REG, &regdata, 0x01);
state->initialised = 0;
return 0;
}
static void si21xx_release(struct dvb_frontend *fe)
{
struct si21xx_state *state = fe->demodulator_priv;
dprintk("%s\n", __func__);
kfree(state);
}
static struct dvb_frontend_ops si21xx_ops = {
.info = {
.name = "SL SI21XX DVB-S",
.type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 125, /* kHz for QPSK frontends */
.frequency_tolerance = 0,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_QPSK |
FE_CAN_FEC_AUTO
},
.release = si21xx_release,
.init = si21xx_init,
.sleep = si21xx_sleep,
.write = si21_write,
.read_status = si21_read_status,
.read_ber = si21_read_ber,
.read_signal_strength = si21_read_signal_strength,
.read_snr = si21_read_snr,
.read_ucblocks = si21_read_ucblocks,
.diseqc_send_master_cmd = si21xx_send_diseqc_msg,
.diseqc_send_burst = si21xx_send_diseqc_burst,
.set_tone = si21xx_set_tone,
.set_voltage = si21xx_set_voltage,
.set_property = si21xx_set_property,
.get_property = si21xx_get_property,
.set_frontend = si21xx_set_frontend,
};
struct dvb_frontend *si21xx_attach(const struct si21xx_config *config,
struct i2c_adapter *i2c)
{
struct si21xx_state *state = NULL;
int id;
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct si21xx_state), GFP_KERNEL);
if (state == NULL)
goto error;
/* setup the state */
state->config = config;
state->i2c = i2c;
state->initialised = 0;
state->errmode = STATUS_BER;
/* check if the demod is there */
id = si21_readreg(state, SYSTEM_MODE_REG);
si21_writereg(state, SYSTEM_MODE_REG, id | 0x40); /* standby off */
msleep(200);
id = si21_readreg(state, 0x00);
/* register 0x00 contains:
0x34 for SI2107
0x24 for SI2108
0x14 for SI2109
0x04 for SI2110
*/
if (id != 0x04 && id != 0x14)
goto error;
/* create dvb_frontend */
memcpy(&state->frontend.ops, &si21xx_ops,
sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
return &state->frontend;
error:
kfree(state);
return NULL;
}
EXPORT_SYMBOL(si21xx_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("SL SI21XX DVB Demodulator driver");
MODULE_AUTHOR("Igor M. Liplianin");
MODULE_LICENSE("GPL");
| gpl-2.0 |
loiz/android_kernel_samsung_hllte | arch/mips/kernel/smp-bmips.c | 4293 | 11726 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
*
* SMP support for BMIPS
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/time.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/bootinfo.h>
#include <asm/pmon.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mipsregs.h>
#include <asm/bmips.h>
#include <asm/traps.h>
#include <asm/barrier.h>
static int __maybe_unused max_cpus = 1;
/* these may be configured by the platform code */
int bmips_smp_enabled = 1;
int bmips_cpu_offset;
cpumask_t bmips_booted_mask;
#ifdef CONFIG_SMP
/* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */
unsigned long bmips_smp_boot_sp;
unsigned long bmips_smp_boot_gp;
static void bmips_send_ipi_single(int cpu, unsigned int action);
static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id);
/* SW interrupts 0,1 are used for interprocessor signaling */
#define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0)
#define IPI1_IRQ (MIPS_CPU_IRQ_BASE + 1)
#define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift))
#define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0))
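/*
 * Worked example, assuming bmips_cpu_offset == 0: ACTION_CLR_IPI(1, 0)
 * is 0x2000 | (1 << 9) == 0x2200, ACTION_SET_IPI(1, 1) is
 * 0x3000 | (1 << 9) | (1 << 8) == 0x3300, and ACTION_BOOT_THREAD(1)
 * is simply 0x09.
 */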
static void __init bmips_smp_setup(void)
{
int i;
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
/* arbitration priority */
clear_c0_brcm_cmt_ctrl(0x30);
/* NBK and weak order flags */
set_c0_brcm_config_0(0x30000);
/*
* MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
* MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
* MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
*/
change_c0_brcm_cmt_intr(0xf8018000,
(0x02 << 27) | (0x03 << 15));
/* single core, 2 threads (2 pipelines) */
max_cpus = 2;
#elif defined(CONFIG_CPU_BMIPS5000)
/* enable raceless SW interrupts */
set_c0_brcm_config(0x03 << 22);
/* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
change_c0_brcm_mode(0x1f << 27, 0x02 << 27);
/* N cores, 2 threads per core */
max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;
/* clear any pending SW interrupts */
for (i = 0; i < max_cpus; i++) {
write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
}
#endif
if (!bmips_smp_enabled)
max_cpus = 1;
/* this can be overridden by the BSP */
if (!board_ebase_setup)
board_ebase_setup = &bmips_ebase_setup;
for (i = 0; i < max_cpus; i++) {
__cpu_number_map[i] = 1;
__cpu_logical_map[i] = 1;
set_cpu_possible(i, 1);
set_cpu_present(i, 1);
}
}
/*
* IPI IRQ setup - runs on CPU0
*/
static void bmips_prepare_cpus(unsigned int max_cpus)
{
if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
"smp_ipi0", NULL))
panic("Can't request IPI0 interrupt\n");
if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
"smp_ipi1", NULL))
panic("Can't request IPI1 interrupt\n");
}
/*
* Tell the hardware to boot CPUx - runs on CPU0
*/
static void bmips_boot_secondary(int cpu, struct task_struct *idle)
{
bmips_smp_boot_sp = __KSTK_TOS(idle);
bmips_smp_boot_gp = (unsigned long)task_thread_info(idle);
mb();
/*
* Initial boot sequence for secondary CPU:
* bmips_reset_nmi_vec @ a000_0000 ->
* bmips_smp_entry ->
* plat_wired_tlb_setup (cached function call; optional) ->
* start_secondary (cached jump)
*
* Warm restart sequence:
* play_dead WAIT loop ->
* bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC ->
* eret to play_dead ->
* bmips_secondary_reentry ->
* start_secondary
*/
pr_info("SMP: Booting CPU%d...\n", cpu);
if (cpumask_test_cpu(cpu, &bmips_booted_mask))
bmips_send_ipi_single(cpu, 0);
else {
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
set_c0_brcm_cmt_ctrl(0x01);
#elif defined(CONFIG_CPU_BMIPS5000)
if (cpu & 0x01)
write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
else {
/*
* core N thread 0 was already booted; just
* pulse the NMI line
*/
bmips_write_zscm_reg(0x210, 0xc0000000);
udelay(10);
bmips_write_zscm_reg(0x210, 0x00);
}
#endif
cpumask_set_cpu(cpu, &bmips_booted_mask);
}
}
/*
* Early setup - runs on secondary CPU after cache probe
*/
static void bmips_init_secondary(void)
{
/* move NMI vector to kseg0, in case XKS01 is enabled */
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
void __iomem *cbr = BMIPS_GET_CBR();
unsigned long old_vec;
old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1);
__raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
#elif defined(CONFIG_CPU_BMIPS5000)
write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
(smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
#endif
/* make sure there won't be a timer interrupt for a little while */
write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
irq_enable_hazard();
set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
irq_enable_hazard();
}
/*
* Late setup - runs on secondary CPU before entering the idle loop
*/
static void bmips_smp_finish(void)
{
pr_info("SMP: CPU%d is running\n", smp_processor_id());
}
/*
* Runs on CPU0 after all CPUs have been booted
*/
static void bmips_cpus_done(void)
{
}
#if defined(CONFIG_CPU_BMIPS5000)
/*
* BMIPS5000 raceless IPIs
*
* Each CPU has two inbound SW IRQs which are independent of all other CPUs.
* IPI0 is used for SMP_RESCHEDULE_YOURSELF
* IPI1 is used for SMP_CALL_FUNCTION
*/
static void bmips_send_ipi_single(int cpu, unsigned int action)
{
write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
}
static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
{
int action = irq - IPI0_IRQ;
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action));
if (action == 0)
scheduler_ipi();
else
smp_call_function_interrupt();
return IRQ_HANDLED;
}
#else
/*
* BMIPS43xx racey IPIs
*
* We use one inbound SW IRQ for each CPU.
*
* A spinlock must be held in order to keep CPUx from accidentally clearing
* an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy. The
* same spinlock is used to protect the action masks.
*/
static DEFINE_SPINLOCK(ipi_lock);
static DEFINE_PER_CPU(int, ipi_action_mask);
static void bmips_send_ipi_single(int cpu, unsigned int action)
{
unsigned long flags;
spin_lock_irqsave(&ipi_lock, flags);
set_c0_cause(cpu ? C_SW1 : C_SW0);
per_cpu(ipi_action_mask, cpu) |= action;
irq_enable_hazard();
spin_unlock_irqrestore(&ipi_lock, flags);
}
static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
{
unsigned long flags;
int action, cpu = irq - IPI0_IRQ;
spin_lock_irqsave(&ipi_lock, flags);
action = __get_cpu_var(ipi_action_mask);
per_cpu(ipi_action_mask, cpu) = 0;
clear_c0_cause(cpu ? C_SW1 : C_SW0);
spin_unlock_irqrestore(&ipi_lock, flags);
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
return IRQ_HANDLED;
}
#endif /* BMIPS type */
static void bmips_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
bmips_send_ipi_single(i, action);
}
#ifdef CONFIG_HOTPLUG_CPU
static int bmips_cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
if (cpu == 0)
return -EBUSY;
pr_info("SMP: CPU%d is offline\n", cpu);
set_cpu_online(cpu, false);
cpu_clear(cpu, cpu_callin_map);
local_flush_tlb_all();
local_flush_icache_range(0, ~0);
return 0;
}
static void bmips_cpu_die(unsigned int cpu)
{
}
void __ref play_dead(void)
{
idle_task_exit();
/* flush data cache */
_dma_cache_wback_inv(0, ~0);
/*
* Wakeup is on SW0 or SW1; disable everything else
* Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux
* IRQ handlers; this clears ST0_IE and returns immediately.
*/
clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
irq_disable_hazard();
/*
* wait for SW interrupt from bmips_boot_secondary(), then jump
* back to start_secondary()
*/
__asm__ __volatile__(
" wait\n"
" j bmips_secondary_reentry\n"
: : : "memory");
}
#endif /* CONFIG_HOTPLUG_CPU */
struct plat_smp_ops bmips_smp_ops = {
.smp_setup = bmips_smp_setup,
.prepare_cpus = bmips_prepare_cpus,
.boot_secondary = bmips_boot_secondary,
.smp_finish = bmips_smp_finish,
.init_secondary = bmips_init_secondary,
.cpus_done = bmips_cpus_done,
.send_ipi_single = bmips_send_ipi_single,
.send_ipi_mask = bmips_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
};
#endif /* CONFIG_SMP */
/***********************************************************************
* BMIPS vector relocation
* This is primarily used for SMP boot, but it is applicable to some
* UP BMIPS systems as well.
***********************************************************************/
static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end)
{
memcpy((void *)dst, start, end - start);
dma_cache_wback((unsigned long)start, end - start);
local_flush_icache_range(dst, dst + (end - start));
instruction_hazard();
}
static inline void __cpuinit bmips_nmi_handler_setup(void)
{
bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
&bmips_reset_nmi_vec_end);
bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
&bmips_smp_int_vec_end);
}
void __cpuinit bmips_ebase_setup(void)
{
unsigned long new_ebase = ebase;
void __iomem __maybe_unused *cbr;
BUG_ON(ebase != CKSEG0);
#if defined(CONFIG_CPU_BMIPS4350)
/*
* BMIPS4350 cannot relocate the normal vectors, but it
* can relocate the BEV=1 vectors. So CPU1 starts up at
* the relocated BEV=1, IV=0 general exception vector @
* 0xa000_0380.
*
* set_uncached_handler() is used here because:
* - CPU1 will run this from uncached space
* - None of the cacheflush functions are set up yet
*/
set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
&bmips_smp_int_vec, 0x80);
__sync();
return;
#elif defined(CONFIG_CPU_BMIPS4380)
/*
* 0x8000_0000: reset/NMI (initially in kseg1)
* 0x8000_0400: normal vectors
*/
new_ebase = 0x80000400;
cbr = BMIPS_GET_CBR();
__raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
__raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
#elif defined(CONFIG_CPU_BMIPS5000)
/*
* 0x8000_0000: reset/NMI (initially in kseg1)
* 0x8000_1000: normal vectors
*/
new_ebase = 0x80001000;
write_c0_brcm_bootvec(0xa0088008);
write_c0_ebase(new_ebase);
if (max_cpus > 2)
bmips_write_zscm_reg(0xa0, 0xa008a008);
#else
return;
#endif
board_nmi_handler_setup = &bmips_nmi_handler_setup;
ebase = new_ebase;
}
asmlinkage void __weak plat_wired_tlb_setup(void)
{
/*
* Called when starting/restarting a secondary CPU.
* Kernel stacks and other important data might only be accessible
* once the wired entries are present.
*/
}
| gpl-2.0 |
nasty007/kernel_msm | drivers/media/rc/ir-sanyo-decoder.c | 4549 | 5392 | /* ir-sanyo-decoder.c - handle SANYO IR Pulse/Space protocol
*
* Copyright (C) 2011 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This protocol uses the NEC protocol timings. However, data is formatted as:
* 13 bits Custom Code
* 13 bits NOT(Custom Code)
* 8 bits Key data
* 8 bits NOT(Key data)
*
* According to LIRC, this protocol is used by Sanyo, Aiwa and Chinon
* Information for this protocol is available in the Sanyo LC7461 datasheet.
*/
#include <linux/module.h>
#include <linux/bitrev.h>
#include "rc-core-priv.h"
#define SANYO_NBITS (13+13+8+8)
#define SANYO_UNIT 562500 /* ns */
#define SANYO_HEADER_PULSE (16 * SANYO_UNIT)
#define SANYO_HEADER_SPACE (8 * SANYO_UNIT)
#define SANYO_BIT_PULSE (1 * SANYO_UNIT)
#define SANYO_BIT_0_SPACE (1 * SANYO_UNIT)
#define SANYO_BIT_1_SPACE (3 * SANYO_UNIT)
#define SANYO_REPEAT_SPACE (150 * SANYO_UNIT)
#define SANYO_TRAILER_PULSE (1 * SANYO_UNIT)
#define SANYO_TRAILER_SPACE (10 * SANYO_UNIT) /* in fact, 42 */
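/*
 * Editor's sketch of the nominal timings implied by the definitions above
 * (SANYO_UNIT = 562500 ns = 562.5 us, values rounded):
 *	header pulse	16 units	=  9.0 ms
 *	header space	 8 units	=  4.5 ms
 *	bit pulse	 1 unit		=  562.5 us
 *	"0" space	 1 unit		=  562.5 us
 *	"1" space	 3 units	~  1.69 ms
 *	repeat space	150 units	~ 84.4 ms
 * The decoder below accepts each of these within +/- SANYO_UNIT/2.
 */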
enum sanyo_state {
STATE_INACTIVE,
STATE_HEADER_SPACE,
STATE_BIT_PULSE,
STATE_BIT_SPACE,
STATE_TRAILER_PULSE,
STATE_TRAILER_SPACE,
};
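/*
 * Editor's note (inferred from the shifts in ir_sanyo_decode() below): the
 * 42 accumulated bits sit in data->bits as
 *	bits 41..29	custom code		-> address
 *	bits 28..16	NOT(custom code)	-> not_address
 *	bits 15..8	key data		-> command
 *	bits  7..0	NOT(key data)		-> not_command
 * bitrev16()/bitrev8() flip each field back to MSB-first order, and the
 * reported scancode is (address << 8) | command.
 */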
/**
* ir_sanyo_decode() - Decode one SANYO pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct sanyo_dec *data = &dev->raw->sanyo;
u32 scancode;
u8 address, not_address, command, not_command;
if (!(dev->raw->enabled_protocols & RC_TYPE_SANYO))
return 0;
if (!is_timing_event(ev)) {
if (ev.reset) {
IR_dprintk(1, "SANYO event reset received. reset to state 0\n");
data->state = STATE_INACTIVE;
}
return 0;
}
IR_dprintk(2, "SANYO decode started at state %d (%uus %s)\n",
data->state, TO_US(ev.duration), TO_STR(ev.pulse));
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
if (eq_margin(ev.duration, SANYO_HEADER_PULSE, SANYO_UNIT / 2)) {
data->count = 0;
data->state = STATE_HEADER_SPACE;
return 0;
}
break;
case STATE_HEADER_SPACE:
if (ev.pulse)
break;
if (eq_margin(ev.duration, SANYO_HEADER_SPACE, SANYO_UNIT / 2)) {
data->state = STATE_BIT_PULSE;
return 0;
}
break;
case STATE_BIT_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, SANYO_BIT_PULSE, SANYO_UNIT / 2))
break;
data->state = STATE_BIT_SPACE;
return 0;
case STATE_BIT_SPACE:
if (ev.pulse)
break;
if (!data->count && geq_margin(ev.duration, SANYO_REPEAT_SPACE, SANYO_UNIT / 2)) {
if (!dev->keypressed) {
IR_dprintk(1, "SANYO discarding last key repeat: event after key up\n");
} else {
rc_repeat(dev);
IR_dprintk(1, "SANYO repeat last key\n");
data->state = STATE_INACTIVE;
}
return 0;
}
data->bits <<= 1;
if (eq_margin(ev.duration, SANYO_BIT_1_SPACE, SANYO_UNIT / 2))
data->bits |= 1;
else if (!eq_margin(ev.duration, SANYO_BIT_0_SPACE, SANYO_UNIT / 2))
break;
data->count++;
if (data->count == SANYO_NBITS)
data->state = STATE_TRAILER_PULSE;
else
data->state = STATE_BIT_PULSE;
return 0;
case STATE_TRAILER_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, SANYO_TRAILER_PULSE, SANYO_UNIT / 2))
break;
data->state = STATE_TRAILER_SPACE;
return 0;
case STATE_TRAILER_SPACE:
if (ev.pulse)
break;
if (!geq_margin(ev.duration, SANYO_TRAILER_SPACE, SANYO_UNIT / 2))
break;
address = bitrev16((data->bits >> 29) & 0x1fff) >> 3;
not_address = bitrev16((data->bits >> 16) & 0x1fff) >> 3;
command = bitrev8((data->bits >> 8) & 0xff);
not_command = bitrev8((data->bits >> 0) & 0xff);
if ((command ^ not_command) != 0xff) {
IR_dprintk(1, "SANYO checksum error: received 0x%08Lx\n",
data->bits);
data->state = STATE_INACTIVE;
return 0;
}
scancode = address << 8 | command;
IR_dprintk(1, "SANYO scancode: 0x%06x\n", scancode);
rc_keydown(dev, scancode, 0);
data->state = STATE_INACTIVE;
return 0;
}
IR_dprintk(1, "SANYO decode failed at count %d state %d (%uus %s)\n",
data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
static struct ir_raw_handler sanyo_handler = {
.protocols = RC_TYPE_SANYO,
.decode = ir_sanyo_decode,
};
static int __init ir_sanyo_decode_init(void)
{
ir_raw_handler_register(&sanyo_handler);
printk(KERN_INFO "IR SANYO protocol handler initialized\n");
return 0;
}
static void __exit ir_sanyo_decode_exit(void)
{
ir_raw_handler_unregister(&sanyo_handler);
}
module_init(ir_sanyo_decode_init);
module_exit(ir_sanyo_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("SANYO IR protocol decoder");
| gpl-2.0 |
aopp/android_kernel_google_msm | drivers/net/ethernet/packetengines/hamachi.c | 4805 | 64238 | /* hamachi.c: A Packet Engines GNIC-II Gigabit Ethernet driver for Linux. */
/*
Written 1998-2000 by Donald Becker.
Updates 2000 by Keith Underwood.
This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on or derived from this code fall under the GPL and must
retain the authorship, copyright and license notice. This file is not
a complete program and may only be used when the entire operating
system is licensed under the GPL.
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
This driver is for the Packet Engines GNIC-II PCI Gigabit Ethernet
adapter.
Support and updates available at
http://www.scyld.com/network/hamachi.html
[link no longer provides useful info -jgarzik]
or
http://www.parl.clemson.edu/~keithu/hamachi.html
*/
#define DRV_NAME "hamachi"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "Sept 11, 2006"
/* A few user-configurable values. */
static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
#define final_version
#define hamachi_debug debug
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 40;
static int mtu;
/* Default values selected by testing on a dual processor PIII-450 */
/* These six interrupt control parameters may be set directly when loading the
* module, or through the rx_params and tx_params variables
*/
static int max_rx_latency = 0x11;
static int max_rx_gap = 0x05;
static int min_rx_pkt = 0x18;
static int max_tx_latency = 0x00;
static int max_tx_gap = 0x00;
static int min_tx_pkt = 0x30;
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
-Setting to > 1518 causes all frames to be copied
-Setting to 0 disables copies
*/
static int rx_copybreak;
/* An override for the hardware detection of bus width.
Set to 1 to force 32 bit PCI bus detection. Set to 4 to force 64 bit.
Add 2 to disable parity detection.
*/
static int force32;
/* Used to pass the media type, etc.
These exist for driver interoperability.
No media types are currently defined.
- The lower 4 bits are reserved for the media type.
- The next three bits may be set to one of the following:
0x00000000 : Autodetect PCI bus
0x00000010 : Force 32 bit PCI bus
0x00000020 : Disable parity detection
0x00000040 : Force 64 bit PCI bus
Default is autodetect
- The next bit can be used to force half-duplex. This is a bad
idea since no known implementations implement half-duplex, and,
in general, half-duplex for gigabit ethernet is a bad idea.
0x00000080 : Force half-duplex
Default is full-duplex.
- In the original driver, the ninth bit could be used to force
full-duplex. Maintain that for compatibility
0x00000200 : Force full-duplex
*/
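/*
 * Editor's example (a hypothetical setting, matching the bit decode in
 * hamachi_init_one()): loading the module with options=0x210 would request
 * a forced 32 bit PCI bus (0x010) plus forced full-duplex (0x200) for the
 * first board.
 */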
#define MAX_UNITS 8 /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* The Hamachi chipset supports 3 parameters each for Rx and Tx
* interrupt management. Parameters will be loaded as specified into
* the TxIntControl and RxIntControl registers.
*
* The registers are arranged as follows:
* 23 - 16 15 - 8 7 - 0
* _________________________________
* | min_pkt | max_gap | max_latency |
* ---------------------------------
* min_pkt : The minimum number of packets processed between
* interrupts.
* max_gap : The maximum inter-packet gap in units of 8.192 us
* max_latency : The absolute time between interrupts in units of 8.192 us
*
*/
static int rx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
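/*
 * Editor's worked example (defaults above, packed as in hamachi_init_one()):
 *
 *	rx_int_var = min_rx_pkt << 16 | max_rx_gap << 8 | max_rx_latency;
 *		   = 0x18 << 16 | 0x05 << 8 | 0x11 = 0x00180511
 *
 * i.e. coalesce Rx interrupts until 24 packets, a ~41 us inter-packet gap,
 * or ~139 us of absolute latency (0x05 and 0x11 in 8.192 us units),
 * presumably whichever limit trips first.
 */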
/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency.
The compiler will convert <unsigned>'%'<2^N> into a bit mask.
Making the Tx ring too large decreases the effectiveness of channel
bonding and packet priority.
There are no ill effects from too-large receive rings, except for
excessive memory usage */
/* Empirically it appears that the Tx ring needs to be a little bigger
for these Gbit adapters or you get into an overrun condition really
easily. Also, things appear to work a bit better in back-to-back
configurations if the Rx ring is 8 times the size of the Tx ring
*/
#define TX_RING_SIZE 64
#define RX_RING_SIZE 512
#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct hamachi_desc)
#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct hamachi_desc)
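/*
 * Editor's note: with the power-of-two sizes above, index arithmetic such
 * as "hmp->cur_tx % TX_RING_SIZE" compiles to a simple mask: cur_tx & 63
 * for the Tx ring, cur_rx & 511 for the Rx ring.
 */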
/*
* Enable netdev_ioctl. Added interrupt coalescing parameter adjustment.
* 2/19/99 Pete Wyckoff <wyckoff@ca.sandia.gov>
*/
/* play with 64-bit addrlen; seems to be a teensy bit slower --pw */
/* #define ADDRLEN 64 */
/*
* RX_CHECKSUM turns on card-generated receive checksum generation for
* TCP and UDP packets. Otherwise the upper layers do the calculation.
* 3/10/1999 Pete Wyckoff <wyckoff@ca.sandia.gov>
*/
#define RX_CHECKSUM
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/unaligned.h>
#include <asm/cache.h>
static const char version[] __devinitconst =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
" Some modifications by Eric kasten <kasten@nscl.msu.edu>\n"
" Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n";
/* IP_MF appears to be only defined in <netinet/ip.h>, however,
we need it for hardware checksumming support. FYI... some of
the definitions in <netinet/ip.h> conflict/duplicate those in
other linux headers, causing many compiler warnings.
*/
#ifndef IP_MF
#define IP_MF 0x2000 /* IP more frags from <netinet/ip.h> */
#endif
/* Define IP_OFFSET to be IPOPT_OFFSET */
#ifndef IP_OFFSET
#ifdef IPOPT_OFFSET
#define IP_OFFSET IPOPT_OFFSET
#else
#define IP_OFFSET 2
#endif
#endif
#define RUN_AT(x) (jiffies + (x))
#ifndef ADDRLEN
#define ADDRLEN 32
#endif
/* Condensed bus+endian portability operations. */
#if ADDRLEN == 64
#define cpu_to_leXX(addr) cpu_to_le64(addr)
#define leXX_to_cpu(addr) le64_to_cpu(addr)
#else
#define cpu_to_leXX(addr) cpu_to_le32(addr)
#define leXX_to_cpu(addr) le32_to_cpu(addr)
#endif
/*
Theory of Operation
I. Board Compatibility
This device driver is designed for the Packet Engines "Hamachi"
Gigabit Ethernet chip. The only PCA currently supported is the GNIC-II 64-bit
66MHz PCI card.
II. Board-specific settings
No jumpers exist on the board. The chip supports software correction of
various motherboard wiring errors, however this driver does not support
that feature.
III. Driver operation
IIIa. Ring buffers
The Hamachi uses a typical descriptor based bus-master architecture.
The descriptor list is similar to that used by the Digital Tulip.
This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
This driver uses a zero-copy receive and transmit scheme similar to my other
network drivers.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Hamachi as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.
The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. Gigabit cards are typically used on generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.
IIIb/c. Transmit/Receive Structure
The Rx and Tx descriptor structures are straightforward, with no historical
baggage that must be explained. Unlike the awkward DBDMA structure, there
are no unused fields or option bits that had only one allowable setting.
Two details should be noted about the descriptors: The chip supports both 32
bit and 64 bit address structures, and the length field is overwritten on
the receive descriptors. The descriptor length is set in the control word
for each channel. The development driver uses 32 bit addresses only, however
64 bit addresses may be enabled for 64 bit architectures e.g. the Alpha.
IIId. Synchronization
This driver is very similar to my other network drivers.
The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and other software.
The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'hmp->tx_full' flag.
The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the 'hmp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
IV. Notes
Thanks to Kim Stearns of Packet Engines for providing a pair of GNIC-II boards.
IVb. References
Hamachi Engineering Design Specification, 5/15/97
(Note: This version was marked "Confidential".)
IVc. Errata
None noted.
V. Recent Changes
01/15/1999 EPK Enlargement of the TX and RX ring sizes. This appears
to help avoid some stall conditions -- this needs further research.
01/15/1999 EPK Creation of the hamachi_tx function. This function cleans
the Tx ring and is called from hamachi_start_xmit (this used to be
called from hamachi_interrupt but it tends to delay execution of the
interrupt handler and thus reduce bandwidth by increasing the latency
between hamachi_rx()'s). Notably, some modification has been made so
that the cleaning loop checks only to make sure that the DescOwn bit
isn't set in the status flag since the card is not required
to set the entire flag to zero after processing.
01/15/1999 EPK In the hamachi_start_tx function, the Tx ring full flag is
checked before attempting to add a buffer to the ring. If the ring is full
an attempt is made to free any dirty buffers and thus find space for
the new buffer, or the function returns non-zero, which should cause the
scheduler to reschedule the buffer later.
01/15/1999 EPK Some adjustments were made to the chip initialization.
End-to-end flow control should now be fully active and the interrupt
algorithm vars have been changed. These could probably use further tuning.
01/15/1999 EPK Added the max_{rx,tx}_latency options. These are used to
set the rx and tx latencies for the Hamachi interrupts. If you're having
problems with network stalls, try setting these to higher values.
Valid values are 0x00 through 0xff.
01/15/1999 EPK In general, the overall bandwidth has increased and
latencies are better (sometimes by a factor of 2). Stalls are rare at
this point, however there still appears to be a bug somewhere between the
hardware and driver. TCP checksum errors under load also appear to be
eliminated at this point.
01/18/1999 EPK Ensured that the DescEndRing bit was being set on both the
Rx and Tx rings. This appears to have been affecting whether a particular
peer-to-peer connection would hang under high load. I believe the Rx
ring was typically getting set correctly, but the Tx ring wasn't getting
the DescEndRing bit set during initialization. ??? Does this mean the
hamachi card is using the DescEndRing in processing even if a particular
slot isn't in use -- hypothetically, the card might be searching the
entire Tx ring for slots with the DescOwn bit set and then processing
them. If the DescEndRing bit isn't set, then it might just wander off
through memory until it hits a chunk of data with that bit set
and then loop back.
02/09/1999 EPK Added Michel Mueller's fix for the TxDMA interrupt and
Tx-timeout problem (TxCmd and RxCmd need only be set when idle or stopped).
02/09/1999 EPK Added code to check/reset dev->tbusy in hamachi_interrupt.
(Michel Mueller pointed out the ``permanently busy'' potential
problem here).
02/22/1999 EPK Added Pete Wyckoff's ioctl to control the Tx/Rx latencies.
02/23/1999 EPK Verified that the interrupt status field bits for Tx were
incorrectly defined and corrected (as per Michel Mueller).
02/23/1999 EPK Corrected the Tx full check to check that at least 4 slots
were available before resetting the tbusy and tx_full flags
(as per Michel Mueller).
03/11/1999 EPK Added Pete Wyckoff's hardware checksumming support.
12/31/1999 KDU Cleaned up assorted things and added Don's code to force
32 bit.
02/20/2000 KDU Some of the control was just plain odd. Cleaned up the
hamachi_start_xmit() and hamachi_interrupt() code. There is still some
re-structuring I would like to do.
03/01/2000 KDU Experimenting with a WIDE range of interrupt mitigation
parameters on a dual P3-450 setup yielded the new default interrupt
mitigation parameters. Tx should interrupt VERY infrequently due to
Eric's scheme. Rx should be more often...
03/13/2000 KDU Added a patch to make the Rx Checksum code interact
nicely with non-linux machines.
03/13/2000 KDU Experimented with some of the configuration values:
-It seems that enabling PCI performance commands for descriptors
(changing RxDMACtrl and TxDMACtrl lower nibble from 5 to D) has minimal
performance impact for any of my tests. (ttcp, netpipe, netperf) I will
leave them that way until I hear further feedback.
-Increasing the PCI_LATENCY_TIMER to 130
(2 + (burst size of 128 * (0 wait states + 1))) seems to slightly
degrade performance. Leaving default at 64 pending further information.
03/14/2000 KDU Further tuning:
-adjusted boguscnt in hamachi_rx() to depend on interrupt
mitigation parameters chosen.
-Selected a set of interrupt parameters based on some extensive testing.
These may change with more testing.
TO DO:
-Consider borrowing from the acenic driver code to check PCI_COMMAND for
PCI_COMMAND_INVALIDATE. Set maximum burst size to cache line size in
that case.
-fix the reset procedure. It doesn't quite work.
*/
/* A few values that may be tweaked. */
/* Size of each temporary Rx buffer, calculated as:
* 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
* the card) + 8 bytes of status info + 8 bytes for the Rx Checksum
*/
#define PKT_BUF_SZ 1536
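/* Editor's check: 1518 + 2 + 8 + 8 = 1536, matching PKT_BUF_SZ above. */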
/* For now, this is going to be set to the maximum size of an ethernet
* packet. Eventually, we may want to make it a variable that is
* related to the MTU
*/
#define MAX_FRAME_SIZE 1518
/* The rest of these values should never change. */
static void hamachi_timer(unsigned long data);
enum capability_flags {CanHaveMII=1, };
static const struct chip_info {
u16 vendor_id, device_id, device_id_mask, pad;
const char *name;
void (*media_timer)(unsigned long data);
int flags;
} chip_tbl[] = {
{0x1318, 0x0911, 0xffff, 0, "Hamachi GNIC-II", hamachi_timer, 0},
{0,},
};
/* Offsets to the Hamachi registers. Various sizes. */
enum hamachi_offsets {
TxDMACtrl=0x00, TxCmd=0x04, TxStatus=0x06, TxPtr=0x08, TxCurPtr=0x10,
RxDMACtrl=0x20, RxCmd=0x24, RxStatus=0x26, RxPtr=0x28, RxCurPtr=0x30,
PCIClkMeas=0x060, MiscStatus=0x066, ChipRev=0x68, ChipReset=0x06B,
LEDCtrl=0x06C, VirtualJumpers=0x06D, GPIO=0x6E,
TxChecksum=0x074, RxChecksum=0x076,
TxIntrCtrl=0x078, RxIntrCtrl=0x07C,
InterruptEnable=0x080, InterruptClear=0x084, IntrStatus=0x088,
EventStatus=0x08C,
MACCnfg=0x0A0, FrameGap0=0x0A2, FrameGap1=0x0A4,
/* See enum MII_offsets below. */
MACCnfg2=0x0B0, RxDepth=0x0B8, FlowCtrl=0x0BC, MaxFrameSize=0x0CE,
AddrMode=0x0D0, StationAddr=0x0D2,
/* Gigabit AutoNegotiation. */
ANCtrl=0x0E0, ANStatus=0x0E2, ANXchngCtrl=0x0E4, ANAdvertise=0x0E8,
ANLinkPartnerAbility=0x0EA,
EECmdStatus=0x0F0, EEData=0x0F1, EEAddr=0x0F2,
FIFOcfg=0x0F8,
};
/* Offsets to the MII-mode registers. */
enum MII_offsets {
MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
MII_Status=0xAE,
};
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
IntrRxDone=0x01, IntrRxPCIFault=0x02, IntrRxPCIErr=0x04,
IntrTxDone=0x100, IntrTxPCIFault=0x200, IntrTxPCIErr=0x400,
LinkChange=0x10000, NegotiationChange=0x20000, StatsMax=0x40000, };
/* The Hamachi Rx and Tx buffer descriptors. */
struct hamachi_desc {
__le32 status_n_length;
#if ADDRLEN == 64
u32 pad;
__le64 addr;
#else
__le32 addr;
#endif
};
/* Bits in hamachi_desc.status_n_length */
enum desc_status_bits {
DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
DescIntr=0x10000000,
};
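/*
 * Editor's note (inferred from the masks used throughout this driver): the
 * low 16 bits of status_n_length carry the buffer/frame length while the
 * high bits carry the Desc* flags, e.g. a Tx slot is armed with
 *	cpu_to_le32(DescOwn | DescEndPacket | DescIntr | skb->len)
 * and later cleaned by testing only the DescOwn bit.
 */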
#define PRIV_ALIGN 15 /* Required alignment mask */
#define MII_CNT 4
struct hamachi_private {
/* Descriptor rings first for alignment. Tx requires a second descriptor
for status. */
struct hamachi_desc *rx_ring;
struct hamachi_desc *tx_ring;
struct sk_buff* rx_skbuff[RX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
dma_addr_t tx_ring_dma;
dma_addr_t rx_ring_dma;
struct timer_list timer; /* Media selection timer. */
/* Frequently used and paired value: keep adjacent for cache effect. */
spinlock_t lock;
int chip_id;
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
unsigned int tx_full:1; /* The Tx queue is full. */
unsigned int duplex_lock:1;
unsigned int default_port:4; /* Last dev->if_port value. */
/* MII transceiver section. */
int mii_cnt; /* MII device addresses. */
struct mii_if_info mii_if; /* MII lib hooks/info */
unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
u32 rx_int_var, tx_int_var; /* interrupt control variables */
u32 option; /* Hold on to a copy of the options */
struct pci_dev *pci_dev;
void __iomem *base;
};
MODULE_AUTHOR("Donald Becker <becker@scyld.com>, Eric Kasten <kasten@nscl.msu.edu>, Keith Underwood <keithu@parl.clemson.edu>");
MODULE_DESCRIPTION("Packet Engines 'Hamachi' GNIC-II Gigabit Ethernet driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(min_rx_pkt, int, 0);
module_param(max_rx_gap, int, 0);
module_param(max_rx_latency, int, 0);
module_param(min_tx_pkt, int, 0);
module_param(max_tx_gap, int, 0);
module_param(max_tx_latency, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(rx_params, int, NULL, 0);
module_param_array(tx_params, int, NULL, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(force32, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "GNIC-II maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "GNIC-II MTU (all boards)");
MODULE_PARM_DESC(debug, "GNIC-II debug level (0-7)");
MODULE_PARM_DESC(min_rx_pkt, "GNIC-II minimum Rx packets processed between interrupts");
MODULE_PARM_DESC(max_rx_gap, "GNIC-II maximum Rx inter-packet gap in 8.192 microsecond units");
MODULE_PARM_DESC(max_rx_latency, "GNIC-II time between Rx interrupts in 8.192 microsecond units");
MODULE_PARM_DESC(min_tx_pkt, "GNIC-II minimum Tx packets processed between interrupts");
MODULE_PARM_DESC(max_tx_gap, "GNIC-II maximum Tx inter-packet gap in 8.192 microsecond units");
MODULE_PARM_DESC(max_tx_latency, "GNIC-II time between Tx interrupts in 8.192 microsecond units");
MODULE_PARM_DESC(rx_copybreak, "GNIC-II copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(rx_params, "GNIC-II min_rx_pkt+max_rx_gap+max_rx_latency");
MODULE_PARM_DESC(tx_params, "GNIC-II min_tx_pkt+max_tx_gap+max_tx_latency");
MODULE_PARM_DESC(options, "GNIC-II Bits 0-3: media type, bits 4-6: as force32, bit 7: half duplex, bit 9 full duplex");
MODULE_PARM_DESC(full_duplex, "GNIC-II full duplex setting(s) (1)");
MODULE_PARM_DESC(force32, "GNIC-II: Bit 0: 32 bit PCI, bit 1: disable parity, bit 2: 64 bit PCI (all boards)");
static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int hamachi_open(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void hamachi_timer(unsigned long data);
static void hamachi_tx_timeout(struct net_device *dev);
static void hamachi_init_ring(struct net_device *dev);
static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t hamachi_interrupt(int irq, void *dev_instance);
static int hamachi_rx(struct net_device *dev);
static inline int hamachi_tx(struct net_device *dev);
static void hamachi_error(struct net_device *dev, int intr_status);
static int hamachi_close(struct net_device *dev);
static struct net_device_stats *hamachi_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
static const struct ethtool_ops ethtool_ops_no_mii;
static const struct net_device_ops hamachi_netdev_ops = {
.ndo_open = hamachi_open,
.ndo_stop = hamachi_close,
.ndo_start_xmit = hamachi_start_xmit,
.ndo_get_stats = hamachi_get_stats,
.ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = hamachi_tx_timeout,
.ndo_do_ioctl = netdev_ioctl,
};
static int __devinit hamachi_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct hamachi_private *hmp;
int option, i, rx_int_var, tx_int_var, boguscnt;
int chip_id = ent->driver_data;
int irq;
void __iomem *ioaddr;
unsigned long base;
static int card_idx;
struct net_device *dev;
void *ring_space;
dma_addr_t ring_dma;
int ret = -ENOMEM;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
static int printed_version;
if (!printed_version++)
printk(version);
#endif
if (pci_enable_device(pdev)) {
ret = -EIO;
goto err_out;
}
base = pci_resource_start(pdev, 0);
#ifdef __alpha__ /* Really "64 bit addrs" */
base |= (pci_resource_start(pdev, 1) << 32);
#endif
pci_set_master(pdev);
i = pci_request_regions(pdev, DRV_NAME);
if (i)
return i;
irq = pdev->irq;
ioaddr = ioremap(base, 0x400);
if (!ioaddr)
goto err_out_release;
dev = alloc_etherdev(sizeof(struct hamachi_private));
if (!dev)
goto err_out_iounmap;
SET_NETDEV_DEV(dev, &pdev->dev);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i)
: readb(ioaddr + StationAddr + i);
#if ! defined(final_version)
if (hamachi_debug > 4)
for (i = 0; i < 0x10; i++)
printk("%2.2x%s",
read_eeprom(ioaddr, i), i % 16 != 15 ? " " : "\n");
#endif
hmp = netdev_priv(dev);
spin_lock_init(&hmp->lock);
hmp->mii_if.dev = dev;
hmp->mii_if.mdio_read = mdio_read;
hmp->mii_if.mdio_write = mdio_write;
hmp->mii_if.phy_id_mask = 0x1f;
hmp->mii_if.reg_num_mask = 0x1f;
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_cleardev;
hmp->tx_ring = ring_space;
hmp->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
hmp->rx_ring = ring_space;
hmp->rx_ring_dma = ring_dma;
/* Check for options being passed in */
option = card_idx < MAX_UNITS ? options[card_idx] : 0;
if (dev->mem_start)
option = dev->mem_start;
/* If the bus size is misidentified, do the following. */
force32 = force32 ? force32 :
((option >= 0) ? ((option & 0x00000070) >> 4) : 0 );
if (force32)
writeb(force32, ioaddr + VirtualJumpers);
/* Hmmm, do we really need to reset the chip??? */
writeb(0x01, ioaddr + ChipReset);
/* After a reset, the clock speed measurement of the PCI bus will not
* be valid for a moment. Wait for a little while until it is. If
* it takes more than 10ms, forget it.
*/
udelay(10);
i = readb(ioaddr + PCIClkMeas);
for (boguscnt = 0; (!(i & 0x080)) && boguscnt < 1000; boguscnt++){
udelay(10);
i = readb(ioaddr + PCIClkMeas);
}
hmp->base = ioaddr;
dev->base_addr = (unsigned long)ioaddr;
dev->irq = irq;
pci_set_drvdata(pdev, dev);
hmp->chip_id = chip_id;
hmp->pci_dev = pdev;
/* The lower four bits are the media type. */
if (option > 0) {
hmp->option = option;
if (option & 0x200)
hmp->mii_if.full_duplex = 1;
else if (option & 0x080)
hmp->mii_if.full_duplex = 0;
hmp->default_port = option & 15;
if (hmp->default_port)
hmp->mii_if.force_media = 1;
}
if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
hmp->mii_if.full_duplex = 1;
/* lock the duplex mode if someone specified a value */
if (hmp->mii_if.full_duplex || (option & 0x080))
hmp->duplex_lock = 1;
/* Set interrupt tuning parameters */
max_rx_latency = max_rx_latency & 0x00ff;
max_rx_gap = max_rx_gap & 0x00ff;
min_rx_pkt = min_rx_pkt & 0x00ff;
max_tx_latency = max_tx_latency & 0x00ff;
max_tx_gap = max_tx_gap & 0x00ff;
min_tx_pkt = min_tx_pkt & 0x00ff;
rx_int_var = card_idx < MAX_UNITS ? rx_params[card_idx] : -1;
tx_int_var = card_idx < MAX_UNITS ? tx_params[card_idx] : -1;
hmp->rx_int_var = rx_int_var >= 0 ? rx_int_var :
(min_rx_pkt << 16 | max_rx_gap << 8 | max_rx_latency);
hmp->tx_int_var = tx_int_var >= 0 ? tx_int_var :
(min_tx_pkt << 16 | max_tx_gap << 8 | max_tx_latency);
/* The Hamachi-specific entries in the device structure. */
dev->netdev_ops = &hamachi_netdev_ops;
if (chip_tbl[hmp->chip_id].flags & CanHaveMII)
SET_ETHTOOL_OPS(dev, ðtool_ops);
else
SET_ETHTOOL_OPS(dev, ðtool_ops_no_mii);
dev->watchdog_timeo = TX_TIMEOUT;
if (mtu)
dev->mtu = mtu;
i = register_netdev(dev);
if (i) {
ret = i;
goto err_out_unmap_rx;
}
printk(KERN_INFO "%s: %s type %x at %p, %pM, IRQ %d.\n",
dev->name, chip_tbl[chip_id].name, readl(ioaddr + ChipRev),
ioaddr, dev->dev_addr, irq);
i = readb(ioaddr + PCIClkMeas);
printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers "
"%2.2x, LPA %4.4x.\n",
dev->name, readw(ioaddr + MiscStatus) & 1 ? 64 : 32,
i ? 2000/(i&0x7f) : 0, i&0x7f, (int)readb(ioaddr + VirtualJumpers),
readw(ioaddr + ANLinkPartnerAbility));
if (chip_tbl[hmp->chip_id].flags & CanHaveMII) {
int phy, phy_idx = 0;
for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
int mii_status = mdio_read(dev, phy, MII_BMSR);
if (mii_status != 0xffff &&
mii_status != 0x0000) {
hmp->phys[phy_idx++] = phy;
hmp->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
printk(KERN_INFO "%s: MII PHY found at address %d, status "
"0x%4.4x advertising %4.4x.\n",
dev->name, phy, mii_status, hmp->mii_if.advertising);
}
}
hmp->mii_cnt = phy_idx;
if (hmp->mii_cnt > 0)
hmp->mii_if.phy_id = hmp->phys[0];
else
memset(&hmp->mii_if, 0, sizeof(hmp->mii_if));
}
/* Configure gigabit autonegotiation. */
writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
writew(0x08e0, ioaddr + ANAdvertise); /* Set our advertise word. */
writew(0x1000, ioaddr + ANCtrl); /* Enable negotiation */
card_idx++;
return 0;
err_out_unmap_rx:
pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
hmp->rx_ring_dma);
err_out_unmap_tx:
pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
hmp->tx_ring_dma);
err_out_cleardev:
free_netdev (dev);
err_out_iounmap:
iounmap(ioaddr);
err_out_release:
pci_release_regions(pdev);
err_out:
return ret;
}
static int __devinit read_eeprom(void __iomem *ioaddr, int location)
{
int bogus_cnt = 1000;
/* We should check busy first - per docs -KDU */
while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0);
writew(location, ioaddr + EEAddr);
writeb(0x02, ioaddr + EECmdStatus);
bogus_cnt = 1000;
while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0);
if (hamachi_debug > 5)
printk(" EEPROM status is %2.2x after %d ticks.\n",
(int)readb(ioaddr + EECmdStatus), 1000- bogus_cnt);
return readb(ioaddr + EEData);
}
/* MII Management Data I/O accesses.
These routines assume the MDIO controller is idle, and do not exit until
the command is finished. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
int i;
/* We should check busy first - per docs -KDU */
for (i = 10000; i >= 0; i--)
if ((readw(ioaddr + MII_Status) & 1) == 0)
break;
writew((phy_id<<8) + location, ioaddr + MII_Addr);
writew(0x0001, ioaddr + MII_Cmd);
for (i = 10000; i >= 0; i--)
if ((readw(ioaddr + MII_Status) & 1) == 0)
break;
return readw(ioaddr + MII_Rd_Data);
}
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
int i;
/* We should check busy first - per docs -KDU */
for (i = 10000; i >= 0; i--)
if ((readw(ioaddr + MII_Status) & 1) == 0)
break;
writew((phy_id<<8) + location, ioaddr + MII_Addr);
writew(value, ioaddr + MII_Wr_Data);
/* Wait for the command to finish. */
for (i = 10000; i >= 0; i--)
if ((readw(ioaddr + MII_Status) & 1) == 0)
break;
}
static int hamachi_open(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
int i;
u32 rx_int_var, tx_int_var;
u16 fifo_info;
i = request_irq(dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev);
if (i)
return i;
if (hamachi_debug > 1)
printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
dev->name, dev->irq);
hamachi_init_ring(dev);
#if ADDRLEN == 64
/* writellll anyone ? */
writel(hmp->rx_ring_dma, ioaddr + RxPtr);
writel(hmp->rx_ring_dma >> 32, ioaddr + RxPtr + 4);
writel(hmp->tx_ring_dma, ioaddr + TxPtr);
writel(hmp->tx_ring_dma >> 32, ioaddr + TxPtr + 4);
#else
writel(hmp->rx_ring_dma, ioaddr + RxPtr);
writel(hmp->tx_ring_dma, ioaddr + TxPtr);
#endif
/* TODO: It would make sense to organize this as words since the card
* documentation does. -KDU
*/
for (i = 0; i < 6; i++)
writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
/* Initialize other registers: with so many, this will eventually be
converted to an offset/value list. */
/* Configure the FIFO */
fifo_info = (readw(ioaddr + GPIO) & 0x00C0) >> 6;
switch (fifo_info){
case 0 :
/* No FIFO */
writew(0x0000, ioaddr + FIFOcfg);
break;
case 1 :
/* Configure the FIFO for 512K external, 16K used for Tx. */
writew(0x0028, ioaddr + FIFOcfg);
break;
case 2 :
/* Configure the FIFO for 1024K external, 32K used for Tx. */
writew(0x004C, ioaddr + FIFOcfg);
break;
case 3 :
/* Configure the FIFO for 2048K external, 32K used for Tx. */
writew(0x006C, ioaddr + FIFOcfg);
break;
default :
printk(KERN_WARNING "%s: Unsupported external memory config!\n",
dev->name);
/* Default to no FIFO */
writew(0x0000, ioaddr + FIFOcfg);
break;
}
if (dev->if_port == 0)
dev->if_port = hmp->default_port;
/* Setting the Rx mode will start the Rx process. */
/* If someone didn't choose a duplex, default to full-duplex */
if (hmp->duplex_lock != 1)
hmp->mii_if.full_duplex = 1;
/* always 1, takes no more time to do it */
writew(0x0001, ioaddr + RxChecksum);
writew(0x0000, ioaddr + TxChecksum);
writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */
writew(0x215F, ioaddr + MACCnfg);
writew(0x000C, ioaddr + FrameGap0);
/* WHAT?!?!? Why isn't this documented somewhere? -KDU */
writew(0x1018, ioaddr + FrameGap1);
/* Why do we enable receives/transmits here? -KDU */
writew(0x0780, ioaddr + MACCnfg2); /* Upper 16 bits control LEDs. */
/* Enable automatic generation of flow control frames, period 0xffff. */
writel(0x0030FFFF, ioaddr + FlowCtrl);
writew(MAX_FRAME_SIZE, ioaddr + MaxFrameSize); /* dev->mtu+14 ??? */
/* Enable legacy links. */
writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
/* Initial Link LED to blinking red. */
writeb(0x03, ioaddr + LEDCtrl);
/* Configure interrupt mitigation. This has a great effect on
performance, so system tuning should start here! */
rx_int_var = hmp->rx_int_var;
tx_int_var = hmp->tx_int_var;
if (hamachi_debug > 1) {
printk("max_tx_latency: %d, max_tx_gap: %d, min_tx_pkt: %d\n",
tx_int_var & 0x00ff, (tx_int_var & 0x00ff00) >> 8,
(tx_int_var & 0x00ff0000) >> 16);
printk("max_rx_latency: %d, max_rx_gap: %d, min_rx_pkt: %d\n",
rx_int_var & 0x00ff, (rx_int_var & 0x00ff00) >> 8,
(rx_int_var & 0x00ff0000) >> 16);
printk("rx_int_var: %x, tx_int_var: %x\n", rx_int_var, tx_int_var);
}
writel(tx_int_var, ioaddr + TxIntrCtrl);
writel(rx_int_var, ioaddr + RxIntrCtrl);
set_rx_mode(dev);
netif_start_queue(dev);
/* Enable interrupts by setting the interrupt mask. */
writel(0x80878787, ioaddr + InterruptEnable);
writew(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
/* Configure and start the DMA channels. */
/* Burst sizes are in the low three bits: size = 4<<(val&7) */
#if ADDRLEN == 64
writew(0x005D, ioaddr + RxDMACtrl); /* 128 dword bursts */
writew(0x005D, ioaddr + TxDMACtrl);
#else
writew(0x001D, ioaddr + RxDMACtrl);
writew(0x001D, ioaddr + TxDMACtrl);
#endif
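/* Editor's check of the burst-size formula above: 0x001D and 0x005D both
 * have (val & 7) == 5, giving a burst size of 4 << 5 = 128, which matches
 * the "128 dword bursts" comment. */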
writew(0x0001, ioaddr + RxCmd);
if (hamachi_debug > 2) {
printk(KERN_DEBUG "%s: Done hamachi_open(), status: Rx %x Tx %x.\n",
dev->name, readw(ioaddr + RxStatus), readw(ioaddr + TxStatus));
}
/* Set the timer to check for link beat. */
init_timer(&hmp->timer);
hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
hmp->timer.data = (unsigned long)dev;
hmp->timer.function = hamachi_timer; /* timer handler */
add_timer(&hmp->timer);
return 0;
}
static inline int hamachi_tx(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
/* Update the dirty pointer until we find an entry that is
still owned by the card */
for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
int entry = hmp->dirty_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
break;
/* Free the original skb. */
skb = hmp->tx_skbuff[entry];
if (skb) {
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->tx_ring[entry].addr),
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
hmp->tx_skbuff[entry] = NULL;
}
hmp->tx_ring[entry].status_n_length = 0;
if (entry >= TX_RING_SIZE-1)
hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
cpu_to_le32(DescEndRing);
dev->stats.tx_packets++;
}
return 0;
}
static void hamachi_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
int next_tick = 10*HZ;
if (hamachi_debug > 2) {
printk(KERN_INFO "%s: Hamachi Autonegotiation status %4.4x, LPA "
"%4.4x.\n", dev->name, readw(ioaddr + ANStatus),
readw(ioaddr + ANLinkPartnerAbility));
printk(KERN_INFO "%s: Autonegotiation regs %4.4x %4.4x %4.4x "
"%4.4x %4.4x %4.4x.\n", dev->name,
readw(ioaddr + 0x0e0),
readw(ioaddr + 0x0e2),
readw(ioaddr + 0x0e4),
readw(ioaddr + 0x0e6),
readw(ioaddr + 0x0e8),
readw(ioaddr + 0x0eA));
}
/* We could do something here... nah. */
hmp->timer.expires = RUN_AT(next_tick);
add_timer(&hmp->timer);
}
static void hamachi_tx_timeout(struct net_device *dev)
{
int i;
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
printk(KERN_WARNING "%s: Hamachi transmit timed out, status %8.8x,"
" resetting...\n", dev->name, (int)readw(ioaddr + TxStatus));
{
printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
printk(KERN_CONT " %8.8x",
le32_to_cpu(hmp->rx_ring[i].status_n_length));
printk(KERN_CONT "\n");
printk(KERN_DEBUG" Tx ring %p: ", hmp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_CONT " %4.4x",
le32_to_cpu(hmp->tx_ring[i].status_n_length));
printk(KERN_CONT "\n");
}
/* Reinit the hardware and make sure the Rx and Tx processes
are up and running.
*/
dev->if_port = 0;
/* The right way to do Reset. -KDU
* -Clear OWN bit in all Rx/Tx descriptors
* -Wait 50 uS for channels to go idle
* -Turn off MAC receiver
* -Issue Reset
*/
for (i = 0; i < RX_RING_SIZE; i++)
hmp->rx_ring[i].status_n_length &= cpu_to_le32(~DescOwn);
/* Presume that all packets in the Tx queue are gone if we have to
* re-init the hardware.
*/
for (i = 0; i < TX_RING_SIZE; i++){
struct sk_buff *skb;
if (i >= TX_RING_SIZE - 1)
hmp->tx_ring[i].status_n_length =
cpu_to_le32(DescEndRing) |
(hmp->tx_ring[i].status_n_length &
cpu_to_le32(0x0000ffff));
else
hmp->tx_ring[i].status_n_length &= cpu_to_le32(0x0000ffff);
skb = hmp->tx_skbuff[i];
if (skb){
pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[i].addr),
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
hmp->tx_skbuff[i] = NULL;
}
}
udelay(60); /* Sleep 60 us just for safety sake */
writew(0x0002, ioaddr + RxCmd); /* STOP Rx */
writeb(0x01, ioaddr + ChipReset); /* Reinit the hardware */
hmp->tx_full = 0;
hmp->cur_rx = hmp->cur_tx = 0;
hmp->dirty_rx = hmp->dirty_tx = 0;
/* Rx packets are also presumed lost; however, we need to make sure a
* ring of buffers is intact. -KDU
*/
for (i = 0; i < RX_RING_SIZE; i++){
struct sk_buff *skb = hmp->rx_skbuff[i];
if (skb){
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[i].addr),
hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
hmp->rx_skbuff[i] = NULL;
}
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
hmp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
}
hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/* Mark the last entry as wrapping the ring. */
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
/* Trigger an immediate transmit demand. */
dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
/* Restart the chip's Tx/Rx processes . */
writew(0x0002, ioaddr + TxCmd); /* STOP Tx */
writew(0x0001, ioaddr + TxCmd); /* START Tx */
writew(0x0001, ioaddr + RxCmd); /* START Rx */
netif_wake_queue(dev);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void hamachi_init_ring(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
int i;
hmp->tx_full = 0;
hmp->cur_rx = hmp->cur_tx = 0;
hmp->dirty_rx = hmp->dirty_tx = 0;
/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
* card needs room to do 8 byte alignment, +2 so we can reserve
* the first 2 bytes, and +16 gets room for the status word from the
* card. -KDU
*/
hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
(((dev->mtu+26+7) & ~7) + 16));
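/*
 * Editor's worked example (standard MTU of 1500, which exceeds 1492):
 *	rx_buf_sz = ((1500 + 26 + 7) & ~7) + 16
 *		  = (1533 & ~7) + 16 = 1528 + 16 = 1544 bytes
 */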
/* Initialize all Rx descriptors. */
for (i = 0; i < RX_RING_SIZE; i++) {
hmp->rx_ring[i].status_n_length = 0;
hmp->rx_skbuff[i] = NULL;
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
hmp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
/* -2 because it doesn't REALLY have that first 2 bytes -KDU */
hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
DescEndPacket | DescIntr | (hmp->rx_buf_sz -2));
}
hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
for (i = 0; i < TX_RING_SIZE; i++) {
hmp->tx_skbuff[i] = NULL;
hmp->tx_ring[i].status_n_length = 0;
}
/* Mark the last entry of the ring */
hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
}
static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
unsigned entry;
u16 status;
/* Ok, now make sure that the queue has space before trying to
add another skbuff. If we return non-zero, the scheduler
should interpret this as a full queue and requeue the buffer
for later.
*/
if (hmp->tx_full) {
/* We should NEVER reach this point -KDU */
printk(KERN_WARNING "%s: Hamachi transmit queue full at slot %d.\n",dev->name, hmp->cur_tx);
/* Wake the potentially-idle transmit channel. */
/* If we don't need to read status, DON'T -KDU */
status=readw(hmp->base + TxStatus);
if( !(status & 0x0001) || (status & 0x0002))
writew(0x0001, hmp->base + TxCmd);
return NETDEV_TX_BUSY;
}
/* Caution: the write order is important here, set the field
with the "ownership" bits last. */
/* Calculate the next Tx descriptor entry. */
entry = hmp->cur_tx % TX_RING_SIZE;
hmp->tx_skbuff[entry] = skb;
hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, skb->len, PCI_DMA_TODEVICE));
/* Hmmmm, could probably put a DescIntr on these, but the way
the driver is currently coded makes Tx interrupts unnecessary
since the clearing of the Tx ring is handled by the start_xmit
routine. This organization helps mitigate the interrupts a
bit and probably renders the max_tx_latency param useless.
Update: Putting a DescIntr bit on all of the descriptors and
mitigating interrupt frequency with the tx_min_pkt parameter. -KDU
*/
if (entry >= TX_RING_SIZE-1) /* Wrap ring */
hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
DescEndPacket | DescEndRing | DescIntr | skb->len);
else
hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
DescEndPacket | DescIntr | skb->len);
hmp->cur_tx++;
/* Non-x86 Todo: explicitly flush cache lines here. */
/* Wake the potentially-idle transmit channel. */
/* If we don't need to read status, DON'T -KDU */
status=readw(hmp->base + TxStatus);
if( !(status & 0x0001) || (status & 0x0002))
writew(0x0001, hmp->base + TxCmd);
/* Immediately before returning, let's clear as many entries as we can. */
hamachi_tx(dev);
/* We should kick the bottom half here, since we are not accepting
* interrupts with every packet, i.e. realize that Gigabit ethernet
* can transmit faster than ordinary machines can load packets;
* hence, any packet that got put off because we were in the transmit
* routine should IMMEDIATELY get a chance to be re-queued. -KDU
*/
if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4))
netif_wake_queue(dev); /* Typical path */
else {
hmp->tx_full = 1;
netif_stop_queue(dev);
}
if (hamachi_debug > 4) {
printk(KERN_DEBUG "%s: Hamachi transmit frame #%d queued in slot %d.\n",
dev->name, hmp->cur_tx, entry);
}
return NETDEV_TX_OK;
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
static irqreturn_t hamachi_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
long boguscnt = max_interrupt_work;
int handled = 0;
#ifndef final_version /* Can never occur. */
if (dev == NULL) {
printk (KERN_ERR "hamachi_interrupt(): irq %d for unknown device.\n", irq);
return IRQ_NONE;
}
#endif
spin_lock(&hmp->lock);
do {
u32 intr_status = readl(ioaddr + InterruptClear);
if (hamachi_debug > 4)
printk(KERN_DEBUG "%s: Hamachi interrupt, status %4.4x.\n",
dev->name, intr_status);
if (intr_status == 0)
break;
handled = 1;
if (intr_status & IntrRxDone)
hamachi_rx(dev);
if (intr_status & IntrTxDone){
/* This code should RARELY need to execute. After all, this is
* a gigabit link, it should consume packets as fast as we put
* them in AND we clear the Tx ring in hamachi_start_xmit().
*/
if (hmp->tx_full){
for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++){
int entry = hmp->dirty_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
break;
skb = hmp->tx_skbuff[entry];
/* Free the original skb. */
if (skb){
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->tx_ring[entry].addr),
skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
hmp->tx_skbuff[entry] = NULL;
}
hmp->tx_ring[entry].status_n_length = 0;
if (entry >= TX_RING_SIZE-1)
hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
cpu_to_le32(DescEndRing);
dev->stats.tx_packets++;
}
if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){
/* The ring is no longer full */
hmp->tx_full = 0;
netif_wake_queue(dev);
}
} else {
netif_wake_queue(dev);
}
}
/* Abnormal error summary/uncommon events handlers. */
if (intr_status &
(IntrTxPCIFault | IntrTxPCIErr | IntrRxPCIFault | IntrRxPCIErr |
LinkChange | NegotiationChange | StatsMax))
hamachi_error(dev, intr_status);
if (--boguscnt < 0) {
printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n",
dev->name, intr_status);
break;
}
} while (1);
if (hamachi_debug > 3)
printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
dev->name, readl(ioaddr + IntrStatus));
#ifndef final_version
/* Code that should never be run! Perhaps remove after testing.. */
{
static int stopit = 10;
if (dev->start == 0 && --stopit < 0) {
printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
dev->name);
free_irq(irq, dev);
}
}
#endif
spin_unlock(&hmp->lock);
return IRQ_RETVAL(handled);
}
/* This routine is logically part of the interrupt handler, but separated
for clarity and better register allocation. */
static int hamachi_rx(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
int entry = hmp->cur_rx % RX_RING_SIZE;
int boguscnt = (hmp->dirty_rx + RX_RING_SIZE) - hmp->cur_rx;
if (hamachi_debug > 4) {
printk(KERN_DEBUG " In hamachi_rx(), entry %d status %4.4x.\n",
entry, hmp->rx_ring[entry].status_n_length);
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
while (1) {
struct hamachi_desc *desc = &(hmp->rx_ring[entry]);
u32 desc_status = le32_to_cpu(desc->status_n_length);
u16 data_size = desc_status; /* Implicit truncate */
u8 *buf_addr;
s32 frame_status;
if (desc_status & DescOwn)
break;
pci_dma_sync_single_for_cpu(hmp->pci_dev,
leXX_to_cpu(desc->addr),
hmp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
frame_status = get_unaligned_le32(&(buf_addr[data_size - 12]));
if (hamachi_debug > 4)
printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n",
frame_status);
if (--boguscnt < 0)
break;
if ( ! (desc_status & DescEndPacket)) {
printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
"multiple buffers, entry %#x length %d status %4.4x!\n",
dev->name, hmp->cur_rx, data_size, desc_status);
printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]);
printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status %x/%x last status %x.\n",
dev->name,
le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000,
le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff,
le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length));
dev->stats.rx_length_errors++;
} /* else Omit for prototype errata??? */
if (frame_status & 0x00380000) {
/* There was an error. */
if (hamachi_debug > 2)
printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n",
frame_status);
dev->stats.rx_errors++;
if (frame_status & 0x00600000)
dev->stats.rx_length_errors++;
if (frame_status & 0x00080000)
dev->stats.rx_frame_errors++;
if (frame_status & 0x00100000)
dev->stats.rx_crc_errors++;
if (frame_status < 0)
dev->stats.rx_dropped++;
} else {
struct sk_buff *skb;
/* Omit CRC */
u16 pkt_len = (frame_status & 0x07ff) - 4;
#ifdef RX_CHECKSUM
u32 pfck = *(u32 *) &buf_addr[data_size - 8];
#endif
#ifndef final_version
if (hamachi_debug > 4)
printk(KERN_DEBUG " hamachi_rx() normal Rx pkt length %d"
" of %d, bogus_cnt %d.\n",
pkt_len, data_size, boguscnt);
if (hamachi_debug > 5)
printk(KERN_DEBUG"%s: rx status %8.8x %8.8x %8.8x %8.8x %8.8x.\n",
dev->name,
*(s32*)&(buf_addr[data_size - 20]),
*(s32*)&(buf_addr[data_size - 16]),
*(s32*)&(buf_addr[data_size - 12]),
*(s32*)&(buf_addr[data_size - 8]),
*(s32*)&(buf_addr[data_size - 4]));
#endif
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
#ifdef RX_CHECKSUM
printk(KERN_ERR "%s: rx_copybreak non-zero "
"not good with RX_CHECKSUM\n", dev->name);
#endif
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[entry].addr),
hmp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
/* Call copy + cksum if available. */
#if 1 || USE_IP_COPYSUM
skb_copy_to_linear_data(skb,
hmp->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
+ entry*sizeof(*desc), pkt_len);
#endif
pci_dma_sync_single_for_device(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[entry].addr),
hmp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
} else {
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[entry].addr),
hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb_put(skb = hmp->rx_skbuff[entry], pkt_len);
hmp->rx_skbuff[entry] = NULL;
}
skb->protocol = eth_type_trans(skb, dev);
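/* Editor's reading of the RX_CHECKSUM block below (a sketch, not
 * authoritative): the card appends a partial checksum after the frame;
 * the switch folds the 32 bit sum into 16 bits with an end-around carry
 * (the "++crc" after masking), then adjusts skb->csum so the stack can
 * finish the TCP/UDP pseudo-header verification. */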
#ifdef RX_CHECKSUM
/* TCP or UDP on ipv4, DIX encoding */
if (pfck>>24 == 0x91 || pfck>>24 == 0x51) {
struct iphdr *ih = (struct iphdr *) skb->data;
/* Check that IP packet is at least 46 bytes, otherwise,
* there may be pad bytes included in the hardware checksum.
* This wouldn't happen if everyone padded with 0.
*/
if (ntohs(ih->tot_len) >= 46){
/* don't worry about frags */
if (!(ih->frag_off & cpu_to_be16(IP_MF|IP_OFFSET))) {
u32 inv = *(u32 *) &buf_addr[data_size - 16];
u32 *p = (u32 *) &buf_addr[data_size - 20];
register u32 crc, p_r, p_r1;
if (inv & 4) {
inv &= ~4;
--p;
}
p_r = *p;
p_r1 = *(p-1);
switch (inv) {
case 0:
crc = (p_r & 0xffff) + (p_r >> 16);
break;
case 1:
crc = (p_r >> 16) + (p_r & 0xffff)
+ (p_r1 >> 16 & 0xff00);
break;
case 2:
crc = p_r + (p_r1 >> 16);
break;
case 3:
crc = p_r + (p_r1 & 0xff00) + (p_r1 >> 16);
break;
default: /*NOTREACHED*/ crc = 0;
}
if (crc & 0xffff0000) {
crc &= 0xffff;
++crc;
}
/* tcp/udp will add in pseudo */
skb->csum = ntohs(pfck & 0xffff);
if (skb->csum > crc)
skb->csum -= crc;
else
skb->csum += (~crc & 0xffff);
/*
* could do the pseudo myself and return
* CHECKSUM_UNNECESSARY
*/
skb->ip_summed = CHECKSUM_COMPLETE;
}
}
}
#endif /* RX_CHECKSUM */
netif_rx(skb);
dev->stats.rx_packets++;
}
entry = (++hmp->cur_rx) % RX_RING_SIZE;
}
/* Refill the Rx ring buffers. */
for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) {
struct hamachi_desc *desc;
entry = hmp->dirty_rx % RX_RING_SIZE;
desc = &(hmp->rx_ring[entry]);
if (hmp->rx_skbuff[entry] == NULL) {
struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
hmp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
}
desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
if (entry >= RX_RING_SIZE-1)
desc->status_n_length |= cpu_to_le32(DescOwn |
DescEndPacket | DescEndRing | DescIntr);
else
desc->status_n_length |= cpu_to_le32(DescOwn |
DescEndPacket | DescIntr);
}
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
if (readw(hmp->base + RxStatus) & 0x0002)
writew(0x0001, hmp->base + RxCmd);
return 0;
}
/* This is more properly named "uncommon interrupt events", as it covers more
than just errors. */
static void hamachi_error(struct net_device *dev, int intr_status)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
if (intr_status & (LinkChange|NegotiationChange)) {
if (hamachi_debug > 1)
printk(KERN_INFO "%s: Link changed: AutoNegotiation Ctrl"
" %4.4x, Status %4.4x %4.4x Intr status %4.4x.\n",
dev->name, readw(ioaddr + 0x0E0), readw(ioaddr + 0x0E2),
readw(ioaddr + ANLinkPartnerAbility),
readl(ioaddr + IntrStatus));
if (readw(ioaddr + ANStatus) & 0x20)
writeb(0x01, ioaddr + LEDCtrl);
else
writeb(0x03, ioaddr + LEDCtrl);
}
if (intr_status & StatsMax) {
hamachi_get_stats(dev);
/* Read the overflow bits to clear. */
readl(ioaddr + 0x370);
readl(ioaddr + 0x3F0);
}
if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange|IntrRxDone|IntrTxDone)) &&
hamachi_debug)
printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
dev->name, intr_status);
/* Hmmmmm, it's not clear how to recover from PCI faults. */
if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
dev->stats.tx_fifo_errors++;
if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
dev->stats.rx_fifo_errors++;
}
static int hamachi_close(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
struct sk_buff *skb;
int i;
netif_stop_queue(dev);
if (hamachi_debug > 1) {
printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n",
dev->name, readw(ioaddr + TxStatus),
readw(ioaddr + RxStatus), readl(ioaddr + IntrStatus));
printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
dev->name, hmp->cur_tx, hmp->dirty_tx, hmp->cur_rx, hmp->dirty_rx);
}
/* Disable interrupts by clearing the interrupt mask. */
writel(0x0000, ioaddr + InterruptEnable);
/* Stop the chip's Tx and Rx processes. */
writel(2, ioaddr + RxCmd);
writew(2, ioaddr + TxCmd);
#ifdef __i386__
if (hamachi_debug > 2) {
printk(KERN_DEBUG " Tx ring at %8.8x:\n",
(int)hmp->tx_ring_dma);
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x.\n",
readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
printk(KERN_DEBUG " Rx ring %8.8x:\n",
(int)hmp->rx_ring_dma);
for (i = 0; i < RX_RING_SIZE; i++) {
printk(KERN_DEBUG " %c #%d desc. %4.4x %8.8x\n",
readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
if (hamachi_debug > 6) {
if (*(u8*)hmp->rx_skbuff[i]->data != 0x69) {
u16 *addr = (u16 *)
hmp->rx_skbuff[i]->data;
int j;
printk(KERN_DEBUG "Addr: ");
for (j = 0; j < 0x50; j++)
printk(" %4.4x", addr[j]);
printk("\n");
}
}
}
}
#endif /* __i386__ debugging only */
free_irq(dev->irq, dev);
del_timer_sync(&hmp->timer);
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
skb = hmp->rx_skbuff[i];
hmp->rx_ring[i].status_n_length = 0;
if (skb) {
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[i].addr),
hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
hmp->rx_skbuff[i] = NULL;
}
hmp->rx_ring[i].addr = cpu_to_leXX(0xBADF00D0); /* An invalid address. */
}
for (i = 0; i < TX_RING_SIZE; i++) {
skb = hmp->tx_skbuff[i];
if (skb) {
pci_unmap_single(hmp->pci_dev,
leXX_to_cpu(hmp->tx_ring[i].addr),
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
hmp->tx_skbuff[i] = NULL;
}
}
writeb(0x00, ioaddr + LEDCtrl);
return 0;
}
static struct net_device_stats *hamachi_get_stats(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
/* We should lock this segment of code for SMP eventually, although
the vulnerability window is very small and statistics are
non-critical. */
/* Ok, what goes here? This appears to be stuck at 21 packets
according to ifconfig. It does get incremented in hamachi_tx(),
so I think I'll comment it out here and see if better things
happen.
*/
/* dev->stats.tx_packets = readl(ioaddr + 0x000); */
/* Total Uni+Brd+Multi */
dev->stats.rx_bytes = readl(ioaddr + 0x330);
/* Total Uni+Brd+Multi */
dev->stats.tx_bytes = readl(ioaddr + 0x3B0);
/* Multicast Rx */
dev->stats.multicast = readl(ioaddr + 0x320);
/* Over+Undersized */
dev->stats.rx_length_errors = readl(ioaddr + 0x368);
/* Jabber */
dev->stats.rx_over_errors = readl(ioaddr + 0x35C);
/* Jabber */
dev->stats.rx_crc_errors = readl(ioaddr + 0x360);
/* Symbol Errs */
dev->stats.rx_frame_errors = readl(ioaddr + 0x364);
/* Dropped */
dev->stats.rx_missed_errors = readl(ioaddr + 0x36C);
return &dev->stats;
}
static void set_rx_mode(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
void __iomem *ioaddr = hmp->base;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writew(0x000F, ioaddr + AddrMode);
} else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
writew(0x000B, ioaddr + AddrMode);
} else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
struct netdev_hw_addr *ha;
int i = 0;
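/* Each CAM entry occupies 8 bytes: the first writel() below takes the
 * low 32 bits of the MAC address, the second takes the remaining 16
 * bits with 0x20000 (presumably an entry-valid flag) ORed in. */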
netdev_for_each_mc_addr(ha, dev) {
writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8);
writel(0x20000 | (*(u16 *)&ha->addr[4]),
ioaddr + 0x104 + i*8);
i++;
}
/* Clear remaining entries. */
for (; i < 64; i++)
writel(0, ioaddr + 0x104 + i*8);
writew(0x0003, ioaddr + AddrMode);
} else { /* Normal, unicast/broadcast-only mode. */
writew(0x0001, ioaddr + AddrMode);
}
}
static int check_if_running(struct net_device *dev)
{
if (!netif_running(dev))
return -EINVAL;
return 0;
}
static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct hamachi_private *np = netdev_priv(dev);
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
strcpy(info->bus_info, pci_name(np->pci_dev));
}
static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct hamachi_private *np = netdev_priv(dev);
spin_lock_irq(&np->lock);
mii_ethtool_gset(&np->mii_if, ecmd);
spin_unlock_irq(&np->lock);
return 0;
}
static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct hamachi_private *np = netdev_priv(dev);
int res;
spin_lock_irq(&np->lock);
res = mii_ethtool_sset(&np->mii_if, ecmd);
spin_unlock_irq(&np->lock);
return res;
}
static int hamachi_nway_reset(struct net_device *dev)
{
struct hamachi_private *np = netdev_priv(dev);
return mii_nway_restart(&np->mii_if);
}
static u32 hamachi_get_link(struct net_device *dev)
{
struct hamachi_private *np = netdev_priv(dev);
return mii_link_ok(&np->mii_if);
}
static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = hamachi_get_drvinfo,
.get_settings = hamachi_get_settings,
.set_settings = hamachi_set_settings,
.nway_reset = hamachi_nway_reset,
.get_link = hamachi_get_link,
};
static const struct ethtool_ops ethtool_ops_no_mii = {
.begin = check_if_running,
.get_drvinfo = hamachi_get_drvinfo,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct hamachi_private *np = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
int rc;
if (!netif_running(dev))
return -EINVAL;
if (cmd == (SIOCDEVPRIVATE+3)) { /* set rx,tx intr params */
u32 *d = (u32 *)&rq->ifr_ifru;
/* Should add this check here or an ordinary user can do nasty
* things. -KDU
*
* TODO: Shut down the Rx and Tx engines while doing this.
*/
if (!capable(CAP_NET_ADMIN))
return -EPERM;
writel(d[0], np->base + TxIntrCtrl);
writel(d[1], np->base + RxIntrCtrl);
printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name,
(u32) readl(np->base + TxIntrCtrl),
(u32) readl(np->base + RxIntrCtrl));
rc = 0;
}
else {
spin_lock_irq(&np->lock);
rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
spin_unlock_irq(&np->lock);
}
return rc;
}
static void __devexit hamachi_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (dev) {
struct hamachi_private *hmp = netdev_priv(dev);
pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
hmp->rx_ring_dma);
pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
hmp->tx_ring_dma);
unregister_netdev(dev);
iounmap(hmp->base);
free_netdev(dev);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
}
}
static DEFINE_PCI_DEVICE_TABLE(hamachi_pci_tbl) = {
{ 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, hamachi_pci_tbl);
static struct pci_driver hamachi_driver = {
.name = DRV_NAME,
.id_table = hamachi_pci_tbl,
.probe = hamachi_init_one,
.remove = __devexit_p(hamachi_remove_one),
};
static int __init hamachi_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
printk(version);
#endif
return pci_register_driver(&hamachi_driver);
}
static void __exit hamachi_exit (void)
{
pci_unregister_driver(&hamachi_driver);
}
module_init(hamachi_init);
module_exit(hamachi_exit);
| gpl-2.0 |
Dazzozo/huawei-kernel-3.4 | lib/swiotlb.c | 4805 | 26277 | /*
* Dynamic DMA mapping support.
*
* This implementation is a fallback for platforms that do not support
* I/O TLBs (aka DMA address translation hardware).
* Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
* Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
* Copyright (C) 2000, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
* 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
* unnecessary i-cache flushing.
* 04/07/.. ak Better overflow handling. Assorted fixes.
* 05/09/10 linville Add support for syncing ranges, support syncing for
* DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
* 08/12/11 beckyb Add highmem support
*/
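/*
 * Minimal usage sketch (not compiled in), assuming a driver that uses
 * the generic streaming DMA API; "dev", "buf" and "len" are
 * hypothetical. When the device cannot address "buf" directly, these
 * calls fall through to the bounce-buffer paths implemented below.
 */
#if 0
dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, handle))
	return -ENOMEM;
/* ... let the device perform the transfer ... */
dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
#endif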
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
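/* For power-of-two alignments OFFSET() is simply val % align,
 * e.g. OFFSET(0x1234, 0x1000) == 0x234. */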
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
/*
* Minimum IO TLB size to bother booting with. Systems with mainly
* 64bit capable cards will only lightly use the swiotlb. If we can't
* allocate a contiguous 1MB, we're probably in trouble anyway.
*/
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
int swiotlb_force;
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
static char *io_tlb_start, *io_tlb_end;
/*
* The number of IO TLB blocks (in groups of 64) between io_tlb_start and
* io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
*/
static unsigned long io_tlb_nslabs;
/*
* When the IOMMU overflows we return a fallback buffer. This sets the size.
*/
static unsigned long io_tlb_overflow = 32*1024;
static void *io_tlb_overflow_buffer;
/*
* This is a free list describing the number of free entries available from
* each index
*/
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
/*
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
static phys_addr_t *io_tlb_orig_addr;
/*
* Protect the above data structures in the map and unmap calls
*/
static DEFINE_SPINLOCK(io_tlb_lock);
static int late_alloc;
static int __init
setup_io_tlb_npages(char *str)
{
if (isdigit(*str)) {
io_tlb_nslabs = simple_strtoul(str, &str, 0);
/* avoid tail segment of size < IO_TLB_SEGSIZE */
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
if (*str == ',')
++str;
if (!strcmp(str, "force"))
swiotlb_force = 1;
return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
unsigned long swiotlb_nr_tbl(void)
{
return io_tlb_nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
volatile void *address)
{
return phys_to_dma(hwdev, virt_to_phys(address));
}
void swiotlb_print_info(void)
{
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
phys_addr_t pstart, pend;
pstart = virt_to_phys(io_tlb_start);
pend = virt_to_phys(io_tlb_end);
printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
bytes >> 20, io_tlb_start, io_tlb_end);
printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
(unsigned long long)pstart,
(unsigned long long)pend);
}
void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
unsigned long i, bytes;
bytes = nslabs << IO_TLB_SHIFT;
io_tlb_nslabs = nslabs;
io_tlb_start = tlb;
io_tlb_end = io_tlb_start + bytes;
/*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
for (i = 0; i < io_tlb_nslabs; i++)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
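/* With IO_TLB_SEGSIZE == 128 this yields the pattern 128, 127, ..., 1,
 * 128, 127, ...: each entry records how many free slots remain from
 * that index to the end of its segment. */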
io_tlb_index = 0;
io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
/*
* Get the overflow emergency buffer
*/
io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
if (!io_tlb_overflow_buffer)
panic("Cannot allocate SWIOTLB overflow buffer!\n");
if (verbose)
swiotlb_print_info();
}
/*
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
*/
void __init
swiotlb_init_with_default_size(size_t default_size, int verbose)
{
unsigned long bytes;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/*
* Get IO TLB memory from the low pages
*/
io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
}
void __init
swiotlb_init(int verbose)
{
swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */
}
/*
* Systems with larger DMA zones (those that don't support ISA) can
* initialize the swiotlb later using the slab allocator if needed.
* This should be just like above, but with some error catching.
*/
int
swiotlb_late_init_with_default_size(size_t default_size)
{
unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
unsigned int order;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
/*
* Get IO TLB memory from the low pages
*/
order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
io_tlb_nslabs = SLABS_PER_PAGE << order;
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
order);
if (io_tlb_start)
break;
order--;
}
if (!io_tlb_start)
goto cleanup1;
if (order != get_order(bytes)) {
printk(KERN_WARNING "Warning: only able to allocate %ld MB "
"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
}
io_tlb_end = io_tlb_start + bytes;
memset(io_tlb_start, 0, bytes);
/*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs * sizeof(int)));
if (!io_tlb_list)
goto cleanup2;
for (i = 0; i < io_tlb_nslabs; i++)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
io_tlb_orig_addr = (phys_addr_t *)
__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs *
sizeof(phys_addr_t)));
if (!io_tlb_orig_addr)
goto cleanup3;
memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
/*
* Get the overflow emergency buffer
*/
io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
get_order(io_tlb_overflow));
if (!io_tlb_overflow_buffer)
goto cleanup4;
swiotlb_print_info();
late_alloc = 1;
return 0;
cleanup4:
free_pages((unsigned long)io_tlb_orig_addr,
get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
io_tlb_orig_addr = NULL;
cleanup3:
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
io_tlb_list = NULL;
cleanup2:
io_tlb_end = NULL;
free_pages((unsigned long)io_tlb_start, order);
io_tlb_start = NULL;
cleanup1:
io_tlb_nslabs = req_nslabs;
return -ENOMEM;
}
void __init swiotlb_free(void)
{
if (!io_tlb_overflow_buffer)
return;
if (late_alloc) {
free_pages((unsigned long)io_tlb_overflow_buffer,
get_order(io_tlb_overflow));
free_pages((unsigned long)io_tlb_orig_addr,
get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
free_pages((unsigned long)io_tlb_start,
get_order(io_tlb_nslabs << IO_TLB_SHIFT));
} else {
free_bootmem_late(__pa(io_tlb_overflow_buffer),
PAGE_ALIGN(io_tlb_overflow));
free_bootmem_late(__pa(io_tlb_orig_addr),
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
free_bootmem_late(__pa(io_tlb_list),
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
free_bootmem_late(__pa(io_tlb_start),
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
io_tlb_nslabs = 0;
}
static int is_swiotlb_buffer(phys_addr_t paddr)
{
return paddr >= virt_to_phys(io_tlb_start) &&
paddr < virt_to_phys(io_tlb_end);
}
/*
* Bounce: copy the swiotlb buffer back to the original dma location
*/
void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
enum dma_data_direction dir)
{
unsigned long pfn = PFN_DOWN(phys);
if (PageHighMem(pfn_to_page(pfn))) {
/* The buffer does not have a mapping. Map it in and copy */
unsigned int offset = phys & ~PAGE_MASK;
char *buffer;
unsigned int sz = 0;
unsigned long flags;
while (size) {
sz = min_t(size_t, PAGE_SIZE - offset, size);
local_irq_save(flags);
buffer = kmap_atomic(pfn_to_page(pfn));
if (dir == DMA_TO_DEVICE)
memcpy(dma_addr, buffer + offset, sz);
else
memcpy(buffer + offset, dma_addr, sz);
kunmap_atomic(buffer);
local_irq_restore(flags);
size -= sz;
pfn++;
dma_addr += sz;
offset = 0;
}
} else {
if (dir == DMA_TO_DEVICE)
memcpy(dma_addr, phys_to_virt(phys), size);
else
memcpy(phys_to_virt(phys), dma_addr, size);
}
}
EXPORT_SYMBOL_GPL(swiotlb_bounce);
void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
phys_addr_t phys, size_t size,
enum dma_data_direction dir)
{
unsigned long flags;
char *dma_addr;
unsigned int nslots, stride, index, wrap;
int i;
unsigned long mask;
unsigned long offset_slots;
unsigned long max_slots;
mask = dma_get_seg_boundary(hwdev);
tbl_dma_addr &= mask;
offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
/*
* Carefully handle integer overflow which can occur when mask == ~0UL.
*/
max_slots = mask + 1
? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
/*
* For mappings greater than a page, we limit the stride (and
* hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
if (size > PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
BUG_ON(!nslots);
/*
* Find a suitable number of IO TLB entries that will fit this
* request and allocate a buffer from that IO TLB pool.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
index = ALIGN(io_tlb_index, stride);
if (index >= io_tlb_nslabs)
index = 0;
wrap = index;
do {
while (iommu_is_span_boundary(index, nslots, offset_slots,
max_slots)) {
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
if (index == wrap)
goto not_found;
}
/*
* If we find a slot that indicates we have 'nslots' number of
* contiguous buffers, we allocate the buffers from that slot
* and mark the entries as '0' indicating unavailable.
*/
if (io_tlb_list[index] >= nslots) {
int count = 0;
for (i = index; i < (int) (index + nslots); i++)
io_tlb_list[i] = 0;
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
io_tlb_list[i] = ++count;
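/* The backward walk above rewrites the counts of the free slots that
 * precede this allocation, so each still records the distance to the
 * next used slot (or segment boundary). */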
dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
/*
* Update the indices to avoid searching in the next
* round.
*/
io_tlb_index = ((index + nslots) < io_tlb_nslabs
? (index + nslots) : 0);
goto found;
}
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
} while (index != wrap);
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
return NULL;
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
/*
* Save away the mapping from the original address to the DMA address.
* This is needed when we sync the memory. Then we sync the buffer if
* needed.
*/
for (i = 0; i < nslots; i++)
io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
return dma_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
/*
* Allocates bounce buffer and returns its kernel virtual address.
*/
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
enum dma_data_direction dir)
{
dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
}
/*
* dma_addr is the kernel virtual address of the bounce buffer to unmap.
*/
void
swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
enum dma_data_direction dir)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
phys_addr_t phys = io_tlb_orig_addr[index];
/*
* First, sync the memory before unmapping the entry
*/
if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
/*
* Return the buffer to the free list by setting the corresponding
* entries to indicate the number of contiguous entries available.
* While returning the entries to the free list, we merge the entries
* with slots below and above the pool being returned.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
{
count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
io_tlb_list[index + nslots] : 0);
/*
* Step 1: return the slots to the free list, merging the
* slots with the succeeding slots
*/
for (i = index + nslots - 1; i >= index; i--)
io_tlb_list[i] = ++count;
/*
* Step 2: merge the returned slots with the preceding slots,
* if available (non-zero)
*/
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
io_tlb_list[i] = ++count;
}
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
void
swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
enum dma_data_direction dir,
enum dma_sync_target target)
{
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
phys_addr_t phys = io_tlb_orig_addr[index];
phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
else
BUG_ON(dir != DMA_FROM_DEVICE);
break;
default:
BUG();
}
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
dma_addr_t dev_addr;
void *ret;
int order = get_order(size);
u64 dma_mask = DMA_BIT_MASK(32);
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
/*
* The allocated memory isn't reachable by the device.
*/
free_pages((unsigned long) ret, order);
ret = NULL;
}
if (!ret) {
/*
* We are either out of memory or the device can't DMA to
* GFP_DMA memory; fall back on map_single(), which
* will grab memory from the lowest available address range.
*/
ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
if (!ret)
return NULL;
}
memset(ret, 0, size);
dev_addr = swiotlb_virt_to_bus(hwdev, ret);
/* Confirm address can be DMA'd by device */
if (dev_addr + size - 1 > dma_mask) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)dma_mask,
(unsigned long long)dev_addr);
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
return NULL;
}
*dma_handle = dev_addr;
return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dev_addr)
{
phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
WARN_ON(irqs_disabled());
if (!is_swiotlb_buffer(paddr))
free_pages((unsigned long)vaddr, get_order(size));
else
/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
int do_panic)
{
/*
* Ran out of IOMMU space for this operation. This is very bad.
* Unfortunately, drivers cannot handle this operation properly
* unless they check for dma_mapping_error (most don't).
* When the mapping is small enough, return a static buffer to limit
* the damage, or panic when the transfer is too big.
*/
printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
"device %s\n", size, dev ? dev_name(dev) : "?");
if (size <= io_tlb_overflow || !do_panic)
return;
if (dir == DMA_BIDIRECTIONAL)
panic("DMA: Random memory could be DMA accessed\n");
if (dir == DMA_FROM_DEVICE)
panic("DMA: Random memory could be DMA written\n");
if (dir == DMA_TO_DEVICE)
panic("DMA: Random memory could be DMA read\n");
}
/*
* Map a single buffer of the indicated size for DMA in streaming mode. The
* physical address to use is returned.
*
* Once the device is given the dma address, the device owns this memory until
* either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
*/
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = phys_to_dma(dev, phys);
void *map;
BUG_ON(dir == DMA_NONE);
/*
* If the address happens to be in the device's DMA window,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
return dev_addr;
/*
* Oh well, have to allocate and map a bounce buffer.
*/
map = map_single(dev, phys, size, dir);
if (!map) {
swiotlb_full(dev, size, dir, 1);
map = io_tlb_overflow_buffer;
}
dev_addr = swiotlb_virt_to_bus(dev, map);
/*
* Ensure that the address returned is DMA'ble
*/
if (!dma_capable(dev, dev_addr, size)) {
swiotlb_tbl_unmap_single(dev, map, size, dir);
dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
}
return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
* match what was provided for in a previous swiotlb_map_page call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir)
{
phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(paddr)) {
swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
return;
}
if (dir != DMA_FROM_DEVICE)
return;
/*
* phys_to_virt doesn't work with highmem pages, but we could
* call dma_mark_clean() with a highmem page here. However, we
* are fine since dma_mark_clean() is a no-op on POWERPC. We can
* make dma_mark_clean() take a physical address if necessary.
*/
dma_mark_clean(phys_to_virt(paddr), size);
}
void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
/*
* Make physical memory consistent for a single streaming mode DMA translation
* after a transfer.
*
* If you perform a swiotlb_map_page() but wish to interrogate the buffer
* using the cpu, yet do not wish to teardown the dma mapping, you must
* call this function before doing so. At the next point you give the dma
* address back to the card, you must first perform a
* swiotlb_dma_sync_for_device, and then the device again owns the buffer
*/
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target)
{
phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(paddr)) {
swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
target);
return;
}
if (dir != DMA_FROM_DEVICE)
return;
dma_mark_clean(phys_to_virt(paddr), size);
}
void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir)
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir)
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
* This is the scatter-gather version of the above swiotlb_map_page
* interface. Here the scatter gather list elements are each tagged with the
* appropriate dma address and length. They are obtained via
* sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
* Device ownership issues as mentioned above for swiotlb_map_page are the
* same here.
*/
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
phys_addr_t paddr = sg_phys(sg);
dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
if (swiotlb_force ||
!dma_capable(hwdev, dev_addr, sg->length)) {
void *map = map_single(hwdev, sg_phys(sg),
sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
swiotlb_full(hwdev, sg->length, dir, 0);
swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
attrs);
sgl[0].dma_length = 0;
return 0;
}
sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
} else
sg->dma_address = dev_addr;
sg->dma_length = sg->length;
}
return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);
int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
* concerning calls here are the same as for swiotlb_unmap_page() above.
*/
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);
/*
* Make physical memory consistent for a set of streaming mode DMA translations
* after a transfer.
*
* The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
* and usage.
*/
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
enum dma_sync_target target)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nelems, i)
swiotlb_sync_single(hwdev, sg->dma_address,
sg->dma_length, dir, target);
}
void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during bus mastering, then you would pass 0x00ffffff as the mask to
* this function.
*/
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);
| gpl-2.0 |
wanam/Adam-Kernel-GS4-LTE | arch/sh/drivers/pci/fixups-se7751.c | 5061 | 4136 | #include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/io.h>
#include "pci-sh4.h"
int __init pcibios_map_platform_irq(const struct pci_dev *, u8 slot, u8 pin)
{
switch (slot) {
case 0: return 13;
case 1: return 13; /* AMD Ethernet controller */
case 2: return -1;
case 3: return -1;
case 4: return -1;
default:
printk("PCI: Bad IRQ mapping request for slot %d\n", slot);
return -1;
}
}
#define PCIMCR_MRSET_OFF 0xBFFFFFFF
#define PCIMCR_RFSH_OFF 0xFFFFFFFB
/*
* Only long word accesses of the PCIC's internal local registers and the
* configuration registers from the CPU are supported.
*/
#define PCIC_WRITE(x,v) writel((v), PCI_REG(x))
#define PCIC_READ(x) readl(PCI_REG(x))
/*
* Description: This function sets up and initializes the pcic, sets
* up the BARS, maps the DRAM into the address space etc, etc.
*/
int pci_fixup_pcic(struct pci_channel *chan)
{
unsigned long bcr1, wcr1, wcr2, wcr3, mcr;
unsigned short bcr2;
/*
* Initialize the slave bus controller on the pcic. The values used
* here should not be hardcoded, but they should be taken from the bsc
* on the processor, to make this function as generic as possible.
* (i.e. another SBC may use different SDRAM timing settings -- in order
* for the pcic to work, its settings need to be exactly the same.)
*/
bcr1 = (*(volatile unsigned long*)(SH7751_BCR1));
bcr2 = (*(volatile unsigned short*)(SH7751_BCR2));
wcr1 = (*(volatile unsigned long*)(SH7751_WCR1));
wcr2 = (*(volatile unsigned long*)(SH7751_WCR2));
wcr3 = (*(volatile unsigned long*)(SH7751_WCR3));
mcr = (*(volatile unsigned long*)(SH7751_MCR));
bcr1 = bcr1 | 0x00080000; /* Enable Bit 19, BREQEN */
(*(volatile unsigned long*)(SH7751_BCR1)) = bcr1;
bcr1 = bcr1 | 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */
PCIC_WRITE(SH7751_PCIBCR1, bcr1); /* PCIC BCR1 */
PCIC_WRITE(SH7751_PCIBCR2, bcr2); /* PCIC BCR2 */
PCIC_WRITE(SH7751_PCIWCR1, wcr1); /* PCIC WCR1 */
PCIC_WRITE(SH7751_PCIWCR2, wcr2); /* PCIC WCR2 */
PCIC_WRITE(SH7751_PCIWCR3, wcr3); /* PCIC WCR3 */
mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
PCIC_WRITE(SH7751_PCIMCR, mcr); /* PCIC MCR */
/* Enable all interrupts, so we know what to fix */
PCIC_WRITE(SH7751_PCIINTM, 0x0000c3ff);
PCIC_WRITE(SH7751_PCIAINTM, 0x0000380f);
/* Set up standard PCI config registers */
PCIC_WRITE(SH7751_PCICONF1, 0xF39000C7); /* Bus Master, Mem & I/O access */
PCIC_WRITE(SH7751_PCICONF2, 0x00000000); /* PCI Class code & Revision ID */
PCIC_WRITE(SH7751_PCICONF4, 0xab000001); /* PCI I/O address (local regs) */
PCIC_WRITE(SH7751_PCICONF5, 0x0c000000); /* PCI MEM address (local RAM) */
PCIC_WRITE(SH7751_PCICONF6, 0xd0000000); /* PCI MEM address (unused) */
PCIC_WRITE(SH7751_PCICONF11, 0x35051054); /* PCI Subsystem ID & Vendor ID */
PCIC_WRITE(SH7751_PCILSR0, 0x03f00000); /* MEM (full 64M exposed) */
PCIC_WRITE(SH7751_PCILSR1, 0x00000000); /* MEM (unused) */
PCIC_WRITE(SH7751_PCILAR0, 0x0c000000); /* MEM (direct map from PCI) */
PCIC_WRITE(SH7751_PCILAR1, 0x00000000); /* MEM (unused) */
/* Now turn it on... */
PCIC_WRITE(SH7751_PCICR, 0xa5000001);
/*
* Set PCIMBR and PCIIOBR here, assuming a single window
* (16M MEM, 256K IO) is enough. If a larger space is
* needed, the readx/writex and inx/outx functions will
* have to do more (e.g. setting registers for each call).
*/
/*
* Set the MBR so PCI address is one-to-one with window,
* meaning all calls go straight through... use BUG_ON to
* catch erroneous assumption.
*/
BUG_ON(chan->resources[1].start != SH7751_PCI_MEMORY_BASE);
PCIC_WRITE(SH7751_PCIMBR, chan->resources[1].start);
/* Set IOBR for window containing area specified in pci.h */
PCIC_WRITE(SH7751_PCIIOBR, (chan->resources[0].start & SH7751_PCIIOBR_MASK));
/* All done, may as well say so... */
printk("SH7751 PCI: Finished initialization of the PCI controller\n");
return 1;
}
| gpl-2.0 |
wyldstallyns/Roughneck_kernel_m8-GPE-5.1 | arch/x86/um/os-Linux/mcontext.c | 9669 | 1132 | #include <sys/ucontext.h>
#define __FRAME_OFFSETS
#include <asm/ptrace.h>
#include <sysdep/ptrace.h>
void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
{
#ifdef __i386__
#define COPY2(X,Y) regs->gp[X] = mc->gregs[REG_##Y]
#define COPY(X) regs->gp[X] = mc->gregs[REG_##X]
#define COPY_SEG(X) regs->gp[X] = mc->gregs[REG_##X] & 0xffff;
#define COPY_SEG_CPL3(X) regs->gp[X] = (mc->gregs[REG_##X] & 0xffff) | 3;
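/* The "| 3" in COPY_SEG_CPL3() forces the selector's requested
 * privilege level (its low two bits) to ring 3, since these are
 * user-space segment registers being reconstructed. */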
COPY_SEG(GS); COPY_SEG(FS); COPY_SEG(ES); COPY_SEG(DS);
COPY(EDI); COPY(ESI); COPY(EBP);
COPY2(UESP, ESP); /* sic */
COPY(EBX); COPY(EDX); COPY(ECX); COPY(EAX);
COPY(EIP); COPY_SEG_CPL3(CS); COPY(EFL); COPY_SEG_CPL3(SS);
#else
#define COPY2(X,Y) regs->gp[X/sizeof(unsigned long)] = mc->gregs[REG_##Y]
#define COPY(X) regs->gp[X/sizeof(unsigned long)] = mc->gregs[REG_##X]
COPY(R8); COPY(R9); COPY(R10); COPY(R11);
COPY(R12); COPY(R13); COPY(R14); COPY(R15);
COPY(RDI); COPY(RSI); COPY(RBP); COPY(RBX);
COPY(RDX); COPY(RAX); COPY(RCX); COPY(RSP);
COPY(RIP);
COPY2(EFLAGS, EFL);
COPY2(CS, CSGSFS);
regs->gp[CS / sizeof(unsigned long)] &= 0xffff;
regs->gp[CS / sizeof(unsigned long)] |= 3;
#endif
}
| gpl-2.0 |
aopp/android_kernel_asus_grouper | arch/frv/kernel/pm-mb93093.c | 13765 | 1429 | /*
* FR-V MB93093 Power Management Routines
*
* Copyright (c) 2004 Red Hat, Inc.
*
* Written by: msalter@redhat.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License.
*
*/
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/mb86943a.h>
#include "local.h"
static unsigned long imask;
/*
* Setup interrupt masks, etc to enable wakeup by power switch
*/
static void mb93093_power_switch_setup(void)
{
/* mask all but FPGA interrupt sources. */
imask = *(volatile unsigned long *)0xfeff9820;
*(volatile unsigned long *)0xfeff9820 = ~(1 << (IRQ_XIRQ2_LEVEL + 16)) & 0xfffe0000;
}
/*
* Cleanup interrupt masks, etc after wakeup by power switch
*/
static void mb93093_power_switch_cleanup(void)
{
*(volatile unsigned long *)0xfeff9820 = imask;
}
/*
* Return non-zero if wakeup irq was caused by power switch
*/
static int mb93093_power_switch_check(void)
{
return 1;
}
/*
* Initialize power interface
*/
static int __init mb93093_pm_init(void)
{
__power_switch_wake_setup = mb93093_power_switch_setup;
__power_switch_wake_check = mb93093_power_switch_check;
__power_switch_wake_cleanup = mb93093_power_switch_cleanup;
return 0;
}
__initcall(mb93093_pm_init);
| gpl-2.0 |
percy-g2/Novathor_xperia_u8500 | 6.1.1.B.0.253/external/iptables/extensions/libxt_pkttype.c | 454 | 3234 | /*
* Shared library add-on to iptables to match
* packets by their type (BROADCAST, UNICAST, MULTICAST).
*
* Michal Ludvig <michal@logix.cz>
*/
#include <stdio.h>
#include <string.h>
#include <xtables.h>
#include <linux/if_packet.h>
#include <linux/netfilter/xt_pkttype.h>
enum {
O_PKTTYPE = 0,
};
struct pkttypes {
const char *name;
unsigned char pkttype;
unsigned char printhelp;
const char *help;
};
static const struct pkttypes supported_types[] = {
{"unicast", PACKET_HOST, 1, "to us"},
{"broadcast", PACKET_BROADCAST, 1, "to all"},
{"multicast", PACKET_MULTICAST, 1, "to group"},
/*
{"otherhost", PACKET_OTHERHOST, 1, "to someone else"},
{"outgoing", PACKET_OUTGOING, 1, "outgoing of any type"},
*/
/* aliases */
{"bcast", PACKET_BROADCAST, 0, NULL},
{"mcast", PACKET_MULTICAST, 0, NULL},
{"host", PACKET_HOST, 0, NULL}
};
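/* Usage sketch (assuming a standard iptables install):
 *   iptables -A INPUT -m pkttype --pkt-type multicast -j DROP
 * "bcast", "mcast" and "host" are accepted as aliases for the
 * canonical names above. */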
static void print_types(void)
{
unsigned int i;
printf("Valid packet types:\n");
for (i = 0; i < ARRAY_SIZE(supported_types); ++i)
if(supported_types[i].printhelp == 1)
printf("\t%-14s\t\t%s\n", supported_types[i].name, supported_types[i].help);
printf("\n");
}
static void pkttype_help(void)
{
printf(
"pkttype match options:\n"
"[!] --pkt-type packettype match packet type\n");
print_types();
}
static const struct xt_option_entry pkttype_opts[] = {
{.name = "pkt-type", .id = O_PKTTYPE, .type = XTTYPE_STRING,
.flags = XTOPT_MAND | XTOPT_INVERT},
XTOPT_TABLEEND,
};
static void parse_pkttype(const char *pkttype, struct xt_pkttype_info *info)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(supported_types); ++i)
if(strcasecmp(pkttype, supported_types[i].name)==0)
{
info->pkttype=supported_types[i].pkttype;
return;
}
xtables_error(PARAMETER_PROBLEM, "Bad packet type '%s'", pkttype);
}
static void pkttype_parse(struct xt_option_call *cb)
{
struct xt_pkttype_info *info = cb->data;
xtables_option_parse(cb);
parse_pkttype(cb->arg, info);
if (cb->invert)
info->invert = 1;
}
static void print_pkttype(const struct xt_pkttype_info *info)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(supported_types); ++i)
if(supported_types[i].pkttype==info->pkttype)
{
printf("%s", supported_types[i].name);
return;
}
printf("%d", info->pkttype); /* in case we didn't find an entry in named-packtes */
}
static void pkttype_print(const void *ip, const struct xt_entry_match *match,
int numeric)
{
const struct xt_pkttype_info *info = (const void *)match->data;
printf(" PKTTYPE %s= ", info->invert ? "!" : "");
print_pkttype(info);
}
static void pkttype_save(const void *ip, const struct xt_entry_match *match)
{
const struct xt_pkttype_info *info = (const void *)match->data;
printf("%s --pkt-type ", info->invert ? " !" : "");
print_pkttype(info);
}
static struct xtables_match pkttype_match = {
.family = NFPROTO_UNSPEC,
.name = "pkttype",
.version = XTABLES_VERSION,
.size = XT_ALIGN(sizeof(struct xt_pkttype_info)),
.userspacesize = XT_ALIGN(sizeof(struct xt_pkttype_info)),
.help = pkttype_help,
.print = pkttype_print,
.save = pkttype_save,
.x6_parse = pkttype_parse,
.x6_options = pkttype_opts,
};
void _init(void)
{
xtables_register_match(&pkttype_match);
}
| gpl-2.0 |
civato/CivZ-P900-KK | drivers/net/wireless/bcmdhd/dhd_linux_sched.c | 1478 | 1604 | /*
* Expose some of the kernel scheduler routines
*
* Copyright (C) 1999-2013, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: dhd_linux_sched.c 291086 2011-10-21 01:17:24Z $
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <typedefs.h>
#include <linuxver.h>
int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
{
int rc = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
rc = sched_setscheduler(p, policy, param);
#endif /* LinuxVer */
return rc;
}
| gpl-2.0 |
neohackt/android_kernel_motorola_otus | drivers/net/ethernet/sfc/siena.c | 1478 | 20326 | /****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "spi.h"
#include "regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "selftest.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
static void siena_init_wol(struct efx_nic *efx);
static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
static void siena_push_irq_moderation(struct efx_channel *channel)
{
efx_dword_t timer_cmd;
if (channel->irq_moderation)
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
FFE_CZ_TIMER_MODE_INT_HLDOFF,
FRF_CZ_TC_TIMER_VAL,
channel->irq_moderation - 1);
else
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
FFE_CZ_TIMER_MODE_DIS,
FRF_CZ_TC_TIMER_VAL, 0);
efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
channel->channel);
}
static int siena_mdio_write(struct net_device *net_dev,
int prtad, int devad, u16 addr, u16 value)
{
struct efx_nic *efx = netdev_priv(net_dev);
uint32_t status;
int rc;
rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
addr, value, &status);
if (rc)
return rc;
if (status != MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return 0;
}
static int siena_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
{
struct efx_nic *efx = netdev_priv(net_dev);
uint16_t value;
uint32_t status;
int rc;
rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
addr, &value, &status);
if (rc)
return rc;
if (status != MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return (int)value;
}
/* This call is responsible for hooking in the MAC and PHY operations */
static int siena_probe_port(struct efx_nic *efx)
{
int rc;
/* Hook in PHY operations table */
efx->phy_op = &efx_mcdi_phy_ops;
/* Set up MDIO structure for PHY */
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->mdio.mdio_read = siena_mdio_read;
efx->mdio.mdio_write = siena_mdio_write;
/* Fill out MDIO structure, loopback modes, and initial link state */
rc = efx->phy_op->probe(efx);
if (rc != 0)
return rc;
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
MC_CMD_MAC_NSTATS * sizeof(u64));
if (rc)
return rc;
netif_dbg(efx, probe, efx->net_dev,
"stats buffer at %llx (virt %p phys %llx)\n",
(u64)efx->stats_buffer.dma_addr,
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
return 0;
}
static void siena_remove_port(struct efx_nic *efx)
{
efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
void siena_prepare_flush(struct efx_nic *efx)
{
if (efx->fc_disable++ == 0)
efx_mcdi_set_mac(efx);
}
void siena_finish_flush(struct efx_nic *efx)
{
if (--efx->fc_disable == 0)
efx_mcdi_set_mac(efx);
}
static const struct efx_nic_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_CZ_USR_EV_CFG,
EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_CFG,
EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
{ FR_AZ_TX_CFG,
EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
{ FR_AZ_TX_RESERVED,
EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
{ FR_AZ_SRM_TX_DC_CFG,
EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_DC_CFG,
EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_DC_PF_WM,
EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_BZ_DP_CTRL,
EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_BZ_RX_RSS_TKEY,
EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
{ FR_CZ_RX_RSS_IPV6_REG1,
EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
{ FR_CZ_RX_RSS_IPV6_REG2,
EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
{ FR_CZ_RX_RSS_IPV6_REG3,
EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
};
static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
enum reset_type reset_method = RESET_TYPE_ALL;
int rc, rc2;
efx_reset_down(efx, reset_method);
/* Reset the chip immediately so that it is completely
* quiescent regardless of what any VF driver does.
*/
rc = siena_reset_hw(efx, reset_method);
if (rc)
goto out;
tests->registers =
efx_nic_test_registers(efx, siena_register_tests,
ARRAY_SIZE(siena_register_tests))
? -1 : 1;
rc = siena_reset_hw(efx, reset_method);
out:
rc2 = efx_reset_up(efx, reset_method, rc == 0);
return rc ? rc : rc2;
}
/**************************************************************************
*
* Device reset
*
**************************************************************************
*/
static enum reset_type siena_map_reset_reason(enum reset_type reason)
{
return RESET_TYPE_ALL;
}
static int siena_map_reset_flags(u32 *flags)
{
enum {
SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER |
ETH_RESET_OFFLOAD | ETH_RESET_MAC |
ETH_RESET_PHY),
SIENA_RESET_MC = (SIENA_RESET_PORT |
ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT),
};
if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) {
*flags &= ~SIENA_RESET_MC;
return RESET_TYPE_WORLD;
}
if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) {
*flags &= ~SIENA_RESET_PORT;
return RESET_TYPE_ALL;
}
/* no invisible reset implemented */
return -EINVAL;
}
static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
{
int rc;
/* Recover from a failed assertion pre-reset */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
return rc;
if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
else
return efx_mcdi_reset_port(efx);
}
static int siena_probe_nvconfig(struct efx_nic *efx)
{
u32 caps = 0;
int rc;
rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps);
efx->timer_quantum_ns =
(caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ?
3072 : 6144; /* 768 cycles */
return rc;
}
static void siena_dimension_resources(struct efx_nic *efx)
{
/* Each port has a small block of internal SRAM dedicated to
* the buffer table and descriptor caches. In theory we can
* map both blocks to one port, but we don't.
*/
efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
}
static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
bool already_attached = false;
efx_oword_t reg;
int rc;
/* Allocate storage for hardware specific data */
nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
if (!nic_data)
return -ENOMEM;
efx->nic_data = nic_data;
if (efx_nic_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Siena FPGA not supported\n");
rc = -ENODEV;
goto fail1;
}
efx_reado(efx, ®, FR_AZ_CS_DEBUG);
efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
efx_mcdi_init(efx);
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
goto fail1;
/* Let the BMC know that the driver is now in charge of link and
* filter settings. We must do this before we reset the NIC */
rc = efx_mcdi_drv_attach(efx, true, &already_attached);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"Unable to register driver with MCPU\n");
goto fail2;
}
if (already_attached)
/* Not a fatal error */
netif_err(efx, probe, efx->net_dev,
"Host already registered with MCPU\n");
/* Now we can reset the NIC */
rc = siena_reset_hw(efx, RESET_TYPE_ALL);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
}
siena_init_wol(efx);
/* Allocate memory for INT_KER */
rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
netif_dbg(efx, probe, efx->net_dev,
"INT_KER at %llx (virt %p phys %llx)\n",
(unsigned long long)efx->irq_status.dma_addr,
efx->irq_status.addr,
(unsigned long long)virt_to_phys(efx->irq_status.addr));
/* Read in the non-volatile configuration */
rc = siena_probe_nvconfig(efx);
if (rc == -EINVAL) {
netif_err(efx, probe, efx->net_dev,
"NVRAM is invalid therefore using defaults\n");
efx->phy_type = PHY_TYPE_NONE;
efx->mdio.prtad = MDIO_PRTAD_NONE;
} else if (rc) {
goto fail5;
}
rc = efx_mcdi_mon_probe(efx);
if (rc)
goto fail5;
efx_sriov_probe(efx);
return 0;
fail5:
efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
efx_mcdi_drv_attach(efx, false, NULL);
fail2:
fail1:
kfree(efx->nic_data);
return rc;
}
/* This call performs hardware-specific global initialisation, such as
* defining the descriptor cache sizes and number of RSS channels.
* It does not set up any buffers, descriptor rings or event queues.
*/
static int siena_init_nic(struct efx_nic *efx)
{
efx_oword_t temp;
int rc;
/* Recover from a failed assertion post-reset */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
return rc;
/* Squash TX of packets of 16 bytes or less */
efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
* descriptors (which is bad).
*/
efx_reado(efx, &temp, FR_AZ_TX_CFG);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
efx_writeo(efx, &temp, FR_AZ_TX_CFG);
efx_reado(efx, &temp, FR_AZ_RX_CFG);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
/* Enable hash insertion. This is broken for the 'Falcon' hash
* if IPv6 hashing is also enabled, so also select Toeplitz
* TCP/IPv4 and IPv4 hashes. */
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
/* Set hash key for IPv4 */
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
/* Enable IPv6 RSS */
BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
if (rc)
return rc;
/* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
efx_nic_init_common(efx);
return 0;
}
static void siena_remove_nic(struct efx_nic *efx)
{
efx_mcdi_mon_remove(efx);
efx_nic_free_buffer(efx, &efx->irq_status);
siena_reset_hw(efx, RESET_TYPE_ALL);
/* Relinquish the device back to the BMC */
efx_mcdi_drv_attach(efx, false, NULL);
/* Tear down the private nic state */
kfree(efx->nic_data);
efx->nic_data = NULL;
}
#define STATS_GENERATION_INVALID ((__force __le64)(-1))
static int siena_try_update_nic_stats(struct efx_nic *efx)
{
__le64 *dma_stats;
struct efx_mac_stats *mac_stats;
__le64 generation_start, generation_end;
mac_stats = &efx->mac_stats;
dma_stats = efx->stats_buffer.addr;
generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
if (generation_end == STATS_GENERATION_INVALID)
return 0;
rmb();
#define MAC_STAT(M, D) \
mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
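/* For example, MAC_STAT(tx_bytes, TX_BYTES) expands to:
 * mac_stats->tx_bytes = le64_to_cpu(dma_stats[MC_CMD_MAC_TX_BYTES]); */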
MAC_STAT(tx_bytes, TX_BYTES);
MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
mac_stats->tx_bad_bytes);
MAC_STAT(tx_packets, TX_PKTS);
MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
MAC_STAT(tx_pause, TX_PAUSE_PKTS);
MAC_STAT(tx_control, TX_CONTROL_PKTS);
MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
MAC_STAT(tx_lt64, TX_LT64_PKTS);
MAC_STAT(tx_64, TX_64_PKTS);
MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
mac_stats->tx_collision = 0;
MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
mac_stats->tx_collision = (mac_stats->tx_single_collision +
mac_stats->tx_multiple_collision +
mac_stats->tx_excessive_collision +
mac_stats->tx_late_collision);
MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
MAC_STAT(rx_bytes, RX_BYTES);
MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
mac_stats->rx_bad_bytes);
MAC_STAT(rx_packets, RX_PKTS);
MAC_STAT(rx_good, RX_GOOD_PKTS);
MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
MAC_STAT(rx_pause, RX_PAUSE_PKTS);
MAC_STAT(rx_control, RX_CONTROL_PKTS);
MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
MAC_STAT(rx_64, RX_64_PKTS);
MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
mac_stats->rx_bad_lt64 = 0;
mac_stats->rx_bad_64_to_15xx = 0;
mac_stats->rx_bad_15xx_to_jumbo = 0;
MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
mac_stats->rx_missed = 0;
MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
mac_stats->rx_good_lt64 = 0;
efx->n_rx_nodesc_drop_cnt =
le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
#undef MAC_STAT
rmb();
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
if (generation_end != generation_start)
return -EAGAIN;
return 0;
}
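/* Illustrative note, not part of the driver: the generation words
* bracket the DMA'd statistics like a seqlock read side. The reader
* above samples GENERATION_END, copies the stats, then samples
* GENERATION_START; if the two differ the MC was mid-update and the
* caller must retry:
*
* end = dma_stats[MC_CMD_MAC_GENERATION_END];
* rmb();
* ...copy stats...
* rmb();
* if (end != dma_stats[MC_CMD_MAC_GENERATION_START])
* retry;
*/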
static void siena_update_nic_stats(struct efx_nic *efx)
{
int retry;
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us) */
for (retry = 0; retry < 100; ++retry) {
if (siena_try_update_nic_stats(efx) == 0)
return;
udelay(100);
}
/* Use the old values instead */
}
static void siena_start_nic_stats(struct efx_nic *efx)
{
__le64 *dma_stats = efx->stats_buffer.addr;
dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
}
static void siena_stop_nic_stats(struct efx_nic *efx)
{
efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
}
/**************************************************************************
*
* Wake on LAN
*
**************************************************************************
*/
static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
struct siena_nic_data *nic_data = efx->nic_data;
wol->supported = WAKE_MAGIC;
if (nic_data->wol_filter_id != -1)
wol->wolopts = WAKE_MAGIC;
else
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int siena_set_wol(struct efx_nic *efx, u32 type)
{
struct siena_nic_data *nic_data = efx->nic_data;
int rc;
if (type & ~WAKE_MAGIC)
return -EINVAL;
if (type & WAKE_MAGIC) {
if (nic_data->wol_filter_id != -1)
efx_mcdi_wol_filter_remove(efx,
nic_data->wol_filter_id);
rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
&nic_data->wol_filter_id);
if (rc)
goto fail;
pci_wake_from_d3(efx->pci_dev, true);
} else {
rc = efx_mcdi_wol_filter_reset(efx);
nic_data->wol_filter_id = -1;
pci_wake_from_d3(efx->pci_dev, false);
if (rc)
goto fail;
}
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
__func__, type, rc);
return rc;
}
static void siena_init_wol(struct efx_nic *efx)
{
struct siena_nic_data *nic_data = efx->nic_data;
int rc;
rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
if (rc != 0) {
/* If it failed, attempt to get into a synchronised
* state with MC by resetting any set WoL filters */
efx_mcdi_wol_filter_reset(efx);
nic_data->wol_filter_id = -1;
} else if (nic_data->wol_filter_id != -1) {
pci_wake_from_d3(efx->pci_dev, true);
}
}
/**************************************************************************
*
* Revision-dependent attributes used by efx.c and nic.c
*
**************************************************************************
*/
const struct efx_nic_type siena_a0_nic_type = {
.probe = siena_probe_nic,
.remove = siena_remove_nic,
.init = siena_init_nic,
.dimension_resources = siena_dimension_resources,
.fini = efx_port_dummy_op_void,
.monitor = NULL,
.map_reset_reason = siena_map_reset_reason,
.map_reset_flags = siena_map_reset_flags,
.reset = siena_reset_hw,
.probe_port = siena_probe_port,
.remove_port = siena_remove_port,
.prepare_flush = siena_prepare_flush,
.finish_flush = siena_finish_flush,
.update_stats = siena_update_nic_stats,
.start_stats = siena_start_nic_stats,
.stop_stats = siena_stop_nic_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
.reconfigure_mac = efx_mcdi_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
.reconfigure_port = efx_mcdi_phy_reconfigure,
.get_wol = siena_get_wol,
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_chip = siena_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
.revision = EFX_REV_SIENA_A0,
.mem_map_size = (FR_CZ_MC_TREG_SMEM +
FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
* channels */
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE),
};
| gpl-2.0 |
JB1tz/kernel-msm | drivers/net/ethernet/sfc/nic.c | 1478 | 63792 | /****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
*
* Configurable values
*
**************************************************************************
*/
/* This is set to 16 for a good reason. In summary, if larger than
* 16, the descriptor cache holds more than a default socket
* buffer's worth of packets (for UDP we can only have at most one
* socket buffer's worth outstanding). This combined with the fact
* that we only get 1 TX event per descriptor cache means the NIC
* goes idle.
*/
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1
#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
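/* Illustrative sketch, not part of the driver: the *_DC_ENTRIES_ORDER
* values encode a descriptor cache size as (8 << order), which is the
* encoding the FRF_AZ_*_DC_SIZE register fields expect (see the
* BUILD_BUG_ON()s in efx_nic_init_common()). Hypothetical helper:
*/
static inline unsigned int __maybe_unused efx_dc_entries(unsigned int order)
{
return 8U << order; /* order 1 -> 16 entries, order 3 -> 64 entries */
}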
/* If EFX_MAX_INT_ERRORS internal errors occur within
* EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
* disable it.
*/
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5
/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4
/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST 0x000101
#define _EFX_CHANNEL_MAGIC_FILL 0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
#define EFX_CHANNEL_MAGIC_TEST(_channel) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
(_tx_queue)->queue)
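/* Illustrative sketch, not part of the driver: a magic value packs the
* event code above bit 8 and the per-queue data in the low 8 bits, so
* the code can be recovered with _EFX_CHANNEL_MAGIC_CODE(). Hypothetical
* helper demonstrating the round trip:
*/
static inline bool __maybe_unused efx_magic_roundtrip_ok(u32 code, u8 data)
{
u32 magic = _EFX_CHANNEL_MAGIC(code, data);
return _EFX_CHANNEL_MAGIC_CODE(magic) == code && (magic & 0xff) == data;
}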
static void efx_magic_event(struct efx_channel *channel, u32 magic);
/**************************************************************************
*
* Solarstorm hardware access
*
**************************************************************************/
static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
unsigned int index)
{
efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
value, index);
}
/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
return ((efx_qword_t *) (channel->eventq.addr)) +
(index & channel->eventq_mask);
}
/* See if an event is present
*
* We check both the high and low dword of the event for all ones. We
* wrote all ones when we cleared the event, and no valid event can
* have all ones in either its high or low dwords. This approach is
* robust against reordering.
*
* Note that using a single 64-bit comparison is incorrect; even
* though the CPU read will be atomic, the DMA write may not be.
*/
static inline int efx_event_present(efx_qword_t *event)
{
return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
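/* Illustrative sketch, not part of the driver: a cleared event slot is
* all ones (see the memset(0xff) in efx_nic_init_eventq() and
* EFX_SET_QWORD() in efx_nic_process_eventq()), so efx_event_present()
* always reports it as absent. Hypothetical demonstration:
*/
static inline bool __maybe_unused efx_cleared_event_is_absent(void)
{
efx_qword_t ev;
EFX_SET_QWORD(ev); /* mark as empty: all ones */
return !efx_event_present(&ev); /* always true */
}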
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
const efx_oword_t *mask)
{
return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
int efx_nic_test_registers(struct efx_nic *efx,
const struct efx_nic_register_test *regs,
size_t n_regs)
{
unsigned address = 0, i, j;
efx_oword_t mask, imask, original, reg, buf;
for (i = 0; i < n_regs; ++i) {
address = regs[i].address;
mask = imask = regs[i].mask;
EFX_INVERT_OWORD(imask);
efx_reado(efx, &original, address);
/* bit sweep on and off */
for (j = 0; j < 128; j++) {
if (!EFX_EXTRACT_OWORD32(mask, j, j))
continue;
/* Test this testable bit can be set in isolation */
EFX_AND_OWORD(reg, original, mask);
EFX_SET_OWORD32(reg, j, j, 1);
efx_writeo(efx, &reg, address);
efx_reado(efx, &buf, address);
if (efx_masked_compare_oword(&reg, &buf, &mask))
goto fail;
/* Test this testable bit can be cleared in isolation */
EFX_OR_OWORD(reg, original, mask);
EFX_SET_OWORD32(reg, j, j, 0);
efx_writeo(efx, &reg, address);
efx_reado(efx, &buf, address);
if (efx_masked_compare_oword(&reg, &buf, &mask))
goto fail;
}
efx_writeo(efx, &original, address);
}
return 0;
fail:
netif_err(efx, hw, efx->net_dev,
"wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
return -EIO;
}
/**************************************************************************
*
* Special buffer handling
* Special buffers are used for event queues and the TX and RX
* descriptor rings.
*
*************************************************************************/
/*
* Initialise a special buffer
*
* This will define a buffer (previously allocated via
* efx_alloc_special_buffer()) in the buffer table, allowing
* it to be used for event queues, descriptor rings etc.
*/
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
efx_qword_t buf_desc;
unsigned int index;
dma_addr_t dma_addr;
int i;
EFX_BUG_ON_PARANOID(!buffer->addr);
/* Write buffer descriptors to NIC */
for (i = 0; i < buffer->entries; i++) {
index = buffer->index + i;
dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
netif_dbg(efx, probe, efx->net_dev,
"mapping special buffer %d at %llx\n",
index, (unsigned long long)dma_addr);
EFX_POPULATE_QWORD_3(buf_desc,
FRF_AZ_BUF_ADR_REGION, 0,
FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
FRF_AZ_BUF_OWNER_ID_FBUF, 0);
efx_write_buf_tbl(efx, &buf_desc, index);
}
}
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
efx_oword_t buf_tbl_upd;
unsigned int start = buffer->index;
unsigned int end = (buffer->index + buffer->entries - 1);
if (!buffer->entries)
return;
netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
buffer->index, buffer->index + buffer->entries - 1);
EFX_POPULATE_OWORD_4(buf_tbl_upd,
FRF_AZ_BUF_UPD_CMD, 0,
FRF_AZ_BUF_CLR_CMD, 1,
FRF_AZ_BUF_CLR_END_ID, end,
FRF_AZ_BUF_CLR_START_ID, start);
efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
/*
* Allocate a new special buffer
*
* This allocates memory for a new buffer, clears it and allocates a
* new buffer ID range. It does not write into the buffer table.
*
* This call will allocate 4KB buffers, since 8KB buffers can't be
* used for event queues and descriptor rings.
*/
static int efx_alloc_special_buffer(struct efx_nic *efx,
struct efx_special_buffer *buffer,
unsigned int len)
{
len = ALIGN(len, EFX_BUF_SIZE);
buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
&buffer->dma_addr, GFP_KERNEL);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
buffer->entries = len / EFX_BUF_SIZE;
BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
/* All zeros is a potentially valid event so memset to 0xff */
memset(buffer->addr, 0xff, len);
/* Select new buffer ID */
buffer->index = efx->next_buffer_table;
efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
BUG_ON(efx_sriov_enabled(efx) &&
efx->vf_buftbl_base < efx->next_buffer_table);
#endif
netif_dbg(efx, probe, efx->net_dev,
"allocating special buffers %d-%d at %llx+%x "
"(virt %p phys %llx)\n", buffer->index,
buffer->index + buffer->entries - 1,
(u64)buffer->dma_addr, len,
buffer->addr, (u64)virt_to_phys(buffer->addr));
return 0;
}
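/* Illustrative sketch, not part of the driver: the requested length is
* rounded up to whole EFX_BUF_SIZE (4KB) pages and each page takes one
* buffer table entry, so e.g. a 1024-entry ring of 8-byte descriptors
* (8192 bytes) consumes two entries. Hypothetical helper:
*/
static inline unsigned int __maybe_unused
efx_special_buffer_entries(unsigned int len)
{
return ALIGN(len, EFX_BUF_SIZE) / EFX_BUF_SIZE;
}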
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
if (!buffer->addr)
return;
netif_dbg(efx, hw, efx->net_dev,
"deallocating special buffers %d-%d at %llx+%x "
"(virt %p phys %llx)\n", buffer->index,
buffer->index + buffer->entries - 1,
(u64)buffer->dma_addr, buffer->len,
buffer->addr, (u64)virt_to_phys(buffer->addr));
dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
buffer->dma_addr);
buffer->addr = NULL;
buffer->entries = 0;
}
/**************************************************************************
*
* Generic buffer handling
* These buffers are used for interrupt status and MAC stats
*
**************************************************************************/
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len)
{
buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
&buffer->dma_addr);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
memset(buffer->addr, 0, len);
return 0;
}
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
if (buffer->addr) {
pci_free_consistent(efx->pci_dev, buffer->len,
buffer->addr, buffer->dma_addr);
buffer->addr = NULL;
}
}
/**************************************************************************
*
* TX path
*
**************************************************************************/
/* Returns a pointer to the specified transmit descriptor in the TX
* descriptor queue belonging to the specified channel.
*/
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
unsigned write_ptr;
efx_dword_t reg;
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
efx_writed_page(tx_queue->efx, &reg,
FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
const efx_qword_t *txd)
{
unsigned write_ptr;
efx_oword_t reg;
BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
FRF_AZ_TX_DESC_WPTR, write_ptr);
reg.qword[0] = *txd;
efx_writeo_page(tx_queue->efx, &reg,
FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
tx_queue->empty_read_count = 0;
return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
&& tx_queue->write_count - write_count == 1;
}
/* For each entry inserted into the software descriptor ring, create a
* descriptor in the hardware TX descriptor ring (in host memory), and
* write a doorbell.
*/
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
efx_qword_t *txd;
unsigned write_ptr;
unsigned old_write_count = tx_queue->write_count;
BUG_ON(tx_queue->write_count == tx_queue->insert_count);
do {
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[write_ptr];
txd = efx_tx_desc(tx_queue, write_ptr);
++tx_queue->write_count;
/* Create TX descriptor ring entry */
EFX_POPULATE_QWORD_4(*txd,
FSF_AZ_TX_KER_CONT, buffer->continuation,
FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
FSF_AZ_TX_KER_BUF_REGION, 0,
FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
} while (tx_queue->write_count != tx_queue->insert_count);
wmb(); /* Ensure descriptors are written before they are fetched */
if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
txd = efx_tx_desc(tx_queue,
old_write_count & tx_queue->ptr_mask);
efx_push_tx_desc(tx_queue, txd);
++tx_queue->pushes;
} else {
efx_notify_tx_desc(tx_queue);
}
}
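/* Illustrative sketch, not part of the driver: write_count and
* insert_count are free-running counters while ptr_mask is
* (ring size - 1) for a power-of-two ring, so masking yields the slot
* index and the counters may wrap harmlessly. Hypothetical helper:
*/
static inline unsigned int __maybe_unused
efx_ring_slot(unsigned int count, unsigned int ptr_mask)
{
return count & ptr_mask; /* e.g. count 1025 with mask 1023 -> slot 1 */
}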
/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
unsigned entries;
entries = tx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &tx_queue->txd,
entries * sizeof(efx_qword_t));
}
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
efx_oword_t reg;
/* Pin TX descriptor ring */
efx_init_special_buffer(efx, &tx_queue->txd);
/* Push TX descriptor ring to card */
EFX_POPULATE_OWORD_10(reg,
FRF_AZ_TX_DESCQ_EN, 1,
FRF_AZ_TX_ISCSI_DDIG_EN, 0,
FRF_AZ_TX_ISCSI_HDIG_EN, 0,
FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
FRF_AZ_TX_DESCQ_EVQ_ID,
tx_queue->channel->channel,
FRF_AZ_TX_DESCQ_OWNER_ID, 0,
FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
FRF_AZ_TX_DESCQ_SIZE,
__ffs(tx_queue->txd.entries),
FRF_AZ_TX_DESCQ_TYPE, 0,
FRF_BZ_TX_NON_IP_DROP_DIS, 1);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
!csum);
}
efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
/* Only 128 bits in this register */
BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
clear_bit_le(tx_queue->queue, (void *)&reg);
else
set_bit_le(tx_queue->queue, (void *)&reg);
efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
}
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
EFX_POPULATE_OWORD_1(reg,
FRF_BZ_TX_PACE,
(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
FFE_BZ_TX_PACE_OFF :
FFE_BZ_TX_PACE_RESERVED);
efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
tx_queue->queue);
}
}
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
efx_oword_t tx_flush_descq;
WARN_ON(atomic_read(&tx_queue->flush_outstanding));
atomic_set(&tx_queue->flush_outstanding, 1);
EFX_POPULATE_OWORD_2(tx_flush_descq,
FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}
void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
efx_oword_t tx_desc_ptr;
/* Remove TX descriptor ring from card */
EFX_ZERO_OWORD(tx_desc_ptr);
efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
/* Unpin TX descriptor ring */
efx_fini_special_buffer(efx, &tx_queue->txd);
}
/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
*
* RX path
*
**************************************************************************/
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}
/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
struct efx_rx_buffer *rx_buf;
efx_qword_t *rxd;
rxd = efx_rx_desc(rx_queue, index);
rx_buf = efx_rx_buffer(rx_queue, index);
EFX_POPULATE_QWORD_3(*rxd,
FSF_AZ_RX_KER_BUF_SIZE,
rx_buf->len -
rx_queue->efx->type->rx_buffer_padding,
FSF_AZ_RX_KER_BUF_REGION, 0,
FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
* descriptor ring.
*/
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
efx_dword_t reg;
unsigned write_ptr;
while (rx_queue->notified_count != rx_queue->added_count) {
efx_build_rx_desc(
rx_queue,
rx_queue->notified_count & rx_queue->ptr_mask);
++rx_queue->notified_count;
}
wmb();
write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
efx_rx_queue_index(rx_queue));
}
int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
unsigned entries;
entries = rx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &rx_queue->rxd,
entries * sizeof(efx_qword_t));
}
void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
bool iscsi_digest_en = is_b0;
netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n",
efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1);
/* Pin RX descriptor ring */
efx_init_special_buffer(efx, &rx_queue->rxd);
/* Push RX descriptor ring to card */
EFX_POPULATE_OWORD_10(rx_desc_ptr,
FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
FRF_AZ_RX_DESCQ_EVQ_ID,
efx_rx_queue_channel(rx_queue)->channel,
FRF_AZ_RX_DESCQ_OWNER_ID, 0,
FRF_AZ_RX_DESCQ_LABEL,
efx_rx_queue_index(rx_queue),
FRF_AZ_RX_DESCQ_SIZE,
__ffs(rx_queue->rxd.entries),
FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
/* For >=B0 this is scatter so disable */
FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
FRF_AZ_RX_DESCQ_EN, 1);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
efx_rx_queue_index(rx_queue));
}
static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
efx_oword_t rx_flush_descq;
EFX_POPULATE_OWORD_2(rx_flush_descq,
FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
FRF_AZ_RX_FLUSH_DESCQ,
efx_rx_queue_index(rx_queue));
efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}
void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
/* Remove RX descriptor ring from card */
EFX_ZERO_OWORD(rx_desc_ptr);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
efx_rx_queue_index(rx_queue));
/* Unpin RX descriptor ring */
efx_fini_special_buffer(efx, &rx_queue->rxd);
}
/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
*
* Flush handling
*
**************************************************************************/
/* efx_nic_flush_queues() must be woken up when all flushes are completed,
* or more RX flushes can be kicked off.
*/
static bool efx_flush_wake(struct efx_nic *efx)
{
/* Ensure that all updates are visible to efx_nic_flush_queues() */
smp_mb();
return (atomic_read(&efx->drain_pending) == 0 ||
(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
&& atomic_read(&efx->rxq_flush_pending) > 0));
}
static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
bool all_flushed = true;
efx_oword_t txd_ptr_tbl;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_reado_table(efx, &txd_ptr_tbl,
FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
if (EFX_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_FLUSH) ||
EFX_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_EN)) {
netif_dbg(efx, hw, efx->net_dev,
"flush did not complete on TXQ %d\n",
tx_queue->queue);
all_flushed = false;
} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
1, 0)) {
/* The flush is complete, but we didn't
* receive a flush completion event
*/
netif_dbg(efx, hw, efx->net_dev,
"flush complete on TXQ %d, so drain "
"the queue\n", tx_queue->queue);
/* Don't need to increment drain_pending as it
* has already been incremented for the queues
* which did not drain
*/
efx_magic_event(channel,
EFX_CHANNEL_MAGIC_TX_DRAIN(
tx_queue));
}
}
}
return all_flushed;
}
/* Flush all the transmit queues, and continue flushing receive queues until
* they're all flushed. Wait for the DRAIN events to be received so that there
* are no more RX and TX events left on any channel. */
int efx_nic_flush_queues(struct efx_nic *efx)
{
unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
int rc = 0;
efx->type->prepare_flush(efx);
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
atomic_inc(&efx->drain_pending);
efx_flush_tx_queue(tx_queue);
}
efx_for_each_channel_rx_queue(rx_queue, channel) {
atomic_inc(&efx->drain_pending);
rx_queue->flush_pending = true;
atomic_inc(&efx->rxq_flush_pending);
}
}
while (timeout && atomic_read(&efx->drain_pending) > 0) {
/* If SRIOV is enabled, then offload receive queue flushing to
* the firmware (though we will still have to poll for
* completion). If that fails, fall back to the old scheme.
*/
if (efx_sriov_enabled(efx)) {
rc = efx_mcdi_flush_rxqs(efx);
if (!rc)
goto wait;
}
/* The hardware supports four concurrent rx flushes, each of
* which may need to be retried if there is an outstanding
* descriptor fetch
*/
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel) {
if (atomic_read(&efx->rxq_flush_outstanding) >=
EFX_RX_FLUSH_COUNT)
break;
if (rx_queue->flush_pending) {
rx_queue->flush_pending = false;
atomic_dec(&efx->rxq_flush_pending);
atomic_inc(&efx->rxq_flush_outstanding);
efx_flush_rx_queue(rx_queue);
}
}
}
wait:
timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
timeout);
}
if (atomic_read(&efx->drain_pending) &&
!efx_check_tx_flush_complete(efx)) {
netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
"(rx %d+%d)\n", atomic_read(&efx->drain_pending),
atomic_read(&efx->rxq_flush_outstanding),
atomic_read(&efx->rxq_flush_pending));
rc = -ETIMEDOUT;
atomic_set(&efx->drain_pending, 0);
atomic_set(&efx->rxq_flush_pending, 0);
atomic_set(&efx->rxq_flush_outstanding, 0);
}
efx->type->finish_flush(efx);
return rc;
}
/**************************************************************************
*
* Event queue processing
* Event queues are processed by per-channel tasklets.
*
**************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register
*
* This writes the EVQ_RPTR_REG register for the specified channel's
* event queue.
*/
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
efx_dword_t reg;
struct efx_nic *efx = channel->efx;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
channel->eventq_read_ptr & channel->eventq_mask);
efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
channel->channel);
}
/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
efx_qword_t *event)
{
efx_oword_t drv_ev_reg;
BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
FRF_AZ_DRV_EV_DATA_WIDTH != 64);
drv_ev_reg.u32[0] = event->u32[0];
drv_ev_reg.u32[1] = event->u32[1];
drv_ev_reg.u32[2] = 0;
drv_ev_reg.u32[3] = 0;
EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
efx_qword_t event;
EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
FSE_AZ_EV_CODE_DRV_GEN_EV,
FSF_AZ_DRV_GEN_EV_MAGIC, magic);
efx_generate_event(channel->efx, channel->channel, &event);
}
/* Handle a transmit completion event
*
* The NIC batches TX completion events; the message we receive is of
* the form "complete all TX events up to this index".
*/
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
struct efx_tx_queue *tx_queue;
struct efx_nic *efx = channel->efx;
int tx_packets = 0;
if (unlikely(ACCESS_ONCE(efx->reset_pending)))
return 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
channel, tx_ev_q_label % EFX_TXQ_TYPES);
tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
tx_queue->ptr_mask);
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
channel, tx_ev_q_label % EFX_TXQ_TYPES);
netif_tx_lock(efx->net_dev);
efx_notify_tx_desc(tx_queue);
netif_tx_unlock(efx->net_dev);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
EFX_WORKAROUND_10727(efx)) {
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
} else {
netif_err(efx, tx_err, efx->net_dev,
"channel %d unexpected TX event "
EFX_QWORD_FMT"\n", channel->channel,
EFX_QWORD_VAL(*event));
}
return tx_packets;
}
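/* Illustrative sketch, not part of the driver: because completions are
* batched, the number of packets completed by one TX event is the
* modular distance from the queue's read pointer to the event's
* descriptor pointer, exactly as computed above. Hypothetical helper:
*/
static inline unsigned int __maybe_unused
efx_tx_batch_size(unsigned int ev_desc_ptr, unsigned int read_count,
unsigned int ptr_mask)
{
return (ev_desc_ptr - read_count) & ptr_mask;
}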
/* Detect errors included in the rx_ev_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
const efx_qword_t *event)
{
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
bool rx_ev_other_err, rx_ev_pause_frm;
bool rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned rx_ev_pkt_type;
rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
/* Every error apart from tobe_disc and pause_frm */
rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
/* Count errors that are not in MAC stats. Ignore expected
* checksum errors during self-test. */
if (rx_ev_frm_trunc)
++channel->n_rx_frm_trunc;
else if (rx_ev_tobe_disc)
++channel->n_rx_tobe_disc;
else if (!efx->loopback_selftest) {
if (rx_ev_ip_hdr_chksum_err)
++channel->n_rx_ip_hdr_chksum_err;
else if (rx_ev_tcp_udp_chksum_err)
++channel->n_rx_tcp_udp_chksum_err;
}
/* TOBE_DISC is expected on unicast mismatches; don't print out an
* error message. FRM_TRUNC indicates RXDP dropped the packet due
* to a FIFO overflow.
*/
#ifdef DEBUG
if (rx_ev_other_err && net_ratelimit()) {
netif_dbg(efx, rx_err, efx->net_dev,
" RX queue %d unexpected RX event "
EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
rx_ev_ip_hdr_chksum_err ?
" [IP_HDR_CHKSUM_ERR]" : "",
rx_ev_tcp_udp_chksum_err ?
" [TCP_UDP_CHKSUM_ERR]" : "",
rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
rx_ev_drib_nib ? " [DRIB_NIB]" : "",
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
/* The frame must be discarded if any of these are true. */
return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
rx_ev_tobe_disc | rx_ev_pause_frm) ?
EFX_RX_PKT_DISCARD : 0;
}
/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
struct efx_nic *efx = rx_queue->efx;
unsigned expected, dropped;
expected = rx_queue->removed_count & rx_queue->ptr_mask;
dropped = (index - expected) & rx_queue->ptr_mask;
netif_info(efx, rx_err, efx->net_dev,
"dropped %d events (index=%d expected=%d)\n",
dropped, index, expected);
efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}
/* Handle a packet received event
*
* The NIC gives a "discard" flag if it's a unicast packet with the
* wrong destination address
* Also "is multicast" and "matches multicast filter" flags can be used to
* discard non-matching multicast packets.
*/
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned expected_ptr;
bool rx_ev_pkt_ok;
u16 flags;
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
if (unlikely(ACCESS_ONCE(efx->reset_pending)))
return;
/* Basic packet information */
rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
channel->channel);
rx_queue = efx_channel_get_rx_queue(channel);
rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
if (unlikely(rx_ev_desc_ptr != expected_ptr))
efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
if (likely(rx_ev_pkt_ok)) {
/* If packet is marked as OK and packet type is TCP/IP or
* UDP/IP, then we can rely on the hardware checksum.
*/
flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
EFX_RX_PKT_CSUMMED : 0;
} else {
flags = efx_handle_rx_not_ok(rx_queue, event);
}
/* Detect multicast packets that didn't match the filter */
rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
if (rx_ev_mcast_pkt) {
unsigned int rx_ev_mcast_hash_match =
EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
if (unlikely(!rx_ev_mcast_hash_match)) {
++channel->n_rx_mcast_mismatch;
flags |= EFX_RX_PKT_DISCARD;
}
}
channel->irq_mod_score += 2;
/* Handle received packet */
efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
}
/* If this flush done event corresponds to a &struct efx_tx_queue, then
* send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
* of all transmit completions.
*/
static void
efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_tx_queue *tx_queue;
int qid;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
qid % EFX_TXQ_TYPES);
if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
efx_magic_event(tx_queue->channel,
EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
}
}
}
/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
* was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
* the RX queue back to the mask of RX queues in need of flushing.
*/
static void
efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
int qid;
bool failed;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
if (qid >= efx->n_channels)
return;
channel = efx_get_channel(efx, qid);
if (!efx_channel_has_rx_queue(channel))
return;
rx_queue = efx_channel_get_rx_queue(channel);
if (failed) {
netif_info(efx, hw, efx->net_dev,
"RXQ %d flush retry\n", qid);
rx_queue->flush_pending = true;
atomic_inc(&efx->rxq_flush_pending);
} else {
efx_magic_event(efx_rx_queue_channel(rx_queue),
EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
}
atomic_dec(&efx->rxq_flush_outstanding);
if (efx_flush_wake(efx))
wake_up(&efx->flush_wq);
}
static void
efx_handle_drain_event(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
WARN_ON(atomic_read(&efx->drain_pending) == 0);
atomic_dec(&efx->drain_pending);
if (efx_flush_wake(efx))
wake_up(&efx->flush_wq);
}
static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
struct efx_rx_queue *rx_queue =
efx_channel_has_rx_queue(channel) ?
efx_channel_get_rx_queue(channel) : NULL;
unsigned magic, code;
magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
code = _EFX_CHANNEL_MAGIC_CODE(magic);
if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
channel->event_test_cpu = raw_smp_processor_id();
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
* queue. Refill it here */
efx_fast_push_rx_descriptors(rx_queue);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
rx_queue->enabled = false;
efx_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
efx_handle_drain_event(channel);
} else {
netif_dbg(efx, hw, efx->net_dev, "channel %d received "
"generated event "EFX_QWORD_FMT"\n",
channel->channel, EFX_QWORD_VAL(*event));
}
}
static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
unsigned int ev_sub_code;
unsigned int ev_sub_data;
ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
switch (ev_sub_code) {
case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_handle_tx_flush_done(efx, event);
efx_sriov_tx_flush_done(efx, event);
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_handle_rx_flush_done(efx, event);
efx_sriov_rx_flush_done(efx, event);
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev,
"channel %d EVQ %d initialised\n",
channel->channel, ev_sub_data);
break;
case FSE_AZ_SRM_UPD_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d SRAM update done\n", channel->channel);
break;
case FSE_AZ_WAKE_UP_EV:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d RXQ %d wakeup event\n",
channel->channel, ev_sub_data);
break;
case FSE_AZ_TIMER_EV:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d RX queue %d timer expired\n",
channel->channel, ev_sub_data);
break;
case FSE_AA_RX_RECOVER_EV:
netif_err(efx, rx_err, efx->net_dev,
"channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel);
atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx,
EFX_WORKAROUND_6555(efx) ?
RESET_TYPE_RX_RECOVERY :
RESET_TYPE_DISABLE);
break;
case FSE_BZ_RX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
netif_err(efx, rx_err, efx->net_dev,
"RX DMA Q %d reports descriptor fetch error."
" RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
} else
efx_sriov_desc_fetch_err(efx, ev_sub_data);
break;
case FSE_BZ_TX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
netif_err(efx, tx_err, efx->net_dev,
"TX DMA Q %d reports descriptor fetch error."
" TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
} else
efx_sriov_desc_fetch_err(efx, ev_sub_data);
break;
default:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d unknown driver event code %d "
"data %04x\n", channel->channel, ev_sub_code,
ev_sub_data);
break;
}
}
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
struct efx_nic *efx = channel->efx;
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
int tx_packets = 0;
int spent = 0;
read_ptr = channel->eventq_read_ptr;
for (;;) {
p_event = efx_event(channel, read_ptr);
event = *p_event;
if (!efx_event_present(&event))
/* End of events */
break;
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d event is "EFX_QWORD_FMT"\n",
channel->channel, EFX_QWORD_VAL(event));
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
++read_ptr;
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
switch (ev_code) {
case FSE_AZ_EV_CODE_RX_EV:
efx_handle_rx_event(channel, &event);
if (++spent == budget)
goto out;
break;
case FSE_AZ_EV_CODE_TX_EV:
tx_packets += efx_handle_tx_event(channel, &event);
if (tx_packets > efx->txq_entries) {
spent = budget;
goto out;
}
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
efx_handle_generated_event(channel, &event);
break;
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_handle_driver_event(channel, &event);
break;
case FSE_CZ_EV_CODE_USER_EV:
efx_sriov_event(channel, &event);
break;
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
break;
case FSE_AZ_EV_CODE_GLOBAL_EV:
if (efx->type->handle_global_event &&
efx->type->handle_global_event(channel, &event))
break;
/* else fall through */
default:
netif_err(channel->efx, hw, channel->efx->net_dev,
"channel %d unknown event type %d (data "
EFX_QWORD_FMT ")\n", channel->channel,
ev_code, EFX_QWORD_VAL(event));
}
}
out:
channel->eventq_read_ptr = read_ptr;
return spent;
}
/* Check whether an event is present in the eventq at the current
* read pointer. Only useful for self-test.
*/
bool efx_nic_event_present(struct efx_channel *channel)
{
return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}
/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
unsigned entries;
entries = channel->eventq_mask + 1;
return efx_alloc_special_buffer(efx, &channel->eventq,
entries * sizeof(efx_qword_t));
}
void efx_nic_init_eventq(struct efx_channel *channel)
{
efx_oword_t reg;
struct efx_nic *efx = channel->efx;
netif_dbg(efx, hw, efx->net_dev,
"channel %d event queue in special buffers %d-%d\n",
channel->channel, channel->eventq.index,
channel->eventq.index + channel->eventq.entries - 1);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
EFX_POPULATE_OWORD_3(reg,
FRF_CZ_TIMER_Q_EN, 1,
FRF_CZ_HOST_NOTIFY_MODE, 0,
FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
}
/* Pin event queue buffer */
efx_init_special_buffer(efx, &channel->eventq);
/* Fill event queue with all ones (i.e. empty events) */
memset(channel->eventq.addr, 0xff, channel->eventq.len);
/* Push event queue to card */
EFX_POPULATE_OWORD_3(reg,
FRF_AZ_EVQ_EN, 1,
FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
channel->channel);
efx->type->push_irq_moderation(channel);
}
void efx_nic_fini_eventq(struct efx_channel *channel)
{
efx_oword_t reg;
struct efx_nic *efx = channel->efx;
/* Remove event queue from card */
EFX_ZERO_OWORD(reg);
efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
channel->channel);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
/* Unpin event queue */
efx_fini_special_buffer(efx, &channel->eventq);
}
/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
efx_free_special_buffer(channel->efx, &channel->eventq);
}
void efx_nic_event_test_start(struct efx_channel *channel)
{
channel->event_test_cpu = -1;
smp_wmb();
efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}
void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
efx_magic_event(efx_rx_queue_channel(rx_queue),
EFX_CHANNEL_MAGIC_FILL(rx_queue));
}
/**************************************************************************
*
* Hardware interrupts
* The hardware interrupt handler does very little work; all the event
* queue processing is carried out by per-channel tasklets.
*
**************************************************************************/
/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
bool enabled, bool force)
{
efx_oword_t int_en_reg_ker;
EFX_POPULATE_OWORD_3(int_en_reg_ker,
FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
FRF_AZ_KER_INT_KER, force,
FRF_AZ_DRV_INT_EN_KER, enabled);
efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}
void efx_nic_enable_interrupts(struct efx_nic *efx)
{
EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
efx_nic_interrupts(efx, true, false);
}
void efx_nic_disable_interrupts(struct efx_nic *efx)
{
/* Disable interrupts */
efx_nic_interrupts(efx, false, false);
}
/* Generate a test interrupt
* Interrupt must already have been enabled, otherwise nasty things
* may happen.
*/
void efx_nic_irq_test_start(struct efx_nic *efx)
{
efx->last_irq_cpu = -1;
smp_wmb();
efx_nic_interrupts(efx, true, true);
}
/* Process a fatal interrupt
* Disable bus mastering ASAP and schedule a reset
*/
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t *int_ker = efx->irq_status.addr;
efx_oword_t fatal_intr;
int error, mem_perr;
efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
EFX_OWORD_VAL(fatal_intr),
error ? "disabling bus mastering" : "no recognised error");
/* If this is a memory parity error, dump which blocks are offending */
mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
if (mem_perr) {
efx_oword_t reg;
efx_reado(efx, &reg, FR_AZ_MEM_STAT);
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
EFX_OWORD_VAL(reg));
}
/* Disable both devices */
pci_clear_master(efx->pci_dev);
if (efx_nic_is_dual_func(efx))
pci_clear_master(nic_data->pci_dev2);
efx_nic_disable_interrupts(efx);
/* Count errors and reset or disable the NIC accordingly */
if (efx->int_error_count == 0 ||
time_after(jiffies, efx->int_error_expire)) {
efx->int_error_count = 0;
efx->int_error_expire =
jiffies + EFX_INT_ERROR_EXPIRE * HZ;
}
if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - reset scheduled\n");
efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else {
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - max number of errors seen."
"NIC will be disabled\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
return IRQ_HANDLED;
}
/* Handle a legacy interrupt
* Acknowledges the interrupt and schedules event queue processing.
*/
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct efx_channel *channel;
efx_dword_t reg;
u32 queues;
int syserr;
/* Could this be ours? If interrupts are disabled then the
* channel state may not be valid.
*/
if (!efx->legacy_irq_enabled)
return result;
/* Read the ISR which also ACKs the interrupts */
efx_readd(efx, &reg, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
/* Handle non-event-queue sources */
if (queues & (1U << efx->irq_level)) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_nic_fatal_interrupt(efx);
efx->last_irq_cpu = raw_smp_processor_id();
}
if (queues != 0) {
if (EFX_WORKAROUND_15783(efx))
efx->irq_zero_count = 0;
/* Schedule processing of any interrupting queues */
efx_for_each_channel(channel, efx) {
if (queues & 1)
efx_schedule_channel_irq(channel);
queues >>= 1;
}
result = IRQ_HANDLED;
} else if (EFX_WORKAROUND_15783(efx)) {
efx_qword_t *event;
/* We can't return IRQ_HANDLED more than once on seeing ISR=0
* because this might be a shared interrupt. */
if (efx->irq_zero_count++ == 0)
result = IRQ_HANDLED;
/* Ensure we schedule or rearm all event queues */
efx_for_each_channel(channel, efx) {
event = efx_event(channel, channel->eventq_read_ptr);
if (efx_event_present(event))
efx_schedule_channel_irq(channel);
else
efx_nic_eventq_read_ack(channel);
}
}
if (result == IRQ_HANDLED)
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
return result;
}
/* Handle an MSI interrupt
*
* Handle an MSI hardware interrupt. This routine schedules event
* queue processing. No interrupt acknowledgement cycle is necessary.
* Also, we never need to check that the interrupt is for us, since
* MSI interrupts cannot be shared.
*/
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
struct efx_channel *channel = *(struct efx_channel **)dev_id;
struct efx_nic *efx = channel->efx;
efx_oword_t *int_ker = efx->irq_status.addr;
int syserr;
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
/* Handle non-event-queue sources */
if (channel->channel == efx->irq_level) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_nic_fatal_interrupt(efx);
efx->last_irq_cpu = raw_smp_processor_id();
}
/* Schedule processing of the channel */
efx_schedule_channel_irq(channel);
return IRQ_HANDLED;
}
/* Setup RSS indirection table.
* This maps from the hash value of the packet to RXQ
*/
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
size_t i = 0;
efx_dword_t dword;
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
return;
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
FR_BZ_RX_INDIRECTION_TBL_ROWS);
for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
efx->rx_indir_table[i]);
efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
}
}
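/* Illustrative sketch, not part of the driver: each row of the
* indirection table names the RX queue for one hash bucket, so filling
* the table round-robin spreads flows evenly across the RX channels.
* Hypothetical example of building such a table:
*/
static inline void __maybe_unused
efx_example_fill_indir_table(u32 *table, size_t rows, unsigned int n_rxq)
{
size_t i;
for (i = 0; i < rows; i++)
table[i] = i % n_rxq;
}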
/* Hook interrupt handler(s)
* Try MSI and then legacy interrupts.
*/
int efx_nic_init_interrupt(struct efx_nic *efx)
{
struct efx_channel *channel;
int rc;
if (!EFX_INT_MODE_USE_MSI(efx)) {
irq_handler_t handler;
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
handler = efx_legacy_interrupt;
else
handler = falcon_legacy_interrupt_a1;
rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
efx->name, efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook legacy IRQ %d\n",
efx->pci_dev->irq);
goto fail1;
}
return 0;
}
/* Hook MSI or MSI-X interrupt */
efx_for_each_channel(channel, efx) {
rc = request_irq(channel->irq, efx_msi_interrupt,
IRQF_PROBE_SHARED, /* Not shared */
efx->channel_name[channel->channel],
&efx->channel[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
goto fail2;
}
}
return 0;
fail2:
efx_for_each_channel(channel, efx)
free_irq(channel->irq, &efx->channel[channel->channel]);
fail1:
return rc;
}
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_oword_t reg;
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx) {
if (channel->irq)
free_irq(channel->irq, &efx->channel[channel->channel]);
}
/* ACK legacy interrupt */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
efx_reado(efx, &reg, FR_BZ_INT_ISR0);
else
falcon_irq_ack_a1(efx);
/* Disable legacy interrupt */
if (efx->legacy_irq)
free_irq(efx->legacy_irq, efx);
}
/* Looks at available SRAM resources and works out how many queues we
* can support, and where things like descriptor caches should live.
*
* SRAM is split up as follows:
* 0 buftbl entries for channels
* efx->vf_buftbl_base buftbl entries for SR-IOV
* efx->rx_dc_base RX descriptor caches
* efx->tx_dc_base TX descriptor caches
*/
void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
unsigned vi_count, buftbl_min;
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
efx->n_channels * EFX_MAX_EVQ_SIZE)
* sizeof(efx_qword_t) / EFX_BUF_SIZE);
vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
if (efx_sriov_wanted(efx)) {
unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
efx->vf_buftbl_base = buftbl_min;
vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
vi_count = max(vi_count, EFX_VI_BASE);
buftbl_free = (sram_lim_qw - buftbl_min -
vi_count * vi_dc_entries);
entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
efx_vf_size(efx));
vf_limit = min(buftbl_free / entries_per_vf,
(1024U - EFX_VI_BASE) >> efx->vi_scale);
if (efx->vf_count > vf_limit) {
netif_err(efx, probe, efx->net_dev,
"Reducing VF count from from %d to %d\n",
efx->vf_count, vf_limit);
efx->vf_count = vf_limit;
}
vi_count += efx->vf_count * efx_vf_size(efx);
}
#endif
efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
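/* Illustrative sketch, not part of the driver: buftbl_min above counts
* the EFX_BUF_SIZE pages needed to back every datapath ring, i.e.
* (entries * sizeof(efx_qword_t)) / EFX_BUF_SIZE summed over the RX, TX
* and event queues. Hypothetical helper for a single ring:
*/
static inline unsigned int __maybe_unused
efx_ring_buftbl_pages(unsigned int entries)
{
return entries * sizeof(efx_qword_t) / EFX_BUF_SIZE;
}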
u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
efx_oword_t altera_build;
efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
void efx_nic_init_common(struct efx_nic *efx)
{
efx_oword_t temp;
/* Set positions of descriptor caches in SRAM. */
EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
/* Set TX descriptor cache size. */
BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
/* Set RX descriptor cache size. Set low watermark to size-8, as
* this allows most efficient prefetching.
*/
BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
/* Program INT_KER address */
EFX_POPULATE_OWORD_2(temp,
FRF_AZ_NORM_INT_VEC_DIS_KER,
EFX_INT_MODE_USE_MSI(efx),
FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
/* Use an interrupt level unused by event queues */
efx->irq_level = 0x1f;
else
/* Use a valid MSI-X vector */
efx->irq_level = 0;
/* Enable all the genuinely fatal interrupts. (They are still
* masked by the overall interrupt mask, controlled by
* falcon_interrupts()).
*
* Note: All other fatal interrupts are enabled
*/
EFX_POPULATE_OWORD_3(temp,
FRF_AZ_ILL_ADR_INT_KER_EN, 1,
FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
efx_nic_push_rx_indir_table(efx);
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
* controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
*/
efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
/* Enable SW_EV to inherit in char driver - assume harmless here */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
/* Prefetch threshold 2 => fetch when descriptor cache half empty */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
/* Disable hardware watchdog which can misfire */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
/* Squash TX of packets of 16 bytes or less */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
EFX_POPULATE_OWORD_4(temp,
/* Default values */
FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
FRF_BZ_TX_PACE_SB_AF, 0xb,
FRF_BZ_TX_PACE_FB_BASE, 0,
/* Allow large pace values in the
* fast bin. */
FRF_BZ_TX_PACE_BIN_TH,
FFE_BZ_TX_PACE_RESERVED);
efx_writeo(efx, &temp, FR_BZ_TX_PACE);
}
}
/* Register dump */
#define REGISTER_REVISION_A 1
#define REGISTER_REVISION_B 2
#define REGISTER_REVISION_C 3
#define REGISTER_REVISION_Z 3 /* latest revision */
struct efx_nic_reg {
u32 offset:24;
u32 min_revision:2, max_revision:2;
};
#define REGISTER(name, min_rev, max_rev) { \
FR_ ## min_rev ## max_rev ## _ ## name, \
REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
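/* Editor's note (illustrative expansion, not upstream text): the token
 * pasting above turns, for example, REGISTER_BZ(RX_FILTER_CTL) into
 *
 * { FR_BZ_RX_FILTER_CTL, REGISTER_REVISION_B, REGISTER_REVISION_Z }
 *
 * so each entry of efx_nic_regs[] records a register offset plus the
 * [min, max] hardware revisions for which that register exists.
 */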
static const struct efx_nic_reg efx_nic_regs[] = {
REGISTER_AZ(ADR_REGION),
REGISTER_AZ(INT_EN_KER),
REGISTER_BZ(INT_EN_CHAR),
REGISTER_AZ(INT_ADR_KER),
REGISTER_BZ(INT_ADR_CHAR),
/* INT_ACK_KER is WO */
/* INT_ISR0 is RC */
REGISTER_AZ(HW_INIT),
REGISTER_CZ(USR_EV_CFG),
REGISTER_AB(EE_SPI_HCMD),
REGISTER_AB(EE_SPI_HADR),
REGISTER_AB(EE_SPI_HDATA),
REGISTER_AB(EE_BASE_PAGE),
REGISTER_AB(EE_VPD_CFG0),
/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
/* PCIE_CORE_INDIRECT is indirect */
REGISTER_AB(NIC_STAT),
REGISTER_AB(GPIO_CTL),
REGISTER_AB(GLB_CTL),
/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
REGISTER_BZ(DP_CTRL),
REGISTER_AZ(MEM_STAT),
REGISTER_AZ(CS_DEBUG),
REGISTER_AZ(ALTERA_BUILD),
REGISTER_AZ(CSR_SPARE),
REGISTER_AB(PCIE_SD_CTL0123),
REGISTER_AB(PCIE_SD_CTL45),
REGISTER_AB(PCIE_PCS_CTL_STAT),
/* DEBUG_DATA_OUT is not used */
/* DRV_EV is WO */
REGISTER_AZ(EVQ_CTL),
REGISTER_AZ(EVQ_CNT1),
REGISTER_AZ(EVQ_CNT2),
REGISTER_AZ(BUF_TBL_CFG),
REGISTER_AZ(SRM_RX_DC_CFG),
REGISTER_AZ(SRM_TX_DC_CFG),
REGISTER_AZ(SRM_CFG),
/* BUF_TBL_UPD is WO */
REGISTER_AZ(SRM_UPD_EVQ),
REGISTER_AZ(SRAM_PARITY),
REGISTER_AZ(RX_CFG),
REGISTER_BZ(RX_FILTER_CTL),
/* RX_FLUSH_DESCQ is WO */
REGISTER_AZ(RX_DC_CFG),
REGISTER_AZ(RX_DC_PF_WM),
REGISTER_BZ(RX_RSS_TKEY),
/* RX_NODESC_DROP is RC */
REGISTER_AA(RX_SELF_RST),
/* RX_DEBUG, RX_PUSH_DROP are not used */
REGISTER_CZ(RX_RSS_IPV6_REG1),
REGISTER_CZ(RX_RSS_IPV6_REG2),
REGISTER_CZ(RX_RSS_IPV6_REG3),
/* TX_FLUSH_DESCQ is WO */
REGISTER_AZ(TX_DC_CFG),
REGISTER_AA(TX_CHKSM_CFG),
REGISTER_AZ(TX_CFG),
/* TX_PUSH_DROP is not used */
REGISTER_AZ(TX_RESERVED),
REGISTER_BZ(TX_PACE),
/* TX_PACE_DROP_QID is RC */
REGISTER_BB(TX_VLAN),
REGISTER_BZ(TX_IPFIL_PORTEN),
REGISTER_AB(MD_TXD),
REGISTER_AB(MD_RXD),
REGISTER_AB(MD_CS),
REGISTER_AB(MD_PHY_ADR),
REGISTER_AB(MD_ID),
/* MD_STAT is RC */
REGISTER_AB(MAC_STAT_DMA),
REGISTER_AB(MAC_CTRL),
REGISTER_BB(GEN_MODE),
REGISTER_AB(MAC_MC_HASH_REG0),
REGISTER_AB(MAC_MC_HASH_REG1),
REGISTER_AB(GM_CFG1),
REGISTER_AB(GM_CFG2),
/* GM_IPG and GM_HD are not used */
REGISTER_AB(GM_MAX_FLEN),
/* GM_TEST is not used */
REGISTER_AB(GM_ADR1),
REGISTER_AB(GM_ADR2),
REGISTER_AB(GMF_CFG0),
REGISTER_AB(GMF_CFG1),
REGISTER_AB(GMF_CFG2),
REGISTER_AB(GMF_CFG3),
REGISTER_AB(GMF_CFG4),
REGISTER_AB(GMF_CFG5),
REGISTER_BB(TX_SRC_MAC_CTL),
REGISTER_AB(XM_ADR_LO),
REGISTER_AB(XM_ADR_HI),
REGISTER_AB(XM_GLB_CFG),
REGISTER_AB(XM_TX_CFG),
REGISTER_AB(XM_RX_CFG),
REGISTER_AB(XM_MGT_INT_MASK),
REGISTER_AB(XM_FC),
REGISTER_AB(XM_PAUSE_TIME),
REGISTER_AB(XM_TX_PARAM),
REGISTER_AB(XM_RX_PARAM),
/* XM_MGT_INT_MSK (note no 'A') is RC */
REGISTER_AB(XX_PWR_RST),
REGISTER_AB(XX_SD_CTL),
REGISTER_AB(XX_TXDRV_CTL),
/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
/* XX_CORE_STAT is partly RC */
};
struct efx_nic_reg_table {
u32 offset:24;
u32 min_revision:2, max_revision:2;
u32 step:6, rows:21;
};
#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
offset, \
REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
step, rows \
}
#define REGISTER_TABLE(name, min_rev, max_rev) \
REGISTER_TABLE_DIMENSIONS( \
name, FR_ ## min_rev ## max_rev ## _ ## name, \
min_rev, max_rev, \
FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name) \
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
FR_BZ_ ## name ## _STEP, \
FR_BB_ ## name ## _ROWS), \
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
FR_BZ_ ## name ## _STEP, \
FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
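/* Editor's note (illustrative expansion): REGISTER_TABLE_BB_CZ() emits two
 * entries that share the FR_BZ_ offset and step but carry per-revision row
 * counts; e.g. REGISTER_TABLE_BB_CZ(TIMER_TBL) expands to
 *
 * { FR_BZ_TIMER_TBL, REGISTER_REVISION_B, REGISTER_REVISION_B,
 * FR_BZ_TIMER_TBL_STEP, FR_BB_TIMER_TBL_ROWS },
 * { FR_BZ_TIMER_TBL, REGISTER_REVISION_C, REGISTER_REVISION_Z,
 * FR_BZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS }
 *
 * presumably because the table's row count changed between revisions B and
 * C while its layout did not.
 */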
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
/* DRIVER is not used */
/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
REGISTER_TABLE_BB(TX_IPFIL_TBL),
REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
/* We can't reasonably read all of the buffer table (up to 8MB!).
* However this driver will only use a few entries. Reading
* 1K entries allows for some expansion of queue count and
* size before we need to change the version. */
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
A, A, 8, 1024),
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
B, Z, 8, 1024),
REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
REGISTER_TABLE_BB_CZ(TIMER_TBL),
REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
/* TX_FILTER_TBL0 is huge and not used by this driver */
REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
REGISTER_TABLE_CZ(MC_TREG_SMEM),
/* MSIX_PBA_TABLE is not mapped */
/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
const struct efx_nic_reg *reg;
const struct efx_nic_reg_table *table;
size_t len = 0;
for (reg = efx_nic_regs;
reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
reg++)
if (efx->type->revision >= reg->min_revision &&
efx->type->revision <= reg->max_revision)
len += sizeof(efx_oword_t);
for (table = efx_nic_reg_tables;
table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
table++)
if (efx->type->revision >= table->min_revision &&
efx->type->revision <= table->max_revision)
len += table->rows * min_t(size_t, table->step, 16);
return len;
}
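/* Editor's note: the min_t(size_t, table->step, 16) cap above mirrors
 * efx_nic_get_regs() below - at most one 16-byte oword is copied per row,
 * so e.g. a step-32 (interleaved) table still contributes rows * 16 bytes
 * to the dump length.
 */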
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
const struct efx_nic_reg *reg;
const struct efx_nic_reg_table *table;
for (reg = efx_nic_regs;
reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
reg++) {
if (efx->type->revision >= reg->min_revision &&
efx->type->revision <= reg->max_revision) {
efx_reado(efx, (efx_oword_t *)buf, reg->offset);
buf += sizeof(efx_oword_t);
}
}
for (table = efx_nic_reg_tables;
table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
table++) {
size_t size, i;
if (!(efx->type->revision >= table->min_revision &&
efx->type->revision <= table->max_revision))
continue;
size = min_t(size_t, table->step, 16);
for (i = 0; i < table->rows; i++) {
switch (table->step) {
case 4: /* 32-bit register or SRAM */
efx_readd_table(efx, buf, table->offset, i);
break;
case 8: /* 64-bit SRAM */
efx_sram_readq(efx,
efx->membase + table->offset,
buf, i);
break;
case 16: /* 128-bit register */
efx_reado_table(efx, buf, table->offset, i);
break;
case 32: /* 128-bit register, interleaved */
efx_reado_table(efx, buf, table->offset, 2 * i);
break;
default:
WARN_ON(1);
return;
}
buf += size;
}
}
}
| gpl-2.0 |
gskywolf/Firefly-RK3288-Kernel | drivers/iommu/tegra-smmu.c | 1990 | 32888 | /*
* IOMMU API for SMMU in Tegra30
*
* Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define pr_fmt(fmt) "%s(): " fmt, __func__
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/tegra-ahb.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
enum smmu_hwgrp {
HWGRP_AFI,
HWGRP_AVPC,
HWGRP_DC,
HWGRP_DCB,
HWGRP_EPP,
HWGRP_G2,
HWGRP_HC,
HWGRP_HDA,
HWGRP_ISP,
HWGRP_MPE,
HWGRP_NV,
HWGRP_NV2,
HWGRP_PPCS,
HWGRP_SATA,
HWGRP_VDE,
HWGRP_VI,
HWGRP_COUNT,
HWGRP_END = ~0,
};
#define HWG_AFI (1 << HWGRP_AFI)
#define HWG_AVPC (1 << HWGRP_AVPC)
#define HWG_DC (1 << HWGRP_DC)
#define HWG_DCB (1 << HWGRP_DCB)
#define HWG_EPP (1 << HWGRP_EPP)
#define HWG_G2 (1 << HWGRP_G2)
#define HWG_HC (1 << HWGRP_HC)
#define HWG_HDA (1 << HWGRP_HDA)
#define HWG_ISP (1 << HWGRP_ISP)
#define HWG_MPE (1 << HWGRP_MPE)
#define HWG_NV (1 << HWGRP_NV)
#define HWG_NV2 (1 << HWGRP_NV2)
#define HWG_PPCS (1 << HWGRP_PPCS)
#define HWG_SATA (1 << HWGRP_SATA)
#define HWG_VDE (1 << HWGRP_VDE)
#define HWG_VI (1 << HWGRP_VI)
/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES (SZ_4K)
#define SMMU_CONFIG 0x10
#define SMMU_CONFIG_DISABLE 0
#define SMMU_CONFIG_ENABLE 1
/* REVISIT: To support multiple MCs */
enum {
_MC = 0,
};
enum {
_TLB = 0,
_PTC,
};
#define SMMU_CACHE_CONFIG_BASE 0x14
#define __SMMU_CACHE_CONFIG(mc, cache) (SMMU_CACHE_CONFIG_BASE + 4 * cache)
#define SMMU_CACHE_CONFIG(cache) __SMMU_CACHE_CONFIG(_MC, cache)
#define SMMU_CACHE_CONFIG_STATS_SHIFT 31
#define SMMU_CACHE_CONFIG_STATS_ENABLE (1 << SMMU_CACHE_CONFIG_STATS_SHIFT)
#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT 30
#define SMMU_CACHE_CONFIG_STATS_TEST (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT)
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE (1 << 29)
#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE 0x10
#define SMMU_TLB_CONFIG_RESET_VAL 0x20000010
#define SMMU_PTC_CONFIG_CACHE__ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN 0x3f
#define SMMU_PTC_CONFIG_RESET_VAL 0x2000003f
#define SMMU_PTB_ASID 0x1c
#define SMMU_PTB_ASID_CURRENT_SHIFT 0
#define SMMU_PTB_DATA 0x20
#define SMMU_PTB_DATA_RESET_VAL 0
#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT 29
#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT 30
#define SMMU_PTB_DATA_ASID_READABLE_SHIFT 31
#define SMMU_TLB_FLUSH 0x30
#define SMMU_TLB_FLUSH_VA_MATCH_ALL 0
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION 2
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP 3
#define SMMU_TLB_FLUSH_ASID_SHIFT 29
#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE 0
#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE 1
#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT 31
#define SMMU_PTC_FLUSH 0x34
#define SMMU_PTC_FLUSH_TYPE_ALL 0
#define SMMU_PTC_FLUSH_TYPE_ADR 1
#define SMMU_PTC_FLUSH_ADR_SHIFT 4
#define SMMU_ASID_SECURITY 0x38
#define SMMU_STATS_CACHE_COUNT_BASE 0x1f0
#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss) \
(SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss)
#define SMMU_TRANSLATION_ENABLE_0 0x228
#define SMMU_TRANSLATION_ENABLE_1 0x22c
#define SMMU_TRANSLATION_ENABLE_2 0x230
#define SMMU_AFI_ASID 0x238 /* PCIE */
#define SMMU_AVPC_ASID 0x23c /* AVP */
#define SMMU_DC_ASID 0x240 /* Display controller */
#define SMMU_DCB_ASID 0x244 /* Display controller B */
#define SMMU_EPP_ASID 0x248 /* Encoder pre-processor */
#define SMMU_G2_ASID 0x24c /* 2D engine */
#define SMMU_HC_ASID 0x250 /* Host1x */
#define SMMU_HDA_ASID 0x254 /* High-def audio */
#define SMMU_ISP_ASID 0x258 /* Image signal processor */
#define SMMU_MPE_ASID 0x264 /* MPEG encoder */
#define SMMU_NV_ASID 0x268 /* (3D) */
#define SMMU_NV2_ASID 0x26c /* (3D) */
#define SMMU_PPCS_ASID 0x270 /* AHB */
#define SMMU_SATA_ASID 0x278 /* SATA */
#define SMMU_VDE_ASID 0x27c /* Video decoder */
#define SMMU_VI_ASID 0x280 /* Video input */
#define SMMU_PDE_NEXT_SHIFT 28
#define SMMU_TLB_FLUSH_VA_SECTION__MASK 0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK 0xffffc000
#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT 12 /* right shift */
#define SMMU_TLB_FLUSH_VA(iova, which) \
((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \
SMMU_TLB_FLUSH_VA_##which##__SHIFT) | \
SMMU_TLB_FLUSH_VA_MATCH_##which)
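/* Editor's note (illustrative expansion): with the masks and shifts above,
 * SMMU_TLB_FLUSH_VA(iova, SECTION) becomes
 *
 * (((iova) & 0xffc00000) >> 12) | SMMU_TLB_FLUSH_VA_MATCH_SECTION
 *
 * i.e. the 4MB-section bits of the iova shifted into place, with the low
 * bits selecting the match type (2 = section, 3 = group).
 */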
#define SMMU_PTB_ASID_CUR(n) \
((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH_disable \
(SMMU_TLB_FLUSH_ASID_MATCH_DISABLE << \
SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE \
(SMMU_TLB_FLUSH_ASID_MATCH_ENABLE << \
SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
#define SMMU_PAGE_SHIFT 12
#define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT)
#define SMMU_PAGE_MASK ((1 << SMMU_PAGE_SHIFT) - 1)
#define SMMU_PDIR_COUNT 1024
#define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT)
#define SMMU_PTBL_COUNT 1024
#define SMMU_PTBL_SIZE (sizeof(unsigned long) * SMMU_PTBL_COUNT)
#define SMMU_PDIR_SHIFT 12
#define SMMU_PDE_SHIFT 12
#define SMMU_PTE_SHIFT 12
#define SMMU_PFN_MASK 0x000fffff
#define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12)
#define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22)
#define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22)
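/* Editor's illustrative helper - not used by the driver - showing how the
 * macros above split a virtual address; e.g. iova 0x40123456 decomposes to
 * pdn 0x100, page-table slot 0x123 and byte offset 0x456.
 */
static inline void smmu_example_split_iova(unsigned long iova,
unsigned long *pdn, unsigned long *ptbl_slot, unsigned long *off)
{
*pdn = SMMU_ADDR_TO_PDN(iova); /* iova >> 22: page-directory index */
*ptbl_slot = SMMU_ADDR_TO_PFN(iova) % SMMU_PTBL_COUNT; /* table index */
*off = iova & SMMU_PAGE_MASK; /* offset within the 4K page */
}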
#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
#define _NONSECURE (1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
#define _PDE_NEXT (1 << SMMU_PDE_NEXT_SHIFT)
#define _MASK_ATTR (_READABLE | _WRITABLE | _NONSECURE)
#define _PDIR_ATTR (_READABLE | _WRITABLE | _NONSECURE)
#define _PDE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
#define _PDE_ATTR_N (_PDE_ATTR | _PDE_NEXT)
#define _PDE_VACANT(pdn) (((pdn) << 10) | _PDE_ATTR)
#define _PTE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
#define _PTE_VACANT(addr) (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)
#define SMMU_MK_PDIR(page, attr) \
((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
#define SMMU_MK_PDE(page, attr) \
(unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
#define SMMU_EX_PTBL_PAGE(pde) \
pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
#define SMMU_PFN_TO_PTE(pfn, attr) (unsigned long)((pfn) | (attr))
#define SMMU_ASID_ENABLE(asid) ((asid) | (1 << 31))
#define SMMU_ASID_DISABLE 0
#define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0))
#define NUM_SMMU_REG_BANKS 3
#define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1)
#define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0)
#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
#define __smmu_client_disable_hwgrp(c) __smmu_client_set_hwgrp(c, 0, 0)
#define HWGRP_INIT(client) [HWGRP_##client] = SMMU_##client##_ASID
static const u32 smmu_hwgrp_asid_reg[] = {
HWGRP_INIT(AFI),
HWGRP_INIT(AVPC),
HWGRP_INIT(DC),
HWGRP_INIT(DCB),
HWGRP_INIT(EPP),
HWGRP_INIT(G2),
HWGRP_INIT(HC),
HWGRP_INIT(HDA),
HWGRP_INIT(ISP),
HWGRP_INIT(MPE),
HWGRP_INIT(NV),
HWGRP_INIT(NV2),
HWGRP_INIT(PPCS),
HWGRP_INIT(SATA),
HWGRP_INIT(VDE),
HWGRP_INIT(VI),
};
#define HWGRP_ASID_REG(x) (smmu_hwgrp_asid_reg[x])
/*
* Per client for address space
*/
struct smmu_client {
struct device *dev;
struct list_head list;
struct smmu_as *as;
u32 hwgrp;
};
/*
* Per address space
*/
struct smmu_as {
struct smmu_device *smmu; /* back pointer to container */
unsigned int asid;
spinlock_t lock; /* for pagetable */
struct page *pdir_page;
unsigned long pdir_attr;
unsigned long pde_attr;
unsigned long pte_attr;
unsigned int *pte_count;
struct list_head client;
spinlock_t client_lock; /* for client list */
};
struct smmu_debugfs_info {
struct smmu_device *smmu;
int mc;
int cache;
};
/*
* Per SMMU device - IOMMU device
*/
struct smmu_device {
void __iomem *regbase; /* register offset base */
void __iomem **regs; /* register block start address array */
void __iomem **rege; /* register block end address array */
int nregs; /* number of register blocks */
unsigned long iovmm_base; /* remappable base address */
unsigned long page_count; /* total remappable size */
spinlock_t lock;
char *name;
struct device *dev;
struct page *avp_vector_page; /* dummy page shared by all AS's */
/*
* Register image savers for suspend/resume
*/
unsigned long translation_enable_0;
unsigned long translation_enable_1;
unsigned long translation_enable_2;
unsigned long asid_security;
struct dentry *debugfs_root;
struct smmu_debugfs_info *debugfs_info;
struct device_node *ahb;
int num_as;
struct smmu_as as[0]; /* Run-time allocated array */
};
static struct smmu_device *smmu_handle; /* unique for a system */
/*
* SMMU register accessors
*/
static inline bool smmu_valid_reg(struct smmu_device *smmu,
void __iomem *addr)
{
int i;
for (i = 0; i < smmu->nregs; i++) {
if (addr < smmu->regs[i])
break;
if (addr <= smmu->rege[i])
return true;
}
return false;
}
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{
void __iomem *addr = smmu->regbase + offs;
BUG_ON(!smmu_valid_reg(smmu, addr));
return readl(addr);
}
static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
void __iomem *addr = smmu->regbase + offs;
BUG_ON(!smmu_valid_reg(smmu, addr));
writel(val, addr);
}
#define VA_PAGE_TO_PA(va, page) \
(page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))
#define FLUSH_CPU_DCACHE(va, page, size) \
do { \
unsigned long _pa_ = VA_PAGE_TO_PA(va, page); \
__cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
outer_flush_range(_pa_, _pa_+(size_t)(size)); \
} while (0)
/*
* Any interaction between any block on PPSB and a block on APB or AHB
* must have these read-back barriers to ensure the APB/AHB bus
* transaction is complete before initiating activity on the PPSB
* block.
*/
#define FLUSH_SMMU_REGS(smmu) smmu_read(smmu, SMMU_CONFIG)
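/* Editor's illustrative usage (the pattern used by the functions below):
 *
 * smmu_write(smmu, val, SMMU_TLB_FLUSH);
 * FLUSH_SMMU_REGS(smmu); <- read-back; the posted write is now complete
 *
 * every register write that must take effect before further PPSB activity
 * is followed by this dummy read, as in smmu_flush_regs() and
 * flush_ptc_and_tlb().
 */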
#define smmu_client_hwgrp(c) (u32)((c)->dev->platform_data)
static int __smmu_client_set_hwgrp(struct smmu_client *c,
unsigned long map, int on)
{
int i;
struct smmu_as *as = c->as;
u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid);
struct smmu_device *smmu = as->smmu;
WARN_ON(!on && map);
if (on && !map)
return -EINVAL;
if (!on)
map = smmu_client_hwgrp(c);
for_each_set_bit(i, &map, HWGRP_COUNT) {
offs = HWGRP_ASID_REG(i);
val = smmu_read(smmu, offs);
if (on) {
if (WARN_ON(val & mask))
goto err_hw_busy;
val |= mask;
} else {
WARN_ON((val & mask) == mask);
val &= ~mask;
}
smmu_write(smmu, val, offs);
}
FLUSH_SMMU_REGS(smmu);
c->hwgrp = map;
return 0;
err_hw_busy:
for_each_set_bit(i, &map, HWGRP_COUNT) {
offs = HWGRP_ASID_REG(i);
val = smmu_read(smmu, offs);
val &= ~mask;
smmu_write(smmu, val, offs);
}
return -EBUSY;
}
static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on)
{
u32 val;
unsigned long flags;
struct smmu_as *as = c->as;
struct smmu_device *smmu = as->smmu;
spin_lock_irqsave(&smmu->lock, flags);
val = __smmu_client_set_hwgrp(c, map, on);
spin_unlock_irqrestore(&smmu->lock, flags);
return val;
}
/*
* Flush all TLB entries and all PTC entries
* Caller must lock smmu
*/
static void smmu_flush_regs(struct smmu_device *smmu, int enable)
{
u32 val;
smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
FLUSH_SMMU_REGS(smmu);
val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
SMMU_TLB_FLUSH_ASID_MATCH_disable;
smmu_write(smmu, val, SMMU_TLB_FLUSH);
if (enable)
smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
FLUSH_SMMU_REGS(smmu);
}
static int smmu_setup_regs(struct smmu_device *smmu)
{
int i;
u32 val;
for (i = 0; i < smmu->num_as; i++) {
struct smmu_as *as = &smmu->as[i];
struct smmu_client *c;
smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
val = as->pdir_page ?
SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
SMMU_PTB_DATA_RESET_VAL;
smmu_write(smmu, val, SMMU_PTB_DATA);
list_for_each_entry(c, &as->client, list)
__smmu_client_set_hwgrp(c, c->hwgrp, 1);
}
smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB));
smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC));
smmu_flush_regs(smmu, 1);
return tegra_ahb_enable_smmu(smmu->ahb);
}
static void flush_ptc_and_tlb(struct smmu_device *smmu,
struct smmu_as *as, dma_addr_t iova,
unsigned long *pte, struct page *page, int is_pde)
{
u32 val;
unsigned long tlb_flush_va = is_pde
? SMMU_TLB_FLUSH_VA(iova, SECTION)
: SMMU_TLB_FLUSH_VA(iova, GROUP);
val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
smmu_write(smmu, val, SMMU_PTC_FLUSH);
FLUSH_SMMU_REGS(smmu);
val = tlb_flush_va |
SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
smmu_write(smmu, val, SMMU_TLB_FLUSH);
FLUSH_SMMU_REGS(smmu);
}
static void free_ptbl(struct smmu_as *as, dma_addr_t iova)
{
unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
unsigned long *pdir = (unsigned long *)page_address(as->pdir_page);
if (pdir[pdn] != _PDE_VACANT(pdn)) {
dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn);
ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
__free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
pdir[pdn] = _PDE_VACANT(pdn);
FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
as->pdir_page, 1);
}
}
static void free_pdir(struct smmu_as *as)
{
unsigned addr;
int count;
struct device *dev = as->smmu->dev;
if (!as->pdir_page)
return;
addr = as->smmu->iovmm_base;
count = as->smmu->page_count;
while (count-- > 0) {
free_ptbl(as, addr);
addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
}
ClearPageReserved(as->pdir_page);
__free_page(as->pdir_page);
as->pdir_page = NULL;
devm_kfree(dev, as->pte_count);
as->pte_count = NULL;
}
/*
* Maps PTBL for given iova and returns the PTE address
* Caller must unmap the mapped PTBL returned in *ptbl_page_p
*/
static unsigned long *locate_pte(struct smmu_as *as,
dma_addr_t iova, bool allocate,
struct page **ptbl_page_p,
unsigned int **count)
{
unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
unsigned long *pdir = page_address(as->pdir_page);
unsigned long *ptbl;
if (pdir[pdn] != _PDE_VACANT(pdn)) {
/* Mapped entry table already exists */
*ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
ptbl = page_address(*ptbl_page_p);
} else if (!allocate) {
return NULL;
} else {
int pn;
unsigned long addr = SMMU_PDN_TO_ADDR(pdn);
/* Vacant - allocate a new page table */
dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn);
*ptbl_page_p = alloc_page(GFP_ATOMIC);
if (!*ptbl_page_p) {
dev_err(as->smmu->dev,
"failed to allocate smmu_device page table\n");
return NULL;
}
SetPageReserved(*ptbl_page_p);
ptbl = (unsigned long *)page_address(*ptbl_page_p);
for (pn = 0; pn < SMMU_PTBL_COUNT;
pn++, addr += SMMU_PAGE_SIZE) {
ptbl[pn] = _PTE_VACANT(addr);
}
FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
as->pde_attr | _PDE_NEXT);
FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
as->pdir_page, 1);
}
*count = &as->pte_count[pdn];
return &ptbl[ptn % SMMU_PTBL_COUNT];
}
#ifdef CONFIG_SMMU_SIG_DEBUG
static void put_signature(struct smmu_as *as,
dma_addr_t iova, unsigned long pfn)
{
struct page *page;
unsigned long *vaddr;
page = pfn_to_page(pfn);
vaddr = page_address(page);
if (!vaddr)
return;
vaddr[0] = iova;
vaddr[1] = pfn << PAGE_SHIFT;
FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
}
#else
static inline void put_signature(struct smmu_as *as,
unsigned long addr, unsigned long pfn)
{
}
#endif
/*
* Caller must not hold as->lock
*/
static int alloc_pdir(struct smmu_as *as)
{
unsigned long *pdir, flags;
int pdn, err = 0;
u32 val;
struct smmu_device *smmu = as->smmu;
struct page *page;
unsigned int *cnt;
/*
* do the allocation, then grab as->lock
*/
cnt = devm_kzalloc(smmu->dev,
sizeof(cnt[0]) * SMMU_PDIR_COUNT,
GFP_KERNEL);
page = alloc_page(GFP_KERNEL | __GFP_DMA);
spin_lock_irqsave(&as->lock, flags);
if (as->pdir_page) {
/* We raced; free the redundant allocation */
err = -EAGAIN;
goto err_out;
}
if (!page || !cnt) {
dev_err(smmu->dev, "failed to allocate at %s\n", __func__);
err = -ENOMEM;
goto err_out;
}
as->pdir_page = page;
as->pte_count = cnt;
SetPageReserved(as->pdir_page);
pdir = page_address(as->pdir_page);
for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
pdir[pdn] = _PDE_VACANT(pdn);
FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page);
smmu_write(smmu, val, SMMU_PTC_FLUSH);
FLUSH_SMMU_REGS(as->smmu);
val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
smmu_write(smmu, val, SMMU_TLB_FLUSH);
FLUSH_SMMU_REGS(as->smmu);
spin_unlock_irqrestore(&as->lock, flags);
return 0;
err_out:
spin_unlock_irqrestore(&as->lock, flags);
devm_kfree(smmu->dev, cnt);
if (page)
__free_page(page);
return err;
}
static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
{
unsigned long *pte;
struct page *page;
unsigned int *count;
pte = locate_pte(as, iova, false, &page, &count);
if (WARN_ON(!pte))
return;
if (WARN_ON(*pte == _PTE_VACANT(iova)))
return;
*pte = _PTE_VACANT(iova);
FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
if (!--(*count))
free_ptbl(as, iova);
}
static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
unsigned long pfn)
{
struct smmu_device *smmu = as->smmu;
unsigned long *pte;
unsigned int *count;
struct page *page;
pte = locate_pte(as, iova, true, &page, &count);
if (WARN_ON(!pte))
return;
if (*pte == _PTE_VACANT(iova))
(*count)++;
*pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
if (unlikely((*pte == _PTE_VACANT(iova))))
(*count)--;
FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
put_signature(as, iova, pfn);
}
static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t bytes, int prot)
{
struct smmu_as *as = domain->priv;
unsigned long pfn = __phys_to_pfn(pa);
unsigned long flags;
dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);
if (!pfn_valid(pfn))
return -ENOMEM;
spin_lock_irqsave(&as->lock, flags);
__smmu_iommu_map_pfn(as, iova, pfn);
spin_unlock_irqrestore(&as->lock, flags);
return 0;
}
static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t bytes)
{
struct smmu_as *as = domain->priv;
unsigned long flags;
dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova);
spin_lock_irqsave(&as->lock, flags);
__smmu_iommu_unmap(as, iova);
spin_unlock_irqrestore(&as->lock, flags);
return SMMU_PAGE_SIZE;
}
static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct smmu_as *as = domain->priv;
unsigned long *pte;
unsigned int *count;
struct page *page;
unsigned long pfn;
unsigned long flags;
spin_lock_irqsave(&as->lock, flags);
pte = locate_pte(as, iova, true, &page, &count);
if (WARN_ON(!pte)) {
spin_unlock_irqrestore(&as->lock, flags);
return 0;
}
pfn = *pte & SMMU_PFN_MASK;
WARN_ON(!pfn_valid(pfn));
dev_dbg(as->smmu->dev,
"iova:%08llx pfn:%08lx asid:%d\n", (unsigned long long)iova,
pfn, as->asid);
spin_unlock_irqrestore(&as->lock, flags);
return PFN_PHYS(pfn);
}
static int smmu_iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap)
{
return 0;
}
static int smmu_iommu_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct smmu_as *as = domain->priv;
struct smmu_device *smmu = as->smmu;
struct smmu_client *client, *c;
u32 map;
int err;
client = devm_kzalloc(smmu->dev, sizeof(*c), GFP_KERNEL);
if (!client)
return -ENOMEM;
client->dev = dev;
client->as = as;
map = (unsigned long)dev->platform_data;
if (!map)
return -EINVAL;
err = smmu_client_enable_hwgrp(client, map);
if (err)
goto err_hwgrp;
spin_lock(&as->client_lock);
list_for_each_entry(c, &as->client, list) {
if (c->dev == dev) {
dev_err(smmu->dev,
"%s is already attached\n", dev_name(c->dev));
err = -EINVAL;
goto err_client;
}
}
list_add(&client->list, &as->client);
spin_unlock(&as->client_lock);
/*
* Reserve "page zero" for AVP vectors using a common dummy
* page.
*/
if (map & HWG_AVPC) {
struct page *page;
page = as->smmu->avp_vector_page;
__smmu_iommu_map_pfn(as, 0, page_to_pfn(page));
pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
}
dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
return 0;
err_client:
smmu_client_disable_hwgrp(client);
spin_unlock(&as->client_lock);
err_hwgrp:
devm_kfree(smmu->dev, client);
return err;
}
static void smmu_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct smmu_as *as = domain->priv;
struct smmu_device *smmu = as->smmu;
struct smmu_client *c;
spin_lock(&as->client_lock);
list_for_each_entry(c, &as->client, list) {
if (c->dev == dev) {
smmu_client_disable_hwgrp(c);
list_del(&c->list);
c->as = NULL;
dev_dbg(smmu->dev,
"%s is detached\n", dev_name(c->dev));
/* free only after the last use of 'c' to avoid a use-after-free */
devm_kfree(smmu->dev, c);
goto out;
}
}
dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
spin_unlock(&as->client_lock);
}
static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
int i, err = -EAGAIN;
unsigned long flags;
struct smmu_as *as;
struct smmu_device *smmu = smmu_handle;
/* Look for a free AS with lock held */
for (i = 0; i < smmu->num_as; i++) {
as = &smmu->as[i];
if (as->pdir_page)
continue;
err = alloc_pdir(as);
if (!err)
goto found;
if (err != -EAGAIN)
break;
}
if (i == smmu->num_as)
dev_err(smmu->dev, "no free AS\n");
return err;
found:
spin_lock_irqsave(&smmu->lock, flags);
/* Update PDIR register */
smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
smmu_write(smmu,
SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
FLUSH_SMMU_REGS(smmu);
spin_unlock_irqrestore(&smmu->lock, flags);
domain->priv = as;
domain->geometry.aperture_start = smmu->iovmm_base;
domain->geometry.aperture_end = smmu->iovmm_base +
smmu->page_count * SMMU_PAGE_SIZE - 1;
domain->geometry.force_aperture = true;
dev_dbg(smmu->dev, "smmu_as@%p\n", as);
return 0;
}
static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
{
struct smmu_as *as = domain->priv;
struct smmu_device *smmu = as->smmu;
unsigned long flags;
spin_lock_irqsave(&as->lock, flags);
if (as->pdir_page) {
spin_lock(&smmu->lock);
smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
FLUSH_SMMU_REGS(smmu);
spin_unlock(&smmu->lock);
free_pdir(as);
}
if (!list_empty(&as->client)) {
struct smmu_client *c;
list_for_each_entry(c, &as->client, list)
smmu_iommu_detach_dev(domain, c->dev);
}
spin_unlock_irqrestore(&as->lock, flags);
domain->priv = NULL;
dev_dbg(smmu->dev, "smmu_as@%p\n", as);
}
static struct iommu_ops smmu_iommu_ops = {
.domain_init = smmu_iommu_domain_init,
.domain_destroy = smmu_iommu_domain_destroy,
.attach_dev = smmu_iommu_attach_dev,
.detach_dev = smmu_iommu_detach_dev,
.map = smmu_iommu_map,
.unmap = smmu_iommu_unmap,
.iova_to_phys = smmu_iommu_iova_to_phys,
.domain_has_cap = smmu_iommu_domain_has_cap,
.pgsize_bitmap = SMMU_IOMMU_PGSIZES,
};
/* Should be in the order of enum */
static const char * const smmu_debugfs_mc[] = { "mc", };
static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", };
static ssize_t smmu_debugfs_stats_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *pos)
{
struct smmu_debugfs_info *info;
struct smmu_device *smmu;
int i;
enum {
_OFF = 0,
_ON,
_RESET,
};
const char * const command[] = {
[_OFF] = "off",
[_ON] = "on",
[_RESET] = "reset",
};
char str[] = "reset";
u32 val;
size_t offs;
count = min_t(size_t, count, sizeof(str));
if (copy_from_user(str, buffer, count))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(command); i++)
if (strncmp(str, command[i],
strlen(command[i])) == 0)
break;
if (i == ARRAY_SIZE(command))
return -EINVAL;
info = file_inode(file)->i_private;
smmu = info->smmu;
offs = SMMU_CACHE_CONFIG(info->cache);
val = smmu_read(smmu, offs);
switch (i) {
case _OFF:
val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE;
val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
smmu_write(smmu, val, offs);
break;
case _ON:
val |= SMMU_CACHE_CONFIG_STATS_ENABLE;
val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
smmu_write(smmu, val, offs);
break;
case _RESET:
val |= SMMU_CACHE_CONFIG_STATS_TEST;
smmu_write(smmu, val, offs);
val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
smmu_write(smmu, val, offs);
break;
default:
BUG();
break;
}
dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__,
val, smmu_read(smmu, offs), offs);
return count;
}
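/* Editor's usage sketch - hedged, since the path depends on where debugfs
 * is mounted (conventionally /sys/kernel/debug). With the tree built by
 * smmu_debugfs_create() below ("<dev>" being dev_name(smmu->dev)):
 *
 * echo on > /sys/kernel/debug/<dev>/mc/tlb - start the hit/miss counters
 * echo reset > /sys/kernel/debug/<dev>/mc/tlb - pulse TEST to zero them
 * cat /sys/kernel/debug/<dev>/mc/tlb - prints "hit:xxxxxxxx miss:xxxxxxxx"
 *
 * The "ptc" file works identically for the page-table cache.
 */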
static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
{
struct smmu_debugfs_info *info = s->private;
struct smmu_device *smmu = info->smmu;
int i;
const char * const stats[] = { "hit", "miss", };
for (i = 0; i < ARRAY_SIZE(stats); i++) {
u32 val;
size_t offs;
offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i);
val = smmu_read(smmu, offs);
seq_printf(s, "%s:%08x ", stats[i], val);
dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__,
stats[i], val, offs);
}
seq_printf(s, "\n");
return 0;
}
static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, smmu_debugfs_stats_show, inode->i_private);
}
static const struct file_operations smmu_debugfs_stats_fops = {
.open = smmu_debugfs_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = smmu_debugfs_stats_write,
};
static void smmu_debugfs_delete(struct smmu_device *smmu)
{
debugfs_remove_recursive(smmu->debugfs_root);
kfree(smmu->debugfs_info);
}
static void smmu_debugfs_create(struct smmu_device *smmu)
{
int i;
size_t bytes;
struct dentry *root;
bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) *
sizeof(*smmu->debugfs_info);
smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL);
if (!smmu->debugfs_info)
return;
root = debugfs_create_dir(dev_name(smmu->dev), NULL);
if (!root)
goto err_out;
smmu->debugfs_root = root;
for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
int j;
struct dentry *mc;
mc = debugfs_create_dir(smmu_debugfs_mc[i], root);
if (!mc)
goto err_out;
for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
struct dentry *cache;
struct smmu_debugfs_info *info;
info = smmu->debugfs_info;
info += i * ARRAY_SIZE(smmu_debugfs_mc) + j;
info->smmu = smmu;
info->mc = i;
info->cache = j;
cache = debugfs_create_file(smmu_debugfs_cache[j],
S_IWUGO | S_IRUGO, mc,
(void *)info,
&smmu_debugfs_stats_fops);
if (!cache)
goto err_out;
}
}
return;
err_out:
smmu_debugfs_delete(smmu);
}
static int tegra_smmu_suspend(struct device *dev)
{
struct smmu_device *smmu = dev_get_drvdata(dev);
smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0);
smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1);
smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2);
smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY);
return 0;
}
static int tegra_smmu_resume(struct device *dev)
{
struct smmu_device *smmu = dev_get_drvdata(dev);
unsigned long flags;
int err;
spin_lock_irqsave(&smmu->lock, flags);
err = smmu_setup_regs(smmu);
spin_unlock_irqrestore(&smmu->lock, flags);
return err;
}
static int tegra_smmu_probe(struct platform_device *pdev)
{
struct smmu_device *smmu;
struct device *dev = &pdev->dev;
int i, asids, err = 0;
dma_addr_t uninitialized_var(base);
size_t bytes, uninitialized_var(size);
if (smmu_handle)
return -EIO;
BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);
if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids))
return -ENODEV;
bytes = sizeof(*smmu) + asids * sizeof(*smmu->as);
smmu = devm_kzalloc(dev, bytes, GFP_KERNEL);
if (!smmu) {
dev_err(dev, "failed to allocate smmu_device\n");
return -ENOMEM;
}
smmu->nregs = pdev->num_resources;
smmu->regs = devm_kzalloc(dev, 2 * smmu->nregs * sizeof(*smmu->regs),
GFP_KERNEL);
smmu->rege = smmu->regs + smmu->nregs;
if (!smmu->regs)
return -ENOMEM;
for (i = 0; i < smmu->nregs; i++) {
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
return -ENODEV;
smmu->regs[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(smmu->regs[i]))
return PTR_ERR(smmu->regs[i]);
smmu->rege[i] = smmu->regs[i] + resource_size(res) - 1;
}
/* Same as "mc" 1st regiter block start address */
smmu->regbase = (void __iomem *)((u32)smmu->regs[0] & PAGE_MASK);
err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
if (err)
return -ENODEV;
if (size & SMMU_PAGE_MASK)
return -EINVAL;
size >>= SMMU_PAGE_SHIFT;
if (!size)
return -EINVAL;
smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
if (!smmu->ahb)
return -ENODEV;
smmu->dev = dev;
smmu->num_as = asids;
smmu->iovmm_base = base;
smmu->page_count = size;
smmu->translation_enable_0 = ~0;
smmu->translation_enable_1 = ~0;
smmu->translation_enable_2 = ~0;
smmu->asid_security = 0;
for (i = 0; i < smmu->num_as; i++) {
struct smmu_as *as = &smmu->as[i];
as->smmu = smmu;
as->asid = i;
as->pdir_attr = _PDIR_ATTR;
as->pde_attr = _PDE_ATTR;
as->pte_attr = _PTE_ATTR;
spin_lock_init(&as->lock);
spin_lock_init(&as->client_lock);
INIT_LIST_HEAD(&as->client);
}
spin_lock_init(&smmu->lock);
err = smmu_setup_regs(smmu);
if (err)
return err;
platform_set_drvdata(pdev, smmu);
smmu->avp_vector_page = alloc_page(GFP_KERNEL);
if (!smmu->avp_vector_page)
return -ENOMEM;
smmu_debugfs_create(smmu);
smmu_handle = smmu;
bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
return 0;
}
static int tegra_smmu_remove(struct platform_device *pdev)
{
struct smmu_device *smmu = platform_get_drvdata(pdev);
int i;
smmu_debugfs_delete(smmu);
smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
for (i = 0; i < smmu->num_as; i++)
free_pdir(&smmu->as[i]);
__free_page(smmu->avp_vector_page);
smmu_handle = NULL;
return 0;
}
const struct dev_pm_ops tegra_smmu_pm_ops = {
.suspend = tegra_smmu_suspend,
.resume = tegra_smmu_resume,
};
static struct of_device_id tegra_smmu_of_match[] = {
{ .compatible = "nvidia,tegra30-smmu", },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
static struct platform_driver tegra_smmu_driver = {
.probe = tegra_smmu_probe,
.remove = tegra_smmu_remove,
.driver = {
.owner = THIS_MODULE,
.name = "tegra-smmu",
.pm = &tegra_smmu_pm_ops,
.of_match_table = tegra_smmu_of_match,
},
};
static int tegra_smmu_init(void)
{
return platform_driver_register(&tegra_smmu_driver);
}
static void __exit tegra_smmu_exit(void)
{
platform_driver_unregister(&tegra_smmu_driver);
}
subsys_initcall(tegra_smmu_init);
module_exit(tegra_smmu_exit);
MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-smmu");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
djvoleur/S6_UniPR_BOI1 | drivers/input/misc/ab8500-ponkey.c | 2246 | 3844 | /*
* Copyright (C) ST-Ericsson SA 2010
*
* License Terms: GNU General Public License v2
* Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
*
* AB8500 Power-On Key handler
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/of.h>
#include <linux/slab.h>
/**
* struct ab8500_ponkey - ab8500 ponkey information
* @input_dev: pointer to input device
* @ab8500: ab8500 parent
* @irq_dbf: irq number for falling transition
* @irq_dbr: irq number for rising transition
*/
struct ab8500_ponkey {
struct input_dev *idev;
struct ab8500 *ab8500;
int irq_dbf;
int irq_dbr;
};
/* AB8500 gives us an interrupt when ONKEY is held */
static irqreturn_t ab8500_ponkey_handler(int irq, void *data)
{
struct ab8500_ponkey *ponkey = data;
if (irq == ponkey->irq_dbf)
input_report_key(ponkey->idev, KEY_POWER, true);
else if (irq == ponkey->irq_dbr)
input_report_key(ponkey->idev, KEY_POWER, false);
input_sync(ponkey->idev);
return IRQ_HANDLED;
}
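/* Editor's note (illustrative, derived from the handler above): a press
 * raises the falling-edge IRQ (irq_dbf) and a release the rising-edge IRQ
 * (irq_dbr), so userspace sees the usual evdev sequence on the device node:
 *
 * EV_KEY KEY_POWER 1, EV_SYN (on press)
 * EV_KEY KEY_POWER 0, EV_SYN (on release)
 */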
static int ab8500_ponkey_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_ponkey *ponkey;
struct input_dev *input;
int irq_dbf, irq_dbr;
int error;
irq_dbf = platform_get_irq_byname(pdev, "ONKEY_DBF");
if (irq_dbf < 0) {
dev_err(&pdev->dev, "No IRQ for ONKEY_DBF, error=%d\n", irq_dbf);
return irq_dbf;
}
irq_dbr = platform_get_irq_byname(pdev, "ONKEY_DBR");
if (irq_dbr < 0) {
dev_err(&pdev->dev, "No IRQ for ONKEY_DBR, error=%d\n", irq_dbr);
return irq_dbr;
}
ponkey = kzalloc(sizeof(struct ab8500_ponkey), GFP_KERNEL);
input = input_allocate_device();
if (!ponkey || !input) {
error = -ENOMEM;
goto err_free_mem;
}
ponkey->idev = input;
ponkey->ab8500 = ab8500;
ponkey->irq_dbf = irq_dbf;
ponkey->irq_dbr = irq_dbr;
input->name = "AB8500 POn(PowerOn) Key";
input->dev.parent = &pdev->dev;
input_set_capability(input, EV_KEY, KEY_POWER);
error = request_any_context_irq(ponkey->irq_dbf, ab8500_ponkey_handler,
0, "ab8500-ponkey-dbf", ponkey);
if (error < 0) {
dev_err(ab8500->dev, "Failed to request dbf IRQ#%d: %d\n",
ponkey->irq_dbf, error);
goto err_free_mem;
}
error = request_any_context_irq(ponkey->irq_dbr, ab8500_ponkey_handler,
0, "ab8500-ponkey-dbr", ponkey);
if (error < 0) {
dev_err(ab8500->dev, "Failed to request dbr IRQ#%d: %d\n",
ponkey->irq_dbr, error);
goto err_free_dbf_irq;
}
error = input_register_device(ponkey->idev);
if (error) {
dev_err(ab8500->dev, "Can't register input device: %d\n", error);
goto err_free_dbr_irq;
}
platform_set_drvdata(pdev, ponkey);
return 0;
err_free_dbr_irq:
free_irq(ponkey->irq_dbr, ponkey);
err_free_dbf_irq:
free_irq(ponkey->irq_dbf, ponkey);
err_free_mem:
input_free_device(input);
kfree(ponkey);
return error;
}
static int ab8500_ponkey_remove(struct platform_device *pdev)
{
struct ab8500_ponkey *ponkey = platform_get_drvdata(pdev);
free_irq(ponkey->irq_dbf, ponkey);
free_irq(ponkey->irq_dbr, ponkey);
input_unregister_device(ponkey->idev);
kfree(ponkey);
platform_set_drvdata(pdev, NULL);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id ab8500_ponkey_match[] = {
{ .compatible = "stericsson,ab8500-ponkey", },
{}
};
#endif
static struct platform_driver ab8500_ponkey_driver = {
.driver = {
.name = "ab8500-poweron-key",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(ab8500_ponkey_match),
},
.probe = ab8500_ponkey_probe,
.remove = ab8500_ponkey_remove,
};
module_platform_driver(ab8500_ponkey_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
MODULE_DESCRIPTION("ST-Ericsson AB8500 Power-ON(Pon) Key driver");
| gpl-2.0 |
lolhi/ef52-kernel | drivers/scsi/isci/request.c | 3270 | 108440 | /*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* BSD LICENSE
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <scsi/scsi_cmnd.h>
#include "isci.h"
#include "task.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"
#undef C
#define C(a) (#a)
const char *req_state_name(enum sci_base_request_states state)
{
static const char * const strings[] = REQUEST_STATES;
return strings[state];
}
#undef C
static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
int idx)
{
if (idx == 0)
return &ireq->tc->sgl_pair_ab;
else if (idx == 1)
return &ireq->tc->sgl_pair_cd;
else if (idx < 0)
return NULL;
else
return &ireq->sg_table[idx - 2];
}
static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
struct isci_request *ireq, u32 idx)
{
u32 offset;
if (idx == 0) {
offset = (void *) &ireq->tc->sgl_pair_ab -
(void *) &ihost->task_context_table[0];
return ihost->task_context_dma + offset;
} else if (idx == 1) {
offset = (void *) &ireq->tc->sgl_pair_cd -
(void *) &ihost->task_context_table[0];
return ihost->task_context_dma + offset;
}
return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}
static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
{
e->length = sg_dma_len(sg);
e->address_upper = upper_32_bits(sg_dma_address(sg));
e->address_lower = lower_32_bits(sg_dma_address(sg));
e->address_modifier = 0;
}
static void sci_request_build_sgl(struct isci_request *ireq)
{
struct isci_host *ihost = ireq->isci_host;
struct sas_task *task = isci_request_access_task(ireq);
struct scatterlist *sg = NULL;
dma_addr_t dma_addr;
u32 sg_idx = 0;
struct scu_sgl_element_pair *scu_sg = NULL;
struct scu_sgl_element_pair *prev_sg = NULL;
if (task->num_scatter > 0) {
sg = task->scatter;
while (sg) {
scu_sg = to_sgl_element_pair(ireq, sg_idx);
init_sgl_element(&scu_sg->A, sg);
sg = sg_next(sg);
if (sg) {
init_sgl_element(&scu_sg->B, sg);
sg = sg_next(sg);
} else
memset(&scu_sg->B, 0, sizeof(scu_sg->B));
if (prev_sg) {
dma_addr = to_sgl_element_pair_dma(ihost,
ireq,
sg_idx);
prev_sg->next_pair_upper =
upper_32_bits(dma_addr);
prev_sg->next_pair_lower =
lower_32_bits(dma_addr);
}
prev_sg = scu_sg;
sg_idx++;
}
} else { /* handle when no sg */
scu_sg = to_sgl_element_pair(ireq, sg_idx);
dma_addr = dma_map_single(&ihost->pdev->dev,
task->scatter,
task->total_xfer_len,
task->data_dir);
ireq->zero_scatter_daddr = dma_addr;
scu_sg->A.length = task->total_xfer_len;
scu_sg->A.address_upper = upper_32_bits(dma_addr);
scu_sg->A.address_lower = lower_32_bits(dma_addr);
}
if (scu_sg) {
scu_sg->next_pair_upper = 0;
scu_sg->next_pair_lower = 0;
}
}
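/* Editor's sketch of the chain that sci_request_build_sgl() produces: the
 * first two SGL element pairs are embedded in the task context itself
 * (sgl_pair_ab and sgl_pair_cd, indices 0 and 1 in to_sgl_element_pair()),
 * and any further pairs spill into ireq->sg_table:
 *
 * TC:[A|B] -> TC:[C|D] -> sg_table[0] -> ... -> last (next_pair = 0:0)
 *
 * Each pair holds up to two scatterlist entries; the B element of the
 * final pair is zeroed when the scatterlist has an odd length.
 */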
static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
struct ssp_cmd_iu *cmd_iu;
struct sas_task *task = isci_request_access_task(ireq);
cmd_iu = &ireq->ssp.cmd;
memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
cmd_iu->add_cdb_len = 0;
cmd_iu->_r_a = 0;
cmd_iu->_r_b = 0;
cmd_iu->en_fburst = 0; /* unsupported */
cmd_iu->task_prio = task->ssp_task.task_prio;
cmd_iu->task_attr = task->ssp_task.task_attr;
cmd_iu->_r_c = 0;
sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
sizeof(task->ssp_task.cdb) / sizeof(u32));
}
static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
struct ssp_task_iu *task_iu;
struct sas_task *task = isci_request_access_task(ireq);
struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
task_iu = &ireq->ssp.tmf;
memset(task_iu, 0, sizeof(struct ssp_task_iu));
memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
task_iu->task_func = isci_tmf->tmf_code;
task_iu->task_tag =
(test_bit(IREQ_TMF, &ireq->flags)) ?
isci_tmf->io_tag :
SCI_CONTROLLER_INVALID_IO_TAG;
}
/**
* This method will fill in the SCU Task Context for any type of SSP request.
* @ireq: the general IO request object being constructed
* @task_context: the SCU task context buffer being filled in
*
*/
static void scu_ssp_request_construct_task_context(
struct isci_request *ireq,
struct scu_task_context *task_context)
{
dma_addr_t dma_addr;
struct isci_remote_device *idev;
struct isci_port *iport;
idev = ireq->target_device;
iport = idev->owning_port;
/* Fill in the TC with its required data */
task_context->abort = 0;
task_context->priority = 0;
task_context->initiator_request = 1;
task_context->connection_rate = idev->connection_rate;
task_context->protocol_engine_index = ISCI_PEG;
task_context->logical_port_index = iport->physical_port_index;
task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
task_context->valid = SCU_TASK_CONTEXT_VALID;
task_context->context_type = SCU_TASK_CONTEXT_TYPE;
task_context->remote_node_index = idev->rnc.remote_node_index;
task_context->command_code = 0;
task_context->link_layer_control = 0;
task_context->do_not_dma_ssp_good_response = 1;
task_context->strict_ordering = 0;
task_context->control_frame = 0;
task_context->timeout_enable = 0;
task_context->block_guard_enable = 0;
task_context->address_modifier = 0;
/* task_context->type.ssp.tag = ireq->io_tag; */
task_context->task_phase = 0x01;
ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
(ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
(iport->physical_port_index <<
SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
ISCI_TAG_TCI(ireq->io_tag));
/*
* Copy the physical address for the command buffer to the
* SCU Task Context
*/
dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
task_context->command_iu_upper = upper_32_bits(dma_addr);
task_context->command_iu_lower = lower_32_bits(dma_addr);
/*
* Copy the physical address for the response buffer to the
* SCU Task Context
*/
dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
task_context->response_iu_upper = upper_32_bits(dma_addr);
task_context->response_iu_lower = lower_32_bits(dma_addr);
}
static u8 scu_bg_blk_size(struct scsi_device *sdp)
{
switch (sdp->sector_size) {
case 512:
return 0;
case 1024:
return 1;
case 4096:
return 3;
default:
return 0xff;
}
}
static u32 scu_dif_bytes(u32 len, u32 sector_size)
{
return (len >> ilog2(sector_size)) * 8;
}
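/* Editor's worked example (hypothetical transfer sizes): with 512-byte
 * sectors (blk_sz code 0 from scu_bg_blk_size() above), a 64 KiB transfer
 * spans 65536 >> ilog2(512) = 128 sectors, so scu_dif_bytes() reports
 * 128 * 8 = 1024 bytes of protection information - the amount the DIF
 * insert/strip helpers below add to transfer_length_bytes.
 */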
static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
{
struct scu_task_context *tc = ireq->tc;
struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
u8 blk_sz = scu_bg_blk_size(scmd->device);
tc->block_guard_enable = 1;
tc->blk_prot_en = 1;
tc->blk_sz = blk_sz;
/* DIF write insert */
tc->blk_prot_func = 0x2;
tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
scmd->device->sector_size);
/* always init to 0, used by hw */
tc->interm_crc_val = 0;
tc->init_crc_seed = 0;
tc->app_tag_verify = 0;
tc->app_tag_gen = 0;
tc->ref_tag_seed_verify = 0;
/* always init to same as bg_blk_sz */
tc->UD_bytes_immed_val = scmd->device->sector_size;
tc->reserved_DC_0 = 0;
/* always init to 8 */
tc->DIF_bytes_immed_val = 8;
tc->reserved_DC_1 = 0;
tc->bgc_blk_sz = scmd->device->sector_size;
tc->reserved_E0_0 = 0;
tc->app_tag_gen_mask = 0;
/** setup block guard control **/
tc->bgctl = 0;
/* DIF write insert */
tc->bgctl_f.op = 0x2;
tc->app_tag_verify_mask = 0;
/* must init to 0 for hw */
tc->blk_guard_err = 0;
tc->reserved_E8_0 = 0;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
else if (type & SCSI_PROT_DIF_TYPE3)
tc->ref_tag_seed_gen = 0;
}
static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
{
struct scu_task_context *tc = ireq->tc;
struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
u8 blk_sz = scu_bg_blk_size(scmd->device);
tc->block_guard_enable = 1;
tc->blk_prot_en = 1;
tc->blk_sz = blk_sz;
/* DIF read strip */
tc->blk_prot_func = 0x1;
tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
scmd->device->sector_size);
/* always init to 0, used by hw */
tc->interm_crc_val = 0;
tc->init_crc_seed = 0;
tc->app_tag_verify = 0;
tc->app_tag_gen = 0;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
else if (type & SCSI_PROT_DIF_TYPE3)
tc->ref_tag_seed_verify = 0;
/* always init to same as bg_blk_sz */
tc->UD_bytes_immed_val = scmd->device->sector_size;
tc->reserved_DC_0 = 0;
/* always init to 8 */
tc->DIF_bytes_immed_val = 8;
tc->reserved_DC_1 = 0;
tc->bgc_blk_sz = scmd->device->sector_size;
tc->reserved_E0_0 = 0;
tc->app_tag_gen_mask = 0;
/** setup block guard control **/
tc->bgctl = 0;
/* DIF read strip */
tc->bgctl_f.crc_verify = 1;
tc->bgctl_f.op = 0x1;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
tc->bgctl_f.ref_tag_chk = 1;
tc->bgctl_f.app_f_detect = 1;
} else if (type & SCSI_PROT_DIF_TYPE3)
tc->bgctl_f.app_ref_f_detect = 1;
tc->app_tag_verify_mask = 0;
/* must init to 0 for hw */
tc->blk_guard_err = 0;
tc->reserved_E8_0 = 0;
tc->ref_tag_seed_gen = 0;
}
/**
* This method will fill in the SCU Task Context for an SSP IO request.
* @ireq: the IO request object being constructed
*
*/
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
enum dma_data_direction dir,
u32 len)
{
struct scu_task_context *task_context = ireq->tc;
struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
struct scsi_cmnd *scmd = sas_task->uldd_task;
u8 prot_type = scsi_get_prot_type(scmd);
u8 prot_op = scsi_get_prot_op(scmd);
scu_ssp_request_construct_task_context(ireq, task_context);
task_context->ssp_command_iu_length =
sizeof(struct ssp_cmd_iu) / sizeof(u32);
task_context->type.ssp.frame_type = SSP_COMMAND;
switch (dir) {
case DMA_FROM_DEVICE:
case DMA_NONE:
default:
task_context->task_type = SCU_TASK_TYPE_IOREAD;
break;
case DMA_TO_DEVICE:
task_context->task_type = SCU_TASK_TYPE_IOWRITE;
break;
}
task_context->transfer_length_bytes = len;
if (task_context->transfer_length_bytes > 0)
sci_request_build_sgl(ireq);
if (prot_type != SCSI_PROT_DIF_TYPE0) {
if (prot_op == SCSI_PROT_READ_STRIP)
scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
else if (prot_op == SCSI_PROT_WRITE_INSERT)
scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
}
}
/**
* This method will fill in the SCU Task Context for an SSP Task request.
* The following important settings are used:
*  -# priority == SCU_TASK_PRIORITY_HIGH. This ensures that the task
*     request is issued ahead of other requests destined for the same
*     remote node.
*  -# task_type == SCU_TASK_TYPE_RAW_FRAME, matching the construction
*     below, where a raw task frame is used to perform task management.
*  -# control_frame == 1. This ensures that the proper endianness is set
*     so that the bytes are transmitted in the right order for a task
*     frame.
* @ireq: This parameter specifies the task request object being
* constructed.
*
*/
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
struct scu_task_context *task_context = ireq->tc;
scu_ssp_reqeust_construct_task_context(ireq, task_context);
task_context->control_frame = 1;
task_context->priority = SCU_TASK_PRIORITY_HIGH;
task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
task_context->transfer_length_bytes = 0;
task_context->type.ssp.frame_type = SSP_TASK;
task_context->ssp_command_iu_length =
sizeof(struct ssp_task_iu) / sizeof(u32);
}
/**
* This method will fill in the SCU Task Context for any type of SATA
* request. It is called from the various SATA constructors.
* @ireq: The general IO request object which is to be used in
* constructing the SCU task context.
* @task_context: The buffer pointer for the SCU task context which is being
* constructed.
*
* On return, the general IO request construction is complete and the buffer
* assignment for the command buffer is complete. TODO: Revisit task context
* construction to determine what is common for SSP/SMP/STP task context
* structures.
*/
static void scu_sata_request_construct_task_context(
struct isci_request *ireq,
struct scu_task_context *task_context)
{
dma_addr_t dma_addr;
struct isci_remote_device *idev;
struct isci_port *iport;
idev = ireq->target_device;
iport = idev->owning_port;
/* Fill in the TC with its required data */
task_context->abort = 0;
task_context->priority = SCU_TASK_PRIORITY_NORMAL;
task_context->initiator_request = 1;
task_context->connection_rate = idev->connection_rate;
task_context->protocol_engine_index = ISCI_PEG;
task_context->logical_port_index = iport->physical_port_index;
task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
task_context->valid = SCU_TASK_CONTEXT_VALID;
task_context->context_type = SCU_TASK_CONTEXT_TYPE;
task_context->remote_node_index = idev->rnc.remote_node_index;
task_context->command_code = 0;
task_context->link_layer_control = 0;
task_context->do_not_dma_ssp_good_response = 1;
task_context->strict_ordering = 0;
task_context->control_frame = 0;
task_context->timeout_enable = 0;
task_context->block_guard_enable = 0;
task_context->address_modifier = 0;
task_context->task_phase = 0x01;
task_context->ssp_command_iu_length =
(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
/* Set the first word of the H2D REG FIS */
task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
(ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
(iport->physical_port_index <<
SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
ISCI_TAG_TCI(ireq->io_tag));
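/* Illustrative note: post_context packs the post-TC request type, protocol
* engine group, logical port and task context index into a single dword.
* E.g. (hypothetical values) with ISCI_PEG == 0, physical_port_index == 1
* and ISCI_TAG_TCI(io_tag) == 5, the value is
* SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
* (0 << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
* (1 << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 5.
*/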
/*
* Copy the physical address for the command buffer to the SCU Task
* Context. We must offset the command buffer by 4 bytes because the
* first 4 bytes are transferred in the body of the TC.
*/
dma_addr = sci_io_request_get_dma_addr(ireq,
((char *) &ireq->stp.cmd) +
sizeof(u32));
task_context->command_iu_upper = upper_32_bits(dma_addr);
task_context->command_iu_lower = lower_32_bits(dma_addr);
/* SATA Requests do not have a response buffer */
task_context->response_iu_upper = 0;
task_context->response_iu_lower = 0;
}
static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
struct scu_task_context *task_context = ireq->tc;
scu_sata_request_construct_task_context(ireq, task_context);
task_context->control_frame = 0;
task_context->priority = SCU_TASK_PRIORITY_NORMAL;
task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
task_context->type.stp.fis_type = FIS_REGH2D;
task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}
static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
bool copy_rx_frame)
{
struct isci_stp_request *stp_req = &ireq->stp.req;
scu_stp_raw_request_construct_task_context(ireq);
stp_req->status = 0;
stp_req->sgl.offset = 0;
stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
if (copy_rx_frame) {
sci_request_build_sgl(ireq);
stp_req->sgl.index = 0;
} else {
/* The user does not want the data copied to the SGL buffer location */
stp_req->sgl.index = -1;
}
return SCI_SUCCESS;
}
/**
*
* @ireq: This parameter specifies the request to be constructed as an
* optimized request.
* @optimized_task_type: This parameter specifies whether the request is to
* be a UDMA request or an NCQ request. A value of 0 indicates UDMA; a
* value of 1 indicates NCQ.
* @len: This parameter specifies the transfer length in bytes.
* @dir: This parameter specifies the DMA data direction.
*
* This method performs request construction common to all types of STP
* requests that are optimized by the silicon (i.e. UDMA, NCQ).
*/
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
u8 optimized_task_type,
u32 len,
enum dma_data_direction dir)
{
struct scu_task_context *task_context = ireq->tc;
/* Build the STP task context structure */
scu_sata_request_construct_task_context(ireq, task_context);
/* Copy over the SGL elements */
sci_request_build_sgl(ireq);
/* Copy over the number of bytes to be transferred */
task_context->transfer_length_bytes = len;
if (dir == DMA_TO_DEVICE) {
/*
* The difference between the DMA IN and DMA OUT request task type
* values are consistent with the difference between FPDMA READ
* and FPDMA WRITE values. Add the supplied task type parameter
* to this difference to set the task type properly for this
* DATA OUT (WRITE) case. */
task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
- SCU_TASK_TYPE_DMA_IN);
} else {
/*
* For the DATA IN (READ) case, simply save the supplied
* optimized task type. */
task_context->task_type = optimized_task_type;
}
}
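/* Illustration (relies on the encoding noted above, where READ/WRITE
* task-type pairs sit at a constant distance): for dir == DMA_TO_DEVICE,
* passing SCU_TASK_TYPE_FPDMAQ_READ yields SCU_TASK_TYPE_FPDMAQ_READ +
* (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN), i.e. the matching
* FPDMA write type.
*/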
static void sci_atapi_construct(struct isci_request *ireq)
{
struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
struct sas_task *task;
/* To simplify the implementation we take advantage of the
* silicon's partial acceleration of atapi protocol (dma data
* transfers), so we promote all commands to dma protocol. This
* breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
*/
h2d_fis->features |= ATAPI_PKT_DMA;
scu_stp_raw_request_construct_task_context(ireq);
task = isci_request_access_task(ireq);
if (task->data_dir == DMA_NONE)
task->total_xfer_len = 0;
/* clear the response so we can detect arrival of an
* unsolicited D2H FIS
*/
ireq->stp.rsp.fis_type = 0;
}
static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
u32 len,
enum dma_data_direction dir,
bool copy)
{
struct sas_task *task = isci_request_access_task(ireq);
struct domain_device *dev = ireq->target_device->domain_dev;
/* check for management protocols */
if (test_bit(IREQ_TMF, &ireq->flags)) {
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
dev_err(&ireq->owning_controller->pdev->dev,
"%s: Request 0x%p received un-handled SAT "
"management protocol 0x%x.\n",
__func__, ireq, tmf->tmf_code);
return SCI_FAILURE;
}
if (!sas_protocol_ata(task->task_proto)) {
dev_err(&ireq->owning_controller->pdev->dev,
"%s: Non-ATA protocol in SATA path: 0x%x\n",
__func__,
task->task_proto);
return SCI_FAILURE;
}
/* ATAPI */
if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
task->ata_task.fis.command == ATA_CMD_PACKET) {
sci_atapi_construct(ireq);
return SCI_SUCCESS;
}
/* non data */
if (task->data_dir == DMA_NONE) {
scu_stp_raw_request_construct_task_context(ireq);
return SCI_SUCCESS;
}
/* NCQ */
if (task->ata_task.use_ncq) {
sci_stp_optimized_request_construct(ireq,
SCU_TASK_TYPE_FPDMAQ_READ,
len, dir);
return SCI_SUCCESS;
}
/* DMA */
if (task->ata_task.dma_xfer) {
sci_stp_optimized_request_construct(ireq,
SCU_TASK_TYPE_DMA_IN,
len, dir);
return SCI_SUCCESS;
} else /* PIO */
return sci_stp_pio_request_construct(ireq, copy);
}
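/* Summary of the construction paths above (informational only):
* ATAPI packet command -> sci_atapi_construct()
* DMA_NONE             -> raw H2D register FIS construction
* NCQ                  -> optimized construct, SCU_TASK_TYPE_FPDMAQ_READ
* DMA                  -> optimized construct, SCU_TASK_TYPE_DMA_IN
* otherwise            -> PIO construct
*/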
static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
struct sas_task *task = isci_request_access_task(ireq);
ireq->protocol = SCIC_SSP_PROTOCOL;
scu_ssp_io_request_construct_task_context(ireq,
task->data_dir,
task->total_xfer_len);
sci_io_request_build_ssp_command_iu(ireq);
sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
return SCI_SUCCESS;
}
enum sci_status sci_task_request_construct_ssp(
struct isci_request *ireq)
{
/* Construct the SSP Task SCU Task Context */
scu_ssp_task_request_construct_task_context(ireq);
/* Fill in the SSP Task IU */
sci_task_request_build_ssp_task_iu(ireq);
sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
return SCI_SUCCESS;
}
static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
enum sci_status status;
bool copy = false;
struct sas_task *task = isci_request_access_task(ireq);
ireq->protocol = SCIC_STP_PROTOCOL;
copy = (task->data_dir != DMA_NONE);
status = sci_io_request_construct_sata(ireq,
task->total_xfer_len,
task->data_dir,
copy);
if (status == SCI_SUCCESS)
sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
return status;
}
/**
* sci_req_tx_bytes - bytes transferred when reply underruns request
* @ireq: request that was terminated early
*/
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
struct isci_host *ihost = ireq->owning_controller;
u32 ret_val = 0;
if (readl(&ihost->smu_registers->address_modifier) == 0) {
void __iomem *scu_reg_base = ihost->scu_registers;
/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
* BAR1 is the scu_registers
* 0x20002C = 0x200000 + 0x2c
* = start of task context SRAM + offset of (type.ssp.data_offset)
* TCi is the task context index derived from the io_tag of struct isci_request
*/
ret_val = readl(scu_reg_base +
(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
}
return ret_val;
}
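/* Worked example (hypothetical tag): for ISCI_TAG_TCI(ireq->io_tag) == 2
* the read address is scu_registers + 0x200000 +
* offsetof(struct scu_task_context, type.ssp.data_offset) +
* 2 * sizeof(struct scu_task_context), i.e. the data_offset field of the
* third task context image in SCU SRAM.
*/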
enum sci_status sci_request_start(struct isci_request *ireq)
{
enum sci_base_request_states state;
struct scu_task_context *tc = ireq->tc;
struct isci_host *ihost = ireq->owning_controller;
state = ireq->sm.current_state_id;
if (state != SCI_REQ_CONSTRUCTED) {
dev_warn(&ihost->pdev->dev,
"%s: SCIC IO Request requested to start while in wrong "
"state %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
}
tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
switch (tc->protocol_type) {
case SCU_TASK_CONTEXT_PROTOCOL_SMP:
case SCU_TASK_CONTEXT_PROTOCOL_SSP:
/* SSP/SMP Frame */
tc->type.ssp.tag = ireq->io_tag;
tc->type.ssp.target_port_transfer_tag = 0xFFFF;
break;
case SCU_TASK_CONTEXT_PROTOCOL_STP:
/* STP/SATA Frame
* tc->type.stp.ncq_tag = ireq->ncq_tag;
*/
break;
case SCU_TASK_CONTEXT_PROTOCOL_NONE:
/* TODO: When do we set no protocol type? */
break;
default:
/* This should never happen since we build the IO
* requests */
break;
}
/* Add to the post_context the io tag value */
ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
/* Everything is good go ahead and change state */
sci_change_state(&ireq->sm, SCI_REQ_STARTED);
return SCI_SUCCESS;
}
enum sci_status
sci_io_request_terminate(struct isci_request *ireq)
{
enum sci_base_request_states state;
state = ireq->sm.current_state_id;
switch (state) {
case SCI_REQ_CONSTRUCTED:
ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
return SCI_SUCCESS;
case SCI_REQ_STARTED:
case SCI_REQ_TASK_WAIT_TC_COMP:
case SCI_REQ_SMP_WAIT_RESP:
case SCI_REQ_SMP_WAIT_TC_COMP:
case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
case SCI_REQ_STP_UDMA_WAIT_D2H:
case SCI_REQ_STP_NON_DATA_WAIT_H2D:
case SCI_REQ_STP_NON_DATA_WAIT_D2H:
case SCI_REQ_STP_PIO_WAIT_H2D:
case SCI_REQ_STP_PIO_WAIT_FRAME:
case SCI_REQ_STP_PIO_DATA_IN:
case SCI_REQ_STP_PIO_DATA_OUT:
case SCI_REQ_ATAPI_WAIT_H2D:
case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
case SCI_REQ_ATAPI_WAIT_D2H:
case SCI_REQ_ATAPI_WAIT_TC_COMP:
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
return SCI_SUCCESS;
case SCI_REQ_TASK_WAIT_TC_RESP:
/* The task frame was already confirmed to have been
* sent by the SCU HW. Since the state machine is
* now only waiting for the task response itself,
* abort the request and complete it immediately
* and don't wait for the task response.
*/
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
return SCI_SUCCESS;
case SCI_REQ_ABORTING:
/* If a request has a termination requested twice, return
* a failure indication, since HW confirmation of the first
* abort is still outstanding.
*/
case SCI_REQ_COMPLETED:
default:
dev_warn(&ireq->owning_controller->pdev->dev,
"%s: SCIC IO Request requested to abort while in wrong "
"state %d\n",
__func__,
ireq->sm.current_state_id);
break;
}
return SCI_FAILURE_INVALID_STATE;
}
enum sci_status sci_request_complete(struct isci_request *ireq)
{
enum sci_base_request_states state;
struct isci_host *ihost = ireq->owning_controller;
state = ireq->sm.current_state_id;
if (WARN_ONCE(state != SCI_REQ_COMPLETED,
"isci: request completion from wrong state (%s)\n",
req_state_name(state)))
return SCI_FAILURE_INVALID_STATE;
if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
sci_controller_release_frame(ihost,
ireq->saved_rx_frame_index);
/* XXX can we just stop the machine and remove the 'final' state? */
sci_change_state(&ireq->sm, SCI_REQ_FINAL);
return SCI_SUCCESS;
}
enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
u32 event_code)
{
enum sci_base_request_states state;
struct isci_host *ihost = ireq->owning_controller;
state = ireq->sm.current_state_id;
if (state != SCI_REQ_STP_PIO_DATA_IN) {
dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
__func__, event_code, req_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
switch (scu_get_event_specifier(event_code)) {
case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
/* We are waiting for data and the SCU has R_ERR the data frame.
* Go back to waiting for the D2H Register FIS
*/
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
return SCI_SUCCESS;
default:
dev_err(&ihost->pdev->dev,
"%s: pio request unexpected event %#x\n",
__func__, event_code);
/* TODO Should we fail the PIO request when we get an
* unexpected event?
*/
return SCI_FAILURE;
}
}
/*
* This function copies response data for requests returning response data
* instead of sense data.
* @sci_req: This parameter specifies the request object for which to copy
* the response data.
*/
static void sci_io_request_copy_response(struct isci_request *ireq)
{
void *resp_buf;
u32 len;
struct ssp_response_iu *ssp_response;
struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
ssp_response = &ireq->ssp.rsp;
resp_buf = &isci_tmf->resp.resp_iu;
len = min_t(u32,
SSP_RESP_IU_MAX_SIZE,
be32_to_cpu(ssp_response->response_data_len));
memcpy(resp_buf, ssp_response->resp_data, len);
}
static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
u32 completion_code)
{
struct ssp_response_iu *resp_iu;
u8 datapres;
/* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
* to determine the SDMA status
*/
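/* Layout reminder (as implied by SCU_MAKE_COMPLETION_STATUS, which shifts
* by SCU_COMPLETION_TL_STATUS_SHIFT): the transport-layer status field
* extracted by SCU_GET_COMPLETION_TL_STATUS is compared below against
* SCU_TASK_DONE_* values shifted into that position.
*/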
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
break;
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
/* There are times when the SCU hardware will return an early
* response because the io request specified more data than is
* returned by the target device (mode pages, inquiry data,
* etc.). We must check the response status to see if this is
* truly a failed request or a good request that just got
* completed early.
*/
struct ssp_response_iu *resp = &ireq->ssp.rsp;
ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
sci_swab32_cpy(&ireq->ssp.rsp,
&ireq->ssp.rsp,
word_cnt);
if (resp->status == 0) {
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
} else {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
}
break;
}
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
sci_swab32_cpy(&ireq->ssp.rsp,
&ireq->ssp.rsp,
word_cnt);
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
break;
}
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
* guaranteed to be received before this completion status is
* posted?
*/
resp_iu = &ireq->ssp.rsp;
datapres = resp_iu->datapres;
if (datapres == 1 || datapres == 2) {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
} else {
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
}
break;
/* only stp device gets suspended. */
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
if (ireq->protocol == SCIC_STP_PROTOCOL) {
ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT;
ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
} else {
ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT;
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
}
break;
/* both stp/ssp device gets suspended */
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT;
ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
break;
/* neither ssp nor stp gets suspended. */
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
default:
ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT;
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
break;
}
/*
* TODO: This is probably wrong for ACK/NAK timeout conditions
*/
/* In all cases we will treat this as the completion of the IO req. */
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
return SCI_SUCCESS;
}
static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
default:
/* Unless we get some strange error, wait for the task abort to complete
* TODO: Should there be a state change for this completion?
*/
break;
}
return SCI_SUCCESS;
}
static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
break;
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
/* Currently, the decision is to simply allow the task request
* to timeout if the task IU wasn't received successfully.
* There is a potential for receiving multiple task responses if
* we decide to send the task IU again.
*/
dev_warn(&ireq->owning_controller->pdev->dev,
"%s: TaskRequest:0x%p CompletionCode:%x - "
"ACK/NAK timeout\n", __func__, ireq,
completion_code);
sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
break;
default:
/*
* All other completion status cause the IO to be complete.
* If a NAK was received, then it is up to the user to retry
* the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
}
return SCI_SUCCESS;
}
static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
/* In the AWAIT RESPONSE state, any TC completion is
* unexpected, but if the TC has success status, we
* complete the IO anyway.
*/
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
/* These statuses have been seen with a specific LSI
* expander, which sometimes is not able to send an smp
* response within 2 ms. This causes our hardware to break
* the connection and set the TC completion with one of
* these SMP_XXX_XX_ERR statuses. For this type of error,
* we ask the ihost user to retry the request.
*/
ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
default:
/* All other completion status cause the IO to be complete. If a NAK
* was received, then it is up to the user to retry the request
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
}
return SCI_SUCCESS;
}
static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
default:
/* All other completion status cause the IO to be
* complete. If a NAK was received, then it is up to
* the user to retry the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
}
return SCI_SUCCESS;
}
static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
{
struct scu_sgl_element *sgl;
struct scu_sgl_element_pair *sgl_pair;
struct isci_request *ireq = to_ireq(stp_req);
struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
if (!sgl_pair)
sgl = NULL;
else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
if (sgl_pair->B.address_lower == 0 &&
sgl_pair->B.address_upper == 0) {
sgl = NULL;
} else {
pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
sgl = &sgl_pair->B;
}
} else {
if (sgl_pair->next_pair_lower == 0 &&
sgl_pair->next_pair_upper == 0) {
sgl = NULL;
} else {
pio_sgl->index++;
pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
sgl = &sgl_pair->A;
}
}
return sgl;
}
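/* Traversal order illustrated: within a pair the walk is element A then
* element B; a zeroed B element or a zeroed next-pair pointer terminates
* the list, otherwise the walk continues with element A of the next pair
* (pio_sgl->index advances by one).
*/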
static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
break;
default:
/* All other completion status cause the IO to be
* complete. If a NAK was received, then it is up to
* the user to retry the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
}
return SCI_SUCCESS;
}
#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
/* transmit a DATA FIS from (current sgl + offset) for the input
* parameter length; the current sgl and offset are already stored in the IO request
*/
static enum sci_status sci_stp_request_pio_data_out_transmit_data_frame(
struct isci_request *ireq,
u32 length)
{
struct isci_stp_request *stp_req = &ireq->stp.req;
struct scu_task_context *task_context = ireq->tc;
struct scu_sgl_element_pair *sgl_pair;
struct scu_sgl_element *current_sgl;
/* Recycle the TC and reconstruct it for sending out a DATA FIS containing
* the data from current_sgl+offset for the input length
*/
sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
current_sgl = &sgl_pair->A;
else
current_sgl = &sgl_pair->B;
/* update the TC */
task_context->command_iu_upper = current_sgl->address_upper;
task_context->command_iu_lower = current_sgl->address_lower;
task_context->transfer_length_bytes = length;
task_context->type.stp.fis_type = FIS_DATA;
/* send the new TC out. */
return sci_controller_continue_io(ireq);
}
static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
{
struct isci_stp_request *stp_req = &ireq->stp.req;
struct scu_sgl_element_pair *sgl_pair;
enum sci_status status = SCI_SUCCESS;
struct scu_sgl_element *sgl;
u32 offset;
u32 len = 0;
offset = stp_req->sgl.offset;
sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
return SCI_FAILURE;
if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
sgl = &sgl_pair->A;
len = sgl_pair->A.length - offset;
} else {
sgl = &sgl_pair->B;
len = sgl_pair->B.length - offset;
}
if (stp_req->pio_len == 0)
return SCI_SUCCESS;
if (stp_req->pio_len >= len) {
status = sci_stp_request_pio_data_out_transmit_data_frame(ireq, len);
if (status != SCI_SUCCESS)
return status;
stp_req->pio_len -= len;
/* update the current sgl, offset and save for future */
sgl = pio_sgl_next(stp_req);
offset = 0;
} else if (stp_req->pio_len < len) {
sci_stp_request_pio_data_out_transmit_data_frame(ireq, stp_req->pio_len);
/* Sgl offset will be adjusted and saved for future */
offset += stp_req->pio_len;
sgl->address_lower += stp_req->pio_len;
stp_req->pio_len = 0;
}
stp_req->sgl.offset = offset;
return status;
}
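/* Bookkeeping example for the function above (illustrative numbers):
* with an 8 KiB SGL element at offset 0 and pio_len == 3 KiB, the second
* branch sends a 3 KiB DATA FIS, advances address_lower and offset by
* 3 KiB and clears pio_len; with pio_len == 10 KiB the first branch sends
* the full 8 KiB, subtracts it from pio_len and steps to the next SGL
* element with offset reset to 0.
*/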
/**
*
* @stp_req: The request that is used for the SGL processing.
* @data_buf: The buffer of data to be copied.
* @len: The length of the data transfer.
*
* Copy the data from the buffer for the specified length to the data region
* specified by the IO request SGL.
*/
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
u8 *data_buf, u32 len)
{
struct isci_request *ireq;
u8 *src_addr;
int copy_len;
struct sas_task *task;
struct scatterlist *sg;
void *kaddr;
int total_len = len;
ireq = to_ireq(stp_req);
task = isci_request_access_task(ireq);
src_addr = data_buf;
if (task->num_scatter > 0) {
sg = task->scatter;
while (total_len > 0) {
struct page *page = sg_page(sg);
copy_len = min_t(int, total_len, sg_dma_len(sg));
kaddr = kmap_atomic(page);
memcpy(kaddr + sg->offset, src_addr, copy_len);
kunmap_atomic(kaddr);
total_len -= copy_len;
src_addr += copy_len;
sg = sg_next(sg);
}
} else {
BUG_ON(task->total_xfer_len < total_len);
memcpy(task->scatter, src_addr, total_len);
}
return SCI_SUCCESS;
}
/**
*
* @stp_req: The PIO DATA IN request that is to receive the data.
* @data_buffer: The buffer to copy from.
*
* Copy the data buffer to the IO request data region.
*/
static enum sci_status sci_stp_request_pio_data_in_copy_data(
struct isci_stp_request *stp_req,
u8 *data_buffer)
{
enum sci_status status;
/*
* If there is less than 1K remaining in the transfer request
* copy just the data for the transfer */
if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
status = sci_stp_request_pio_data_in_copy_data_buffer(
stp_req, data_buffer, stp_req->pio_len);
if (status == SCI_SUCCESS)
stp_req->pio_len = 0;
} else {
/* We are transferring the whole frame so copy */
status = sci_stp_request_pio_data_in_copy_data_buffer(
stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
if (status == SCI_SUCCESS)
stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
}
return status;
}
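/* Illustration: with pio_len == 1536 and the 1 KiB SCU frame buffer, the
* first frame copies SCU_MAX_FRAME_BUFFER_SIZE (1024) bytes and leaves
* pio_len == 512; the next frame takes the "less than 1K remaining" path
* and copies the final 512 bytes.
*/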
static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
u32 completion_code)
{
enum sci_status status = SCI_SUCCESS;
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
break;
default:
/* All other completion status cause the IO to be
* complete. If a NAK was received, then it is up to
* the user to retry the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
}
return status;
}
static enum sci_status
pio_data_out_tx_done_tc_event(struct isci_request *ireq,
u32 completion_code)
{
enum sci_status status = SCI_SUCCESS;
bool all_frames_transferred = false;
struct isci_stp_request *stp_req = &ireq->stp.req;
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
/* Transmit data */
if (stp_req->pio_len != 0) {
status = sci_stp_request_pio_data_out_transmit_data(ireq);
if (status == SCI_SUCCESS) {
if (stp_req->pio_len == 0)
all_frames_transferred = true;
}
} else if (stp_req->pio_len == 0) {
/*
* this will happen if all the data is written the
* first time after the PIO setup FIS is received
*/
all_frames_transferred = true;
}
/* all data transferred. */
if (all_frames_transferred) {
/*
* Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
* and wait for a PIO SETUP FIS or a D2H Register FIS. */
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
}
break;
default:
/*
* All other completion status cause the IO to be complete.
* If a NAK was received, then it is up to the user to retry
* the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
}
return status;
}
static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
u32 frame_index)
{
struct isci_host *ihost = ireq->owning_controller;
struct dev_to_host_fis *frame_header;
enum sci_status status;
u32 *frame_buffer;
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
if ((status == SCI_SUCCESS) &&
(frame_header->fis_type == FIS_REGD2H)) {
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
sci_controller_copy_sata_response(&ireq->stp.rsp,
frame_header,
frame_buffer);
}
sci_controller_release_frame(ihost, frame_index);
return status;
}
static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
u32 frame_index)
{
struct isci_host *ihost = ireq->owning_controller;
enum sci_status status;
struct dev_to_host_fis *frame_header;
u32 *frame_buffer;
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
if (status != SCI_SUCCESS)
return status;
if (frame_header->fis_type != FIS_REGD2H) {
dev_err(&ireq->isci_host->pdev->dev,
"%s ERROR: invalid fis type 0x%X\n",
__func__, frame_header->fis_type);
return SCI_FAILURE;
}
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
sci_controller_copy_sata_response(&ireq->stp.rsp,
(u32 *)frame_header,
frame_buffer);
/* Frame has been decoded return it to the controller */
sci_controller_release_frame(ihost, frame_index);
return status;
}
static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
u32 frame_index)
{
struct sas_task *task = isci_request_access_task(ireq);
enum sci_status status;
status = process_unsolicited_fis(ireq, frame_index);
if (status == SCI_SUCCESS) {
if (ireq->stp.rsp.status & ATA_ERR)
status = SCI_IO_FAILURE_RESPONSE_VALID;
} else {
status = SCI_IO_FAILURE_RESPONSE_VALID;
}
if (status != SCI_SUCCESS) {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = status;
} else {
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
}
/* the d2h ufi is the end of non-data commands */
if (task->data_dir == DMA_NONE)
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
return status;
}
static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
{
struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
struct scu_task_context *task_context = ireq->tc;
/* fill in the SCU Task Context for a DATA FIS containing the CDB in Raw
* Frame type. The TC for the previous Packet FIS was already there; we only
* need to change the H2D FIS content.
*/
memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
task_context->type.stp.fis_type = FIS_DATA;
task_context->transfer_length_bytes = dev->cdb_len;
}
static void scu_atapi_construct_task_context(struct isci_request *ireq)
{
struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
struct sas_task *task = isci_request_access_task(ireq);
struct scu_task_context *task_context = ireq->tc;
int cdb_len = dev->cdb_len;
/* reference: SSTL 1.13.4.2
* task_type, sata_direction
*/
if (task->data_dir == DMA_TO_DEVICE) {
task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
task_context->sata_direction = 0;
} else {
/* todo: for NO_DATA command, we need to send out raw frame. */
task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
task_context->sata_direction = 1;
}
memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
task_context->type.stp.fis_type = FIS_DATA;
memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
task_context->ssp_command_iu_length = cdb_len / sizeof(u32);
/* task phase is set to TX_CMD */
task_context->task_phase = 0x1;
/* retry counter */
task_context->stp_retry_count = 0;
/* data transfer size. */
task_context->transfer_length_bytes = task->total_xfer_len;
/* setup sgl */
sci_request_build_sgl(ireq);
}
enum sci_status
sci_io_request_frame_handler(struct isci_request *ireq,
u32 frame_index)
{
struct isci_host *ihost = ireq->owning_controller;
struct isci_stp_request *stp_req = &ireq->stp.req;
enum sci_base_request_states state;
enum sci_status status;
ssize_t word_cnt;
state = ireq->sm.current_state_id;
switch (state) {
case SCI_REQ_STARTED: {
struct ssp_frame_hdr ssp_hdr;
void *frame_header;
sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
&frame_header);
word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
if (ssp_hdr.frame_type == SSP_RESPONSE) {
struct ssp_response_iu *resp_iu;
ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&resp_iu);
sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
resp_iu = &ireq->ssp.rsp;
if (resp_iu->datapres == 0x01 ||
resp_iu->datapres == 0x02) {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
} else {
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
}
} else {
/* not a response frame, why did it get forwarded? */
dev_err(&ihost->pdev->dev,
"%s: SCIC IO Request 0x%p received unexpected "
"frame %d type 0x%02x\n", __func__, ireq,
frame_index, ssp_hdr.frame_type);
}
/*
* In any case we are done with this frame buffer return it to
* the controller
*/
sci_controller_release_frame(ihost, frame_index);
return SCI_SUCCESS;
}
case SCI_REQ_TASK_WAIT_TC_RESP:
sci_io_request_copy_response(ireq);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
sci_controller_release_frame(ihost, frame_index);
return SCI_SUCCESS;
case SCI_REQ_SMP_WAIT_RESP: {
struct sas_task *task = isci_request_access_task(ireq);
struct scatterlist *sg = &task->smp_task.smp_resp;
void *frame_header, *kaddr;
u8 *rsp;
sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
&frame_header);
kaddr = kmap_atomic(sg_page(sg));
rsp = kaddr + sg->offset;
sci_swab32_cpy(rsp, frame_header, 1);
if (rsp[0] == SMP_RESPONSE) {
void *smp_resp;
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
&smp_resp);
word_cnt = (sg->length/4)-1;
if (word_cnt > 0)
word_cnt = min_t(unsigned int, word_cnt,
SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
} else {
/*
* This was not a response frame why did it get
* forwarded?
*/
dev_err(&ihost->pdev->dev,
"%s: SCIC SMP Request 0x%p received unexpected "
"frame %d type 0x%02x\n",
__func__,
ireq,
frame_index,
rsp[0]);
ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}
kunmap_atomic(kaddr);
sci_controller_release_frame(ihost, frame_index);
return SCI_SUCCESS;
}
case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
return sci_stp_request_udma_general_frame_handler(ireq,
frame_index);
case SCI_REQ_STP_UDMA_WAIT_D2H:
/* Use the general frame handler to copy the response data */
status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
if (status != SCI_SUCCESS)
return status;
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
return SCI_SUCCESS;
case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
struct dev_to_host_fis *frame_header;
u32 *frame_buffer;
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev,
"%s: SCIC IO Request 0x%p could not get frame "
"header for frame index %d, status %x\n",
__func__,
stp_req,
frame_index,
status);
return status;
}
switch (frame_header->fis_type) {
case FIS_REGD2H:
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
sci_controller_copy_sata_response(&ireq->stp.rsp,
frame_header,
frame_buffer);
/* The command has completed with error */
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
break;
default:
dev_warn(&ihost->pdev->dev,
"%s: IO Request:0x%p Frame Id:%d protocol "
"violation occurred\n", __func__, stp_req,
frame_index);
ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
break;
}
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
/* Frame has been decoded return it to the controller */
sci_controller_release_frame(ihost, frame_index);
return status;
}
case SCI_REQ_STP_PIO_WAIT_FRAME: {
struct sas_task *task = isci_request_access_task(ireq);
struct dev_to_host_fis *frame_header;
u32 *frame_buffer;
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev,
"%s: SCIC IO Request 0x%p could not get frame "
"header for frame index %d, status %x\n",
__func__, stp_req, frame_index, status);
return status;
}
switch (frame_header->fis_type) {
case FIS_PIO_SETUP:
/* Get from the frame buffer the PIO Setup Data */
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
/* Get the data from the PIO Setup. The SCU hardware
* returns the first word in the frame_header and the rest
* of the data is in the frame buffer, so we need to
* back up one dword
*/
/* transfer_count: first 16 bits in the 4th dword */
stp_req->pio_len = frame_buffer[3] & 0xffff;
/* status: 4th byte in the 3rd dword */
stp_req->status = (frame_buffer[2] >> 24) & 0xff;
sci_controller_copy_sata_response(&ireq->stp.rsp,
frame_header,
frame_buffer);
ireq->stp.rsp.status = stp_req->status;
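/* Decode illustration (hypothetical frame contents): with
* frame_buffer[3] == 0x00000200 the transfer count (low 16 bits of the
* 4th dword) is 0x200 bytes; with frame_buffer[2] == 0x58000000 the
* E_Status byte (top byte of the 3rd dword) is 0x58, i.e. DRDY, DSC and
* DRQ set with BSY clear.
*/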
/* The next state is dependent on whether the
* request was PIO Data-in or Data out
*/
if (task->data_dir == DMA_FROM_DEVICE) {
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
} else if (task->data_dir == DMA_TO_DEVICE) {
/* Transmit data */
status = sci_stp_request_pio_data_out_transmit_data(ireq);
if (status != SCI_SUCCESS)
break;
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
}
break;
case FIS_SETDEVBITS:
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
break;
case FIS_REGD2H:
if (frame_header->status & ATA_BUSY) {
/*
* Now why is the drive sending a D2H Register
* FIS when it is still busy? Do nothing since
* we are still in the right state.
*/
dev_dbg(&ihost->pdev->dev,
"%s: SCIC PIO Request 0x%p received "
"D2H Register FIS with BSY status "
"0x%x\n",
__func__,
stp_req,
frame_header->status);
break;
}
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
sci_controller_copy_sata_response(&ireq->stp.rsp,
frame_header,
frame_buffer);
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
default:
/* FIXME: what do we do here? */
break;
}
/* Frame is decoded return it to the controller */
sci_controller_release_frame(ihost, frame_index);
return status;
}
case SCI_REQ_STP_PIO_DATA_IN: {
struct dev_to_host_fis *frame_header;
struct sata_fis_data *frame_buffer;
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev,
"%s: SCIC IO Request 0x%p could not get frame "
"header for frame index %d, status %x\n",
__func__,
stp_req,
frame_index,
status);
return status;
}
if (frame_header->fis_type != FIS_DATA) {
dev_err(&ihost->pdev->dev,
"%s: SCIC PIO Request 0x%p received frame %d "
"with fis type 0x%02x when expecting a data "
"fis.\n",
__func__,
stp_req,
frame_index,
frame_header->fis_type);
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
/* Frame is decoded return it to the controller */
sci_controller_release_frame(ihost, frame_index);
return status;
}
if (stp_req->sgl.index < 0) {
ireq->saved_rx_frame_index = frame_index;
stp_req->pio_len = 0;
} else {
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
status = sci_stp_request_pio_data_in_copy_data(stp_req,
(u8 *)frame_buffer);
/* Frame is decoded return it to the controller */
sci_controller_release_frame(ihost, frame_index);
}
/* Check for the end of the transfer: are there more
* bytes remaining for this data transfer
*/
if (status != SCI_SUCCESS || stp_req->pio_len != 0)
return status;
if ((stp_req->status & ATA_BUSY) == 0) {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
} else {
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
}
return status;
}
case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
struct sas_task *task = isci_request_access_task(ireq);
sci_controller_release_frame(ihost, frame_index);
ireq->target_device->working_request = ireq;
if (task->data_dir == DMA_NONE) {
sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
scu_atapi_reconstruct_raw_frame_task_context(ireq);
} else {
sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
scu_atapi_construct_task_context(ireq);
}
sci_controller_continue_io(ireq);
return SCI_SUCCESS;
}
case SCI_REQ_ATAPI_WAIT_D2H:
return atapi_d2h_reg_frame_handler(ireq, frame_index);
case SCI_REQ_ABORTING:
/*
* TODO: Is it even possible to get an unsolicited frame in the
* aborting state?
*/
sci_controller_release_frame(ihost, frame_index);
return SCI_SUCCESS;
default:
dev_warn(&ihost->pdev->dev,
"%s: SCIC IO Request given unexpected frame %x while "
"in state %d\n",
__func__,
frame_index,
state);
sci_controller_release_frame(ihost, frame_index);
return SCI_FAILURE_INVALID_STATE;
}
}
static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
u32 completion_code)
{
enum sci_status status = SCI_SUCCESS;
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
/* We must check the response buffer to see if the D2H
* Register FIS was received before we got the TC
* completion.
*/
if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
sci_remote_device_suspend(ireq->target_device,
SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
} else {
/* If we have an error completion status for the
* TC then we can expect a D2H register FIS from
* the device so we must change state to wait
* for it
*/
sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
}
break;
/* TODO Check to see if any of these completion status need to
* wait for the device to host register fis.
*/
/* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
* - this comes only for B0
*/
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
sci_remote_device_suspend(ireq->target_device,
SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
/* Fall through to the default case */
default:
/* All other completion status cause the IO to be complete. */
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
}
return status;
}
static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
enum sci_base_request_states next)
{
enum sci_status status = SCI_SUCCESS;
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, next);
break;
default:
/* All other completion status cause the IO to be complete.
* If a NAK was received, then it is up to the user to retry
* the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
}
return status;
}
static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
u32 completion_code)
{
struct isci_remote_device *idev = ireq->target_device;
struct dev_to_host_fis *d2h = &ireq->stp.rsp;
enum sci_status status = SCI_SUCCESS;
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
u16 len = sci_req_tx_bytes(ireq);
/* likely non-error data underrun; work around the missing
* d2h frame from the controller
*/
if (d2h->fis_type != FIS_REGD2H) {
d2h->fis_type = FIS_REGD2H;
d2h->flags = (1 << 6);
d2h->status = 0x50;
d2h->error = 0;
d2h->lbal = 0;
d2h->byte_count_low = len & 0xff;
d2h->byte_count_high = len >> 8;
d2h->device = 0xa0;
d2h->lbal_exp = 0;
d2h->lbam_exp = 0;
d2h->lbah_exp = 0;
d2h->_r_a = 0;
d2h->sector_count = 0x3;
d2h->sector_count_exp = 0;
d2h->_r_b = 0;
d2h->_r_c = 0;
d2h->_r_d = 0;
}
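/* The synthesized FIS above reports a benign completion: flags bit 6 is
* the Interrupt bit, status 0x50 is DRDY | DSC with BSY and ERR clear,
* and sector_count 0x3 carries the ATAPI interrupt reason bits (C/D and
* I/O set, i.e. command complete).
*/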
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
status = ireq->sci_status;
/* the hw will have suspended the rnc, so complete the
* request upon pending resume
*/
sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
break;
}
case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
/* In this case, there is no UF coming after.
* complete the IO now.
*/
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
default:
if (d2h->fis_type == FIS_REGD2H) {
/* UF received change the device state to ATAPI_ERROR */
status = ireq->sci_status;
sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
} else {
/* If we receive any non-success TC status and no UF
* has been received yet, then a UF for the status fis
* is coming after (XXX: suspect this is
* actually a protocol error or a bug like the
* DONE_UNEXP_FIS case)
*/
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
}
break;
}
return status;
}
enum sci_status
sci_io_request_tc_completion(struct isci_request *ireq,
u32 completion_code)
{
enum sci_base_request_states state;
struct isci_host *ihost = ireq->owning_controller;
state = ireq->sm.current_state_id;
switch (state) {
case SCI_REQ_STARTED:
return request_started_state_tc_event(ireq, completion_code);
case SCI_REQ_TASK_WAIT_TC_COMP:
return ssp_task_request_await_tc_event(ireq,
completion_code);
case SCI_REQ_SMP_WAIT_RESP:
return smp_request_await_response_tc_event(ireq,
completion_code);
case SCI_REQ_SMP_WAIT_TC_COMP:
return smp_request_await_tc_event(ireq, completion_code);
case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
return stp_request_udma_await_tc_event(ireq,
completion_code);
case SCI_REQ_STP_NON_DATA_WAIT_H2D:
return stp_request_non_data_await_h2d_tc_event(ireq,
completion_code);
case SCI_REQ_STP_PIO_WAIT_H2D:
return stp_request_pio_await_h2d_completion_tc_event(ireq,
completion_code);
case SCI_REQ_STP_PIO_DATA_OUT:
return pio_data_out_tx_done_tc_event(ireq, completion_code);
case SCI_REQ_ABORTING:
return request_aborting_state_tc_event(ireq,
completion_code);
case SCI_REQ_ATAPI_WAIT_H2D:
return atapi_raw_completion(ireq, completion_code,
SCI_REQ_ATAPI_WAIT_PIO_SETUP);
case SCI_REQ_ATAPI_WAIT_TC_COMP:
return atapi_raw_completion(ireq, completion_code,
SCI_REQ_ATAPI_WAIT_D2H);
case SCI_REQ_ATAPI_WAIT_D2H:
return atapi_data_tc_completion_handler(ireq, completion_code);
default:
dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
__func__, completion_code, req_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
/**
* isci_request_process_response_iu() - This function sets the status and
* response iu, in the task struct, from the request object for the upper
* layer driver.
* @task: This parameter is the task struct from the upper layer driver.
* @resp_iu: This parameter points to the response iu of the completed request.
* @dev: This parameter specifies the linux device struct.
*
* Return: none.
*/
static void isci_request_process_response_iu(
struct sas_task *task,
struct ssp_response_iu *resp_iu,
struct device *dev)
{
dev_dbg(dev,
"%s: resp_iu = %p "
"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
"resp_iu->response_data_len = %x, "
"resp_iu->sense_data_len = %x\nrepsonse data: ",
__func__,
resp_iu,
resp_iu->status,
resp_iu->datapres,
resp_iu->response_data_len,
resp_iu->sense_data_len);
task->task_status.stat = resp_iu->status;
/* libsas updates the task status fields based on the response iu. */
sas_ssp_task_response(dev, task, resp_iu);
}
/**
* isci_request_set_open_reject_status() - This function prepares the I/O
* completion for OPEN_REJECT conditions.
* @request: This parameter is the completed isci_request object.
* @task: This parameter is the sas_task associated with the request.
* @response_ptr: This parameter specifies the service response for the I/O.
* @status_ptr: This parameter specifies the exec status for the I/O.
* @complete_to_host_ptr: This parameter specifies the action to be taken by
* the LLDD with respect to completing this request or forcing an abort
* condition on the I/O.
* @open_rej_reason: This parameter specifies the encoded reason for the
* abandon-class reject.
*
* Return: none.
*/
static void isci_request_set_open_reject_status(
struct isci_request *request,
struct sas_task *task,
enum service_response *response_ptr,
enum exec_status *status_ptr,
enum isci_completion_selection *complete_to_host_ptr,
enum sas_open_rej_reason open_rej_reason)
{
/* Task in the target is done. */
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
*response_ptr = SAS_TASK_UNDELIVERED;
*status_ptr = SAS_OPEN_REJECT;
*complete_to_host_ptr = isci_perform_normal_io_completion;
task->task_status.open_rej_reason = open_rej_reason;
}
/**
* isci_request_handle_controller_specific_errors() - This function decodes
* controller-specific I/O completion error conditions.
* @idev: This parameter is the remote device associated with the request.
* @request: This parameter is the completed isci_request object.
* @task: This parameter is the sas_task associated with the request.
* @response_ptr: This parameter specifies the service response for the I/O.
* @status_ptr: This parameter specifies the exec status for the I/O.
* @complete_to_host_ptr: This parameter specifies the action to be taken by
* the LLDD with respect to completing this request or forcing an abort
* condition on the I/O.
*
* Return: none.
*/
static void isci_request_handle_controller_specific_errors(
struct isci_remote_device *idev,
struct isci_request *request,
struct sas_task *task,
enum service_response *response_ptr,
enum exec_status *status_ptr,
enum isci_completion_selection *complete_to_host_ptr)
{
unsigned int cstatus;
cstatus = request->scu_status;
dev_dbg(&request->isci_host->pdev->dev,
"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
"- controller status = 0x%x\n",
__func__, request, cstatus);
/* Decode the controller-specific errors; most
* important is to recognize those conditions in which
* the target may still have a task outstanding that
* must be aborted.
*
* Note that there are SCU completion codes being
* named in the decode below for which SCIC has already
* done work to handle them in a way other than as
* a controller-specific completion code; these are left
* in the decode below for completeness' sake.
*/
switch (cstatus) {
case SCU_TASK_DONE_DMASETUP_DIRERR:
/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
case SCU_TASK_DONE_XFERCNT_ERR:
/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
if (task->task_proto == SAS_PROTOCOL_SMP) {
/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
*response_ptr = SAS_TASK_COMPLETE;
/* See if the device has been/is being stopped. Note
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
if (!idev)
*status_ptr = SAS_DEVICE_UNKNOWN;
else
*status_ptr = SAS_ABORTED_TASK;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
*complete_to_host_ptr =
isci_perform_normal_io_completion;
} else {
/* Task in the target is not done. */
*response_ptr = SAS_TASK_UNDELIVERED;
if (!idev)
*status_ptr = SAS_DEVICE_UNKNOWN;
else
*status_ptr = SAM_STAT_TASK_ABORTED;
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
*complete_to_host_ptr =
isci_perform_error_io_completion;
}
break;
case SCU_TASK_DONE_CRC_ERR:
case SCU_TASK_DONE_NAK_CMD_ERR:
case SCU_TASK_DONE_EXCESS_DATA:
case SCU_TASK_DONE_UNEXP_FIS:
/* Also SCU_TASK_DONE_UNEXP_RESP: */
case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
/* These are conditions in which the target
* has completed the task, so that no cleanup
* is necessary.
*/
*response_ptr = SAS_TASK_COMPLETE;
/* See if the device has been/is being stopped. Note
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
if (!idev)
*status_ptr = SAS_DEVICE_UNKNOWN;
else
*status_ptr = SAS_ABORTED_TASK;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
*complete_to_host_ptr = isci_perform_normal_io_completion;
break;
/* Note that the only open reject completion codes seen here will be
* abandon-class codes; all others are automatically retried in the SCU.
*/
case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
break;
case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
/* Note - the return of AB0 will change when
* libsas implements detection of zone violations.
*/
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
complete_to_host_ptr, SAS_OREJ_RESV_AB0);
break;
case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
complete_to_host_ptr, SAS_OREJ_RESV_AB1);
break;
case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
complete_to_host_ptr, SAS_OREJ_RESV_AB2);
break;
case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
complete_to_host_ptr, SAS_OREJ_RESV_AB3);
break;
case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
complete_to_host_ptr, SAS_OREJ_BAD_DEST);
break;
case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
complete_to_host_ptr, SAS_OREJ_STP_NORES);
break;
case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
complete_to_host_ptr, SAS_OREJ_EPROTO);
break;
case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
complete_to_host_ptr, SAS_OREJ_CONN_RATE);
break;
case SCU_TASK_DONE_LL_R_ERR:
/* Also SCU_TASK_DONE_ACK_NAK_TO: */
case SCU_TASK_DONE_LL_PERR:
case SCU_TASK_DONE_LL_SY_TERM:
/* Also SCU_TASK_DONE_NAK_ERR:*/
case SCU_TASK_DONE_LL_LF_TERM:
/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
case SCU_TASK_DONE_LL_ABORT_ERR:
case SCU_TASK_DONE_SEQ_INV_TYPE:
/* Also SCU_TASK_DONE_UNEXP_XR: */
case SCU_TASK_DONE_XR_IU_LEN_ERR:
case SCU_TASK_DONE_INV_FIS_LEN:
/* Also SCU_TASK_DONE_XR_WD_LEN: */
case SCU_TASK_DONE_SDMA_ERR:
case SCU_TASK_DONE_OFFSET_ERR:
case SCU_TASK_DONE_MAX_PLD_ERR:
case SCU_TASK_DONE_LF_ERR:
case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
case SCU_TASK_DONE_SMP_LL_RX_ERR:
case SCU_TASK_DONE_UNEXP_DATA:
case SCU_TASK_DONE_UNEXP_SDBFIS:
case SCU_TASK_DONE_REG_ERR:
case SCU_TASK_DONE_SDB_ERR:
case SCU_TASK_DONE_TASK_ABORT:
default:
/* Task in the target is not done. */
*response_ptr = SAS_TASK_UNDELIVERED;
*status_ptr = SAM_STAT_TASK_ABORTED;
if (task->task_proto == SAS_PROTOCOL_SMP) {
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
*complete_to_host_ptr = isci_perform_normal_io_completion;
} else {
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
*complete_to_host_ptr = isci_perform_error_io_completion;
}
break;
}
}
/**
* isci_task_save_for_upper_layer_completion() - This function saves the
* request for later completion to the upper layer driver.
* @host: This parameter is a pointer to the host on which the request
* should be queued (either as an error or success).
* @request: This parameter is the completed request.
* @response: This parameter is the response code for the completed task.
* @status: This parameter is the status code for the completed task.
*
* none.
*/
static void isci_task_save_for_upper_layer_completion(
struct isci_host *host,
struct isci_request *request,
enum service_response response,
enum exec_status status,
enum isci_completion_selection task_notification_selection)
{
struct sas_task *task = isci_request_access_task(request);
task_notification_selection
= isci_task_set_completion_status(task, response, status,
task_notification_selection);
/* Tasks aborted specifically by a call to the lldd_abort_task
* function should not be completed to the host in the regular path.
*/
switch (task_notification_selection) {
case isci_perform_normal_io_completion:
/* Normal notification (task_done) */
/* Add to the completed list. */
list_add(&request->completed_node,
&host->requests_to_complete);
/* Take the request off the device's pending request list. */
list_del_init(&request->dev_node);
break;
case isci_perform_aborted_io_completion:
/* No notification to libsas because this request is
* already in the abort path.
*/
/* Wake up whatever process was waiting for this
* request to complete.
*/
WARN_ON(request->io_request_completion == NULL);
if (request->io_request_completion != NULL) {
/* Signal whoever is waiting that this
* request is complete.
*/
complete(request->io_request_completion);
}
break;
case isci_perform_error_io_completion:
/* Use sas_task_abort */
/* Add to the aborted list. */
list_add(&request->completed_node,
&host->requests_to_errorback);
break;
default:
/* Add to the error to libsas list. */
list_add(&request->completed_node,
&host->requests_to_errorback);
break;
}
dev_dbg(&host->pdev->dev,
"%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
__func__, task_notification_selection, task,
(task) ? task->task_status.resp : 0, response,
(task) ? task->task_status.stat : 0, status);
}
static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
{
struct task_status_struct *ts = &task->task_status;
struct ata_task_resp *resp = (void *)&ts->buf[0];
resp->frame_len = sizeof(*fis);
memcpy(resp->ending_fis, fis, sizeof(*fis));
ts->buf_valid_size = sizeof(*resp);
/* If the device fault bit is set in the status register, flag a
* protocol response so that libsas builds the sense data from the
* ending FIS.
*/
if (fis->status & ATA_DF)
ts->stat = SAS_PROTO_RESPONSE;
else if (fis->status & ATA_ERR)
ts->stat = SAM_STAT_CHECK_CONDITION;
else
ts->stat = SAM_STAT_GOOD;
ts->resp = SAS_TASK_COMPLETE;
}
static void isci_request_io_request_complete(struct isci_host *ihost,
struct isci_request *request,
enum sci_io_status completion_status)
{
struct sas_task *task = isci_request_access_task(request);
struct ssp_response_iu *resp_iu;
unsigned long task_flags;
struct isci_remote_device *idev = request->target_device;
enum service_response response = SAS_TASK_UNDELIVERED;
enum exec_status status = SAS_ABORTED_TASK;
enum isci_request_status request_status;
enum isci_completion_selection complete_to_host
= isci_perform_normal_io_completion;
dev_dbg(&ihost->pdev->dev,
"%s: request = %p, task = %p,\n"
"task->data_dir = %d completion_status = 0x%x\n",
__func__,
request,
task,
task->data_dir,
completion_status);
spin_lock(&request->state_lock);
request_status = request->status;
/* Decode the request status. Note that if the request has been
* aborted by a task management function, we don't care
* what the status is.
*/
switch (request_status) {
case aborted:
/* "aborted" indicates that the request was aborted by a task
* management function, since once a task management request is
* performed by the device, the request only completes because
* of the subsequent driver terminate.
*
* Aborted also means an external thread is explicitly managing
* this request, so that we do not complete it up the stack.
*
* The target is still there (since the TMF was successful).
*/
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
response = SAS_TASK_COMPLETE;
/* See if the device has been/is being stopped. Note
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
complete_to_host = isci_perform_aborted_io_completion;
/* This was an aborted request. */
spin_unlock(&request->state_lock);
break;
case aborting:
/* aborting means that the task management function tried and
* failed to abort the request. We need to note the request
* as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
* target as down.
*
* Aborting also means an external thread is explicitly managing
* this request, so that we do not complete it up the stack.
*/
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
response = SAS_TASK_UNDELIVERED;
if (!idev)
/* The device has been/is being stopped. Note that
* we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_PHY_DOWN;
complete_to_host = isci_perform_aborted_io_completion;
/* This was an aborted request. */
spin_unlock(&request->state_lock);
break;
case terminating:
/* This was a terminated request. This happens when
* the I/O is being terminated because of an action on
* the device (reset, tear down, etc.), and the I/O needs
* to be completed up the stack.
*/
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
response = SAS_TASK_UNDELIVERED;
/* See if the device has been/is being stopped. Note
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
complete_to_host = isci_perform_aborted_io_completion;
/* This was a terminated request. */
spin_unlock(&request->state_lock);
break;
case dead:
/* This was a terminated request that timed-out during the
* termination process. There is no task to complete to
* libsas.
*/
complete_to_host = isci_perform_normal_io_completion;
spin_unlock(&request->state_lock);
break;
default:
/* The request is done from an SCU HW perspective. */
request->status = completed;
spin_unlock(&request->state_lock);
/* This is an active request being completed from the core. */
switch (completion_status) {
case SCI_IO_FAILURE_RESPONSE_VALID:
dev_dbg(&ihost->pdev->dev,
"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
__func__,
request,
task);
if (sas_protocol_ata(task->task_proto)) {
isci_process_stp_response(task, &request->stp.rsp);
} else if (SAS_PROTOCOL_SSP == task->task_proto) {
/* crack the iu response buffer. */
resp_iu = &request->ssp.rsp;
isci_request_process_response_iu(task, resp_iu,
&ihost->pdev->dev);
} else if (SAS_PROTOCOL_SMP == task->task_proto) {
dev_err(&ihost->pdev->dev,
"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
"SAS_PROTOCOL_SMP protocol\n",
__func__);
} else
dev_err(&ihost->pdev->dev,
"%s: unknown protocol\n", __func__);
/* use the task status set in the task struct by the
* isci_request_process_response_iu call.
*/
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
response = task->task_status.resp;
status = task->task_status.stat;
break;
case SCI_IO_SUCCESS:
case SCI_IO_SUCCESS_IO_DONE_EARLY:
response = SAS_TASK_COMPLETE;
status = SAM_STAT_GOOD;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
/* This was an SSP / STP / SATA transfer.
* There is a possibility that less data than
* the maximum was transferred.
*/
u32 transferred_length = sci_req_tx_bytes(request);
task->task_status.residual
= task->total_xfer_len - transferred_length;
/* If there were residual bytes, call this an
* underrun.
*/
if (task->task_status.residual != 0)
status = SAS_DATA_UNDERRUN;
dev_dbg(&ihost->pdev->dev,
"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
__func__,
status);
} else
dev_dbg(&ihost->pdev->dev,
"%s: SCI_IO_SUCCESS\n",
__func__);
break;
case SCI_IO_FAILURE_TERMINATED:
dev_dbg(&ihost->pdev->dev,
"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
__func__,
request,
task);
/* The request was terminated explicitly. No handling
* is needed in the SCSI error handler path.
*/
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
response = SAS_TASK_UNDELIVERED;
/* See if the device has been/is being stopped. Note
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
complete_to_host = isci_perform_normal_io_completion;
break;
case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
isci_request_handle_controller_specific_errors(
idev, request, task, &response, &status,
&complete_to_host);
break;
case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
/* This is a special case, in that the I/O completion
* is telling us that the device needs a reset.
* In order for the device reset condition to be
* noticed, the I/O has to be handled in the error
* handler. Set the reset flag and cause the
* SCSI error thread to be scheduled.
*/
spin_lock_irqsave(&task->task_state_lock, task_flags);
task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
spin_unlock_irqrestore(&task->task_state_lock, task_flags);
/* Fail the I/O. */
response = SAS_TASK_UNDELIVERED;
status = SAM_STAT_TASK_ABORTED;
complete_to_host = isci_perform_error_io_completion;
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
break;
case SCI_FAILURE_RETRY_REQUIRED:
/* Fail the I/O so it can be retried. */
response = SAS_TASK_UNDELIVERED;
if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
complete_to_host = isci_perform_normal_io_completion;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
break;
default:
/* Catch any otherwise unhandled error codes here. */
dev_dbg(&ihost->pdev->dev,
"%s: invalid completion code: 0x%x - "
"isci_request = %p\n",
__func__, completion_status, request);
response = SAS_TASK_UNDELIVERED;
/* See if the device has been/is being stopped. Note
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
if (SAS_PROTOCOL_SMP == task->task_proto) {
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
complete_to_host = isci_perform_normal_io_completion;
} else {
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
complete_to_host = isci_perform_error_io_completion;
}
break;
}
break;
}
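/* Undo the DMA mappings made at build time. libata owns the mapping
* for SATA/STP requests, so only SSP and SMP payloads are unmapped
* here; the SMP command buffer is also byte-swapped back in case it
* is re-used.
*/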
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
if (task->data_dir == DMA_NONE)
break;
if (task->num_scatter == 0)
/* 0 indicates a single dma address */
dma_unmap_single(&ihost->pdev->dev,
request->zero_scatter_daddr,
task->total_xfer_len, task->data_dir);
else /* unmap the sgl dma addresses */
dma_unmap_sg(&ihost->pdev->dev, task->scatter,
request->num_sg_entries, task->data_dir);
break;
case SAS_PROTOCOL_SMP: {
struct scatterlist *sg = &task->smp_task.smp_req;
struct smp_req *smp_req;
void *kaddr;
dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
/* need to swab it back in case the command buffer is re-used */
kaddr = kmap_atomic(sg_page(sg));
smp_req = kaddr + sg->offset;
sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
kunmap_atomic(kaddr);
break;
}
default:
break;
}
/* Put the completed request on the correct list */
isci_task_save_for_upper_layer_completion(ihost, request, response,
status, complete_to_host
);
/* complete the io request to the core. */
sci_controller_complete_io(ihost, request->target_device, request);
/* set terminated handle so it cannot be completed or
* terminated again, and to cause any calls into abort
* task to recognize the already completed case.
*/
set_bit(IREQ_TERMINATED, &request->flags);
}
static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
struct domain_device *dev = ireq->target_device->domain_dev;
enum sci_base_request_states state;
struct sas_task *task;
/* XXX as hch said always creating an internal sas_task for tmf
* requests would simplify the driver
*/
task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
/* all unaccelerated request types (non ssp or ncq) handled with
* substates
*/
if (!task && dev->dev_type == SAS_END_DEV) {
state = SCI_REQ_TASK_WAIT_TC_COMP;
} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
state = SCI_REQ_SMP_WAIT_RESP;
} else if (task && sas_protocol_ata(task->task_proto) &&
!task->ata_task.use_ncq) {
if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
task->ata_task.fis.command == ATA_CMD_PACKET) {
state = SCI_REQ_ATAPI_WAIT_H2D;
} else if (task->data_dir == DMA_NONE) {
state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
} else if (task->ata_task.dma_xfer) {
state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
} else /* PIO */ {
state = SCI_REQ_STP_PIO_WAIT_H2D;
}
} else {
/* SSP or NCQ are fully accelerated, no substates */
return;
}
sci_change_state(sm, state);
}
static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
struct isci_host *ihost = ireq->owning_controller;
/* Tell the SCI_USER that the IO request is complete */
if (!test_bit(IREQ_TMF, &ireq->flags))
isci_request_io_request_complete(ihost, ireq,
ireq->sci_status);
else
isci_task_request_complete(ihost, ireq, ireq->sci_status);
}
static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
/* Setting the abort bit in the Task Context is required by the silicon. */
ireq->tc->abort = 1;
}
static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
ireq->target_device->working_request = ireq;
}
static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
ireq->target_device->working_request = ireq;
}
static const struct sci_base_state sci_request_state_table[] = {
[SCI_REQ_INIT] = { },
[SCI_REQ_CONSTRUCTED] = { },
[SCI_REQ_STARTED] = {
.enter_state = sci_request_started_state_enter,
},
[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
.enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
},
[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
[SCI_REQ_STP_PIO_WAIT_H2D] = {
.enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
},
[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
[SCI_REQ_STP_PIO_DATA_IN] = { },
[SCI_REQ_STP_PIO_DATA_OUT] = { },
[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
[SCI_REQ_TASK_WAIT_TC_COMP] = { },
[SCI_REQ_TASK_WAIT_TC_RESP] = { },
[SCI_REQ_SMP_WAIT_RESP] = { },
[SCI_REQ_SMP_WAIT_TC_COMP] = { },
[SCI_REQ_ATAPI_WAIT_H2D] = { },
[SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
[SCI_REQ_ATAPI_WAIT_D2H] = { },
[SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
[SCI_REQ_COMPLETED] = {
.enter_state = sci_request_completed_state_enter,
},
[SCI_REQ_ABORTING] = {
.enter_state = sci_request_aborting_state_enter,
},
[SCI_REQ_FINAL] = { },
};
static void
sci_general_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq)
{
sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
ireq->target_device = idev;
ireq->protocol = SCIC_NO_PROTOCOL;
ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
ireq->sci_status = SCI_SUCCESS;
ireq->scu_status = 0;
ireq->post_context = 0xFFFFFFFF;
}
static enum sci_status
sci_io_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq)
{
struct domain_device *dev = idev->domain_dev;
enum sci_status status = SCI_SUCCESS;
/* Build the common part of the request */
sci_general_request_construct(ihost, idev, ireq);
if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
return SCI_FAILURE_INVALID_REMOTE_DEVICE;
if (dev->dev_type == SAS_END_DEV)
/* pass */;
else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
else if (dev_is_expander(dev))
/* pass */;
else
return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
return status;
}
enum sci_status sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
u16 io_tag, struct isci_request *ireq)
{
struct domain_device *dev = idev->domain_dev;
enum sci_status status = SCI_SUCCESS;
/* Build the common part of the request */
sci_general_request_construct(ihost, idev, ireq);
if (dev->dev_type == SAS_END_DEV ||
dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
set_bit(IREQ_TMF, &ireq->flags);
memset(ireq->tc, 0, sizeof(struct scu_task_context));
} else
status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
return status;
}
static enum sci_status isci_request_ssp_request_construct(
struct isci_request *request)
{
enum sci_status status;
dev_dbg(&request->isci_host->pdev->dev,
"%s: request = %p\n",
__func__,
request);
status = sci_io_request_construct_basic_ssp(request);
return status;
}
static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
{
struct sas_task *task = isci_request_access_task(ireq);
struct host_to_dev_fis *fis = &ireq->stp.cmd;
struct ata_queued_cmd *qc = task->uldd_task;
enum sci_status status;
dev_dbg(&ireq->isci_host->pdev->dev,
"%s: ireq = %p\n",
__func__,
ireq);
memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
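/* In the host-to-device FIS, bit 7 of the flags byte is the C bit
* (command vs. device-control register update) and the low nibble is
* the PM port field: the code below sets C for command updates and
* clears the PM port.
*/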
if (!task->ata_task.device_control_reg_update)
fis->flags |= 0x80;
fis->flags &= 0xF0;
status = sci_io_request_construct_basic_sata(ireq);
if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
qc->tf.command == ATA_CMD_FPDMA_READ)) {
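/* For first-party DMA (NCQ) commands the tag is carried in bits 7:3
* of the FIS sector count field and must also be programmed into the
* task context.
*/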
fis->sector_count = qc->tag << 3;
ireq->tc->type.stp.ncq_tag = qc->tag;
}
return status;
}
static enum sci_status
sci_io_request_construct_smp(struct device *dev,
struct isci_request *ireq,
struct sas_task *task)
{
struct scatterlist *sg = &task->smp_task.smp_req;
struct isci_remote_device *idev;
struct scu_task_context *task_context;
struct isci_port *iport;
struct smp_req *smp_req;
void *kaddr;
u8 req_len;
u32 cmd;
kaddr = kmap_atomic(sg_page(sg));
smp_req = kaddr + sg->offset;
/*
* Look at the SMP request's header fields; for certain SAS 1.x SMP
* functions under SAS 2.0, a zero request length really indicates
* a non-zero default length.
*/
if (smp_req->req_len == 0) {
switch (smp_req->func) {
case SMP_DISCOVER:
case SMP_REPORT_PHY_ERR_LOG:
case SMP_REPORT_PHY_SATA:
case SMP_REPORT_ROUTE_INFO:
smp_req->req_len = 2;
break;
case SMP_CONF_ROUTE_INFO:
case SMP_PHY_CONTROL:
case SMP_PHY_TEST_FUNCTION:
smp_req->req_len = 9;
break;
/* Default - zero is a valid default for 2.0. */
}
}
req_len = smp_req->req_len;
/* byte swap the smp request. */
sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
cmd = *(u32 *) smp_req;
kunmap_atomic(kaddr);
if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
return SCI_FAILURE;
ireq->protocol = SCIC_SMP_PROTOCOL;
task_context = ireq->tc;
idev = ireq->target_device;
iport = idev->owning_port;
/*
* Fill in the TC with its required data
* 00h
*/
task_context->priority = 0;
task_context->initiator_request = 1;
task_context->connection_rate = idev->connection_rate;
task_context->protocol_engine_index = ISCI_PEG;
task_context->logical_port_index = iport->physical_port_index;
task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
task_context->abort = 0;
task_context->valid = SCU_TASK_CONTEXT_VALID;
task_context->context_type = SCU_TASK_CONTEXT_TYPE;
/* 04h */
task_context->remote_node_index = idev->rnc.remote_node_index;
task_context->command_code = 0;
task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
/* 08h */
task_context->link_layer_control = 0;
task_context->do_not_dma_ssp_good_response = 1;
task_context->strict_ordering = 0;
task_context->control_frame = 1;
task_context->timeout_enable = 0;
task_context->block_guard_enable = 0;
/* 0ch */
task_context->address_modifier = 0;
/* 10h */
task_context->ssp_command_iu_length = req_len;
/* 14h */
task_context->transfer_length_bytes = 0;
/*
* 18h ~ 30h, protocol specific
* since the command IU has been built by the framework at this point, we
* just copy the first DWord from the command IU to this location. */
memcpy(&task_context->type.smp, &cmd, sizeof(u32));
/*
* 40h
* "For SMP you could program it to zero. We would prefer that way
* so that done code will be consistent." - Venki
*/
task_context->task_phase = 0;
ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
(ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
(iport->physical_port_index <<
SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
ISCI_TAG_TCI(ireq->io_tag));
/*
* Copy the physical address of the command buffer to the SCU Task
* Context; the command buffer should not contain the command header.
*/
task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
/* SMP response comes as UF, so no need to set response IU address. */
task_context->response_iu_upper = 0;
task_context->response_iu_lower = 0;
sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
return SCI_SUCCESS;
}
/*
* isci_smp_request_build() - This function builds the smp request.
* @ireq: This parameter points to the isci_request allocated in the
* request construct function.
*
* SCI_SUCCESS on successful completion, or specific failure code.
*/
static enum sci_status isci_smp_request_build(struct isci_request *ireq)
{
struct sas_task *task = isci_request_access_task(ireq);
struct device *dev = &ireq->isci_host->pdev->dev;
enum sci_status status = SCI_FAILURE;
status = sci_io_request_construct_smp(dev, ireq, task);
if (status != SCI_SUCCESS)
dev_dbg(&ireq->isci_host->pdev->dev,
"%s: failed with status = %d\n",
__func__,
status);
return status;
}
/**
* isci_io_request_build() - This function builds the io request object.
* @ihost: This parameter specifies the ISCI host object
* @request: This parameter points to the isci_request object allocated in the
* request construct function.
* @idev: This parameter is the handle for the sci core's remote device
* object that is the destination for this request.
*
* SCI_SUCCESS on successful completion, or specific failure code.
*/
static enum sci_status isci_io_request_build(struct isci_host *ihost,
struct isci_request *request,
struct isci_remote_device *idev)
{
enum sci_status status = SCI_SUCCESS;
struct sas_task *task = isci_request_access_task(request);
dev_dbg(&ihost->pdev->dev,
"%s: idev = 0x%p; request = %p, "
"num_scatter = %d\n",
__func__,
idev,
request,
task->num_scatter);
/* map the sgl addresses, if present.
* libata does the mapping for sata devices
* before we get the request.
*/
if (task->num_scatter &&
!sas_protocol_ata(task->task_proto) &&
!(SAS_PROTOCOL_SMP & task->task_proto)) {
request->num_sg_entries = dma_map_sg(
&ihost->pdev->dev,
task->scatter,
task->num_scatter,
task->data_dir
);
if (request->num_sg_entries == 0)
return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}
status = sci_io_request_construct(ihost, idev, request);
if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
"%s: failed request construct\n",
__func__);
return SCI_FAILURE;
}
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
status = isci_smp_request_build(request);
break;
case SAS_PROTOCOL_SSP:
status = isci_request_ssp_request_construct(request);
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
status = isci_request_stp_request_construct(request);
break;
default:
dev_dbg(&ihost->pdev->dev,
"%s: unknown protocol\n", __func__);
return SCI_FAILURE;
}
return SCI_SUCCESS;
}
static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
{
struct isci_request *ireq;
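/* Requests are preallocated by the host; look one up by the tag's
* task-context index and reset its per-I/O bookkeeping.
*/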
ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
ireq->io_tag = tag;
ireq->io_request_completion = NULL;
ireq->flags = 0;
ireq->num_sg_entries = 0;
INIT_LIST_HEAD(&ireq->completed_node);
INIT_LIST_HEAD(&ireq->dev_node);
isci_request_change_state(ireq, allocated);
return ireq;
}
static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
struct sas_task *task,
u16 tag)
{
struct isci_request *ireq;
ireq = isci_request_from_tag(ihost, tag);
ireq->ttype_ptr.io_task_ptr = task;
clear_bit(IREQ_TMF, &ireq->flags);
task->lldd_task = ireq;
return ireq;
}
struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
struct isci_tmf *isci_tmf,
u16 tag)
{
struct isci_request *ireq;
ireq = isci_request_from_tag(ihost, tag);
ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
set_bit(IREQ_TMF, &ireq->flags);
return ireq;
}
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
struct sas_task *task, u16 tag)
{
enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
struct isci_request *ireq;
unsigned long flags;
int ret = 0;
/* do common allocation and init of request object. */
ireq = isci_io_request_from_tag(ihost, task, tag);
status = isci_io_request_build(ihost, ireq, idev);
if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
"%s: request_construct failed - status = 0x%x\n",
__func__,
status);
return status;
}
spin_lock_irqsave(&ihost->scic_lock, flags);
if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
if (isci_task_is_ncq_recovery(task)) {
/* The device is in an NCQ recovery state. Issue the
* request on the task side. Note that it will
* complete on the I/O request side because the
* request was built that way (i.e.
* ireq->is_task_management_request is false).
*/
status = sci_controller_start_task(ihost,
idev,
ireq);
} else {
status = SCI_FAILURE;
}
} else {
/* send the request, let the core assign the IO TAG. */
status = sci_controller_start_io(ihost, idev,
ireq);
}
if (status != SCI_SUCCESS &&
status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
dev_dbg(&ihost->pdev->dev,
"%s: failed request start (0x%x)\n",
__func__, status);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
return status;
}
/* Either I/O started OK, or the core has signaled that
* the device needs a target reset.
*
* In either case, hold onto the I/O for later.
*
* Update its status and add it to the list in the
* remote device object.
*/
list_add(&ireq->dev_node, &idev->reqs_in_process);
if (status == SCI_SUCCESS) {
isci_request_change_state(ireq, started);
} else {
/* The request did not really start in the
* hardware, so clear the request handle
* here so no terminations will be done.
*/
set_bit(IREQ_TERMINATED, &ireq->flags);
isci_request_change_state(ireq, completed);
}
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (status ==
SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
/* Signal libsas that we need the SCSI error
* handler thread to work on this I/O and that
* we want a device reset.
*/
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
spin_unlock_irqrestore(&task->task_state_lock, flags);
/* Cause this task to be scheduled in the SCSI error
* handler thread.
*/
sas_task_abort(task);
/* Change the status, since we are holding
* the I/O until it is managed by the SCSI
* error handler.
*/
status = SCI_SUCCESS;
}
return ret;
}
| gpl-2.0 |
jdkernel/mecha_sense_2.6.35 | arch/powerpc/platforms/86xx/gef_sbc310.c | 4038 | 5533 | /*
* GE SBC310 board support
*
* Author: Martyn Welch <martyn.welch@ge.com>
*
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Based on: mpc86xx_hpcn.c (MPC86xx HPCN board specific routines)
* Copyright 2006 Freescale Semiconductor Inc.
*
* NEC fixup adapted from arch/mips/pci/fixup-lm2e.c
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/nvram.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
#include "mpc86xx.h"
#include "gef_pic.h"
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(KERN_ERR "SBC310: " fmt); } while (0)
#else
#define DBG(fmt...) do { } while (0)
#endif
void __iomem *sbc310_regs;
static void __init gef_sbc310_init_irq(void)
{
struct device_node *cascade_node = NULL;
mpc86xx_init_irq();
/*
* There is a simple interrupt handler in the main FPGA, this needs
* to be cascaded into the MPIC
*/
cascade_node = of_find_compatible_node(NULL, NULL, "gef,fpga-pic");
if (!cascade_node) {
printk(KERN_WARNING "SBC310: No FPGA PIC\n");
return;
}
gef_pic_init(cascade_node);
of_node_put(cascade_node);
}
static void __init gef_sbc310_setup_arch(void)
{
struct device_node *regs;
#ifdef CONFIG_PCI
struct device_node *np;
for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
fsl_add_bridge(np, 1);
}
#endif
printk(KERN_INFO "GE Intelligent Platforms SBC310 6U VPX SBC\n");
#ifdef CONFIG_SMP
mpc86xx_smp_init();
#endif
/* Remap basic board registers */
regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs");
if (regs) {
sbc310_regs = of_iomap(regs, 0);
if (sbc310_regs == NULL)
printk(KERN_WARNING "Unable to map board registers\n");
of_node_put(regs);
}
#if defined(CONFIG_MMIO_NVRAM)
mmio_nvram_init();
#endif
}
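/*
* The board FPGA exposes a single 32-bit ID register at sbc310_regs:
* bits 7:0 hold the board ID, 15:8 the PCB revision, 23:16 the board
* (software) revision and 27:24 the FPGA revision. The helpers below
* decode the individual fields.
*/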
/* Return the board ID */
static unsigned int gef_sbc310_get_board_id(void)
{
unsigned int reg;
reg = ioread32(sbc310_regs);
return reg & 0xff;
}
/* Return the PCB revision */
static unsigned int gef_sbc310_get_pcb_rev(void)
{
unsigned int reg;
reg = ioread32(sbc310_regs);
return (reg >> 8) & 0xff;
}
/* Return the board (software) revision */
static unsigned int gef_sbc310_get_board_rev(void)
{
unsigned int reg;
reg = ioread32(sbc310_regs);
return (reg >> 16) & 0xff;
}
/* Return the FPGA revision */
static unsigned int gef_sbc310_get_fpga_rev(void)
{
unsigned int reg;
reg = ioread32(sbc310_regs);
return (reg >> 24) & 0xf;
}
static void gef_sbc310_show_cpuinfo(struct seq_file *m)
{
uint svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");
seq_printf(m, "Board ID\t: 0x%2.2x\n", gef_sbc310_get_board_id());
seq_printf(m, "Revision\t: %u%c\n", gef_sbc310_get_pcb_rev(),
('A' + gef_sbc310_get_board_rev() - 1));
seq_printf(m, "FPGA Revision\t: %u\n", gef_sbc310_get_fpga_rev());
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
}
static void __init gef_sbc310_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
/* Do not do the fixup on other platforms! */
if (!machine_is(gef_sbc310))
return;
printk(KERN_INFO "Running NEC uPD720101 Fixup\n");
/* Ensure only ports 1 & 2 are enabled */
pci_read_config_dword(pdev, 0xe0, &val);
pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x2);
/* System clock is 48-MHz Oscillator and EHCI Enabled. */
pci_write_config_dword(pdev, 0xe4, 1 << 5);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
gef_sbc310_nec_fixup);
/*
* Called very early, device-tree isn't unflattened
*
* This function is called to determine whether the BSP is compatible with the
* supplied device-tree, which is assumed to be the correct one for the actual
* board. It is expected that, in the future, a kernel may support multiple
* boards.
*/
static int __init gef_sbc310_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (of_flat_dt_is_compatible(root, "gef,sbc310"))
return 1;
return 0;
}
static long __init mpc86xx_time_init(void)
{
unsigned int temp;
/* Set the time base to zero */
mtspr(SPRN_TBWL, 0);
mtspr(SPRN_TBWU, 0);
temp = mfspr(SPRN_HID0);
temp |= HID0_TBEN;
mtspr(SPRN_HID0, temp);
asm volatile("isync");
return 0;
}
static __initdata struct of_device_id of_bus_ids[] = {
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
{},
};
static int __init declare_of_platform_devices(void)
{
printk(KERN_DEBUG "Probe platform devices\n");
of_platform_bus_probe(NULL, of_bus_ids, NULL);
return 0;
}
machine_device_initcall(gef_sbc310, declare_of_platform_devices);
define_machine(gef_sbc310) {
.name = "GE SBC310",
.probe = gef_sbc310_probe,
.setup_arch = gef_sbc310_setup_arch,
.init_IRQ = gef_sbc310_init_irq,
.show_cpuinfo = gef_sbc310_show_cpuinfo,
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
.time_init = mpc86xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
};
| gpl-2.0 |
drgroovestarr/kernel_samsung_manta | drivers/mfd/max8925-i2c.c | 4294 | 5508 | /*
* I2C driver for Maxim MAX8925
*
* Copyright (C) 2009 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/mfd/max8925.h>
#include <linux/slab.h>
#define RTC_I2C_ADDR 0x68
#define ADC_I2C_ADDR 0x47
static inline int max8925_read_device(struct i2c_client *i2c,
int reg, int bytes, void *dest)
{
int ret;
if (bytes > 1)
ret = i2c_smbus_read_i2c_block_data(i2c, reg, bytes, dest);
else {
ret = i2c_smbus_read_byte_data(i2c, reg);
if (ret < 0)
return ret;
*(unsigned char *)dest = (unsigned char)ret;
}
return ret;
}
static inline int max8925_write_device(struct i2c_client *i2c,
int reg, int bytes, void *src)
{
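/* Build one I2C write: register address byte followed by the payload.
* The buffer is a variable-length array sized by the caller's count.
*/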
unsigned char buf[bytes + 1];
int ret;
buf[0] = (unsigned char)reg;
memcpy(&buf[1], src, bytes);
ret = i2c_master_send(i2c, buf, bytes + 1);
if (ret < 0)
return ret;
return 0;
}
int max8925_reg_read(struct i2c_client *i2c, int reg)
{
struct max8925_chip *chip = i2c_get_clientdata(i2c);
unsigned char data = 0;
int ret;
mutex_lock(&chip->io_lock);
ret = max8925_read_device(i2c, reg, 1, &data);
mutex_unlock(&chip->io_lock);
if (ret < 0)
return ret;
else
return (int)data;
}
EXPORT_SYMBOL(max8925_reg_read);
int max8925_reg_write(struct i2c_client *i2c, int reg,
unsigned char data)
{
struct max8925_chip *chip = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&chip->io_lock);
ret = max8925_write_device(i2c, reg, 1, &data);
mutex_unlock(&chip->io_lock);
return ret;
}
EXPORT_SYMBOL(max8925_reg_write);
int max8925_bulk_read(struct i2c_client *i2c, int reg,
int count, unsigned char *buf)
{
struct max8925_chip *chip = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&chip->io_lock);
ret = max8925_read_device(i2c, reg, count, buf);
mutex_unlock(&chip->io_lock);
return ret;
}
EXPORT_SYMBOL(max8925_bulk_read);
int max8925_bulk_write(struct i2c_client *i2c, int reg,
int count, unsigned char *buf)
{
struct max8925_chip *chip = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&chip->io_lock);
ret = max8925_write_device(i2c, reg, count, buf);
mutex_unlock(&chip->io_lock);
return ret;
}
EXPORT_SYMBOL(max8925_bulk_write);
int max8925_set_bits(struct i2c_client *i2c, int reg,
unsigned char mask, unsigned char data)
{
struct max8925_chip *chip = i2c_get_clientdata(i2c);
unsigned char value;
int ret;
mutex_lock(&chip->io_lock);
ret = max8925_read_device(i2c, reg, 1, &value);
if (ret < 0)
goto out;
value &= ~mask;
value |= data;
ret = max8925_write_device(i2c, reg, 1, &value);
out:
mutex_unlock(&chip->io_lock);
return ret;
}
EXPORT_SYMBOL(max8925_set_bits);
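/*
* Example usage (hypothetical register 0x10 and mask 0x01, shown for
* illustration only): read-modify-write bit 0 of a chip register -
*
* max8925_set_bits(chip->i2c, 0x10, 0x01, on ? 0x01 : 0x00);
*
* All of these accessors serialize on chip->io_lock, so concurrent
* callers are safe.
*/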
static const struct i2c_device_id max8925_id_table[] = {
{ "max8925", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, max8925_id_table);
static int __devinit max8925_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max8925_platform_data *pdata = client->dev.platform_data;
struct max8925_chip *chip;
if (!pdata) {
pr_info("%s: platform data is missing\n", __func__);
return -EINVAL;
}
chip = kzalloc(sizeof(struct max8925_chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
chip->i2c = client;
chip->dev = &client->dev;
i2c_set_clientdata(client, chip);
dev_set_drvdata(chip->dev, chip);
mutex_init(&chip->io_lock);
chip->rtc = i2c_new_dummy(chip->i2c->adapter, RTC_I2C_ADDR);
i2c_set_clientdata(chip->rtc, chip);
chip->adc = i2c_new_dummy(chip->i2c->adapter, ADC_I2C_ADDR);
i2c_set_clientdata(chip->adc, chip);
device_init_wakeup(&client->dev, 1);
max8925_device_init(chip, pdata);
return 0;
}
static int __devexit max8925_remove(struct i2c_client *client)
{
struct max8925_chip *chip = i2c_get_clientdata(client);
max8925_device_exit(chip);
i2c_unregister_device(chip->adc);
i2c_unregister_device(chip->rtc);
kfree(chip);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int max8925_suspend(struct device *dev)
{
struct i2c_client *client = container_of(dev, struct i2c_client, dev);
struct max8925_chip *chip = i2c_get_clientdata(client);
if (device_may_wakeup(dev) && chip->wakeup_flag)
enable_irq_wake(chip->core_irq);
return 0;
}
static int max8925_resume(struct device *dev)
{
struct i2c_client *client = container_of(dev, struct i2c_client, dev);
struct max8925_chip *chip = i2c_get_clientdata(client);
if (device_may_wakeup(dev) && chip->wakeup_flag)
disable_irq_wake(chip->core_irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(max8925_pm_ops, max8925_suspend, max8925_resume);
static struct i2c_driver max8925_driver = {
.driver = {
.name = "max8925",
.owner = THIS_MODULE,
.pm = &max8925_pm_ops,
},
.probe = max8925_probe,
.remove = __devexit_p(max8925_remove),
.id_table = max8925_id_table,
};
static int __init max8925_i2c_init(void)
{
int ret;
ret = i2c_add_driver(&max8925_driver);
if (ret != 0)
pr_err("Failed to register MAX8925 I2C driver: %d\n", ret);
return ret;
}
subsys_initcall(max8925_i2c_init);
static void __exit max8925_i2c_exit(void)
{
i2c_del_driver(&max8925_driver);
}
module_exit(max8925_i2c_exit);
MODULE_DESCRIPTION("I2C Driver for Maxim 8925");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
piccolo-dev/android_kernel_bq_piccolo | drivers/net/wimax/i2400m/usb-notif.c | 4294 | 8087 | /*
* Intel Wireless WiMAX Connection 2400m over USB
* Notification handling
*
*
* Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <linux-wimax@intel.com>
* Yanir Lubetkin <yanirx.lubetkin@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - Initial implementation
*
*
* The notification endpoint is active when the device is not in boot
* mode; in here we just read and get notifications; based on those,
* we act to either reinitialize the device after a reboot or to
* submit a RX request.
*
* ROADMAP
*
* i2400mu_notification_setup()
*
* i2400mu_notification_release()
*
* i2400mu_notification_cb() Called when a URB is ready
* i2400mu_notification_grok()
* i2400m_is_boot_barker()
* i2400m_dev_reset_handle()
* i2400mu_rx_kick()
*/
#include <linux/usb.h>
#include <linux/slab.h>
#include "i2400m-usb.h"
#define D_SUBMODULE notif
#include "usb-debug-levels.h"
static const
__le32 i2400m_ZERO_BARKER[4] = { 0, 0, 0, 0 };
/*
* Process a received notification
*
* In normal operation mode, we can only receive two types of payloads
* on the notification endpoint:
*
* - a reboot barker: we do a bootstrap (the device has reset).
*
* - a block of zeroes: there is pending data in the IN endpoint
*/
static
int i2400mu_notification_grok(struct i2400mu *i2400mu, const void *buf,
size_t buf_len)
{
int ret;
struct device *dev = &i2400mu->usb_iface->dev;
struct i2400m *i2400m = &i2400mu->i2400m;
d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n",
i2400mu, buf, buf_len);
ret = -EIO;
if (buf_len < sizeof(i2400m_ZERO_BARKER))
/* Not a bug, just ignore */
goto error_bad_size;
ret = 0;
if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
i2400mu_rx_kick(i2400mu);
goto out;
}
ret = i2400m_is_boot_barker(i2400m, buf, buf_len);
if (unlikely(ret >= 0))
ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
else /* Unknown or unexpected data in the notif message */
i2400m_unknown_barker(i2400m, buf, buf_len);
error_bad_size:
out:
d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n",
i2400mu, buf, buf_len, ret);
return ret;
}
/*
* URB callback for the notification endpoint
*
* @urb: the urb received from the notification endpoint
*
* This function will just process the USB side of the transaction,
* checking everything is fine, pass the processing to
* i2400mu_notification_grok() and resubmit the URB.
*/
static
void i2400mu_notification_cb(struct urb *urb)
{
int ret;
struct i2400mu *i2400mu = urb->context;
struct device *dev = &i2400mu->usb_iface->dev;
d_fnstart(4, dev, "(urb %p status %d actual_length %d)\n",
urb, urb->status, urb->actual_length);
ret = urb->status;
switch (ret) {
case 0:
ret = i2400mu_notification_grok(i2400mu, urb->transfer_buffer,
urb->actual_length);
if (ret == -EIO && edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME))
goto error_exceeded;
if (ret == -ENOMEM) /* uff...power cycle? shutdown? */
goto error_exceeded;
break;
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* ditto */
case -ESHUTDOWN: /* URB killed */
case -ECONNRESET: /* disconnection */
goto out; /* Notify around */
default: /* Some error? */
if (edc_inc(&i2400mu->urb_edc,
EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
goto error_exceeded;
dev_err(dev, "notification: URB error %d, retrying\n",
urb->status);
}
usb_mark_last_busy(i2400mu->usb_dev);
ret = usb_submit_urb(i2400mu->notif_urb, GFP_ATOMIC);
switch (ret) {
case 0:
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* ditto */
case -ESHUTDOWN: /* URB killed */
case -ECONNRESET: /* disconnection */
break; /* just ignore */
default: /* Some error? */
dev_err(dev, "notification: cannot submit URB: %d\n", ret);
goto error_submit;
}
d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
urb, urb->status, urb->actual_length);
return;
error_exceeded:
dev_err(dev, "maximum errors in notification URB exceeded; "
"resetting device\n");
error_submit:
usb_queue_reset_device(i2400mu->usb_iface);
out:
d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
urb, urb->status, urb->actual_length);
}
/*
* setup the notification endpoint
*
* @i2400m: device descriptor
*
* This procedure prepares the notification urb and handler for receiving
* unsolicited barkers from the device.
*/
int i2400mu_notification_setup(struct i2400mu *i2400mu)
{
struct device *dev = &i2400mu->usb_iface->dev;
int usb_pipe, ret = 0;
struct usb_endpoint_descriptor *epd;
char *buf;
d_fnstart(4, dev, "(i2400m %p)\n", i2400mu);
buf = kmalloc(I2400MU_MAX_NOTIFICATION_LEN, GFP_KERNEL | GFP_DMA);
if (buf == NULL) {
ret = -ENOMEM;
goto error_buf_alloc;
}
i2400mu->notif_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!i2400mu->notif_urb) {
ret = -ENOMEM;
dev_err(dev, "notification: cannot allocate URB\n");
goto error_alloc_urb;
}
epd = usb_get_epd(i2400mu->usb_iface,
i2400mu->endpoint_cfg.notification);
usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe,
buf, I2400MU_MAX_NOTIFICATION_LEN,
i2400mu_notification_cb, i2400mu, epd->bInterval);
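/* A single submission is enough: the completion handler resubmits
* the URB itself for the life of the interface.
*/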
ret = usb_submit_urb(i2400mu->notif_urb, GFP_KERNEL);
if (ret != 0) {
dev_err(dev, "notification: cannot submit URB: %d\n", ret);
goto error_submit;
}
d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret);
return ret;
error_submit:
usb_free_urb(i2400mu->notif_urb);
error_alloc_urb:
kfree(buf);
error_buf_alloc:
d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret);
return ret;
}
/*
* Tear down of the notification mechanism
*
* @i2400m: device descriptor
*
* Kill the interrupt endpoint urb, free any allocated resources.
*
* We need to check if we have done it before as for example,
* _suspend() call this; if after a suspend() we get a _disconnect()
* (as the case is when hibernating), nothing bad happens.
*/
void i2400mu_notification_release(struct i2400mu *i2400mu)
{
struct device *dev = &i2400mu->usb_iface->dev;
d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
if (i2400mu->notif_urb != NULL) {
usb_kill_urb(i2400mu->notif_urb);
kfree(i2400mu->notif_urb->transfer_buffer);
usb_free_urb(i2400mu->notif_urb);
i2400mu->notif_urb = NULL;
}
d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
}
| gpl-2.0 |
deedwar/evolve | arch/sparc/kernel/nmi.c | 4550 | 6128 | /* Pseudo NMI support on sparc64 systems.
*
* Copyright (C) 2009 David S. Miller <davem@davemloft.net>
*
* The NMI watchdog support and infrastructure is based almost
* entirely upon the x86 NMI support code.
*/
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/kernel_stat.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <asm/perf_event.h>
#include <asm/ptrace.h>
#include <asm/pcr.h>
#include <asm/perfctr.h>
#include "kstack.h"
/* We don't have a real NMI on sparc64, but we can fake one
* up using profiling counter overflow interrupts and interrupt
* levels.
*
* The profile overflow interrupts at level 15, so we use
* level 14 as our IRQ off level.
*/
static int panic_on_timeout;
/* nmi_active:
* >0: the NMI watchdog is active, but can be disabled
* <0: the NMI watchdog has not been set up, and cannot be enabled
* 0: the NMI watchdog is disabled, but can be enabled
*/
atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
EXPORT_SYMBOL(nmi_active);
static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;
static DEFINE_PER_CPU(unsigned int, last_irq_sum);
static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog(void)
{
if (atomic_read(&nmi_active)) {
int cpu;
for_each_present_cpu(cpu) {
if (per_cpu(nmi_touch, cpu) != 1)
per_cpu(nmi_touch, cpu) = 1;
}
}
touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
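/* Long-running sections that legitimately stall the timer tick can
* call touch_nmi_watchdog() periodically; it flags every present CPU
* so the overflow handler resets its alert counter instead of
* declaring a lockup.
*/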
static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
{
if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
return;
console_verbose();
bust_spinlocks(1);
printk(KERN_EMERG "%s", str);
printk(" on CPU%d, ip %08lx, registers:\n",
smp_processor_id(), regs->tpc);
show_regs(regs);
dump_stack();
bust_spinlocks(0);
if (do_panic || panic_on_oops)
panic("Non maskable interrupt");
nmi_exit();
local_irq_enable();
do_exit(SIGBUS);
}
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
unsigned int sum, touched = 0;
void *orig_sp;
clear_softint(1 << irq);
local_cpu_data().__nmi_count++;
nmi_enter();
orig_sp = set_hardirq_stack();
if (notify_die(DIE_NMI, "nmi", regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
touched = 1;
else
pcr_ops->write(PCR_PIC_PRIV);
sum = local_cpu_data().irq0_irqs;
if (__get_cpu_var(nmi_touch)) {
__get_cpu_var(nmi_touch) = 0;
touched = 1;
}
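/* No irq0 progress since the last overflow and no touch: count up.
* Roughly 30 seconds of this (30 * nmi_hz overflow ticks) is treated
* as a hard lockup.
*/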
if (!touched && __get_cpu_var(last_irq_sum) == sum) {
__this_cpu_inc(alert_counter);
if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
die_nmi("BUG: NMI Watchdog detected LOCKUP",
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
__this_cpu_write(alert_counter, 0);
}
if (__get_cpu_var(wd_enabled)) {
write_pic(picl_value(nmi_hz));
pcr_ops->write(pcr_enable);
}
restore_hardirq_stack(orig_sp);
nmi_exit();
}
static inline unsigned int get_nmi_count(int cpu)
{
return cpu_data(cpu).__nmi_count;
}
static __init void nmi_cpu_busy(void *data)
{
local_irq_enable_in_hardirq();
while (endflag == 0)
mb();
}
static void report_broken_nmi(int cpu, int *prev_nmi_count)
{
printk(KERN_CONT "\n");
printk(KERN_WARNING
"WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
printk(KERN_WARNING
"Please report this to bugzilla.kernel.org,\n");
printk(KERN_WARNING
"and attach the output of the 'dmesg' command.\n");
per_cpu(wd_enabled, cpu) = 0;
atomic_dec(&nmi_active);
}
void stop_nmi_watchdog(void *unused)
{
pcr_ops->write(PCR_PIC_PRIV);
__get_cpu_var(wd_enabled) = 0;
atomic_dec(&nmi_active);
}
static int __init check_nmi_watchdog(void)
{
unsigned int *prev_nmi_count;
int cpu, err;
if (!atomic_read(&nmi_active))
return 0;
prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
if (!prev_nmi_count) {
err = -ENOMEM;
goto error;
}
printk(KERN_INFO "Testing NMI watchdog ... ");
smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
for_each_possible_cpu(cpu)
prev_nmi_count[cpu] = get_nmi_count(cpu);
local_irq_enable();
mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
for_each_online_cpu(cpu) {
if (!per_cpu(wd_enabled, cpu))
continue;
if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
report_broken_nmi(cpu, prev_nmi_count);
}
endflag = 1;
if (!atomic_read(&nmi_active)) {
kfree(prev_nmi_count);
atomic_set(&nmi_active, -1);
err = -ENODEV;
goto error;
}
printk("OK.\n");
nmi_hz = 1;
kfree(prev_nmi_count);
return 0;
error:
on_each_cpu(stop_nmi_watchdog, NULL, 1);
return err;
}
void start_nmi_watchdog(void *unused)
{
__get_cpu_var(wd_enabled) = 1;
atomic_inc(&nmi_active);
pcr_ops->write(PCR_PIC_PRIV);
write_pic(picl_value(nmi_hz));
pcr_ops->write(pcr_enable);
}
static void nmi_adjust_hz_one(void *unused)
{
if (!__get_cpu_var(wd_enabled))
return;
pcr_ops->write(PCR_PIC_PRIV);
write_pic(picl_value(nmi_hz));
pcr_ops->write(pcr_enable);
}
void nmi_adjust_hz(unsigned int new_hz)
{
nmi_hz = new_hz;
on_each_cpu(nmi_adjust_hz_one, NULL, 1);
}
EXPORT_SYMBOL_GPL(nmi_adjust_hz);
static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
{
on_each_cpu(stop_nmi_watchdog, NULL, 1);
return 0;
}
static struct notifier_block nmi_reboot_notifier = {
.notifier_call = nmi_shutdown,
};
int __init nmi_init(void)
{
int err;
on_each_cpu(start_nmi_watchdog, NULL, 1);
err = check_nmi_watchdog();
if (!err) {
err = register_reboot_notifier(&nmi_reboot_notifier);
if (err) {
on_each_cpu(stop_nmi_watchdog, NULL, 1);
atomic_set(&nmi_active, -1);
}
}
return err;
}
static int __init setup_nmi_watchdog(char *str)
{
if (!strncmp(str, "panic", 5))
panic_on_timeout = 1;
return 0;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);
| gpl-2.0 |
acklinr/omap-android-3.4 | drivers/net/ethernet/amd/depca.c | 4806 | 61304 | /* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux.
Written 1994, 1995 by David C. Davies.
Copyright 1994 David C. Davies
and
United States Government
(as represented by the Director, National Security Agency).
Copyright 1995 Digital Equipment Corporation.
This software may be used and distributed according to the terms of
the GNU General Public License, incorporated herein by reference.
This driver is written for the Digital Equipment Corporation series
of DEPCA and EtherWORKS ethernet cards:
DEPCA (the original)
DE100
DE101
DE200 Turbo
DE201 Turbo
DE202 Turbo (TP BNC)
DE210
DE422 (EISA)
The driver has been tested on DE100, DE200 and DE202 cards in a
relatively busy network. The DE422 has been tested a little.
This driver will NOT work for the DE203, DE204 and DE205 series of
cards, since they have a new custom ASIC in place of the AMD LANCE
chip. See the 'ewrk3.c' driver in the Linux source tree for running
those cards.
I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from)
a DECstation 5000/200.
The author may be reached at davies@maniac.ultranet.com
=========================================================================
The driver was originally based on the 'lance.c' driver from Donald
Becker which is included with the standard driver distribution for
linux. V0.4 is a complete re-write with only the kernel interface
remaining from the original code.
1) Lance.c code in /linux/drivers/net/
2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook",
AMD, 1992 [(800) 222-9323].
3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)",
AMD, Pub. #17881, May 1993.
4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA",
AMD, Pub. #16907, May 1992
5) "DEC EtherWORKS LC Ethernet Controller Owners Manual",
Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003
6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual",
Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003
7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR
Digital Equipment Corporation, 1989
8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual",
Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001
Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this
driver.
The original DEPCA card requires that the ethernet ROM address counter
be enabled to count and has an 8 bit NICSR. The ROM counter enabling is
only done when a 0x08 is read as the first address octet (to minimise
the chances of writing over some other hardware's I/O register). The
NICSR accesses have been changed to byte accesses for all the cards
supported by this driver, since there is only one useful bit in the MSB
(remote boot timeout) and it is not used. Also, there is a maximum of
only 48kB network RAM for this card. My thanks to Torbjorn Lindh for
help debugging all this (and holding my feet to the fire until I got it
right).
The DE200 series boards have on-board 64kB RAM for use as a shared
memory network buffer. Only the DE100 cards make use of a 2kB buffer
mode which has not been implemented in this driver (only the 32kB and
64kB modes are supported [16kB/48kB for the original DEPCA]).
At the most only 2 DEPCA cards can be supported on the ISA bus because
there is only provision for two I/O base addresses on each card (0x300
and 0x200). The I/O address is detected by searching for a byte sequence
in the Ethernet station address PROM at the expected I/O address for the
Ethernet PROM. The shared memory base address is 'autoprobed' by
looking for the self test PROM and detecting the card name. When a
second DEPCA is detected, information is placed in the base_addr
variable of the next device structure (which is created if necessary),
thus enabling ethif_probe initialization for the device. More than 2
EISA cards can be supported, but care will be needed assigning the
shared memory to ensure that each slot has the correct IRQ, I/O address
and shared memory address assigned.
************************************************************************
NOTE: If you are using two ISA DEPCAs, it is important that you assign
the base memory addresses correctly. The driver autoprobes I/O 0x300
then 0x200. The base memory address for the first device must be less
than that of the second so that the auto probe will correctly assign the
I/O and memory addresses on the same card. I can't think of a way to do
this unambiguously at the moment, since there is nothing on the cards to
tie I/O and memory information together.
I am unable to test 2 cards together for now, so this code is
unchecked. All reports, good or bad, are welcome.
************************************************************************
The board IRQ setting must be at an unused IRQ which is auto-probed
using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are
{2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is
really IRQ9 in machines with 16 IRQ lines.
No 16MB memory limitation should exist with this driver as DMA is not
used and the common memory area is in low memory on the network card (my
current system has 20MB and I've not had problems yet).
The ability to load this driver as a loadable module has been added. To
utilise this ability, you have to do 8 things:
0) have a copy of the loadable modules code installed on your system.
1) copy depca.c from the /linux/drivers/net directory to your favourite
temporary directory.
2) if you wish, edit the source code near line 1530 to reflect the I/O
address and IRQ you're using (see also 5).
3) compile depca.c, but include -DMODULE in the command line to ensure
that the correct bits are compiled (see end of source code).
4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
kernel with the depca configuration turned off and reboot.
5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100]
[Alan Cox: Changed the code to allow command line irq/io assignments]
[Dave Davies: Changed the code to allow command line mem/name
assignments]
6) run the net startup bits for your eth?? interface manually
(usually /etc/rc.inet[12] at boot time).
7) enjoy!
Note that autoprobing is not allowed in loadable modules - the system is
already up and running and you're messing with interrupts.
To unload a module, turn off the associated interface
'ifconfig eth?? down' then 'rmmod depca'.
To assign a base memory address for the shared memory when running as a
loadable module, see 5 above. To include the adapter name (if you have
no PROM but know the card name) also see 5 above. Note that this last
option will not work with kernel built-in depca's.
The shared memory assignment for a loadable module makes sense to avoid
the 'memory autoprobe' picking the wrong shared memory (for the case of
2 depca's in a PC).
************************************************************************
Support for MCA EtherWORKS cards added 11-3-98.
Verified to work with up to 2 DE212 cards in a system (although not
fully stress-tested).
Currently known bugs/limitations:
Note: with the MCA stuff as a module, it trusts the MCA configuration,
not the command line for IRQ and memory address. You can
specify them if you want, but it will throw your values out.
You still have to pass the IO address it was configured as
though.
************************************************************************
TO DO:
------
Revision History
----------------
Version Date Description
0.1 25-jan-94 Initial writing.
0.2 27-jan-94 Added LANCE TX hardware buffer chaining.
0.3 1-feb-94 Added multiple DEPCA support.
0.31 4-feb-94 Added DE202 recognition.
0.32 19-feb-94 Tidy up. Improve multi-DEPCA support.
0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable.
Add jabber packet fix from murf@perftech.com
and becker@super.org
0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access.
0.35 8-mar-94 Added DE201 recognition. Tidied up.
0.351 30-apr-94 Added EISA support. Added DE422 recognition.
0.36 16-may-94 DE422 fix released.
0.37 22-jul-94 Added MODULE support
0.38 15-aug-94 Added DBR ROM switch in depca_close().
Multi DEPCA bug fix.
0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0.
0.381 12-dec-94 Added DE101 recognition, fix multicast bug.
0.382 9-feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
0.383 22-feb-95 Fix for conflict with VESA SCSI reported by
<stromain@alf.dec.com>
0.384 17-mar-95 Fix a ring full bug reported by <bkm@star.rl.ac.uk>
0.385 3-apr-95 Fix a recognition bug reported by
<ryan.niemi@lastfrontier.com>
0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility
0.40 25-May-95 Rewrite for portability & updated.
ALPHA support from <jestabro@amt.tay1.dec.com>
0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from
suggestion by <heiko@colossus.escape.de>
0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable
modules.
Add 'adapter_name' for loadable modules when no PROM.
Both above from a suggestion by
<pchen@woodruffs121.residence.gatech.edu>.
Add new multicasting code.
0.421 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
0.422 29-Apr-96 Fix depca_hw_init() bug <jari@markkus2.fimr.fi>
0.423 7-Jun-96 Fix module load bug <kmg@barco.be>
0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
0.44 1-Sep-97 Fix *_probe() to test check_region() first - bug
reported by <mmogilvi@elbert.uccs.edu>
0.45 3-Nov-98 Added support for MCA EtherWORKS (DE210/DE212) cards
by <tymm@computer.org>
0.451 5-Nov-98 Fixed mca stuff cuz I'm a dummy. <tymm@computer.org>
0.5 14-Nov-98 Re-spin for 2.1.x kernels.
0.51 27-Jun-99 Correct received packet length for CRC from
report by <worm@dkik.dk>
0.52 16-Oct-00 Fixes for 2.3 io memory accesses
Fix show-stopper (ints left masked) in depca_interrupt
by <peterd@pnd-pc.demon.co.uk>
0.53 12-Jan-01 Release resources on failure, bss tidbits
by acme@conectiva.com.br
0.54 08-Nov-01 use library crc32 functions
by Matt_Domsch@dell.com
0.55 01-Mar-03 Use EISA/sysfs framework <maz@wild-wind.fr.eu.org>
=========================================================================
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/unistd.h>
#include <linux/ctype.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif
#ifdef CONFIG_EISA
#include <linux/eisa.h>
#endif
#include "depca.h"
static char version[] __initdata = "depca.c:v0.53 2001/1/12 davies@maniac.ultranet.com\n";
#ifdef DEPCA_DEBUG
static int depca_debug = DEPCA_DEBUG;
#else
static int depca_debug = 1;
#endif
#define DEPCA_NDA 0xffe0 /* No Device Address */
#define TX_TIMEOUT (1*HZ)
/*
** Ethernet PROM defines
*/
#define PROBE_LENGTH 32
#define ETH_PROM_SIG 0xAA5500FFUL
/*
** Set the number of Tx and Rx buffers. Ensure that the memory requested
** here is <= to the amount of shared memory set up by the board switches.
** The number of descriptors MUST BE A POWER OF 2.
**
** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ)
*/
#define NUM_RX_DESC 8 /* Number of RX descriptors */
#define NUM_TX_DESC 8 /* Number of TX descriptors */
#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */
#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */
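/*
** Worked example for the defaults above (a sanity check, not a spec):
** with 8-byte descriptors and 1536-byte buffers each way,
** total_memory = 8*(8+1536) + 8*(8+1536) = 24704 bytes (~24kB), which
** fits the 32kB and larger configurations but not a 16kB original DEPCA.
*/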
/*
** EISA bus defines
*/
#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
/*
** ISA Bus defines
*/
#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000}
#define DEPCA_TOTAL_SIZE 0x10
static struct {
u_long iobase;
struct platform_device *device;
} depca_io_ports[] = {
{ 0x300, NULL },
{ 0x200, NULL },
{ 0 , NULL },
};
/*
** Name <-> Adapter mapping
*/
#define DEPCA_SIGNATURE {"DEPCA",\
"DE100","DE101",\
"DE200","DE201","DE202",\
"DE210","DE212",\
"DE422",\
""}
static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
enum depca_type {
DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
};
static char depca_string[] = "depca";
static int depca_device_remove (struct device *device);
#ifdef CONFIG_EISA
static struct eisa_device_id depca_eisa_ids[] = {
{ "DEC4220", de422 },
{ "" }
};
MODULE_DEVICE_TABLE(eisa, depca_eisa_ids);
static int depca_eisa_probe (struct device *device);
static struct eisa_driver depca_eisa_driver = {
.id_table = depca_eisa_ids,
.driver = {
.name = depca_string,
.probe = depca_eisa_probe,
.remove = __devexit_p (depca_device_remove)
}
};
#endif
#ifdef CONFIG_MCA
/*
** Adapter ID for the MCA EtherWORKS DE210/212 adapter
*/
#define DE210_ID 0x628d
#define DE212_ID 0x6def
static short depca_mca_adapter_ids[] = {
DE210_ID,
DE212_ID,
0x0000
};
static char *depca_mca_adapter_name[] = {
"DEC EtherWORKS MC Adapter (DE210)",
"DEC EtherWORKS MC Adapter (DE212)",
NULL
};
static enum depca_type depca_mca_adapter_type[] = {
de210,
de212,
0
};
static int depca_mca_probe (struct device *);
static struct mca_driver depca_mca_driver = {
.id_table = depca_mca_adapter_ids,
.driver = {
.name = depca_string,
.bus = &mca_bus_type,
.probe = depca_mca_probe,
.remove = __devexit_p(depca_device_remove),
},
};
#endif
static int depca_isa_probe (struct platform_device *);
static int __devexit depca_isa_remove(struct platform_device *pdev)
{
return depca_device_remove(&pdev->dev);
}
static struct platform_driver depca_isa_driver = {
.probe = depca_isa_probe,
.remove = __devexit_p(depca_isa_remove),
.driver = {
.name = depca_string,
},
};
/*
** Miscellaneous info...
*/
#define DEPCA_STRLEN 16
/*
** Memory Alignment. Each descriptor is 4 longwords long. To force a
** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
** DESC_ALIGN. DEPCA_ALIGN aligns the start address of the private memory area
** and hence the RX descriptor ring's first entry.
*/
#define DEPCA_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
#define DEPCA_ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */
#define DEPCA_ALIGN DEPCA_ALIGN8 /* Keep the LANCE happy... */
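/*
** These masks are used as "(offset + DEPCA_ALIGN) & ~DEPCA_ALIGN", i.e.
** round up to the next quadword boundary. For example, an offset of 30
** becomes (30 + 7) & ~7 = 32, while 24 is already aligned and stays 24.
*/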
/*
** The DEPCA Rx and Tx ring descriptors.
*/
struct depca_rx_desc {
volatile s32 base;
s16 buf_length; /* This length is negative 2's complement! */
s16 msg_length; /* This length is "normal". */
};
struct depca_tx_desc {
volatile s32 base;
s16 length; /* This length is negative 2's complement! */
s16 misc; /* Errors and TDR info */
};
#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM
to LANCE memory address space */
/*
** The Lance initialization block, described in databook, in common memory.
*/
struct depca_init {
u16 mode; /* Mode register */
u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */
u8 mcast_table[8]; /* Multicast Hash Table. */
u32 rx_ring; /* Rx ring base pointer & ring length */
u32 tx_ring; /* Tx ring base pointer & ring length */
};
#define DEPCA_PKT_STAT_SZ 16
#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you
increase DEPCA_PKT_STAT_SZ */
struct depca_private {
char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */
enum depca_type adapter; /* Adapter type */
enum {
DEPCA_BUS_MCA = 1,
DEPCA_BUS_ISA,
DEPCA_BUS_EISA,
} depca_bus; /* type of bus */
struct depca_init init_block; /* Shadow Initialization block */
/* CPU address space fields */
struct depca_rx_desc __iomem *rx_ring; /* Pointer to start of RX descriptor ring */
struct depca_tx_desc __iomem *tx_ring; /* Pointer to start of TX descriptor ring */
void __iomem *rx_buff[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */
void __iomem *tx_buff[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */
void __iomem *sh_mem; /* CPU mapped virt address of device RAM */
u_long mem_start; /* Bus address of device RAM (before remap) */
u_long mem_len; /* device memory size */
/* Device address space fields */
u_long device_ram_start; /* Start of RAM in device addr space */
/* Offsets used in both address spaces */
u_long rx_ring_offset; /* Offset from start of RAM to rx_ring */
u_long tx_ring_offset; /* Offset from start of RAM to tx_ring */
u_long buffs_offset; /* LANCE Rx and Tx buffers start address. */
/* Kernel-only (not device) fields */
int rx_new, tx_new; /* The next free ring entry */
int rx_old, tx_old; /* The ring entries to be free()ed. */
spinlock_t lock;
struct { /* Private stats counters */
u32 bins[DEPCA_PKT_STAT_SZ];
u32 unicast;
u32 multicast;
u32 broadcast;
u32 excessive_collisions;
u32 tx_underruns;
u32 excessive_underruns;
} pktStats;
int txRingMask; /* TX ring mask */
int rxRingMask; /* RX ring mask */
s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */
s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */
};
/*
** The transmit ring full condition is described by the tx_old and tx_new
** pointers by:
** tx_old = tx_new Empty ring
** tx_old = tx_new+1 Full ring
** tx_old+txRingMask = tx_new Full ring (wrapped condition)
*/
#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
lp->tx_old+lp->txRingMask-lp->tx_new:\
lp->tx_old -lp->tx_new-1)
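/*
** Illustrative check of TX_BUFFS_AVAIL with txRingMask = 7 (8 entries):
** empty ring (tx_old == tx_new == 2) gives 2+7-2 = 7 free slots (one
** entry is always held back to distinguish full from empty); a full
** ring (tx_old = 5, tx_new = 4) gives 5-4-1 = 0; the wrapped full case
** (tx_old = 0, tx_new = 7) gives 0+7-7 = 0.
*/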
/*
** Public Functions
*/
static int depca_open(struct net_device *dev);
static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t depca_interrupt(int irq, void *dev_id);
static int depca_close(struct net_device *dev);
static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void depca_tx_timeout(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
/*
** Private functions
*/
static void depca_init_ring(struct net_device *dev);
static int depca_rx(struct net_device *dev);
static int depca_tx(struct net_device *dev);
static void LoadCSRs(struct net_device *dev);
static int InitRestartDepca(struct net_device *dev);
static int DepcaSignature(char *name, u_long paddr);
static int DevicePresent(u_long ioaddr);
static int get_hw_addr(struct net_device *dev);
static void SetMulticastFilter(struct net_device *dev);
static int load_packet(struct net_device *dev, struct sk_buff *skb);
static void depca_dbg_open(struct net_device *dev);
static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
static u_char *depca_irq;
static int irq;
static int io;
static char *adapter_name;
static int mem; /* For loadable module assignment
use insmod mem=0x????? .... */
module_param (irq, int, 0);
module_param (io, int, 0);
module_param (adapter_name, charp, 0);
module_param (mem, int, 0);
MODULE_PARM_DESC(irq, "DEPCA IRQ number");
MODULE_PARM_DESC(io, "DEPCA I/O base address");
MODULE_PARM_DESC(adapter_name, "DEPCA adapter name");
MODULE_PARM_DESC(mem, "DEPCA shared memory address");
MODULE_LICENSE("GPL");
/*
** Miscellaneous defines...
*/
#define STOP_DEPCA \
outw(CSR0, DEPCA_ADDR);\
outw(STOP, DEPCA_DATA)
static const struct net_device_ops depca_netdev_ops = {
.ndo_open = depca_open,
.ndo_start_xmit = depca_start_xmit,
.ndo_stop = depca_close,
.ndo_set_rx_mode = set_multicast_list,
.ndo_do_ioctl = depca_ioctl,
.ndo_tx_timeout = depca_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int __init depca_hw_init (struct net_device *dev, struct device *device)
{
struct depca_private *lp;
int i, j, offset, netRAM, mem_len, status = 0;
s16 nicsr;
u_long ioaddr;
u_long mem_start;
/*
* We are now supposed to enter this function with the
* following fields filled with proper values :
*
* dev->base_addr
* lp->mem_start
* lp->depca_bus
* lp->adapter
*
* dev->irq can be set if known from device configuration (on
* MCA or EISA) or module option. Otherwise, it will be auto
* detected.
*/
ioaddr = dev->base_addr;
STOP_DEPCA;
nicsr = inb(DEPCA_NICSR);
nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM);
outb(nicsr, DEPCA_NICSR);
if (inw(DEPCA_DATA) != STOP) {
return -ENXIO;
}
lp = netdev_priv(dev);
mem_start = lp->mem_start;
if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown)
return -ENXIO;
printk("%s: %s at 0x%04lx",
dev_name(device), depca_signature[lp->adapter], ioaddr);
switch (lp->depca_bus) {
#ifdef CONFIG_MCA
case DEPCA_BUS_MCA:
printk(" (MCA slot %d)", to_mca_device(device)->slot + 1);
break;
#endif
#ifdef CONFIG_EISA
case DEPCA_BUS_EISA:
printk(" (EISA slot %d)", to_eisa_device(device)->slot);
break;
#endif
case DEPCA_BUS_ISA:
break;
default:
printk("Unknown DEPCA bus %d\n", lp->depca_bus);
return -ENXIO;
}
printk(", h/w address ");
status = get_hw_addr(dev);
printk("%pM", dev->dev_addr);
if (status != 0) {
printk(" which has an Ethernet PROM CRC error.\n");
return -ENXIO;
}
/* Set up the maximum amount of network RAM(kB) */
netRAM = ((lp->adapter != DEPCA) ? 64 : 48);
if ((nicsr & _128KB) && (lp->adapter == de422))
netRAM = 128;
/* Shared Memory Base Address */
if (nicsr & BUF) {
nicsr &= ~BS; /* DEPCA RAM in top 32k */
netRAM -= 32;
/* Only EISA/ISA needs start address to be re-computed */
if (lp->depca_bus != DEPCA_BUS_MCA)
mem_start += 0x8000;
}
mem_len = NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) +
NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) +
sizeof(struct depca_init);
if (mem_len > (netRAM << 10)) {
printk(",\n requests %dkB RAM: only %dkB is available!\n", (mem_len >> 10), netRAM);
return -ENXIO;
}
printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start);
/* Enable the shadow RAM. */
if (lp->adapter != DEPCA) {
nicsr |= SHE;
outb(nicsr, DEPCA_NICSR);
}
spin_lock_init(&lp->lock);
sprintf(lp->adapter_name, "%s (%s)",
depca_signature[lp->adapter], dev_name(device));
status = -EBUSY;
/* Initialisation Block */
if (!request_mem_region (mem_start, mem_len, lp->adapter_name)) {
printk(KERN_ERR "depca: cannot request ISA memory, aborting\n");
goto out_priv;
}
status = -EIO;
lp->sh_mem = ioremap(mem_start, mem_len);
if (lp->sh_mem == NULL) {
printk(KERN_ERR "depca: cannot remap ISA memory, aborting\n");
goto out1;
}
lp->mem_start = mem_start;
lp->mem_len = mem_len;
lp->device_ram_start = mem_start & LA_MASK;
offset = 0;
offset += sizeof(struct depca_init);
/* Tx & Rx descriptors (aligned to a quadword boundary) */
offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN;
lp->rx_ring = lp->sh_mem + offset;
lp->rx_ring_offset = offset;
offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
lp->tx_ring = lp->sh_mem + offset;
lp->tx_ring_offset = offset;
offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
lp->buffs_offset = offset;
/* Finish initialising the ring information. */
lp->rxRingMask = NUM_RX_DESC - 1;
lp->txRingMask = NUM_TX_DESC - 1;
/* Calculate Tx/Rx RLEN size for the descriptors. */
for (i = 0, j = lp->rxRingMask; j > 0; i++) {
j >>= 1;
}
lp->rx_rlen = (s32) (i << 29);
for (i = 0, j = lp->txRingMask; j > 0; i++) {
j >>= 1;
}
lp->tx_rlen = (s32) (i << 29);
/* Load the initialisation block */
depca_init_ring(dev);
/* Initialise the control and status registers */
LoadCSRs(dev);
/* Enable DEPCA board interrupts for autoprobing */
nicsr = ((nicsr & ~IM) | IEN);
outb(nicsr, DEPCA_NICSR);
/* To auto-IRQ we enable the initialization-done and DMA err,
interrupts. For now we will always get a DMA error. */
if (dev->irq < 2) {
unsigned char irqnum;
unsigned long irq_mask, delay;
irq_mask = probe_irq_on();
/* Assign the correct irq list */
switch (lp->adapter) {
case DEPCA:
case de100:
case de101:
depca_irq = de1xx_irq;
break;
case de200:
case de201:
case de202:
case de210:
case de212:
depca_irq = de2xx_irq;
break;
case de422:
depca_irq = de422_irq;
break;
default:
break; /* Not reached */
}
/* Trigger an initialization just for the interrupt. */
outw(INEA | INIT, DEPCA_DATA);
delay = jiffies + HZ/50;
while (time_before(jiffies, delay))
yield();
irqnum = probe_irq_off(irq_mask);
status = -ENXIO;
if (!irqnum) {
printk(" and failed to detect IRQ line.\n");
goto out2;
} else {
for (dev->irq = 0, i = 0; (depca_irq[i]) && (!dev->irq); i++)
if (irqnum == depca_irq[i]) {
dev->irq = irqnum;
printk(" and uses IRQ%d.\n", dev->irq);
}
if (!dev->irq) {
printk(" but incorrect IRQ line detected.\n");
goto out2;
}
}
} else {
printk(" and assigned IRQ%d.\n", dev->irq);
}
if (depca_debug > 1) {
printk(version);
}
/* The DEPCA-specific entries in the device structure. */
dev->netdev_ops = &depca_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->mem_start = 0;
dev_set_drvdata(device, dev);
SET_NETDEV_DEV (dev, device);
status = register_netdev(dev);
if (status == 0)
return 0;
out2:
iounmap(lp->sh_mem);
out1:
release_mem_region (mem_start, mem_len);
out_priv:
return status;
}
static int depca_open(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
u_long ioaddr = dev->base_addr;
s16 nicsr;
int status = 0;
STOP_DEPCA;
nicsr = inb(DEPCA_NICSR);
/* Make sure the shadow RAM is enabled */
if (lp->adapter != DEPCA) {
nicsr |= SHE;
outb(nicsr, DEPCA_NICSR);
}
/* Re-initialize the DEPCA... */
depca_init_ring(dev);
LoadCSRs(dev);
depca_dbg_open(dev);
if (request_irq(dev->irq, depca_interrupt, 0, lp->adapter_name, dev)) {
printk("depca_open(): Requested IRQ%d is busy\n", dev->irq);
status = -EAGAIN;
} else {
/* Enable DEPCA board interrupts and turn off LED */
nicsr = ((nicsr & ~IM & ~LED) | IEN);
outb(nicsr, DEPCA_NICSR);
outw(CSR0, DEPCA_ADDR);
netif_start_queue(dev);
status = InitRestartDepca(dev);
if (depca_debug > 1) {
printk("CSR0: 0x%4.4x\n", inw(DEPCA_DATA));
printk("nicsr: 0x%02x\n", inb(DEPCA_NICSR));
}
}
return status;
}
/* Initialize the lance Rx and Tx descriptor rings. */
static void depca_init_ring(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
u_int i;
u_long offset;
/* Lock out other processes whilst setting up the hardware */
netif_stop_queue(dev);
lp->rx_new = lp->tx_new = 0;
lp->rx_old = lp->tx_old = 0;
/* Initialize the base address and length of each buffer in the ring */
for (i = 0; i <= lp->rxRingMask; i++) {
offset = lp->buffs_offset + i * RX_BUFF_SZ;
writel((lp->device_ram_start + offset) | R_OWN, &lp->rx_ring[i].base);
writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length);
lp->rx_buff[i] = lp->sh_mem + offset;
}
for (i = 0; i <= lp->txRingMask; i++) {
offset = lp->buffs_offset + (i + lp->rxRingMask + 1) * TX_BUFF_SZ;
writel((lp->device_ram_start + offset) & 0x00ffffff, &lp->tx_ring[i].base);
lp->tx_buff[i] = lp->sh_mem + offset;
}
/* Set up the initialization block */
lp->init_block.rx_ring = (lp->device_ram_start + lp->rx_ring_offset) | lp->rx_rlen;
lp->init_block.tx_ring = (lp->device_ram_start + lp->tx_ring_offset) | lp->tx_rlen;
SetMulticastFilter(dev);
for (i = 0; i < ETH_ALEN; i++) {
lp->init_block.phys_addr[i] = dev->dev_addr[i];
}
lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */
}
static void depca_tx_timeout(struct net_device *dev)
{
u_long ioaddr = dev->base_addr;
printk("%s: transmit timed out, status %04x, resetting.\n", dev->name, inw(DEPCA_DATA));
STOP_DEPCA;
depca_init_ring(dev);
LoadCSRs(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
InitRestartDepca(dev);
}
/*
** Writes a socket buffer to TX descriptor ring and starts transmission
*/
static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
u_long ioaddr = dev->base_addr;
int status = 0;
/* Transmitter timeout, serious problems. */
if (skb->len < 1)
goto out;
if (skb_padto(skb, ETH_ZLEN))
goto out;
netif_stop_queue(dev);
if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */
status = load_packet(dev, skb);
if (!status) {
/* Trigger an immediate send demand. */
outw(CSR0, DEPCA_ADDR);
outw(INEA | TDMD, DEPCA_DATA);
dev_kfree_skb(skb);
}
if (TX_BUFFS_AVAIL)
netif_start_queue(dev);
} else
status = NETDEV_TX_LOCKED;
out:
return status;
}
/*
** The DEPCA interrupt handler.
*/
static irqreturn_t depca_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct depca_private *lp;
s16 csr0, nicsr;
u_long ioaddr;
if (dev == NULL) {
printk("depca_interrupt(): irq %d for unknown device.\n", irq);
return IRQ_NONE;
}
lp = netdev_priv(dev);
ioaddr = dev->base_addr;
spin_lock(&lp->lock);
/* mask the DEPCA board interrupts and turn on the LED */
nicsr = inb(DEPCA_NICSR);
nicsr |= (IM | LED);
outb(nicsr, DEPCA_NICSR);
outw(CSR0, DEPCA_ADDR);
csr0 = inw(DEPCA_DATA);
/* Acknowledge all of the current interrupt sources ASAP. */
outw(csr0 & INTE, DEPCA_DATA);
if (csr0 & RINT) /* Rx interrupt (packet arrived) */
depca_rx(dev);
if (csr0 & TINT) /* Tx interrupt (packet sent) */
depca_tx(dev);
/* Any resources available? */
if ((TX_BUFFS_AVAIL >= 0) && netif_queue_stopped(dev)) {
netif_wake_queue(dev);
}
/* Unmask the DEPCA board interrupts and turn off the LED */
nicsr = (nicsr & ~IM & ~LED);
outb(nicsr, DEPCA_NICSR);
spin_unlock(&lp->lock);
return IRQ_HANDLED;
}
/* Called with lp->lock held */
static int depca_rx(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
int i, entry;
s32 status;
for (entry = lp->rx_new; !(readl(&lp->rx_ring[entry].base) & R_OWN); entry = lp->rx_new) {
status = readl(&lp->rx_ring[entry].base) >> 16;
if (status & R_STP) { /* Remember start of frame */
lp->rx_old = entry;
}
if (status & R_ENP) { /* Valid frame status */
if (status & R_ERR) { /* There was an error. */
dev->stats.rx_errors++; /* Update the error stats. */
if (status & R_FRAM)
dev->stats.rx_frame_errors++;
if (status & R_OFLO)
dev->stats.rx_over_errors++;
if (status & R_CRC)
dev->stats.rx_crc_errors++;
if (status & R_BUFF)
dev->stats.rx_fifo_errors++;
} else {
short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4;
struct sk_buff *skb;
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb != NULL) {
unsigned char *buf;
skb_reserve(skb, 2); /* 16 byte align the IP header */
buf = skb_put(skb, pkt_len);
if (entry < lp->rx_old) { /* Wrapped buffer */
len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len);
memcpy_fromio(buf + len, lp->rx_buff[0], pkt_len - len);
} else { /* Linear buffer */
memcpy_fromio(buf, lp->rx_buff[lp->rx_old], pkt_len);
}
/*
** Notify the upper protocol layers that there is another
** packet to handle
*/
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
/*
** Update stats
*/
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) {
if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) {
lp->pktStats.bins[i]++;
i = DEPCA_PKT_STAT_SZ;
}
}
if (is_multicast_ether_addr(buf)) {
if (is_broadcast_ether_addr(buf)) {
lp->pktStats.broadcast++;
} else {
lp->pktStats.multicast++;
}
} else if (compare_ether_addr(buf, dev->dev_addr) == 0) {
lp->pktStats.unicast++;
}
lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
if (lp->pktStats.bins[0] == 0) { /* Reset counters */
memset((char *) &lp->pktStats, 0, sizeof(lp->pktStats));
}
} else {
printk("%s: Memory squeeze, deferring packet.\n", dev->name);
dev->stats.rx_dropped++; /* Really, deferred. */
break;
}
}
/* Change buffer ownership for this last frame, back to the adapter */
for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
}
writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
}
/*
** Update entry information
*/
lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
}
return 0;
}
/*
** Buffer sent - check for buffer errors.
** Called with lp->lock held
*/
static int depca_tx(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
int entry;
s32 status;
u_long ioaddr = dev->base_addr;
for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
status = readl(&lp->tx_ring[entry].base) >> 16;
if (status < 0) { /* Packet not yet sent! */
break;
} else if (status & T_ERR) { /* An error occurred. */
status = readl(&lp->tx_ring[entry].misc);
dev->stats.tx_errors++;
if (status & TMD3_RTRY)
dev->stats.tx_aborted_errors++;
if (status & TMD3_LCAR)
dev->stats.tx_carrier_errors++;
if (status & TMD3_LCOL)
dev->stats.tx_window_errors++;
if (status & TMD3_UFLO)
dev->stats.tx_fifo_errors++;
if (status & (TMD3_BUFF | TMD3_UFLO)) {
/* Trigger an immediate send demand. */
outw(CSR0, DEPCA_ADDR);
outw(INEA | TDMD, DEPCA_DATA);
}
} else if (status & (T_MORE | T_ONE)) {
dev->stats.collisions++;
} else {
dev->stats.tx_packets++;
}
/* Update all the pointers */
lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
}
return 0;
}
static int depca_close(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
s16 nicsr;
u_long ioaddr = dev->base_addr;
netif_stop_queue(dev);
outw(CSR0, DEPCA_ADDR);
if (depca_debug > 1) {
printk("%s: Shutting down ethercard, status was %2.2x.\n", dev->name, inw(DEPCA_DATA));
}
/*
** We stop the DEPCA here -- it occasionally polls
** memory if we don't.
*/
outw(STOP, DEPCA_DATA);
/*
** Give back the ROM in case the user wants to go to DOS
*/
if (lp->adapter != DEPCA) {
nicsr = inb(DEPCA_NICSR);
nicsr &= ~SHE;
outb(nicsr, DEPCA_NICSR);
}
/*
** Free the associated irq
*/
free_irq(dev->irq, dev);
return 0;
}
static void LoadCSRs(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
u_long ioaddr = dev->base_addr;
outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */
outw((u16) lp->device_ram_start, DEPCA_DATA);
outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */
outw((u16) (lp->device_ram_start >> 16), DEPCA_DATA);
outw(CSR3, DEPCA_ADDR); /* ALE control */
outw(ACON, DEPCA_DATA);
outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
}
static int InitRestartDepca(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
u_long ioaddr = dev->base_addr;
int i, status = 0;
/* Copy the shadow init_block to shared memory */
memcpy_toio(lp->sh_mem, &lp->init_block, sizeof(struct depca_init));
outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */
outw(INIT, DEPCA_DATA); /* initialize DEPCA */
/* wait for lance to complete initialisation */
for (i = 0; (i < 100) && !(inw(DEPCA_DATA) & IDON); i++);
if (i != 100) {
/* clear IDON by writing a "1", enable interrupts and start lance */
outw(IDON | INEA | STRT, DEPCA_DATA);
if (depca_debug > 2) {
printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
}
} else {
printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
status = -1;
}
return status;
}
/*
** Set or clear the multicast filter for this adaptor.
*/
static void set_multicast_list(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
u_long ioaddr = dev->base_addr;
netif_stop_queue(dev);
while (lp->tx_old != lp->tx_new)
cpu_relax(); /* Wait for the ring to empty */
STOP_DEPCA; /* Temporarily stop the depca. */
depca_init_ring(dev); /* Initialize the descriptor rings */
if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */
lp->init_block.mode |= PROM;
} else {
SetMulticastFilter(dev);
lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */
}
LoadCSRs(dev); /* Reload CSR3 */
InitRestartDepca(dev); /* Resume normal operation. */
netif_start_queue(dev); /* Unlock the TX ring */
}
/*
** Calculate the hash code and update the logical address filter
** from a list of ethernet multicast addresses.
** Big endian crc one liner is mine, all mine, ha ha ha ha!
** LANCE calculates its hash codes big endian.
*/
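/*
** Worked example (illustrative only): if the six CRC LSBs are
** b5..b0 = 110100, the loop below emits them in reverse order, so
** hashcode = b0 b1 b2 b3 b4 b5 = 001011 = 11, selecting filter byte
** 11 >> 3 = 1 and bit 11 & 7 = 3 within that byte.
*/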
static void SetMulticastFilter(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
struct netdev_hw_addr *ha;
int i, j, bit, byte;
u16 hashcode;
u32 crc;
if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */
for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
lp->init_block.mcast_table[i] = (char) 0xff;
}
} else {
for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { /* Clear the multicast table */
lp->init_block.mcast_table[i] = 0;
}
/* Add multicast addresses */
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc(ETH_ALEN, ha->addr);
hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
for (j = 0; j < 5; j++) { /* ... in reverse order. */
hashcode = (hashcode << 1) | ((crc >>= 1) & 1);
}
byte = hashcode >> 3; /* bit[3-5] -> byte in filter */
bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
lp->init_block.mcast_table[byte] |= bit;
}
}
}
static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
{
int status = 0;
if (!request_region (ioaddr, DEPCA_TOTAL_SIZE, depca_string)) {
status = -EBUSY;
goto out;
}
if (DevicePresent(ioaddr)) {
status = -ENODEV;
goto out_release;
}
if (!(*devp = alloc_etherdev (sizeof (struct depca_private)))) {
status = -ENOMEM;
goto out_release;
}
return 0;
out_release:
release_region (ioaddr, DEPCA_TOTAL_SIZE);
out:
return status;
}
#ifdef CONFIG_MCA
/*
** Microchannel bus I/O device probe
*/
static int __init depca_mca_probe(struct device *device)
{
unsigned char pos[2];
unsigned char where;
unsigned long iobase, mem_start;
int irq, err;
struct mca_device *mdev = to_mca_device (device);
struct net_device *dev;
struct depca_private *lp;
/*
** Search for the adapter. If an address has been given, search
** specifically for the card at that address. Otherwise find the
** first card in the system.
*/
pos[0] = mca_device_read_stored_pos(mdev, 2);
pos[1] = mca_device_read_stored_pos(mdev, 3);
/*
** IO of card is handled by bits 1 and 2 of pos0.
**
** bit2 bit1 IO
** 0 0 0x2c00
** 0 1 0x2c10
** 1 0 0x2c20
** 1 1 0x2c30
*/
where = (pos[0] & 6) >> 1;
iobase = 0x2c00 + (0x10 * where);
/*
** Found the adapter we were looking for. Now start setting it up.
**
** First work on decoding the IRQ. It's stored in the lower 4 bits
** of pos1. Bits are as follows (from the ADF file):
**
** Bits
** 3 2 1 0 IRQ
** --------------------
** 0 0 1 0 5
** 0 0 0 1 9
** 0 1 0 0 10
** 1 0 0 0 11
*/
where = pos[1] & 0x0f;
switch (where) {
case 1:
irq = 9;
break;
case 2:
irq = 5;
break;
case 4:
irq = 10;
break;
case 8:
irq = 11;
break;
default:
printk("%s: mca_probe IRQ error. You should never get here (%d).\n", mdev->name, where);
return -EINVAL;
}
/*
** Shared memory address of adapter is stored in bits 3-5 of pos0.
** They are mapped as follows:
**
** Bit
** 5 4 3 Memory Addresses
** 0 0 0 C0000-CFFFF (64K)
** 1 0 0 C8000-CFFFF (32K)
** 0 0 1 D0000-DFFFF (64K)
** 1 0 1 D8000-DFFFF (32K)
** 0 1 0 E0000-EFFFF (64K)
** 1 1 0 E8000-EFFFF (32K)
*/
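/*
** Worked example: pos[0] = 0x28 has bits 4:3 = 01 and bit 5 set, so the
** decode below gives 0xc0000 + 0x10000 = 0xd0000 plus the 0x8000 offset
** for the 32K window, i.e. D8000-DFFFF as in the table above.
*/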
where = (pos[0] & 0x18) >> 3;
mem_start = 0xc0000 + (where * 0x10000);
if (pos[0] & 0x20) {
mem_start += 0x8000;
}
/* claim the slot */
strncpy(mdev->name, depca_mca_adapter_name[mdev->index],
sizeof(mdev->name));
mca_device_set_claim(mdev, 1);
/*
** Get everything allocated and initialized... (almost just
** like the ISA and EISA probes)
*/
irq = mca_device_transform_irq(mdev, irq);
iobase = mca_device_transform_ioport(mdev, iobase);
if ((err = depca_common_init (iobase, &dev)))
goto out_unclaim;
dev->irq = irq;
dev->base_addr = iobase;
lp = netdev_priv(dev);
lp->depca_bus = DEPCA_BUS_MCA;
lp->adapter = depca_mca_adapter_type[mdev->index];
lp->mem_start = mem_start;
if ((err = depca_hw_init(dev, device)))
goto out_free;
return 0;
out_free:
free_netdev (dev);
release_region (iobase, DEPCA_TOTAL_SIZE);
out_unclaim:
mca_device_set_claim(mdev, 0);
return err;
}
#endif
/*
** ISA bus I/O device probe
*/
static void __init depca_platform_probe (void)
{
int i;
struct platform_device *pldev;
for (i = 0; depca_io_ports[i].iobase; i++) {
depca_io_ports[i].device = NULL;
/* if an address has been specified on the command
* line, use it (if valid) */
if (io && io != depca_io_ports[i].iobase)
continue;
pldev = platform_device_alloc(depca_string, i);
if (!pldev)
continue;
pldev->dev.platform_data = (void *) depca_io_ports[i].iobase;
depca_io_ports[i].device = pldev;
if (platform_device_add(pldev)) {
depca_io_ports[i].device = NULL;
pldev->dev.platform_data = NULL;
platform_device_put(pldev);
continue;
}
if (!pldev->dev.driver) {
/* The driver was not bound to this device, there was
* no hardware at this address. Unregister it, as the
* release function will take care of freeing the
* allocated structure */
depca_io_ports[i].device = NULL;
pldev->dev.platform_data = NULL;
platform_device_unregister (pldev);
}
}
}
static enum depca_type __init depca_shmem_probe (ulong *mem_start)
{
u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
enum depca_type adapter = unknown;
int i;
for (i = 0; mem_base[i]; i++) {
*mem_start = mem ? mem : mem_base[i];
adapter = DepcaSignature (adapter_name, *mem_start);
if (adapter != unknown)
break;
}
return adapter;
}
static int __devinit depca_isa_probe (struct platform_device *device)
{
struct net_device *dev;
struct depca_private *lp;
u_long ioaddr, mem_start = 0;
enum depca_type adapter = unknown;
int status = 0;
ioaddr = (u_long) device->dev.platform_data;
if ((status = depca_common_init (ioaddr, &dev)))
goto out;
adapter = depca_shmem_probe (&mem_start);
if (adapter == unknown) {
status = -ENODEV;
goto out_free;
}
dev->base_addr = ioaddr;
dev->irq = irq; /* Use whatever value the user gave
* us, and 0 if he didn't. */
lp = netdev_priv(dev);
lp->depca_bus = DEPCA_BUS_ISA;
lp->adapter = adapter;
lp->mem_start = mem_start;
if ((status = depca_hw_init(dev, &device->dev)))
goto out_free;
return 0;
out_free:
free_netdev (dev);
release_region (ioaddr, DEPCA_TOTAL_SIZE);
out:
return status;
}
/*
** EISA callbacks from sysfs.
*/
#ifdef CONFIG_EISA
static int __init depca_eisa_probe (struct device *device)
{
enum depca_type adapter = unknown;
struct eisa_device *edev;
struct net_device *dev;
struct depca_private *lp;
u_long ioaddr, mem_start;
int status = 0;
edev = to_eisa_device (device);
ioaddr = edev->base_addr + DEPCA_EISA_IO_PORTS;
if ((status = depca_common_init (ioaddr, &dev)))
goto out;
/* It would have been nice to get card configuration from the
* card. Unfortunately, this register is write-only (shares
* its address with the ethernet prom)... As we don't parse
* the EISA configuration structures (yet... :-), just rely on
* the ISA probing to sort it out... */
adapter = depca_shmem_probe (&mem_start);
if (adapter == unknown) {
status = -ENODEV;
goto out_free;
}
dev->base_addr = ioaddr;
dev->irq = irq;
lp = netdev_priv(dev);
lp->depca_bus = DEPCA_BUS_EISA;
lp->adapter = edev->id.driver_data;
lp->mem_start = mem_start;
if ((status = depca_hw_init(dev, device)))
goto out_free;
return 0;
out_free:
free_netdev (dev);
release_region (ioaddr, DEPCA_TOTAL_SIZE);
out:
return status;
}
#endif
static int __devexit depca_device_remove (struct device *device)
{
struct net_device *dev;
struct depca_private *lp;
int bus;
dev = dev_get_drvdata(device);
lp = netdev_priv(dev);
unregister_netdev (dev);
iounmap (lp->sh_mem);
release_mem_region (lp->mem_start, lp->mem_len);
release_region (dev->base_addr, DEPCA_TOTAL_SIZE);
bus = lp->depca_bus;
free_netdev (dev);
return 0;
}
/*
** Look for a particular board name in the on-board Remote Diagnostics
** and Boot (readb) ROM. This will also give us a clue to the network RAM
** base address.
*/
static int __init DepcaSignature(char *name, u_long base_addr)
{
u_int i, j, k;
void __iomem *ptr;
char tmpstr[16];
u_long prom_addr = base_addr + 0xc000;
u_long mem_addr = base_addr + 0x8000; /* 32KB */
/* Can't reserve the prom region, it is already marked as
* used, at least on x86. Instead, reserve a memory region a
* board would certainly use. If it works, go ahead. If not,
* run like hell... */
if (!request_mem_region (mem_addr, 16, depca_string))
return unknown;
/* Copy the first 16 bytes of ROM */
ptr = ioremap(prom_addr, 16);
if (ptr == NULL) {
printk(KERN_ERR "depca: I/O remap failed at %lx\n", prom_addr);
release_mem_region (mem_addr, 16);
return unknown;
}
for (i = 0; i < 16; i++) {
tmpstr[i] = readb(ptr + i);
}
iounmap(ptr);
release_mem_region (mem_addr, 16);
/* Check if PROM contains a valid string */
for (i = 0; *depca_signature[i] != '\0'; i++) {
for (j = 0, k = 0; j < 16 && k < strlen(depca_signature[i]); j++) {
if (depca_signature[i][k] == tmpstr[j]) { /* track signature */
k++;
} else { /* lost signature; begin search again */
k = 0;
}
}
if (k == strlen(depca_signature[i]))
break;
}
/* Check if name string is valid, provided there's no PROM */
if (name && *name && (i == unknown)) {
for (i = 0; *depca_signature[i] != '\0'; i++) {
if (strcmp(name, depca_signature[i]) == 0)
break;
}
}
return i;
}
/*
** Look for a special sequence in the Ethernet station address PROM that
** is common across all DEPCA products. Note that the original DEPCA needs
** its ROM address counter to be initialized and enabled. Only enable
** if the first address octet is a 0x08 - this minimises the chances of
** messing around with some other hardware, but it assumes that this DEPCA
** card initialized itself correctly.
**
** Search the Ethernet address ROM for the signature. Since the ROM address
** counter can start at an arbitrary point, the search must include the entire
** probe sequence length plus the (length_of_the_signature - 1).
** Stop the search IMMEDIATELY after the signature is found so that the
** PROM address counter is correctly positioned at the start of the
** ethernet address for later read out.
*/
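/*
** With PROBE_LENGTH = 32 and the 8-byte signature below (ETH_PROM_SIG
** repeated twice), the search loop therefore performs at most
** 32 + 8 - 1 = 39 PROM reads, enough to find the signature wherever
** the address counter happened to start.
*/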
static int __init DevicePresent(u_long ioaddr)
{
union {
struct {
u32 a;
u32 b;
} llsig;
char Sig[sizeof(u32) << 1];
} dev;
short sigLength = 0;
s8 data;
s16 nicsr;
int i, j, status = 0;
data = inb(DEPCA_PROM); /* clear counter on DEPCA */
data = inb(DEPCA_PROM); /* read data */
if (data == 0x08) { /* Enable counter on DEPCA */
nicsr = inb(DEPCA_NICSR);
nicsr |= AAC;
outb(nicsr, DEPCA_NICSR);
}
dev.llsig.a = ETH_PROM_SIG;
dev.llsig.b = ETH_PROM_SIG;
sigLength = sizeof(u32) << 1;
for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
data = inb(DEPCA_PROM);
if (dev.Sig[j] == data) { /* track signature */
j++;
} else { /* lost signature; begin search again */
if (data == dev.Sig[0]) { /* rare case.... */
j = 1;
} else {
j = 0;
}
}
}
if (j != sigLength) {
status = -ENODEV; /* search failed */
}
return status;
}
/*
** The DE100 and DE101 PROM accesses were made non-standard for some bizarre
** reason: access the upper half of the PROM with x=0; access the lower half
** with x=1.
*/
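/*
** The loop below computes a 16-bit rotating checksum over the three
** little-endian address words: each round rotates the accumulator left
** by one bit ("k <<= 1" with the "k -= 0xffff" end-around carry fold),
** adds the next word read from the PROM, and folds again; the result is
** compared against the checksum word stored after the address.
*/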
static int __init get_hw_addr(struct net_device *dev)
{
u_long ioaddr = dev->base_addr;
struct depca_private *lp = netdev_priv(dev);
int i, k, tmp, status = 0;
u_short j, x, chksum;
x = (((lp->adapter == de100) || (lp->adapter == de101)) ? 1 : 0);
for (i = 0, k = 0, j = 0; j < 3; j++) {
k <<= 1;
if (k > 0xffff)
k -= 0xffff;
k += (u_char) (tmp = inb(DEPCA_PROM + x));
dev->dev_addr[i++] = (u_char) tmp;
k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8);
dev->dev_addr[i++] = (u_char) tmp;
if (k > 0xffff)
k -= 0xffff;
}
if (k == 0xffff)
k = 0;
chksum = (u_char) inb(DEPCA_PROM + x);
chksum |= (u_short) (inb(DEPCA_PROM + x) << 8);
if (k != chksum)
status = -1;
return status;
}
/*
** Load a packet into the shared memory
*/
static int load_packet(struct net_device *dev, struct sk_buff *skb)
{
struct depca_private *lp = netdev_priv(dev);
int i, entry, end, len, status = NETDEV_TX_OK;
entry = lp->tx_new; /* Ring around buffer number. */
end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask;
if (!(readl(&lp->tx_ring[end].base) & T_OWN)) { /* Enough room? */
/*
** Caution: the write order is important here... don't set up the
** ownership rights until all the other information is in place.
*/
if (end < entry) { /* wrapped buffer */
len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ;
memcpy_toio(lp->tx_buff[entry], skb->data, len);
memcpy_toio(lp->tx_buff[0], skb->data + len, skb->len - len);
} else { /* linear buffer */
memcpy_toio(lp->tx_buff[entry], skb->data, skb->len);
}
/* set up the buffer descriptors */
len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
for (i = entry; i != end; i = (i+1) & lp->txRingMask) {
/* clean out flags */
writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base);
writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */
writew(-TX_BUFF_SZ, &lp->tx_ring[i].length); /* packet length in buffer */
len -= TX_BUFF_SZ;
}
/* clean out flags */
writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base);
writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */
writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */
/* start of packet */
writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base);
/* end of packet */
writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base);
for (i = end; i != entry; --i) {
/* ownership of packet */
writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base);
if (i == 0)
i = lp->txRingMask + 1;
}
writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base);
lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
} else {
status = NETDEV_TX_LOCKED;
}
return status;
}
static void depca_dbg_open(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
u_long ioaddr = dev->base_addr;
struct depca_init *p = &lp->init_block;
int i;
if (depca_debug > 1) {
/* Do not copy the shadow init block into shared memory */
/* Debugging should not affect normal operation! */
/* The shadow init block will get copied across during InitRestartDepca */
printk("%s: depca open with irq %d\n", dev->name, dev->irq);
printk("Descriptor head addresses (CPU):\n");
printk(" 0x%lx 0x%lx\n", (u_long) lp->rx_ring, (u_long) lp->tx_ring);
printk("Descriptor addresses (CPU):\nRX: ");
for (i = 0; i < lp->rxRingMask; i++) {
if (i < 3) {
printk("%p ", &lp->rx_ring[i].base);
}
}
printk("...%p\n", &lp->rx_ring[i].base);
printk("TX: ");
for (i = 0; i < lp->txRingMask; i++) {
if (i < 3) {
printk("%p ", &lp->tx_ring[i].base);
}
}
printk("...%p\n", &lp->tx_ring[i].base);
printk("\nDescriptor buffers (Device):\nRX: ");
for (i = 0; i < lp->rxRingMask; i++) {
if (i < 3) {
printk("0x%8.8x ", readl(&lp->rx_ring[i].base));
}
}
printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base));
printk("TX: ");
for (i = 0; i < lp->txRingMask; i++) {
if (i < 3) {
printk("0x%8.8x ", readl(&lp->tx_ring[i].base));
}
}
printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base));
printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start);
printk(" mode: 0x%4.4x\n", p->mode);
printk(" physical address: %pM\n", p->phys_addr);
printk(" multicast hash table: ");
for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) {
printk("%2.2x:", p->mcast_table[i]);
}
printk("%2.2x\n", p->mcast_table[i]);
printk(" rx_ring at: 0x%8.8x\n", p->rx_ring);
printk(" tx_ring at: 0x%8.8x\n", p->tx_ring);
printk("buffers (Phys): 0x%8.8lx\n", lp->mem_start + lp->buffs_offset);
printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n", (int) lp->rxRingMask + 1, lp->rx_rlen);
printk("TX: %d Log2(txRingMask): 0x%8.8x\n", (int) lp->txRingMask + 1, lp->tx_rlen);
outw(CSR2, DEPCA_ADDR);
printk("CSR2&1: 0x%4.4x", inw(DEPCA_DATA));
outw(CSR1, DEPCA_ADDR);
printk("%4.4x\n", inw(DEPCA_DATA));
outw(CSR3, DEPCA_ADDR);
printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA));
}
}
/*
** Perform IOCTL call functions here. Some are privileged operations and the
** effective uid is checked in those cases.
** All multicast IOCTLs will not work here and are for testing purposes only.
*/
static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct depca_private *lp = netdev_priv(dev);
struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru;
int i, status = 0;
u_long ioaddr = dev->base_addr;
union {
u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
} tmp;
unsigned long flags;
void *buf;
switch (ioc->cmd) {
case DEPCA_GET_HWADDR: /* Get the hardware address */
for (i = 0; i < ETH_ALEN; i++) {
tmp.addr[i] = dev->dev_addr[i];
}
ioc->len = ETH_ALEN;
if (copy_to_user(ioc->data, tmp.addr, ioc->len))
return -EFAULT;
break;
case DEPCA_SET_HWADDR: /* Set the hardware address */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN))
return -EFAULT;
for (i = 0; i < ETH_ALEN; i++) {
dev->dev_addr[i] = tmp.addr[i];
}
netif_stop_queue(dev);
while (lp->tx_old != lp->tx_new)
cpu_relax(); /* Wait for the ring to empty */
STOP_DEPCA; /* Temporarily stop the depca. */
depca_init_ring(dev); /* Initialize the descriptor rings */
LoadCSRs(dev); /* Reload CSR3 */
InitRestartDepca(dev); /* Resume normal operation. */
netif_start_queue(dev); /* Unlock the TX ring */
break;
case DEPCA_SET_PROM: /* Set Promiscuous Mode */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
netif_stop_queue(dev);
while (lp->tx_old != lp->tx_new)
cpu_relax(); /* Wait for the ring to empty */
STOP_DEPCA; /* Temporarily stop the depca. */
depca_init_ring(dev); /* Initialize the descriptor rings */
lp->init_block.mode |= PROM; /* Set promiscuous mode */
LoadCSRs(dev); /* Reload CSR3 */
InitRestartDepca(dev); /* Resume normal operation. */
netif_start_queue(dev); /* Unlock the TX ring */
break;
case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
netif_stop_queue(dev);
while (lp->tx_old != lp->tx_new)
cpu_relax(); /* Wait for the ring to empty */
STOP_DEPCA; /* Temporarily stop the depca. */
depca_init_ring(dev); /* Initialize the descriptor rings */
lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */
LoadCSRs(dev); /* Reload CSR3 */
InitRestartDepca(dev); /* Resume normal operation. */
netif_start_queue(dev); /* Unlock the TX ring */
break;
case DEPCA_SAY_BOO: /* Say "Boo!" to the kernel log file */
if(!capable(CAP_NET_ADMIN))
return -EPERM;
printk("%s: Boo!\n", dev->name);
break;
case DEPCA_GET_MCA: /* Get the multicast address table */
ioc->len = (HASH_TABLE_LEN >> 3);
if (copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len))
return -EFAULT;
break;
case DEPCA_SET_MCA: /* Set a multicast address */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (ioc->len >= HASH_TABLE_LEN)
return -EINVAL;
if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len))
return -EFAULT;
set_multicast_list(dev);
break;
case DEPCA_CLR_MCA: /* Clear all multicast addresses */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
set_multicast_list(dev);
break;
case DEPCA_MCA_EN: /* Enable pass all multicast addressing */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
set_multicast_list(dev);
break;
case DEPCA_GET_STATS: /* Get the driver statistics */
ioc->len = sizeof(lp->pktStats);
buf = kmalloc(ioc->len, GFP_KERNEL);
if(!buf)
return -ENOMEM;
spin_lock_irqsave(&lp->lock, flags);
memcpy(buf, &lp->pktStats, ioc->len);
spin_unlock_irqrestore(&lp->lock, flags);
if (copy_to_user(ioc->data, buf, ioc->len))
status = -EFAULT;
kfree(buf);
break;
case DEPCA_CLR_STATS: /* Zero out the driver statistics */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
spin_lock_irqsave(&lp->lock, flags);
memset(&lp->pktStats, 0, sizeof(lp->pktStats));
spin_unlock_irqrestore(&lp->lock, flags);
break;
case DEPCA_GET_REG: /* Get the DEPCA Registers */
i = 0;
tmp.sval[i++] = inw(DEPCA_NICSR);
outw(CSR0, DEPCA_ADDR); /* status register */
tmp.sval[i++] = inw(DEPCA_DATA);
memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init));
ioc->len = i * sizeof(u16) + sizeof(struct depca_init);
if (copy_to_user(ioc->data, tmp.addr, ioc->len))
return -EFAULT;
break;
default:
return -EOPNOTSUPP;
}
return status;
}
static int __init depca_module_init (void)
{
int err = 0;
#ifdef CONFIG_MCA
err = mca_register_driver(&depca_mca_driver);
if (err)
goto err;
#endif
#ifdef CONFIG_EISA
err = eisa_driver_register(&depca_eisa_driver);
if (err)
goto err_mca;
#endif
err = platform_driver_register(&depca_isa_driver);
if (err)
goto err_eisa;
depca_platform_probe();
return 0;
err_eisa:
#ifdef CONFIG_EISA
eisa_driver_unregister(&depca_eisa_driver);
err_mca:
#endif
#ifdef CONFIG_MCA
mca_unregister_driver(&depca_mca_driver);
err:
#endif
return err;
}
static void __exit depca_module_exit (void)
{
int i;
#ifdef CONFIG_MCA
mca_unregister_driver (&depca_mca_driver);
#endif
#ifdef CONFIG_EISA
eisa_driver_unregister (&depca_eisa_driver);
#endif
platform_driver_unregister (&depca_isa_driver);
for (i = 0; depca_io_ports[i].iobase; i++) {
if (depca_io_ports[i].device) {
depca_io_ports[i].device->dev.platform_data = NULL;
platform_device_unregister (depca_io_ports[i].device);
depca_io_ports[i].device = NULL;
}
}
}
module_init (depca_module_init);
module_exit (depca_module_exit);
| gpl-2.0 |
TamsuiCM11/android_kernel_sony_msm7x27a | drivers/net/ethernet/emulex/benet/be_ethtool.c | 4806 | 22196 | /*
* Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
#include "be.h"
#include "be_cmds.h"
#include <linux/ethtool.h>
struct be_ethtool_stat {
char desc[ETH_GSTRING_LEN];
int type;
int size;
int offset;
};
enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
offsetof(_struct, field)
#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
FIELDINFO(struct be_tx_stats, field)
#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
FIELDINFO(struct be_rx_stats, field)
#define DRVSTAT_INFO(field) #field, DRVSTAT,\
FIELDINFO(struct be_drv_stats, field)
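/* Worked expansion (illustrative, not part of the driver): the entry
* {DRVSTAT_INFO(rx_crc_errors)} below becomes roughly
* {"rx_crc_errors", DRVSTAT,
* FIELD_SIZEOF(struct be_drv_stats, rx_crc_errors),
* offsetof(struct be_drv_stats, rx_crc_errors)}
* i.e. one token fills all four members of struct be_ethtool_stat.
*/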
static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(rx_crc_errors)},
{DRVSTAT_INFO(rx_alignment_symbol_errors)},
{DRVSTAT_INFO(rx_pause_frames)},
{DRVSTAT_INFO(rx_control_frames)},
/* Received packets dropped when the Ethernet length field
* is not equal to the actual Ethernet data length.
*/
{DRVSTAT_INFO(rx_in_range_errors)},
/* Received packets dropped when their length field is >= 1501 bytes
* and <= 1535 bytes.
*/
{DRVSTAT_INFO(rx_out_range_errors)},
/* Received packets dropped when they are longer than 9216 bytes */
{DRVSTAT_INFO(rx_frame_too_long)},
/* Received packets dropped when they don't pass the unicast or
* multicast address filtering.
*/
{DRVSTAT_INFO(rx_address_mismatch_drops)},
/* Received packets dropped when IP packet length field is less than
* the IP header length field.
*/
{DRVSTAT_INFO(rx_dropped_too_small)},
/* Received packets dropped when IP length field is greater than
* the actual packet length.
*/
{DRVSTAT_INFO(rx_dropped_too_short)},
/* Received packets dropped when the IP header length field is less
* than 5.
*/
{DRVSTAT_INFO(rx_dropped_header_too_small)},
/* Received packets dropped when the TCP header length field is less
* than 5 or the TCP header length + IP header length is more
* than IP packet length.
*/
{DRVSTAT_INFO(rx_dropped_tcp_length)},
{DRVSTAT_INFO(rx_dropped_runt)},
/* Number of received packets dropped when a fifo for descriptors going
* into the packet demux block overflows. In normal operation, this
* fifo must never overflow.
*/
{DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
{DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
{DRVSTAT_INFO(rx_ip_checksum_errs)},
{DRVSTAT_INFO(rx_tcp_checksum_errs)},
{DRVSTAT_INFO(rx_udp_checksum_errs)},
{DRVSTAT_INFO(tx_pauseframes)},
{DRVSTAT_INFO(tx_controlframes)},
{DRVSTAT_INFO(rx_priority_pause_frames)},
/* Received packets dropped when an internal fifo going into
* main packet buffer tank (PMEM) overflows.
*/
{DRVSTAT_INFO(pmem_fifo_overflow_drop)},
{DRVSTAT_INFO(jabber_events)},
/* Received packets dropped due to lack of available HW packet buffers
* used to temporarily hold the received packets.
*/
{DRVSTAT_INFO(rx_drops_no_pbuf)},
/* Received packets dropped due to input receive buffer
* descriptor fifo overflowing.
*/
{DRVSTAT_INFO(rx_drops_no_erx_descr)},
/* Packets dropped because the internal FIFO to the offloaded TCP
* receive processing block is full. This could happen only for
* offloaded iSCSI or FCoE traffic.
*/
{DRVSTAT_INFO(rx_drops_no_tpre_descr)},
/* Received packets dropped when they need more than 8
* receive buffers. This cannot happen as the driver configures
* 2048 byte receive buffers.
*/
{DRVSTAT_INFO(rx_drops_too_many_frags)},
{DRVSTAT_INFO(forwarded_packets)},
/* Received packets dropped when the frame length
* is more than 9018 bytes
*/
{DRVSTAT_INFO(rx_drops_mtu)},
/* Number of packets dropped due to random early drop function */
{DRVSTAT_INFO(eth_red_drops)},
{DRVSTAT_INFO(be_on_die_temperature)}
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
* are first and second members respectively.
*/
static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_compl)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
/* Number of page allocation failures while posting receive buffers
* to HW.
*/
{DRVSTAT_RX_INFO(rx_post_fail)},
/* Received packets dropped due to skb allocation failure */
{DRVSTAT_RX_INFO(rx_drops_no_skbs)},
/* Received packets dropped due to lack of available fetched buffers
* posted by the driver.
*/
{DRVSTAT_RX_INFO(rx_drops_no_frags)}
};
#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
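/* Minimal sketch (not in the original file) of how the ordering
* assumption above could be checked once at init time; assumes strcmp()
* and WARN_ON() are visible through the headers pulled in by be.h.
*/
static inline void be_check_rx_stat_order(void)
{
WARN_ON(strcmp(et_rx_stats[0].desc, "rx_bytes") != 0);
WARN_ON(strcmp(et_rx_stats[1].desc, "rx_pkts") != 0);
}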
/* Stats related to multi TX queues: get_stats routine assumes compl is the
* first member
*/
static const struct be_ethtool_stat et_tx_stats[] = {
{DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
{DRVSTAT_TX_INFO(tx_bytes)},
{DRVSTAT_TX_INFO(tx_pkts)},
/* Number of skbs queued for transmission by the driver */
{DRVSTAT_TX_INFO(tx_reqs)},
/* Number of TX work request blocks DMAed to HW */
{DRVSTAT_TX_INFO(tx_wrbs)},
/* Number of times the TX queue was stopped due to lack
* of space in the TXQ.
*/
{DRVSTAT_TX_INFO(tx_stops)}
};
#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
static const char et_self_tests[][ETH_GSTRING_LEN] = {
"MAC Loopback test",
"PHY Loopback test",
"External Loopback test",
"DDR DMA test",
"Link test"
};
#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
#define BE_MAC_LOOPBACK 0x0
#define BE_PHY_LOOPBACK 0x1
#define BE_ONE_PORT_EXT_LOOPBACK 0x2
#define BE_NO_LOOPBACK 0xff
static void be_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct be_adapter *adapter = netdev_priv(netdev);
char fw_on_flash[FW_VER_LEN];
memset(fw_on_flash, 0, sizeof(fw_on_flash));
be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
strcat(drvinfo->fw_version, " [");
strcat(drvinfo->fw_version, fw_on_flash);
strcat(drvinfo->fw_version, "]");
}
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
static u32
lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
{
u32 data_read = 0, eof;
u8 addn_status;
struct be_dma_mem data_len_cmd;
int status;
memset(&data_len_cmd, 0, sizeof(data_len_cmd));
/* data_offset and data_size should be 0 to get reg len */
status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
file_name, &data_read, &eof, &addn_status);
return data_read;
}
static int
lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
u32 buf_len, void *buf)
{
struct be_dma_mem read_cmd;
u32 read_len = 0, total_read_len = 0, chunk_size;
u32 eof = 0;
u8 addn_status;
int status = 0;
read_cmd.size = LANCER_READ_FILE_CHUNK;
read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
&read_cmd.dma);
if (!read_cmd.va) {
dev_err(&adapter->pdev->dev,
"Memory allocation failure while reading dump\n");
return -ENOMEM;
}
while ((total_read_len < buf_len) && !eof) {
chunk_size = min_t(u32, (buf_len - total_read_len),
LANCER_READ_FILE_CHUNK);
chunk_size = ALIGN(chunk_size, 4);
status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
total_read_len, file_name, &read_len,
&eof, &addn_status);
if (!status) {
memcpy(buf + total_read_len, read_cmd.va, read_len);
total_read_len += read_len;
eof &= LANCER_READ_FILE_EOF_MASK;
} else {
status = -EIO;
break;
}
}
pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
read_cmd.dma);
return status;
}
static int
be_get_reg_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
if (be_physfn(adapter)) {
if (lancer_chip(adapter))
log_size = lancer_cmd_get_file_len(adapter,
LANCER_FW_DUMP_FILE);
else
be_cmd_get_reg_len(adapter, &log_size);
}
return log_size;
}
static void
be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (be_physfn(adapter)) {
memset(buf, 0, regs->len);
if (lancer_chip(adapter))
lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
regs->len, buf);
else
be_cmd_get_regs(adapter, regs->len, buf);
}
}
static int be_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_eq_obj *eqo = &adapter->eq_obj[0];
et->rx_coalesce_usecs = eqo->cur_eqd;
et->rx_coalesce_usecs_high = eqo->max_eqd;
et->rx_coalesce_usecs_low = eqo->min_eqd;
et->tx_coalesce_usecs = eqo->cur_eqd;
et->tx_coalesce_usecs_high = eqo->max_eqd;
et->tx_coalesce_usecs_low = eqo->min_eqd;
et->use_adaptive_rx_coalesce = eqo->enable_aic;
et->use_adaptive_tx_coalesce = eqo->enable_aic;
return 0;
}
/* TX attributes are ignored; only RX attributes are considered.
* The eqd cmd is issued in the worker thread.
*/
static int be_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_eq_obj *eqo;
int i;
for_all_evt_queues(adapter, eqo, i) {
eqo->enable_aic = et->use_adaptive_rx_coalesce;
eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
eqo->eqd = et->rx_coalesce_usecs;
}
return 0;
}
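/* Illustrative usage (assuming a standard ethtool binary):
* ethtool -C eth0 adaptive-rx on rx-usecs-low 32 rx-usecs-high 96
* reaches this handler via .set_coalesce; as noted above, the TX
* fields of the request are ignored.
*/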
static void
be_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
void *p;
unsigned int i, j, base = 0, start;
for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
data[i] = *(u32 *)p;
}
base += ETHTOOL_STATS_NUM;
for_all_rx_queues(adapter, rxo, j) {
struct be_rx_stats *stats = rx_stats(rxo);
do {
start = u64_stats_fetch_begin_bh(&stats->sync);
data[base] = stats->rx_bytes;
data[base + 1] = stats->rx_pkts;
} while (u64_stats_fetch_retry_bh(&stats->sync, start));
for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
p = (u8 *)stats + et_rx_stats[i].offset;
data[base + i] = *(u32 *)p;
}
base += ETHTOOL_RXSTATS_NUM;
}
for_all_tx_queues(adapter, txo, j) {
struct be_tx_stats *stats = tx_stats(txo);
do {
start = u64_stats_fetch_begin_bh(&stats->sync_compl);
data[base] = stats->tx_compl;
} while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
do {
start = u64_stats_fetch_begin_bh(&stats->sync);
for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
p = (u8 *)stats + et_tx_stats[i].offset;
data[base + i] =
(et_tx_stats[i].size == sizeof(u64)) ?
*(u64 *)p : *(u32 *)p;
}
} while (u64_stats_fetch_retry_bh(&stats->sync, start));
base += ETHTOOL_TXSTATS_NUM;
}
}
static void
be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i, j;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
for (i = 0; i < adapter->num_rx_qs; i++) {
for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
sprintf(data, "rxq%d: %s", i,
et_rx_stats[j].desc);
data += ETH_GSTRING_LEN;
}
}
for (i = 0; i < adapter->num_tx_qs; i++) {
for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
sprintf(data, "txq%d: %s", i,
et_tx_stats[j].desc);
data += ETH_GSTRING_LEN;
}
}
break;
case ETH_SS_TEST:
for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
break;
}
}
static int be_get_sset_count(struct net_device *netdev, int stringset)
{
struct be_adapter *adapter = netdev_priv(netdev);
switch (stringset) {
case ETH_SS_TEST:
return ETHTOOL_TESTS_NUM;
case ETH_SS_STATS:
return ETHTOOL_STATS_NUM +
adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
default:
return -EINVAL;
}
}
static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_phy_info phy_info;
u8 mac_speed = 0;
u16 link_speed = 0;
u8 link_status;
int status;
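/* Query the hardware only when the cached speed is invalid (< 0) or the
* interface is down; otherwise return the values cached below.
*/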
if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
status = be_cmd_link_status_query(adapter, &mac_speed,
&link_speed, &link_status, 0);
if (!status)
be_link_status_update(adapter, link_status);
/* link_speed is in units of 10 Mbps */
if (link_speed) {
ethtool_cmd_speed_set(ecmd, link_speed*10);
} else {
switch (mac_speed) {
case PHY_LINK_SPEED_10MBPS:
ethtool_cmd_speed_set(ecmd, SPEED_10);
break;
case PHY_LINK_SPEED_100MBPS:
ethtool_cmd_speed_set(ecmd, SPEED_100);
break;
case PHY_LINK_SPEED_1GBPS:
ethtool_cmd_speed_set(ecmd, SPEED_1000);
break;
case PHY_LINK_SPEED_10GBPS:
ethtool_cmd_speed_set(ecmd, SPEED_10000);
break;
case PHY_LINK_SPEED_ZERO:
ethtool_cmd_speed_set(ecmd, 0);
break;
}
}
status = be_cmd_get_phy_info(adapter, &phy_info);
if (!status) {
switch (phy_info.interface_type) {
case PHY_TYPE_XFP_10GB:
case PHY_TYPE_SFP_1GB:
case PHY_TYPE_SFP_PLUS_10GB:
ecmd->port = PORT_FIBRE;
break;
default:
ecmd->port = PORT_TP;
break;
}
switch (phy_info.interface_type) {
case PHY_TYPE_KR_10GB:
case PHY_TYPE_KX4_10GB:
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_INTERNAL;
break;
default:
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->transceiver = XCVR_EXTERNAL;
break;
}
}
/* Save for future use */
adapter->link_speed = ethtool_cmd_speed(ecmd);
adapter->port_type = ecmd->port;
adapter->transceiver = ecmd->transceiver;
adapter->autoneg = ecmd->autoneg;
} else {
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
ecmd->port = adapter->port_type;
ecmd->transceiver = adapter->transceiver;
ecmd->autoneg = adapter->autoneg;
}
ecmd->duplex = DUPLEX_FULL;
ecmd->phy_address = adapter->port_num;
switch (ecmd->port) {
case PORT_FIBRE:
ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
break;
case PORT_TP:
ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
break;
case PORT_AUI:
ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
break;
}
if (ecmd->autoneg) {
ecmd->supported |= SUPPORTED_1000baseT_Full;
ecmd->supported |= SUPPORTED_Autoneg;
ecmd->advertising |= (ADVERTISED_10000baseT_Full |
ADVERTISED_1000baseT_Full);
}
return 0;
}
static void be_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct be_adapter *adapter = netdev_priv(netdev);
ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
}
static void
be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
struct be_adapter *adapter = netdev_priv(netdev);
be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
ecmd->autoneg = 0;
}
static int
be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
if (ecmd->autoneg != 0)
return -EINVAL;
adapter->tx_fc = ecmd->tx_pause;
adapter->rx_fc = ecmd->rx_pause;
status = be_cmd_set_flow_control(adapter,
adapter->tx_fc, adapter->rx_fc);
if (status)
dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
return status;
}
static int
be_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
struct be_adapter *adapter = netdev_priv(netdev);
switch (state) {
case ETHTOOL_ID_ACTIVE:
be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
&adapter->beacon_state);
return 1; /* cycle on/off once per second */
case ETHTOOL_ID_ON:
be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
BEACON_STATE_ENABLED);
break;
case ETHTOOL_ID_OFF:
be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
BEACON_STATE_DISABLED);
break;
case ETHTOOL_ID_INACTIVE:
be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
adapter->beacon_state);
}
return 0;
}
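/* Illustrative usage: "ethtool -p eth0 5" blinks the port beacon for
* five seconds; the ethtool core calls ETHTOOL_ID_ACTIVE once, toggles
* ETHTOOL_ID_ON/OFF at the 1 Hz rate returned above, and ends with
* ETHTOOL_ID_INACTIVE so the saved beacon state is restored.
*/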
static void
be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (be_is_wol_supported(adapter)) {
wol->supported |= WAKE_MAGIC;
wol->wolopts |= WAKE_MAGIC;
} else
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int
be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
if (!be_is_wol_supported(adapter)) {
dev_warn(&adapter->pdev->dev, "WOL not supported\n");
return -EOPNOTSUPP;
}
if (wol->wolopts & WAKE_MAGIC)
adapter->wol = true;
else
adapter->wol = false;
return 0;
}
static int
be_test_ddr_dma(struct be_adapter *adapter)
{
int ret, i;
struct be_dma_mem ddrdma_cmd;
static const u64 pattern[2] = {
0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
};
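/* 0x5a (01011010) and 0xa5 (10100101) are complementary alternating
* patterns, so the two passes below drive every data bit both ways.
*/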
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
&ddrdma_cmd.dma, GFP_KERNEL);
if (!ddrdma_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
return -ENOMEM;
}
for (i = 0; i < 2; i++) {
ret = be_cmd_ddr_dma_test(adapter, pattern[i],
4096, &ddrdma_cmd);
if (ret != 0)
goto err;
}
err:
dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
ddrdma_cmd.dma);
return ret;
}
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
u64 *status)
{
be_cmd_set_loopback(adapter, adapter->hba_port_num,
loopback_type, 1);
*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
loopback_type, 1500,
2, 0xabc);
be_cmd_set_loopback(adapter, adapter->hba_port_num,
BE_NO_LOOPBACK, 1);
return *status;
}
static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
u8 mac_speed = 0;
u16 qos_link_speed = 0;
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
&data[0]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
}
if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
&data[1]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
}
if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
&data[2]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
}
}
if (be_test_ddr_dma(adapter) != 0) {
data[3] = 1;
test->flags |= ETH_TEST_FL_FAILED;
}
if (be_cmd_link_status_query(adapter, &mac_speed,
&qos_link_speed, NULL, 0) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = -1;
} else if (!mac_speed) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = 1;
}
}
static int
be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
struct be_adapter *adapter = netdev_priv(netdev);
return be_load_fw(adapter, efl->data);
}
static int
be_get_eeprom_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_get_file_len(adapter,
LANCER_VPD_PF_FILE);
else
return lancer_cmd_get_file_len(adapter,
LANCER_VPD_VF_FILE);
} else {
return BE_READ_SEEPROM_LEN;
}
}
static int
be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_dma_mem eeprom_cmd;
struct be_cmd_resp_seeprom_read *resp;
int status;
if (!eeprom->len)
return -EINVAL;
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
eeprom->len, data);
else
return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
eeprom->len, data);
}
eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
&eeprom_cmd.dma, GFP_KERNEL);
if (!eeprom_cmd.va) {
dev_err(&adapter->pdev->dev,
"Memory allocation failure. Could not read eeprom\n");
return -ENOMEM;
}
status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
if (!status) {
resp = eeprom_cmd.va;
memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
}
dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
eeprom_cmd.dma);
return status;
}
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
.get_wol = be_get_wol,
.set_wol = be_set_wol,
.get_link = ethtool_op_get_link,
.get_eeprom_len = be_get_eeprom_len,
.get_eeprom = be_read_eeprom,
.get_coalesce = be_get_coalesce,
.set_coalesce = be_set_coalesce,
.get_ringparam = be_get_ringparam,
.get_pauseparam = be_get_pauseparam,
.set_pauseparam = be_set_pauseparam,
.get_strings = be_get_stat_strings,
.set_phys_id = be_set_phys_id,
.get_sset_count = be_get_sset_count,
.get_ethtool_stats = be_get_ethtool_stats,
.get_regs_len = be_get_reg_len,
.get_regs = be_get_regs,
.flash_device = be_do_flash,
.self_test = be_self_test,
};
| gpl-2.0 |
AngSanley/angsanleykernel-nokia-normandy | drivers/platform/x86/hdaps.c | 4806 | 17127 | /*
* hdaps.c - driver for IBM's Hard Drive Active Protection System
*
* Copyright (C) 2005 Robert Love <rml@novell.com>
* Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
*
* The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
* starting with the R40, T41, and X40. It provides a basic two-axis
* accelerometer and other data, such as the device's temperature.
*
* This driver is based on the document by Mark A. Smith available at
* http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial
* and error.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License v2 as published by the
* Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/dmi.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */
#define HDAPS_NR_PORTS 0x30 /* number of ports: 0x1600 - 0x162f */
#define HDAPS_PORT_STATE 0x1611 /* device state */
#define HDAPS_PORT_YPOS 0x1612 /* y-axis position */
#define HDAPS_PORT_XPOS 0x1614 /* x-axis position */
#define HDAPS_PORT_TEMP1 0x1616 /* device temperature, in Celsius */
#define HDAPS_PORT_YVAR 0x1617 /* y-axis variance (what is this?) */
#define HDAPS_PORT_XVAR 0x1619 /* x-axis variance (what is this?) */
#define HDAPS_PORT_TEMP2 0x161b /* device temperature (again?) */
#define HDAPS_PORT_UNKNOWN 0x161c /* what is this? */
#define HDAPS_PORT_KMACT 0x161d /* keyboard or mouse activity */
#define STATE_FRESH 0x50 /* accelerometer data is fresh */
#define KEYBD_MASK 0x20 /* set if keyboard activity */
#define MOUSE_MASK 0x40 /* set if mouse activity */
#define KEYBD_ISSET(n) (!! (n & KEYBD_MASK)) /* keyboard used? */
#define MOUSE_ISSET(n) (!! (n & MOUSE_MASK)) /* mouse used? */
#define INIT_TIMEOUT_MSECS 4000 /* wait up to 4s for device init ... */
#define INIT_WAIT_MSECS 200 /* ... in 200ms increments */
#define HDAPS_POLL_INTERVAL 50 /* poll for input every 1/20s (50 ms)*/
#define HDAPS_INPUT_FUZZ 4 /* input event threshold */
#define HDAPS_INPUT_FLAT 4
#define HDAPS_X_AXIS (1 << 0)
#define HDAPS_Y_AXIS (1 << 1)
#define HDAPS_BOTH_AXES (HDAPS_X_AXIS | HDAPS_Y_AXIS)
static struct platform_device *pdev;
static struct input_polled_dev *hdaps_idev;
static unsigned int hdaps_invert;
static u8 km_activity;
static int rest_x;
static int rest_y;
static DEFINE_MUTEX(hdaps_mtx);
/*
* __get_latch - Get the value from a given port. Callers must hold hdaps_mtx.
*/
static inline u8 __get_latch(u16 port)
{
return inb(port) & 0xff;
}
/*
* __check_latch - Check a port latch for a given value. Returns zero if the
* port contains the given value. Callers must hold hdaps_mtx.
*/
static inline int __check_latch(u16 port, u8 val)
{
if (__get_latch(port) == val)
return 0;
return -EINVAL;
}
/*
* __wait_latch - Wait up to 100us for a port latch to get a certain value,
* returning zero if the value is obtained. Callers must hold hdaps_mtx.
*/
static int __wait_latch(u16 port, u8 val)
{
unsigned int i;
for (i = 0; i < 20; i++) {
if (!__check_latch(port, val))
return 0;
udelay(5);
}
return -EIO;
}
/*
* __device_refresh - request a refresh from the accelerometer. Does not wait
* for refresh to complete. Callers must hold hdaps_mtx.
*/
static void __device_refresh(void)
{
udelay(200);
if (inb(0x1604) != STATE_FRESH) {
outb(0x11, 0x1610);
outb(0x01, 0x161f);
}
}
/*
* __device_refresh_sync - request a synchronous refresh from the
* accelerometer. We wait for the refresh to complete. Returns zero if
* successful and nonzero on error. Callers must hold hdaps_mtx.
*/
static int __device_refresh_sync(void)
{
__device_refresh();
return __wait_latch(0x1604, STATE_FRESH);
}
/*
* __device_complete - indicate to the accelerometer that we are done reading
* data, and then initiate an async refresh. Callers must hold hdaps_mtx.
*/
static inline void __device_complete(void)
{
inb(0x161f);
inb(0x1604);
__device_refresh();
}
/*
* hdaps_readb_one - reads a byte from a single I/O port, placing the value in
* the given pointer. Returns zero on success or a negative error on failure.
* Can sleep.
*/
static int hdaps_readb_one(unsigned int port, u8 *val)
{
int ret;
mutex_lock(&hdaps_mtx);
/* do a sync refresh -- we need to be sure that we read fresh data */
ret = __device_refresh_sync();
if (ret)
goto out;
*val = inb(port);
__device_complete();
out:
mutex_unlock(&hdaps_mtx);
return ret;
}
/* __hdaps_read_pair - internal lockless helper for hdaps_read_pair(). */
static int __hdaps_read_pair(unsigned int port1, unsigned int port2,
int *x, int *y)
{
/* do a sync refresh -- we need to be sure that we read fresh data */
if (__device_refresh_sync())
return -EIO;
*y = inw(port2);
*x = inw(port1);
km_activity = inb(HDAPS_PORT_KMACT);
__device_complete();
/* hdaps_invert is a bitvector to negate the axes */
if (hdaps_invert & HDAPS_X_AXIS)
*x = -*x;
if (hdaps_invert & HDAPS_Y_AXIS)
*y = -*y;
return 0;
}
/*
* hdaps_read_pair - reads the values from a pair of ports, placing the values
* in the given pointers. Returns zero on success. Can sleep.
*/
static int hdaps_read_pair(unsigned int port1, unsigned int port2,
int *val1, int *val2)
{
int ret;
mutex_lock(&hdaps_mtx);
ret = __hdaps_read_pair(port1, port2, val1, val2);
mutex_unlock(&hdaps_mtx);
return ret;
}
/*
* hdaps_device_init - initialize the accelerometer. Returns zero on success
* and negative error code on failure. Can sleep.
*/
static int hdaps_device_init(void)
{
int total, ret = -ENXIO;
mutex_lock(&hdaps_mtx);
outb(0x13, 0x1610);
outb(0x01, 0x161f);
if (__wait_latch(0x161f, 0x00))
goto out;
/*
* Most ThinkPads return 0x01.
*
* Others--namely the R50p, T41p, and T42p--return 0x03. These laptops
* have "inverted" axises.
*
* The 0x02 value occurs when the chip has been previously initialized.
*/
if (__check_latch(0x1611, 0x03) &&
__check_latch(0x1611, 0x02) &&
__check_latch(0x1611, 0x01))
goto out;
printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x)\n",
__get_latch(0x1611));
outb(0x17, 0x1610);
outb(0x81, 0x1611);
outb(0x01, 0x161f);
if (__wait_latch(0x161f, 0x00))
goto out;
if (__wait_latch(0x1611, 0x00))
goto out;
if (__wait_latch(0x1612, 0x60))
goto out;
if (__wait_latch(0x1613, 0x00))
goto out;
outb(0x14, 0x1610);
outb(0x01, 0x1611);
outb(0x01, 0x161f);
if (__wait_latch(0x161f, 0x00))
goto out;
outb(0x10, 0x1610);
outb(0xc8, 0x1611);
outb(0x00, 0x1612);
outb(0x02, 0x1613);
outb(0x01, 0x161f);
if (__wait_latch(0x161f, 0x00))
goto out;
if (__device_refresh_sync())
goto out;
if (__wait_latch(0x1611, 0x00))
goto out;
/* we have done our dance, now let's wait for the applause */
for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) {
int x, y;
/* a read of the device helps push it into action */
__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y);
if (!__wait_latch(0x1611, 0x02)) {
ret = 0;
break;
}
msleep(INIT_WAIT_MSECS);
}
out:
mutex_unlock(&hdaps_mtx);
return ret;
}
/* Device model stuff */
static int hdaps_probe(struct platform_device *dev)
{
int ret;
ret = hdaps_device_init();
if (ret)
return ret;
pr_info("device successfully initialized\n");
return 0;
}
static int hdaps_resume(struct platform_device *dev)
{
return hdaps_device_init();
}
static struct platform_driver hdaps_driver = {
.probe = hdaps_probe,
.resume = hdaps_resume,
.driver = {
.name = "hdaps",
.owner = THIS_MODULE,
},
};
/*
* hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_mtx.
*/
static void hdaps_calibrate(void)
{
__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &rest_x, &rest_y);
}
static void hdaps_mousedev_poll(struct input_polled_dev *dev)
{
struct input_dev *input_dev = dev->input;
int x, y;
mutex_lock(&hdaps_mtx);
if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
goto out;
input_report_abs(input_dev, ABS_X, x - rest_x);
input_report_abs(input_dev, ABS_Y, y - rest_y);
input_sync(input_dev);
out:
mutex_unlock(&hdaps_mtx);
}
/* Sysfs Files */
static ssize_t hdaps_position_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret, x, y;
ret = hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y);
if (ret)
return ret;
return sprintf(buf, "(%d,%d)\n", x, y);
}
static ssize_t hdaps_variance_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret, x, y;
ret = hdaps_read_pair(HDAPS_PORT_XVAR, HDAPS_PORT_YVAR, &x, &y);
if (ret)
return ret;
return sprintf(buf, "(%d,%d)\n", x, y);
}
static ssize_t hdaps_temp1_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 uninitialized_var(temp);
int ret;
ret = hdaps_readb_one(HDAPS_PORT_TEMP1, &temp);
if (ret)
return ret;
return sprintf(buf, "%u\n", temp);
}
static ssize_t hdaps_temp2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 uninitialized_var(temp);
int ret;
ret = hdaps_readb_one(HDAPS_PORT_TEMP2, &temp);
if (ret)
return ret;
return sprintf(buf, "%u\n", temp);
}
static ssize_t hdaps_keyboard_activity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", KEYBD_ISSET(km_activity));
}
static ssize_t hdaps_mouse_activity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", MOUSE_ISSET(km_activity));
}
static ssize_t hdaps_calibrate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "(%d,%d)\n", rest_x, rest_y);
}
static ssize_t hdaps_calibrate_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
mutex_lock(&hdaps_mtx);
hdaps_calibrate();
mutex_unlock(&hdaps_mtx);
return count;
}
static ssize_t hdaps_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", hdaps_invert);
}
static ssize_t hdaps_invert_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int invert;
if (sscanf(buf, "%d", &invert) != 1 ||
invert < 0 || invert > HDAPS_BOTH_AXES)
return -EINVAL;
hdaps_invert = invert;
hdaps_calibrate();
return count;
}
static DEVICE_ATTR(position, 0444, hdaps_position_show, NULL);
static DEVICE_ATTR(variance, 0444, hdaps_variance_show, NULL);
static DEVICE_ATTR(temp1, 0444, hdaps_temp1_show, NULL);
static DEVICE_ATTR(temp2, 0444, hdaps_temp2_show, NULL);
static DEVICE_ATTR(keyboard_activity, 0444, hdaps_keyboard_activity_show, NULL);
static DEVICE_ATTR(mouse_activity, 0444, hdaps_mouse_activity_show, NULL);
static DEVICE_ATTR(calibrate, 0644, hdaps_calibrate_show,hdaps_calibrate_store);
static DEVICE_ATTR(invert, 0644, hdaps_invert_show, hdaps_invert_store);
static struct attribute *hdaps_attributes[] = {
&dev_attr_position.attr,
&dev_attr_variance.attr,
&dev_attr_temp1.attr,
&dev_attr_temp2.attr,
&dev_attr_keyboard_activity.attr,
&dev_attr_mouse_activity.attr,
&dev_attr_calibrate.attr,
&dev_attr_invert.attr,
NULL,
};
static struct attribute_group hdaps_attribute_group = {
.attrs = hdaps_attributes,
};
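/* Illustrative usage: once loaded, the files appear under
* /sys/devices/platform/hdaps/, e.g.
* cat /sys/devices/platform/hdaps/position -> "(-3,2)"
* echo 2 > /sys/devices/platform/hdaps/invert (negate the y-axis)
*/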
/* Module stuff */
/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
static int __init hdaps_dmi_match(const struct dmi_system_id *id)
{
pr_info("%s detected\n", id->ident);
return 1;
}
/* hdaps_dmi_match_invert - found an inverted match. */
static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
{
hdaps_invert = (unsigned long)id->driver_data;
pr_info("inverting axis (%u) readings\n", hdaps_invert);
return hdaps_dmi_match(id);
}
#define HDAPS_DMI_MATCH_INVERT(vendor, model, axes) { \
.ident = vendor " " model, \
.callback = hdaps_dmi_match_invert, \
.driver_data = (void *)axes, \
.matches = { \
DMI_MATCH(DMI_BOARD_VENDOR, vendor), \
DMI_MATCH(DMI_PRODUCT_VERSION, model) \
} \
}
#define HDAPS_DMI_MATCH_NORMAL(vendor, model) \
HDAPS_DMI_MATCH_INVERT(vendor, model, 0)
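/* Worked expansion (illustrative): HDAPS_DMI_MATCH_INVERT("IBM",
* "ThinkPad R50p", HDAPS_BOTH_AXES) builds an entry that matches
* DMI_BOARD_VENDOR "IBM" and DMI_PRODUCT_VERSION "ThinkPad R50p" with
* driver_data == (void *)3, so hdaps_dmi_match_invert() sets
* hdaps_invert = 3 (negate both axes).
*/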
/* Note that HDAPS_DMI_MATCH_NORMAL("ThinkPad T42") would match
"ThinkPad T42p", so the order of the entries matters.
If your ThinkPad is not recognized, please update to latest
BIOS. This is especially the case for some R52 ThinkPads. */
static struct dmi_system_id __initdata hdaps_whitelist[] = {
HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R52"),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad R61i", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad R61", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T41p", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T41"),
HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T42p", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T42"),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T43"),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T400", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T60", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T61p", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T61", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad X40"),
HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad X41", HDAPS_Y_AXIS),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad X60", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad X61s", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad X61", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad Z60m"),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad Z61m", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad Z61p", HDAPS_BOTH_AXES),
{ .ident = NULL }
};
static int __init hdaps_init(void)
{
struct input_dev *idev;
int ret;
if (!dmi_check_system(hdaps_whitelist)) {
pr_warn("supported laptop not found!\n");
ret = -ENODEV;
goto out;
}
if (!request_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS, "hdaps")) {
ret = -ENXIO;
goto out;
}
ret = platform_driver_register(&hdaps_driver);
if (ret)
goto out_region;
pdev = platform_device_register_simple("hdaps", -1, NULL, 0);
if (IS_ERR(pdev)) {
ret = PTR_ERR(pdev);
goto out_driver;
}
ret = sysfs_create_group(&pdev->dev.kobj, &hdaps_attribute_group);
if (ret)
goto out_device;
hdaps_idev = input_allocate_polled_device();
if (!hdaps_idev) {
ret = -ENOMEM;
goto out_group;
}
hdaps_idev->poll = hdaps_mousedev_poll;
hdaps_idev->poll_interval = HDAPS_POLL_INTERVAL;
/* initial calibrate for the input device */
hdaps_calibrate();
/* initialize the input class */
idev = hdaps_idev->input;
idev->name = "hdaps";
idev->phys = "isa1600/input0";
idev->id.bustype = BUS_ISA;
idev->dev.parent = &pdev->dev;
idev->evbit[0] = BIT_MASK(EV_ABS);
input_set_abs_params(idev, ABS_X,
-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
input_set_abs_params(idev, ABS_Y,
-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
ret = input_register_polled_device(hdaps_idev);
if (ret)
goto out_idev;
pr_info("driver successfully loaded\n");
return 0;
out_idev:
input_free_polled_device(hdaps_idev);
out_group:
sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
out_device:
platform_device_unregister(pdev);
out_driver:
platform_driver_unregister(&hdaps_driver);
out_region:
release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
out:
pr_warn("driver init failed (ret=%d)!\n", ret);
return ret;
}
static void __exit hdaps_exit(void)
{
input_unregister_polled_device(hdaps_idev);
input_free_polled_device(hdaps_idev);
sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
platform_device_unregister(pdev);
platform_driver_unregister(&hdaps_driver);
release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
pr_info("driver unloaded\n");
}
module_init(hdaps_init);
module_exit(hdaps_exit);
module_param_named(invert, hdaps_invert, int, 0);
MODULE_PARM_DESC(invert, "invert data along each axis. 1 invert x-axis, "
"2 invert y-axis, 3 invert both axes.");
MODULE_AUTHOR("Robert Love");
MODULE_DESCRIPTION("IBM Hard Drive Active Protection System (HDAPS) driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
rudij7/android_kernel_oneplus_one | drivers/net/ethernet/i825xx/3c523.c | 4806 | 39069 | /*
net-3-driver for the 3c523 Etherlink/MC card (i82586 Ethernet chip)
This is an extension to the Linux operating system, and is covered by the
same GNU General Public License that covers that work.
Copyright 1995, 1996 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca)
This is basically Michael Hipp's ni52 driver, with a new probing
algorithm and some minor changes to the 82586 CA and reset routines.
Thanks a lot Michael for a really clean i82586 implementation! Unless
otherwise documented in ni52.c, any bugs are mine.
Contrary to the Ethernet-HOWTO, this isn't based on the 3c507 driver in
any way. The ni52 is a lot easier to modify.
sources:
ni52.c
Crynwr packet driver collection was a great reference for my first
attempt at this sucker. The 3c507 driver also helped, until I noticed
that ni52.c was a lot nicer.
EtherLink/MC: Micro Channel Ethernet Adapter Technical Reference
Manual, courtesy of 3Com CardFacts, documents the 3c523-specific
stuff. Information on CardFacts is found in the Ethernet HOWTO.
Also see http://www.3com.com/
Microprocessor Communications Support Chips, T.J. Byers, ISBN
0-444-01224-9, has a section on the i82586. It tells you just enough
to know that you really don't want to learn how to program the chip.
The original device probe code was stolen from ps2esdi.c
Known Problems:
Since most of the code was stolen from ni52.c, you'll run across the
same bugs in the 0.62 version of ni52.c, plus maybe a few because of
the 3c523 idiosyncrasies. The 3c523 has 16K of RAM though, so there
shouldn't be the overrun problem that the 8K ni52 has.
This driver is for a 16K adapter. It should work fine on the 64K
adapters, but it will only use one of the 4 banks of RAM. Modifying
this for the 64K version would require a lot of heinous bank
switching, which I'm sure I'm not interested in doing. If you try to
implement a bank switching version, you'll basically have to remember
what bank is enabled and do a switch every time you access a memory
location that's not current. You'll also have to remap pointers on
the driver side, because it only knows about 16K of the memory.
Anyone desperate or masochistic enough to try?
It seems to be stable now when multiple transmit buffers are used. I
can't see any performance difference, but then I'm working on a 386SX.
Multicast doesn't work. It doesn't even pretend to work. Don't use
it. Don't compile your kernel with multicast support. I don't know
why.
Features:
This driver is usable as a loadable module. If you try to specify an
IRQ or an IO address (via insmod 3c523.o irq=xx io=0xyyy), it will
search the MCA slots until it finds a 3c523 with the specified
parameters.
This driver does support multiple ethernet cards when used as a module
(up to MAX_3C523_CARDS, the default being 4)
This has been tested with both BNC and TP versions, internal and
external transceivers. Haven't tested with the 64K version (that I
know of).
History:
Jan 1st, 1996
first public release
Feb 4th, 1996
update to 1.3.59, incorporated multicast diffs from ni52.c
Feb 15th, 1996
added shared irq support
Apr 1999
added support for multiple cards when used as a module
added option to disable multicast as it causes problems
Ganesh Sittampalam <ganesh.sittampalam@magdalen.oxford.ac.uk>
Stuart Adamson <stuart.adamson@compsoc.net>
Nov 2001
added support for ethtool (jgarzik)
$Header: /fsys2/home/chrisb/linux-1.3.59-MCA/drivers/net/RCS/3c523.c,v 1.1 1996/02/05 01:53:46 chrisb Exp chrisb $
*/
#define DRV_NAME "3c523"
#define DRV_VERSION "17-Nov-2001"
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mca-legacy.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>
#include "3c523.h"
/*************************************************************************/
#define DEBUG /* debug on */
#define SYSBUSVAL 0 /* 1 = 8 Bit, 0 = 16 bit - 3c523 only does 16 bit */
#undef ELMC_MULTICAST /* Disable multicast support as it is somewhat seriously broken at the moment */
#define make32(ptr16) (p->memtop + (short) (ptr16) )
#define make24(ptr32) ((char *) (ptr32) - p->base)
#define make16(ptr32) ((unsigned short) ((unsigned long) (ptr32) - (unsigned long) p->memtop ))
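/* Note (illustrative, not in the original text): the i82586 addresses
its shared memory with 16- and 24-bit offsets while the host uses
32-bit virtual pointers; make16()/make24()/make32() convert between
the two views, so make16(make32(off)) == off for any in-window
16-bit offset. */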
/*************************************************************************/
/*
Tables to which we can map values in the configuration registers.
*/
static int irq_table[] __initdata = {
12, 7, 3, 9
};
static int csr_table[] __initdata = {
0x300, 0x1300, 0x2300, 0x3300
};
static int shm_table[] __initdata = {
0x0c0000, 0x0c8000, 0x0d0000, 0x0d8000
};
/******************* how to calculate the buffers *****************************
* IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
* --------------- in a different (more stable?) mode. Only in this mode it's
* possible to configure the driver with 'NO_NOPCOMMANDS'
sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
sizeof(rfd) = 24; sizeof(rbd) = 12;
sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
sizeof(nop_cmd) = 8;
* if you don't know the driver, better do not change this values: */
#define RECV_BUFF_SIZE 1524 /* slightly oversized */
#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
#if (NUM_XMIT_BUFFS == 1)
#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
#endif
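/* worked check (illustrative): with 16K shmem the 9 receive buffers
cost about 9 * (1524 + 24 + 12) = 14040 bytes and the single transmit
buffer about 1524 + 8 + 16 = 1548 bytes; together with the 36-byte
init area (scp + iscp + scb) that is 15624 of 16384 bytes. */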
/**************************************************************************/
#define DELAY(x) { mdelay(32 * x); }
/* a much shorter delay: */
#define DELAY_16() { udelay(16); }
/* wait for command with timeout: */
#define WAIT_4_SCB_CMD() { int i; \
for(i=0;i<1024;i++) { \
if(!p->scb->cmd) break; \
DELAY_16(); \
if(i == 1023) { \
pr_warning("%s:%d: scb_cmd timed out .. resetting i82586\n",\
dev->name,__LINE__); \
elmc_id_reset586(); } } }
static irqreturn_t elmc_interrupt(int irq, void *dev_id);
static int elmc_open(struct net_device *dev);
static int elmc_close(struct net_device *dev);
static netdev_tx_t elmc_send_packet(struct sk_buff *, struct net_device *);
static struct net_device_stats *elmc_get_stats(struct net_device *dev);
static void elmc_timeout(struct net_device *dev);
#ifdef ELMC_MULTICAST
static void set_multicast_list(struct net_device *dev);
#endif
static const struct ethtool_ops netdev_ethtool_ops;
/* helper-functions */
static int init586(struct net_device *dev);
static int check586(struct net_device *dev, unsigned long where, unsigned size);
static void alloc586(struct net_device *dev);
static void startrecv586(struct net_device *dev);
static void *alloc_rfa(struct net_device *dev, void *ptr);
static void elmc_rcv_int(struct net_device *dev);
static void elmc_xmt_int(struct net_device *dev);
static void elmc_rnr_int(struct net_device *dev);
struct priv {
unsigned long base;
char *memtop;
unsigned long mapped_start; /* Start of ioremap */
volatile struct rfd_struct *rfd_last, *rfd_top, *rfd_first;
volatile struct scp_struct *scp; /* volatile is important */
volatile struct iscp_struct *iscp; /* volatile is important */
volatile struct scb_struct *scb; /* volatile is important */
volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
#if (NUM_XMIT_BUFFS == 1)
volatile struct transmit_cmd_struct *xmit_cmds[2];
volatile struct nop_cmd_struct *nop_cmds[2];
#else
volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
#endif
volatile int nop_point, num_recv_buffs;
volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];
volatile int xmit_count, xmit_last;
volatile int slot;
};
#define elmc_attn586() {elmc_do_attn586(dev->base_addr,ELMC_CTRL_INTE);}
#define elmc_reset586() {elmc_do_reset586(dev->base_addr,ELMC_CTRL_INTE);}
/* with interrupts disabled - this will clear the interrupt bit in the
3c523 control register, and won't put it back. This effectively
disables interrupts on the card. */
#define elmc_id_attn586() {elmc_do_attn586(dev->base_addr,0);}
#define elmc_id_reset586() {elmc_do_reset586(dev->base_addr,0);}
/*************************************************************************/
/*
Do a Channel Attention on the 3c523. This is extremely board dependent.
*/
static void elmc_do_attn586(int ioaddr, int ints)
{
/* the 3c523 requires a minimum of 500 ns. The delays here might be
a little too large, and hence they may cut the performance of the
card slightly. If someone who knows a little more about Linux
timing would care to play with these, I'd appreciate it. */
/* this bit masking stuff is crap. I'd rather have separate
registers with strobe triggers for each of these functions. <sigh>
Ya take what ya got. */
outb(ELMC_CTRL_RST | 0x3 | ELMC_CTRL_CA | ints, ioaddr + ELMC_CTRL);
DELAY_16(); /* > 500 ns */
outb(ELMC_CTRL_RST | 0x3 | ints, ioaddr + ELMC_CTRL);
}
/*************************************************************************/
/*
Reset the 82586 on the 3c523. Also very board dependent.
*/
static void elmc_do_reset586(int ioaddr, int ints)
{
/* toggle the RST bit low then high */
outb(0x3 | ELMC_CTRL_LBK, ioaddr + ELMC_CTRL);
DELAY_16(); /* > 500 ns */
outb(ELMC_CTRL_RST | ELMC_CTRL_LBK | 0x3, ioaddr + ELMC_CTRL);
elmc_do_attn586(ioaddr, ints);
}
/**********************************************
* close device
*/
static int elmc_close(struct net_device *dev)
{
netif_stop_queue(dev);
elmc_id_reset586(); /* the hard way to stop the receiver */
free_irq(dev->irq, dev);
return 0;
}
/**********************************************
* open device
*/
static int elmc_open(struct net_device *dev)
{
int ret;
elmc_id_attn586(); /* disable interrupts */
ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED,
dev->name, dev);
if (ret) {
pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq);
elmc_id_reset586();
return ret;
}
alloc586(dev);
init586(dev);
startrecv586(dev);
netif_start_queue(dev);
return 0; /* most done by init */
}
/**********************************************
* Check to see if there's an 82586 out there.
*/
static int __init check586(struct net_device *dev, unsigned long where, unsigned size)
{
struct priv *p = netdev_priv(dev);
char *iscp_addrs[2];
int i = 0;
p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000;
p->memtop = isa_bus_to_virt((unsigned long)where) + size;
p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
memset((char *) p->scp, 0, sizeof(struct scp_struct));
p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
iscp_addrs[0] = isa_bus_to_virt((unsigned long)where);
iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct);
for (i = 0; i < 2; i++) {
p->iscp = (struct iscp_struct *) iscp_addrs[i];
memset((char *) p->iscp, 0, sizeof(struct iscp_struct));
p->scp->iscp = make24(p->iscp);
p->iscp->busy = 1;
elmc_id_reset586();
/* reset586 does an implicit CA */
/* apparently, you sometimes have to kick the 82586 twice... */
elmc_id_attn586();
DELAY(1);
if (p->iscp->busy) { /* i82586 clears 'busy' after successful init */
return 0;
}
}
return 1;
}
/******************************************************************
* set iscp at the right place, called by elmc_probe and open586.
*/
static void alloc586(struct net_device *dev)
{
struct priv *p = netdev_priv(dev);
elmc_id_reset586();
DELAY(2);
p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start);
p->iscp = (struct iscp_struct *) ((char *) p->scp - sizeof(struct iscp_struct));
memset((char *) p->iscp, 0, sizeof(struct iscp_struct));
memset((char *) p->scp, 0, sizeof(struct scp_struct));
p->scp->iscp = make24(p->iscp);
p->scp->sysbus = SYSBUSVAL;
p->iscp->scb_offset = make16(p->scb);
p->iscp->busy = 1;
elmc_id_reset586();
elmc_id_attn586();
DELAY(2);
if (p->iscp->busy)
pr_err("%s: Init-Problems (alloc).\n", dev->name);
memset((char *) p->scb, 0, sizeof(struct scb_struct));
}
/*****************************************************************/
static int elmc_getinfo(char *buf, int slot, void *d)
{
int len = 0;
struct net_device *dev = d;
if (dev == NULL)
return len;
len += sprintf(buf + len, "Revision: 0x%x\n",
inb(dev->base_addr + ELMC_REVISION) & 0xf);
len += sprintf(buf + len, "IRQ: %d\n", dev->irq);
len += sprintf(buf + len, "IO Address: %#lx-%#lx\n", dev->base_addr,
dev->base_addr + ELMC_IO_EXTENT);
len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start,
dev->mem_end - 1);
len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ?
"External" : "Internal");
len += sprintf(buf + len, "Device: %s\n", dev->name);
len += sprintf(buf + len, "Hardware Address: %pM\n",
dev->dev_addr);
return len;
} /* elmc_getinfo() */
static const struct net_device_ops netdev_ops = {
.ndo_open = elmc_open,
.ndo_stop = elmc_close,
.ndo_get_stats = elmc_get_stats,
.ndo_start_xmit = elmc_send_packet,
.ndo_tx_timeout = elmc_timeout,
#ifdef ELMC_MULTICAST
.ndo_set_rx_mode = set_multicast_list,
#endif
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
/*****************************************************************/
static int __init do_elmc_probe(struct net_device *dev)
{
static int slot;
int base_addr = dev->base_addr;
int irq = dev->irq;
u_char status = 0;
u_char revision = 0;
int i = 0;
unsigned int size = 0;
int retval;
struct priv *pr = netdev_priv(dev);
if (MCA_bus == 0) {
return -ENODEV;
}
/* search through the slots for the 3c523. */
slot = mca_find_adapter(ELMC_MCA_ID, 0);
while (slot != -1) {
status = mca_read_stored_pos(slot, 2);
dev->irq=irq_table[(status & ELMC_STATUS_IRQ_SELECT) >> 6];
dev->base_addr=csr_table[(status & ELMC_STATUS_CSR_SELECT) >> 1];
/*
If we're trying to match a specified irq or IO address,
we'll reject a match unless it's what we're looking for.
Also reject it if the card is already in use.
*/
if ((irq && irq != dev->irq) ||
(base_addr && base_addr != dev->base_addr)) {
slot = mca_find_adapter(ELMC_MCA_ID, slot + 1);
continue;
}
if (!request_region(dev->base_addr, ELMC_IO_EXTENT, DRV_NAME)) {
slot = mca_find_adapter(ELMC_MCA_ID, slot + 1);
continue;
}
/* found what we're looking for... */
break;
}
/* we didn't find any 3c523 in the slots we checked for */
if (slot == MCA_NOTFOUND)
return (base_addr || irq) ? -ENXIO : -ENODEV;
mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC");
mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev);
/* if we get this far, adapter has been found - carry on */
pr_info("%s: 3c523 adapter found in slot %d\n", dev->name, slot + 1);
/* Now we extract configuration info from the card.
The 3c523 provides information in two of the POS registers, but
the second one is only needed if we want to tell the card what IRQ
to use. I suspect that whoever sets the thing up initially would
prefer we don't screw with those things.
Note that we read the status info when we found the card...
See 3c523.h for more details.
*/
/* revision is stored in the first 4 bits of the revision register */
revision = inb(dev->base_addr + ELMC_REVISION) & 0xf;
/* according to docs, we read the interrupt and write it back to
the IRQ select register, since the POST might not configure the IRQ
properly. */
switch (dev->irq) {
case 3:
mca_write_pos(slot, 3, 0x04);
break;
case 7:
mca_write_pos(slot, 3, 0x02);
break;
case 9:
mca_write_pos(slot, 3, 0x08);
break;
case 12:
mca_write_pos(slot, 3, 0x01);
break;
}
pr->slot = slot;
pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
dev->base_addr);
/* Determine if we're using the on-board transceiver (i.e. coax) or
an external one. The information is pretty much useless, but I
guess it's worth brownie points. */
dev->if_port = (status & ELMC_STATUS_DISABLE_THIN);
/* The 3c523 has a 24K chunk of memory. The first 16K is the
shared memory, while the last 8K is for the EtherStart BIOS ROM.
Which we don't care much about here. We'll just tell Linux that
we're using 16K. MCA won't permit address space conflicts caused
by not mapping the other 8K. */
dev->mem_start = shm_table[(status & ELMC_STATUS_MEMORY_SELECT) >> 3];
/* We're using MCA, so it's a given that the information about memory
size is correct. The Crynwr drivers do something like this. */
elmc_id_reset586(); /* seems like a good idea before checking it... */
size = 0x4000; /* check for 16K mem */
if (!check586(dev, dev->mem_start, size)) {
pr_err("%s: memprobe, Can't find memory at 0x%lx!\n", dev->name,
dev->mem_start);
retval = -ENODEV;
goto err_out;
}
dev->mem_end = dev->mem_start + size; /* set mem_end showed by 'ifconfig' */
pr->memtop = isa_bus_to_virt(dev->mem_start) + size;
pr->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000;
alloc586(dev);
elmc_id_reset586(); /* make sure it doesn't generate spurious ints */
/* set number of receive-buffs according to memsize */
pr->num_recv_buffs = NUM_RECV_BUFFS_16;
/* dump all the assorted information */
pr_info("%s: IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->name,
dev->irq, dev->if_port ? "ex" : "in",
dev->mem_start, dev->mem_end - 1);
/* The hardware address for the 3c523 is stored in the first six
bytes of the IO address. */
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(dev->base_addr + i);
pr_info("%s: hardware address %pM\n",
dev->name, dev->dev_addr);
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = HZ;
dev->ethtool_ops = &netdev_ethtool_ops;
/* note that we haven't actually requested the IRQ from the kernel.
That gets done in elmc_open(). I'm not sure that's such a good idea,
but it works, so I'll go with it. */
#ifndef ELMC_MULTICAST
dev->flags&=~IFF_MULTICAST; /* Multicast doesn't work */
#endif
retval = register_netdev(dev);
if (retval)
goto err_out;
return 0;
err_out:
mca_set_adapter_procfn(slot, NULL, NULL);
release_region(dev->base_addr, ELMC_IO_EXTENT);
return retval;
}
#ifdef MODULE
static void cleanup_card(struct net_device *dev)
{
mca_set_adapter_procfn(((struct priv *)netdev_priv(dev))->slot,
NULL, NULL);
release_region(dev->base_addr, ELMC_IO_EXTENT);
}
#else
struct net_device * __init elmc_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct priv));
int err;
if (!dev)
return ERR_PTR(-ENOMEM);
sprintf(dev->name, "eth%d", unit);
netdev_boot_setup_check(dev);
err = do_elmc_probe(dev);
if (err)
goto out;
return dev;
out:
free_netdev(dev);
return ERR_PTR(err);
}
#endif
/**********************************************
* init the chip (elmc-interrupt should be disabled?!)
* needs a correct 'allocated' memory
*/
static int init586(struct net_device *dev)
{
void *ptr;
unsigned long s;
int i, result = 0;
struct priv *p = netdev_priv(dev);
volatile struct configure_cmd_struct *cfg_cmd;
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
struct netdev_hw_addr *ha;
int num_addrs = netdev_mc_count(dev);
ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
cfg_cmd = (struct configure_cmd_struct *) ptr; /* configure-command */
cfg_cmd->cmd_status = 0;
cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST;
cfg_cmd->cmd_link = 0xffff;
cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */
cfg_cmd->fifo = 0x08; /* fifo-limit (8=tx:32/rx:64) */
cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */
cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */
cfg_cmd->priority = 0x00;
cfg_cmd->ifs = 0x60;
cfg_cmd->time_low = 0x00;
cfg_cmd->time_high = 0xf2;
cfg_cmd->promisc = 0;
if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC))
cfg_cmd->promisc = 1;
cfg_cmd->carr_coll = 0x00;
p->scb->cbl_offset = make16(cfg_cmd);
p->scb->cmd = CUC_START; /* cmd.-unit start */
elmc_id_attn586();
s = jiffies; /* warning: only active with interrupts on !! */
while (!(cfg_cmd->cmd_status & STAT_COMPL)) {
if (time_after(jiffies, s + 30*HZ/100))
break;
}
if ((cfg_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_COMPL | STAT_OK)) {
pr_warning("%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status);
return 1;
}
/*
* individual address setup
*/
ias_cmd = (struct iasetup_cmd_struct *) ptr;
ias_cmd->cmd_status = 0;
ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST;
ias_cmd->cmd_link = 0xffff;
memcpy((char *) &ias_cmd->iaddr, (char *) dev->dev_addr, ETH_ALEN);
p->scb->cbl_offset = make16(ias_cmd);
p->scb->cmd = CUC_START; /* cmd.-unit start */
elmc_id_attn586();
s = jiffies;
while (!(ias_cmd->cmd_status & STAT_COMPL)) {
if (time_after(jiffies, s + 30*HZ/100))
break;
}
if ((ias_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_OK | STAT_COMPL)) {
pr_warning("%s (elmc): individual address setup command failed: %04x\n",
dev->name, ias_cmd->cmd_status);
return 1;
}
/*
* TDR, wire check .. e.g. no termination resistor etc.
*/
tdr_cmd = (struct tdr_cmd_struct *) ptr;
tdr_cmd->cmd_status = 0;
tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST;
tdr_cmd->cmd_link = 0xffff;
tdr_cmd->status = 0;
p->scb->cbl_offset = make16(tdr_cmd);
p->scb->cmd = CUC_START; /* cmd.-unit start */
elmc_attn586();
s = jiffies;
while (!(tdr_cmd->cmd_status & STAT_COMPL)) {
if (time_after(jiffies, s + 30*HZ/100)) {
pr_warning("%s: %d Problems while running the TDR.\n", dev->name, __LINE__);
result = 1;
break;
}
}
if (!result) {
DELAY(2); /* wait for result */
result = tdr_cmd->status;
p->scb->cmd = p->scb->status & STAT_MASK;
elmc_id_attn586(); /* ack the interrupts */
if (result & TDR_LNK_OK) {
/* empty */
} else if (result & TDR_XCVR_PRB) {
pr_warning("%s: TDR: Transceiver problem!\n", dev->name);
} else if (result & TDR_ET_OPN) {
pr_warning("%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
} else if (result & TDR_ET_SRT) {
if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
pr_warning("%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
} else {
pr_warning("%s: TDR: Unknown status %04x\n", dev->name, result);
}
}
/*
* ack interrupts
*/
p->scb->cmd = p->scb->status & STAT_MASK;
elmc_id_attn586();
/*
* alloc nop/xmit-cmds
*/
#if (NUM_XMIT_BUFFS == 1)
for (i = 0; i < 2; i++) {
p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
p->nop_cmds[i]->cmd_cmd = CMD_NOP;
p->nop_cmds[i]->cmd_status = 0;
p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
}
p->xmit_cmds[0] = (struct transmit_cmd_struct *) ptr; /* transmit cmd/buff 0 */
ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
#else
for (i = 0; i < NUM_XMIT_BUFFS; i++) {
p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
p->nop_cmds[i]->cmd_cmd = CMD_NOP;
p->nop_cmds[i]->cmd_status = 0;
p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
p->xmit_cmds[i] = (struct transmit_cmd_struct *) ptr; /* transmit cmd/buff i */
ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
}
#endif
ptr = alloc_rfa(dev, (void *) ptr); /* init receive-frame-area */
/*
* Multicast setup
*/
if (num_addrs) {
/* I don't understand this: do we really need memory after the init? */
int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
if (len <= 0) {
pr_err("%s: Ooooops, no memory for MC-Setup!\n", dev->name);
} else {
if (len < num_addrs) {
num_addrs = len;
pr_warning("%s: Sorry, can only apply %d MC-Address(es).\n",
dev->name, num_addrs);
}
mc_cmd = (struct mcsetup_cmd_struct *) ptr;
mc_cmd->cmd_status = 0;
mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = num_addrs * 6;
i = 0;
netdev_for_each_mc_addr(ha, dev)
memcpy((char *) mc_cmd->mc_list[i++],
ha->addr, 6);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd = CUC_START;
elmc_id_attn586();
s = jiffies;
while (!(mc_cmd->cmd_status & STAT_COMPL)) {
if (time_after(jiffies, s + 30*HZ/100))
break;
}
if (!(mc_cmd->cmd_status & STAT_COMPL)) {
pr_warning("%s: Can't apply multicast-address-list.\n", dev->name);
}
}
}
/*
* alloc xmit-buffs / init xmit_cmds
*/
for (i = 0; i < NUM_XMIT_BUFFS; i++) {
p->xmit_cbuffs[i] = (char *) ptr; /* char-buffs */
ptr = (char *) ptr + XMIT_BUFF_SIZE;
p->xmit_buffs[i] = (struct tbd_struct *) ptr; /* TBD */
ptr = (char *) ptr + sizeof(struct tbd_struct);
if ((void *) ptr > (void *) p->iscp) {
pr_err("%s: not enough shared-mem for your configuration!\n", dev->name);
return 1;
}
memset((char *) (p->xmit_cmds[i]), 0, sizeof(struct transmit_cmd_struct));
memset((char *) (p->xmit_buffs[i]), 0, sizeof(struct tbd_struct));
p->xmit_cmds[i]->cmd_status = STAT_COMPL;
p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT;
p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
p->xmit_buffs[i]->next = 0xffff;
p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
}
p->xmit_count = 0;
p->xmit_last = 0;
#ifndef NO_NOPCOMMANDS
p->nop_point = 0;
#endif
/*
* 'start transmitter' (nop-loop)
*/
#ifndef NO_NOPCOMMANDS
p->scb->cbl_offset = make16(p->nop_cmds[0]);
p->scb->cmd = CUC_START;
elmc_id_attn586();
WAIT_4_SCB_CMD();
#else
p->xmit_cmds[0]->cmd_link = 0xffff;
p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_LAST | CMD_INT;
#endif
return 0;
}
/******************************************************
* This is a helper routine for elmc_rnr_int() and init586().
* It sets up the Receive Frame Area (RFA).
*/
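/* Illustrative layout note (derived from the code below): the area is
built as a ring of num_recv_buffs RFDs, followed by the same number of
RBDs, followed by the actual receive data buffers:
[RFD 0..n-1][RBD 0..n-1][buf 0][buf 1]...[buf n-1]
Each RFD links to the next one (wrapping around), and each RBD links to
the next RBD and points into its own data buffer. */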
static void *alloc_rfa(struct net_device *dev, void *ptr)
{
volatile struct rfd_struct *rfd = (struct rfd_struct *) ptr;
volatile struct rbd_struct *rbd;
int i;
struct priv *p = netdev_priv(dev);
memset((char *) rfd, 0, sizeof(struct rfd_struct) * p->num_recv_buffs);
p->rfd_first = rfd;
for (i = 0; i < p->num_recv_buffs; i++) {
rfd[i].next = make16(rfd + (i + 1) % p->num_recv_buffs);
}
rfd[p->num_recv_buffs - 1].last = RFD_SUSP; /* RU suspend */
ptr = (void *) (rfd + p->num_recv_buffs);
rbd = (struct rbd_struct *) ptr;
ptr = (void *) (rbd + p->num_recv_buffs);
/* clr descriptors */
memset((char *) rbd, 0, sizeof(struct rbd_struct) * p->num_recv_buffs);
for (i = 0; i < p->num_recv_buffs; i++) {
rbd[i].next = make16((rbd + (i + 1) % p->num_recv_buffs));
rbd[i].size = RECV_BUFF_SIZE;
rbd[i].buffer = make24(ptr);
ptr = (char *) ptr + RECV_BUFF_SIZE;
}
p->rfd_top = p->rfd_first;
p->rfd_last = p->rfd_first + p->num_recv_buffs - 1;
p->scb->rfa_offset = make16(p->rfd_first);
p->rfd_first->rbd_offset = make16(rbd);
return ptr;
}
/**************************************************
* Interrupt Handler ...
*/
static irqreturn_t
elmc_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
unsigned short stat;
struct priv *p;
if (!netif_running(dev)) {
/* The 3c523 has this habit of generating interrupts during the
reset. I'm not sure if the ni52 has this same problem, but it's
really annoying if we haven't finished initializing it. I was
hoping all the elmc_id_* commands would disable this, but I
might have missed a few. */
elmc_id_attn586(); /* ack inter. and disable any more */
return IRQ_HANDLED;
} else if (!(ELMC_CTRL_INT & inb(dev->base_addr + ELMC_CTRL))) {
/* wasn't this device */
return IRQ_NONE;
}
/* reading ELMC_CTRL also clears the INT bit. */
p = netdev_priv(dev);
while ((stat = p->scb->status & STAT_MASK))
{
p->scb->cmd = stat;
elmc_attn586(); /* ack inter. */
if (stat & STAT_CX) {
/* command with I-bit set complete */
elmc_xmt_int(dev);
}
if (stat & STAT_FR) {
/* received a frame */
elmc_rcv_int(dev);
}
#ifndef NO_NOPCOMMANDS
if (stat & STAT_CNA) {
/* CU went 'not ready' */
if (netif_running(dev)) {
pr_warning("%s: oops! CU has left active state. stat: %04x/%04x.\n",
dev->name, (int) stat, (int) p->scb->status);
}
}
#endif
if (stat & STAT_RNR) {
/* RU went 'not ready' */
if (p->scb->status & RU_SUSPEND) {
/* special case: RU_SUSPEND */
WAIT_4_SCB_CMD();
p->scb->cmd = RUC_RESUME;
elmc_attn586();
} else {
pr_warning("%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n",
dev->name, (int) stat, (int) p->scb->status);
elmc_rnr_int(dev);
}
}
WAIT_4_SCB_CMD(); /* wait for ack. (elmc_xmt_int can be faster than ack!!) */
if (p->scb->cmd) { /* timed out? */
break;
}
}
return IRQ_HANDLED;
}
/*******************************************************
* receive-interrupt
*/
static void elmc_rcv_int(struct net_device *dev)
{
int status;
unsigned short totlen;
struct sk_buff *skb;
struct rbd_struct *rbd;
struct priv *p = netdev_priv(dev);
for (; (status = p->rfd_top->status) & STAT_COMPL;) {
rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
if (status & STAT_OK) { /* frame received without error? */
if ((totlen = rbd->status) & RBD_LAST) { /* the first and the last buffer? */
totlen &= RBD_MASK; /* length of this frame */
rbd->status = 0;
skb = netdev_alloc_skb(dev, totlen + 2);
if (skb != NULL) {
skb_reserve(skb, 2); /* 16 byte alignment */
skb_put(skb,totlen);
skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += totlen;
} else {
dev->stats.rx_dropped++;
}
} else {
pr_warning("%s: received oversized frame.\n", dev->name);
dev->stats.rx_dropped++;
}
} else { /* frame !(ok), only with 'save-bad-frames' */
pr_warning("%s: oops! rfd-error-status: %04x\n", dev->name, status);
dev->stats.rx_errors++;
}
p->rfd_top->status = 0;
p->rfd_top->last = RFD_SUSP;
p->rfd_last->last = 0; /* delete RU_SUSP */
p->rfd_last = p->rfd_top;
p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
}
}
/**********************************************************
* handle 'Receiver went not ready'.
*/
static void elmc_rnr_int(struct net_device *dev)
{
struct priv *p = netdev_priv(dev);
dev->stats.rx_errors++;
WAIT_4_SCB_CMD(); /* wait for the last cmd */
p->scb->cmd = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
elmc_attn586();
WAIT_4_SCB_CMD(); /* wait for accept cmd. */
alloc_rfa(dev, (char *) p->rfd_first);
startrecv586(dev); /* restart RU */
pr_warning("%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->status);
}
/**********************************************************
* handle xmit - interrupt
*/
static void elmc_xmt_int(struct net_device *dev)
{
int status;
struct priv *p = netdev_priv(dev);
status = p->xmit_cmds[p->xmit_last]->cmd_status;
if (!(status & STAT_COMPL)) {
pr_warning("%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
}
if (status & STAT_OK) {
dev->stats.tx_packets++;
dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
} else {
dev->stats.tx_errors++;
if (status & TCMD_LATECOLL) {
pr_warning("%s: late collision detected.\n", dev->name);
dev->stats.collisions++;
} else if (status & TCMD_NOCARRIER) {
dev->stats.tx_carrier_errors++;
pr_warning("%s: no carrier detected.\n", dev->name);
} else if (status & TCMD_LOSTCTS) {
pr_warning("%s: loss of CTS detected.\n", dev->name);
} else if (status & TCMD_UNDERRUN) {
dev->stats.tx_fifo_errors++;
pr_warning("%s: DMA underrun detected.\n", dev->name);
} else if (status & TCMD_MAXCOLL) {
pr_warning("%s: Max. collisions exceeded.\n", dev->name);
dev->stats.collisions += 16;
}
}
#if (NUM_XMIT_BUFFS != 1)
if ((++p->xmit_last) == NUM_XMIT_BUFFS) {
p->xmit_last = 0;
}
#endif
netif_wake_queue(dev);
}
/***********************************************************
* (re)start the receiver
*/
static void startrecv586(struct net_device *dev)
{
struct priv *p = netdev_priv(dev);
p->scb->rfa_offset = make16(p->rfd_first);
p->scb->cmd = RUC_START;
elmc_attn586(); /* start cmd. */
WAIT_4_SCB_CMD(); /* wait for accept cmd. (no timeout!!) */
}
/******************************************************
* timeout
*/
static void elmc_timeout(struct net_device *dev)
{
struct priv *p = netdev_priv(dev);
/* COMMAND-UNIT active? */
if (p->scb->status & CU_ACTIVE) {
pr_debug("%s: strange ... timeout with CU active?!?\n", dev->name);
pr_debug("%s: X0: %04x N0: %04x N1: %04x %d\n", dev->name,
(int)p->xmit_cmds[0]->cmd_status,
(int)p->nop_cmds[0]->cmd_status,
(int)p->nop_cmds[1]->cmd_status, (int)p->nop_point);
p->scb->cmd = CUC_ABORT;
elmc_attn586();
WAIT_4_SCB_CMD();
p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
p->scb->cmd = CUC_START;
elmc_attn586();
WAIT_4_SCB_CMD();
netif_wake_queue(dev);
} else {
pr_debug("%s: xmitter timed out, try to restart! stat: %04x\n",
dev->name, p->scb->status);
pr_debug("%s: command-stats: %04x %04x\n", dev->name,
p->xmit_cmds[0]->cmd_status, p->xmit_cmds[1]->cmd_status);
elmc_close(dev);
elmc_open(dev);
}
}
/******************************************************
* send frame
*/
static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
int len;
int i;
#ifndef NO_NOPCOMMANDS
int next_nop;
#endif
struct priv *p = netdev_priv(dev);
netif_stop_queue(dev);
len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
if (len != skb->len)
memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);
#if (NUM_XMIT_BUFFS == 1)
#ifdef NO_NOPCOMMANDS
p->xmit_buffs[0]->size = TBD_LAST | len;
for (i = 0; i < 16; i++) {
p->scb->cbl_offset = make16(p->xmit_cmds[0]);
p->scb->cmd = CUC_START;
p->xmit_cmds[0]->cmd_status = 0;
elmc_attn586();
if (!i) {
dev_kfree_skb(skb);
}
WAIT_4_SCB_CMD();
if ((p->scb->status & CU_ACTIVE)) { /* test it, because CU sometimes doesn't start immediately */
break;
}
if (p->xmit_cmds[0]->cmd_status) {
break;
}
if (i == 15) {
pr_warning("%s: Can't start transmit-command.\n", dev->name);
}
}
#else
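/* Splice the transmit command into the two-entry NOP ring: the xmit
command and the next NOP both link forward to that NOP (which loops on
itself), then the current NOP is redirected to the xmit command so the
command unit picks it up. */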
next_nop = (p->nop_point + 1) & 0x1;
p->xmit_buffs[0]->size = TBD_LAST | len;
p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
= make16((p->nop_cmds[next_nop]));
p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
p->nop_point = next_nop;
dev_kfree_skb(skb);
#endif
#else
p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
if ((next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS) {
next_nop = 0;
}
p->xmit_cmds[p->xmit_count]->cmd_status = 0;
p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link
= make16((p->nop_cmds[next_nop]));
p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
p->xmit_count = next_nop;
if (p->xmit_count != p->xmit_last)
netif_wake_queue(dev);
dev_kfree_skb(skb);
#endif
return NETDEV_TX_OK;
}
/*******************************************
* Someone wants the statistics
*/
static struct net_device_stats *elmc_get_stats(struct net_device *dev)
{
struct priv *p = netdev_priv(dev);
unsigned short crc, aln, rsc, ovrn;
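/* The i82586 keeps running error counters in the SCB; read each one and
subtract the value just read, so the counters are effectively cleared
without losing events that arrive in between. */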
crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */
p->scb->crc_errs -= crc;
aln = p->scb->aln_errs;
p->scb->aln_errs -= aln;
rsc = p->scb->rsc_errs;
p->scb->rsc_errs -= rsc;
ovrn = p->scb->ovrn_errs;
p->scb->ovrn_errs -= ovrn;
dev->stats.rx_crc_errors += crc;
dev->stats.rx_fifo_errors += ovrn;
dev->stats.rx_frame_errors += aln;
dev->stats.rx_dropped += rsc;
return &dev->stats;
}
/********************************************************
* Set MC list ..
*/
#ifdef ELMC_MULTICAST
static void set_multicast_list(struct net_device *dev)
{
if (!dev->start) {
/* without a running interface, promiscuous doesn't work */
return;
}
dev->start = 0;
alloc586(dev);
init586(dev);
startrecv586(dev);
dev->start = 1;
}
#endif
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
};
#ifdef MODULE
/* Increase if needed ;) */
#define MAX_3C523_CARDS 4
static struct net_device *dev_elmc[MAX_3C523_CARDS];
static int irq[MAX_3C523_CARDS];
static int io[MAX_3C523_CARDS];
module_param_array(irq, int, NULL, 0);
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "EtherLink/MC I/O base address(es)");
MODULE_PARM_DESC(irq, "EtherLink/MC IRQ number(s)");
MODULE_LICENSE("GPL");
int __init init_module(void)
{
int this_dev,found = 0;
/* Loop until we either can't find any more cards, or we have MAX_3C523_CARDS */
for(this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
struct net_device *dev = alloc_etherdev(sizeof(struct priv));
if (!dev)
break;
dev->irq=irq[this_dev];
dev->base_addr=io[this_dev];
if (do_elmc_probe(dev) == 0) {
dev_elmc[this_dev] = dev;
found++;
continue;
}
free_netdev(dev);
if (io[this_dev]==0)
break;
pr_warning("3c523.c: No 3c523 card found at io=%#x\n",io[this_dev]);
}
if(found==0) {
if (io[0]==0)
pr_notice("3c523.c: No 3c523 cards found\n");
return -ENXIO;
} else return 0;
}
void __exit cleanup_module(void)
{
int this_dev;
for (this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
struct net_device *dev = dev_elmc[this_dev];
if (dev) {
unregister_netdev(dev);
cleanup_card(dev);
free_netdev(dev);
}
}
}
#endif /* MODULE */
| gpl-2.0 |
Tommy-Geenexus/android_kernel_sony_msm8974_togari_5.x | drivers/staging/rts5139/rts51x_transport.c | 4806 | 27545 | /* Driver for Realtek RTS51xx USB card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author:
* wwang (wei_wang@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
* Maintainer:
* Edwin Rong (edwin_rong@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include "debug.h"
#include "rts51x.h"
#include "rts51x_chip.h"
#include "rts51x_card.h"
#include "rts51x_scsi.h"
#include "rts51x_transport.h"
#include "trace.h"
/***********************************************************************
* Scatter-gather transfer buffer access routines
***********************************************************************/
/* Copy a buffer of length buflen to/from the srb's transfer buffer.
* Update the **sgptr and *offset variables so that the next copy will
* pick up from where this one left off.
*/
unsigned int rts51x_access_sglist(unsigned char *buffer,
unsigned int buflen, void *sglist,
void **sgptr, unsigned int *offset,
enum xfer_buf_dir dir)
{
unsigned int cnt;
struct scatterlist *sg = (struct scatterlist *)*sgptr;
/* We have to go through the list one entry
* at a time. Each s-g entry contains some number of pages, and
* each page has to be kmap()'ed separately. If the page is already
* in kernel-addressable memory then kmap() will return its address.
* If the page is not directly accessible -- such as a user buffer
* located in high memory -- then kmap() will map it to a temporary
* position in the kernel's virtual address space.
*/
if (!sg)
sg = (struct scatterlist *)sglist;
/* This loop handles a single s-g list entry, which may
* include multiple pages. Find the initial page structure
* and the starting offset within the page, and update
* the *offset and **sgptr values for the next loop.
*/
cnt = 0;
while (cnt < buflen && sg) {
struct page *page = sg_page(sg) +
((sg->offset + *offset) >> PAGE_SHIFT);
unsigned int poff = (sg->offset + *offset) & (PAGE_SIZE - 1);
unsigned int sglen = sg->length - *offset;
if (sglen > buflen - cnt) {
/* Transfer ends within this s-g entry */
sglen = buflen - cnt;
*offset += sglen;
} else {
/* Transfer continues to next s-g entry */
*offset = 0;
sg = sg_next(sg);
}
/* Transfer the data for all the pages in this
* s-g entry. For each page: call kmap(), do the
* transfer, and call kunmap() immediately after. */
while (sglen > 0) {
unsigned int plen = min(sglen, (unsigned int)
PAGE_SIZE - poff);
unsigned char *ptr = kmap(page);
if (dir == TO_XFER_BUF)
memcpy(ptr + poff, buffer + cnt, plen);
else
memcpy(buffer + cnt, ptr + poff, plen);
kunmap(page);
/* Start at the beginning of the next page */
poff = 0;
++page;
cnt += plen;
sglen -= plen;
}
}
*sgptr = sg;
/* Return the amount actually transferred */
return cnt;
}
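#if 0
/* Illustrative only (not compiled): copy a linear buffer into an sg list
in two chunks, resuming at the position saved in sgptr/offset. The
function name and locals below are hypothetical example code, not part
of the driver API. */
static void example_sg_copy(struct scatterlist *sglist, unsigned char *src)
{
void *sg = NULL;
unsigned int offset = 0;
/* first 512 bytes from the start of the list */
rts51x_access_sglist(src, 512, sglist, &sg, &offset, TO_XFER_BUF);
/* next 512 bytes, continuing where the first call stopped */
rts51x_access_sglist(src + 512, 512, sglist, &sg, &offset, TO_XFER_BUF);
}
#endif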
unsigned int rts51x_access_xfer_buf(unsigned char *buffer,
unsigned int buflen, struct scsi_cmnd *srb,
struct scatterlist **sgptr,
unsigned int *offset, enum xfer_buf_dir dir)
{
return rts51x_access_sglist(buffer, buflen, (void *)scsi_sglist(srb),
(void **)sgptr, offset, dir);
}
/* Store the contents of buffer into srb's transfer buffer and set the
* SCSI residue.
*/
void rts51x_set_xfer_buf(unsigned char *buffer,
unsigned int buflen, struct scsi_cmnd *srb)
{
unsigned int offset = 0;
struct scatterlist *sg = NULL;
buflen = min(buflen, scsi_bufflen(srb));
buflen = rts51x_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
TO_XFER_BUF);
if (buflen < scsi_bufflen(srb))
scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
void rts51x_get_xfer_buf(unsigned char *buffer,
unsigned int buflen, struct scsi_cmnd *srb)
{
unsigned int offset = 0;
struct scatterlist *sg = NULL;
buflen = min(buflen, scsi_bufflen(srb));
buflen = rts51x_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
FROM_XFER_BUF);
if (buflen < scsi_bufflen(srb))
scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
/* This is the completion handler which will wake us up when an URB
* completes.
*/
static void urb_done_completion(struct urb *urb)
{
struct completion *urb_done_ptr = urb->context;
if (urb_done_ptr)
complete(urb_done_ptr);
}
/* This is the common part of the URB message submission code
*
* All URBs from the driver involved in handling a queued scsi
* command _must_ pass through this function (or something like it) for the
* abort mechanisms to work properly.
*/
static int rts51x_msg_common(struct rts51x_chip *chip, struct urb *urb,
int timeout)
{
struct rts51x_usb *rts51x = chip->usb;
struct completion urb_done;
long timeleft;
int status;
/* don't submit URBs during abort processing */
if (test_bit(FLIDX_ABORTING, &rts51x->dflags))
TRACE_RET(chip, -EIO);
/* set up data structures for the wakeup system */
init_completion(&urb_done);
/* fill the common fields in the URB */
urb->context = &urb_done;
urb->actual_length = 0;
urb->error_count = 0;
urb->status = 0;
/* we assume that if transfer_buffer isn't us->iobuf then it
* hasn't been mapped for DMA. Yes, this is clunky, but it's
* easier than always having the caller tell us whether the
* transfer buffer has already been mapped. */
urb->transfer_flags = URB_NO_SETUP_DMA_MAP;
if (urb->transfer_buffer == rts51x->iobuf) {
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = rts51x->iobuf_dma;
}
urb->setup_dma = rts51x->cr_dma;
/* submit the URB */
status = usb_submit_urb(urb, GFP_NOIO);
if (status) {
/* something went wrong */
TRACE_RET(chip, status);
}
/* since the URB has been submitted successfully, it's now okay
* to cancel it */
set_bit(FLIDX_URB_ACTIVE, &rts51x->dflags);
/* did an abort occur during the submission? */
if (test_bit(FLIDX_ABORTING, &rts51x->dflags)) {
/* cancel the URB, if it hasn't been cancelled already */
if (test_and_clear_bit(FLIDX_URB_ACTIVE, &rts51x->dflags)) {
RTS51X_DEBUGP("-- cancelling URB\n");
usb_unlink_urb(urb);
}
}
/* wait for the completion of the URB */
timeleft = wait_for_completion_interruptible_timeout(&urb_done,
(timeout * HZ / 1000) ? : MAX_SCHEDULE_TIMEOUT);
clear_bit(FLIDX_URB_ACTIVE, &rts51x->dflags);
if (timeleft <= 0) {
RTS51X_DEBUGP("%s -- cancelling URB\n",
timeleft == 0 ? "Timeout" : "Signal");
usb_kill_urb(urb);
if (timeleft == 0)
status = -ETIMEDOUT;
else
status = -EINTR;
} else {
status = urb->status;
}
return status;
}
/*
* Interpret the results of a URB transfer
*/
static int interpret_urb_result(struct rts51x_chip *chip, unsigned int pipe,
unsigned int length, int result,
unsigned int partial)
{
int retval = STATUS_SUCCESS;
/* RTS51X_DEBUGP("Status code %d; transferred %u/%u\n",
result, partial, length); */
switch (result) {
/* no error code; did we send all the data? */
case 0:
if (partial != length) {
RTS51X_DEBUGP("-- short transfer\n");
TRACE_RET(chip, STATUS_TRANS_SHORT);
}
/* RTS51X_DEBUGP("-- transfer complete\n"); */
return STATUS_SUCCESS;
/* stalled */
case -EPIPE:
/* for control endpoints, (used by CB[I]) a stall indicates
* a failed command */
if (usb_pipecontrol(pipe)) {
RTS51X_DEBUGP("-- stall on control pipe\n");
TRACE_RET(chip, STATUS_STALLED);
}
/* for other sorts of endpoint, clear the stall */
RTS51X_DEBUGP("clearing endpoint halt for pipe 0x%x\n", pipe);
if (rts51x_clear_halt(chip, pipe) < 0)
TRACE_RET(chip, STATUS_ERROR);
retval = STATUS_STALLED;
TRACE_GOTO(chip, Exit);
/* babble - the device tried to send more than
* we wanted to read */
case -EOVERFLOW:
RTS51X_DEBUGP("-- babble\n");
retval = STATUS_TRANS_LONG;
TRACE_GOTO(chip, Exit);
/* the transfer was cancelled by abort,
* disconnect, or timeout */
case -ECONNRESET:
RTS51X_DEBUGP("-- transfer cancelled\n");
retval = STATUS_ERROR;
TRACE_GOTO(chip, Exit);
/* short scatter-gather read transfer */
case -EREMOTEIO:
RTS51X_DEBUGP("-- short read transfer\n");
retval = STATUS_TRANS_SHORT;
TRACE_GOTO(chip, Exit);
/* abort or disconnect in progress */
case -EIO:
RTS51X_DEBUGP("-- abort or disconnect in progress\n");
retval = STATUS_ERROR;
TRACE_GOTO(chip, Exit);
case -ETIMEDOUT:
RTS51X_DEBUGP("-- time out\n");
retval = STATUS_TIMEDOUT;
TRACE_GOTO(chip, Exit);
/* the catch-all error case */
default:
RTS51X_DEBUGP("-- unknown error\n");
retval = STATUS_ERROR;
TRACE_GOTO(chip, Exit);
}
Exit:
if ((retval != STATUS_SUCCESS) && !usb_pipecontrol(pipe))
rts51x_clear_hw_error(chip);
return retval;
}
int rts51x_ctrl_transfer(struct rts51x_chip *chip, unsigned int pipe,
u8 request, u8 requesttype, u16 value, u16 index,
void *data, u16 size, int timeout)
{
struct rts51x_usb *rts51x = chip->usb;
int result;
RTS51X_DEBUGP("%s: rq=%02x rqtype=%02x value=%04x index=%02x len=%u\n",
__func__, request, requesttype, value, index, size);
/* fill in the devrequest structure */
rts51x->cr->bRequestType = requesttype;
rts51x->cr->bRequest = request;
rts51x->cr->wValue = cpu_to_le16(value);
rts51x->cr->wIndex = cpu_to_le16(index);
rts51x->cr->wLength = cpu_to_le16(size);
/* fill and submit the URB */
usb_fill_control_urb(rts51x->current_urb, rts51x->pusb_dev, pipe,
(unsigned char *)rts51x->cr, data, size,
urb_done_completion, NULL);
result = rts51x_msg_common(chip, rts51x->current_urb, timeout);
return interpret_urb_result(chip, pipe, size, result,
rts51x->current_urb->actual_length);
}
int rts51x_clear_halt(struct rts51x_chip *chip, unsigned int pipe)
{
int result;
int endp = usb_pipeendpoint(pipe);
if (usb_pipein(pipe))
endp |= USB_DIR_IN;
result = rts51x_ctrl_transfer(chip, SND_CTRL_PIPE(chip),
USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
USB_ENDPOINT_HALT, endp, NULL, 0, 3000);
if (result != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
usb_reset_endpoint(chip->usb->pusb_dev, endp);
return STATUS_SUCCESS;
}
int rts51x_reset_pipe(struct rts51x_chip *chip, char pipe)
{
return rts51x_clear_halt(chip, pipe);
}
static void rts51x_sg_clean(struct usb_sg_request *io)
{
if (io->urbs) {
while (io->entries--)
usb_free_urb(io->urbs[io->entries]);
kfree(io->urbs);
io->urbs = NULL;
}
#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) */
if (io->dev->dev.dma_mask != NULL)
usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
io->sg, io->nents);
#endif
io->dev = NULL;
}
#if 0
static void rts51x_sg_complete(struct urb *urb)
{
struct usb_sg_request *io = urb->context;
int status = urb->status;
spin_lock(&io->lock);
/* In 2.5 we require hcds' endpoint queues not to progress after fault
* reports, until the completion callback (this!) returns. That lets
* device driver code (like this routine) unlink queued urbs first,
* if it needs to, since the HC won't work on them at all. So it's
* not possible for page N+1 to overwrite page N, and so on.
*
* That's only for "hard" faults; "soft" faults (unlinks) sometimes
* complete before the HCD can get requests away from hardware,
* though never during cleanup after a hard fault.
*/
if (io->status
&& (io->status != -ECONNRESET
|| status != -ECONNRESET)
&& urb->actual_length) {
dev_err(io->dev->bus->controller,
"dev %s ep%d%s scatterlist error %d/%d\n",
io->dev->devpath,
usb_endpoint_num(&urb->ep->desc),
usb_urb_dir_in(urb) ? "in" : "out",
status, io->status);
/* BUG (); */
}
if (io->status == 0 && status && status != -ECONNRESET) {
int i, found, retval;
io->status = status;
/* the previous urbs, and this one, completed already.
* unlink pending urbs so they won't rx/tx bad data.
* careful: unlink can sometimes be synchronous...
*/
spin_unlock(&io->lock);
for (i = 0, found = 0; i < io->entries; i++) {
if (!io->urbs[i] || !io->urbs[i]->dev)
continue;
if (found) {
retval = usb_unlink_urb(io->urbs[i]);
if (retval != -EINPROGRESS &&
retval != -ENODEV &&
retval != -EBUSY)
dev_err(&io->dev->dev,
"%s, unlink --> %d\n",
__func__, retval);
} else if (urb == io->urbs[i])
found = 1;
}
spin_lock(&io->lock);
}
urb->dev = NULL;
/* on the last completion, signal usb_sg_wait() */
io->bytes += urb->actual_length;
io->count--;
if (!io->count)
complete(&io->complete);
spin_unlock(&io->lock);
}
/* This function is ported from usb_sg_init, which can transfer
* sg list partially */
int rts51x_sg_init_partial(struct usb_sg_request *io, struct usb_device *dev,
unsigned pipe, unsigned period, void *buf, struct scatterlist **sgptr,
unsigned int *offset, int nents, size_t length, gfp_t mem_flags)
{
int i;
int urb_flags;
int dma;
struct scatterlist *sg = *sgptr, *first_sg;
first_sg = (struct scatterlist *)buf;
if (!sg)
sg = first_sg;
if (!io || !dev || !sg
|| usb_pipecontrol(pipe)
|| usb_pipeisoc(pipe)
|| (nents <= 0))
return -EINVAL;
spin_lock_init(&io->lock);
io->dev = dev;
io->pipe = pipe;
io->sg = first_sg; /* used by unmap */
io->nents = nents;
RTS51X_DEBUGP("Before map, sg address: 0x%x\n", (unsigned int)sg);
RTS51X_DEBUGP("Before map, dev address: 0x%x\n", (unsigned int)dev);
/* not all host controllers use DMA (like the mainstream pci ones);
* they can use PIO (sl811) or be software over another transport.
*/
dma = (dev->dev.dma_mask != NULL);
if (dma) {
/* map the whole sg list, because here we only know the
* total nents */
io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
first_sg, nents);
} else {
io->entries = nents;
}
/* initialize all the urbs we'll use */
if (io->entries <= 0)
return io->entries;
io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
if (!io->urbs)
goto nomem;
urb_flags = URB_NO_INTERRUPT;
if (dma)
urb_flags |= URB_NO_TRANSFER_DMA_MAP;
if (usb_pipein(pipe))
urb_flags |= URB_SHORT_NOT_OK;
RTS51X_DEBUGP("io->entries = %d\n", io->entries);
for (i = 0; (sg != NULL) && (length > 0); i++) {
unsigned len;
RTS51X_DEBUGP("sg address: 0x%x\n", (unsigned int)sg);
RTS51X_DEBUGP("length = %d, *offset = %d\n", length, *offset);
io->urbs[i] = usb_alloc_urb(0, mem_flags);
if (!io->urbs[i]) {
io->entries = i;
goto nomem;
}
io->urbs[i]->dev = NULL;
io->urbs[i]->pipe = pipe;
io->urbs[i]->interval = period;
io->urbs[i]->transfer_flags = urb_flags;
io->urbs[i]->complete = rts51x_sg_complete;
io->urbs[i]->context = io;
if (dma) {
io->urbs[i]->transfer_dma =
sg_dma_address(sg) + *offset;
len = sg_dma_len(sg) - *offset;
io->urbs[i]->transfer_buffer = NULL;
RTS51X_DEBUGP(" -- sg entry dma length = %d\n",
sg_dma_len(sg));
} else {
/* hc may use _only_ transfer_buffer */
io->urbs[i]->transfer_buffer = sg_virt(sg) + *offset;
len = sg->length - *offset;
RTS51X_DEBUGP(" -- sg entry length = %d\n",
sg->length);
}
if (length >= len) {
*offset = 0;
io->urbs[i]->transfer_buffer_length = len;
length -= len;
sg = sg_next(sg);
} else {
*offset += length;
io->urbs[i]->transfer_buffer_length = length;
length = 0;
}
if (length == 0)
io->entries = i + 1;
#if 0
if (length) {
len = min_t(unsigned, len, length);
length -= len;
if (length == 0) {
io->entries = i + 1;
*offset += len;
} else {
*offset = 0;
}
}
#endif
}
RTS51X_DEBUGP("In %s, urb count: %d\n", __func__, i);
io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
RTS51X_DEBUGP("sg address stored in sgptr: 0x%x\n", (unsigned int)sg);
*sgptr = sg;
/* transaction state */
io->count = io->entries;
io->status = 0;
io->bytes = 0;
init_completion(&io->complete);
return 0;
nomem:
rts51x_sg_clean(io);
return -ENOMEM;
}
#endif
int rts51x_sg_init(struct usb_sg_request *io, struct usb_device *dev,
unsigned pipe, unsigned period, struct scatterlist *sg,
int nents, size_t length, gfp_t mem_flags)
{
return usb_sg_init(io, dev, pipe, period, sg, nents, length, mem_flags);
}
int rts51x_sg_wait(struct usb_sg_request *io, int timeout)
{
long timeleft;
int i;
int entries = io->entries;
/* queue the urbs. */
spin_lock_irq(&io->lock);
i = 0;
while (i < entries && !io->status) {
int retval;
io->urbs[i]->dev = io->dev;
retval = usb_submit_urb(io->urbs[i], GFP_ATOMIC);
/* after we submit, let completions or cancelations fire;
* we handshake using io->status.
*/
spin_unlock_irq(&io->lock);
switch (retval) {
/* maybe retrying will recover */
case -ENXIO: /* hc didn't queue this one */
case -EAGAIN:
case -ENOMEM:
io->urbs[i]->dev = NULL;
retval = 0;
yield();
break;
/* no error? continue immediately.
*
* NOTE: to work better with UHCI (4K I/O buffer may
* need 3K of TDs) it may be good to limit how many
* URBs are queued at once; N milliseconds?
*/
case 0:
++i;
cpu_relax();
break;
/* fail any uncompleted urbs */
default:
io->urbs[i]->dev = NULL;
io->urbs[i]->status = retval;
dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
__func__, retval);
usb_sg_cancel(io);
}
spin_lock_irq(&io->lock);
if (retval && (io->status == 0 || io->status == -ECONNRESET))
io->status = retval;
}
io->count -= entries - i;
if (io->count == 0)
complete(&io->complete);
spin_unlock_irq(&io->lock);
timeleft = wait_for_completion_interruptible_timeout(&io->complete,
(timeout * HZ / 1000) ? : MAX_SCHEDULE_TIMEOUT);
if (timeleft <= 0) {
RTS51X_DEBUGP("%s -- cancelling SG request\n",
timeleft == 0 ? "Timeout" : "Signal");
usb_sg_cancel(io);
if (timeleft == 0)
io->status = -ETIMEDOUT;
else
io->status = -EINTR;
}
rts51x_sg_clean(io);
return io->status;
}
/*
* Transfer a scatter-gather list via bulk transfer
*
* This function does basically the same thing as usb_stor_bulk_transfer_buf()
* above, but it uses the usbcore scatter-gather library.
*/
static int rts51x_bulk_transfer_sglist(struct rts51x_chip *chip,
unsigned int pipe,
struct scatterlist *sg, int num_sg,
unsigned int length,
unsigned int *act_len, int timeout)
{
int result;
/* don't submit s-g requests during abort processing */
if (test_bit(FLIDX_ABORTING, &chip->usb->dflags))
TRACE_RET(chip, STATUS_ERROR);
/* initialize the scatter-gather request block */
RTS51X_DEBUGP("%s: xfer %u bytes, %d entries\n", __func__,
length, num_sg);
result =
rts51x_sg_init(&chip->usb->current_sg, chip->usb->pusb_dev, pipe, 0,
sg, num_sg, length, GFP_NOIO);
if (result) {
RTS51X_DEBUGP("rts51x_sg_init returned %d\n", result);
TRACE_RET(chip, STATUS_ERROR);
}
/* since the block has been initialized successfully, it's now
* okay to cancel it */
set_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);
/* did an abort occur during the submission? */
if (test_bit(FLIDX_ABORTING, &chip->usb->dflags)) {
/* cancel the request, if it hasn't been cancelled already */
if (test_and_clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags)) {
RTS51X_DEBUGP("-- cancelling sg request\n");
usb_sg_cancel(&chip->usb->current_sg);
}
}
/* wait for the completion of the transfer */
result = rts51x_sg_wait(&chip->usb->current_sg, timeout);
clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);
/* result = us->current_sg.status; */
if (act_len)
*act_len = chip->usb->current_sg.bytes;
return interpret_urb_result(chip, pipe, length, result,
chip->usb->current_sg.bytes);
}
#if 0
static int rts51x_bulk_transfer_sglist_partial(struct rts51x_chip *chip,
unsigned int pipe, void *buf, struct scatterlist **sgptr,
unsigned int *offset, int num_sg, unsigned int length,
unsigned int *act_len, int timeout)
{
int result;
/* don't submit s-g requests during abort processing */
if (test_bit(FLIDX_ABORTING, &chip->usb->dflags))
TRACE_RET(chip, STATUS_ERROR);
/* initialize the scatter-gather request block */
RTS51X_DEBUGP("%s: xfer %u bytes, %d entries\n", __func__,
length, num_sg);
result = rts51x_sg_init_partial(&chip->usb->current_sg,
chip->usb->pusb_dev, pipe, 0, buf, sgptr, offset,
num_sg, length, GFP_NOIO);
if (result) {
RTS51X_DEBUGP("rts51x_sg_init_partial returned %d\n", result);
TRACE_RET(chip, STATUS_ERROR);
}
/* since the block has been initialized successfully, it's now
* okay to cancel it */
set_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);
/* did an abort occur during the submission? */
if (test_bit(FLIDX_ABORTING, &chip->usb->dflags)) {
/* cancel the request, if it hasn't been cancelled already */
if (test_and_clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags)) {
RTS51X_DEBUGP("-- cancelling sg request\n");
usb_sg_cancel(&chip->usb->current_sg);
}
}
/* wait for the completion of the transfer */
result = rts51x_sg_wait(&chip->usb->current_sg, timeout);
clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);
/* result = us->current_sg.status; */
if (act_len)
*act_len = chip->usb->current_sg.bytes;
return interpret_urb_result(chip, pipe, length, result,
chip->usb->current_sg.bytes);
}
#endif
int rts51x_bulk_transfer_buf(struct rts51x_chip *chip, unsigned int pipe,
void *buf, unsigned int length,
unsigned int *act_len, int timeout)
{
int result;
/* fill and submit the URB */
usb_fill_bulk_urb(chip->usb->current_urb, chip->usb->pusb_dev, pipe,
buf, length, urb_done_completion, NULL);
result = rts51x_msg_common(chip, chip->usb->current_urb, timeout);
/* store the actual length of the data transferred */
if (act_len)
*act_len = chip->usb->current_urb->actual_length;
return interpret_urb_result(chip, pipe, length, result,
chip->usb->current_urb->actual_length);
}
int rts51x_transfer_data(struct rts51x_chip *chip, unsigned int pipe,
void *buf, unsigned int len, int use_sg,
unsigned int *act_len, int timeout)
{
int result;
if (timeout < 600)
timeout = 600;
if (use_sg) {
result =
rts51x_bulk_transfer_sglist(chip, pipe,
(struct scatterlist *)buf,
use_sg, len, act_len, timeout);
} else {
result =
rts51x_bulk_transfer_buf(chip, pipe, buf, len, act_len,
timeout);
}
return result;
}
int rts51x_transfer_data_partial(struct rts51x_chip *chip, unsigned int pipe,
void *buf, void **ptr, unsigned int *offset,
unsigned int len, int use_sg,
unsigned int *act_len, int timeout)
{
int result;
if (timeout < 600)
timeout = 600;
if (use_sg) {
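/* Partial sg transfers go through a linear bounce buffer: copy the
sg data out (for writes), do a plain bulk transfer, then copy the
result back into the sg list (for reads). */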
void *tmp_buf = kmalloc(len, GFP_KERNEL);
if (!tmp_buf)
TRACE_RET(chip, STATUS_NOMEM);
if (usb_pipeout(pipe)) {
rts51x_access_sglist(tmp_buf, len, buf, ptr, offset,
FROM_XFER_BUF);
}
result =
rts51x_bulk_transfer_buf(chip, pipe, tmp_buf, len, act_len,
timeout);
if (result == STATUS_SUCCESS) {
if (usb_pipein(pipe)) {
rts51x_access_sglist(tmp_buf, len, buf, ptr,
offset, TO_XFER_BUF);
}
}
kfree(tmp_buf);
#if 0
result = rts51x_bulk_transfer_sglist_partial(chip, pipe, buf,
(struct scatterlist **)ptr, offset,
use_sg, len, act_len, timeout);
#endif
} else {
unsigned int step = 0;
if (offset)
step = *offset;
result =
rts51x_bulk_transfer_buf(chip, pipe, buf + step, len,
act_len, timeout);
if (act_len)
step += *act_len;
else
step += len;
if (offset)
*offset = step;
}
return result;
}
int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status)
{
unsigned int pipe = RCV_INTR_PIPE(chip);
struct usb_host_endpoint *ep;
struct completion urb_done;
int result;
if (!status)
TRACE_RET(chip, STATUS_ERROR);
/* set up data structures for the wakeup system */
init_completion(&urb_done);
ep = chip->usb->pusb_dev->ep_in[usb_pipeendpoint(pipe)];
/* fill and submit the URB */
/* We set interval to 1 here, so the polling interval is controlled
* by our polling thread */
usb_fill_int_urb(chip->usb->intr_urb, chip->usb->pusb_dev, pipe,
status, 2, urb_done_completion, &urb_done, 1);
result = rts51x_msg_common(chip, chip->usb->intr_urb, 50);
return interpret_urb_result(chip, pipe, 2, result,
chip->usb->intr_urb->actual_length);
}
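/* Canned fixed-format SCSI sense data: sense key 0x02 (NOT READY) with
ASC 0x3A "medium not present", and sense key 0x05 (ILLEGAL REQUEST)
with ASC 0x24 "invalid field in CDB". */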
u8 media_not_present[] = {
0x70, 0, 0x02, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0 };
u8 invalid_cmd_field[] = {
0x70, 0, 0x05, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0 };
void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
int result;
#ifdef CONFIG_PM
if (chip->option.ss_en) {
if (srb->cmnd[0] == TEST_UNIT_READY) {
if (RTS51X_CHK_STAT(chip, STAT_SS)) {
if (check_fake_card_ready(chip,
SCSI_LUN(srb))) {
srb->result = SAM_STAT_GOOD;
} else {
srb->result = SAM_STAT_CHECK_CONDITION;
memcpy(srb->sense_buffer,
media_not_present, SENSE_SIZE);
}
return;
}
} else if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
if (RTS51X_CHK_STAT(chip, STAT_SS)) {
int prevent = srb->cmnd[4] & 0x1;
if (prevent) {
srb->result = SAM_STAT_CHECK_CONDITION;
memcpy(srb->sense_buffer,
invalid_cmd_field, SENSE_SIZE);
} else {
srb->result = SAM_STAT_GOOD;
}
return;
}
} else {
if (RTS51X_CHK_STAT(chip, STAT_SS)
|| RTS51X_CHK_STAT(chip, STAT_SS_PRE)) {
/* Wake up device */
RTS51X_DEBUGP("Try to wake up device\n");
chip->resume_from_scsi = 1;
rts51x_try_to_exit_ss(chip);
if (RTS51X_CHK_STAT(chip, STAT_SS)) {
wait_timeout(3000);
rts51x_init_chip(chip);
rts51x_init_cards(chip);
}
}
}
}
#endif
result = rts51x_scsi_handler(srb, chip);
/* if there is a transport error, reset and don't auto-sense */
if (result == TRANSPORT_ERROR) {
RTS51X_DEBUGP("-- transport indicates error, resetting\n");
srb->result = DID_ERROR << 16;
goto Handle_Errors;
}
srb->result = SAM_STAT_GOOD;
/*
* If we have a failure, we're going to do a REQUEST_SENSE
* automatically. Note that we differentiate between a command
* "failure" and an "error" in the transport mechanism.
*/
if (result == TRANSPORT_FAILED) {
/* set the result so the higher layers expect this data */
srb->result = SAM_STAT_CHECK_CONDITION;
memcpy(srb->sense_buffer,
(unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
sizeof(struct sense_data_t));
}
return;
/* Error and abort processing: try to resynchronize with the device
* by issuing a port reset. If that fails, try a class-specific
* device reset. */
Handle_Errors:
return;
}
| gpl-2.0 |
MoKee/android_kernel_lge_gproj | drivers/i2c/muxes/pca9541.c | 4806 | 10287 | /*
* I2C multiplexer driver for PCA9541 bus master selector
*
* Copyright (c) 2010 Ericsson AB.
*
* Author: Guenter Roeck <guenter.roeck@ericsson.com>
*
* Derived from:
* pca954x.c
*
* Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/i2c/pca954x.h>
/*
* The PCA9541 is a bus master selector. It supports two I2C masters connected
* to a single slave bus.
*
* Before each bus transaction, a master has to acquire bus ownership. After the
* transaction is complete, bus ownership has to be released. This fits well
* into the I2C multiplexer framework, which provides select and release
* functions for this purpose. For this reason, this driver is modeled as
* single-channel I2C bus multiplexer.
*
* This driver assumes that the two bus masters are controlled by two different
* hosts. If a single host controls both masters, platform code has to ensure
* that only one of the masters is instantiated at any given time.
*/
#define PCA9541_CONTROL 0x01
#define PCA9541_ISTAT 0x02
#define PCA9541_CTL_MYBUS (1 << 0)
#define PCA9541_CTL_NMYBUS (1 << 1)
#define PCA9541_CTL_BUSON (1 << 2)
#define PCA9541_CTL_NBUSON (1 << 3)
#define PCA9541_CTL_BUSINIT (1 << 4)
#define PCA9541_CTL_TESTON (1 << 6)
#define PCA9541_CTL_NTESTON (1 << 7)
#define PCA9541_ISTAT_INTIN (1 << 0)
#define PCA9541_ISTAT_BUSINIT (1 << 1)
#define PCA9541_ISTAT_BUSOK (1 << 2)
#define PCA9541_ISTAT_BUSLOST (1 << 3)
#define PCA9541_ISTAT_MYTEST (1 << 6)
#define PCA9541_ISTAT_NMYTEST (1 << 7)
#define BUSON (PCA9541_CTL_BUSON | PCA9541_CTL_NBUSON)
#define MYBUS (PCA9541_CTL_MYBUS | PCA9541_CTL_NMYBUS)
#define mybus(x) (!((x) & MYBUS) || ((x) & MYBUS) == MYBUS)
#define busoff(x) (!((x) & BUSON) || ((x) & BUSON) == BUSON)
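/* MYBUS and BUSON are each encoded in a pair of complementary bits. As
used here, the bus is ours (or off) when the two bits of the pair are
equal, i.e. both clear or both set; mybus()/busoff() test exactly that. */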
/* arbitration timeouts, in jiffies */
#define ARB_TIMEOUT (HZ / 8) /* 125 ms until forcing bus ownership */
#define ARB2_TIMEOUT (HZ / 4) /* 250 ms until acquisition failure */
/* arbitration retry delays, in us */
#define SELECT_DELAY_SHORT 50
#define SELECT_DELAY_LONG 1000
struct pca9541 {
struct i2c_adapter *mux_adap;
unsigned long select_timeout;
unsigned long arb_timeout;
};
static const struct i2c_device_id pca9541_id[] = {
{"pca9541", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, pca9541_id);
/*
* Write to chip register. Don't use i2c_transfer()/i2c_smbus_xfer()
* as they will try to lock the adapter a second time.
*/
static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
{
struct i2c_adapter *adap = client->adapter;
int ret;
if (adap->algo->master_xfer) {
struct i2c_msg msg;
char buf[2];
msg.addr = client->addr;
msg.flags = 0;
msg.len = 2;
buf[0] = command;
buf[1] = val;
msg.buf = buf;
ret = adap->algo->master_xfer(adap, &msg, 1);
} else {
union i2c_smbus_data data;
data.byte = val;
ret = adap->algo->smbus_xfer(adap, client->addr,
client->flags,
I2C_SMBUS_WRITE,
command,
I2C_SMBUS_BYTE_DATA, &data);
}
return ret;
}
/*
* Read from chip register. Don't use i2c_transfer()/i2c_smbus_xfer()
* as they will try to lock adapter a second time.
*/
static int pca9541_reg_read(struct i2c_client *client, u8 command)
{
struct i2c_adapter *adap = client->adapter;
int ret;
u8 val;
if (adap->algo->master_xfer) {
struct i2c_msg msg[2] = {
{
.addr = client->addr,
.flags = 0,
.len = 1,
.buf = &command
},
{
.addr = client->addr,
.flags = I2C_M_RD,
.len = 1,
.buf = &val
}
};
ret = adap->algo->master_xfer(adap, msg, 2);
if (ret == 2)
ret = val;
else if (ret >= 0)
ret = -EIO;
} else {
union i2c_smbus_data data;
ret = adap->algo->smbus_xfer(adap, client->addr,
client->flags,
I2C_SMBUS_READ,
command,
I2C_SMBUS_BYTE_DATA, &data);
if (!ret)
ret = data.byte;
}
return ret;
}
/*
* Arbitration management functions
*/
/* Release bus. Also reset NTESTON and BUSINIT if it was set. */
static void pca9541_release_bus(struct i2c_client *client)
{
int reg;
reg = pca9541_reg_read(client, PCA9541_CONTROL);
if (reg >= 0 && !busoff(reg) && mybus(reg))
pca9541_reg_write(client, PCA9541_CONTROL,
(reg & PCA9541_CTL_NBUSON) >> 1);
}
/*
* Arbitration is defined as a two-step process. A bus master can only activate
* the slave bus if it owns it; otherwise it has to request ownership first.
* This multi-step process ensures that access contention is resolved
* gracefully.
*
* Bus Ownership Other master Action
* state requested access
* ----------------------------------------------------
* off - yes wait for arbitration timeout or
* for other master to drop request
* off no no take ownership
* off yes no turn on bus
* on yes - done
* on no - wait for arbitration timeout or
* for other master to release bus
*
* The main contention point occurs if the slave bus is off and both masters
* request ownership at the same time. In this case, one master will turn on
* the slave bus, believing that it owns it. The other master will request
* bus ownership. The result is that the bus is turned on, and the master
* which did _not_ own the slave bus before ends up owning it.
*/
/* Control commands per PCA9541 datasheet */
static const u8 pca9541_control[16] = {
4, 0, 1, 5, 4, 4, 5, 5, 0, 0, 1, 1, 0, 4, 5, 1
};
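/* The table is indexed with the low four bits of the current control
register (the MYBUS/BUSON state); each entry encodes the next control
value to write for that state, per the datasheet referenced above. */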
/*
* Channel arbitration
*
* Return values:
* <0: error
* 0 : bus not acquired
* 1 : bus acquired
*/
static int pca9541_arbitrate(struct i2c_client *client)
{
struct pca9541 *data = i2c_get_clientdata(client);
int reg;
reg = pca9541_reg_read(client, PCA9541_CONTROL);
if (reg < 0)
return reg;
if (busoff(reg)) {
int istat;
/*
* Bus is off. Request ownership or turn it on unless
* other master requested ownership.
*/
istat = pca9541_reg_read(client, PCA9541_ISTAT);
if (!(istat & PCA9541_ISTAT_NMYTEST)
|| time_is_before_eq_jiffies(data->arb_timeout)) {
/*
* Other master did not request ownership,
* or arbitration timeout expired. Take the bus.
*/
pca9541_reg_write(client,
PCA9541_CONTROL,
pca9541_control[reg & 0x0f]
| PCA9541_CTL_NTESTON);
data->select_timeout = SELECT_DELAY_SHORT;
} else {
/*
* Other master requested ownership.
* Set extra long timeout to give it time to acquire it.
*/
data->select_timeout = SELECT_DELAY_LONG * 2;
}
} else if (mybus(reg)) {
/*
* Bus is on, and we own it. We are done with acquisition.
* Reset NTESTON and BUSINIT, then return success.
*/
if (reg & (PCA9541_CTL_NTESTON | PCA9541_CTL_BUSINIT))
pca9541_reg_write(client,
PCA9541_CONTROL,
reg & ~(PCA9541_CTL_NTESTON
| PCA9541_CTL_BUSINIT));
return 1;
} else {
/*
* Other master owns the bus.
* If arbitration timeout has expired, force ownership.
* Otherwise request it.
*/
data->select_timeout = SELECT_DELAY_LONG;
if (time_is_before_eq_jiffies(data->arb_timeout)) {
/* Time is up, take the bus and reset it. */
pca9541_reg_write(client,
PCA9541_CONTROL,
pca9541_control[reg & 0x0f]
| PCA9541_CTL_BUSINIT
| PCA9541_CTL_NTESTON);
} else {
/* Request bus ownership if needed */
if (!(reg & PCA9541_CTL_NTESTON))
pca9541_reg_write(client,
PCA9541_CONTROL,
reg | PCA9541_CTL_NTESTON);
}
}
return 0;
}
static int pca9541_select_chan(struct i2c_adapter *adap, void *client, u32 chan)
{
struct pca9541 *data = i2c_get_clientdata(client);
int ret;
unsigned long timeout = jiffies + ARB2_TIMEOUT; /* give up after this time */
data->arb_timeout = jiffies + ARB_TIMEOUT; /* force bus ownership after this time */
do {
ret = pca9541_arbitrate(client);
if (ret)
return ret < 0 ? ret : 0;
if (data->select_timeout == SELECT_DELAY_SHORT)
udelay(data->select_timeout);
else
msleep(data->select_timeout / 1000);
} while (time_is_after_eq_jiffies(timeout));
return -ETIMEDOUT;
}
static int pca9541_release_chan(struct i2c_adapter *adap,
void *client, u32 chan)
{
pca9541_release_bus(client);
return 0;
}
/*
* I2C init/probing/exit functions
*/
static int pca9541_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adap = client->adapter;
struct pca954x_platform_data *pdata = client->dev.platform_data;
struct pca9541 *data;
int force;
int ret = -ENODEV;
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA))
goto err;
data = kzalloc(sizeof(struct pca9541), GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
goto err;
}
i2c_set_clientdata(client, data);
/*
* I2C accesses are unprotected here.
* We have to lock the adapter before releasing the bus.
*/
i2c_lock_adapter(adap);
pca9541_release_bus(client);
i2c_unlock_adapter(adap);
/* Create mux adapter */
force = 0;
if (pdata)
force = pdata->modes[0].adap_id;
data->mux_adap = i2c_add_mux_adapter(adap, client, force, 0,
pca9541_select_chan,
pca9541_release_chan);
if (data->mux_adap == NULL) {
dev_err(&client->dev, "failed to register master selector\n");
goto exit_free;
}
dev_info(&client->dev, "registered master selector for I2C %s\n",
client->name);
return 0;
exit_free:
kfree(data);
err:
return ret;
}
static int pca9541_remove(struct i2c_client *client)
{
struct pca9541 *data = i2c_get_clientdata(client);
i2c_del_mux_adapter(data->mux_adap);
kfree(data);
return 0;
}
static struct i2c_driver pca9541_driver = {
.driver = {
.name = "pca9541",
.owner = THIS_MODULE,
},
.probe = pca9541_probe,
.remove = pca9541_remove,
.id_table = pca9541_id,
};
module_i2c_driver(pca9541_driver);
MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
MODULE_DESCRIPTION("PCA9541 I2C master selector driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
cooldudezach/android_kernel_zte_hera | drivers/net/phy/dp83640.c | 4806 | 31005 | /*
* Driver for the National Semiconductor DP83640 PHYTER
*
* Copyright (C) 2010 OMICRON electronics GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include "dp83640_reg.h"
#define DP83640_PHY_ID 0x20005ce1
#define PAGESEL 0x13
#define LAYER4 0x02
#define LAYER2 0x01
#define MAX_RXTS 64
#define N_EXT_TS 6
#define PSF_PTPVER 2
#define PSF_EVNT 0x4000
#define PSF_RX 0x2000
#define PSF_TX 0x1000
#define EXT_EVENT 1
#define CAL_EVENT 7
#define CAL_TRIGGER 7
#define PER_TRIGGER 6
/* phyter seems to miss the mark by 16 ns */
#define ADJTIME_FIX 16
#if defined(__BIG_ENDIAN)
#define ENDIAN_FLAG 0
#elif defined(__LITTLE_ENDIAN)
#define ENDIAN_FLAG PSF_ENDIAN
#endif
#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
struct phy_rxts {
u16 ns_lo; /* ns[15:0] */
u16 ns_hi; /* overflow[1:0], ns[29:16] */
u16 sec_lo; /* sec[15:0] */
u16 sec_hi; /* sec[31:16] */
u16 seqid; /* sequenceId[15:0] */
u16 msgtype; /* messageType[3:0], hash[11:0] */
};
struct phy_txts {
u16 ns_lo; /* ns[15:0] */
u16 ns_hi; /* overflow[1:0], ns[29:16] */
u16 sec_lo; /* sec[15:0] */
u16 sec_hi; /* sec[31:16] */
};
struct rxts {
struct list_head list;
unsigned long tmo;
u64 ns;
u16 seqid;
u8 msgtype;
u16 hash;
};
struct dp83640_clock;
struct dp83640_private {
struct list_head list;
struct dp83640_clock *clock;
struct phy_device *phydev;
struct work_struct ts_work;
int hwts_tx_en;
int hwts_rx_en;
int layer;
int version;
/* remember state of cfg0 during calibration */
int cfg0;
/* remember the last event time stamp */
struct phy_txts edata;
/* list of rx timestamps */
struct list_head rxts;
struct list_head rxpool;
struct rxts rx_pool_data[MAX_RXTS];
/* protects above three fields from concurrent access */
spinlock_t rx_lock;
/* queues of incoming and outgoing packets */
struct sk_buff_head rx_queue;
struct sk_buff_head tx_queue;
};
struct dp83640_clock {
/* keeps the instance in the 'phyter_clocks' list */
struct list_head list;
/* we create one clock instance per MII bus */
struct mii_bus *bus;
/* protects extended registers from concurrent access */
struct mutex extreg_lock;
/* remembers which page was last selected */
int page;
/* our advertised capabilities */
struct ptp_clock_info caps;
/* protects the three fields below from concurrent access */
struct mutex clock_lock;
/* the one phyter from which we shall read */
struct dp83640_private *chosen;
/* list of the other attached phyters, not chosen */
struct list_head phylist;
/* reference to our PTP hardware clock */
struct ptp_clock *ptp_clock;
};
/* globals */
enum {
CALIBRATE_GPIO,
PEROUT_GPIO,
EXTTS0_GPIO,
EXTTS1_GPIO,
EXTTS2_GPIO,
EXTTS3_GPIO,
EXTTS4_GPIO,
EXTTS5_GPIO,
GPIO_TABLE_SIZE
};
static int chosen_phy = -1;
static ushort gpio_tab[GPIO_TABLE_SIZE] = {
1, 2, 3, 4, 8, 9, 10, 11
};
module_param(chosen_phy, int, 0444);
module_param_array(gpio_tab, ushort, NULL, 0444);
MODULE_PARM_DESC(chosen_phy, \
"The address of the PHY to use for the ancillary clock features");
MODULE_PARM_DESC(gpio_tab, \
"Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
/* a list of clocks and a mutex to protect it */
static LIST_HEAD(phyter_clocks);
static DEFINE_MUTEX(phyter_clocks_lock);
static void rx_timestamp_work(struct work_struct *work);
/* extended register access functions */
#define BROADCAST_ADDR 31
static inline int broadcast_write(struct mii_bus *bus, u32 regnum, u16 val)
{
return mdiobus_write(bus, BROADCAST_ADDR, regnum, val);
}
/* Caller must hold extreg_lock. */
static int ext_read(struct phy_device *phydev, int page, u32 regnum)
{
struct dp83640_private *dp83640 = phydev->priv;
int val;
if (dp83640->clock->page != page) {
broadcast_write(phydev->bus, PAGESEL, page);
dp83640->clock->page = page;
}
val = phy_read(phydev, regnum);
return val;
}
/* Caller must hold extreg_lock. */
static void ext_write(int broadcast, struct phy_device *phydev,
int page, u32 regnum, u16 val)
{
struct dp83640_private *dp83640 = phydev->priv;
if (dp83640->clock->page != page) {
broadcast_write(phydev->bus, PAGESEL, page);
dp83640->clock->page = page;
}
if (broadcast)
broadcast_write(phydev->bus, regnum, val);
else
phy_write(phydev, regnum, val);
}
/* Caller must hold extreg_lock. */
static int tdr_write(int bc, struct phy_device *dev,
const struct timespec *ts, u16 cmd)
{
ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec & 0xffff);/* ns[15:0] */
ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec >> 16); /* ns[31:16] */
ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec & 0xffff); /* sec[15:0] */
ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec >> 16); /* sec[31:16]*/
ext_write(bc, dev, PAGE4, PTP_CTL, cmd);
return 0;
}
/* convert phy timestamps into driver timestamps */
static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
{
u32 sec;
sec = p->sec_lo;
sec |= p->sec_hi << 16;
rxts->ns = p->ns_lo;
rxts->ns |= (p->ns_hi & 0x3fff) << 16;
rxts->ns += ((u64)sec) * 1000000000ULL;
rxts->seqid = p->seqid;
rxts->msgtype = (p->msgtype >> 12) & 0xf;
rxts->hash = p->msgtype & 0x0fff;
rxts->tmo = jiffies + 2;
}
static u64 phy2txts(struct phy_txts *p)
{
u64 ns;
u32 sec;
sec = p->sec_lo;
sec |= p->sec_hi << 16;
ns = p->ns_lo;
ns |= (p->ns_hi & 0x3fff) << 16;
ns += ((u64)sec) * 1000000000ULL;
return ns;
}
static void periodic_output(struct dp83640_clock *clock,
struct ptp_clock_request *clkreq, bool on)
{
struct dp83640_private *dp83640 = clock->chosen;
struct phy_device *phydev = dp83640->phydev;
u32 sec, nsec, period;
u16 gpio, ptp_trig, trigger, val;
gpio = on ? gpio_tab[PEROUT_GPIO] : 0;
trigger = PER_TRIGGER;
ptp_trig = TRIG_WR |
(trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT |
(gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT |
TRIG_PER |
TRIG_PULSE;
val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
if (!on) {
val |= TRIG_DIS;
mutex_lock(&clock->extreg_lock);
ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
ext_write(0, phydev, PAGE4, PTP_CTL, val);
mutex_unlock(&clock->extreg_lock);
return;
}
sec = clkreq->perout.start.sec;
nsec = clkreq->perout.start.nsec;
period = clkreq->perout.period.sec * 1000000000UL;
period += clkreq->perout.period.nsec;
mutex_lock(&clock->extreg_lock);
ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
/*load trigger*/
val |= TRIG_LOAD;
ext_write(0, phydev, PAGE4, PTP_CTL, val);
ext_write(0, phydev, PAGE4, PTP_TDR, nsec & 0xffff); /* ns[15:0] */
ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16); /* ns[31:16] */
ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff); /* sec[15:0] */
ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16); /* sec[31:16] */
ext_write(0, phydev, PAGE4, PTP_TDR, period & 0xffff); /* ns[15:0] */
ext_write(0, phydev, PAGE4, PTP_TDR, period >> 16); /* ns[31:16] */
/*enable trigger*/
val &= ~TRIG_LOAD;
val |= TRIG_EN;
ext_write(0, phydev, PAGE4, PTP_CTL, val);
mutex_unlock(&clock->extreg_lock);
}
/* ptp clock methods */
static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
struct phy_device *phydev = clock->chosen->phydev;
u64 rate;
int neg_adj = 0;
u16 hi, lo;
if (ppb < 0) {
neg_adj = 1;
ppb = -ppb;
}
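/*
 * Scale parts per billion into the hardware rate word. Since
 * 1953125 = 5^9 = 10^9 / 2^9, (ppb << 26) / 1953125 == ppb * 2^35 / 10^9,
 * so the register pair appears to hold the fractional frequency offset
 * in units of 2^-35. This also explains caps.max_adj = 1953124 set
 * below: it is the largest ppb value whose scaled rate still fits in
 * the 26 bit (10 high + 16 low) register pair.
 */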
rate = ppb;
rate <<= 26;
rate = div_u64(rate, 1953125);
hi = (rate >> 16) & PTP_RATE_HI_MASK;
if (neg_adj)
hi |= PTP_RATE_DIR;
lo = rate & 0xffff;
mutex_lock(&clock->extreg_lock);
ext_write(1, phydev, PAGE4, PTP_RATEH, hi);
ext_write(1, phydev, PAGE4, PTP_RATEL, lo);
mutex_unlock(&clock->extreg_lock);
return 0;
}
static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
struct phy_device *phydev = clock->chosen->phydev;
struct timespec ts;
int err;
delta += ADJTIME_FIX;
ts = ns_to_timespec(delta);
mutex_lock(&clock->extreg_lock);
err = tdr_write(1, phydev, &ts, PTP_STEP_CLK);
mutex_unlock(&clock->extreg_lock);
return err;
}
static int ptp_dp83640_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
struct phy_device *phydev = clock->chosen->phydev;
unsigned int val[4];
mutex_lock(&clock->extreg_lock);
ext_write(0, phydev, PAGE4, PTP_CTL, PTP_RD_CLK);
val[0] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[15:0] */
val[1] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[31:16] */
val[2] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[15:0] */
val[3] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[31:16] */
mutex_unlock(&clock->extreg_lock);
ts->tv_nsec = val[0] | (val[1] << 16);
ts->tv_sec = val[2] | (val[3] << 16);
return 0;
}
static int ptp_dp83640_settime(struct ptp_clock_info *ptp,
const struct timespec *ts)
{
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
struct phy_device *phydev = clock->chosen->phydev;
int err;
mutex_lock(&clock->extreg_lock);
err = tdr_write(1, phydev, ts, PTP_LOAD_CLK);
mutex_unlock(&clock->extreg_lock);
return err;
}
static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
struct phy_device *phydev = clock->chosen->phydev;
int index;
u16 evnt, event_num, gpio_num;
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
index = rq->extts.index;
if (index < 0 || index >= N_EXT_TS)
return -EINVAL;
event_num = EXT_EVENT + index;
evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
if (on) {
gpio_num = gpio_tab[EXTTS0_GPIO + index];
evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
evnt |= EVNT_RISE;
}
ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
return 0;
case PTP_CLK_REQ_PEROUT:
if (rq->perout.index != 0)
return -EINVAL;
periodic_output(clock, rq, on);
return 0;
default:
break;
}
return -EOPNOTSUPP;
}
static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
static void enable_status_frames(struct phy_device *phydev, bool on)
{
u16 cfg0 = 0, ver;
if (on)
cfg0 = PSF_EVNT_EN | PSF_RXTS_EN | PSF_TXTS_EN | ENDIAN_FLAG;
ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
if (!phydev->attached_dev) {
pr_warning("dp83640: expected to find an attached netdevice\n");
return;
}
if (on) {
if (dev_mc_add(phydev->attached_dev, status_frame_dst))
pr_warning("dp83640: failed to add mc address\n");
} else {
if (dev_mc_del(phydev->attached_dev, status_frame_dst))
pr_warning("dp83640: failed to delete mc address\n");
}
}
static bool is_status_frame(struct sk_buff *skb, int type)
{
struct ethhdr *h = eth_hdr(skb);
if (PTP_CLASS_V2_L2 == type &&
!memcmp(h->h_source, status_frame_src, sizeof(status_frame_src)))
return true;
else
return false;
}
static int expired(struct rxts *rxts)
{
return time_after(jiffies, rxts->tmo);
}
/* Caller must hold rx_lock. */
static void prune_rx_ts(struct dp83640_private *dp83640)
{
struct list_head *this, *next;
struct rxts *rxts;
list_for_each_safe(this, next, &dp83640->rxts) {
rxts = list_entry(this, struct rxts, list);
if (expired(rxts)) {
list_del_init(&rxts->list);
list_add(&rxts->list, &dp83640->rxpool);
}
}
}
/* synchronize the phyters so they act as one clock */
static void enable_broadcast(struct phy_device *phydev, int init_page, int on)
{
int val;
phy_write(phydev, PAGESEL, 0);
val = phy_read(phydev, PHYCR2);
if (on)
val |= BC_WRITE;
else
val &= ~BC_WRITE;
phy_write(phydev, PHYCR2, val);
phy_write(phydev, PAGESEL, init_page);
}
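/*
 * recalibrate() synchronizes all phyters on a bus: every device arms an
 * event timestamp on the shared calibration GPIO, the chosen "master"
 * phyter fires a single self trigger on that pin, and each remaining
 * phyter is then stepped by its measured offset from the master's
 * event time.
 */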
static void recalibrate(struct dp83640_clock *clock)
{
s64 now, diff;
struct phy_txts event_ts;
struct timespec ts;
struct list_head *this;
struct dp83640_private *tmp;
struct phy_device *master = clock->chosen->phydev;
u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;
trigger = CAL_TRIGGER;
cal_gpio = gpio_tab[CALIBRATE_GPIO];
mutex_lock(&clock->extreg_lock);
/*
* enable broadcast, disable status frames, enable ptp clock
*/
list_for_each(this, &clock->phylist) {
tmp = list_entry(this, struct dp83640_private, list);
enable_broadcast(tmp->phydev, clock->page, 1);
tmp->cfg0 = ext_read(tmp->phydev, PAGE5, PSF_CFG0);
ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, 0);
ext_write(0, tmp->phydev, PAGE4, PTP_CTL, PTP_ENABLE);
}
enable_broadcast(master, clock->page, 1);
cfg0 = ext_read(master, PAGE5, PSF_CFG0);
ext_write(0, master, PAGE5, PSF_CFG0, 0);
ext_write(0, master, PAGE4, PTP_CTL, PTP_ENABLE);
/*
* enable an event timestamp
*/
evnt = EVNT_WR | EVNT_RISE | EVNT_SINGLE;
evnt |= (CAL_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
evnt |= (cal_gpio & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
list_for_each(this, &clock->phylist) {
tmp = list_entry(this, struct dp83640_private, list);
ext_write(0, tmp->phydev, PAGE5, PTP_EVNT, evnt);
}
ext_write(0, master, PAGE5, PTP_EVNT, evnt);
/*
* configure a trigger
*/
ptp_trig = TRIG_WR | TRIG_IF_LATE | TRIG_PULSE;
ptp_trig |= (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT;
ptp_trig |= (cal_gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT;
ext_write(0, master, PAGE5, PTP_TRIG, ptp_trig);
/* load trigger */
val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
val |= TRIG_LOAD;
ext_write(0, master, PAGE4, PTP_CTL, val);
/* enable trigger */
val &= ~TRIG_LOAD;
val |= TRIG_EN;
ext_write(0, master, PAGE4, PTP_CTL, val);
/* disable trigger */
val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
val |= TRIG_DIS;
ext_write(0, master, PAGE4, PTP_CTL, val);
/*
* read out and correct offsets
*/
val = ext_read(master, PAGE4, PTP_STS);
pr_info("master PTP_STS 0x%04hx\n", val);
val = ext_read(master, PAGE4, PTP_ESTS);
pr_info("master PTP_ESTS 0x%04hx\n", val);
event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA);
event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA);
event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
event_ts.sec_hi = ext_read(master, PAGE4, PTP_EDATA);
now = phy2txts(&event_ts);
list_for_each(this, &clock->phylist) {
tmp = list_entry(this, struct dp83640_private, list);
val = ext_read(tmp->phydev, PAGE4, PTP_STS);
pr_info("slave PTP_STS 0x%04hx\n", val);
val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
pr_info("slave PTP_ESTS 0x%04hx\n", val);
event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
diff = now - (s64) phy2txts(&event_ts);
pr_info("slave offset %lld nanoseconds\n", diff);
diff += ADJTIME_FIX;
ts = ns_to_timespec(diff);
tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
}
/*
* restore status frames
*/
list_for_each(this, &clock->phylist) {
tmp = list_entry(this, struct dp83640_private, list);
ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, tmp->cfg0);
}
ext_write(0, master, PAGE5, PSF_CFG0, cfg0);
mutex_unlock(&clock->extreg_lock);
}
/* time stamping methods */
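/*
 * In the extended event status word of a PHY status frame, each
 * external event channel occupies two bits, and channel numbering
 * starts at EXT_EVENT; hence the (ch + EXT_EVENT) * 2 shift below.
 */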
static inline u16 exts_chan_to_edata(int ch)
{
return 1 << ((ch + EXT_EVENT) * 2);
}
static int decode_evnt(struct dp83640_private *dp83640,
void *data, u16 ests)
{
struct phy_txts *phy_txts;
struct ptp_clock_event event;
int i, parsed;
int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
u16 ext_status = 0;
if (ests & MULT_EVNT) {
ext_status = *(u16 *) data;
data += sizeof(ext_status);
}
phy_txts = data;
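/*
 * The hardware sends only the time stamp words that changed, so the
 * cascading fall through refreshes just those words in edata and
 * reuses the higher order words cached from the previous event.
 */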
switch (words) { /* fall through in every case */
case 3:
dp83640->edata.sec_hi = phy_txts->sec_hi;
case 2:
dp83640->edata.sec_lo = phy_txts->sec_lo;
case 1:
dp83640->edata.ns_hi = phy_txts->ns_hi;
case 0:
dp83640->edata.ns_lo = phy_txts->ns_lo;
}
if (ext_status) {
parsed = words + 2;
} else {
parsed = words + 1;
i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT;
ext_status = exts_chan_to_edata(i);
}
event.type = PTP_CLOCK_EXTTS;
event.timestamp = phy2txts(&dp83640->edata);
for (i = 0; i < N_EXT_TS; i++) {
if (ext_status & exts_chan_to_edata(i)) {
event.index = i;
ptp_clock_event(dp83640->clock->ptp_clock, &event);
}
}
return parsed * sizeof(u16);
}
static void decode_rxts(struct dp83640_private *dp83640,
struct phy_rxts *phy_rxts)
{
struct rxts *rxts;
unsigned long flags;
spin_lock_irqsave(&dp83640->rx_lock, flags);
prune_rx_ts(dp83640);
if (list_empty(&dp83640->rxpool)) {
pr_debug("dp83640: rx timestamp pool is empty\n");
goto out;
}
rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
list_del_init(&rxts->list);
phy2rxts(phy_rxts, rxts);
list_add_tail(&rxts->list, &dp83640->rxts);
out:
spin_unlock_irqrestore(&dp83640->rx_lock, flags);
}
static void decode_txts(struct dp83640_private *dp83640,
struct phy_txts *phy_txts)
{
struct skb_shared_hwtstamps shhwtstamps;
struct sk_buff *skb;
u64 ns;
/* We must already have the skb that triggered this. */
skb = skb_dequeue(&dp83640->tx_queue);
if (!skb) {
pr_debug("dp83640: have timestamp but tx_queue empty\n");
return;
}
ns = phy2txts(phy_txts);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_complete_tx_timestamp(skb, &shhwtstamps);
}
static void decode_status_frame(struct dp83640_private *dp83640,
struct sk_buff *skb)
{
struct phy_rxts *phy_rxts;
struct phy_txts *phy_txts;
u8 *ptr;
int len, size;
u16 ests, type;
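/*
 * A status frame carries, after a 2 byte header, a sequence of records:
 * each starts with a 16 bit word whose high nibble selects PSF_RX,
 * PSF_TX or PSF_EVNT and whose low 12 bits hold event status, followed
 * by the matching time stamp payload.
 */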
ptr = skb->data + 2;
for (len = skb_headlen(skb) - 2; len > sizeof(type); len -= size) {
type = *(u16 *)ptr;
ests = type & 0x0fff;
type = type & 0xf000;
len -= sizeof(type);
ptr += sizeof(type);
if (PSF_RX == type && len >= sizeof(*phy_rxts)) {
phy_rxts = (struct phy_rxts *) ptr;
decode_rxts(dp83640, phy_rxts);
size = sizeof(*phy_rxts);
} else if (PSF_TX == type && len >= sizeof(*phy_txts)) {
phy_txts = (struct phy_txts *) ptr;
decode_txts(dp83640, phy_txts);
size = sizeof(*phy_txts);
} else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
size = decode_evnt(dp83640, ptr, ests);
} else {
size = 0;
break;
}
ptr += size;
}
}
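/*
 * A Sync message is identified by message type 0: via the control field
 * in a v1 header, or the low nibble of the first byte in a v2 header,
 * hence the extra OFF_PTP_CONTROL offset for the V1 classes below.
 */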
static int is_sync(struct sk_buff *skb, int type)
{
u8 *data = skb->data, *msgtype;
unsigned int offset = 0;
switch (type) {
case PTP_CLASS_V1_IPV4:
case PTP_CLASS_V2_IPV4:
offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
break;
case PTP_CLASS_V1_IPV6:
case PTP_CLASS_V2_IPV6:
offset = OFF_PTP6;
break;
case PTP_CLASS_V2_L2:
offset = ETH_HLEN;
break;
case PTP_CLASS_V2_VLAN:
offset = ETH_HLEN + VLAN_HLEN;
break;
default:
return 0;
}
if (type & PTP_CLASS_V1)
offset += OFF_PTP_CONTROL;
if (skb->len < offset + 1)
return 0;
msgtype = data + offset;
return (*msgtype & 0xf) == 0;
}
static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
{
u16 *seqid;
unsigned int offset;
u8 *msgtype, *data = skb_mac_header(skb);
/* check sequenceID and messageType; the 12 bit hash of bytes 20-29 is not verified here */
switch (type) {
case PTP_CLASS_V1_IPV4:
case PTP_CLASS_V2_IPV4:
offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
break;
case PTP_CLASS_V1_IPV6:
case PTP_CLASS_V2_IPV6:
offset = OFF_PTP6;
break;
case PTP_CLASS_V2_L2:
offset = ETH_HLEN;
break;
case PTP_CLASS_V2_VLAN:
offset = ETH_HLEN + VLAN_HLEN;
break;
default:
return 0;
}
if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
return 0;
if (unlikely(type & PTP_CLASS_V1))
msgtype = data + offset + OFF_PTP_CONTROL;
else
msgtype = data + offset;
seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
return (rxts->msgtype == (*msgtype & 0xf) &&
rxts->seqid == ntohs(*seqid));
}
static void dp83640_free_clocks(void)
{
struct dp83640_clock *clock;
struct list_head *this, *next;
mutex_lock(&phyter_clocks_lock);
list_for_each_safe(this, next, &phyter_clocks) {
clock = list_entry(this, struct dp83640_clock, list);
if (!list_empty(&clock->phylist)) {
pr_warning("phy list non-empty while unloading");
BUG();
}
list_del(&clock->list);
mutex_destroy(&clock->extreg_lock);
mutex_destroy(&clock->clock_lock);
put_device(&clock->bus->dev);
kfree(clock);
}
mutex_unlock(&phyter_clocks_lock);
}
static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
{
INIT_LIST_HEAD(&clock->list);
clock->bus = bus;
mutex_init(&clock->extreg_lock);
mutex_init(&clock->clock_lock);
INIT_LIST_HEAD(&clock->phylist);
clock->caps.owner = THIS_MODULE;
sprintf(clock->caps.name, "dp83640 timer");
clock->caps.max_adj = 1953124;
clock->caps.n_alarm = 0;
clock->caps.n_ext_ts = N_EXT_TS;
clock->caps.n_per_out = 1;
clock->caps.pps = 0;
clock->caps.adjfreq = ptp_dp83640_adjfreq;
clock->caps.adjtime = ptp_dp83640_adjtime;
clock->caps.gettime = ptp_dp83640_gettime;
clock->caps.settime = ptp_dp83640_settime;
clock->caps.enable = ptp_dp83640_enable;
/*
* Get a reference to this bus instance.
*/
get_device(&bus->dev);
}
static int choose_this_phy(struct dp83640_clock *clock,
struct phy_device *phydev)
{
if (chosen_phy == -1 && !clock->chosen)
return 1;
if (chosen_phy == phydev->addr)
return 1;
return 0;
}
static struct dp83640_clock *dp83640_clock_get(struct dp83640_clock *clock)
{
if (clock)
mutex_lock(&clock->clock_lock);
return clock;
}
/*
* Look up and lock a clock by bus instance.
* If there is no clock for this bus, then create it first.
*/
static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
{
struct dp83640_clock *clock = NULL, *tmp;
struct list_head *this;
mutex_lock(&phyter_clocks_lock);
list_for_each(this, &phyter_clocks) {
tmp = list_entry(this, struct dp83640_clock, list);
if (tmp->bus == bus) {
clock = tmp;
break;
}
}
if (clock)
goto out;
clock = kzalloc(sizeof(struct dp83640_clock), GFP_KERNEL);
if (!clock)
goto out;
dp83640_clock_init(clock, bus);
list_add_tail(&clock->list, &phyter_clocks);
out:
mutex_unlock(&phyter_clocks_lock);
return dp83640_clock_get(clock);
}
static void dp83640_clock_put(struct dp83640_clock *clock)
{
mutex_unlock(&clock->clock_lock);
}
static int dp83640_probe(struct phy_device *phydev)
{
struct dp83640_clock *clock;
struct dp83640_private *dp83640;
int err = -ENOMEM, i;
if (phydev->addr == BROADCAST_ADDR)
return 0;
clock = dp83640_clock_get_bus(phydev->bus);
if (!clock)
goto no_clock;
dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
if (!dp83640)
goto no_memory;
dp83640->phydev = phydev;
INIT_WORK(&dp83640->ts_work, rx_timestamp_work);
INIT_LIST_HEAD(&dp83640->rxts);
INIT_LIST_HEAD(&dp83640->rxpool);
for (i = 0; i < MAX_RXTS; i++)
list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
phydev->priv = dp83640;
spin_lock_init(&dp83640->rx_lock);
skb_queue_head_init(&dp83640->rx_queue);
skb_queue_head_init(&dp83640->tx_queue);
dp83640->clock = clock;
if (choose_this_phy(clock, phydev)) {
clock->chosen = dp83640;
clock->ptp_clock = ptp_clock_register(&clock->caps);
if (IS_ERR(clock->ptp_clock)) {
err = PTR_ERR(clock->ptp_clock);
goto no_register;
}
} else
list_add_tail(&dp83640->list, &clock->phylist);
if (clock->chosen && !list_empty(&clock->phylist))
recalibrate(clock);
else
enable_broadcast(dp83640->phydev, clock->page, 1);
dp83640_clock_put(clock);
return 0;
no_register:
clock->chosen = NULL;
kfree(dp83640);
no_memory:
dp83640_clock_put(clock);
no_clock:
return err;
}
static void dp83640_remove(struct phy_device *phydev)
{
struct dp83640_clock *clock;
struct list_head *this, *next;
struct dp83640_private *tmp, *dp83640 = phydev->priv;
struct sk_buff *skb;
if (phydev->addr == BROADCAST_ADDR)
return;
enable_status_frames(phydev, false);
cancel_work_sync(&dp83640->ts_work);
while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL)
kfree_skb(skb);
while ((skb = skb_dequeue(&dp83640->tx_queue)) != NULL)
skb_complete_tx_timestamp(skb, NULL);
clock = dp83640_clock_get(dp83640->clock);
if (dp83640 == clock->chosen) {
ptp_clock_unregister(clock->ptp_clock);
clock->chosen = NULL;
} else {
list_for_each_safe(this, next, &clock->phylist) {
tmp = list_entry(this, struct dp83640_private, list);
if (tmp == dp83640) {
list_del_init(&tmp->list);
break;
}
}
}
dp83640_clock_put(clock);
kfree(dp83640);
}
static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
{
struct dp83640_private *dp83640 = phydev->priv;
struct hwtstamp_config cfg;
u16 txcfg0, rxcfg0;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
if (cfg.flags) /* reserved for future extensions */
return -EINVAL;
if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
return -ERANGE;
dp83640->hwts_tx_en = cfg.tx_type;
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
dp83640->hwts_rx_en = 0;
dp83640->layer = 0;
dp83640->version = 0;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
dp83640->hwts_rx_en = 1;
dp83640->layer = LAYER4;
dp83640->version = 1;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
dp83640->hwts_rx_en = 1;
dp83640->layer = LAYER4;
dp83640->version = 2;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
dp83640->hwts_rx_en = 1;
dp83640->layer = LAYER2;
dp83640->version = 2;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
dp83640->hwts_rx_en = 1;
dp83640->layer = LAYER4|LAYER2;
dp83640->version = 2;
break;
default:
return -ERANGE;
}
txcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
rxcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
if (dp83640->layer & LAYER2) {
txcfg0 |= TX_L2_EN;
rxcfg0 |= RX_L2_EN;
}
if (dp83640->layer & LAYER4) {
txcfg0 |= TX_IPV6_EN | TX_IPV4_EN;
rxcfg0 |= RX_IPV6_EN | RX_IPV4_EN;
}
if (dp83640->hwts_tx_en)
txcfg0 |= TX_TS_EN;
if (dp83640->hwts_tx_en == HWTSTAMP_TX_ONESTEP_SYNC)
txcfg0 |= SYNC_1STEP | CHK_1STEP;
if (dp83640->hwts_rx_en)
rxcfg0 |= RX_TS_EN;
mutex_lock(&dp83640->clock->extreg_lock);
if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
enable_status_frames(phydev, true);
ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
}
ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
mutex_unlock(&dp83640->clock->extreg_lock);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
static void rx_timestamp_work(struct work_struct *work)
{
struct dp83640_private *dp83640 =
container_of(work, struct dp83640_private, ts_work);
struct list_head *this, *next;
struct rxts *rxts;
struct skb_shared_hwtstamps *shhwtstamps;
struct sk_buff *skb;
unsigned int type;
unsigned long flags;
/* Deliver each deferred packet, with or without a time stamp. */
while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL) {
type = SKB_PTP_TYPE(skb);
spin_lock_irqsave(&dp83640->rx_lock, flags);
list_for_each_safe(this, next, &dp83640->rxts) {
rxts = list_entry(this, struct rxts, list);
if (match(skb, type, rxts)) {
shhwtstamps = skb_hwtstamps(skb);
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
list_del_init(&rxts->list);
list_add(&rxts->list, &dp83640->rxpool);
break;
}
}
spin_unlock_irqrestore(&dp83640->rx_lock, flags);
netif_rx_ni(skb);
}
/* Clear out expired time stamps. */
spin_lock_irqsave(&dp83640->rx_lock, flags);
prune_rx_ts(dp83640);
spin_unlock_irqrestore(&dp83640->rx_lock, flags);
}
static bool dp83640_rxtstamp(struct phy_device *phydev,
struct sk_buff *skb, int type)
{
struct dp83640_private *dp83640 = phydev->priv;
if (!dp83640->hwts_rx_en)
return false;
if (is_status_frame(skb, type)) {
decode_status_frame(dp83640, skb);
kfree_skb(skb);
return true;
}
SKB_PTP_TYPE(skb) = type;
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_work(&dp83640->ts_work);
return true;
}
static void dp83640_txtstamp(struct phy_device *phydev,
struct sk_buff *skb, int type)
{
struct dp83640_private *dp83640 = phydev->priv;
switch (dp83640->hwts_tx_en) {
case HWTSTAMP_TX_ONESTEP_SYNC:
if (is_sync(skb, type)) {
skb_complete_tx_timestamp(skb, NULL);
return;
}
/* fall through */
case HWTSTAMP_TX_ON:
skb_queue_tail(&dp83640->tx_queue, skb);
schedule_work(&dp83640->ts_work);
break;
case HWTSTAMP_TX_OFF:
default:
skb_complete_tx_timestamp(skb, NULL);
break;
}
}
static struct phy_driver dp83640_driver = {
.phy_id = DP83640_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83640",
.features = PHY_BASIC_FEATURES,
.flags = 0,
.probe = dp83640_probe,
.remove = dp83640_remove,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.hwtstamp = dp83640_hwtstamp,
.rxtstamp = dp83640_rxtstamp,
.txtstamp = dp83640_txtstamp,
.driver = {.owner = THIS_MODULE,}
};
static int __init dp83640_init(void)
{
return phy_driver_register(&dp83640_driver);
}
static void __exit dp83640_exit(void)
{
dp83640_free_clocks();
phy_driver_unregister(&dp83640_driver);
}
MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.at>");
MODULE_LICENSE("GPL");
module_init(dp83640_init);
module_exit(dp83640_exit);
static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
{ DP83640_PHY_ID, 0xfffffff0 },
{ }
};
MODULE_DEVICE_TABLE(mdio, dp83640_tbl);
| gpl-2.0 |
oppo-source/N1-4.2-kernel-source | drivers/media/video/cx25840/cx25840-ir.c | 5062 | 36370 | /*
* Driver for the Conexant CX2584x Audio/Video decoder chip and related cores
*
* Integrated Consumer Infrared Controller
*
* Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <media/cx25840.h>
#include <media/rc-core.h>
#include "cx25840-core.h"
static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable integrated IR debug messages");
#define CX25840_IR_REG_BASE 0x200
#define CX25840_IR_CNTRL_REG 0x200
#define CNTRL_WIN_3_3 0x00000000
#define CNTRL_WIN_4_3 0x00000001
#define CNTRL_WIN_3_4 0x00000002
#define CNTRL_WIN_4_4 0x00000003
#define CNTRL_WIN 0x00000003
#define CNTRL_EDG_NONE 0x00000000
#define CNTRL_EDG_FALL 0x00000004
#define CNTRL_EDG_RISE 0x00000008
#define CNTRL_EDG_BOTH 0x0000000C
#define CNTRL_EDG 0x0000000C
#define CNTRL_DMD 0x00000010
#define CNTRL_MOD 0x00000020
#define CNTRL_RFE 0x00000040
#define CNTRL_TFE 0x00000080
#define CNTRL_RXE 0x00000100
#define CNTRL_TXE 0x00000200
#define CNTRL_RIC 0x00000400
#define CNTRL_TIC 0x00000800
#define CNTRL_CPL 0x00001000
#define CNTRL_LBM 0x00002000
#define CNTRL_R 0x00004000
#define CX25840_IR_TXCLK_REG 0x204
#define TXCLK_TCD 0x0000FFFF
#define CX25840_IR_RXCLK_REG 0x208
#define RXCLK_RCD 0x0000FFFF
#define CX25840_IR_CDUTY_REG 0x20C
#define CDUTY_CDC 0x0000000F
#define CX25840_IR_STATS_REG 0x210
#define STATS_RTO 0x00000001
#define STATS_ROR 0x00000002
#define STATS_RBY 0x00000004
#define STATS_TBY 0x00000008
#define STATS_RSR 0x00000010
#define STATS_TSR 0x00000020
#define CX25840_IR_IRQEN_REG 0x214
#define IRQEN_RTE 0x00000001
#define IRQEN_ROE 0x00000002
#define IRQEN_RSE 0x00000010
#define IRQEN_TSE 0x00000020
#define IRQEN_MSK 0x00000033
#define CX25840_IR_FILTR_REG 0x218
#define FILTR_LPF 0x0000FFFF
#define CX25840_IR_FIFO_REG 0x23C
#define FIFO_RXTX 0x0000FFFF
#define FIFO_RXTX_LVL 0x00010000
#define FIFO_RXTX_RTO 0x0001FFFF
#define FIFO_RX_NDV 0x00020000
#define FIFO_RX_DEPTH 8
#define FIFO_TX_DEPTH 8
#define CX25840_VIDCLK_FREQ 108000000 /* 108 MHz, BT.656 */
#define CX25840_IR_REFCLK_FREQ (CX25840_VIDCLK_FREQ / 2)
/*
* We use this union internally for convenience, but callers to tx_write
* and rx_read will be expecting records of type struct ir_raw_event.
* Always ensure the size of this union is dictated by struct ir_raw_event.
*/
union cx25840_ir_fifo_rec {
u32 hw_fifo_data;
struct ir_raw_event ir_core_data;
};
#define CX25840_IR_RX_KFIFO_SIZE (256 * sizeof(union cx25840_ir_fifo_rec))
#define CX25840_IR_TX_KFIFO_SIZE (256 * sizeof(union cx25840_ir_fifo_rec))
struct cx25840_ir_state {
struct i2c_client *c;
struct v4l2_subdev_ir_parameters rx_params;
struct mutex rx_params_lock; /* protects Rx parameter settings cache */
atomic_t rxclk_divider;
atomic_t rx_invert;
struct kfifo rx_kfifo;
spinlock_t rx_kfifo_lock; /* protect Rx data kfifo */
struct v4l2_subdev_ir_parameters tx_params;
struct mutex tx_params_lock; /* protects Tx parameter settings cache */
atomic_t txclk_divider;
};
static inline struct cx25840_ir_state *to_ir_state(struct v4l2_subdev *sd)
{
struct cx25840_state *state = to_state(sd);
return state ? state->ir_state : NULL;
}
/*
* Rx and Tx Clock Divider register computations
*
* Note the largest clock divider value of 0xffff corresponds to:
* (0xffff + 1) * 1000 / 108/2 MHz = 1,213,629.629... ns
* which fits in 21 bits, so we'll use unsigned int for time arguments.
*/
static inline u16 count_to_clock_divider(unsigned int d)
{
if (d > RXCLK_RCD + 1)
d = RXCLK_RCD;
else if (d < 2)
d = 1;
else
d--;
return (u16) d;
}
static inline u16 ns_to_clock_divider(unsigned int ns)
{
return count_to_clock_divider(
DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ / 1000000 * ns, 1000));
}
static inline unsigned int clock_divider_to_ns(unsigned int divider)
{
/* Period of the Rx or Tx clock in ns */
return DIV_ROUND_CLOSEST((divider + 1) * 1000,
CX25840_IR_REFCLK_FREQ / 1000000);
}
static inline u16 carrier_freq_to_clock_divider(unsigned int freq)
{
return count_to_clock_divider(
DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, freq * 16));
}
static inline unsigned int clock_divider_to_carrier_freq(unsigned int divider)
{
return DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, (divider + 1) * 16);
}
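/*
 * Worked example: for a nominal 36 kHz carrier,
 * carrier_freq_to_clock_divider(36000)
 * = count_to_clock_divider(DIV_ROUND_CLOSEST(54000000, 576000))
 * = count_to_clock_divider(94) = 93, and
 * clock_divider_to_carrier_freq(93) = DIV_ROUND_CLOSEST(54000000, 1504)
 * = 35904 Hz, the closest carrier the 16-clocks-per-cycle demodulator
 * can be programmed to expect.
 */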
static inline u16 freq_to_clock_divider(unsigned int freq,
unsigned int rollovers)
{
return count_to_clock_divider(
DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, freq * rollovers));
}
static inline unsigned int clock_divider_to_freq(unsigned int divider,
unsigned int rollovers)
{
return DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ,
(divider + 1) * rollovers);
}
/*
* Low Pass Filter register calculations
*
* Note the largest count value of 0xffff corresponds to:
* 0xffff * 1000 / 108/2 MHz = 1,213,611.11... ns
* which fits in 21 bits, so we'll use unsigned int for time arguments.
*/
static inline u16 count_to_lpf_count(unsigned int d)
{
if (d > FILTR_LPF)
d = FILTR_LPF;
else if (d < 4)
d = 0;
return (u16) d;
}
static inline u16 ns_to_lpf_count(unsigned int ns)
{
return count_to_lpf_count(
DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ / 1000000 * ns, 1000));
}
static inline unsigned int lpf_count_to_ns(unsigned int count)
{
/* Duration of the Low Pass Filter rejection window in ns */
return DIV_ROUND_CLOSEST(count * 1000,
CX25840_IR_REFCLK_FREQ / 1000000);
}
static inline unsigned int lpf_count_to_us(unsigned int count)
{
/* Duration of the Low Pass Filter rejection window in us */
return DIV_ROUND_CLOSEST(count, CX25840_IR_REFCLK_FREQ / 1000000);
}
/*
* FIFO register pulse width count computations
*/
static u32 clock_divider_to_resolution(u16 divider)
{
/*
* Resolution is the duration of 1 tick of the readable portion of
* the pulse width counter as read from the FIFO. The two lsb's are
* not readable, hence the << 2. This function returns ns.
*/
return DIV_ROUND_CLOSEST((1 << 2) * ((u32) divider + 1) * 1000,
CX25840_IR_REFCLK_FREQ / 1000000);
}
static u64 pulse_width_count_to_ns(u16 count, u16 divider)
{
u64 n;
u32 rem;
/*
* The 2 lsb's of the pulse width timer count are not readable, hence
* the (count << 2) | 0x3
*/
n = (((u64) count << 2) | 0x3) * (divider + 1) * 1000; /* millicycles */
rem = do_div(n, CX25840_IR_REFCLK_FREQ / 1000000); /* / MHz => ns */
if (rem >= CX25840_IR_REFCLK_FREQ / 1000000 / 2)
n++;
return n;
}
#if 0
/* Keep as we will need this for Transmit functionality */
static u16 ns_to_pulse_width_count(u32 ns, u16 divider)
{
u64 n;
u32 d;
u32 rem;
/*
* The 2 lsb's of the pulse width timer count are not accessible, hence
* the (1 << 2)
*/
n = ((u64) ns) * CX25840_IR_REFCLK_FREQ / 1000000; /* millicycles */
d = (1 << 2) * ((u32) divider + 1) * 1000; /* millicycles/count */
rem = do_div(n, d);
if (rem >= d / 2)
n++;
if (n > FIFO_RXTX)
n = FIFO_RXTX;
else if (n == 0)
n = 1;
return (u16) n;
}
#endif
static unsigned int pulse_width_count_to_us(u16 count, u16 divider)
{
u64 n;
u32 rem;
/*
* The 2 lsb's of the pulse width timer count are not readable, hence
* the (count << 2) | 0x3
*/
n = (((u64) count << 2) | 0x3) * (divider + 1); /* cycles */
rem = do_div(n, CX25840_IR_REFCLK_FREQ / 1000000); /* / MHz => us */
if (rem >= CX25840_IR_REFCLK_FREQ / 1000000 / 2)
n++;
return (unsigned int) n;
}
/*
* Pulse Clocks computations: Combined Pulse Width Count & Rx Clock Counts
*
* The total pulse clock count is an 18 bit pulse width timer count as the most
* significant part and (up to) 16 bit clock divider count as a modulus.
* When the Rx clock divider ticks down to 0, it increments the 18 bit pulse
* width timer count's least significant bit.
*/
static u64 ns_to_pulse_clocks(u32 ns)
{
u64 clocks;
u32 rem;
clocks = CX25840_IR_REFCLK_FREQ / 1000000 * (u64) ns; /* millicycles */
rem = do_div(clocks, 1000); /* /1000 = cycles */
if (rem >= 1000 / 2)
clocks++;
return clocks;
}
static u16 pulse_clocks_to_clock_divider(u64 count)
{
u32 rem;
rem = do_div(count, (FIFO_RXTX << 2) | 0x3);
/* net result needs to be rounded down and decremented by 1 */
if (count > RXCLK_RCD + 1)
count = RXCLK_RCD;
else if (count < 2)
count = 1;
else
count--;
return (u16) count;
}
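/*
 * Putting the helpers above together: a requested maximum pulse width
 * in ns is first converted to total reference clock cycles with
 * ns_to_pulse_clocks(), then divided by the full 18 bit pulse width
 * count to find the smallest clock divider that still spans it with
 * pulse_clocks_to_clock_divider().
 */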
/*
* IR Control Register helpers
*/
enum tx_fifo_watermark {
TX_FIFO_HALF_EMPTY = 0,
TX_FIFO_EMPTY = CNTRL_TIC,
};
enum rx_fifo_watermark {
RX_FIFO_HALF_FULL = 0,
RX_FIFO_NOT_EMPTY = CNTRL_RIC,
};
static inline void control_tx_irq_watermark(struct i2c_client *c,
enum tx_fifo_watermark level)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_TIC, level);
}
static inline void control_rx_irq_watermark(struct i2c_client *c,
enum rx_fifo_watermark level)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_RIC, level);
}
static inline void control_tx_enable(struct i2c_client *c, bool enable)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~(CNTRL_TXE | CNTRL_TFE),
enable ? (CNTRL_TXE | CNTRL_TFE) : 0);
}
static inline void control_rx_enable(struct i2c_client *c, bool enable)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~(CNTRL_RXE | CNTRL_RFE),
enable ? (CNTRL_RXE | CNTRL_RFE) : 0);
}
static inline void control_tx_modulation_enable(struct i2c_client *c,
bool enable)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_MOD,
enable ? CNTRL_MOD : 0);
}
static inline void control_rx_demodulation_enable(struct i2c_client *c,
bool enable)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_DMD,
enable ? CNTRL_DMD : 0);
}
static inline void control_rx_s_edge_detection(struct i2c_client *c,
u32 edge_types)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_EDG_BOTH,
edge_types & CNTRL_EDG_BOTH);
}
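/*
 * The demodulator nominally expects 16 Rx clocks per carrier cycle; the
 * window bits set below tolerate an edge arriving 3 or 4 clocks early
 * or late, which bounds the usable carrier range to 16/(16 +/- 3..4)
 * of the nominal frequency.
 */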
static void control_rx_s_carrier_window(struct i2c_client *c,
unsigned int carrier,
unsigned int *carrier_range_low,
unsigned int *carrier_range_high)
{
u32 v;
unsigned int c16 = carrier * 16;
if (*carrier_range_low < DIV_ROUND_CLOSEST(c16, 16 + 3)) {
v = CNTRL_WIN_3_4;
*carrier_range_low = DIV_ROUND_CLOSEST(c16, 16 + 4);
} else {
v = CNTRL_WIN_3_3;
*carrier_range_low = DIV_ROUND_CLOSEST(c16, 16 + 3);
}
if (*carrier_range_high > DIV_ROUND_CLOSEST(c16, 16 - 3)) {
v |= CNTRL_WIN_4_3;
*carrier_range_high = DIV_ROUND_CLOSEST(c16, 16 - 4);
} else {
v |= CNTRL_WIN_3_3;
*carrier_range_high = DIV_ROUND_CLOSEST(c16, 16 - 3);
}
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_WIN, v);
}
static inline void control_tx_polarity_invert(struct i2c_client *c,
bool invert)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_CPL,
invert ? CNTRL_CPL : 0);
}
/*
* IR Rx & Tx Clock Register helpers
*/
static unsigned int txclk_tx_s_carrier(struct i2c_client *c,
unsigned int freq,
u16 *divider)
{
*divider = carrier_freq_to_clock_divider(freq);
cx25840_write4(c, CX25840_IR_TXCLK_REG, *divider);
return clock_divider_to_carrier_freq(*divider);
}
static unsigned int rxclk_rx_s_carrier(struct i2c_client *c,
unsigned int freq,
u16 *divider)
{
*divider = carrier_freq_to_clock_divider(freq);
cx25840_write4(c, CX25840_IR_RXCLK_REG, *divider);
return clock_divider_to_carrier_freq(*divider);
}
static u32 txclk_tx_s_max_pulse_width(struct i2c_client *c, u32 ns,
u16 *divider)
{
u64 pulse_clocks;
if (ns > IR_MAX_DURATION)
ns = IR_MAX_DURATION;
pulse_clocks = ns_to_pulse_clocks(ns);
*divider = pulse_clocks_to_clock_divider(pulse_clocks);
cx25840_write4(c, CX25840_IR_TXCLK_REG, *divider);
return (u32) pulse_width_count_to_ns(FIFO_RXTX, *divider);
}
static u32 rxclk_rx_s_max_pulse_width(struct i2c_client *c, u32 ns,
u16 *divider)
{
u64 pulse_clocks;
if (ns > IR_MAX_DURATION)
ns = IR_MAX_DURATION;
pulse_clocks = ns_to_pulse_clocks(ns);
*divider = pulse_clocks_to_clock_divider(pulse_clocks);
cx25840_write4(c, CX25840_IR_RXCLK_REG, *divider);
return (u32) pulse_width_count_to_ns(FIFO_RXTX, *divider);
}
/*
* IR Tx Carrier Duty Cycle register helpers
*/
static unsigned int cduty_tx_s_duty_cycle(struct i2c_client *c,
unsigned int duty_cycle)
{
u32 n;
n = DIV_ROUND_CLOSEST(duty_cycle * 100, 625); /* 16ths of 100% */
if (n != 0)
n--;
if (n > 15)
n = 15;
cx25840_write4(c, CX25840_IR_CDUTY_REG, n);
return DIV_ROUND_CLOSEST((n + 1) * 100, 16);
}
/*
* IR Filter Register helpers
*/
static u32 filter_rx_s_min_width(struct i2c_client *c, u32 min_width_ns)
{
u32 count = ns_to_lpf_count(min_width_ns);
cx25840_write4(c, CX25840_IR_FILTR_REG, count);
return lpf_count_to_ns(count);
}
/*
* IR IRQ Enable Register helpers
*/
static inline void irqenable_rx(struct v4l2_subdev *sd, u32 mask)
{
struct cx25840_state *state = to_state(sd);
if (is_cx23885(state) || is_cx23887(state))
mask ^= IRQEN_MSK;
mask &= (IRQEN_RTE | IRQEN_ROE | IRQEN_RSE);
cx25840_and_or4(state->c, CX25840_IR_IRQEN_REG,
~(IRQEN_RTE | IRQEN_ROE | IRQEN_RSE), mask);
}
static inline void irqenable_tx(struct v4l2_subdev *sd, u32 mask)
{
struct cx25840_state *state = to_state(sd);
if (is_cx23885(state) || is_cx23887(state))
mask ^= IRQEN_MSK;
mask &= IRQEN_TSE;
cx25840_and_or4(state->c, CX25840_IR_IRQEN_REG, ~IRQEN_TSE, mask);
}
/*
* V4L2 Subdevice IR Ops
*/
int cx25840_ir_irq_handler(struct v4l2_subdev *sd, u32 status, bool *handled)
{
struct cx25840_state *state = to_state(sd);
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c = NULL;
unsigned long flags;
union cx25840_ir_fifo_rec rx_data[FIFO_RX_DEPTH];
unsigned int i, j, k;
u32 events, v;
int tsr, rsr, rto, ror, tse, rse, rte, roe, kror;
u32 cntrl, irqen, stats;
*handled = false;
if (ir_state == NULL)
return -ENODEV;
c = ir_state->c;
/* Only support the IR controller for the CX2388[57] AV Core for now */
if (!(is_cx23885(state) || is_cx23887(state)))
return -ENODEV;
cntrl = cx25840_read4(c, CX25840_IR_CNTRL_REG);
irqen = cx25840_read4(c, CX25840_IR_IRQEN_REG);
if (is_cx23885(state) || is_cx23887(state))
irqen ^= IRQEN_MSK;
stats = cx25840_read4(c, CX25840_IR_STATS_REG);
tsr = stats & STATS_TSR; /* Tx FIFO Service Request */
rsr = stats & STATS_RSR; /* Rx FIFO Service Request */
rto = stats & STATS_RTO; /* Rx Pulse Width Timer Time Out */
ror = stats & STATS_ROR; /* Rx FIFO Over Run */
tse = irqen & IRQEN_TSE; /* Tx FIFO Service Request IRQ Enable */
rse = irqen & IRQEN_RSE; /* Rx FIFO Service Request IRQ Enable */
rte = irqen & IRQEN_RTE; /* Rx Pulse Width Timer Time Out IRQ Enable */
roe = irqen & IRQEN_ROE; /* Rx FIFO Over Run IRQ Enable */
v4l2_dbg(2, ir_debug, sd, "IR IRQ Status: %s %s %s %s %s %s\n",
tsr ? "tsr" : " ", rsr ? "rsr" : " ",
rto ? "rto" : " ", ror ? "ror" : " ",
stats & STATS_TBY ? "tby" : " ",
stats & STATS_RBY ? "rby" : " ");
v4l2_dbg(2, ir_debug, sd, "IR IRQ Enables: %s %s %s %s\n",
tse ? "tse" : " ", rse ? "rse" : " ",
rte ? "rte" : " ", roe ? "roe" : " ");
/*
* Transmitter interrupt service
*/
if (tse && tsr) {
/*
* TODO:
* Check the watermark threshold setting
* Pull FIFO_TX_DEPTH or FIFO_TX_DEPTH/2 entries from tx_kfifo
* Push the data to the hardware FIFO.
* If there was nothing more to send in the tx_kfifo, disable
* the TSR IRQ and notify the v4l2_device.
* If there was something in the tx_kfifo, check the tx_kfifo
* level and notify the v4l2_device, if it is low.
*/
/* For now, inhibit TSR interrupt until Tx is implemented */
irqenable_tx(sd, 0);
events = V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ;
v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_TX_NOTIFY, &events);
*handled = true;
}
/*
* Receiver interrupt service
*/
kror = 0;
if ((rse && rsr) || (rte && rto)) {
/*
* Receive data on RSR to clear the STATS_RSR.
* Receive data on RTO, since we may not have yet hit the RSR
* watermark when we receive the RTO.
*/
for (i = 0, v = FIFO_RX_NDV;
(v & FIFO_RX_NDV) && !kror; i = 0) {
for (j = 0;
(v & FIFO_RX_NDV) && j < FIFO_RX_DEPTH; j++) {
v = cx25840_read4(c, CX25840_IR_FIFO_REG);
rx_data[i].hw_fifo_data = v & ~FIFO_RX_NDV;
i++;
}
if (i == 0)
break;
j = i * sizeof(union cx25840_ir_fifo_rec);
k = kfifo_in_locked(&ir_state->rx_kfifo,
(unsigned char *) rx_data, j,
&ir_state->rx_kfifo_lock);
if (k != j)
kror++; /* rx_kfifo over run */
}
*handled = true;
}
events = 0;
v = 0;
if (kror) {
events |= V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN;
v4l2_err(sd, "IR receiver software FIFO overrun\n");
}
if (roe && ror) {
/*
* The RX FIFO Enable (CNTRL_RFE) must be toggled to clear
* the Rx FIFO Over Run status (STATS_ROR)
*/
v |= CNTRL_RFE;
events |= V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN;
v4l2_err(sd, "IR receiver hardware FIFO overrun\n");
}
if (rte && rto) {
/*
* The IR Receiver Enable (CNTRL_RXE) must be toggled to clear
* the Rx Pulse Width Timer Time Out (STATS_RTO)
*/
v |= CNTRL_RXE;
events |= V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED;
}
if (v) {
/* Clear STATS_ROR & STATS_RTO as needed by resetting the hardware */
cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl & ~v);
cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl);
*handled = true;
}
spin_lock_irqsave(&ir_state->rx_kfifo_lock, flags);
if (kfifo_len(&ir_state->rx_kfifo) >= CX25840_IR_RX_KFIFO_SIZE / 2)
events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ;
spin_unlock_irqrestore(&ir_state->rx_kfifo_lock, flags);
if (events)
v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events);
return 0;
}
/* Receiver */
static int cx25840_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
ssize_t *num)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
bool invert;
u16 divider;
unsigned int i, n;
union cx25840_ir_fifo_rec *p;
unsigned u, v, w;
if (ir_state == NULL)
return -ENODEV;
invert = (bool) atomic_read(&ir_state->rx_invert);
divider = (u16) atomic_read(&ir_state->rxclk_divider);
n = count / sizeof(union cx25840_ir_fifo_rec)
* sizeof(union cx25840_ir_fifo_rec);
if (n == 0) {
*num = 0;
return 0;
}
n = kfifo_out_locked(&ir_state->rx_kfifo, buf, n,
&ir_state->rx_kfifo_lock);
n /= sizeof(union cx25840_ir_fifo_rec);
*num = n * sizeof(union cx25840_ir_fifo_rec);
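/*
 * Convert each raw hardware FIFO record in place to the
 * struct ir_raw_event layout expected by callers: the level bit
 * becomes mark/space (optionally inverted), the 16 bit count scales
 * to a duration in ns, and an all ones record marks a receive timeout.
 */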
for (p = (union cx25840_ir_fifo_rec *) buf, i = 0; i < n; p++, i++) {
if ((p->hw_fifo_data & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) {
/* Assume RTO was because of no IR light input */
u = 0;
w = 1;
} else {
u = (p->hw_fifo_data & FIFO_RXTX_LVL) ? 1 : 0;
if (invert)
u = u ? 0 : 1;
w = 0;
}
v = (unsigned) pulse_width_count_to_ns(
(u16) (p->hw_fifo_data & FIFO_RXTX), divider);
if (v > IR_MAX_DURATION)
v = IR_MAX_DURATION;
init_ir_raw_event(&p->ir_core_data);
p->ir_core_data.pulse = u;
p->ir_core_data.duration = v;
p->ir_core_data.timeout = w;
v4l2_dbg(2, ir_debug, sd, "rx read: %10u ns %s %s\n",
v, u ? "mark" : "space", w ? "(timed out)" : "");
if (w)
v4l2_dbg(2, ir_debug, sd, "rx read: end of rx\n");
}
return 0;
}
static int cx25840_ir_rx_g_parameters(struct v4l2_subdev *sd,
struct v4l2_subdev_ir_parameters *p)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
if (ir_state == NULL)
return -ENODEV;
mutex_lock(&ir_state->rx_params_lock);
memcpy(p, &ir_state->rx_params,
sizeof(struct v4l2_subdev_ir_parameters));
mutex_unlock(&ir_state->rx_params_lock);
return 0;
}
static int cx25840_ir_rx_shutdown(struct v4l2_subdev *sd)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c;
if (ir_state == NULL)
return -ENODEV;
c = ir_state->c;
mutex_lock(&ir_state->rx_params_lock);
/* Disable or slow down all IR Rx circuits and counters */
irqenable_rx(sd, 0);
control_rx_enable(c, false);
control_rx_demodulation_enable(c, false);
control_rx_s_edge_detection(c, CNTRL_EDG_NONE);
filter_rx_s_min_width(c, 0);
cx25840_write4(c, CX25840_IR_RXCLK_REG, RXCLK_RCD);
ir_state->rx_params.shutdown = true;
mutex_unlock(&ir_state->rx_params_lock);
return 0;
}
static int cx25840_ir_rx_s_parameters(struct v4l2_subdev *sd,
struct v4l2_subdev_ir_parameters *p)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c;
struct v4l2_subdev_ir_parameters *o;
u16 rxclk_divider;
if (ir_state == NULL)
return -ENODEV;
if (p->shutdown)
return cx25840_ir_rx_shutdown(sd);
if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH)
return -ENOSYS;
c = ir_state->c;
o = &ir_state->rx_params;
mutex_lock(&ir_state->rx_params_lock);
o->shutdown = p->shutdown;
p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
o->mode = p->mode;
p->bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec);
o->bytes_per_data_element = p->bytes_per_data_element;
/* Before we tweak the hardware, we have to disable the receiver */
irqenable_rx(sd, 0);
control_rx_enable(c, false);
control_rx_demodulation_enable(c, p->modulation);
o->modulation = p->modulation;
if (p->modulation) {
p->carrier_freq = rxclk_rx_s_carrier(c, p->carrier_freq,
&rxclk_divider);
o->carrier_freq = p->carrier_freq;
p->duty_cycle = 50;
o->duty_cycle = p->duty_cycle;
control_rx_s_carrier_window(c, p->carrier_freq,
&p->carrier_range_lower,
&p->carrier_range_upper);
o->carrier_range_lower = p->carrier_range_lower;
o->carrier_range_upper = p->carrier_range_upper;
p->max_pulse_width =
(u32) pulse_width_count_to_ns(FIFO_RXTX, rxclk_divider);
} else {
p->max_pulse_width =
rxclk_rx_s_max_pulse_width(c, p->max_pulse_width,
&rxclk_divider);
}
o->max_pulse_width = p->max_pulse_width;
atomic_set(&ir_state->rxclk_divider, rxclk_divider);
p->noise_filter_min_width =
filter_rx_s_min_width(c, p->noise_filter_min_width);
o->noise_filter_min_width = p->noise_filter_min_width;
p->resolution = clock_divider_to_resolution(rxclk_divider);
o->resolution = p->resolution;
/* FIXME - make this dependent on resolution for better performance */
control_rx_irq_watermark(c, RX_FIFO_HALF_FULL);
control_rx_s_edge_detection(c, CNTRL_EDG_BOTH);
o->invert_level = p->invert_level;
atomic_set(&ir_state->rx_invert, p->invert_level);
o->interrupt_enable = p->interrupt_enable;
o->enable = p->enable;
if (p->enable) {
unsigned long flags;
spin_lock_irqsave(&ir_state->rx_kfifo_lock, flags);
kfifo_reset(&ir_state->rx_kfifo);
spin_unlock_irqrestore(&ir_state->rx_kfifo_lock, flags);
if (p->interrupt_enable)
irqenable_rx(sd, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE);
control_rx_enable(c, p->enable);
}
mutex_unlock(&ir_state->rx_params_lock);
return 0;
}
/* Transmitter */
static int cx25840_ir_tx_write(struct v4l2_subdev *sd, u8 *buf, size_t count,
ssize_t *num)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c;
if (ir_state == NULL)
return -ENODEV;
c = ir_state->c;
#if 0
/*
* FIXME - the code below is an incomplete and untested sketch of what
* may need to be done. The critical part is to get 4 (or 8) pulses
* from the tx_kfifo, or converted from ns to the proper units from the
* input, and push them off to the hardware Tx FIFO right away, if the
* HW TX fifo needs service. The rest can be pushed to the tx_kfifo in
* a less critical timeframe. Also watch out for overrunning the
* tx_kfifo - don't let it happen, and let the caller know if not all
* of the pulses were written.
*/
u32 *ns_pulse = (u32 *) buf;
unsigned int n;
u32 fifo_pulse[FIFO_TX_DEPTH];
u32 mark;
/* Compute how much we can fit in the tx kfifo */
n = CX25840_IR_TX_KFIFO_SIZE - kfifo_len(ir_state->tx_kfifo);
n = min(n, (unsigned int) count);
n /= sizeof(u32);
/* FIXME - turn on Tx Fifo service interrupt
* check hardware fifo level, and other stuff
*/
for (i = 0; i < n; ) {
for (j = 0; j < FIFO_TX_DEPTH / 2 && i < n; j++) {
mark = ns_pulse[i] & LEVEL_MASK;
fifo_pulse[j] = ns_to_pulse_width_count(
ns_pulse[i] &
~LEVEL_MASK,
ir_state->txclk_divider);
if (mark)
fifo_pulse[j] &= FIFO_RXTX_LVL;
i++;
}
kfifo_put(ir_state->tx_kfifo, (u8 *) fifo_pulse,
j * sizeof(u32));
}
*num = n * sizeof(u32);
#else
/* For now enable the Tx FIFO Service interrupt & pretend we did work */
irqenable_tx(sd, IRQEN_TSE);
*num = count;
#endif
return 0;
}
static int cx25840_ir_tx_g_parameters(struct v4l2_subdev *sd,
struct v4l2_subdev_ir_parameters *p)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
if (ir_state == NULL)
return -ENODEV;
mutex_lock(&ir_state->tx_params_lock);
memcpy(p, &ir_state->tx_params,
sizeof(struct v4l2_subdev_ir_parameters));
mutex_unlock(&ir_state->tx_params_lock);
return 0;
}
static int cx25840_ir_tx_shutdown(struct v4l2_subdev *sd)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c;
if (ir_state == NULL)
return -ENODEV;
c = ir_state->c;
mutex_lock(&ir_state->tx_params_lock);
/* Disable or slow down all IR Tx circuits and counters */
irqenable_tx(sd, 0);
control_tx_enable(c, false);
control_tx_modulation_enable(c, false);
cx25840_write4(c, CX25840_IR_TXCLK_REG, TXCLK_TCD);
ir_state->tx_params.shutdown = true;
mutex_unlock(&ir_state->tx_params_lock);
return 0;
}
static int cx25840_ir_tx_s_parameters(struct v4l2_subdev *sd,
struct v4l2_subdev_ir_parameters *p)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c;
struct v4l2_subdev_ir_parameters *o;
u16 txclk_divider;
if (ir_state == NULL)
return -ENODEV;
if (p->shutdown)
return cx25840_ir_tx_shutdown(sd);
if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH)
return -ENOSYS;
c = ir_state->c;
o = &ir_state->tx_params;
mutex_lock(&ir_state->tx_params_lock);
o->shutdown = p->shutdown;
p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
o->mode = p->mode;
p->bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec);
o->bytes_per_data_element = p->bytes_per_data_element;
/* Before we tweak the hardware, we have to disable the transmitter */
irqenable_tx(sd, 0);
control_tx_enable(c, false);
control_tx_modulation_enable(c, p->modulation);
o->modulation = p->modulation;
if (p->modulation) {
p->carrier_freq = txclk_tx_s_carrier(c, p->carrier_freq,
&txclk_divider);
o->carrier_freq = p->carrier_freq;
p->duty_cycle = cduty_tx_s_duty_cycle(c, p->duty_cycle);
o->duty_cycle = p->duty_cycle;
p->max_pulse_width =
(u32) pulse_width_count_to_ns(FIFO_RXTX, txclk_divider);
} else {
p->max_pulse_width =
txclk_tx_s_max_pulse_width(c, p->max_pulse_width,
&txclk_divider);
}
o->max_pulse_width = p->max_pulse_width;
atomic_set(&ir_state->txclk_divider, txclk_divider);
p->resolution = clock_divider_to_resolution(txclk_divider);
o->resolution = p->resolution;
/* FIXME - make this dependent on resolution for better performance */
control_tx_irq_watermark(c, TX_FIFO_HALF_EMPTY);
control_tx_polarity_invert(c, p->invert_carrier_sense);
o->invert_carrier_sense = p->invert_carrier_sense;
/*
* FIXME: we don't have hardware help for IO pin level inversion
* here like we have on the CX23888.
* Act on this with some mix of logical inversion of data levels,
* carrier polarity, and carrier duty cycle.
*/
o->invert_level = p->invert_level;
o->interrupt_enable = p->interrupt_enable;
o->enable = p->enable;
if (p->enable) {
/* reset tx_fifo here */
if (p->interrupt_enable)
irqenable_tx(sd, IRQEN_TSE);
control_tx_enable(c, p->enable);
}
mutex_unlock(&ir_state->tx_params_lock);
return 0;
}
/*
* V4L2 Subdevice Core Ops support
*/
int cx25840_ir_log_status(struct v4l2_subdev *sd)
{
struct cx25840_state *state = to_state(sd);
struct i2c_client *c = state->c;
char *s;
int i, j;
u32 cntrl, txclk, rxclk, cduty, stats, irqen, filtr;
/* The CX23888 chip doesn't have an IR controller on the A/V core */
if (is_cx23888(state))
return 0;
cntrl = cx25840_read4(c, CX25840_IR_CNTRL_REG);
txclk = cx25840_read4(c, CX25840_IR_TXCLK_REG) & TXCLK_TCD;
rxclk = cx25840_read4(c, CX25840_IR_RXCLK_REG) & RXCLK_RCD;
cduty = cx25840_read4(c, CX25840_IR_CDUTY_REG) & CDUTY_CDC;
stats = cx25840_read4(c, CX25840_IR_STATS_REG);
irqen = cx25840_read4(c, CX25840_IR_IRQEN_REG);
if (is_cx23885(state) || is_cx23887(state))
irqen ^= IRQEN_MSK;
filtr = cx25840_read4(c, CX25840_IR_FILTR_REG) & FILTR_LPF;
v4l2_info(sd, "IR Receiver:\n");
v4l2_info(sd, "\tEnabled: %s\n",
cntrl & CNTRL_RXE ? "yes" : "no");
v4l2_info(sd, "\tDemodulation from a carrier: %s\n",
cntrl & CNTRL_DMD ? "enabled" : "disabled");
v4l2_info(sd, "\tFIFO: %s\n",
cntrl & CNTRL_RFE ? "enabled" : "disabled");
switch (cntrl & CNTRL_EDG) {
case CNTRL_EDG_NONE:
s = "disabled";
break;
case CNTRL_EDG_FALL:
s = "falling edge";
break;
case CNTRL_EDG_RISE:
s = "rising edge";
break;
case CNTRL_EDG_BOTH:
s = "rising & falling edges";
break;
default:
s = "??? edge";
break;
}
v4l2_info(sd, "\tPulse timers' start/stop trigger: %s\n", s);
v4l2_info(sd, "\tFIFO data on pulse timer overflow: %s\n",
cntrl & CNTRL_R ? "not loaded" : "overflow marker");
v4l2_info(sd, "\tFIFO interrupt watermark: %s\n",
cntrl & CNTRL_RIC ? "not empty" : "half full or greater");
v4l2_info(sd, "\tLoopback mode: %s\n",
cntrl & CNTRL_LBM ? "loopback active" : "normal receive");
if (cntrl & CNTRL_DMD) {
v4l2_info(sd, "\tExpected carrier (16 clocks): %u Hz\n",
clock_divider_to_carrier_freq(rxclk));
switch (cntrl & CNTRL_WIN) {
case CNTRL_WIN_3_3:
i = 3;
j = 3;
break;
case CNTRL_WIN_4_3:
i = 4;
j = 3;
break;
case CNTRL_WIN_3_4:
i = 3;
j = 4;
break;
case CNTRL_WIN_4_4:
i = 4;
j = 4;
break;
default:
i = 0;
j = 0;
break;
}
v4l2_info(sd, "\tNext carrier edge window: 16 clocks "
"-%1d/+%1d, %u to %u Hz\n", i, j,
clock_divider_to_freq(rxclk, 16 + j),
clock_divider_to_freq(rxclk, 16 - i));
}
v4l2_info(sd, "\tMax measurable pulse width: %u us, %llu ns\n",
pulse_width_count_to_us(FIFO_RXTX, rxclk),
pulse_width_count_to_ns(FIFO_RXTX, rxclk));
v4l2_info(sd, "\tLow pass filter: %s\n",
filtr ? "enabled" : "disabled");
if (filtr)
v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, "
"%u ns\n",
lpf_count_to_us(filtr),
lpf_count_to_ns(filtr));
v4l2_info(sd, "\tPulse width timer timed-out: %s\n",
stats & STATS_RTO ? "yes" : "no");
v4l2_info(sd, "\tPulse width timer time-out intr: %s\n",
irqen & IRQEN_RTE ? "enabled" : "disabled");
v4l2_info(sd, "\tFIFO overrun: %s\n",
stats & STATS_ROR ? "yes" : "no");
v4l2_info(sd, "\tFIFO overrun interrupt: %s\n",
irqen & IRQEN_ROE ? "enabled" : "disabled");
v4l2_info(sd, "\tBusy: %s\n",
stats & STATS_RBY ? "yes" : "no");
v4l2_info(sd, "\tFIFO service requested: %s\n",
stats & STATS_RSR ? "yes" : "no");
v4l2_info(sd, "\tFIFO service request interrupt: %s\n",
irqen & IRQEN_RSE ? "enabled" : "disabled");
v4l2_info(sd, "IR Transmitter:\n");
v4l2_info(sd, "\tEnabled: %s\n",
cntrl & CNTRL_TXE ? "yes" : "no");
v4l2_info(sd, "\tModulation onto a carrier: %s\n",
cntrl & CNTRL_MOD ? "enabled" : "disabled");
v4l2_info(sd, "\tFIFO: %s\n",
cntrl & CNTRL_TFE ? "enabled" : "disabled");
v4l2_info(sd, "\tFIFO interrupt watermark: %s\n",
cntrl & CNTRL_TIC ? "not empty" : "half full or less");
v4l2_info(sd, "\tCarrier polarity: %s\n",
cntrl & CNTRL_CPL ? "space:burst mark:noburst"
: "space:noburst mark:burst");
if (cntrl & CNTRL_MOD) {
v4l2_info(sd, "\tCarrier (16 clocks): %u Hz\n",
clock_divider_to_carrier_freq(txclk));
v4l2_info(sd, "\tCarrier duty cycle: %2u/16\n",
cduty + 1);
}
v4l2_info(sd, "\tMax pulse width: %u us, %llu ns\n",
pulse_width_count_to_us(FIFO_RXTX, txclk),
pulse_width_count_to_ns(FIFO_RXTX, txclk));
v4l2_info(sd, "\tBusy: %s\n",
stats & STATS_TBY ? "yes" : "no");
v4l2_info(sd, "\tFIFO service requested: %s\n",
stats & STATS_TSR ? "yes" : "no");
v4l2_info(sd, "\tFIFO service request interrupt: %s\n",
irqen & IRQEN_TSE ? "enabled" : "disabled");
return 0;
}
const struct v4l2_subdev_ir_ops cx25840_ir_ops = {
.rx_read = cx25840_ir_rx_read,
.rx_g_parameters = cx25840_ir_rx_g_parameters,
.rx_s_parameters = cx25840_ir_rx_s_parameters,
.tx_write = cx25840_ir_tx_write,
.tx_g_parameters = cx25840_ir_tx_g_parameters,
.tx_s_parameters = cx25840_ir_tx_s_parameters,
};
static const struct v4l2_subdev_ir_parameters default_rx_params = {
.bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec),
.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
.enable = false,
.interrupt_enable = false,
.shutdown = true,
.modulation = true,
.carrier_freq = 36000, /* 36 kHz - RC-5 and RC-6 carrier */
/* RC-5: 666,667 ns = 1/36 kHz * 32 cycles * 1 mark * 0.75 */
/* RC-6: 333,333 ns = 1/36 kHz * 16 cycles * 1 mark * 0.75 */
.noise_filter_min_width = 333333, /* ns */
.carrier_range_lower = 35000,
.carrier_range_upper = 37000,
.invert_level = false,
};
static const struct v4l2_subdev_ir_parameters default_tx_params = {
.bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec),
.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
.enable = false,
.interrupt_enable = false,
.shutdown = true,
.modulation = true,
.carrier_freq = 36000, /* 36 kHz - RC-5 carrier */
.duty_cycle = 25, /* 25 % - RC-5 carrier */
.invert_level = false,
.invert_carrier_sense = false,
};
int cx25840_ir_probe(struct v4l2_subdev *sd)
{
struct cx25840_state *state = to_state(sd);
struct cx25840_ir_state *ir_state;
struct v4l2_subdev_ir_parameters default_params;
/* Only init the IR controller for the CX2388[57] AV Core for now */
if (!(is_cx23885(state) || is_cx23887(state)))
return 0;
ir_state = kzalloc(sizeof(struct cx25840_ir_state), GFP_KERNEL);
if (ir_state == NULL)
return -ENOMEM;
spin_lock_init(&ir_state->rx_kfifo_lock);
if (kfifo_alloc(&ir_state->rx_kfifo,
CX25840_IR_RX_KFIFO_SIZE, GFP_KERNEL)) {
kfree(ir_state);
return -ENOMEM;
}
ir_state->c = state->c;
state->ir_state = ir_state;
/* Ensure no interrupts arrive yet */
if (is_cx23885(state) || is_cx23887(state))
cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, IRQEN_MSK);
else
cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, 0);
mutex_init(&ir_state->rx_params_lock);
memcpy(&default_params, &default_rx_params,
sizeof(struct v4l2_subdev_ir_parameters));
v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params);
mutex_init(&ir_state->tx_params_lock);
memcpy(&default_params, &default_tx_params,
sizeof(struct v4l2_subdev_ir_parameters));
v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
return 0;
}
int cx25840_ir_remove(struct v4l2_subdev *sd)
{
struct cx25840_state *state = to_state(sd);
struct cx25840_ir_state *ir_state = to_ir_state(sd);
if (ir_state == NULL)
return -ENODEV;
cx25840_ir_rx_shutdown(sd);
cx25840_ir_tx_shutdown(sd);
kfifo_free(&ir_state->rx_kfifo);
kfree(ir_state);
state->ir_state = NULL;
return 0;
}
| gpl-2.0 |
SomethingExplosive/android_kernel_samsung_manta | sound/core/memalloc.c | 5062 | 13762 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Takashi Iwai <tiwai@suse.de>
*
* Generic memory allocators
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");
/*
*/
static DEFINE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);
/* buffer preservation list */
struct snd_mem_list {
struct snd_dma_buffer buffer;
unsigned int id;
struct list_head list;
};
/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
/*
*
* Generic memory allocators
*
*/
static long snd_allocated_pages; /* holding the number of allocated pages */
static inline void inc_snd_pages(int order)
{
snd_allocated_pages += 1 << order;
}
static inline void dec_snd_pages(int order)
{
snd_allocated_pages -= 1 << order;
}
/**
* snd_malloc_pages - allocate pages with the given size
* @size: the size to allocate in bytes
* @gfp_flags: the allocation conditions, GFP_XXX
*
* Allocates the physically contiguous pages with the given size.
*
* Returns the pointer to the buffer, or NULL if there is not enough memory.
*/
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
int pg;
void *res;
if (WARN_ON(!size))
return NULL;
if (WARN_ON(!gfp_flags))
return NULL;
gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */
pg = get_order(size);
if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
inc_snd_pages(pg);
return res;
}
/**
* snd_free_pages - release the pages
* @ptr: the buffer pointer to release
* @size: the allocated buffer size
*
* Releases the buffer allocated via snd_malloc_pages().
*/
void snd_free_pages(void *ptr, size_t size)
{
int pg;
if (ptr == NULL)
return;
pg = get_order(size);
dec_snd_pages(pg);
free_pages((unsigned long) ptr, pg);
}
/*
*
* Bus-specific memory allocators
*
*/
#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
int pg;
void *res;
gfp_t gfp_flags;
if (WARN_ON(!dma))
return NULL;
pg = get_order(size);
gfp_flags = GFP_KERNEL
| __GFP_COMP /* compound page lets parts be mapped */
| __GFP_NORETRY /* don't trigger OOM-killer */
| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
if (res != NULL)
inc_snd_pages(pg);
return res;
}
/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
dma_addr_t dma)
{
int pg;
if (ptr == NULL)
return;
pg = get_order(size);
dec_snd_pages(pg);
dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
#endif /* CONFIG_HAS_DMA */
/*
*
* ALSA generic memory management
*
*/
/**
* snd_dma_alloc_pages - allocate the buffer area according to the given type
* @type: the DMA buffer type
* @device: the device pointer
* @size: the buffer size to allocate
* @dmab: buffer allocation record to store the allocated data
*
* Calls the memory-allocator function for the corresponding
* buffer type.
*
* Returns zero if the buffer with the given size is allocated successfully,
* otherwise a negative value on error.
*/
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
if (WARN_ON(!size))
return -ENXIO;
if (WARN_ON(!dmab))
return -ENXIO;
dmab->dev.type = type;
dmab->dev.dev = device;
dmab->bytes = 0;
switch (type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
dmab->area = snd_malloc_pages(size,
(__force gfp_t)(unsigned long)device);
dmab->addr = 0;
break;
#ifdef CONFIG_HAS_DMA
case SNDRV_DMA_TYPE_DEV:
dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
snd_malloc_sgbuf_pages(device, size, dmab, NULL);
break;
#endif
default:
printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
dmab->area = NULL;
dmab->addr = 0;
return -ENXIO;
}
if (! dmab->area)
return -ENOMEM;
dmab->bytes = size;
return 0;
}
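/*
* Illustrative sketch (an assumption, not part of the original file):
* allocating a 64 KiB coherent DMA buffer for a PCI device through the
* dispatcher above, then releasing it. snd_dma_pci_data() is the macro
* this file already uses to turn a pci_dev into the device argument.
*
* struct snd_dma_buffer dmab;
*
* if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
* 64 * 1024, &dmab) == 0) {
* ... use dmab.area (CPU pointer) and dmab.addr (DMA address) ...
* snd_dma_free_pages(&dmab);
* }
*/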
/**
* snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
* @type: the DMA buffer type
* @device: the device pointer
* @size: the buffer size to allocate
* @dmab: buffer allocation record to store the allocated data
*
* Calls the memory-allocator function for the corresponding
* buffer type. When no space is left, this function reduces the size and
* tries to allocate again. The size actually allocated is stored in
* the dmab->bytes field.
*
* Returns zero if the buffer with the given size is allocated successfully,
* otherwise a negative value on error.
*/
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
int err;
while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
size_t aligned_size;
if (err != -ENOMEM)
return err;
if (size <= PAGE_SIZE)
return -ENOMEM;
aligned_size = PAGE_SIZE << get_order(size);
if (size != aligned_size)
size = aligned_size;
else
size >>= 1;
}
if (! dmab->area)
return -ENOMEM;
return 0;
}
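/*
* Worked example of the fallback loop above (illustrative, assuming
* 4 KiB pages): a 300 KiB request that keeps failing with -ENOMEM is
* first rounded up to its page-order size, 512 KiB, and then halved on
* each further failure: 300 KiB -> 512 KiB -> 256 KiB -> 128 KiB -> ...
* down to PAGE_SIZE, at which point the function gives up and returns
* -ENOMEM.
*/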
/**
* snd_dma_free_pages - release the allocated buffer
* @dmab: the buffer allocation record to release
*
* Releases the allocated buffer via snd_dma_alloc_pages().
*/
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
switch (dmab->dev.type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
snd_free_pages(dmab->area, dmab->bytes);
break;
#ifdef CONFIG_HAS_DMA
case SNDRV_DMA_TYPE_DEV:
snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
snd_free_sgbuf_pages(dmab);
break;
#endif
default:
printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
}
}
/**
* snd_dma_get_reserved - get the reserved buffer for the given device
* @dmab: the buffer allocation record to store
* @id: the buffer id
*
* Searches the reserved-buffer list and re-uses the buffer if a matching
* one is found. When the buffer is found, it is removed from the list.
*
* Returns the size of the buffer if it is found, or zero if not found.
*/
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
struct snd_mem_list *mem;
if (WARN_ON(!dmab))
return 0;
mutex_lock(&list_mutex);
list_for_each_entry(mem, &mem_list_head, list) {
if (mem->id == id &&
(mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
struct device *dev = dmab->dev.dev;
list_del(&mem->list);
*dmab = mem->buffer;
if (dmab->dev.dev == NULL)
dmab->dev.dev = dev;
kfree(mem);
mutex_unlock(&list_mutex);
return dmab->bytes;
}
}
mutex_unlock(&list_mutex);
return 0;
}
/**
* snd_dma_reserve_buf - reserve the buffer
* @dmab: the buffer to reserve
* @id: the buffer id
*
* Reserves the given buffer as a reserved buffer.
*
* Returns zero if successful, or a negative code at error.
*/
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
struct snd_mem_list *mem;
if (WARN_ON(!dmab))
return -EINVAL;
mem = kmalloc(sizeof(*mem), GFP_KERNEL);
if (! mem)
return -ENOMEM;
mutex_lock(&list_mutex);
mem->buffer = *dmab;
mem->id = id;
list_add_tail(&mem->list, &mem_list_head);
mutex_unlock(&list_mutex);
return 0;
}
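/*
* Illustrative reuse pattern (not part of the original file): a driver
* first tries to reclaim a previously reserved buffer and only
* allocates a fresh one if nothing matches; on shutdown it puts the
* buffer back on the reserved list instead of freeing it.
*
* if (!snd_dma_get_reserved_buf(&dmab, snd_dma_pci_buf_id(pci)))
* snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
* size, &dmab);
* ...
* snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
*/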
/*
* purge all reserved buffers
*/
static void free_all_reserved_pages(void)
{
struct list_head *p;
struct snd_mem_list *mem;
mutex_lock(&list_mutex);
while (! list_empty(&mem_list_head)) {
p = mem_list_head.next;
mem = list_entry(p, struct snd_mem_list, list);
list_del(p);
snd_dma_free_pages(&mem->buffer);
kfree(mem);
}
mutex_unlock(&list_mutex);
}
#ifdef CONFIG_PROC_FS
/*
* proc file interface
*/
#define SND_MEM_PROC_FILE "driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;
static int snd_mem_proc_read(struct seq_file *seq, void *offset)
{
long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
struct snd_mem_list *mem;
int devno;
static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };
mutex_lock(&list_mutex);
seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
devno = 0;
list_for_each_entry(mem, &mem_list_head, list) {
devno++;
seq_printf(seq, "buffer %d : ID %08x : type %s\n",
devno, mem->id, types[mem->buffer.dev.type]);
seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
(unsigned long)mem->buffer.addr,
(int)mem->buffer.bytes);
}
mutex_unlock(&list_mutex);
return 0;
}
static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, snd_mem_proc_read, NULL);
}
/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")
static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos)
{
char buf[128];
char *token, *p;
if (count > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, buffer, count))
return -EFAULT;
buf[count] = '\0';
p = buf;
token = gettoken(&p);
if (! token || *token == '#')
return count;
if (strcmp(token, "add") == 0) {
char *endp;
int vendor, device, size, buffers;
long mask;
int i, alloced;
struct pci_dev *pci;
if ((token = gettoken(&p)) == NULL ||
(vendor = simple_strtol(token, NULL, 0)) <= 0 ||
(token = gettoken(&p)) == NULL ||
(device = simple_strtol(token, NULL, 0)) <= 0 ||
(token = gettoken(&p)) == NULL ||
(mask = simple_strtol(token, NULL, 0)) < 0 ||
(token = gettoken(&p)) == NULL ||
(size = memparse(token, &endp)) < 64*1024 ||
size > 16*1024*1024 /* too big */ ||
(token = gettoken(&p)) == NULL ||
(buffers = simple_strtol(token, NULL, 0)) <= 0 ||
buffers > 4) {
printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
return count;
}
vendor &= 0xffff;
device &= 0xffff;
alloced = 0;
pci = NULL;
while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
if (mask > 0 && mask < 0xffffffff) {
if (pci_set_dma_mask(pci, mask) < 0 ||
pci_set_consistent_dma_mask(pci, mask) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
pci_dev_put(pci);
return count;
}
}
for (i = 0; i < buffers; i++) {
struct snd_dma_buffer dmab;
memset(&dmab, 0, sizeof(dmab));
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
size, &dmab) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
pci_dev_put(pci);
return count;
}
snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
}
alloced++;
}
if (! alloced) {
for (i = 0; i < buffers; i++) {
struct snd_dma_buffer dmab;
memset(&dmab, 0, sizeof(dmab));
/* FIXME: We can allocate only in ZONE_DMA
* without a device pointer!
*/
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
size, &dmab) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
break;
}
snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
}
}
} else if (strcmp(token, "erase") == 0)
/* FIXME: need for releasing each buffer chunk? */
free_all_reserved_pages();
else
printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
return count;
}
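/*
* Illustrative proc usage (the PCI IDs are made-up examples): the write
* handler above accepts "add <vendor> <device> <dma_mask> <size> <buffers>",
* so pre-allocating two 512 KiB buffers for a hypothetical 0x8086:0x27d8
* device would look like:
*
* echo "add 0x8086 0x27d8 0xffffffff 512k 2" > /proc/driver/snd-page-alloc
*
* and "erase" releases every buffer on the reserved list. Sizes must be
* between 64 KiB and 16 MiB, with at most 4 buffers per request.
*/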
#endif /* CONFIG_PCI */
static const struct file_operations snd_mem_proc_fops = {
.owner = THIS_MODULE,
.open = snd_mem_proc_open,
.read = seq_read,
#ifdef CONFIG_PCI
.write = snd_mem_proc_write,
#endif
.llseek = seq_lseek,
.release = single_release,
};
#endif /* CONFIG_PROC_FS */
/*
* module entry
*/
static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
&snd_mem_proc_fops);
#endif
return 0;
}
static void __exit snd_mem_exit(void)
{
remove_proc_entry(SND_MEM_PROC_FILE, NULL);
free_all_reserved_pages();
if (snd_allocated_pages > 0)
printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
}
module_init(snd_mem_init)
module_exit(snd_mem_exit)
/*
* exports
*/
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);
EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);
EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);
| gpl-2.0 |
flar2/m8-Sense-5.0.1 | drivers/scsi/bnx2fc/bnx2fc_els.c | 5062 | 25669 | /*
* bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
* This file contains helper routines that handle ELS requests
* and responses.
*
* Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
*/
#include "bnx2fc.h"
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
void *arg);
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
void *arg);
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
void *data, u32 data_len,
void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
struct bnx2fc_cmd *orig_io_req;
struct bnx2fc_cmd *rrq_req;
int rc = 0;
BUG_ON(!cb_arg);
rrq_req = cb_arg->io_req;
orig_io_req = cb_arg->aborted_io_req;
BUG_ON(!orig_io_req);
BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
orig_io_req->xid, rrq_req->xid);
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
/*
* The ELS request timed out. Clean up the IO with the FW and
* drop the completion. Remove it from the active_cmd_queue.
*/
BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
rrq_req->xid);
if (rrq_req->on_active_queue) {
list_del_init(&rrq_req->link);
rrq_req->on_active_queue = 0;
rc = bnx2fc_initiate_cleanup(rrq_req);
BUG_ON(rc);
}
}
kfree(cb_arg);
}
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{
struct fc_els_rrq rrq;
struct bnx2fc_rport *tgt = aborted_io_req->tgt;
struct fc_lport *lport = tgt->rdata->local_port;
struct bnx2fc_els_cb_arg *cb_arg = NULL;
u32 sid = tgt->sid;
u32 r_a_tov = lport->r_a_tov;
unsigned long start = jiffies;
int rc;
BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
aborted_io_req->xid);
memset(&rrq, 0, sizeof(rrq));
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
if (!cb_arg) {
printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
rc = -ENOMEM;
goto rrq_err;
}
cb_arg->aborted_io_req = aborted_io_req;
rrq.rrq_cmd = ELS_RRQ;
hton24(rrq.rrq_s_id, sid);
rrq.rrq_ox_id = htons(aborted_io_req->xid);
rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);
retry_rrq:
rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
bnx2fc_rrq_compl, cb_arg,
r_a_tov);
if (rc == -ENOMEM) {
if (time_after(jiffies, start + (10 * HZ))) {
BNX2FC_ELS_DBG("rrq Failed\n");
rc = FAILED;
goto rrq_err;
}
msleep(20);
goto retry_rrq;
}
rrq_err:
if (rc) {
BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
aborted_io_req->xid);
kfree(cb_arg);
spin_lock_bh(&tgt->tgt_lock);
kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
}
return rc;
}
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
struct bnx2fc_cmd *els_req;
struct bnx2fc_rport *tgt;
struct bnx2fc_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
unsigned char *buf;
void *resp_buf;
u32 resp_len, hdr_len;
u16 l2_oxid;
int frame_len;
int rc = 0;
l2_oxid = cb_arg->l2_oxid;
BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);
els_req = cb_arg->io_req;
if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
/*
* The ELS request timed out. Clean up the IO with the FW and
* drop the completion. libfc will handle the ELS timeout.
*/
if (els_req->on_active_queue) {
list_del_init(&els_req->link);
els_req->on_active_queue = 0;
rc = bnx2fc_initiate_cleanup(els_req);
BUG_ON(rc);
}
goto free_arg;
}
tgt = els_req->tgt;
mp_req = &(els_req->mp_req);
fc_hdr = &(mp_req->resp_fc_hdr);
resp_len = mp_req->resp_len;
resp_buf = mp_req->resp_buf;
buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
if (!buf) {
printk(KERN_ERR PFX "Unable to alloc mp buf\n");
goto free_arg;
}
hdr_len = sizeof(*fc_hdr);
if (hdr_len + resp_len > PAGE_SIZE) {
printk(KERN_ERR PFX "l2_els_compl: resp len is "
"beyond page size\n");
goto free_buf;
}
memcpy(buf, fc_hdr, hdr_len);
memcpy(buf + hdr_len, resp_buf, resp_len);
frame_len = hdr_len + resp_len;
bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
free_buf:
kfree(buf);
free_arg:
kfree(cb_arg);
}
int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
struct fc_els_adisc *adisc;
struct fc_frame_header *fh;
struct bnx2fc_els_cb_arg *cb_arg;
struct fc_lport *lport = tgt->rdata->local_port;
u32 r_a_tov = lport->r_a_tov;
int rc;
fh = fc_frame_header_get(fp);
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
if (!cb_arg) {
printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
return -ENOMEM;
}
cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
adisc = fc_frame_payload_get(fp, sizeof(*adisc));
/* adisc is initialized by libfc */
rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
if (rc)
kfree(cb_arg);
return rc;
}
int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
struct fc_els_logo *logo;
struct fc_frame_header *fh;
struct bnx2fc_els_cb_arg *cb_arg;
struct fc_lport *lport = tgt->rdata->local_port;
u32 r_a_tov = lport->r_a_tov;
int rc;
fh = fc_frame_header_get(fp);
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
if (!cb_arg) {
printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
return -ENOMEM;
}
cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
logo = fc_frame_payload_get(fp, sizeof(*logo));
/* logo is initialized by libfc */
rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
if (rc)
kfree(cb_arg);
return rc;
}
int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
struct fc_els_rls *rls;
struct fc_frame_header *fh;
struct bnx2fc_els_cb_arg *cb_arg;
struct fc_lport *lport = tgt->rdata->local_port;
u32 r_a_tov = lport->r_a_tov;
int rc;
fh = fc_frame_header_get(fp);
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
if (!cb_arg) {
printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
return -ENOMEM;
}
cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
rls = fc_frame_payload_get(fp, sizeof(*rls));
/* rls is initialized by libfc */
rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
if (rc)
kfree(cb_arg);
return rc;
}
void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
struct bnx2fc_mp_req *mp_req;
struct fc_frame_header *fc_hdr, *fh;
struct bnx2fc_cmd *srr_req;
struct bnx2fc_cmd *orig_io_req;
struct fc_frame *fp;
unsigned char *buf;
void *resp_buf;
u32 resp_len, hdr_len;
u8 opcode;
int rc = 0;
orig_io_req = cb_arg->aborted_io_req;
srr_req = cb_arg->io_req;
if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
/* SRR timed out */
BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
"orig_io - 0x%x\n",
orig_io_req->xid);
rc = bnx2fc_initiate_abts(srr_req);
if (rc != SUCCESS) {
BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
"failed. issue cleanup\n");
bnx2fc_initiate_cleanup(srr_req);
}
if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
orig_io_req->xid, orig_io_req->req_flags);
goto srr_compl_done;
}
orig_io_req->srr_retry++;
if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
struct bnx2fc_rport *tgt = orig_io_req->tgt;
spin_unlock_bh(&tgt->tgt_lock);
rc = bnx2fc_send_srr(orig_io_req,
orig_io_req->srr_offset,
orig_io_req->srr_rctl);
spin_lock_bh(&tgt->tgt_lock);
if (!rc)
goto srr_compl_done;
}
rc = bnx2fc_initiate_abts(orig_io_req);
if (rc != SUCCESS) {
BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
"failed xid = 0x%x. issue cleanup\n",
orig_io_req->xid);
bnx2fc_initiate_cleanup(orig_io_req);
}
goto srr_compl_done;
}
if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
orig_io_req->xid, orig_io_req->req_flags);
goto srr_compl_done;
}
mp_req = &(srr_req->mp_req);
fc_hdr = &(mp_req->resp_fc_hdr);
resp_len = mp_req->resp_len;
resp_buf = mp_req->resp_buf;
hdr_len = sizeof(*fc_hdr);
buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
if (!buf) {
printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
goto srr_compl_done;
}
memcpy(buf, fc_hdr, hdr_len);
memcpy(buf + hdr_len, resp_buf, resp_len);
fp = fc_frame_alloc(NULL, resp_len);
if (!fp) {
printk(KERN_ERR PFX "fc_frame_alloc failure\n");
goto free_buf;
}
fh = (struct fc_frame_header *) fc_frame_header_get(fp);
/* Copy FC Frame header and payload into the frame */
memcpy(fh, buf, hdr_len + resp_len);
opcode = fc_frame_payload_op(fp);
switch (opcode) {
case ELS_LS_ACC:
BNX2FC_IO_DBG(srr_req, "SRR success\n");
break;
case ELS_LS_RJT:
BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
rc = bnx2fc_initiate_abts(orig_io_req);
if (rc != SUCCESS) {
BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
"failed xid = 0x%x. issue cleanup\n",
orig_io_req->xid);
bnx2fc_initiate_cleanup(orig_io_req);
}
break;
default:
BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
opcode);
break;
}
fc_frame_free(fp);
free_buf:
kfree(buf);
srr_compl_done:
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}
void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
struct bnx2fc_cmd *orig_io_req, *new_io_req;
struct bnx2fc_cmd *rec_req;
struct bnx2fc_mp_req *mp_req;
struct fc_frame_header *fc_hdr, *fh;
struct fc_els_ls_rjt *rjt;
struct fc_els_rec_acc *acc;
struct bnx2fc_rport *tgt;
struct fcoe_err_report_entry *err_entry;
struct scsi_cmnd *sc_cmd;
enum fc_rctl r_ctl;
unsigned char *buf;
void *resp_buf;
struct fc_frame *fp;
u8 opcode;
u32 offset;
u32 e_stat;
u32 resp_len, hdr_len;
int rc = 0;
bool send_seq_clnp = false;
bool abort_io = false;
BNX2FC_MISC_DBG("Entered rec_compl callback\n");
rec_req = cb_arg->io_req;
orig_io_req = cb_arg->aborted_io_req;
BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
tgt = orig_io_req->tgt;
/* Handle REC timeout case */
if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
BNX2FC_IO_DBG(rec_req, "timed out, abort "
"orig_io - 0x%x\n",
orig_io_req->xid);
/* The ELS req timed out. Send an ABTS for the ELS */
rc = bnx2fc_initiate_abts(rec_req);
if (rc != SUCCESS) {
BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
"failed. issue cleanup\n");
bnx2fc_initiate_cleanup(rec_req);
}
orig_io_req->rec_retry++;
/* REC timed out. Send ABTS to the orig IO req */
if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
spin_unlock_bh(&tgt->tgt_lock);
rc = bnx2fc_send_rec(orig_io_req);
spin_lock_bh(&tgt->tgt_lock);
if (!rc)
goto rec_compl_done;
}
rc = bnx2fc_initiate_abts(orig_io_req);
if (rc != SUCCESS) {
BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
"failed xid = 0x%x. issue cleanup\n",
orig_io_req->xid);
bnx2fc_initiate_cleanup(orig_io_req);
}
goto rec_compl_done;
}
if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
BNX2FC_IO_DBG(rec_req, "completed "
"orig_io - 0x%x\n",
orig_io_req->xid);
goto rec_compl_done;
}
if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
BNX2FC_IO_DBG(rec_req, "abts in prog "
"orig_io - 0x%x\n",
orig_io_req->xid);
goto rec_compl_done;
}
mp_req = &(rec_req->mp_req);
fc_hdr = &(mp_req->resp_fc_hdr);
resp_len = mp_req->resp_len;
acc = resp_buf = mp_req->resp_buf;
hdr_len = sizeof(*fc_hdr);
buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
if (!buf) {
printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
goto rec_compl_done;
}
memcpy(buf, fc_hdr, hdr_len);
memcpy(buf + hdr_len, resp_buf, resp_len);
fp = fc_frame_alloc(NULL, resp_len);
if (!fp) {
printk(KERN_ERR PFX "fc_frame_alloc failure\n");
goto free_buf;
}
fh = (struct fc_frame_header *) fc_frame_header_get(fp);
/* Copy FC Frame header and payload into the frame */
memcpy(fh, buf, hdr_len + resp_len);
opcode = fc_frame_payload_op(fp);
if (opcode == ELS_LS_RJT) {
BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
if ((rjt->er_reason == ELS_RJT_LOGIC ||
rjt->er_reason == ELS_RJT_UNAB) &&
rjt->er_explan == ELS_EXPL_OXID_RXID) {
BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
new_io_req = bnx2fc_cmd_alloc(tgt);
if (!new_io_req)
goto abort_io;
new_io_req->sc_cmd = orig_io_req->sc_cmd;
/* cleanup orig_io_req that is with the FW */
set_bit(BNX2FC_FLAG_CMD_LOST,
&orig_io_req->req_flags);
bnx2fc_initiate_cleanup(orig_io_req);
/* Post a new IO req with the same sc_cmd */
BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
spin_unlock_bh(&tgt->tgt_lock);
rc = bnx2fc_post_io_req(tgt, new_io_req);
spin_lock_bh(&tgt->tgt_lock);
if (!rc)
goto free_frame;
BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
}
abort_io:
rc = bnx2fc_initiate_abts(orig_io_req);
if (rc != SUCCESS) {
BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
"failed. issue cleanup\n");
bnx2fc_initiate_cleanup(orig_io_req);
}
} else if (opcode == ELS_LS_ACC) {
/* REVISIT: Check if the exchange is already aborted */
offset = ntohl(acc->reca_fc4value);
e_stat = ntohl(acc->reca_e_stat);
if (e_stat & ESB_ST_SEQ_INIT) {
BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
goto free_frame;
}
BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
e_stat, offset);
/* Seq initiative is with us */
err_entry = (struct fcoe_err_report_entry *)
&orig_io_req->err_entry;
sc_cmd = orig_io_req->sc_cmd;
if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
/* SCSI WRITE command */
if (offset == orig_io_req->data_xfer_len) {
BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
/* FCP_RSP lost */
r_ctl = FC_RCTL_DD_CMD_STATUS;
offset = 0;
} else {
/* start transmitting from offset */
BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
send_seq_clnp = true;
r_ctl = FC_RCTL_DD_DATA_DESC;
if (bnx2fc_initiate_seq_cleanup(orig_io_req,
offset, r_ctl))
abort_io = true;
/* XFER_RDY */
}
} else {
/* SCSI READ command */
if (err_entry->data.rx_buf_off ==
orig_io_req->data_xfer_len) {
/* FCP_RSP lost */
BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
r_ctl = FC_RCTL_DD_CMD_STATUS;
offset = 0;
} else {
/* request retransmission from this offset */
send_seq_clnp = true;
offset = err_entry->data.rx_buf_off;
BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
/* FCP_DATA lost */
r_ctl = FC_RCTL_DD_SOL_DATA;
if (bnx2fc_initiate_seq_cleanup(orig_io_req,
offset, r_ctl))
abort_io = true;
}
}
if (abort_io) {
rc = bnx2fc_initiate_abts(orig_io_req);
if (rc != SUCCESS) {
BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
" failed. issue cleanup\n");
bnx2fc_initiate_cleanup(orig_io_req);
}
} else if (!send_seq_clnp) {
BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
spin_unlock_bh(&tgt->tgt_lock);
rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
spin_lock_bh(&tgt->tgt_lock);
if (rc) {
BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
" IO will abort\n");
}
}
}
free_frame:
fc_frame_free(fp);
free_buf:
kfree(buf);
rec_compl_done:
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
kfree(cb_arg);
}
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
struct fc_els_rec rec;
struct bnx2fc_rport *tgt = orig_io_req->tgt;
struct fc_lport *lport = tgt->rdata->local_port;
struct bnx2fc_els_cb_arg *cb_arg = NULL;
u32 sid = tgt->sid;
u32 r_a_tov = lport->r_a_tov;
int rc;
BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
memset(&rec, 0, sizeof(rec));
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
if (!cb_arg) {
printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
rc = -ENOMEM;
goto rec_err;
}
kref_get(&orig_io_req->refcount);
cb_arg->aborted_io_req = orig_io_req;
rec.rec_cmd = ELS_REC;
hton24(rec.rec_s_id, sid);
rec.rec_ox_id = htons(orig_io_req->xid);
rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
bnx2fc_rec_compl, cb_arg,
r_a_tov);
rec_err:
if (rc) {
BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
spin_lock_bh(&tgt->tgt_lock);
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
kfree(cb_arg);
}
return rc;
}
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
struct fcp_srr srr;
struct bnx2fc_rport *tgt = orig_io_req->tgt;
struct fc_lport *lport = tgt->rdata->local_port;
struct bnx2fc_els_cb_arg *cb_arg = NULL;
u32 r_a_tov = lport->r_a_tov;
int rc;
BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
memset(&srr, 0, sizeof(srr));
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
if (!cb_arg) {
printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
rc = -ENOMEM;
goto srr_err;
}
kref_get(&orig_io_req->refcount);
cb_arg->aborted_io_req = orig_io_req;
srr.srr_op = ELS_SRR;
srr.srr_ox_id = htons(orig_io_req->xid);
srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
srr.srr_rel_off = htonl(offset);
srr.srr_r_ctl = r_ctl;
orig_io_req->srr_offset = offset;
orig_io_req->srr_rctl = r_ctl;
rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
bnx2fc_srr_compl, cb_arg,
r_a_tov);
srr_err:
if (rc) {
BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
spin_lock_bh(&tgt->tgt_lock);
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
kfree(cb_arg);
} else
set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
return rc;
}
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
void *data, u32 data_len,
void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
struct fcoe_port *port = tgt->port;
struct bnx2fc_interface *interface = port->priv;
struct fc_rport *rport = tgt->rport;
struct fc_lport *lport = port->lport;
struct bnx2fc_cmd *els_req;
struct bnx2fc_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
struct fcoe_task_ctx_entry *task;
struct fcoe_task_ctx_entry *task_page;
int rc = 0;
int task_idx, index;
u32 did, sid;
u16 xid;
rc = fc_remote_port_chkready(rport);
if (rc) {
printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
rc = -EINVAL;
goto els_err;
}
if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
rc = -EINVAL;
goto els_err;
}
if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
(test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
rc = -EINVAL;
goto els_err;
}
els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
if (!els_req) {
rc = -ENOMEM;
goto els_err;
}
els_req->sc_cmd = NULL;
els_req->port = port;
els_req->tgt = tgt;
els_req->cb_func = cb_func;
cb_arg->io_req = els_req;
els_req->cb_arg = cb_arg;
mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
rc = bnx2fc_init_mp_req(els_req);
if (rc == FAILED) {
printk(KERN_ERR PFX "ELS MP request init failed\n");
spin_lock_bh(&tgt->tgt_lock);
kref_put(&els_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
rc = -ENOMEM;
goto els_err;
} else {
/* rc SUCCESS */
rc = 0;
}
/* Set the data_xfer_len to the size of ELS payload */
mp_req->req_len = data_len;
els_req->data_xfer_len = mp_req->req_len;
/* Fill ELS Payload */
if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
memcpy(mp_req->req_buf, data, data_len);
} else {
printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
els_req->cb_func = NULL;
els_req->cb_arg = NULL;
spin_lock_bh(&tgt->tgt_lock);
kref_put(&els_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
rc = -EINVAL;
}
if (rc)
goto els_err;
/* Fill FC header */
fc_hdr = &(mp_req->req_fc_hdr);
did = tgt->rport->port_id;
sid = tgt->sid;
if (op == ELS_SRR)
__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
FC_TYPE_FCP, FC_FC_FIRST_SEQ |
FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
else
__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
FC_TYPE_ELS, FC_FC_FIRST_SEQ |
FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
/* Obtain exchange id */
xid = els_req->xid;
task_idx = xid/BNX2FC_TASKS_PER_PAGE;
index = xid % BNX2FC_TASKS_PER_PAGE;
/* Initialize task context for this IO request */
task_page = (struct fcoe_task_ctx_entry *)
interface->hba->task_ctx[task_idx];
task = &(task_page[index]);
bnx2fc_init_mp_task(els_req, task);
spin_lock_bh(&tgt->tgt_lock);
if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
printk(KERN_ERR PFX "initiate_els.. session not ready\n");
els_req->cb_func = NULL;
els_req->cb_arg = NULL;
kref_put(&els_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
return -EINVAL;
}
if (timer_msec)
bnx2fc_cmd_timer_set(els_req, timer_msec);
bnx2fc_add_2_sq(tgt, xid);
els_req->on_active_queue = 1;
list_add_tail(&els_req->link, &tgt->els_queue);
/* Ring doorbell */
bnx2fc_ring_doorbell(tgt);
spin_unlock_bh(&tgt->tgt_lock);
els_err:
return rc;
}
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
struct fcoe_task_ctx_entry *task, u8 num_rq)
{
struct bnx2fc_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
u64 *hdr;
u64 *temp_hdr;
BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x "
"cmd_type = %d\n", els_req->xid, els_req->cmd_type);
if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
&els_req->req_flags)) {
BNX2FC_ELS_DBG("Timer context finished processing this "
"els - 0x%x\n", els_req->xid);
/* This IO doesn't receive cleanup completion */
kref_put(&els_req->refcount, bnx2fc_cmd_release);
return;
}
/* Cancel the timeout_work, as we received the response */
if (cancel_delayed_work(&els_req->timeout_work))
kref_put(&els_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
if (els_req->on_active_queue) {
list_del_init(&els_req->link);
els_req->on_active_queue = 0;
}
mp_req = &(els_req->mp_req);
fc_hdr = &(mp_req->resp_fc_hdr);
hdr = (u64 *)fc_hdr;
temp_hdr = (u64 *)
&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
hdr[0] = cpu_to_be64(temp_hdr[0]);
hdr[1] = cpu_to_be64(temp_hdr[1]);
hdr[2] = cpu_to_be64(temp_hdr[2]);
mp_req->resp_len =
task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
/* Parse ELS response */
if ((els_req->cb_func) && (els_req->cb_arg)) {
els_req->cb_func(els_req->cb_arg);
els_req->cb_arg = NULL;
}
kref_put(&els_req->refcount, bnx2fc_cmd_release);
}
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
void *arg)
{
struct fcoe_ctlr *fip = arg;
struct fc_exch *exch = fc_seq_exch(seq);
struct fc_lport *lport = exch->lp;
u8 *mac;
struct fc_frame_header *fh;
u8 op;
if (IS_ERR(fp))
goto done;
mac = fr_cb(fp)->granted_mac;
if (is_zero_ether_addr(mac)) {
fh = fc_frame_header_get(fp);
if (fh->fh_type != FC_TYPE_ELS) {
printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
"fh_type != FC_TYPE_ELS\n");
fc_frame_free(fp);
return;
}
op = fc_frame_payload_op(fp);
if (lport->vport) {
if (op == ELS_LS_RJT) {
printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
fc_vport_terminate(lport->vport);
fc_frame_free(fp);
return;
}
}
if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
fc_frame_free(fp);
return;
}
}
fip->update_mac(lport, mac);
done:
fc_lport_flogi_resp(seq, fp, lport);
}
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
void *arg)
{
struct fcoe_ctlr *fip = arg;
struct fc_exch *exch = fc_seq_exch(seq);
struct fc_lport *lport = exch->lp;
static u8 zero_mac[ETH_ALEN] = { 0 };
if (!IS_ERR(fp))
fip->update_mac(lport, zero_mac);
fc_lport_logo_resp(seq, fp, lport);
}
struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
struct fc_frame *fp, unsigned int op,
void (*resp)(struct fc_seq *,
struct fc_frame *,
void *),
void *arg, u32 timeout)
{
struct fcoe_port *port = lport_priv(lport);
struct bnx2fc_interface *interface = port->priv;
struct fcoe_ctlr *fip = &interface->ctlr;
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (op) {
case ELS_FLOGI:
case ELS_FDISC:
return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
fip, timeout);
case ELS_LOGO:
/* only hook onto fabric logouts, not port logouts */
if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
break;
return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
fip, timeout);
}
return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}
| gpl-2.0 |
lycanthia/Find7-Kernel-Source-4.3 | drivers/media/video/cx25840/cx25840-ir.c | 5062 | 36370 | /*
* Driver for the Conexant CX2584x Audio/Video decoder chip and related cores
*
* Integrated Consumer Infrared Controller
*
* Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <media/cx25840.h>
#include <media/rc-core.h>
#include "cx25840-core.h"
static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable integrated IR debug messages");
#define CX25840_IR_REG_BASE 0x200
#define CX25840_IR_CNTRL_REG 0x200
#define CNTRL_WIN_3_3 0x00000000
#define CNTRL_WIN_4_3 0x00000001
#define CNTRL_WIN_3_4 0x00000002
#define CNTRL_WIN_4_4 0x00000003
#define CNTRL_WIN 0x00000003
#define CNTRL_EDG_NONE 0x00000000
#define CNTRL_EDG_FALL 0x00000004
#define CNTRL_EDG_RISE 0x00000008
#define CNTRL_EDG_BOTH 0x0000000C
#define CNTRL_EDG 0x0000000C
#define CNTRL_DMD 0x00000010
#define CNTRL_MOD 0x00000020
#define CNTRL_RFE 0x00000040
#define CNTRL_TFE 0x00000080
#define CNTRL_RXE 0x00000100
#define CNTRL_TXE 0x00000200
#define CNTRL_RIC 0x00000400
#define CNTRL_TIC 0x00000800
#define CNTRL_CPL 0x00001000
#define CNTRL_LBM 0x00002000
#define CNTRL_R 0x00004000
#define CX25840_IR_TXCLK_REG 0x204
#define TXCLK_TCD 0x0000FFFF
#define CX25840_IR_RXCLK_REG 0x208
#define RXCLK_RCD 0x0000FFFF
#define CX25840_IR_CDUTY_REG 0x20C
#define CDUTY_CDC 0x0000000F
#define CX25840_IR_STATS_REG 0x210
#define STATS_RTO 0x00000001
#define STATS_ROR 0x00000002
#define STATS_RBY 0x00000004
#define STATS_TBY 0x00000008
#define STATS_RSR 0x00000010
#define STATS_TSR 0x00000020
#define CX25840_IR_IRQEN_REG 0x214
#define IRQEN_RTE 0x00000001
#define IRQEN_ROE 0x00000002
#define IRQEN_RSE 0x00000010
#define IRQEN_TSE 0x00000020
#define IRQEN_MSK 0x00000033
#define CX25840_IR_FILTR_REG 0x218
#define FILTR_LPF 0x0000FFFF
#define CX25840_IR_FIFO_REG 0x23C
#define FIFO_RXTX 0x0000FFFF
#define FIFO_RXTX_LVL 0x00010000
#define FIFO_RXTX_RTO 0x0001FFFF
#define FIFO_RX_NDV 0x00020000
#define FIFO_RX_DEPTH 8
#define FIFO_TX_DEPTH 8
#define CX25840_VIDCLK_FREQ 108000000 /* 108 MHz, BT.656 */
#define CX25840_IR_REFCLK_FREQ (CX25840_VIDCLK_FREQ / 2)
/*
* We use this union internally for convenience, but callers to tx_write
* and rx_read will be expecting records of type struct ir_raw_event.
* Always ensure the size of this union is dictated by struct ir_raw_event.
*/
union cx25840_ir_fifo_rec {
u32 hw_fifo_data;
struct ir_raw_event ir_core_data;
};
#define CX25840_IR_RX_KFIFO_SIZE (256 * sizeof(union cx25840_ir_fifo_rec))
#define CX25840_IR_TX_KFIFO_SIZE (256 * sizeof(union cx25840_ir_fifo_rec))
struct cx25840_ir_state {
struct i2c_client *c;
struct v4l2_subdev_ir_parameters rx_params;
struct mutex rx_params_lock; /* protects Rx parameter settings cache */
atomic_t rxclk_divider;
atomic_t rx_invert;
struct kfifo rx_kfifo;
spinlock_t rx_kfifo_lock; /* protect Rx data kfifo */
struct v4l2_subdev_ir_parameters tx_params;
struct mutex tx_params_lock; /* protects Tx parameter settings cache */
atomic_t txclk_divider;
};
static inline struct cx25840_ir_state *to_ir_state(struct v4l2_subdev *sd)
{
struct cx25840_state *state = to_state(sd);
return state ? state->ir_state : NULL;
}
/*
* Rx and Tx Clock Divider register computations
*
* Note the largest clock divider value of 0xffff corresponds to:
* (0xffff + 1) * 1000 / 108/2 MHz = 1,213,629.629... ns
* which fits in 21 bits, so we'll use unsigned int for time arguments.
*/
static inline u16 count_to_clock_divider(unsigned int d)
{
if (d > RXCLK_RCD + 1)
d = RXCLK_RCD;
else if (d < 2)
d = 1;
else
d--;
return (u16) d;
}
static inline u16 ns_to_clock_divider(unsigned int ns)
{
return count_to_clock_divider(
DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ / 1000000 * ns, 1000));
}
static inline unsigned int clock_divider_to_ns(unsigned int divider)
{
/* Period of the Rx or Tx clock in ns */
return DIV_ROUND_CLOSEST((divider + 1) * 1000,
CX25840_IR_REFCLK_FREQ / 1000000);
}
static inline u16 carrier_freq_to_clock_divider(unsigned int freq)
{
return count_to_clock_divider(
DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, freq * 16));
}
static inline unsigned int clock_divider_to_carrier_freq(unsigned int divider)
{
return DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, (divider + 1) * 16);
}
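/*
* Worked example (illustrative) of the carrier helpers above with the
* 108/2 = 54 MHz reference clock: for a requested 36 kHz carrier,
* carrier_freq_to_clock_divider(36000) computes
* DIV_ROUND_CLOSEST(54000000, 36000 * 16) = 94 counts, stored as the
* divider value 93; clock_divider_to_carrier_freq(93) then reports the
* actual carrier as DIV_ROUND_CLOSEST(54000000, 94 * 16) = 35904 Hz.
*/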
static inline u16 freq_to_clock_divider(unsigned int freq,
unsigned int rollovers)
{
return count_to_clock_divider(
DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, freq * rollovers));
}
static inline unsigned int clock_divider_to_freq(unsigned int divider,
unsigned int rollovers)
{
return DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ,
(divider + 1) * rollovers);
}
/*
* Low Pass Filter register calculations
*
* Note the largest count value of 0xffff corresponds to:
* 0xffff * 1000 / 108/2 MHz = 1,213,611.11... ns
* which fits in 21 bits, so we'll use unsigned int for time arguments.
*/
static inline u16 count_to_lpf_count(unsigned int d)
{
if (d > FILTR_LPF)
d = FILTR_LPF;
else if (d < 4)
d = 0;
return (u16) d;
}
static inline u16 ns_to_lpf_count(unsigned int ns)
{
return count_to_lpf_count(
DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ / 1000000 * ns, 1000));
}
static inline unsigned int lpf_count_to_ns(unsigned int count)
{
/* Duration of the Low Pass Filter rejection window in ns */
return DIV_ROUND_CLOSEST(count * 1000,
CX25840_IR_REFCLK_FREQ / 1000000);
}
static inline unsigned int lpf_count_to_us(unsigned int count)
{
/* Duration of the Low Pass Filter rejection window in us */
return DIV_ROUND_CLOSEST(count, CX25840_IR_REFCLK_FREQ / 1000000);
}
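/*
* Worked example (illustrative): the default Rx noise filter width of
* 333333 ns maps to ns_to_lpf_count(333333) =
* DIV_ROUND_CLOSEST(54 * 333333, 1000) = 18000 counts, which reads
* back as lpf_count_to_ns(18000) = 333333 ns and
* lpf_count_to_us(18000) = 333 us.
*/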
/*
* FIFO register pulse width count computations
*/
static u32 clock_divider_to_resolution(u16 divider)
{
/*
* Resolution is the duration of 1 tick of the readable portion of
* the pulse width counter as read from the FIFO. The two lsb's are
* not readable, hence the << 2. This function returns ns.
*/
return DIV_ROUND_CLOSEST((1 << 2) * ((u32) divider + 1) * 1000,
CX25840_IR_REFCLK_FREQ / 1000000);
}
static u64 pulse_width_count_to_ns(u16 count, u16 divider)
{
u64 n;
u32 rem;
/*
* The 2 lsb's of the pulse width timer count are not readable, hence
* the (count << 2) | 0x3
*/
n = (((u64) count << 2) | 0x3) * (divider + 1) * 1000; /* millicycles */
rem = do_div(n, CX25840_IR_REFCLK_FREQ / 1000000); /* / MHz => ns */
if (rem >= CX25840_IR_REFCLK_FREQ / 1000000 / 2)
n++;
return n;
}
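/*
* Worked example (illustrative): with the divider value 93 from the
* 36 kHz carrier example above, the largest readable count
* FIFO_RXTX = 0xffff gives pulse_width_count_to_ns(0xffff, 93) =
* ((0xffff << 2) | 0x3) * 94 * 1000 / 54 = 456323000 ns, i.e. the
* longest measurable pulse is roughly 456 ms.
*/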
#if 0
/* Keep as we will need this for Transmit functionality */
static u16 ns_to_pulse_width_count(u32 ns, u16 divider)
{
u64 n;
u32 d;
u32 rem;
/*
* The 2 lsb's of the pulse width timer count are not accessible, hence
* the (1 << 2)
*/
n = ((u64) ns) * CX25840_IR_REFCLK_FREQ / 1000000; /* millicycles */
d = (1 << 2) * ((u32) divider + 1) * 1000; /* millicycles/count */
rem = do_div(n, d);
if (rem >= d / 2)
n++;
if (n > FIFO_RXTX)
n = FIFO_RXTX;
else if (n == 0)
n = 1;
return (u16) n;
}
#endif
static unsigned int pulse_width_count_to_us(u16 count, u16 divider)
{
u64 n;
u32 rem;
/*
* The 2 lsb's of the pulse width timer count are not readable, hence
* the (count << 2) | 0x3
*/
n = (((u64) count << 2) | 0x3) * (divider + 1); /* cycles */
rem = do_div(n, CX25840_IR_REFCLK_FREQ / 1000000); /* / MHz => us */
if (rem >= CX25840_IR_REFCLK_FREQ / 1000000 / 2)
n++;
return (unsigned int) n;
}
/*
* Pulse Clocks computations: Combined Pulse Width Count & Rx Clock Counts
*
* The total pulse clock count is an 18 bit pulse width timer count as the most
* significant part and (up to) 16 bit clock divider count as a modulus.
* When the Rx clock divider ticks down to 0, it increments the 18 bit pulse
* width timer count's least significant bit.
*/
static u64 ns_to_pulse_clocks(u32 ns)
{
u64 clocks;
u32 rem;
clocks = CX25840_IR_REFCLK_FREQ / 1000000 * (u64) ns; /* millicycles */
rem = do_div(clocks, 1000); /* /1000 = cycles */
if (rem >= 1000 / 2)
clocks++;
return clocks;
}
static u16 pulse_clocks_to_clock_divider(u64 count)
{
u32 rem;
rem = do_div(count, (FIFO_RXTX << 2) | 0x3);
/* net result needs to be rounded down and decremented by 1 */
if (count > RXCLK_RCD + 1)
count = RXCLK_RCD;
else if (count < 2)
count = 1;
else
count--;
return (u16) count;
}
/*
* IR Control Register helpers
*/
enum tx_fifo_watermark {
TX_FIFO_HALF_EMPTY = 0,
TX_FIFO_EMPTY = CNTRL_TIC,
};
enum rx_fifo_watermark {
RX_FIFO_HALF_FULL = 0,
RX_FIFO_NOT_EMPTY = CNTRL_RIC,
};
static inline void control_tx_irq_watermark(struct i2c_client *c,
enum tx_fifo_watermark level)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_TIC, level);
}
static inline void control_rx_irq_watermark(struct i2c_client *c,
enum rx_fifo_watermark level)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_RIC, level);
}
static inline void control_tx_enable(struct i2c_client *c, bool enable)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~(CNTRL_TXE | CNTRL_TFE),
enable ? (CNTRL_TXE | CNTRL_TFE) : 0);
}
static inline void control_rx_enable(struct i2c_client *c, bool enable)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~(CNTRL_RXE | CNTRL_RFE),
enable ? (CNTRL_RXE | CNTRL_RFE) : 0);
}
static inline void control_tx_modulation_enable(struct i2c_client *c,
bool enable)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_MOD,
enable ? CNTRL_MOD : 0);
}
static inline void control_rx_demodulation_enable(struct i2c_client *c,
bool enable)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_DMD,
enable ? CNTRL_DMD : 0);
}
static inline void control_rx_s_edge_detection(struct i2c_client *c,
u32 edge_types)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_EDG_BOTH,
edge_types & CNTRL_EDG_BOTH);
}
static void control_rx_s_carrier_window(struct i2c_client *c,
unsigned int carrier,
unsigned int *carrier_range_low,
unsigned int *carrier_range_high)
{
u32 v;
unsigned int c16 = carrier * 16;
if (*carrier_range_low < DIV_ROUND_CLOSEST(c16, 16 + 3)) {
v = CNTRL_WIN_3_4;
*carrier_range_low = DIV_ROUND_CLOSEST(c16, 16 + 4);
} else {
v = CNTRL_WIN_3_3;
*carrier_range_low = DIV_ROUND_CLOSEST(c16, 16 + 3);
}
if (*carrier_range_high > DIV_ROUND_CLOSEST(c16, 16 - 3)) {
v |= CNTRL_WIN_4_3;
*carrier_range_high = DIV_ROUND_CLOSEST(c16, 16 - 4);
} else {
v |= CNTRL_WIN_3_3;
*carrier_range_high = DIV_ROUND_CLOSEST(c16, 16 - 3);
}
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_WIN, v);
}
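/*
* Worked example (illustrative) for a 36 kHz carrier with a requested
* window of 35000 to 37000 Hz: c16 = 576000, so the low bound check
* uses DIV_ROUND_CLOSEST(576000, 19) = 30316 Hz and the high bound
* check uses DIV_ROUND_CLOSEST(576000, 13) = 44308 Hz. The requested
* range fits inside both, so the -3/+3 window is selected and the
* range is widened to 30316..44308 Hz on return.
*/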
static inline void control_tx_polarity_invert(struct i2c_client *c,
bool invert)
{
cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_CPL,
invert ? CNTRL_CPL : 0);
}
/*
* IR Rx & Tx Clock Register helpers
*/
static unsigned int txclk_tx_s_carrier(struct i2c_client *c,
unsigned int freq,
u16 *divider)
{
*divider = carrier_freq_to_clock_divider(freq);
cx25840_write4(c, CX25840_IR_TXCLK_REG, *divider);
return clock_divider_to_carrier_freq(*divider);
}
static unsigned int rxclk_rx_s_carrier(struct i2c_client *c,
unsigned int freq,
u16 *divider)
{
*divider = carrier_freq_to_clock_divider(freq);
cx25840_write4(c, CX25840_IR_RXCLK_REG, *divider);
return clock_divider_to_carrier_freq(*divider);
}
static u32 txclk_tx_s_max_pulse_width(struct i2c_client *c, u32 ns,
u16 *divider)
{
u64 pulse_clocks;
if (ns > IR_MAX_DURATION)
ns = IR_MAX_DURATION;
pulse_clocks = ns_to_pulse_clocks(ns);
*divider = pulse_clocks_to_clock_divider(pulse_clocks);
cx25840_write4(c, CX25840_IR_TXCLK_REG, *divider);
return (u32) pulse_width_count_to_ns(FIFO_RXTX, *divider);
}
static u32 rxclk_rx_s_max_pulse_width(struct i2c_client *c, u32 ns,
u16 *divider)
{
u64 pulse_clocks;
if (ns > IR_MAX_DURATION)
ns = IR_MAX_DURATION;
pulse_clocks = ns_to_pulse_clocks(ns);
*divider = pulse_clocks_to_clock_divider(pulse_clocks);
cx25840_write4(c, CX25840_IR_RXCLK_REG, *divider);
return (u32) pulse_width_count_to_ns(FIFO_RXTX, *divider);
}
/*
* IR Tx Carrier Duty Cycle register helpers
*/
static unsigned int cduty_tx_s_duty_cycle(struct i2c_client *c,
unsigned int duty_cycle)
{
u32 n;
n = DIV_ROUND_CLOSEST(duty_cycle * 100, 625); /* 16ths of 100% */
if (n != 0)
n--;
if (n > 15)
n = 15;
cx25840_write4(c, CX25840_IR_CDUTY_REG, n);
return DIV_ROUND_CLOSEST((n + 1) * 100, 16);
}
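/*
* Worked example (illustrative): the default Tx duty cycle of 25%
* yields n = DIV_ROUND_CLOSEST(25 * 100, 625) = 4, written to the
* register as 3 (i.e. 4/16ths of the carrier period high), and the
* function reports back DIV_ROUND_CLOSEST(4 * 100, 16) = 25%.
*/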
/*
* IR Filter Register helpers
*/
static u32 filter_rx_s_min_width(struct i2c_client *c, u32 min_width_ns)
{
u32 count = ns_to_lpf_count(min_width_ns);
cx25840_write4(c, CX25840_IR_FILTR_REG, count);
return lpf_count_to_ns(count);
}
/*
* IR IRQ Enable Register helpers
*/
static inline void irqenable_rx(struct v4l2_subdev *sd, u32 mask)
{
struct cx25840_state *state = to_state(sd);
if (is_cx23885(state) || is_cx23887(state))
mask ^= IRQEN_MSK;
mask &= (IRQEN_RTE | IRQEN_ROE | IRQEN_RSE);
cx25840_and_or4(state->c, CX25840_IR_IRQEN_REG,
~(IRQEN_RTE | IRQEN_ROE | IRQEN_RSE), mask);
}
static inline void irqenable_tx(struct v4l2_subdev *sd, u32 mask)
{
struct cx25840_state *state = to_state(sd);
if (is_cx23885(state) || is_cx23887(state))
mask ^= IRQEN_MSK;
mask &= IRQEN_TSE;
cx25840_and_or4(state->c, CX25840_IR_IRQEN_REG, ~IRQEN_TSE, mask);
}
/*
* V4L2 Subdevice IR Ops
*/
int cx25840_ir_irq_handler(struct v4l2_subdev *sd, u32 status, bool *handled)
{
struct cx25840_state *state = to_state(sd);
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c = NULL;
unsigned long flags;
union cx25840_ir_fifo_rec rx_data[FIFO_RX_DEPTH];
unsigned int i, j, k;
u32 events, v;
int tsr, rsr, rto, ror, tse, rse, rte, roe, kror;
u32 cntrl, irqen, stats;
*handled = false;
if (ir_state == NULL)
return -ENODEV;
c = ir_state->c;
/* Only support the IR controller for the CX2388[57] AV Core for now */
if (!(is_cx23885(state) || is_cx23887(state)))
return -ENODEV;
cntrl = cx25840_read4(c, CX25840_IR_CNTRL_REG);
irqen = cx25840_read4(c, CX25840_IR_IRQEN_REG);
if (is_cx23885(state) || is_cx23887(state))
irqen ^= IRQEN_MSK;
stats = cx25840_read4(c, CX25840_IR_STATS_REG);
tsr = stats & STATS_TSR; /* Tx FIFO Service Request */
rsr = stats & STATS_RSR; /* Rx FIFO Service Request */
rto = stats & STATS_RTO; /* Rx Pulse Width Timer Time Out */
ror = stats & STATS_ROR; /* Rx FIFO Over Run */
tse = irqen & IRQEN_TSE; /* Tx FIFO Service Request IRQ Enable */
rse = irqen & IRQEN_RSE; /* Rx FIFO Service Request IRQ Enable */
rte = irqen & IRQEN_RTE; /* Rx Pulse Width Timer Time Out IRQ Enable */
roe = irqen & IRQEN_ROE; /* Rx FIFO Over Run IRQ Enable */
v4l2_dbg(2, ir_debug, sd, "IR IRQ Status: %s %s %s %s %s %s\n",
tsr ? "tsr" : " ", rsr ? "rsr" : " ",
rto ? "rto" : " ", ror ? "ror" : " ",
stats & STATS_TBY ? "tby" : " ",
stats & STATS_RBY ? "rby" : " ");
v4l2_dbg(2, ir_debug, sd, "IR IRQ Enables: %s %s %s %s\n",
tse ? "tse" : " ", rse ? "rse" : " ",
rte ? "rte" : " ", roe ? "roe" : " ");
/*
* Transmitter interrupt service
*/
if (tse && tsr) {
/*
* TODO:
* Check the watermark threshold setting
* Pull FIFO_TX_DEPTH or FIFO_TX_DEPTH/2 entries from tx_kfifo
* Push the data to the hardware FIFO.
* If there was nothing more to send in the tx_kfifo, disable
* the TSR IRQ and notify the v4l2_device.
* If there was something in the tx_kfifo, check the tx_kfifo
* level and notify the v4l2_device, if it is low.
*/
/* For now, inhibit TSR interrupt until Tx is implemented */
irqenable_tx(sd, 0);
events = V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ;
v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_TX_NOTIFY, &events);
*handled = true;
}
/*
* Receiver interrupt service
*/
kror = 0;
if ((rse && rsr) || (rte && rto)) {
/*
* Receive data on RSR to clear the STATS_RSR.
* Receive data on RTO, since we may not have yet hit the RSR
* watermark when we receive the RTO.
*/
for (i = 0, v = FIFO_RX_NDV;
(v & FIFO_RX_NDV) && !kror; i = 0) {
for (j = 0;
(v & FIFO_RX_NDV) && j < FIFO_RX_DEPTH; j++) {
v = cx25840_read4(c, CX25840_IR_FIFO_REG);
rx_data[i].hw_fifo_data = v & ~FIFO_RX_NDV;
i++;
}
if (i == 0)
break;
j = i * sizeof(union cx25840_ir_fifo_rec);
k = kfifo_in_locked(&ir_state->rx_kfifo,
(unsigned char *) rx_data, j,
&ir_state->rx_kfifo_lock);
if (k != j)
kror++; /* rx_kfifo over run */
}
*handled = true;
}
events = 0;
v = 0;
if (kror) {
events |= V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN;
v4l2_err(sd, "IR receiver software FIFO overrun\n");
}
if (roe && ror) {
/*
* The RX FIFO Enable (CNTRL_RFE) must be toggled to clear
* the Rx FIFO Over Run status (STATS_ROR)
*/
v |= CNTRL_RFE;
events |= V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN;
v4l2_err(sd, "IR receiver hardware FIFO overrun\n");
}
if (rte && rto) {
/*
* The IR Receiver Enable (CNTRL_RXE) must be toggled to clear
* the Rx Pulse Width Timer Time Out (STATS_RTO)
*/
v |= CNTRL_RXE;
events |= V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED;
}
if (v) {
/* Clear STATS_ROR & STATS_RTO as needed by resetting hardware */
cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl & ~v);
cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl);
*handled = true;
}
spin_lock_irqsave(&ir_state->rx_kfifo_lock, flags);
if (kfifo_len(&ir_state->rx_kfifo) >= CX25840_IR_RX_KFIFO_SIZE / 2)
events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ;
spin_unlock_irqrestore(&ir_state->rx_kfifo_lock, flags);
if (events)
v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events);
return 0;
}
/* Receiver */
static int cx25840_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
ssize_t *num)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
bool invert;
u16 divider;
unsigned int i, n;
union cx25840_ir_fifo_rec *p;
unsigned u, v, w;
if (ir_state == NULL)
return -ENODEV;
invert = (bool) atomic_read(&ir_state->rx_invert);
divider = (u16) atomic_read(&ir_state->rxclk_divider);
n = count / sizeof(union cx25840_ir_fifo_rec)
* sizeof(union cx25840_ir_fifo_rec);
if (n == 0) {
*num = 0;
return 0;
}
n = kfifo_out_locked(&ir_state->rx_kfifo, buf, n,
&ir_state->rx_kfifo_lock);
n /= sizeof(union cx25840_ir_fifo_rec);
*num = n * sizeof(union cx25840_ir_fifo_rec);
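/*
* Each hardware FIFO record is rewritten in place as an ir_raw_event:
* pulse/space level (honoring rx_invert), duration converted from pulse
* width counts to ns and clamped to IR_MAX_DURATION, and a timeout flag
* for RTO records.
*/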
for (p = (union cx25840_ir_fifo_rec *) buf, i = 0; i < n; p++, i++) {
if ((p->hw_fifo_data & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) {
/* Assume RTO was because of no IR light input */
u = 0;
w = 1;
} else {
u = (p->hw_fifo_data & FIFO_RXTX_LVL) ? 1 : 0;
if (invert)
u = u ? 0 : 1;
w = 0;
}
v = (unsigned) pulse_width_count_to_ns(
(u16) (p->hw_fifo_data & FIFO_RXTX), divider);
if (v > IR_MAX_DURATION)
v = IR_MAX_DURATION;
init_ir_raw_event(&p->ir_core_data);
p->ir_core_data.pulse = u;
p->ir_core_data.duration = v;
p->ir_core_data.timeout = w;
v4l2_dbg(2, ir_debug, sd, "rx read: %10u ns %s %s\n",
v, u ? "mark" : "space", w ? "(timed out)" : "");
if (w)
v4l2_dbg(2, ir_debug, sd, "rx read: end of rx\n");
}
return 0;
}
static int cx25840_ir_rx_g_parameters(struct v4l2_subdev *sd,
struct v4l2_subdev_ir_parameters *p)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
if (ir_state == NULL)
return -ENODEV;
mutex_lock(&ir_state->rx_params_lock);
memcpy(p, &ir_state->rx_params,
sizeof(struct v4l2_subdev_ir_parameters));
mutex_unlock(&ir_state->rx_params_lock);
return 0;
}
static int cx25840_ir_rx_shutdown(struct v4l2_subdev *sd)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c;
if (ir_state == NULL)
return -ENODEV;
c = ir_state->c;
mutex_lock(&ir_state->rx_params_lock);
/* Disable or slow down all IR Rx circuits and counters */
irqenable_rx(sd, 0);
control_rx_enable(c, false);
control_rx_demodulation_enable(c, false);
control_rx_s_edge_detection(c, CNTRL_EDG_NONE);
filter_rx_s_min_width(c, 0);
cx25840_write4(c, CX25840_IR_RXCLK_REG, RXCLK_RCD);
ir_state->rx_params.shutdown = true;
mutex_unlock(&ir_state->rx_params_lock);
return 0;
}
static int cx25840_ir_rx_s_parameters(struct v4l2_subdev *sd,
struct v4l2_subdev_ir_parameters *p)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c;
struct v4l2_subdev_ir_parameters *o;
u16 rxclk_divider;
if (ir_state == NULL)
return -ENODEV;
if (p->shutdown)
return cx25840_ir_rx_shutdown(sd);
if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH)
return -ENOSYS;
c = ir_state->c;
o = &ir_state->rx_params;
mutex_lock(&ir_state->rx_params_lock);
o->shutdown = p->shutdown;
p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
o->mode = p->mode;
p->bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec);
o->bytes_per_data_element = p->bytes_per_data_element;
/* Before we tweak the hardware, we have to disable the receiver */
irqenable_rx(sd, 0);
control_rx_enable(c, false);
control_rx_demodulation_enable(c, p->modulation);
o->modulation = p->modulation;
if (p->modulation) {
p->carrier_freq = rxclk_rx_s_carrier(c, p->carrier_freq,
&rxclk_divider);
o->carrier_freq = p->carrier_freq;
p->duty_cycle = 50;
o->duty_cycle = p->duty_cycle;
control_rx_s_carrier_window(c, p->carrier_freq,
&p->carrier_range_lower,
&p->carrier_range_upper);
o->carrier_range_lower = p->carrier_range_lower;
o->carrier_range_upper = p->carrier_range_upper;
p->max_pulse_width =
(u32) pulse_width_count_to_ns(FIFO_RXTX, rxclk_divider);
} else {
p->max_pulse_width =
rxclk_rx_s_max_pulse_width(c, p->max_pulse_width,
&rxclk_divider);
}
o->max_pulse_width = p->max_pulse_width;
atomic_set(&ir_state->rxclk_divider, rxclk_divider);
p->noise_filter_min_width =
filter_rx_s_min_width(c, p->noise_filter_min_width);
o->noise_filter_min_width = p->noise_filter_min_width;
p->resolution = clock_divider_to_resolution(rxclk_divider);
o->resolution = p->resolution;
/* FIXME - make this dependent on resolution for better performance */
control_rx_irq_watermark(c, RX_FIFO_HALF_FULL);
control_rx_s_edge_detection(c, CNTRL_EDG_BOTH);
o->invert_level = p->invert_level;
atomic_set(&ir_state->rx_invert, p->invert_level);
o->interrupt_enable = p->interrupt_enable;
o->enable = p->enable;
if (p->enable) {
unsigned long flags;
spin_lock_irqsave(&ir_state->rx_kfifo_lock, flags);
kfifo_reset(&ir_state->rx_kfifo);
spin_unlock_irqrestore(&ir_state->rx_kfifo_lock, flags);
if (p->interrupt_enable)
irqenable_rx(sd, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE);
control_rx_enable(c, p->enable);
}
mutex_unlock(&ir_state->rx_params_lock);
return 0;
}
/* Transmitter */
static int cx25840_ir_tx_write(struct v4l2_subdev *sd, u8 *buf, size_t count,
ssize_t *num)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c;
if (ir_state == NULL)
return -ENODEV;
c = ir_state->c;
#if 0
/*
* FIXME - the code below is an incomplete and untested sketch of what
* may need to be done. The critical part is to get 4 (or 8) pulses
* from the tx_kfifo, or converted from ns to the proper units from the
* input, and push them off to the hardware Tx FIFO right away, if the
* HW TX fifo needs service. The rest can be pushed to the tx_kfifo in
* a less critical timeframe. Also watch out for overrunning the
* tx_kfifo - don't let it happen and let the caller know not all of
* the pulses were written.
*/
u32 *ns_pulse = (u32 *) buf;
unsigned int n;
u32 fifo_pulse[FIFO_TX_DEPTH];
u32 mark;
/* Compute how much we can fit in the tx kfifo */
n = CX25840_IR_TX_KFIFO_SIZE - kfifo_len(ir_state->tx_kfifo);
n = min(n, (unsigned int) count);
n /= sizeof(u32);
/* FIXME - turn on Tx Fifo service interrupt
* check hardware fifo level, and other stuff
*/
for (i = 0; i < n; ) {
for (j = 0; j < FIFO_TX_DEPTH / 2 && i < n; j++) {
mark = ns_pulse[i] & LEVEL_MASK;
fifo_pulse[j] = ns_to_pulse_width_count(
ns_pulse[i] &
~LEVEL_MASK,
ir_state->txclk_divider);
if (mark)
fifo_pulse[j] |= FIFO_RXTX_LVL; /* OR in the level bit for a mark */
i++;
}
kfifo_put(ir_state->tx_kfifo, (u8 *) fifo_pulse,
j * sizeof(u32));
}
*num = n * sizeof(u32);
#else
/* For now enable the Tx FIFO Service interrupt & pretend we did work */
irqenable_tx(sd, IRQEN_TSE);
*num = count;
#endif
return 0;
}
static int cx25840_ir_tx_g_parameters(struct v4l2_subdev *sd,
struct v4l2_subdev_ir_parameters *p)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
if (ir_state == NULL)
return -ENODEV;
mutex_lock(&ir_state->tx_params_lock);
memcpy(p, &ir_state->tx_params,
sizeof(struct v4l2_subdev_ir_parameters));
mutex_unlock(&ir_state->tx_params_lock);
return 0;
}
static int cx25840_ir_tx_shutdown(struct v4l2_subdev *sd)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c;
if (ir_state == NULL)
return -ENODEV;
c = ir_state->c;
mutex_lock(&ir_state->tx_params_lock);
/* Disable or slow down all IR Tx circuits and counters */
irqenable_tx(sd, 0);
control_tx_enable(c, false);
control_tx_modulation_enable(c, false);
cx25840_write4(c, CX25840_IR_TXCLK_REG, TXCLK_TCD);
ir_state->tx_params.shutdown = true;
mutex_unlock(&ir_state->tx_params_lock);
return 0;
}
static int cx25840_ir_tx_s_parameters(struct v4l2_subdev *sd,
struct v4l2_subdev_ir_parameters *p)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
struct i2c_client *c;
struct v4l2_subdev_ir_parameters *o;
u16 txclk_divider;
if (ir_state == NULL)
return -ENODEV;
if (p->shutdown)
return cx25840_ir_tx_shutdown(sd);
if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH)
return -ENOSYS;
c = ir_state->c;
o = &ir_state->tx_params;
mutex_lock(&ir_state->tx_params_lock);
o->shutdown = p->shutdown;
p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
o->mode = p->mode;
p->bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec);
o->bytes_per_data_element = p->bytes_per_data_element;
/* Before we tweak the hardware, we have to disable the transmitter */
irqenable_tx(sd, 0);
control_tx_enable(c, false);
control_tx_modulation_enable(c, p->modulation);
o->modulation = p->modulation;
if (p->modulation) {
p->carrier_freq = txclk_tx_s_carrier(c, p->carrier_freq,
&txclk_divider);
o->carrier_freq = p->carrier_freq;
p->duty_cycle = cduty_tx_s_duty_cycle(c, p->duty_cycle);
o->duty_cycle = p->duty_cycle;
p->max_pulse_width =
(u32) pulse_width_count_to_ns(FIFO_RXTX, txclk_divider);
} else {
p->max_pulse_width =
txclk_tx_s_max_pulse_width(c, p->max_pulse_width,
&txclk_divider);
}
o->max_pulse_width = p->max_pulse_width;
atomic_set(&ir_state->txclk_divider, txclk_divider);
p->resolution = clock_divider_to_resolution(txclk_divider);
o->resolution = p->resolution;
/* FIXME - make this dependent on resolution for better performance */
control_tx_irq_watermark(c, TX_FIFO_HALF_EMPTY);
control_tx_polarity_invert(c, p->invert_carrier_sense);
o->invert_carrier_sense = p->invert_carrier_sense;
/*
* FIXME: we don't have hardware help for IO pin level inversion
* here like we have on the CX23888.
* Act on this with some mix of logical inversion of data levels,
* carrier polarity, and carrier duty cycle.
*/
o->invert_level = p->invert_level;
o->interrupt_enable = p->interrupt_enable;
o->enable = p->enable;
if (p->enable) {
/* reset tx_fifo here */
if (p->interrupt_enable)
irqenable_tx(sd, IRQEN_TSE);
control_tx_enable(c, p->enable);
}
mutex_unlock(&ir_state->tx_params_lock);
return 0;
}
/*
* V4L2 Subdevice Core Ops support
*/
int cx25840_ir_log_status(struct v4l2_subdev *sd)
{
struct cx25840_state *state = to_state(sd);
struct i2c_client *c = state->c;
char *s;
int i, j;
u32 cntrl, txclk, rxclk, cduty, stats, irqen, filtr;
/* The CX23888 chip doesn't have an IR controller on the A/V core */
if (is_cx23888(state))
return 0;
cntrl = cx25840_read4(c, CX25840_IR_CNTRL_REG);
txclk = cx25840_read4(c, CX25840_IR_TXCLK_REG) & TXCLK_TCD;
rxclk = cx25840_read4(c, CX25840_IR_RXCLK_REG) & RXCLK_RCD;
cduty = cx25840_read4(c, CX25840_IR_CDUTY_REG) & CDUTY_CDC;
stats = cx25840_read4(c, CX25840_IR_STATS_REG);
irqen = cx25840_read4(c, CX25840_IR_IRQEN_REG);
if (is_cx23885(state) || is_cx23887(state))
irqen ^= IRQEN_MSK;
filtr = cx25840_read4(c, CX25840_IR_FILTR_REG) & FILTR_LPF;
v4l2_info(sd, "IR Receiver:\n");
v4l2_info(sd, "\tEnabled: %s\n",
cntrl & CNTRL_RXE ? "yes" : "no");
v4l2_info(sd, "\tDemodulation from a carrier: %s\n",
cntrl & CNTRL_DMD ? "enabled" : "disabled");
v4l2_info(sd, "\tFIFO: %s\n",
cntrl & CNTRL_RFE ? "enabled" : "disabled");
switch (cntrl & CNTRL_EDG) {
case CNTRL_EDG_NONE:
s = "disabled";
break;
case CNTRL_EDG_FALL:
s = "falling edge";
break;
case CNTRL_EDG_RISE:
s = "rising edge";
break;
case CNTRL_EDG_BOTH:
s = "rising & falling edges";
break;
default:
s = "??? edge";
break;
}
v4l2_info(sd, "\tPulse timers' start/stop trigger: %s\n", s);
v4l2_info(sd, "\tFIFO data on pulse timer overflow: %s\n",
cntrl & CNTRL_R ? "not loaded" : "overflow marker");
v4l2_info(sd, "\tFIFO interrupt watermark: %s\n",
cntrl & CNTRL_RIC ? "not empty" : "half full or greater");
v4l2_info(sd, "\tLoopback mode: %s\n",
cntrl & CNTRL_LBM ? "loopback active" : "normal receive");
if (cntrl & CNTRL_DMD) {
v4l2_info(sd, "\tExpected carrier (16 clocks): %u Hz\n",
clock_divider_to_carrier_freq(rxclk));
switch (cntrl & CNTRL_WIN) {
case CNTRL_WIN_3_3:
i = 3;
j = 3;
break;
case CNTRL_WIN_4_3:
i = 4;
j = 3;
break;
case CNTRL_WIN_3_4:
i = 3;
j = 4;
break;
case CNTRL_WIN_4_4:
i = 4;
j = 4;
break;
default:
i = 0;
j = 0;
break;
}
v4l2_info(sd, "\tNext carrier edge window: 16 clocks "
"-%1d/+%1d, %u to %u Hz\n", i, j,
clock_divider_to_freq(rxclk, 16 + j),
clock_divider_to_freq(rxclk, 16 - i));
}
v4l2_info(sd, "\tMax measurable pulse width: %u us, %llu ns\n",
pulse_width_count_to_us(FIFO_RXTX, rxclk),
pulse_width_count_to_ns(FIFO_RXTX, rxclk));
v4l2_info(sd, "\tLow pass filter: %s\n",
filtr ? "enabled" : "disabled");
if (filtr)
v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, "
"%u ns\n",
lpf_count_to_us(filtr),
lpf_count_to_ns(filtr));
v4l2_info(sd, "\tPulse width timer timed-out: %s\n",
stats & STATS_RTO ? "yes" : "no");
v4l2_info(sd, "\tPulse width timer time-out intr: %s\n",
irqen & IRQEN_RTE ? "enabled" : "disabled");
v4l2_info(sd, "\tFIFO overrun: %s\n",
stats & STATS_ROR ? "yes" : "no");
v4l2_info(sd, "\tFIFO overrun interrupt: %s\n",
irqen & IRQEN_ROE ? "enabled" : "disabled");
v4l2_info(sd, "\tBusy: %s\n",
stats & STATS_RBY ? "yes" : "no");
v4l2_info(sd, "\tFIFO service requested: %s\n",
stats & STATS_RSR ? "yes" : "no");
v4l2_info(sd, "\tFIFO service request interrupt: %s\n",
irqen & IRQEN_RSE ? "enabled" : "disabled");
v4l2_info(sd, "IR Transmitter:\n");
v4l2_info(sd, "\tEnabled: %s\n",
cntrl & CNTRL_TXE ? "yes" : "no");
v4l2_info(sd, "\tModulation onto a carrier: %s\n",
cntrl & CNTRL_MOD ? "enabled" : "disabled");
v4l2_info(sd, "\tFIFO: %s\n",
cntrl & CNTRL_TFE ? "enabled" : "disabled");
v4l2_info(sd, "\tFIFO interrupt watermark: %s\n",
cntrl & CNTRL_TIC ? "not empty" : "half full or less");
v4l2_info(sd, "\tCarrier polarity: %s\n",
cntrl & CNTRL_CPL ? "space:burst mark:noburst"
: "space:noburst mark:burst");
if (cntrl & CNTRL_MOD) {
v4l2_info(sd, "\tCarrier (16 clocks): %u Hz\n",
clock_divider_to_carrier_freq(txclk));
v4l2_info(sd, "\tCarrier duty cycle: %2u/16\n",
cduty + 1);
}
v4l2_info(sd, "\tMax pulse width: %u us, %llu ns\n",
pulse_width_count_to_us(FIFO_RXTX, txclk),
pulse_width_count_to_ns(FIFO_RXTX, txclk));
v4l2_info(sd, "\tBusy: %s\n",
stats & STATS_TBY ? "yes" : "no");
v4l2_info(sd, "\tFIFO service requested: %s\n",
stats & STATS_TSR ? "yes" : "no");
v4l2_info(sd, "\tFIFO service request interrupt: %s\n",
irqen & IRQEN_TSE ? "enabled" : "disabled");
return 0;
}
const struct v4l2_subdev_ir_ops cx25840_ir_ops = {
.rx_read = cx25840_ir_rx_read,
.rx_g_parameters = cx25840_ir_rx_g_parameters,
.rx_s_parameters = cx25840_ir_rx_s_parameters,
.tx_write = cx25840_ir_tx_write,
.tx_g_parameters = cx25840_ir_tx_g_parameters,
.tx_s_parameters = cx25840_ir_tx_s_parameters,
};
static const struct v4l2_subdev_ir_parameters default_rx_params = {
.bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec),
.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
.enable = false,
.interrupt_enable = false,
.shutdown = true,
.modulation = true,
.carrier_freq = 36000, /* 36 kHz - RC-5, and RC-6 carrier */
/* RC-5: 666,667 ns = 1/36 kHz * 32 cycles * 1 mark * 0.75 */
/* RC-6: 333,333 ns = 1/36 kHz * 16 cycles * 1 mark * 0.75 */
.noise_filter_min_width = 333333, /* ns */
.carrier_range_lower = 35000,
.carrier_range_upper = 37000,
.invert_level = false,
};
static const struct v4l2_subdev_ir_parameters default_tx_params = {
.bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec),
.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
.enable = false,
.interrupt_enable = false,
.shutdown = true,
.modulation = true,
.carrier_freq = 36000, /* 36 kHz - RC-5 carrier */
.duty_cycle = 25, /* 25 % - RC-5 carrier */
.invert_level = false,
.invert_carrier_sense = false,
};
int cx25840_ir_probe(struct v4l2_subdev *sd)
{
struct cx25840_state *state = to_state(sd);
struct cx25840_ir_state *ir_state;
struct v4l2_subdev_ir_parameters default_params;
/* Only init the IR controller for the CX2388[57] AV Core for now */
if (!(is_cx23885(state) || is_cx23887(state)))
return 0;
ir_state = kzalloc(sizeof(struct cx25840_ir_state), GFP_KERNEL);
if (ir_state == NULL)
return -ENOMEM;
spin_lock_init(&ir_state->rx_kfifo_lock);
if (kfifo_alloc(&ir_state->rx_kfifo,
CX25840_IR_RX_KFIFO_SIZE, GFP_KERNEL)) {
kfree(ir_state);
return -ENOMEM;
}
ir_state->c = state->c;
state->ir_state = ir_state;
/* Ensure no interrupts arrive yet */
if (is_cx23885(state) || is_cx23887(state))
cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, IRQEN_MSK);
else
cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, 0);
mutex_init(&ir_state->rx_params_lock);
memcpy(&default_params, &default_rx_params,
sizeof(struct v4l2_subdev_ir_parameters));
v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params);
mutex_init(&ir_state->tx_params_lock);
memcpy(&default_params, &default_tx_params,
sizeof(struct v4l2_subdev_ir_parameters));
v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
return 0;
}
int cx25840_ir_remove(struct v4l2_subdev *sd)
{
struct cx25840_state *state = to_state(sd);
struct cx25840_ir_state *ir_state = to_ir_state(sd);
if (ir_state == NULL)
return -ENODEV;
cx25840_ir_rx_shutdown(sd);
cx25840_ir_tx_shutdown(sd);
kfifo_free(&ir_state->rx_kfifo);
kfree(ir_state);
state->ir_state = NULL;
return 0;
}
| gpl-2.0 |
ocoot/Xiaomi_Kernel_OpenSource | arch/arm/mach-omap2/powerdomain-common.c | 7622 | 3163 | /*
* Common powerdomain framework functions
*
* Copyright (C) 2010-2011 Texas Instruments, Inc.
* Copyright (C) 2010 Nokia Corporation
*
* Derived from mach-omap2/powerdomain.c written by Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include "pm.h"
#include "cm.h"
#include "cm-regbits-34xx.h"
#include "cm-regbits-44xx.h"
#include "prm-regbits-34xx.h"
#include "prm-regbits-44xx.h"
/*
* OMAP3 and OMAP4 specific register bit initialisations
* Note that the names here do not correspond to individual power
* domains; the bit mapping used applies to all of them
*/
/* OMAP3 and OMAP4 Memory Onstate Masks (common across all power domains) */
#define OMAP_MEM0_ONSTATE_MASK OMAP3430_SHAREDL1CACHEFLATONSTATE_MASK
#define OMAP_MEM1_ONSTATE_MASK OMAP3430_L1FLATMEMONSTATE_MASK
#define OMAP_MEM2_ONSTATE_MASK OMAP3430_SHAREDL2CACHEFLATONSTATE_MASK
#define OMAP_MEM3_ONSTATE_MASK OMAP3430_L2FLATMEMONSTATE_MASK
#define OMAP_MEM4_ONSTATE_MASK OMAP4430_OCP_NRET_BANK_ONSTATE_MASK
/* OMAP3 and OMAP4 Memory Retstate Masks (common across all power domains) */
#define OMAP_MEM0_RETSTATE_MASK OMAP3430_SHAREDL1CACHEFLATRETSTATE_MASK
#define OMAP_MEM1_RETSTATE_MASK OMAP3430_L1FLATMEMRETSTATE_MASK
#define OMAP_MEM2_RETSTATE_MASK OMAP3430_SHAREDL2CACHEFLATRETSTATE_MASK
#define OMAP_MEM3_RETSTATE_MASK OMAP3430_L2FLATMEMRETSTATE_MASK
#define OMAP_MEM4_RETSTATE_MASK OMAP4430_OCP_NRET_BANK_RETSTATE_MASK
/* OMAP3 and OMAP4 Memory Status bits */
#define OMAP_MEM0_STATEST_MASK OMAP3430_SHAREDL1CACHEFLATSTATEST_MASK
#define OMAP_MEM1_STATEST_MASK OMAP3430_L1FLATMEMSTATEST_MASK
#define OMAP_MEM2_STATEST_MASK OMAP3430_SHAREDL2CACHEFLATSTATEST_MASK
#define OMAP_MEM3_STATEST_MASK OMAP3430_L2FLATMEMSTATEST_MASK
#define OMAP_MEM4_STATEST_MASK OMAP4430_OCP_NRET_BANK_STATEST_MASK
/* Common internal functions used across OMAP revisions */
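/*
* Usage sketch (hypothetical caller): per-bank powerdomain code would do
* mask = omap2_pwrdm_get_mem_bank_onstate_mask(1);
* to fetch the MEM1 on-state field mask for its PM_PWSTCTRL register.
*/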
u32 omap2_pwrdm_get_mem_bank_onstate_mask(u8 bank)
{
switch (bank) {
case 0:
return OMAP_MEM0_ONSTATE_MASK;
case 1:
return OMAP_MEM1_ONSTATE_MASK;
case 2:
return OMAP_MEM2_ONSTATE_MASK;
case 3:
return OMAP_MEM3_ONSTATE_MASK;
case 4:
return OMAP_MEM4_ONSTATE_MASK;
default:
WARN_ON(1); /* should never happen */
return -EEXIST;
}
return 0;
}
u32 omap2_pwrdm_get_mem_bank_retst_mask(u8 bank)
{
switch (bank) {
case 0:
return OMAP_MEM0_RETSTATE_MASK;
case 1:
return OMAP_MEM1_RETSTATE_MASK;
case 2:
return OMAP_MEM2_RETSTATE_MASK;
case 3:
return OMAP_MEM3_RETSTATE_MASK;
case 4:
return OMAP_MEM4_RETSTATE_MASK;
default:
WARN_ON(1); /* should never happen */
return -EEXIST;
}
return 0;
}
u32 omap2_pwrdm_get_mem_bank_stst_mask(u8 bank)
{
switch (bank) {
case 0:
return OMAP_MEM0_STATEST_MASK;
case 1:
return OMAP_MEM1_STATEST_MASK;
case 2:
return OMAP_MEM2_STATEST_MASK;
case 3:
return OMAP_MEM3_STATEST_MASK;
case 4:
return OMAP_MEM4_STATEST_MASK;
default:
WARN_ON(1); /* should never happen */
return -EEXIST;
}
return 0;
}
| gpl-2.0 |
javilonas/Lonas_KL-GT-I9300-OMNI | drivers/staging/easycap/easycap_testcard.c | 8134 | 3628 | /******************************************************************************
* *
* easycap_testcard.c *
* *
******************************************************************************/
/*
*
* Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
*
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*****************************************************************************/
#include "easycap.h"
/*****************************************************************************/
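/* One test card line is 720 pixels of UYVY 4:2:2 at 2 bytes per pixel */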
#define TESTCARD_BYTESPERLINE (2 * 720)
void
easycap_testcard(struct easycap *peasycap, int field)
{
int total;
int y, u, v, r, g, b;
unsigned char uyvy[4];
int i1, line, k, m, n, more, much, barwidth, barheight;
unsigned char bfbar[TESTCARD_BYTESPERLINE / 8], *p1, *p2;
struct data_buffer *pfield_buffer;
if (!peasycap) {
SAY("ERROR: peasycap is NULL\n");
return;
}
JOM(8, "%i=field\n", field);
switch (peasycap->width) {
case 720:
case 360: {
barwidth = (2 * 720) / 8;
break;
}
case 704:
case 352: {
barwidth = (2 * 704) / 8;
break;
}
case 640:
case 320: {
barwidth = (2 * 640) / 8;
break;
}
default: {
SAM("ERROR: cannot set barwidth\n");
return;
}
}
if (TESTCARD_BYTESPERLINE < barwidth) {
SAM("ERROR: barwidth is too large\n");
return;
}
switch (peasycap->height) {
case 576:
case 288: {
barheight = 576;
break;
}
case 480:
case 240: {
barheight = 480;
break;
}
default: {
SAM("ERROR: cannot set barheight\n");
return;
}
}
total = 0;
k = field;
m = 0;
n = 0;
for (line = 0; line < (barheight / 2); line++) {
for (i1 = 0; i1 < 8; i1++) {
r = (i1 * 256)/8;
g = (i1 * 256)/8;
b = (i1 * 256)/8;
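/*
* Classic RGB->YUV transform (BT.601 luma weights) in integer math,
* with coefficients scaled by 1000. For this grayscale ramp
* (r == g == b) the chroma terms nearly cancel, so u and v end up
* close to the 128 midpoint while y tracks the gray level.
*/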
y = 299*r/1000 + 587*g/1000 + 114*b/1000 ;
u = -147*r/1000 - 289*g/1000 + 436*b/1000 ;
u = u + 128;
v = 615*r/1000 - 515*g/1000 - 100*b/1000 ;
v = v + 128;
uyvy[0] = 0xFF & u ;
uyvy[1] = 0xFF & y ;
uyvy[2] = 0xFF & v ;
uyvy[3] = 0xFF & y ;
p1 = &bfbar[0];
while (p1 < &bfbar[barwidth]) {
*p1++ = uyvy[0] ;
*p1++ = uyvy[1] ;
*p1++ = uyvy[2] ;
*p1++ = uyvy[3] ;
total += 4;
}
p1 = &bfbar[0];
more = barwidth;
while (more) {
if ((FIELD_BUFFER_SIZE/PAGE_SIZE) <= m) {
SAM("ERROR: bad m reached\n");
return;
}
if (PAGE_SIZE < n) {
SAM("ERROR: bad n reached\n");
return;
}
if (0 > more) {
SAM("ERROR: internal fault\n");
return;
}
much = PAGE_SIZE - n;
if (much > more)
much = more;
pfield_buffer = &peasycap->field_buffer[k][m];
p2 = pfield_buffer->pgo + n;
memcpy(p2, p1, much);
p1 += much;
n += much;
more -= much;
if (PAGE_SIZE == n) {
m++;
n = 0;
}
}
}
}
return;
}
| gpl-2.0 |
CyanogenMod/android_kernel_lge_msm8226 | drivers/net/wireless/rt2x00/rt2x00leds.c | 9158 | 6602 | /*
Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
<http://rt2x00.serialmonkey.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the
Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
Module: rt2x00lib
Abstract: rt2x00 led specific routines.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "rt2x00.h"
#include "rt2x00lib.h"
void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi)
{
struct rt2x00_led *led = &rt2x00dev->led_qual;
unsigned int brightness;
if ((led->type != LED_TYPE_QUALITY) || !(led->flags & LED_REGISTERED))
return;
/*
* LED handling requires a positive value for the rssi;
* to get that we need to add the correction offset.
*/
rssi += rt2x00dev->rssi_offset;
/*
* Get the rssi level, this is used to convert the rssi
* to a LED value inside the range LED_OFF - LED_FULL.
*/
if (rssi <= 30)
rssi = 0;
else if (rssi <= 39)
rssi = 1;
else if (rssi <= 49)
rssi = 2;
else if (rssi <= 53)
rssi = 3;
else if (rssi <= 63)
rssi = 4;
else
rssi = 5;
/*
* Note that we must _not_ send LED_OFF since the driver
* is going to calculate the value and might use it in a
* division.
*/
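/*
* Worked example (assuming the stock LED class LED_FULL of 255):
* LED_FULL / 6 == 42 in integer math, so rssi level 0 yields
* brightness 1 and level 5 yields 211; the "+ 1" keeps the result
* non-zero for the reason above.
*/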
brightness = ((LED_FULL / 6) * rssi) + 1;
if (brightness != led->led_dev.brightness) {
led->led_dev.brightness_set(&led->led_dev, brightness);
led->led_dev.brightness = brightness;
}
}
static void rt2x00led_led_simple(struct rt2x00_led *led, bool enabled)
{
unsigned int brightness = enabled ? LED_FULL : LED_OFF;
if (!(led->flags & LED_REGISTERED))
return;
led->led_dev.brightness_set(&led->led_dev, brightness);
led->led_dev.brightness = brightness;
}
void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled)
{
if (rt2x00dev->led_qual.type == LED_TYPE_ACTIVITY)
rt2x00led_led_simple(&rt2x00dev->led_qual, enabled);
}
void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled)
{
if (rt2x00dev->led_assoc.type == LED_TYPE_ASSOC)
rt2x00led_led_simple(&rt2x00dev->led_assoc, enabled);
}
void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled)
{
if (rt2x00dev->led_radio.type == LED_TYPE_RADIO)
rt2x00led_led_simple(&rt2x00dev->led_radio, enabled);
}
static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev,
struct rt2x00_led *led,
const char *name)
{
struct device *device = wiphy_dev(rt2x00dev->hw->wiphy);
int retval;
led->led_dev.name = name;
led->led_dev.brightness = LED_OFF;
retval = led_classdev_register(device, &led->led_dev);
if (retval) {
ERROR(rt2x00dev, "Failed to register led handler.\n");
return retval;
}
led->flags |= LED_REGISTERED;
return 0;
}
void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
{
char dev_name[16];
char name[32];
int retval;
unsigned long on_period;
unsigned long off_period;
snprintf(dev_name, sizeof(dev_name), "%s-%s",
rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy));
if (rt2x00dev->led_radio.flags & LED_INITIALIZED) {
snprintf(name, sizeof(name), "%s::radio", dev_name);
retval = rt2x00leds_register_led(rt2x00dev,
&rt2x00dev->led_radio,
name);
if (retval)
goto exit_fail;
}
if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) {
snprintf(name, sizeof(name), "%s::assoc", dev_name);
retval = rt2x00leds_register_led(rt2x00dev,
&rt2x00dev->led_assoc,
name);
if (retval)
goto exit_fail;
}
if (rt2x00dev->led_qual.flags & LED_INITIALIZED) {
snprintf(name, sizeof(name), "%s::quality", dev_name);
retval = rt2x00leds_register_led(rt2x00dev,
&rt2x00dev->led_qual,
name);
if (retval)
goto exit_fail;
}
/*
* Initialize blink time to default value:
* On period: 70ms
* Off period: 30ms
*/
if (rt2x00dev->led_radio.led_dev.blink_set) {
on_period = 70;
off_period = 30;
rt2x00dev->led_radio.led_dev.blink_set(
&rt2x00dev->led_radio.led_dev, &on_period, &off_period);
}
return;
exit_fail:
rt2x00leds_unregister(rt2x00dev);
}
static void rt2x00leds_unregister_led(struct rt2x00_led *led)
{
led_classdev_unregister(&led->led_dev);
/*
* This might look weird, but when we are unregistering while
* suspended the led is already off, and since we haven't
* fully resumed yet, access to the device might not be
* possible yet.
*/
if (!(led->led_dev.flags & LED_SUSPENDED))
led->led_dev.brightness_set(&led->led_dev, LED_OFF);
led->flags &= ~LED_REGISTERED;
}
void rt2x00leds_unregister(struct rt2x00_dev *rt2x00dev)
{
if (rt2x00dev->led_qual.flags & LED_REGISTERED)
rt2x00leds_unregister_led(&rt2x00dev->led_qual);
if (rt2x00dev->led_assoc.flags & LED_REGISTERED)
rt2x00leds_unregister_led(&rt2x00dev->led_assoc);
if (rt2x00dev->led_radio.flags & LED_REGISTERED)
rt2x00leds_unregister_led(&rt2x00dev->led_radio);
}
static inline void rt2x00leds_suspend_led(struct rt2x00_led *led)
{
led_classdev_suspend(&led->led_dev);
/* This shouldn't be needed, but just to be safe */
led->led_dev.brightness_set(&led->led_dev, LED_OFF);
led->led_dev.brightness = LED_OFF;
}
void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev)
{
if (rt2x00dev->led_qual.flags & LED_REGISTERED)
rt2x00leds_suspend_led(&rt2x00dev->led_qual);
if (rt2x00dev->led_assoc.flags & LED_REGISTERED)
rt2x00leds_suspend_led(&rt2x00dev->led_assoc);
if (rt2x00dev->led_radio.flags & LED_REGISTERED)
rt2x00leds_suspend_led(&rt2x00dev->led_radio);
}
static inline void rt2x00leds_resume_led(struct rt2x00_led *led)
{
led_classdev_resume(&led->led_dev);
/* Device might have enabled the LEDS during resume */
led->led_dev.brightness_set(&led->led_dev, LED_OFF);
led->led_dev.brightness = LED_OFF;
}
void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev)
{
if (rt2x00dev->led_radio.flags & LED_REGISTERED)
rt2x00leds_resume_led(&rt2x00dev->led_radio);
if (rt2x00dev->led_assoc.flags & LED_REGISTERED)
rt2x00leds_resume_led(&rt2x00dev->led_assoc);
if (rt2x00dev->led_qual.flags & LED_REGISTERED)
rt2x00leds_resume_led(&rt2x00dev->led_qual);
}
| gpl-2.0 |
jiangchao87/m8uhl | fs/ecryptfs/debug.c | 14278 | 3939 | /**
* eCryptfs: Linux filesystem encryption layer
* Functions only useful for debugging.
*
* Copyright (C) 2006 International Business Machines Corp.
* Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*/
#include "ecryptfs_kernel.h"
/**
* ecryptfs_dump_auth_tok - debug function to print auth toks
*
* This function will print the contents of an ecryptfs authentication
* token.
*/
void ecryptfs_dump_auth_tok(struct ecryptfs_auth_tok *auth_tok)
{
char salt[ECRYPTFS_SALT_SIZE * 2 + 1];
char sig[ECRYPTFS_SIG_SIZE_HEX + 1];
ecryptfs_printk(KERN_DEBUG, "Auth tok at mem loc [%p]:\n",
auth_tok);
if (auth_tok->flags & ECRYPTFS_PRIVATE_KEY) {
ecryptfs_printk(KERN_DEBUG, " * private key type\n");
} else {
ecryptfs_printk(KERN_DEBUG, " * passphrase type\n");
ecryptfs_to_hex(salt, auth_tok->token.password.salt,
ECRYPTFS_SALT_SIZE);
salt[ECRYPTFS_SALT_SIZE * 2] = '\0';
ecryptfs_printk(KERN_DEBUG, " * salt = [%s]\n", salt);
if (auth_tok->token.password.flags &
ECRYPTFS_PERSISTENT_PASSWORD) {
ecryptfs_printk(KERN_DEBUG, " * persistent\n");
}
memcpy(sig, auth_tok->token.password.signature,
ECRYPTFS_SIG_SIZE_HEX);
sig[ECRYPTFS_SIG_SIZE_HEX] = '\0';
ecryptfs_printk(KERN_DEBUG, " * signature = [%s]\n", sig);
}
ecryptfs_printk(KERN_DEBUG, " * session_key.flags = [0x%x]\n",
auth_tok->session_key.flags);
if (auth_tok->session_key.flags
& ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT)
ecryptfs_printk(KERN_DEBUG,
" * Userspace decrypt request set\n");
if (auth_tok->session_key.flags
& ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT)
ecryptfs_printk(KERN_DEBUG,
" * Userspace encrypt request set\n");
if (auth_tok->session_key.flags & ECRYPTFS_CONTAINS_DECRYPTED_KEY) {
ecryptfs_printk(KERN_DEBUG, " * Contains decrypted key\n");
ecryptfs_printk(KERN_DEBUG,
" * session_key.decrypted_key_size = [0x%x]\n",
auth_tok->session_key.decrypted_key_size);
ecryptfs_printk(KERN_DEBUG, " * Decrypted session key "
"dump:\n");
if (ecryptfs_verbosity > 0)
ecryptfs_dump_hex(auth_tok->session_key.decrypted_key,
ECRYPTFS_DEFAULT_KEY_BYTES);
}
if (auth_tok->session_key.flags & ECRYPTFS_CONTAINS_ENCRYPTED_KEY) {
ecryptfs_printk(KERN_DEBUG, " * Contains encrypted key\n");
ecryptfs_printk(KERN_DEBUG,
" * session_key.encrypted_key_size = [0x%x]\n",
auth_tok->session_key.encrypted_key_size);
ecryptfs_printk(KERN_DEBUG, " * Encrypted session key "
"dump:\n");
if (ecryptfs_verbosity > 0)
ecryptfs_dump_hex(auth_tok->session_key.encrypted_key,
auth_tok->session_key.
encrypted_key_size);
}
}
/**
* ecryptfs_dump_hex - debug hex printer
* @data: string of bytes to be printed
* @bytes: number of bytes to print
*
* Dump hexadecimal representation of char array
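*
* Example (with ecryptfs_verbosity >= 1): dumping the three bytes
* { 0x41, 0x42, 0x43 } prints "0x41.0x42.0x43." followed by a newline.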
*/
void ecryptfs_dump_hex(char *data, int bytes)
{
int i = 0;
int add_newline = 1;
if (ecryptfs_verbosity < 1)
return;
if (bytes != 0) {
printk(KERN_DEBUG "0x%.2x.", (unsigned char)data[i]);
i++;
}
while (i < bytes) {
printk("0x%.2x.", (unsigned char)data[i]);
i++;
if (i % 16 == 0) {
printk("\n");
add_newline = 0;
} else
add_newline = 1;
}
if (add_newline)
printk("\n");
}
| gpl-2.0 |
voodik/android_kernel_hardkernel_odroidxu | drivers/net/wireless/rtl8192cu_v40/hal/rtl8192c/rtl8192c_mp.c | 199 | 35683 | /******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
*
******************************************************************************/
#define _RTL8192C_MP_C_
#ifdef CONFIG_MP_INCLUDED
#include <drv_types.h>
#include <rtw_mp.h>
#ifdef CONFIG_RTL8192C
#include <rtl8192c_hal.h>
#endif
s32 Hal_SetPowerTracking(PADAPTER padapter, u8 enable)
{
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter);
struct dm_priv *pdmpriv = &pHalData->dmpriv;
if (!netif_running(padapter->pnetdev)) {
RT_TRACE(_module_mp_, _drv_warning_, ("SetPowerTracking! Fail: interface not opened!\n"));
return _FAIL;
}
if (check_fwstate(&padapter->mlmepriv, WIFI_MP_STATE) == _FALSE) {
RT_TRACE(_module_mp_, _drv_warning_, ("SetPowerTracking! Fail: not in MP mode!\n"));
return _FAIL;
}
if (enable)
pdmpriv->TxPowerTrackControl = _TRUE;
else
pdmpriv->TxPowerTrackControl = _FALSE;
return _SUCCESS;
}
void Hal_GetPowerTracking(PADAPTER padapter, u8 *enable)
{
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter);
struct dm_priv *pdmpriv = &pHalData->dmpriv;
*enable = pdmpriv->TxPowerTrackControl;
}
static void Hal_disable_dm(PADAPTER padapter)
{
u8 v8;
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter);
struct dm_priv *pdmpriv = &pHalData->dmpriv;
//3 1. disable firmware dynamic mechanism
// disable Power Training, Rate Adaptive
v8 = rtw_read8(padapter, REG_BCN_CTRL);
v8 &= ~EN_BCN_FUNCTION;
rtw_write8(padapter, REG_BCN_CTRL, v8);
//3 2. disable driver dynamic mechanism
// disable Dynamic Initial Gain
// disable High Power
// disable Power Tracking
Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, _FALSE);
// enable APK, LCK and IQK but disable power tracking
pdmpriv->TxPowerTrackControl = _FALSE;
Switch_DM_Func(padapter, DYNAMIC_FUNC_SS, _TRUE);
}
/*-----------------------------------------------------------------------------
* Function: mpt_SwitchRfSetting
*
* Overview: Change RF settings when we switch channel/rate/BW for MP.
*
* Input: IN PADAPTER pAdapter
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 01/08/2009 MHC Suggestion from SD3 Willis for 92S series.
* 01/09/2009 MHC Add CCK modification for 40MHZ. Suggestion from SD3.
*
*---------------------------------------------------------------------------*/
void Hal_mpt_SwitchRfSetting(PADAPTER pAdapter)
{
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
struct mp_priv *pmp = &pAdapter->mppriv;
u8 ChannelToSw = pmp->channel, eRFPath = RF_PATH_A;
u8 ulRateIdx = pmp->rateidx;
u8 ulbandwidth = pmp->bandwidth;
PMPT_CONTEXT pMptCtx = &(pAdapter->mppriv.MptCtx);
BOOLEAN bInteralPA = _FALSE;
u32 value = 0;
#ifdef CONFIG_USB_HCI
if (IS_92C_SERIAL(pHalData->VersionID))
{
//92CE-VAU (92cu mCard)
if( BOARD_MINICARD == pHalData->BoardType)
{
if (ulRateIdx < MPT_RATE_6M) // CCK rate
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x0F400);
}
else //OFDM~MCS rate
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x4F000);
}
}
else //92CU dongle
{
if (ulRateIdx < MPT_RATE_6M) // CCK rate
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x0F400);
}
else if (ChannelToSw & BIT0) // OFDM rate, odd number channel
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x4F200);
}
else if (ChannelToSw == 4) // OFDM rate, even number channel
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x28200);
write_rfreg(pAdapter, 0, RF_SYN_G6, 0xe0004);
write_rfreg(pAdapter, 0, RF_SYN_G7, 0x709);
rtw_msleep_os(1);
write_rfreg(pAdapter, 0, RF_SYN_G7, 0x4B333);
}
else if(ChannelToSw == 10) // OFDM rate, even number channel
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x28000);
write_rfreg(pAdapter, 0, RF_SYN_G6, 0xe000A);
write_rfreg(pAdapter, 0, RF_SYN_G7, 0x709);
rtw_msleep_os(1);
write_rfreg(pAdapter, 0, RF_SYN_G7, 0x7B333);
}
else if(ChannelToSw == 12) // OFDM rate, even number channel
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x28200);
write_rfreg(pAdapter, 0, RF_SYN_G6, 0xe000C);
write_rfreg(pAdapter, 0, RF_SYN_G7, 0x50B);
rtw_msleep_os(1);
write_rfreg(pAdapter, 0, RF_SYN_G7, 0x4B333);
}
else
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x4F200);
}
}
}
else //88cu
{
//mcard interface
if( BOARD_MINICARD == pHalData->BoardType)
{
if (ulRateIdx < MPT_RATE_6M) // CCK rate
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x0F400);
}
else //OFDM~MCS rate
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x4F200);
}
if(ChannelToSw == 6 || ChannelToSw == 8)
{
write_bbreg(pAdapter, rOFDM0_XAAGCCore1, bMaskByte0, 0x22);
write_bbreg(pAdapter, rOFDM0_XBAGCCore1, bMaskByte0, 0x22);
write_bbreg(pAdapter, rOFDM0_RxDetector1, bMaskByte0, 0x4F);
}
else
{
write_bbreg(pAdapter, rOFDM0_XAAGCCore1, bMaskByte0, 0x20);
write_bbreg(pAdapter, rOFDM0_XBAGCCore1, bMaskByte0, 0x20);
write_bbreg(pAdapter, rOFDM0_RxDetector1, bMaskByte0, pMptCtx->backup0xc30);
}
}
else
{
if (ulRateIdx < MPT_RATE_6M) // CCK rate
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x0F400);
}
else if (ChannelToSw & BIT0) // OFDM rate, odd number channel
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x4F200);
}
else
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x4F000);
}
}
}
#else //PCI_INTERFACE
if (ulRateIdx < MPT_RATE_6M) // CCK rate
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x0F400);
}
else //OFDM~MCS rate
{
write_rfreg(pAdapter, 0, RF_SYN_G2, 0x4F000);
}
//88CE
if(!IS_92C_SERIAL(pHalData->VersionID))
{
if(ChannelToSw == 6 || ChannelToSw == 8)
{
write_bbreg(pAdapter, rOFDM0_XAAGCCore1, bMaskByte0, 0x22);
write_bbreg(pAdapter, rOFDM0_XBAGCCore1, bMaskByte0, 0x22);
write_bbreg(pAdapter, rOFDM0_RxDetector1, bMaskByte0, 0x4F);
}
else
{
write_bbreg(pAdapter, rOFDM0_XAAGCCore1, bMaskByte0, pMptCtx->backup0xc50);
write_bbreg(pAdapter, rOFDM0_XBAGCCore1, bMaskByte0, pMptCtx->backup0xc58);
write_bbreg(pAdapter, rOFDM0_RxDetector1, bMaskByte0, pMptCtx->backup0xc30);
}
}
#endif //CONFIG_USB_HCI
}
/*---------------------------hal\rtl8192c\MPT_Phy.c---------------------------*/
/*---------------------------hal\rtl8192c\MPT_HelperFunc.c---------------------------*/
void Hal_MPT_CCKTxPowerAdjust(PADAPTER Adapter, BOOLEAN bInCH14)
{
u32 TempVal = 0, TempVal2 = 0, TempVal3 = 0;
u32 CurrCCKSwingVal = 0, CCKSwingIndex = 12;
u8 i;
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(Adapter);
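// Flow sketch: read back the current BB CCK swing value, match it
// against the per-channel swing table to recover the swing index, then
// rewrite all eight CCK TX filter bytes (0xa22 - 0xa29) from that row.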
// get current cck swing value and check 0xa22 & 0xa23 later to match the table.
CurrCCKSwingVal = read_bbreg(Adapter, rCCK0_TxFilter1, bMaskHWord);
if (!bInCH14)
{
// Readback the current bb cck swing value and compare with the table to
// get the current swing index
for (i = 0; i < CCK_TABLE_SIZE; i++)
{
if (((CurrCCKSwingVal&0xff) == (u32)CCKSwingTable_Ch1_Ch13[i][0]) &&
(((CurrCCKSwingVal&0xff00)>>8) == (u32)CCKSwingTable_Ch1_Ch13[i][1]))
{
CCKSwingIndex = i;
// RT_TRACE(COMP_INIT, DBG_LOUD,("Ch1~13, Current reg0x%x = 0x%lx, CCKSwingIndex=0x%x\n",
// (rCCK0_TxFilter1+2), CurrCCKSwingVal, CCKSwingIndex));
break;
}
}
//Write 0xa22 0xa23
TempVal = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][0] +
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][1]<<8) ;
//Write 0xa24 ~ 0xa27
TempVal2 = 0;
TempVal2 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][2] +
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][3]<<8) +
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][4]<<16 )+
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][5]<<24);
//Write 0xa28 0xa29
TempVal3 = 0;
TempVal3 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][6] +
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][7]<<8) ;
}
else
{
for (i = 0; i < CCK_TABLE_SIZE; i++)
{
if (((CurrCCKSwingVal&0xff) == (u32)CCKSwingTable_Ch14[i][0]) &&
(((CurrCCKSwingVal&0xff00)>>8) == (u32)CCKSwingTable_Ch14[i][1]))
{
CCKSwingIndex = i;
// RT_TRACE(COMP_INIT, DBG_LOUD,("Ch14, Current reg0x%x = 0x%lx, CCKSwingIndex=0x%x\n",
// (rCCK0_TxFilter1+2), CurrCCKSwingVal, CCKSwingIndex));
break;
}
}
//Write 0xa22 0xa23
TempVal = CCKSwingTable_Ch14[CCKSwingIndex][0] +
(CCKSwingTable_Ch14[CCKSwingIndex][1]<<8) ;
//Write 0xa24 ~ 0xa27
TempVal2 = 0;
TempVal2 = CCKSwingTable_Ch14[CCKSwingIndex][2] +
(CCKSwingTable_Ch14[CCKSwingIndex][3]<<8) +
(CCKSwingTable_Ch14[CCKSwingIndex][4]<<16 )+
(CCKSwingTable_Ch14[CCKSwingIndex][5]<<24);
//Write 0xa28 0xa29
TempVal3 = 0;
TempVal3 = CCKSwingTable_Ch14[CCKSwingIndex][6] +
(CCKSwingTable_Ch14[CCKSwingIndex][7]<<8) ;
}
write_bbreg(Adapter, rCCK0_TxFilter1, bMaskHWord, TempVal);
write_bbreg(Adapter, rCCK0_TxFilter2, bMaskDWord, TempVal2);
write_bbreg(Adapter, rCCK0_DebugPort, bMaskLWord, TempVal3);
}
void Hal_MPT_CCKTxPowerAdjustbyIndex(PADAPTER pAdapter, BOOLEAN beven)
{
s32 TempCCk;
u8 CCK_index, CCK_index_old;
u8 Action = 0; //0: no action, 1: even->odd, 2:odd->even
u8 TimeOut = 100;
s32 i = 0;
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
PMPT_CONTEXT pMptCtx = &pAdapter->mppriv.MptCtx;
if (!IS_92C_SERIAL(pHalData->VersionID))
return;
#if 0
while(PlatformAtomicExchange(&Adapter->IntrCCKRefCount, TRUE) == TRUE)
{
PlatformSleepUs(100);
TimeOut--;
if(TimeOut <= 0)
{
RTPRINT(FINIT, INIT_TxPower,
("!!!MPT_CCKTxPowerAdjustbyIndex Wait for check CCK gain index too long!!!\n" ));
break;
}
}
#endif
if (beven && !pMptCtx->bMptIndexEven) //odd->even
{
Action = 2;
pMptCtx->bMptIndexEven = _TRUE;
}
else if (!beven && pMptCtx->bMptIndexEven) //even->odd
{
Action = 1;
pMptCtx->bMptIndexEven = _FALSE;
}
if (Action != 0)
{
//Query CCK default setting From 0xa24
TempCCk = read_bbreg(pAdapter, rCCK0_TxFilter2, bMaskDWord) & bMaskCCK;
for (i = 0; i < CCK_TABLE_SIZE; i++)
{
if (pHalData->dmpriv.bCCKinCH14)
{
if (_rtw_memcmp((void*)&TempCCk, (void*)&CCKSwingTable_Ch14[i][2], 4) == _TRUE)
{
CCK_index_old = (u8) i;
// RTPRINT(FINIT, INIT_TxPower,("MPT_CCKTxPowerAdjustbyIndex: Initial reg0x%x = 0x%lx, CCK_index=0x%x, ch 14 %d\n",
// rCCK0_TxFilter2, TempCCk, CCK_index_old, pHalData->bCCKinCH14));
break;
}
}
else
{
if (_rtw_memcmp((void*)&TempCCk, (void*)&CCKSwingTable_Ch1_Ch13[i][2], 4) == _TRUE)
{
CCK_index_old = (u8) i;
// RTPRINT(FINIT, INIT_TxPower,("MPT_CCKTxPowerAdjustbyIndex: Initial reg0x%x = 0x%lx, CCK_index=0x%x, ch14 %d\n",
// rCCK0_TxFilter2, TempCCk, CCK_index_old, pHalData->bCCKinCH14));
break;
}
}
}
if (Action == 1)
CCK_index = CCK_index_old - 1;
else
CCK_index = CCK_index_old + 1;
// RTPRINT(FINIT, INIT_TxPower,("MPT_CCKTxPowerAdjustbyIndex: new CCK_index=0x%x\n",
// CCK_index));
//Adjust CCK according to gain index
if (!pHalData->dmpriv.bCCKinCH14) {
rtw_write8(pAdapter, 0xa22, CCKSwingTable_Ch1_Ch13[CCK_index][0]);
rtw_write8(pAdapter, 0xa23, CCKSwingTable_Ch1_Ch13[CCK_index][1]);
rtw_write8(pAdapter, 0xa24, CCKSwingTable_Ch1_Ch13[CCK_index][2]);
rtw_write8(pAdapter, 0xa25, CCKSwingTable_Ch1_Ch13[CCK_index][3]);
rtw_write8(pAdapter, 0xa26, CCKSwingTable_Ch1_Ch13[CCK_index][4]);
rtw_write8(pAdapter, 0xa27, CCKSwingTable_Ch1_Ch13[CCK_index][5]);
rtw_write8(pAdapter, 0xa28, CCKSwingTable_Ch1_Ch13[CCK_index][6]);
rtw_write8(pAdapter, 0xa29, CCKSwingTable_Ch1_Ch13[CCK_index][7]);
} else {
rtw_write8(pAdapter, 0xa22, CCKSwingTable_Ch14[CCK_index][0]);
rtw_write8(pAdapter, 0xa23, CCKSwingTable_Ch14[CCK_index][1]);
rtw_write8(pAdapter, 0xa24, CCKSwingTable_Ch14[CCK_index][2]);
rtw_write8(pAdapter, 0xa25, CCKSwingTable_Ch14[CCK_index][3]);
rtw_write8(pAdapter, 0xa26, CCKSwingTable_Ch14[CCK_index][4]);
rtw_write8(pAdapter, 0xa27, CCKSwingTable_Ch14[CCK_index][5]);
rtw_write8(pAdapter, 0xa28, CCKSwingTable_Ch14[CCK_index][6]);
rtw_write8(pAdapter, 0xa29, CCKSwingTable_Ch14[CCK_index][7]);
}
}
#if 0
RTPRINT(FINIT, INIT_TxPower,
("MPT_CCKTxPowerAdjustbyIndex 0xa20=%x\n", PlatformEFIORead4Byte(Adapter, 0xa20)));
PlatformAtomicExchange(&Adapter->IntrCCKRefCount, FALSE);
#endif
}
/*---------------------------hal\rtl8192c\MPT_HelperFunc.c---------------------------*/
/*
* SetChannel
* Description
* Use the H2C command to change channel; this not only modifies the RF
* register, but also performs the other settings that need to be done.
*/
void Hal_SetChannel(PADAPTER pAdapter)
{
#if 0
struct mp_priv *pmp = &pAdapter->mppriv;
// SelectChannel(pAdapter, pmp->channel);
set_channel_bwmode(pAdapter, pmp->channel, pmp->channel_offset, pmp->bandwidth);
#else
u8 eRFPath;
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
struct mp_priv *pmp = &pAdapter->mppriv;
u8 channel = pmp->channel;
u8 bandwidth = pmp->bandwidth;
u8 rate = pmp->rateidx;
// set RF channel register
for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
{
if(IS_HARDWARE_TYPE_8192D(pAdapter))
_write_rfreg(pAdapter, (RF_RADIO_PATH_E)eRFPath, rRfChannel, 0xFF, channel);
else
_write_rfreg(pAdapter, eRFPath, rRfChannel, 0x3FF, channel);
}
Hal_mpt_SwitchRfSetting(pAdapter);
SelectChannel(pAdapter, channel);
if (pHalData->CurrentChannel == 14 && !pHalData->dmpriv.bCCKinCH14) {
pHalData->dmpriv.bCCKinCH14 = _TRUE;
Hal_MPT_CCKTxPowerAdjust(pAdapter, pHalData->dmpriv.bCCKinCH14);
}
else if (pHalData->CurrentChannel != 14 && pHalData->dmpriv.bCCKinCH14) {
pHalData->dmpriv.bCCKinCH14 = _FALSE;
Hal_MPT_CCKTxPowerAdjust(pAdapter, pHalData->dmpriv.bCCKinCH14);
}
#endif
}
/*
* Notice
* Switching bandwidth may change the center frequency (channel)
*/
void Hal_SetBandwidth(PADAPTER pAdapter)
{
struct mp_priv *pmp = &pAdapter->mppriv;
SetBWMode(pAdapter, pmp->bandwidth, pmp->prime_channel_offset);
Hal_mpt_SwitchRfSetting(pAdapter);
}
void Hal_SetCCKTxPower(PADAPTER pAdapter, u8 *TxPower)
{
u32 tmpval = 0;
// rf-A cck tx power
write_bbreg(pAdapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, TxPower[RF_PATH_A]);
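// The same index is replicated into three byte lanes, which per the
// 92C TX AGC register layout cover the remaining path-A CCK rates.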
tmpval = (TxPower[RF_PATH_A]<<16) | (TxPower[RF_PATH_A]<<8) | TxPower[RF_PATH_A];
write_bbreg(pAdapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
// rf-B cck tx power
write_bbreg(pAdapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, TxPower[RF_PATH_B]);
tmpval = (TxPower[RF_PATH_B]<<16) | (TxPower[RF_PATH_B]<<8) | TxPower[RF_PATH_B];
write_bbreg(pAdapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
RT_TRACE(_module_mp_, _drv_notice_,
("-SetCCKTxPower: A[0x%02x] B[0x%02x]\n",
TxPower[RF_PATH_A], TxPower[RF_PATH_B]));
}
void Hal_SetOFDMTxPower(PADAPTER pAdapter, u8 *TxPower)
{
u32 TxAGC = 0;
u8 tmpval = 0;
PMPT_CONTEXT pMptCtx = &pAdapter->mppriv.MptCtx;
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
// HT Tx-rf(A)
tmpval = TxPower[RF_PATH_A];
TxAGC = (tmpval<<24) | (tmpval<<16) | (tmpval<<8) | tmpval;
write_bbreg(pAdapter, rTxAGC_A_Rate18_06, bMaskDWord, TxAGC);
write_bbreg(pAdapter, rTxAGC_A_Rate54_24, bMaskDWord, TxAGC);
write_bbreg(pAdapter, rTxAGC_A_Mcs03_Mcs00, bMaskDWord, TxAGC);
write_bbreg(pAdapter, rTxAGC_A_Mcs07_Mcs04, bMaskDWord, TxAGC);
write_bbreg(pAdapter, rTxAGC_A_Mcs11_Mcs08, bMaskDWord, TxAGC);
write_bbreg(pAdapter, rTxAGC_A_Mcs15_Mcs12, bMaskDWord, TxAGC);
// HT Tx-rf(B)
tmpval = TxPower[RF_PATH_B];
TxAGC = (tmpval<<24) | (tmpval<<16) | (tmpval<<8) | tmpval;
write_bbreg(pAdapter, rTxAGC_B_Rate18_06, bMaskDWord, TxAGC);
write_bbreg(pAdapter, rTxAGC_B_Rate54_24, bMaskDWord, TxAGC);
write_bbreg(pAdapter, rTxAGC_B_Mcs03_Mcs00, bMaskDWord, TxAGC);
write_bbreg(pAdapter, rTxAGC_B_Mcs07_Mcs04, bMaskDWord, TxAGC);
write_bbreg(pAdapter, rTxAGC_B_Mcs11_Mcs08, bMaskDWord, TxAGC);
write_bbreg(pAdapter, rTxAGC_B_Mcs15_Mcs12, bMaskDWord, TxAGC);
RT_TRACE(_module_mp_, _drv_notice_,
("-SetOFDMTxPower: A[0x%02x] B[0x%02x]\n",
TxPower[RF_PATH_A], TxPower[RF_PATH_B]));
}
void Hal_SetAntennaPathPower(PADAPTER pAdapter)
{
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
u8 TxPowerLevel[MAX_RF_PATH_NUMS];
u8 rfPath;
TxPowerLevel[RF_PATH_A] = pAdapter->mppriv.txpoweridx;
TxPowerLevel[RF_PATH_B] = pAdapter->mppriv.txpoweridx_b;
switch (pAdapter->mppriv.antenna_tx)
{
case ANTENNA_A:
default:
rfPath = RF_PATH_A;
break;
case ANTENNA_B:
rfPath = RF_PATH_B;
break;
case ANTENNA_C:
rfPath = RF_PATH_C;
break;
}
switch (pHalData->rf_chip)
{
case RF_8225:
case RF_8256:
case RF_6052:
Hal_SetCCKTxPower(pAdapter, TxPowerLevel);
if (pAdapter->mppriv.rateidx < MPT_RATE_6M) // CCK rate
Hal_MPT_CCKTxPowerAdjustbyIndex(pAdapter, TxPowerLevel[rfPath]%2 == 0);
Hal_SetOFDMTxPower(pAdapter, TxPowerLevel);
break;
default:
break;
}
}
void Hal_SetTxPower(PADAPTER pAdapter)
{
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
u8 TxPower = pAdapter->mppriv.txpoweridx;
u8 TxPowerLevel[MAX_RF_PATH_NUMS];
u8 rf, rfPath;
for (rf = 0; rf < MAX_RF_PATH_NUMS; rf++) {
TxPowerLevel[rf] = TxPower;
}
switch (pAdapter->mppriv.antenna_tx)
{
case ANTENNA_A:
default:
rfPath = RF_PATH_A;
break;
case ANTENNA_B:
rfPath = RF_PATH_B;
break;
case ANTENNA_C:
rfPath = RF_PATH_C;
break;
}
switch (pHalData->rf_chip)
{
// 2008/09/12 MH Test only !! We enable the TX power tracking for MP!!!!!
// We should call normal driver API later!!
case RF_8225:
case RF_8256:
case RF_6052:
Hal_SetCCKTxPower(pAdapter, TxPowerLevel);
if (pAdapter->mppriv.rateidx < MPT_RATE_6M) // CCK rate
Hal_MPT_CCKTxPowerAdjustbyIndex(pAdapter, TxPowerLevel[rfPath]%2 == 0);
Hal_SetOFDMTxPower(pAdapter, TxPowerLevel);
break;
default:
break;
}
// SetCCKTxPower(pAdapter, TxPower);
// SetOFDMTxPower(pAdapter, TxPower);
}
void Hal_SetTxAGCOffset(PADAPTER pAdapter, u32 ulTxAGCOffset)
{
u32 TxAGCOffset_B, TxAGCOffset_C, TxAGCOffset_D,tmpAGC;
TxAGCOffset_B = (ulTxAGCOffset&0x000000ff);
TxAGCOffset_C = ((ulTxAGCOffset&0x0000ff00)>>8);
TxAGCOffset_D = ((ulTxAGCOffset&0x00ff0000)>>16);
tmpAGC = (TxAGCOffset_D<<8 | TxAGCOffset_C<<4 | TxAGCOffset_B);
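// Pack the extracted per-path offsets into the X-B/X-C/X-D TX gain
// stage nibbles of rFPGA0_TxGainStage with one masked write.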
write_bbreg(pAdapter, rFPGA0_TxGainStage,
(bXBTxAGC|bXCTxAGC|bXDTxAGC), tmpAGC);
}
void Hal_SetDataRate(PADAPTER pAdapter)
{
Hal_mpt_SwitchRfSetting(pAdapter);
}
#if !defined (CONFIG_RTL8192C) && !defined (CONFIG_RTL8192D)
/*------------------------------Define structure----------------------------*/
typedef struct _R_ANTENNA_SELECT_OFDM {
u32 r_tx_antenna:4;
u32 r_ant_l:4;
u32 r_ant_non_ht:4;
u32 r_ant_ht1:4;
u32 r_ant_ht2:4;
u32 r_ant_ht_s1:4;
u32 r_ant_non_ht_s1:4;
u32 OFDM_TXSC:2;
u32 Reserved:2;
}R_ANTENNA_SELECT_OFDM;
typedef struct _R_ANTENNA_SELECT_CCK {
u8 r_cckrx_enable_2:2;
u8 r_cckrx_enable:2;
u8 r_ccktx_enable:4;
}R_ANTENNA_SELECT_CCK;
#endif
void Hal_SetAntenna(PADAPTER pAdapter)
{
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
R_ANTENNA_SELECT_OFDM *p_ofdm_tx; /* OFDM Tx register */
R_ANTENNA_SELECT_CCK *p_cck_txrx;
u8 r_rx_antenna_ofdm = 0, r_ant_select_cck_val = 0;
u8 chgTx = 0, chgRx = 0;
u32 r_ant_sel_cck_val = 0, r_ant_select_ofdm_val = 0, r_ofdm_tx_en_val = 0;
p_ofdm_tx = (R_ANTENNA_SELECT_OFDM *)&r_ant_select_ofdm_val;
p_cck_txrx = (R_ANTENNA_SELECT_CCK *)&r_ant_select_cck_val;
p_ofdm_tx->r_ant_ht1 = 0x1;
p_ofdm_tx->r_ant_ht2 = 0x2; // Second TX RF path is B
p_ofdm_tx->r_ant_non_ht = 0x3; // 0x1+0x2=0x3
switch (pAdapter->mppriv.antenna_tx)
{
case ANTENNA_A:
p_ofdm_tx->r_tx_antenna = 0x1;
r_ofdm_tx_en_val = 0x1;
p_ofdm_tx->r_ant_l = 0x1;
p_ofdm_tx->r_ant_ht_s1 = 0x1;
p_ofdm_tx->r_ant_non_ht_s1 = 0x1;
p_cck_txrx->r_ccktx_enable = 0x8;
chgTx = 1;
// From SD3 Willis suggestion !!! Set RF A=TX and B as standby
// if (IS_HARDWARE_TYPE_8192S(pAdapter))
{
write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 2);
write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 1);
r_ofdm_tx_en_val = 0x3;
// Power save
//cosa r_ant_select_ofdm_val = 0x11111111;
// We need to close RFB by SW control
if (pHalData->rf_type == RF_2T2R)
{
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT10, 0);
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT26, 1);
PHY_SetBBReg(pAdapter, rFPGA0_XB_RFInterfaceOE, BIT10, 0);
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT1, 1);
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT17, 0);
}
}
break;
case ANTENNA_B:
p_ofdm_tx->r_tx_antenna = 0x2;
r_ofdm_tx_en_val = 0x2;
p_ofdm_tx->r_ant_l = 0x2;
p_ofdm_tx->r_ant_ht_s1 = 0x2;
p_ofdm_tx->r_ant_non_ht_s1 = 0x2;
p_cck_txrx->r_ccktx_enable = 0x4;
chgTx = 1;
// From SD3 Willis suggestion !!! Set RF A as standby
//if (IS_HARDWARE_TYPE_8192S(pAdapter))
{
PHY_SetBBReg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 1);
PHY_SetBBReg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 2);
// r_ofdm_tx_en_val = 0x3;
// Power save
//cosa r_ant_select_ofdm_val = 0x22222222;
// 2008/10/31 MH From SD3 Willis' suggestion. We must read the RF 1T table.
// 2009/01/08 MH From SD3 Willis. We need to close RFA by SW control
if (pHalData->rf_type == RF_2T2R || pHalData->rf_type == RF_1T2R)
{
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT10, 1);
PHY_SetBBReg(pAdapter, rFPGA0_XA_RFInterfaceOE, BIT10, 0);
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT26, 0);
// PHY_SetBBReg(pAdapter, rFPGA0_XB_RFInterfaceOE, BIT10, 0);
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT1, 0);
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT17, 1);
}
}
break;
case ANTENNA_AB: // For 8192S
p_ofdm_tx->r_tx_antenna = 0x3;
r_ofdm_tx_en_val = 0x3;
p_ofdm_tx->r_ant_l = 0x3;
p_ofdm_tx->r_ant_ht_s1 = 0x3;
p_ofdm_tx->r_ant_non_ht_s1 = 0x3;
p_cck_txrx->r_ccktx_enable = 0xC;
chgTx = 1;
// From SD3 Willis suggestion !!! Set RF B as standby
//if (IS_HARDWARE_TYPE_8192S(pAdapter))
{
PHY_SetBBReg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 2);
PHY_SetBBReg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 2);
// Disable Power save
//cosa r_ant_select_ofdm_val = 0x3321333;
#if 0
// 2008/10/31 MH From SD3 Willis' suggestion. We must read the RFA 2T table.
if ((pHalData->VersionID == VERSION_8192S_ACUT)) // For RTL8192SU A-Cut only, by Roger, 2008.11.07.
{
mpt_RFConfigFromPreParaArrary(pAdapter, 1, RF_PATH_A);
}
#endif
// 2009/01/08 MH From SD3 Willis. We need to enable RFA/B by SW control
if (pHalData->rf_type == RF_2T2R)
{
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT10, 0);
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT26, 0);
// PHY_SetBBReg(pAdapter, rFPGA0_XB_RFInterfaceOE, BIT10, 0);
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT1, 1);
PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT17, 1);
}
}
break;
default:
break;
}
//
// r_rx_antenna_ofdm, bit0=A, bit1=B, bit2=C, bit3=D
// r_cckrx_enable : CCK default, 0=A, 1=B, 2=C, 3=D
// r_cckrx_enable_2 : CCK option, 0=A, 1=B, 2=C, 3=D
//
switch (pAdapter->mppriv.antenna_rx)
{
case ANTENNA_A:
r_rx_antenna_ofdm = 0x1; // A
p_cck_txrx->r_cckrx_enable = 0x0; // default: A
p_cck_txrx->r_cckrx_enable_2 = 0x0; // option: A
chgRx = 1;
break;
case ANTENNA_B:
r_rx_antenna_ofdm = 0x2; // B
p_cck_txrx->r_cckrx_enable = 0x1; // default: B
p_cck_txrx->r_cckrx_enable_2 = 0x1; // option: B
chgRx = 1;
break;
case ANTENNA_AB:
r_rx_antenna_ofdm = 0x3; // AB
p_cck_txrx->r_cckrx_enable = 0x0; // default:A
p_cck_txrx->r_cckrx_enable_2 = 0x1; // option:B
chgRx = 1;
break;
default:
break;
}
if (chgTx && chgRx)
{
switch(pHalData->rf_chip)
{
case RF_8225:
case RF_8256:
case RF_6052:
//r_ant_sel_cck_val = r_ant_select_cck_val;
PHY_SetBBReg(pAdapter, rFPGA1_TxInfo, 0x7fffffff, r_ant_select_ofdm_val); //OFDM Tx
PHY_SetBBReg(pAdapter, rFPGA0_TxInfo, 0x0000000f, r_ofdm_tx_en_val); //OFDM Tx
PHY_SetBBReg(pAdapter, rOFDM0_TRxPathEnable, 0x0000000f, r_rx_antenna_ofdm); //OFDM Rx
PHY_SetBBReg(pAdapter, rOFDM1_TRxPathEnable, 0x0000000f, r_rx_antenna_ofdm); //OFDM Rx
PHY_SetBBReg(pAdapter, rCCK0_AFESetting, bMaskByte3, r_ant_select_cck_val);//r_ant_sel_cck_val); //CCK TxRx
break;
default:
break;
}
}
RT_TRACE(_module_mp_, _drv_notice_, ("-SwitchAntenna: finished\n"));
}
s32 Hal_SetThermalMeter(PADAPTER pAdapter, u8 target_ther)
{
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
if (!netif_running(pAdapter->pnetdev)) {
RT_TRACE(_module_mp_, _drv_warning_, ("SetThermalMeter! Fail: interface not opened!\n"));
return _FAIL;
}
if (check_fwstate(&pAdapter->mlmepriv, WIFI_MP_STATE) == _FALSE) {
RT_TRACE(_module_mp_, _drv_warning_, ("SetThermalMeter: Fail! not in MP mode!\n"));
return _FAIL;
}
target_ther &= 0xff;
if (target_ther < 0x07)
target_ther = 0x07;
else if (target_ther > 0x1d)
target_ther = 0x1d;
pHalData->EEPROMThermalMeter = target_ther;
return _SUCCESS;
}
void Hal_TriggerRFThermalMeter(PADAPTER pAdapter)
{
write_rfreg(pAdapter, RF_PATH_A, RF_T_METER, 0x60); // 0x24: RF Reg[6:5]
// RT_TRACE(_module_mp_,_drv_alert_, ("TriggerRFThermalMeter() finished.\n" ));
}
u8 Hal_ReadRFThermalMeter(PADAPTER pAdapter)
{
u32 ThermalValue = 0;
ThermalValue = _read_rfreg(pAdapter, RF_PATH_A, RF_T_METER, 0x1F); // 0x24: RF Reg[4:0]
// RT_TRACE(_module_mp_, _drv_alert_, ("ThermalValue = 0x%x\n", ThermalValue));
return (u8)ThermalValue;
}
void Hal_GetThermalMeter(PADAPTER pAdapter, u8 *value)
{
#if 0
fw_cmd(pAdapter, IOCMD_GET_THERMAL_METER);
rtw_msleep_os(1000);
fw_cmd_data(pAdapter, value, 1);
*value &= 0xFF;
#else
Hal_TriggerRFThermalMeter(pAdapter);
rtw_msleep_os(1000);
*value = Hal_ReadRFThermalMeter(pAdapter);
#endif
}
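/*
* Illustrative usage sketch (kept disabled, following this file's #if 0
* convention): one way a caller might combine the thermal helpers above.
* mp_sample_thermal is a hypothetical name, not part of the driver.
*/
#if 0
static void mp_sample_thermal(PADAPTER pAdapter)
{
u8 ther;
/* trigger the RF meter, wait ~1s, read back RF Reg 0x24[4:0] */
Hal_GetThermalMeter(pAdapter, &ther);
/* store as calibration value; the setter clamps to 0x07..0x1d */
Hal_SetThermalMeter(pAdapter, ther);
}
#endif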
void Hal_SetSingleCarrierTx(PADAPTER pAdapter, u8 bStart)
{
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
pAdapter->mppriv.MptCtx.bSingleCarrier = bStart;
if (bStart)// Start Single Carrier.
{
RT_TRACE(_module_mp_,_drv_alert_, ("SetSingleCarrierTx: test start\n"));
// 1. if OFDM block on?
if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn))
write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, bEnable); //set OFDM block on
// 2. set CCK test mode off, set to CCK normal mode
write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, bDisable);
// 3. turn on scramble setting
write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable);
// 4. Turn On Single Carrier Tx and turn off the other test modes.
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bEnable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
#ifdef CONFIG_RTL8192C
// 5. Disable TX power saving at STF & LLTF
write_bbreg(pAdapter, rOFDM1_LSTF, BIT22, 1);
#endif
}
else// Stop Single Carrier.
{
RT_TRACE(_module_mp_,_drv_alert_, ("SetSingleCarrierTx: test stop\n"));
// Turn off all test modes.
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
#ifdef CONFIG_RTL8192C
// Cancel disable TX power saving at STF&LLTF
write_bbreg(pAdapter, rOFDM1_LSTF, BIT22, 0);
#endif
//Delay 10 ms //delay_ms(10);
rtw_msleep_os(10);
//BB Reset
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
}
}
void Hal_SetSingleToneTx(PADAPTER pAdapter, u8 bStart)
{
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
BOOLEAN is92C = IS_92C_SERIAL(pHalData->VersionID);
u8 rfPath;
switch (pAdapter->mppriv.antenna_tx)
{
case ANTENNA_A:
default:
rfPath = RF_PATH_A;
break;
case ANTENNA_B:
rfPath = RF_PATH_B;
break;
case ANTENNA_C:
rfPath = RF_PATH_C;
break;
}
pAdapter->mppriv.MptCtx.bSingleTone = bStart;
if (bStart)// Start Single Tone.
{
RT_TRACE(_module_mp_,_drv_alert_, ("SetSingleToneTx: test start\n"));
write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x0);
write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x0);
if (is92C)
{
_write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT19, 0x01);
rtw_usleep_os(100);
if (rfPath == RF_PATH_A)
write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x10000); // PAD all on.
else if (rfPath == RF_PATH_B)
write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x10000); // PAD all on.
} else {
write_rfreg(pAdapter, rfPath, 0x21, 0xd4000);
rtw_usleep_os(100);
}
write_rfreg(pAdapter, rfPath, 0x00, 0x2001f); // PAD all on.
rtw_usleep_os(100);
}
else// Stop Single Tone.
{
RT_TRACE(_module_mp_,_drv_alert_, ("SetSingleToneTx: test stop\n"));
write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x1);
write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
if (is92C) {
_write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT19, 0x00);
rtw_usleep_os(100);
write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x32d75); // PAD all on.
write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x32d75); // PAD all on.
rtw_usleep_os(100);
} else {
write_rfreg(pAdapter, rfPath, 0x21, 0x54000);
rtw_usleep_os(100);
write_rfreg(pAdapter, rfPath, 0x00, 0x30000); // PAD all on.
rtw_usleep_os(100);
}
}
}
void Hal_SetCarrierSuppressionTx(PADAPTER pAdapter, u8 bStart)
{
pAdapter->mppriv.MptCtx.bCarrierSuppression = bStart;
if (bStart) // Start Carrier Suppression.
{
RT_TRACE(_module_mp_,_drv_alert_, ("SetCarrierSuppressionTx: test start\n"));
//if(pMgntInfo->dot11CurrentWirelessMode == WIRELESS_MODE_B)
if (pAdapter->mppriv.rateidx <= MPT_RATE_11M)
{
// 1. if CCK block on?
if(!read_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn))
write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, bEnable);//set CCK block on
//Turn Off All Test Mode
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x2); //transmit mode
write_bbreg(pAdapter, rCCK0_System, bCCKScramble, 0x0); //turn off scramble setting
//Set CCK Tx Test Rate
//PHY_SetBBReg(pAdapter, rCCK0_System, bCCKTxRate, pMgntInfo->ForcedDataRate);
write_bbreg(pAdapter, rCCK0_System, bCCKTxRate, 0x0); //Set FTxRate to 1Mbps
}
}
else// Stop Carrier Suppression.
{
RT_TRACE(_module_mp_,_drv_alert_, ("SetCarrierSuppressionTx: test stop\n"));
//if(pMgntInfo->dot11CurrentWirelessMode == WIRELESS_MODE_B)
if (pAdapter->mppriv.rateidx <= MPT_RATE_11M ) {
write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x0); //normal mode
write_bbreg(pAdapter, rCCK0_System, bCCKScramble, 0x1); //turn on scramble setting
//BB Reset
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
}
}
//DbgPrint("\n MPT_ProSetCarrierSupp() is finished. \n");
}
void Hal_SetCCKContinuousTx(PADAPTER pAdapter, u8 bStart)
{
u32 cckrate;
if (bStart)
{
RT_TRACE(_module_mp_, _drv_alert_,
("SetCCKContinuousTx: test start\n"));
// 1. if CCK block on?
if(!read_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn))
write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, bEnable);//set CCK block on
//Turn Off All Test Mode
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
//Set CCK Tx Test Rate
#if 0
switch(pAdapter->mppriv.rateidx)
{
case 2:
cckrate = 0;
break;
case 4:
cckrate = 1;
break;
case 11:
cckrate = 2;
break;
case 22:
cckrate = 3;
break;
default:
cckrate = 0;
break;
}
#else
cckrate = pAdapter->mppriv.rateidx;
#endif
write_bbreg(pAdapter, rCCK0_System, bCCKTxRate, cckrate);
write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x2); //transmit mode
write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable); //turn on scramble setting
#ifdef CONFIG_RTL8192C
// Patch for CCK 11M waveform
if (cckrate == MPT_RATE_1M)
write_bbreg(pAdapter, 0xA71, BIT(6), bDisable);
else
write_bbreg(pAdapter, 0xA71, BIT(6), bEnable);
#endif
}
else {
RT_TRACE(_module_mp_, _drv_info_,
("SetCCKContinuousTx: test stop\n"));
write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x0); //normal mode
write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable); //turn on scramble setting
//BB Reset
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
}
pAdapter->mppriv.MptCtx.bCckContTx = bStart;
pAdapter->mppriv.MptCtx.bOfdmContTx = _FALSE;
}/* mpt_StartCckContTx */
void Hal_SetOFDMContinuousTx(PADAPTER pAdapter, u8 bStart)
{
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
if (bStart) {
RT_TRACE(_module_mp_, _drv_info_, ("SetOFDMContinuousTx: test start\n"));
// 1. if OFDM block on?
if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn))
write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, bEnable); //set OFDM block on
// 2. set CCK test mode off, set to CCK normal mode
write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, bDisable);
// 3. turn on scramble setting
write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable);
// 4. Turn On Continue Tx and turn off the other test modes.
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bEnable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
} else {
RT_TRACE(_module_mp_,_drv_info_, ("SetOFDMContinuousTx: test stop\n"));
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
//Delay 10 ms
rtw_msleep_os(10);
//BB Reset
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
}
pAdapter->mppriv.MptCtx.bCckContTx = _FALSE;
pAdapter->mppriv.MptCtx.bOfdmContTx = bStart;
}/* mpt_StartOfdmContTx */
void Hal_SetContinuousTx(PADAPTER pAdapter, u8 bStart)
{
#if 0
// ADC turn off [bit24-21] adc port0 ~ port1
if (bStart) {
write_bbreg(pAdapter, rRx_Wait_CCCA, read_bbreg(pAdapter, rRx_Wait_CCCA) & 0xFE1FFFFF);
rtw_usleep_os(100);
}
#endif
RT_TRACE(_module_mp_, _drv_info_,
("SetContinuousTx: rate:%d\n", pAdapter->mppriv.rateidx));
pAdapter->mppriv.MptCtx.bStartContTx = bStart;
if (pAdapter->mppriv.rateidx <= MPT_RATE_11M)
{
Hal_SetCCKContinuousTx(pAdapter, bStart);
}
else if ((pAdapter->mppriv.rateidx >= MPT_RATE_6M) &&
(pAdapter->mppriv.rateidx <= MPT_RATE_MCS15))
{
Hal_SetOFDMContinuousTx(pAdapter, bStart);
}
#if 0
// ADC turn on [bit24-21] adc port0 ~ port1
if (!bStart) {
write_bbreg(pAdapter, rRx_Wait_CCCA, read_bbreg(pAdapter, rRx_Wait_CCCA) | 0x01E00000);
}
#endif
}
#endif // CONFIG_MP_INCLUDE
| gpl-2.0 |
qs2d/linux-3.10 | net/bluetooth/amp.c | 455 | 11451 | /*
Copyright (c) 2011,2012 Intel Corp.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 and
only version 2 as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/amp.h>
#include <crypto/hash.h>
/* Remote AMP Controllers interface */
void amp_ctrl_get(struct amp_ctrl *ctrl)
{
BT_DBG("ctrl %p orig refcnt %d", ctrl,
atomic_read(&ctrl->kref.refcount));
kref_get(&ctrl->kref);
}
static void amp_ctrl_destroy(struct kref *kref)
{
struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref);
BT_DBG("ctrl %p", ctrl);
kfree(ctrl->assoc);
kfree(ctrl);
}
int amp_ctrl_put(struct amp_ctrl *ctrl)
{
BT_DBG("ctrl %p orig refcnt %d", ctrl,
atomic_read(&ctrl->kref.refcount));
return kref_put(&ctrl->kref, &amp_ctrl_destroy);
}
struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id)
{
struct amp_ctrl *ctrl;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return NULL;
kref_init(&ctrl->kref);
ctrl->id = id;
mutex_lock(&mgr->amp_ctrls_lock);
list_add(&ctrl->list, &mgr->amp_ctrls);
mutex_unlock(&mgr->amp_ctrls_lock);
BT_DBG("mgr %p ctrl %p", mgr, ctrl);
return ctrl;
}
void amp_ctrl_list_flush(struct amp_mgr *mgr)
{
struct amp_ctrl *ctrl, *n;
BT_DBG("mgr %p", mgr);
mutex_lock(&mgr->amp_ctrls_lock);
list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) {
list_del(&ctrl->list);
amp_ctrl_put(ctrl);
}
mutex_unlock(&mgr->amp_ctrls_lock);
}
struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id)
{
struct amp_ctrl *ctrl;
BT_DBG("mgr %p id %d", mgr, id);
mutex_lock(&mgr->amp_ctrls_lock);
list_for_each_entry(ctrl, &mgr->amp_ctrls, list) {
if (ctrl->id == id) {
amp_ctrl_get(ctrl);
mutex_unlock(&mgr->amp_ctrls_lock);
return ctrl;
}
}
mutex_unlock(&mgr->amp_ctrls_lock);
return NULL;
}
/* Physical Link interface */
static u8 __next_handle(struct amp_mgr *mgr)
{
if (++mgr->handle == 0)
mgr->handle = 1;
return mgr->handle;
}
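/* Note: the wrap check above guarantees that a physical link handle of 0
is never handed out; handles are reused cyclically. */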
struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
u8 remote_id, bool out)
{
bdaddr_t *dst = mgr->l2cap_conn->dst;
struct hci_conn *hcon;
hcon = hci_conn_add(hdev, AMP_LINK, dst);
if (!hcon)
return NULL;
BT_DBG("hcon %p dst %pMR", hcon, dst);
hcon->state = BT_CONNECT;
hcon->attempt++;
hcon->handle = __next_handle(mgr);
hcon->remote_id = remote_id;
hcon->amp_mgr = amp_mgr_get(mgr);
hcon->out = out;
return hcon;
}
/* AMP crypto key generation interface */
static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
{
int ret = 0;
struct crypto_shash *tfm;
if (!ksize)
return -EINVAL;
tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
if (IS_ERR(tfm)) {
BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm));
return PTR_ERR(tfm);
}
ret = crypto_shash_setkey(tfm, key, ksize);
if (ret) {
BT_DBG("crypto_ahash_setkey failed: err %d", ret);
} else {
struct {
struct shash_desc shash;
char ctx[crypto_shash_descsize(tfm)];
} desc;
desc.shash.tfm = tfm;
desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_digest(&desc.shash, plaintext, psize,
output);
}
crypto_free_shash(tfm);
return ret;
}
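/* Note: the on-stack { shash_desc, ctx[] } pattern above predates the
SHASH_DESC_ON_STACK() helper that later kernels provide for the same
purpose. */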
int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
{
struct hci_dev *hdev = conn->hdev;
struct link_key *key;
u8 keybuf[HCI_AMP_LINK_KEY_SIZE];
u8 gamp_key[HCI_AMP_LINK_KEY_SIZE];
int err;
if (!hci_conn_check_link_mode(conn))
return -EACCES;
BT_DBG("conn %p key_type %d", conn, conn->key_type);
/* Legacy key */
if (conn->key_type < 3) {
BT_ERR("Legacy key type %d", conn->key_type);
return -EACCES;
}
*type = conn->key_type;
*len = HCI_AMP_LINK_KEY_SIZE;
key = hci_find_link_key(hdev, &conn->dst);
if (!key) {
BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst);
return -EACCES;
}
/* BR/EDR Link Key concatenated together with itself */
memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE);
memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE);
/* Derive Generic AMP Link Key (gamp) */
err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key);
if (err) {
BT_ERR("Could not derive Generic AMP Key: err %d", err);
return err;
}
if (conn->key_type == HCI_LK_DEBUG_COMBINATION) {
BT_DBG("Use Generic AMP Key (gamp)");
memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE);
return err;
}
/* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */
return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
}
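/*
* Informative summary of the derivation implemented above:
*
* keybuf = link_key || link_key (BR/EDR link key doubled)
* gamp_key = HMAC-SHA256(keybuf, "gamp")
* debug combination key -> use gamp_key directly
* any other key type -> HMAC-SHA256(gamp_key, "802b"), the
* dedicated 802.11 PAL link key
*/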
void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
{
struct hci_cp_read_local_amp_assoc cp;
struct amp_assoc *loc_assoc = &hdev->loc_assoc;
BT_DBG("%s handle %d", hdev->name, phy_handle);
cp.phy_handle = phy_handle;
cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
cp.len_so_far = cpu_to_le16(loc_assoc->offset);
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
}
void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
{
struct hci_cp_read_local_amp_assoc cp;
memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
memset(&cp, 0, sizeof(cp));
cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
}
void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
struct hci_conn *hcon)
{
struct hci_cp_read_local_amp_assoc cp;
struct amp_mgr *mgr = hcon->amp_mgr;
cp.phy_handle = hcon->handle;
cp.len_so_far = cpu_to_le16(0);
cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
/* Read Local AMP Assoc final link information data */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
}
/* Write AMP Assoc data fragments, returns true with last fragment written */
static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
struct hci_conn *hcon)
{
struct hci_cp_write_remote_amp_assoc *cp;
struct amp_mgr *mgr = hcon->amp_mgr;
struct amp_ctrl *ctrl;
u16 frag_len, len;
ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
if (!ctrl)
return false;
if (!ctrl->assoc_rem_len) {
BT_DBG("all fragments are written");
ctrl->assoc_rem_len = ctrl->assoc_len;
ctrl->assoc_len_so_far = 0;
amp_ctrl_put(ctrl);
return true;
}
frag_len = min_t(u16, 248, ctrl->assoc_rem_len);
len = frag_len + sizeof(*cp);
cp = kzalloc(len, GFP_KERNEL);
if (!cp) {
amp_ctrl_put(ctrl);
return false;
}
BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u",
hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len);
cp->phy_handle = hcon->handle;
cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far);
cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len);
memcpy(cp->frag, ctrl->assoc, frag_len);
ctrl->assoc_len_so_far += frag_len;
ctrl->assoc_rem_len -= frag_len;
amp_ctrl_put(ctrl);
hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
kfree(cp);
return false;
}
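/*
* Worked example (informative): for a 600 byte assoc, successive calls
* send fragments of 248, 248 and 104 bytes (frag_len = min(248, rem_len));
* the call after the last fragment sees assoc_rem_len == 0, resets the
* counters and returns true so the caller can send the A2MP
* create-phylink response.
*/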
void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
{
struct hci_conn *hcon;
BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
hcon = hci_conn_hash_lookup_handle(hdev, handle);
if (!hcon)
return;
/* Send A2MP create phylink rsp when all fragments are written */
if (amp_write_rem_assoc_frag(hdev, hcon))
a2mp_send_create_phy_link_rsp(hdev, 0);
}
void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
{
struct hci_conn *hcon;
BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
hcon = hci_conn_hash_lookup_handle(hdev, handle);
if (!hcon)
return;
BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon);
amp_write_rem_assoc_frag(hdev, hcon);
}
void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
struct hci_conn *hcon)
{
struct hci_cp_create_phy_link cp;
cp.phy_handle = hcon->handle;
BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
hcon->handle);
if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
&cp.key_type)) {
BT_DBG("Cannot create link key");
return;
}
hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
}
void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
struct hci_conn *hcon)
{
struct hci_cp_accept_phy_link cp;
cp.phy_handle = hcon->handle;
BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
hcon->handle);
if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
&cp.key_type)) {
BT_DBG("Cannot create link key");
return;
}
hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
}
void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
{
struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev);
struct amp_mgr *mgr = hs_hcon->amp_mgr;
struct l2cap_chan *bredr_chan;
BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr);
if (!bredr_hdev || !mgr || !mgr->bredr_chan)
return;
bredr_chan = mgr->bredr_chan;
l2cap_chan_lock(bredr_chan);
set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags);
bredr_chan->remote_amp_id = hs_hcon->remote_id;
bredr_chan->local_amp_id = hs_hcon->hdev->id;
bredr_chan->hs_hcon = hs_hcon;
bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu;
__l2cap_physical_cfm(bredr_chan, 0);
l2cap_chan_unlock(bredr_chan);
hci_dev_put(bredr_hdev);
}
void amp_create_logical_link(struct l2cap_chan *chan)
{
struct hci_conn *hs_hcon = chan->hs_hcon;
struct hci_cp_create_accept_logical_link cp;
struct hci_dev *hdev;
BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, chan->conn->dst);
if (!hs_hcon)
return;
hdev = hci_dev_hold(chan->hs_hcon->hdev);
if (!hdev)
return;
cp.phy_handle = hs_hcon->handle;
cp.tx_flow_spec.id = chan->local_id;
cp.tx_flow_spec.stype = chan->local_stype;
cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu);
cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat);
cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to);
cp.rx_flow_spec.id = chan->remote_id;
cp.rx_flow_spec.stype = chan->remote_stype;
cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu);
cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime);
cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
if (hs_hcon->out)
hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
&cp);
else
hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
&cp);
hci_dev_put(hdev);
}
void amp_disconnect_logical_link(struct hci_chan *hchan)
{
struct hci_conn *hcon = hchan->conn;
struct hci_cp_disconn_logical_link cp;
if (hcon->state != BT_CONNECTED) {
BT_DBG("hchan %p not connected", hchan);
return;
}
cp.log_handle = cpu_to_le16(hchan->handle);
hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp);
}
void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason)
{
BT_DBG("hchan %p", hchan);
hci_chan_del(hchan);
}
| gpl-2.0 |
maxsands1503/e3chal | node_modules/node-sass/src/libsass/src/file.cpp | 455 | 14042 | #ifdef _WIN32
# ifdef __MINGW32__
# ifndef off64_t
# define off64_t _off64_t /* Workaround for http://sourceforge.net/p/mingw/bugs/2024/ */
# endif
# endif
# include <direct.h>
# define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
#else
# include <unistd.h>
#endif
#include "sass.hpp"
#include <iostream>
#include <fstream>
#include <cctype>
#include <vector>
#include <algorithm>
#include <sys/stat.h>
#include "file.hpp"
#include "context.hpp"
#include "prelexer.hpp"
#include "utf8_string.hpp"
#include "sass2scss.h"
#ifdef _WIN32
# include <windows.h>
# ifdef _MSC_VER
# include <codecvt>
inline static std::string wstring_to_string(const std::wstring& wstr)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> wchar_converter;
return wchar_converter.to_bytes(wstr);
}
# else // mingw(/gcc) does not support C++11's codecvt yet.
inline static std::string wstring_to_string(const std::wstring &wstr)
{
int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL);
std::string strTo(size_needed, 0);
WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], size_needed, NULL, NULL);
return strTo;
}
# endif
#endif
namespace Sass {
namespace File {
// return the current directory
// always with forward slashes
std::string get_cwd()
{
const size_t wd_len = 1024;
#ifndef _WIN32
char wd[wd_len];
std::string cwd = getcwd(wd, wd_len);
#else
wchar_t wd[wd_len];
std::string cwd = wstring_to_string(_wgetcwd(wd, wd_len));
//convert backslashes to forward slashes
replace(cwd.begin(), cwd.end(), '\\', '/');
#endif
if (cwd[cwd.length() - 1] != '/') cwd += '/';
return cwd;
}
// test if path exists and is a file
bool file_exists(const std::string& path)
{
#ifdef _WIN32
std::wstring wpath = UTF_8::convert_to_utf16(path);
DWORD dwAttrib = GetFileAttributesW(wpath.c_str());
return (dwAttrib != INVALID_FILE_ATTRIBUTES &&
(!(dwAttrib & FILE_ATTRIBUTE_DIRECTORY)));
#else
struct stat st_buf;
return (stat (path.c_str(), &st_buf) == 0) &&
(!S_ISDIR (st_buf.st_mode));
#endif
}
// return if given path is absolute
// works with *nix and windows paths
bool is_absolute_path(const std::string& path)
{
#ifdef _WIN32
if (path.length() >= 2 && isalpha(path[0]) && path[1] == ':') return true;
#endif
size_t i = 0;
// check if we have a protocol
if (path[i] && Prelexer::is_alpha(path[i])) {
// skip over all alphanumeric characters
while (path[i] && Prelexer::is_alnum(path[i])) ++i;
i = i && path[i] == ':' ? i + 1 : 0;
}
return path[i] == '/';
}
// helper function to find the last directory separator
inline size_t find_last_folder_separator(const std::string& path, size_t limit = std::string::npos)
{
size_t pos = std::string::npos;
size_t pos_p = path.find_last_of('/', limit);
#ifdef _WIN32
size_t pos_w = path.find_last_of('\\', limit);
#else
size_t pos_w = std::string::npos;
#endif
if (pos_p != std::string::npos && pos_w != std::string::npos) {
pos = std::max(pos_p, pos_w);
}
else if (pos_p != std::string::npos) {
pos = pos_p;
}
else {
pos = pos_w;
}
return pos;
}
// return only the directory part of path
std::string dir_name(const std::string& path)
{
size_t pos = find_last_folder_separator(path);
if (pos == std::string::npos) return "";
else return path.substr(0, pos+1);
}
// return only the filename part of path
std::string base_name(const std::string& path)
{
size_t pos = find_last_folder_separator(path);
if (pos == std::string::npos) return path;
else return path.substr(pos+1);
}
// do a logical clean up of the path
// no physical check on the filesystem
std::string make_canonical_path (std::string path)
{
// declarations
size_t pos;
#ifdef _WIN32
//convert backslashes to forward slashes
replace(path.begin(), path.end(), '\\', '/');
#endif
pos = 0; // remove all self references inside the path string
while((pos = path.find("/./", pos)) != std::string::npos) path.erase(pos, 2);
pos = 0; // remove all leading and trailing self references
while(path.length() > 1 && path.substr(0, 2) == "./") path.erase(0, 2);
while((pos = path.length()) > 1 && path.substr(pos - 2) == "/.") path.erase(pos - 2);
size_t proto = 0;
// check if we have a protocol
if (path[proto] && Prelexer::is_alpha(path[proto])) {
// skip over all alphanumeric characters
while (path[proto] && Prelexer::is_alnum(path[proto++])) {}
// then skip over the mandatory colon
if (proto && path[proto] == ':') ++ proto;
}
// then skip over start slashes
while (path[proto++] == '/') {}
pos = proto; // collapse multiple delimiters into a single one
while((pos = path.find("//", pos)) != std::string::npos) path.erase(pos, 1);
return path;
}
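// Examples (informative): "foo/./bar//baz" -> "foo/bar/baz" (self
// references removed, doubled slashes collapsed); "./foo/." -> "foo";
// a protocol prefix such as "http://" keeps its double slash because
// collapsing only starts after the protocol and leading slashes.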
// join two path segments cleanly together
// but only if right side is not absolute yet
std::string join_paths(std::string l, std::string r)
{
#ifdef _WIN32
// convert Windows backslashes to URL forward slashes
replace(l.begin(), l.end(), '\\', '/');
replace(r.begin(), r.end(), '\\', '/');
#endif
if (l.empty()) return r;
if (r.empty()) return l;
if (is_absolute_path(r)) return r;
if (l[l.length()-1] != '/') l += '/';
while ((r.length() > 3) && ((r.substr(0, 3) == "../") || (r.substr(0, 3) == "..\\"))) {
size_t L = l.length(), pos = find_last_folder_separator(l, L - 2);
bool is_slash = pos + 2 == L && (l[pos+1] == '/' || l[pos+1] == '\\');
bool is_self = pos + 3 == L && (l[pos+1] == '.');
if (!is_self && !is_slash) r = r.substr(3);
l = l.substr(0, pos == std::string::npos ? pos : pos + 1);
}
return l + r;
}
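// Examples (informative): join_paths("a/b/", "c") -> "a/b/c";
// join_paths("a/b/", "../c") -> "a/c" (each leading "../" consumes one
// parent segment of the left side); join_paths("a", "/abs") -> "/abs",
// since an absolute right-hand side is returned unchanged.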
std::string path_for_console(const std::string& rel_path, const std::string& abs_path, const std::string& orig_path)
{
// magic algorithm goes here!!
// if the file is outside this directory show the absolute path
if (rel_path.substr(0, 3) == "../") {
return orig_path;
}
// this seems to work most of the time
return abs_path == orig_path ? abs_path : rel_path;
}
// create an absolute path by resolving relative paths with cwd
std::string rel2abs(const std::string& path, const std::string& base, const std::string& cwd)
{
return make_canonical_path(join_paths(join_paths(cwd, base), path));
}
// create a path that is relative to the given base directory
// path and base will first be resolved against cwd to make them absolute
std::string abs2rel(const std::string& path, const std::string& base, const std::string& cwd)
{
std::string abs_path = rel2abs(path, cwd);
std::string abs_base = rel2abs(base, cwd);
size_t proto = 0;
// check if we have a protocol
if (path[proto] && Prelexer::is_alpha(path[proto])) {
// skip over all alphanumeric characters
while (path[proto] && Prelexer::is_alnum(path[proto++])) {}
// then skip over the mandatory colon
if (proto && path[proto] == ':') ++ proto;
}
// distinguish between windows absolute paths and valid protocols
// we assume that protocols must at least have two chars to be valid
if (proto && path[proto++] == '/' && proto > 3) return path;
#ifdef _WIN32
// absolute link must have a drive letter, and we know that we
// can only create relative links if both are on the same drive
if (abs_base[0] != abs_path[0]) return abs_path;
#endif
std::string stripped_uri = "";
std::string stripped_base = "";
size_t index = 0;
size_t minSize = std::min(abs_path.size(), abs_base.size());
for (size_t i = 0; i < minSize; ++i) {
#ifdef FS_CASE_SENSITIVE
if (abs_path[i] != abs_base[i]) break;
#else
// compare the charactes in a case insensitive manner
// windows fs is only case insensitive in ascii ranges
if (tolower(abs_path[i]) != tolower(abs_base[i])) break;
#endif
if (abs_path[i] == '/') index = i + 1;
}
for (size_t i = index; i < abs_path.size(); ++i) {
stripped_uri += abs_path[i];
}
for (size_t i = index; i < abs_base.size(); ++i) {
stripped_base += abs_base[i];
}
size_t left = 0;
size_t directories = 0;
for (size_t right = 0; right < stripped_base.size(); ++right) {
if (stripped_base[right] == '/') {
if (stripped_base.substr(left, 2) != "..") {
++directories;
}
else if (directories > 1) {
--directories;
}
else {
directories = 0;
}
left = right + 1;
}
}
std::string result = "";
for (size_t i = 0; i < directories; ++i) {
result += "../";
}
result += stripped_uri;
return result;
}
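// Example (informative): abs2rel("/root/sub/a.scss", "/root/other/", "/")
// strips the common "/root/" prefix and emits one "../" per directory
// left in the base, giving "../sub/a.scss".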
// Resolution order for ambiguous imports:
// (1) filename as given
// (2) underscore + given
// (3) underscore + given + extension
// (4) given + extension
std::vector<Include> resolve_includes(const std::string& root, const std::string& file)
{
std::string filename = join_paths(root, file);
// supported extensions
const std::vector<std::string> exts = {
".scss", ".sass", ".css"
};
// split the filename
std::string base(dir_name(file));
std::string name(base_name(file));
std::vector<Include> includes;
// create full path (maybe relative)
std::string rel_path(join_paths(base, name));
std::string abs_path(join_paths(root, rel_path));
if (file_exists(abs_path)) includes.push_back({{ rel_path, root }, abs_path });
// next test variation with underscore
rel_path = join_paths(base, "_" + name);
abs_path = join_paths(root, rel_path);
if (file_exists(abs_path)) includes.push_back({{ rel_path, root }, abs_path });
// next test exts plus underscore
for(auto ext : exts) {
rel_path = join_paths(base, "_" + name + ext);
abs_path = join_paths(root, rel_path);
if (file_exists(abs_path)) includes.push_back({{ rel_path, root }, abs_path });
}
// next test plain name with exts
for(auto ext : exts) {
rel_path = join_paths(base, name + ext);
abs_path = join_paths(root, rel_path);
if (file_exists(abs_path)) includes.push_back({{ rel_path, root }, abs_path });
}
// nothing found
return includes;
}
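// Example (informative): resolve_includes("/proj", "sub/colors") probes,
// in order: /proj/sub/colors, /proj/sub/_colors,
// /proj/sub/_colors.{scss,sass,css}, /proj/sub/colors.{scss,sass,css},
// collecting every existing candidate.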
// helper function to resolve a filename
std::string find_file(const std::string& file, const std::vector<std::string> paths)
{
// search in every include path for a match
for (size_t i = 0, S = paths.size(); i < S; ++i)
{
std::vector<Include> resolved(resolve_includes(paths[i], file));
if (resolved.size()) return resolved[0].abs_path;
}
// nothing found
return std::string("");
}
// inc paths can be directly passed from C code
std::string find_file(const std::string& file, const char* paths[])
{
if (paths == 0) return std::string("");
std::vector<std::string> includes(0);
// includes.push_back(".");
const char** it = paths;
while (it && *it) {
includes.push_back(*it);
++it;
}
return find_file(file, includes);
}
// try to load the given filename
// returned memory must be freed
// will auto convert .sass files
char* read_file(const std::string& path)
{
#ifdef _WIN32
BYTE* pBuffer;
DWORD dwBytes;
// windows unicode filepaths are encoded in utf16
std::wstring wpath = UTF_8::convert_to_utf16(path);
HANDLE hFile = CreateFileW(wpath.c_str(), GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, 0, NULL);
if (hFile == INVALID_HANDLE_VALUE) return 0;
DWORD dwFileLength = GetFileSize(hFile, NULL);
if (dwFileLength == INVALID_FILE_SIZE) return 0;
// allocate an extra byte for the null char
pBuffer = (BYTE*)malloc((dwFileLength+1)*sizeof(BYTE));
ReadFile(hFile, pBuffer, dwFileLength, &dwBytes, NULL);
pBuffer[dwFileLength] = '\0';
CloseHandle(hFile);
// just convert from unsigned char*
char* contents = (char*) pBuffer;
#else
struct stat st;
if (stat(path.c_str(), &st) == -1 || S_ISDIR(st.st_mode)) return 0;
std::ifstream file(path.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
char* contents = 0;
if (file.is_open()) {
size_t size = file.tellg();
// allocate an extra byte for the null char
contents = (char*) malloc((size+1)*sizeof(char));
file.seekg(0, std::ios::beg);
file.read(contents, size);
contents[size] = '\0';
file.close();
}
#endif
std::string extension;
if (path.length() > 5) {
extension = path.substr(path.length() - 5, 5);
}
for(size_t i=0; i<extension.size();++i)
extension[i] = tolower(extension[i]);
if (extension == ".sass" && contents != 0) {
char * converted = sass2scss(contents, SASS2SCSS_PRETTIFY_1 | SASS2SCSS_KEEP_COMMENT);
free(contents); // free the indented contents
return converted; // should be freed by caller
} else {
return contents;
}
}
}
}
| gpl-2.0 |
schqiushui/kernel_lollipop_sense_a52 | sound/isa/wavefront/wavefront.c | 2247 | 19069 | /*
* ALSA card-level driver for Turtle Beach Wavefront cards
* (Maui,Tropez,Tropez+)
*
* Copyright (c) 1997-1999 by Paul Barton-Davis <pbd@op.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/isa.h>
#include <linux/pnp.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/opl3.h>
#include <sound/wss.h>
#include <sound/snd_wavefront.h>
MODULE_AUTHOR("Paul Barton-Davis <pbd@op.net>");
MODULE_DESCRIPTION("Turtle Beach Wavefront");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Turtle Beach,Maui/Tropez/Tropez+}}");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */
#ifdef CONFIG_PNP
static bool isapnp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
#endif
static long cs4232_pcm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
static int cs4232_pcm_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,11,12,15 */
static long cs4232_mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
static int cs4232_mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 9,11,12,15 */
static long ics2115_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
static int ics2115_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 2,9,11,12,15 */
static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */
static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */
static bool use_cs4232_midi[SNDRV_CARDS];
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for WaveFront soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for WaveFront soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable WaveFront soundcard.");
#ifdef CONFIG_PNP
module_param_array(isapnp, bool, NULL, 0444);
MODULE_PARM_DESC(isapnp, "ISA PnP detection for WaveFront soundcards.");
#endif
module_param_array(cs4232_pcm_port, long, NULL, 0444);
MODULE_PARM_DESC(cs4232_pcm_port, "Port # for CS4232 PCM interface.");
module_param_array(cs4232_pcm_irq, int, NULL, 0444);
MODULE_PARM_DESC(cs4232_pcm_irq, "IRQ # for CS4232 PCM interface.");
module_param_array(dma1, int, NULL, 0444);
MODULE_PARM_DESC(dma1, "DMA1 # for CS4232 PCM interface.");
module_param_array(dma2, int, NULL, 0444);
MODULE_PARM_DESC(dma2, "DMA2 # for CS4232 PCM interface.");
module_param_array(cs4232_mpu_port, long, NULL, 0444);
MODULE_PARM_DESC(cs4232_mpu_port, "port # for CS4232 MPU-401 interface.");
module_param_array(cs4232_mpu_irq, int, NULL, 0444);
MODULE_PARM_DESC(cs4232_mpu_irq, "IRQ # for CS4232 MPU-401 interface.");
module_param_array(ics2115_irq, int, NULL, 0444);
MODULE_PARM_DESC(ics2115_irq, "IRQ # for ICS2115.");
module_param_array(ics2115_port, long, NULL, 0444);
MODULE_PARM_DESC(ics2115_port, "Port # for ICS2115.");
module_param_array(fm_port, long, NULL, 0444);
MODULE_PARM_DESC(fm_port, "FM port #.");
module_param_array(use_cs4232_midi, bool, NULL, 0444);
MODULE_PARM_DESC(use_cs4232_midi, "Use CS4232 MPU-401 interface (inaccessibly located inside your computer)");
#ifdef CONFIG_PNP
static int isa_registered;
static int pnp_registered;
static struct pnp_card_device_id snd_wavefront_pnpids[] = {
/* Tropez */
{ .id = "CSC7532", .devs = { { "CSC0000" }, { "CSC0010" }, { "PnPb006" }, { "CSC0004" } } },
/* Tropez+ */
{ .id = "CSC7632", .devs = { { "CSC0000" }, { "CSC0010" }, { "PnPb006" }, { "CSC0004" } } },
{ .id = "" }
};
MODULE_DEVICE_TABLE(pnp_card, snd_wavefront_pnpids);
static int
snd_wavefront_pnp (int dev, snd_wavefront_card_t *acard, struct pnp_card_link *card,
const struct pnp_card_device_id *id)
{
struct pnp_dev *pdev;
int err;
/* Check for each logical device. */
/* CS4232 chip (aka "windows sound system") is logical device 0 */
acard->wss = pnp_request_card_device(card, id->devs[0].id, NULL);
if (acard->wss == NULL)
return -EBUSY;
/* there is a game port at logical device 1, but we ignore it completely */
/* the control interface is logical device 2, but we ignore it
completely. in fact, nobody even seems to know what it
does.
*/
/* Only configure the CS4232 MIDI interface if its been
specifically requested. It is logical device 3.
*/
if (use_cs4232_midi[dev]) {
acard->mpu = pnp_request_card_device(card, id->devs[2].id, NULL);
if (acard->mpu == NULL)
return -EBUSY;
}
/* The ICS2115 synth is logical device 4 */
acard->synth = pnp_request_card_device(card, id->devs[3].id, NULL);
if (acard->synth == NULL)
return -EBUSY;
/* PCM/FM initialization */
pdev = acard->wss;
/* An interesting note from the Tropez+ FAQ:
Q. [Ports] Why is the base address of the WSS I/O ports off by 4?
A. WSS I/O requires a block of 8 I/O addresses ("ports"). Of these, the first
4 are used to identify and configure the board. With the advent of PnP,
these first 4 addresses have become obsolete, and software applications
only use the last 4 addresses to control the codec chip. Therefore, the
base address setting "skips past" the 4 unused addresses.
*/
err = pnp_activate_dev(pdev);
if (err < 0) {
snd_printk(KERN_ERR "PnP WSS pnp configure failure\n");
return err;
}
cs4232_pcm_port[dev] = pnp_port_start(pdev, 0);
fm_port[dev] = pnp_port_start(pdev, 1);
dma1[dev] = pnp_dma(pdev, 0);
dma2[dev] = pnp_dma(pdev, 1);
cs4232_pcm_irq[dev] = pnp_irq(pdev, 0);
/* Synth initialization */
pdev = acard->synth;
err = pnp_activate_dev(pdev);
if (err < 0) {
snd_printk(KERN_ERR "PnP ICS2115 pnp configure failure\n");
return err;
}
ics2115_port[dev] = pnp_port_start(pdev, 0);
ics2115_irq[dev] = pnp_irq(pdev, 0);
/* CS4232 MPU initialization. Configure this only if
explicitly requested, since it's physically inaccessible and
consumes another IRQ.
*/
if (use_cs4232_midi[dev]) {
pdev = acard->mpu;
err = pnp_activate_dev(pdev);
if (err < 0) {
snd_printk(KERN_ERR "PnP MPU401 pnp configure failure\n");
cs4232_mpu_port[dev] = SNDRV_AUTO_PORT;
} else {
cs4232_mpu_port[dev] = pnp_port_start(pdev, 0);
cs4232_mpu_irq[dev] = pnp_irq(pdev, 0);
}
snd_printk (KERN_INFO "CS4232 MPU: port=0x%lx, irq=%i\n",
cs4232_mpu_port[dev],
cs4232_mpu_irq[dev]);
}
snd_printdd ("CS4232: pcm port=0x%lx, fm port=0x%lx, dma1=%i, dma2=%i, irq=%i\nICS2115: port=0x%lx, irq=%i\n",
cs4232_pcm_port[dev],
fm_port[dev],
dma1[dev],
dma2[dev],
cs4232_pcm_irq[dev],
ics2115_port[dev],
ics2115_irq[dev]);
return 0;
}
#endif /* CONFIG_PNP */
static irqreturn_t snd_wavefront_ics2115_interrupt(int irq, void *dev_id)
{
snd_wavefront_card_t *acard;
acard = (snd_wavefront_card_t *) dev_id;
if (acard == NULL)
return IRQ_NONE;
if (acard->wavefront.interrupts_are_midi) {
snd_wavefront_midi_interrupt (acard);
} else {
snd_wavefront_internal_interrupt (acard);
}
return IRQ_HANDLED;
}
static struct snd_hwdep *snd_wavefront_new_synth(struct snd_card *card,
int hw_dev,
snd_wavefront_card_t *acard)
{
struct snd_hwdep *wavefront_synth;
if (snd_wavefront_detect (acard) < 0) {
return NULL;
}
if (snd_wavefront_start (&acard->wavefront) < 0) {
return NULL;
}
if (snd_hwdep_new(card, "WaveFront", hw_dev, &wavefront_synth) < 0)
return NULL;
strcpy (wavefront_synth->name,
"WaveFront (ICS2115) wavetable synthesizer");
wavefront_synth->ops.open = snd_wavefront_synth_open;
wavefront_synth->ops.release = snd_wavefront_synth_release;
wavefront_synth->ops.ioctl = snd_wavefront_synth_ioctl;
return wavefront_synth;
}
static struct snd_hwdep *snd_wavefront_new_fx(struct snd_card *card,
int hw_dev,
snd_wavefront_card_t *acard,
unsigned long port)
{
struct snd_hwdep *fx_processor;
if (snd_wavefront_fx_start (&acard->wavefront)) {
snd_printk (KERN_ERR "cannot initialize YSS225 FX processor");
return NULL;
}
if (snd_hwdep_new (card, "YSS225", hw_dev, &fx_processor) < 0)
return NULL;
sprintf (fx_processor->name, "YSS225 FX Processor at 0x%lx", port);
fx_processor->ops.open = snd_wavefront_fx_open;
fx_processor->ops.release = snd_wavefront_fx_release;
fx_processor->ops.ioctl = snd_wavefront_fx_ioctl;
return fx_processor;
}
static snd_wavefront_mpu_id internal_id = internal_mpu;
static snd_wavefront_mpu_id external_id = external_mpu;
static struct snd_rawmidi *snd_wavefront_new_midi(struct snd_card *card,
int midi_dev,
snd_wavefront_card_t *acard,
unsigned long port,
snd_wavefront_mpu_id mpu)
{
struct snd_rawmidi *rmidi;
static int first = 1;
if (first) {
first = 0;
acard->wavefront.midi.base = port;
if (snd_wavefront_midi_start (acard)) {
snd_printk (KERN_ERR "cannot initialize MIDI interface\n");
return NULL;
}
}
if (snd_rawmidi_new (card, "WaveFront MIDI", midi_dev, 1, 1, &rmidi) < 0)
return NULL;
if (mpu == internal_mpu) {
strcpy(rmidi->name, "WaveFront MIDI (Internal)");
rmidi->private_data = &internal_id;
} else {
strcpy(rmidi->name, "WaveFront MIDI (External)");
rmidi->private_data = &external_id;
}
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_wavefront_midi_output);
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_wavefront_midi_input);
rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT |
SNDRV_RAWMIDI_INFO_INPUT |
SNDRV_RAWMIDI_INFO_DUPLEX;
return rmidi;
}
static void
snd_wavefront_free(struct snd_card *card)
{
snd_wavefront_card_t *acard = (snd_wavefront_card_t *)card->private_data;
if (acard) {
release_and_free_resource(acard->wavefront.res_base);
if (acard->wavefront.irq > 0)
free_irq(acard->wavefront.irq, (void *)acard);
}
}
static int snd_wavefront_card_new(int dev, struct snd_card **cardp)
{
struct snd_card *card;
snd_wavefront_card_t *acard;
int err;
err = snd_card_create(index[dev], id[dev], THIS_MODULE,
sizeof(snd_wavefront_card_t), &card);
if (err < 0)
return err;
acard = card->private_data;
acard->wavefront.irq = -1;
spin_lock_init(&acard->wavefront.irq_lock);
init_waitqueue_head(&acard->wavefront.interrupt_sleeper);
spin_lock_init(&acard->wavefront.midi.open);
spin_lock_init(&acard->wavefront.midi.virtual);
acard->wavefront.card = card;
card->private_free = snd_wavefront_free;
*cardp = card;
return 0;
}
static int
snd_wavefront_probe (struct snd_card *card, int dev)
{
snd_wavefront_card_t *acard = card->private_data;
struct snd_wss *chip;
struct snd_hwdep *wavefront_synth;
struct snd_rawmidi *ics2115_internal_rmidi = NULL;
struct snd_rawmidi *ics2115_external_rmidi = NULL;
struct snd_hwdep *fx_processor;
int hw_dev = 0, midi_dev = 0, err;
/* --------- PCM --------------- */
err = snd_wss_create(card, cs4232_pcm_port[dev], -1,
cs4232_pcm_irq[dev], dma1[dev], dma2[dev],
WSS_HW_DETECT, 0, &chip);
if (err < 0) {
snd_printk(KERN_ERR "can't allocate WSS device\n");
return err;
}
err = snd_wss_pcm(chip, 0, NULL);
if (err < 0)
return err;
err = snd_wss_timer(chip, 0, NULL);
if (err < 0)
return err;
/* ---------- OPL3 synth --------- */
if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) {
struct snd_opl3 *opl3;
err = snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2,
OPL3_HW_OPL3_CS, 0, &opl3);
if (err < 0) {
snd_printk (KERN_ERR "can't allocate or detect OPL3 synth\n");
return err;
}
err = snd_opl3_hwdep_new(opl3, hw_dev, 1, NULL);
if (err < 0)
return err;
hw_dev++;
}
/* ------- ICS2115 Wavetable synth ------- */
acard->wavefront.res_base = request_region(ics2115_port[dev], 16,
"ICS2115");
if (acard->wavefront.res_base == NULL) {
snd_printk(KERN_ERR "unable to grab ICS2115 i/o region 0x%lx-0x%lx\n",
ics2115_port[dev], ics2115_port[dev] + 16 - 1);
return -EBUSY;
}
if (request_irq(ics2115_irq[dev], snd_wavefront_ics2115_interrupt,
0, "ICS2115", acard)) {
snd_printk(KERN_ERR "unable to use ICS2115 IRQ %d\n", ics2115_irq[dev]);
return -EBUSY;
}
acard->wavefront.irq = ics2115_irq[dev];
acard->wavefront.base = ics2115_port[dev];
wavefront_synth = snd_wavefront_new_synth(card, hw_dev, acard);
if (wavefront_synth == NULL) {
snd_printk (KERN_ERR "can't create WaveFront synth device\n");
return -ENOMEM;
}
strcpy (wavefront_synth->name, "ICS2115 Wavetable MIDI Synthesizer");
wavefront_synth->iface = SNDRV_HWDEP_IFACE_ICS2115;
hw_dev++;
/* --------- Mixer ------------ */
err = snd_wss_mixer(chip);
if (err < 0) {
snd_printk (KERN_ERR "can't allocate mixer device\n");
return err;
}
/* -------- CS4232 MPU-401 interface -------- */
if (cs4232_mpu_port[dev] > 0 && cs4232_mpu_port[dev] != SNDRV_AUTO_PORT) {
err = snd_mpu401_uart_new(card, midi_dev, MPU401_HW_CS4232,
cs4232_mpu_port[dev], 0,
cs4232_mpu_irq[dev], NULL);
if (err < 0) {
snd_printk (KERN_ERR "can't allocate CS4232 MPU-401 device\n");
return err;
}
midi_dev++;
}
/* ------ ICS2115 internal MIDI ------------ */
if (ics2115_port[dev] > 0 && ics2115_port[dev] != SNDRV_AUTO_PORT) {
ics2115_internal_rmidi =
snd_wavefront_new_midi (card,
midi_dev,
acard,
ics2115_port[dev],
internal_mpu);
if (ics2115_internal_rmidi == NULL) {
snd_printk (KERN_ERR "can't setup ICS2115 internal MIDI device\n");
return -ENOMEM;
}
midi_dev++;
}
/* ------ ICS2115 external MIDI ------------ */
if (ics2115_port[dev] > 0 && ics2115_port[dev] != SNDRV_AUTO_PORT) {
ics2115_external_rmidi =
snd_wavefront_new_midi (card,
midi_dev,
acard,
ics2115_port[dev],
external_mpu);
if (ics2115_external_rmidi == NULL) {
snd_printk (KERN_ERR "can't setup ICS2115 external MIDI device\n");
return -ENOMEM;
}
midi_dev++;
}
/* FX processor for Tropez+ */
if (acard->wavefront.has_fx) {
fx_processor = snd_wavefront_new_fx (card,
hw_dev,
acard,
ics2115_port[dev]);
if (fx_processor == NULL) {
snd_printk (KERN_ERR "can't setup FX device\n");
return -ENOMEM;
}
hw_dev++;
strcpy(card->driver, "Tropez+");
strcpy(card->shortname, "Turtle Beach Tropez+");
} else {
/* Need a way to distinguish between Maui and Tropez */
strcpy(card->driver, "WaveFront");
strcpy(card->shortname, "Turtle Beach WaveFront");
}
/* ----- Register the card --------- */
/* Not safe to include "Turtle Beach" in longname, due to
length restrictions
*/
sprintf(card->longname, "%s PCM 0x%lx irq %d dma %d",
card->driver,
chip->port,
cs4232_pcm_irq[dev],
dma1[dev]);
if (dma2[dev] >= 0 && dma2[dev] < 8)
sprintf(card->longname + strlen(card->longname), "&%d", dma2[dev]);
if (cs4232_mpu_port[dev] > 0 && cs4232_mpu_port[dev] != SNDRV_AUTO_PORT) {
sprintf (card->longname + strlen (card->longname),
" MPU-401 0x%lx irq %d",
cs4232_mpu_port[dev],
cs4232_mpu_irq[dev]);
}
sprintf (card->longname + strlen (card->longname),
" SYNTH 0x%lx irq %d",
ics2115_port[dev],
ics2115_irq[dev]);
return snd_card_register(card);
}
static int snd_wavefront_isa_match(struct device *pdev,
unsigned int dev)
{
if (!enable[dev])
return 0;
#ifdef CONFIG_PNP
if (isapnp[dev])
return 0;
#endif
if (cs4232_pcm_port[dev] == SNDRV_AUTO_PORT) {
snd_printk(KERN_ERR "specify CS4232 port\n");
return 0;
}
if (ics2115_port[dev] == SNDRV_AUTO_PORT) {
snd_printk(KERN_ERR "specify ICS2115 port\n");
return 0;
}
return 1;
}
static int snd_wavefront_isa_probe(struct device *pdev,
unsigned int dev)
{
struct snd_card *card;
int err;
err = snd_wavefront_card_new(dev, &card);
if (err < 0)
return err;
snd_card_set_dev(card, pdev);
if ((err = snd_wavefront_probe(card, dev)) < 0) {
snd_card_free(card);
return err;
}
dev_set_drvdata(pdev, card);
return 0;
}
static int snd_wavefront_isa_remove(struct device *devptr,
unsigned int dev)
{
snd_card_free(dev_get_drvdata(devptr));
dev_set_drvdata(devptr, NULL);
return 0;
}
#define DEV_NAME "wavefront"
static struct isa_driver snd_wavefront_driver = {
.match = snd_wavefront_isa_match,
.probe = snd_wavefront_isa_probe,
.remove = snd_wavefront_isa_remove,
/* FIXME: suspend, resume */
.driver = {
.name = DEV_NAME
},
};
#ifdef CONFIG_PNP
static int snd_wavefront_pnp_detect(struct pnp_card_link *pcard,
const struct pnp_card_device_id *pid)
{
static int dev;
struct snd_card *card;
int res;
for ( ; dev < SNDRV_CARDS; dev++) {
if (enable[dev] && isapnp[dev])
break;
}
if (dev >= SNDRV_CARDS)
return -ENODEV;
res = snd_wavefront_card_new(dev, &card);
if (res < 0)
return res;
if (snd_wavefront_pnp (dev, card->private_data, pcard, pid) < 0) {
if (cs4232_pcm_port[dev] == SNDRV_AUTO_PORT) {
snd_printk (KERN_ERR "isapnp detection failed\n");
snd_card_free (card);
return -ENODEV;
}
}
snd_card_set_dev(card, &pcard->card->dev);
if ((res = snd_wavefront_probe(card, dev)) < 0)
return res;
pnp_set_card_drvdata(pcard, card);
dev++;
return 0;
}
static void snd_wavefront_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
}
static struct pnp_card_driver wavefront_pnpc_driver = {
.flags = PNP_DRIVER_RES_DISABLE,
.name = "wavefront",
.id_table = snd_wavefront_pnpids,
.probe = snd_wavefront_pnp_detect,
.remove = snd_wavefront_pnp_remove,
/* FIXME: suspend,resume */
};
#endif /* CONFIG_PNP */
static int __init alsa_card_wavefront_init(void)
{
int err;
err = isa_register_driver(&snd_wavefront_driver, SNDRV_CARDS);
#ifdef CONFIG_PNP
if (!err)
isa_registered = 1;
err = pnp_register_card_driver(&wavefront_pnpc_driver);
if (!err)
pnp_registered = 1;
if (isa_registered)
err = 0;
#endif
return err;
}
static void __exit alsa_card_wavefront_exit(void)
{
#ifdef CONFIG_PNP
if (pnp_registered)
pnp_unregister_card_driver(&wavefront_pnpc_driver);
if (isa_registered)
#endif
isa_unregister_driver(&snd_wavefront_driver);
}
module_init(alsa_card_wavefront_init)
module_exit(alsa_card_wavefront_exit)
| gpl-2.0 |
denggww123/IMX6_DB_Kernel_3.10.53 | drivers/input/touchscreen/w90p910_ts.c | 2247 | 8529 | /*
* Copyright (c) 2008 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation;version 2 of the License.
*
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
/* ADC controller bit defines */
#define ADC_DELAY 0xf00
#define ADC_DOWN 0x01
#define ADC_TSC_Y (0x01 << 8)
#define ADC_TSC_X (0x00 << 8)
#define TSC_FOURWIRE (~(0x03 << 1))
#define ADC_CLK_EN (0x01 << 28) /* ADC clock enable */
#define ADC_READ_CON (0x01 << 12)
#define ADC_CONV (0x01 << 13)
#define ADC_SEMIAUTO (0x01 << 14)
#define ADC_WAITTRIG (0x03 << 14)
#define ADC_RST1 (0x01 << 16)
#define ADC_RST0 (0x00 << 16)
#define ADC_EN (0x01 << 17)
#define ADC_INT (0x01 << 18)
#define WT_INT (0x01 << 20)
#define ADC_INT_EN (0x01 << 21)
#define LVD_INT_EN (0x01 << 22)
#define WT_INT_EN (0x01 << 23)
#define ADC_DIV (0x04 << 1) /* div = 6 */
enum ts_state {
TS_WAIT_NEW_PACKET, /* We are waiting next touch report */
TS_WAIT_X_COORD, /* We are waiting for ADC to report X coord */
TS_WAIT_Y_COORD, /* We are waiting for ADC to report Y coord */
TS_IDLE, /* Input device is closed, don't do anything */
};
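/*
* Informative sketch of one sampling cycle driven by the interrupt
* handler below:
*
* TS_WAIT_NEW_PACKET --(pen-down irq)--> TS_WAIT_X_COORD
* TS_WAIT_X_COORD ----(ADC done irq)--> TS_WAIT_Y_COORD
* TS_WAIT_Y_COORD ----(ADC done irq)--> report X/Y, rearm the
* wait-trigger mode, return to TS_WAIT_NEW_PACKET; a 100 ms timer
* then checks whether the pen has been lifted.
*/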
struct w90p910_ts {
struct input_dev *input;
struct timer_list timer;
struct clk *clk;
int irq_num;
void __iomem *ts_reg;
spinlock_t lock;
enum ts_state state;
};
static void w90p910_report_event(struct w90p910_ts *w90p910_ts, bool down)
{
struct input_dev *dev = w90p910_ts->input;
if (down) {
input_report_abs(dev, ABS_X,
__raw_readl(w90p910_ts->ts_reg + 0x0c));
input_report_abs(dev, ABS_Y,
__raw_readl(w90p910_ts->ts_reg + 0x10));
}
input_report_key(dev, BTN_TOUCH, down);
input_sync(dev);
}
static void w90p910_prepare_x_reading(struct w90p910_ts *w90p910_ts)
{
unsigned long ctlreg;
__raw_writel(ADC_TSC_X, w90p910_ts->ts_reg + 0x04);
ctlreg = __raw_readl(w90p910_ts->ts_reg);
ctlreg &= ~(ADC_WAITTRIG | WT_INT | WT_INT_EN);
ctlreg |= ADC_SEMIAUTO | ADC_INT_EN | ADC_CONV;
__raw_writel(ctlreg, w90p910_ts->ts_reg);
w90p910_ts->state = TS_WAIT_X_COORD;
}
static void w90p910_prepare_y_reading(struct w90p910_ts *w90p910_ts)
{
unsigned long ctlreg;
__raw_writel(ADC_TSC_Y, w90p910_ts->ts_reg + 0x04);
ctlreg = __raw_readl(w90p910_ts->ts_reg);
ctlreg &= ~(ADC_WAITTRIG | ADC_INT | WT_INT_EN);
ctlreg |= ADC_SEMIAUTO | ADC_INT_EN | ADC_CONV;
__raw_writel(ctlreg, w90p910_ts->ts_reg);
w90p910_ts->state = TS_WAIT_Y_COORD;
}
static void w90p910_prepare_next_packet(struct w90p910_ts *w90p910_ts)
{
unsigned long ctlreg;
ctlreg = __raw_readl(w90p910_ts->ts_reg);
ctlreg &= ~(ADC_INT | ADC_INT_EN | ADC_SEMIAUTO | ADC_CONV);
ctlreg |= ADC_WAITTRIG | WT_INT_EN;
__raw_writel(ctlreg, w90p910_ts->ts_reg);
w90p910_ts->state = TS_WAIT_NEW_PACKET;
}
static irqreturn_t w90p910_ts_interrupt(int irq, void *dev_id)
{
struct w90p910_ts *w90p910_ts = dev_id;
unsigned long flags;
spin_lock_irqsave(&w90p910_ts->lock, flags);
switch (w90p910_ts->state) {
case TS_WAIT_NEW_PACKET:
/*
* The controller only generates interrupts when pen
* is down.
*/
del_timer(&w90p910_ts->timer);
w90p910_prepare_x_reading(w90p910_ts);
break;
case TS_WAIT_X_COORD:
w90p910_prepare_y_reading(w90p910_ts);
break;
case TS_WAIT_Y_COORD:
w90p910_report_event(w90p910_ts, true);
w90p910_prepare_next_packet(w90p910_ts);
mod_timer(&w90p910_ts->timer, jiffies + msecs_to_jiffies(100));
break;
case TS_IDLE:
break;
}
spin_unlock_irqrestore(&w90p910_ts->lock, flags);
return IRQ_HANDLED;
}
static void w90p910_check_pen_up(unsigned long data)
{
struct w90p910_ts *w90p910_ts = (struct w90p910_ts *) data;
unsigned long flags;
spin_lock_irqsave(&w90p910_ts->lock, flags);
if (w90p910_ts->state == TS_WAIT_NEW_PACKET &&
!(__raw_readl(w90p910_ts->ts_reg + 0x04) & ADC_DOWN)) {
w90p910_report_event(w90p910_ts, false);
}
spin_unlock_irqrestore(&w90p910_ts->lock, flags);
}
static int w90p910_open(struct input_dev *dev)
{
struct w90p910_ts *w90p910_ts = input_get_drvdata(dev);
unsigned long val;
/* enable the ADC clock */
clk_enable(w90p910_ts->clk);
__raw_writel(ADC_RST1, w90p910_ts->ts_reg);
msleep(1);
__raw_writel(ADC_RST0, w90p910_ts->ts_reg);
msleep(1);
/* set delay and screen type */
val = __raw_readl(w90p910_ts->ts_reg + 0x04);
__raw_writel(val & TSC_FOURWIRE, w90p910_ts->ts_reg + 0x04);
__raw_writel(ADC_DELAY, w90p910_ts->ts_reg + 0x08);
w90p910_ts->state = TS_WAIT_NEW_PACKET;
wmb();
/* set trigger mode */
val = __raw_readl(w90p910_ts->ts_reg);
val |= ADC_WAITTRIG | ADC_DIV | ADC_EN | WT_INT_EN;
__raw_writel(val, w90p910_ts->ts_reg);
return 0;
}
static void w90p910_close(struct input_dev *dev)
{
struct w90p910_ts *w90p910_ts = input_get_drvdata(dev);
unsigned long val;
/* disable trigger mode */
spin_lock_irq(&w90p910_ts->lock);
w90p910_ts->state = TS_IDLE;
val = __raw_readl(w90p910_ts->ts_reg);
val &= ~(ADC_WAITTRIG | ADC_DIV | ADC_EN | WT_INT_EN | ADC_INT_EN);
__raw_writel(val, w90p910_ts->ts_reg);
spin_unlock_irq(&w90p910_ts->lock);
/* Now that interrupts are shut off we can safely delete timer */
del_timer_sync(&w90p910_ts->timer);
/* stop the ADC clock */
clk_disable(w90p910_ts->clk);
}
static int w90x900ts_probe(struct platform_device *pdev)
{
struct w90p910_ts *w90p910_ts;
struct input_dev *input_dev;
struct resource *res;
int err;
w90p910_ts = kzalloc(sizeof(struct w90p910_ts), GFP_KERNEL);
input_dev = input_allocate_device();
if (!w90p910_ts || !input_dev) {
err = -ENOMEM;
goto fail1;
}
w90p910_ts->input = input_dev;
w90p910_ts->state = TS_IDLE;
spin_lock_init(&w90p910_ts->lock);
setup_timer(&w90p910_ts->timer, w90p910_check_pen_up,
(unsigned long)w90p910_ts);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -ENXIO;
goto fail1;
}
if (!request_mem_region(res->start, resource_size(res),
pdev->name)) {
err = -EBUSY;
goto fail1;
}
w90p910_ts->ts_reg = ioremap(res->start, resource_size(res));
if (!w90p910_ts->ts_reg) {
err = -ENOMEM;
goto fail2;
}
w90p910_ts->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(w90p910_ts->clk)) {
err = PTR_ERR(w90p910_ts->clk);
goto fail3;
}
input_dev->name = "W90P910 TouchScreen";
input_dev->phys = "w90p910ts/event0";
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0x0005;
input_dev->id.product = 0x0001;
input_dev->id.version = 0x0100;
input_dev->dev.parent = &pdev->dev;
input_dev->open = w90p910_open;
input_dev->close = w90p910_close;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X, 0, 0x400, 0, 0);
input_set_abs_params(input_dev, ABS_Y, 0, 0x400, 0, 0);
input_set_drvdata(input_dev, w90p910_ts);
w90p910_ts->irq_num = platform_get_irq(pdev, 0);
if (request_irq(w90p910_ts->irq_num, w90p910_ts_interrupt,
0, "w90p910ts", w90p910_ts)) {
err = -EBUSY;
goto fail4;
}
err = input_register_device(w90p910_ts->input);
if (err)
goto fail5;
platform_set_drvdata(pdev, w90p910_ts);
return 0;
fail5: free_irq(w90p910_ts->irq_num, w90p910_ts);
fail4: clk_put(w90p910_ts->clk);
fail3: iounmap(w90p910_ts->ts_reg);
fail2: release_mem_region(res->start, resource_size(res));
fail1: input_free_device(input_dev);
kfree(w90p910_ts);
return err;
}
static int w90x900ts_remove(struct platform_device *pdev)
{
struct w90p910_ts *w90p910_ts = platform_get_drvdata(pdev);
struct resource *res;
free_irq(w90p910_ts->irq_num, w90p910_ts);
del_timer_sync(&w90p910_ts->timer);
iounmap(w90p910_ts->ts_reg);
clk_put(w90p910_ts->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
input_unregister_device(w90p910_ts->input);
kfree(w90p910_ts);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver w90x900ts_driver = {
.probe = w90x900ts_probe,
.remove = w90x900ts_remove,
.driver = {
.name = "nuc900-ts",
.owner = THIS_MODULE,
},
};
module_platform_driver(w90x900ts_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 touch screen driver!");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-ts");
| gpl-2.0 |
TeamWin/android_kernel_samsung_sprat | fs/ext3/ioctl.c | 3783 | 7615 | /*
* linux/fs/ext3/ioctl.c
*
* Copyright (C) 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*/
#include <linux/mount.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include "ext3.h"
long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct ext3_inode_info *ei = EXT3_I(inode);
unsigned int flags;
unsigned short rsv_window_size;
ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);
switch (cmd) {
case EXT3_IOC_GETFLAGS:
ext3_get_inode_flags(ei);
flags = ei->i_flags & EXT3_FL_USER_VISIBLE;
return put_user(flags, (int __user *) arg);
case EXT3_IOC_SETFLAGS: {
handle_t *handle = NULL;
int err;
struct ext3_iloc iloc;
unsigned int oldflags;
unsigned int jflag;
if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
return -EFAULT;
err = mnt_want_write_file(filp);
if (err)
return err;
flags = ext3_mask_flags(inode->i_mode, flags);
mutex_lock(&inode->i_mutex);
/* Is it quota file? Do not allow user to mess with it */
err = -EPERM;
if (IS_NOQUOTA(inode))
goto flags_out;
oldflags = ei->i_flags;
/* The JOURNAL_DATA flag is modifiable only by root */
jflag = flags & EXT3_JOURNAL_DATA_FL;
/*
* The IMMUTABLE and APPEND_ONLY flags can only be changed by
* the relevant capability.
*
* This test looks nicer. Thanks to Pauline Middelink
*/
if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
if (!capable(CAP_LINUX_IMMUTABLE))
goto flags_out;
}
/*
* The JOURNAL_DATA flag can only be changed by
* the relevant capability.
*/
if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) {
if (!capable(CAP_SYS_RESOURCE))
goto flags_out;
}
handle = ext3_journal_start(inode, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto flags_out;
}
if (IS_SYNC(inode))
handle->h_sync = 1;
err = ext3_reserve_inode_write(handle, inode, &iloc);
if (err)
goto flags_err;
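/*
 * Merge the flags: take only the user-modifiable bits from the
 * request and preserve the kernel-managed bits from the old flags.
 */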
flags = flags & EXT3_FL_USER_MODIFIABLE;
flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
ei->i_flags = flags;
ext3_set_inode_flags(inode);
inode->i_ctime = CURRENT_TIME_SEC;
err = ext3_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
ext3_journal_stop(handle);
if (err)
goto flags_out;
if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL))
err = ext3_change_inode_journal_flag(inode, jflag);
flags_out:
mutex_unlock(&inode->i_mutex);
mnt_drop_write_file(filp);
return err;
}
case EXT3_IOC_GETVERSION:
case EXT3_IOC_GETVERSION_OLD:
return put_user(inode->i_generation, (int __user *) arg);
case EXT3_IOC_SETVERSION:
case EXT3_IOC_SETVERSION_OLD: {
handle_t *handle;
struct ext3_iloc iloc;
__u32 generation;
int err;
if (!inode_owner_or_capable(inode))
return -EPERM;
err = mnt_want_write_file(filp);
if (err)
return err;
if (get_user(generation, (int __user *) arg)) {
err = -EFAULT;
goto setversion_out;
}
mutex_lock(&inode->i_mutex);
handle = ext3_journal_start(inode, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto unlock_out;
}
err = ext3_reserve_inode_write(handle, inode, &iloc);
if (err == 0) {
inode->i_ctime = CURRENT_TIME_SEC;
inode->i_generation = generation;
err = ext3_mark_iloc_dirty(handle, inode, &iloc);
}
ext3_journal_stop(handle);
unlock_out:
mutex_unlock(&inode->i_mutex);
setversion_out:
mnt_drop_write_file(filp);
return err;
}
case EXT3_IOC_GETRSVSZ:
if (test_opt(inode->i_sb, RESERVATION)
&& S_ISREG(inode->i_mode)
&& ei->i_block_alloc_info) {
rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
return put_user(rsv_window_size, (int __user *)arg);
}
return -ENOTTY;
case EXT3_IOC_SETRSVSZ: {
int err;
if (!test_opt(inode->i_sb, RESERVATION) || !S_ISREG(inode->i_mode))
return -ENOTTY;
err = mnt_want_write_file(filp);
if (err)
return err;
if (!inode_owner_or_capable(inode)) {
err = -EACCES;
goto setrsvsz_out;
}
if (get_user(rsv_window_size, (int __user *)arg)) {
err = -EFAULT;
goto setrsvsz_out;
}
if (rsv_window_size > EXT3_MAX_RESERVE_BLOCKS)
rsv_window_size = EXT3_MAX_RESERVE_BLOCKS;
/*
* need to allocate the reservation structure for this inode
* before setting the window size
*/
mutex_lock(&ei->truncate_mutex);
if (!ei->i_block_alloc_info)
ext3_init_block_alloc_info(inode);
if (ei->i_block_alloc_info) {
struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
rsv->rsv_goal_size = rsv_window_size;
}
mutex_unlock(&ei->truncate_mutex);
setrsvsz_out:
mnt_drop_write_file(filp);
return err;
}
case EXT3_IOC_GROUP_EXTEND: {
ext3_fsblk_t n_blocks_count;
struct super_block *sb = inode->i_sb;
int err, err2;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
err = mnt_want_write_file(filp);
if (err)
return err;
if (get_user(n_blocks_count, (__u32 __user *)arg)) {
err = -EFAULT;
goto group_extend_out;
}
err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count);
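/*
 * Flush the journal so the resized filesystem is durable on disk
 * before returning; a flush error is reported only if the resize
 * itself succeeded.
 */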
journal_lock_updates(EXT3_SB(sb)->s_journal);
err2 = journal_flush(EXT3_SB(sb)->s_journal);
journal_unlock_updates(EXT3_SB(sb)->s_journal);
if (err == 0)
err = err2;
group_extend_out:
mnt_drop_write_file(filp);
return err;
}
case EXT3_IOC_GROUP_ADD: {
struct ext3_new_group_data input;
struct super_block *sb = inode->i_sb;
int err, err2;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
err = mnt_want_write_file(filp);
if (err)
return err;
if (copy_from_user(&input, (struct ext3_new_group_input __user *)arg,
sizeof(input))) {
err = -EFAULT;
goto group_add_out;
}
err = ext3_group_add(sb, &input);
journal_lock_updates(EXT3_SB(sb)->s_journal);
err2 = journal_flush(EXT3_SB(sb)->s_journal);
journal_unlock_updates(EXT3_SB(sb)->s_journal);
if (err == 0)
err = err2;
group_add_out:
mnt_drop_write_file(filp);
return err;
}
case FITRIM: {
struct super_block *sb = inode->i_sb;
struct fstrim_range range;
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
sizeof(range)))
return -EFAULT;
ret = ext3_trim_fs(sb, &range);
if (ret < 0)
return ret;
if (copy_to_user((struct fstrim_range __user *)arg, &range,
sizeof(range)))
return -EFAULT;
return 0;
}
default:
return -ENOTTY;
}
}
#ifdef CONFIG_COMPAT
long ext3_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
/* These are just misnamed, they actually get/put from/to user an int */
switch (cmd) {
case EXT3_IOC32_GETFLAGS:
cmd = EXT3_IOC_GETFLAGS;
break;
case EXT3_IOC32_SETFLAGS:
cmd = EXT3_IOC_SETFLAGS;
break;
case EXT3_IOC32_GETVERSION:
cmd = EXT3_IOC_GETVERSION;
break;
case EXT3_IOC32_SETVERSION:
cmd = EXT3_IOC_SETVERSION;
break;
case EXT3_IOC32_GROUP_EXTEND:
cmd = EXT3_IOC_GROUP_EXTEND;
break;
case EXT3_IOC32_GETVERSION_OLD:
cmd = EXT3_IOC_GETVERSION_OLD;
break;
case EXT3_IOC32_SETVERSION_OLD:
cmd = EXT3_IOC_SETVERSION_OLD;
break;
#ifdef CONFIG_JBD_DEBUG
case EXT3_IOC32_WAIT_FOR_READONLY:
cmd = EXT3_IOC_WAIT_FOR_READONLY;
break;
#endif
case EXT3_IOC32_GETRSVSZ:
cmd = EXT3_IOC_GETRSVSZ;
break;
case EXT3_IOC32_SETRSVSZ:
cmd = EXT3_IOC_SETRSVSZ;
break;
case EXT3_IOC_GROUP_ADD:
break;
default:
return -ENOIOCTLCMD;
}
return ext3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
| gpl-2.0 |
yytang2012/linux-kvm-arm | drivers/s390/scsi/zfcp_scsi.c | 4039 | 21393 | /*
* zfcp device driver
*
* Interface to Linux SCSI midlayer.
*
* Copyright IBM Corp. 2002, 2013
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_fcp.h>
#include <scsi/scsi_eh.h>
#include <linux/atomic.h>
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
static bool enable_dif;
module_param_named(dif, enable_dif, bool, 0400);
MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
static bool allow_lun_scan = true;
module_param(allow_lun_scan, bool, 0600);
MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
int reason)
{
switch (reason) {
case SCSI_QDEPTH_DEFAULT:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
break;
case SCSI_QDEPTH_QFULL:
scsi_track_queue_full(sdev, depth);
break;
case SCSI_QDEPTH_RAMP_UP:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
break;
default:
return -EOPNOTSUPP;
}
return sdev->queue_depth;
}
static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
/* if previous slave_alloc returned early, there is nothing to do */
if (!zfcp_sdev->port)
return;
zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
put_device(&zfcp_sdev->port->dev);
}
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
if (sdp->tagged_supported)
scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
else
scsi_adjust_queue_depth(sdp, 0, 1);
return 0;
}
static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
set_host_byte(scpnt, result);
zfcp_dbf_scsi_fail_send(scpnt);
scpnt->scsi_done(scpnt);
}
static
int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
int status, scsi_result, ret;
/* reset the status for this request */
scpnt->result = 0;
scpnt->host_scribble = NULL;
scsi_result = fc_remote_port_chkready(rport);
if (unlikely(scsi_result)) {
scpnt->result = scsi_result;
zfcp_dbf_scsi_fail_send(scpnt);
scpnt->scsi_done(scpnt);
return 0;
}
status = atomic_read(&zfcp_sdev->status);
if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
!(atomic_read(&zfcp_sdev->port->status) &
ZFCP_STATUS_COMMON_ERP_FAILED)) {
/* only LUN access is denied, while the port is good;
 * this case is not covered by the FC transport, so fail here */
zfcp_scsi_command_fail(scpnt, DID_ERROR);
return 0;
}
if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
/* This could be either
* open LUN pending: this is temporary, will result in
* open LUN or ERP_FAILED, so retry command
* call to rport_delete pending: mimic retry from
* fc_remote_port_chkready until rport is BLOCKED
*/
zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
return 0;
}
ret = zfcp_fsf_fcp_cmnd(scpnt);
if (unlikely(ret == -EBUSY))
return SCSI_MLQUEUE_DEVICE_BUSY;
else if (unlikely(ret < 0))
return SCSI_MLQUEUE_HOST_BUSY;
return ret;
}
static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) sdev->host->hostdata[0];
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_port *port;
struct zfcp_unit *unit;
int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -ENXIO;
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
if (!unit && !(allow_lun_scan && npiv)) {
put_device(&port->dev);
return -ENXIO;
}
zfcp_sdev->port = port;
zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
spin_lock_init(&zfcp_sdev->latencies.lock);
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
zfcp_erp_wait(port->adapter);
return 0;
}
static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
{
struct Scsi_Host *scsi_host = scpnt->device->host;
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scsi_host->hostdata[0];
struct zfcp_fsf_req *old_req, *abrt_req;
unsigned long flags;
unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
int retval = SUCCESS, ret;
int retry = 3;
char *dbf_tag;
/* avoid race condition between late normal completion and abort */
write_lock_irqsave(&adapter->abort_lock, flags);
old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
if (!old_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);
zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
return FAILED; /* completion could be in progress */
}
old_req->data = NULL;
/* don't access old fsf_req after releasing the abort_lock */
write_unlock_irqrestore(&adapter->abort_lock, flags);
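/*
 * Try to issue the abort up to three times: between attempts, wait
 * for error recovery to settle and let the FC transport handle a
 * blocked rport; give up early if the adapter is no longer running.
 */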
while (retry--) {
abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
if (abrt_req)
break;
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret) {
zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
return ret;
}
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
return SUCCESS;
}
}
if (!abrt_req) {
zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
return FAILED;
}
wait_for_completion(&abrt_req->completion);
if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
dbf_tag = "abrt_ok";
else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
dbf_tag = "abrt_nn";
else {
dbf_tag = "abrt_fa";
retval = FAILED;
}
zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
zfcp_fsf_req_free(abrt_req);
return retval;
}
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
struct zfcp_fsf_req *fsf_req = NULL;
int retval = SUCCESS, ret;
int retry = 3;
while (retry--) {
fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
if (fsf_req)
break;
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret)
return ret;
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
return SUCCESS;
}
}
if (!fsf_req)
return FAILED;
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
retval = FAILED;
} else
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
zfcp_fsf_req_free(fsf_req);
return retval;
}
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
}
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
}
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
int ret;
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret)
return ret;
return SUCCESS;
}
struct scsi_transport_template *zfcp_scsi_transport_template;
static struct scsi_host_template zfcp_scsi_host_template = {
.module = THIS_MODULE,
.name = "zfcp",
.queuecommand = zfcp_scsi_queuecommand,
.eh_abort_handler = zfcp_scsi_eh_abort_handler,
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
.slave_alloc = zfcp_scsi_slave_alloc,
.slave_configure = zfcp_scsi_slave_configure,
.slave_destroy = zfcp_scsi_slave_destroy,
.change_queue_depth = zfcp_scsi_change_queue_depth,
.proc_name = "zfcp",
.can_queue = 4096,
.this_id = -1,
.sg_tablesize = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
* ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
/* GCD, adjusted later */
.max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
* ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
/* GCD, adjusted later */
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
.cmd_per_lun = 1,
.use_clustering = 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
};
/**
* zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
* @adapter: The zfcp adapter to register with the SCSI midlayer
*/
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
if (adapter->scsi_host)
return 0;
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
if (!adapter->scsi_host) {
dev_err(&adapter->ccw_device->dev,
"Registering the FCP device with the "
"SCSI stack failed\n");
return -EIO;
}
/* tell the SCSI stack some characteristics of this adapter */
adapter->scsi_host->max_id = 511;
adapter->scsi_host->max_lun = 0xFFFFFFFF;
adapter->scsi_host->max_channel = 0;
adapter->scsi_host->unique_id = dev_id.devno;
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
adapter->scsi_host->transportt = zfcp_scsi_transport_template;
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
scsi_host_put(adapter->scsi_host);
return -EIO;
}
return 0;
}
/**
* zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
* @adapter: The zfcp adapter to unregister.
*/
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost;
struct zfcp_port *port;
shost = adapter->scsi_host;
if (!shost)
return;
read_lock_irq(&adapter->port_list_lock);
list_for_each_entry(port, &adapter->port_list, list)
port->rport = NULL;
read_unlock_irq(&adapter->port_list_lock);
fc_remove_host(shost);
scsi_remove_host(shost);
scsi_host_put(shost);
adapter->scsi_host = NULL;
}
static struct fc_host_statistics*
zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
{
struct fc_host_statistics *fc_stats;
if (!adapter->fc_stats) {
fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
if (!fc_stats)
return NULL;
adapter->fc_stats = fc_stats; /* freed in adapter_release */
}
memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
return adapter->fc_stats;
}
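/*
 * A statistics "reset" does not clear the hardware counters; it only
 * records a snapshot (see zfcp_reset_fc_host_stats() below). This
 * helper subtracts that snapshot from the current counters so the
 * reported statistics appear to restart from zero.
 */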
static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
struct fsf_qtcb_bottom_port *data,
struct fsf_qtcb_bottom_port *old)
{
fc_stats->seconds_since_last_reset =
data->seconds_since_last_reset - old->seconds_since_last_reset;
fc_stats->tx_frames = data->tx_frames - old->tx_frames;
fc_stats->tx_words = data->tx_words - old->tx_words;
fc_stats->rx_frames = data->rx_frames - old->rx_frames;
fc_stats->rx_words = data->rx_words - old->rx_words;
fc_stats->lip_count = data->lip - old->lip;
fc_stats->nos_count = data->nos - old->nos;
fc_stats->error_frames = data->error_frames - old->error_frames;
fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
fc_stats->link_failure_count = data->link_failure - old->link_failure;
fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
fc_stats->loss_of_signal_count =
data->loss_of_signal - old->loss_of_signal;
fc_stats->prim_seq_protocol_err_count =
data->psp_error_counts - old->psp_error_counts;
fc_stats->invalid_tx_word_count =
data->invalid_tx_words - old->invalid_tx_words;
fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
fc_stats->fcp_input_requests =
data->input_requests - old->input_requests;
fc_stats->fcp_output_requests =
data->output_requests - old->output_requests;
fc_stats->fcp_control_requests =
data->control_requests - old->control_requests;
fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}
static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
struct fsf_qtcb_bottom_port *data)
{
fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
fc_stats->tx_frames = data->tx_frames;
fc_stats->tx_words = data->tx_words;
fc_stats->rx_frames = data->rx_frames;
fc_stats->rx_words = data->rx_words;
fc_stats->lip_count = data->lip;
fc_stats->nos_count = data->nos;
fc_stats->error_frames = data->error_frames;
fc_stats->dumped_frames = data->dumped_frames;
fc_stats->link_failure_count = data->link_failure;
fc_stats->loss_of_sync_count = data->loss_of_sync;
fc_stats->loss_of_signal_count = data->loss_of_signal;
fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
fc_stats->invalid_tx_word_count = data->invalid_tx_words;
fc_stats->invalid_crc_count = data->invalid_crcs;
fc_stats->fcp_input_requests = data->input_requests;
fc_stats->fcp_output_requests = data->output_requests;
fc_stats->fcp_control_requests = data->control_requests;
fc_stats->fcp_input_megabytes = data->input_mb;
fc_stats->fcp_output_megabytes = data->output_mb;
}
static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
{
struct zfcp_adapter *adapter;
struct fc_host_statistics *fc_stats;
struct fsf_qtcb_bottom_port *data;
int ret;
adapter = (struct zfcp_adapter *)host->hostdata[0];
fc_stats = zfcp_init_fc_host_stats(adapter);
if (!fc_stats)
return NULL;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
if (ret) {
kfree(data);
return NULL;
}
if (adapter->stats_reset &&
((jiffies/HZ - adapter->stats_reset) <
data->seconds_since_last_reset))
zfcp_adjust_fc_host_stats(fc_stats, data,
adapter->stats_reset_data);
else
zfcp_set_fc_host_stats(fc_stats, data);
kfree(data);
return fc_stats;
}
static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
{
struct zfcp_adapter *adapter;
struct fsf_qtcb_bottom_port *data;
int ret;
adapter = (struct zfcp_adapter *)shost->hostdata[0];
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
if (ret)
kfree(data);
else {
adapter->stats_reset = jiffies/HZ;
kfree(adapter->stats_reset_data);
adapter->stats_reset_data = data; /* finally freed in
adapter_release */
}
}
static void zfcp_get_host_port_state(struct Scsi_Host *shost)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
int status = atomic_read(&adapter->status);
if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
!(status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED))
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
else if (status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
else if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
else
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
}
static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
rport->dev_loss_tmo = timeout;
}
/**
* zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
* @rport: The FC rport on which to terminate I/O
*
* Abort all pending SCSI commands for a port by closing the
* port. Using a reopen avoids a conflict with a shutdown
* overwriting a reopen. The "forced" ensures that a disappeared port
* is not opened again as valid due to the cached plogi data in
* non-NPIV mode.
*/
static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
{
struct zfcp_port *port;
struct Scsi_Host *shost = rport_to_shost(rport);
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (port) {
zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
put_device(&port->dev);
}
}
static void zfcp_scsi_rport_register(struct zfcp_port *port)
{
struct fc_rport_identifiers ids;
struct fc_rport *rport;
if (port->rport)
return;
ids.node_name = port->wwnn;
ids.port_name = port->wwpn;
ids.port_id = port->d_id;
ids.roles = FC_RPORT_ROLE_FCP_TARGET;
rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!rport) {
dev_err(&port->adapter->ccw_device->dev,
"Registering port 0x%016Lx failed\n",
(unsigned long long)port->wwpn);
return;
}
rport->maxframe_size = port->maxframe_size;
rport->supported_classes = port->supported_classes;
port->rport = rport;
port->starget_id = rport->scsi_target_id;
zfcp_unit_queue_scsi_scan(port);
}
static void zfcp_scsi_rport_block(struct zfcp_port *port)
{
struct fc_rport *rport = port->rport;
if (rport) {
fc_remote_port_delete(rport);
port->rport = NULL;
}
}
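/*
 * The schedule helpers below take a reference on the port so it stays
 * alive while rport_work is pending; zfcp_scsi_rport_work() drops the
 * reference when it finishes, and the helpers drop it themselves when
 * the work could not be queued.
 */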
void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
{
get_device(&port->dev);
port->rport_task = RPORT_ADD;
if (!queue_work(port->adapter->work_queue, &port->rport_work))
put_device(&port->dev);
}
void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
{
get_device(&port->dev);
port->rport_task = RPORT_DEL;
if (port->rport && queue_work(port->adapter->work_queue,
&port->rport_work))
return;
put_device(&port->dev);
}
void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
{
unsigned long flags;
struct zfcp_port *port;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
zfcp_scsi_schedule_rport_block(port);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
void zfcp_scsi_rport_work(struct work_struct *work)
{
struct zfcp_port *port = container_of(work, struct zfcp_port,
rport_work);
while (port->rport_task) {
if (port->rport_task == RPORT_ADD) {
port->rport_task = RPORT_NONE;
zfcp_scsi_rport_register(port);
} else {
port->rport_task = RPORT_NONE;
zfcp_scsi_rport_block(port);
}
}
put_device(&port->dev);
}
/**
* zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
* @adapter: The adapter where to configure DIF/DIX for the SCSI host
*/
void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
{
unsigned int mask = 0;
unsigned int data_div;
struct Scsi_Host *shost = adapter->scsi_host;
data_div = atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
if (enable_dif &&
adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
mask |= SHOST_DIF_TYPE1_PROTECTION;
if (enable_dif && data_div &&
adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
mask |= SHOST_DIX_TYPE1_PROTECTION;
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
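/* With DIX, protection data consumes scatter-gather entries too, so
 * split the per-request budget evenly between data and protection. */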
shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2;
shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2;
shost->max_sectors = shost->sg_tablesize * 8;
}
scsi_host_set_prot(shost, mask);
}
/**
* zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
* @scmd: The SCSI command to report the error for
* @ascq: The ASCQ to put in the sense buffer
*
* See the error handling in sd_done for the sense codes used here.
* Set DID_SOFT_ERROR to retry the request, if possible.
*/
void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
{
scsi_build_sense_buffer(1, scmd->sense_buffer,
ILLEGAL_REQUEST, 0x10, ascq);
set_driver_byte(scmd, DRIVER_SENSE);
scmd->result |= SAM_STAT_CHECK_CONDITION;
set_host_byte(scmd, DID_SOFT_ERROR);
}
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
.show_starget_node_name = 1,
.show_rport_supported_classes = 1,
.show_rport_maxframe_size = 1,
.show_rport_dev_loss_tmo = 1,
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_permanent_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_serial_number = 1,
.get_fc_host_stats = zfcp_get_fc_host_stats,
.reset_fc_host_stats = zfcp_reset_fc_host_stats,
.set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
.get_host_port_state = zfcp_get_host_port_state,
.terminate_rport_io = zfcp_scsi_terminate_rport_io,
.show_host_port_state = 1,
.show_host_active_fc4s = 1,
.bsg_request = zfcp_fc_exec_bsg_job,
.bsg_timeout = zfcp_fc_timeout_bsg_job,
/* no functions are registered for the following dynamic attributes;
they are set directly by the LLDD */
.show_host_port_type = 1,
.show_host_symbolic_name = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};
| gpl-2.0 |
joisonwk/linux | drivers/media/pci/cx18/cx18-mailbox.c | 4295 | 24590 | /*
* cx18 mailbox functions
*
* Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
* 02111-1307 USA
*/
#include <stdarg.h>
#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"
#include "cx18-irq.h"
#include "cx18-mailbox.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-alsa-pcm.h" /* FIXME make configurable */
static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };
#define API_FAST (1 << 2) /* Short timeout */
#define API_SLOW (1 << 3) /* Additional 300ms timeout */
struct cx18_api_info {
u32 cmd;
u8 flags; /* Flags, see above */
u8 rpu; /* Processing unit */
const char *name; /* The name of the command */
};
#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }
static const struct cx18_api_info api_info[] = {
/* MPEG encoder API */
API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
API_ENTRY(CPU, CX18_EPU_DEBUG, 0),
API_ENTRY(CPU, CX18_CREATE_TASK, 0),
API_ENTRY(CPU, CX18_DESTROY_TASK, 0),
API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW),
API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP, API_SLOW),
API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE, 0),
API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME, 0),
API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 0),
API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN, 0),
API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE, 0),
API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION, 0),
API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM, 0),
API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 0),
API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING, 0),
API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE, 0),
API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS, 0),
API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE, 0),
API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE, 0),
API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS, 0),
API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM, API_SLOW),
API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO, 0),
API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT, 0),
API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID, 0),
API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID, 0),
API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE, 0),
API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE, 0),
API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION, 0),
API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO, 0),
API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME, 0),
API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM, 0),
API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER, 0),
API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0),
API_ENTRY(CPU, CX18_CPU_SET_VFC_PARAM, 0),
API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0),
API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST),
API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW),
API_ENTRY(APU, CX18_APU_START, 0),
API_ENTRY(APU, CX18_APU_STOP, 0),
API_ENTRY(APU, CX18_APU_RESETAI, 0),
API_ENTRY(CPU, CX18_CPU_DEBUG_PEEK32, 0),
API_ENTRY(0, 0, 0),
};
static const struct cx18_api_info *find_api_info(u32 cmd)
{
int i;
for (i = 0; api_info[i].cmd; i++)
if (api_info[i].cmd == cmd)
return &api_info[i];
return NULL;
}
/* Call with buf of n*11+1 bytes */
static char *u32arr2hex(u32 data[], int n, char *buf)
{
char *p;
int i;
for (i = 0, p = buf; i < n; i++, p += 11) {
/* kernel snprintf() appends '\0' always */
snprintf(p, 12, " %#010x", data[i]);
}
*p = '\0';
return buf;
}
static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
{
char argstr[MAX_MB_ARGUMENTS*11+1];
if (!(cx18_debug & CX18_DBGFLG_API))
return;
CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s"
"\n", name, mb->request, mb->ack, mb->cmd, mb->error,
u32arr2hex(mb->args, MAX_MB_ARGUMENTS, argstr));
}
/*
* Functions that run in a work_queue work handling context
*/
static void cx18_mdl_send_to_dvb(struct cx18_stream *s, struct cx18_mdl *mdl)
{
struct cx18_buffer *buf;
if (s->dvb == NULL || !s->dvb->enabled || mdl->bytesused == 0)
return;
/* We ignore mdl and buf readpos accounting here - it doesn't matter */
/* The likely case */
if (list_is_singular(&mdl->buf_list)) {
buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
list);
if (buf->bytesused)
dvb_dmx_swfilter(&s->dvb->demux,
buf->buf, buf->bytesused);
return;
}
list_for_each_entry(buf, &mdl->buf_list, list) {
if (buf->bytesused == 0)
break;
dvb_dmx_swfilter(&s->dvb->demux, buf->buf, buf->bytesused);
}
}
static void cx18_mdl_send_to_videobuf(struct cx18_stream *s,
struct cx18_mdl *mdl)
{
struct cx18_videobuf_buffer *vb_buf;
struct cx18_buffer *buf;
u8 *p;
u32 offset = 0;
int dispatch = 0;
if (mdl->bytesused == 0)
return;
/* Acquire a videobuf buffer, clone into it and release it */
spin_lock(&s->vb_lock);
if (list_empty(&s->vb_capture))
goto out;
vb_buf = list_first_entry(&s->vb_capture, struct cx18_videobuf_buffer,
vb.queue);
p = videobuf_to_vmalloc(&vb_buf->vb);
if (!p)
goto out;
offset = vb_buf->bytes_used;
list_for_each_entry(buf, &mdl->buf_list, list) {
if (buf->bytesused == 0)
break;
if ((offset + buf->bytesused) <= vb_buf->vb.bsize) {
memcpy(p + offset, buf->buf, buf->bytesused);
offset += buf->bytesused;
vb_buf->bytes_used += buf->bytesused;
}
}
/* If we've filled the buffer to the caller's requested frame size, dispatch it */
if (vb_buf->bytes_used >= s->vb_bytes_per_frame) {
dispatch = 1;
vb_buf->bytes_used = 0;
}
if (dispatch) {
vb_buf->vb.ts = ktime_to_timeval(ktime_get());
list_del(&vb_buf->vb.queue);
vb_buf->vb.state = VIDEOBUF_DONE;
wake_up(&vb_buf->vb.done);
}
mod_timer(&s->vb_timeout, msecs_to_jiffies(2000) + jiffies);
out:
spin_unlock(&s->vb_lock);
}
static void cx18_mdl_send_to_alsa(struct cx18 *cx, struct cx18_stream *s,
struct cx18_mdl *mdl)
{
struct cx18_buffer *buf;
if (mdl->bytesused == 0)
return;
/* We ignore mdl and buf readpos accounting here - it doesn't matter */
/* The likely case */
if (list_is_singular(&mdl->buf_list)) {
buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
list);
if (buf->bytesused)
cx->pcm_announce_callback(cx->alsa, buf->buf,
buf->bytesused);
return;
}
list_for_each_entry(buf, &mdl->buf_list, list) {
if (buf->bytesused == 0)
break;
cx->pcm_announce_callback(cx->alsa, buf->buf, buf->bytesused);
}
}
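/*
 * Handle a firmware DMA-done notification: look up the stream for the
 * task handle, walk the array of MDL acks copied out of the mailbox,
 * and route each completed MDL to its consumer (DVB demux, ALSA, or
 * videobuf) or onto the stream's full queue, before refilling the
 * firmware with free MDLs.
 */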
static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
{
u32 handle, mdl_ack_count, id;
struct cx18_mailbox *mb;
struct cx18_mdl_ack *mdl_ack;
struct cx18_stream *s;
struct cx18_mdl *mdl;
int i;
mb = &order->mb;
handle = mb->args[0];
s = cx18_handle_to_stream(cx, handle);
if (s == NULL) {
CX18_WARN("Got DMA done notification for unknown/inactive"
" handle %d, %s mailbox seq no %d\n", handle,
(order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
"stale" : "good", mb->request);
return;
}
mdl_ack_count = mb->args[2];
mdl_ack = order->mdl_ack;
for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
id = mdl_ack->id;
/*
* Simple integrity check for processing a stale (and possibly
* inconsistent mailbox): make sure the MDL id is in the
* valid range for the stream.
*
* We go through the trouble of dealing with stale mailboxes
* because most of the time, the mailbox data is still valid and
* unchanged (and in practice the firmware ping-pongs the
* two mdl_ack buffers so mdl_acks are not stale).
*
* There are occasions when we get a half changed mailbox,
* which this check catches for a handle & id mismatch. If the
* handle and id do correspond, the worst case is that we
* completely lost the old MDL, but pick up the new MDL
* early (but the new mdl_ack is guaranteed to be good in this
* case as the firmware wouldn't point us to a new mdl_ack until
* it's filled in).
*
* cx18_queue_get_mdl() will detect the lost MDLs
* and send them back to q_free for fw rotation eventually.
*/
if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
!(id >= s->mdl_base_idx &&
id < (s->mdl_base_idx + s->buffers))) {
CX18_WARN("Fell behind! Ignoring stale mailbox with "
" inconsistent data. Lost MDL for mailbox "
"seq no %d\n", mb->request);
break;
}
mdl = cx18_queue_get_mdl(s, id, mdl_ack->data_used);
CX18_DEBUG_HI_DMA("DMA DONE for %s (MDL %d)\n", s->name, id);
if (mdl == NULL) {
CX18_WARN("Could not find MDL %d for stream %s\n",
id, s->name);
continue;
}
CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n",
s->name, mdl->bytesused);
if (s->type == CX18_ENC_STREAM_TYPE_TS) {
cx18_mdl_send_to_dvb(s, mdl);
cx18_enqueue(s, mdl, &s->q_free);
} else if (s->type == CX18_ENC_STREAM_TYPE_PCM) {
/* Pass the data to cx18-alsa */
if (cx->pcm_announce_callback != NULL) {
cx18_mdl_send_to_alsa(cx, s, mdl);
cx18_enqueue(s, mdl, &s->q_free);
} else {
cx18_enqueue(s, mdl, &s->q_full);
}
} else if (s->type == CX18_ENC_STREAM_TYPE_YUV) {
cx18_mdl_send_to_videobuf(s, mdl);
cx18_enqueue(s, mdl, &s->q_free);
} else {
cx18_enqueue(s, mdl, &s->q_full);
if (s->type == CX18_ENC_STREAM_TYPE_IDX)
cx18_stream_rotate_idx_mdls(cx);
}
}
/* Put as many MDLs as possible back into fw use */
cx18_stream_load_fw_queue(s);
wake_up(&cx->dma_waitq);
if (s->id != -1)
wake_up(&s->waitq);
}
static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
{
char *p;
char *str = order->str;
CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
p = strchr(str, '.');
if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
CX18_INFO("FW version: %s\n", p - 1);
}
static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
{
switch (order->rpu) {
case CPU:
{
switch (order->mb.cmd) {
case CX18_EPU_DMA_DONE:
epu_dma_done(cx, order);
break;
case CX18_EPU_DEBUG:
epu_debug(cx, order);
break;
default:
CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
order->mb.cmd);
break;
}
break;
}
case APU:
CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
order->mb.cmd);
break;
default:
break;
}
}
static
void free_in_work_order(struct cx18 *cx, struct cx18_in_work_order *order)
{
atomic_set(&order->pending, 0);
}
void cx18_in_work_handler(struct work_struct *work)
{
struct cx18_in_work_order *order =
container_of(work, struct cx18_in_work_order, work);
struct cx18 *cx = order->cx;
epu_cmd(cx, order);
free_in_work_order(cx, order);
}
/*
* Functions that run in an interrupt handling context
*/
static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
struct cx18_mailbox __iomem *ack_mb;
u32 ack_irq, req;
switch (order->rpu) {
case APU:
ack_irq = IRQ_EPU_TO_APU_ACK;
ack_mb = &cx->scb->apu2epu_mb;
break;
case CPU:
ack_irq = IRQ_EPU_TO_CPU_ACK;
ack_mb = &cx->scb->cpu2epu_mb;
break;
default:
CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
order->rpu, order->mb.cmd);
return;
}
req = order->mb.request;
/* Don't ack if the RPU has gotten impatient and timed us out */
if (req != cx18_readl(cx, &ack_mb->request) ||
req == cx18_readl(cx, &ack_mb->ack)) {
CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
"incoming %s to EPU mailbox (sequence no. %u) "
"while processing\n",
rpu_str[order->rpu], rpu_str[order->rpu], req);
order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
return;
}
cx18_writel(cx, req, &ack_mb->ack);
cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
return;
}
static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
u32 handle, mdl_ack_offset, mdl_ack_count;
struct cx18_mailbox *mb;
int i;
mb = &order->mb;
handle = mb->args[0];
mdl_ack_offset = mb->args[1];
mdl_ack_count = mb->args[2];
if (handle == CX18_INVALID_TASK_HANDLE ||
mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
mb_ack_irq(cx, order);
return -1;
}
for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count; i += sizeof(u32))
((u32 *)order->mdl_ack)[i / sizeof(u32)] =
cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);
if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
mb_ack_irq(cx, order);
return 1;
}
static
int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
u32 str_offset;
char *str = order->str;
str[0] = '\0';
str_offset = order->mb.args[1];
if (str_offset) {
cx18_setup_page(cx, str_offset);
cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
str[252] = '\0';
cx18_setup_page(cx, SCB_OFFSET);
}
if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
mb_ack_irq(cx, order);
return str_offset ? 1 : 0;
}
static inline
int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
int ret = -1;
switch (order->rpu) {
case CPU:
{
switch (order->mb.cmd) {
case CX18_EPU_DMA_DONE:
ret = epu_dma_done_irq(cx, order);
break;
case CX18_EPU_DEBUG:
ret = epu_debug_irq(cx, order);
break;
default:
CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
order->mb.cmd);
break;
}
break;
}
case APU:
CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
order->mb.cmd);
break;
default:
break;
}
return ret;
}
static inline
struct cx18_in_work_order *alloc_in_work_order_irq(struct cx18 *cx)
{
int i;
struct cx18_in_work_order *order = NULL;
for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
/*
* We only need "pending" atomic to inspect its contents,
* and need not do a check and set because:
* 1. Any work handler thread only clears "pending" and only
* on one, particular work order at a time, per handler thread.
* 2. "pending" is only set here, and we're serialized because
* we're called in an IRQ handler context.
*/
if (atomic_read(&cx->in_work_order[i].pending) == 0) {
order = &cx->in_work_order[i];
atomic_set(&order->pending, 1);
break;
}
}
return order;
}
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
struct cx18_mailbox __iomem *mb;
struct cx18_mailbox *order_mb;
struct cx18_in_work_order *order;
int submit;
int i;
switch (rpu) {
case CPU:
mb = &cx->scb->cpu2epu_mb;
break;
case APU:
mb = &cx->scb->apu2epu_mb;
break;
default:
return;
}
order = alloc_in_work_order_irq(cx);
if (order == NULL) {
CX18_WARN("Unable to find blank work order form to schedule "
"incoming mailbox command processing\n");
return;
}
order->flags = 0;
order->rpu = rpu;
order_mb = &order->mb;
/* mb->cmd and mb->args[0] through mb->args[2] */
for (i = 0; i < 4; i++)
(&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);
/* mb->request and mb->ack. N.B. we want to read mb->ack last */
for (i = 0; i < 2; i++)
(&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);
if (order_mb->request == order_mb->ack) {
CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
"incoming %s to EPU mailbox (sequence no. %u)"
"\n",
rpu_str[rpu], rpu_str[rpu], order_mb->request);
if (cx18_debug & CX18_DBGFLG_WARN)
dump_mb(cx, order_mb, "incoming");
order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
}
/*
* Individual EPU command processing is responsible for ack-ing
* a non-stale mailbox as soon as possible
*/
submit = epu_cmd_irq(cx, order);
if (submit > 0) {
queue_work(cx->in_work_queue, &order->work);
}
}
/*
* Functions called from a non-interrupt, non work_queue context
*/
static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
const struct cx18_api_info *info = find_api_info(cmd);
u32 irq, req, ack, err;
struct cx18_mailbox __iomem *mb;
wait_queue_head_t *waitq;
struct mutex *mb_lock;
unsigned long int t0, timeout, ret;
int i;
char argstr[MAX_MB_ARGUMENTS*11+1];
DEFINE_WAIT(w);
if (info == NULL) {
CX18_WARN("unknown cmd %x\n", cmd);
return -EINVAL;
}
if (cx18_debug & CX18_DBGFLG_API) { /* only call u32arr2hex if needed */
if (cmd == CX18_CPU_DE_SET_MDL) {
if (cx18_debug & CX18_DBGFLG_HIGHVOL)
CX18_DEBUG_HI_API("%s\tcmd %#010x args%s\n",
info->name, cmd,
u32arr2hex(data, args, argstr));
} else
CX18_DEBUG_API("%s\tcmd %#010x args%s\n",
info->name, cmd,
u32arr2hex(data, args, argstr));
}
switch (info->rpu) {
case APU:
waitq = &cx->mb_apu_waitq;
mb_lock = &cx->epu2apu_mb_lock;
irq = IRQ_EPU_TO_APU;
mb = &cx->scb->epu2apu_mb;
break;
case CPU:
waitq = &cx->mb_cpu_waitq;
mb_lock = &cx->epu2cpu_mb_lock;
irq = IRQ_EPU_TO_CPU;
mb = &cx->scb->epu2cpu_mb;
break;
default:
CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
return -EINVAL;
}
mutex_lock(mb_lock);
/*
* Wait for an in-use mailbox to complete
*
* If the XPU is responding with Ack's, the mailbox shouldn't be in
* a busy state, since we serialize access to it on our end.
*
* If the wait for ack after sending a previous command was interrupted
* by a signal, we may get here and find a busy mailbox. After waiting,
* mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
*/
req = cx18_readl(cx, &mb->request);
timeout = msecs_to_jiffies(10);
ret = wait_event_timeout(*waitq,
(ack = cx18_readl(cx, &mb->ack)) == req,
timeout);
if (req != ack) {
/* waited long enough, make the mbox "not busy" from our end */
cx18_writel(cx, req, &mb->ack);
CX18_ERR("mbox was found stuck busy when setting up for %s; "
"clearing busy and trying to proceed\n", info->name);
} else if (ret != timeout)
CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
jiffies_to_msecs(timeout-ret));
/* Build the outgoing mailbox */
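/*
 * Advance the mailbox sequence number; 0xfffffffe and 0xffffffff both
 * wrap to 1, so a live request number is never 0 or all-ones.
 */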
req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;
cx18_writel(cx, cmd, &mb->cmd);
for (i = 0; i < args; i++)
cx18_writel(cx, data[i], &mb->args[i]);
cx18_writel(cx, 0, &mb->error);
cx18_writel(cx, req, &mb->request);
cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */
/*
* Notify the XPU and wait for it to send an Ack back
*/
timeout = msecs_to_jiffies((info->flags & API_FAST) ? 10 : 20);
CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
irq, info->name);
/* So we don't miss the wakeup, prepare to wait before notifying fw */
prepare_to_wait(waitq, &w, TASK_UNINTERRUPTIBLE);
cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);
t0 = jiffies;
ack = cx18_readl(cx, &mb->ack);
if (ack != req) {
schedule_timeout(timeout);
ret = jiffies - t0;
ack = cx18_readl(cx, &mb->ack);
} else {
ret = jiffies - t0;
}
finish_wait(waitq, &w);
if (req != ack) {
mutex_unlock(mb_lock);
if (ret >= timeout) {
/* Timed out */
CX18_DEBUG_WARN("sending %s timed out waiting %d msecs "
"for RPU acknowledgement\n",
info->name, jiffies_to_msecs(ret));
} else {
CX18_DEBUG_WARN("woken up before mailbox ack was ready "
"after submitting %s to RPU. only "
"waited %d msecs on req %u but awakened"
" with unmatched ack %u\n",
info->name,
jiffies_to_msecs(ret),
req, ack);
}
return -EINVAL;
}
if (ret >= timeout)
CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment "
"sending %s; timed out waiting %d msecs\n",
info->name, jiffies_to_msecs(ret));
else
CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
jiffies_to_msecs(ret), info->name);
/* Collect data returned by the XPU */
for (i = 0; i < MAX_MB_ARGUMENTS; i++)
data[i] = cx18_readl(cx, &mb->args[i]);
err = cx18_readl(cx, &mb->error);
mutex_unlock(mb_lock);
/*
* Wait for XPU to perform extra actions for the caller in some cases.
* e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all MDLs
* back in a burst shortly thereafter
*/
if (info->flags & API_SLOW)
cx18_msleep_timeout(300, 0);
if (err)
CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
info->name);
return err ? -EIO : 0;
}
int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
return cx18_api_call(cx, cmd, args, data);
}
static int cx18_set_filter_param(struct cx18_stream *s)
{
struct cx18 *cx = s->cx;
u32 mode;
int ret;
mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
s->handle, 1, mode, cx->spatial_strength);
mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
s->handle, 0, mode, cx->temporal_strength);
ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
s->handle, 2, cx->filter_mode >> 2, 0);
return ret;
}
int cx18_api_func(void *priv, u32 cmd, int in, int out,
u32 data[CX2341X_MBOX_MAX_DATA])
{
struct cx18_stream *s = priv;
struct cx18 *cx = s->cx;
switch (cmd) {
case CX2341X_ENC_SET_OUTPUT_PORT:
return 0;
case CX2341X_ENC_SET_FRAME_RATE:
return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
s->handle, 0, 0, 0, 0, data[0]);
case CX2341X_ENC_SET_FRAME_SIZE:
return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
s->handle, data[1], data[0]);
case CX2341X_ENC_SET_STREAM_TYPE:
return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
s->handle, data[0]);
case CX2341X_ENC_SET_ASPECT_RATIO:
return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
s->handle, data[0]);
case CX2341X_ENC_SET_GOP_PROPERTIES:
return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
s->handle, data[0], data[1]);
case CX2341X_ENC_SET_GOP_CLOSURE:
return 0;
case CX2341X_ENC_SET_AUDIO_PROPERTIES:
return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
s->handle, data[0]);
case CX2341X_ENC_MUTE_AUDIO:
return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
s->handle, data[0]);
case CX2341X_ENC_SET_BIT_RATE:
return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
s->handle, data[0], data[1], data[2], data[3]);
case CX2341X_ENC_MUTE_VIDEO:
return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
s->handle, data[0]);
case CX2341X_ENC_SET_FRAME_DROP_RATE:
return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
s->handle, data[0]);
case CX2341X_ENC_MISC:
return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
s->handle, data[0], data[1], data[2]);
case CX2341X_ENC_SET_DNR_FILTER_MODE:
cx->filter_mode = (data[0] & 3) | (data[1] << 2);
return cx18_set_filter_param(s);
case CX2341X_ENC_SET_DNR_FILTER_PROPS:
cx->spatial_strength = data[0];
cx->temporal_strength = data[1];
return cx18_set_filter_param(s);
case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
s->handle, data[0], data[1]);
case CX2341X_ENC_SET_CORING_LEVELS:
return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
s->handle, data[0], data[1], data[2], data[3]);
}
CX18_WARN("Unknown cmd %x\n", cmd);
return 0;
}
int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
u32 cmd, int args, ...)
{
va_list ap;
int i;
va_start(ap, args);
for (i = 0; i < args; i++)
data[i] = va_arg(ap, u32);
va_end(ap);
return cx18_api(cx, cmd, args, data);
}
int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
{
u32 data[MAX_MB_ARGUMENTS];
va_list ap;
int i;
if (cx == NULL) {
CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
return 0;
}
if (args > MAX_MB_ARGUMENTS) {
CX18_ERR("args too big (cmd=%x)\n", cmd);
args = MAX_MB_ARGUMENTS;
}
va_start(ap, args);
for (i = 0; i < args; i++)
data[i] = va_arg(ap, u32);
va_end(ap);
return cx18_api(cx, cmd, args, data);
}
| gpl-2.0 |
StanTRC/lge-kernel-e430 | arch/mips/mm/init.c | 4551 | 12375 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#define ENTER_CRITICAL(flags) \
{ \
unsigned int mvpflags; \
local_irq_save(flags);\
mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
evpe(mvpflags); \
local_irq_restore(flags); \
}
#else
#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)
#endif /* CONFIG_MIPS_MT_SMTC */
/*
* We have up to 8 empty zeroed pages so we can map one of the right colour
* when needed. This is necessary only on R4000 / R4400 SC and MC versions
* where we have to avoid VCED / VECI exceptions for good performance at
* any price. Since the page is never written to after initialization, we
* don't have to care about aliases on other CPUs.
*/
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
/*
* Not static inline because used by IP27 special magic initialization code
*/
unsigned long setup_zero_pages(void)
{
unsigned int order;
unsigned long size;
struct page *page;
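/*
 * CPUs with virtual coherency exceptions need one zero page per cache
 * colour, hence eight pages (order 3); all others need only one.
 */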
if (cpu_has_vce)
order = 3;
else
order = 0;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
panic("Oh boy, that early out of memory?");
page = virt_to_page((void *)empty_zero_page);
split_page(page, order);
while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
SetPageReserved(page);
page++;
}
size = PAGE_SIZE << order;
zero_page_mask = (size - 1) & PAGE_MASK;
return 1UL << order;
}
#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
unsigned long vaddr;
/* cache the first coherent kmap pte */
vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif
void *kmap_coherent(struct page *page, unsigned long addr)
{
enum fixed_addresses idx;
unsigned long vaddr, flags, entrylo;
unsigned long old_ctx;
pte_t pte;
int tlbidx;
BUG_ON(Page_dcache_dirty(page));
inc_preempt_count();
idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
idx += FIX_N_COLOURS * smp_processor_id() +
(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
entrylo = pte.pte_high;
#else
entrylo = pte_to_entrylo(pte_val(pte));
#endif
ENTER_CRITICAL(flags);
old_ctx = read_c0_entryhi();
write_c0_entryhi(vaddr & (PAGE_MASK << 1));
write_c0_entrylo0(entrylo);
write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
/* preload TLB instead of local_flush_tlb_one() */
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
tlbidx = read_c0_index();
mtc0_tlbw_hazard();
if (tlbidx < 0)
tlb_write_random();
else
tlb_write_indexed();
#else
tlbidx = read_c0_wired();
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
mtc0_tlbw_hazard();
tlb_write_indexed();
#endif
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
EXIT_CRITICAL(flags);
return (void*) vaddr;
}
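/* An entryhi value that can never match a real mapping (CKSEG0 is
* unmapped), used to park invalidated wired entries. */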
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
unsigned int wired;
unsigned long flags, old_ctx;
ENTER_CRITICAL(flags);
old_ctx = read_c0_entryhi();
wired = read_c0_wired() - 1;
write_c0_wired(wired);
write_c0_index(wired);
write_c0_entryhi(UNIQUE_ENTRYHI(wired));
write_c0_entrylo0(0);
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
EXIT_CRITICAL(flags);
#endif
dec_preempt_count();
preempt_check_resched();
}
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
void *vfrom, *vto;
vto = kmap_atomic(to);
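/*
* If the source page is mapped to user space and the D-cache may
* alias, read it through a kernel mapping of the same cache colour
* so we see the data the user sees.
*/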
if (cpu_has_dc_aliases &&
page_mapped(from) && !Page_dcache_dirty(from)) {
vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom);
kunmap_coherent();
} else {
vfrom = kmap_atomic(from);
copy_page(vto, vfrom);
kunmap_atomic(vfrom);
}
if ((!cpu_has_ic_fills_f_dc) ||
pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
flush_data_cache_page((unsigned long)vto);
kunmap_atomic(vto);
/* Make sure this page is cleared on other CPUs too before using it */
smp_wmb();
}
void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
if (cpu_has_dc_aliases &&
page_mapped(page) && !Page_dcache_dirty(page)) {
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len);
kunmap_coherent();
} else {
memcpy(dst, src, len);
if (cpu_has_dc_aliases)
SetPageDcacheDirty(page);
}
if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
flush_cache_page(vma, vaddr, page_to_pfn(page));
}
void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
if (cpu_has_dc_aliases &&
page_mapped(page) && !Page_dcache_dirty(page)) {
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
kunmap_coherent();
} else {
memcpy(dst, src, len);
if (cpu_has_dc_aliases)
SetPageDcacheDirty(page);
}
}
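/*
* Pre-allocate page tables covering a fixed virtual range (the
* fixmap/kmap area) so that later set_pte() calls never need to
* allocate memory.
*/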
void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int i, j, k;
unsigned long vaddr;
vaddr = start;
i = __pgd_offset(vaddr);
j = __pud_offset(vaddr);
k = __pmd_offset(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
pud = (pud_t *)pgd;
for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
pmd = (pmd_t *)pud;
for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
set_pmd(pmd, __pmd((unsigned long)pte));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
vaddr += PMD_SIZE;
}
k = 0;
}
j = 0;
}
#endif
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
int page_is_ram(unsigned long pagenr)
{
int i;
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long addr, end;
switch (boot_mem_map.map[i].type) {
case BOOT_MEM_RAM:
case BOOT_MEM_INIT_RAM:
break;
default:
/* not usable memory */
continue;
}
addr = PFN_UP(boot_mem_map.map[i].addr);
end = PFN_DOWN(boot_mem_map.map[i].addr +
boot_mem_map.map[i].size);
if (pagenr >= addr && pagenr < end)
return 1;
}
return 0;
}
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
unsigned long lastpfn __maybe_unused;
pagetable_init();
#ifdef CONFIG_HIGHMEM
kmap_init();
#endif
kmap_coherent_init();
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
lastpfn = highend_pfn;
if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
printk(KERN_WARNING "This processor doesn't support highmem."
" %ldk highmem ignored\n",
(highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
lastpfn = max_low_pfn;
}
#endif
free_area_init_nodes(max_zone_pfns);
}
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
void __init mem_init(void)
{
unsigned long codesize, reservedpages, datasize, initsize;
unsigned long tmp, ram;
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
max_mapnr = max_low_pfn;
#endif
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
totalram_pages += free_all_bootmem();
totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
reservedpages = ram = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
if (page_is_ram(tmp) && pfn_valid(tmp)) {
ram++;
if (PageReserved(pfn_to_page(tmp)))
reservedpages++;
}
num_physpages = ram;
#ifdef CONFIG_HIGHMEM
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
struct page *page = pfn_to_page(tmp);
if (!page_is_ram(tmp)) {
SetPageReserved(page);
continue;
}
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
totalhigh_pages++;
}
totalram_pages += totalhigh_pages;
num_physpages += totalhigh_pages;
#endif
codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)
/* The -4 is a hack so that user tools don't have to handle
the overflow. */
kclist_add(&kcore_kseg0, (void *) CKSEG0,
0x80000000 - 4, KCORE_TEXT);
#endif
printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
"%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
nr_free_pages() << (PAGE_SHIFT-10),
ram << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10,
totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
unsigned long pfn;
for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
struct page *page = pfn_to_page(pfn);
void *addr = phys_to_virt(PFN_PHYS(pfn));
ClearPageReserved(page);
init_page_count(page);
memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
__free_page(page);
totalram_pages++;
}
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_init_pages("initrd memory",
virt_to_phys((void *)start),
virt_to_phys((void *)end));
}
#endif
void __init_refok free_initmem(void)
{
prom_free_prom_memory();
free_init_pages("unused kernel memory",
__pa_symbol(&__init_begin),
__pa_symbol(&__init_end));
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif
/*
* On 64-bit we've got three-level pagetables with a slightly
* different layout ...
*/
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
/*
* gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
* are constants, so we use the precomputed variants from asm-offsets.h until
* those gcc versions are officially retired.
*/
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
| gpl-2.0 |
dexter93/kernel_htc_msm8660 | drivers/gpu/drm/nouveau/nv50_display.c | 4807 | 28485 | /*
* Copyright (C) 2008 Maarten Maathuis.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
#include "nv50_display.h"
#include "nouveau_crtc.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_ramht.h"
#include "drm_crtc_helper.h"
static void nv50_display_isr(struct drm_device *);
static void nv50_display_bh(unsigned long);
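/* NV50-family boards expose two SORs; 0x90 and newer chipsets (other
* than 0x92 and 0xa0) expose four. */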
static inline int
nv50_sor_nr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 ||
dev_priv->chipset == 0xa0)
return 2;
return 4;
}
u32
nv50_display_active_crtcs(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
u32 mask = 0;
int i;
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 ||
dev_priv->chipset == 0xa0) {
for (i = 0; i < 2; i++)
mask |= nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
} else {
for (i = 0; i < 4; i++)
mask |= nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
}
for (i = 0; i < 3; i++)
mask |= nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
return mask & 3;
}
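/* Poke a single method/data pair through the EVO channel's immediate
* (PIO) interface, used e.g. during early init before the DMA path is up. */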
static int
evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
{
int ret = 0;
nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
nv_wr32(dev, 0x610304 + (ch * 0x08), data);
nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
ret = -EBUSY;
if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
return ret;
}
int
nv50_display_early_init(struct drm_device *dev)
{
u32 ctrl = nv_rd32(dev, 0x610200);
int i;
/* check if the master evo channel is already active, as good a sign as
* any that the display engine is in a weird state (hibernate/kexec); if
* it is, do our best to reset the display engine...
*/
if ((ctrl & 0x00000003) == 0x00000003) {
NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
/* deactivate both heads first; on some boards PDISP will disappear
* forever (well, until you power cycle) as soon as PMC_ENABLE is hit
* unless the heads are already off..
*/
for (i = 0; i < 2; i++) {
evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
}
evo_icmd(dev, 0, 0x0080, 0);
/* reset PDISP */
nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
}
return 0;
}
void
nv50_display_late_takedown(struct drm_device *dev)
{
}
int
nv50_display_sync(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo = disp->master;
u64 start;
int ret;
ret = RING_SPACE(evo, 6);
if (ret == 0) {
BEGIN_RING(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x80000000);
BEGIN_RING(evo, 0, 0x0080, 1);
OUT_RING (evo, 0);
BEGIN_RING(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000000);
nv_wo32(disp->ntfy, 0x000, 0x00000000);
FIRE_RING (evo);
start = ptimer->read(dev);
do {
if (nv_ro32(disp->ntfy, 0x000))
return 0;
} while (ptimer->read(dev) - start < 2000000000ULL);
}
return -EBUSY;
}
int
nv50_display_init(struct drm_device *dev)
{
struct nouveau_channel *evo;
int ret, i;
u32 val;
NV_DEBUG_KMS(dev, "\n");
nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
/*
* I think the 0x006101XX range is some kind of main control area
* that enables things.
*/
/* CRTC? */
for (i = 0; i < 2; i++) {
val = nv_rd32(dev, 0x00616100 + (i * 0x800));
nv_wr32(dev, 0x00610190 + (i * 0x10), val);
val = nv_rd32(dev, 0x00616104 + (i * 0x800));
nv_wr32(dev, 0x00610194 + (i * 0x10), val);
val = nv_rd32(dev, 0x00616108 + (i * 0x800));
nv_wr32(dev, 0x00610198 + (i * 0x10), val);
val = nv_rd32(dev, 0x0061610c + (i * 0x800));
nv_wr32(dev, 0x0061019c + (i * 0x10), val);
}
/* DAC */
for (i = 0; i < 3; i++) {
val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
}
/* SOR */
for (i = 0; i < nv50_sor_nr(dev); i++) {
val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
}
/* EXT */
for (i = 0; i < 3; i++) {
val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
}
for (i = 0; i < 3; i++) {
nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
}
/* The precise purpose is unknown; I suspect it has something to do
* with text mode.
*/
if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
if (!nv_wait(dev, 0x006194e8, 2, 0)) {
NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
nv_rd32(dev, 0x6194e8));
return -EBUSY;
}
}
for (i = 0; i < 2; i++) {
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
return -EBUSY;
}
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
NV_ERROR(dev, "timeout: "
"CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
return -EBUSY;
}
}
nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1,
NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
ret = nv50_evo_init(dev);
if (ret)
return ret;
evo = nv50_display(dev)->master;
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
ret = RING_SPACE(evo, 3);
if (ret)
return ret;
BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
OUT_RING (evo, NvEvoSync);
return nv50_display_sync(dev);
}
void
nv50_display_fini(struct drm_device *dev)
{
struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo = disp->master;
struct drm_crtc *drm_crtc;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
nv50_crtc_blank(crtc, true);
}
ret = RING_SPACE(evo, 2);
if (ret == 0) {
BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
}
FIRE_RING(evo);
/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
* cleaning up?
*/
list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
if (!crtc->base.enabled)
continue;
nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
if (!nv_wait(dev, NV50_PDISPLAY_INTR_1, mask, mask)) {
NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
"0x%08x\n", mask, mask);
NV_ERROR(dev, "0x610024 = 0x%08x\n",
nv_rd32(dev, NV50_PDISPLAY_INTR_1));
}
}
for (i = 0; i < 2; i++) {
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
}
}
nv50_evo_fini(dev);
for (i = 0; i < 3; i++) {
if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
}
}
/* disable interrupts. */
nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
}
int
nv50_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct drm_connector *connector, *ct;
struct nv50_display *priv;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev_priv->engine.display.priv = priv;
/* Create CRTC objects */
for (i = 0; i < 2; i++)
nv50_crtc_create(dev, i);
/* We setup the encoders from the BIOS table */
for (i = 0 ; i < dcb->entries; i++) {
struct dcb_entry *entry = &dcb->entry[i];
if (entry->location != DCB_LOC_ON_CHIP) {
NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n",
entry->type, ffs(entry->or) - 1);
continue;
}
connector = nouveau_connector_create(dev, entry->connector);
if (IS_ERR(connector))
continue;
switch (entry->type) {
case OUTPUT_TMDS:
case OUTPUT_LVDS:
case OUTPUT_DP:
nv50_sor_create(connector, entry);
break;
case OUTPUT_ANALOG:
nv50_dac_create(connector, entry);
break;
default:
NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
continue;
}
}
list_for_each_entry_safe(connector, ct,
&dev->mode_config.connector_list, head) {
if (!connector->encoder_ids[0]) {
NV_WARN(dev, "%s has no encoders, removing\n",
drm_get_connector_name(connector));
connector->funcs->destroy(connector);
}
}
tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nv50_display_isr);
ret = nv50_evo_create(dev);
if (ret) {
nv50_display_destroy(dev);
return ret;
}
return 0;
}
void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_display *disp = nv50_display(dev);
NV_DEBUG_KMS(dev, "\n");
nv50_evo_destroy(dev);
nouveau_irq_unregister(dev, 26);
kfree(disp);
}
void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
struct nv50_display *disp = nv50_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
struct nouveau_channel *evo = dispc->sync;
int ret;
ret = RING_SPACE(evo, 8);
if (ret) {
WARN_ON(1);
return;
}
BEGIN_RING(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x0094, 1);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x00c0, 1);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x0080, 1);
OUT_RING (evo, 0x00000000);
FIRE_RING (evo);
}
int
nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *chan)
{
struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nv50_display *disp = nv50_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
struct nouveau_channel *evo = dispc->sync;
int ret;
ret = RING_SPACE(evo, chan ? 25 : 27);
if (unlikely(ret))
return ret;
/* synchronise with the rendering channel, if necessary */
if (likely(chan)) {
ret = RING_SPACE(chan, 10);
if (ret) {
WIND_RING(evo);
return ret;
}
if (dev_priv->chipset < 0xc0) {
BEGIN_RING(chan, 0, 0x0060, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, dispc->sem.offset);
BEGIN_RING(chan, 0, 0x006c, 1);
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
BEGIN_RING(chan, 0, 0x0064, 2);
OUT_RING (chan, dispc->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
BEGIN_RING(chan, 0, 0x0060, 1);
if (dev_priv->chipset < 0x84)
OUT_RING (chan, NvSema);
else
OUT_RING (chan, chan->vram_handle);
} else {
u64 offset = chan->dispc_vma[nv_crtc->index].offset;
offset += dispc->sem.offset;
BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
OUT_RING (chan, 0x1002);
BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
OUT_RING (chan, 0x1001);
}
FIRE_RING (chan);
} else {
nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
0xf00d0000 | dispc->sem.value);
}
/* queue the flip on the crtc's "display sync" channel */
BEGIN_RING(evo, 0, 0x0100, 1);
OUT_RING (evo, 0xfffe0000);
if (chan) {
BEGIN_RING(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000100);
} else {
BEGIN_RING(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000010);
/* allows gamma somehow, PDISP will bitch at you if
* you don't wait for vblank before changing this..
*/
BEGIN_RING(evo, 0, 0x00e0, 1);
OUT_RING (evo, 0x40000000);
}
BEGIN_RING(evo, 0, 0x0088, 4);
OUT_RING (evo, dispc->sem.offset);
OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
OUT_RING (evo, 0x74b1e000);
OUT_RING (evo, NvEvoSync);
BEGIN_RING(evo, 0, 0x00a0, 2);
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x00c0, 1);
OUT_RING (evo, nv_fb->r_dma);
BEGIN_RING(evo, 0, 0x0110, 2);
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x0800, 5);
OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (fb->height << 16) | fb->width);
OUT_RING (evo, nv_fb->r_pitch);
OUT_RING (evo, nv_fb->r_format);
BEGIN_RING(evo, 0, 0x0080, 1);
OUT_RING (evo, 0x00000000);
FIRE_RING (evo);
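/* advance to the alternate semaphore slot/value for the next flip */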
dispc->sem.offset ^= 0x10;
dispc->sem.value++;
return 0;
}
static u16
nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
u32 mc, int pxclk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = NULL;
struct drm_encoder *encoder;
struct nvbios *bios = &dev_priv->vbios;
u32 script = 0, or;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
if (nv_encoder->dcb != dcb)
continue;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
break;
}
or = ffs(dcb->or) - 1;
switch (dcb->type) {
case OUTPUT_LVDS:
script = (mc >> 8) & 0xf;
if (bios->fp_no_ddc) {
if (bios->fp.dual_link)
script |= 0x0100;
if (bios->fp.if_is_24bit)
script |= 0x0200;
} else {
/* determine number of lvds links */
if (nv_connector && nv_connector->edid &&
nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
/* http://www.spwg.org */
if (((u8 *)nv_connector->edid)[121] == 2)
script |= 0x0100;
} else
if (pxclk >= bios->fp.duallink_transition_clk) {
script |= 0x0100;
}
/* determine panel depth */
if (script & 0x0100) {
if (bios->fp.strapless_is_24bit & 2)
script |= 0x0200;
} else {
if (bios->fp.strapless_is_24bit & 1)
script |= 0x0200;
}
if (nv_connector && nv_connector->edid &&
(nv_connector->edid->revision >= 4) &&
(nv_connector->edid->input & 0x70) >= 0x20)
script |= 0x0200;
}
if (nouveau_uscript_lvds >= 0) {
NV_INFO(dev, "override script 0x%04x with 0x%04x "
"for output LVDS-%d\n", script,
nouveau_uscript_lvds, or);
script = nouveau_uscript_lvds;
}
break;
case OUTPUT_TMDS:
script = (mc >> 8) & 0xf;
if (pxclk >= 165000)
script |= 0x0100;
if (nouveau_uscript_tmds >= 0) {
NV_INFO(dev, "override script 0x%04x with 0x%04x "
"for output TMDS-%d\n", script,
nouveau_uscript_tmds, or);
script = nouveau_uscript_tmds;
}
break;
case OUTPUT_DP:
script = (mc >> 8) & 0xf;
break;
case OUTPUT_ANALOG:
script = 0xff;
break;
default:
NV_ERROR(dev, "modeset on unsupported output type!\n");
break;
}
return script;
}
static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan, *tmp;
list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
nvsw.vbl_wait) {
if (chan->nvsw.vblsem_head != crtc)
continue;
nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
chan->nvsw.vblsem_rval);
list_del(&chan->nvsw.vbl_wait);
drm_vblank_put(dev, crtc);
}
drm_handle_vblank(dev, crtc);
}
static void
nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
{
if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
nv50_display_vblank_crtc_handler(dev, 0);
if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
nv50_display_vblank_crtc_handler(dev, 1);
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
}
static void
nv50_display_unk10_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
disp->irq.dcb = NULL;
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
/* Determine which CRTC we're dealing with, only 1 ever will be
* signalled at the same time with the current nouveau code.
*/
crtc = ffs((unk30 & 0x00000060) >> 5) - 1;
if (crtc < 0)
goto ack;
/* Nothing needs to be done for the encoder */
crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
if (crtc < 0)
goto ack;
/* Find which encoder was connected to the CRTC */
for (i = 0; type == OUTPUT_ANY && i < 3; i++) {
mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc);
if (!(mc & (1 << crtc)))
continue;
switch ((mc & 0x00000f00) >> 8) {
case 0: type = OUTPUT_ANALOG; break;
case 1: type = OUTPUT_TV; break;
default:
NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
goto ack;
}
or = i;
}
for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 ||
dev_priv->chipset == 0xa0)
mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
else
mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc);
if (!(mc & (1 << crtc)))
continue;
switch ((mc & 0x00000f00) >> 8) {
case 0: type = OUTPUT_LVDS; break;
case 1: type = OUTPUT_TMDS; break;
case 2: type = OUTPUT_TMDS; break;
case 5: type = OUTPUT_TMDS; break;
case 8: type = OUTPUT_DP; break;
case 9: type = OUTPUT_DP; break;
default:
NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
goto ack;
}
or = i;
}
/* There was no encoder to disable */
if (type == OUTPUT_ANY)
goto ack;
/* Disable the encoder */
for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
if (dcb->type == type && (dcb->or & (1 << or))) {
nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
disp->irq.dcb = dcb;
goto ack;
}
}
NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc);
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
nv_wr32(dev, 0x610030, 0x80000000);
}
static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dcb = disp->irq.dcb;
if (dcb) {
nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
disp->irq.dcb = NULL;
}
/* CRTC clock change requested? */
crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
if (crtc >= 0) {
pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
pclk &= 0x003fffff;
if (pclk)
nv50_crtc_set_clock(dev, crtc, pclk);
tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
tmp &= ~0x000000f;
nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
}
/* Nothing needs to be done for the encoder */
crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
if (crtc < 0)
goto ack;
pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
/* Find which encoder is connected to the CRTC */
for (i = 0; type == OUTPUT_ANY && i < 3; i++) {
mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc);
if (!(mc & (1 << crtc)))
continue;
switch ((mc & 0x00000f00) >> 8) {
case 0: type = OUTPUT_ANALOG; break;
case 1: type = OUTPUT_TV; break;
default:
NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
goto ack;
}
or = i;
}
for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 ||
dev_priv->chipset == 0xa0)
mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
else
mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc);
if (!(mc & (1 << crtc)))
continue;
switch ((mc & 0x00000f00) >> 8) {
case 0: type = OUTPUT_LVDS; break;
case 1: type = OUTPUT_TMDS; break;
case 2: type = OUTPUT_TMDS; break;
case 5: type = OUTPUT_TMDS; break;
case 8: type = OUTPUT_DP; break;
case 9: type = OUTPUT_DP; break;
default:
NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
goto ack;
}
or = i;
}
if (type == OUTPUT_ANY)
goto ack;
/* Enable the encoder */
for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
dcb = &dev_priv->vbios.dcb.entry[i];
if (dcb->type == type && (dcb->or & (1 << or)))
break;
}
if (i == dev_priv->vbios.dcb.entries) {
NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc);
goto ack;
}
script = nv50_display_script_select(dev, dcb, mc, pclk);
nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
if (type == OUTPUT_DP) {
int link = !(dcb->dpconf.sor.link & 1);
if ((mc & 0x000f0000) == 0x00020000)
nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
else
nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
}
if (dcb->type != OUTPUT_ANALOG) {
tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
tmp &= ~0x00000f0f;
if (script & 0x0100)
tmp |= 0x00000101;
nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
} else {
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
}
disp->irq.dcb = dcb;
disp->irq.pclk = pclk;
disp->irq.script = script;
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
nv_wr32(dev, 0x610030, 0x80000000);
}
/* If programming a TMDS output on a SOR that can also be configured for
* DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
*
* It looks like the VBIOS TMDS scripts make an attempt at this, however,
* the VBIOS scripts on at least one board I have only switch it off on
* link 0, causing a blank display if the output has previously been
* programmed for DisplayPort.
*/
static void
nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
{
int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
struct drm_encoder *encoder;
u32 tmp;
if (dcb->type != OUTPUT_TMDS)
return;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
if (nv_encoder->dcb->type == OUTPUT_DP &&
nv_encoder->dcb->or & (1 << or)) {
tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
break;
}
}
}
static void
nv50_display_unk40_handler(struct drm_device *dev)
{
struct nv50_display *disp = nv50_display(dev);
struct dcb_entry *dcb = disp->irq.dcb;
u16 script = disp->irq.script;
u32 unk30 = nv_rd32(dev, 0x610030), pclk = disp->irq.pclk;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
disp->irq.dcb = NULL;
if (!dcb)
goto ack;
nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
nv50_display_unk40_dp_set_tmds(dev, dcb);
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
nv_wr32(dev, 0x610030, 0x80000000);
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
}
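/* Tasklet bottom half: runs the deferred CLK_UNKxx modeset stages in
* order, then re-enables PMC interrupt delivery. */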
static void
nv50_display_bh(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
for (;;) {
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
nv50_display_unk10_handler(dev);
else
if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
nv50_display_unk20_handler(dev);
else
if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
nv50_display_unk40_handler(dev);
else
break;
}
nv_wr32(dev, NV03_PMC_INTR_EN_0, 1);
}
static void
nv50_display_error_handler(struct drm_device *dev)
{
u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
u32 addr, data;
int chid;
for (chid = 0; chid < 5; chid++) {
if (!(channels & (1 << chid)))
continue;
nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid));
data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid));
NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x "
"(0x%04x 0x%02x)\n", chid,
addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
}
}
static void
nv50_display_isr(struct drm_device *dev)
{
struct nv50_display *disp = nv50_display(dev);
uint32_t delayed = 0;
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
uint32_t clock;
NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
if (!intr0 && !(intr1 & ~delayed))
break;
if (intr0 & 0x001f0000) {
nv50_display_error_handler(dev);
intr0 &= ~0x001f0000;
}
if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
nv50_display_vblank_handler(dev, intr1);
intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
}
clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
NV50_PDISPLAY_INTR_1_CLK_UNK20 |
NV50_PDISPLAY_INTR_1_CLK_UNK40));
if (clock) {
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
tasklet_schedule(&disp->tasklet);
delayed |= clock;
intr1 &= ~clock;
}
if (intr0) {
NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0);
}
if (intr1) {
NV_ERROR(dev,
"unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1);
}
}
}
| gpl-2.0 |
CheckYourScreen/Arsenic.Kernel | drivers/s390/crypto/zcrypt_pcixcc.c | 4807 | 34002 | /*
* linux/drivers/s390/crypto/zcrypt_pcixcc.c
*
* zcrypt 2.1.0
*
* Copyright (C) 2001, 2006 IBM Corporation
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_error.h"
#include "zcrypt_pcicc.h"
#include "zcrypt_pcixcc.h"
#include "zcrypt_cca_key.h"
#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */
#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */
#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE
#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
#define PCIXCC_MCL2_SPEED_RATING 7870
#define PCIXCC_MCL3_SPEED_RATING 7870
#define CEX2C_SPEED_RATING 7000
#define CEX3C_SPEED_RATING 6500
#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
#define PCIXCC_CLEANUP_TIME (15*HZ)
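/* round up to the next multiple of 4; CPRB lengths are word aligned */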
#define CEIL4(x) ((((x)+3)/4)*4)
struct response_type {
struct completion work;
int type;
};
#define PCIXCC_RESPONSE_TYPE_ICA 0
#define PCIXCC_RESPONSE_TYPE_XCRB 1
static struct ap_device_id zcrypt_pcixcc_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
{ AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
"Copyright 2001, 2006 IBM Corporation");
MODULE_LICENSE("GPL");
static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *,
struct ap_message *);
static struct ap_driver zcrypt_pcixcc_driver = {
.probe = zcrypt_pcixcc_probe,
.remove = zcrypt_pcixcc_remove,
.receive = zcrypt_pcixcc_receive,
.ids = zcrypt_pcixcc_ids,
.request_timeout = PCIXCC_CLEANUP_TIME,
};
/**
* The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
* card in a type6 message. The 3 fields that must be filled in at execution
* time are req_parml, rpl_parml and usage_domain.
* Everything about this interface is ASCII/big-endian, since the
* device does *not* have 'Intel inside'.
*
* The CPRBX is followed immediately by the parm block.
* The parm block contains:
* - function code ('PD' 0x5044 or 'PK' 0x504B)
* - rule block (one of:)
* + 0x000A 'PKCS-1.2' (MCL2 'PD')
* + 0x000A 'ZERO-PAD' (MCL2 'PK')
* + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
* + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
* - VUD block
*/
static struct CPRBX static_cprbx = {
.cprb_len = 0x00DC,
.cprb_ver_id = 0x02,
.func_id = {0x54,0x32},
};
/**
* Convert an ICAMEX message to a type6 MEX message.
*
* @zdev: crypto device pointer
* @ap_msg: pointer to AP message
* @mex: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex)
{
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
.agent_id = {'C','A',},
.function_code = {'P','K'},
};
static struct function_and_rules_block static_pke_fnr = {
.function_code = {'P','K'},
.ulen = 10,
.only_rule = {'M','R','P',' ',' ',' ',' ',' '}
};
static struct function_and_rules_block static_pke_fnr_MCL2 = {
.function_code = {'P','K'},
.ulen = 10,
.only_rule = {'Z','E','R','O','-','P','A','D'}
};
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
struct function_and_rules_block fr;
unsigned short length;
char text[0];
} __attribute__((packed)) *msg = ap_msg->message;
int size;
/* VUD.ciphertext */
msg->length = mex->inputdatalength + 2;
if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
return -EFAULT;
/* Set up key which is located after the variable length text. */
size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
if (size < 0)
return size;
size += sizeof(*msg) + mex->inputdatalength;
/* message header, cprbx and f&r */
msg->hdr = static_type6_hdrX;
msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
static_pke_fnr_MCL2 : static_pke_fnr;
msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
ap_msg->length = size;
return 0;
}
/**
* Convert an ICACRT message to a type6 CRT message.
*
* @zdev: crypto device pointer
* @ap_msg: pointer to AP message
* @crt: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
.agent_id = {'C','A',},
.function_code = {'P','D'},
};
static struct function_and_rules_block static_pkd_fnr = {
.function_code = {'P','D'},
.ulen = 10,
.only_rule = {'Z','E','R','O','-','P','A','D'}
};
static struct function_and_rules_block static_pkd_fnr_MCL2 = {
.function_code = {'P','D'},
.ulen = 10,
.only_rule = {'P','K','C','S','-','1','.','2'}
};
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
struct function_and_rules_block fr;
unsigned short length;
char text[0];
} __attribute__((packed)) *msg = ap_msg->message;
int size;
/* VUD.ciphertext */
msg->length = crt->inputdatalength + 2;
if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
return -EFAULT;
/* Set up key which is located after the variable length text. */
size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
if (size < 0)
return size;
size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
/* message header, cprbx and f&r */
msg->hdr = static_type6_hdrX;
msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
size - sizeof(msg->hdr) - sizeof(msg->cprbx);
msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
static_pkd_fnr_MCL2 : static_pkd_fnr;
ap_msg->length = size;
return 0;
}
/**
* Convert an XCRB message to a type6 CPRB message.
*
* @zdev: crypto device pointer
* @ap_msg: pointer to AP message
* @xcRB: pointer to user input data
*
* Returns 0 on success or -EFAULT, -EINVAL.
*/
struct type86_fmt2_msg {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
} __attribute__((packed));
static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
struct ap_message *ap_msg,
struct ica_xcRB *xcRB)
{
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
};
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
} __attribute__((packed)) *msg = ap_msg->message;
int rcblen = CEIL4(xcRB->request_control_blk_length);
int replylen;
char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
char *function_code;
/* length checks */
ap_msg->length = sizeof(struct type6_hdr) +
CEIL4(xcRB->request_control_blk_length) +
xcRB->request_data_length;
if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
return -EINVAL;
replylen = sizeof(struct type86_fmt2_msg) +
CEIL4(xcRB->reply_control_blk_length) +
xcRB->reply_data_length;
if (replylen > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
return -EINVAL;
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
if (xcRB->request_data_length) {
msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
msg->hdr.ToCardLen2 = xcRB->request_data_length;
}
msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
msg->hdr.FromCardLen2 = xcRB->reply_data_length;
/* prepare CPRB */
if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
xcRB->request_control_blk_length))
return -EFAULT;
if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
xcRB->request_control_blk_length)
return -EINVAL;
function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
if (memcmp(function_code, "US", 2) == 0)
ap_msg->special = 1;
else
ap_msg->special = 0;
/* copy data block */
if (xcRB->request_data_length &&
copy_from_user(req_data, xcRB->request_data_address,
xcRB->request_data_length))
return -EFAULT;
return 0;
}
/**
* Prepare a type6 CPRB message for random number generation
*
* @ap_dev: AP device pointer
* @ap_msg: pointer to AP message
*/
static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
struct ap_message *ap_msg,
unsigned random_number_length)
{
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
char function_code[2];
short int rule_length;
char rule[8];
short int verb_length;
short int key_length;
} __attribute__((packed)) *msg = ap_msg->message;
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
.agent_id = {'C', 'A'},
.function_code = {'R', 'L'},
.ToCardLen1 = sizeof *msg - sizeof(msg->hdr),
.FromCardLen1 = sizeof *msg - sizeof(msg->hdr),
};
static struct CPRBX local_cprbx = {
.cprb_len = 0x00dc,
.cprb_ver_id = 0x02,
.func_id = {0x54, 0x32},
.req_parml = sizeof *msg - sizeof(msg->hdr) -
sizeof(msg->cprbx),
.rpl_msgbl = sizeof *msg - sizeof(msg->hdr),
};
msg->hdr = static_type6_hdrX;
msg->hdr.FromCardLen2 = random_number_length;
msg->cprbx = local_cprbx;
msg->cprbx.rpl_datal = random_number_length;
msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
memcpy(msg->function_code, msg->hdr.function_code, 0x02);
msg->rule_length = 0x0a;
memcpy(msg->rule, "RANDOM ", 8);
msg->verb_length = 0x02;
msg->key_length = 0x02;
ap_msg->length = sizeof *msg;
}
/**
* Copy results from a type 86 ICA reply message back to user space.
*
* @zdev: crypto device pointer
* @reply: reply AP message.
* @data: pointer to user output data
* @length: size of user output data
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
struct type86x_reply {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
unsigned char pad[4]; /* 4 byte function code/rules block ? */
unsigned short length;
char text[0];
} __attribute__((packed));
static int convert_type86_ica(struct zcrypt_device *zdev,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
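/* Fixed non-zero filler resembling PKCS#1 block type 2 padding, used
* below to 'restore' the padding the card strips from the reply. */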
static unsigned char static_pad[] = {
0x00,0x02,
0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
};
struct type86x_reply *msg = reply->message;
unsigned short service_rc, service_rs;
unsigned int reply_len, pad_len;
char *data;
service_rc = msg->cprbx.ccp_rtcode;
if (unlikely(service_rc != 0)) {
service_rs = msg->cprbx.ccp_rscode;
if (service_rc == 8 && service_rs == 66)
return -EINVAL;
if (service_rc == 8 && service_rs == 65)
return -EINVAL;
if (service_rc == 8 && service_rs == 770)
return -EINVAL;
if (service_rc == 8 && service_rs == 783) {
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
return -EAGAIN;
}
if (service_rc == 12 && service_rs == 769)
return -EINVAL;
if (service_rc == 8 && service_rs == 72)
return -EINVAL;
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
data = msg->text;
reply_len = msg->length - 2;
if (reply_len > outputdatalength)
return -EINVAL;
/*
* For all encipher requests, the length of the ciphertext (reply_len)
* will always equal the modulus length. For MEX decipher requests
* the output needs to get padded. Minimum pad size is 10.
*
* Currently, the cases where padding will be added is for:
* - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
* ZERO-PAD and CRT is only supported for PKD requests)
* - PCICC, always
*/
pad_len = outputdatalength - reply_len;
if (pad_len > 0) {
if (pad_len < 10)
return -EINVAL;
/* 'restore' padding left in the PCICC/PCIXCC card. */
if (copy_to_user(outputdata, static_pad, pad_len - 1))
return -EFAULT;
if (put_user(0, outputdata + pad_len - 1))
return -EFAULT;
}
/* Copy the crypto response to user space. */
if (copy_to_user(outputdata + pad_len, data, reply_len))
return -EFAULT;
return 0;
}
/**
* Copy results from a type 86 XCRB reply message back to user space.
*
* @zdev: crypto device pointer
* @reply: reply AP message.
* @xcRB: pointer to XCRB
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
static int convert_type86_xcrb(struct zcrypt_device *zdev,
struct ap_message *reply,
struct ica_xcRB *xcRB)
{
struct type86_fmt2_msg *msg = reply->message;
char *data = reply->message;
/* Copy CPRB to user */
if (copy_to_user(xcRB->reply_control_blk_addr,
data + msg->fmt2.offset1, msg->fmt2.count1))
return -EFAULT;
xcRB->reply_control_blk_length = msg->fmt2.count1;
/* Copy data buffer to user */
if (msg->fmt2.count2)
if (copy_to_user(xcRB->reply_data_addr,
data + msg->fmt2.offset2, msg->fmt2.count2))
return -EFAULT;
xcRB->reply_data_length = msg->fmt2.count2;
return 0;
}
static int convert_type86_rng(struct zcrypt_device *zdev,
struct ap_message *reply,
char *buffer)
{
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
} __attribute__((packed)) *msg = reply->message;
char *data = reply->message;
if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
return -EINVAL;
memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
return msg->fmt2.count2;
}
static int convert_response_ica(struct zcrypt_device *zdev,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
struct type86x_reply *msg = reply->message;
/* Response type byte is the second byte in the response. */
switch (((unsigned char *) reply->message)[1]) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
return convert_error(zdev, reply);
case TYPE86_RSP_CODE:
if (msg->cprbx.ccp_rtcode &&
(msg->cprbx.ccp_rscode == 0x14f) &&
(outputdatalength > 256)) {
if (zdev->max_exp_bit_length <= 17) {
zdev->max_exp_bit_length = 17;
return -EAGAIN;
} else
return -EINVAL;
}
if (msg->hdr.reply_code)
return convert_error(zdev, reply);
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_ica(zdev, reply,
outputdata, outputdatalength);
/* Fall through, no break, incorrect cprb version is an unknown
* response */
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
}
static int convert_response_xcrb(struct zcrypt_device *zdev,
struct ap_message *reply,
struct ica_xcRB *xcRB)
{
struct type86x_reply *msg = reply->message;
/* Response type byte is the second byte in the response. */
switch (((unsigned char *) reply->message)[1]) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
return convert_error(zdev, reply);
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code) {
memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
return convert_error(zdev, reply);
}
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_xcrb(zdev, reply, xcRB);
/* Fall through, no break, incorrect cprb version is an unknown
* response */
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
}
static int convert_response_rng(struct zcrypt_device *zdev,
struct ap_message *reply,
char *data)
{
struct type86x_reply *msg = reply->message;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
return -EINVAL;
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code)
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zdev, reply, data);
/* Fall through, no break, incorrect cprb version is an unknown
* response */
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
}
/**
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
* @ap_dev: pointer to the AP device
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
struct ap_message *msg,
struct ap_message *reply)
{
static struct error_hdr error_reply = {
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
struct response_type *resp_type =
(struct response_type *) msg->private;
struct type86x_reply *t86r;
int length;
/* Copy the reply message to the request message buffer. */
if (IS_ERR(reply)) {
memcpy(msg->message, &error_reply, sizeof(error_reply));
goto out;
}
t86r = reply->message;
if (t86r->hdr.type == TYPE86_RSP_CODE &&
t86r->cprbx.cprb_ver_id == 0x02) {
switch (resp_type->type) {
case PCIXCC_RESPONSE_TYPE_ICA:
length = sizeof(struct type86x_reply)
+ t86r->length - 2;
length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
memcpy(msg->message, reply->message, length);
break;
case PCIXCC_RESPONSE_TYPE_XCRB:
length = t86r->fmt2.offset2 + t86r->fmt2.count2;
length = min(PCIXCC_MAX_XCRB_MESSAGE_SIZE, length);
memcpy(msg->message, reply->message, length);
break;
default:
memcpy(msg->message, &error_reply, sizeof error_reply);
}
} else
memcpy(msg->message, reply->message, sizeof error_reply);
out:
complete(&(resp_type->work));
}
static atomic_t zcrypt_step = ATOMIC_INIT(0);
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a modexpo request.
* @zdev: pointer to zcrypt_device structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @mex: pointer to the modexpo request buffer
*/
static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
struct ica_rsa_modexpo *mex)
{
struct ap_message ap_msg;
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_ICA,
};
int rc;
ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
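/* PSMID: pid in the high word, a global sequence counter in the low
* word, so replies can be matched back to their requests. */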
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
if (rc)
goto out_free;
init_completion(&resp_type.work);
ap_queue_message(zdev->ap_dev, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0)
rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
mex->outputdatalength);
else
/* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
free_page((unsigned long) ap_msg.message);
return rc;
}
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a modexpo_crt request.
* @zdev: pointer to zcrypt_device structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @crt: pointer to the modexpo_crt request buffer
*/
static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
struct ica_rsa_modexpo_crt *crt)
{
struct ap_message ap_msg;
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_ICA,
};
int rc;
ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
if (rc)
goto out_free;
init_completion(&resp_type.work);
ap_queue_message(zdev->ap_dev, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0)
rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
crt->outputdatalength);
else
/* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
free_page((unsigned long) ap_msg.message);
return rc;
}
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a send_cprb request.
* @zdev: pointer to zcrypt_device structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @xcRB: pointer to the send_cprb request buffer
*/
static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
struct ica_xcRB *xcRB)
{
struct ap_message ap_msg;
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_XCRB,
};
int rc;
ap_init_message(&ap_msg);
ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
if (rc)
goto out_free;
init_completion(&resp_type.work);
ap_queue_message(zdev->ap_dev, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0)
rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
else
/* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
kzfree(ap_msg.message);
return rc;
}
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to generate random data.
* @zdev: pointer to zcrypt_device structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @buffer: pointer to a memory page to return random data
*/
static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
char *buffer)
{
struct ap_message ap_msg;
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_XCRB,
};
int rc;
ap_init_message(&ap_msg);
ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
init_completion(&resp_type.work);
ap_queue_message(zdev->ap_dev, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0)
rc = convert_response_rng(zdev, &ap_msg, buffer);
else
/* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg);
kfree(ap_msg.message);
return rc;
}
/**
* The crypto operations for a PCIXCC/CEX2C card.
*/
static struct zcrypt_ops zcrypt_pcixcc_ops = {
.rsa_modexpo = zcrypt_pcixcc_modexpo,
.rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
.send_cprb = zcrypt_pcixcc_send_cprb,
};
static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
.rsa_modexpo = zcrypt_pcixcc_modexpo,
.rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
.send_cprb = zcrypt_pcixcc_send_cprb,
.rng = zcrypt_pcixcc_rng,
};
/**
* Micro-code detection function. It sends a message to a pcixcc card
* to find out the microcode level.
* @ap_dev: pointer to the AP device.
*/
static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
{
static unsigned char msg[] = {
0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
0xF1,0x3D,0x93,0x53
};
unsigned long long psmid;
struct CPRBX *cprbx;
char *reply;
int rc, i;
reply = (void *) get_zeroed_page(GFP_KERNEL);
if (!reply)
return -ENOMEM;
rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg));
if (rc)
goto out_free;
/* Wait for the test message to complete. */
for (i = 0; i < 6; i++) {
mdelay(300);
rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
if (rc == 0 && psmid == 0x0102030405060708ULL)
break;
}
if (i >= 6) {
/* Got no answer. */
rc = -ENODEV;
goto out_free;
}
cprbx = (struct CPRBX *) (reply + 48);
if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
rc = ZCRYPT_PCIXCC_MCL2;
else
rc = ZCRYPT_PCIXCC_MCL3;
out_free:
free_page((unsigned long) reply);
return rc;
}
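/*
* Reading the check above as a worked example: a reply whose embedded
* CPRBX carries return code 8 with reason code 33 identifies microcode
* level 2; any other answer is treated as level 3. (A sketch of the
* convention only as far as it can be inferred from this function.)
*/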
/**
* Large random number detection function. It sends a message to a pcixcc
* card to find out if large random numbers are supported.
* @ap_dev: pointer to the AP device.
*
* Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
*/
static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
{
struct ap_message ap_msg;
unsigned long long psmid;
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
} __attribute__((packed)) *reply;
int rc, i;
ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
rng_type6CPRB_msgX(ap_dev, &ap_msg, 4);
rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
ap_msg.length);
if (rc)
goto out_free;
/* Wait for the test message to complete. */
for (i = 0; i < 2 * HZ; i++) {
msleep(1000 / HZ);
rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096);
if (rc == 0 && psmid == 0x0102030405060708ULL)
break;
}
if (i >= 2 * HZ) {
/* Got no answer. */
rc = -ENODEV;
goto out_free;
}
reply = ap_msg.message;
if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
rc = 1;
else
rc = 0;
out_free:
free_page((unsigned long) ap_msg.message);
return rc;
}
/**
* Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
* since the bus_match already checked the hardware type. The PCIXCC
* cards come in two flavours: micro code level 2 and micro code level 3.
* This is checked by sending a test message to the device.
* @ap_dev: pointer to the AP device.
*/
static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
{
struct zcrypt_device *zdev;
int rc = 0;
zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
if (!zdev)
return -ENOMEM;
zdev->ap_dev = ap_dev;
zdev->online = 1;
switch (ap_dev->device_type) {
case AP_DEVICE_TYPE_PCIXCC:
rc = zcrypt_pcixcc_mcl(ap_dev);
if (rc < 0) {
zcrypt_device_free(zdev);
return rc;
}
zdev->user_space_type = rc;
if (rc == ZCRYPT_PCIXCC_MCL2) {
zdev->type_string = "PCIXCC_MCL2";
zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
} else {
zdev->type_string = "PCIXCC_MCL3";
zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
}
break;
case AP_DEVICE_TYPE_CEX2C:
zdev->user_space_type = ZCRYPT_CEX2C;
zdev->type_string = "CEX2C";
zdev->speed_rating = CEX2C_SPEED_RATING;
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
break;
case AP_DEVICE_TYPE_CEX3C:
zdev->user_space_type = ZCRYPT_CEX3C;
zdev->type_string = "CEX3C";
zdev->speed_rating = CEX3C_SPEED_RATING;
zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
break;
default:
goto out_free;
}
rc = zcrypt_pcixcc_rng_supported(ap_dev);
if (rc < 0) {
zcrypt_device_free(zdev);
return rc;
}
if (rc)
zdev->ops = &zcrypt_pcixcc_with_rng_ops;
else
zdev->ops = &zcrypt_pcixcc_ops;
ap_dev->reply = &zdev->reply;
ap_dev->private = zdev;
rc = zcrypt_device_register(zdev);
if (rc)
goto out_free;
return 0;
out_free:
ap_dev->private = NULL;
zcrypt_device_free(zdev);
return rc;
}
/**
* This is called to remove the extended PCIXCC/CEX2C driver information
* if an AP device is removed.
*/
static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
{
struct zcrypt_device *zdev = ap_dev->private;
zcrypt_device_unregister(zdev);
}
int __init zcrypt_pcixcc_init(void)
{
return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc");
}
void zcrypt_pcixcc_exit(void)
{
ap_driver_unregister(&zcrypt_pcixcc_driver);
}
module_init(zcrypt_pcixcc_init);
module_exit(zcrypt_pcixcc_exit);
| gpl-2.0 |
bedalus/nexus4 | drivers/scsi/qla4xxx/ql4_init.c | 4807 | 32213 | /*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2010 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <scsi/iscsi_if.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
{
uint32_t value;
uint8_t func_number;
unsigned long flags;
/* Get the function number */
spin_lock_irqsave(&ha->hardware_lock, flags);
value = readw(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
func_number = (uint8_t) ((value >> 4) & 0x30);
switch (value & ISP_CONTROL_FN_MASK) {
case ISP_CONTROL_FN0_SCSI:
ha->mac_index = 1;
break;
case ISP_CONTROL_FN1_SCSI:
ha->mac_index = 3;
break;
default:
DEBUG2(printk("scsi%ld: %s: Invalid function number, "
"ispControlStatus = 0x%x\n", ha->host_no,
__func__, value));
break;
}
DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__,
ha->mac_index));
}
/**
* qla4xxx_free_ddb - deallocate ddb
* @ha: pointer to host adapter structure.
* @ddb_entry: pointer to device database entry
*
* This routine marks a DDB entry INVALID
**/
void qla4xxx_free_ddb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
/* Remove device pointer from index mapping arrays */
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] =
(struct ddb_entry *) INVALID_ENTRY;
ha->tot_ddbs--;
}
/**
* qla4xxx_init_response_q_entries() - Initializes response queue entries.
* @ha: HA context
*
* Beginning of request ring has initialization control block already built
* by nvram config routine.
**/
static void qla4xxx_init_response_q_entries(struct scsi_qla_host *ha)
{
uint16_t cnt;
struct response *pkt;
pkt = (struct response *)ha->response_ptr;
for (cnt = 0; cnt < RESPONSE_QUEUE_DEPTH; cnt++) {
pkt->signature = RESPONSE_PROCESSED;
pkt++;
}
}
/**
* qla4xxx_init_rings - initialize hw queues
* @ha: pointer to host adapter structure.
*
* This routine initializes the internal queues for the specified adapter.
* The QLA4010 requires us to restart the queues at index 0.
* The QLA4000 doesn't care, so just default to QLA4010's requirement.
**/
int qla4xxx_init_rings(struct scsi_qla_host *ha)
{
unsigned long flags = 0;
int i;
/* Initialize request queue. */
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->request_out = 0;
ha->request_in = 0;
ha->request_ptr = &ha->request_ring[ha->request_in];
ha->req_q_count = REQUEST_QUEUE_DEPTH;
/* Initialize response queue. */
ha->response_in = 0;
ha->response_out = 0;
ha->response_ptr = &ha->response_ring[ha->response_out];
if (is_qla8022(ha)) {
writel(0,
(unsigned long __iomem *)&ha->qla4_8xxx_reg->req_q_out);
writel(0,
(unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_in);
writel(0,
(unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_out);
} else {
/*
* Initialize DMA Shadow registers. The firmware is really
* supposed to take care of this, but on some uniprocessor
* systems, the shadow registers aren't cleared-- causing
* the interrupt_handler to think there are responses to be
* processed when there aren't.
*/
ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0);
ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0);
wmb();
writel(0, &ha->reg->req_q_in);
writel(0, &ha->reg->rsp_q_out);
readl(&ha->reg->rsp_q_out);
}
qla4xxx_init_response_q_entries(ha);
/* Initialize mailbox active array */
for (i = 0; i < MAX_MRB; i++)
ha->active_mrb_array[i] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
}
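/*
* Minimal sketch (hypothetical helper, not driver code) of the
* producer/consumer convention behind the ring indices reset above: an
* index advances one entry at a time and wraps at the queue depth, so
* setting both sides to 0 leaves the ring empty.
*/
static inline int ring_advance(int index, int depth)
{
return (index + 1 == depth) ? 0 : index + 1; /* wrap at depth */
}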
/**
* qla4xxx_get_sys_info - validate adapter MAC address(es)
* @ha: pointer to host adapter structure.
*
**/
int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
{
struct flash_sys_info *sys_info;
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
&sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
goto exit_get_sys_info_no_free;
}
memset(sys_info, 0, sizeof(*sys_info));
/* Get flash sys info */
if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
sizeof(*sys_info)) != QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO "
"failed\n", ha->host_no, __func__));
goto exit_get_sys_info;
}
/* Save M.A.C. address & serial_number */
memcpy(ha->my_mac, &sys_info->physAddr[0].address[0],
min(sizeof(ha->my_mac),
sizeof(sys_info->physAddr[0].address)));
memcpy(ha->serial_number, &sys_info->acSerialNumber,
min(sizeof(ha->serial_number),
sizeof(sys_info->acSerialNumber)));
status = QLA_SUCCESS;
exit_get_sys_info:
dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
sys_info_dma);
exit_get_sys_info_no_free:
return status;
}
/**
* qla4xxx_init_local_data - initialize adapter specific local data
* @ha: pointer to host adapter structure.
*
**/
static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
{
/* Initialize aen queue */
ha->aen_q_count = MAX_AEN_ENTRIES;
return qla4xxx_get_firmware_status(ha);
}
static uint8_t
qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
{
uint8_t ipv4_wait = 0;
uint8_t ipv6_wait = 0;
int8_t ip_address[IPv6_ADDR_LEN] = {0};
/* If both IPv4 & IPv6 are enabled, possibly only one
* IP address may be acquired, so check to see if we
* need to wait for another */
if (is_ipv4_enabled(ha) && is_ipv6_enabled(ha)) {
if (((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0) &&
((ha->addl_fw_state &
FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) {
ipv4_wait = 1;
}
if (((ha->ip_config.ipv6_addl_options &
IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
((ha->ip_config.ipv6_link_local_state ==
IP_ADDRSTATE_ACQUIRING) ||
(ha->ip_config.ipv6_addr0_state ==
IP_ADDRSTATE_ACQUIRING) ||
(ha->ip_config.ipv6_addr1_state ==
IP_ADDRSTATE_ACQUIRING))) {
ipv6_wait = 1;
if ((ha->ip_config.ipv6_link_local_state ==
IP_ADDRSTATE_PREFERRED) ||
(ha->ip_config.ipv6_addr0_state ==
IP_ADDRSTATE_PREFERRED) ||
(ha->ip_config.ipv6_addr1_state ==
IP_ADDRSTATE_PREFERRED)) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
"Preferred IP configured."
" Don't wait!\n", ha->host_no,
__func__));
ipv6_wait = 0;
}
if (memcmp(&ha->ip_config.ipv6_default_router_addr,
ip_address, IPv6_ADDR_LEN) == 0) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
"No Router configured. "
"Don't wait!\n", ha->host_no,
__func__));
ipv6_wait = 0;
}
if ((ha->ip_config.ipv6_default_router_state ==
IPV6_RTRSTATE_MANUAL) &&
(ha->ip_config.ipv6_link_local_state ==
IP_ADDRSTATE_TENTATIVE) &&
(memcmp(&ha->ip_config.ipv6_link_local_addr,
&ha->ip_config.ipv6_default_router_addr, 4) ==
0)) {
DEBUG2(printk("scsi%ld: %s: LinkLocal Router & "
"IP configured. Don't wait!\n",
ha->host_no, __func__));
ipv6_wait = 0;
}
}
if (ipv4_wait || ipv6_wait) {
DEBUG2(printk("scsi%ld: %s: Wait for additional "
"IP(s) \"", ha->host_no, __func__));
if (ipv4_wait)
DEBUG2(printk("IPv4 "));
if (ha->ip_config.ipv6_link_local_state ==
IP_ADDRSTATE_ACQUIRING)
DEBUG2(printk("IPv6LinkLocal "));
if (ha->ip_config.ipv6_addr0_state ==
IP_ADDRSTATE_ACQUIRING)
DEBUG2(printk("IPv6Addr0 "));
if (ha->ip_config.ipv6_addr1_state ==
IP_ADDRSTATE_ACQUIRING)
DEBUG2(printk("IPv6Addr1 "));
DEBUG2(printk("\"\n"));
}
}
return ipv4_wait|ipv6_wait;
}
static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
{
uint32_t timeout_count;
int ready = 0;
DEBUG2(ql4_printk(KERN_INFO, ha, "Waiting for Firmware Ready..\n"));
for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0;
timeout_count--) {
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
/* Get firmware state. */
if (qla4xxx_get_firmware_state(ha) != QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: unable to get firmware "
"state\n", ha->host_no, __func__));
break;
}
if (ha->firmware_state & FW_STATE_ERROR) {
DEBUG2(printk("scsi%ld: %s: an unrecoverable error has"
" occurred\n", ha->host_no, __func__));
break;
}
if (ha->firmware_state & FW_STATE_CONFIG_WAIT) {
/*
* The firmware has not yet been issued an Initialize
* Firmware command, so issue it now.
*/
if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR)
break;
/* Go back and test for ready state - no wait. */
continue;
}
if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
"AUTOCONNECT in progress\n",
ha->host_no, __func__));
}
if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
" CONFIGURING IP\n",
ha->host_no, __func__));
/*
* Check for link state after 15 secs and if link is
* still DOWN then, cable is unplugged. Ignore "DHCP
* in Progress/CONFIGURING IP" bit to check if firmware
* is in ready state or not after 15 secs.
* This is applicable for both 2.x & 3.x firmware
*/
if (timeout_count <= (ADAPTER_INIT_TOV - 15)) {
if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s:"
" LINK UP (Cable plugged)\n",
ha->host_no, __func__));
} else if (ha->firmware_state &
(FW_STATE_CONFIGURING_IP |
FW_STATE_READY)) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
"LINK DOWN (Cable unplugged)\n",
ha->host_no, __func__));
ha->firmware_state = FW_STATE_READY;
}
}
}
if (ha->firmware_state == FW_STATE_READY) {
/* If DHCP IP Addr is available, retrieve it now. */
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR,
&ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
if (!qla4xxx_wait_for_ip_config(ha) ||
timeout_count == 1) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"Firmware Ready..\n"));
/* The firmware is ready to process SCSI
commands. */
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s: MEDIA TYPE"
" - %s\n", ha->host_no,
__func__, (ha->addl_fw_state &
FW_ADDSTATE_OPTICAL_MEDIA)
!= 0 ? "OPTICAL" : "COPPER"));
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s: DHCPv4 STATE"
" Enabled %s\n", ha->host_no,
__func__, (ha->addl_fw_state &
FW_ADDSTATE_DHCPv4_ENABLED) != 0 ?
"YES" : "NO"));
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s: LINK %s\n",
ha->host_no, __func__,
(ha->addl_fw_state &
FW_ADDSTATE_LINK_UP) != 0 ?
"UP" : "DOWN"));
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s: iSNS Service "
"Started %s\n",
ha->host_no, __func__,
(ha->addl_fw_state &
FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
"YES" : "NO"));
ready = 1;
break;
}
}
DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - "
"seconds expired= %d\n", ha->host_no, __func__,
ha->firmware_state, ha->addl_fw_state,
timeout_count));
if (is_qla4032(ha) &&
!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) &&
(timeout_count < ADAPTER_INIT_TOV - 5)) {
break;
}
msleep(1000);
} /* end of for */
if (timeout_count <= 0)
DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
ha->host_no, __func__));
if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
DEBUG2(printk("scsi%ld: %s: FW initialized, but is reporting "
"it's waiting to configure an IP address\n",
ha->host_no, __func__));
ready = 1;
} else if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
DEBUG2(printk("scsi%ld: %s: FW initialized, but "
"auto-discovery still in process\n",
ha->host_no, __func__));
ready = 1;
}
return ready;
}
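/*
* Stripped-down sketch of the countdown-poll shape qla4xxx_fw_ready()
* follows (illustrative only; firmware_is_ready() is a made-up stand-in
* and the real function layers per-state handling on top):
*
* for (t = ADAPTER_INIT_TOV; t > 0; t--) {
* if (firmware_is_ready())
* return 1;
* msleep(1000);
* }
* return 0;
*/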
/**
* qla4xxx_init_firmware - initializes the firmware.
* @ha: pointer to host adapter structure.
*
**/
static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
{
int status = QLA_ERROR;
if (is_aer_supported(ha) &&
test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
return status;
/* For 82xx, stop firmware before initializing because if BIOS
* has previously initialized firmware, then driver's initialize
* firmware will fail. */
if (is_qla8022(ha))
qla4_8xxx_stop_firmware(ha);
ql4_printk(KERN_INFO, ha, "Initializing firmware..\n");
if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware "
"control block\n", ha->host_no, __func__));
return status;
}
if (!qla4xxx_fw_ready(ha))
return status;
return qla4xxx_get_firmware_status(ha);
}
static void qla4xxx_set_model_info(struct scsi_qla_host *ha)
{
uint16_t board_id_string[8];
int i;
int size = sizeof(ha->nvram->isp4022.boardIdStr);
int offset = offsetof(struct eeprom_data, isp4022.boardIdStr) / 2;
for (i = 0; i < (size / 2) ; i++) {
board_id_string[i] = rd_nvram_word(ha, offset);
offset += 1;
}
memcpy(ha->model_name, board_id_string, size);
}
static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
{
unsigned long flags;
union external_hw_config_reg extHwConfig;
DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no,
__func__));
if (ql4xxx_lock_flash(ha) != QLA_SUCCESS)
return QLA_ERROR;
if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) {
ql4xxx_unlock_flash(ha);
return QLA_ERROR;
}
/* Get EEPRom Parameters from NVRAM and validate */
ql4_printk(KERN_INFO, ha, "Configuring NVRAM ...\n");
if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) {
spin_lock_irqsave(&ha->hardware_lock, flags);
extHwConfig.Asuint32_t =
rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
} else {
ql4_printk(KERN_WARNING, ha,
"scsi%ld: %s: EEProm checksum invalid. "
"Please update your EEPROM\n", ha->host_no,
__func__);
/* Attempt to set defaults */
if (is_qla4010(ha))
extHwConfig.Asuint32_t = 0x1912;
else if (is_qla4022(ha) || is_qla4032(ha))
extHwConfig.Asuint32_t = 0x0023;
else
return QLA_ERROR;
}
if (is_qla4022(ha) || is_qla4032(ha))
qla4xxx_set_model_info(ha);
else
strcpy(ha->model_name, "QLA4010");
DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
ha->host_no, __func__, extHwConfig.Asuint32_t));
spin_lock_irqsave(&ha->hardware_lock, flags);
writel((0xFFFF << 16) | extHwConfig.Asuint32_t, isp_ext_hw_conf(ha));
readl(isp_ext_hw_conf(ha));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql4xxx_unlock_nvram(ha);
ql4xxx_unlock_flash(ha);
return QLA_SUCCESS;
}
/**
* qla4_8xxx_pci_config() - Setup ISP82xx PCI configuration registers.
* @ha: HA context
*/
void qla4_8xxx_pci_config(struct scsi_qla_host *ha)
{
pci_set_master(ha->pdev);
}
void qla4xxx_pci_config(struct scsi_qla_host *ha)
{
uint16_t w;
int status;
ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n");
pci_set_master(ha->pdev);
status = pci_set_mwi(ha->pdev);
/*
* We want to respect framework's setting of PCI configuration space
* command register and also want to make sure that all bits of
* interest to us are properly set in command register.
*/
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
w &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
}
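/*
* Worked example of the read-modify-write above: if PCI_COMMAND reads
* back as 0x0507 (INTX_DISABLE set), then
* w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; -> 0x0547
* w &= ~PCI_COMMAND_INTX_DISABLE; -> 0x0147
* assuming the standard encodings PARITY=0x40, SERR=0x100 and
* INTX_DISABLE=0x400 (values from the PCI specification, not this file).
*/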
static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
{
int status = QLA_ERROR;
unsigned long max_wait_time;
unsigned long flags;
uint32_t mbox_status;
ql4_printk(KERN_INFO, ha, "Starting firmware ...\n");
/*
* Start firmware from flash ROM
*
* WORKAROUND: Stuff a non-constant value that the firmware can
* use as a seed for a random number generator in MB7 prior to
* setting BOOT_ENABLE. Fixes problem where the TCP
* connections use the same TCP ports after each reboot,
* causing some connections to not get re-established.
*/
DEBUG(printk("scsi%d: %s: Start firmware from flash ROM\n",
ha->host_no, __func__));
spin_lock_irqsave(&ha->hardware_lock, flags);
writel(jiffies, &ha->reg->mailbox[7]);
if (is_qla4022(ha) || is_qla4032(ha))
writel(set_rmask(NVR_WRITE_ENABLE),
&ha->reg->u1.isp4022.nvram);
writel(2, &ha->reg->mailbox[6]);
readl(&ha->reg->mailbox[6]);
writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait for firmware to come UP. */
DEBUG2(printk(KERN_INFO "scsi%ld: %s: Wait up to %d seconds for "
"boot firmware to complete...\n",
ha->host_no, __func__, FIRMWARE_UP_TOV));
max_wait_time = jiffies + (FIRMWARE_UP_TOV * HZ);
do {
uint32_t ctrl_status;
spin_lock_irqsave(&ha->hardware_lock, flags);
ctrl_status = readw(&ha->reg->ctrl_status);
mbox_status = readw(&ha->reg->mailbox[0]);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ctrl_status & set_rmask(CSR_SCSI_PROCESSOR_INTR))
break;
if (mbox_status == MBOX_STS_COMMAND_COMPLETE)
break;
DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
"firmware to complete... ctrl_sts=0x%x, remaining=%ld\n",
ha->host_no, __func__, ctrl_status, max_wait_time));
msleep_interruptible(250);
} while (!time_after_eq(jiffies, max_wait_time));
if (mbox_status == MBOX_STS_COMMAND_COMPLETE) {
DEBUG(printk(KERN_INFO "scsi%ld: %s: Firmware has started\n",
ha->host_no, __func__));
spin_lock_irqsave(&ha->hardware_lock, flags);
writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
&ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
status = QLA_SUCCESS;
} else {
printk(KERN_INFO "scsi%ld: %s: Boot firmware failed "
"- mbox status 0x%x\n", ha->host_no, __func__,
mbox_status);
status = QLA_ERROR;
}
return status;
}
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
{
#define QL4_LOCK_DRVR_WAIT 60
#define QL4_LOCK_DRVR_SLEEP 1
int drvr_wait = QL4_LOCK_DRVR_WAIT;
while (drvr_wait) {
if (ql4xxx_lock_drvr(a) == 0) {
ssleep(QL4_LOCK_DRVR_SLEEP);
if (drvr_wait) {
DEBUG2(printk("scsi%ld: %s: Waiting for "
"Global Init Semaphore(%d)...\n",
a->host_no,
__func__, drvr_wait));
}
drvr_wait -= QL4_LOCK_DRVR_SLEEP;
} else {
DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
"acquired\n", a->host_no, __func__));
return QLA_SUCCESS;
}
}
return QLA_ERROR;
}
/**
* qla4xxx_start_firmware - starts qla4xxx firmware
* @ha: Pointer to host adapter structure.
*
* This routine performs the necessary steps to start the firmware for
* the QLA4010 adapter.
**/
int qla4xxx_start_firmware(struct scsi_qla_host *ha)
{
unsigned long flags = 0;
uint32_t mbox_status;
int status = QLA_ERROR;
int soft_reset = 1;
int config_chip = 0;
if (is_qla4022(ha) || is_qla4032(ha))
ql4xxx_set_mac_number(ha);
if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
return QLA_ERROR;
spin_lock_irqsave(&ha->hardware_lock, flags);
DEBUG2(printk("scsi%ld: %s: port_ctrl = 0x%08X\n", ha->host_no,
__func__, readw(isp_port_ctrl(ha))));
DEBUG(printk("scsi%ld: %s: port_status = 0x%08X\n", ha->host_no,
__func__, readw(isp_port_status(ha))));
/* Is Hardware already initialized? */
if ((readw(isp_port_ctrl(ha)) & 0x8000) != 0) {
DEBUG(printk("scsi%ld: %s: Hardware has already been "
"initialized\n", ha->host_no, __func__));
/* Receive firmware boot acknowledgement */
mbox_status = readw(&ha->reg->mailbox[0]);
DEBUG2(printk("scsi%ld: %s: H/W Config complete - mbox[0]= "
"0x%x\n", ha->host_no, __func__, mbox_status));
/* Is firmware already booted? */
if (mbox_status == 0) {
/* F/W not running, must be config by net driver */
config_chip = 1;
soft_reset = 0;
} else {
writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
&ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
&ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Get firmware "
"state -- state = 0x%x\n",
ha->host_no,
__func__, ha->firmware_state));
/* F/W is running */
if (ha->firmware_state &
FW_STATE_CONFIG_WAIT) {
DEBUG2(printk("scsi%ld: %s: Firmware "
"in known state -- "
"config and "
"boot, state = 0x%x\n",
ha->host_no, __func__,
ha->firmware_state));
config_chip = 1;
soft_reset = 0;
}
} else {
DEBUG2(printk("scsi%ld: %s: Firmware in "
"unknown state -- resetting,"
" state = "
"0x%x\n", ha->host_no, __func__,
ha->firmware_state));
}
spin_lock_irqsave(&ha->hardware_lock, flags);
}
} else {
DEBUG(printk("scsi%ld: %s: H/W initialization hasn't been "
"started - resetting\n", ha->host_no, __func__));
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
DEBUG(printk("scsi%ld: %s: Flags soft_rest=%d, config= %d\n ",
ha->host_no, __func__, soft_reset, config_chip));
if (soft_reset) {
DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no,
__func__));
status = qla4xxx_soft_reset(ha); /* NOTE: acquires drvr
* lock again, but ok */
if (status == QLA_ERROR) {
DEBUG(printk("scsi%d: %s: Soft Reset failed!\n",
ha->host_no, __func__));
ql4xxx_unlock_drvr(ha);
return QLA_ERROR;
}
config_chip = 1;
/* Reset clears the semaphore, so acquire again */
if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
return QLA_ERROR;
}
if (config_chip) {
if ((status = qla4xxx_config_nvram(ha)) == QLA_SUCCESS)
status = qla4xxx_start_firmware_from_flash(ha);
}
ql4xxx_unlock_drvr(ha);
if (status == QLA_SUCCESS) {
if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))
qla4xxx_get_crash_record(ha);
} else {
DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n",
ha->host_no, __func__));
}
return status;
}
/**
* qla4xxx_free_ddb_index - Free DDBs reserved by firmware
* @ha: pointer to adapter structure
*
* Since firmware is not running in autoconnect mode the DDB indices should
* be freed so that when login happens from user space there are free DDB
* indices available.
**/
void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
{
int max_ddbs;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
for (idx = 0; idx < max_ddbs; idx = next_idx) {
ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
&next_idx, &state, &conn_err,
NULL, NULL);
if (ret == QLA_ERROR) {
next_idx++;
continue;
}
if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
state == DDB_DS_SESSION_FAILED) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"Freeing DDB index = 0x%x\n", idx));
ret = qla4xxx_clear_ddb_entry(ha, idx);
if (ret == QLA_ERROR)
ql4_printk(KERN_ERR, ha,
"Unable to clear DDB index = "
"0x%x\n", idx);
}
if (next_idx == 0)
break;
}
}
/**
* qla4xxx_initialize_adapter - initializes hba
* @ha: Pointer to host adapter structure.
* @is_reset: Indicates whether this is an initialization after an adapter reset
*
* This routine performs all of the steps necessary to initialize the adapter.
*
**/
int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
{
int status = QLA_ERROR;
ha->eeprom_cmd_data = 0;
ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n");
ha->isp_ops->pci_config(ha);
ha->isp_ops->disable_intrs(ha);
/* Initialize the Host adapter request/response queues and firmware */
if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
if (qla4xxx_about_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
goto exit_init_hba;
if (qla4xxx_init_local_data(ha) == QLA_ERROR)
goto exit_init_hba;
status = qla4xxx_init_firmware(ha);
if (status == QLA_ERROR)
goto exit_init_hba;
if (is_reset == RESET_ADAPTER)
qla4xxx_build_ddb_list(ha, is_reset);
set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
if (is_qla8022(ha) && (status == QLA_ERROR)) {
/* Since interrupts are registered in start_firmware for
* 82xx, release them here if initialize_adapter fails */
qla4xxx_free_irqs(ha);
}
DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
return status;
}
int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
struct ddb_entry *ddb_entry, uint32_t state)
{
uint32_t old_fw_ddb_device_state;
int status = QLA_ERROR;
old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: DDB - old state = 0x%x, new state = 0x%x for "
"index [%d]\n", __func__,
ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
ddb_entry->fw_ddb_device_state = state;
switch (old_fw_ddb_device_state) {
case DDB_DS_LOGIN_IN_PROCESS:
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
case DDB_DS_SESSION_FAILED:
case DDB_DS_NO_CONNECTION_ACTIVE:
iscsi_conn_login_event(ddb_entry->conn,
ISCSI_CONN_STATE_FREE);
status = QLA_SUCCESS;
break;
}
break;
case DDB_DS_SESSION_ACTIVE:
switch (state) {
case DDB_DS_SESSION_FAILED:
/*
* iscsi_session failure will cause userspace to
* stop the connection which in turn would block the
* iscsi_session and start relogin
*/
iscsi_session_failure(ddb_entry->sess->dd_data,
ISCSI_ERR_CONN_FAILED);
status = QLA_SUCCESS;
break;
case DDB_DS_NO_CONNECTION_ACTIVE:
clear_bit(fw_ddb_index, ha->ddb_idx_map);
status = QLA_SUCCESS;
break;
}
break;
case DDB_DS_SESSION_FAILED:
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
case DDB_DS_SESSION_FAILED:
iscsi_session_failure(ddb_entry->sess->dd_data,
ISCSI_ERR_CONN_FAILED);
status = QLA_SUCCESS;
break;
}
break;
default:
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
__func__));
break;
}
return status;
}
void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
{
/*
* This triggers a relogin. After the relogin_timer
* expires, the relogin gets scheduled. We must wait a
* minimum amount of time since receiving an 0x8014 AEN
* with failed device_state or a logout response before
* we can issue another relogin.
*
* Firmware pads this timeout: (time2wait +1).
* Driver retry to login should be longer than F/W.
* Otherwise F/W will fail
* set_ddb() mbx cmd with 0x4005 since it still
* counting down its time2wait.
*/
atomic_set(&ddb_entry->relogin_timer, 0);
atomic_set(&ddb_entry->retry_relogin_timer,
ddb_entry->default_time2wait + 4);
}
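/*
* Worked example of the padding described above: if the firmware reports
* default_time2wait = 2, it internally counts down (2 + 1) = 3 seconds,
* while the driver arms retry_relogin_timer with 2 + 4 = 6, so the
* driver's retry always lands after the firmware's own window and the
* 0x4005 set_ddb() failure is avoided.
*/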
int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
struct ddb_entry *ddb_entry, uint32_t state)
{
uint32_t old_fw_ddb_device_state;
int status = QLA_ERROR;
old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: DDB - old state = 0x%x, new state = 0x%x for "
"index [%d]\n", __func__,
ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
ddb_entry->fw_ddb_device_state = state;
switch (old_fw_ddb_device_state) {
case DDB_DS_LOGIN_IN_PROCESS:
case DDB_DS_NO_CONNECTION_ACTIVE:
switch (state) {
case DDB_DS_SESSION_ACTIVE:
ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
case DDB_DS_SESSION_FAILED:
iscsi_block_session(ddb_entry->sess);
if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
qla4xxx_arm_relogin_timer(ddb_entry);
status = QLA_SUCCESS;
break;
}
break;
case DDB_DS_SESSION_ACTIVE:
switch (state) {
case DDB_DS_SESSION_FAILED:
iscsi_block_session(ddb_entry->sess);
if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
qla4xxx_arm_relogin_timer(ddb_entry);
status = QLA_SUCCESS;
break;
}
break;
case DDB_DS_SESSION_FAILED:
switch (state) {
case DDB_DS_SESSION_ACTIVE:
ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
case DDB_DS_SESSION_FAILED:
if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
qla4xxx_arm_relogin_timer(ddb_entry);
status = QLA_SUCCESS;
break;
}
break;
default:
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
__func__));
break;
}
return status;
}
/**
* qla4xxx_process_ddb_changed - process ddb state change
* @ha: Pointer to host adapter structure.
* @fw_ddb_index: Firmware's device database index
* @state: Device state
* @conn_err: Connection error code
*
* This routine processes a Device Database Changed AEN Event.
**/
int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
uint32_t fw_ddb_index,
uint32_t state, uint32_t conn_err)
{
struct ddb_entry *ddb_entry;
int status = QLA_ERROR;
/* check for out of range index */
if (fw_ddb_index >= MAX_DDB_ENTRIES)
goto exit_ddb_event;
/* Get the corresponding ddb entry */
ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
/* Device does not currently exist in our database. */
if (ddb_entry == NULL) {
ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
__func__, fw_ddb_index);
if (state == DDB_DS_NO_CONNECTION_ACTIVE)
clear_bit(fw_ddb_index, ha->ddb_idx_map);
goto exit_ddb_event;
}
ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
exit_ddb_event:
return status;
}
/**
* qla4xxx_login_flash_ddb - Login to target (DDB)
* @cls_session: Pointer to the session to login
*
* This routine logs in to the target.
* Issues setddb and conn open mbx
**/
void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_dma;
uint32_t mbx_sts = 0;
int ret;
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
if (!test_bit(AF_LINK_UP, &ha->flags))
return;
if (ddb_entry->ddb_type != FLASH_DDB) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"Skipping login to non FLASH DB"));
goto exit_login;
}
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
&fw_ddb_dma);
if (fw_ddb_entry == NULL) {
DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
goto exit_login;
}
if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
if (ret == QLA_ERROR)
goto exit_login;
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
ha->tot_ddbs++;
}
memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
sizeof(struct dev_db_entry));
ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
fw_ddb_dma, &mbx_sts);
if (ret == QLA_ERROR) {
DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
goto exit_login;
}
ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
if (ret == QLA_ERROR) {
ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
sess->targetname);
goto exit_login;
}
exit_login:
if (fw_ddb_entry)
dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
}
| gpl-2.0 |
fabeer/android_kernel_xolo_q1100 | arch/mips/bcm63xx/gpio.c | 7367 | 3630 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
* Copyright (C) 2008-2011 Florian Fainelli <florian@openwrt.org>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_gpio.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#ifndef BCMCPU_RUNTIME_DETECT
#define gpio_out_low_reg GPIO_DATA_LO_REG
#ifdef CONFIG_BCM63XX_CPU_6345
#ifdef gpio_out_low_reg
#undef gpio_out_low_reg
#define gpio_out_low_reg GPIO_DATA_LO_REG_6345
#endif /* gpio_out_low_reg */
#endif /* CONFIG_BCM63XX_CPU_6345 */
static inline void bcm63xx_gpio_out_low_reg_init(void)
{
}
#else /* ! BCMCPU_RUNTIME_DETECT */
static u32 gpio_out_low_reg;
static void bcm63xx_gpio_out_low_reg_init(void)
{
switch (bcm63xx_get_cpu_id()) {
case BCM6345_CPU_ID:
gpio_out_low_reg = GPIO_DATA_LO_REG_6345;
break;
default:
gpio_out_low_reg = GPIO_DATA_LO_REG;
break;
}
}
#endif /* ! BCMCPU_RUNTIME_DETECT */
static DEFINE_SPINLOCK(bcm63xx_gpio_lock);
static u32 gpio_out_low, gpio_out_high;
static void bcm63xx_gpio_set(struct gpio_chip *chip,
unsigned gpio, int val)
{
u32 reg;
u32 mask;
u32 *v;
unsigned long flags;
if (gpio >= chip->ngpio)
BUG();
if (gpio < 32) {
reg = gpio_out_low_reg;
mask = 1 << gpio;
v = &gpio_out_low;
} else {
reg = GPIO_DATA_HI_REG;
mask = 1 << (gpio - 32);
v = &gpio_out_high;
}
spin_lock_irqsave(&bcm63xx_gpio_lock, flags);
if (val)
*v |= mask;
else
*v &= ~mask;
bcm_gpio_writel(*v, reg);
spin_unlock_irqrestore(&bcm63xx_gpio_lock, flags);
}
static int bcm63xx_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
u32 reg;
u32 mask;
if (gpio >= chip->ngpio)
BUG();
if (gpio < 32) {
reg = gpio_out_low_reg;
mask = 1 << gpio;
} else {
reg = GPIO_DATA_HI_REG;
mask = 1 << (gpio - 32);
}
return !!(bcm_gpio_readl(reg) & mask);
}
static int bcm63xx_gpio_set_direction(struct gpio_chip *chip,
unsigned gpio, int dir)
{
u32 reg;
u32 mask;
u32 tmp;
unsigned long flags;
if (gpio >= chip->ngpio)
BUG();
if (gpio < 32) {
reg = GPIO_CTL_LO_REG;
mask = 1 << gpio;
} else {
reg = GPIO_CTL_HI_REG;
mask = 1 << (gpio - 32);
}
spin_lock_irqsave(&bcm63xx_gpio_lock, flags);
tmp = bcm_gpio_readl(reg);
if (dir == BCM63XX_GPIO_DIR_IN)
tmp &= ~mask;
else
tmp |= mask;
bcm_gpio_writel(tmp, reg);
spin_unlock_irqrestore(&bcm63xx_gpio_lock, flags);
return 0;
}
static int bcm63xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_IN);
}
static int bcm63xx_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
bcm63xx_gpio_set(chip, gpio, value);
return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_OUT);
}
static struct gpio_chip bcm63xx_gpio_chip = {
.label = "bcm63xx-gpio",
.direction_input = bcm63xx_gpio_direction_input,
.direction_output = bcm63xx_gpio_direction_output,
.get = bcm63xx_gpio_get,
.set = bcm63xx_gpio_set,
.base = 0,
};
int __init bcm63xx_gpio_init(void)
{
bcm63xx_gpio_out_low_reg_init();
gpio_out_low = bcm_gpio_readl(gpio_out_low_reg);
if (!BCMCPU_IS_6345())
gpio_out_high = bcm_gpio_readl(GPIO_DATA_HI_REG);
bcm63xx_gpio_chip.ngpio = bcm63xx_gpio_count();
pr_info("registering %d GPIOs\n", bcm63xx_gpio_chip.ngpio);
return gpiochip_add(&bcm63xx_gpio_chip);
}
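/*
* Once gpiochip_add() has registered the chip, other kernel code drives
* these pins through the generic gpiolib calls. A minimal, hypothetical
* consumer sketch -- the GPIO number and label are invented for
* illustration:
*/
static int __init example_gpio_consumer(void)
{
int err;
err = gpio_request(5, "example-led"); /* claim GPIO 5 */
if (err)
return err;
/* configure as output, driven high */
return gpio_direction_output(5, 1);
}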
| gpl-2.0 |
globalgang/linux-at91 | drivers/net/arcnet/capmode.c | 12487 | 7629 | /*
* Linux ARCnet driver - "cap mode" packet encapsulation.
* It adds sequence numbers to packets for communicating between a user space
* application and the driver. After a transmit it sends a packet with protocol
* byte 0 back up to the userspace containing the sequence number of the packet
* plus the transmit-status on the ArcNet.
*
* Written 2002-4 by Esben Nielsen, Vestas Wind Systems A/S
* Derived from arc-rawmode.c by Avery Pennarun.
* arc-rawmode was in turn based on skeleton.c, see below.
*
* **********************
*
* The original copyright of skeleton.c was as follows:
*
* skeleton.c Written 1993 by Donald Becker.
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency. This software may only be used
* and distributed according to the terms of the GNU General Public License as
* modified by SRC, incorporated herein by reference.
*
* **********************
*
* For more details, see drivers/net/arcnet.c
*
* **********************
*/
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/arcdevice.h>
#define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n"
/* packet receiver */
static void rx(struct net_device *dev, int bufnum,
struct archdr *pkthdr, int length)
{
struct arcnet_local *lp = netdev_priv(dev);
struct sk_buff *skb;
struct archdr *pkt = pkthdr;
char *pktbuf, *pkthdrbuf;
int ofs;
BUGMSG(D_DURING, "it's a raw(cap) packet (length=%d)\n", length);
if (length >= MinTU)
ofs = 512 - length;
else
ofs = 256 - length;
skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC);
if (skb == NULL) {
BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
dev->stats.rx_dropped++;
return;
}
skb_put(skb, length + ARC_HDR_SIZE + sizeof(int));
skb->dev = dev;
skb_reset_mac_header(skb);
pkt = (struct archdr *)skb_mac_header(skb);
skb_pull(skb, ARC_HDR_SIZE);
/* up to sizeof(pkt->soft) has already been copied from the card */
/* squeeze in an int for the cap encapsulation */
/* use these variables to be sure we count in bytes, not in
sizeof(struct archdr) */
pktbuf=(char*)pkt;
pkthdrbuf=(char*)pkthdr;
memcpy(pktbuf, pkthdrbuf, ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto));
memcpy(pktbuf+ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto)+sizeof(int),
pkthdrbuf+ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto),
sizeof(struct archdr)-ARC_HDR_SIZE-sizeof(pkt->soft.cap.proto));
if (length > sizeof(pkt->soft))
lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
pkt->soft.raw + sizeof(pkt->soft)
+ sizeof(int),
length - sizeof(pkt->soft));
BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
skb->protocol = cpu_to_be16(ETH_P_ARCNET);
netif_rx(skb);
}
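/*
* Sketch of the skb layout rx() assembles above, read off the memcpy
* arithmetic: the hardware header (ARC_HDR_SIZE bytes) comes first, then
* the protocol byte, then a sizeof(int) slot squeezed in for the cap
* cookie (the sequence number), and finally the payload copied from the
* card.
*/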
/*
* Create the ARCnet hard/soft headers for cap mode.
* There aren't any soft headers in cap mode - not even the protocol id.
*/
static int build_header(struct sk_buff *skb,
struct net_device *dev,
unsigned short type,
uint8_t daddr)
{
int hdr_size = ARC_HDR_SIZE;
struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
BUGMSG(D_PROTO, "Preparing header for cap packet %x.\n",
*((int*)&pkt->soft.cap.cookie[0]));
/*
* Set the source hardware address.
*
* This is pretty pointless for most purposes, but it can help in
* debugging. ARCnet does not allow us to change the source address in
* the actual packet sent)
*/
pkt->hard.source = *dev->dev_addr;
/* see linux/net/ethernet/eth.c to see where I got the following */
if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
/*
* FIXME: fill in the last byte of the dest ipaddr here to better
* comply with RFC1051 in "noarp" mode.
*/
pkt->hard.dest = 0;
return hdr_size;
}
/* otherwise, just fill it in and go! */
pkt->hard.dest = daddr;
return hdr_size; /* success */
}
static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
int bufnum)
{
struct arcnet_local *lp = netdev_priv(dev);
struct arc_hardware *hard = &pkt->hard;
int ofs;
/* hard header is not included in packet length */
length -= ARC_HDR_SIZE;
/* And neither is the cookie field */
length -= sizeof(int);
BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
lp->next_tx, lp->cur_tx, bufnum);
BUGMSG(D_PROTO, "Sending for cap packet %x.\n",
*((int*)&pkt->soft.cap.cookie[0]));
if (length > XMTU) {
/* should never happen! other people already check for this. */
BUGMSG(D_NORMAL, "Bug! prepare_tx with size %d (> %d)\n",
length, XMTU);
length = XMTU;
}
if (length > MinTU) {
hard->offset[0] = 0;
hard->offset[1] = ofs = 512 - length;
} else if (length > MTU) {
hard->offset[0] = 0;
hard->offset[1] = ofs = 512 - length - 3;
} else
hard->offset[0] = ofs = 256 - length;
BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
length,ofs);
/* Copy the arcnet-header + the protocol byte down: */
lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto,
sizeof(pkt->soft.cap.proto));
/* Skip the extra integer we have written into it as a cookie
but write the rest of the message: */
lp->hw.copy_to_card(dev, bufnum, ofs+1,
((unsigned char*)&pkt->soft.cap.mes),length-1);
lp->lastload_dest = hard->dest;
return 1; /* done */
}
static int ack_tx(struct net_device *dev, int acked)
{
struct arcnet_local *lp = netdev_priv(dev);
struct sk_buff *ackskb;
struct archdr *ackpkt;
int length=sizeof(struct arc_cap);
BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n",
lp->outgoing.skb->protocol, acked);
BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
/* Now alloc a skb to send back up through the layers: */
ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC);
if (ackskb == NULL) {
BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n");
goto free_outskb;
}
skb_put(ackskb, length + ARC_HDR_SIZE );
ackskb->dev = dev;
skb_reset_mac_header(ackskb);
ackpkt = (struct archdr *)skb_mac_header(ackskb);
/* skb_pull(ackskb, ARC_HDR_SIZE); */
skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
ARC_HDR_SIZE + sizeof(struct arc_cap));
ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */
ackpkt->soft.cap.mes.ack=acked;
BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n",
*((int*)&ackpkt->soft.cap.cookie[0]));
ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
netif_rx(ackskb);
free_outskb:
dev_kfree_skb_irq(lp->outgoing.skb);
lp->outgoing.proto = NULL; /* We are always finished when in this protocol */
return 0;
}
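/*
* Putting rx(), prepare_tx() and ack_tx() together, the user-space
* exchange sketched in the file header works like this:
* 1. user space sends a packet carrying a cookie (sequence number);
* 2. prepare_tx() strips the cookie before the data hits the wire;
* 3. after transmission, ack_tx() copies the original header and cookie
* into a new skb, sets proto = 0 and mes.ack to the transmit status,
* and feeds it back up with netif_rx().
*/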
static struct ArcProto capmode_proto =
{
'r',
XMTU,
0,
rx,
build_header,
prepare_tx,
NULL,
ack_tx
};
static void arcnet_cap_init(void)
{
int count;
for (count = 1; count <= 8; count++)
if (arc_proto_map[count] == arc_proto_default)
arc_proto_map[count] = &capmode_proto;
/* for cap mode, we only set the bcast proto if there's no better one */
if (arc_bcast_proto == arc_proto_default)
arc_bcast_proto = &capmode_proto;
arc_proto_default = &capmode_proto;
arc_raw_proto = &capmode_proto;
}
static int __init capmode_module_init(void)
{
printk(VERSION);
arcnet_cap_init();
return 0;
}
static void __exit capmode_module_exit(void)
{
arcnet_unregister_proto(&capmode_proto);
}
module_init(capmode_module_init);
module_exit(capmode_module_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
raden/kencana-kernel | arch/x86/math-emu/reg_ld_str.c | 14279 | 32290 | /*---------------------------------------------------------------------------+
| reg_ld_str.c |
| |
| All of the functions which transfer data between user memory and FPU_REGs.|
| |
| Copyright (C) 1992,1993,1994,1996,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail billm@suburbia.net |
| |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| Note: |
| The file contains code which accesses user memory. |
| Emulator static data may change when user memory is accessed, due to |
| other processes using the emulator while swapping is in progress. |
+---------------------------------------------------------------------------*/
#include "fpu_emu.h"
#include <asm/uaccess.h>
#include "fpu_system.h"
#include "exception.h"
#include "reg_constant.h"
#include "control_w.h"
#include "status_w.h"
#define DOUBLE_Emax 1023 /* largest valid exponent */
#define DOUBLE_Ebias 1023
#define DOUBLE_Emin (-1022) /* smallest valid exponent */
#define SINGLE_Emax 127 /* largest valid exponent */
#define SINGLE_Ebias 127
#define SINGLE_Emin (-126) /* smallest valid exponent */
static u_char normalize_no_excep(FPU_REG *r, int exp, int sign)
{
u_char tag;
setexponent16(r, exp);
tag = FPU_normalize_nuo(r);
stdexp(r);
if (sign)
setnegative(r);
return tag;
}
int FPU_tagof(FPU_REG *ptr)
{
int exp;
exp = exponent16(ptr) & 0x7fff;
if (exp == 0) {
if (!(ptr->sigh | ptr->sigl)) {
return TAG_Zero;
}
/* The number is a de-normal or pseudodenormal. */
return TAG_Special;
}
if (exp == 0x7fff) {
/* Is an Infinity, a NaN, or an unsupported data type. */
return TAG_Special;
}
if (!(ptr->sigh & 0x80000000)) {
/* Unsupported data type. */
/* Valid numbers have the ms bit set to 1. */
/* Unnormal. */
return TAG_Special;
}
return TAG_Valid;
}
/* Get a long double from user memory */
int FPU_load_extended(long double __user *s, int stnr)
{
FPU_REG *sti_ptr = &st(stnr);
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_READ, s, 10);
__copy_from_user(sti_ptr, s, 10);
RE_ENTRANT_CHECK_ON;
return FPU_tagof(sti_ptr);
}
/* Get a double from user memory */
int FPU_load_double(double __user *dfloat, FPU_REG *loaded_data)
{
int exp, tag, negative;
unsigned m64, l64;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_READ, dfloat, 8);
FPU_get_user(m64, 1 + (unsigned long __user *)dfloat);
FPU_get_user(l64, (unsigned long __user *)dfloat);
RE_ENTRANT_CHECK_ON;
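/*
* Added note: the lines below unpack the IEEE 754 double into sign,
* exponent and mantissa, rebiasing the exponent from the double bias
* (1023) to the 80-bit extended bias (16383). The 52-bit mantissa is
* shifted left by 11 so that, for normal numbers, the explicit integer
* bit (0x80000000 in sigh) can be OR-ed in to form a 64-bit significand.
*/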
negative = (m64 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
exp = ((m64 & 0x7ff00000) >> 20) - DOUBLE_Ebias + EXTENDED_Ebias;
m64 &= 0xfffff;
if (exp > DOUBLE_Emax + EXTENDED_Ebias) {
/* Infinity or NaN */
if ((m64 == 0) && (l64 == 0)) {
/* +- infinity */
loaded_data->sigh = 0x80000000;
loaded_data->sigl = 0x00000000;
exp = EXP_Infinity + EXTENDED_Ebias;
tag = TAG_Special;
} else {
/* Must be a signaling or quiet NaN */
exp = EXP_NaN + EXTENDED_Ebias;
loaded_data->sigh = (m64 << 11) | 0x80000000;
loaded_data->sigh |= l64 >> 21;
loaded_data->sigl = l64 << 11;
tag = TAG_Special; /* The calling function must look for NaNs */
}
} else if (exp < DOUBLE_Emin + EXTENDED_Ebias) {
/* Zero or de-normal */
if ((m64 == 0) && (l64 == 0)) {
/* Zero */
reg_copy(&CONST_Z, loaded_data);
exp = 0;
tag = TAG_Zero;
} else {
/* De-normal */
loaded_data->sigh = m64 << 11;
loaded_data->sigh |= l64 >> 21;
loaded_data->sigl = l64 << 11;
return normalize_no_excep(loaded_data, DOUBLE_Emin,
negative)
| (denormal_operand() < 0 ? FPU_Exception : 0);
}
} else {
loaded_data->sigh = (m64 << 11) | 0x80000000;
loaded_data->sigh |= l64 >> 21;
loaded_data->sigl = l64 << 11;
tag = TAG_Valid;
}
setexponent16(loaded_data, exp | negative);
return tag;
}
/* Get a float from user memory */
int FPU_load_single(float __user *single, FPU_REG *loaded_data)
{
unsigned m32;
int exp, tag, negative;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_READ, single, 4);
FPU_get_user(m32, (unsigned long __user *)single);
RE_ENTRANT_CHECK_ON;
negative = (m32 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
if (!(m32 & 0x7fffffff)) {
/* Zero */
reg_copy(&CONST_Z, loaded_data);
addexponent(loaded_data, negative);
return TAG_Zero;
}
exp = ((m32 & 0x7f800000) >> 23) - SINGLE_Ebias + EXTENDED_Ebias;
m32 = (m32 & 0x7fffff) << 8;
if (exp < SINGLE_Emin + EXTENDED_Ebias) {
/* De-normals */
loaded_data->sigh = m32;
loaded_data->sigl = 0;
return normalize_no_excep(loaded_data, SINGLE_Emin, negative)
| (denormal_operand() < 0 ? FPU_Exception : 0);
} else if (exp > SINGLE_Emax + EXTENDED_Ebias) {
/* Infinity or NaN */
if (m32 == 0) {
/* +- infinity */
loaded_data->sigh = 0x80000000;
loaded_data->sigl = 0x00000000;
exp = EXP_Infinity + EXTENDED_Ebias;
tag = TAG_Special;
} else {
/* Must be a signaling or quiet NaN */
exp = EXP_NaN + EXTENDED_Ebias;
loaded_data->sigh = m32 | 0x80000000;
loaded_data->sigl = 0;
tag = TAG_Special; /* The calling function must look for NaNs */
}
} else {
loaded_data->sigh = m32 | 0x80000000;
loaded_data->sigl = 0;
tag = TAG_Valid;
}
setexponent16(loaded_data, exp | negative); /* Set the sign. */
return tag;
}
/* Get a long long from user memory */
int FPU_load_int64(long long __user *_s)
{
long long s;
int sign;
FPU_REG *st0_ptr = &st(0);
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_READ, _s, 8);
if (copy_from_user(&s, _s, 8))
FPU_abort;
RE_ENTRANT_CHECK_ON;
if (s == 0) {
reg_copy(&CONST_Z, st0_ptr);
return TAG_Zero;
}
if (s > 0)
sign = SIGN_Positive;
else {
s = -s;
sign = SIGN_Negative;
}
significand(st0_ptr) = s;
return normalize_no_excep(st0_ptr, 63, sign);
}
/* Get a long from user memory */
int FPU_load_int32(long __user *_s, FPU_REG *loaded_data)
{
long s;
int negative;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_READ, _s, 4);
FPU_get_user(s, _s);
RE_ENTRANT_CHECK_ON;
if (s == 0) {
reg_copy(&CONST_Z, loaded_data);
return TAG_Zero;
}
if (s > 0)
negative = SIGN_Positive;
else {
s = -s;
negative = SIGN_Negative;
}
loaded_data->sigh = s;
loaded_data->sigl = 0;
return normalize_no_excep(loaded_data, 31, negative);
}
/* Get a short from user memory */
int FPU_load_int16(short __user *_s, FPU_REG *loaded_data)
{
int s, negative;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_READ, _s, 2);
/* Cast as short to get the sign extended. */
FPU_get_user(s, _s);
RE_ENTRANT_CHECK_ON;
if (s == 0) {
reg_copy(&CONST_Z, loaded_data);
return TAG_Zero;
}
if (s > 0)
negative = SIGN_Positive;
else {
s = -s;
negative = SIGN_Negative;
}
loaded_data->sigh = s << 16;
loaded_data->sigl = 0;
return normalize_no_excep(loaded_data, 15, negative);
}
/* Get a packed bcd array from user memory */
int FPU_load_bcd(u_char __user *s)
{
FPU_REG *st0_ptr = &st(0);
int pos;
u_char bcd;
long long l = 0;
int sign;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_READ, s, 10);
RE_ENTRANT_CHECK_ON;
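/*
* Added worked example: the loop below unpacks 9 bytes of packed BCD,
* most significant byte first (s[8] down to s[0]), two decimal digits
* per byte. For the 18-digit number 123456789012345678, s[8] holds 0x12
* and s[0] holds 0x78; each iteration computes
* l = (l * 10 + (bcd >> 4)) * 10 + (bcd & 0x0f).
*/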
for (pos = 8; pos >= 0; pos--) {
l *= 10;
RE_ENTRANT_CHECK_OFF;
FPU_get_user(bcd, s + pos);
RE_ENTRANT_CHECK_ON;
l += bcd >> 4;
l *= 10;
l += bcd & 0x0f;
}
RE_ENTRANT_CHECK_OFF;
FPU_get_user(sign, s + 9);
sign = sign & 0x80 ? SIGN_Negative : SIGN_Positive;
RE_ENTRANT_CHECK_ON;
if (l == 0) {
reg_copy(&CONST_Z, st0_ptr);
addexponent(st0_ptr, sign); /* Set the sign. */
return TAG_Zero;
} else {
significand(st0_ptr) = l;
return normalize_no_excep(st0_ptr, 63, sign);
}
}
/*===========================================================================*/
/* Put a long double into user memory */
int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag,
long double __user * d)
{
/*
The only exception raised by an attempt to store to an
extended format is the Invalid Stack exception, i.e.
attempting to store from an empty register.
*/
if (st0_tag != TAG_Empty) {
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, d, 10);
FPU_put_user(st0_ptr->sigl, (unsigned long __user *)d);
FPU_put_user(st0_ptr->sigh,
(unsigned long __user *)((u_char __user *) d + 4));
FPU_put_user(exponent16(st0_ptr),
(unsigned short __user *)((u_char __user *) d +
8));
RE_ENTRANT_CHECK_ON;
return 1;
}
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
if (control_word & CW_Invalid) {
/* The masked response */
/* Put out the QNaN indefinite */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, d, 10);
FPU_put_user(0, (unsigned long __user *)d);
FPU_put_user(0xc0000000, 1 + (unsigned long __user *)d);
FPU_put_user(0xffff, 4 + (short __user *)d);
RE_ENTRANT_CHECK_ON;
return 1;
} else
return 0;
}
/* Put a double into user memory */
int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat)
{
unsigned long l[2];
unsigned long increment = 0; /* avoid gcc warnings */
int precision_loss;
int exp;
FPU_REG tmp;
l[0] = 0;
l[1] = 0;
if (st0_tag == TAG_Valid) {
reg_copy(st0_ptr, &tmp);
exp = exponent(&tmp);
if (exp < DOUBLE_Emin) { /* It may be a denormal */
addexponent(&tmp, -DOUBLE_Emin + 52); /* largest exp to be 51 */
denormal_arg:
if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) {
#ifdef PECULIAR_486
/* Did it round to a non-denormal ? */
/* This behaviour might be regarded as peculiar, it appears
that the 80486 rounds to the dest precision, then
converts to decide underflow. */
if (!((tmp.sigh == 0x00100000) && (tmp.sigl == 0)
&& (st0_ptr->sigl & 0x000007ff)))
#endif /* PECULIAR_486 */
{
EXCEPTION(EX_Underflow);
/* This is a special case: see sec 16.2.5.1 of
the 80486 book */
if (!(control_word & CW_Underflow))
return 0;
}
EXCEPTION(precision_loss);
if (!(control_word & CW_Precision))
return 0;
}
l[0] = tmp.sigl;
l[1] = tmp.sigh;
} else {
if (tmp.sigl & 0x000007ff) {
precision_loss = 1;
switch (control_word & CW_RC) {
case RC_RND:
/* Rounding can get a little messy.. */
increment = ((tmp.sigl & 0x7ff) > 0x400) | /* nearest */
((tmp.sigl & 0xc00) == 0xc00); /* odd -> even */
break;
case RC_DOWN: /* towards -infinity */
increment = signpositive(&tmp) ? 0 : tmp.sigl & 0x7ff;
break;
case RC_UP: /* towards +infinity */
increment = signpositive(&tmp) ? tmp.sigl & 0x7ff : 0;
break;
case RC_CHOP:
increment = 0;
break;
}
/* Truncate the mantissa */
tmp.sigl &= 0xfffff800;
if (increment) {
if (tmp.sigl >= 0xfffff800) {
/* the sigl part overflows */
if (tmp.sigh == 0xffffffff) {
/* The sigh part overflows */
tmp.sigh = 0x80000000;
exp++;
if (exp >= EXP_OVER)
goto overflow;
} else {
tmp.sigh++;
}
tmp.sigl = 0x00000000;
} else {
/* We only need to increment sigl */
tmp.sigl += 0x00000800;
}
}
} else
precision_loss = 0;
l[0] = (tmp.sigl >> 11) | (tmp.sigh << 21);
l[1] = ((tmp.sigh >> 11) & 0xfffff);
if (exp > DOUBLE_Emax) {
overflow:
EXCEPTION(EX_Overflow);
if (!(control_word & CW_Overflow))
return 0;
set_precision_flag_up();
if (!(control_word & CW_Precision))
return 0;
/* This is a special case: see sec 16.2.5.1 of the 80486 book */
/* Overflow to infinity */
l[1] = 0x7ff00000; /* Set to + INF */
} else {
if (precision_loss) {
if (increment)
set_precision_flag_up();
else
set_precision_flag_down();
}
/* Add the exponent */
l[1] |= (((exp + DOUBLE_Ebias) & 0x7ff) << 20);
}
}
} else if (st0_tag == TAG_Zero) {
/* Number is zero */
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if (st0_tag == TW_Denormal) {
/* A denormal will always underflow. */
#ifndef PECULIAR_486
/* An 80486 is supposed to be able to generate
a denormal exception here, but... */
/* Underflow has priority. */
if (control_word & CW_Underflow)
denormal_operand();
#endif /* PECULIAR_486 */
reg_copy(st0_ptr, &tmp);
goto denormal_arg;
} else if (st0_tag == TW_Infinity) {
l[1] = 0x7ff00000;
} else if (st0_tag == TW_NaN) {
/* Is it really a NaN ? */
if ((exponent(st0_ptr) == EXP_OVER)
&& (st0_ptr->sigh & 0x80000000)) {
/* See if we can get a valid NaN from the FPU_REG */
l[0] = (st0_ptr->sigl >> 11) | (st0_ptr->sigh << 21);
l[1] = ((st0_ptr->sigh >> 11) & 0xfffff);
if (!(st0_ptr->sigh & 0x40000000)) {
/* It is a signalling NaN */
EXCEPTION(EX_Invalid);
if (!(control_word & CW_Invalid))
return 0;
l[1] |= (0x40000000 >> 11);
}
l[1] |= 0x7ff00000;
} else {
/* It is an unsupported data type */
EXCEPTION(EX_Invalid);
if (!(control_word & CW_Invalid))
return 0;
l[1] = 0xfff80000;
}
}
} else if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
if (control_word & CW_Invalid) {
/* The masked response */
/* Put out the QNaN indefinite */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, dfloat, 8);
FPU_put_user(0, (unsigned long __user *)dfloat);
FPU_put_user(0xfff80000,
1 + (unsigned long __user *)dfloat);
RE_ENTRANT_CHECK_ON;
return 1;
} else
return 0;
}
if (getsign(st0_ptr))
l[1] |= 0x80000000;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, dfloat, 8);
FPU_put_user(l[0], (unsigned long __user *)dfloat);
FPU_put_user(l[1], 1 + (unsigned long __user *)dfloat);
RE_ENTRANT_CHECK_ON;
return 1;
}
/* Put a float into user memory */
int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag, float __user *single)
{
long templ = 0;
unsigned long increment = 0; /* avoid gcc warnings */
int precision_loss;
int exp;
FPU_REG tmp;
if (st0_tag == TAG_Valid) {
reg_copy(st0_ptr, &tmp);
exp = exponent(&tmp);
if (exp < SINGLE_Emin) {
addexponent(&tmp, -SINGLE_Emin + 23); /* largest exp to be 22 */
denormal_arg:
if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) {
#ifdef PECULIAR_486
/* Did it round to a non-denormal ? */
/* This behaviour might be regarded as peculiar, it appears
that the 80486 rounds to the dest precision, then
converts to decide underflow. */
if (!((tmp.sigl == 0x00800000) &&
((st0_ptr->sigh & 0x000000ff)
|| st0_ptr->sigl)))
#endif /* PECULIAR_486 */
{
EXCEPTION(EX_Underflow);
/* This is a special case: see sec 16.2.5.1 of
the 80486 book */
if (!(control_word & CW_Underflow))
return 0;
}
EXCEPTION(precision_loss);
if (!(control_word & CW_Precision))
return 0;
}
templ = tmp.sigl;
} else {
if (tmp.sigl | (tmp.sigh & 0x000000ff)) {
unsigned long sigh = tmp.sigh;
unsigned long sigl = tmp.sigl;
precision_loss = 1;
switch (control_word & CW_RC) {
case RC_RND:
increment = ((sigh & 0xff) > 0x80) /* more than half */
|| (((sigh & 0xff) == 0x80) && sigl) /* more than half */
|| ((sigh & 0x180) == 0x180); /* round to even */
break;
case RC_DOWN: /* towards -infinity */
increment = signpositive(&tmp)
? 0 : (sigl | (sigh & 0xff));
break;
case RC_UP: /* towards +infinity */
increment = signpositive(&tmp)
? (sigl | (sigh & 0xff)) : 0;
break;
case RC_CHOP:
increment = 0;
break;
}
/* Truncate part of the mantissa */
tmp.sigl = 0;
if (increment) {
if (sigh >= 0xffffff00) {
/* The sigh part overflows */
tmp.sigh = 0x80000000;
exp++;
if (exp >= EXP_OVER)
goto overflow;
} else {
tmp.sigh &= 0xffffff00;
tmp.sigh += 0x100;
}
} else {
tmp.sigh &= 0xffffff00; /* Finish the truncation */
}
} else
precision_loss = 0;
templ = (tmp.sigh >> 8) & 0x007fffff;
if (exp > SINGLE_Emax) {
overflow:
EXCEPTION(EX_Overflow);
if (!(control_word & CW_Overflow))
return 0;
set_precision_flag_up();
if (!(control_word & CW_Precision))
return 0;
/* This is a special case: see sec 16.2.5.1 of the 80486 book. */
/* Masked response is overflow to infinity. */
templ = 0x7f800000;
} else {
if (precision_loss) {
if (increment)
set_precision_flag_up();
else
set_precision_flag_down();
}
/* Add the exponent */
templ |= ((exp + SINGLE_Ebias) & 0xff) << 23;
}
}
} else if (st0_tag == TAG_Zero) {
templ = 0;
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if (st0_tag == TW_Denormal) {
reg_copy(st0_ptr, &tmp);
/* A denormal will always underflow. */
#ifndef PECULIAR_486
/* An 80486 is supposed to be able to generate
a denormal exception here, but... */
/* Underflow has priority. */
if (control_word & CW_Underflow)
denormal_operand();
#endif /* PECULIAR_486 */
goto denormal_arg;
} else if (st0_tag == TW_Infinity) {
templ = 0x7f800000;
} else if (st0_tag == TW_NaN) {
/* Is it really a NaN ? */
if ((exponent(st0_ptr) == EXP_OVER)
&& (st0_ptr->sigh & 0x80000000)) {
/* See if we can get a valid NaN from the FPU_REG */
templ = st0_ptr->sigh >> 8;
if (!(st0_ptr->sigh & 0x40000000)) {
/* It is a signalling NaN */
EXCEPTION(EX_Invalid);
if (!(control_word & CW_Invalid))
return 0;
templ |= (0x40000000 >> 8);
}
templ |= 0x7f800000;
} else {
/* It is an unsupported data type */
EXCEPTION(EX_Invalid);
if (!(control_word & CW_Invalid))
return 0;
templ = 0xffc00000;
}
}
#ifdef PARANOID
else {
EXCEPTION(EX_INTERNAL | 0x164);
return 0;
}
#endif
} else if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
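/* Note (added): EX_Invalid has the same bit value as CW_Invalid in
this emulator, so the test below checks the invalid-operation mask
bit of the control word. */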
if (control_word & EX_Invalid) {
/* The masked response */
/* Put out the QNaN indefinite */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, single, 4);
FPU_put_user(0xffc00000,
(unsigned long __user *)single);
RE_ENTRANT_CHECK_ON;
return 1;
} else
return 0;
}
#ifdef PARANOID
else {
EXCEPTION(EX_INTERNAL | 0x163);
return 0;
}
#endif
if (getsign(st0_ptr))
templ |= 0x80000000;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, single, 4);
FPU_put_user(templ, (unsigned long __user *)single);
RE_ENTRANT_CHECK_ON;
return 1;
}
/* Put a long long into user memory */
int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag, long long __user *d)
{
FPU_REG t;
long long tll;
int precision_loss;
if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
goto invalid_operand;
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
EXCEPTION(EX_Invalid);
goto invalid_operand;
}
}
reg_copy(st0_ptr, &t);
precision_loss = FPU_round_to_int(&t, st0_tag);
((long *)&tll)[0] = t.sigl;
((long *)&tll)[1] = t.sigh;
if ((precision_loss == 1) ||
((t.sigh & 0x80000000) &&
!((t.sigh == 0x80000000) && (t.sigl == 0) && signnegative(&t)))) {
EXCEPTION(EX_Invalid);
/* This is a special case: see sec 16.2.5.1 of the 80486 book */
invalid_operand:
if (control_word & EX_Invalid) {
/* Produce something like QNaN "indefinite" */
tll = 0x8000000000000000LL;
} else
return 0;
} else {
if (precision_loss)
set_precision_flag(precision_loss);
if (signnegative(&t))
tll = -tll;
}
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, d, 8);
if (copy_to_user(d, &tll, 8))
FPU_abort;
RE_ENTRANT_CHECK_ON;
return 1;
}
/* Put a long into user memory */
int FPU_store_int32(FPU_REG *st0_ptr, u_char st0_tag, long __user *d)
{
FPU_REG t;
int precision_loss;
if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
goto invalid_operand;
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
EXCEPTION(EX_Invalid);
goto invalid_operand;
}
}
reg_copy(st0_ptr, &t);
precision_loss = FPU_round_to_int(&t, st0_tag);
if (t.sigh ||
((t.sigl & 0x80000000) &&
!((t.sigl == 0x80000000) && signnegative(&t)))) {
EXCEPTION(EX_Invalid);
/* This is a special case: see sec 16.2.5.1 of the 80486 book */
invalid_operand:
if (control_word & EX_Invalid) {
/* Produce something like QNaN "indefinite" */
t.sigl = 0x80000000;
} else
return 0;
} else {
if (precision_loss)
set_precision_flag(precision_loss);
if (signnegative(&t))
t.sigl = -(long)t.sigl;
}
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, d, 4);
FPU_put_user(t.sigl, (unsigned long __user *)d);
RE_ENTRANT_CHECK_ON;
return 1;
}
/* Put a short into user memory */
int FPU_store_int16(FPU_REG *st0_ptr, u_char st0_tag, short __user *d)
{
FPU_REG t;
int precision_loss;
if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
goto invalid_operand;
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
EXCEPTION(EX_Invalid);
goto invalid_operand;
}
}
reg_copy(st0_ptr, &t);
precision_loss = FPU_round_to_int(&t, st0_tag);
if (t.sigh ||
((t.sigl & 0xffff8000) &&
!((t.sigl == 0x8000) && signnegative(&t)))) {
EXCEPTION(EX_Invalid);
/* This is a special case: see sec 16.2.5.1 of the 80486 book */
invalid_operand:
if (control_word & EX_Invalid) {
/* Produce something like QNaN "indefinite" */
t.sigl = 0x8000;
} else
return 0;
} else {
if (precision_loss)
set_precision_flag(precision_loss);
if (signnegative(&t))
t.sigl = -t.sigl;
}
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, d, 2);
FPU_put_user((short)t.sigl, d);
RE_ENTRANT_CHECK_ON;
return 1;
}
/* Put a packed bcd array into user memory */
int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d)
{
FPU_REG t;
unsigned long long ll;
u_char b;
int i, precision_loss;
u_char sign = (getsign(st0_ptr) == SIGN_NEG) ? 0x80 : 0;
if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
goto invalid_operand;
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
EXCEPTION(EX_Invalid);
goto invalid_operand;
}
}
reg_copy(st0_ptr, &t);
precision_loss = FPU_round_to_int(&t, st0_tag);
ll = significand(&t);
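/* Note (added): 0x0de0b6b3a763ffff == 999999999999999999, the largest
value representable in 18 packed-BCD digits. */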
/* Check for overflow, by comparing with 999999999999999999 decimal. */
if ((t.sigh > 0x0de0b6b3) ||
((t.sigh == 0x0de0b6b3) && (t.sigl > 0xa763ffff))) {
EXCEPTION(EX_Invalid);
/* This is a special case: see sec 16.2.5.1 of the 80486 book */
invalid_operand:
if (control_word & CW_Invalid) {
/* Produce the QNaN "indefinite" */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, d, 10);
for (i = 0; i < 7; i++)
FPU_put_user(0, d + i); /* These bytes "undefined" */
FPU_put_user(0xc0, d + 7); /* This byte "undefined" */
FPU_put_user(0xff, d + 8);
FPU_put_user(0xff, d + 9);
RE_ENTRANT_CHECK_ON;
return 1;
} else
return 0;
} else if (precision_loss) {
/* Precision loss doesn't stop the data transfer */
set_precision_flag(precision_loss);
}
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, d, 10);
RE_ENTRANT_CHECK_ON;
for (i = 0; i < 9; i++) {
b = FPU_div_small(&ll, 10);
b |= (FPU_div_small(&ll, 10)) << 4;
RE_ENTRANT_CHECK_OFF;
FPU_put_user(b, d + i);
RE_ENTRANT_CHECK_ON;
}
RE_ENTRANT_CHECK_OFF;
FPU_put_user(sign, d + 9);
RE_ENTRANT_CHECK_ON;
return 1;
}
/*===========================================================================*/
/* r is mangled in place: its significand becomes the integer value and
its sign is preserved; the result is NOT a normalized FPU_REG. */
/* The return value is zero if the result is exact; if bits are changed
due to rounding, truncation, etc, then PRECISION_LOST_UP or
PRECISION_LOST_DOWN is returned. */
/* Overflow is signalled by a return value of 1. In the case of
overflow, the returned significand always has the largest possible
value. */
int FPU_round_to_int(FPU_REG *r, u_char tag)
{
u_char very_big;
unsigned eax;
if (tag == TAG_Zero) {
/* Make sure that zero is returned */
significand(r) = 0;
return 0; /* o.k. */
}
if (exponent(r) > 63) {
r->sigl = r->sigh = ~0; /* The largest representable number */
return 1; /* overflow */
}
eax = FPU_shrxs(&r->sigl, 63 - exponent(r));
very_big = !(~(r->sigh) | ~(r->sigl)); /* test for 0xfff...fff */
#define half_or_more (eax & 0x80000000)
#define frac_part (eax)
#define more_than_half ((eax & 0x80000001) == 0x80000001)
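/*
* Added note: eax holds the fraction shifted out by FPU_shrxs() above,
* with the most significant discarded bit in bit 31; bit 0 appears to
* act as a sticky bit for anything shifted out below it. So
* half_or_more tests fraction >= 1/2 and more_than_half tests
* fraction > 1/2, as needed for round-to-nearest-even.
*/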
switch (control_word & CW_RC) {
case RC_RND:
if (more_than_half /* nearest */
|| (half_or_more && (r->sigl & 1))) { /* odd -> even */
if (very_big)
return 1; /* overflow */
significand(r)++;
return PRECISION_LOST_UP;
}
break;
case RC_DOWN:
if (frac_part && getsign(r)) {
if (very_big)
return 1; /* overflow */
significand(r)++;
return PRECISION_LOST_UP;
}
break;
case RC_UP:
if (frac_part && !getsign(r)) {
if (very_big)
return 1; /* overflow */
significand(r)++;
return PRECISION_LOST_UP;
}
break;
case RC_CHOP:
break;
}
return eax ? PRECISION_LOST_DOWN : 0;
}
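/*
* Added worked example for the RC_RND case above: 2.5 has half_or_more
* set and no lower fraction bits, and its integer part (2) is even, so
* it stays 2; 3.5 has an odd integer part, so the odd -> even rule
* rounds it up to 4; 2.501 trips more_than_half and rounds to 3.
*/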
/*===========================================================================*/
u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user *s)
{
unsigned short tag_word = 0;
u_char tag;
int i;
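/*
* Added note: the condition below selects the 14-byte (0x0e)
* environment layout, used in virtual-8086 mode or when a 16-bit
* operand size is in effect (PM16 XOR an operand-size prefix);
* otherwise the 28-byte (0x1c) 32-bit layout is read.
*/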
if ((addr_modes.default_mode == VM86) ||
((addr_modes.default_mode == PM16)
^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) {
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_READ, s, 0x0e);
FPU_get_user(control_word, (unsigned short __user *)s);
FPU_get_user(partial_status, (unsigned short __user *)(s + 2));
FPU_get_user(tag_word, (unsigned short __user *)(s + 4));
FPU_get_user(instruction_address.offset,
(unsigned short __user *)(s + 6));
FPU_get_user(instruction_address.selector,
(unsigned short __user *)(s + 8));
FPU_get_user(operand_address.offset,
(unsigned short __user *)(s + 0x0a));
FPU_get_user(operand_address.selector,
(unsigned short __user *)(s + 0x0c));
RE_ENTRANT_CHECK_ON;
s += 0x0e;
if (addr_modes.default_mode == VM86) {
instruction_address.offset
+= (instruction_address.selector & 0xf000) << 4;
operand_address.offset +=
(operand_address.selector & 0xf000) << 4;
}
} else {
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_READ, s, 0x1c);
FPU_get_user(control_word, (unsigned short __user *)s);
FPU_get_user(partial_status, (unsigned short __user *)(s + 4));
FPU_get_user(tag_word, (unsigned short __user *)(s + 8));
FPU_get_user(instruction_address.offset,
(unsigned long __user *)(s + 0x0c));
FPU_get_user(instruction_address.selector,
(unsigned short __user *)(s + 0x10));
FPU_get_user(instruction_address.opcode,
(unsigned short __user *)(s + 0x12));
FPU_get_user(operand_address.offset,
(unsigned long __user *)(s + 0x14));
FPU_get_user(operand_address.selector,
(unsigned long __user *)(s + 0x18));
RE_ENTRANT_CHECK_ON;
s += 0x1c;
}
#ifdef PECULIAR_486
control_word &= ~0xe080;
#endif /* PECULIAR_486 */
top = (partial_status >> SW_Top_Shift) & 7;
if (partial_status & ~control_word & CW_Exceptions)
partial_status |= (SW_Summary | SW_Backward);
else
partial_status &= ~(SW_Summary | SW_Backward);
for (i = 0; i < 8; i++) {
tag = tag_word & 3;
tag_word >>= 2;
if (tag == TAG_Empty)
/* New tag is empty. Accept it */
FPU_settag(i, TAG_Empty);
else if (FPU_gettag(i) == TAG_Empty) {
/* Old tag is empty and new tag is not empty. New tag is determined
by old reg contents */
if (exponent(&fpu_register(i)) == -EXTENDED_Ebias) {
if (!(fpu_register(i).sigl | fpu_register(i).sigh))
FPU_settag(i, TAG_Zero);
else
FPU_settag(i, TAG_Special);
} else if (exponent(&fpu_register(i)) ==
0x7fff - EXTENDED_Ebias) {
FPU_settag(i, TAG_Special);
} else if (fpu_register(i).sigh & 0x80000000)
FPU_settag(i, TAG_Valid);
else
FPU_settag(i, TAG_Special); /* An Un-normal */
}
/* Else old tag is not empty and new tag is not empty. Old tag
remains correct */
}
return s;
}
void frstor(fpu_addr_modes addr_modes, u_char __user *data_address)
{
int i, regnr;
u_char __user *s = fldenv(addr_modes, data_address);
int offset = (top & 7) * 10, other = 80 - offset;
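/*
* Added note: the saved image stores the registers in stack order,
* st(0) first, 10 bytes each, while register_base is indexed by
* physical register number. The image is therefore split at "other"
* and copied in two pieces to rotate it by the current top-of-stack.
*/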
/* Copy all registers in stack order. */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_READ, s, 80);
__copy_from_user(register_base + offset, s, other);
if (offset)
__copy_from_user(register_base, s + other, offset);
RE_ENTRANT_CHECK_ON;
for (i = 0; i < 8; i++) {
regnr = (i + top) & 7;
if (FPU_gettag(regnr) != TAG_Empty)
/* The loaded data over-rides all other cases. */
FPU_settag(regnr, FPU_tagof(&st(i)));
}
}
u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d)
{
if ((addr_modes.default_mode == VM86) ||
((addr_modes.default_mode == PM16)
^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) {
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, d, 14);
#ifdef PECULIAR_486
FPU_put_user(control_word & ~0xe080, (unsigned long __user *)d);
#else
FPU_put_user(control_word, (unsigned short __user *)d);
#endif /* PECULIAR_486 */
FPU_put_user(status_word(), (unsigned short __user *)(d + 2));
FPU_put_user(fpu_tag_word, (unsigned short __user *)(d + 4));
FPU_put_user(instruction_address.offset,
(unsigned short __user *)(d + 6));
FPU_put_user(operand_address.offset,
(unsigned short __user *)(d + 0x0a));
if (addr_modes.default_mode == VM86) {
FPU_put_user((instruction_address.offset & 0xf0000) >> 4,
(unsigned short __user *)(d + 8));
FPU_put_user((operand_address.offset & 0xf0000) >> 4,
(unsigned short __user *)(d + 0x0c));
} else {
FPU_put_user(instruction_address.selector,
(unsigned short __user *)(d + 8));
FPU_put_user(operand_address.selector,
(unsigned short __user *)(d + 0x0c));
}
RE_ENTRANT_CHECK_ON;
d += 0x0e;
} else {
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, d, 7 * 4);
#ifdef PECULIAR_486
control_word &= ~0xe080;
/* An 80486 sets nearly all of the reserved bits to 1. */
control_word |= 0xffff0040;
partial_status = status_word() | 0xffff0000;
fpu_tag_word |= 0xffff0000;
I387->soft.fcs &= ~0xf8000000;
I387->soft.fos |= 0xffff0000;
#endif /* PECULIAR_486 */
if (__copy_to_user(d, &control_word, 7 * 4))
FPU_abort;
RE_ENTRANT_CHECK_ON;
d += 0x1c;
}
control_word |= CW_Exceptions;
partial_status &= ~(SW_Summary | SW_Backward);
return d;
}
void fsave(fpu_addr_modes addr_modes, u_char __user *data_address)
{
u_char __user *d;
int offset = (top & 7) * 10, other = 80 - offset;
d = fstenv(addr_modes, data_address);
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE, d, 80);
/* Copy all registers in stack order. */
if (__copy_to_user(d, register_base + offset, other))
FPU_abort;
if (offset)
if (__copy_to_user(d + other, register_base, offset))
FPU_abort;
RE_ENTRANT_CHECK_ON;
finit();
}
/*===========================================================================*/
| gpl-2.0 |
androidbftab1/bf-kernel-4.2 | arch/ia64/mm/init.c | 200 | 19790 | /*
* Initialize MMU support.
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif
struct page *zero_page_memmap_ptr; /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);
void
__ia64_sync_icache_dcache (pte_t pte)
{
unsigned long addr;
struct page *page;
page = pte_page(pte);
addr = (unsigned long) page_address(page);
if (test_bit(PG_arch_1, &page->flags))
return; /* i-cache is already coherent with d-cache */
flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
/*
* Since DMA is i-cache coherent, any (complete) pages that were written via
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
void
dma_mark_clean(void *addr, size_t size)
{
unsigned long pg_addr, end;
pg_addr = PAGE_ALIGN((unsigned long) addr);
end = (unsigned long) addr + size;
while (pg_addr + PAGE_SIZE <= end) {
struct page *page = virt_to_page(pg_addr);
set_bit(PG_arch_1, &page->flags);
pg_addr += PAGE_SIZE;
}
}
inline void
ia64_set_rbs_bot (void)
{
unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
if (stack_size > MAX_USER_STACK_SIZE)
stack_size = MAX_USER_STACK_SIZE;
current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
/*
* This performs some platform-dependent address space initialization.
* On IA-64, we want to setup the VM area for the register backing
* store (which grows upwards) and install the gateway page which is
* used for signal trampolines, etc.
*/
void
ia64_init_addr_space (void)
{
struct vm_area_struct *vma;
ia64_set_rbs_bot();
/*
* If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
* the problem. When the process attempts to write to the register backing store
* for the first time, it will get a SEGFAULT in this case.
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
kmem_cache_free(vm_area_cachep, vma);
return;
}
up_write(&current->mm->mmap_sem);
}
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) {
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
VM_DONTEXPAND | VM_DONTDUMP;
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
kmem_cache_free(vm_area_cachep, vma);
return;
}
up_write(&current->mm->mmap_sem);
}
}
}
void
free_initmem (void)
{
free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
-1, "unused kernel");
}
void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
/*
* EFI uses 4KB pages while the kernel can use 4KB or bigger.
* Thus EFI and the kernel may have different page sizes. It is
* therefore possible to have the initrd share the same page as
* the end of the kernel (given current setup).
*
* To avoid freeing/using the wrong page (kernel sized) we:
* - align up the beginning of initrd
* - align down the end of initrd
*
* | |
* |=============| a000
* | |
* | |
* | | 9000
* |/////////////|
* |/////////////|
* |=============| 8000
* |///INITRD////|
* |/////////////|
* |/////////////| 7000
* | |
* |KKKKKKKKKKKKK|
* |=============| 6000
* |KKKKKKKKKKKKK|
* |KKKKKKKKKKKKK|
* K=kernel using 8KB pages
*
* In this example, we must free page 8000 ONLY. So we must align up
* initrd_start and keep initrd_end as is.
*/
start = PAGE_ALIGN(start);
end = end & PAGE_MASK;
if (start < end)
printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
if (!virt_addr_valid(start))
continue;
free_reserved_page(virt_to_page(start));
}
}
/*
* This installs a clean page in the kernel's page table.
*/
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */
{
pud = pud_alloc(&init_mm, pgd, address);
if (!pud)
goto out;
pmd = pmd_alloc(&init_mm, pud, address);
if (!pmd)
goto out;
pte = pte_alloc_kernel(pmd, address);
if (!pte)
goto out;
if (!pte_none(*pte))
goto out;
set_pte(pte, mk_pte(page, pgprot));
}
out:
/* no need for flush_tlb */
return page;
}
static void __init
setup_gate (void)
{
struct page *page;
/*
* Map the gate page twice: once read-only to export the ELF
* headers etc. and once execute-only page to enable
* privilege-promotion via "epc":
*/
page = virt_to_page(ia64_imva(__start_gate_section));
put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
/* Fill in the holes (if any) with read-only zero pages: */
{
unsigned long addr;
for (addr = GATE_ADDR + PAGE_SIZE;
addr < GATE_ADDR + PERCPU_PAGE_SIZE;
addr += PAGE_SIZE)
{
put_kernel_page(ZERO_PAGE(0), addr,
PAGE_READONLY);
put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
PAGE_READONLY);
}
}
#endif
ia64_patch_gate();
}
static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
gate_vma.vm_mm = NULL;
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
gate_vma.vm_page_prot = __P101;
return 0;
}
__initcall(gate_vma_init);
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return &gate_vma;
}
int in_gate_area_no_mm(unsigned long addr)
{
if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
return 1;
return 0;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return in_gate_area_no_mm(addr);
}
void ia64_mmu_init(void *my_cpu_data)
{
unsigned long pta, impl_va_bits;
extern void tlb_init(void);
#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT 0
#else
# define VHPT_ENABLE_BIT 1
#endif
/*
* Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
* address space. The IA-64 architecture guarantees that at least 50 bits of
* virtual address space are implemented but if we pick a large enough page size
* (e.g., 64KB), the mapped address space is big enough that it will overlap with
* VMLPT. I assume that once we run on machines big enough to warrant 64KB pages,
* IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
* problem in practice. Alternatively, we could truncate the top of the mapped
* address space to not permit mappings that would overlap with the VMLPT.
* --davidm 00/12/06
*/
# define pte_bits 3
# define mapped_space_bits (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
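/*
* Added worked example: with 16KB pages (PAGE_SHIFT = 14) and 8-byte
* PTEs (pte_bits = 3), a 3-level page table maps
* 3*(14 - 3) + 14 = 47 bits of virtual address space per region.
*/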
/*
* The virtual page table has to cover the entire implemented address space within
* a region even though not all of this space may be mappable. The reason for
* this is that the Access bit and Dirty bit fault handlers perform
* non-speculative accesses to the virtual page table, so the address range of the
* virtual page table itself needs to be covered by virtual page table.
*/
# define vmlpt_bits (impl_va_bits - PAGE_SHIFT + pte_bits)
# define POW2(n) (1ULL << (n))
impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));
if (impl_va_bits < 51 || impl_va_bits > 61)
panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
/*
* mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
* which must fit into "vmlpt_bits - pte_bits" slots. Second half of
* the test makes sure that our mapped space doesn't overlap the
* unimplemented hole in the middle of the region.
*/
if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
(mapped_space_bits > impl_va_bits - 1))
panic("Cannot build a big enough virtual-linear page table"
" to cover mapped address space.\n"
" Try using a smaller page size.\n");
/* place the VMLPT at the end of each page-table mapped region: */
pta = POW2(61) - POW2(vmlpt_bits);
/*
* Set the (virtually mapped linear) page table address. Bit
* 8 selects between the short and long format, bits 2-7 the
* size of the table, and bit 0 whether the VHPT walker is
* enabled.
*/
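/*
* Added worked example: with impl_va_bits = 51 and PAGE_SHIFT = 14,
* vmlpt_bits = 51 - 14 + 3 = 40, so pta = 2^61 - 2^40 places a 1TB
* virtually-mapped linear page table at the top of each region; the
* value programmed below is then pta | (40 << 2) | VHPT_ENABLE_BIT,
* with bit 8 clear to select the short format.
*/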
ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
ia64_tlb_init();
#ifdef CONFIG_HUGETLB_PAGE
ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
ia64_srlz_d();
#endif
}
#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
unsigned long end_address, hole_next_pfn;
unsigned long stop_address;
pg_data_t *pgdat = NODE_DATA(node);
end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
end_address = PAGE_ALIGN(end_address);
stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];
do {
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pgd = pgd_offset_k(end_address);
if (pgd_none(*pgd)) {
end_address += PGDIR_SIZE;
continue;
}
pud = pud_offset(pgd, end_address);
if (pud_none(*pud)) {
end_address += PUD_SIZE;
continue;
}
pmd = pmd_offset(pud, end_address);
if (pmd_none(*pmd)) {
end_address += PMD_SIZE;
continue;
}
pte = pte_offset_kernel(pmd, end_address);
retry_pte:
if (pte_none(*pte)) {
end_address += PAGE_SIZE;
pte++;
if ((end_address < stop_address) &&
(end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
goto retry_pte;
continue;
}
/* Found next valid vmem_map page */
break;
} while (end_address < stop_address);
end_address = min(end_address, stop_address);
end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
hole_next_pfn = end_address / sizeof(struct page);
return hole_next_pfn - pgdat->node_start_pfn;
}
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
unsigned long address, start_page, end_page;
struct page *map_start, *map_end;
int node;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);
start_page = (unsigned long) map_start & PAGE_MASK;
end_page = PAGE_ALIGN((unsigned long) map_end);
node = paddr_to_nid(__pa(start));
for (address = start_page; address < end_page; address += PAGE_SIZE) {
pgd = pgd_offset_k(address);
if (pgd_none(*pgd))
pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
pud = pud_offset(pgd, address);
if (pud_none(*pud))
pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
pte = pte_offset_kernel(pmd, address);
if (pte_none(*pte))
set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
PAGE_KERNEL));
}
return 0;
}
struct memmap_init_callback_data {
struct page *start;
struct page *end;
int nid;
unsigned long zone;
};
static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
struct memmap_init_callback_data *args;
struct page *map_start, *map_end;
args = (struct memmap_init_callback_data *) arg;
map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);
if (map_start < args->start)
map_start = args->start;
if (map_end > args->end)
map_end = args->end;
/*
* We have to initialize "out of bounds" struct page elements that fit completely
* on the same pages that were allocated for the "in bounds" elements because they
* may be referenced later (and found to be "reserved").
*/
map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
/ sizeof(struct page));
if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start),
args->nid, args->zone, page_to_pfn(map_start),
MEMMAP_EARLY);
return 0;
}
void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
if (!vmem_map)
memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
else {
struct page *start;
struct memmap_init_callback_data args;
start = pfn_to_page(start_pfn);
args.start = start;
args.end = start + size;
args.nid = nid;
args.zone = zone;
efi_memmap_walk(virtual_memmap_init, &args);
}
}
int
ia64_pfn_valid (unsigned long pfn)
{
char byte;
struct page *pg = pfn_to_page(pfn);
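/*
* Added note: with a virtual mem_map the struct page for an invalid
* pfn may itself be unmapped, so it is probed with __get_user(). The
* second clause handles a struct page that straddles a page boundary:
* if first and last byte are not in the same page, the last byte is
* probed as well.
*/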
return (__get_user(byte, (char __user *) pg) == 0)
&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);
int __init find_largest_hole(u64 start, u64 end, void *arg)
{
u64 *max_gap = arg;
static u64 last_end = PAGE_OFFSET;
/* NOTE: this algorithm assumes the EFI memmap table is ordered */
if (*max_gap < (start - last_end))
*max_gap = start - last_end;
last_end = end;
return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
int __init register_active_ranges(u64 start, u64 len, int nid)
{
u64 end = start + len;
#ifdef CONFIG_KEXEC
if (start > crashk_res.start && start < crashk_res.end)
start = crashk_res.end;
if (end > crashk_res.start && end < crashk_res.end)
end = crashk_res.start;
#endif
if (start < end)
memblock_add_node(__pa(start), end - start, nid);
return 0;
}
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
min_low_pfn = min(min_low_pfn, pfn_start);
max_low_pfn = max(max_low_pfn, pfn_end);
return 0;
}
/*
* Boot command-line option "nolwsys" can be used to disable the use of any light-weight
* system call handler. When this option is in effect, all fsyscalls will end up bubbling
* down into the kernel and calling the normal (heavy-weight) syscall handler. This is
* useful for performance testing, but conceivably could also come in handy for debugging
* purposes.
*/
static int nolwsys __initdata;
static int __init
nolwsys_setup (char *s)
{
nolwsys = 1;
return 1;
}
__setup("nolwsys", nolwsys_setup);
void __init
mem_init (void)
{
int i;
BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
#ifdef CONFIG_PCI
/*
* This needs to be called _after_ the command line has been parsed but _before_
* any drivers that may need the PCI DMA interface are initialized or bootmem has
* been freed.
*/
platform_dma_init();
#endif
#ifdef CONFIG_FLATMEM
BUG_ON(!mem_map);
#endif
set_max_mapnr(max_low_pfn);
high_memory = __va(max_low_pfn * PAGE_SIZE);
free_all_bootmem();
mem_init_print_info(NULL);
/*
* For fsyscall entry points with no light-weight handler, use the ordinary
* (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
* code can tell them apart.
*/
for (i = 0; i < NR_syscalls; ++i) {
extern unsigned long fsyscall_table[NR_syscalls];
extern unsigned long sys_call_table[NR_syscalls];
if (!fsyscall_table[i] || nolwsys)
fsyscall_table[i] = sys_call_table[i] | 1;
}
setup_gate();
}
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
pg_data_t *pgdat;
struct zone *zone;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
pgdat = NODE_DATA(nid);
zone = pgdat->node_zones +
zone_for_memory(nid, start, size, ZONE_NORMAL);
ret = __add_pages(nid, zone, start_pfn, nr_pages);
if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n",
__func__, ret);
return ret;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
int ret;
zone = page_zone(pfn_to_page(start_pfn));
ret = __remove_pages(zone, start_pfn, nr_pages);
if (ret)
pr_warn("%s: Problem encountered in __remove_pages() as"
" ret=%d\n", __func__, ret);
return ret;
}
#endif
#endif
/**
* show_mem - give short summary of memory stats
*
* Shows a simple page count of reserved and used pages in the system.
* For discontig machines, it does this on a per-pgdat basis.
*/
void show_mem(unsigned int filter)
{
int total_reserved = 0;
unsigned long total_present = 0;
pg_data_t *pgdat;
printk(KERN_INFO "Mem-info:\n");
show_free_areas(filter);
printk(KERN_INFO "Node memory in pages:\n");
for_each_online_pgdat(pgdat) {
unsigned long present;
unsigned long flags;
int reserved = 0;
int nid = pgdat->node_id;
int zoneid;
if (skip_free_areas_node(filter, nid))
continue;
pgdat_resize_lock(pgdat, &flags);
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
struct zone *zone = &pgdat->node_zones[zoneid];
if (!populated_zone(zone))
continue;
reserved += zone->present_pages - zone->managed_pages;
}
present = pgdat->node_present_pages;
pgdat_resize_unlock(pgdat, &flags);
total_present += present;
total_reserved += reserved;
printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, ",
nid, present, reserved);
}
printk(KERN_INFO "%ld pages of RAM\n", total_present);
printk(KERN_INFO "%d reserved pages\n", total_reserved);
printk(KERN_INFO "Total of %ld pages in page table cache\n",
quicklist_total_size());
printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
}
| gpl-2.0 |
tamlok/linux | kernel/irq/devres.c | 456 | 4045 | #include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/gfp.h>
/*
* Device resource management aware IRQ request/free implementation.
*/
struct irq_devres {
unsigned int irq;
void *dev_id;
};
static void devm_irq_release(struct device *dev, void *res)
{
struct irq_devres *this = res;
free_irq(this->irq, this->dev_id);
}
static int devm_irq_match(struct device *dev, void *res, void *data)
{
struct irq_devres *this = res, *match = data;
return this->irq == match->irq && this->dev_id == match->dev_id;
}
/**
* devm_request_threaded_irq - allocate an interrupt line for a managed device
* @dev: device to request interrupt for
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs
* @thread_fn: function to be called in a threaded interrupt context. NULL
* for devices which handle everything in @handler
* @irqflags: Interrupt type flags
* @devname: An ASCII name for the claiming device
* @dev_id: A cookie passed back to the handler function
*
* Except for the extra @dev argument, this function takes the
* same arguments and performs the same function as
* request_threaded_irq(). IRQs requested with this function will be
* automatically freed on driver detach.
*
* If an IRQ allocated with this function needs to be freed
* separately, devm_free_irq() must be used.
*/
int devm_request_threaded_irq(struct device *dev, unsigned int irq,
irq_handler_t handler, irq_handler_t thread_fn,
unsigned long irqflags, const char *devname,
void *dev_id)
{
struct irq_devres *dr;
int rc;
dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
GFP_KERNEL);
if (!dr)
return -ENOMEM;
rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname,
dev_id);
if (rc) {
devres_free(dr);
return rc;
}
dr->irq = irq;
dr->dev_id = dev_id;
devres_add(dev, dr);
return 0;
}
EXPORT_SYMBOL(devm_request_threaded_irq);
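/*
* Added usage sketch (not part of the original file; kept out of the
* build with #if 0). It shows how a driver's probe routine might use
* devm_request_threaded_irq(). The foo_* names, the IRQF_ONESHOT flag
* choice and the platform_get_irq() lookup are illustrative assumptions
* and would additionally need <linux/platform_device.h>.
*/
#if 0
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
/* Hard-irq context: just wake the handler thread. */
return IRQ_WAKE_THREAD;
}
static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
/* Threaded context: sleeping is allowed here. */
return IRQ_HANDLED;
}
static int foo_probe(struct platform_device *pdev)
{
int irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/* The IRQ is freed automatically when the driver detaches. */
return devm_request_threaded_irq(&pdev->dev, irq, foo_hardirq,
foo_thread_fn, IRQF_ONESHOT, "foo", pdev);
}
#endif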
/**
* devm_request_any_context_irq - allocate an interrupt line for a managed device
* @dev: device to request interrupt for
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs
* @irqflags: Interrupt type flags
* @devname: An ASCII name for the claiming device
* @dev_id: A cookie passed back to the handler function
*
* Except for the extra @dev argument, this function takes the
* same arguments and performs the same function as
* request_any_context_irq(). IRQs requested with this function will be
* automatically freed on driver detach.
*
* If an IRQ allocated with this function needs to be freed
* separately, devm_free_irq() must be used.
*/
int devm_request_any_context_irq(struct device *dev, unsigned int irq,
irq_handler_t handler, unsigned long irqflags,
const char *devname, void *dev_id)
{
struct irq_devres *dr;
int rc;
dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
GFP_KERNEL);
if (!dr)
return -ENOMEM;
rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
/*
* request_any_context_irq() returns a negative errno on failure
* and a positive IRQC_IS_* value on success, so only bail out on
* negative values here.
*/
if (rc < 0) {
devres_free(dr);
return rc;
}
dr->irq = irq;
dr->dev_id = dev_id;
devres_add(dev, dr);
return rc;
}
EXPORT_SYMBOL(devm_request_any_context_irq);
/**
* devm_free_irq - free an interrupt
* @dev: device to free interrupt for
* @irq: Interrupt line to free
* @dev_id: Device identity to free
*
* Except for the extra @dev argument, this function takes the
* same arguments and performs the same function as free_irq().
* This function instead of free_irq() should be used to manually
* free IRQs allocated with devm_request_irq().
*/
void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
{
struct irq_devres match_data = { irq, dev_id };
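/*
* Added note: devres_destroy() removes the matching resource without
* invoking devm_irq_release(), so the IRQ is still requested at that
* point; the explicit free_irq() below then releases it exactly once.
*/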
WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match,
&match_data));
free_irq(irq, dev_id);
}
EXPORT_SYMBOL(devm_free_irq);
| gpl-2.0 |
RenderBroken/msm8974_G2_render_kernel | fs/nfs/nfs4proc.c | 712 | 181287 | /*
* fs/nfs/nfs4proc.c
*
* Client-side procedure declarations for NFSv4.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
* Andy Adamson <andros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/nfs_idmap.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_PROC
#define NFS4_POLL_RETRY_MIN (HZ/10)
#define NFS4_POLL_RETRY_MAX (15*HZ)
#define NFS4_MAX_LOOP_ON_RECOVER (10)
static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
#endif
/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
if (err >= -1000)
return err;
switch (err) {
case -NFS4ERR_RESOURCE:
return -EREMOTEIO;
case -NFS4ERR_WRONGSEC:
return -EPERM;
case -NFS4ERR_BADOWNER:
case -NFS4ERR_BADNAME:
return -EINVAL;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
break;
}
return -EIO;
}
/*
* This is our standard bitmap for GETATTR requests.
*/
const u32 nfs4_fattr_bitmap[2] = {
FATTR4_WORD0_TYPE
| FATTR4_WORD0_CHANGE
| FATTR4_WORD0_SIZE
| FATTR4_WORD0_FSID
| FATTR4_WORD0_FILEID,
FATTR4_WORD1_MODE
| FATTR4_WORD1_NUMLINKS
| FATTR4_WORD1_OWNER
| FATTR4_WORD1_OWNER_GROUP
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
| FATTR4_WORD1_TIME_METADATA
| FATTR4_WORD1_TIME_MODIFY
};
const u32 nfs4_statfs_bitmap[2] = {
FATTR4_WORD0_FILES_AVAIL
| FATTR4_WORD0_FILES_FREE
| FATTR4_WORD0_FILES_TOTAL,
FATTR4_WORD1_SPACE_AVAIL
| FATTR4_WORD1_SPACE_FREE
| FATTR4_WORD1_SPACE_TOTAL
};
const u32 nfs4_pathconf_bitmap[2] = {
FATTR4_WORD0_MAXLINK
| FATTR4_WORD0_MAXNAME,
0
};
const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
| FATTR4_WORD0_MAXREAD
| FATTR4_WORD0_MAXWRITE
| FATTR4_WORD0_LEASE_TIME,
FATTR4_WORD1_TIME_DELTA
| FATTR4_WORD1_FS_LAYOUT_TYPES,
FATTR4_WORD2_LAYOUT_BLKSIZE
};
const u32 nfs4_fs_locations_bitmap[2] = {
FATTR4_WORD0_TYPE
| FATTR4_WORD0_CHANGE
| FATTR4_WORD0_SIZE
| FATTR4_WORD0_FSID
| FATTR4_WORD0_FILEID
| FATTR4_WORD0_FS_LOCATIONS,
FATTR4_WORD1_MODE
| FATTR4_WORD1_NUMLINKS
| FATTR4_WORD1_OWNER
| FATTR4_WORD1_OWNER_GROUP
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
| FATTR4_WORD1_TIME_METADATA
| FATTR4_WORD1_TIME_MODIFY
| FATTR4_WORD1_MOUNTED_ON_FILEID
};
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
struct nfs4_readdir_arg *readdir)
{
__be32 *start, *p;
BUG_ON(readdir->count < 80);
if (cookie > 2) {
readdir->cookie = cookie;
memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
return;
}
readdir->cookie = 0;
memset(&readdir->verifier, 0, sizeof(readdir->verifier));
if (cookie == 2)
return;
/*
* NFSv4 servers do not return entries for '.' and '..'.
* Therefore, we fake these entries here. We let '.'
* have cookie 0 and '..' have cookie 1. Note that
* when talking to the server, we always send cookie 0
* instead of 1 or 2.
*/
start = p = kmap_atomic(*readdir->pages);
if (cookie == 0) {
*p++ = xdr_one; /* next */
*p++ = xdr_zero; /* cookie, first word */
*p++ = xdr_one; /* cookie, second word */
*p++ = xdr_one; /* entry len */
memcpy(p, ".\0\0\0", 4); /* entry */
p++;
*p++ = xdr_one; /* bitmap length */
*p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
*p++ = htonl(8); /* attribute buffer length */
p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
}
*p++ = xdr_one; /* next */
*p++ = xdr_zero; /* cookie, first word */
*p++ = xdr_two; /* cookie, second word */
*p++ = xdr_two; /* entry len */
memcpy(p, "..\0\0", 4); /* entry */
p++;
*p++ = xdr_one; /* bitmap length */
*p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
*p++ = htonl(8); /* attribute buffer length */
p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
readdir->pgbase = (char *)p - (char *)start;
readdir->count -= readdir->pgbase;
kunmap_atomic(start);
}
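/* Wait, killably, until the state manager has finished any
* recovery that is currently in progress. */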
static int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
int res;
might_sleep();
res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
nfs_wait_bit_killable, TASK_KILLABLE);
return res;
}
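/* Sleep between retries; the timeout doubles on each call and is
* clamped to the [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX] range. */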
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
int res = 0;
might_sleep();
if (*timeout <= 0)
*timeout = NFS4_POLL_RETRY_MIN;
if (*timeout > NFS4_POLL_RETRY_MAX)
*timeout = NFS4_POLL_RETRY_MAX;
freezable_schedule_timeout_killable_unsafe(*timeout);
if (fatal_signal_pending(current))
res = -ERESTARTSYS;
*timeout <<= 1;
return res;
}
/* This is the error handling routine for processes that are allowed
* to sleep.
*/
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state *state = exception->state;
struct inode *inode = exception->inode;
int ret = errorcode;
exception->retry = 0;
switch(errorcode) {
case 0:
return 0;
case -NFS4ERR_OPENMODE:
if (inode && nfs_have_delegation(inode, FMODE_READ)) {
nfs_inode_return_delegation(inode);
exception->retry = 1;
return 0;
}
if (state == NULL)
break;
nfs4_schedule_stateid_recovery(server, state);
goto wait_on_recovery;
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
if (state == NULL)
break;
nfs_remove_bad_delegation(state->inode);
nfs4_schedule_stateid_recovery(server, state);
goto wait_on_recovery;
case -NFS4ERR_EXPIRED:
if (state != NULL)
nfs4_schedule_stateid_recovery(server, state);
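/* fall through: an expired stateid also requires lease recovery */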
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_STALE_CLIENTID:
nfs4_schedule_lease_recovery(clp);
goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR: %d Reset session\n", __func__,
errorcode);
nfs4_schedule_session_recovery(clp->cl_session);
exception->retry = 1;
break;
#endif /* defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
if (exception->timeout > HZ) {
/* We have retried a decent amount, time to
* fail
*/
ret = -EBUSY;
break;
}
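/* fall through: retry after a delay */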
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
case -EKEYEXPIRED:
ret = nfs4_delay(server->client, &exception->timeout);
if (ret != 0)
break;
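/* fall through */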
case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_OLD_STATEID:
exception->retry = 1;
break;
case -NFS4ERR_BADOWNER:
/* The following works around a Linux server bug! */
case -NFS4ERR_BADNAME:
if (server->caps & NFS_CAP_UIDGID_NOMAP) {
server->caps &= ~NFS_CAP_UIDGID_NOMAP;
exception->retry = 1;
printk(KERN_WARNING "NFS: v4 server %s "
"does not accept raw "
"uid/gids. "
"Reenabling the idmapper.\n",
server->nfs_client->cl_hostname);
}
}
/* We failed to handle the error */
return nfs4_map_errors(ret);
wait_on_recovery:
ret = nfs4_wait_clnt_recover(clp);
if (ret == 0)
exception->retry = 1;
return ret;
}
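/* Record the time of the most recent lease renewal, never moving
* it backwards. */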
static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
spin_lock(&clp->cl_lock);
if (time_before(clp->cl_last_renewal, timestamp))
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
}
static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
do_renew_lease(server->nfs_client, timestamp);
}
#if defined(CONFIG_NFS_V4_1)
/*
* nfs4_free_slot - free a slot and efficiently update slot table.
*
* freeing a slot is trivially done by clearing its respective bit
* in the bitmap.
* If the freed slotid equals highest_used_slotid we want to update it
* so that the server would be able to size down the slot table if needed,
* otherwise we know that the highest_used_slotid is still in use.
* When updating highest_used_slotid there may be "holes" in the bitmap
* so we need to scan down from highest_used_slotid to 0 looking for the now
* highest slotid in use.
* If none found, highest_used_slotid is set to NFS4_NO_SLOT.
*
* Must be called while holding tbl->slot_tbl_lock
*/
static void
nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
/* clear used bit in bitmap */
__clear_bit(slotid, tbl->used_slots);
/* update highest_used_slotid when it is freed */
if (slotid == tbl->highest_used_slotid) {
slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
if (slotid < tbl->max_slots)
tbl->highest_used_slotid = slotid;
else
tbl->highest_used_slotid = NFS4_NO_SLOT;
}
dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
slotid, tbl->highest_used_slotid);
}
bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
{
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
return true;
}
/*
* Signal state manager thread if session fore channel is drained
*/
static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
{
if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
nfs4_set_task_privileged, NULL);
return;
}
if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
return;
dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
complete(&ses->fc_slot_table.complete);
}
/*
* Signal state manager thread if session back channel is drained
*/
void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
{
if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
return;
dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
complete(&ses->bc_slot_table.complete);
}
static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
struct nfs4_slot_table *tbl;
tbl = &res->sr_session->fc_slot_table;
if (!res->sr_slot) {
/* just wake up the next waiter, since
* we may not have consumed a slot after all */
dprintk("%s: No slot\n", __func__);
return;
}
spin_lock(&tbl->slot_tbl_lock);
nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
nfs4_check_drain_fc_complete(res->sr_session);
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slot = NULL;
}
static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
unsigned long timestamp;
struct nfs_client *clp;
/*
* sr_status remains 1 if an RPC level error occurred. The server
* may or may not have processed the sequence operation.
* Proceed as if the server received and processed the sequence
* operation.
*/
if (res->sr_status == 1)
res->sr_status = NFS_OK;
/* don't increment the sequence number if the task wasn't sent */
if (!RPC_WAS_SENT(task))
goto out;
/* Check the SEQUENCE operation status */
switch (res->sr_status) {
case 0:
/* Update the slot's sequence and clientid lease timer */
++res->sr_slot->seq_nr;
timestamp = res->sr_renewal_time;
clp = res->sr_session->clp;
do_renew_lease(clp, timestamp);
/* Check sequence flags */
if (res->sr_status_flags != 0)
nfs4_schedule_lease_recovery(clp);
break;
case -NFS4ERR_DELAY:
/* The server detected a resend of the RPC call and
* returned NFS4ERR_DELAY as per Section 2.10.6.2
* of RFC5661.
*/
dprintk("%s: slot=%td seq=%d: Operation in progress\n",
__func__,
res->sr_slot - res->sr_session->fc_slot_table.slots,
res->sr_slot->seq_nr);
goto out_retry;
default:
/* Just update the slot sequence no. */
++res->sr_slot->seq_nr;
}
out:
/* The session may be reset by one of the error handlers. */
dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
nfs41_sequence_free_slot(res);
return 1;
out_retry:
if (!rpc_restart_call(task))
goto out;
rpc_delay(task, NFS4_POLL_RETRY_MAX);
return 0;
}
static int nfs4_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
{
if (res->sr_session == NULL)
return 1;
return nfs41_sequence_done(task, res);
}
/*
* nfs4_find_slot - efficiently look for a free slot
*
* nfs4_find_slot looks for an unset bit in the used_slots bitmap.
* If found, we mark the slot as used, update the highest_used_slotid,
* and set up the sequence operation args accordingly.
* The slot number is returned if found, or NFS4_NO_SLOT otherwise.
*
* Note: must be called while holding the slot_tbl_lock.
*/
static u32
nfs4_find_slot(struct nfs4_slot_table *tbl)
{
u32 slotid;
u32 ret_id = NFS4_NO_SLOT;
dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
__func__, tbl->used_slots[0], tbl->highest_used_slotid,
tbl->max_slots);
slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
if (slotid >= tbl->max_slots)
goto out;
__set_bit(slotid, tbl->used_slots);
if (slotid > tbl->highest_used_slotid ||
tbl->highest_used_slotid == NFS4_NO_SLOT)
tbl->highest_used_slotid = slotid;
ret_id = slotid;
out:
dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
return ret_id;
}
static void nfs41_init_sequence(struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res, int cache_reply)
{
args->sa_session = NULL;
args->sa_cache_this = 0;
if (cache_reply)
args->sa_cache_this = 1;
res->sr_session = NULL;
res->sr_slot = NULL;
}
int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
struct rpc_task *task)
{
struct nfs4_slot *slot;
struct nfs4_slot_table *tbl;
u32 slotid;
dprintk("--> %s\n", __func__);
/* slot already allocated? */
if (res->sr_slot != NULL)
return 0;
tbl = &session->fc_slot_table;
spin_lock(&tbl->slot_tbl_lock);
if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
/* The state manager will wait until the slot table is empty */
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s session is draining\n", __func__);
return -EAGAIN;
}
if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s enforce FIFO order\n", __func__);
return -EAGAIN;
}
slotid = nfs4_find_slot(tbl);
if (slotid == NFS4_NO_SLOT) {
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
dprintk("<-- %s: no free slots\n", __func__);
return -EAGAIN;
}
spin_unlock(&tbl->slot_tbl_lock);
rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
slot = tbl->slots + slotid;
args->sa_session = session;
args->sa_slotid = slotid;
dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
res->sr_session = session;
res->sr_slot = slot;
res->sr_renewal_time = jiffies;
res->sr_status_flags = 0;
/*
* sr_status is only set in decode_sequence, and so will remain
* set to 1 if an rpc level failure occurs.
*/
res->sr_status = 1;
return 0;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
struct rpc_task *task)
{
struct nfs4_session *session = nfs4_get_session(server);
int ret = 0;
if (session == NULL)
goto out;
dprintk("--> %s clp %p session %p sr_slot %td\n",
__func__, session->clp, session, res->sr_slot ?
res->sr_slot - session->fc_slot_table.slots : -1);
ret = nfs41_setup_sequence(session, args, res, task);
out:
dprintk("<-- %s status=%d\n", __func__, ret);
return ret;
}
struct nfs41_call_sync_data {
const struct nfs_server *seq_server;
struct nfs4_sequence_args *seq_args;
struct nfs4_sequence_res *seq_res;
};
static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
struct nfs41_call_sync_data *data = calldata;
dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
if (nfs4_setup_sequence(data->seq_server, data->seq_args,
data->seq_res, task))
return;
rpc_call_start(task);
}
static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
{
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
nfs41_call_sync_prepare(task, calldata);
}
static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
struct nfs41_call_sync_data *data = calldata;
nfs41_sequence_done(task, data->seq_res);
}
static const struct rpc_call_ops nfs41_call_sync_ops = {
.rpc_call_prepare = nfs41_call_sync_prepare,
.rpc_call_done = nfs41_call_sync_done,
};
static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
.rpc_call_prepare = nfs41_call_priv_sync_prepare,
.rpc_call_done = nfs41_call_sync_done,
};
static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int privileged)
{
int ret;
struct rpc_task *task;
struct nfs41_call_sync_data data = {
.seq_server = server,
.seq_args = args,
.seq_res = res,
};
struct rpc_task_setup task_setup = {
.rpc_client = clnt,
.rpc_message = msg,
.callback_ops = &nfs41_call_sync_ops,
.callback_data = &data
};
if (privileged)
task_setup.callback_ops = &nfs41_call_priv_sync_ops;
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
ret = PTR_ERR(task);
else {
ret = task->tk_status;
rpc_put_task(task);
}
return ret;
}
int _nfs4_call_sync_session(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
nfs41_init_sequence(args, res, cache_reply);
return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
}
#else
static inline
void nfs41_init_sequence(struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res, int cache_reply)
{
}
static int nfs4_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
{
return 1;
}
#endif /* CONFIG_NFS_V4_1 */
int _nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
nfs41_init_sequence(args, res, cache_reply);
return rpc_call_sync(clnt, msg, 0);
}
static inline
int nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
args, res, cache_reply);
}
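/* Fold the change_info4 from a directory-modifying operation into the
* directory's cached attributes, forcing a lookup revalidation if the
* change was not applied atomically on the server. */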
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
struct nfs_inode *nfsi = NFS_I(dir);
spin_lock(&dir->i_lock);
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
if (!cinfo->atomic || cinfo->before != dir->i_version)
nfs_force_lookup_revalidate(dir);
dir->i_version = cinfo->after;
spin_unlock(&dir->i_lock);
}
struct nfs4_opendata {
struct kref kref;
struct nfs_openargs o_arg;
struct nfs_openres o_res;
struct nfs_open_confirmargs c_arg;
struct nfs_open_confirmres c_res;
struct nfs4_string owner_name;
struct nfs4_string group_name;
struct nfs_fattr f_attr;
struct nfs_fattr dir_attr;
struct dentry *dir;
struct dentry *dentry;
struct nfs4_state_owner *owner;
struct nfs4_state *state;
struct iattr attrs;
unsigned long timestamp;
unsigned int rpc_done : 1;
int rpc_status;
int cancelled;
};
static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
p->o_res.f_attr = &p->f_attr;
p->o_res.dir_attr = &p->dir_attr;
p->o_res.seqid = p->o_arg.seqid;
p->c_res.seqid = p->c_arg.seqid;
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
struct nfs4_state_owner *sp, fmode_t fmode, int flags,
const struct iattr *attrs,
gfp_t gfp_mask)
{
struct dentry *parent = dget_parent(dentry);
struct inode *dir = parent->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_opendata *p;
p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
goto err;
p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
if (p->o_arg.seqid == NULL)
goto err_free;
nfs_sb_active(dentry->d_sb);
p->dentry = dget(dentry);
p->dir = parent;
p->owner = sp;
atomic_inc(&sp->so_count);
p->o_arg.fh = NFS_FH(dir);
p->o_arg.open_flags = flags;
p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
p->o_arg.clientid = server->nfs_client->cl_clientid;
p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
p->o_arg.name = &dentry->d_name;
p->o_arg.server = server;
p->o_arg.bitmask = server->attr_bitmask;
p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
if (attrs != NULL && attrs->ia_valid != 0) {
__be32 verf[2];
p->o_arg.u.attrs = &p->attrs;
memcpy(&p->attrs, attrs, sizeof(p->attrs));
verf[0] = jiffies;
verf[1] = current->pid;
memcpy(p->o_arg.u.verifier.data, verf,
sizeof(p->o_arg.u.verifier.data));
}
p->c_arg.fh = &p->o_res.fh;
p->c_arg.stateid = &p->o_res.stateid;
p->c_arg.seqid = p->o_arg.seqid;
nfs4_init_opendata_res(p);
kref_init(&p->kref);
return p;
err_free:
kfree(p);
err:
dput(parent);
return NULL;
}
static void nfs4_opendata_free(struct kref *kref)
{
struct nfs4_opendata *p = container_of(kref,
struct nfs4_opendata, kref);
struct super_block *sb = p->dentry->d_sb;
nfs_free_seqid(p->o_arg.seqid);
if (p->state != NULL)
nfs4_put_open_state(p->state);
nfs4_put_state_owner(p->owner);
dput(p->dir);
dput(p->dentry);
nfs_sb_deactive(sb);
nfs_fattr_free_names(&p->f_attr);
kfree(p);
}
static void nfs4_opendata_put(struct nfs4_opendata *p)
{
if (p != NULL)
kref_put(&p->kref, nfs4_opendata_free);
}
static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
int ret;
ret = rpc_wait_for_completion_task(task);
return ret;
}
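/* Can an open in the given mode be satisfied from open state we
* already hold, without sending an OPEN to the server? */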
static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
int ret = 0;
if (open_mode & (O_EXCL|O_TRUNC))
goto out;
switch (mode & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
&& state->n_rdonly != 0;
break;
case FMODE_WRITE:
ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
&& state->n_wronly != 0;
break;
case FMODE_READ|FMODE_WRITE:
ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
&& state->n_rdwr != 0;
}
out:
return ret;
}
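/* Does the cached delegation cover the requested open mode? */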
static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
if (delegation == NULL)
return 0;
if ((delegation->type & fmode) != fmode)
return 0;
if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
return 0;
nfs_mark_delegation_referenced(delegation);
return 1;
}
static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
switch (fmode) {
case FMODE_WRITE:
state->n_wronly++;
break;
case FMODE_READ:
state->n_rdonly++;
break;
case FMODE_READ|FMODE_WRITE:
state->n_rdwr++;
}
nfs4_state_set_mode_locked(state, state->state | fmode);
}
static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
nfs4_stateid_copy(&state->stateid, stateid);
nfs4_stateid_copy(&state->open_stateid, stateid);
switch (fmode) {
case FMODE_READ:
set_bit(NFS_O_RDONLY_STATE, &state->flags);
break;
case FMODE_WRITE:
set_bit(NFS_O_WRONLY_STATE, &state->flags);
break;
case FMODE_READ|FMODE_WRITE:
set_bit(NFS_O_RDWR_STATE, &state->flags);
}
}
static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
write_seqlock(&state->seqlock);
nfs_set_open_stateid_locked(state, stateid, fmode);
write_sequnlock(&state->seqlock);
}
static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
/*
* Protect the call to nfs4_state_set_mode_locked and
* serialise the stateid update
*/
write_seqlock(&state->seqlock);
if (deleg_stateid != NULL) {
nfs4_stateid_copy(&state->stateid, deleg_stateid);
set_bit(NFS_DELEGATED_STATE, &state->flags);
}
if (open_stateid != NULL)
nfs_set_open_stateid_locked(state, open_stateid, fmode);
write_sequnlock(&state->seqlock);
spin_lock(&state->owner->so_lock);
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
}
static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *deleg_cur;
int ret = 0;
fmode &= (FMODE_READ|FMODE_WRITE);
rcu_read_lock();
deleg_cur = rcu_dereference(nfsi->delegation);
if (deleg_cur == NULL)
goto no_delegation;
spin_lock(&deleg_cur->lock);
if (nfsi->delegation != deleg_cur ||
(deleg_cur->type & fmode) != fmode)
goto no_delegation_unlock;
if (delegation == NULL)
delegation = &deleg_cur->stateid;
else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
goto no_delegation_unlock;
nfs_mark_delegation_referenced(deleg_cur);
__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
ret = 1;
no_delegation_unlock:
spin_unlock(&deleg_cur->lock);
no_delegation:
rcu_read_unlock();
if (!ret && open_stateid != NULL) {
__update_open_stateid(state, open_stateid, NULL, fmode);
ret = 1;
}
return ret;
}
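/* Return the delegation up front if it does not cover the
* requested open mode. */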
static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
struct nfs_delegation *delegation;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation == NULL || (delegation->type & fmode) == fmode) {
rcu_read_unlock();
return;
}
rcu_read_unlock();
nfs_inode_return_delegation(inode);
}
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
struct nfs4_state *state = opendata->state;
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *delegation;
int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
fmode_t fmode = opendata->o_arg.fmode;
nfs4_stateid stateid;
int ret = -EAGAIN;
for (;;) {
if (can_open_cached(state, fmode, open_mode)) {
spin_lock(&state->owner->so_lock);
if (can_open_cached(state, fmode, open_mode)) {
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
goto out_return_state;
}
spin_unlock(&state->owner->so_lock);
}
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
if (!can_open_delegated(delegation, fmode)) {
rcu_read_unlock();
break;
}
/* Save the delegation */
nfs4_stateid_copy(&stateid, &delegation->stateid);
rcu_read_unlock();
ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
if (ret != 0)
goto out;
ret = -EAGAIN;
/* Try to update the stateid using the delegation */
if (update_open_stateid(state, NULL, &stateid, fmode))
goto out_return_state;
}
out:
return ERR_PTR(ret);
out_return_state:
atomic_inc(&state->count);
return state;
}
static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
struct inode *inode;
struct nfs4_state *state = NULL;
struct nfs_delegation *delegation;
int ret;
if (!data->rpc_done) {
state = nfs4_try_open_cached(data);
goto out;
}
ret = -EAGAIN;
if (!(data->f_attr.valid & NFS_ATTR_FATTR))
goto err;
inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
ret = PTR_ERR(inode);
if (IS_ERR(inode))
goto err;
ret = -ENOMEM;
state = nfs4_get_open_state(inode, data->owner);
if (state == NULL)
goto err_put_inode;
if (data->o_res.delegation_type != 0) {
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
int delegation_flags = 0;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation)
delegation_flags = delegation->flags;
rcu_read_unlock();
if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
"returning a delegation for "
"OPEN(CLAIM_DELEGATE_CUR)\n",
clp->cl_hostname);
} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
nfs_inode_set_delegation(state->inode,
data->owner->so_cred,
&data->o_res);
else
nfs_inode_reclaim_delegation(state->inode,
data->owner->so_cred,
&data->o_res);
}
update_open_stateid(state, &data->o_res.stateid, NULL,
data->o_arg.fmode);
iput(inode);
out:
return state;
err_put_inode:
iput(inode);
err:
return ERR_PTR(ret);
}
static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_open_context *ctx;
spin_lock(&state->inode->i_lock);
list_for_each_entry(ctx, &nfsi->open_files, list) {
if (ctx->state != state)
continue;
get_nfs_open_context(ctx);
spin_unlock(&state->inode->i_lock);
return ctx;
}
spin_unlock(&state->inode->i_lock);
return ERR_PTR(-ENOENT);
}
static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs4_opendata *opendata;
opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
if (opendata == NULL)
return ERR_PTR(-ENOMEM);
opendata->state = state;
atomic_inc(&state->count);
return opendata;
}
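/* Re-send an OPEN for a single access mode during state recovery
* and hand the resulting nfs4_state back to the caller. */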
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
struct nfs4_state *newstate;
int ret;
opendata->o_arg.open_flags = 0;
opendata->o_arg.fmode = fmode;
memset(&opendata->o_res, 0, sizeof(opendata->o_res));
memset(&opendata->c_res, 0, sizeof(opendata->c_res));
nfs4_init_opendata_res(opendata);
ret = _nfs4_recover_proc_open(opendata);
if (ret != 0)
return ret;
newstate = nfs4_opendata_to_nfs4_state(opendata);
if (IS_ERR(newstate))
return PTR_ERR(newstate);
nfs4_close_state(newstate, fmode);
*res = newstate;
return 0;
}
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
struct nfs4_state *newstate;
int ret;
/* memory barrier prior to reading state->n_* */
clear_bit(NFS_DELEGATED_STATE, &state->flags);
smp_rmb();
if (state->n_rdwr != 0) {
clear_bit(NFS_O_RDWR_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
if (state->n_wronly != 0) {
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
if (state->n_rdonly != 0) {
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
/*
* We may have performed cached opens for all three recoveries.
* Check if we need to update the current stateid.
*/
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
!nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
write_seqlock(&state->seqlock);
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
nfs4_stateid_copy(&state->stateid, &state->open_stateid);
write_sequnlock(&state->seqlock);
}
return 0;
}
/*
* OPEN_RECLAIM:
* reclaim state on the server after a reboot.
*/
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs_delegation *delegation;
struct nfs4_opendata *opendata;
fmode_t delegation_type = 0;
int status;
opendata = nfs4_open_recoverdata_alloc(ctx, state);
if (IS_ERR(opendata))
return PTR_ERR(opendata);
opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
opendata->o_arg.fh = NFS_FH(state->inode);
rcu_read_lock();
delegation = rcu_dereference(NFS_I(state->inode)->delegation);
if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
delegation_type = delegation->type;
rcu_read_unlock();
opendata->o_arg.u.delegation_type = delegation_type;
status = nfs4_open_recover(opendata, state);
nfs4_opendata_put(opendata);
return status;
}
static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_do_open_reclaim(ctx, state);
if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
struct nfs_open_context *ctx;
int ret;
ctx = nfs4_state_find_open_context(state);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = nfs4_do_open_reclaim(ctx, state);
put_nfs_open_context(ctx);
return ret;
}
static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct nfs4_opendata *opendata;
int ret;
opendata = nfs4_open_recoverdata_alloc(ctx, state);
if (IS_ERR(opendata))
return PTR_ERR(opendata);
opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
ret = nfs4_open_recover(opendata, state);
nfs4_opendata_put(opendata);
return ret;
}
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct nfs4_exception exception = { };
struct nfs_server *server = NFS_SERVER(state->inode);
int err;
do {
err = _nfs4_open_delegation_recall(ctx, state, stateid);
switch (err) {
case 0:
case -ENOENT:
case -ESTALE:
goto out;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
nfs4_schedule_session_recovery(server->nfs_client->cl_session);
goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
/* Don't recall a delegation if it was lost */
nfs4_schedule_lease_recovery(server->nfs_client);
goto out;
case -ERESTARTSYS:
/*
* The show must go on: exit, but mark the
* stateid as needing recovery.
*/
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
nfs_inode_find_state_and_recover(state->inode,
stateid);
nfs4_schedule_stateid_recovery(server, state);
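/* fall through */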
case -EKEYEXPIRED:
/*
* User RPCSEC_GSS context has expired.
* We cannot recover this stateid now, so
* skip it and allow recovery thread to
* proceed.
*/
case -ENOMEM:
err = 0;
goto out;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
return err;
}
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
data->rpc_status = task->tk_status;
if (data->rpc_status == 0) {
nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
nfs_confirm_seqid(&data->owner->so_seqid, 0);
renew_lease(data->o_res.server, data->timestamp);
data->rpc_done = 1;
}
}
static void nfs4_open_confirm_release(void *calldata)
{
struct nfs4_opendata *data = calldata;
struct nfs4_state *state = NULL;
/* If this request hasn't been cancelled, do nothing */
if (data->cancelled == 0)
goto out_free;
/* In case of error, no cleanup! */
if (!data->rpc_done)
goto out_free;
state = nfs4_opendata_to_nfs4_state(data);
if (!IS_ERR(state))
nfs4_close_state(state, data->o_arg.fmode);
out_free:
nfs4_opendata_put(data);
}
static const struct rpc_call_ops nfs4_open_confirm_ops = {
.rpc_call_done = nfs4_open_confirm_done,
.rpc_release = nfs4_open_confirm_release,
};
/*
* Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
*/
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
.rpc_argp = &data->c_arg,
.rpc_resp = &data->c_res,
.rpc_cred = data->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_open_confirm_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status;
kref_get(&data->kref);
data->rpc_done = 0;
data->rpc_status = 0;
data->timestamp = jiffies;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0) {
data->cancelled = 1;
smp_wmb();
} else
status = data->rpc_status;
rpc_put_task(task);
return status;
}
static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
struct nfs4_state_owner *sp = data->owner;
if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
return;
/*
* Check if we still need to send an OPEN call, or if we can use
* a delegation instead.
*/
if (data->state != NULL) {
struct nfs_delegation *delegation;
if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
goto out_no_action;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
can_open_delegated(delegation, data->o_arg.fmode))
goto unlock_no_action;
rcu_read_unlock();
}
/* Update client id. */
data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
}
data->timestamp = jiffies;
if (nfs4_setup_sequence(data->o_arg.server,
&data->o_arg.seq_args,
&data->o_res.seq_res, task))
return;
rpc_call_start(task);
return;
unlock_no_action:
rcu_read_unlock();
out_no_action:
task->tk_action = NULL;
}
static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
{
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
nfs4_open_prepare(task, calldata);
}
static void nfs4_open_done(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
data->rpc_status = task->tk_status;
if (!nfs4_sequence_done(task, &data->o_res.seq_res))
return;
if (task->tk_status == 0) {
switch (data->o_res.f_attr->mode & S_IFMT) {
case S_IFREG:
break;
case S_IFLNK:
data->rpc_status = -ELOOP;
break;
case S_IFDIR:
data->rpc_status = -EISDIR;
break;
default:
data->rpc_status = -ENOTDIR;
}
renew_lease(data->o_res.server, data->timestamp);
if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
nfs_confirm_seqid(&data->owner->so_seqid, 0);
}
data->rpc_done = 1;
}
static void nfs4_open_release(void *calldata)
{
struct nfs4_opendata *data = calldata;
struct nfs4_state *state = NULL;
/* If this request hasn't been cancelled, do nothing */
if (data->cancelled == 0)
goto out_free;
/* In case of error, no cleanup! */
if (data->rpc_status != 0 || !data->rpc_done)
goto out_free;
/* In case we need an open_confirm, no cleanup! */
if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
goto out_free;
state = nfs4_opendata_to_nfs4_state(data);
if (!IS_ERR(state))
nfs4_close_state(state, data->o_arg.fmode);
out_free:
nfs4_opendata_put(data);
}
static const struct rpc_call_ops nfs4_open_ops = {
.rpc_call_prepare = nfs4_open_prepare,
.rpc_call_done = nfs4_open_done,
.rpc_release = nfs4_open_release,
};
static const struct rpc_call_ops nfs4_recover_open_ops = {
.rpc_call_prepare = nfs4_recover_open_prepare,
.rpc_call_done = nfs4_open_done,
.rpc_release = nfs4_open_release,
};
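/* Issue the OPEN RPC and wait for it to complete. If the wait is
* interrupted, mark the request cancelled so that the rpc_release
* callback can clean up any open state. */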
static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
{
struct inode *dir = data->dir->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_openargs *o_arg = &data->o_arg;
struct nfs_openres *o_res = &data->o_res;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
.rpc_argp = o_arg,
.rpc_resp = o_res,
.rpc_cred = data->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_open_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status;
nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
kref_get(&data->kref);
data->rpc_done = 0;
data->rpc_status = 0;
data->cancelled = 0;
if (isrecover)
task_setup_data.callback_ops = &nfs4_recover_open_ops;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0) {
data->cancelled = 1;
smp_wmb();
} else
status = data->rpc_status;
rpc_put_task(task);
return status;
}
static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
{
struct inode *dir = data->dir->d_inode;
struct nfs_openres *o_res = &data->o_res;
int status;
status = nfs4_run_open_task(data, 1);
if (status != 0 || !data->rpc_done)
return status;
nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
nfs_refresh_inode(dir, o_res->dir_attr);
if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
return status;
}
return status;
}
/*
* Note: On error, nfs4_proc_open will free the struct nfs4_opendata
*/
static int _nfs4_proc_open(struct nfs4_opendata *data)
{
struct inode *dir = data->dir->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_openargs *o_arg = &data->o_arg;
struct nfs_openres *o_res = &data->o_res;
int status;
status = nfs4_run_open_task(data, 0);
if (!data->rpc_done)
return status;
if (status != 0) {
if (status == -NFS4ERR_BADNAME &&
!(o_arg->open_flags & O_CREAT))
return -ENOENT;
return status;
}
nfs_fattr_map_and_free_names(server, &data->f_attr);
if (o_arg->open_flags & O_CREAT) {
update_changeattr(dir, &o_res->cinfo);
nfs_post_op_update_inode(dir, o_res->dir_attr);
} else
nfs_refresh_inode(dir, o_res->dir_attr);
if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
server->caps &= ~NFS_CAP_POSIX_LOCK;
if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
return status;
}
if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
return 0;
}
static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
{
unsigned int loop;
int ret;
for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
ret = nfs4_wait_clnt_recover(clp);
if (ret != 0)
break;
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
!test_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state))
break;
nfs4_schedule_state_manager(clp);
ret = -EIO;
}
return ret;
}
static int nfs4_recover_expired_lease(struct nfs_server *server)
{
return nfs4_client_recover_expired_lease(server->nfs_client);
}
/*
* OPEN_EXPIRED:
* reclaim state on the server after a network partition.
* Assumes caller holds the appropriate lock
*/
static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs4_opendata *opendata;
int ret;
opendata = nfs4_open_recoverdata_alloc(ctx, state);
if (IS_ERR(opendata))
return PTR_ERR(opendata);
ret = nfs4_open_recover(opendata, state);
if (ret == -ESTALE)
d_drop(ctx->dentry);
nfs4_opendata_put(opendata);
return ret;
}
static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_open_expired(ctx, state);
switch (err) {
default:
goto out;
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
nfs4_handle_exception(server, err, &exception);
err = 0;
}
} while (exception.retry);
out:
return err;
}
static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
struct nfs_open_context *ctx;
int ret;
ctx = nfs4_state_find_open_context(state);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = nfs4_do_open_expired(ctx, state);
put_nfs_open_context(ctx);
return ret;
}
#if defined(CONFIG_NFS_V4_1)
static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
{
int status = NFS_OK;
struct nfs_server *server = NFS_SERVER(state->inode);
if (state->flags & flags) {
status = nfs41_test_stateid(server, stateid);
if (status != NFS_OK) {
nfs41_free_stateid(server, stateid);
state->flags &= ~flags;
}
}
return status;
}
static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
int deleg_status, open_status;
int deleg_flags = 1 << NFS_DELEGATED_STATE;
int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE);
deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags);
open_status = nfs41_check_expired_stateid(state, &state->open_stateid, open_flags);
if ((deleg_status == NFS_OK) && (open_status == NFS_OK))
return NFS_OK;
return nfs4_open_expired(sp, state);
}
#endif
/*
* on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
* fields corresponding to attributes that were used to store the verifier.
* Make sure we clobber those fields in the later setattr call
*/
static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
{
if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
!(sattr->ia_valid & ATTR_ATIME_SET))
sattr->ia_valid |= ATTR_ATIME;
if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
!(sattr->ia_valid & ATTR_MTIME_SET))
sattr->ia_valid |= ATTR_MTIME;
}
/*
* Returns a referenced nfs4_state
*/
static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
{
struct nfs4_state_owner *sp;
struct nfs4_state *state = NULL;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_opendata *opendata;
int status;
/* Protect against reboot recovery conflicts */
status = -ENOMEM;
sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
if (sp == NULL) {
dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
goto out_err;
}
status = nfs4_recover_expired_lease(server);
if (status != 0)
goto err_put_state_owner;
if (dentry->d_inode != NULL)
nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
status = -ENOMEM;
opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
if (opendata == NULL)
goto err_put_state_owner;
if (dentry->d_inode != NULL)
opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
status = _nfs4_proc_open(opendata);
if (status != 0)
goto err_opendata_put;
state = nfs4_opendata_to_nfs4_state(opendata);
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
if (opendata->o_arg.open_flags & O_EXCL) {
nfs4_exclusive_attrset(opendata, sattr);
nfs_fattr_init(opendata->o_res.f_attr);
status = nfs4_do_setattr(state->inode, cred,
opendata->o_res.f_attr, sattr,
state);
if (status == 0)
nfs_setattr_update_inode(state->inode, sattr);
nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
}
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
*res = state;
return 0;
err_opendata_put:
nfs4_opendata_put(opendata);
err_put_state_owner:
nfs4_put_state_owner(sp);
out_err:
*res = NULL;
return status;
}
static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
{
struct nfs4_exception exception = { };
struct nfs4_state *res;
int status;
do {
status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
if (status == 0)
break;
/* NOTE: BAD_SEQID means the server and client disagree about the
* book-keeping w.r.t. state-changing operations
* (OPEN/CLOSE/LOCK/LOCKU...)
* It is actually a sign of a bug on the client or on the server.
*
* If we receive a BAD_SEQID error in the particular case of
* doing an OPEN, we assume that nfs_increment_open_seqid() will
* have unhashed the old state_owner for us, and that we can
* therefore safely retry using a new one. We should still warn
* the user though...
*/
if (status == -NFS4ERR_BAD_SEQID) {
pr_warn_ratelimited("NFS: v4 server %s "
" returned a bad sequence-id error!\n",
NFS_SERVER(dir)->nfs_client->cl_hostname);
exception.retry = 1;
continue;
}
/*
* BAD_STATEID on OPEN means that the server cancelled our
* state before it received the OPEN_CONFIRM.
* Recover by retrying the request as per the discussion
* on Page 181 of RFC3530.
*/
if (status == -NFS4ERR_BAD_STATEID) {
exception.retry = 1;
continue;
}
if (status == -EAGAIN) {
/* We must have found a delegation */
exception.retry = 1;
continue;
}
res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
status, &exception));
} while (exception.retry);
return res;
}
static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_setattrargs arg = {
.fh = NFS_FH(inode),
.iap = sattr,
.server = server,
.bitmask = server->attr_bitmask,
};
struct nfs_setattrres res = {
.fattr = fattr,
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
.rpc_argp = &arg,
.rpc_resp = &res,
.rpc_cred = cred,
};
unsigned long timestamp = jiffies;
int status;
nfs_fattr_init(fattr);
if (state != NULL) {
nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
current->files, current->tgid);
} else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
FMODE_WRITE)) {
/* Use that stateid */
} else
nfs4_stateid_copy(&arg.stateid, &zero_stateid);
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (status == 0 && state != NULL)
renew_lease(server, timestamp);
return status;
}
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_exception exception = {
.state = state,
.inode = inode,
};
int err;
do {
err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
switch (err) {
case -NFS4ERR_OPENMODE:
if (state && !(state->state & FMODE_WRITE)) {
err = -EBADF;
if (sattr->ia_valid & ATTR_OPEN)
err = -EACCES;
goto out;
}
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
return err;
}
struct nfs4_closedata {
struct inode *inode;
struct nfs4_state *state;
struct nfs_closeargs arg;
struct nfs_closeres res;
struct nfs_fattr fattr;
unsigned long timestamp;
bool roc;
u32 roc_barrier;
};
static void nfs4_free_closedata(void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state_owner *sp = calldata->state->owner;
struct super_block *sb = calldata->state->inode->i_sb;
if (calldata->roc)
pnfs_roc_release(calldata->state->inode);
nfs4_put_open_state(calldata->state);
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_state_owner(sp);
nfs_sb_deactive(sb);
kfree(calldata);
}
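/* Clear the open-state mode bits that are no longer covered after a
* CLOSE or OPEN_DOWNGRADE. */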
static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
fmode_t fmode)
{
spin_lock(&state->owner->so_lock);
if (!(fmode & FMODE_READ))
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
if (!(fmode & FMODE_WRITE))
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
clear_bit(NFS_O_RDWR_STATE, &state->flags);
spin_unlock(&state->owner->so_lock);
}
static void nfs4_close_done(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
struct nfs_server *server = NFS_SERVER(calldata->inode);
dprintk("%s: begin!\n", __func__);
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
return;
/* We are done with the inode, and in the process of freeing
* the state_owner. We keep the calldata around to process errors.
*/
switch (task->tk_status) {
case 0:
if (calldata->roc)
pnfs_roc_set_barrier(state->inode,
calldata->roc_barrier);
nfs_set_open_stateid(state, &calldata->res.stateid, 0);
renew_lease(server, calldata->timestamp);
nfs4_close_clear_stateid_flags(state,
calldata->arg.fmode);
break;
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_EXPIRED:
if (calldata->arg.fmode == 0)
break;
default:
if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
rpc_restart_call_prepare(task);
}
nfs_release_seqid(calldata->arg.seqid);
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
}
static void nfs4_close_prepare(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
int call_close = 0;
dprintk("%s: begin!\n", __func__);
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
spin_lock(&state->owner->so_lock);
/* Calculate the change in open mode */
if (state->n_rdwr == 0) {
if (state->n_rdonly == 0) {
call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
calldata->arg.fmode &= ~FMODE_READ;
}
if (state->n_wronly == 0) {
call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
calldata->arg.fmode &= ~FMODE_WRITE;
}
}
spin_unlock(&state->owner->so_lock);
if (!call_close) {
/* Note: exit _without_ calling nfs4_close_done */
task->tk_action = NULL;
goto out;
}
if (calldata->arg.fmode == 0) {
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
if (calldata->roc &&
pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
task, NULL);
goto out;
}
}
nfs_fattr_init(calldata->res.fattr);
calldata->timestamp = jiffies;
if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
&calldata->arg.seq_args,
&calldata->res.seq_res,
task))
goto out;
rpc_call_start(task);
out:
dprintk("%s: done!\n", __func__);
}
static const struct rpc_call_ops nfs4_close_ops = {
.rpc_call_prepare = nfs4_close_prepare,
.rpc_call_done = nfs4_close_done,
.rpc_release = nfs4_free_closedata,
};
/*
* It is possible for data to be read/written from a mem-mapped file
* after the sys_close call (which hits the vfs layer as a flush).
* This means that we can't safely call nfsv4 close on a file until
* the inode is cleared. This in turn means that we are not good
* NFSv4 citizens - we do not indicate to the server to update the file's
* share state even when we are done with one of the three share
* stateid's in the inode.
*
* NOTE: Caller must be holding the sp->so_owner semaphore!
*/
int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_closedata *calldata;
struct nfs4_state_owner *sp = state->owner;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
.rpc_cred = state->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_close_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status = -ENOMEM;
calldata = kzalloc(sizeof(*calldata), gfp_mask);
if (calldata == NULL)
goto out;
nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
calldata->inode = state->inode;
calldata->state = state;
calldata->arg.fh = NFS_FH(state->inode);
calldata->arg.stateid = &state->open_stateid;
/* Serialization for the sequence id */
calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
if (calldata->arg.seqid == NULL)
goto out_free_calldata;
calldata->arg.fmode = 0;
calldata->arg.bitmask = server->cache_consistency_bitmask;
calldata->res.fattr = &calldata->fattr;
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
calldata->roc = roc;
nfs_sb_active(calldata->inode->i_sb);
msg.rpc_argp = &calldata->arg;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = 0;
if (wait)
status = rpc_wait_for_completion_task(task);
rpc_put_task(task);
return status;
out_free_calldata:
kfree(calldata);
out:
if (roc)
pnfs_roc_release(state->inode);
nfs4_put_open_state(state);
nfs4_put_state_owner(sp);
return status;
}
static struct inode *
nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
{
struct nfs4_state *state;
/* Protect against concurrent sillydeletes */
state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
if (IS_ERR(state))
return ERR_CAST(state);
ctx->state = state;
return igrab(state->inode);
}
static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
{
if (ctx->state == NULL)
return;
if (is_sync)
nfs4_close_sync(ctx->state, ctx->mode);
else
nfs4_close_state(ctx->state, ctx->mode);
}
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
struct nfs4_server_caps_arg args = {
.fhandle = fhandle,
};
struct nfs4_server_caps_res res = {};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (status == 0) {
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
NFS_CAP_CTIME|NFS_CAP_MTIME);
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
server->caps |= NFS_CAP_ACLS;
if (res.has_links != 0)
server->caps |= NFS_CAP_HARDLINKS;
if (res.has_symlinks != 0)
server->caps |= NFS_CAP_SYMLINKS;
if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
server->caps |= NFS_CAP_FILEID;
if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
server->caps |= NFS_CAP_MODE;
if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
server->caps |= NFS_CAP_NLINK;
if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
server->caps |= NFS_CAP_OWNER;
if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
server->caps |= NFS_CAP_OWNER_GROUP;
if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
server->caps |= NFS_CAP_ATIME;
if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
server->caps |= NFS_CAP_CTIME;
if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
server->caps |= NFS_CAP_MTIME;
memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
server->acl_bitmask = res.acl_bitmask;
server->fh_expire_type = res.fh_expire_type;
}
return status;
}
int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_server_capabilities(server, fhandle),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
struct nfs4_lookup_root_arg args = {
.bitmask = nfs4_fattr_bitmap,
};
struct nfs4_lookup_res res = {
.server = server,
.fattr = info->fattr,
.fh = fhandle,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(info->fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_lookup_root(server, fhandle, info);
switch (err) {
case 0:
case -NFS4ERR_WRONGSEC:
goto out;
default:
err = nfs4_handle_exception(server, err, &exception);
}
} while (exception.retry);
out:
return err;
}
static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info, rpc_authflavor_t flavor)
{
struct rpc_auth *auth;
int ret;
auth = rpcauth_create(flavor, server->client);
if (IS_ERR(auth)) {
ret = -EIO;
goto out;
}
ret = nfs4_lookup_root(server, fhandle, info);
out:
return ret;
}
static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
int i, len, status = 0;
rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
len = gss_mech_list_pseudoflavors(&flav_array[0]);
flav_array[len] = RPC_AUTH_NULL;
len += 1;
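/* Try each GSS pseudoflavor in turn, with AUTH_NULL as the last resort */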
for (i = 0; i < len; i++) {
status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
continue;
break;
}
/*
* -EACCES could mean that the user doesn't have correct permissions
* to access the mount. It could also mean that we tried to mount
* with a gss auth flavor, but rpc.gssd isn't running. Either way,
* existing mount programs don't handle -EACCES very well so it should
* be mapped to -EPERM instead.
*/
if (status == -EACCES)
status = -EPERM;
return status;
}
/*
* get the file handle for the "/" directory on the server
*/
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
int minor_version = server->nfs_client->cl_minorversion;
int status = nfs4_lookup_root(server, fhandle, info);
if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
/*
* A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
* by nfs4_map_errors() as this function exits.
*/
status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
if (status == 0)
status = nfs4_server_capabilities(server, fhandle);
if (status == 0)
status = nfs4_do_fsinfo(server, fhandle, info);
return nfs4_map_errors(status);
}
/*
* Get locations and (maybe) other attributes of a referral.
* Note that we'll actually follow the referral later when
* we detect fsid mismatch in inode revalidation
*/
static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
const struct qstr *name, struct nfs_fattr *fattr,
struct nfs_fh *fhandle)
{
int status = -ENOMEM;
struct page *page = NULL;
struct nfs4_fs_locations *locations = NULL;
page = alloc_page(GFP_KERNEL);
if (page == NULL)
goto out;
locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
if (locations == NULL)
goto out;
status = nfs4_proc_fs_locations(client, dir, name, locations, page);
if (status != 0)
goto out;
/* Make sure server returned a different fsid for the referral */
if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
dprintk("%s: server did not return a different fsid for"
" a referral at %s\n", __func__, name->name);
status = -EIO;
goto out;
}
/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
nfs_fixup_referral_attributes(&locations->fattr);
/* replace the lookup nfs_fattr with the locations nfs_fattr */
memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
memset(fhandle, 0, sizeof(struct nfs_fh));
out:
if (page)
__free_page(page);
kfree(locations);
return status;
}
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_getattr_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_getattr_res res = {
.fattr = fattr,
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_getattr(server, fhandle, fattr),
&exception);
} while (exception.retry);
return err;
}
/*
* The file is not closed if it is opened due to a request to change
* the size of the file. The open call will not be needed once the
* VFS layer lookup-intents are implemented.
*
* Close is called when the inode is destroyed.
* If we haven't opened the file for O_WRONLY, we
* need to in the size_change case to obtain a stateid.
*
* Got race?
* Because OPEN is always done by name in nfsv4, it is
* possible that we opened a different file by the same
* name. We can recognize this race condition, but we
* can't do anything about it besides returning an error.
*
* This will be fixed with VFS changes (lookup-intent).
*/
static int
nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
struct iattr *sattr)
{
struct inode *inode = dentry->d_inode;
struct rpc_cred *cred = NULL;
struct nfs4_state *state = NULL;
int status;
if (pnfs_ld_layoutret_on_setattr(inode))
pnfs_return_layout(inode);
nfs_fattr_init(fattr);
/* Search for an existing open(O_WRITE) file */
if (sattr->ia_valid & ATTR_FILE) {
struct nfs_open_context *ctx;
ctx = nfs_file_open_context(sattr->ia_file);
if (ctx) {
cred = ctx->cred;
state = ctx->state;
}
}
/* Deal with open(O_TRUNC) */
if (sattr->ia_valid & ATTR_OPEN)
sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
if (status == 0)
nfs_setattr_update_inode(inode, sattr);
return status;
}
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
const struct qstr *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr)
{
struct nfs_server *server = NFS_SERVER(dir);
int status;
struct nfs4_lookup_arg args = {
.bitmask = server->attr_bitmask,
.dir_fh = NFS_FH(dir),
.name = name,
};
struct nfs4_lookup_res res = {
.server = server,
.fattr = fattr,
.fh = fhandle,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(fattr);
dprintk("NFS call lookup %s\n", name->name);
status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("NFS reply lookup: %d\n", status);
return status;
}
static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
{
fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
fattr->nlink = 2;
}
static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
struct qstr *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr)
{
struct nfs4_exception exception = { };
struct rpc_clnt *client = *clnt;
int err;
do {
err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
switch (err) {
case -NFS4ERR_BADNAME:
err = -ENOENT;
goto out;
case -NFS4ERR_MOVED:
err = nfs4_get_referral(client, dir, name, fattr, fhandle);
goto out;
case -NFS4ERR_WRONGSEC:
err = -EPERM;
if (client != *clnt)
goto out;
client = nfs4_create_sec_client(client, dir, name);
if (IS_ERR(client))
return PTR_ERR(client);
exception.retry = 1;
break;
default:
err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
}
} while (exception.retry);
out:
if (err == 0)
*clnt = client;
else if (client != *clnt)
rpc_shutdown_client(client);
return err;
}
static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int status;
struct rpc_clnt *client = NFS_CLIENT(dir);
status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
if (client != NFS_CLIENT(dir)) {
rpc_shutdown_client(client);
nfs_fixup_secinfo_attributes(fattr);
}
return status;
}
struct rpc_clnt *
nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int status;
struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
if (status < 0) {
rpc_shutdown_client(client);
return ERR_PTR(status);
}
return client;
}
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_accessargs args = {
.fh = NFS_FH(inode),
.bitmask = server->cache_consistency_bitmask,
};
struct nfs4_accessres res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
.rpc_argp = &args,
.rpc_resp = &res,
.rpc_cred = entry->cred,
};
int mode = entry->mask;
int status;
/*
* Determine which access bits we want to ask for...
*/
if (mode & MAY_READ)
args.access |= NFS4_ACCESS_READ;
if (S_ISDIR(inode->i_mode)) {
if (mode & MAY_WRITE)
args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
if (mode & MAY_EXEC)
args.access |= NFS4_ACCESS_LOOKUP;
} else {
if (mode & MAY_WRITE)
args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
if (mode & MAY_EXEC)
args.access |= NFS4_ACCESS_EXECUTE;
}
res.fattr = nfs_alloc_fattr();
if (res.fattr == NULL)
return -ENOMEM;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (!status) {
entry->mask = 0;
if (res.access & NFS4_ACCESS_READ)
entry->mask |= MAY_READ;
if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
entry->mask |= MAY_WRITE;
if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
entry->mask |= MAY_EXEC;
nfs_refresh_inode(inode, res.fattr);
}
nfs_free_fattr(res.fattr);
return status;
}
static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
_nfs4_proc_access(inode, entry),
&exception);
} while (exception.retry);
return err;
}
/*
* TODO: For the time being, we don't try to get any attributes
* along with any of the zero-copy operations READ, READDIR,
* READLINK, WRITE.
*
* In the case of the first three, we want to put the GETATTR
* after the read-type operation -- this is because it is hard
* to predict the length of a GETATTR response in v4, and thus
* align the READ data correctly. This means that the GETATTR
* may end up partially falling into the page cache, and we should
* shift it into the 'tail' of the xdr_buf before processing.
* To do this efficiently, we need to know the total length
* of data received, which doesn't seem to be available outside
* of the RPC layer.
*
* In the case of WRITE, we also want to put the GETATTR after
* the operation -- in this case because we want to make sure
* we get the post-operation mtime and size. This means that
* we can't use xdr_encode_pages() as written: we need a variant
* of it which would leave room in the 'tail' iovec.
*
* Both of these changes to the XDR layer would in fact be quite
* minor, but I decided to leave them for a subsequent patch.
*/
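/*
 * For reference, sunrpc's struct xdr_buf carries three regions: a
 * "head" kvec, a page array for bulk data, and a "tail" kvec.  The
 * trailing GETATTR described above would have to be shifted out of
 * the page array and into that tail kvec before it could be decoded.
 */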
static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
unsigned int pgbase, unsigned int pglen)
{
struct nfs4_readlink args = {
.fh = NFS_FH(inode),
.pgbase = pgbase,
.pglen = pglen,
.pages = &page,
};
struct nfs4_readlink_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
.rpc_argp = &args,
.rpc_resp = &res,
};
return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_readlink(struct inode *inode, struct page *page,
unsigned int pgbase, unsigned int pglen)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
_nfs4_proc_readlink(inode, page, pgbase, pglen),
&exception);
} while (exception.retry);
return err;
}
/*
* Got race?
* We will need to arrange for the VFS layer to provide an atomic open.
* Until then, this create/open method is prone to inefficiency and race
* conditions due to the lookup, create, and open VFS calls from sys_open()
* placed on the wire.
*
* Given the above sorry state of affairs, I'm simply sending an OPEN.
* The file will be opened again in the subsequent VFS open call
* (nfs4_proc_file_open).
*
* The open for read will just hang around to be used by any process that
* opens the file O_RDONLY. This will all be resolved with the VFS changes.
*/
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags, struct nfs_open_context *ctx)
{
struct dentry *de = dentry;
struct nfs4_state *state;
struct rpc_cred *cred = NULL;
fmode_t fmode = 0;
int status = 0;
if (ctx != NULL) {
cred = ctx->cred;
de = ctx->dentry;
fmode = ctx->mode;
}
sattr->ia_mode &= ~current_umask();
state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
d_drop(dentry);
if (IS_ERR(state)) {
status = PTR_ERR(state);
goto out;
}
d_add(dentry, igrab(state->inode));
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
if (ctx != NULL)
ctx->state = state;
else
nfs4_close_sync(state, fmode);
out:
return status;
}
static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_removeargs args = {
.fh = NFS_FH(dir),
.name.len = name->len,
.name.name = name->name,
.bitmask = server->attr_bitmask,
};
struct nfs_removeres res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status = -ENOMEM;
res.dir_attr = nfs_alloc_fattr();
if (res.dir_attr == NULL)
goto out;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
}
nfs_free_fattr(res.dir_attr);
out:
return status;
}
static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_remove(dir, name),
&exception);
} while (exception.retry);
return err;
}
static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_removeargs *args = msg->rpc_argp;
struct nfs_removeres *res = msg->rpc_resp;
args->bitmask = server->cache_consistency_bitmask;
res->server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
}
static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
{
if (nfs4_setup_sequence(NFS_SERVER(data->dir),
&data->args.seq_args,
&data->res.seq_res,
task))
return;
rpc_call_start(task);
}
static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
struct nfs_removeres *res = task->tk_msg.rpc_resp;
if (!nfs4_sequence_done(task, &res->seq_res))
return 0;
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
update_changeattr(dir, &res->cinfo);
nfs_post_op_update_inode(dir, res->dir_attr);
return 1;
}
static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_renameargs *arg = msg->rpc_argp;
struct nfs_renameres *res = msg->rpc_resp;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
arg->bitmask = server->attr_bitmask;
res->server = server;
nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
}
static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
{
if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
&data->args.seq_args,
&data->res.seq_res,
task))
return;
rpc_call_start(task);
}
static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
struct inode *new_dir)
{
struct nfs_renameres *res = task->tk_msg.rpc_resp;
if (!nfs4_sequence_done(task, &res->seq_res))
return 0;
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
update_changeattr(old_dir, &res->old_cinfo);
nfs_post_op_update_inode(old_dir, res->old_fattr);
update_changeattr(new_dir, &res->new_cinfo);
nfs_post_op_update_inode(new_dir, res->new_fattr);
return 1;
}
static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
struct nfs_server *server = NFS_SERVER(old_dir);
struct nfs_renameargs arg = {
.old_dir = NFS_FH(old_dir),
.new_dir = NFS_FH(new_dir),
.old_name = old_name,
.new_name = new_name,
.bitmask = server->attr_bitmask,
};
struct nfs_renameres res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
.rpc_argp = &arg,
.rpc_resp = &res,
};
int status = -ENOMEM;
res.old_fattr = nfs_alloc_fattr();
res.new_fattr = nfs_alloc_fattr();
if (res.old_fattr == NULL || res.new_fattr == NULL)
goto out;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
nfs_post_op_update_inode(old_dir, res.old_fattr);
update_changeattr(new_dir, &res.new_cinfo);
nfs_post_op_update_inode(new_dir, res.new_fattr);
}
out:
nfs_free_fattr(res.new_fattr);
nfs_free_fattr(res.old_fattr);
return status;
}
static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(old_dir),
_nfs4_proc_rename(old_dir, old_name,
new_dir, new_name),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_link_arg arg = {
.fh = NFS_FH(inode),
.dir_fh = NFS_FH(dir),
.name = name,
.bitmask = server->attr_bitmask,
};
struct nfs4_link_res res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
.rpc_argp = &arg,
.rpc_resp = &res,
};
int status = -ENOMEM;
res.fattr = nfs_alloc_fattr();
res.dir_attr = nfs_alloc_fattr();
if (res.fattr == NULL || res.dir_attr == NULL)
goto out;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
nfs_post_op_update_inode(inode, res.fattr);
}
out:
nfs_free_fattr(res.dir_attr);
nfs_free_fattr(res.fattr);
return status;
}
static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
_nfs4_proc_link(inode, dir, name),
&exception);
} while (exception.retry);
return err;
}
struct nfs4_createdata {
struct rpc_message msg;
struct nfs4_create_arg arg;
struct nfs4_create_res res;
struct nfs_fh fh;
struct nfs_fattr fattr;
struct nfs_fattr dir_fattr;
};
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
struct qstr *name, struct iattr *sattr, u32 ftype)
{
struct nfs4_createdata *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data != NULL) {
struct nfs_server *server = NFS_SERVER(dir);
data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
data->msg.rpc_argp = &data->arg;
data->msg.rpc_resp = &data->res;
data->arg.dir_fh = NFS_FH(dir);
data->arg.server = server;
data->arg.name = name;
data->arg.attrs = sattr;
data->arg.ftype = ftype;
data->arg.bitmask = server->attr_bitmask;
data->res.server = server;
data->res.fh = &data->fh;
data->res.fattr = &data->fattr;
data->res.dir_fattr = &data->dir_fattr;
nfs_fattr_init(data->res.fattr);
nfs_fattr_init(data->res.dir_fattr);
}
return data;
}
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
&data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &data->res.dir_cinfo);
nfs_post_op_update_inode(dir, data->res.dir_fattr);
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
}
return status;
}
static void nfs4_free_createdata(struct nfs4_createdata *data)
{
kfree(data);
}
static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
struct page *page, unsigned int len, struct iattr *sattr)
{
struct nfs4_createdata *data;
int status = -ENAMETOOLONG;
if (len > NFS4_MAXPATHLEN)
goto out;
status = -ENOMEM;
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
if (data == NULL)
goto out;
data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
data->arg.u.symlink.pages = &page;
data->arg.u.symlink.len = len;
status = nfs4_do_create(dir, dentry, data);
nfs4_free_createdata(data);
out:
return status;
}
static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
struct page *page, unsigned int len, struct iattr *sattr)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_symlink(dir, dentry, page,
len, sattr),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
struct iattr *sattr)
{
struct nfs4_createdata *data;
int status = -ENOMEM;
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
if (data == NULL)
goto out;
status = nfs4_do_create(dir, dentry, data);
nfs4_free_createdata(data);
out:
return status;
}
static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
struct iattr *sattr)
{
struct nfs4_exception exception = { };
int err;
sattr->ia_mode &= ~current_umask();
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_mkdir(dir, dentry, sattr),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct inode *dir = dentry->d_inode;
struct nfs4_readdir_arg args = {
.fh = NFS_FH(dir),
.pages = pages,
.pgbase = 0,
.count = count,
.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
.plus = plus,
};
struct nfs4_readdir_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
.rpc_argp = &args,
.rpc_resp = &res,
.rpc_cred = cred,
};
int status;
dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
dentry->d_parent->d_name.name,
dentry->d_name.name,
(unsigned long long)cookie);
nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
res.pgbase = args.pgbase;
status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
if (status >= 0) {
memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
status += args.pgbase;
}
nfs_invalidate_atime(dir);
dprintk("%s: returns %d\n", __func__, status);
return status;
}
static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
_nfs4_proc_readdir(dentry, cred, cookie,
pages, count, plus),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
struct iattr *sattr, dev_t rdev)
{
struct nfs4_createdata *data;
int mode = sattr->ia_mode;
int status = -ENOMEM;
BUG_ON(!(sattr->ia_valid & ATTR_MODE));
BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
if (data == NULL)
goto out;
if (S_ISFIFO(mode))
data->arg.ftype = NF4FIFO;
else if (S_ISBLK(mode)) {
data->arg.ftype = NF4BLK;
data->arg.u.device.specdata1 = MAJOR(rdev);
data->arg.u.device.specdata2 = MINOR(rdev);
}
else if (S_ISCHR(mode)) {
data->arg.ftype = NF4CHR;
data->arg.u.device.specdata1 = MAJOR(rdev);
data->arg.u.device.specdata2 = MINOR(rdev);
}
status = nfs4_do_create(dir, dentry, data);
nfs4_free_createdata(data);
out:
return status;
}
static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
struct iattr *sattr, dev_t rdev)
{
struct nfs4_exception exception = { };
int err;
sattr->ia_mode &= ~current_umask();
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_mknod(dir, dentry, sattr, rdev),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsstat *fsstat)
{
struct nfs4_statfs_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_statfs_res res = {
.fsstat = fsstat,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(fsstat->fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_statfs(server, fhandle, fsstat),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *fsinfo)
{
struct nfs4_fsinfo_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_fsinfo_res res = {
.fsinfo = fsinfo,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
.rpc_argp = &args,
.rpc_resp = &res,
};
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_do_fsinfo(server, fhandle, fsinfo),
&exception);
} while (exception.retry);
return err;
}
static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
nfs_fattr_init(fsinfo->fattr);
return nfs4_do_fsinfo(server, fhandle, fsinfo);
}
static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_pathconf *pathconf)
{
struct nfs4_pathconf_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_pathconf_res res = {
.pathconf = pathconf,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
.rpc_argp = &args,
.rpc_resp = &res,
};
/* None of the pathconf attributes are mandatory to implement */
if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
memset(pathconf, 0, sizeof(*pathconf));
return 0;
}
nfs_fattr_init(pathconf->fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_pathconf *pathconf)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_pathconf(server, fhandle, pathconf),
&exception);
} while (exception.retry);
return err;
}
void __nfs4_read_done_cb(struct nfs_read_data *data)
{
nfs_invalidate_atime(data->inode);
}
static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
struct nfs_server *server = NFS_SERVER(data->inode);
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
rpc_restart_call_prepare(task);
return -EAGAIN;
}
__nfs4_read_done_cb(data);
if (task->tk_status > 0)
renew_lease(server, data->timestamp);
return 0;
}
static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
dprintk("--> %s\n", __func__);
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
return data->read_done_cb ? data->read_done_cb(task, data) :
nfs4_read_done_cb(task, data);
}
static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
data->timestamp = jiffies;
data->read_done_cb = nfs4_read_done_cb;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
}
static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
{
if (nfs4_setup_sequence(NFS_SERVER(data->inode),
&data->args.seq_args,
&data->res.seq_res,
task))
return;
rpc_call_start(task);
}
/* Reset the nfs_read_data to send the read to the MDS. */
void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
{
dprintk("%s Reset task for i/o through\n", __func__);
put_lseg(data->lseg);
data->lseg = NULL;
/* offsets will differ in the dense stripe case */
data->args.offset = data->mds_offset;
data->ds_clp = NULL;
data->args.fh = NFS_FH(data->inode);
data->read_done_cb = nfs4_read_done_cb;
task->tk_ops = data->mds_ops;
rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_read);
static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
rpc_restart_call_prepare(task);
return -EAGAIN;
}
if (task->tk_status >= 0) {
renew_lease(NFS_SERVER(inode), data->timestamp);
nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
}
return 0;
}
static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
return data->write_done_cb ? data->write_done_cb(task, data) :
nfs4_write_done_cb(task, data);
}
/* Reset the nfs_write_data to send the write to the MDS. */
void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
{
dprintk("%s Reset task for i/o through\n", __func__);
put_lseg(data->lseg);
data->lseg = NULL;
data->ds_clp = NULL;
data->write_done_cb = nfs4_write_done_cb;
data->args.fh = NFS_FH(data->inode);
data->args.bitmask = data->res.server->cache_consistency_bitmask;
data->args.offset = data->mds_offset;
data->res.fattr = &data->fattr;
task->tk_ops = data->mds_ops;
rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_write);
static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct nfs_server *server = NFS_SERVER(data->inode);
if (data->lseg) {
data->args.bitmask = NULL;
data->res.fattr = NULL;
} else
data->args.bitmask = server->cache_consistency_bitmask;
if (!data->write_done_cb)
data->write_done_cb = nfs4_write_done_cb;
data->res.server = server;
data->timestamp = jiffies;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
}
static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
{
if (nfs4_setup_sequence(NFS_SERVER(data->inode),
&data->args.seq_args,
&data->res.seq_res,
task))
return;
rpc_call_start(task);
}
static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return -EAGAIN;
}
nfs_refresh_inode(inode, data->res.fattr);
return 0;
}
static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
{
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
return data->write_done_cb(task, data);
}
static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct nfs_server *server = NFS_SERVER(data->inode);
if (data->lseg) {
data->args.bitmask = NULL;
data->res.fattr = NULL;
} else
data->args.bitmask = server->cache_consistency_bitmask;
if (!data->write_done_cb)
data->write_done_cb = nfs4_commit_done_cb;
data->res.server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
}
struct nfs4_renewdata {
struct nfs_client *client;
unsigned long timestamp;
};
/*
* nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
* standalone procedure for queueing an asynchronous RENEW.
*/
static void nfs4_renew_release(void *calldata)
{
struct nfs4_renewdata *data = calldata;
struct nfs_client *clp = data->client;
if (atomic_read(&clp->cl_count) > 1)
nfs4_schedule_state_renewal(clp);
nfs_put_client(clp);
kfree(data);
}
static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
struct nfs4_renewdata *data = calldata;
struct nfs_client *clp = data->client;
unsigned long timestamp = data->timestamp;
if (task->tk_status < 0) {
/* Unless we're shutting down, schedule state recovery! */
if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
return;
if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
nfs4_schedule_lease_recovery(clp);
return;
}
nfs4_schedule_path_down_recovery(clp);
}
do_renew_lease(clp, timestamp);
}
static const struct rpc_call_ops nfs4_renew_ops = {
.rpc_call_done = nfs4_renew_done,
.rpc_release = nfs4_renew_release,
};
static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
.rpc_argp = clp,
.rpc_cred = cred,
};
struct nfs4_renewdata *data;
if (renew_flags == 0)
return 0;
if (!atomic_inc_not_zero(&clp->cl_count))
return -EIO;
data = kmalloc(sizeof(*data), GFP_NOFS);
if (data == NULL)
return -ENOMEM;
data->client = clp;
data->timestamp = jiffies;
return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
&nfs4_renew_ops, data);
}
static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
.rpc_argp = clp,
.rpc_cred = cred,
};
unsigned long now = jiffies;
int status;
status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
if (status < 0)
return status;
do_renew_lease(clp, now);
return 0;
}
static inline int nfs4_server_supports_acls(struct nfs_server *server)
{
return (server->caps & NFS_CAP_ACLS)
&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
}
/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
* it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
* the stack.
*/
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
static int buf_to_pages_noslab(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
{
struct page *newpage, **spages;
int rc = 0;
size_t len;
spages = pages;
do {
len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
newpage = alloc_page(GFP_KERNEL);
if (newpage == NULL)
goto unwind;
memcpy(page_address(newpage), buf, len);
buf += len;
buflen -= len;
*pages++ = newpage;
rc++;
} while (buflen != 0);
return rc;
unwind:
for (; rc > 0; rc--)
__free_page(spages[rc-1]);
return -ENOMEM;
}
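/*
 * Cached ACL: when "cached" is set, "data" holds "len" valid bytes of
 * ACL data; otherwise only the required length is remembered, and the
 * next read must go back to the server.
 */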
struct nfs4_cached_acl {
int cached;
size_t len;
char data[0];
};
static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
kfree(nfsi->nfs4_acl);
nfsi->nfs4_acl = acl;
spin_unlock(&inode->i_lock);
}
static void nfs4_zap_acl_attr(struct inode *inode)
{
nfs4_set_cached_acl(inode, NULL);
}
static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs4_cached_acl *acl;
int ret = -ENOENT;
spin_lock(&inode->i_lock);
acl = nfsi->nfs4_acl;
if (acl == NULL)
goto out;
if (buf == NULL) /* user is just asking for length */
goto out_len;
if (acl->cached == 0)
goto out;
ret = -ERANGE; /* see getxattr(2) man page */
if (acl->len > buflen)
goto out;
memcpy(buf, acl->data, acl->len);
out_len:
ret = acl->len;
out:
spin_unlock(&inode->i_lock);
return ret;
}
static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
{
struct nfs4_cached_acl *acl;
if (pages && acl_len <= PAGE_SIZE) {
acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
if (acl == NULL)
goto out;
acl->cached = 1;
_copy_from_pages(acl->data, pages, pgbase, acl_len);
} else {
acl = kmalloc(sizeof(*acl), GFP_KERNEL);
if (acl == NULL)
goto out;
acl->cached = 0;
}
acl->len = acl_len;
out:
nfs4_set_cached_acl(inode, acl);
}
/*
* The getxattr API returns the required buffer length when called with a
* NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
* the required buf. On a NULL buf, we send a page of data to the server
* guessing that the ACL request can be serviced by a page. If so, we cache
* up to the page of ACL data, and the 2nd call to getxattr is serviced by
* the cache. If not so, we throw away the page, and cache the required
* length. The next getxattr call will then produce another round trip to
* the server, this time with the input buf of the required size.
*/
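/*
 * Illustrative userspace sequence (hypothetical path; the xattr name is
 * the one used by the nfs4-acl-tools package):
 *
 * len = getxattr("/mnt/file", "system.nfs4_acl", NULL, 0);
 * buf = malloc(len);
 * getxattr("/mnt/file", "system.nfs4_acl", buf, len);
 *
 * If the ACL fit in the page sent with the first call, the second call
 * is served from the cache filled in below.
 */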
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_getaclres res = {
.acl_len = buflen,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
.rpc_resp = &res,
};
int ret = -ENOMEM, npages, i, acl_len = 0;
npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
if (npages == 0)
npages = 1;
/* Add an extra page to handle the bitmap returned */
npages++;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i])
goto out_free;
}
/* for decoding across pages */
res.acl_scratch = alloc_page(GFP_KERNEL);
if (!res.acl_scratch)
goto out_free;
args.acl_len = npages * PAGE_SIZE;
args.acl_pgbase = 0;
/* Let decode_getacl know not to fail if the ACL data is larger than
* the page we send as a guess */
if (buf == NULL)
res.acl_flags |= NFS4_ACL_LEN_REQUEST;
dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
__func__, buf, buflen, npages, args.acl_len);
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
&msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
acl_len = res.acl_len - res.acl_data_offset;
if (acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, 0, acl_len);
else
nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
acl_len);
if (buf) {
ret = -ERANGE;
if (acl_len > buflen)
goto out_free;
_copy_from_pages(buf, pages, res.acl_data_offset,
acl_len);
}
ret = acl_len;
out_free:
for (i = 0; i < npages; i++)
if (pages[i])
__free_page(pages[i]);
if (res.acl_scratch)
__free_page(res.acl_scratch);
return ret;
}
static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct nfs4_exception exception = { };
ssize_t ret;
do {
ret = __nfs4_get_acl_uncached(inode, buf, buflen);
if (ret >= 0)
break;
ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
} while (exception.retry);
return ret;
}
static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
int ret;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
ret = nfs_revalidate_inode(server, inode);
if (ret < 0)
return ret;
if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
nfs_zap_acl_cache(inode);
ret = nfs4_read_cached_acl(inode, buf, buflen);
if (ret != -ENOENT)
/* -ENOENT is returned if there is no ACL or if there is an ACL
* but no cached acl data, just the acl length */
return ret;
return nfs4_get_acl_uncached(inode, buf, buflen);
}
static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
struct page *pages[NFS4ACL_MAXPAGES];
struct nfs_setaclargs arg = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_setaclres res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
.rpc_argp = &arg,
.rpc_resp = &res,
};
int ret, i;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
if (i < 0)
return i;
nfs_inode_return_delegation(inode);
ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
/*
* Free each page after tx, so the only ref left is
* held by the network stack
*/
for (; i > 0; i--)
put_page(pages[i-1]);
/*
* Acl update can result in inode attribute update.
* so mark the attribute cache invalid.
*/
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
spin_unlock(&inode->i_lock);
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);
return ret;
}
static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
__nfs4_proc_set_acl(inode, buf, buflen),
&exception);
} while (exception.retry);
return err;
}
static int
nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
{
struct nfs_client *clp = server->nfs_client;
if (task->tk_status >= 0)
return 0;
switch (task->tk_status) {
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
if (state == NULL)
break;
nfs_remove_bad_delegation(state->inode);
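/* Fall through: the stateid also needs recovery */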
case -NFS4ERR_OPENMODE:
if (state == NULL)
break;
nfs4_schedule_stateid_recovery(server, state);
goto wait_on_recovery;
case -NFS4ERR_EXPIRED:
if (state != NULL)
nfs4_schedule_stateid_recovery(server, state);
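/* Fall through: the lease needs recovery as well */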
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_STALE_CLIENTID:
nfs4_schedule_lease_recovery(clp);
goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_DEADSESSION:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status);
nfs4_schedule_session_recovery(clp->cl_session);
task->tk_status = 0;
return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
case -NFS4ERR_DELAY:
nfs_inc_server_stats(server, NFSIOS_DELAY);
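/* Fall through to the delay-and-retry path */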
case -NFS4ERR_GRACE:
case -EKEYEXPIRED:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
task->tk_status = 0;
return -EAGAIN;
case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_OLD_STATEID:
task->tk_status = 0;
return -EAGAIN;
}
task->tk_status = nfs4_map_errors(task->tk_status);
return 0;
wait_on_recovery:
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
task->tk_status = 0;
return -EAGAIN;
}
static void nfs4_construct_boot_verifier(struct nfs_client *clp,
nfs4_verifier *bootverf)
{
__be32 verf[2];
verf[0] = htonl((u32)clp->cl_boot_time.tv_sec);
verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec);
memcpy(bootverf->data, verf, sizeof(bootverf->data));
}
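/*
 * The boot verifier is therefore the client's boot time (seconds and
 * nanoseconds, each truncated to 32 bits, stored big-endian), which
 * lets the server detect a client reboot across SETCLIENTID calls.
 */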
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
unsigned short port, struct rpc_cred *cred,
struct nfs4_setclientid_res *res)
{
nfs4_verifier sc_verifier;
struct nfs4_setclientid setclientid = {
.sc_verifier = &sc_verifier,
.sc_prog = program,
.sc_cb_ident = clp->cl_cb_ident,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
.rpc_argp = &setclientid,
.rpc_resp = res,
.rpc_cred = cred,
};
int loop = 0;
int status;
nfs4_construct_boot_verifier(clp, &sc_verifier);
for (;;) {
rcu_read_lock();
setclientid.sc_name_len = scnprintf(setclientid.sc_name,
sizeof(setclientid.sc_name), "%s/%s %s %s %u",
clp->cl_ipaddr,
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR),
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_PROTO),
clp->cl_rpcclient->cl_auth->au_ops->au_name,
clp->cl_id_uniquifier);
setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
sizeof(setclientid.sc_netid),
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_NETID));
setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
sizeof(setclientid.sc_uaddr), "%s.%u.%u",
clp->cl_ipaddr, port >> 8, port & 255);
rcu_read_unlock();
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status != -NFS4ERR_CLID_INUSE)
break;
if (loop != 0) {
++clp->cl_id_uniquifier;
break;
}
++loop;
ssleep(clp->cl_lease_time / HZ + 1);
}
return status;
}
int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
struct nfs4_setclientid_res *arg,
struct rpc_cred *cred)
{
struct nfs_fsinfo fsinfo;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
.rpc_argp = arg,
.rpc_resp = &fsinfo,
.rpc_cred = cred,
};
unsigned long now;
int status;
now = jiffies;
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status == 0) {
spin_lock(&clp->cl_lock);
clp->cl_lease_time = fsinfo.lease_time * HZ;
clp->cl_last_renewal = now;
spin_unlock(&clp->cl_lock);
}
return status;
}
struct nfs4_delegreturndata {
struct nfs4_delegreturnargs args;
struct nfs4_delegreturnres res;
struct nfs_fh fh;
nfs4_stateid stateid;
unsigned long timestamp;
struct nfs_fattr fattr;
int rpc_status;
};
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
switch (task->tk_status) {
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
case 0:
renew_lease(data->res.server, data->timestamp);
break;
default:
if (nfs4_async_handle_error(task, data->res.server, NULL) ==
-EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
}
data->rpc_status = task->tk_status;
}
static void nfs4_delegreturn_release(void *calldata)
{
kfree(calldata);
}
#if defined(CONFIG_NFS_V4_1)
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
{
struct nfs4_delegreturndata *d_data;
d_data = (struct nfs4_delegreturndata *)data;
if (nfs4_setup_sequence(d_data->res.server,
&d_data->args.seq_args,
&d_data->res.seq_res, task))
return;
rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
static const struct rpc_call_ops nfs4_delegreturn_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs4_delegreturn_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs4_delegreturn_done,
.rpc_release = nfs4_delegreturn_release,
};
static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
struct nfs4_delegreturndata *data;
struct nfs_server *server = NFS_SERVER(inode);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
.rpc_cred = cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_delegreturn_ops,
.flags = RPC_TASK_ASYNC,
};
int status = 0;
data = kzalloc(sizeof(*data), GFP_NOFS);
if (data == NULL)
return -ENOMEM;
nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
data->args.fhandle = &data->fh;
data->args.stateid = &data->stateid;
data->args.bitmask = server->attr_bitmask;
nfs_copy_fh(&data->fh, NFS_FH(inode));
nfs4_stateid_copy(&data->stateid, stateid);
data->res.fattr = &data->fattr;
data->res.server = server;
nfs_fattr_init(data->res.fattr);
data->timestamp = jiffies;
data->rpc_status = 0;
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
if (!issync)
goto out;
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0)
goto out;
status = data->rpc_status;
if (status != 0)
goto out;
nfs_refresh_inode(inode, &data->fattr);
out:
rpc_put_task(task);
return status;
}
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
switch (err) {
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
case 0:
return 0;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
/*
* sleep, with exponential backoff, and retry the LOCK operation.
*/
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
freezable_schedule_timeout_killable_unsafe(timeout);
timeout <<= 1;
if (timeout > NFS4_LOCK_MAXTIMEOUT)
return NFS4_LOCK_MAXTIMEOUT;
return timeout;
}
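/*
 * For illustration, assuming the caller starts from NFS4_LOCK_MINTIMEOUT:
 * successive waits run 1*HZ, 2*HZ, 4*HZ, 8*HZ, 16*HZ, and thereafter
 * stay clamped at NFS4_LOCK_MAXTIMEOUT (30*HZ).
 */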
static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct inode *inode = state->inode;
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_client *clp = server->nfs_client;
struct nfs_lockt_args arg = {
.fh = NFS_FH(inode),
.fl = request,
};
struct nfs_lockt_res res = {
.denied = request,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
.rpc_argp = &arg,
.rpc_resp = &res,
.rpc_cred = state->owner->so_cred,
};
struct nfs4_lock_state *lsp;
int status;
arg.lock_owner.clientid = clp->cl_clientid;
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
lsp = request->fl_u.nfs4_fl.owner;
arg.lock_owner.id = lsp->ls_seqid.owner_id;
arg.lock_owner.s_dev = server->s_dev;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
switch (status) {
case 0:
request->fl_type = F_UNLCK;
break;
case -NFS4ERR_DENIED:
status = 0;
}
request->fl_ops->fl_release_private(request);
out:
return status;
}
static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(state->inode),
_nfs4_proc_getlk(state, cmd, request),
&exception);
} while (exception.retry);
return err;
}
static int do_vfs_lock(struct file *file, struct file_lock *fl)
{
int res = 0;
switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
case FL_POSIX:
res = posix_lock_file_wait(file, fl);
break;
case FL_FLOCK:
res = flock_lock_file_wait(file, fl);
break;
default:
BUG();
}
return res;
}
struct nfs4_unlockdata {
struct nfs_locku_args arg;
struct nfs_locku_res res;
struct nfs4_lock_state *lsp;
struct nfs_open_context *ctx;
struct file_lock fl;
const struct nfs_server *server;
unsigned long timestamp;
};
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
struct nfs_open_context *ctx,
struct nfs4_lock_state *lsp,
struct nfs_seqid *seqid)
{
struct nfs4_unlockdata *p;
struct inode *inode = lsp->ls_state->inode;
p = kzalloc(sizeof(*p), GFP_NOFS);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
p->res.seqid = seqid;
p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
/* Ensure we don't close file until we're done freeing locks! */
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
p->server = NFS_SERVER(inode);
return p;
}
static void nfs4_locku_release_calldata(void *data)
{
struct nfs4_unlockdata *calldata = data;
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_lock_state(calldata->lsp);
put_nfs_open_context(calldata->ctx);
kfree(calldata);
}
static void nfs4_locku_done(struct rpc_task *task, void *data)
{
struct nfs4_unlockdata *calldata = data;
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
return;
switch (task->tk_status) {
case 0:
nfs4_stateid_copy(&calldata->lsp->ls_stateid,
&calldata->res.stateid);
renew_lease(calldata->server, calldata->timestamp);
break;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
break;
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
rpc_restart_call_prepare(task);
}
}
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
{
struct nfs4_unlockdata *calldata = data;
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
task->tk_action = NULL;
return;
}
calldata->timestamp = jiffies;
if (nfs4_setup_sequence(calldata->server,
&calldata->arg.seq_args,
&calldata->res.seq_res, task))
return;
rpc_call_start(task);
}
static const struct rpc_call_ops nfs4_locku_ops = {
.rpc_call_prepare = nfs4_locku_prepare,
.rpc_call_done = nfs4_locku_done,
.rpc_release = nfs4_locku_release_calldata,
};
static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
struct nfs_open_context *ctx,
struct nfs4_lock_state *lsp,
struct nfs_seqid *seqid)
{
struct nfs4_unlockdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
.rpc_cred = ctx->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
.rpc_message = &msg,
.callback_ops = &nfs4_locku_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
/* Ensure this is an unlock - when canceling a lock, the
* canceled lock is passed in, and it won't be an unlock.
*/
fl->fl_type = F_UNLCK;
data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
if (data == NULL) {
nfs_free_seqid(seqid);
return ERR_PTR(-ENOMEM);
}
nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
msg.rpc_argp = &data->arg;
msg.rpc_resp = &data->res;
task_setup_data.callback_data = data;
return rpc_run_task(&task_setup_data);
}
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_seqid *seqid;
struct nfs4_lock_state *lsp;
struct rpc_task *task;
int status = 0;
unsigned char fl_flags = request->fl_flags;
status = nfs4_set_lock_state(state, request);
/* Unlock _before_ we do the RPC call */
request->fl_flags |= FL_EXISTS;
down_read(&nfsi->rwsem);
if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
up_read(&nfsi->rwsem);
goto out;
}
up_read(&nfsi->rwsem);
if (status != 0)
goto out;
/* Is this a delegated lock? */
if (test_bit(NFS_DELEGATED_STATE, &state->flags))
goto out;
lsp = request->fl_u.nfs4_fl.owner;
seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
status = -ENOMEM;
if (seqid == NULL)
goto out;
task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
status = PTR_ERR(task);
if (IS_ERR(task))
goto out;
status = nfs4_wait_for_completion_rpc_task(task);
rpc_put_task(task);
out:
request->fl_flags = fl_flags;
return status;
}
struct nfs4_lockdata {
struct nfs_lock_args arg;
struct nfs_lock_res res;
struct nfs4_lock_state *lsp;
struct nfs_open_context *ctx;
struct file_lock fl;
unsigned long timestamp;
int rpc_status;
int cancelled;
struct nfs_server *server;
};
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
gfp_t gfp_mask)
{
struct nfs4_lockdata *p;
struct inode *inode = lsp->ls_state->inode;
struct nfs_server *server = NFS_SERVER(inode);
p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
if (p->arg.open_seqid == NULL)
goto out_free;
p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
if (p->arg.lock_seqid == NULL)
goto out_free_seqid;
p->arg.lock_stateid = &lsp->ls_stateid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
p->arg.lock_owner.s_dev = server->s_dev;
p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
out_free_seqid:
nfs_free_seqid(p->arg.open_seqid);
out_free:
kfree(p);
return NULL;
}
static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
struct nfs4_state *state = data->lsp->ls_state;
dprintk("%s: begin!\n", __func__);
if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
return;
/* Do we need to do an open_to_lock_owner? */
if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
return;
data->arg.open_stateid = &state->stateid;
data->arg.new_lock_owner = 1;
data->res.open_seqid = data->arg.open_seqid;
} else
data->arg.new_lock_owner = 0;
data->timestamp = jiffies;
if (nfs4_setup_sequence(data->server,
&data->arg.seq_args,
&data->res.seq_res, task))
return;
rpc_call_start(task);
dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
{
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
nfs4_lock_prepare(task, calldata);
}
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
dprintk("%s: begin!\n", __func__);
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
data->rpc_status = task->tk_status;
if (data->arg.new_lock_owner != 0) {
if (data->rpc_status == 0)
nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
else
goto out;
}
if (data->rpc_status == 0) {
nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
}
out:
dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
}
static void nfs4_lock_release(void *calldata)
{
struct nfs4_lockdata *data = calldata;
dprintk("%s: begin!\n", __func__);
nfs_free_seqid(data->arg.open_seqid);
if (data->cancelled != 0) {
struct rpc_task *task;
task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
data->arg.lock_seqid);
if (!IS_ERR(task))
rpc_put_task_async(task);
dprintk("%s: cancelling lock!\n", __func__);
} else
nfs_free_seqid(data->arg.lock_seqid);
nfs4_put_lock_state(data->lsp);
put_nfs_open_context(data->ctx);
kfree(data);
dprintk("%s: done!\n", __func__);
}
static const struct rpc_call_ops nfs4_lock_ops = {
.rpc_call_prepare = nfs4_lock_prepare,
.rpc_call_done = nfs4_lock_done,
.rpc_release = nfs4_lock_release,
};
static const struct rpc_call_ops nfs4_recover_lock_ops = {
.rpc_call_prepare = nfs4_recover_lock_prepare,
.rpc_call_done = nfs4_lock_done,
.rpc_release = nfs4_lock_release,
};
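/*
 * Map a failed LOCK call onto the appropriate recovery action: bad or
 * administratively revoked stateids trigger stateid recovery, while a
 * stale stateid or an expired lease requires the lease itself to be
 * re-established.
 */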
static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
{
switch (error) {
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
if (new_lock_owner != 0 ||
(lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
nfs4_schedule_stateid_recovery(server, lsp->ls_state);
break;
case -NFS4ERR_STALE_STATEID:
lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
/* Fall through */
case -NFS4ERR_EXPIRED:
nfs4_schedule_lease_recovery(server->nfs_client);
}
}
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
struct nfs4_lockdata *data;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
.rpc_cred = state->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(state->inode),
.rpc_message = &msg,
.callback_ops = &nfs4_lock_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int ret;
dprintk("%s: begin!\n", __func__);
data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
fl->fl_u.nfs4_fl.owner,
recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
if (data == NULL)
return -ENOMEM;
if (IS_SETLKW(cmd))
data->arg.block = 1;
if (recovery_type > NFS_LOCK_NEW) {
if (recovery_type == NFS_LOCK_RECLAIM)
data->arg.reclaim = NFS_LOCK_RECLAIM;
task_setup_data.callback_ops = &nfs4_recover_lock_ops;
}
nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
msg.rpc_argp = &data->arg;
msg.rpc_resp = &data->res;
task_setup_data.callback_data = data;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
ret = nfs4_wait_for_completion_rpc_task(task);
if (ret == 0) {
ret = data->rpc_status;
if (ret)
nfs4_handle_setlk_error(data->server, data->lsp,
data->arg.new_lock_owner, ret);
} else
data->cancelled = 1;
rpc_put_task(task);
dprintk("%s: done, ret = %d!\n", __func__, ret);
return ret;
}
static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = {
.inode = state->inode,
};
int err;
do {
/* Cache the lock if possible... */
if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
return 0;
err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = {
.inode = state->inode,
};
int err;
err = nfs4_set_lock_state(state, request);
if (err != 0)
return err;
do {
if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
return 0;
err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
switch (err) {
default:
goto out;
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
nfs4_handle_exception(server, err, &exception);
err = 0;
}
} while (exception.retry);
out:
return err;
}
#if defined(CONFIG_NFS_V4_1)
static int nfs41_check_expired_locks(struct nfs4_state *state)
{
int status, ret = NFS_OK;
struct nfs4_lock_state *lsp;
struct nfs_server *server = NFS_SERVER(state->inode);
list_for_each_entry(lsp, &state->lock_states, ls_locks) {
if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
status = nfs41_test_stateid(server, &lsp->ls_stateid);
if (status != NFS_OK) {
nfs41_free_stateid(server, &lsp->ls_stateid);
lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
ret = status;
}
}
}
return ret;
}
static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
int status = NFS_OK;
if (test_bit(LK_STATE_IN_USE, &state->flags))
status = nfs41_check_expired_locks(state);
if (status == NFS_OK)
return status;
return nfs4_lock_expired(state, request);
}
#endif
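/*
 * Acquire a byte-range lock. A local FL_ACCESS test is used to detect
 * conflicts first; if the open is delegated, the lock is cached locally,
 * otherwise a LOCK call is sent to the server before the VFS lock
 * manager is updated.
 */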
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
unsigned char fl_flags = request->fl_flags;
int status = -ENOLCK;
if ((fl_flags & FL_POSIX) &&
!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
goto out;
/* Is this a delegated open? */
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
request->fl_flags |= FL_ACCESS;
status = do_vfs_lock(request->fl_file, request);
if (status < 0)
goto out;
down_read(&nfsi->rwsem);
if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
/* Yes: cache locks! */
/* ...but avoid races with delegation recall... */
request->fl_flags = fl_flags & ~FL_SLEEP;
status = do_vfs_lock(request->fl_file, request);
goto out_unlock;
}
status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
if (status != 0)
goto out_unlock;
/* Note: we always want to sleep here! */
request->fl_flags = fl_flags | FL_SLEEP;
if (do_vfs_lock(request->fl_file, request) < 0)
printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
"manager!\n", __func__);
out_unlock:
up_read(&nfsi->rwsem);
out:
request->fl_flags = fl_flags;
return status;
}
static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs4_exception exception = {
.state = state,
.inode = state->inode,
};
int err;
do {
err = _nfs4_proc_setlk(state, cmd, request);
if (err == -NFS4ERR_DENIED)
err = -EAGAIN;
err = nfs4_handle_exception(NFS_SERVER(state->inode),
err, &exception);
} while (exception.retry);
return err;
}
static int
nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
{
struct nfs_open_context *ctx;
struct nfs4_state *state;
unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
int status;
/* verify open state */
ctx = nfs_file_open_context(filp);
state = ctx->state;
if (request->fl_start < 0 || request->fl_end < 0)
return -EINVAL;
if (IS_GETLK(cmd)) {
if (state != NULL)
return nfs4_proc_getlk(state, F_GETLK, request);
return 0;
}
if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
return -EINVAL;
if (request->fl_type == F_UNLCK) {
if (state != NULL)
return nfs4_proc_unlck(state, cmd, request);
return 0;
}
if (state == NULL)
return -ENOLCK;
/*
* Don't rely on the VFS having checked the file open mode,
* since it won't do this for flock() locks.
*/
switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
case F_RDLCK:
if (!(filp->f_mode & FMODE_READ))
return -EBADF;
break;
case F_WRLCK:
if (!(filp->f_mode & FMODE_WRITE))
return -EBADF;
}
do {
status = nfs4_proc_setlk(state, cmd, request);
if ((status != -EAGAIN) || IS_SETLK(cmd))
break;
timeout = nfs4_set_lock_task_retry(timeout);
status = -ERESTARTSYS;
if (signalled())
break;
} while(status < 0);
return status;
}
int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
err = nfs4_set_lock_state(state, fl);
if (err != 0)
goto out;
do {
err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
switch (err) {
default:
printk(KERN_ERR "NFS: %s: unhandled error "
"%d.\n", __func__, err);
case 0:
case -ESTALE:
goto out;
case -NFS4ERR_EXPIRED:
nfs4_schedule_stateid_recovery(server, state);
/* Fall through */
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
nfs4_schedule_lease_recovery(server->nfs_client);
goto out;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
nfs4_schedule_session_recovery(server->nfs_client->cl_session);
goto out;
case -ERESTARTSYS:
/*
* The show must go on: exit, but mark the
* stateid as needing recovery.
*/
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OPENMODE:
nfs4_schedule_stateid_recovery(server, state);
err = 0;
goto out;
case -EKEYEXPIRED:
/*
* User RPCSEC_GSS context has expired.
* We cannot recover this stateid now, so
* skip it and allow recovery thread to
* proceed.
*/
err = 0;
goto out;
case -ENOMEM:
case -NFS4ERR_DENIED:
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
err = 0;
goto out;
case -NFS4ERR_DELAY:
break;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
return err;
}
struct nfs_release_lockowner_data {
struct nfs4_lock_state *lsp;
struct nfs_server *server;
struct nfs_release_lockowner_args args;
};
static void nfs4_release_lockowner_release(void *calldata)
{
struct nfs_release_lockowner_data *data = calldata;
nfs4_free_lock_state(data->server, data->lsp);
kfree(calldata);
}
static const struct rpc_call_ops nfs4_release_lockowner_ops = {
.rpc_release = nfs4_release_lockowner_release,
};
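/*
 * RELEASE_LOCKOWNER only exists in NFSv4.0; the call is fired off
 * asynchronously and the lock state is freed from the rpc_release
 * callback once it completes.
 */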
int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
{
struct nfs_server *server = lsp->ls_state->owner->so_server;
struct nfs_release_lockowner_data *data;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
};
if (server->nfs_client->cl_mvops->minor_version != 0)
return -EINVAL;
data = kmalloc(sizeof(*data), GFP_NOFS);
if (!data)
return -ENOMEM;
data->lsp = lsp;
data->server = server;
data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
data->args.lock_owner.id = lsp->ls_seqid.owner_id;
data->args.lock_owner.s_dev = server->s_dev;
msg.rpc_argp = &data->args;
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
return 0;
}
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
const void *buf, size_t buflen,
int flags, int type)
{
if (strcmp(key, "") != 0)
return -EINVAL;
return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
}
static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
void *buf, size_t buflen, int type)
{
if (strcmp(key, "") != 0)
return -EINVAL;
return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
}
static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
size_t list_len, const char *name,
size_t name_len, int type)
{
size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
return 0;
if (list && len <= list_len)
memcpy(list, XATTR_NAME_NFSV4_ACL, len);
return len;
}
/*
* nfs_fhget will use either the mounted_on_fileid or the fileid
*/
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
{
if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
(fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
(fattr->valid & NFS_ATTR_FATTR_FSID) &&
(fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
return;
fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
fattr->nlink = 2;
}
static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
const struct qstr *name,
struct nfs4_fs_locations *fs_locations,
struct page *page)
{
struct nfs_server *server = NFS_SERVER(dir);
u32 bitmask[2] = {
[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
};
struct nfs4_fs_locations_arg args = {
.dir_fh = NFS_FH(dir),
.name = name,
.page = page,
.bitmask = bitmask,
};
struct nfs4_fs_locations_res res = {
.fs_locations = fs_locations,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
dprintk("%s: start\n", __func__);
/* Ask for the fileid of the absent filesystem if mounted_on_fileid
* is not supported */
if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
else
bitmask[0] |= FATTR4_WORD0_FILEID;
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("%s: returned status = %d\n", __func__, status);
return status;
}
int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
const struct qstr *name,
struct nfs4_fs_locations *fs_locations,
struct page *page)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
{
int status;
struct nfs4_secinfo_arg args = {
.dir_fh = NFS_FH(dir),
.name = name,
};
struct nfs4_secinfo_res res = {
.flavors = flavors,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
.rpc_argp = &args,
.rpc_resp = &res,
};
dprintk("NFS call secinfo %s\n", name->name);
status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
dprintk("NFS reply secinfo: %d\n", status);
return status;
}
int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
struct nfs4_secinfo_flavors *flavors)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_secinfo(dir, name, flavors),
&exception);
} while (exception.retry);
return err;
}
#ifdef CONFIG_NFS_V4_1
/*
* Check the exchange flags returned by the server for invalid flags:
* having both the PNFS and NON_PNFS flags set, or having none of the
* NON_PNFS, PNFS, or DS flags set.
*/
static int nfs4_check_cl_exchange_flags(u32 flags)
{
if (flags & ~EXCHGID4_FLAG_MASK_R)
goto out_inval;
if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
(flags & EXCHGID4_FLAG_USE_NON_PNFS))
goto out_inval;
if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
goto out_inval;
return NFS_OK;
out_inval:
return -NFS4ERR_INVAL;
}
static bool
nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
{
if (a->server_scope_sz == b->server_scope_sz &&
memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
return true;
return false;
}
/*
* nfs4_proc_exchange_id()
*
* Since the clientid has expired, all compounds using sessions
* associated with the stale clientid will be returning
* NFS4ERR_BADSESSION in the sequence operation, and will therefore
* be in some phase of session reset.
*/
int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
{
nfs4_verifier verifier;
struct nfs41_exchange_id_args args = {
.verifier = &verifier,
.client = clp,
.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
};
struct nfs41_exchange_id_res res = {
.client = clp,
};
int status;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
.rpc_argp = &args,
.rpc_resp = &res,
.rpc_cred = cred,
};
dprintk("--> %s\n", __func__);
BUG_ON(clp == NULL);
nfs4_construct_boot_verifier(clp, &verifier);
args.id_len = scnprintf(args.id, sizeof(args.id),
"%s/%s/%u",
clp->cl_ipaddr,
clp->cl_rpcclient->cl_nodename,
clp->cl_rpcclient->cl_auth->au_flavor);
res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
if (unlikely(!res.server_scope)) {
status = -ENOMEM;
goto out;
}
res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
if (unlikely(!res.impl_id)) {
status = -ENOMEM;
goto out_server_scope;
}
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (!status)
status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
if (!status) {
/* use the most recent implementation id */
kfree(clp->impl_id);
clp->impl_id = res.impl_id;
} else
kfree(res.impl_id);
if (!status) {
if (clp->server_scope &&
!nfs41_same_server_scope(clp->server_scope,
res.server_scope)) {
dprintk("%s: server_scope mismatch detected\n",
__func__);
set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
kfree(clp->server_scope);
clp->server_scope = NULL;
}
if (!clp->server_scope) {
clp->server_scope = res.server_scope;
goto out;
}
}
out_server_scope:
kfree(res.server_scope);
out:
if (clp->impl_id)
dprintk("%s: Server Implementation ID: "
"domain: %s, name: %s, date: %llu,%u\n",
__func__, clp->impl_id->domain, clp->impl_id->name,
clp->impl_id->date.seconds,
clp->impl_id->date.nseconds);
dprintk("<-- %s status= %d\n", __func__, status);
return status;
}
struct nfs4_get_lease_time_data {
struct nfs4_get_lease_time_args *args;
struct nfs4_get_lease_time_res *res;
struct nfs_client *clp;
};
static void nfs4_get_lease_time_prepare(struct rpc_task *task,
void *calldata)
{
int ret;
struct nfs4_get_lease_time_data *data =
(struct nfs4_get_lease_time_data *)calldata;
dprintk("--> %s\n", __func__);
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
/* just set up the sequence; do not trigger session recovery
since we're invoked within one */
ret = nfs41_setup_sequence(data->clp->cl_session,
&data->args->la_seq_args,
&data->res->lr_seq_res, task);
BUG_ON(ret == -EAGAIN);
rpc_call_start(task);
dprintk("<-- %s\n", __func__);
}
/*
* Called from nfs4_state_manager thread for session setup, so don't recover
* from sequence operation or clientid errors.
*/
static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
{
struct nfs4_get_lease_time_data *data =
(struct nfs4_get_lease_time_data *)calldata;
dprintk("--> %s\n", __func__);
if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
return;
switch (task->tk_status) {
case -NFS4ERR_DELAY:
case -NFS4ERR_GRACE:
dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
rpc_delay(task, NFS4_POLL_RETRY_MIN);
task->tk_status = 0;
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
rpc_restart_call_prepare(task);
return;
}
dprintk("<-- %s\n", __func__);
}
static const struct rpc_call_ops nfs4_get_lease_time_ops = {
.rpc_call_prepare = nfs4_get_lease_time_prepare,
.rpc_call_done = nfs4_get_lease_time_done,
};
int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
{
struct rpc_task *task;
struct nfs4_get_lease_time_args args;
struct nfs4_get_lease_time_res res = {
.lr_fsinfo = fsinfo,
};
struct nfs4_get_lease_time_data data = {
.args = &args,
.res = &res,
.clp = clp,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
.rpc_argp = &args,
.rpc_resp = &res,
};
struct rpc_task_setup task_setup = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_get_lease_time_ops,
.callback_data = &data,
.flags = RPC_TASK_TIMEOUT,
};
int status;
nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
dprintk("--> %s\n", __func__);
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
status = PTR_ERR(task);
else {
status = task->tk_status;
rpc_put_task(task);
}
dprintk("<-- %s return %d\n", __func__, status);
return status;
}
static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
{
return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
}
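/*
 * Swap a newly allocated slot array (if any) into the table under the
 * slot table lock, mark all slots unused and reset every slot's
 * sequence number to the negotiated initial value.
 */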
static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
struct nfs4_slot *new,
u32 max_slots,
u32 ivalue)
{
struct nfs4_slot *old = NULL;
u32 i;
spin_lock(&tbl->slot_tbl_lock);
if (new) {
old = tbl->slots;
tbl->slots = new;
tbl->max_slots = max_slots;
}
tbl->highest_used_slotid = -1; /* no slot is currently used */
for (i = 0; i < tbl->max_slots; i++)
tbl->slots[i].seq_nr = ivalue;
spin_unlock(&tbl->slot_tbl_lock);
kfree(old);
}
/*
* (re)Initialise a slot table
*/
static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
u32 ivalue)
{
struct nfs4_slot *new = NULL;
int ret = -ENOMEM;
dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
max_reqs, tbl->max_slots);
/* Does the newly negotiated max_reqs match the existing slot table? */
if (max_reqs != tbl->max_slots) {
new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
if (!new)
goto out;
}
ret = 0;
nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
tbl, tbl->slots, tbl->max_slots);
out:
dprintk("<-- %s: return %d\n", __func__, ret);
return ret;
}
/* Destroy the slot table */
static void nfs4_destroy_slot_tables(struct nfs4_session *session)
{
if (session->fc_slot_table.slots != NULL) {
kfree(session->fc_slot_table.slots);
session->fc_slot_table.slots = NULL;
}
if (session->bc_slot_table.slots != NULL) {
kfree(session->bc_slot_table.slots);
session->bc_slot_table.slots = NULL;
}
}
/*
* Initialize or reset the forechannel and backchannel tables
*/
static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
struct nfs4_slot_table *tbl;
int status;
dprintk("--> %s\n", __func__);
/* Fore channel */
tbl = &ses->fc_slot_table;
status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
if (status) /* -ENOMEM */
return status;
/* Back channel */
tbl = &ses->bc_slot_table;
status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
if (status && tbl->slots == NULL)
/* Fore and back channel share a connection so get
* both slot tables or neither */
nfs4_destroy_slot_tables(ses);
return status;
}
struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
struct nfs4_session *session;
struct nfs4_slot_table *tbl;
session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
if (!session)
return NULL;
tbl = &session->fc_slot_table;
tbl->highest_used_slotid = NFS4_NO_SLOT;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
init_completion(&tbl->complete);
tbl = &session->bc_slot_table;
tbl->highest_used_slotid = NFS4_NO_SLOT;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
init_completion(&tbl->complete);
session->session_state = 1<<NFS4_SESSION_INITING;
session->clp = clp;
return session;
}
void nfs4_destroy_session(struct nfs4_session *session)
{
struct rpc_xprt *xprt;
nfs4_proc_destroy_session(session);
rcu_read_lock();
xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
rcu_read_unlock();
dprintk("%s Destroy backchannel for xprt %p\n",
__func__, xprt);
xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
nfs4_destroy_slot_tables(session);
kfree(session);
}
/*
* Initialize the values to be used by the client in CREATE_SESSION.
* If nfs4_init_session set the fore channel request and response sizes,
* use them.
*
* Set the back channel max_resp_sz_cached to zero to force the client to
* always set csa_cachethis to FALSE because the current implementation
* of the back channel DRC only supports caching the CB_SEQUENCE operation.
*/
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
{
struct nfs4_session *session = args->client->cl_session;
unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
mxresp_sz = session->fc_attrs.max_resp_sz;
if (mxrqst_sz == 0)
mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
if (mxresp_sz == 0)
mxresp_sz = NFS_MAX_FILE_IO_SIZE;
/* Fore channel attributes */
args->fc_attrs.max_rqst_sz = mxrqst_sz;
args->fc_attrs.max_resp_sz = mxresp_sz;
args->fc_attrs.max_ops = NFS4_MAX_OPS;
args->fc_attrs.max_reqs = max_session_slots;
dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
"max_ops=%u max_reqs=%u\n",
__func__,
args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
/* Back channel attributes */
args->bc_attrs.max_rqst_sz = PAGE_SIZE;
args->bc_attrs.max_resp_sz = PAGE_SIZE;
args->bc_attrs.max_resp_sz_cached = 0;
args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
args->bc_attrs.max_reqs = 1;
dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
__func__,
args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
args->bc_attrs.max_reqs);
}
static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
struct nfs4_channel_attrs *sent = &args->fc_attrs;
struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
if (rcvd->max_resp_sz > sent->max_resp_sz)
return -EINVAL;
/*
* Our requested max_ops is the minimum we need; we're not
* prepared to break up compounds into smaller pieces than that.
* So, no point even trying to continue if the server won't
* cooperate:
*/
if (rcvd->max_ops < sent->max_ops)
return -EINVAL;
if (rcvd->max_reqs == 0)
return -EINVAL;
if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
return 0;
}
static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
struct nfs4_channel_attrs *sent = &args->bc_attrs;
struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
if (rcvd->max_rqst_sz > sent->max_rqst_sz)
return -EINVAL;
if (rcvd->max_resp_sz < sent->max_resp_sz)
return -EINVAL;
if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
return -EINVAL;
/* These would render the backchannel useless: */
if (rcvd->max_ops != sent->max_ops)
return -EINVAL;
if (rcvd->max_reqs != sent->max_reqs)
return -EINVAL;
return 0;
}
static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
struct nfs4_session *session)
{
int ret;
ret = nfs4_verify_fore_channel_attrs(args, session);
if (ret)
return ret;
return nfs4_verify_back_channel_attrs(args, session);
}
static int _nfs4_proc_create_session(struct nfs_client *clp)
{
struct nfs4_session *session = clp->cl_session;
struct nfs41_create_session_args args = {
.client = clp,
.cb_program = NFS4_CALLBACK,
};
struct nfs41_create_session_res res = {
.client = clp,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
nfs4_init_channel_attrs(&args);
args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (!status)
/* Verify the session's negotiated channel_attrs values */
status = nfs4_verify_channel_attrs(&args, session);
if (!status) {
/* Increment the clientid slot sequence id */
clp->cl_seqid++;
}
return status;
}
/*
* Issues a CREATE_SESSION operation to the server.
* It is the responsibility of the caller to verify the session is
* expired before calling this routine.
*/
int nfs4_proc_create_session(struct nfs_client *clp)
{
int status;
unsigned *ptr;
struct nfs4_session *session = clp->cl_session;
dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
status = _nfs4_proc_create_session(clp);
if (status)
goto out;
/* Init or reset the session slot tables */
status = nfs4_setup_session_slot_tables(session);
dprintk("slot table setup returned %d\n", status);
if (status)
goto out;
ptr = (unsigned *)&session->sess_id.data[0];
dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
out:
dprintk("<-- %s\n", __func__);
return status;
}
/*
* Issue the over-the-wire RPC DESTROY_SESSION.
* The caller must serialize access to this routine.
*/
int nfs4_proc_destroy_session(struct nfs4_session *session)
{
int status = 0;
struct rpc_message msg;
dprintk("--> nfs4_proc_destroy_session\n");
/* session is still being set up */
if (session->clp->cl_cons_state != NFS_CS_READY)
return status;
msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
msg.rpc_argp = session;
msg.rpc_resp = NULL;
msg.rpc_cred = NULL;
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status)
printk(KERN_WARNING
"NFS: Got error %d from the server on DESTROY_SESSION. "
"Session has been destroyed regardless...\n", status);
dprintk("<-- nfs4_proc_destroy_session\n");
return status;
}
int nfs4_init_session(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_session *session;
unsigned int rsize, wsize;
int ret;
if (!nfs4_has_session(clp))
return 0;
session = clp->cl_session;
if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
return 0;
rsize = server->rsize;
if (rsize == 0)
rsize = NFS_MAX_FILE_IO_SIZE;
wsize = server->wsize;
if (wsize == 0)
wsize = NFS_MAX_FILE_IO_SIZE;
session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
ret = nfs4_recover_expired_lease(server);
if (!ret)
ret = nfs4_check_client_ready(clp);
return ret;
}
int nfs4_init_ds_session(struct nfs_client *clp)
{
struct nfs4_session *session = clp->cl_session;
int ret;
if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
return 0;
ret = nfs4_client_recover_expired_lease(clp);
if (!ret)
/* Test for the DS role */
if (!is_ds_client(clp))
ret = -ENODEV;
if (!ret)
ret = nfs4_check_client_ready(clp);
return ret;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
/*
* Renew the cl_session lease.
*/
struct nfs4_sequence_data {
struct nfs_client *clp;
struct nfs4_sequence_args args;
struct nfs4_sequence_res res;
};
static void nfs41_sequence_release(void *data)
{
struct nfs4_sequence_data *calldata = data;
struct nfs_client *clp = calldata->clp;
if (atomic_read(&clp->cl_count) > 1)
nfs4_schedule_state_renewal(clp);
nfs_put_client(clp);
kfree(calldata);
}
static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
switch(task->tk_status) {
case -NFS4ERR_DELAY:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
return -EAGAIN;
default:
nfs4_schedule_lease_recovery(clp);
}
return 0;
}
static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
{
struct nfs4_sequence_data *calldata = data;
struct nfs_client *clp = calldata->clp;
if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
return;
if (task->tk_status < 0) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
if (atomic_read(&clp->cl_count) == 1)
goto out;
if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
}
dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
out:
dprintk("<-- %s\n", __func__);
}
static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
{
struct nfs4_sequence_data *calldata = data;
struct nfs_client *clp = calldata->clp;
struct nfs4_sequence_args *args;
struct nfs4_sequence_res *res;
args = task->tk_msg.rpc_argp;
res = task->tk_msg.rpc_resp;
if (nfs41_setup_sequence(clp->cl_session, args, res, task))
return;
rpc_call_start(task);
}
static const struct rpc_call_ops nfs41_sequence_ops = {
.rpc_call_done = nfs41_sequence_call_done,
.rpc_call_prepare = nfs41_sequence_prepare,
.rpc_release = nfs41_sequence_release,
};
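/*
 * Fire off an asynchronous SEQUENCE call to renew the lease. A client
 * reference is taken for the duration of the call and dropped again in
 * nfs41_sequence_release().
 */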
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
struct nfs4_sequence_data *calldata;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
.rpc_cred = cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs41_sequence_ops,
.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
};
if (!atomic_inc_not_zero(&clp->cl_count))
return ERR_PTR(-EIO);
calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
if (calldata == NULL) {
nfs_put_client(clp);
return ERR_PTR(-ENOMEM);
}
nfs41_init_sequence(&calldata->args, &calldata->res, 0);
msg.rpc_argp = &calldata->args;
msg.rpc_resp = &calldata->res;
calldata->clp = clp;
task_setup_data.callback_data = calldata;
return rpc_run_task(&task_setup_data);
}
static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
struct rpc_task *task;
int ret = 0;
if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
return 0;
task = _nfs41_proc_sequence(clp, cred);
if (IS_ERR(task))
ret = PTR_ERR(task);
else
rpc_put_task_async(task);
dprintk("<-- %s status=%d\n", __func__, ret);
return ret;
}
static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
struct rpc_task *task;
int ret;
task = _nfs41_proc_sequence(clp, cred);
if (IS_ERR(task)) {
ret = PTR_ERR(task);
goto out;
}
ret = rpc_wait_for_completion_task(task);
if (!ret) {
struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
if (task->tk_status == 0)
nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
ret = task->tk_status;
}
rpc_put_task(task);
out:
dprintk("<-- %s status=%d\n", __func__, ret);
return ret;
}
struct nfs4_reclaim_complete_data {
struct nfs_client *clp;
struct nfs41_reclaim_complete_args arg;
struct nfs41_reclaim_complete_res res;
};
static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
{
struct nfs4_reclaim_complete_data *calldata = data;
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
if (nfs41_setup_sequence(calldata->clp->cl_session,
&calldata->arg.seq_args,
&calldata->res.seq_res, task))
return;
rpc_call_start(task);
}
static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
switch(task->tk_status) {
case 0:
case -NFS4ERR_COMPLETE_ALREADY:
case -NFS4ERR_WRONG_CRED: /* What to do here? */
break;
case -NFS4ERR_DELAY:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
return -EAGAIN;
default:
nfs4_schedule_lease_recovery(clp);
}
return 0;
}
static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
struct nfs4_reclaim_complete_data *calldata = data;
struct nfs_client *clp = calldata->clp;
struct nfs4_sequence_res *res = &calldata->res.seq_res;
dprintk("--> %s\n", __func__);
if (!nfs41_sequence_done(task, res))
return;
if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
dprintk("<-- %s\n", __func__);
}
static void nfs4_free_reclaim_complete_data(void *data)
{
struct nfs4_reclaim_complete_data *calldata = data;
kfree(calldata);
}
static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
.rpc_call_prepare = nfs4_reclaim_complete_prepare,
.rpc_call_done = nfs4_reclaim_complete_done,
.rpc_release = nfs4_free_reclaim_complete_data,
};
/*
* Issue a global reclaim complete.
*/
static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
{
struct nfs4_reclaim_complete_data *calldata;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_reclaim_complete_call_ops,
.flags = RPC_TASK_ASYNC,
};
int status = -ENOMEM;
dprintk("--> %s\n", __func__);
calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
if (calldata == NULL)
goto out;
calldata->clp = clp;
calldata->arg.one_fs = 0;
nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
msg.rpc_argp = &calldata->arg;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task)) {
status = PTR_ERR(task);
goto out;
}
status = nfs4_wait_for_completion_rpc_task(task);
if (status == 0)
status = task->tk_status;
rpc_put_task(task);
return 0;
out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
dprintk("--> %s\n", __func__);
/* Note that there is a race here, where a CB_LAYOUTRECALL can come in
* right now covering the LAYOUTGET we are about to send.
* However, that is not so catastrophic, and there seems
* to be no way to prevent it completely.
*/
if (nfs4_setup_sequence(server, &lgp->args.seq_args,
&lgp->res.seq_res, task))
return;
if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
NFS_I(lgp->args.inode)->layout,
lgp->args.ctx->state)) {
rpc_exit(task, NFS4_OK);
return;
}
rpc_call_start(task);
}
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
dprintk("--> %s\n", __func__);
if (!nfs4_sequence_done(task, &lgp->res.seq_res))
return;
switch (task->tk_status) {
case 0:
break;
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
task->tk_status = -NFS4ERR_DELAY;
/* Fall through */
default:
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
}
dprintk("<-- %s\n", __func__);
}
static void nfs4_layoutget_release(void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
dprintk("--> %s\n", __func__);
put_nfs_open_context(lgp->args.ctx);
kfree(calldata);
dprintk("<-- %s\n", __func__);
}
static const struct rpc_call_ops nfs4_layoutget_call_ops = {
.rpc_call_prepare = nfs4_layoutget_prepare,
.rpc_call_done = nfs4_layoutget_done,
.rpc_release = nfs4_layoutget_release,
};
int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
{
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
.rpc_argp = &lgp->args,
.rpc_resp = &lgp->res,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_layoutget_call_ops,
.callback_data = lgp,
.flags = RPC_TASK_ASYNC,
};
int status = 0;
dprintk("--> %s\n", __func__);
lgp->res.layoutp = &lgp->args.layout;
lgp->res.seq_res.sr_slot = NULL;
nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = nfs4_wait_for_completion_rpc_task(task);
if (status == 0)
status = task->tk_status;
if (status == 0)
status = pnfs_layout_process(lgp);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
static void
nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
dprintk("--> %s\n", __func__);
if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
&lrp->res.seq_res, task))
return;
rpc_call_start(task);
}
static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
struct nfs_server *server;
struct pnfs_layout_hdr *lo = lrp->args.layout;
dprintk("--> %s\n", __func__);
if (!nfs4_sequence_done(task, &lrp->res.seq_res))
return;
server = NFS_SERVER(lrp->args.inode);
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
spin_lock(&lo->plh_inode->i_lock);
if (task->tk_status == 0) {
if (lrp->res.lrs_present) {
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
} else
BUG_ON(!list_empty(&lo->plh_segs));
}
lo->plh_block_lgets--;
spin_unlock(&lo->plh_inode->i_lock);
dprintk("<-- %s\n", __func__);
}
static void nfs4_layoutreturn_release(void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
dprintk("--> %s\n", __func__);
put_layout_hdr(lrp->args.layout);
kfree(calldata);
dprintk("<-- %s\n", __func__);
}
static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
.rpc_call_prepare = nfs4_layoutreturn_prepare,
.rpc_call_done = nfs4_layoutreturn_done,
.rpc_release = nfs4_layoutreturn_release,
};
int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
{
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
.rpc_argp = &lrp->args,
.rpc_resp = &lrp->res,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = lrp->clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_layoutreturn_call_ops,
.callback_data = lrp,
};
int status;
dprintk("--> %s\n", __func__);
nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = task->tk_status;
dprintk("<-- %s status=%d\n", __func__, status);
rpc_put_task(task);
return status;
}
/*
* Retrieve the list of Data Server devices from the MDS.
*/
static int _nfs4_getdevicelist(struct nfs_server *server,
const struct nfs_fh *fh,
struct pnfs_devicelist *devlist)
{
struct nfs4_getdevicelist_args args = {
.fh = fh,
.layoutclass = server->pnfs_curr_ld->id,
};
struct nfs4_getdevicelist_res res = {
.devlist = devlist,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
dprintk("--> %s\n", __func__);
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
&res.seq_res, 0);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
int nfs4_proc_getdevicelist(struct nfs_server *server,
const struct nfs_fh *fh,
struct pnfs_devicelist *devlist)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_getdevicelist(server, fh, devlist),
&exception);
} while (exception.retry);
dprintk("%s: err=%d, num_devs=%u\n", __func__,
err, devlist->num_devs);
return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
struct nfs4_getdeviceinfo_args args = {
.pdev = pdev,
};
struct nfs4_getdeviceinfo_res res = {
.pdev = pdev,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
dprintk("--> %s\n", __func__);
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_getdeviceinfo(server, pdev),
&exception);
} while (exception.retry);
return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
struct nfs_server *server = NFS_SERVER(data->args.inode);
if (nfs4_setup_sequence(server, &data->args.seq_args,
&data->res.seq_res, task))
return;
rpc_call_start(task);
}
static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
struct nfs_server *server = NFS_SERVER(data->args.inode);
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
switch (task->tk_status) { /* Just ignore these failures */
case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
case -NFS4ERR_BADLAYOUT: /* no layout */
case -NFS4ERR_GRACE: /* loca_reclaim always false */
task->tk_status = 0;
break;
case 0:
nfs_post_op_update_inode_force_wcc(data->args.inode,
data->res.fattr);
break;
default:
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
}
}
static void nfs4_layoutcommit_release(void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
struct pnfs_layout_segment *lseg, *tmp;
unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
pnfs_cleanup_layoutcommit(data);
/* Matched by references in pnfs_set_layoutcommit */
list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
list_del_init(&lseg->pls_lc_list);
if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
&lseg->pls_flags))
put_lseg(lseg);
}
clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
smp_mb__after_clear_bit();
wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
put_rpccred(data->cred);
kfree(data);
}
static const struct rpc_call_ops nfs4_layoutcommit_ops = {
.rpc_call_prepare = nfs4_layoutcommit_prepare,
.rpc_call_done = nfs4_layoutcommit_done,
.rpc_release = nfs4_layoutcommit_release,
};
int
nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = data->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
.rpc_client = NFS_CLIENT(data->args.inode),
.rpc_message = &msg,
.callback_ops = &nfs4_layoutcommit_ops,
.callback_data = data,
.flags = RPC_TASK_ASYNC,
};
struct rpc_task *task;
int status = 0;
dprintk("NFS: %4d initiating layoutcommit call. sync %d "
"lbw: %llu inode %lu\n",
data->task.tk_pid, sync,
data->args.lastbytewritten,
data->args.inode->i_ino);
nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
if (!sync)
goto out;
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0)
goto out;
status = task->tk_status;
out:
dprintk("%s: status %d\n", __func__, status);
rpc_put_task(task);
return status;
}
static int
_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
struct nfs41_secinfo_no_name_args args = {
.style = SECINFO_STYLE_CURRENT_FH,
};
struct nfs4_secinfo_res res = {
.flavors = flavors,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
.rpc_argp = &args,
.rpc_resp = &res,
};
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int
nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
struct nfs4_exception exception = { };
int err;
do {
err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
switch (err) {
case 0:
case -NFS4ERR_WRONGSEC:
case -NFS4ERR_NOTSUPP:
goto out;
default:
err = nfs4_handle_exception(server, err, &exception);
}
} while (exception.retry);
out:
return err;
}
static int
nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
int err;
struct page *page;
rpc_authflavor_t flavor;
struct nfs4_secinfo_flavors *flavors;
page = alloc_page(GFP_KERNEL);
if (!page) {
err = -ENOMEM;
goto out;
}
flavors = page_address(page);
err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
/*
* Fall back on "guess and check" method if
* the server doesn't support SECINFO_NO_NAME
*/
if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
err = nfs4_find_root_sec(server, fhandle, info);
goto out_freepage;
}
if (err)
goto out_freepage;
flavor = nfs_find_best_sec(flavors);
if (err == 0)
err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
out_freepage:
put_page(page);
if (err == -EACCES)
return -EPERM;
out:
return err;
}
static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
int status;
struct nfs41_test_stateid_args args = {
.stateid = stateid,
};
struct nfs41_test_stateid_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
if (status == NFS_OK)
return res.status;
return status;
}
static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs41_test_stateid(server, stateid),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
struct nfs41_free_stateid_args args = {
.stateid = stateid,
};
struct nfs41_free_stateid_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
}
static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_free_stateid(server, stateid),
&exception);
} while (exception.retry);
return err;
}
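/*
 * Two NFSv4.1 stateids match if their "other" fields are identical and
 * their seqids either agree or one of them is zero; a zero seqid is
 * treated as a wildcard that matches any sequence number.
 */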
static bool nfs41_match_stateid(const nfs4_stateid *s1,
const nfs4_stateid *s2)
{
if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
return false;
if (s1->seqid == s2->seqid)
return true;
if (s1->seqid == 0 || s2->seqid == 0)
return true;
return false;
}
#endif /* CONFIG_NFS_V4_1 */
static bool nfs4_match_stateid(const nfs4_stateid *s1,
const nfs4_stateid *s2)
{
return nfs4_stateid_match(s1, s2);
}
static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
.establish_clid = nfs4_init_clientid,
.get_clid_cred = nfs4_get_setclientid_cred,
};
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
.establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
.reclaim_complete = nfs41_proc_reclaim_complete,
};
#endif /* CONFIG_NFS_V4_1 */
static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs4_open_expired,
.recover_lock = nfs4_lock_expired,
.establish_clid = nfs4_init_clientid,
.get_clid_cred = nfs4_get_setclientid_cred,
};
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs41_open_expired,
.recover_lock = nfs41_lock_expired,
.establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
};
#endif /* CONFIG_NFS_V4_1 */
static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
.sched_state_renewal = nfs4_proc_async_renew,
.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
.renew_lease = nfs4_proc_renew,
};
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
.sched_state_renewal = nfs41_proc_async_sequence,
.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
.renew_lease = nfs4_proc_sequence,
};
#endif
static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
.minor_version = 0,
.call_sync = _nfs4_call_sync,
.match_stateid = nfs4_match_stateid,
.find_root_sec = nfs4_find_root_sec,
.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
.state_renewal_ops = &nfs40_state_renewal_ops,
};
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
.minor_version = 1,
.call_sync = _nfs4_call_sync_session,
.match_stateid = nfs41_match_stateid,
.find_root_sec = nfs41_find_root_sec,
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
.state_renewal_ops = &nfs41_state_renewal_ops,
};
#endif
const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
[1] = &nfs_v4_1_minor_ops,
#endif
};
static const struct inode_operations nfs4_file_inode_operations = {
.permission = nfs_permission,
.getattr = nfs_getattr,
.setattr = nfs_setattr,
.getxattr = generic_getxattr,
.setxattr = generic_setxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
};
const struct nfs_rpc_ops nfs_v4_clientops = {
.version = 4, /* protocol version */
.dentry_ops = &nfs4_dentry_operations,
.dir_inode_ops = &nfs4_dir_inode_operations,
.file_inode_ops = &nfs4_file_inode_operations,
.file_ops = &nfs4_file_operations,
.getroot = nfs4_proc_get_root,
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
.lookup = nfs4_proc_lookup,
.access = nfs4_proc_access,
.readlink = nfs4_proc_readlink,
.create = nfs4_proc_create,
.remove = nfs4_proc_remove,
.unlink_setup = nfs4_proc_unlink_setup,
.unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
.unlink_done = nfs4_proc_unlink_done,
.rename = nfs4_proc_rename,
.rename_setup = nfs4_proc_rename_setup,
.rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
.rename_done = nfs4_proc_rename_done,
.link = nfs4_proc_link,
.symlink = nfs4_proc_symlink,
.mkdir = nfs4_proc_mkdir,
.rmdir = nfs4_proc_remove,
.readdir = nfs4_proc_readdir,
.mknod = nfs4_proc_mknod,
.statfs = nfs4_proc_statfs,
.fsinfo = nfs4_proc_fsinfo,
.pathconf = nfs4_proc_pathconf,
.set_capabilities = nfs4_server_capabilities,
.decode_dirent = nfs4_decode_dirent,
.read_setup = nfs4_proc_read_setup,
.read_rpc_prepare = nfs4_proc_read_rpc_prepare,
.read_done = nfs4_read_done,
.write_setup = nfs4_proc_write_setup,
.write_rpc_prepare = nfs4_proc_write_rpc_prepare,
.write_done = nfs4_write_done,
.commit_setup = nfs4_proc_commit_setup,
.commit_done = nfs4_commit_done,
.lock = nfs4_proc_lock,
.clear_acl_cache = nfs4_zap_acl_attr,
.close_context = nfs4_close_context,
.open_context = nfs4_atomic_open,
.init_client = nfs4_init_client,
.secinfo = nfs4_proc_secinfo,
};
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
.prefix = XATTR_NAME_NFSV4_ACL,
.list = nfs4_xattr_list_nfs4_acl,
.get = nfs4_xattr_get_nfs4_acl,
.set = nfs4_xattr_set_nfs4_acl,
};
const struct xattr_handler *nfs4_xattr_handlers[] = {
&nfs4_xattr_nfs4_acl_handler,
NULL
};
module_param(max_session_slots, ushort, 0644);
MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
"requests the client will negotiate");
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
sndnvaps/linux-1 | drivers/usb/serial/generic.c | 968 | 16056 | /*
* USB Serial Converter Generic functions
*
* Copyright (C) 2010 - 2013 Johan Hovold (jhovold@gmail.com)
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#include <linux/kfifo.h>
#include <linux/serial.h>
#ifdef CONFIG_USB_SERIAL_GENERIC
static __u16 vendor = 0x05f9;
static __u16 product = 0xffff;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "User specified USB idVendor");
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "User specified USB idProduct");
static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
struct usb_serial_driver usb_serial_generic_device = {
.driver = {
.owner = THIS_MODULE,
.name = "generic",
},
.id_table = generic_device_ids,
.num_ports = 1,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.resume = usb_serial_generic_resume,
};
static struct usb_serial_driver * const serial_drivers[] = {
&usb_serial_generic_device, NULL
};
#endif
int usb_serial_generic_register(void)
{
int retval = 0;
#ifdef CONFIG_USB_SERIAL_GENERIC
generic_device_ids[0].idVendor = vendor;
generic_device_ids[0].idProduct = product;
generic_device_ids[0].match_flags =
USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT;
retval = usb_serial_register_drivers(serial_drivers,
"usbserial_generic", generic_device_ids);
#endif
return retval;
}
void usb_serial_generic_deregister(void)
{
#ifdef CONFIG_USB_SERIAL_GENERIC
usb_serial_deregister_drivers(serial_drivers);
#endif
}
int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int result = 0;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
port->throttled = 0;
port->throttle_req = 0;
spin_unlock_irqrestore(&port->lock, flags);
if (port->bulk_in_size)
result = usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
return result;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_open);
void usb_serial_generic_close(struct usb_serial_port *port)
{
unsigned long flags;
int i;
if (port->bulk_out_size) {
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
usb_kill_urb(port->write_urbs[i]);
spin_lock_irqsave(&port->lock, flags);
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
}
if (port->bulk_in_size) {
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
usb_kill_urb(port->read_urbs[i]);
}
}
EXPORT_SYMBOL_GPL(usb_serial_generic_close);
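/*
 * Default prepare_write_buffer implementation: drain up to @size bytes
 * from the port write fifo into the URB transfer buffer.
 */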
int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size)
{
return kfifo_out_locked(&port->write_fifo, dest, size, &port->lock);
}
/**
* usb_serial_generic_write_start - start writing buffered data
* @port: usb-serial port
* @mem_flags: flags to use for memory allocations
*
* Serialised using USB_SERIAL_WRITE_BUSY flag.
*
* Return: Zero on success or if busy, otherwise a negative errno value.
*/
int usb_serial_generic_write_start(struct usb_serial_port *port,
gfp_t mem_flags)
{
struct urb *urb;
int count, result;
unsigned long flags;
int i;
if (test_and_set_bit_lock(USB_SERIAL_WRITE_BUSY, &port->flags))
return 0;
retry:
spin_lock_irqsave(&port->lock, flags);
if (!port->write_urbs_free || !kfifo_len(&port->write_fifo)) {
clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
i = (int)find_first_bit(&port->write_urbs_free,
ARRAY_SIZE(port->write_urbs));
spin_unlock_irqrestore(&port->lock, flags);
urb = port->write_urbs[i];
count = port->serial->type->prepare_write_buffer(port,
urb->transfer_buffer,
port->bulk_out_size);
urb->transfer_buffer_length = count;
usb_serial_debug_data(&port->dev, __func__, count, urb->transfer_buffer);
spin_lock_irqsave(&port->lock, flags);
port->tx_bytes += count;
spin_unlock_irqrestore(&port->lock, flags);
clear_bit(i, &port->write_urbs_free);
result = usb_submit_urb(urb, mem_flags);
if (result) {
dev_err_console(port, "%s - error submitting urb: %d\n",
__func__, result);
set_bit(i, &port->write_urbs_free);
spin_lock_irqsave(&port->lock, flags);
port->tx_bytes -= count;
spin_unlock_irqrestore(&port->lock, flags);
clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
return result;
}
goto retry; /* try sending off another urb */
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write_start);
/**
* usb_serial_generic_write - generic write function
* @tty: tty for the port
* @port: usb-serial port
* @buf: data to write
* @count: number of bytes to write
*
* Return: The number of characters buffered, which may be anything from
* zero to @count, or a negative errno value.
*/
int usb_serial_generic_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
int result;
if (!port->bulk_out_size)
return -ENODEV;
if (!count)
return 0;
count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
result = usb_serial_generic_write_start(port, GFP_ATOMIC);
if (result)
return result;
return count;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write);
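/*
 * Worked example (editor's illustration): if the write fifo has only 100
 * bytes free and @count is 256, kfifo_in_locked() buffers just 100 bytes
 * and 100 is returned; the tty layer is expected to retry later with the
 * remaining data.
 */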
int usb_serial_generic_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned long flags;
int room;
if (!port->bulk_out_size)
return 0;
spin_lock_irqsave(&port->lock, flags);
room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
return room;
}
int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned long flags;
int chars;
if (!port->bulk_out_size)
return 0;
spin_lock_irqsave(&port->lock, flags);
chars = kfifo_len(&port->write_fifo) + port->tx_bytes;
spin_unlock_irqrestore(&port->lock, flags);
dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
return chars;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer);
void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int bps;
unsigned long period;
unsigned long expire;
bps = tty_get_baud_rate(tty);
if (!bps)
bps = 9600; /* B0 */
/*
* Use a poll-period of roughly the time it takes to send one
* character or at least one jiffy.
*/
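/*
 * Worked example (editor's illustration): a character is roughly ten bit
 * times (start + 8 data + stop), so at 9600 baud it takes 10/9600 s, about
 * 1 ms. With HZ=250, 10 * HZ / bps is 2500/9600 = 0 jiffies, which the
 * max_t() below rounds up to one jiffy.
 */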
period = max_t(unsigned long, (10 * HZ / bps), 1);
if (timeout)
period = min_t(unsigned long, period, timeout);
dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
__func__, jiffies_to_msecs(timeout),
jiffies_to_msecs(period));
expire = jiffies + timeout;
while (!port->serial->type->tx_empty(port)) {
schedule_timeout_interruptible(period);
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, expire))
break;
}
}
EXPORT_SYMBOL_GPL(usb_serial_generic_wait_until_sent);
static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
int index, gfp_t mem_flags)
{
int res;
if (!test_and_clear_bit(index, &port->read_urbs_free))
return 0;
dev_dbg(&port->dev, "%s - urb %d\n", __func__, index);
res = usb_submit_urb(port->read_urbs[index], mem_flags);
if (res) {
if (res != -EPERM && res != -ENODEV) {
dev_err(&port->dev,
"%s - usb_submit_urb failed: %d\n",
__func__, res);
}
set_bit(index, &port->read_urbs_free);
return res;
}
return 0;
}
int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port,
gfp_t mem_flags)
{
int res;
int i;
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
res = usb_serial_generic_submit_read_urb(port, i, mem_flags);
if (res)
goto err;
}
return 0;
err:
for (; i >= 0; --i)
usb_kill_urb(port->read_urbs[i]);
return res;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urbs);
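/*
 * Note (editor's annotation): on failure, urbs 0 .. i-1 were already
 * submitted and are killed by the unwind loop; calling usb_kill_urb() on
 * urb i itself, which was never submitted, is a harmless no-op.
 */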
void usb_serial_generic_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
char *ch = (char *)urb->transfer_buffer;
int i;
if (!urb->actual_length)
return;
/*
 * The per-character mucking around with the sysrq path is too slow for
 * stuff like 3G modems, so short-circuit it in the 99.9999999% of
* cases where the USB serial is not a console anyway.
*/
if (!port->port.console || !port->sysrq) {
tty_insert_flip_string(&port->port, ch, urb->actual_length);
} else {
for (i = 0; i < urb->actual_length; i++, ch++) {
if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(&port->port, *ch, TTY_NORMAL);
}
}
tty_flip_buffer_push(&port->port);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_process_read_urb);
void usb_serial_generic_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
int i;
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
if (urb == port->read_urbs[i])
break;
}
set_bit(i, &port->read_urbs_free);
dev_dbg(&port->dev, "%s - urb %d, len %d\n", __func__, i,
urb->actual_length);
switch (urb->status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb stopped: %d\n",
__func__, urb->status);
return;
case -EPIPE:
dev_err(&port->dev, "%s - urb stopped: %d\n",
__func__, urb->status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
__func__, urb->status);
goto resubmit;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
port->serial->type->process_read_urb(urb);
resubmit:
/* Throttle the device if requested by tty */
spin_lock_irqsave(&port->lock, flags);
port->throttled = port->throttle_req;
if (!port->throttled) {
spin_unlock_irqrestore(&port->lock, flags);
usb_serial_generic_submit_read_urb(port, i, GFP_ATOMIC);
} else {
spin_unlock_irqrestore(&port->lock, flags);
}
}
EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback);
void usb_serial_generic_write_bulk_callback(struct urb *urb)
{
unsigned long flags;
struct usb_serial_port *port = urb->context;
int i;
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
if (port->write_urbs[i] == urb)
break;
}
spin_lock_irqsave(&port->lock, flags);
port->tx_bytes -= urb->transfer_buffer_length;
set_bit(i, &port->write_urbs_free);
spin_unlock_irqrestore(&port->lock, flags);
switch (urb->status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb stopped: %d\n",
__func__, urb->status);
return;
case -EPIPE:
dev_err_console(port, "%s - urb stopped: %d\n",
__func__, urb->status);
return;
default:
dev_err_console(port, "%s - nonzero urb status: %d\n",
__func__, urb->status);
goto resubmit;
}
resubmit:
usb_serial_generic_write_start(port, GFP_ATOMIC);
usb_serial_port_softint(port);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback);
void usb_serial_generic_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
port->throttle_req = 1;
spin_unlock_irqrestore(&port->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_throttle);
void usb_serial_generic_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
int was_throttled;
spin_lock_irq(&port->lock);
was_throttled = port->throttled;
port->throttled = port->throttle_req = 0;
spin_unlock_irq(&port->lock);
if (was_throttled)
usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
static bool usb_serial_generic_msr_changed(struct tty_struct *tty,
unsigned long arg, struct async_icount *cprev)
{
struct usb_serial_port *port = tty->driver_data;
struct async_icount cnow;
unsigned long flags;
bool ret;
/*
* Use tty-port initialised flag to detect all hangups including the
* one generated at USB-device disconnect.
*/
if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
return true;
spin_lock_irqsave(&port->lock, flags);
cnow = port->icount; /* atomic copy*/
spin_unlock_irqrestore(&port->lock, flags);
ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev->cts));
*cprev = cnow;
return ret;
}
int usb_serial_generic_tiocmiwait(struct tty_struct *tty, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct async_icount cnow;
unsigned long flags;
int ret;
spin_lock_irqsave(&port->lock, flags);
cnow = port->icount; /* atomic copy */
spin_unlock_irqrestore(&port->lock, flags);
ret = wait_event_interruptible(port->port.delta_msr_wait,
usb_serial_generic_msr_changed(tty, arg, &cnow));
if (!ret && !test_bit(ASYNCB_INITIALIZED, &port->port.flags))
ret = -EIO;
return ret;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_tiocmiwait);
int usb_serial_generic_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct usb_serial_port *port = tty->driver_data;
struct async_icount cnow;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
cnow = port->icount; /* atomic copy */
spin_unlock_irqrestore(&port->lock, flags);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->tx = cnow.tx;
icount->rx = cnow.rx;
icount->frame = cnow.frame;
icount->parity = cnow.parity;
icount->overrun = cnow.overrun;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_get_icount);
#ifdef CONFIG_MAGIC_SYSRQ
int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
{
if (port->sysrq && port->port.console) {
if (ch && time_before(jiffies, port->sysrq)) {
handle_sysrq(ch);
port->sysrq = 0;
return 1;
}
port->sysrq = 0;
}
return 0;
}
#else
int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
{
return 0;
}
#endif
EXPORT_SYMBOL_GPL(usb_serial_handle_sysrq_char);
int usb_serial_handle_break(struct usb_serial_port *port)
{
if (!port->sysrq) {
port->sysrq = jiffies + HZ*5;
return 1;
}
port->sysrq = 0;
return 0;
}
EXPORT_SYMBOL_GPL(usb_serial_handle_break);
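/*
 * Worked example (editor's illustration): a SysRq sequence on a USB serial
 * console arrives as a break followed by a command character. The break
 * opens a five-second window (jiffies + HZ*5 above); if a character
 * arrives inside that window, usb_serial_handle_sysrq_char() passes it to
 * handle_sysrq() instead of the tty and then closes the window.
 */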
/**
* usb_serial_handle_dcd_change - handle a change of carrier detect state
 * @usb_port: usb-serial port
* @tty: tty for the port
* @status: new carrier detect status, nonzero if active
*/
void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
struct tty_struct *tty, unsigned int status)
{
struct tty_port *port = &usb_port->port;
dev_dbg(&usb_port->dev, "%s - status %d\n", __func__, status);
if (tty) {
struct tty_ldisc *ld = tty_ldisc_ref(tty);
if (ld) {
if (ld->ops->dcd_change)
ld->ops->dcd_change(tty, status);
tty_ldisc_deref(ld);
}
}
if (status)
wake_up_interruptible(&port->open_wait);
else if (tty && !C_CLOCAL(tty))
tty_hangup(tty);
}
EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
int usb_serial_generic_resume(struct usb_serial *serial)
{
struct usb_serial_port *port;
int i, c = 0, r;
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
continue;
if (port->bulk_in_size) {
r = usb_serial_generic_submit_read_urbs(port,
GFP_NOIO);
if (r < 0)
c++;
}
if (port->bulk_out_size) {
r = usb_serial_generic_write_start(port, GFP_NOIO);
if (r < 0)
c++;
}
}
return c ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_resume);
| gpl-2.0 |
hroark13/hydro_kernel | drivers/staging/lirc/lirc_zilog.c | 2504 | 41560 | /*
* i2c IR lirc driver for devices with zilog IR processors
*
* Copyright (c) 2000 Gerd Knorr <kraxel@goldbach.in-berlin.de>
* modified for PixelView (BT878P+W/FM) by
* Michal Kochanowicz <mkochano@pld.org.pl>
* Christoph Bartelmus <lirc@bartelmus.de>
* modified for KNC ONE TV Station/Anubis Typhoon TView Tuner by
* Ulrich Mueller <ulrich.mueller42@web.de>
* modified for Asus TV-Box and Creative/VisionTek BreakOut-Box by
* Stefan Jahn <stefan@lkcc.org>
* modified for inclusion into kernel sources by
* Jerome Brock <jbrock@users.sourceforge.net>
* modified for Leadtek Winfast PVR2000 by
* Thomas Reitmayr (treitmayr@yahoo.com)
* modified for Hauppauge PVR-150 IR TX device by
* Mark Weaver <mark@npsl.co.uk>
* changed name from lirc_pvr150 to lirc_zilog, works on more than pvr-150
* Jarod Wilson <jarod@redhat.com>
*
* parts are cut&pasted from the lirc_i2c.c driver
*
* Numerous changes updating lirc_zilog.c in kernel 2.6.38 and later are
* Copyright (C) 2011 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <media/lirc_dev.h>
#include <media/lirc.h>
struct IR;
struct IR_rx {
struct kref ref;
struct IR *ir;
/* RX device */
struct mutex client_lock;
struct i2c_client *c;
/* RX polling thread data */
struct task_struct *task;
/* RX read data */
unsigned char b[3];
bool hdpvr_data_fmt;
};
struct IR_tx {
struct kref ref;
struct IR *ir;
/* TX device */
struct mutex client_lock;
struct i2c_client *c;
/* TX additional actions needed */
int need_boot;
bool post_tx_ready_poll;
};
struct IR {
struct kref ref;
struct list_head list;
/* FIXME spinlock access to l.features */
struct lirc_driver l;
struct lirc_buffer rbuf;
struct mutex ir_lock;
atomic_t open_count;
struct i2c_adapter *adapter;
spinlock_t rx_ref_lock; /* struct IR_rx kref get()/put() */
struct IR_rx *rx;
spinlock_t tx_ref_lock; /* struct IR_tx kref get()/put() */
struct IR_tx *tx;
};
/* IR transceiver instance object list */
/*
* This lock is used for the following:
* a. ir_devices_list access, insertions, deletions
* b. struct IR kref get()s and put()s
* c. serialization of ir_probe() for the two i2c_clients for a Z8
*/
static DEFINE_MUTEX(ir_devices_lock);
static LIST_HEAD(ir_devices_list);
/* Block size for IR transmitter */
#define TX_BLOCK_SIZE 99
/* Hauppauge IR transmitter data */
struct tx_data_struct {
/* Boot block */
unsigned char *boot_data;
/* Start of binary data block */
unsigned char *datap;
/* End of binary data block */
unsigned char *endp;
/* Number of installed codesets */
unsigned int num_code_sets;
/* Pointers to codesets */
unsigned char **code_sets;
/* Global fixed data template */
int fixed[TX_BLOCK_SIZE];
};
static struct tx_data_struct *tx_data;
static struct mutex tx_data_lock;
#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \
## args)
#define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
/* module parameters */
static int debug; /* debug output */
static int tx_only; /* only handle the IR Tx function */
static int minor = -1; /* minor number */
#define dprintk(fmt, args...) \
do { \
if (debug) \
printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, \
## args); \
} while (0)
/* struct IR reference counting */
static struct IR *get_ir_device(struct IR *ir, bool ir_devices_lock_held)
{
if (ir_devices_lock_held) {
kref_get(&ir->ref);
} else {
mutex_lock(&ir_devices_lock);
kref_get(&ir->ref);
mutex_unlock(&ir_devices_lock);
}
return ir;
}
static void release_ir_device(struct kref *ref)
{
struct IR *ir = container_of(ref, struct IR, ref);
/*
* Things should be in this state by now:
* ir->rx set to NULL and deallocated - happens before ir->rx->ir put()
* ir->rx->task kthread stopped - happens before ir->rx->ir put()
* ir->tx set to NULL and deallocated - happens before ir->tx->ir put()
* ir->open_count == 0 - happens on final close()
* ir_lock, tx_ref_lock, rx_ref_lock, all released
*/
if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) {
lirc_unregister_driver(ir->l.minor);
ir->l.minor = MAX_IRCTL_DEVICES;
}
if (ir->rbuf.fifo_initialized)
lirc_buffer_free(&ir->rbuf);
list_del(&ir->list);
kfree(ir);
}
static int put_ir_device(struct IR *ir, bool ir_devices_lock_held)
{
int released;
if (ir_devices_lock_held)
return kref_put(&ir->ref, release_ir_device);
mutex_lock(&ir_devices_lock);
released = kref_put(&ir->ref, release_ir_device);
mutex_unlock(&ir_devices_lock);
return released;
}
/* struct IR_rx reference counting */
static struct IR_rx *get_ir_rx(struct IR *ir)
{
struct IR_rx *rx;
spin_lock(&ir->rx_ref_lock);
rx = ir->rx;
if (rx != NULL)
kref_get(&rx->ref);
spin_unlock(&ir->rx_ref_lock);
return rx;
}
static void destroy_rx_kthread(struct IR_rx *rx, bool ir_devices_lock_held)
{
/* end up polling thread */
if (!IS_ERR_OR_NULL(rx->task)) {
kthread_stop(rx->task);
rx->task = NULL;
/* Put the ir ptr that ir_probe() gave to the rx poll thread */
put_ir_device(rx->ir, ir_devices_lock_held);
}
}
static void release_ir_rx(struct kref *ref)
{
struct IR_rx *rx = container_of(ref, struct IR_rx, ref);
struct IR *ir = rx->ir;
/*
* This release function can't do all the work, as we want
* to keep the rx_ref_lock a spinlock, and killing the poll thread
* and releasing the ir reference can cause a sleep. That work is
* performed by put_ir_rx()
*/
ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
/* Don't put_ir_device(rx->ir) here; lock can't be freed yet */
ir->rx = NULL;
/* Don't do the kfree(rx) here; we still need to kill the poll thread */
return;
}
static int put_ir_rx(struct IR_rx *rx, bool ir_devices_lock_held)
{
int released;
struct IR *ir = rx->ir;
spin_lock(&ir->rx_ref_lock);
released = kref_put(&rx->ref, release_ir_rx);
spin_unlock(&ir->rx_ref_lock);
/* Destroy the rx kthread while not holding the spinlock */
if (released) {
destroy_rx_kthread(rx, ir_devices_lock_held);
kfree(rx);
/* Make sure we're not still in a poll_table somewhere */
wake_up_interruptible(&ir->rbuf.wait_poll);
}
/* Do a reference put() for the rx->ir reference, if we released rx */
if (released)
put_ir_device(ir, ir_devices_lock_held);
return released;
}
/* struct IR_tx reference counting */
static struct IR_tx *get_ir_tx(struct IR *ir)
{
struct IR_tx *tx;
spin_lock(&ir->tx_ref_lock);
tx = ir->tx;
if (tx != NULL)
kref_get(&tx->ref);
spin_unlock(&ir->tx_ref_lock);
return tx;
}
static void release_ir_tx(struct kref *ref)
{
struct IR_tx *tx = container_of(ref, struct IR_tx, ref);
struct IR *ir = tx->ir;
ir->l.features &= ~LIRC_CAN_SEND_PULSE;
/* Don't put_ir_device(tx->ir) here, so our lock doesn't get freed */
ir->tx = NULL;
kfree(tx);
}
static int put_ir_tx(struct IR_tx *tx, bool ir_devices_lock_held)
{
int released;
struct IR *ir = tx->ir;
spin_lock(&ir->tx_ref_lock);
released = kref_put(&tx->ref, release_ir_tx);
spin_unlock(&ir->tx_ref_lock);
/* Do a reference put() for the tx->ir reference, if we released tx */
if (released)
put_ir_device(ir, ir_devices_lock_held);
return released;
}
static int add_to_buf(struct IR *ir)
{
__u16 code;
unsigned char codes[2];
unsigned char keybuf[6];
int got_data = 0;
int ret;
int failures = 0;
unsigned char sendbuf[1] = { 0 };
struct lirc_buffer *rbuf = ir->l.rbuf;
struct IR_rx *rx;
struct IR_tx *tx;
if (lirc_buffer_full(rbuf)) {
dprintk("buffer overflow\n");
return -EOVERFLOW;
}
rx = get_ir_rx(ir);
if (rx == NULL)
return -ENXIO;
/* Ensure our rx->c i2c_client remains valid for the duration */
mutex_lock(&rx->client_lock);
if (rx->c == NULL) {
mutex_unlock(&rx->client_lock);
put_ir_rx(rx, false);
return -ENXIO;
}
tx = get_ir_tx(ir);
/*
* service the device as long as it is returning
* data and we have space
*/
do {
if (kthread_should_stop()) {
ret = -ENODATA;
break;
}
/*
* Lock i2c bus for the duration. RX/TX chips interfere so
* this is worth it
*/
mutex_lock(&ir->ir_lock);
if (kthread_should_stop()) {
mutex_unlock(&ir->ir_lock);
ret = -ENODATA;
break;
}
/*
		 * Send a random "poll command" (?). The Windows driver does
		 * this, and it is a good point at which to detect chip failure.
*/
ret = i2c_master_send(rx->c, sendbuf, 1);
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
if (failures >= 3) {
mutex_unlock(&ir->ir_lock);
zilog_error("unable to read from the IR chip "
"after 3 resets, giving up\n");
break;
}
/* Looks like the chip crashed, reset it */
zilog_error("polling the IR receiver chip failed, "
"trying reset\n");
set_current_state(TASK_UNINTERRUPTIBLE);
if (kthread_should_stop()) {
mutex_unlock(&ir->ir_lock);
ret = -ENODATA;
break;
}
schedule_timeout((100 * HZ + 999) / 1000);
if (tx != NULL)
tx->need_boot = 1;
++failures;
mutex_unlock(&ir->ir_lock);
ret = 0;
continue;
}
if (kthread_should_stop()) {
mutex_unlock(&ir->ir_lock);
ret = -ENODATA;
break;
}
ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
mutex_unlock(&ir->ir_lock);
if (ret != sizeof(keybuf)) {
zilog_error("i2c_master_recv failed with %d -- "
"keeping last read buffer\n", ret);
} else {
rx->b[0] = keybuf[3];
rx->b[1] = keybuf[4];
rx->b[2] = keybuf[5];
dprintk("key (0x%02x/0x%02x)\n", rx->b[0], rx->b[1]);
}
/* key pressed ? */
if (rx->hdpvr_data_fmt) {
if (got_data && (keybuf[0] == 0x80)) {
ret = 0;
break;
} else if (got_data && (keybuf[0] == 0x00)) {
ret = -ENODATA;
break;
}
} else if ((rx->b[0] & 0x80) == 0) {
ret = got_data ? 0 : -ENODATA;
break;
}
/* look what we have */
code = (((__u16)rx->b[0] & 0x7f) << 6) | (rx->b[1] >> 2);
codes[0] = (code >> 8) & 0xff;
codes[1] = code & 0xff;
/* return it */
lirc_buffer_write(rbuf, codes);
++got_data;
ret = 0;
} while (!lirc_buffer_full(rbuf));
mutex_unlock(&rx->client_lock);
if (tx != NULL)
put_ir_tx(tx, false);
put_ir_rx(rx, false);
return ret;
}
/*
* Main function of the polling thread -- from lirc_dev.
* We don't fit the LIRC model at all anymore. This is horrible, but
* basically we have a single RX/TX device with a nasty failure mode
* that needs to be accounted for across the pair. lirc lets us provide
* fops, but prevents us from using the internal polling, etc. if we do
* so. Hence the replication. Might be neater to extend the LIRC model
* to account for this but I'd think it's a very special case of seriously
* messed up hardware.
*/
static int lirc_thread(void *arg)
{
struct IR *ir = arg;
struct lirc_buffer *rbuf = ir->l.rbuf;
dprintk("poll thread started\n");
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
/* if device not opened, we can sleep half a second */
if (atomic_read(&ir->open_count) == 0) {
schedule_timeout(HZ/2);
continue;
}
/*
* This is ~113*2 + 24 + jitter (2*repeat gap + code length).
* We use this interval as the chip resets every time you poll
* it (bad!). This is therefore just sufficient to catch all
* of the button presses. It makes the remote much more
* responsive. You can see the difference by running irw and
* holding down a button. With 100ms, the old polling
* interval, you'll notice breaks in the repeat sequence
* corresponding to lost keypresses.
*/
schedule_timeout((260 * HZ) / 1000);
if (kthread_should_stop())
break;
if (!add_to_buf(ir))
wake_up_interruptible(&rbuf->wait_poll);
}
dprintk("poll thread ended\n");
return 0;
}
static int set_use_inc(void *data)
{
return 0;
}
static void set_use_dec(void *data)
{
return;
}
/* safe read of a uint32 (always network byte order) */
static int read_uint32(unsigned char **data,
unsigned char *endp, unsigned int *val)
{
if (*data + 4 > endp)
return 0;
*val = ((*data)[0] << 24) | ((*data)[1] << 16) |
((*data)[2] << 8) | (*data)[3];
*data += 4;
return 1;
}
/* safe read of a uint8 */
static int read_uint8(unsigned char **data,
unsigned char *endp, unsigned char *val)
{
if (*data + 1 > endp)
return 0;
*val = *((*data)++);
return 1;
}
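/*
 * Worked example (editor's illustration): read_uint32() decodes network
 * byte order, so the byte sequence 0x00 0x01 0x02 0x03 yields 0x00010203
 * and advances *data by four; both helpers return 0, leaving *data
 * untouched, when fewer bytes than requested remain before endp.
 */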
/* safe skipping of N bytes */
static int skip(unsigned char **data,
unsigned char *endp, unsigned int distance)
{
if (*data + distance > endp)
return 0;
*data += distance;
return 1;
}
/* decompress key data into the given buffer */
static int get_key_data(unsigned char *buf,
unsigned int codeset, unsigned int key)
{
unsigned char *data, *endp, *diffs, *key_block;
unsigned char keys, ndiffs, id;
unsigned int base, lim, pos, i;
/* Binary search for the codeset */
for (base = 0, lim = tx_data->num_code_sets; lim; lim >>= 1) {
pos = base + (lim >> 1);
data = tx_data->code_sets[pos];
if (!read_uint32(&data, tx_data->endp, &i))
goto corrupt;
if (i == codeset)
break;
else if (codeset > i) {
base = pos + 1;
--lim;
}
}
/* Not found? */
if (!lim)
return -EPROTO;
/* Set end of data block */
endp = pos < tx_data->num_code_sets - 1 ?
tx_data->code_sets[pos + 1] : tx_data->endp;
/* Read the block header */
if (!read_uint8(&data, endp, &keys) ||
!read_uint8(&data, endp, &ndiffs) ||
ndiffs > TX_BLOCK_SIZE || keys == 0)
goto corrupt;
/* Save diffs & skip */
diffs = data;
if (!skip(&data, endp, ndiffs))
goto corrupt;
/* Read the id of the first key */
if (!read_uint8(&data, endp, &id))
goto corrupt;
/* Unpack the first key's data */
for (i = 0; i < TX_BLOCK_SIZE; ++i) {
if (tx_data->fixed[i] == -1) {
if (!read_uint8(&data, endp, &buf[i]))
goto corrupt;
} else {
buf[i] = (unsigned char)tx_data->fixed[i];
}
}
/* Early out key found/not found */
if (key == id)
return 0;
if (keys == 1)
return -EPROTO;
/* Sanity check */
key_block = data;
if (!skip(&data, endp, (keys - 1) * (ndiffs + 1)))
goto corrupt;
/* Binary search for the key */
for (base = 0, lim = keys - 1; lim; lim >>= 1) {
/* Seek to block */
unsigned char *key_data;
pos = base + (lim >> 1);
key_data = key_block + (ndiffs + 1) * pos;
if (*key_data == key) {
/* skip key id */
++key_data;
/* found, so unpack the diffs */
for (i = 0; i < ndiffs; ++i) {
unsigned char val;
if (!read_uint8(&key_data, endp, &val) ||
diffs[i] >= TX_BLOCK_SIZE)
goto corrupt;
buf[diffs[i]] = val;
}
return 0;
} else if (key > *key_data) {
base = pos + 1;
--lim;
}
}
/* Key not found */
return -EPROTO;
corrupt:
zilog_error("firmware is corrupt\n");
return -EFAULT;
}
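/*
 * Layout sketch (editor's reconstruction from the parsing code above, not
 * an authoritative format description): each codeset block appears to be
 *
 *   u32 codeset id (network byte order)
 *   u8  keys, u8 ndiffs
 *   u8  diffs[ndiffs]                   - positions that vary per key
 *   u8  first key id + its non-fixed data bytes
 *   (keys - 1) x { u8 key id, u8 values[ndiffs] }
 *
 * which is why get_key_data() can binary-search both the codeset table
 * and the per-key diff records.
 */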
/* send a block of data to the IR TX device */
static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
{
int i, j, ret;
unsigned char buf[5];
for (i = 0; i < TX_BLOCK_SIZE;) {
int tosend = TX_BLOCK_SIZE - i;
if (tosend > 4)
tosend = 4;
buf[0] = (unsigned char)(i + 1);
for (j = 0; j < tosend; ++j)
buf[1 + j] = data_block[i + j];
dprintk("%02x %02x %02x %02x %02x",
buf[0], buf[1], buf[2], buf[3], buf[4]);
ret = i2c_master_send(tx->c, buf, tosend + 1);
if (ret != tosend + 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
i += tosend;
}
return 0;
}
/* send boot data to the IR TX device */
static int send_boot_data(struct IR_tx *tx)
{
int ret, i;
unsigned char buf[4];
/* send the boot block */
ret = send_data_block(tx, tx_data->boot_data);
if (ret != 0)
return ret;
/* Hit the go button to activate the new boot data */
buf[0] = 0x00;
buf[1] = 0x20;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/*
	 * Wait for the Zilog to settle after hitting 'go' following the boot
	 * block upload.
* Without this delay, the HD-PVR and HVR-1950 both return an -EIO
* upon attempting to get firmware revision, and tx probe thus fails.
*/
for (i = 0; i < 10; i++) {
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
udelay(100);
}
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Here comes the firmware version... (hopefully) */
ret = i2c_master_recv(tx->c, buf, 4);
if (ret != 4) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return 0;
}
if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
return 0;
}
zilog_notify("Zilog/Hauppauge IR blaster firmware version "
"%d.%d.%d loaded\n", buf[1], buf[2], buf[3]);
return 0;
}
/* unload "firmware", lock held */
static void fw_unload_locked(void)
{
if (tx_data) {
if (tx_data->code_sets)
vfree(tx_data->code_sets);
if (tx_data->datap)
vfree(tx_data->datap);
vfree(tx_data);
tx_data = NULL;
dprintk("successfully unloaded IR blaster firmware\n");
}
}
/* unload "firmware" for the IR TX device */
static void fw_unload(void)
{
mutex_lock(&tx_data_lock);
fw_unload_locked();
mutex_unlock(&tx_data_lock);
}
/* load "firmware" for the IR TX device */
static int fw_load(struct IR_tx *tx)
{
int ret;
unsigned int i;
unsigned char *data, version, num_global_fixed;
const struct firmware *fw_entry;
/* Already loaded? */
mutex_lock(&tx_data_lock);
if (tx_data) {
ret = 0;
goto out;
}
/* Request codeset data file */
ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
if (ret != 0) {
zilog_error("firmware haup-ir-blaster.bin not available "
"(%d)\n", ret);
ret = ret < 0 ? ret : -EFAULT;
goto out;
}
dprintk("firmware of size %zu loaded\n", fw_entry->size);
/* Parse the file */
tx_data = vmalloc(sizeof(*tx_data));
if (tx_data == NULL) {
zilog_error("out of memory\n");
release_firmware(fw_entry);
ret = -ENOMEM;
goto out;
}
tx_data->code_sets = NULL;
	/* Copy the data so hotplug doesn't get confused and time out */
tx_data->datap = vmalloc(fw_entry->size);
if (tx_data->datap == NULL) {
zilog_error("out of memory\n");
release_firmware(fw_entry);
vfree(tx_data);
ret = -ENOMEM;
goto out;
}
memcpy(tx_data->datap, fw_entry->data, fw_entry->size);
tx_data->endp = tx_data->datap + fw_entry->size;
	release_firmware(fw_entry);
	fw_entry = NULL;
/* Check version */
data = tx_data->datap;
if (!read_uint8(&data, tx_data->endp, &version))
goto corrupt;
if (version != 1) {
zilog_error("unsupported code set file version (%u, expected"
"1) -- please upgrade to a newer driver",
version);
fw_unload_locked();
ret = -EFAULT;
goto out;
}
/* Save boot block for later */
tx_data->boot_data = data;
if (!skip(&data, tx_data->endp, TX_BLOCK_SIZE))
goto corrupt;
if (!read_uint32(&data, tx_data->endp,
&tx_data->num_code_sets))
goto corrupt;
dprintk("%u IR blaster codesets loaded\n", tx_data->num_code_sets);
tx_data->code_sets = vmalloc(
tx_data->num_code_sets * sizeof(char *));
if (tx_data->code_sets == NULL) {
fw_unload_locked();
ret = -ENOMEM;
goto out;
}
for (i = 0; i < TX_BLOCK_SIZE; ++i)
tx_data->fixed[i] = -1;
/* Read global fixed data template */
if (!read_uint8(&data, tx_data->endp, &num_global_fixed) ||
num_global_fixed > TX_BLOCK_SIZE)
goto corrupt;
for (i = 0; i < num_global_fixed; ++i) {
unsigned char pos, val;
if (!read_uint8(&data, tx_data->endp, &pos) ||
!read_uint8(&data, tx_data->endp, &val) ||
pos >= TX_BLOCK_SIZE)
goto corrupt;
tx_data->fixed[pos] = (int)val;
}
	/* Record the position of each code set */
for (i = 0; i < tx_data->num_code_sets; ++i) {
unsigned int id;
unsigned char keys;
unsigned char ndiffs;
/* Save the codeset position */
tx_data->code_sets[i] = data;
/* Read header */
if (!read_uint32(&data, tx_data->endp, &id) ||
!read_uint8(&data, tx_data->endp, &keys) ||
!read_uint8(&data, tx_data->endp, &ndiffs) ||
ndiffs > TX_BLOCK_SIZE || keys == 0)
goto corrupt;
/* skip diff positions */
if (!skip(&data, tx_data->endp, ndiffs))
goto corrupt;
/*
* After the diffs we have the first key id + data -
* global fixed
*/
if (!skip(&data, tx_data->endp,
1 + TX_BLOCK_SIZE - num_global_fixed))
goto corrupt;
/* Then we have keys-1 blocks of key id+diffs */
if (!skip(&data, tx_data->endp,
(ndiffs + 1) * (keys - 1)))
goto corrupt;
}
ret = 0;
goto out;
corrupt:
zilog_error("firmware is corrupt\n");
fw_unload_locked();
ret = -EFAULT;
out:
mutex_unlock(&tx_data_lock);
return ret;
}
/* copied from lirc_dev */
static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
{
struct IR *ir = filep->private_data;
struct IR_rx *rx;
struct lirc_buffer *rbuf = ir->l.rbuf;
int ret = 0, written = 0, retries = 0;
unsigned int m;
DECLARE_WAITQUEUE(wait, current);
dprintk("read called\n");
if (n % rbuf->chunk_size) {
dprintk("read result = -EINVAL\n");
return -EINVAL;
}
rx = get_ir_rx(ir);
if (rx == NULL)
return -ENXIO;
/*
	 * we add ourselves to the task queue before the buffer check
	 * to avoid losing a scan code (in case the queue is awakened somewhere
	 * between the while-condition check and scheduling)
*/
add_wait_queue(&rbuf->wait_poll, &wait);
set_current_state(TASK_INTERRUPTIBLE);
/*
	 * While we haven't yet provided 'n' bytes, the device is opened in
	 * blocking mode, and 'copy_to_user' is happy, wait for data.
*/
while (written < n && ret == 0) {
if (lirc_buffer_empty(rbuf)) {
/*
* According to the read(2) man page, 'written' can be
* returned as less than 'n', instead of blocking
* again, returning -EWOULDBLOCK, or returning
* -ERESTARTSYS
*/
if (written)
break;
if (filep->f_flags & O_NONBLOCK) {
ret = -EWOULDBLOCK;
break;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
schedule();
set_current_state(TASK_INTERRUPTIBLE);
} else {
unsigned char buf[rbuf->chunk_size];
m = lirc_buffer_read(rbuf, buf);
if (m == rbuf->chunk_size) {
				/* copy_to_user() returns bytes not copied, not an errno */
				if (copy_to_user((void *)outbuf+written, buf,
						 rbuf->chunk_size))
					ret = -EFAULT;
				written += rbuf->chunk_size;
} else {
retries++;
}
if (retries >= 5) {
zilog_error("Buffer read failed!\n");
ret = -EIO;
}
}
}
remove_wait_queue(&rbuf->wait_poll, &wait);
put_ir_rx(rx, false);
set_current_state(TASK_RUNNING);
dprintk("read result = %d (%s)\n", ret, ret ? "Error" : "OK");
return ret ? ret : written;
}
/* send a keypress to the IR TX device */
static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
{
unsigned char data_block[TX_BLOCK_SIZE];
unsigned char buf[2];
int i, ret;
/* Get data for the codeset/key */
ret = get_key_data(data_block, code, key);
if (ret == -EPROTO) {
zilog_error("failed to get data for code %u, key %u -- check "
"lircd.conf entries\n", code, key);
return ret;
} else if (ret != 0)
return ret;
/* Send the data block */
ret = send_data_block(tx, data_block);
if (ret != 0)
return ret;
/* Send data block length? */
buf[0] = 0x00;
buf[1] = 0x40;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Give the z8 a moment to process data block */
for (i = 0; i < 10; i++) {
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
udelay(100);
}
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Send finished download? */
ret = i2c_master_recv(tx->c, buf, 1);
if (ret != 1) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
if (buf[0] != 0xA0) {
zilog_error("unexpected IR TX response #1: %02x\n",
buf[0]);
return -EFAULT;
}
/* Send prepare command? */
buf[0] = 0x00;
buf[1] = 0x80;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/*
* The sleep bits aren't necessary on the HD PVR, and in fact, the
* last i2c_master_recv always fails with a -5, so for now, we're
* going to skip this whole mess and say we're done on the HD PVR
*/
if (!tx->post_tx_ready_poll) {
dprintk("sent code %u, key %u\n", code, key);
return 0;
}
/*
* This bit NAKs until the device is ready, so we retry it
* sleeping a bit each time. This seems to be what the windows
* driver does, approximately.
* Try for up to 1s.
*/
for (i = 0; i < 20; ++i) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((50 * HZ + 999) / 1000);
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
dprintk("NAK expected: i2c_master_send "
"failed with %d (try %d)\n", ret, i+1);
}
if (ret != 1) {
zilog_error("IR TX chip never got ready: last i2c_master_send "
"failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Seems to be an 'ok' response */
i = i2c_master_recv(tx->c, buf, 1);
if (i != 1) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return -EFAULT;
}
if (buf[0] != 0x80) {
zilog_error("unexpected IR TX response #2: %02x\n", buf[0]);
return -EFAULT;
}
/* Oh good, it worked */
dprintk("sent code %u, key %u\n", code, key);
return 0;
}
/*
* Write a code to the device. We take in a 32-bit number (an int) and then
* decode this to a codeset/key index. The key data is then decompressed and
* sent to the device. We have a spin lock as per i2c documentation to prevent
* multiple concurrent sends which would probably cause the device to explode.
*/
static ssize_t write(struct file *filep, const char *buf, size_t n,
loff_t *ppos)
{
struct IR *ir = filep->private_data;
struct IR_tx *tx;
size_t i;
int failures = 0;
/* Validate user parameters */
if (n % sizeof(int))
return -EINVAL;
/* Get a struct IR_tx reference */
tx = get_ir_tx(ir);
if (tx == NULL)
return -ENXIO;
/* Ensure our tx->c i2c_client remains valid for the duration */
mutex_lock(&tx->client_lock);
if (tx->c == NULL) {
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
return -ENXIO;
}
/* Lock i2c bus for the duration */
mutex_lock(&ir->ir_lock);
/* Send each keypress */
for (i = 0; i < n;) {
int ret = 0;
int command;
if (copy_from_user(&command, buf + i, sizeof(command))) {
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
return -EFAULT;
}
/* Send boot data first if required */
if (tx->need_boot == 1) {
/* Make sure we have the 'firmware' loaded, first */
ret = fw_load(tx);
if (ret != 0) {
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
if (ret != -ENOMEM)
ret = -EIO;
return ret;
}
/* Prep the chip for transmitting codes */
ret = send_boot_data(tx);
if (ret == 0)
tx->need_boot = 0;
}
/* Send the code */
if (ret == 0) {
ret = send_code(tx, (unsigned)command >> 16,
(unsigned)command & 0xFFFF);
if (ret == -EPROTO) {
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
return ret;
}
}
/*
* Hmm, a failure. If we've had a few then give up, otherwise
* try a reset
*/
if (ret != 0) {
/* Looks like the chip crashed, reset it */
zilog_error("sending to the IR transmitter chip "
"failed, trying reset\n");
if (failures >= 3) {
zilog_error("unable to send to the IR chip "
"after 3 resets, giving up\n");
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
return ret;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((100 * HZ + 999) / 1000);
tx->need_boot = 1;
++failures;
} else
i += sizeof(int);
}
/* Release i2c bus */
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
/* Give back our struct IR_tx reference */
put_ir_tx(tx, false);
/* All looks good */
return n;
}
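/*
 * Worked example (editor's illustration): each 32-bit value written to the
 * device encodes the codeset in the upper 16 bits and the key in the lower
 * 16, per the send_code() call above. Writing the int 0x00BC0010 therefore
 * requests key 0x10 (16) of codeset 0xBC (188); the valid pairs come from
 * the user's lircd.conf.
 */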
/* copied from lirc_dev */
static unsigned int poll(struct file *filep, poll_table *wait)
{
struct IR *ir = filep->private_data;
struct IR_rx *rx;
struct lirc_buffer *rbuf = ir->l.rbuf;
unsigned int ret;
dprintk("poll called\n");
rx = get_ir_rx(ir);
if (rx == NULL) {
/*
* Revisit this, if our poll function ever reports writeable
* status for Tx
*/
dprintk("poll result = POLLERR\n");
return POLLERR;
}
/*
* Add our lirc_buffer's wait_queue to the poll_table. A wake up on
* that buffer's wait queue indicates we may have a new poll status.
*/
poll_wait(filep, &rbuf->wait_poll, wait);
/* Indicate what ops could happen immediately without blocking */
ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN|POLLRDNORM);
dprintk("poll result = %s\n", ret ? "POLLIN|POLLRDNORM" : "none");
return ret;
}
static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct IR *ir = filep->private_data;
int result;
unsigned long mode, features;
features = ir->l.features;
switch (cmd) {
case LIRC_GET_LENGTH:
result = put_user((unsigned long)13,
(unsigned long *)arg);
break;
case LIRC_GET_FEATURES:
result = put_user(features, (unsigned long *) arg);
break;
case LIRC_GET_REC_MODE:
if (!(features&LIRC_CAN_REC_MASK))
return -ENOSYS;
result = put_user(LIRC_REC2MODE
(features&LIRC_CAN_REC_MASK),
(unsigned long *)arg);
break;
case LIRC_SET_REC_MODE:
if (!(features&LIRC_CAN_REC_MASK))
return -ENOSYS;
result = get_user(mode, (unsigned long *)arg);
if (!result && !(LIRC_MODE2REC(mode) & features))
result = -EINVAL;
break;
case LIRC_GET_SEND_MODE:
if (!(features&LIRC_CAN_SEND_MASK))
return -ENOSYS;
result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg);
break;
case LIRC_SET_SEND_MODE:
if (!(features&LIRC_CAN_SEND_MASK))
return -ENOSYS;
result = get_user(mode, (unsigned long *) arg);
if (!result && mode != LIRC_MODE_PULSE)
return -EINVAL;
break;
default:
return -EINVAL;
}
return result;
}
static struct IR *get_ir_device_by_minor(unsigned int minor)
{
struct IR *ir;
struct IR *ret = NULL;
mutex_lock(&ir_devices_lock);
if (!list_empty(&ir_devices_list)) {
list_for_each_entry(ir, &ir_devices_list, list) {
if (ir->l.minor == minor) {
ret = get_ir_device(ir, true);
break;
}
}
}
mutex_unlock(&ir_devices_lock);
return ret;
}
/*
* Open the IR device. Get hold of our IR structure and
* stash it in private_data for the file
*/
static int open(struct inode *node, struct file *filep)
{
struct IR *ir;
unsigned int minor = MINOR(node->i_rdev);
/* find our IR struct */
ir = get_ir_device_by_minor(minor);
if (ir == NULL)
return -ENODEV;
atomic_inc(&ir->open_count);
/* stash our IR struct */
filep->private_data = ir;
nonseekable_open(node, filep);
return 0;
}
/* Close the IR device */
static int close(struct inode *node, struct file *filep)
{
/* find our IR struct */
struct IR *ir = filep->private_data;
if (ir == NULL) {
zilog_error("close: no private_data attached to the file!\n");
return -ENODEV;
}
atomic_dec(&ir->open_count);
put_ir_device(ir, false);
return 0;
}
static int ir_remove(struct i2c_client *client);
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id);
#define ID_FLAG_TX 0x01
#define ID_FLAG_HDPVR 0x02
static const struct i2c_device_id ir_transceiver_id[] = {
{ "ir_tx_z8f0811_haup", ID_FLAG_TX },
{ "ir_rx_z8f0811_haup", 0 },
{ "ir_tx_z8f0811_hdpvr", ID_FLAG_HDPVR | ID_FLAG_TX },
{ "ir_rx_z8f0811_hdpvr", ID_FLAG_HDPVR },
{ }
};
static struct i2c_driver driver = {
.driver = {
.owner = THIS_MODULE,
.name = "Zilog/Hauppauge i2c IR",
},
.probe = ir_probe,
.remove = ir_remove,
.id_table = ir_transceiver_id,
};
static const struct file_operations lirc_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = read,
.write = write,
.poll = poll,
.unlocked_ioctl = ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ioctl,
#endif
.open = open,
.release = close
};
static struct lirc_driver lirc_template = {
.name = "lirc_zilog",
.minor = -1,
.code_length = 13,
.buffer_size = BUFLEN / 2,
.sample_rate = 0, /* tell lirc_dev to not start its own kthread */
.chunk_size = 2,
.set_use_inc = set_use_inc,
.set_use_dec = set_use_dec,
.fops = &lirc_fops,
.owner = THIS_MODULE,
};
static int ir_remove(struct i2c_client *client)
{
if (strncmp("ir_tx_z8", client->name, 8) == 0) {
struct IR_tx *tx = i2c_get_clientdata(client);
if (tx != NULL) {
mutex_lock(&tx->client_lock);
tx->c = NULL;
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
}
} else if (strncmp("ir_rx_z8", client->name, 8) == 0) {
struct IR_rx *rx = i2c_get_clientdata(client);
if (rx != NULL) {
mutex_lock(&rx->client_lock);
rx->c = NULL;
mutex_unlock(&rx->client_lock);
put_ir_rx(rx, false);
}
}
return 0;
}
/* ir_devices_lock must be held */
static struct IR *get_ir_device_by_adapter(struct i2c_adapter *adapter)
{
struct IR *ir;
if (list_empty(&ir_devices_list))
return NULL;
list_for_each_entry(ir, &ir_devices_list, list)
if (ir->adapter == adapter) {
get_ir_device(ir, true);
return ir;
}
return NULL;
}
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct IR *ir;
struct IR_tx *tx;
struct IR_rx *rx;
struct i2c_adapter *adap = client->adapter;
int ret;
bool tx_probe = false;
dprintk("%s: %s on i2c-%d (%s), client addr=0x%02x\n",
__func__, id->name, adap->nr, adap->name, client->addr);
/*
* The IR receiver is at i2c address 0x71.
* The IR transmitter is at i2c address 0x70.
*/
if (id->driver_data & ID_FLAG_TX)
tx_probe = true;
else if (tx_only) /* module option */
return -ENXIO;
zilog_info("probing IR %s on %s (i2c-%d)\n",
tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_lock(&ir_devices_lock);
/* Use a single struct IR instance for both the Rx and Tx functions */
ir = get_ir_device_by_adapter(adap);
if (ir == NULL) {
ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
if (ir == NULL) {
ret = -ENOMEM;
goto out_no_ir;
}
kref_init(&ir->ref);
/* store for use in ir_probe() again, and open() later on */
INIT_LIST_HEAD(&ir->list);
list_add_tail(&ir->list, &ir_devices_list);
ir->adapter = adap;
mutex_init(&ir->ir_lock);
atomic_set(&ir->open_count, 0);
spin_lock_init(&ir->tx_ref_lock);
spin_lock_init(&ir->rx_ref_lock);
/* set lirc_dev stuff */
memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
/*
* FIXME this is a pointer reference to us, but no refcount.
*
* This OK for now, since lirc_dev currently won't touch this
* buffer as we provide our own lirc_fops.
*
* Currently our own lirc_fops rely on this ir->l.rbuf pointer
*/
ir->l.rbuf = &ir->rbuf;
ir->l.dev = &adap->dev;
ret = lirc_buffer_init(ir->l.rbuf,
ir->l.chunk_size, ir->l.buffer_size);
if (ret)
goto out_put_ir;
}
if (tx_probe) {
/* Get the IR_rx instance for later, if already allocated */
rx = get_ir_rx(ir);
/* Set up a struct IR_tx instance */
tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL);
if (tx == NULL) {
ret = -ENOMEM;
goto out_put_xx;
}
kref_init(&tx->ref);
ir->tx = tx;
ir->l.features |= LIRC_CAN_SEND_PULSE;
mutex_init(&tx->client_lock);
tx->c = client;
tx->need_boot = 1;
tx->post_tx_ready_poll =
(id->driver_data & ID_FLAG_HDPVR) ? false : true;
/* An ir ref goes to the struct IR_tx instance */
tx->ir = get_ir_device(ir, true);
/* A tx ref goes to the i2c_client */
i2c_set_clientdata(client, get_ir_tx(ir));
/*
* Load the 'firmware'. We do this before registering with
* lirc_dev, so the first firmware load attempt does not happen
* after a open() or write() call on the device.
*
* Failure here is not deemed catastrophic, so the receiver will
* still be usable. Firmware load will be retried in write(),
* if it is needed.
*/
fw_load(tx);
/* Proceed only if the Rx client is also ready or not needed */
if (rx == NULL && !tx_only) {
zilog_info("probe of IR Tx on %s (i2c-%d) done. Waiting"
" on IR Rx.\n", adap->name, adap->nr);
goto out_ok;
}
} else {
/* Get the IR_tx instance for later, if already allocated */
tx = get_ir_tx(ir);
/* Set up a struct IR_rx instance */
rx = kzalloc(sizeof(struct IR_rx), GFP_KERNEL);
if (rx == NULL) {
ret = -ENOMEM;
goto out_put_xx;
}
kref_init(&rx->ref);
ir->rx = rx;
ir->l.features |= LIRC_CAN_REC_LIRCCODE;
mutex_init(&rx->client_lock);
rx->c = client;
rx->hdpvr_data_fmt =
(id->driver_data & ID_FLAG_HDPVR) ? true : false;
/* An ir ref goes to the struct IR_rx instance */
rx->ir = get_ir_device(ir, true);
/* An rx ref goes to the i2c_client */
i2c_set_clientdata(client, get_ir_rx(ir));
/*
* Start the polling thread.
* It will only perform an empty loop around schedule_timeout()
* until we register with lirc_dev and the first user open()
*/
/* An ir ref goes to the new rx polling kthread */
rx->task = kthread_run(lirc_thread, get_ir_device(ir, true),
"zilog-rx-i2c-%d", adap->nr);
if (IS_ERR(rx->task)) {
ret = PTR_ERR(rx->task);
zilog_error("%s: could not start IR Rx polling thread"
"\n", __func__);
/* Failed kthread, so put back the ir ref */
put_ir_device(ir, true);
/* Failure exit, so put back rx ref from i2c_client */
i2c_set_clientdata(client, NULL);
put_ir_rx(rx, true);
ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
goto out_put_xx;
}
/* Proceed only if the Tx client is also ready */
if (tx == NULL) {
zilog_info("probe of IR Rx on %s (i2c-%d) done. Waiting"
" on IR Tx.\n", adap->name, adap->nr);
goto out_ok;
}
}
/* register with lirc */
ir->l.minor = minor; /* module option: user requested minor number */
ir->l.minor = lirc_register_driver(&ir->l);
if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
zilog_error("%s: \"minor\" must be between 0 and %d (%d)!\n",
__func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
ret = -EBADRQC;
goto out_put_xx;
}
zilog_info("IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
adap->name, adap->nr, ir->l.minor);
out_ok:
if (rx != NULL)
put_ir_rx(rx, true);
if (tx != NULL)
put_ir_tx(tx, true);
put_ir_device(ir, true);
zilog_info("probe of IR %s on %s (i2c-%d) done\n",
tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_unlock(&ir_devices_lock);
return 0;
out_put_xx:
if (rx != NULL)
put_ir_rx(rx, true);
if (tx != NULL)
put_ir_tx(tx, true);
out_put_ir:
put_ir_device(ir, true);
out_no_ir:
zilog_error("%s: probing IR %s on %s (i2c-%d) failed with %d\n",
__func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr,
ret);
mutex_unlock(&ir_devices_lock);
return ret;
}
static int __init zilog_init(void)
{
int ret;
zilog_notify("Zilog/Hauppauge IR driver initializing\n");
mutex_init(&tx_data_lock);
request_module("firmware_class");
ret = i2c_add_driver(&driver);
if (ret)
zilog_error("initialization failed\n");
else
zilog_notify("initialization complete\n");
return ret;
}
static void __exit zilog_exit(void)
{
i2c_del_driver(&driver);
/* if loaded */
fw_unload();
zilog_notify("Zilog/Hauppauge IR driver unloaded\n");
}
module_init(zilog_init);
module_exit(zilog_exit);
MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)");
MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, "
"Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver, "
"Andy Walls");
MODULE_LICENSE("GPL");
/* for compat with old name, which isn't all that accurate anymore */
MODULE_ALIAS("lirc_pvr150");
module_param(minor, int, 0444);
MODULE_PARM_DESC(minor, "Preferred minor device number");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Enable debugging messages");
module_param(tx_only, bool, 0644);
MODULE_PARM_DESC(tx_only, "Only handle the IR transmit function");
| gpl-2.0 |
CryToCry96/android_kernel_ef51lsk | drivers/of/gpio.c | 2504 | 6019 | /*
* OF helpers for the GPIO API
*
* Copyright (c) 2007-2008 MontaVista Software, Inc.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
/**
* of_get_named_gpio_flags() - Get a GPIO number and flags to use with GPIO API
* @np: device node to get GPIO from
* @propname: property name containing gpio specifier(s)
* @index: index of the GPIO
* @flags: a flags pointer to fill in
*
* Returns GPIO number to use with Linux generic GPIO API, or one of the errno
* value on the error condition. If @flags is not NULL the function also fills
* in flags for the GPIO.
*/
int of_get_named_gpio_flags(struct device_node *np, const char *propname,
int index, enum of_gpio_flags *flags)
{
int ret;
struct gpio_chip *gc;
struct of_phandle_args gpiospec;
ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
&gpiospec);
if (ret) {
pr_debug("%s: can't parse gpios property\n", __func__);
goto err0;
}
gc = of_node_to_gpiochip(gpiospec.np);
if (!gc) {
pr_debug("%s: gpio controller %s isn't registered\n",
np->full_name, gpiospec.np->full_name);
ret = -ENODEV;
goto err1;
}
if (gpiospec.args_count != gc->of_gpio_n_cells) {
pr_debug("%s: wrong #gpio-cells for %s\n",
np->full_name, gpiospec.np->full_name);
ret = -EINVAL;
goto err1;
}
/* .xlate might decide to not fill in the flags, so clear it. */
if (flags)
*flags = 0;
ret = gc->of_xlate(gc, &gpiospec, flags);
if (ret < 0)
goto err1;
ret += gc->base;
err1:
of_node_put(gpiospec.np);
err0:
pr_debug("%s exited with status %d\n", __func__, ret);
return ret;
}
EXPORT_SYMBOL(of_get_named_gpio_flags);
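/*
 * Usage sketch (editor's illustration; the node and property names are
 * hypothetical): given a device tree fragment such as
 *
 *   reset-gpios = <&gpio1 5 0>;
 *
 * a driver would resolve the Linux GPIO number with
 *
 *   enum of_gpio_flags flags;
 *   int gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, &flags);
 *   if (gpio < 0)
 *           return gpio;
 */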
/**
* of_gpio_named_count - Count GPIOs for a device
* @np: device node to count GPIOs for
* @propname: property name containing gpio specifier(s)
*
* The function returns the count of GPIOs specified for a node.
*
 * Note that empty GPIO specifiers count too. For example,
*
* gpios = <0
* &pio1 1 2
* 0
* &pio2 3 4>;
*
* defines four GPIOs (so this function will return 4), two of which
* are not specified.
*/
unsigned int of_gpio_named_count(struct device_node *np, const char* propname)
{
unsigned int cnt = 0;
do {
int ret;
ret = of_parse_phandle_with_args(np, propname, "#gpio-cells",
cnt, NULL);
/* A hole in the gpios = <> counts anyway. */
if (ret < 0 && ret != -EEXIST)
break;
} while (++cnt);
return cnt;
}
EXPORT_SYMBOL(of_gpio_named_count);
/**
* of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
* @np: device node of the GPIO chip
* @gpio_spec: gpio specifier as found in the device tree
* @flags: a flags pointer to fill in
*
 * This is a simple translation function, suitable for most 1:1 mapped
 * GPIO chips. This function performs only one sanity check: whether the
 * GPIO number is less than ngpio (as specified in the gpio_chip).
*/
int of_gpio_simple_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags)
{
/*
* We're discouraging gpio_cells < 2, since that way you'll have to
	 * write your own xlate function (that will have to retrieve the GPIO
* number and the flags from a single gpio cell -- this is possible,
* but not recommended).
*/
if (gc->of_gpio_n_cells < 2) {
WARN_ON(1);
return -EINVAL;
}
if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
return -EINVAL;
if (gpiospec->args[0] >= gc->ngpio)
return -EINVAL;
if (flags)
*flags = gpiospec->args[1];
return gpiospec->args[0];
}
EXPORT_SYMBOL(of_gpio_simple_xlate);
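/*
 * Worked example (editor's illustration): with the usual two-cell binding,
 * a specifier <&gpioX 7 1> arrives here as gpiospec->args[] = { 7, 1 };
 * the function range-checks 7 against gc->ngpio, stores the flags cell (1)
 * through *flags, and returns 7 as the offset within this chip.
 */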
/**
* of_mm_gpiochip_add - Add memory mapped GPIO chip (bank)
* @np: device node of the GPIO chip
* @mm_gc: pointer to the of_mm_gpio_chip allocated structure
*
* To use this function you should allocate and fill mm_gc with:
*
* 1) In the gpio_chip structure:
* - all the callbacks
* - of_gpio_n_cells
* - of_xlate callback (optional)
*
 * 2) In the of_mm_gpio_chip structure:
* - save_regs callback (optional)
*
 * If successful, this function will map the bank's memory and will
 * do all necessary work for you. Then you'll be able to use .regs
* to manage GPIOs from the callbacks.
*/
int of_mm_gpiochip_add(struct device_node *np,
struct of_mm_gpio_chip *mm_gc)
{
int ret = -ENOMEM;
struct gpio_chip *gc = &mm_gc->gc;
gc->label = kstrdup(np->full_name, GFP_KERNEL);
if (!gc->label)
goto err0;
mm_gc->regs = of_iomap(np, 0);
if (!mm_gc->regs)
goto err1;
gc->base = -1;
if (mm_gc->save_regs)
mm_gc->save_regs(mm_gc);
mm_gc->gc.of_node = np;
ret = gpiochip_add(gc);
if (ret)
goto err2;
return 0;
err2:
iounmap(mm_gc->regs);
err1:
kfree(gc->label);
err0:
pr_err("%s: GPIO chip registration failed with status %d\n",
np->full_name, ret);
return ret;
}
EXPORT_SYMBOL(of_mm_gpiochip_add);
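/*
 * Usage sketch (editor's illustration; all "example_" names are
 * hypothetical): a platform driver embeds a struct of_mm_gpio_chip, fills
 * in the gpio_chip callbacks and cell count, then registers it.
 */
#if 0
static int example_gpio_probe(struct device_node *np)
{
	struct of_mm_gpio_chip *mm_gc;

	mm_gc = kzalloc(sizeof(*mm_gc), GFP_KERNEL);
	if (!mm_gc)
		return -ENOMEM;

	mm_gc->gc.ngpio = 32;
	mm_gc->gc.direction_input = example_dir_in;	/* hypothetical */
	mm_gc->gc.direction_output = example_dir_out;	/* hypothetical */
	mm_gc->gc.get = example_get;			/* hypothetical */
	mm_gc->gc.set = example_set;			/* hypothetical */
	mm_gc->gc.of_gpio_n_cells = 2;

	return of_mm_gpiochip_add(np, mm_gc);
}
#endif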
void of_gpiochip_add(struct gpio_chip *chip)
{
if ((!chip->of_node) && (chip->dev))
chip->of_node = chip->dev->of_node;
if (!chip->of_node)
return;
if (!chip->of_xlate) {
chip->of_gpio_n_cells = 2;
chip->of_xlate = of_gpio_simple_xlate;
}
of_node_get(chip->of_node);
}
void of_gpiochip_remove(struct gpio_chip *chip)
{
if (chip->of_node)
of_node_put(chip->of_node);
}
/* Private function for resolving node pointer to gpio_chip */
static int of_gpiochip_is_match(struct gpio_chip *chip, const void *data)
{
return chip->of_node == data;
}
struct gpio_chip *of_node_to_gpiochip(struct device_node *np)
{
return gpiochip_find(np, of_gpiochip_is_match);
}
| gpl-2.0 |
gelotus/hammerhead-nirvana | net/l2tp/l2tp_eth.c | 2760 | 7362 | /*
* L2TPv3 ethernet pseudowire driver
*
* Copyright (c) 2008,2009,2010 Katalix Systems Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "l2tp_core.h"
/* Default device name. May be overridden by name specified by user */
#define L2TP_ETH_DEV_NAME "l2tpeth%d"
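/*
* The %d in the template is resolved by the core when the device is
* registered, so devices created with the default name come up as
* l2tpeth0, l2tpeth1, and so on.
*/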
/* via netdev_priv() */
struct l2tp_eth {
struct net_device *dev;
struct sock *tunnel_sock;
struct l2tp_session *session;
struct list_head list;
};
/* via l2tp_session_priv() */
struct l2tp_eth_sess {
struct net_device *dev;
};
/* per-net private data for this module */
static unsigned int l2tp_eth_net_id;
struct l2tp_eth_net {
struct list_head l2tp_eth_dev_list;
spinlock_t l2tp_eth_lock;
};
static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
{
return net_generic(net, l2tp_eth_net_id);
}
static int l2tp_eth_dev_init(struct net_device *dev)
{
struct l2tp_eth *priv = netdev_priv(dev);
priv->dev = dev;
eth_hw_addr_random(dev);
memset(&dev->broadcast[0], 0xff, 6);
return 0;
}
static void l2tp_eth_dev_uninit(struct net_device *dev)
{
struct l2tp_eth *priv = netdev_priv(dev);
struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
spin_lock(&pn->l2tp_eth_lock);
list_del_init(&priv->list);
spin_unlock(&pn->l2tp_eth_lock);
dev_put(dev);
}
static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct l2tp_eth *priv = netdev_priv(dev);
struct l2tp_session *session = priv->session;
l2tp_xmit_skb(session, skb, session->hdr_len);
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
return 0;
}
static struct net_device_ops l2tp_eth_netdev_ops = {
.ndo_init = l2tp_eth_dev_init,
.ndo_uninit = l2tp_eth_dev_uninit,
.ndo_start_xmit = l2tp_eth_dev_xmit,
};
static void l2tp_eth_dev_setup(struct net_device *dev)
{
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->netdev_ops = &l2tp_eth_netdev_ops;
dev->destructor = free_netdev;
}
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
struct net_device *dev = spriv->dev;
if (session->debug & L2TP_MSG_DATA) {
unsigned int length;
int offset;
u8 *ptr = skb->data;
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto error;
printk(KERN_DEBUG "%s: eth recv: ", session->name);
offset = 0;
do {
printk(" %02X", ptr[offset]);
} while (++offset < length);
printk("\n");
}
if (!pskb_may_pull(skb, ETH_HLEN))
goto error;
secpath_reset(skb);
/* checksums verified by L2TP */
skb->ip_summed = CHECKSUM_NONE;
skb_dst_drop(skb);
nf_reset(skb);
if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
dev->stats.rx_packets++;
dev->stats.rx_bytes += data_len;
} else
dev->stats.rx_errors++;
return;
error:
dev->stats.rx_errors++;
kfree_skb(skb);
}
static void l2tp_eth_delete(struct l2tp_session *session)
{
struct l2tp_eth_sess *spriv;
struct net_device *dev;
if (session) {
spriv = l2tp_session_priv(session);
dev = spriv->dev;
if (dev) {
unregister_netdev(dev);
spriv->dev = NULL;
}
}
}
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
static void l2tp_eth_show(struct seq_file *m, void *arg)
{
struct l2tp_session *session = arg;
struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
struct net_device *dev = spriv->dev;
seq_printf(m, " interface %s\n", dev->name);
}
#endif
static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
struct net_device *dev;
char name[IFNAMSIZ];
struct l2tp_tunnel *tunnel;
struct l2tp_session *session;
struct l2tp_eth *priv;
struct l2tp_eth_sess *spriv;
int rc;
struct l2tp_eth_net *pn;
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (!tunnel) {
rc = -ENODEV;
goto out;
}
session = l2tp_session_find(net, tunnel, session_id);
if (session) {
rc = -EEXIST;
goto out;
}
if (cfg->ifname) {
dev = dev_get_by_name(net, cfg->ifname);
if (dev) {
dev_put(dev);
rc = -EEXIST;
goto out;
}
strlcpy(name, cfg->ifname, IFNAMSIZ);
} else
strcpy(name, L2TP_ETH_DEV_NAME);
session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
peer_session_id, cfg);
if (!session) {
rc = -ENOMEM;
goto out;
}
dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup);
if (!dev) {
rc = -ENOMEM;
goto out_del_session;
}
dev_net_set(dev, net);
if (session->mtu == 0)
session->mtu = dev->mtu - session->hdr_len;
dev->mtu = session->mtu;
dev->needed_headroom += session->hdr_len;
priv = netdev_priv(dev);
priv->dev = dev;
priv->session = session;
INIT_LIST_HEAD(&priv->list);
priv->tunnel_sock = tunnel->sock;
session->recv_skb = l2tp_eth_dev_recv;
session->session_close = l2tp_eth_delete;
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
session->show = l2tp_eth_show;
#endif
spriv = l2tp_session_priv(session);
spriv->dev = dev;
rc = register_netdev(dev);
if (rc < 0)
goto out_del_dev;
/* Must be done after register_netdev() */
strlcpy(session->ifname, dev->name, IFNAMSIZ);
dev_hold(dev);
pn = l2tp_eth_pernet(dev_net(dev));
spin_lock(&pn->l2tp_eth_lock);
list_add(&priv->list, &pn->l2tp_eth_dev_list);
spin_unlock(&pn->l2tp_eth_lock);
return 0;
out_del_dev:
free_netdev(dev);
out_del_session:
l2tp_session_delete(session);
out:
return rc;
}
static __net_init int l2tp_eth_init_net(struct net *net)
{
struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
spin_lock_init(&pn->l2tp_eth_lock);
return 0;
}
static struct pernet_operations l2tp_eth_net_ops = {
.init = l2tp_eth_init_net,
.id = &l2tp_eth_net_id,
.size = sizeof(struct l2tp_eth_net),
};
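/*
* Because .id and .size are set here, register_pernet_device() has the
* core allocate a zeroed struct l2tp_eth_net for each network
* namespace and record the slot index in l2tp_eth_net_id, so the
* l2tp_eth_pernet() helper above reduces to a net_generic() lookup;
* .init then runs once per namespace to set up the list and lock.
*/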
static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
.session_create = l2tp_eth_create,
.session_delete = l2tp_session_delete,
};
static int __init l2tp_eth_init(void)
{
int err = 0;
err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
if (err)
goto out;
err = register_pernet_device(&l2tp_eth_net_ops);
if (err)
goto out_unreg;
printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n");
return 0;
out_unreg:
l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
out:
return err;
}
static void __exit l2tp_eth_exit(void)
{
unregister_pernet_device(&l2tp_eth_net_ops);
l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
}
module_init(l2tp_eth_init);
module_exit(l2tp_eth_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP ethernet pseudowire driver");
MODULE_VERSION("1.0");
| gpl-2.0 |
garwynn/L900_MA7_Kernel | lib/flex_array.c | 3016 | 11135 | /*
* Flexible array managed in PAGE_SIZE parts
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright IBM Corporation, 2009
*
* Author: Dave Hansen <dave@linux.vnet.ibm.com>
*/
#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/reciprocal_div.h>
struct flex_array_part {
char elements[FLEX_ARRAY_PART_SIZE];
};
/*
* If a user requests an allocation which is small
* enough, we may simply use the space in the
* flex_array->parts[] array to store the user
* data.
*/
static inline int elements_fit_in_base(struct flex_array *fa)
{
int data_size = fa->element_size * fa->total_nr_elements;
if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
return 1;
return 0;
}
/**
* flex_array_alloc - allocate a new flexible array
* @element_size: the size of individual elements in the array
* @total: total number of elements that this should hold
* @flags: page allocation flags to use for base array
*
* Note: all locking must be provided by the caller.
*
* @total is used to size internal structures. If the user ever
* accesses any array indexes >=@total, it will produce errors.
*
* The maximum number of elements is defined as: the number of
* elements that can be stored in a page times the number of
* page pointers that we can fit in the base structure or (using
* integer math):
*
* (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
*
* Here's a table showing example capacities. Note that the maximum
* index that the get/put() functions allow is just nr_objects-1. This
* basically means that you get 4MB of storage on 32-bit and 2MB on
* 64-bit.
*
*
* Element size | Objects | Objects |
* PAGE_SIZE=4k | 32-bit | 64-bit |
* ---------------------------------|
* 1 bytes | 4177920 | 2088960 |
* 2 bytes | 2088960 | 1044480 |
* 3 bytes | 1392300 | 696150 |
* 4 bytes | 1044480 | 522240 |
* 32 bytes | 130560 | 65408 |
* 33 bytes | 126480 | 63240 |
* 2048 bytes | 2040 | 1020 |
* 2049 bytes | 1020 | 510 |
* void * | 1044480 | 261120 |
*
* Since 64-bit pointers are twice the size, we lose half the
* capacity in the base structure. Also note that no effort is made
* to efficiently pack objects across page boundaries.
*/
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
gfp_t flags)
{
struct flex_array *ret;
int elems_per_part = 0;
int reciprocal_elems = 0;
int max_size = 0;
if (element_size) {
elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
reciprocal_elems = reciprocal_value(elems_per_part);
max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part;
}
/* max_size will end up 0 if element_size > PAGE_SIZE */
if (total > max_size)
return NULL;
ret = kzalloc(sizeof(struct flex_array), flags);
if (!ret)
return NULL;
ret->element_size = element_size;
ret->total_nr_elements = total;
ret->elems_per_part = elems_per_part;
ret->reciprocal_elems = reciprocal_elems;
if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
memset(&ret->parts[0], FLEX_ARRAY_FREE,
FLEX_ARRAY_BASE_BYTES_LEFT);
return ret;
}
EXPORT_SYMBOL(flex_array_alloc);
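/*
* Usage sketch (sizes and counts illustrative, process context
* assumed):
*
* struct flex_array *fa;
* fa = flex_array_alloc(sizeof(long), 1024, GFP_KERNEL);
* if (!fa)
* return -ENOMEM;
* ... flex_array_put() / flex_array_get() ...
* flex_array_free(fa);
*/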
static int fa_element_to_part_nr(struct flex_array *fa,
unsigned int element_nr)
{
return reciprocal_divide(element_nr, fa->reciprocal_elems);
}
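/*
* reciprocal_divide(x, R) computes x / d with a multiply and a shift,
* where R = reciprocal_value(d) was precomputed in flex_array_alloc().
* With 8-byte elements and 4k pages, for instance, elems_per_part is
* 512 and this returns element_nr / 512 with no integer division.
*/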
/**
* flex_array_free_parts - just free the second-level pages
* @fa: the flex array from which to free parts
*
* This is to be used in cases where the base 'struct flex_array'
* has been statically allocated and should not be freed.
*/
void flex_array_free_parts(struct flex_array *fa)
{
int part_nr;
if (elements_fit_in_base(fa))
return;
for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
kfree(fa->parts[part_nr]);
}
EXPORT_SYMBOL(flex_array_free_parts);
void flex_array_free(struct flex_array *fa)
{
flex_array_free_parts(fa);
kfree(fa);
}
EXPORT_SYMBOL(flex_array_free);
static unsigned int index_inside_part(struct flex_array *fa,
unsigned int element_nr,
unsigned int part_nr)
{
unsigned int part_offset;
part_offset = element_nr - part_nr * fa->elems_per_part;
return part_offset * fa->element_size;
}
static struct flex_array_part *
__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
{
struct flex_array_part *part = fa->parts[part_nr];
if (!part) {
part = kmalloc(sizeof(struct flex_array_part), flags);
if (!part)
return NULL;
if (!(flags & __GFP_ZERO))
memset(part, FLEX_ARRAY_FREE,
sizeof(struct flex_array_part));
fa->parts[part_nr] = part;
}
return part;
}
/**
* flex_array_put - copy data into the array at @element_nr
* @fa: the flex array to copy data into
* @element_nr: index of the position in which to insert
* the new element.
* @src: address of data to copy into the array
* @flags: page allocation flags to use for array expansion
*
*
* Note that this *copies* the contents of @src into
* the array. If you are trying to store an array of
* pointers, make sure to pass in &ptr instead of ptr.
* You may instead wish to use the flex_array_put_ptr()
* helper function.
*
* Locking must be provided by the caller.
*/
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
gfp_t flags)
{
int part_nr = 0;
struct flex_array_part *part;
void *dst;
if (element_nr >= fa->total_nr_elements)
return -ENOSPC;
if (!fa->element_size)
return 0;
if (elements_fit_in_base(fa))
part = (struct flex_array_part *)&fa->parts[0];
else {
part_nr = fa_element_to_part_nr(fa, element_nr);
part = __fa_get_part(fa, part_nr, flags);
if (!part)
return -ENOMEM;
}
dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
memcpy(dst, src, fa->element_size);
return 0;
}
EXPORT_SYMBOL(flex_array_put);
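/*
* Sketch of the copy semantics described above (variables
* hypothetical):
*
* long v = 42;
* int err = flex_array_put(fa, 7, &v, GFP_KERNEL); (copies the value)
*
* To store a pointer p, pass &p, or use the flex_array_put_ptr()
* helper, which takes the address for you; the array must then be
* sized for sizeof(void *) elements.
*/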
/**
* flex_array_clear - clear element in array at @element_nr
* @fa: the flex array of the element.
* @element_nr: index of the position to clear.
*
* Locking must be provided by the caller.
*/
int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
{
int part_nr = 0;
struct flex_array_part *part;
void *dst;
if (element_nr >= fa->total_nr_elements)
return -ENOSPC;
if (!fa->element_size)
return 0;
if (elements_fit_in_base(fa))
part = (struct flex_array_part *)&fa->parts[0];
else {
part_nr = fa_element_to_part_nr(fa, element_nr);
part = fa->parts[part_nr];
if (!part)
return -EINVAL;
}
dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
memset(dst, FLEX_ARRAY_FREE, fa->element_size);
return 0;
}
EXPORT_SYMBOL(flex_array_clear);
/**
* flex_array_prealloc - guarantee that array space exists
* @fa: the flex array for which to preallocate parts
* @start: index of first array element for which space is allocated
* @nr_elements: number of elements for which space is allocated
* @flags: page allocation flags
*
* This will guarantee that no future calls to flex_array_put()
* will allocate memory. It can be used if you are expecting to
* be holding a lock or in some atomic context while writing
* data into the array.
*
* Locking must be provided by the caller.
*/
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
unsigned int nr_elements, gfp_t flags)
{
int start_part;
int end_part;
int part_nr;
unsigned int end;
struct flex_array_part *part;
if (!start && !nr_elements)
return 0;
if (start >= fa->total_nr_elements)
return -ENOSPC;
if (!nr_elements)
return 0;
end = start + nr_elements - 1;
if (end >= fa->total_nr_elements)
return -ENOSPC;
if (!fa->element_size)
return 0;
if (elements_fit_in_base(fa))
return 0;
start_part = fa_element_to_part_nr(fa, start);
end_part = fa_element_to_part_nr(fa, end);
for (part_nr = start_part; part_nr <= end_part; part_nr++) {
part = __fa_get_part(fa, part_nr, flags);
if (!part)
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL(flex_array_prealloc);
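/*
* Sketch of the intended pattern: preallocate while allocation is
* still allowed, then put elements under a lock knowing no memory
* will be allocated (lock and variables hypothetical):
*
* err = flex_array_prealloc(fa, 0, nr, GFP_KERNEL);
* ...
* spin_lock(&my_lock);
* flex_array_put(fa, i, &elem, GFP_ATOMIC); (never allocates now)
* spin_unlock(&my_lock);
*/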
/**
* flex_array_get - pull data back out of the array
* @fa: the flex array from which to extract data
* @element_nr: index of the element to fetch from the array
*
* Returns a pointer to the data at index @element_nr. Note
* that this is a copy of the data that was passed in. If you
* are using this to store pointers, you'll get back &ptr. You
* may instead wish to use the flex_array_get_ptr helper.
*
* Locking must be provided by the caller.
*/
void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
{
int part_nr = 0;
struct flex_array_part *part;
if (!fa->element_size)
return NULL;
if (element_nr >= fa->total_nr_elements)
return NULL;
if (elements_fit_in_base(fa))
part = (struct flex_array_part *)&fa->parts[0];
else {
part_nr = fa_element_to_part_nr(fa, element_nr);
part = fa->parts[part_nr];
if (!part)
return NULL;
}
return &part->elements[index_inside_part(fa, element_nr, part_nr)];
}
EXPORT_SYMBOL(flex_array_get);
/**
* flex_array_get_ptr - pull a ptr back out of the array
* @fa: the flex array from which to extract data
* @element_nr: index of the element to fetch from the array
*
* Returns the pointer placed in the flex array at element_nr using
* flex_array_put_ptr(). This function should not be called if the
* element in question was not set using the _put_ptr() helper.
*/
void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
{
void **tmp;
tmp = flex_array_get(fa, element_nr);
if (!tmp)
return NULL;
return *tmp;
}
EXPORT_SYMBOL(flex_array_get_ptr);
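/*
* Round-trip sketch for pointer storage; fa must have been allocated
* with element_size == sizeof(void *) (variables hypothetical):
*
* struct foo *in = ..., *out;
* err = flex_array_put(fa, 3, &in, GFP_KERNEL);
* out = flex_array_get_ptr(fa, 3); (out == in)
*/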
static int part_is_free(struct flex_array_part *part)
{
int i;
for (i = 0; i < sizeof(struct flex_array_part); i++)
if (part->elements[i] != FLEX_ARRAY_FREE)
return 0;
return 1;
}
/**
* flex_array_shrink - free unused second-level pages
* @fa: the flex array to shrink
*
* Frees all second-level pages that consist solely of unused
* elements. Returns the number of pages freed.
*
* Locking must be provided by the caller.
*/
int flex_array_shrink(struct flex_array *fa)
{
struct flex_array_part *part;
int part_nr;
int ret = 0;
if (!fa->total_nr_elements || !fa->element_size)
return 0;
if (elements_fit_in_base(fa))
return ret;
for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
part = fa->parts[part_nr];
if (!part)
continue;
if (part_is_free(part)) {
fa->parts[part_nr] = NULL;
kfree(part);
ret++;
}
}
return ret;
}
EXPORT_SYMBOL(flex_array_shrink);
| gpl-2.0 |
klin1344/kernel_ville_2.31 | drivers/staging/msm/mddihosti.c | 3016 | 65457 | /* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include "msm_fb_panel.h"
#include "mddihost.h"
#include "mddihosti.h"
#define FEATURE_MDDI_UNDERRUN_RECOVERY
#ifndef FEATURE_MDDI_DISABLE_REVERSE
static void mddi_read_rev_packet(byte *data_ptr);
#endif
struct timer_list mddi_host_timer;
#define MDDI_DEFAULT_TIMER_LENGTH 5000 /* 5 seconds */
uint32 mddi_rtd_frequency = 60000; /* send RTD every 60 seconds */
uint32 mddi_client_status_frequency = 60000; /* get status pkt every 60 secs */
boolean mddi_vsync_detect_enabled = FALSE;
mddi_gpio_info_type mddi_gpio;
uint32 mddi_host_core_version;
boolean mddi_debug_log_statistics = FALSE;
/* #define FEATURE_MDDI_HOST_ENABLE_EARLY_HIBERNATION */
/* default to TRUE in case MDP does not vote */
static boolean mddi_host_mdp_active_flag = TRUE;
static uint32 mddi_log_stats_counter;
uint32 mddi_log_stats_frequency = 4000;
#define MDDI_DEFAULT_REV_PKT_SIZE 0x20
#ifndef FEATURE_MDDI_DISABLE_REVERSE
static boolean mddi_rev_ptr_workaround = TRUE;
static uint32 mddi_reg_read_retry;
static uint32 mddi_reg_read_retry_max = 20;
static boolean mddi_enable_reg_read_retry = TRUE;
static boolean mddi_enable_reg_read_retry_once = FALSE;
#define MDDI_MAX_REV_PKT_SIZE 0x60
#define MDDI_CLIENT_CAPABILITY_REV_PKT_SIZE 0x60
#define MDDI_VIDEO_REV_PKT_SIZE 0x40
#define MDDI_REV_BUFFER_SIZE MDDI_MAX_REV_PKT_SIZE
static byte rev_packet_data[MDDI_MAX_REV_PKT_SIZE];
#endif /* FEATURE_MDDI_DISABLE_REVERSE */
/* leave these variables so graphics will compile */
#define MDDI_MAX_REV_DATA_SIZE 128
/*lint -d__align(x) */
boolean mddi_debug_clear_rev_data = TRUE;
uint32 *mddi_reg_read_value_ptr;
mddi_client_capability_type mddi_client_capability_pkt;
static boolean mddi_client_capability_request = FALSE;
#ifndef FEATURE_MDDI_DISABLE_REVERSE
#define MAX_MDDI_REV_HANDLERS 2
#define INVALID_PKT_TYPE 0xFFFF
typedef struct {
mddi_rev_handler_type handler; /* ISR to be executed */
uint16 pkt_type;
} mddi_rev_pkt_handler_type;
static mddi_rev_pkt_handler_type mddi_rev_pkt_handler[MAX_MDDI_REV_HANDLERS] =
{ {NULL, INVALID_PKT_TYPE}, {NULL, INVALID_PKT_TYPE} };
static boolean mddi_rev_encap_user_request = FALSE;
static mddi_linked_list_notify_type mddi_rev_user;
spinlock_t mddi_host_spin_lock;
extern uint32 mdp_in_processing;
#endif
typedef enum {
MDDI_REV_IDLE
#ifndef FEATURE_MDDI_DISABLE_REVERSE
, MDDI_REV_REG_READ_ISSUED,
MDDI_REV_REG_READ_SENT,
MDDI_REV_ENCAP_ISSUED,
MDDI_REV_STATUS_REQ_ISSUED,
MDDI_REV_CLIENT_CAP_ISSUED
#endif
} mddi_rev_link_state_type;
typedef enum {
MDDI_LINK_DISABLED,
MDDI_LINK_HIBERNATING,
MDDI_LINK_ACTIVATING,
MDDI_LINK_ACTIVE
} mddi_host_link_state_type;
typedef struct {
uint32 count;
uint32 in_count;
uint32 disp_req_count;
uint32 state_change_count;
uint32 ll_done_count;
uint32 rev_avail_count;
uint32 error_count;
uint32 rev_encap_count;
uint32 llist_ptr_write_1;
uint32 llist_ptr_write_2;
} mddi_host_int_type;
typedef struct {
uint32 fwd_crc_count;
uint32 rev_crc_count;
uint32 pri_underflow;
uint32 sec_underflow;
uint32 rev_overflow;
uint32 pri_overwrite;
uint32 sec_overwrite;
uint32 rev_overwrite;
uint32 dma_failure;
uint32 rtd_failure;
uint32 reg_read_failure;
#ifdef FEATURE_MDDI_UNDERRUN_RECOVERY
uint32 pri_underrun_detected;
#endif
} mddi_host_stat_type;
typedef struct {
uint32 rtd_cnt;
uint32 rev_enc_cnt;
uint32 vid_cnt;
uint32 reg_acc_cnt;
uint32 cli_stat_cnt;
uint32 cli_cap_cnt;
uint32 reg_read_cnt;
uint32 link_active_cnt;
uint32 link_hibernate_cnt;
uint32 vsync_response_cnt;
uint32 fwd_crc_cnt;
uint32 rev_crc_cnt;
} mddi_log_params_struct_type;
typedef struct {
uint32 rtd_value;
uint32 rtd_counter;
uint32 client_status_cnt;
boolean rev_ptr_written;
uint8 *rev_ptr_start;
uint8 *rev_ptr_curr;
uint32 mddi_rev_ptr_write_val;
dma_addr_t rev_data_dma_addr;
uint16 rev_pkt_size;
mddi_rev_link_state_type rev_state;
mddi_host_link_state_type link_state;
mddi_host_driver_state_type driver_state;
boolean disable_hibernation;
uint32 saved_int_reg;
uint32 saved_int_en;
mddi_linked_list_type *llist_ptr;
dma_addr_t llist_dma_addr;
mddi_linked_list_type *llist_dma_ptr;
uint32 *rev_data_buf;
struct completion mddi_llist_avail_comp;
boolean mddi_waiting_for_llist_avail;
mddi_host_int_type int_type;
mddi_host_stat_type stats;
mddi_log_params_struct_type log_parms;
mddi_llist_info_type llist_info;
mddi_linked_list_notify_type llist_notify[MDDI_MAX_NUM_LLIST_ITEMS];
} mddi_host_cntl_type;
static mddi_host_type mddi_curr_host = MDDI_HOST_PRIM;
static mddi_host_cntl_type mhctl[MDDI_NUM_HOST_CORES];
mddi_linked_list_type *llist_extern[MDDI_NUM_HOST_CORES];
mddi_linked_list_type *llist_dma_extern[MDDI_NUM_HOST_CORES];
mddi_linked_list_notify_type *llist_extern_notify[MDDI_NUM_HOST_CORES];
static mddi_log_params_struct_type prev_parms[MDDI_NUM_HOST_CORES];
extern uint32 mdp_total_vdopkts;
static boolean mddi_host_io_clock_on = FALSE;
static boolean mddi_host_hclk_on = FALSE;
int int_mddi_pri_flag = FALSE;
int int_mddi_ext_flag = FALSE;
static void mddi_report_errors(uint32 int_reg)
{
mddi_host_type host_idx = mddi_curr_host;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
if (int_reg & MDDI_INT_PRI_UNDERFLOW) {
pmhctl->stats.pri_underflow++;
MDDI_MSG_ERR("!!! MDDI Primary Underflow !!!\n");
}
if (int_reg & MDDI_INT_SEC_UNDERFLOW) {
pmhctl->stats.sec_underflow++;
MDDI_MSG_ERR("!!! MDDI Secondary Underflow !!!\n");
}
#ifndef FEATURE_MDDI_DISABLE_REVERSE
if (int_reg & MDDI_INT_REV_OVERFLOW) {
pmhctl->stats.rev_overflow++;
MDDI_MSG_ERR("!!! MDDI Reverse Overflow !!!\n");
pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
mddi_host_reg_out(REV_PTR, pmhctl->mddi_rev_ptr_write_val);
}
if (int_reg & MDDI_INT_CRC_ERROR)
MDDI_MSG_ERR("!!! MDDI Reverse CRC Error !!!\n");
#endif
if (int_reg & MDDI_INT_PRI_OVERWRITE) {
pmhctl->stats.pri_overwrite++;
MDDI_MSG_ERR("!!! MDDI Primary Overwrite !!!\n");
}
if (int_reg & MDDI_INT_SEC_OVERWRITE) {
pmhctl->stats.sec_overwrite++;
MDDI_MSG_ERR("!!! MDDI Secondary Overwrite !!!\n");
}
#ifndef FEATURE_MDDI_DISABLE_REVERSE
if (int_reg & MDDI_INT_REV_OVERWRITE) {
pmhctl->stats.rev_overwrite++;
/* This will show up normally and is not a problem */
MDDI_MSG_DEBUG("MDDI Reverse Overwrite!\n");
}
if (int_reg & MDDI_INT_RTD_FAILURE) {
mddi_host_reg_outm(INTEN, MDDI_INT_RTD_FAILURE, 0);
pmhctl->stats.rtd_failure++;
MDDI_MSG_ERR("!!! MDDI RTD Failure !!!\n");
}
#endif
if (int_reg & MDDI_INT_DMA_FAILURE) {
pmhctl->stats.dma_failure++;
MDDI_MSG_ERR("!!! MDDI DMA Abort !!!\n");
}
}
static void mddi_host_enable_io_clock(void)
{
if (!MDDI_HOST_IS_IO_CLOCK_ON)
MDDI_HOST_ENABLE_IO_CLOCK;
}
static void mddi_host_enable_hclk(void)
{
if (!MDDI_HOST_IS_HCLK_ON)
MDDI_HOST_ENABLE_HCLK;
}
static void mddi_host_disable_io_clock(void)
{
#ifndef FEATURE_MDDI_HOST_IO_CLOCK_CONTROL_DISABLE
if (MDDI_HOST_IS_IO_CLOCK_ON)
MDDI_HOST_DISABLE_IO_CLOCK;
#endif
}
static void mddi_host_disable_hclk(void)
{
#ifndef FEATURE_MDDI_HOST_HCLK_CONTROL_DISABLE
if (MDDI_HOST_IS_HCLK_ON)
MDDI_HOST_DISABLE_HCLK;
#endif
}
static void mddi_vote_to_sleep(mddi_host_type host_idx, boolean sleep)
{
uint16 vote_mask;
if (host_idx == MDDI_HOST_PRIM)
vote_mask = 0x01;
else
vote_mask = 0x02;
}
static void mddi_report_state_change(uint32 int_reg)
{
mddi_host_type host_idx = mddi_curr_host;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
if ((pmhctl->saved_int_reg & MDDI_INT_IN_HIBERNATION) &&
(pmhctl->saved_int_reg & MDDI_INT_LINK_ACTIVE)) {
/* recover from a condition where the io_clock was turned off by the
clock driver during a transition to hibernation. The io_clock
disable is to prevent MDP/MDDI underruns when changing ARM
clock speeds. In the process of halting the ARM, the hclk
divider needs to be set to 1. When it is set to 1, there is
a small time (usecs) when hclk is off or slow, and this can
cause an underrun. To prevent the underrun, clock driver turns
off the MDDI io_clock before making the change. */
mddi_host_reg_out(CMD, MDDI_CMD_POWERUP);
}
if (int_reg & MDDI_INT_LINK_ACTIVE) {
pmhctl->link_state = MDDI_LINK_ACTIVE;
pmhctl->log_parms.link_active_cnt++;
pmhctl->rtd_value = mddi_host_reg_in(RTD_VAL);
MDDI_MSG_DEBUG("!!! MDDI Active RTD:0x%x!!!\n",
pmhctl->rtd_value);
/* now interrupt on hibernation */
mddi_host_reg_outm(INTEN,
(MDDI_INT_IN_HIBERNATION |
MDDI_INT_LINK_ACTIVE),
MDDI_INT_IN_HIBERNATION);
#ifdef DEBUG_MDDIHOSTI
/* if gpio interrupt is enabled, start polling at the fastest
* registered rate
*/
if (mddi_gpio.polling_enabled) {
timer_reg(&mddi_gpio_poll_timer,
mddi_gpio_poll_timer_cb, 0, mddi_gpio.polling_interval, 0);
}
#endif
#ifndef FEATURE_MDDI_DISABLE_REVERSE
if (mddi_rev_ptr_workaround) {
/* HW CR: need to reset reverse register stuff */
pmhctl->rev_ptr_written = FALSE;
pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
}
#endif
/* vote on sleep */
mddi_vote_to_sleep(host_idx, FALSE);
if (host_idx == MDDI_HOST_PRIM) {
if (mddi_vsync_detect_enabled) {
/*
* Indicate to client specific code that vsync
* was enabled, but we did not detect a client
* initiated wakeup. The client specific
* handler can either reassert vsync detection,
* or treat this as a valid vsync.
*/
mddi_client_lcd_vsync_detected(FALSE);
pmhctl->log_parms.vsync_response_cnt++;
}
}
}
if (int_reg & MDDI_INT_IN_HIBERNATION) {
pmhctl->link_state = MDDI_LINK_HIBERNATING;
pmhctl->log_parms.link_hibernate_cnt++;
MDDI_MSG_DEBUG("!!! MDDI Hibernating !!!\n");
/* now interrupt on link_active */
#ifdef FEATURE_MDDI_DISABLE_REVERSE
mddi_host_reg_outm(INTEN,
(MDDI_INT_MDDI_IN |
MDDI_INT_IN_HIBERNATION |
MDDI_INT_LINK_ACTIVE),
MDDI_INT_LINK_ACTIVE);
#else
mddi_host_reg_outm(INTEN,
(MDDI_INT_MDDI_IN |
MDDI_INT_IN_HIBERNATION |
MDDI_INT_LINK_ACTIVE),
(MDDI_INT_MDDI_IN | MDDI_INT_LINK_ACTIVE));
pmhctl->rtd_counter = mddi_rtd_frequency;
if (pmhctl->rev_state != MDDI_REV_IDLE) {
/* a rev_encap will not wake up the link, so we do that here */
pmhctl->link_state = MDDI_LINK_ACTIVATING;
mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
}
#endif
if (pmhctl->disable_hibernation) {
mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE);
mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
pmhctl->link_state = MDDI_LINK_ACTIVATING;
}
#ifdef FEATURE_MDDI_UNDERRUN_RECOVERY
if ((pmhctl->llist_info.transmitting_start_idx !=
UNASSIGNED_INDEX)
&&
((pmhctl->
saved_int_reg & (MDDI_INT_PRI_LINK_LIST_DONE |
MDDI_INT_PRI_PTR_READ)) ==
MDDI_INT_PRI_PTR_READ)) {
mddi_linked_list_type *llist_dma;
llist_dma = pmhctl->llist_dma_ptr;
/*
* All indications are that we have not received a
* linked list done interrupt, due to an underrun
* condition. Recovery attempt is to send again.
*/
dma_coherent_pre_ops();
/* Write to primary pointer register again */
mddi_host_reg_out(PRI_PTR,
&llist_dma[pmhctl->llist_info.
transmitting_start_idx]);
pmhctl->stats.pri_underrun_detected++;
}
#endif
/* vote on sleep */
if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
mddi_vote_to_sleep(host_idx, TRUE);
}
#ifdef DEBUG_MDDIHOSTI
/* need to stop polling timer */
if (mddi_gpio.polling_enabled) {
(void) timer_clr(&mddi_gpio_poll_timer, T_NONE);
}
#endif
}
}
void mddi_host_timer_service(unsigned long data)
{
#ifndef FEATURE_MDDI_DISABLE_REVERSE
unsigned long flags;
#endif
mddi_host_type host_idx;
mddi_host_cntl_type *pmhctl;
unsigned long time_ms = MDDI_DEFAULT_TIMER_LENGTH;
init_timer(&mddi_host_timer);
mddi_host_timer.function = mddi_host_timer_service;
mddi_host_timer.data = 0;
mddi_host_timer.expires = jiffies + ((time_ms * HZ) / 1000);
add_timer(&mddi_host_timer);
for (host_idx = MDDI_HOST_PRIM; host_idx < MDDI_NUM_HOST_CORES;
host_idx++) {
pmhctl = &(mhctl[host_idx]);
mddi_log_stats_counter += (uint32) time_ms;
#ifndef FEATURE_MDDI_DISABLE_REVERSE
pmhctl->rtd_counter += (uint32) time_ms;
pmhctl->client_status_cnt += (uint32) time_ms;
if (host_idx == MDDI_HOST_PRIM) {
if (pmhctl->client_status_cnt >=
mddi_client_status_frequency) {
if ((pmhctl->link_state ==
MDDI_LINK_HIBERNATING)
&& (pmhctl->client_status_cnt >
mddi_client_status_frequency)) {
/*
* special case where we are hibernating
* and mddi_host_isr is not firing, so
* kick the link so that the status can
* be retrieved
*/
/* need to wake up link before issuing
* rev encap command
*/
MDDI_MSG_INFO("wake up link!\n");
spin_lock_irqsave(&mddi_host_spin_lock,
flags);
mddi_host_enable_hclk();
mddi_host_enable_io_clock();
pmhctl->link_state =
MDDI_LINK_ACTIVATING;
mddi_host_reg_out(CMD,
MDDI_CMD_LINK_ACTIVE);
spin_unlock_irqrestore
(&mddi_host_spin_lock, flags);
} else
if ((pmhctl->link_state == MDDI_LINK_ACTIVE)
&& pmhctl->disable_hibernation) {
/*
* special case where we have disabled
* hibernation and mddi_host_isr
* is not firing, so enable interrupt
* for no pkts pending, which will
* generate an interrupt
*/
MDDI_MSG_INFO("kick isr!\n");
spin_lock_irqsave(&mddi_host_spin_lock,
flags);
mddi_host_enable_hclk();
mddi_host_reg_outm(INTEN,
MDDI_INT_NO_CMD_PKTS_PEND,
MDDI_INT_NO_CMD_PKTS_PEND);
spin_unlock_irqrestore
(&mddi_host_spin_lock, flags);
}
}
}
#endif /* #ifndef FEATURE_MDDI_DISABLE_REVERSE */
}
/* Check if logging is turned on */
for (host_idx = MDDI_HOST_PRIM; host_idx < MDDI_NUM_HOST_CORES;
host_idx++) {
mddi_log_params_struct_type *prev_ptr = &(prev_parms[host_idx]);
pmhctl = &(mhctl[host_idx]);
if (mddi_debug_log_statistics) {
/* get video pkt count from MDP, since MDDI sw cannot know this */
pmhctl->log_parms.vid_cnt = mdp_total_vdopkts;
if (mddi_log_stats_counter >= mddi_log_stats_frequency) {
/* mddi_log_stats_counter = 0; */
if (mddi_debug_log_statistics) {
MDDI_MSG_NOTICE
("MDDI Statistics since last report:\n");
MDDI_MSG_NOTICE(" Packets sent:\n");
MDDI_MSG_NOTICE
(" %d RTD packet(s)\n",
pmhctl->log_parms.rtd_cnt -
prev_ptr->rtd_cnt);
if (prev_ptr->rtd_cnt !=
pmhctl->log_parms.rtd_cnt) {
unsigned long flags;
spin_lock_irqsave
(&mddi_host_spin_lock,
flags);
mddi_host_enable_hclk();
pmhctl->rtd_value =
mddi_host_reg_in(RTD_VAL);
spin_unlock_irqrestore
(&mddi_host_spin_lock,
flags);
MDDI_MSG_NOTICE
(" RTD value=%d\n",
pmhctl->rtd_value);
}
MDDI_MSG_NOTICE
(" %d VIDEO packets\n",
pmhctl->log_parms.vid_cnt -
prev_ptr->vid_cnt);
MDDI_MSG_NOTICE
(" %d Register Access packets\n",
pmhctl->log_parms.reg_acc_cnt -
prev_ptr->reg_acc_cnt);
MDDI_MSG_NOTICE
(" %d Reverse Encapsulation packet(s)\n",
pmhctl->log_parms.rev_enc_cnt -
prev_ptr->rev_enc_cnt);
if (prev_ptr->rev_enc_cnt !=
pmhctl->log_parms.rev_enc_cnt) {
/* report # of reverse CRC errors */
MDDI_MSG_NOTICE
(" %d reverse CRC errors detected\n",
pmhctl->log_parms.
rev_crc_cnt -
prev_ptr->rev_crc_cnt);
}
MDDI_MSG_NOTICE
(" Packets received:\n");
MDDI_MSG_NOTICE
(" %d Client Status packets",
pmhctl->log_parms.cli_stat_cnt -
prev_ptr->cli_stat_cnt);
if (prev_ptr->cli_stat_cnt !=
pmhctl->log_parms.cli_stat_cnt) {
MDDI_MSG_NOTICE
(" %d forward CRC errors reported\n",
pmhctl->log_parms.
fwd_crc_cnt -
prev_ptr->fwd_crc_cnt);
}
MDDI_MSG_NOTICE
(" %d Register Access Read packets\n",
pmhctl->log_parms.reg_read_cnt -
prev_ptr->reg_read_cnt);
if (pmhctl->link_state ==
MDDI_LINK_ACTIVE) {
MDDI_MSG_NOTICE
(" Current Link Status: Active\n");
} else
if ((pmhctl->link_state ==
MDDI_LINK_HIBERNATING)
|| (pmhctl->link_state ==
MDDI_LINK_ACTIVATING)) {
MDDI_MSG_NOTICE
(" Current Link Status: Hibernation\n");
} else {
MDDI_MSG_NOTICE
(" Current Link Status: Inactive\n");
}
MDDI_MSG_NOTICE
(" Active state entered %d times\n",
pmhctl->log_parms.link_active_cnt -
prev_ptr->link_active_cnt);
MDDI_MSG_NOTICE
(" Hibernation state entered %d times\n",
pmhctl->log_parms.
link_hibernate_cnt -
prev_ptr->link_hibernate_cnt);
}
}
prev_parms[host_idx] = pmhctl->log_parms;
}
}
if (mddi_log_stats_counter >= mddi_log_stats_frequency)
mddi_log_stats_counter = 0;
return;
} /* mddi_host_timer_service */
static void mddi_process_link_list_done(void)
{
mddi_host_type host_idx = mddi_curr_host;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
/* normal forward linked list packet(s) were sent */
if (pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) {
MDDI_MSG_ERR("**** getting LL done, but no list ****\n");
} else {
uint16 idx;
#ifndef FEATURE_MDDI_DISABLE_REVERSE
if (pmhctl->rev_state == MDDI_REV_REG_READ_ISSUED) {
/* special case where a register read packet was sent */
pmhctl->rev_state = MDDI_REV_REG_READ_SENT;
if (pmhctl->llist_info.reg_read_idx == UNASSIGNED_INDEX) {
MDDI_MSG_ERR
("**** getting LL done, but no list ****\n");
}
}
#endif
for (idx = pmhctl->llist_info.transmitting_start_idx;;) {
uint16 next_idx = pmhctl->llist_notify[idx].next_idx;
/* with reg read we don't release the waiting tcb until after
* the reverse encapsulation has completed.
*/
if (idx != pmhctl->llist_info.reg_read_idx) {
/* notify task that may be waiting on this completion */
if (pmhctl->llist_notify[idx].waiting) {
complete(&
(pmhctl->llist_notify[idx].
done_comp));
}
if (pmhctl->llist_notify[idx].done_cb != NULL) {
(*(pmhctl->llist_notify[idx].done_cb))
();
}
pmhctl->llist_notify[idx].in_use = FALSE;
pmhctl->llist_notify[idx].waiting = FALSE;
pmhctl->llist_notify[idx].done_cb = NULL;
if (idx < MDDI_NUM_DYNAMIC_LLIST_ITEMS) {
/* static LLIST items are configured only once */
pmhctl->llist_notify[idx].next_idx =
UNASSIGNED_INDEX;
}
/*
* currently, all linked list packets are
* register access, so we can increment the
* counter for that packet type here.
*/
pmhctl->log_parms.reg_acc_cnt++;
}
if (idx == pmhctl->llist_info.transmitting_end_idx)
break;
idx = next_idx;
if (idx == UNASSIGNED_INDEX)
MDDI_MSG_CRIT("MDDI linked list corruption!\n");
}
pmhctl->llist_info.transmitting_start_idx = UNASSIGNED_INDEX;
pmhctl->llist_info.transmitting_end_idx = UNASSIGNED_INDEX;
if (pmhctl->mddi_waiting_for_llist_avail) {
if (!
(pmhctl->
llist_notify[pmhctl->llist_info.next_free_idx].
in_use)) {
pmhctl->mddi_waiting_for_llist_avail = FALSE;
complete(&(pmhctl->mddi_llist_avail_comp));
}
}
}
/* Turn off MDDI_INT_PRI_LINK_LIST_DONE interrupt */
mddi_host_reg_outm(INTEN, MDDI_INT_PRI_LINK_LIST_DONE, 0);
}
static void mddi_queue_forward_linked_list(void)
{
uint16 first_pkt_index;
mddi_linked_list_type *llist_dma;
mddi_host_type host_idx = mddi_curr_host;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
llist_dma = pmhctl->llist_dma_ptr;
first_pkt_index = UNASSIGNED_INDEX;
if (pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) {
#ifndef FEATURE_MDDI_DISABLE_REVERSE
if (pmhctl->llist_info.reg_read_waiting) {
if (pmhctl->rev_state == MDDI_REV_IDLE) {
/*
* we have a register read to send and
* can send it now
*/
pmhctl->rev_state = MDDI_REV_REG_READ_ISSUED;
mddi_reg_read_retry = 0;
first_pkt_index =
pmhctl->llist_info.waiting_start_idx;
pmhctl->llist_info.reg_read_waiting = FALSE;
}
} else
#endif
{
/*
* not register read to worry about, go ahead and write
* anything that may be on the waiting list.
*/
first_pkt_index = pmhctl->llist_info.waiting_start_idx;
}
}
if (first_pkt_index != UNASSIGNED_INDEX) {
pmhctl->llist_info.transmitting_start_idx =
pmhctl->llist_info.waiting_start_idx;
pmhctl->llist_info.transmitting_end_idx =
pmhctl->llist_info.waiting_end_idx;
pmhctl->llist_info.waiting_start_idx = UNASSIGNED_INDEX;
pmhctl->llist_info.waiting_end_idx = UNASSIGNED_INDEX;
/* write to the primary pointer register */
MDDI_MSG_DEBUG("MDDI writing primary ptr with idx=%d\n",
first_pkt_index);
pmhctl->int_type.llist_ptr_write_2++;
dma_coherent_pre_ops();
mddi_host_reg_out(PRI_PTR, &llist_dma[first_pkt_index]);
/* enable interrupt when complete */
mddi_host_reg_outm(INTEN, MDDI_INT_PRI_LINK_LIST_DONE,
MDDI_INT_PRI_LINK_LIST_DONE);
}
}
#ifndef FEATURE_MDDI_DISABLE_REVERSE
static void mddi_read_rev_packet(byte *data_ptr)
{
uint16 i, length;
mddi_host_type host_idx = mddi_curr_host;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
uint8 *rev_ptr_overflow =
(pmhctl->rev_ptr_start + MDDI_REV_BUFFER_SIZE);
/* first determine the length and handle invalid lengths */
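/*
* The 16-bit packet length is stored little-endian in the first two
* bytes; each byte read below may independently wrap from
* rev_ptr_overflow back to rev_ptr_start.
*/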
length = *pmhctl->rev_ptr_curr++;
if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
length |= ((*pmhctl->rev_ptr_curr++) << 8);
if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
if (length > (pmhctl->rev_pkt_size - 2)) {
MDDI_MSG_ERR("Invalid rev pkt length %d\n", length);
/* rev_pkt_size should always be <= rev_ptr_size so limit to packet size */
length = pmhctl->rev_pkt_size - 2;
}
/* If the data pointer is NULL, just increment the pmhctl->rev_ptr_curr.
* Loop around if necessary. Don't bother reading the data.
*/
if (data_ptr == NULL) {
pmhctl->rev_ptr_curr += length;
if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
pmhctl->rev_ptr_curr -= MDDI_REV_BUFFER_SIZE;
return;
}
data_ptr[0] = length & 0x0ff;
data_ptr[1] = length >> 8;
data_ptr += 2;
/* copy the data to data_ptr byte-at-a-time */
for (i = 0; (i < length) && (pmhctl->rev_ptr_curr < rev_ptr_overflow);
i++)
*data_ptr++ = *pmhctl->rev_ptr_curr++;
if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
for (; (i < length) && (pmhctl->rev_ptr_curr < rev_ptr_overflow); i++)
*data_ptr++ = *pmhctl->rev_ptr_curr++;
}
static void mddi_process_rev_packets(void)
{
uint32 rev_packet_count;
word i;
uint32 crc_errors;
boolean mddi_reg_read_successful = FALSE;
mddi_host_type host_idx = mddi_curr_host;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
pmhctl->log_parms.rev_enc_cnt++;
if ((pmhctl->rev_state != MDDI_REV_ENCAP_ISSUED) &&
(pmhctl->rev_state != MDDI_REV_STATUS_REQ_ISSUED) &&
(pmhctl->rev_state != MDDI_REV_CLIENT_CAP_ISSUED)) {
MDDI_MSG_ERR("Wrong state %d for reverse int\n",
pmhctl->rev_state);
}
/* Turn off MDDI_INT_REV_AVAIL interrupt */
mddi_host_reg_outm(INTEN, MDDI_INT_REV_DATA_AVAIL, 0);
/* Clear rev data avail int */
mddi_host_reg_out(INT, MDDI_INT_REV_DATA_AVAIL);
/* Get Number of packets */
rev_packet_count = mddi_host_reg_in(REV_PKT_CNT);
#ifndef T_MSM7500
/* Clear out rev packet counter */
mddi_host_reg_out(REV_PKT_CNT, 0x0000);
#endif
#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
if ((pmhctl->rev_state == MDDI_REV_CLIENT_CAP_ISSUED) &&
(rev_packet_count > 0) &&
(mddi_host_core_version == 0x28 ||
mddi_host_core_version == 0x30)) {
uint32 int_reg;
uint32 max_count = 0;
mddi_host_reg_out(REV_PTR, pmhctl->mddi_rev_ptr_write_val);
int_reg = mddi_host_reg_in(INT);
while ((int_reg & 0x100000) == 0) {
udelay(3);
int_reg = mddi_host_reg_in(INT);
if (++max_count > 100)
break;
}
}
#endif
/* Get CRC error count */
crc_errors = mddi_host_reg_in(REV_CRC_ERR);
if (crc_errors != 0) {
pmhctl->log_parms.rev_crc_cnt += crc_errors;
pmhctl->stats.rev_crc_count += crc_errors;
MDDI_MSG_ERR("!!! MDDI %d Reverse CRC Error(s) !!!\n",
crc_errors);
#ifndef T_MSM7500
/* Clear CRC error count */
mddi_host_reg_out(REV_CRC_ERR, 0x0000);
#endif
/* also issue an RTD to attempt recovery */
pmhctl->rtd_counter = mddi_rtd_frequency;
}
pmhctl->rtd_value = mddi_host_reg_in(RTD_VAL);
MDDI_MSG_DEBUG("MDDI rev pkt cnt=%d, ptr=0x%x, RTD:0x%x\n",
rev_packet_count,
pmhctl->rev_ptr_curr - pmhctl->rev_ptr_start,
pmhctl->rtd_value);
if (rev_packet_count >= 1) {
mddi_invalidate_cache_lines((uint32 *) pmhctl->rev_ptr_start,
MDDI_REV_BUFFER_SIZE);
}
/* order the reads */
dma_coherent_post_ops();
for (i = 0; i < rev_packet_count; i++) {
mddi_rev_packet_type *rev_pkt_ptr;
mddi_read_rev_packet(rev_packet_data);
rev_pkt_ptr = (mddi_rev_packet_type *) rev_packet_data;
if (rev_pkt_ptr->packet_length > pmhctl->rev_pkt_size) {
MDDI_MSG_ERR("!!!invalid packet size: %d\n",
rev_pkt_ptr->packet_length);
}
MDDI_MSG_DEBUG("MDDI rev pkt 0x%x size 0x%x\n",
rev_pkt_ptr->packet_type,
rev_pkt_ptr->packet_length);
/* Do whatever you want to do with the data based on the packet type */
switch (rev_pkt_ptr->packet_type) {
case 66: /* Client Capability */
{
mddi_client_capability_type
*client_capability_pkt_ptr;
client_capability_pkt_ptr =
(mddi_client_capability_type *)
rev_packet_data;
MDDI_MSG_NOTICE
("Client Capability: Week=%d, Year=%d\n",
client_capability_pkt_ptr->
Week_of_Manufacture,
client_capability_pkt_ptr->
Year_of_Manufacture);
memcpy((void *)&mddi_client_capability_pkt,
(void *)rev_packet_data,
sizeof(mddi_client_capability_type));
pmhctl->log_parms.cli_cap_cnt++;
}
break;
case 70: /* Display Status */
{
mddi_client_status_type *client_status_pkt_ptr;
client_status_pkt_ptr =
(mddi_client_status_type *) rev_packet_data;
if ((client_status_pkt_ptr->crc_error_count !=
0)
|| (client_status_pkt_ptr->
reverse_link_request != 0)) {
MDDI_MSG_ERR
("Client Status: RevReq=%d, CrcErr=%d\n",
client_status_pkt_ptr->
reverse_link_request,
client_status_pkt_ptr->
crc_error_count);
} else {
MDDI_MSG_DEBUG
("Client Status: RevReq=%d, CrcErr=%d\n",
client_status_pkt_ptr->
reverse_link_request,
client_status_pkt_ptr->
crc_error_count);
}
pmhctl->log_parms.fwd_crc_cnt +=
client_status_pkt_ptr->crc_error_count;
pmhctl->stats.fwd_crc_count +=
client_status_pkt_ptr->crc_error_count;
pmhctl->log_parms.cli_stat_cnt++;
}
break;
case 146: /* register access packet */
{
mddi_register_access_packet_type
* regacc_pkt_ptr;
regacc_pkt_ptr =
(mddi_register_access_packet_type *)
rev_packet_data;
MDDI_MSG_DEBUG
("Reg Acc parse reg=0x%x, value=0x%x\n",
regacc_pkt_ptr->register_address,
regacc_pkt_ptr->register_data_list);
/* Copy register value to location passed in */
if (mddi_reg_read_value_ptr) {
#if defined(T_MSM6280) && !defined(T_MSM7200)
/* only least significant 16 bits are valid with 6280 */
*mddi_reg_read_value_ptr =
regacc_pkt_ptr->
register_data_list & 0x0000ffff;
#else
*mddi_reg_read_value_ptr =
regacc_pkt_ptr->register_data_list;
#endif
mddi_reg_read_successful = TRUE;
mddi_reg_read_value_ptr = NULL;
}
#ifdef DEBUG_MDDIHOSTI
if ((mddi_gpio.polling_enabled) &&
(regacc_pkt_ptr->register_address ==
mddi_gpio.polling_reg)) {
/*
* ToDo: need to call Linux GPIO call
* here...
*/
mddi_client_lcd_gpio_poll(
regacc_pkt_ptr->register_data_list);
}
#endif
pmhctl->log_parms.reg_read_cnt++;
}
break;
default: /* any other packet */
{
uint16 hdlr;
for (hdlr = 0; hdlr < MAX_MDDI_REV_HANDLERS;
hdlr++) {
if (mddi_rev_pkt_handler[hdlr].
pkt_type ==
rev_pkt_ptr->packet_type) {
(*
(mddi_rev_pkt_handler[hdlr].
handler)) (rev_pkt_ptr);
/* pmhctl->rev_state = MDDI_REV_IDLE; */
break;
}
}
if (hdlr >= MAX_MDDI_REV_HANDLERS)
MDDI_MSG_ERR("MDDI unknown rev pkt\n");
}
break;
}
}
if ((pmhctl->rev_ptr_curr + pmhctl->rev_pkt_size) >=
(pmhctl->rev_ptr_start + MDDI_REV_BUFFER_SIZE)) {
pmhctl->rev_ptr_written = FALSE;
}
if (pmhctl->rev_state == MDDI_REV_ENCAP_ISSUED) {
pmhctl->rev_state = MDDI_REV_IDLE;
if (mddi_rev_user.waiting) {
mddi_rev_user.waiting = FALSE;
complete(&(mddi_rev_user.done_comp));
} else if (pmhctl->llist_info.reg_read_idx == UNASSIGNED_INDEX) {
MDDI_MSG_ERR
("Reverse Encap state, but no reg read in progress\n");
} else {
if ((!mddi_reg_read_successful) &&
(mddi_reg_read_retry < mddi_reg_read_retry_max) &&
(mddi_enable_reg_read_retry)) {
/*
* There is a race condition that can happen
* where the reverse encapsulation message is
* sent out by the MDDI host before the register
* read packet is sent. As a work-around for
* that problem we issue the reverse
* encapsulation one more time before giving up.
*/
if (mddi_enable_reg_read_retry_once)
mddi_reg_read_retry =
mddi_reg_read_retry_max;
pmhctl->rev_state = MDDI_REV_REG_READ_SENT;
pmhctl->stats.reg_read_failure++;
} else {
uint16 reg_read_idx =
pmhctl->llist_info.reg_read_idx;
mddi_reg_read_retry = 0;
if (pmhctl->llist_notify[reg_read_idx].waiting) {
complete(&
(pmhctl->
llist_notify[reg_read_idx].
done_comp));
}
pmhctl->llist_info.reg_read_idx =
UNASSIGNED_INDEX;
if (pmhctl->llist_notify[reg_read_idx].
done_cb != NULL) {
(*
(pmhctl->llist_notify[reg_read_idx].
done_cb)) ();
}
pmhctl->llist_notify[reg_read_idx].next_idx =
UNASSIGNED_INDEX;
pmhctl->llist_notify[reg_read_idx].in_use =
FALSE;
pmhctl->llist_notify[reg_read_idx].waiting =
FALSE;
pmhctl->llist_notify[reg_read_idx].done_cb =
NULL;
if (!mddi_reg_read_successful)
pmhctl->stats.reg_read_failure++;
}
}
} else if (pmhctl->rev_state == MDDI_REV_CLIENT_CAP_ISSUED) {
#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
if (mddi_host_core_version == 0x28 ||
mddi_host_core_version == 0x30) {
mddi_host_reg_out(FIFO_ALLOC, 0x00);
pmhctl->rev_ptr_written = TRUE;
mddi_host_reg_out(REV_PTR,
pmhctl->mddi_rev_ptr_write_val);
pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
mddi_host_reg_out(CMD, 0xC00);
}
#endif
if (mddi_rev_user.waiting) {
mddi_rev_user.waiting = FALSE;
complete(&(mddi_rev_user.done_comp));
}
pmhctl->rev_state = MDDI_REV_IDLE;
} else {
pmhctl->rev_state = MDDI_REV_IDLE;
}
/* pmhctl->rev_state = MDDI_REV_IDLE; */
/* Re-enable interrupt */
mddi_host_reg_outm(INTEN, MDDI_INT_REV_DATA_AVAIL,
MDDI_INT_REV_DATA_AVAIL);
}
static void mddi_issue_reverse_encapsulation(void)
{
mddi_host_type host_idx = mddi_curr_host;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
/* Only issue a reverse encapsulation packet if:
* 1) another reverse is not in progress (MDDI_REV_IDLE).
* 2) a register read has been sent (MDDI_REV_REG_READ_SENT).
* 3) forward is not in progress, because of a hw bug in the client
* that causes forward crc errors on the packet immediately after
* rev encap.
*/
if (((pmhctl->rev_state == MDDI_REV_IDLE) ||
(pmhctl->rev_state == MDDI_REV_REG_READ_SENT)) &&
(pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) &&
(!mdp_in_processing)) {
uint32 mddi_command = MDDI_CMD_SEND_REV_ENCAP;
if ((pmhctl->rev_state == MDDI_REV_REG_READ_SENT) ||
(mddi_rev_encap_user_request == TRUE)) {
mddi_host_enable_io_clock();
if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
/* need to wake up link before issuing rev encap command */
MDDI_MSG_DEBUG("wake up link!\n");
pmhctl->link_state = MDDI_LINK_ACTIVATING;
mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
} else {
if (pmhctl->rtd_counter >= mddi_rtd_frequency) {
MDDI_MSG_DEBUG
("mddi sending RTD command!\n");
mddi_host_reg_out(CMD,
MDDI_CMD_SEND_RTD);
pmhctl->rtd_counter = 0;
pmhctl->log_parms.rtd_cnt++;
}
if (pmhctl->rev_state != MDDI_REV_REG_READ_SENT) {
/* this is generic reverse request by user, so
* reset the waiting flag. */
mddi_rev_encap_user_request = FALSE;
}
/* link is active so send reverse encap to get register read results */
pmhctl->rev_state = MDDI_REV_ENCAP_ISSUED;
mddi_command = MDDI_CMD_SEND_REV_ENCAP;
MDDI_MSG_DEBUG("sending rev encap!\n");
}
} else
if ((pmhctl->client_status_cnt >=
mddi_client_status_frequency)
|| mddi_client_capability_request) {
mddi_host_enable_io_clock();
if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
/* only wake up the link if the client status is overdue */
if ((pmhctl->client_status_cnt >=
(mddi_client_status_frequency * 2))
|| mddi_client_capability_request) {
/* need to wake up link before issuing rev encap command */
MDDI_MSG_DEBUG("wake up link!\n");
pmhctl->link_state =
MDDI_LINK_ACTIVATING;
mddi_host_reg_out(CMD,
MDDI_CMD_LINK_ACTIVE);
}
} else {
if (pmhctl->rtd_counter >= mddi_rtd_frequency) {
MDDI_MSG_DEBUG
("mddi sending RTD command!\n");
mddi_host_reg_out(CMD,
MDDI_CMD_SEND_RTD);
pmhctl->rtd_counter = 0;
pmhctl->log_parms.rtd_cnt++;
}
/* periodically get client status */
MDDI_MSG_DEBUG
("mddi sending rev enc! (get status)\n");
if (mddi_client_capability_request) {
pmhctl->rev_state =
MDDI_REV_CLIENT_CAP_ISSUED;
mddi_command = MDDI_CMD_GET_CLIENT_CAP;
mddi_client_capability_request = FALSE;
} else {
pmhctl->rev_state =
MDDI_REV_STATUS_REQ_ISSUED;
pmhctl->client_status_cnt = 0;
mddi_command =
MDDI_CMD_GET_CLIENT_STATUS;
}
}
}
if ((pmhctl->rev_state == MDDI_REV_ENCAP_ISSUED) ||
(pmhctl->rev_state == MDDI_REV_STATUS_REQ_ISSUED) ||
(pmhctl->rev_state == MDDI_REV_CLIENT_CAP_ISSUED)) {
pmhctl->int_type.rev_encap_count++;
#if defined(T_MSM6280) && !defined(T_MSM7200)
mddi_rev_pointer_written = TRUE;
mddi_host_reg_out(REV_PTR, mddi_rev_ptr_write_val);
mddi_rev_ptr_curr = mddi_rev_ptr_start;
/* force new rev ptr command */
mddi_host_reg_out(CMD, 0xC00);
#else
if (!pmhctl->rev_ptr_written) {
MDDI_MSG_DEBUG("writing reverse pointer!\n");
pmhctl->rev_ptr_written = TRUE;
#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
if ((pmhctl->rev_state ==
MDDI_REV_CLIENT_CAP_ISSUED) &&
(mddi_host_core_version == 0x28 ||
mddi_host_core_version == 0x30)) {
pmhctl->rev_ptr_written = FALSE;
mddi_host_reg_out(FIFO_ALLOC, 0x02);
} else
mddi_host_reg_out(REV_PTR,
pmhctl->
mddi_rev_ptr_write_val);
#else
mddi_host_reg_out(REV_PTR,
pmhctl->
mddi_rev_ptr_write_val);
#endif
}
#endif
if (mddi_debug_clear_rev_data) {
uint16 i;
for (i = 0; i < MDDI_MAX_REV_DATA_SIZE / 4; i++)
pmhctl->rev_data_buf[i] = 0xdddddddd;
/* clean cache */
mddi_flush_cache_lines(pmhctl->rev_data_buf,
MDDI_MAX_REV_DATA_SIZE);
}
/* send reverse encapsulation to get needed data */
mddi_host_reg_out(CMD, mddi_command);
}
}
}
static void mddi_process_client_initiated_wakeup(void)
{
mddi_host_type host_idx = mddi_curr_host;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
/* Disable MDDI_INT Interrupt, we detect client initiated wakeup one
* time for each entry into hibernation */
mddi_host_reg_outm(INTEN, MDDI_INT_MDDI_IN, 0);
if (host_idx == MDDI_HOST_PRIM) {
if (mddi_vsync_detect_enabled) {
mddi_host_enable_io_clock();
#ifndef MDDI_HOST_DISP_LISTEN
/* issue command to bring up link */
/* need to do this to clear the vsync condition */
if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
pmhctl->link_state = MDDI_LINK_ACTIVATING;
mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
}
#endif
/*
* Indicate to client specific code that vsync was
* enabled, and we did not detect a client initiated
* wakeup. The client specific handler can clear the
* condition if necessary to prevent subsequent
* client initiated wakeups.
*/
mddi_client_lcd_vsync_detected(TRUE);
pmhctl->log_parms.vsync_response_cnt++;
MDDI_MSG_NOTICE("MDDI_INT_IN condition\n");
}
}
if (mddi_gpio.polling_enabled) {
mddi_host_enable_io_clock();
/* check interrupt status now */
(void)mddi_queue_register_read_int(mddi_gpio.polling_reg,
&mddi_gpio.polling_val);
}
}
#endif /* FEATURE_MDDI_DISABLE_REVERSE */
static void mddi_host_isr(void)
{
uint32 int_reg, int_en;
#ifndef FEATURE_MDDI_DISABLE_REVERSE
uint32 status_reg;
#endif
mddi_host_type host_idx = mddi_curr_host;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
if (!MDDI_HOST_IS_HCLK_ON) {
MDDI_HOST_ENABLE_HCLK;
MDDI_MSG_DEBUG("HCLK disabled, but isr is firing\n");
}
int_reg = mddi_host_reg_in(INT);
int_en = mddi_host_reg_in(INTEN);
pmhctl->saved_int_reg = int_reg;
pmhctl->saved_int_en = int_en;
int_reg = int_reg & int_en;
pmhctl->int_type.count++;
#ifndef FEATURE_MDDI_DISABLE_REVERSE
status_reg = mddi_host_reg_in(STAT);
if ((int_reg & MDDI_INT_MDDI_IN) ||
((int_en & MDDI_INT_MDDI_IN) &&
((int_reg == 0) || (status_reg & MDDI_STAT_CLIENT_WAKEUP_REQ)))) {
/*
* The MDDI_IN condition will clear itself, and so it is
* possible that MDDI_IN was the reason for the isr firing,
* even though the interrupt register does not have the
* MDDI_IN bit set. To check if this was the case we need to
* look at the status register bit that signifies a client
* initiated wakeup. If the status register bit is set, as well
* as the MDDI_IN interrupt enabled, then we treat this as a
* client initiated wakeup.
*/
if (int_reg & MDDI_INT_MDDI_IN)
pmhctl->int_type.in_count++;
mddi_process_client_initiated_wakeup();
}
#endif
if (int_reg & MDDI_INT_LINK_STATE_CHANGES) {
pmhctl->int_type.state_change_count++;
mddi_report_state_change(int_reg);
}
if (int_reg & MDDI_INT_PRI_LINK_LIST_DONE) {
pmhctl->int_type.ll_done_count++;
mddi_process_link_list_done();
}
#ifndef FEATURE_MDDI_DISABLE_REVERSE
if (int_reg & MDDI_INT_REV_DATA_AVAIL) {
pmhctl->int_type.rev_avail_count++;
mddi_process_rev_packets();
}
#endif
if (int_reg & MDDI_INT_ERROR_CONDITIONS) {
pmhctl->int_type.error_count++;
mddi_report_errors(int_reg);
mddi_host_reg_out(INT, int_reg & MDDI_INT_ERROR_CONDITIONS);
}
#ifndef FEATURE_MDDI_DISABLE_REVERSE
mddi_issue_reverse_encapsulation();
if ((pmhctl->rev_state != MDDI_REV_ENCAP_ISSUED) &&
(pmhctl->rev_state != MDDI_REV_STATUS_REQ_ISSUED))
#endif
/* don't want simultaneous reverse and forward with Eagle */
mddi_queue_forward_linked_list();
if (int_reg & MDDI_INT_NO_CMD_PKTS_PEND) {
/* this interrupt is used to kick the isr when hibernation is disabled */
mddi_host_reg_outm(INTEN, MDDI_INT_NO_CMD_PKTS_PEND, 0);
}
if ((!mddi_host_mdp_active_flag) &&
(!mddi_vsync_detect_enabled) &&
(pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) &&
(pmhctl->llist_info.waiting_start_idx == UNASSIGNED_INDEX) &&
(pmhctl->rev_state == MDDI_REV_IDLE)) {
if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
mddi_host_disable_io_clock();
mddi_host_disable_hclk();
}
#ifdef FEATURE_MDDI_HOST_ENABLE_EARLY_HIBERNATION
else if ((pmhctl->link_state == MDDI_LINK_ACTIVE) &&
(!pmhctl->disable_hibernation)) {
mddi_host_reg_out(CMD, MDDI_CMD_POWERDOWN);
}
#endif
}
}
static void mddi_host_isr_primary(void)
{
mddi_curr_host = MDDI_HOST_PRIM;
mddi_host_isr();
}
irqreturn_t mddi_pmdh_isr_proxy(int irq, void *ptr)
{
mddi_host_isr_primary();
return IRQ_HANDLED;
}
static void mddi_host_isr_external(void)
{
mddi_curr_host = MDDI_HOST_EXT;
mddi_host_isr();
mddi_curr_host = MDDI_HOST_PRIM;
}
irqreturn_t mddi_emdh_isr_proxy(int irq, void *ptr)
{
mddi_host_isr_external();
return IRQ_HANDLED;
}
static void mddi_host_initialize_registers(mddi_host_type host_idx)
{
uint32 pad_reg_val;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
if (pmhctl->driver_state == MDDI_DRIVER_ENABLED)
return;
/* turn on HCLK to MDDI host core */
mddi_host_enable_hclk();
/* MDDI Reset command */
mddi_host_reg_out(CMD, MDDI_CMD_RESET);
/* Version register (= 0x01) */
mddi_host_reg_out(VERSION, 0x0001);
/* Bytes per subframe register */
mddi_host_reg_out(BPS, MDDI_HOST_BYTES_PER_SUBFRAME);
/* Subframes per media frames register (= 0x03) */
mddi_host_reg_out(SPM, 0x0003);
/* Turn Around 1 register (= 0x05) */
mddi_host_reg_out(TA1_LEN, 0x0005);
/* Turn Around 2 register (= 0x0C) */
mddi_host_reg_out(TA2_LEN, MDDI_HOST_TA2_LEN);
/* Drive hi register (= 0x96) */
mddi_host_reg_out(DRIVE_HI, 0x0096);
/* Drive lo register (= 0x32) */
mddi_host_reg_out(DRIVE_LO, 0x0032);
/* Display wakeup count register (= 0x3c) */
mddi_host_reg_out(DISP_WAKE, 0x003c);
/* Reverse Rate Divisor register (= 0x2) */
mddi_host_reg_out(REV_RATE_DIV, MDDI_HOST_REV_RATE_DIV);
#ifndef FEATURE_MDDI_DISABLE_REVERSE
/* Reverse Pointer Size */
mddi_host_reg_out(REV_SIZE, MDDI_REV_BUFFER_SIZE);
/* Rev Encap Size */
mddi_host_reg_out(REV_ENCAP_SZ, pmhctl->rev_pkt_size);
#endif
/* Periodic Rev Encap */
/* don't send periodically */
mddi_host_reg_out(CMD, MDDI_CMD_PERIODIC_REV_ENCAP);
pad_reg_val = mddi_host_reg_in(PAD_CTL);
if (pad_reg_val == 0) {
/* If we are turning on band gap, need to wait 5us before turning
* on the rest of the PAD */
mddi_host_reg_out(PAD_CTL, 0x08000);
udelay(5);
}
#ifdef T_MSM7200
/* Recommendation from PAD hw team */
mddi_host_reg_out(PAD_CTL, 0xa850a);
#else
/* Recommendation from PAD hw team */
mddi_host_reg_out(PAD_CTL, 0xa850f);
#endif
#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
mddi_host_reg_out(PAD_IO_CTL, 0x00320000);
mddi_host_reg_out(PAD_CAL, 0x00220020);
#endif
mddi_host_core_version = mddi_host_reg_inm(CORE_VER, 0xffff);
#ifndef FEATURE_MDDI_DISABLE_REVERSE
if (mddi_host_core_version >= 8)
mddi_rev_ptr_workaround = FALSE;
pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
#endif
if ((mddi_host_core_version > 8) && (mddi_host_core_version < 0x19))
mddi_host_reg_out(TEST, 0x2);
/* Need an even number for counts */
mddi_host_reg_out(DRIVER_START_CNT, 0x60006);
#ifndef T_MSM7500
/* Setup defaults for MDP related register */
mddi_host_reg_out(MDP_VID_FMT_DES, 0x5666);
mddi_host_reg_out(MDP_VID_PIX_ATTR, 0x00C3);
mddi_host_reg_out(MDP_VID_CLIENTID, 0);
#endif
/* automatically hibernate after 1 empty subframe */
if (pmhctl->disable_hibernation)
mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE);
else
mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE | 1);
/* Bring up link if display (client) requests it */
#ifdef MDDI_HOST_DISP_LISTEN
mddi_host_reg_out(CMD, MDDI_CMD_DISP_LISTEN);
#else
mddi_host_reg_out(CMD, MDDI_CMD_DISP_IGNORE);
#endif
}
void mddi_host_configure_interrupts(mddi_host_type host_idx, boolean enable)
{
unsigned long flags;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
spin_lock_irqsave(&mddi_host_spin_lock, flags);
/* turn on HCLK to MDDI host core if it has been disabled */
mddi_host_enable_hclk();
/* Clear MDDI Interrupt enable reg */
mddi_host_reg_out(INTEN, 0);
spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
if (enable) {
pmhctl->driver_state = MDDI_DRIVER_ENABLED;
if (host_idx == MDDI_HOST_PRIM) {
if (request_irq
(INT_MDDI_PRI, mddi_pmdh_isr_proxy, IRQF_DISABLED,
"PMDH", 0) != 0)
printk(KERN_ERR "mddi: unable to request primary irq\n");
else
int_mddi_pri_flag = TRUE;
} else {
if (request_irq
(INT_MDDI_EXT, mddi_emdh_isr_proxy, IRQF_DISABLED,
"EMDH", 0) != 0)
printk(KERN_ERR "mddi: unable to request external irq\n");
else
int_mddi_ext_flag = TRUE;
}
/* Set MDDI Interrupt enable reg -- Enable Reverse data avail */
#ifdef FEATURE_MDDI_DISABLE_REVERSE
mddi_host_reg_out(INTEN,
MDDI_INT_ERROR_CONDITIONS |
MDDI_INT_LINK_STATE_CHANGES);
#else
/* Reverse Pointer register */
pmhctl->rev_ptr_written = FALSE;
mddi_host_reg_out(INTEN,
MDDI_INT_REV_DATA_AVAIL |
MDDI_INT_ERROR_CONDITIONS |
MDDI_INT_LINK_STATE_CHANGES);
pmhctl->rtd_counter = mddi_rtd_frequency;
pmhctl->client_status_cnt = 0;
#endif
} else {
if (pmhctl->driver_state == MDDI_DRIVER_ENABLED)
pmhctl->driver_state = MDDI_DRIVER_DISABLED;
}
}
static void mddi_host_powerup(mddi_host_type host_idx)
{
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
if (pmhctl->link_state != MDDI_LINK_DISABLED)
return;
/* enable IO_CLK and hclk to MDDI host core */
mddi_host_enable_io_clock();
mddi_host_initialize_registers(host_idx);
mddi_host_configure_interrupts(host_idx, TRUE);
pmhctl->link_state = MDDI_LINK_ACTIVATING;
/* Link activate command */
mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
#ifdef CLKRGM_MDDI_IO_CLOCK_IN_MHZ
MDDI_MSG_NOTICE("MDDI Host: Activating Link %d Mbps\n",
CLKRGM_MDDI_IO_CLOCK_IN_MHZ * 2);
#else
MDDI_MSG_NOTICE("MDDI Host: Activating Link\n");
#endif
/* Initialize the timer */
if (host_idx == MDDI_HOST_PRIM)
mddi_host_timer_service(0);
}
/* Write out the MDDI configuration registers */
void mddi_host_init(mddi_host_type host_idx)
{
static boolean initialized = FALSE;
mddi_host_cntl_type *pmhctl;
if (host_idx >= MDDI_NUM_HOST_CORES) {
MDDI_MSG_ERR("Invalid host core index\n");
return;
}
if (!initialized) {
uint16 idx;
mddi_host_type host;
for (host = MDDI_HOST_PRIM; host < MDDI_NUM_HOST_CORES; host++) {
pmhctl = &(mhctl[host]);
initialized = TRUE;
pmhctl->llist_ptr =
dma_alloc_coherent(NULL, MDDI_LLIST_POOL_SIZE,
&(pmhctl->llist_dma_addr),
GFP_KERNEL);
pmhctl->llist_dma_ptr = (mddi_linked_list_type *)(void *)pmhctl->llist_dma_addr;
#ifdef FEATURE_MDDI_DISABLE_REVERSE
pmhctl->rev_data_buf = NULL;
if (pmhctl->llist_ptr == NULL)
#else
mddi_rev_user.waiting = FALSE;
init_completion(&(mddi_rev_user.done_comp));
pmhctl->rev_data_buf =
dma_alloc_coherent(NULL, MDDI_MAX_REV_DATA_SIZE,
&(pmhctl->rev_data_dma_addr),
GFP_KERNEL);
if ((pmhctl->llist_ptr == NULL)
|| (pmhctl->rev_data_buf == NULL))
#endif
{
MDDI_MSG_CRIT("unable to alloc non-cached memory\n");
}
llist_extern[host] = pmhctl->llist_ptr;
llist_dma_extern[host] = pmhctl->llist_dma_ptr;
llist_extern_notify[host] = pmhctl->llist_notify;
for (idx = 0; idx < UNASSIGNED_INDEX; idx++) {
init_completion(&pmhctl->llist_notify[idx].done_comp);
}
init_completion(&(pmhctl->mddi_llist_avail_comp));
spin_lock_init(&mddi_host_spin_lock);
pmhctl->mddi_waiting_for_llist_avail = FALSE;
pmhctl->mddi_rev_ptr_write_val =
(uint32) (void *)(pmhctl->rev_data_dma_addr);
pmhctl->rev_ptr_start = (void *)pmhctl->rev_data_buf;
pmhctl->rev_pkt_size = MDDI_DEFAULT_REV_PKT_SIZE;
pmhctl->rev_state = MDDI_REV_IDLE;
#ifdef IMAGE_MODEM_PROC
/* assume hibernation state is last state from APPS proc, so that
* we don't reinitialize the host core */
pmhctl->link_state = MDDI_LINK_HIBERNATING;
#else
pmhctl->link_state = MDDI_LINK_DISABLED;
#endif
pmhctl->driver_state = MDDI_DRIVER_DISABLED;
pmhctl->disable_hibernation = FALSE;
/* initialize llist variables */
pmhctl->llist_info.transmitting_start_idx =
UNASSIGNED_INDEX;
pmhctl->llist_info.transmitting_end_idx =
UNASSIGNED_INDEX;
pmhctl->llist_info.waiting_start_idx = UNASSIGNED_INDEX;
pmhctl->llist_info.waiting_end_idx = UNASSIGNED_INDEX;
pmhctl->llist_info.reg_read_idx = UNASSIGNED_INDEX;
pmhctl->llist_info.next_free_idx =
MDDI_FIRST_DYNAMIC_LLIST_IDX;
pmhctl->llist_info.reg_read_waiting = FALSE;
mddi_vsync_detect_enabled = FALSE;
mddi_gpio.polling_enabled = FALSE;
pmhctl->int_type.count = 0;
pmhctl->int_type.in_count = 0;
pmhctl->int_type.disp_req_count = 0;
pmhctl->int_type.state_change_count = 0;
pmhctl->int_type.ll_done_count = 0;
pmhctl->int_type.rev_avail_count = 0;
pmhctl->int_type.error_count = 0;
pmhctl->int_type.rev_encap_count = 0;
pmhctl->int_type.llist_ptr_write_1 = 0;
pmhctl->int_type.llist_ptr_write_2 = 0;
pmhctl->stats.fwd_crc_count = 0;
pmhctl->stats.rev_crc_count = 0;
pmhctl->stats.pri_underflow = 0;
pmhctl->stats.sec_underflow = 0;
pmhctl->stats.rev_overflow = 0;
pmhctl->stats.pri_overwrite = 0;
pmhctl->stats.sec_overwrite = 0;
pmhctl->stats.rev_overwrite = 0;
pmhctl->stats.dma_failure = 0;
pmhctl->stats.rtd_failure = 0;
pmhctl->stats.reg_read_failure = 0;
#ifdef FEATURE_MDDI_UNDERRUN_RECOVERY
pmhctl->stats.pri_underrun_detected = 0;
#endif
pmhctl->log_parms.rtd_cnt = 0;
pmhctl->log_parms.rev_enc_cnt = 0;
pmhctl->log_parms.vid_cnt = 0;
pmhctl->log_parms.reg_acc_cnt = 0;
pmhctl->log_parms.cli_stat_cnt = 0;
pmhctl->log_parms.cli_cap_cnt = 0;
pmhctl->log_parms.reg_read_cnt = 0;
pmhctl->log_parms.link_active_cnt = 0;
pmhctl->log_parms.link_hibernate_cnt = 0;
pmhctl->log_parms.fwd_crc_cnt = 0;
pmhctl->log_parms.rev_crc_cnt = 0;
pmhctl->log_parms.vsync_response_cnt = 0;
prev_parms[host_idx] = pmhctl->log_parms;
mddi_client_capability_pkt.packet_length = 0;
}
#ifndef T_MSM7500
/* tell clock driver we are user of this PLL */
MDDI_HOST_ENABLE_IO_CLOCK;
#endif
}
mddi_host_powerup(host_idx);
pmhctl = &(mhctl[host_idx]);
}
#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
static uint32 mddi_client_id;
uint32 mddi_get_client_id(void)
{
#ifndef FEATURE_MDDI_DISABLE_REVERSE
mddi_host_type host_idx = MDDI_HOST_PRIM;
static boolean client_detection_try = FALSE;
mddi_host_cntl_type *pmhctl;
unsigned long flags;
uint16 saved_rev_pkt_size;
if (!client_detection_try) {
/* Toshiba display requires larger drive_lo value */
mddi_host_reg_out(DRIVE_LO, 0x0050);
pmhctl = &(mhctl[MDDI_HOST_PRIM]);
saved_rev_pkt_size = pmhctl->rev_pkt_size;
/* Increase Rev Encap Size */
pmhctl->rev_pkt_size = MDDI_CLIENT_CAPABILITY_REV_PKT_SIZE;
mddi_host_reg_out(REV_ENCAP_SZ, pmhctl->rev_pkt_size);
/* disable hibernation temporarily */
if (!pmhctl->disable_hibernation)
mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE);
mddi_rev_user.waiting = TRUE;
INIT_COMPLETION(mddi_rev_user.done_comp);
spin_lock_irqsave(&mddi_host_spin_lock, flags);
/* turn on clock(s), if they have been disabled */
mddi_host_enable_hclk();
mddi_host_enable_io_clock();
mddi_client_capability_request = TRUE;
if (pmhctl->rev_state == MDDI_REV_IDLE) {
/* attempt to send the reverse encapsulation now */
mddi_issue_reverse_encapsulation();
}
spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
wait_for_completion_killable(&(mddi_rev_user.done_comp));
/* Set Rev Encap Size back to its original value */
pmhctl->rev_pkt_size = saved_rev_pkt_size;
mddi_host_reg_out(REV_ENCAP_SZ, pmhctl->rev_pkt_size);
/* reenable auto-hibernate */
if (!pmhctl->disable_hibernation)
mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE | 1);
mddi_host_reg_out(DRIVE_LO, 0x0032);
client_detection_try = TRUE;
mddi_client_id = (mddi_client_capability_pkt.Mfr_Name << 16) |
mddi_client_capability_pkt.Product_Code;
if (!mddi_client_id)
mddi_disable(1);
}
#if 0
switch (mddi_client_capability_pkt.Mfr_Name) {
case 0x4474:
if ((mddi_client_capability_pkt.Product_Code != 0x8960) &&
(target == DISPLAY_1)) {
ret = PRISM_WVGA;
}
break;
case 0xD263:
if (target == DISPLAY_1)
ret = TOSHIBA_VGA_PRIM;
else if (target == DISPLAY_2)
ret = TOSHIBA_QCIF_SECD;
break;
case 0:
if (mddi_client_capability_pkt.Product_Code == 0x8835) {
if (target == DISPLAY_1)
ret = SHARP_QVGA_PRIM;
else if (target == DISPLAY_2)
ret = SHARP_128x128_SECD;
}
break;
default:
break;
}
if ((!client_detection_try) && (ret != TOSHIBA_VGA_PRIM)
&& (ret != TOSHIBA_QCIF_SECD)) {
/* Not a Toshiba display, so change drive_lo back to default value */
mddi_host_reg_out(DRIVE_LO, 0x0032);
}
#endif
#endif
return mddi_client_id;
}
#endif
void mddi_host_powerdown(mddi_host_type host_idx)
{
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
if (host_idx >= MDDI_NUM_HOST_CORES) {
MDDI_MSG_ERR("Invalid host core index\n");
return;
}
if (pmhctl->driver_state == MDDI_DRIVER_RESET) {
return;
}
if (host_idx == MDDI_HOST_PRIM) {
/* disable timer */
del_timer(&mddi_host_timer);
}
mddi_host_configure_interrupts(host_idx, FALSE);
/* turn on HCLK to MDDI host core if it has been disabled */
mddi_host_enable_hclk();
/* MDDI Reset command */
mddi_host_reg_out(CMD, MDDI_CMD_RESET);
/* Pad Control Register */
mddi_host_reg_out(PAD_CTL, 0x0);
/* disable IO_CLK and hclk to MDDI host core */
mddi_host_disable_io_clock();
mddi_host_disable_hclk();
pmhctl->link_state = MDDI_LINK_DISABLED;
pmhctl->driver_state = MDDI_DRIVER_RESET;
MDDI_MSG_NOTICE("MDDI Host: Disabling Link\n");
}
uint16 mddi_get_next_free_llist_item(mddi_host_type host_idx, boolean wait)
{
unsigned long flags;
uint16 ret_idx;
boolean forced_wait = FALSE;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
ret_idx = pmhctl->llist_info.next_free_idx;
pmhctl->llist_info.next_free_idx++;
if (pmhctl->llist_info.next_free_idx >= MDDI_NUM_DYNAMIC_LLIST_ITEMS)
pmhctl->llist_info.next_free_idx = MDDI_FIRST_DYNAMIC_LLIST_IDX;
spin_lock_irqsave(&mddi_host_spin_lock, flags);
if (pmhctl->llist_notify[ret_idx].in_use) {
if (!wait) {
pmhctl->llist_info.next_free_idx = ret_idx;
ret_idx = UNASSIGNED_INDEX;
} else {
forced_wait = TRUE;
INIT_COMPLETION(pmhctl->mddi_llist_avail_comp);
}
}
spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
if (forced_wait) {
wait_for_completion_killable(&pmhctl->mddi_llist_avail_comp);
MDDI_MSG_ERR("task waiting on mddi llist item\n");
}
if (ret_idx != UNASSIGNED_INDEX) {
pmhctl->llist_notify[ret_idx].waiting = FALSE;
pmhctl->llist_notify[ret_idx].done_cb = NULL;
pmhctl->llist_notify[ret_idx].in_use = TRUE;
pmhctl->llist_notify[ret_idx].next_idx = UNASSIGNED_INDEX;
}
return ret_idx;
}
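/*
 * Usage sketch (not compiled): how the linked-list allocation above and
 * the forward-queueing call below are meant to be paired.  The
 * packet-filling step is elided because the linked-list layout is defined
 * elsewhere; all other names come from this file.
 */
#if 0
static void example_queue_one_packet(void)
{
mddi_host_type host = MDDI_HOST_PRIM;
uint16 idx;
/* reserve a linked-list slot, blocking until one is free */
idx = mddi_get_next_free_llist_item(host, TRUE);
if (idx == UNASSIGNED_INDEX)
return;
/* ... fill llist_extern[host][idx] with the packet here ... */
/* queue a single-entry list; completion is tracked via llist_notify */
mddi_queue_forward_packets(idx, idx, TRUE, NULL, host);
}
#endif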
uint16 mddi_get_reg_read_llist_item(mddi_host_type host_idx, boolean wait)
{
#ifdef FEATURE_MDDI_DISABLE_REVERSE
MDDI_MSG_CRIT("No reverse link available\n");
(void)wait;
/* no valid linked-list index exists without a reverse link */
return UNASSIGNED_INDEX;
#else
unsigned long flags;
uint16 ret_idx;
boolean error = FALSE;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
spin_lock_irqsave(&mddi_host_spin_lock, flags);
if (pmhctl->llist_info.reg_read_idx != UNASSIGNED_INDEX) {
/* need to block here or is this an error condition? */
error = TRUE;
ret_idx = UNASSIGNED_INDEX;
}
spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
if (!error) {
ret_idx = pmhctl->llist_info.reg_read_idx =
mddi_get_next_free_llist_item(host_idx, wait);
/* clear the reg_read_waiting flag */
pmhctl->llist_info.reg_read_waiting = FALSE;
}
if (error)
MDDI_MSG_ERR("***** Reg read still in progress! ****\n");
return ret_idx;
#endif
}
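/*
 * Queue a chain of linked-list packets for forward transmission.  Three
 * cases are handled below: if the link is idle the chain is written
 * straight to the primary pointer register; if no list is waiting yet the
 * chain is parked as the waiting list; otherwise it is spliced onto the
 * tail of the current waiting list.
 */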
void mddi_queue_forward_packets(uint16 first_llist_idx,
uint16 last_llist_idx,
boolean wait,
mddi_llist_done_cb_type llist_done_cb,
mddi_host_type host_idx)
{
unsigned long flags;
mddi_linked_list_type *llist;
mddi_linked_list_type *llist_dma;
mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
if ((first_llist_idx >= UNASSIGNED_INDEX) ||
(last_llist_idx >= UNASSIGNED_INDEX)) {
MDDI_MSG_ERR("MDDI queueing invalid linked list\n");
return;
}
if (pmhctl->link_state == MDDI_LINK_DISABLED)
MDDI_MSG_CRIT("MDDI host powered down!\n");
llist = pmhctl->llist_ptr;
llist_dma = pmhctl->llist_dma_ptr;
/* clean cache so MDDI host can read data */
memory_barrier();
pmhctl->llist_notify[last_llist_idx].waiting = wait;
if (wait)
INIT_COMPLETION(pmhctl->llist_notify[last_llist_idx].done_comp);
pmhctl->llist_notify[last_llist_idx].done_cb = llist_done_cb;
spin_lock_irqsave(&mddi_host_spin_lock, flags);
if ((pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) &&
(pmhctl->llist_info.waiting_start_idx == UNASSIGNED_INDEX) &&
(pmhctl->rev_state == MDDI_REV_IDLE)) {
/* no packets are currently transmitting */
#ifndef FEATURE_MDDI_DISABLE_REVERSE
if (first_llist_idx == pmhctl->llist_info.reg_read_idx) {
/* This is the special case where the packet is a register read. */
pmhctl->rev_state = MDDI_REV_REG_READ_ISSUED;
mddi_reg_read_retry = 0;
/* mddi_rev_reg_read_attempt = 1; */
}
#endif
/* assign transmitting index values */
pmhctl->llist_info.transmitting_start_idx = first_llist_idx;
pmhctl->llist_info.transmitting_end_idx = last_llist_idx;
/* turn on clock(s), if they have been disabled */
mddi_host_enable_hclk();
mddi_host_enable_io_clock();
pmhctl->int_type.llist_ptr_write_1++;
/* Write to primary pointer register */
dma_coherent_pre_ops();
mddi_host_reg_out(PRI_PTR, &llist_dma[first_llist_idx]);
/* enable interrupt when complete */
mddi_host_reg_outm(INTEN, MDDI_INT_PRI_LINK_LIST_DONE,
MDDI_INT_PRI_LINK_LIST_DONE);
} else if (pmhctl->llist_info.waiting_start_idx == UNASSIGNED_INDEX) {
#ifndef FEATURE_MDDI_DISABLE_REVERSE
if (first_llist_idx == pmhctl->llist_info.reg_read_idx) {
/*
* we have a register read to send but need to wait
* for current reverse activity to end or there are
* packets currently transmitting
*/
/* mddi_rev_reg_read_attempt = 0; */
pmhctl->llist_info.reg_read_waiting = TRUE;
}
#endif
/* assign waiting index values */
pmhctl->llist_info.waiting_start_idx = first_llist_idx;
pmhctl->llist_info.waiting_end_idx = last_llist_idx;
} else {
uint16 prev_end_idx = pmhctl->llist_info.waiting_end_idx;
#ifndef FEATURE_MDDI_DISABLE_REVERSE
if (first_llist_idx == pmhctl->llist_info.reg_read_idx) {
/*
* we have a register read to send but need to wait
* for current reverse activity to end or there are
* packets currently transmitting
*/
/* mddi_rev_reg_read_attempt = 0; */
pmhctl->llist_info.reg_read_waiting = TRUE;
}
#endif
llist = pmhctl->llist_ptr;
/* clear end flag in previous last packet */
llist[prev_end_idx].link_controller_flags = 0;
pmhctl->llist_notify[prev_end_idx].next_idx = first_llist_idx;
/* set the next_packet_pointer of the previous last packet */
llist[prev_end_idx].next_packet_pointer =
(void *)(&llist_dma[first_llist_idx]);
/* clean cache so MDDI host can read data */
memory_barrier();
/* assign new waiting last index value */
pmhctl->llist_info.waiting_end_idx = last_llist_idx;
}
spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
}
void mddi_host_write_pix_attr_reg(uint32 value)
{
(void)value;
}
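/*
 * Request a reverse encapsulation packet.  With wait == TRUE this blocks
 * on mddi_rev_user.done_comp, so it must only be called from a context
 * that may sleep.
 */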
void mddi_queue_reverse_encapsulation(boolean wait)
{
#ifdef FEATURE_MDDI_DISABLE_REVERSE
MDDI_MSG_CRIT("No reverse link available\n");
(void)wait;
#else
unsigned long flags;
boolean error = FALSE;
mddi_host_type host_idx = MDDI_HOST_PRIM;
mddi_host_cntl_type *pmhctl = &(mhctl[MDDI_HOST_PRIM]);
spin_lock_irqsave(&mddi_host_spin_lock, flags);
/* turn on clock(s), if they have been disabled */
mddi_host_enable_hclk();
mddi_host_enable_io_clock();
if (wait) {
if (!mddi_rev_user.waiting) {
mddi_rev_user.waiting = TRUE;
INIT_COMPLETION(mddi_rev_user.done_comp);
} else
error = TRUE;
}
mddi_rev_encap_user_request = TRUE;
if (pmhctl->rev_state == MDDI_REV_IDLE) {
/* attempt to send the reverse encapsulation now */
mddi_host_type orig_host_idx = mddi_curr_host;
mddi_curr_host = host_idx;
mddi_issue_reverse_encapsulation();
mddi_curr_host = orig_host_idx;
}
spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
if (error) {
MDDI_MSG_ERR("Reverse Encap request already in progress\n");
} else if (wait)
wait_for_completion_killable(&(mddi_rev_user.done_comp));
#endif
}
/* ISR to be executed */
boolean mddi_set_rev_handler(mddi_rev_handler_type handler, uint16 pkt_type)
{
#ifdef FEATURE_MDDI_DISABLE_REVERSE
MDDI_MSG_CRIT("No reverse link available\n");
(void)handler;
(void)pkt_type;
return (FALSE);
#else
unsigned long flags;
uint16 hdlr;
boolean handler_set = FALSE;
boolean overwrite = FALSE;
mddi_host_type host_idx = MDDI_HOST_PRIM;
mddi_host_cntl_type *pmhctl = &(mhctl[MDDI_HOST_PRIM]);
/* Disable interrupts */
spin_lock_irqsave(&mddi_host_spin_lock, flags);
for (hdlr = 0; hdlr < MAX_MDDI_REV_HANDLERS; hdlr++) {
if (mddi_rev_pkt_handler[hdlr].pkt_type == pkt_type) {
mddi_rev_pkt_handler[hdlr].handler = handler;
if (handler == NULL) {
/* clearing handler from table */
mddi_rev_pkt_handler[hdlr].pkt_type =
INVALID_PKT_TYPE;
handler_set = TRUE;
if (pkt_type == 0x10) { /* video stream packet */
/* ensure HCLK on to MDDI host core before register write */
mddi_host_enable_hclk();
/* No longer getting video, so reset rev encap size to default */
pmhctl->rev_pkt_size =
MDDI_DEFAULT_REV_PKT_SIZE;
mddi_host_reg_out(REV_ENCAP_SZ,
pmhctl->rev_pkt_size);
}
} else {
/* already a handler for this packet */
overwrite = TRUE;
}
break;
}
}
if ((hdlr >= MAX_MDDI_REV_HANDLERS) && (handler != NULL)) {
/* assigning new handler */
for (hdlr = 0; hdlr < MAX_MDDI_REV_HANDLERS; hdlr++) {
if (mddi_rev_pkt_handler[hdlr].pkt_type ==
INVALID_PKT_TYPE) {
if ((pkt_type == 0x10) && /* video stream packet */
(pmhctl->rev_pkt_size <
MDDI_VIDEO_REV_PKT_SIZE)) {
/* ensure HCLK on to MDDI host core before register write */
mddi_host_enable_hclk();
/* Increase Rev Encap Size */
pmhctl->rev_pkt_size =
MDDI_VIDEO_REV_PKT_SIZE;
mddi_host_reg_out(REV_ENCAP_SZ,
pmhctl->rev_pkt_size);
}
mddi_rev_pkt_handler[hdlr].handler = handler;
mddi_rev_pkt_handler[hdlr].pkt_type = pkt_type;
handler_set = TRUE;
break;
}
}
}
/* Restore interrupts */
spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
if (overwrite)
MDDI_MSG_ERR("Overwriting previous rev packet handler\n");
return handler_set;
#endif
} /* mddi_set_rev_handler */
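/*
 * Usage sketch (not compiled): installing and removing a reverse packet
 * handler.  The handler prototype is illustrative only -- the real
 * signature is given by mddi_rev_handler_type in the MDDI headers.
 */
#if 0
static void example_rev_video_handler(void *pkt);
static void example_manage_video_handler(void)
{
/* 0x10 is the video stream packet type special-cased above */
if (!mddi_set_rev_handler(example_rev_video_handler, 0x10))
MDDI_MSG_ERR("no free rev packet handler slot\n");
/* ... later, pass NULL to clear the handler again ... */
mddi_set_rev_handler(NULL, 0x10);
}
#endif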
void mddi_host_disable_hibernation(boolean disable)
{
mddi_host_type host_idx = MDDI_HOST_PRIM;
mddi_host_cntl_type *pmhctl = &(mhctl[MDDI_HOST_PRIM]);
if (disable) {
pmhctl->disable_hibernation = TRUE;
/* hibernation will be turned off by isr next time it is entered */
} else {
if (pmhctl->disable_hibernation) {
unsigned long flags;
spin_lock_irqsave(&mddi_host_spin_lock, flags);
if (!MDDI_HOST_IS_HCLK_ON)
MDDI_HOST_ENABLE_HCLK;
mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE | 1);
spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
pmhctl->disable_hibernation = FALSE;
}
}
}
void mddi_mhctl_remove(mddi_host_type host_idx)
{
mddi_host_cntl_type *pmhctl;
pmhctl = &(mhctl[host_idx]);
dma_free_coherent(NULL, MDDI_LLIST_POOL_SIZE, (void *)pmhctl->llist_ptr,
pmhctl->llist_dma_addr);
dma_free_coherent(NULL, MDDI_MAX_REV_DATA_SIZE,
(void *)pmhctl->rev_data_buf,
pmhctl->rev_data_dma_addr);
}
| gpl-2.0 |
zhaochengw/ef40s_kernel-4.2 | arch/mips/alchemy/devboards/db1x00/platform.c | 3784 | 4109 | /*
* DBAu1xxx board platform device registration
*
* Copyright (C) 2009 Manuel Lauss
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-db1x00/bcsr.h>
#include "../platform.h"
/* DB1xxx PCMCIA interrupt sources:
* CD0/1 GPIO0/3
* STSCHG0/1 GPIO1/4
* CARD0/1 GPIO2/5
* Db1550: 0/1, 21/22, 3/5
*/
#define DB1XXX_HAS_PCMCIA
#define F_SWAPPED (bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT)
#if defined(CONFIG_MIPS_DB1000)
#define DB1XXX_PCMCIA_CD0 AU1000_GPIO0_INT
#define DB1XXX_PCMCIA_STSCHG0 AU1000_GPIO1_INT
#define DB1XXX_PCMCIA_CARD0 AU1000_GPIO2_INT
#define DB1XXX_PCMCIA_CD1 AU1000_GPIO3_INT
#define DB1XXX_PCMCIA_STSCHG1 AU1000_GPIO4_INT
#define DB1XXX_PCMCIA_CARD1 AU1000_GPIO5_INT
#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
#define BOARD_FLASH_WIDTH 4 /* 32-bits */
#elif defined(CONFIG_MIPS_DB1100)
#define DB1XXX_PCMCIA_CD0 AU1100_GPIO0_INT
#define DB1XXX_PCMCIA_STSCHG0 AU1100_GPIO1_INT
#define DB1XXX_PCMCIA_CARD0 AU1100_GPIO2_INT
#define DB1XXX_PCMCIA_CD1 AU1100_GPIO3_INT
#define DB1XXX_PCMCIA_STSCHG1 AU1100_GPIO4_INT
#define DB1XXX_PCMCIA_CARD1 AU1100_GPIO5_INT
#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
#define BOARD_FLASH_WIDTH 4 /* 32-bits */
#elif defined(CONFIG_MIPS_DB1500)
#define DB1XXX_PCMCIA_CD0 AU1500_GPIO0_INT
#define DB1XXX_PCMCIA_STSCHG0 AU1500_GPIO1_INT
#define DB1XXX_PCMCIA_CARD0 AU1500_GPIO2_INT
#define DB1XXX_PCMCIA_CD1 AU1500_GPIO3_INT
#define DB1XXX_PCMCIA_STSCHG1 AU1500_GPIO4_INT
#define DB1XXX_PCMCIA_CARD1 AU1500_GPIO5_INT
#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
#define BOARD_FLASH_WIDTH 4 /* 32-bits */
#elif defined(CONFIG_MIPS_DB1550)
#define DB1XXX_PCMCIA_CD0 AU1550_GPIO0_INT
#define DB1XXX_PCMCIA_STSCHG0 AU1550_GPIO21_INT
#define DB1XXX_PCMCIA_CARD0 AU1550_GPIO3_INT
#define DB1XXX_PCMCIA_CD1 AU1550_GPIO1_INT
#define DB1XXX_PCMCIA_STSCHG1 AU1550_GPIO22_INT
#define DB1XXX_PCMCIA_CARD1 AU1550_GPIO5_INT
#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
#define BOARD_FLASH_WIDTH 4 /* 32-bits */
#else
/* other board: no PCMCIA */
#undef DB1XXX_HAS_PCMCIA
#undef F_SWAPPED
#define F_SWAPPED 0
#if defined(CONFIG_MIPS_BOSPORUS)
#define BOARD_FLASH_SIZE 0x01000000 /* 16MB */
#define BOARD_FLASH_WIDTH 2 /* 16-bits */
#elif defined(CONFIG_MIPS_MIRAGE)
#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
#define BOARD_FLASH_WIDTH 4 /* 32-bits */
#endif
#endif
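/*
 * Each PCMCIA socket registered below gets a 4MB attribute window, a 4MB
 * memory window and a 64KB I/O window; socket 1 sits 64MB (0x004000000)
 * above socket 0 in all three spaces.
 */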
static int __init db1xxx_dev_init(void)
{
#ifdef DB1XXX_HAS_PCMCIA
db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR,
PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
PCMCIA_MEM_PHYS_ADDR,
PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
PCMCIA_IO_PHYS_ADDR,
PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
DB1XXX_PCMCIA_CARD0,
DB1XXX_PCMCIA_CD0,
/*DB1XXX_PCMCIA_STSCHG0*/0,
0,
0);
db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR + 0x004000000,
PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1,
PCMCIA_MEM_PHYS_ADDR + 0x004000000,
PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
PCMCIA_IO_PHYS_ADDR + 0x004000000,
PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
DB1XXX_PCMCIA_CARD1,
DB1XXX_PCMCIA_CD1,
/*DB1XXX_PCMCIA_STSCHG1*/0,
0,
1);
#endif
db1x_register_norflash(BOARD_FLASH_SIZE, BOARD_FLASH_WIDTH, F_SWAPPED);
return 0;
}
device_initcall(db1xxx_dev_init);
| gpl-2.0 |
nasty007/android_kernel_lge_mako | drivers/video/backlight/ams369fg06.c | 4808 | 13772 | /*
* ams369fg06 AMOLED LCD panel driver.
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Author: Jingoo Han <jg1.han@samsung.com>
*
* Derived from drivers/video/s6e63m0.c
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/lcd.h>
#include <linux/backlight.h>
#define SLEEPMSEC 0x1000
#define ENDDEF 0x2000
#define DEFMASK 0xFF00
#define COMMAND_ONLY 0xFE
#define DATA_ONLY 0xFF
#define MAX_GAMMA_LEVEL 5
#define GAMMA_TABLE_COUNT 21
#define MIN_BRIGHTNESS 0
#define MAX_BRIGHTNESS 255
#define DEFAULT_BRIGHTNESS 150
struct ams369fg06 {
struct device *dev;
struct spi_device *spi;
unsigned int power;
struct lcd_device *ld;
struct backlight_device *bd;
struct lcd_platform_data *lcd_pd;
};
static const unsigned short seq_display_on[] = {
0x14, 0x03,
ENDDEF, 0x0000
};
static const unsigned short seq_display_off[] = {
0x14, 0x00,
ENDDEF, 0x0000
};
static const unsigned short seq_stand_by_on[] = {
0x1D, 0xA1,
SLEEPMSEC, 200,
ENDDEF, 0x0000
};
static const unsigned short seq_stand_by_off[] = {
0x1D, 0xA0,
SLEEPMSEC, 250,
ENDDEF, 0x0000
};
static const unsigned short seq_setting[] = {
0x31, 0x08,
0x32, 0x14,
0x30, 0x02,
0x27, 0x01,
0x12, 0x08,
0x13, 0x08,
0x15, 0x00,
0x16, 0x00,
0xef, 0xd0,
DATA_ONLY, 0xe8,
0x39, 0x44,
0x40, 0x00,
0x41, 0x3f,
0x42, 0x2a,
0x43, 0x27,
0x44, 0x27,
0x45, 0x1f,
0x46, 0x44,
0x50, 0x00,
0x51, 0x00,
0x52, 0x17,
0x53, 0x24,
0x54, 0x26,
0x55, 0x1f,
0x56, 0x43,
0x60, 0x00,
0x61, 0x3f,
0x62, 0x2a,
0x63, 0x25,
0x64, 0x24,
0x65, 0x1b,
0x66, 0x5c,
0x17, 0x22,
0x18, 0x33,
0x19, 0x03,
0x1a, 0x01,
0x22, 0xa4,
0x23, 0x00,
0x26, 0xa0,
0x1d, 0xa0,
SLEEPMSEC, 300,
0x14, 0x03,
ENDDEF, 0x0000
};
/* gamma value: 2.2 */
static const unsigned int ams369fg06_22_250[] = {
0x00, 0x3f, 0x2a, 0x27, 0x27, 0x1f, 0x44,
0x00, 0x00, 0x17, 0x24, 0x26, 0x1f, 0x43,
0x00, 0x3f, 0x2a, 0x25, 0x24, 0x1b, 0x5c,
};
static const unsigned int ams369fg06_22_200[] = {
0x00, 0x3f, 0x28, 0x29, 0x27, 0x21, 0x3e,
0x00, 0x00, 0x10, 0x25, 0x27, 0x20, 0x3d,
0x00, 0x3f, 0x28, 0x27, 0x25, 0x1d, 0x53,
};
static const unsigned int ams369fg06_22_150[] = {
0x00, 0x3f, 0x2d, 0x29, 0x28, 0x23, 0x37,
0x00, 0x00, 0x0b, 0x25, 0x28, 0x22, 0x36,
0x00, 0x3f, 0x2b, 0x28, 0x26, 0x1f, 0x4a,
};
static const unsigned int ams369fg06_22_100[] = {
0x00, 0x3f, 0x30, 0x2a, 0x2b, 0x24, 0x2f,
0x00, 0x00, 0x00, 0x25, 0x29, 0x24, 0x2e,
0x00, 0x3f, 0x2f, 0x29, 0x29, 0x21, 0x3f,
};
static const unsigned int ams369fg06_22_50[] = {
0x00, 0x3f, 0x3c, 0x2c, 0x2d, 0x27, 0x24,
0x00, 0x00, 0x00, 0x22, 0x2a, 0x27, 0x23,
0x00, 0x3f, 0x3b, 0x2c, 0x2b, 0x24, 0x31,
};
struct ams369fg06_gamma {
unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
};
static struct ams369fg06_gamma gamma_table = {
.gamma_22_table[0] = (unsigned int *)&ams369fg06_22_50,
.gamma_22_table[1] = (unsigned int *)&ams369fg06_22_100,
.gamma_22_table[2] = (unsigned int *)&ams369fg06_22_150,
.gamma_22_table[3] = (unsigned int *)&ams369fg06_22_200,
.gamma_22_table[4] = (unsigned int *)&ams369fg06_22_250,
};
static int ams369fg06_spi_write_byte(struct ams369fg06 *lcd, int addr, int data)
{
u16 buf[1];
struct spi_message msg;
struct spi_transfer xfer = {
.len = 2,
.tx_buf = buf,
};
buf[0] = (addr << 8) | data;
spi_message_init(&msg);
spi_message_add_tail(&xfer, &msg);
return spi_sync(lcd->spi, &msg);
}
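/*
 * In the panel's 3-wire serial protocol each 16-bit SPI word carries a
 * device-ID byte in the upper half and the payload in the lower half:
 * 0x70 introduces a register address, 0x72 a command/parameter byte.
 * The COMMAND_ONLY / DATA_ONLY sentinels let callers skip either phase.
 */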
static int ams369fg06_spi_write(struct ams369fg06 *lcd, unsigned char address,
unsigned char command)
{
int ret = 0;
if (address != DATA_ONLY)
ret = ams369fg06_spi_write_byte(lcd, 0x70, address);
if (command != COMMAND_ONLY)
ret = ams369fg06_spi_write_byte(lcd, 0x72, command);
return ret;
}
static int ams369fg06_panel_send_sequence(struct ams369fg06 *lcd,
const unsigned short *wbuf)
{
int ret = 0, i = 0;
while ((wbuf[i] & DEFMASK) != ENDDEF) {
if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
ret = ams369fg06_spi_write(lcd, wbuf[i], wbuf[i+1]);
if (ret)
break;
} else
mdelay(wbuf[i+1]);
i += 2;
}
return ret;
}
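/*
 * Usage sketch (not compiled): sequence tables are flat arrays of
 * (register, value) pairs; SLEEPMSEC entries are interpreted as delays
 * and an ENDDEF entry terminates the table, exactly as parsed above.
 */
#if 0
static const unsigned short seq_example[] = {
0x14, 0x03, /* display on */
SLEEPMSEC, 50, /* wait 50 ms */
ENDDEF, 0x0000 /* end of table */
};
/* sent with: ams369fg06_panel_send_sequence(lcd, seq_example); */
#endif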
static int _ams369fg06_gamma_ctl(struct ams369fg06 *lcd,
const unsigned int *gamma)
{
unsigned int i = 0;
int ret = 0;
for (i = 0; i < GAMMA_TABLE_COUNT / 3; i++) {
/* stop on the first failed write instead of masking earlier errors */
ret = ams369fg06_spi_write(lcd, 0x40 + i, gamma[i]);
if (!ret)
ret = ams369fg06_spi_write(lcd, 0x50 + i, gamma[i + 7]);
if (!ret)
ret = ams369fg06_spi_write(lcd, 0x60 + i, gamma[i + 7 * 2]);
if (ret) {
dev_err(lcd->dev, "failed to set gamma table.\n");
goto gamma_err;
}
}
gamma_err:
return ret;
}
static int ams369fg06_gamma_ctl(struct ams369fg06 *lcd, int brightness)
{
int ret = 0;
int gamma = 0;
if ((brightness >= 0) && (brightness <= 50))
gamma = 0;
else if ((brightness > 50) && (brightness <= 100))
gamma = 1;
else if ((brightness > 100) && (brightness <= 150))
gamma = 2;
else if ((brightness > 150) && (brightness <= 200))
gamma = 3;
else if ((brightness > 200) && (brightness <= 255))
gamma = 4;
ret = _ams369fg06_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
return ret;
}
static int ams369fg06_ldi_init(struct ams369fg06 *lcd)
{
int ret, i;
static const unsigned short *init_seq[] = {
seq_setting,
seq_stand_by_off,
};
for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]);
if (ret)
break;
}
return ret;
}
static int ams369fg06_ldi_enable(struct ams369fg06 *lcd)
{
int ret, i;
static const unsigned short *init_seq[] = {
seq_stand_by_off,
seq_display_on,
};
for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]);
if (ret)
break;
}
return ret;
}
static int ams369fg06_ldi_disable(struct ams369fg06 *lcd)
{
int ret, i;
static const unsigned short *init_seq[] = {
seq_display_off,
seq_stand_by_on,
};
for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]);
if (ret)
break;
}
return ret;
}
static int ams369fg06_power_is_on(int power)
{
return ((power) <= FB_BLANK_NORMAL);
}
static int ams369fg06_power_on(struct ams369fg06 *lcd)
{
int ret = 0;
struct lcd_platform_data *pd = NULL;
struct backlight_device *bd = NULL;
pd = lcd->lcd_pd;
if (!pd) {
dev_err(lcd->dev, "platform data is NULL.\n");
return -EFAULT;
}
bd = lcd->bd;
if (!bd) {
dev_err(lcd->dev, "backlight device is NULL.\n");
return -EFAULT;
}
if (!pd->power_on) {
dev_err(lcd->dev, "power_on is NULL.\n");
return -EFAULT;
} else {
pd->power_on(lcd->ld, 1);
mdelay(pd->power_on_delay);
}
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
return -EFAULT;
} else {
pd->reset(lcd->ld);
mdelay(pd->reset_delay);
}
ret = ams369fg06_ldi_init(lcd);
if (ret) {
dev_err(lcd->dev, "failed to initialize ldi.\n");
return ret;
}
ret = ams369fg06_ldi_enable(lcd);
if (ret) {
dev_err(lcd->dev, "failed to enable ldi.\n");
return ret;
}
/* set brightness to current value after power on or resume. */
ret = ams369fg06_gamma_ctl(lcd, bd->props.brightness);
if (ret) {
dev_err(lcd->dev, "lcd gamma setting failed.\n");
return ret;
}
return 0;
}
static int ams369fg06_power_off(struct ams369fg06 *lcd)
{
int ret = 0;
struct lcd_platform_data *pd = NULL;
pd = lcd->lcd_pd;
if (!pd) {
dev_err(lcd->dev, "platform data is NULL\n");
return -EFAULT;
}
ret = ams369fg06_ldi_disable(lcd);
if (ret) {
dev_err(lcd->dev, "lcd setting failed.\n");
return -EIO;
}
mdelay(pd->power_off_delay);
if (!pd->power_on) {
dev_err(lcd->dev, "power_on is NULL.\n");
return -EFAULT;
} else
pd->power_on(lcd->ld, 0);
return 0;
}
static int ams369fg06_power(struct ams369fg06 *lcd, int power)
{
int ret = 0;
if (ams369fg06_power_is_on(power) &&
!ams369fg06_power_is_on(lcd->power))
ret = ams369fg06_power_on(lcd);
else if (!ams369fg06_power_is_on(power) &&
ams369fg06_power_is_on(lcd->power))
ret = ams369fg06_power_off(lcd);
if (!ret)
lcd->power = power;
return ret;
}
static int ams369fg06_get_power(struct lcd_device *ld)
{
struct ams369fg06 *lcd = lcd_get_data(ld);
return lcd->power;
}
static int ams369fg06_set_power(struct lcd_device *ld, int power)
{
struct ams369fg06 *lcd = lcd_get_data(ld);
if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
power != FB_BLANK_NORMAL) {
dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
return -EINVAL;
}
return ams369fg06_power(lcd, power);
}
static int ams369fg06_get_brightness(struct backlight_device *bd)
{
return bd->props.brightness;
}
static int ams369fg06_set_brightness(struct backlight_device *bd)
{
int ret = 0;
int brightness = bd->props.brightness;
struct ams369fg06 *lcd = dev_get_drvdata(&bd->dev);
if (brightness < MIN_BRIGHTNESS ||
brightness > bd->props.max_brightness) {
dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
MIN_BRIGHTNESS, MAX_BRIGHTNESS);
return -EINVAL;
}
ret = ams369fg06_gamma_ctl(lcd, bd->props.brightness);
if (ret) {
dev_err(&bd->dev, "lcd brightness setting failed.\n");
return -EIO;
}
return ret;
}
static struct lcd_ops ams369fg06_lcd_ops = {
.get_power = ams369fg06_get_power,
.set_power = ams369fg06_set_power,
};
static const struct backlight_ops ams369fg06_backlight_ops = {
.get_brightness = ams369fg06_get_brightness,
.update_status = ams369fg06_set_brightness,
};
static int __devinit ams369fg06_probe(struct spi_device *spi)
{
int ret = 0;
struct ams369fg06 *lcd = NULL;
struct lcd_device *ld = NULL;
struct backlight_device *bd = NULL;
struct backlight_properties props;
lcd = kzalloc(sizeof(struct ams369fg06), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
/* ams369fg06 lcd panel uses 3-wire 16bits SPI Mode. */
spi->bits_per_word = 16;
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
goto out_free_lcd;
}
lcd->spi = spi;
lcd->dev = &spi->dev;
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL\n");
goto out_free_lcd;
}
ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
&ams369fg06_lcd_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
goto out_free_lcd;
}
lcd->ld = ld;
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = MAX_BRIGHTNESS;
bd = backlight_device_register("ams369fg06-bl", &spi->dev, lcd,
&ams369fg06_backlight_ops, &props);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
goto out_lcd_unregister;
}
bd->props.brightness = DEFAULT_BRIGHTNESS;
lcd->bd = bd;
if (!lcd->lcd_pd->lcd_enabled) {
/*
 * The bootloader left the LCD panel off, so mark the current
 * status as powerdown and then unblank to enable the panel.
 */
lcd->power = FB_BLANK_POWERDOWN;
ams369fg06_power(lcd, FB_BLANK_UNBLANK);
} else
lcd->power = FB_BLANK_UNBLANK;
dev_set_drvdata(&spi->dev, lcd);
dev_info(&spi->dev, "ams369fg06 panel driver has been probed.\n");
return 0;
out_lcd_unregister:
lcd_device_unregister(ld);
out_free_lcd:
kfree(lcd);
return ret;
}
static int __devexit ams369fg06_remove(struct spi_device *spi)
{
struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
kfree(lcd);
return 0;
}
#if defined(CONFIG_PM)
static unsigned int before_power;
static int ams369fg06_suspend(struct spi_device *spi, pm_message_t mesg)
{
int ret = 0;
struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
before_power = lcd->power;
/*
 * When the LCD panel is suspended, it is powered off
 * regardless of its current status.
 */
ret = ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
return ret;
}
static int ams369fg06_resume(struct spi_device *spi)
{
int ret = 0;
struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
/*
 * After resume, if the panel was unblanked before suspend
 * (before_power is FB_BLANK_UNBLANK), force the current status to
 * FB_BLANK_POWERDOWN so that the power-on path runs and the LCD is
 * turned back on.
 */
if (before_power == FB_BLANK_UNBLANK)
lcd->power = FB_BLANK_POWERDOWN;
dev_dbg(&spi->dev, "before_power = %d\n", before_power);
ret = ams369fg06_power(lcd, before_power);
return ret;
}
#else
#define ams369fg06_suspend NULL
#define ams369fg06_resume NULL
#endif
static void ams369fg06_shutdown(struct spi_device *spi)
{
struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
}
static struct spi_driver ams369fg06_driver = {
.driver = {
.name = "ams369fg06",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ams369fg06_probe,
.remove = __devexit_p(ams369fg06_remove),
.shutdown = ams369fg06_shutdown,
.suspend = ams369fg06_suspend,
.resume = ams369fg06_resume,
};
module_spi_driver(ams369fg06_driver);
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("ams369fg06 LCD Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
shivamk11/phantom_taoshan | drivers/media/dvb/ddbridge/ddbridge-core.c | 4808 | 42627 | /*
* ddbridge.c: Digital Devices PCIe bridge driver
*
* Copyright (C) 2010-2011 Digital Devices GmbH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 only, as published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/timer.h>
#include <linux/version.h>
#include <linux/i2c.h>
#include <linux/swab.h>
#include <linux/vmalloc.h>
#include "ddbridge.h"
#include "ddbridge-regs.h"
#include "tda18271c2dd.h"
#include "stv6110x.h"
#include "stv090x.h"
#include "lnbh24.h"
#include "drxk.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/* MSI had problems with lost interrupts, fixed but needs testing */
#undef CONFIG_PCI_MSI
/******************************************************************************/
static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
{
struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
.buf = val, .len = 1 } };
return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
}
static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr, u8 reg, u8 *val)
{
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
.buf = ®, .len = 1 },
{.addr = adr, .flags = I2C_M_RD,
.buf = val, .len = 1 } };
return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
}
static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
u16 reg, u8 *val)
{
u8 msg[2] = {reg>>8, reg&0xff};
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
.buf = msg, .len = 2},
{.addr = adr, .flags = I2C_M_RD,
.buf = val, .len = 1} };
return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
}
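/*
 * Usage sketch (not compiled): the helpers above wrap i2c_transfer() for
 * the common one-byte register reads.  The 0x29 address matches the DRXK
 * demodulator attached further down; the register offset is illustrative.
 */
#if 0
static void example_read_demod_reg(struct ddb *dev)
{
u8 val;
if (!i2c_read_reg(&dev->i2c[0].adap, 0x29, 0x00, &val))
printk(KERN_INFO "ddbridge: demod reg 0x00 = %02x\n", val);
}
#endif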
static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
{
struct ddb *dev = i2c->dev;
int stat;
u32 val;
i2c->done = 0;
ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND);
stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ);
if (stat <= 0) {
printk(KERN_ERR "I2C timeout\n");
{ /* MSI debugging */
u32 istat = ddbreadl(INTERRUPT_STATUS);
printk(KERN_ERR "IRS %08x\n", istat);
ddbwritel(istat, INTERRUPT_ACK);
}
return -EIO;
}
val = ddbreadl(i2c->regs+I2C_COMMAND);
if (val & 0x70000)
return -EIO;
return 0;
}
static int ddb_i2c_master_xfer(struct i2c_adapter *adapter,
struct i2c_msg msg[], int num)
{
struct ddb_i2c *i2c = (struct ddb_i2c *)i2c_get_adapdata(adapter);
struct ddb *dev = i2c->dev;
u8 addr = 0;
if (num)
addr = msg[0].addr;
if (num == 2 && msg[1].flags & I2C_M_RD &&
!(msg[0].flags & I2C_M_RD)) {
memcpy_toio(dev->regs + I2C_TASKMEM_BASE + i2c->wbuf,
msg[0].buf, msg[0].len);
ddbwritel(msg[0].len|(msg[1].len << 16),
i2c->regs+I2C_TASKLENGTH);
if (!ddb_i2c_cmd(i2c, addr, 1)) {
memcpy_fromio(msg[1].buf,
dev->regs + I2C_TASKMEM_BASE + i2c->rbuf,
msg[1].len);
return num;
}
}
if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
ddbcpyto(I2C_TASKMEM_BASE + i2c->wbuf, msg[0].buf, msg[0].len);
ddbwritel(msg[0].len, i2c->regs + I2C_TASKLENGTH);
if (!ddb_i2c_cmd(i2c, addr, 2))
return num;
}
if (num == 1 && (msg[0].flags & I2C_M_RD)) {
ddbwritel(msg[0].len << 16, i2c->regs + I2C_TASKLENGTH);
if (!ddb_i2c_cmd(i2c, addr, 3)) {
ddbcpyfrom(msg[0].buf,
I2C_TASKMEM_BASE + i2c->rbuf, msg[0].len);
return num;
}
}
return -EIO;
}
static u32 ddb_i2c_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_SMBUS_EMUL;
}
struct i2c_algorithm ddb_i2c_algo = {
.master_xfer = ddb_i2c_master_xfer,
.functionality = ddb_i2c_functionality,
};
static void ddb_i2c_release(struct ddb *dev)
{
int i;
struct ddb_i2c *i2c;
struct i2c_adapter *adap;
for (i = 0; i < dev->info->port_num; i++) {
i2c = &dev->i2c[i];
adap = &i2c->adap;
i2c_del_adapter(adap);
}
}
static int ddb_i2c_init(struct ddb *dev)
{
int i, j, stat = 0;
struct ddb_i2c *i2c;
struct i2c_adapter *adap;
for (i = 0; i < dev->info->port_num; i++) {
i2c = &dev->i2c[i];
i2c->dev = dev;
i2c->nr = i;
i2c->wbuf = i * (I2C_TASKMEM_SIZE / 4);
i2c->rbuf = i2c->wbuf + (I2C_TASKMEM_SIZE / 8);
i2c->regs = 0x80 + i * 0x20;
ddbwritel(I2C_SPEED_100, i2c->regs + I2C_TIMING);
ddbwritel((i2c->rbuf << 16) | i2c->wbuf,
i2c->regs + I2C_TASKADDRESS);
init_waitqueue_head(&i2c->wq);
adap = &i2c->adap;
i2c_set_adapdata(adap, i2c);
#ifdef I2C_ADAP_CLASS_TV_DIGITAL
adap->class = I2C_ADAP_CLASS_TV_DIGITAL|I2C_CLASS_TV_ANALOG;
#else
#ifdef I2C_CLASS_TV_ANALOG
adap->class = I2C_CLASS_TV_ANALOG;
#endif
#endif
strcpy(adap->name, "ddbridge");
adap->algo = &ddb_i2c_algo;
adap->algo_data = (void *)i2c;
adap->dev.parent = &dev->pdev->dev;
stat = i2c_add_adapter(adap);
if (stat)
break;
}
if (stat)
for (j = 0; j < i; j++) {
i2c = &dev->i2c[j];
adap = &i2c->adap;
i2c_del_adapter(adap);
}
return stat;
}
/******************************************************************************/
/******************************************************************************/
/******************************************************************************/
#if 0
static void set_table(struct ddb *dev, u32 off,
dma_addr_t *pbuf, u32 num)
{
u32 i, base;
u64 mem;
base = DMA_BASE_ADDRESS_TABLE + off;
for (i = 0; i < num; i++) {
mem = pbuf[i];
ddbwritel(mem & 0xffffffff, base + i * 8);
ddbwritel(mem >> 32, base + i * 8 + 4);
}
}
#endif
static void ddb_address_table(struct ddb *dev)
{
u32 i, j, base;
u64 mem;
dma_addr_t *pbuf;
for (i = 0; i < dev->info->port_num * 2; i++) {
base = DMA_BASE_ADDRESS_TABLE + i * 0x100;
pbuf = dev->input[i].pbuf;
for (j = 0; j < dev->input[i].dma_buf_num; j++) {
mem = pbuf[j];
ddbwritel(mem & 0xffffffff, base + j * 8);
ddbwritel(mem >> 32, base + j * 8 + 4);
}
}
for (i = 0; i < dev->info->port_num; i++) {
base = DMA_BASE_ADDRESS_TABLE + 0x800 + i * 0x100;
pbuf = dev->output[i].pbuf;
for (j = 0; j < dev->output[i].dma_buf_num; j++) {
mem = pbuf[j];
ddbwritel(mem & 0xffffffff, base + j * 8);
ddbwritel(mem >> 32, base + j * 8 + 4);
}
}
}
static void io_free(struct pci_dev *pdev, u8 **vbuf,
dma_addr_t *pbuf, u32 size, int num)
{
int i;
for (i = 0; i < num; i++) {
if (vbuf[i]) {
pci_free_consistent(pdev, size, vbuf[i], pbuf[i]);
vbuf[i] = 0;
}
}
}
static int io_alloc(struct pci_dev *pdev, u8 **vbuf,
dma_addr_t *pbuf, u32 size, int num)
{
int i;
for (i = 0; i < num; i++) {
vbuf[i] = pci_alloc_consistent(pdev, size, &pbuf[i]);
if (!vbuf[i])
return -ENOMEM;
}
return 0;
}
static int ddb_buffers_alloc(struct ddb *dev)
{
int i;
struct ddb_port *port;
for (i = 0; i < dev->info->port_num; i++) {
port = &dev->port[i];
switch (port->class) {
case DDB_PORT_TUNER:
if (io_alloc(dev->pdev, port->input[0]->vbuf,
port->input[0]->pbuf,
port->input[0]->dma_buf_size,
port->input[0]->dma_buf_num) < 0)
return -1;
if (io_alloc(dev->pdev, port->input[1]->vbuf,
port->input[1]->pbuf,
port->input[1]->dma_buf_size,
port->input[1]->dma_buf_num) < 0)
return -1;
break;
case DDB_PORT_CI:
if (io_alloc(dev->pdev, port->input[0]->vbuf,
port->input[0]->pbuf,
port->input[0]->dma_buf_size,
port->input[0]->dma_buf_num) < 0)
return -1;
if (io_alloc(dev->pdev, port->output->vbuf,
port->output->pbuf,
port->output->dma_buf_size,
port->output->dma_buf_num) < 0)
return -1;
break;
default:
break;
}
}
ddb_address_table(dev);
return 0;
}
static void ddb_buffers_free(struct ddb *dev)
{
int i;
struct ddb_port *port;
for (i = 0; i < dev->info->port_num; i++) {
port = &dev->port[i];
io_free(dev->pdev, port->input[0]->vbuf,
port->input[0]->pbuf,
port->input[0]->dma_buf_size,
port->input[0]->dma_buf_num);
io_free(dev->pdev, port->input[1]->vbuf,
port->input[1]->pbuf,
port->input[1]->dma_buf_size,
port->input[1]->dma_buf_num);
io_free(dev->pdev, port->output->vbuf,
port->output->pbuf,
port->output->dma_buf_size,
port->output->dma_buf_num);
}
}
static void ddb_input_start(struct ddb_input *input)
{
struct ddb *dev = input->port->dev;
spin_lock_irq(&input->lock);
input->cbuf = 0;
input->coff = 0;
/* reset */
ddbwritel(0, TS_INPUT_CONTROL(input->nr));
ddbwritel(2, TS_INPUT_CONTROL(input->nr));
ddbwritel(0, TS_INPUT_CONTROL(input->nr));
ddbwritel((1 << 16) |
(input->dma_buf_num << 11) |
(input->dma_buf_size >> 7),
DMA_BUFFER_SIZE(input->nr));
ddbwritel(0, DMA_BUFFER_ACK(input->nr));
ddbwritel(1, DMA_BASE_WRITE);
ddbwritel(3, DMA_BUFFER_CONTROL(input->nr));
ddbwritel(9, TS_INPUT_CONTROL(input->nr));
input->running = 1;
spin_unlock_irq(&input->lock);
}
static void ddb_input_stop(struct ddb_input *input)
{
struct ddb *dev = input->port->dev;
spin_lock_irq(&input->lock);
ddbwritel(0, TS_INPUT_CONTROL(input->nr));
ddbwritel(0, DMA_BUFFER_CONTROL(input->nr));
input->running = 0;
spin_unlock_irq(&input->lock);
}
static void ddb_output_start(struct ddb_output *output)
{
struct ddb *dev = output->port->dev;
spin_lock_irq(&output->lock);
output->cbuf = 0;
output->coff = 0;
ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
ddbwritel(2, TS_OUTPUT_CONTROL(output->nr));
ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
ddbwritel(0x3c, TS_OUTPUT_CONTROL(output->nr));
ddbwritel((1 << 16) |
(output->dma_buf_num << 11) |
(output->dma_buf_size >> 7),
DMA_BUFFER_SIZE(output->nr + 8));
ddbwritel(0, DMA_BUFFER_ACK(output->nr + 8));
ddbwritel(1, DMA_BASE_READ);
ddbwritel(3, DMA_BUFFER_CONTROL(output->nr + 8));
/* ddbwritel(0xbd, TS_OUTPUT_CONTROL(output->nr)); */
ddbwritel(0x1d, TS_OUTPUT_CONTROL(output->nr));
output->running = 1;
spin_unlock_irq(&output->lock);
}
static void ddb_output_stop(struct ddb_output *output)
{
struct ddb *dev = output->port->dev;
spin_lock_irq(&output->lock);
ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
ddbwritel(0, DMA_BUFFER_CONTROL(output->nr + 8));
output->running = 0;
spin_unlock_irq(&output->lock);
}
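/*
 * The hardware reports its DMA position as a packed status word: bits
 * 15..11 hold the current buffer index and bits 10..0 the offset within
 * that buffer in 128-byte units -- hence the "(stat >> 11) & 0x1f" and
 * "(stat & 0x7ff) << 7" decoding used throughout the routines below.
 */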
static u32 ddb_output_free(struct ddb_output *output)
{
u32 idx, off, stat = output->stat;
s32 diff;
idx = (stat >> 11) & 0x1f;
off = (stat & 0x7ff) << 7;
if (output->cbuf != idx) {
if ((((output->cbuf + 1) % output->dma_buf_num) == idx) &&
(output->dma_buf_size - output->coff <= 188))
return 0;
return 188;
}
diff = off - output->coff;
if (diff <= 0 || diff > 188)
return 188;
return 0;
}
static ssize_t ddb_output_write(struct ddb_output *output,
const u8 *buf, size_t count)
{
struct ddb *dev = output->port->dev;
u32 idx, off, stat = output->stat;
u32 left = count, len;
idx = (stat >> 11) & 0x1f;
off = (stat & 0x7ff) << 7;
while (left) {
len = output->dma_buf_size - output->coff;
if ((((output->cbuf + 1) % output->dma_buf_num) == idx) &&
(off == 0)) {
if (len <= 188)
break;
len -= 188;
}
if (output->cbuf == idx) {
if (off > output->coff) {
len = off - output->coff;
len -= (len % 188);
if (len <= 188)
break;
len -= 188;
}
}
if (len > left)
len = left;
if (copy_from_user(output->vbuf[output->cbuf] + output->coff,
buf, len))
return -EIO;
left -= len;
buf += len;
output->coff += len;
if (output->coff == output->dma_buf_size) {
output->coff = 0;
output->cbuf = ((output->cbuf + 1) % output->dma_buf_num);
}
ddbwritel((output->cbuf << 11) | (output->coff >> 7),
DMA_BUFFER_ACK(output->nr + 8));
}
return count - left;
}
static u32 ddb_input_avail(struct ddb_input *input)
{
struct ddb *dev = input->port->dev;
u32 idx, off, stat = input->stat;
u32 ctrl = ddbreadl(DMA_BUFFER_CONTROL(input->nr));
idx = (stat >> 11) & 0x1f;
off = (stat & 0x7ff) << 7;
if (ctrl & 4) {
printk(KERN_ERR "IA %d %d %08x\n", idx, off, ctrl);
ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr));
return 0;
}
if (input->cbuf != idx)
return 188;
return 0;
}
static ssize_t ddb_input_read(struct ddb_input *input, u8 *buf, size_t count)
{
struct ddb *dev = input->port->dev;
u32 left = count;
u32 idx, free, stat = input->stat;
int ret;
idx = (stat >> 11) & 0x1f;
while (left) {
if (input->cbuf == idx)
return count - left;
free = input->dma_buf_size - input->coff;
if (free > left)
free = left;
ret = copy_to_user(buf, input->vbuf[input->cbuf] +
input->coff, free);
if (ret)
return -EFAULT;
input->coff += free;
if (input->coff == input->dma_buf_size) {
input->coff = 0;
input->cbuf = (input->cbuf+1) % input->dma_buf_num;
}
left -= free;
ddbwritel((input->cbuf << 11) | (input->coff >> 7),
DMA_BUFFER_ACK(input->nr));
}
return count;
}
/******************************************************************************/
/******************************************************************************/
/******************************************************************************/
#if 0
static struct ddb_input *fe2input(struct ddb *dev, struct dvb_frontend *fe)
{
int i;
for (i = 0; i < dev->info->port_num * 2; i++) {
if (dev->input[i].fe == fe)
return &dev->input[i];
}
return NULL;
}
#endif
static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct ddb_input *input = fe->sec_priv;
struct ddb_port *port = input->port;
int status;
if (enable) {
mutex_lock(&port->i2c_gate_lock);
status = input->gate_ctrl(fe, 1);
} else {
status = input->gate_ctrl(fe, 0);
mutex_unlock(&port->i2c_gate_lock);
}
return status;
}
static int demod_attach_drxk(struct ddb_input *input)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
struct dvb_frontend *fe;
struct drxk_config config;
memset(&config, 0, sizeof(config));
config.microcode_name = "drxk_a3.mc";
config.adr = 0x29 + (input->nr & 1);
fe = input->fe = dvb_attach(drxk_attach, &config, i2c);
if (!input->fe) {
printk(KERN_ERR "No DRXK found!\n");
return -ENODEV;
}
fe->sec_priv = input;
input->gate_ctrl = fe->ops.i2c_gate_ctrl;
fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
return 0;
}
static int tuner_attach_tda18271(struct ddb_input *input)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
struct dvb_frontend *fe;
if (input->fe->ops.i2c_gate_ctrl)
input->fe->ops.i2c_gate_ctrl(input->fe, 1);
fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60);
if (!fe) {
printk(KERN_ERR "No TDA18271 found!\n");
return -ENODEV;
}
if (input->fe->ops.i2c_gate_ctrl)
input->fe->ops.i2c_gate_ctrl(input->fe, 0);
return 0;
}
/******************************************************************************/
/******************************************************************************/
/******************************************************************************/
static struct stv090x_config stv0900 = {
.device = STV0900,
.demod_mode = STV090x_DUAL,
.clk_mode = STV090x_CLK_EXT,
.xtal = 27000000,
.address = 0x69,
.ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
.ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
.repeater_level = STV090x_RPTLEVEL_16,
.adc1_range = STV090x_ADC_1Vpp,
.adc2_range = STV090x_ADC_1Vpp,
.diseqc_envelope_mode = true,
};
static struct stv090x_config stv0900_aa = {
.device = STV0900,
.demod_mode = STV090x_DUAL,
.clk_mode = STV090x_CLK_EXT,
.xtal = 27000000,
.address = 0x68,
.ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
.ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
.repeater_level = STV090x_RPTLEVEL_16,
.adc1_range = STV090x_ADC_1Vpp,
.adc2_range = STV090x_ADC_1Vpp,
.diseqc_envelope_mode = true,
};
static struct stv6110x_config stv6110a = {
.addr = 0x60,
.refclk = 27000000,
.clk_div = 1,
};
static struct stv6110x_config stv6110b = {
.addr = 0x63,
.refclk = 27000000,
.clk_div = 1,
};
static int demod_attach_stv0900(struct ddb_input *input, int type)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
input->fe = dvb_attach(stv090x_attach, feconf, i2c,
(input->nr & 1) ? STV090x_DEMODULATOR_1
: STV090x_DEMODULATOR_0);
if (!input->fe) {
printk(KERN_ERR "No STV0900 found!\n");
return -ENODEV;
}
if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0,
0, (input->nr & 1) ?
(0x09 - type) : (0x0b - type))) {
printk(KERN_ERR "No LNBH24 found!\n");
return -ENODEV;
}
return 0;
}
static int tuner_attach_stv6110(struct ddb_input *input, int type)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
struct stv6110x_config *tunerconf = (input->nr & 1) ?
&stv6110b : &stv6110a;
struct stv6110x_devctl *ctl;
ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c);
if (!ctl) {
printk(KERN_ERR "No STV6110X found!\n");
return -ENODEV;
}
printk(KERN_INFO "attach tuner input %d adr %02x\n",
input->nr, tunerconf->addr);
feconf->tuner_init = ctl->tuner_init;
feconf->tuner_sleep = ctl->tuner_sleep;
feconf->tuner_set_mode = ctl->tuner_set_mode;
feconf->tuner_set_frequency = ctl->tuner_set_frequency;
feconf->tuner_get_frequency = ctl->tuner_get_frequency;
feconf->tuner_set_bandwidth = ctl->tuner_set_bandwidth;
feconf->tuner_get_bandwidth = ctl->tuner_get_bandwidth;
feconf->tuner_set_bbgain = ctl->tuner_set_bbgain;
feconf->tuner_get_bbgain = ctl->tuner_get_bbgain;
feconf->tuner_set_refclk = ctl->tuner_set_refclk;
feconf->tuner_get_status = ctl->tuner_get_status;
return 0;
}
static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
int (*start_feed)(struct dvb_demux_feed *),
int (*stop_feed)(struct dvb_demux_feed *),
void *priv)
{
dvbdemux->priv = priv;
dvbdemux->filternum = 256;
dvbdemux->feednum = 256;
dvbdemux->start_feed = start_feed;
dvbdemux->stop_feed = stop_feed;
dvbdemux->write_to_decoder = NULL;
dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
DMX_SECTION_FILTERING |
DMX_MEMORY_BASED_FILTERING);
return dvb_dmx_init(dvbdemux);
}
static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
struct dvb_demux *dvbdemux,
struct dmx_frontend *hw_frontend,
struct dmx_frontend *mem_frontend,
struct dvb_adapter *dvb_adapter)
{
int ret;
dmxdev->filternum = 256;
dmxdev->demux = &dvbdemux->dmx;
dmxdev->capabilities = 0;
ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
if (ret < 0)
return ret;
hw_frontend->source = DMX_FRONTEND_0;
dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
mem_frontend->source = DMX_MEMORY_FE;
dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
}
static int start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
struct ddb_input *input = dvbdmx->priv;
if (!input->users)
ddb_input_start(input);
return ++input->users;
}
static int stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
struct ddb_input *input = dvbdmx->priv;
if (--input->users)
return input->users;
ddb_input_stop(input);
return 0;
}
static void dvb_input_detach(struct ddb_input *input)
{
struct dvb_adapter *adap = &input->adap;
struct dvb_demux *dvbdemux = &input->demux;
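/*
 * Tear down in reverse order of dvb_input_attach(); each case below
 * intentionally falls through into the cleanup of the earlier stages.
 */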
switch (input->attached) {
case 5:
if (input->fe2)
dvb_unregister_frontend(input->fe2);
if (input->fe) {
dvb_unregister_frontend(input->fe);
dvb_frontend_detach(input->fe);
input->fe = NULL;
}
case 4:
dvb_net_release(&input->dvbnet);
case 3:
dvbdemux->dmx.close(&dvbdemux->dmx);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
&input->hw_frontend);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
&input->mem_frontend);
dvb_dmxdev_release(&input->dmxdev);
case 2:
dvb_dmx_release(&input->demux);
case 1:
dvb_unregister_adapter(adap);
}
input->attached = 0;
}
static int dvb_input_attach(struct ddb_input *input)
{
int ret;
struct ddb_port *port = input->port;
struct dvb_adapter *adap = &input->adap;
struct dvb_demux *dvbdemux = &input->demux;
ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE,
&input->port->dev->pdev->dev,
adapter_nr);
if (ret < 0) {
printk(KERN_ERR "ddbridge: Could not register adapter."
"Check if you enabled enough adapters in dvb-core!\n");
return ret;
}
input->attached = 1;
ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
start_feed,
stop_feed, input);
if (ret < 0)
return ret;
input->attached = 2;
ret = my_dvb_dmxdev_ts_card_init(&input->dmxdev, &input->demux,
&input->hw_frontend,
&input->mem_frontend, adap);
if (ret < 0)
return ret;
input->attached = 3;
ret = dvb_net_init(adap, &input->dvbnet, input->dmxdev.demux);
if (ret < 0)
return ret;
input->attached = 4;
input->fe = NULL;
switch (port->type) {
case DDB_TUNER_DVBS_ST:
if (demod_attach_stv0900(input, 0) < 0)
return -ENODEV;
if (tuner_attach_stv6110(input, 0) < 0)
return -ENODEV;
if (input->fe) {
if (dvb_register_frontend(adap, input->fe) < 0)
return -ENODEV;
}
break;
case DDB_TUNER_DVBS_ST_AA:
if (demod_attach_stv0900(input, 1) < 0)
return -ENODEV;
if (tuner_attach_stv6110(input, 1) < 0)
return -ENODEV;
if (input->fe) {
if (dvb_register_frontend(adap, input->fe) < 0)
return -ENODEV;
}
break;
case DDB_TUNER_DVBCT_TR:
if (demod_attach_drxk(input) < 0)
return -ENODEV;
if (tuner_attach_tda18271(input) < 0)
return -ENODEV;
if (input->fe) {
if (dvb_register_frontend(adap, input->fe) < 0)
return -ENODEV;
}
if (input->fe2) {
if (dvb_register_frontend(adap, input->fe2) < 0)
return -ENODEV;
input->fe2->tuner_priv = input->fe->tuner_priv;
memcpy(&input->fe2->ops.tuner_ops,
&input->fe->ops.tuner_ops,
sizeof(struct dvb_tuner_ops));
}
break;
}
input->attached = 5;
return 0;
}
/****************************************************************************/
/****************************************************************************/
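/*
* CI transport-stream I/O: transfers are gated on whole 188-byte TS
* packets, and O_NONBLOCK callers get -EAGAIN when no packet-sized
* chunk can be moved.
*/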
static ssize_t ts_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
struct ddb_output *output = dvbdev->priv;
size_t left = count;
int stat;
while (left) {
if (ddb_output_free(output) < 188) {
if (file->f_flags & O_NONBLOCK)
break;
if (wait_event_interruptible(
output->wq, ddb_output_free(output) >= 188) < 0)
break;
}
stat = ddb_output_write(output, buf, left);
if (stat < 0)
break;
buf += stat;
left -= stat;
}
return (left == count) ? -EAGAIN : (count - left);
}
static ssize_t ts_read(struct file *file, char *buf,
size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
struct ddb_output *output = dvbdev->priv;
struct ddb_input *input = output->port->input[0];
int left, read;
count -= count % 188;
left = count;
while (left) {
if (ddb_input_avail(input) < 188) {
if (file->f_flags & O_NONBLOCK)
break;
if (wait_event_interruptible(
input->wq, ddb_input_avail(input) >= 188) < 0)
break;
}
read = ddb_input_read(input, buf, left);
if (read < 0)
return read;
left -= read;
buf += read;
}
return (left == count) ? -EAGAIN : (count - left);
}
static unsigned int ts_poll(struct file *file, poll_table *wait)
{
/*
struct dvb_device *dvbdev = file->private_data;
struct ddb_output *output = dvbdev->priv;
struct ddb_input *input = output->port->input[0];
*/
unsigned int mask = 0;
#if 0
if (data_avail_to_read)
mask |= POLLIN | POLLRDNORM;
if (data_avail_to_write)
mask |= POLLOUT | POLLWRNORM;
poll_wait(file, &read_queue, wait);
poll_wait(file, &write_queue, wait);
#endif
return mask;
}
static const struct file_operations ci_fops = {
.owner = THIS_MODULE,
.read = ts_read,
.write = ts_write,
.open = dvb_generic_open,
.release = dvb_generic_release,
.poll = ts_poll,
.mmap = NULL,
};
static struct dvb_device dvbdev_ci = {
.priv = NULL,
.readers = -1,
.writers = -1,
.users = -1,
.fops = &ci_fops,
};
/****************************************************************************/
/****************************************************************************/
/****************************************************************************/
static void input_tasklet(unsigned long data)
{
struct ddb_input *input = (struct ddb_input *) data;
struct ddb *dev = input->port->dev;
spin_lock(&input->lock);
if (!input->running) {
spin_unlock(&input->lock);
return;
}
input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr));
if (input->port->class == DDB_PORT_TUNER) {
if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))
printk(KERN_ERR "Overflow input %d\n", input->nr);
while (input->cbuf != ((input->stat >> 11) & 0x1f)
|| (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))) {
dvb_dmx_swfilter_packets(&input->demux,
input->vbuf[input->cbuf],
input->dma_buf_size / 188);
input->cbuf = (input->cbuf + 1) % input->dma_buf_num;
ddbwritel((input->cbuf << 11),
DMA_BUFFER_ACK(input->nr));
input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr));
}
}
if (input->port->class == DDB_PORT_CI)
wake_up(&input->wq);
spin_unlock(&input->lock);
}
static void output_tasklet(unsigned long data)
{
struct ddb_output *output = (struct ddb_output *) data;
struct ddb *dev = output->port->dev;
spin_lock(&output->lock);
if (!output->running) {
spin_unlock(&output->lock);
return;
}
output->stat = ddbreadl(DMA_BUFFER_CURRENT(output->nr + 8));
wake_up(&output->wq);
spin_unlock(&output->lock);
}
struct cxd2099_cfg cxd_cfg = {
.bitrate = 62000,
.adr = 0x40,
.polarity = 1,
.clock_mode = 1,
};
static int ddb_ci_attach(struct ddb_port *port)
{
int ret;
ret = dvb_register_adapter(&port->output->adap,
"DDBridge",
THIS_MODULE,
&port->dev->pdev->dev,
adapter_nr);
if (ret < 0)
return ret;
port->en = cxd2099_attach(&cxd_cfg, port, &port->i2c->adap);
if (!port->en) {
dvb_unregister_adapter(&port->output->adap);
return -ENODEV;
}
ddb_input_start(port->input[0]);
ddb_output_start(port->output);
dvb_ca_en50221_init(&port->output->adap,
port->en, 0, 1);
ret = dvb_register_device(&port->output->adap, &port->output->dev,
&dvbdev_ci, (void *) port->output,
DVB_DEVICE_SEC);
return ret;
}
static int ddb_port_attach(struct ddb_port *port)
{
int ret = 0;
switch (port->class) {
case DDB_PORT_TUNER:
ret = dvb_input_attach(port->input[0]);
if (ret < 0)
break;
ret = dvb_input_attach(port->input[1]);
break;
case DDB_PORT_CI:
ret = ddb_ci_attach(port);
break;
default:
break;
}
if (ret < 0)
printk(KERN_ERR "port_attach on port %d failed\n", port->nr);
return ret;
}
static int ddb_ports_attach(struct ddb *dev)
{
int i, ret = 0;
struct ddb_port *port;
for (i = 0; i < dev->info->port_num; i++) {
port = &dev->port[i];
ret = ddb_port_attach(port);
if (ret < 0)
break;
}
return ret;
}
static void ddb_ports_detach(struct ddb *dev)
{
int i;
struct ddb_port *port;
for (i = 0; i < dev->info->port_num; i++) {
port = &dev->port[i];
switch (port->class) {
case DDB_PORT_TUNER:
dvb_input_detach(port->input[0]);
dvb_input_detach(port->input[1]);
break;
case DDB_PORT_CI:
if (port->output->dev)
dvb_unregister_device(port->output->dev);
if (port->en) {
ddb_input_stop(port->input[0]);
ddb_output_stop(port->output);
dvb_ca_en50221_release(port->en);
kfree(port->en);
port->en = NULL;
dvb_unregister_adapter(&port->output->adap);
}
break;
}
}
}
/****************************************************************************/
/****************************************************************************/
static int port_has_ci(struct ddb_port *port)
{
u8 val;
return i2c_read_reg(&port->i2c->adap, 0x40, 0, &val) ? 0 : 1;
}
static int port_has_stv0900(struct ddb_port *port)
{
u8 val;
if (i2c_read_reg16(&port->i2c->adap, 0x69, 0xf100, &val) < 0)
return 0;
return 1;
}
static int port_has_stv0900_aa(struct ddb_port *port)
{
u8 val;
if (i2c_read_reg16(&port->i2c->adap, 0x68, 0xf100, &val) < 0)
return 0;
return 1;
}
static int port_has_drxks(struct ddb_port *port)
{
u8 val;
if (i2c_read(&port->i2c->adap, 0x29, &val) < 0)
return 0;
if (i2c_read(&port->i2c->adap, 0x2a, &val) < 0)
return 0;
return 1;
}
static void ddb_port_probe(struct ddb_port *port)
{
struct ddb *dev = port->dev;
char *modname = "NO MODULE";
port->class = DDB_PORT_NONE;
if (port_has_ci(port)) {
modname = "CI";
port->class = DDB_PORT_CI;
ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
} else if (port_has_stv0900(port)) {
modname = "DUAL DVB-S2";
port->class = DDB_PORT_TUNER;
port->type = DDB_TUNER_DVBS_ST;
ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
} else if (port_has_stv0900_aa(port)) {
modname = "DUAL DVB-S2";
port->class = DDB_PORT_TUNER;
port->type = DDB_TUNER_DVBS_ST_AA;
ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
} else if (port_has_drxks(port)) {
modname = "DUAL DVB-C/T";
port->class = DDB_PORT_TUNER;
port->type = DDB_TUNER_DVBCT_TR;
ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
}
printk(KERN_INFO "Port %d (TAB %d): %s\n",
port->nr, port->nr+1, modname);
}
static void ddb_input_init(struct ddb_port *port, int nr)
{
struct ddb *dev = port->dev;
struct ddb_input *input = &dev->input[nr];
input->nr = nr;
input->port = port;
input->dma_buf_num = INPUT_DMA_BUFS;
input->dma_buf_size = INPUT_DMA_SIZE;
ddbwritel(0, TS_INPUT_CONTROL(nr));
ddbwritel(2, TS_INPUT_CONTROL(nr));
ddbwritel(0, TS_INPUT_CONTROL(nr));
ddbwritel(0, DMA_BUFFER_ACK(nr));
tasklet_init(&input->tasklet, input_tasklet, (unsigned long) input);
spin_lock_init(&input->lock);
init_waitqueue_head(&input->wq);
}
static void ddb_output_init(struct ddb_port *port, int nr)
{
struct ddb *dev = port->dev;
struct ddb_output *output = &dev->output[nr];
output->nr = nr;
output->port = port;
output->dma_buf_num = OUTPUT_DMA_BUFS;
output->dma_buf_size = OUTPUT_DMA_SIZE;
ddbwritel(0, TS_OUTPUT_CONTROL(nr));
ddbwritel(2, TS_OUTPUT_CONTROL(nr));
ddbwritel(0, TS_OUTPUT_CONTROL(nr));
tasklet_init(&output->tasklet, output_tasklet, (unsigned long) output);
init_waitqueue_head(&output->wq);
}
static void ddb_ports_init(struct ddb *dev)
{
int i;
struct ddb_port *port;
for (i = 0; i < dev->info->port_num; i++) {
port = &dev->port[i];
port->dev = dev;
port->nr = i;
port->i2c = &dev->i2c[i];
port->input[0] = &dev->input[2 * i];
port->input[1] = &dev->input[2 * i + 1];
port->output = &dev->output[i];
mutex_init(&port->i2c_gate_lock);
ddb_port_probe(port);
ddb_input_init(port, 2 * i);
ddb_input_init(port, 2 * i + 1);
ddb_output_init(port, i);
}
}
static void ddb_ports_release(struct ddb *dev)
{
int i;
struct ddb_port *port;
for (i = 0; i < dev->info->port_num; i++) {
port = &dev->port[i];
port->dev = dev;
tasklet_kill(&port->input[0]->tasklet);
tasklet_kill(&port->input[1]->tasklet);
tasklet_kill(&port->output->tasklet);
}
}
/****************************************************************************/
/****************************************************************************/
/****************************************************************************/
static void irq_handle_i2c(struct ddb *dev, int n)
{
struct ddb_i2c *i2c = &dev->i2c[n];
i2c->done = 1;
wake_up(&i2c->wq);
}
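/*
* INTERRUPT_STATUS bit layout as decoded below: bits 0-3 signal the
* four I2C masters, bits 8-15 the eight input DMA channels and
* bits 16-19 the four output DMA channels.
*/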
static irqreturn_t irq_handler(int irq, void *dev_id)
{
struct ddb *dev = (struct ddb *) dev_id;
u32 s = ddbreadl(INTERRUPT_STATUS);
if (!s)
return IRQ_NONE;
do {
ddbwritel(s, INTERRUPT_ACK);
if (s & 0x00000001)
irq_handle_i2c(dev, 0);
if (s & 0x00000002)
irq_handle_i2c(dev, 1);
if (s & 0x00000004)
irq_handle_i2c(dev, 2);
if (s & 0x00000008)
irq_handle_i2c(dev, 3);
if (s & 0x00000100)
tasklet_schedule(&dev->input[0].tasklet);
if (s & 0x00000200)
tasklet_schedule(&dev->input[1].tasklet);
if (s & 0x00000400)
tasklet_schedule(&dev->input[2].tasklet);
if (s & 0x00000800)
tasklet_schedule(&dev->input[3].tasklet);
if (s & 0x00001000)
tasklet_schedule(&dev->input[4].tasklet);
if (s & 0x00002000)
tasklet_schedule(&dev->input[5].tasklet);
if (s & 0x00004000)
tasklet_schedule(&dev->input[6].tasklet);
if (s & 0x00008000)
tasklet_schedule(&dev->input[7].tasklet);
if (s & 0x00010000)
tasklet_schedule(&dev->output[0].tasklet);
if (s & 0x00020000)
tasklet_schedule(&dev->output[1].tasklet);
if (s & 0x00040000)
tasklet_schedule(&dev->output[2].tasklet);
if (s & 0x00080000)
tasklet_schedule(&dev->output[3].tasklet);
/* if (s & 0x000f0000) printk(KERN_DEBUG "%08x\n", istat); */
} while ((s = ddbreadl(INTERRUPT_STATUS)));
return IRQ_HANDLED;
}
/******************************************************************************/
/******************************************************************************/
/******************************************************************************/
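/*
* SPI flash access helper. The register semantics here are inferred
* from usage, not from a datasheet: SPI_CONTROL bit 2 (0x0004) reads
* back as a busy flag polled after each word, the 0x1f00 field encodes
* the trailing byte count, and writing 0x0003 appears to mark the final
* transfer of a transaction.
*/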
static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
{
u32 data, shift;
if (wlen > 4)
ddbwritel(1, SPI_CONTROL);
while (wlen > 4) {
/* FIXME: check for big-endian */
data = swab32(*(u32 *)wbuf);
wbuf += 4;
wlen -= 4;
ddbwritel(data, SPI_DATA);
while (ddbreadl(SPI_CONTROL) & 0x0004)
;
}
if (rlen)
ddbwritel(0x0001 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL);
else
ddbwritel(0x0003 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL);
data = 0;
shift = ((4 - wlen) * 8);
while (wlen) {
data <<= 8;
data |= *wbuf;
wlen--;
wbuf++;
}
if (shift)
data <<= shift;
ddbwritel(data, SPI_DATA);
while (ddbreadl(SPI_CONTROL) & 0x0004)
;
if (!rlen) {
ddbwritel(0, SPI_CONTROL);
return 0;
}
if (rlen > 4)
ddbwritel(1, SPI_CONTROL);
while (rlen > 4) {
ddbwritel(0xffffffff, SPI_DATA);
while (ddbreadl(SPI_CONTROL) & 0x0004)
;
data = ddbreadl(SPI_DATA);
*(u32 *) rbuf = swab32(data);
rbuf += 4;
rlen -= 4;
}
ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL);
ddbwritel(0xffffffff, SPI_DATA);
while (ddbreadl(SPI_CONTROL) & 0x0004)
;
data = ddbreadl(SPI_DATA);
ddbwritel(0, SPI_CONTROL);
if (rlen < 4)
data <<= ((4 - rlen) * 8);
while (rlen > 0) {
*rbuf = ((data >> 24) & 0xff);
data <<= 8;
rbuf++;
rlen--;
}
return 0;
}
#define DDB_MAGIC 'd'
struct ddb_flashio {
__u8 *write_buf;
__u32 write_len;
__u8 *read_buf;
__u32 read_len;
};
#define IOCTL_DDB_FLASHIO _IOWR(DDB_MAGIC, 0x00, struct ddb_flashio)
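/*
* Illustrative userspace sketch (not part of the driver): reading the
* SPI flash JEDEC ID through IOCTL_DDB_FLASHIO. The 0x9f opcode is the
* conventional JEDEC READ ID command and is an assumption about the
* attached flash part, not something this driver guarantees; the device
* node path follows ddb_devnode() below.
*
* struct ddb_flashio fio;
* __u8 wbuf[1] = { 0x9f }; /. assumed JEDEC READ ID opcode ./
* __u8 rbuf[3];
* int fd = open("/dev/ddbridge/card0", O_RDWR);
* fio.write_buf = wbuf; fio.write_len = 1;
* fio.read_buf = rbuf; fio.read_len = 3;
* ioctl(fd, IOCTL_DDB_FLASHIO, &fio);
*/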
#define DDB_NAME "ddbridge"
static u32 ddb_num;
static struct ddb *ddbs[32];
static struct class *ddb_class;
static int ddb_major;
static int ddb_open(struct inode *inode, struct file *file)
{
struct ddb *dev = ddbs[iminor(inode)];
file->private_data = dev;
return 0;
}
static long ddb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ddb *dev = file->private_data;
void __user *parg = (void __user *)arg;
int res;
switch (cmd) {
case IOCTL_DDB_FLASHIO:
{
struct ddb_flashio fio;
u8 *rbuf, *wbuf;
if (copy_from_user(&fio, parg, sizeof(fio)))
return -EFAULT;
if (fio.write_len > 1028 || fio.read_len > 1028)
return -EINVAL;
if (fio.write_len + fio.read_len > 1028)
return -EINVAL;
wbuf = &dev->iobuf[0];
rbuf = wbuf + fio.write_len;
if (copy_from_user(wbuf, fio.write_buf, fio.write_len))
return -EFAULT;
res = flashio(dev, wbuf, fio.write_len, rbuf, fio.read_len);
if (res)
return res;
if (copy_to_user(fio.read_buf, rbuf, fio.read_len))
return -EFAULT;
break;
}
default:
return -ENOTTY;
}
return 0;
}
static const struct file_operations ddb_fops = {
.unlocked_ioctl = ddb_ioctl,
.open = ddb_open,
};
static char *ddb_devnode(struct device *device, umode_t *mode)
{
struct ddb *dev = dev_get_drvdata(device);
return kasprintf(GFP_KERNEL, "ddbridge/card%d", dev->nr);
}
static int ddb_class_create(void)
{
ddb_major = register_chrdev(0, DDB_NAME, &ddb_fops);
if (ddb_major < 0)
return ddb_major;
ddb_class = class_create(THIS_MODULE, DDB_NAME);
if (IS_ERR(ddb_class)) {
unregister_chrdev(ddb_major, DDB_NAME);
return PTR_ERR(ddb_class);
}
ddb_class->devnode = ddb_devnode;
return 0;
}
static void ddb_class_destroy(void)
{
class_destroy(ddb_class);
unregister_chrdev(ddb_major, DDB_NAME);
}
static int ddb_device_create(struct ddb *dev)
{
dev->nr = ddb_num++;
dev->ddb_dev = device_create(ddb_class, NULL,
MKDEV(ddb_major, dev->nr),
dev, "ddbridge%d", dev->nr);
ddbs[dev->nr] = dev;
if (IS_ERR(dev->ddb_dev))
return -1;
return 0;
}
static void ddb_device_destroy(struct ddb *dev)
{
ddb_num--;
if (IS_ERR(dev->ddb_dev))
return;
device_destroy(ddb_class, MKDEV(ddb_major, dev->nr));
}
/****************************************************************************/
/****************************************************************************/
/****************************************************************************/
static void ddb_unmap(struct ddb *dev)
{
if (dev->regs)
iounmap(dev->regs);
vfree(dev);
}
static void __devexit ddb_remove(struct pci_dev *pdev)
{
struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev);
ddb_ports_detach(dev);
ddb_i2c_release(dev);
ddbwritel(0, INTERRUPT_ENABLE);
free_irq(dev->pdev->irq, dev);
#ifdef CONFIG_PCI_MSI
if (dev->msi)
pci_disable_msi(dev->pdev);
#endif
ddb_ports_release(dev);
ddb_buffers_free(dev);
ddb_device_destroy(dev);
ddb_unmap(dev);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
}
static int __devinit ddb_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct ddb *dev;
int stat = 0;
int irq_flag = IRQF_SHARED;
if (pci_enable_device(pdev) < 0)
return -ENODEV;
dev = vzalloc(sizeof(struct ddb));
if (dev == NULL)
return -ENOMEM;
dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
dev->info = (struct ddb_info *) id->driver_data;
printk(KERN_INFO "DDBridge driver detected: %s\n", dev->info->name);
dev->regs = ioremap(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
if (!dev->regs) {
stat = -ENOMEM;
goto fail;
}
printk(KERN_INFO "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4));
#ifdef CONFIG_PCI_MSI
if (pci_msi_enabled())
stat = pci_enable_msi(dev->pdev);
else
stat = -ENODEV;
if (stat) {
printk(KERN_INFO "ddbridge: MSI not available.\n");
} else {
irq_flag = 0;
dev->msi = 1;
}
#endif
stat = request_irq(dev->pdev->irq, irq_handler,
irq_flag, "DDBridge", (void *) dev);
if (stat < 0)
goto fail1;
ddbwritel(0, DMA_BASE_WRITE);
ddbwritel(0, DMA_BASE_READ);
ddbwritel(0xffffffff, INTERRUPT_ACK);
ddbwritel(0xfff0f, INTERRUPT_ENABLE);
ddbwritel(0, MSI1_ENABLE);
if (ddb_i2c_init(dev) < 0)
goto fail1;
ddb_ports_init(dev);
if (ddb_buffers_alloc(dev) < 0) {
printk(KERN_INFO ": Could not allocate buffer memory\n");
goto fail2;
}
if (ddb_ports_attach(dev) < 0)
goto fail3;
ddb_device_create(dev);
return 0;
fail3:
ddb_ports_detach(dev);
printk(KERN_ERR "fail3\n");
ddb_ports_release(dev);
fail2:
printk(KERN_ERR "fail2\n");
ddb_buffers_free(dev);
fail1:
printk(KERN_ERR "fail1\n");
if (dev->msi)
pci_disable_msi(dev->pdev);
/* only free the IRQ if request_irq() actually succeeded */
if (stat == 0)
free_irq(dev->pdev->irq, dev);
fail:
printk(KERN_ERR "fail\n");
ddb_unmap(dev);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return -1;
}
/******************************************************************************/
/******************************************************************************/
/******************************************************************************/
static struct ddb_info ddb_none = {
.type = DDB_NONE,
.name = "Digital Devices PCIe bridge",
};
static struct ddb_info ddb_octopus = {
.type = DDB_OCTOPUS,
.name = "Digital Devices Octopus DVB adapter",
.port_num = 4,
};
static struct ddb_info ddb_octopus_le = {
.type = DDB_OCTOPUS,
.name = "Digital Devices Octopus LE DVB adapter",
.port_num = 2,
};
static struct ddb_info ddb_v6 = {
.type = DDB_OCTOPUS,
.name = "Digital Devices Cine S2 V6 DVB adapter",
.port_num = 3,
};
#define DDVID 0xdd01 /* Digital Devices Vendor ID */
#define DDB_ID(_vend, _dev, _subvend, _subdev, _driverdata) { \
.vendor = _vend, .device = _dev, \
.subvendor = _subvend, .subdevice = _subdev, \
.driver_data = (unsigned long)&_driverdata }
static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
DDB_ID(DDVID, 0x0003, DDVID, 0x0010, ddb_octopus),
DDB_ID(DDVID, 0x0003, DDVID, 0x0020, ddb_v6),
/* in case sub-ids got deleted in flash */
DDB_ID(DDVID, 0x0003, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
{0}
};
MODULE_DEVICE_TABLE(pci, ddb_id_tbl);
static struct pci_driver ddb_pci_driver = {
.name = "DDBridge",
.id_table = ddb_id_tbl,
.probe = ddb_probe,
.remove = __devexit_p(ddb_remove),
};
static __init int module_init_ddbridge(void)
{
printk(KERN_INFO "Digital Devices PCIE bridge driver, "
"Copyright (C) 2010-11 Digital Devices GmbH\n");
if (ddb_class_create())
return -1;
return pci_register_driver(&ddb_pci_driver);
}
static __exit void module_exit_ddbridge(void)
{
pci_unregister_driver(&ddb_pci_driver);
ddb_class_destroy();
}
module_init(module_init_ddbridge);
module_exit(module_exit_ddbridge);
MODULE_DESCRIPTION("Digital Devices PCIe Bridge");
MODULE_AUTHOR("Ralph Metzler");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.5");
| gpl-2.0 |
CyanogenMod/android_kernel_sony_msm8960t | drivers/video/backlight/ld9040.c | 4808 | 16816 | /*
* ld9040 AMOLED LCD panel driver.
*
* Copyright (c) 2011 Samsung Electronics
* Author: Donghwa Lee <dh09.lee@samsung.com>
* Derived from drivers/video/backlight/s6e63m0.c
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/wait.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/lcd.h>
#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include "ld9040_gamma.h"
#define SLEEPMSEC 0x1000
#define ENDDEF 0x2000
#define DEFMASK 0xFF00
#define COMMAND_ONLY 0xFE
#define DATA_ONLY 0xFF
#define MIN_BRIGHTNESS 0
#define MAX_BRIGHTNESS 24
#define power_is_on(pwr) ((pwr) <= FB_BLANK_NORMAL)
struct ld9040 {
struct device *dev;
struct spi_device *spi;
unsigned int power;
unsigned int current_brightness;
struct lcd_device *ld;
struct backlight_device *bd;
struct lcd_platform_data *lcd_pd;
struct mutex lock;
bool enabled;
};
static struct regulator_bulk_data supplies[] = {
{ .supply = "vdd3", },
{ .supply = "vci", },
};
static void ld9040_regulator_enable(struct ld9040 *lcd)
{
int ret = 0;
struct lcd_platform_data *pd = NULL;
pd = lcd->lcd_pd;
mutex_lock(&lcd->lock);
if (!lcd->enabled) {
ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
if (ret)
goto out;
lcd->enabled = true;
}
mdelay(pd->power_on_delay);
out:
mutex_unlock(&lcd->lock);
}
static void ld9040_regulator_disable(struct ld9040 *lcd)
{
int ret = 0;
mutex_lock(&lcd->lock);
if (lcd->enabled) {
ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
if (ret)
goto out;
lcd->enabled = false;
}
out:
mutex_unlock(&lcd->lock);
}
static const unsigned short seq_swreset[] = {
0x01, COMMAND_ONLY,
ENDDEF, 0x00
};
static const unsigned short seq_user_setting[] = {
0xF0, 0x5A,
DATA_ONLY, 0x5A,
ENDDEF, 0x00
};
static const unsigned short seq_elvss_on[] = {
0xB1, 0x0D,
DATA_ONLY, 0x00,
DATA_ONLY, 0x16,
ENDDEF, 0x00
};
static const unsigned short seq_gtcon[] = {
0xF7, 0x09,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
ENDDEF, 0x00
};
static const unsigned short seq_panel_condition[] = {
0xF8, 0x05,
DATA_ONLY, 0x65,
DATA_ONLY, 0x96,
DATA_ONLY, 0x71,
DATA_ONLY, 0x7D,
DATA_ONLY, 0x19,
DATA_ONLY, 0x3B,
DATA_ONLY, 0x0D,
DATA_ONLY, 0x19,
DATA_ONLY, 0x7E,
DATA_ONLY, 0x0D,
DATA_ONLY, 0xE2,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x7E,
DATA_ONLY, 0x7D,
DATA_ONLY, 0x07,
DATA_ONLY, 0x07,
DATA_ONLY, 0x20,
DATA_ONLY, 0x20,
DATA_ONLY, 0x20,
DATA_ONLY, 0x02,
DATA_ONLY, 0x02,
ENDDEF, 0x00
};
static const unsigned short seq_gamma_set1[] = {
0xF9, 0x00,
DATA_ONLY, 0xA7,
DATA_ONLY, 0xB4,
DATA_ONLY, 0xAE,
DATA_ONLY, 0xBF,
DATA_ONLY, 0x00,
DATA_ONLY, 0x91,
DATA_ONLY, 0x00,
DATA_ONLY, 0xB2,
DATA_ONLY, 0xB4,
DATA_ONLY, 0xAA,
DATA_ONLY, 0xBB,
DATA_ONLY, 0x00,
DATA_ONLY, 0xAC,
DATA_ONLY, 0x00,
DATA_ONLY, 0xB3,
DATA_ONLY, 0xB1,
DATA_ONLY, 0xAA,
DATA_ONLY, 0xBC,
DATA_ONLY, 0x00,
DATA_ONLY, 0xB3,
ENDDEF, 0x00
};
static const unsigned short seq_gamma_ctrl[] = {
0xFB, 0x02,
DATA_ONLY, 0x5A,
ENDDEF, 0x00
};
static const unsigned short seq_gamma_start[] = {
0xF9, COMMAND_ONLY,
ENDDEF, 0x00
};
static const unsigned short seq_apon[] = {
0xF3, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x0A,
DATA_ONLY, 0x02,
ENDDEF, 0x00
};
static const unsigned short seq_display_ctrl[] = {
0xF2, 0x02,
DATA_ONLY, 0x08,
DATA_ONLY, 0x08,
DATA_ONLY, 0x10,
DATA_ONLY, 0x10,
ENDDEF, 0x00
};
static const unsigned short seq_manual_pwr[] = {
0xB0, 0x04,
ENDDEF, 0x00
};
static const unsigned short seq_pwr_ctrl[] = {
0xF4, 0x0A,
DATA_ONLY, 0x87,
DATA_ONLY, 0x25,
DATA_ONLY, 0x6A,
DATA_ONLY, 0x44,
DATA_ONLY, 0x02,
DATA_ONLY, 0x88,
ENDDEF, 0x00
};
static const unsigned short seq_sleep_out[] = {
0x11, COMMAND_ONLY,
ENDDEF, 0x00
};
static const unsigned short seq_sleep_in[] = {
0x10, COMMAND_ONLY,
ENDDEF, 0x00
};
static const unsigned short seq_display_on[] = {
0x29, COMMAND_ONLY,
ENDDEF, 0x00
};
static const unsigned short seq_display_off[] = {
0x28, COMMAND_ONLY,
ENDDEF, 0x00
};
static const unsigned short seq_vci1_1st_en[] = {
0xF3, 0x10,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x02,
ENDDEF, 0x00
};
static const unsigned short seq_vl1_en[] = {
0xF3, 0x11,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x02,
ENDDEF, 0x00
};
static const unsigned short seq_vl2_en[] = {
0xF3, 0x13,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x02,
ENDDEF, 0x00
};
static const unsigned short seq_vci1_2nd_en[] = {
0xF3, 0x33,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x02,
ENDDEF, 0x00
};
static const unsigned short seq_vl3_en[] = {
0xF3, 0x37,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x02,
ENDDEF, 0x00
};
static const unsigned short seq_vreg1_amp_en[] = {
0xF3, 0x37,
DATA_ONLY, 0x01,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x02,
ENDDEF, 0x00
};
static const unsigned short seq_vgh_amp_en[] = {
0xF3, 0x37,
DATA_ONLY, 0x11,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x02,
ENDDEF, 0x00
};
static const unsigned short seq_vgl_amp_en[] = {
0xF3, 0x37,
DATA_ONLY, 0x31,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x02,
ENDDEF, 0x00
};
static const unsigned short seq_vmos_amp_en[] = {
0xF3, 0x37,
DATA_ONLY, 0xB1,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x03,
ENDDEF, 0x00
};
static const unsigned short seq_vint_amp_en[] = {
0xF3, 0x37,
DATA_ONLY, 0xF1,
/* DATA_ONLY, 0x71, VMOS/VBL/VBH not used */
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x03,
/* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
ENDDEF, 0x00
};
static const unsigned short seq_vbh_amp_en[] = {
0xF3, 0x37,
DATA_ONLY, 0xF9,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x03,
ENDDEF, 0x00
};
static const unsigned short seq_vbl_amp_en[] = {
0xF3, 0x37,
DATA_ONLY, 0xFD,
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x03,
ENDDEF, 0x00
};
static const unsigned short seq_gam_amp_en[] = {
0xF3, 0x37,
DATA_ONLY, 0xFF,
/* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
DATA_ONLY, 0x00,
DATA_ONLY, 0x00,
DATA_ONLY, 0x03,
/* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
ENDDEF, 0x00
};
static const unsigned short seq_sd_amp_en[] = {
0xF3, 0x37,
DATA_ONLY, 0xFF,
/* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
DATA_ONLY, 0x80,
DATA_ONLY, 0x00,
DATA_ONLY, 0x03,
/* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
ENDDEF, 0x00
};
static const unsigned short seq_gls_en[] = {
0xF3, 0x37,
DATA_ONLY, 0xFF,
/* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
DATA_ONLY, 0x81,
DATA_ONLY, 0x00,
DATA_ONLY, 0x03,
/* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
ENDDEF, 0x00
};
static const unsigned short seq_els_en[] = {
0xF3, 0x37,
DATA_ONLY, 0xFF,
/* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
DATA_ONLY, 0x83,
DATA_ONLY, 0x00,
DATA_ONLY, 0x03,
/* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
ENDDEF, 0x00
};
static const unsigned short seq_el_on[] = {
0xF3, 0x37,
DATA_ONLY, 0xFF,
/* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
DATA_ONLY, 0x87,
DATA_ONLY, 0x00,
DATA_ONLY, 0x03,
/* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
ENDDEF, 0x00
};
static int ld9040_spi_write_byte(struct ld9040 *lcd, int addr, int data)
{
u16 buf[1];
struct spi_message msg;
struct spi_transfer xfer = {
.len = 2,
.tx_buf = buf,
};
buf[0] = (addr << 8) | data;
spi_message_init(&msg);
spi_message_add_tail(&xfer, &msg);
return spi_sync(lcd->spi, &msg);
}
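/*
* The panel expects 9-bit SPI words (see spi->bits_per_word = 9 in
* probe): bit 8 carries the command/data select, which is why the
* address is shifted into the high bit above.
*/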
static int ld9040_spi_write(struct ld9040 *lcd, unsigned char address,
unsigned char command)
{
int ret = 0;
if (address != DATA_ONLY)
ret = ld9040_spi_write_byte(lcd, 0x0, address);
if (command != COMMAND_ONLY)
ret = ld9040_spi_write_byte(lcd, 0x1, command);
return ret;
}
static int ld9040_panel_send_sequence(struct ld9040 *lcd,
const unsigned short *wbuf)
{
int ret = 0, i = 0;
while ((wbuf[i] & DEFMASK) != ENDDEF) {
if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
ret = ld9040_spi_write(lcd, wbuf[i], wbuf[i+1]);
if (ret)
break;
} else
mdelay(wbuf[i+1]); /* ms-scale delay; udelay() is unreliable here */
i += 2;
}
return ret;
}
static int _ld9040_gamma_ctl(struct ld9040 *lcd, const unsigned int *gamma)
{
unsigned int i = 0;
int ret = 0;
/* start gamma table updating. */
ret = ld9040_panel_send_sequence(lcd, seq_gamma_start);
if (ret) {
dev_err(lcd->dev, "failed to disable gamma table updating.\n");
goto gamma_err;
}
for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
ret = ld9040_spi_write(lcd, DATA_ONLY, gamma[i]);
if (ret) {
dev_err(lcd->dev, "failed to set gamma table.\n");
goto gamma_err;
}
}
/* update gamma table. */
ret = ld9040_panel_send_sequence(lcd, seq_gamma_ctrl);
if (ret)
dev_err(lcd->dev, "failed to update gamma table.\n");
gamma_err:
return ret;
}
static int ld9040_gamma_ctl(struct ld9040 *lcd, int gamma)
{
int ret = 0;
ret = _ld9040_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
return ret;
}
static int ld9040_ldi_init(struct ld9040 *lcd)
{
int ret, i;
static const unsigned short *init_seq[] = {
seq_user_setting,
seq_panel_condition,
seq_display_ctrl,
seq_manual_pwr,
seq_elvss_on,
seq_gtcon,
seq_gamma_set1,
seq_gamma_ctrl,
seq_sleep_out,
};
for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
ret = ld9040_panel_send_sequence(lcd, init_seq[i]);
/* workaround: minimum delay time for transferring CMD */
udelay(300);
if (ret)
break;
}
return ret;
}
static int ld9040_ldi_enable(struct ld9040 *lcd)
{
int ret = 0;
ret = ld9040_panel_send_sequence(lcd, seq_display_on);
return ret;
}
static int ld9040_ldi_disable(struct ld9040 *lcd)
{
int ret;
ret = ld9040_panel_send_sequence(lcd, seq_display_off);
ret = ld9040_panel_send_sequence(lcd, seq_sleep_in);
return ret;
}
static int ld9040_power_on(struct ld9040 *lcd)
{
int ret = 0;
struct lcd_platform_data *pd = NULL;
pd = lcd->lcd_pd;
if (!pd) {
dev_err(lcd->dev, "platform data is NULL.\n");
return -EFAULT;
}
/* lcd power on */
ld9040_regulator_enable(lcd);
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
return -EFAULT;
} else {
pd->reset(lcd->ld);
mdelay(pd->reset_delay);
}
ret = ld9040_ldi_init(lcd);
if (ret) {
dev_err(lcd->dev, "failed to initialize ldi.\n");
return ret;
}
ret = ld9040_ldi_enable(lcd);
if (ret) {
dev_err(lcd->dev, "failed to enable ldi.\n");
return ret;
}
return 0;
}
static int ld9040_power_off(struct ld9040 *lcd)
{
int ret = 0;
struct lcd_platform_data *pd = NULL;
pd = lcd->lcd_pd;
if (!pd) {
dev_err(lcd->dev, "platform data is NULL.\n");
return -EFAULT;
}
ret = ld9040_ldi_disable(lcd);
if (ret) {
dev_err(lcd->dev, "lcd setting failed.\n");
return -EIO;
}
mdelay(pd->power_off_delay);
/* lcd power off */
ld9040_regulator_disable(lcd);
return 0;
}
static int ld9040_power(struct ld9040 *lcd, int power)
{
int ret = 0;
if (power_is_on(power) && !power_is_on(lcd->power))
ret = ld9040_power_on(lcd);
else if (!power_is_on(power) && power_is_on(lcd->power))
ret = ld9040_power_off(lcd);
if (!ret)
lcd->power = power;
return ret;
}
static int ld9040_set_power(struct lcd_device *ld, int power)
{
struct ld9040 *lcd = lcd_get_data(ld);
if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
power != FB_BLANK_NORMAL) {
dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
return -EINVAL;
}
return ld9040_power(lcd, power);
}
static int ld9040_get_power(struct lcd_device *ld)
{
struct ld9040 *lcd = lcd_get_data(ld);
return lcd->power;
}
static int ld9040_get_brightness(struct backlight_device *bd)
{
return bd->props.brightness;
}
static int ld9040_set_brightness(struct backlight_device *bd)
{
int ret = 0, brightness = bd->props.brightness;
struct ld9040 *lcd = bl_get_data(bd);
if (brightness < MIN_BRIGHTNESS ||
brightness > bd->props.max_brightness) {
dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
MIN_BRIGHTNESS, MAX_BRIGHTNESS);
return -EINVAL;
}
ret = ld9040_gamma_ctl(lcd, bd->props.brightness);
if (ret) {
dev_err(&bd->dev, "lcd brightness setting failed.\n");
return -EIO;
}
return ret;
}
static struct lcd_ops ld9040_lcd_ops = {
.set_power = ld9040_set_power,
.get_power = ld9040_get_power,
};
static const struct backlight_ops ld9040_backlight_ops = {
.get_brightness = ld9040_get_brightness,
.update_status = ld9040_set_brightness,
};
static int ld9040_probe(struct spi_device *spi)
{
int ret = 0;
struct ld9040 *lcd = NULL;
struct lcd_device *ld = NULL;
struct backlight_device *bd = NULL;
struct backlight_properties props;
lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
/* ld9040 lcd panel uses 3-wire 9bits SPI Mode. */
spi->bits_per_word = 9;
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
goto out_free_lcd;
}
lcd->spi = spi;
lcd->dev = &spi->dev;
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
ret = -EINVAL;
goto out_free_lcd;
}
mutex_init(&lcd->lock);
ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
if (ret) {
dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
goto out_free_lcd;
}
ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
goto out_free_lcd;
}
lcd->ld = ld;
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = MAX_BRIGHTNESS;
bd = backlight_device_register("ld9040-bl", &spi->dev,
lcd, &ld9040_backlight_ops, &props);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
goto out_unregister_lcd;
}
bd->props.brightness = MAX_BRIGHTNESS;
lcd->bd = bd;
/*
* If the LCD panel was already enabled by the bootloader (e.g. u-boot),
* do not power it on again.
*/
if (!lcd->lcd_pd->lcd_enabled) {
/*
* The panel was left off by the bootloader: mark it as powered
* down, then bring it up.
*/
lcd->power = FB_BLANK_POWERDOWN;
ld9040_power(lcd, FB_BLANK_UNBLANK);
} else
lcd->power = FB_BLANK_UNBLANK;
dev_set_drvdata(&spi->dev, lcd);
dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
return 0;
out_unregister_lcd:
lcd_device_unregister(lcd->ld);
out_free_lcd:
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
kfree(lcd);
return ret;
}
static int __devexit ld9040_remove(struct spi_device *spi)
{
struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
ld9040_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
kfree(lcd);
return 0;
}
#if defined(CONFIG_PM)
static int ld9040_suspend(struct spi_device *spi, pm_message_t mesg)
{
int ret = 0;
struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
/*
* when lcd panel is suspend, lcd panel becomes off
* regardless of status.
*/
ret = ld9040_power(lcd, FB_BLANK_POWERDOWN);
return ret;
}
static int ld9040_resume(struct spi_device *spi)
{
int ret = 0;
struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
lcd->power = FB_BLANK_POWERDOWN;
ret = ld9040_power(lcd, FB_BLANK_UNBLANK);
return ret;
}
#else
#define ld9040_suspend NULL
#define ld9040_resume NULL
#endif
/* Power down all displays on reboot, poweroff or halt. */
static void ld9040_shutdown(struct spi_device *spi)
{
struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
ld9040_power(lcd, FB_BLANK_POWERDOWN);
}
static struct spi_driver ld9040_driver = {
.driver = {
.name = "ld9040",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ld9040_probe,
.remove = __devexit_p(ld9040_remove),
.shutdown = ld9040_shutdown,
.suspend = ld9040_suspend,
.resume = ld9040_resume,
};
module_spi_driver(ld9040_driver);
MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
MODULE_DESCRIPTION("ld9040 LCD Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
klabit87/kltevzw_pb1 | drivers/video/backlight/lms283gf05.c | 4808 | 4850 | /*
* lms283gf05.c -- support for Samsung LMS283GF05 LCD
*
* Copyright (c) 2009 Marek Vasut <marek.vasut@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/lcd.h>
#include <linux/spi/spi.h>
#include <linux/spi/lms283gf05.h>
#include <linux/module.h>
struct lms283gf05_state {
struct spi_device *spi;
struct lcd_device *ld;
};
struct lms283gf05_seq {
unsigned char reg;
unsigned short value;
unsigned char delay;
};
/* Magic sequences supplied by manufacturer, for details refer to datasheet */
static struct lms283gf05_seq disp_initseq[] = {
/* REG, VALUE, DELAY */
{ 0x07, 0x0000, 0 },
{ 0x13, 0x0000, 10 },
{ 0x11, 0x3004, 0 },
{ 0x14, 0x200F, 0 },
{ 0x10, 0x1a20, 0 },
{ 0x13, 0x0040, 50 },
{ 0x13, 0x0060, 0 },
{ 0x13, 0x0070, 200 },
{ 0x01, 0x0127, 0 },
{ 0x02, 0x0700, 0 },
{ 0x03, 0x1030, 0 },
{ 0x08, 0x0208, 0 },
{ 0x0B, 0x0620, 0 },
{ 0x0C, 0x0110, 0 },
{ 0x30, 0x0120, 0 },
{ 0x31, 0x0127, 0 },
{ 0x32, 0x0000, 0 },
{ 0x33, 0x0503, 0 },
{ 0x34, 0x0727, 0 },
{ 0x35, 0x0124, 0 },
{ 0x36, 0x0706, 0 },
{ 0x37, 0x0701, 0 },
{ 0x38, 0x0F00, 0 },
{ 0x39, 0x0F00, 0 },
{ 0x40, 0x0000, 0 },
{ 0x41, 0x0000, 0 },
{ 0x42, 0x013f, 0 },
{ 0x43, 0x0000, 0 },
{ 0x44, 0x013f, 0 },
{ 0x45, 0x0000, 0 },
{ 0x46, 0xef00, 0 },
{ 0x47, 0x013f, 0 },
{ 0x48, 0x0000, 0 },
{ 0x07, 0x0015, 30 },
{ 0x07, 0x0017, 0 },
{ 0x20, 0x0000, 0 },
{ 0x21, 0x0000, 0 },
{ 0x22, 0x0000, 0 }
};
static struct lms283gf05_seq disp_pdwnseq[] = {
{ 0x07, 0x0016, 30 },
{ 0x07, 0x0004, 0 },
{ 0x10, 0x0220, 20 },
{ 0x13, 0x0060, 50 },
{ 0x13, 0x0040, 50 },
{ 0x13, 0x0000, 0 },
{ 0x10, 0x0000, 0 }
};
static void lms283gf05_reset(unsigned long gpio, bool inverted)
{
gpio_set_value(gpio, !inverted);
mdelay(100);
gpio_set_value(gpio, inverted);
mdelay(20);
gpio_set_value(gpio, !inverted);
mdelay(20);
}
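/*
* Each sequence entry below is sent as two 3-byte transfers: 0x74 plus
* the 16-bit register index, then 0x76 plus the 16-bit value (an
* index/data register scheme), followed by the per-entry delay.
*/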
static void lms283gf05_toggle(struct spi_device *spi,
struct lms283gf05_seq *seq, int sz)
{
char buf[3];
int i;
for (i = 0; i < sz; i++) {
buf[0] = 0x74;
buf[1] = 0x00;
buf[2] = seq[i].reg;
spi_write(spi, buf, 3);
buf[0] = 0x76;
buf[1] = seq[i].value >> 8;
buf[2] = seq[i].value & 0xff;
spi_write(spi, buf, 3);
mdelay(seq[i].delay);
}
}
static int lms283gf05_power_set(struct lcd_device *ld, int power)
{
struct lms283gf05_state *st = lcd_get_data(ld);
struct spi_device *spi = st->spi;
struct lms283gf05_pdata *pdata = spi->dev.platform_data;
if (power <= FB_BLANK_NORMAL) {
if (pdata)
lms283gf05_reset(pdata->reset_gpio,
pdata->reset_inverted);
lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
} else {
lms283gf05_toggle(spi, disp_pdwnseq, ARRAY_SIZE(disp_pdwnseq));
if (pdata)
gpio_set_value(pdata->reset_gpio,
pdata->reset_inverted);
}
return 0;
}
static struct lcd_ops lms_ops = {
.set_power = lms283gf05_power_set,
.get_power = NULL,
};
static int __devinit lms283gf05_probe(struct spi_device *spi)
{
struct lms283gf05_state *st;
struct lms283gf05_pdata *pdata = spi->dev.platform_data;
struct lcd_device *ld;
int ret = 0;
if (pdata != NULL) {
ret = gpio_request(pdata->reset_gpio, "LMS285GF05 RESET");
if (ret)
return ret;
ret = gpio_direction_output(pdata->reset_gpio,
!pdata->reset_inverted);
if (ret)
goto err;
}
st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL);
if (st == NULL) {
dev_err(&spi->dev, "No memory for device state\n");
ret = -ENOMEM;
goto err;
}
ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
goto err2;
}
st->spi = spi;
st->ld = ld;
dev_set_drvdata(&spi->dev, st);
/* kick in the LCD */
if (pdata)
lms283gf05_reset(pdata->reset_gpio, pdata->reset_inverted);
lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
return 0;
err2:
kfree(st);
err:
if (pdata != NULL)
gpio_free(pdata->reset_gpio);
return ret;
}
static int __devexit lms283gf05_remove(struct spi_device *spi)
{
struct lms283gf05_state *st = dev_get_drvdata(&spi->dev);
struct lms283gf05_pdata *pdata = st->spi->dev.platform_data;
lcd_device_unregister(st->ld);
if (pdata != NULL)
gpio_free(pdata->reset_gpio);
kfree(st);
return 0;
}
static struct spi_driver lms283gf05_driver = {
.driver = {
.name = "lms283gf05",
.owner = THIS_MODULE,
},
.probe = lms283gf05_probe,
.remove = __devexit_p(lms283gf05_remove),
};
module_spi_driver(lms283gf05_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("LCD283GF05 LCD");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
grzmot22/android_kernel_samsung_jf | drivers/video/backlight/l4f00242t03.c | 4808 | 7026 | /*
* l4f00242t03.c -- support for Epson L4F00242T03 LCD
*
* Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
*
* Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
* Inspired by Marek Vasut's work in lms283gf05.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/lcd.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <linux/spi/l4f00242t03.h>
struct l4f00242t03_priv {
struct spi_device *spi;
struct lcd_device *ld;
int lcd_state;
struct regulator *io_reg;
struct regulator *core_reg;
};
static void l4f00242t03_reset(unsigned int gpio)
{
pr_debug("l4f00242t03_reset.\n");
gpio_set_value(gpio, 1);
mdelay(100);
gpio_set_value(gpio, 0);
mdelay(10); /* tRES >= 100us */
gpio_set_value(gpio, 1);
mdelay(20);
}
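/* With 9-bit SPI words (bits_per_word = 9 in probe), setting bit 8
* via param() marks a word as a parameter byte; bare values are
* commands. */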
#define param(x) ((x) | 0x100)
static void l4f00242t03_lcd_init(struct spi_device *spi)
{
struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) };
dev_dbg(&spi->dev, "initializing LCD\n");
regulator_set_voltage(priv->io_reg, 1800000, 1800000);
regulator_enable(priv->io_reg);
regulator_set_voltage(priv->core_reg, 2800000, 2800000);
regulator_enable(priv->core_reg);
l4f00242t03_reset(pdata->reset_gpio);
gpio_set_value(pdata->data_enable_gpio, 1);
msleep(60);
spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16));
}
static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
{
struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
dev_dbg(&spi->dev, "Powering down LCD\n");
gpio_set_value(pdata->data_enable_gpio, 0);
regulator_disable(priv->io_reg);
regulator_disable(priv->core_reg);
}
static int l4f00242t03_lcd_power_get(struct lcd_device *ld)
{
struct l4f00242t03_priv *priv = lcd_get_data(ld);
return priv->lcd_state;
}
static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
{
struct l4f00242t03_priv *priv = lcd_get_data(ld);
struct spi_device *spi = priv->spi;
const u16 slpout = 0x11;
const u16 dison = 0x29;
const u16 slpin = 0x10;
const u16 disoff = 0x28;
if (power <= FB_BLANK_NORMAL) {
if (priv->lcd_state <= FB_BLANK_NORMAL) {
/* Do nothing, the LCD is running */
} else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
dev_dbg(&spi->dev, "Resuming LCD\n");
spi_write(spi, (const u8 *)&slpout, sizeof(u16));
msleep(60);
spi_write(spi, (const u8 *)&dison, sizeof(u16));
} else {
/* priv->lcd_state == FB_BLANK_POWERDOWN */
l4f00242t03_lcd_init(spi);
priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
l4f00242t03_lcd_power_set(priv->ld, power);
}
} else if (power < FB_BLANK_POWERDOWN) {
if (priv->lcd_state <= FB_BLANK_NORMAL) {
/* Send the display in standby */
dev_dbg(&spi->dev, "Standby the LCD\n");
spi_write(spi, (const u8 *)&disoff, sizeof(u16));
msleep(60);
spi_write(spi, (const u8 *)&slpin, sizeof(u16));
} else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
/* Do nothing, the LCD is already in standby */
} else {
/* priv->lcd_state == FB_BLANK_POWERDOWN */
l4f00242t03_lcd_init(spi);
priv->lcd_state = FB_BLANK_UNBLANK;
l4f00242t03_lcd_power_set(ld, power);
}
} else {
/* power == FB_BLANK_POWERDOWN */
if (priv->lcd_state != FB_BLANK_POWERDOWN) {
/* Clear the screen before shutting down */
spi_write(spi, (const u8 *)&disoff, sizeof(u16));
msleep(60);
l4f00242t03_lcd_powerdown(spi);
}
}
priv->lcd_state = power;
return 0;
}
static struct lcd_ops l4f_ops = {
.set_power = l4f00242t03_lcd_power_set,
.get_power = l4f00242t03_lcd_power_get,
};
static int __devinit l4f00242t03_probe(struct spi_device *spi)
{
struct l4f00242t03_priv *priv;
struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
int ret;
if (pdata == NULL) {
dev_err(&spi->dev, "Uninitialized platform data.\n");
return -EINVAL;
}
priv = kzalloc(sizeof(struct l4f00242t03_priv), GFP_KERNEL);
if (priv == NULL) {
dev_err(&spi->dev, "No memory for this device.\n");
return -ENOMEM;
}
dev_set_drvdata(&spi->dev, priv);
spi->bits_per_word = 9;
spi_setup(spi);
priv->spi = spi;
ret = gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH,
"lcd l4f00242t03 reset");
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 reset gpio.\n");
goto err;
}
ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW,
"lcd l4f00242t03 data enable");
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 data en gpio.\n");
goto err2;
}
priv->io_reg = regulator_get(&spi->dev, "vdd");
if (IS_ERR(priv->io_reg)) {
ret = PTR_ERR(priv->io_reg);
dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
__func__);
goto err3;
}
priv->core_reg = regulator_get(&spi->dev, "vcore");
if (IS_ERR(priv->core_reg)) {
ret = PTR_ERR(priv->core_reg);
dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
__func__);
goto err4;
}
priv->ld = lcd_device_register("l4f00242t03",
&spi->dev, priv, &l4f_ops);
if (IS_ERR(priv->ld)) {
ret = PTR_ERR(priv->ld);
goto err5;
}
/* Init the LCD */
l4f00242t03_lcd_init(spi);
priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_UNBLANK);
dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n");
return 0;
err5:
regulator_put(priv->core_reg);
err4:
regulator_put(priv->io_reg);
err3:
gpio_free(pdata->data_enable_gpio);
err2:
gpio_free(pdata->reset_gpio);
err:
kfree(priv);
return ret;
}
static int __devexit l4f00242t03_remove(struct spi_device *spi)
{
struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
struct l4f00242t03_pdata *pdata = priv->spi->dev.platform_data;
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
lcd_device_unregister(priv->ld);
dev_set_drvdata(&spi->dev, NULL);
gpio_free(pdata->data_enable_gpio);
gpio_free(pdata->reset_gpio);
regulator_put(priv->io_reg);
regulator_put(priv->core_reg);
kfree(priv);
return 0;
}
static void l4f00242t03_shutdown(struct spi_device *spi)
{
struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
if (priv)
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
}
static struct spi_driver l4f00242t03_driver = {
.driver = {
.name = "l4f00242t03",
.owner = THIS_MODULE,
},
.probe = l4f00242t03_probe,
.remove = __devexit_p(l4f00242t03_remove),
.shutdown = l4f00242t03_shutdown,
};
module_spi_driver(l4f00242t03_driver);
MODULE_AUTHOR("Alberto Panizzo <maramaopercheseimorto@gmail.com>");
MODULE_DESCRIPTION("EPSON L4F00242T03 LCD");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |